mirror of
https://github.com/kovidgoyal/calibre.git
synced 2025-07-09 03:04:10 -04:00
Sync to trunk.
This commit is contained in:
commit
7ccec34155
@ -19,6 +19,83 @@
|
|||||||
# new recipes:
|
# new recipes:
|
||||||
# - title:
|
# - title:
|
||||||
|
|
||||||
|
- version: 0.7.55
|
||||||
|
date: 2011-04-15
|
||||||
|
|
||||||
|
new features:
|
||||||
|
- title: "Add a menu bar. Useful if you use a lot of plugins and are running out of space in your toolbars. By default the menu bar is hidden (except on OS X). You can add actions to it via Preferences->Toolbars. As soon as you add actions, it will become visible."
|
||||||
|
|
||||||
|
- title: "OS X: Make the main calibre window look a little more 'native' on OS X"
|
||||||
|
|
||||||
|
- title: "Show recently viewed books in the View button's drop down menu"
|
||||||
|
|
||||||
|
- title: "Add a button next to the search bar to toggle easily between highlight and restrict search modes"
|
||||||
|
|
||||||
|
- title: "Allow the use of arbitrary searches as search restrictions, rather than just saved searches. Do this by using the special entry '*Current Search' in the Search Restriction dropdown."
|
||||||
|
|
||||||
|
- title: "The Connect/share icon now changes color to indicate that the content server is running"
|
||||||
|
tickets: [755444]
|
||||||
|
|
||||||
|
- title: "Device drivers for Viewpad 7, Motorola Xoom and Asus Eee Note"
|
||||||
|
|
||||||
|
- title: "Add tags like composite custom column."
|
||||||
|
tickets: [759663]
|
||||||
|
|
||||||
|
- title: "Add a new date format code 'iso'. Permits formatting dates to see the complete time (via Preferences->Tweaks)"
|
||||||
|
|
||||||
|
- title: "Allow the use of data from the size column in the template language"
|
||||||
|
tickets: [759645]
|
||||||
|
|
||||||
|
- title: "Support reading/writing covers to txtz/htmlz files"
|
||||||
|
|
||||||
|
- title: "Speedup for large library sorting when using composite custom columns"
|
||||||
|
|
||||||
|
- title: "Move the boolean columns are tristate tweak to Preferences->Behavior"
|
||||||
|
|
||||||
|
bug fixes:
|
||||||
|
- title: "Fix a regression in 0.7.54 that broke reading covers/metadata from cbz files."
|
||||||
|
tickets: [756892]
|
||||||
|
|
||||||
|
- title: "Fix tweak names and help not translatable"
|
||||||
|
tickets: [756736]
|
||||||
|
|
||||||
|
- title: "When the size of a book is less that 0.1MB but not zero, display the size as <0.1 instead of 0.0."
|
||||||
|
tickets: [755768]
|
||||||
|
|
||||||
|
- title: "HTMLZ input: Fix handling of HTML files encoded in an encoding other than UTF-8"
|
||||||
|
|
||||||
|
- title: "EPUB Input: Fix EPUB files with empty Adobe PAGE templates causing conversion to abort."
|
||||||
|
tickets: [760390]
|
||||||
|
|
||||||
|
- title: "Fix CHM input plugin not closing opened input file"
|
||||||
|
tickets: [760589]
|
||||||
|
|
||||||
|
- title: "MOBI Output: Make super/subscripts use a slightly smaller font when rendered on a Kindle. Also allow the use of vertical-align:top/bottom in the CSS to specify a super/subscript."
|
||||||
|
tickets: [758667]
|
||||||
|
|
||||||
|
- title: "LRF Input: Detect and workaround LRF files that have deeply nested spans, instead of crashing."
|
||||||
|
tickets: [759680]
|
||||||
|
|
||||||
|
- title: "MOBI Output: Fix bug that would cause conversion to unneccessarily abort when malformed hyperlinks are present in the input document."
|
||||||
|
tickets: [759313]
|
||||||
|
|
||||||
|
- title: "Make true and false searches work correctly for numeric fields."
|
||||||
|
|
||||||
|
- title: "MOBI Output: The Ignore margins setting no longer ignores blockquotes, only margins set via CSS on other elements."
|
||||||
|
tickets: [758675]
|
||||||
|
|
||||||
|
- title: "Fix regression that caused clicking auto send to also change the email address in Preferences->Email"
|
||||||
|
|
||||||
|
improved recipes:
|
||||||
|
- Wall Street Journal
|
||||||
|
- Weblogs SL
|
||||||
|
- Tabu.ro
|
||||||
|
- Vecernje Novosti
|
||||||
|
|
||||||
|
new recipes:
|
||||||
|
- title: Hallo Assen and Dvhn
|
||||||
|
author: Reijendert
|
||||||
|
|
||||||
|
|
||||||
- version: 0.7.54
|
- version: 0.7.54
|
||||||
date: 2011-04-08
|
date: 2011-04-08
|
||||||
|
@ -36,29 +36,38 @@ class BigOven(BasicNewsRecipe):
|
|||||||
|
|
||||||
remove_attributes = ['style', 'font']
|
remove_attributes = ['style', 'font']
|
||||||
|
|
||||||
remove_tags = [dict(name='div', attrs={'class':['ppy-caption']})
|
def get_article_url(self, article):
|
||||||
,dict(name='div', attrs={'id':['float_corner']})
|
url = article.get('feedburner_origlink',article.get('link', None))
|
||||||
|
front, middle, end = url.partition('comhttp//www.bigoven.com')
|
||||||
|
url = front + 'com' + end
|
||||||
|
return url
|
||||||
|
|
||||||
|
keep_only_tags = [dict(name='div', attrs={'id':['nosidebar_main']})]
|
||||||
|
|
||||||
|
remove_tags_after = [dict(name='div', attrs={'class':['display-field']})]
|
||||||
|
|
||||||
|
remove_tags = [dict(name='ul', attrs={'class':['tabs']})]
|
||||||
|
|
||||||
|
preprocess_regexps = [
|
||||||
|
(re.compile(r'Want detailed nutrition information?', re.DOTALL), lambda match: ''),
|
||||||
|
(re.compile('\(You could win \$100 in our ', re.DOTALL), lambda match: ''),
|
||||||
]
|
]
|
||||||
|
|
||||||
def preprocess_html(self, soup):
|
def preprocess_html(self, soup):
|
||||||
for tag in soup.findAll(name='a', attrs={'class':['deflink']}):
|
|
||||||
tag.replaceWith(tag.string)
|
|
||||||
for tag in soup.findAll(name='a', text=re.compile(r'.*View Metric.*', re.DOTALL)):
|
for tag in soup.findAll(name='a', text=re.compile(r'.*View Metric.*', re.DOTALL)):
|
||||||
tag.parent.parent.extract()
|
tag.parent.parent.extract()
|
||||||
for tag in soup.findAll(name='a', text=re.compile(r'.*Add my own photo.*', re.DOTALL)):
|
for tag in soup.findAll(text=re.compile(r'.*Try BigOven Pro for Free.*', re.DOTALL)):
|
||||||
tag.parent.parent.extract()
|
|
||||||
for tag in soup.findAll(name='div', attrs={'class':['container']}):
|
|
||||||
if tag.find(name='h1'):
|
|
||||||
continue
|
|
||||||
if tag.find(name='h2', text=re.compile(r'.*Ingredients.*', re.DOTALL)):
|
|
||||||
print 'tag found Ingred h2'
|
|
||||||
continue
|
|
||||||
if tag.find(name='h2', text=re.compile(r'Preparation.*', re.DOTALL)):
|
|
||||||
print 'tag found Prep h2'
|
|
||||||
continue
|
|
||||||
tag.extract()
|
tag.extract()
|
||||||
|
for tag in soup.findAll(text=re.compile(r'.*Add my photo of this recipe.*', re.DOTALL)):
|
||||||
|
tag.parent.extract()
|
||||||
|
for tag in soup.findAll(name='a', text=re.compile(r'.*photo contest.*', re.DOTALL)):
|
||||||
|
tag.parent.extract()
|
||||||
|
for tag in soup.findAll(name='a', text='Remove ads'):
|
||||||
|
tag.parent.parent.extract()
|
||||||
|
for tag in soup.findAll(name='ol', attrs={'class':['recipe-tags']}):
|
||||||
|
tag.parent.extract()
|
||||||
return soup
|
return soup
|
||||||
|
|
||||||
feeds = [(u'4 & 5 Star Rated Recipes', u'http://feeds.feedburner.com/Bigovencom-RecipeRaves?format=xml')]
|
feeds = [(u'Recent Raves', u'http://www.bigoven.com/rss/recentraves'),
|
||||||
|
(u'Recipe Of The Day', u'http://feeds.feedburner.com/bigovencom-RecipeOfTheDay')]
|
||||||
|
|
||||||
|
36
recipes/hallo_assen.recipe
Normal file
36
recipes/hallo_assen.recipe
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
from calibre.web.feeds.news import BasicNewsRecipe
|
||||||
|
|
||||||
|
class AdvancedUserRecipe1302341394(BasicNewsRecipe):
|
||||||
|
title = u'Hallo Assen'
|
||||||
|
oldest_article = 180
|
||||||
|
max_articles_per_feed = 100
|
||||||
|
|
||||||
|
__author__ = 'Reijndert'
|
||||||
|
no_stylesheets = True
|
||||||
|
cover_url = 'http://www.halloassen.nl/multimedia/halloassen/archive/00002/HalloAssen_2518a.gif'
|
||||||
|
language = 'nl'
|
||||||
|
country = 'NL'
|
||||||
|
version = 1
|
||||||
|
category = u'Nieuws'
|
||||||
|
timefmt = ' %Y-%m-%d (%a)'
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
keep_only_tags = [dict(name='div', attrs={'class':'photoFrame'})
|
||||||
|
,dict(name='div', attrs={'class':'textContent'})
|
||||||
|
]
|
||||||
|
|
||||||
|
remove_tags = [
|
||||||
|
dict(name='div',attrs={'id':'articleLinks'})
|
||||||
|
,dict(name='div',attrs={'class':'categories clearfix'})
|
||||||
|
,dict(name='div',attrs={'id':'rating'})
|
||||||
|
,dict(name='div',attrs={'id':'comments'})
|
||||||
|
]
|
||||||
|
|
||||||
|
feeds = [(u'Ons Nieuws', u'http://feeds.feedburner.com/halloassen/onsnieuws'), (u'Politie', u'http://www.halloassen.nl/rss/?c=37'), (u'Rechtbank', u'http://www.halloassen.nl/rss/?c=39'), (u'Justitie', u'http://www.halloassen.nl/rss/?c=36'), (u'Evenementen', u'http://www.halloassen.nl/rss/?c=34'), (u'Cultuur', u'http://www.halloassen.nl/rss/?c=32'), (u'Politiek', u'http://www.halloassen.nl/rss/?c=38'), (u'Economie', u'http://www.halloassen.nl/rss/?c=33')]
|
||||||
|
|
||||||
|
|
||||||
|
extra_css = '''
|
||||||
|
body {font-family: verdana, arial, helvetica, geneva, sans-serif;}
|
||||||
|
'''
|
||||||
|
|
@ -18,7 +18,6 @@ class IrishTimes(BasicNewsRecipe):
|
|||||||
oldest_article = 1.0
|
oldest_article = 1.0
|
||||||
max_articles_per_feed = 100
|
max_articles_per_feed = 100
|
||||||
no_stylesheets = True
|
no_stylesheets = True
|
||||||
simultaneous_downloads= 5
|
|
||||||
|
|
||||||
r = re.compile('.*(?P<url>http:\/\/(www.irishtimes.com)|(rss.feedsportal.com\/c)\/.*\.html?).*')
|
r = re.compile('.*(?P<url>http:\/\/(www.irishtimes.com)|(rss.feedsportal.com\/c)\/.*\.html?).*')
|
||||||
remove_tags = [dict(name='div', attrs={'class':'footer'})]
|
remove_tags = [dict(name='div', attrs={'class':'footer'})]
|
||||||
@ -26,17 +25,17 @@ class IrishTimes(BasicNewsRecipe):
|
|||||||
|
|
||||||
feeds = [
|
feeds = [
|
||||||
('Frontpage', 'http://www.irishtimes.com/feeds/rss/newspaper/index.rss'),
|
('Frontpage', 'http://www.irishtimes.com/feeds/rss/newspaper/index.rss'),
|
||||||
('Ireland', 'http://www.irishtimes.com/feeds/rss/newspaper/ireland.rss'),
|
('Ireland', 'http://rss.feedsportal.com/c/851/f/10845/index.rss'),
|
||||||
('World', 'http://www.irishtimes.com/feeds/rss/newspaper/world.rss'),
|
('World', 'http://rss.feedsportal.com/c/851/f/10846/index.rss'),
|
||||||
('Finance', 'http://www.irishtimes.com/feeds/rss/newspaper/finance.rss'),
|
('Finance', 'http://rss.feedsportal.com/c/851/f/10847/index.rss'),
|
||||||
('Features', 'http://www.irishtimes.com/feeds/rss/newspaper/features.rss'),
|
('Features', 'http://rss.feedsportal.com/c/851/f/10848/index.rss'),
|
||||||
('Sport', 'http://www.irishtimes.com/feeds/rss/newspaper/sport.rss'),
|
('Sport', 'http://rss.feedsportal.com/c/851/f/10849/index.rss'),
|
||||||
('Opinion', 'http://www.irishtimes.com/feeds/rss/newspaper/opinion.rss'),
|
('Opinion', 'http://rss.feedsportal.com/c/851/f/10850/index.rss'),
|
||||||
('Letters', 'http://www.irishtimes.com/feeds/rss/newspaper/letters.rss'),
|
('Letters', 'http://rss.feedsportal.com/c/851/f/10851/index.rss'),
|
||||||
('Magazine', 'http://www.irishtimes.com/feeds/rss/newspaper/magazine.rss'),
|
('Magazine', 'http://www.irishtimes.com/feeds/rss/newspaper/magazine.rss'),
|
||||||
('Health', 'http://www.irishtimes.com/feeds/rss/newspaper/health.rss'),
|
('Health', 'http://rss.feedsportal.com/c/851/f/10852/index.rss'),
|
||||||
('Education & Parenting', 'http://www.irishtimes.com/feeds/rss/newspaper/education.rss'),
|
('Education & Parenting', 'http://rss.feedsportal.com/c/851/f/10853/index.rss'),
|
||||||
('Motors', 'http://www.irishtimes.com/feeds/rss/newspaper/motors.rss'),
|
('Motors', 'http://rss.feedsportal.com/c/851/f/10854/index.rss'),
|
||||||
('An Teanga Bheo', 'http://www.irishtimes.com/feeds/rss/newspaper/anteangabheo.rss'),
|
('An Teanga Bheo', 'http://www.irishtimes.com/feeds/rss/newspaper/anteangabheo.rss'),
|
||||||
('Commercial Property', 'http://www.irishtimes.com/feeds/rss/newspaper/commercialproperty.rss'),
|
('Commercial Property', 'http://www.irishtimes.com/feeds/rss/newspaper/commercialproperty.rss'),
|
||||||
('Science Today', 'http://www.irishtimes.com/feeds/rss/newspaper/sciencetoday.rss'),
|
('Science Today', 'http://www.irishtimes.com/feeds/rss/newspaper/sciencetoday.rss'),
|
||||||
@ -57,5 +56,3 @@ class IrishTimes(BasicNewsRecipe):
|
|||||||
|
|
||||||
def get_article_url(self, article):
|
def get_article_url(self, article):
|
||||||
return article.link
|
return article.link
|
||||||
|
|
||||||
|
|
||||||
|
@ -1,12 +1,12 @@
|
|||||||
__license__ = 'GPL v3'
|
__license__ = 'GPL v3'
|
||||||
__copyright__ = '2008-2010, Darko Miletic <darko.miletic at gmail.com>'
|
__copyright__ = '2008-2011, Darko Miletic <darko.miletic at gmail.com>'
|
||||||
'''
|
'''
|
||||||
nspm.rs
|
nspm.rs
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import re
|
import re
|
||||||
from calibre.web.feeds.news import BasicNewsRecipe
|
from calibre.web.feeds.news import BasicNewsRecipe
|
||||||
from calibre.ebooks.BeautifulSoup import NavigableString
|
from calibre.ebooks.BeautifulSoup import NavigableString, Tag
|
||||||
|
|
||||||
class Nspm(BasicNewsRecipe):
|
class Nspm(BasicNewsRecipe):
|
||||||
title = 'Nova srpska politicka misao'
|
title = 'Nova srpska politicka misao'
|
||||||
@ -21,7 +21,6 @@ class Nspm(BasicNewsRecipe):
|
|||||||
INDEX = 'http://www.nspm.rs/?alphabet=l'
|
INDEX = 'http://www.nspm.rs/?alphabet=l'
|
||||||
encoding = 'utf-8'
|
encoding = 'utf-8'
|
||||||
language = 'sr'
|
language = 'sr'
|
||||||
delay = 2
|
|
||||||
remove_empty_feeds = True
|
remove_empty_feeds = True
|
||||||
publication_type = 'magazine'
|
publication_type = 'magazine'
|
||||||
masthead_url = 'http://www.nspm.rs/templates/jsn_epic_pro/images/logol.jpg'
|
masthead_url = 'http://www.nspm.rs/templates/jsn_epic_pro/images/logol.jpg'
|
||||||
@ -29,7 +28,7 @@ class Nspm(BasicNewsRecipe):
|
|||||||
@font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
|
@font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
|
||||||
body{font-family: "Times New Roman", serif1, serif}
|
body{font-family: "Times New Roman", serif1, serif}
|
||||||
.article_description{font-family: Arial, sans1, sans-serif}
|
.article_description{font-family: Arial, sans1, sans-serif}
|
||||||
img{margin-top:0.5em; margin-bottom: 0.7em}
|
img{margin-top:0.5em; margin-bottom: 0.7em; display: block}
|
||||||
.author{color: #990000; font-weight: bold}
|
.author{color: #990000; font-weight: bold}
|
||||||
.author,.createdate{font-size: 0.9em} """
|
.author,.createdate{font-size: 0.9em} """
|
||||||
|
|
||||||
@ -38,18 +37,12 @@ class Nspm(BasicNewsRecipe):
|
|||||||
, 'tags' : category
|
, 'tags' : category
|
||||||
, 'publisher' : publisher
|
, 'publisher' : publisher
|
||||||
, 'language' : language
|
, 'language' : language
|
||||||
, 'linearize_tables' : True
|
, 'pretty_print' : True
|
||||||
}
|
}
|
||||||
|
|
||||||
preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')]
|
preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')]
|
||||||
keep_only_tags = [dict(attrs={'id':'jsn-mainbody'})]
|
remove_tags = [dict(name=['link','script','meta','base','img'])]
|
||||||
remove_tags = [
|
remove_attributes = ['width','height','lang','xmlns:fb','xmlns:og','vspace','hspace','type','start','size']
|
||||||
dict(name=['link','object','embed','script','meta','base','iframe'])
|
|
||||||
,dict(attrs={'class':'buttonheading'})
|
|
||||||
]
|
|
||||||
remove_tags_before = dict(attrs={'class':'contentheading'})
|
|
||||||
remove_tags_after = dict(attrs={'class':'article_separator'})
|
|
||||||
remove_attributes = ['width','height']
|
|
||||||
|
|
||||||
def get_browser(self):
|
def get_browser(self):
|
||||||
br = BasicNewsRecipe.get_browser()
|
br = BasicNewsRecipe.get_browser()
|
||||||
@ -57,21 +50,67 @@ class Nspm(BasicNewsRecipe):
|
|||||||
return br
|
return br
|
||||||
|
|
||||||
feeds = [
|
feeds = [
|
||||||
(u'Rubrike' , u'http://www.nspm.rs/rubrike/feed/rss.html')
|
(u'Rubrike' , u'http://www.nspm.rs/rubrike/feed/rss.html' )
|
||||||
,(u'Debate' , u'http://www.nspm.rs/debate/feed/rss.html')
|
,(u'Debate' , u'http://www.nspm.rs/debate/feed/rss.html' )
|
||||||
,(u'Reci i misli' , u'http://www.nspm.rs/reci-i-misli/feed/rss.html')
|
,(u'Reci i misli' , u'http://www.nspm.rs/reci-i-misli/feed/rss.html' )
|
||||||
,(u'Samo smeh srbina spasava', u'http://www.nspm.rs/samo-smeh-srbina-spasava/feed/rss.html')
|
,(u'Samo smeh srbina spasava', u'http://www.nspm.rs/samo-smeh-srbina-spasava/feed/rss.html')
|
||||||
,(u'Polemike' , u'http://www.nspm.rs/polemike/feed/rss.html')
|
,(u'Polemike' , u'http://www.nspm.rs/polemike/feed/rss.html' )
|
||||||
,(u'Prikazi' , u'http://www.nspm.rs/prikazi/feed/rss.html')
|
,(u'Prikazi' , u'http://www.nspm.rs/prikazi/feed/rss.html' )
|
||||||
,(u'Prenosimo' , u'http://www.nspm.rs/prenosimo/feed/rss.html')
|
,(u'Prenosimo' , u'http://www.nspm.rs/prenosimo/feed/rss.html' )
|
||||||
,(u'Hronika' , u'http://www.nspm.rs/tabela/hronika/feed/rss.html')
|
,(u'Hronika' , u'http://www.nspm.rs/tabela/hronika/feed/rss.html' )
|
||||||
]
|
]
|
||||||
|
|
||||||
def preprocess_html(self, soup):
|
def preprocess_html(self, soup):
|
||||||
for item in soup.body.findAll(style=True):
|
atitle = soup.body.find('a',attrs={'class':'contentpagetitle'})
|
||||||
del item['style']
|
if atitle:
|
||||||
for item in soup.body.findAll('h1'):
|
cleanTitle = Tag(soup,'h1',[('class','contentpagetitle')])
|
||||||
nh = NavigableString(item.a.string)
|
cnt = NavigableString(self.tag_to_string(atitle))
|
||||||
item.a.extract()
|
cleanTitle.append(cnt)
|
||||||
item.insert(0,nh)
|
|
||||||
return self.adeify_images(soup)
|
author = soup.body.find('span',attrs={'class':'author'})
|
||||||
|
if author:
|
||||||
|
author.extract()
|
||||||
|
author.name = 'div'
|
||||||
|
|
||||||
|
crdate = soup.body.find('td',attrs={'class':'createdate'})
|
||||||
|
if crdate:
|
||||||
|
cleanCrdate = Tag(soup,'div',[('class','createdate')])
|
||||||
|
cnt = NavigableString(self.tag_to_string(crdate))
|
||||||
|
cleanCrdate.append(cnt)
|
||||||
|
|
||||||
|
#get the dependant element
|
||||||
|
artText = Tag(soup,'div',[('class','text')])
|
||||||
|
textHolderp = crdate.parent
|
||||||
|
textHolder = textHolderp.nextSibling
|
||||||
|
while textHolder and (not isinstance(textHolder,Tag) or (textHolder.name <> textHolderp.name)):
|
||||||
|
textHolder = textHolder.nextSibling
|
||||||
|
if textHolder.td:
|
||||||
|
artText = textHolder.td
|
||||||
|
artText.name = 'div'
|
||||||
|
artText.attrs = []
|
||||||
|
artText['class'] = 'text'
|
||||||
|
artText.extract()
|
||||||
|
|
||||||
|
soup.body.contents=[]
|
||||||
|
|
||||||
|
soup.body.append(cleanTitle)
|
||||||
|
soup.body.append(author)
|
||||||
|
soup.body.append(cleanCrdate)
|
||||||
|
soup.body.append(artText)
|
||||||
|
|
||||||
|
for item in soup.findAll('a'):
|
||||||
|
limg = item.find('img')
|
||||||
|
if item.string is not None:
|
||||||
|
str = item.string
|
||||||
|
item.replaceWith(str)
|
||||||
|
else:
|
||||||
|
if limg:
|
||||||
|
item.name = 'div'
|
||||||
|
item.attrs = []
|
||||||
|
else:
|
||||||
|
str = self.tag_to_string(item)
|
||||||
|
item.replaceWith(str)
|
||||||
|
for item in soup.findAll('img'):
|
||||||
|
if not item.has_key('alt'):
|
||||||
|
item['alt'] = 'image'
|
||||||
|
return soup
|
||||||
|
@ -31,23 +31,22 @@ class TabuRo(BasicNewsRecipe):
|
|||||||
}
|
}
|
||||||
|
|
||||||
keep_only_tags = [
|
keep_only_tags = [
|
||||||
dict(name='div', attrs={'id':'Article'}),
|
dict(name='h2', attrs={'class':'articol_titlu'}),
|
||||||
|
dict(name='div', attrs={'class':'poza_articol_featured'}),
|
||||||
|
dict(name='div', attrs={'class':'articol_text'})
|
||||||
]
|
]
|
||||||
|
|
||||||
remove_tags = [
|
remove_tags = [
|
||||||
dict(name='div', attrs={'id':['advertisementArticle']}),
|
dict(name='div', attrs={'class':'asemanatoare'})
|
||||||
dict(name='div', attrs={'class':'voting_number'}),
|
|
||||||
dict(name='div', attrs={'id':'number_votes'}),
|
|
||||||
dict(name='div', attrs={'id':'rating_one'}),
|
|
||||||
dict(name='div', attrs={'class':'float: right;'})
|
|
||||||
]
|
]
|
||||||
|
|
||||||
remove_tags_after = [
|
remove_tags_after = [
|
||||||
dict(name='div', attrs={'id':'comments'}),
|
dict(name='div', attrs={'id':'comments'}),
|
||||||
|
dict(name='div', attrs={'class':'asemanatoare'})
|
||||||
]
|
]
|
||||||
|
|
||||||
feeds = [
|
feeds = [
|
||||||
(u'Feeds', u'http://www.tabu.ro/rss_all.xml')
|
(u'Feeds', u'http://www.tabu.ro/feed/')
|
||||||
]
|
]
|
||||||
|
|
||||||
def preprocess_html(self, soup):
|
def preprocess_html(self, soup):
|
||||||
|
@ -3,7 +3,7 @@ __license__ = 'GPL v3'
|
|||||||
__copyright__ = '4 February 2011, desUBIKado'
|
__copyright__ = '4 February 2011, desUBIKado'
|
||||||
__author__ = 'desUBIKado'
|
__author__ = 'desUBIKado'
|
||||||
__version__ = 'v0.05'
|
__version__ = 'v0.05'
|
||||||
__date__ = '9, February 2011'
|
__date__ = '13, April 2011'
|
||||||
'''
|
'''
|
||||||
http://www.weblogssl.com/
|
http://www.weblogssl.com/
|
||||||
'''
|
'''
|
||||||
@ -19,7 +19,7 @@ class weblogssl(BasicNewsRecipe):
|
|||||||
category = 'Gadgets, Tech news, Product reviews, mobiles, science, cinema, entertainment, culture, tv, food, recipes, life style, motor, F1, sports, economy'
|
category = 'Gadgets, Tech news, Product reviews, mobiles, science, cinema, entertainment, culture, tv, food, recipes, life style, motor, F1, sports, economy'
|
||||||
language = 'es'
|
language = 'es'
|
||||||
timefmt = '[%a, %d %b, %Y]'
|
timefmt = '[%a, %d %b, %Y]'
|
||||||
oldest_article = 1.5
|
oldest_article = 1
|
||||||
max_articles_per_feed = 100
|
max_articles_per_feed = 100
|
||||||
encoding = 'utf-8'
|
encoding = 'utf-8'
|
||||||
use_embedded_content = False
|
use_embedded_content = False
|
||||||
@ -28,50 +28,52 @@ class weblogssl(BasicNewsRecipe):
|
|||||||
no_stylesheets = True
|
no_stylesheets = True
|
||||||
|
|
||||||
# Si no se quiere recuperar todos los blogs se puede suprimir la descarga del que se desee poniendo
|
# Si no se quiere recuperar todos los blogs se puede suprimir la descarga del que se desee poniendo
|
||||||
# un caracter # por delante, es decir, # (u'Applesfera', u'http://feeds.weblogssl.com/applesfera'),
|
# un caracter # por delante, es decir, # ,(u'Applesfera', u'http://feeds.weblogssl.com/applesfera')
|
||||||
# haría que no se descargase Applesfera. OJO: El último feed no debe llevar la coma al final
|
# haría que no se descargase Applesfera.
|
||||||
|
|
||||||
feeds = [
|
feeds = [
|
||||||
(u'Xataka', u'http://feeds.weblogssl.com/xataka2'),
|
(u'Xataka', u'http://feeds.weblogssl.com/xataka2')
|
||||||
(u'Xataka M\xf3vil', u'http://feeds.weblogssl.com/xatakamovil'),
|
,(u'Xataka M\xf3vil', u'http://feeds.weblogssl.com/xatakamovil')
|
||||||
(u'Xataka Android', u'http://feeds.weblogssl.com/xatakandroid'),
|
,(u'Xataka Android', u'http://feeds.weblogssl.com/xatakandroid')
|
||||||
(u'Xataka Foto', u'http://feeds.weblogssl.com/xatakafoto'),
|
,(u'Xataka Foto', u'http://feeds.weblogssl.com/xatakafoto')
|
||||||
(u'Xataka ON', u'http://feeds.weblogssl.com/xatakaon'),
|
,(u'Xataka ON', u'http://feeds.weblogssl.com/xatakaon')
|
||||||
(u'Xataka Ciencia', u'http://feeds.weblogssl.com/xatakaciencia'),
|
,(u'Xataka Ciencia', u'http://feeds.weblogssl.com/xatakaciencia')
|
||||||
(u'Genbeta', u'http://feeds.weblogssl.com/genbeta'),
|
,(u'Genbeta', u'http://feeds.weblogssl.com/genbeta')
|
||||||
(u'Applesfera', u'http://feeds.weblogssl.com/applesfera'),
|
,(u'Genbeta Dev', u'http://feeds.weblogssl.com/genbetadev')
|
||||||
(u'Vida Extra', u'http://feeds.weblogssl.com/vidaextra'),
|
,(u'Applesfera', u'http://feeds.weblogssl.com/applesfera')
|
||||||
(u'Naci\xf3n Red', u'http://feeds.weblogssl.com/nacionred'),
|
,(u'Vida Extra', u'http://feeds.weblogssl.com/vidaextra')
|
||||||
(u'Blog de Cine', u'http://feeds.weblogssl.com/blogdecine'),
|
,(u'Naci\xf3n Red', u'http://feeds.weblogssl.com/nacionred')
|
||||||
(u'Vaya tele', u'http://feeds.weblogssl.com/vayatele2'),
|
,(u'Blog de Cine', u'http://feeds.weblogssl.com/blogdecine')
|
||||||
(u'Hipers\xf3nica', u'http://feeds.weblogssl.com/hipersonica'),
|
,(u'Vaya tele', u'http://feeds.weblogssl.com/vayatele2')
|
||||||
(u'Diario del viajero', u'http://feeds.weblogssl.com/diariodelviajero'),
|
,(u'Hipers\xf3nica', u'http://feeds.weblogssl.com/hipersonica')
|
||||||
(u'Papel en blanco', u'http://feeds.weblogssl.com/papelenblanco'),
|
,(u'Diario del viajero', u'http://feeds.weblogssl.com/diariodelviajero')
|
||||||
(u'Pop rosa', u'http://feeds.weblogssl.com/poprosa'),
|
,(u'Papel en blanco', u'http://feeds.weblogssl.com/papelenblanco')
|
||||||
(u'Zona FandoM', u'http://feeds.weblogssl.com/zonafandom'),
|
,(u'Pop rosa', u'http://feeds.weblogssl.com/poprosa')
|
||||||
(u'Fandemia', u'http://feeds.weblogssl.com/fandemia'),
|
,(u'Zona FandoM', u'http://feeds.weblogssl.com/zonafandom')
|
||||||
(u'Noctamina', u'http://feeds.weblogssl.com/noctamina'),
|
,(u'Fandemia', u'http://feeds.weblogssl.com/fandemia')
|
||||||
(u'Tendencias', u'http://feeds.weblogssl.com/trendencias'),
|
,(u'Noctamina', u'http://feeds.weblogssl.com/noctamina')
|
||||||
(u'Beb\xe9s y m\xe1s', u'http://feeds.weblogssl.com/bebesymas'),
|
,(u'Tendencias', u'http://feeds.weblogssl.com/trendencias')
|
||||||
(u'Directo al paladar', u'http://feeds.weblogssl.com/directoalpaladar'),
|
,(u'Beb\xe9s y m\xe1s', u'http://feeds.weblogssl.com/bebesymas')
|
||||||
(u'Compradicci\xf3n', u'http://feeds.weblogssl.com/compradiccion'),
|
,(u'Directo al paladar', u'http://feeds.weblogssl.com/directoalpaladar')
|
||||||
(u'Decoesfera', u'http://feeds.weblogssl.com/decoesfera'),
|
,(u'Compradicci\xf3n', u'http://feeds.weblogssl.com/compradiccion')
|
||||||
(u'Embelezzia', u'http://feeds.weblogssl.com/embelezzia'),
|
,(u'Decoesfera', u'http://feeds.weblogssl.com/decoesfera')
|
||||||
(u'Vit\xf3nica', u'http://feeds.weblogssl.com/vitonica'),
|
,(u'Embelezzia', u'http://feeds.weblogssl.com/embelezzia')
|
||||||
(u'Ambiente G', u'http://feeds.weblogssl.com/ambienteg'),
|
,(u'Vit\xf3nica', u'http://feeds.weblogssl.com/vitonica')
|
||||||
(u'Arrebatadora', u'http://feeds.weblogssl.com/arrebatadora'),
|
,(u'Ambiente G', u'http://feeds.weblogssl.com/ambienteg')
|
||||||
(u'Mensencia', u'http://feeds.weblogssl.com/mensencia'),
|
,(u'Arrebatadora', u'http://feeds.weblogssl.com/arrebatadora')
|
||||||
(u'Peques y m\xe1s', u'http://feeds.weblogssl.com/pequesymas'),
|
,(u'Mensencia', u'http://feeds.weblogssl.com/mensencia')
|
||||||
(u'Motorpasi\xf3n', u'http://feeds.weblogssl.com/motorpasion'),
|
,(u'Peques y m\xe1s', u'http://feeds.weblogssl.com/pequesymas')
|
||||||
(u'Motorpasi\xf3n F1', u'http://feeds.weblogssl.com/motorpasionf1'),
|
,(u'Motorpasi\xf3n', u'http://feeds.weblogssl.com/motorpasion')
|
||||||
(u'Motorpasi\xf3n Moto', u'http://feeds.weblogssl.com/motorpasionmoto'),
|
,(u'Motorpasi\xf3n F1', u'http://feeds.weblogssl.com/motorpasionf1')
|
||||||
(u'Notas de futbol', u'http://feeds.weblogssl.com/notasdefutbol'),
|
,(u'Motorpasi\xf3n Moto', u'http://feeds.weblogssl.com/motorpasionmoto')
|
||||||
(u'Fuera de l\xedmites', u'http://feeds.weblogssl.com/fueradelimites'),
|
,(u'Motorpasi\xf3n Futuro', u'http://feeds.weblogssl.com/motorpasionfuturo')
|
||||||
(u'Salir a ganar', u'http://feeds.weblogssl.com/saliraganar'),
|
,(u'Notas de futbol', u'http://feeds.weblogssl.com/notasdefutbol')
|
||||||
(u'El blog salm\xf3n', u'http://feeds.weblogssl.com/elblogsalmon2'),
|
,(u'Fuera de l\xedmites', u'http://feeds.weblogssl.com/fueradelimites')
|
||||||
(u'Pymes y aut\xf3nomos', u'http://feeds.weblogssl.com/pymesyautonomos'),
|
,(u'Salir a ganar', u'http://feeds.weblogssl.com/saliraganar')
|
||||||
(u'Tecnolog\xeda Pyme', u'http://feeds.weblogssl.com/tecnologiapyme'),
|
,(u'El blog salm\xf3n', u'http://feeds.weblogssl.com/elblogsalmon2')
|
||||||
(u'Ahorro diario', u'http://feeds.weblogssl.com/ahorrodiario')
|
,(u'Pymes y aut\xf3nomos', u'http://feeds.weblogssl.com/pymesyautonomos')
|
||||||
|
,(u'Tecnolog\xeda Pyme', u'http://feeds.weblogssl.com/tecnologiapyme')
|
||||||
|
,(u'Ahorro diario', u'http://feeds.weblogssl.com/ahorrodiario')
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
@ -102,3 +104,4 @@ class weblogssl(BasicNewsRecipe):
|
|||||||
video_yt['src'] = fuente3 + '/0.jpg'
|
video_yt['src'] = fuente3 + '/0.jpg'
|
||||||
|
|
||||||
return soup
|
return soup
|
||||||
|
|
||||||
|
@ -81,6 +81,11 @@ class WallStreetJournal(BasicNewsRecipe):
|
|||||||
feeds.append((title, articles))
|
feeds.append((title, articles))
|
||||||
return feeds
|
return feeds
|
||||||
|
|
||||||
|
def abs_wsj_url(self, href):
|
||||||
|
if not href.startswith('http'):
|
||||||
|
href = 'http://online.wsj.com' + href
|
||||||
|
return href
|
||||||
|
|
||||||
def parse_index(self):
|
def parse_index(self):
|
||||||
soup = self.wsj_get_index()
|
soup = self.wsj_get_index()
|
||||||
|
|
||||||
@ -99,14 +104,14 @@ class WallStreetJournal(BasicNewsRecipe):
|
|||||||
pageone = a['href'].endswith('pageone')
|
pageone = a['href'].endswith('pageone')
|
||||||
if pageone:
|
if pageone:
|
||||||
title = 'Front Section'
|
title = 'Front Section'
|
||||||
url = 'http://online.wsj.com' + a['href']
|
url = self.abs_wsj_url(a['href'])
|
||||||
feeds = self.wsj_add_feed(feeds,title,url)
|
feeds = self.wsj_add_feed(feeds,title,url)
|
||||||
title = "What's News"
|
title = "What's News"
|
||||||
url = url.replace('pageone','whatsnews')
|
url = url.replace('pageone','whatsnews')
|
||||||
feeds = self.wsj_add_feed(feeds,title,url)
|
feeds = self.wsj_add_feed(feeds,title,url)
|
||||||
else:
|
else:
|
||||||
title = self.tag_to_string(a)
|
title = self.tag_to_string(a)
|
||||||
url = 'http://online.wsj.com' + a['href']
|
url = self.abs_wsj_url(a['href'])
|
||||||
feeds = self.wsj_add_feed(feeds,title,url)
|
feeds = self.wsj_add_feed(feeds,title,url)
|
||||||
return feeds
|
return feeds
|
||||||
|
|
||||||
@ -163,7 +168,7 @@ class WallStreetJournal(BasicNewsRecipe):
|
|||||||
title = self.tag_to_string(a).strip() + ' [%s]'%meta
|
title = self.tag_to_string(a).strip() + ' [%s]'%meta
|
||||||
else:
|
else:
|
||||||
title = self.tag_to_string(a).strip()
|
title = self.tag_to_string(a).strip()
|
||||||
url = 'http://online.wsj.com'+a['href']
|
url = self.abs_wsj_url(a['href'])
|
||||||
desc = ''
|
desc = ''
|
||||||
for p in container.findAll('p'):
|
for p in container.findAll('p'):
|
||||||
desc = self.tag_to_string(p)
|
desc = self.tag_to_string(p)
|
||||||
|
BIN
resources/images/highlight_only_off.png
Normal file
BIN
resources/images/highlight_only_off.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 810 B |
BIN
resources/images/highlight_only_on.png
Normal file
BIN
resources/images/highlight_only_on.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 396 B |
@ -2,7 +2,7 @@ __license__ = 'GPL v3'
|
|||||||
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
|
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
|
||||||
__docformat__ = 'restructuredtext en'
|
__docformat__ = 'restructuredtext en'
|
||||||
__appname__ = 'calibre'
|
__appname__ = 'calibre'
|
||||||
__version__ = '0.7.54'
|
__version__ = '0.7.55'
|
||||||
__author__ = "Kovid Goyal <kovid@kovidgoyal.net>"
|
__author__ = "Kovid Goyal <kovid@kovidgoyal.net>"
|
||||||
|
|
||||||
import re, importlib
|
import re, importlib
|
||||||
|
@ -625,8 +625,9 @@ if test_eight_code:
|
|||||||
from calibre.ebooks.metadata.sources.google import GoogleBooks
|
from calibre.ebooks.metadata.sources.google import GoogleBooks
|
||||||
from calibre.ebooks.metadata.sources.amazon import Amazon
|
from calibre.ebooks.metadata.sources.amazon import Amazon
|
||||||
from calibre.ebooks.metadata.sources.openlibrary import OpenLibrary
|
from calibre.ebooks.metadata.sources.openlibrary import OpenLibrary
|
||||||
|
from calibre.ebooks.metadata.sources.isbndb import ISBNDB
|
||||||
|
|
||||||
plugins += [GoogleBooks, Amazon, OpenLibrary]
|
plugins += [GoogleBooks, Amazon, OpenLibrary, ISBNDB]
|
||||||
|
|
||||||
# }}}
|
# }}}
|
||||||
else:
|
else:
|
||||||
|
@ -344,6 +344,7 @@ class iPadOutput(OutputProfile):
|
|||||||
border-spacing:1px;
|
border-spacing:1px;
|
||||||
margin-left: 5%;
|
margin-left: 5%;
|
||||||
margin-right: 5%;
|
margin-right: 5%;
|
||||||
|
page-break-inside:avoid;
|
||||||
width: 90%;
|
width: 90%;
|
||||||
-webkit-border-radius:4px;
|
-webkit-border-radius:4px;
|
||||||
}
|
}
|
||||||
|
@ -51,6 +51,8 @@ Run an embedded python interpreter.
|
|||||||
'with sqlite3 works.')
|
'with sqlite3 works.')
|
||||||
parser.add_option('-p', '--py-console', help='Run python console',
|
parser.add_option('-p', '--py-console', help='Run python console',
|
||||||
default=False, action='store_true')
|
default=False, action='store_true')
|
||||||
|
parser.add_option('-m', '--inspect-mobi',
|
||||||
|
help='Inspect the MOBI file at the specified path', default=None)
|
||||||
|
|
||||||
return parser
|
return parser
|
||||||
|
|
||||||
@ -227,6 +229,9 @@ def main(args=sys.argv):
|
|||||||
if len(args) > 1 and os.access(args[-1], os.R_OK):
|
if len(args) > 1 and os.access(args[-1], os.R_OK):
|
||||||
sql_dump = args[-1]
|
sql_dump = args[-1]
|
||||||
reinit_db(opts.reinitialize_db, sql_dump=sql_dump)
|
reinit_db(opts.reinitialize_db, sql_dump=sql_dump)
|
||||||
|
elif opts.inspect_mobi is not None:
|
||||||
|
from calibre.ebooks.mobi.debug import inspect_mobi
|
||||||
|
inspect_mobi(opts.inspect_mobi)
|
||||||
else:
|
else:
|
||||||
from calibre import ipython
|
from calibre import ipython
|
||||||
ipython()
|
ipython()
|
||||||
|
@ -54,6 +54,9 @@ class ANDROID(USBMS):
|
|||||||
0x6877 : [0x0400],
|
0x6877 : [0x0400],
|
||||||
},
|
},
|
||||||
|
|
||||||
|
# Viewsonic
|
||||||
|
0x0489 : { 0xc001 : [0x0226] },
|
||||||
|
|
||||||
# Acer
|
# Acer
|
||||||
0x502 : { 0x3203 : [0x0100]},
|
0x502 : { 0x3203 : [0x0100]},
|
||||||
|
|
||||||
|
@ -349,7 +349,7 @@ class ITUNES(DriverBase):
|
|||||||
break
|
break
|
||||||
break
|
break
|
||||||
if self.report_progress is not None:
|
if self.report_progress is not None:
|
||||||
self.report_progress(j+1/task_count, _('Updating device metadata listing...'))
|
self.report_progress((j+1)/task_count, _('Updating device metadata listing...'))
|
||||||
|
|
||||||
if self.report_progress is not None:
|
if self.report_progress is not None:
|
||||||
self.report_progress(1.0, _('Updating device metadata listing...'))
|
self.report_progress(1.0, _('Updating device metadata listing...'))
|
||||||
@ -428,7 +428,7 @@ class ITUNES(DriverBase):
|
|||||||
}
|
}
|
||||||
|
|
||||||
if self.report_progress is not None:
|
if self.report_progress is not None:
|
||||||
self.report_progress(i+1/book_count, _('%d of %d') % (i+1, book_count))
|
self.report_progress((i+1)/book_count, _('%d of %d') % (i+1, book_count))
|
||||||
self._purge_orphans(library_books, cached_books)
|
self._purge_orphans(library_books, cached_books)
|
||||||
|
|
||||||
elif iswindows:
|
elif iswindows:
|
||||||
@ -466,7 +466,7 @@ class ITUNES(DriverBase):
|
|||||||
}
|
}
|
||||||
|
|
||||||
if self.report_progress is not None:
|
if self.report_progress is not None:
|
||||||
self.report_progress(i+1/book_count,
|
self.report_progress((i+1)/book_count,
|
||||||
_('%d of %d') % (i+1, book_count))
|
_('%d of %d') % (i+1, book_count))
|
||||||
self._purge_orphans(library_books, cached_books)
|
self._purge_orphans(library_books, cached_books)
|
||||||
|
|
||||||
@ -916,6 +916,8 @@ class ITUNES(DriverBase):
|
|||||||
"""
|
"""
|
||||||
if DEBUG:
|
if DEBUG:
|
||||||
self.log.info("ITUNES.reset()")
|
self.log.info("ITUNES.reset()")
|
||||||
|
if report_progress:
|
||||||
|
self.set_progress_reporter(report_progress)
|
||||||
|
|
||||||
def set_progress_reporter(self, report_progress):
|
def set_progress_reporter(self, report_progress):
|
||||||
'''
|
'''
|
||||||
@ -924,6 +926,9 @@ class ITUNES(DriverBase):
|
|||||||
If it is called with -1 that means that the
|
If it is called with -1 that means that the
|
||||||
task does not have any progress information
|
task does not have any progress information
|
||||||
'''
|
'''
|
||||||
|
if DEBUG:
|
||||||
|
self.log.info("ITUNES.set_progress_reporter()")
|
||||||
|
|
||||||
self.report_progress = report_progress
|
self.report_progress = report_progress
|
||||||
|
|
||||||
def set_plugboards(self, plugboards, pb_func):
|
def set_plugboards(self, plugboards, pb_func):
|
||||||
@ -1041,7 +1046,7 @@ class ITUNES(DriverBase):
|
|||||||
|
|
||||||
# Report progress
|
# Report progress
|
||||||
if self.report_progress is not None:
|
if self.report_progress is not None:
|
||||||
self.report_progress(i+1/file_count, _('%d of %d') % (i+1, file_count))
|
self.report_progress((i+1)/file_count, _('%d of %d') % (i+1, file_count))
|
||||||
|
|
||||||
elif iswindows:
|
elif iswindows:
|
||||||
try:
|
try:
|
||||||
@ -1081,7 +1086,7 @@ class ITUNES(DriverBase):
|
|||||||
|
|
||||||
# Report progress
|
# Report progress
|
||||||
if self.report_progress is not None:
|
if self.report_progress is not None:
|
||||||
self.report_progress(i+1/file_count, _('%d of %d') % (i+1, file_count))
|
self.report_progress((i+1)/file_count, _('%d of %d') % (i+1, file_count))
|
||||||
finally:
|
finally:
|
||||||
pythoncom.CoUninitialize()
|
pythoncom.CoUninitialize()
|
||||||
|
|
||||||
@ -3065,7 +3070,7 @@ class ITUNES_ASYNC(ITUNES):
|
|||||||
}
|
}
|
||||||
|
|
||||||
if self.report_progress is not None:
|
if self.report_progress is not None:
|
||||||
self.report_progress(i+1/book_count, _('%d of %d') % (i+1, book_count))
|
self.report_progress((i+1)/book_count, _('%d of %d') % (i+1, book_count))
|
||||||
|
|
||||||
elif iswindows:
|
elif iswindows:
|
||||||
try:
|
try:
|
||||||
@ -3104,7 +3109,7 @@ class ITUNES_ASYNC(ITUNES):
|
|||||||
}
|
}
|
||||||
|
|
||||||
if self.report_progress is not None:
|
if self.report_progress is not None:
|
||||||
self.report_progress(i+1/book_count,
|
self.report_progress((i+1)/book_count,
|
||||||
_('%d of %d') % (i+1, book_count))
|
_('%d of %d') % (i+1, book_count))
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
|
@ -203,6 +203,8 @@ class CollectionsBookList(BookList):
|
|||||||
val = [orig_val]
|
val = [orig_val]
|
||||||
elif fm['datatype'] == 'text' and fm['is_multiple']:
|
elif fm['datatype'] == 'text' and fm['is_multiple']:
|
||||||
val = orig_val
|
val = orig_val
|
||||||
|
elif fm['datatype'] == 'composite' and fm['is_multiple']:
|
||||||
|
val = [v.strip() for v in val.split(fm['is_multiple'])]
|
||||||
else:
|
else:
|
||||||
val = [val]
|
val = [val]
|
||||||
|
|
||||||
|
@ -51,6 +51,7 @@ class CHMInput(InputFormatPlugin):
|
|||||||
mainpath = os.path.join(tdir, mainname)
|
mainpath = os.path.join(tdir, mainname)
|
||||||
|
|
||||||
metadata = get_metadata_from_reader(self._chm_reader)
|
metadata = get_metadata_from_reader(self._chm_reader)
|
||||||
|
self._chm_reader.CloseCHM()
|
||||||
|
|
||||||
odi = options.debug_pipeline
|
odi = options.debug_pipeline
|
||||||
options.debug_pipeline = None
|
options.debug_pipeline = None
|
||||||
|
@ -175,18 +175,18 @@ class EPUBInput(InputFormatPlugin):
|
|||||||
raise ValueError(
|
raise ValueError(
|
||||||
'EPUB files with DTBook markup are not supported')
|
'EPUB files with DTBook markup are not supported')
|
||||||
|
|
||||||
|
not_for_spine = set()
|
||||||
|
for y in opf.itermanifest():
|
||||||
|
id_ = y.get('id', None)
|
||||||
|
if id_ and y.get('media-type', None) in \
|
||||||
|
('application/vnd.adobe-page-template+xml',):
|
||||||
|
not_for_spine.add(id_)
|
||||||
|
|
||||||
for x in list(opf.iterspine()):
|
for x in list(opf.iterspine()):
|
||||||
ref = x.get('idref', None)
|
ref = x.get('idref', None)
|
||||||
if ref is None:
|
if ref is None or ref in not_for_spine:
|
||||||
x.getparent().remove(x)
|
x.getparent().remove(x)
|
||||||
continue
|
continue
|
||||||
for y in opf.itermanifest():
|
|
||||||
if y.get('id', None) == ref and y.get('media-type', None) in \
|
|
||||||
('application/vnd.adobe-page-template+xml',):
|
|
||||||
p = x.getparent()
|
|
||||||
if p is not None:
|
|
||||||
p.remove(x)
|
|
||||||
break
|
|
||||||
|
|
||||||
with open('content.opf', 'wb') as nopf:
|
with open('content.opf', 'wb') as nopf:
|
||||||
nopf.write(opf.render())
|
nopf.write(opf.render())
|
||||||
|
@ -6,8 +6,8 @@ __license__ = 'GPL v3'
|
|||||||
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
|
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||||
__docformat__ = 'restructuredtext en'
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
import os, textwrap, sys
|
import os, textwrap, sys, operator
|
||||||
from copy import deepcopy
|
from copy import deepcopy, copy
|
||||||
|
|
||||||
from lxml import etree
|
from lxml import etree
|
||||||
|
|
||||||
@ -149,9 +149,65 @@ class TextBlock(etree.XSLTExtension):
|
|||||||
self.root = root
|
self.root = root
|
||||||
self.parent = root
|
self.parent = root
|
||||||
self.add_text_to = (self.parent, 'text')
|
self.add_text_to = (self.parent, 'text')
|
||||||
|
self.fix_deep_nesting(node)
|
||||||
for child in node:
|
for child in node:
|
||||||
self.process_child(child)
|
self.process_child(child)
|
||||||
|
|
||||||
|
def fix_deep_nesting(self, node):
|
||||||
|
deepest = 1
|
||||||
|
|
||||||
|
def depth(node):
|
||||||
|
parent = node.getparent()
|
||||||
|
ans = 1
|
||||||
|
while parent is not None:
|
||||||
|
ans += 1
|
||||||
|
parent = parent.getparent()
|
||||||
|
return ans
|
||||||
|
|
||||||
|
for span in node.xpath('descendant::Span'):
|
||||||
|
d = depth(span)
|
||||||
|
if d > deepest:
|
||||||
|
deepest = d
|
||||||
|
if d > 500:
|
||||||
|
break
|
||||||
|
|
||||||
|
if deepest < 500:
|
||||||
|
return
|
||||||
|
|
||||||
|
self.log.warn('Found deeply nested spans. Flattening.')
|
||||||
|
#with open('/t/before.xml', 'wb') as f:
|
||||||
|
# f.write(etree.tostring(node, method='xml'))
|
||||||
|
|
||||||
|
spans = [(depth(span), span) for span in node.xpath('descendant::Span')]
|
||||||
|
spans.sort(key=operator.itemgetter(0), reverse=True)
|
||||||
|
|
||||||
|
for depth, span in spans:
|
||||||
|
if depth < 3:
|
||||||
|
continue
|
||||||
|
p = span.getparent()
|
||||||
|
gp = p.getparent()
|
||||||
|
idx = p.index(span)
|
||||||
|
pidx = gp.index(p)
|
||||||
|
children = list(p)[idx:]
|
||||||
|
t = children[-1].tail
|
||||||
|
t = t if t else ''
|
||||||
|
children[-1].tail = t + (p.tail if p.tail else '')
|
||||||
|
p.tail = ''
|
||||||
|
pattrib = dict(**p.attrib) if p.tag == 'Span' else {}
|
||||||
|
for child in children:
|
||||||
|
p.remove(child)
|
||||||
|
if pattrib and child.tag == "Span":
|
||||||
|
attrib = copy(pattrib)
|
||||||
|
attrib.update(child.attrib)
|
||||||
|
child.attrib.update(attrib)
|
||||||
|
|
||||||
|
|
||||||
|
for child in reversed(children):
|
||||||
|
gp.insert(pidx+1, child)
|
||||||
|
|
||||||
|
#with open('/t/after.xml', 'wb') as f:
|
||||||
|
# f.write(etree.tostring(node, method='xml'))
|
||||||
|
|
||||||
def add_text(self, text):
|
def add_text(self, text):
|
||||||
if text:
|
if text:
|
||||||
if getattr(self.add_text_to[0], self.add_text_to[1]) is None:
|
if getattr(self.add_text_to[0], self.add_text_to[1]) is None:
|
||||||
|
@ -483,7 +483,7 @@ class Metadata(object):
|
|||||||
self_tags = self.get(x, [])
|
self_tags = self.get(x, [])
|
||||||
self.set_user_metadata(x, meta) # get... did the deepcopy
|
self.set_user_metadata(x, meta) # get... did the deepcopy
|
||||||
other_tags = other.get(x, [])
|
other_tags = other.get(x, [])
|
||||||
if meta['is_multiple']:
|
if meta['datatype'] == 'text' and meta['is_multiple']:
|
||||||
# Case-insensitive but case preserving merging
|
# Case-insensitive but case preserving merging
|
||||||
lotags = [t.lower() for t in other_tags]
|
lotags = [t.lower() for t in other_tags]
|
||||||
lstags = [t.lower() for t in self_tags]
|
lstags = [t.lower() for t in self_tags]
|
||||||
|
@ -26,7 +26,7 @@ def get_metadata(stream, extract_cover=True):
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
with ZipFile(stream) as zf:
|
with ZipFile(stream) as zf:
|
||||||
opf_name = get_first_opf_name(stream)
|
opf_name = get_first_opf_name(zf)
|
||||||
opf_stream = StringIO(zf.read(opf_name))
|
opf_stream = StringIO(zf.read(opf_name))
|
||||||
opf = OPF(opf_stream)
|
opf = OPF(opf_stream)
|
||||||
mi = opf.to_book_metadata()
|
mi = opf.to_book_metadata()
|
||||||
@ -42,14 +42,10 @@ def set_metadata(stream, mi):
|
|||||||
replacements = {}
|
replacements = {}
|
||||||
|
|
||||||
# Get the OPF in the archive.
|
# Get the OPF in the archive.
|
||||||
try:
|
|
||||||
opf_path = get_first_opf_name(stream)
|
|
||||||
with ZipFile(stream) as zf:
|
with ZipFile(stream) as zf:
|
||||||
|
opf_path = get_first_opf_name(zf)
|
||||||
opf_stream = StringIO(zf.read(opf_path))
|
opf_stream = StringIO(zf.read(opf_path))
|
||||||
opf = OPF(opf_stream)
|
opf = OPF(opf_stream)
|
||||||
except:
|
|
||||||
opf_path = 'metadata.opf'
|
|
||||||
opf = OPF(StringIO())
|
|
||||||
|
|
||||||
# Cover.
|
# Cover.
|
||||||
new_cdata = None
|
new_cdata = None
|
||||||
@ -83,8 +79,7 @@ def set_metadata(stream, mi):
|
|||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def get_first_opf_name(stream):
|
def get_first_opf_name(zf):
|
||||||
with ZipFile(stream) as zf:
|
|
||||||
names = zf.namelist()
|
names = zf.namelist()
|
||||||
opfs = []
|
opfs = []
|
||||||
for n in names:
|
for n in names:
|
||||||
|
@ -259,6 +259,7 @@ class MetadataUpdater(object):
|
|||||||
trail = len(new_record0.getvalue()) % 4
|
trail = len(new_record0.getvalue()) % 4
|
||||||
pad = '\0' * (4 - trail) # Always pad w/ at least 1 byte
|
pad = '\0' * (4 - trail) # Always pad w/ at least 1 byte
|
||||||
new_record0.write(pad)
|
new_record0.write(pad)
|
||||||
|
new_record0.write('\0'*(1024*8))
|
||||||
|
|
||||||
# Rebuild the stream, update the pdbrecords pointers
|
# Rebuild the stream, update the pdbrecords pointers
|
||||||
self.patchSection(0,new_record0.getvalue())
|
self.patchSection(0,new_record0.getvalue())
|
||||||
|
@ -24,6 +24,7 @@ msprefs.defaults['ignore_fields'] = []
|
|||||||
msprefs.defaults['max_tags'] = 20
|
msprefs.defaults['max_tags'] = 20
|
||||||
msprefs.defaults['wait_after_first_identify_result'] = 30 # seconds
|
msprefs.defaults['wait_after_first_identify_result'] = 30 # seconds
|
||||||
msprefs.defaults['wait_after_first_cover_result'] = 60 # seconds
|
msprefs.defaults['wait_after_first_cover_result'] = 60 # seconds
|
||||||
|
msprefs.defaults['swap_author_names'] = False
|
||||||
|
|
||||||
# Google covers are often poor quality (scans/errors) but they have high
|
# Google covers are often poor quality (scans/errors) but they have high
|
||||||
# resolution, so they trump covers from better sources. So make sure they
|
# resolution, so they trump covers from better sources. So make sure they
|
||||||
@ -181,6 +182,10 @@ class Source(Plugin):
|
|||||||
#: construct the configuration widget for this plugin
|
#: construct the configuration widget for this plugin
|
||||||
options = ()
|
options = ()
|
||||||
|
|
||||||
|
#: A string that is displayed at the top of the config widget for this
|
||||||
|
#: plugin
|
||||||
|
config_help_message = None
|
||||||
|
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
Plugin.__init__(self, *args, **kwargs)
|
Plugin.__init__(self, *args, **kwargs)
|
||||||
|
@ -76,6 +76,11 @@ def run_download(log, results, abort,
|
|||||||
(plugin, width, height, fmt, bytes)
|
(plugin, width, height, fmt, bytes)
|
||||||
|
|
||||||
'''
|
'''
|
||||||
|
if title == _('Unknown'):
|
||||||
|
title = None
|
||||||
|
if authors == [_('Unknown')]:
|
||||||
|
authors = None
|
||||||
|
|
||||||
plugins = [p for p in metadata_plugins(['cover']) if p.is_configured()]
|
plugins = [p for p in metadata_plugins(['cover']) if p.is_configured()]
|
||||||
|
|
||||||
rq = Queue()
|
rq = Queue()
|
||||||
@ -145,7 +150,7 @@ def download_cover(log,
|
|||||||
Synchronous cover download. Returns the "best" cover as per user
|
Synchronous cover download. Returns the "best" cover as per user
|
||||||
prefs/cover resolution.
|
prefs/cover resolution.
|
||||||
|
|
||||||
Return cover is a tuple: (plugin, width, height, fmt, data)
|
Returned cover is a tuple: (plugin, width, height, fmt, data)
|
||||||
|
|
||||||
Returns None if no cover is found.
|
Returns None if no cover is found.
|
||||||
'''
|
'''
|
||||||
|
@ -253,6 +253,10 @@ def merge_identify_results(result_map, log):
|
|||||||
|
|
||||||
def identify(log, abort, # {{{
|
def identify(log, abort, # {{{
|
||||||
title=None, authors=None, identifiers={}, timeout=30):
|
title=None, authors=None, identifiers={}, timeout=30):
|
||||||
|
if title == _('Unknown'):
|
||||||
|
title = None
|
||||||
|
if authors == [_('Unknown')]:
|
||||||
|
authors = None
|
||||||
start_time = time.time()
|
start_time = time.time()
|
||||||
plugins = [p for p in metadata_plugins(['identify']) if p.is_configured()]
|
plugins = [p for p in metadata_plugins(['identify']) if p.is_configured()]
|
||||||
|
|
||||||
@ -361,6 +365,18 @@ def identify(log, abort, # {{{
|
|||||||
for r in results:
|
for r in results:
|
||||||
r.tags = r.tags[:max_tags]
|
r.tags = r.tags[:max_tags]
|
||||||
|
|
||||||
|
if msprefs['swap_author_names']:
|
||||||
|
for r in results:
|
||||||
|
def swap_to_ln_fn(a):
|
||||||
|
if ',' in a:
|
||||||
|
return a
|
||||||
|
parts = a.split(None)
|
||||||
|
if len(parts) <= 1:
|
||||||
|
return a
|
||||||
|
surname = parts[-1]
|
||||||
|
return '%s, %s' % (surname, ' '.join(parts[:-1]))
|
||||||
|
r.authors = [swap_to_ln_fn(a) for a in r.authors]
|
||||||
|
|
||||||
return results
|
return results
|
||||||
# }}}
|
# }}}
|
||||||
|
|
||||||
@ -391,8 +407,8 @@ if __name__ == '__main__': # tests {{{
|
|||||||
# unknown to Amazon
|
# unknown to Amazon
|
||||||
{'identifiers':{'isbn': '9780307459671'},
|
{'identifiers':{'isbn': '9780307459671'},
|
||||||
'title':'Invisible Gorilla', 'authors':['Christopher Chabris']},
|
'title':'Invisible Gorilla', 'authors':['Christopher Chabris']},
|
||||||
[title_test('The Invisible Gorilla: And Other Ways Our Intuitions Deceive Us',
|
[title_test('The Invisible Gorilla',
|
||||||
exact=True), authors_test(['Christopher Chabris', 'Daniel Simons'])]
|
exact=True), authors_test(['Christopher F. Chabris', 'Daniel Simons'])]
|
||||||
|
|
||||||
),
|
),
|
||||||
|
|
||||||
@ -400,7 +416,7 @@ if __name__ == '__main__': # tests {{{
|
|||||||
{'title':'Learning Python',
|
{'title':'Learning Python',
|
||||||
'authors':['Lutz']},
|
'authors':['Lutz']},
|
||||||
[title_test('Learning Python',
|
[title_test('Learning Python',
|
||||||
exact=True), authors_test(['Mark Lutz'])
|
exact=True), authors_test(['Mark J. Lutz', 'David Ascher'])
|
||||||
]
|
]
|
||||||
|
|
||||||
),
|
),
|
||||||
|
@ -7,7 +7,19 @@ __license__ = 'GPL v3'
|
|||||||
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
|
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||||
__docformat__ = 'restructuredtext en'
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
from calibre.ebooks.metadata.sources.base import Source
|
from urllib import quote
|
||||||
|
|
||||||
|
from lxml import etree
|
||||||
|
|
||||||
|
from calibre.ebooks.metadata import check_isbn
|
||||||
|
from calibre.ebooks.metadata.sources.base import Source, Option
|
||||||
|
from calibre.ebooks.chardet import xml_to_unicode
|
||||||
|
from calibre.utils.cleantext import clean_ascii_chars
|
||||||
|
from calibre.utils.icu import lower
|
||||||
|
from calibre.ebooks.metadata.book.base import Metadata
|
||||||
|
|
||||||
|
BASE_URL = 'http://isbndb.com/api/books.xml?access_key=%s&page_number=1&results=subjects,authors,texts&'
|
||||||
|
|
||||||
|
|
||||||
class ISBNDB(Source):
|
class ISBNDB(Source):
|
||||||
|
|
||||||
@ -18,6 +30,20 @@ class ISBNDB(Source):
|
|||||||
touched_fields = frozenset(['title', 'authors',
|
touched_fields = frozenset(['title', 'authors',
|
||||||
'identifier:isbn', 'comments', 'publisher'])
|
'identifier:isbn', 'comments', 'publisher'])
|
||||||
supports_gzip_transfer_encoding = True
|
supports_gzip_transfer_encoding = True
|
||||||
|
# Shortcut, since we have no cached cover URLS
|
||||||
|
cached_cover_url_is_reliable = False
|
||||||
|
|
||||||
|
options = (
|
||||||
|
Option('isbndb_key', 'string', None, _('IsbnDB key:'),
|
||||||
|
_('To use isbndb.com you have to sign up for a free account'
|
||||||
|
'at isbndb.com and get an access key.')),
|
||||||
|
)
|
||||||
|
|
||||||
|
config_help_message = '<p>'+_('To use metadata from isbndb.com you must sign'
|
||||||
|
' up for a free account and get an isbndb key and enter it below.'
|
||||||
|
' Instructions to get the key are '
|
||||||
|
'<a href="http://isbndb.com/docs/api/30-keys.html">here</a>.')
|
||||||
|
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
Source.__init__(self, *args, **kwargs)
|
Source.__init__(self, *args, **kwargs)
|
||||||
@ -35,9 +61,186 @@ class ISBNDB(Source):
|
|||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
self.isbndb_key = prefs['isbndb_key']
|
@property
|
||||||
|
def isbndb_key(self):
|
||||||
|
return self.prefs['isbndb_key']
|
||||||
|
|
||||||
def is_configured(self):
|
def is_configured(self):
|
||||||
return self.isbndb_key is not None
|
return self.isbndb_key is not None
|
||||||
|
|
||||||
|
def create_query(self, title=None, authors=None, identifiers={}): # {{{
|
||||||
|
base_url = BASE_URL%self.isbndb_key
|
||||||
|
isbn = check_isbn(identifiers.get('isbn', None))
|
||||||
|
q = ''
|
||||||
|
if isbn is not None:
|
||||||
|
q = 'index1=isbn&value1='+isbn
|
||||||
|
elif title or authors:
|
||||||
|
tokens = []
|
||||||
|
title_tokens = list(self.get_title_tokens(title))
|
||||||
|
tokens += title_tokens
|
||||||
|
author_tokens = self.get_author_tokens(authors,
|
||||||
|
only_first_author=True)
|
||||||
|
tokens += author_tokens
|
||||||
|
tokens = [quote(t) for t in tokens]
|
||||||
|
q = '+'.join(tokens)
|
||||||
|
q = 'index1=combined&value1='+q
|
||||||
|
|
||||||
|
if not q:
|
||||||
|
return None
|
||||||
|
if isinstance(q, unicode):
|
||||||
|
q = q.encode('utf-8')
|
||||||
|
return base_url + q
|
||||||
|
# }}}
|
||||||
|
|
||||||
|
def identify(self, log, result_queue, abort, title=None, authors=None, # {{{
|
||||||
|
identifiers={}, timeout=30):
|
||||||
|
if not self.is_configured():
|
||||||
|
return
|
||||||
|
query = self.create_query(title=title, authors=authors,
|
||||||
|
identifiers=identifiers)
|
||||||
|
if not query:
|
||||||
|
err = 'Insufficient metadata to construct query'
|
||||||
|
log.error(err)
|
||||||
|
return err
|
||||||
|
|
||||||
|
results = []
|
||||||
|
try:
|
||||||
|
results = self.make_query(query, abort, title=title, authors=authors,
|
||||||
|
identifiers=identifiers, timeout=timeout)
|
||||||
|
except:
|
||||||
|
err = 'Failed to make query to ISBNDb, aborting.'
|
||||||
|
log.exception(err)
|
||||||
|
return err
|
||||||
|
|
||||||
|
if not results and identifiers.get('isbn', False) and title and authors and \
|
||||||
|
not abort.is_set():
|
||||||
|
return self.identify(log, result_queue, abort, title=title,
|
||||||
|
authors=authors, timeout=timeout)
|
||||||
|
|
||||||
|
for result in results:
|
||||||
|
self.clean_downloaded_metadata(result)
|
||||||
|
result_queue.put(result)
|
||||||
|
|
||||||
|
def parse_feed(self, feed, seen, orig_title, orig_authors, identifiers):
|
||||||
|
|
||||||
|
def tostring(x):
|
||||||
|
if x is None:
|
||||||
|
return ''
|
||||||
|
return etree.tostring(x, method='text', encoding=unicode).strip()
|
||||||
|
|
||||||
|
orig_isbn = identifiers.get('isbn', None)
|
||||||
|
title_tokens = list(self.get_title_tokens(orig_title))
|
||||||
|
author_tokens = list(self.get_author_tokens(orig_authors))
|
||||||
|
results = []
|
||||||
|
|
||||||
|
def ismatch(title, authors):
|
||||||
|
authors = lower(' '.join(authors))
|
||||||
|
title = lower(title)
|
||||||
|
match = not title_tokens
|
||||||
|
for t in title_tokens:
|
||||||
|
if lower(t) in title:
|
||||||
|
match = True
|
||||||
|
break
|
||||||
|
amatch = not author_tokens
|
||||||
|
for a in author_tokens:
|
||||||
|
if lower(a) in authors:
|
||||||
|
amatch = True
|
||||||
|
break
|
||||||
|
if not author_tokens: amatch = True
|
||||||
|
return match and amatch
|
||||||
|
|
||||||
|
bl = feed.find('BookList')
|
||||||
|
if bl is None:
|
||||||
|
err = tostring(etree.find('errormessage'))
|
||||||
|
raise ValueError('ISBNDb query failed:' + err)
|
||||||
|
total_results = int(bl.get('total_results'))
|
||||||
|
shown_results = int(bl.get('shown_results'))
|
||||||
|
for bd in bl.xpath('.//BookData'):
|
||||||
|
isbn = check_isbn(bd.get('isbn13', bd.get('isbn', None)))
|
||||||
|
if not isbn:
|
||||||
|
continue
|
||||||
|
if orig_isbn and isbn != orig_isbn:
|
||||||
|
continue
|
||||||
|
title = tostring(bd.find('Title'))
|
||||||
|
if not title:
|
||||||
|
continue
|
||||||
|
authors = []
|
||||||
|
for au in bd.xpath('.//Authors/Person'):
|
||||||
|
au = tostring(au)
|
||||||
|
if au:
|
||||||
|
if ',' in au:
|
||||||
|
ln, _, fn = au.partition(',')
|
||||||
|
au = fn.strip() + ' ' + ln.strip()
|
||||||
|
authors.append(au)
|
||||||
|
if not authors:
|
||||||
|
continue
|
||||||
|
comments = tostring(bd.find('Summary'))
|
||||||
|
if not comments:
|
||||||
|
# Require comments, since without them the result is useless
|
||||||
|
# anyway
|
||||||
|
continue
|
||||||
|
id_ = (title, tuple(authors))
|
||||||
|
if id_ in seen:
|
||||||
|
continue
|
||||||
|
seen.add(id_)
|
||||||
|
if not ismatch(title, authors):
|
||||||
|
continue
|
||||||
|
publisher = tostring(bd.find('PublisherText'))
|
||||||
|
if not publisher: publisher = None
|
||||||
|
if publisher and 'audio' in publisher.lower():
|
||||||
|
continue
|
||||||
|
mi = Metadata(title, authors)
|
||||||
|
mi.isbn = isbn
|
||||||
|
mi.publisher = publisher
|
||||||
|
mi.comments = comments
|
||||||
|
results.append(mi)
|
||||||
|
return total_results, shown_results, results
|
||||||
|
|
||||||
|
def make_query(self, q, abort, title=None, authors=None, identifiers={},
|
||||||
|
max_pages=10, timeout=30):
|
||||||
|
page_num = 1
|
||||||
|
parser = etree.XMLParser(recover=True, no_network=True)
|
||||||
|
br = self.browser
|
||||||
|
|
||||||
|
seen = set()
|
||||||
|
|
||||||
|
candidates = []
|
||||||
|
total_found = 0
|
||||||
|
while page_num <= max_pages and not abort.is_set():
|
||||||
|
url = q.replace('&page_number=1&', '&page_number=%d&'%page_num)
|
||||||
|
page_num += 1
|
||||||
|
raw = br.open_novisit(url, timeout=timeout).read()
|
||||||
|
feed = etree.fromstring(xml_to_unicode(clean_ascii_chars(raw),
|
||||||
|
strip_encoding_pats=True)[0], parser=parser)
|
||||||
|
total, found, results = self.parse_feed(
|
||||||
|
feed, seen, title, authors, identifiers)
|
||||||
|
total_found += found
|
||||||
|
candidates += results
|
||||||
|
if total_found >= total or len(candidates) > 9:
|
||||||
|
break
|
||||||
|
|
||||||
|
return candidates
|
||||||
|
# }}}
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
# To run these test use:
|
||||||
|
# calibre-debug -e src/calibre/ebooks/metadata/sources/isbndb.py
|
||||||
|
from calibre.ebooks.metadata.sources.test import (test_identify_plugin,
|
||||||
|
title_test, authors_test)
|
||||||
|
test_identify_plugin(ISBNDB.name,
|
||||||
|
[
|
||||||
|
|
||||||
|
|
||||||
|
(
|
||||||
|
{'title':'Great Gatsby',
|
||||||
|
'authors':['Fitzgerald']},
|
||||||
|
[title_test('The great gatsby', exact=True),
|
||||||
|
authors_test(['F. Scott Fitzgerald'])]
|
||||||
|
),
|
||||||
|
|
||||||
|
(
|
||||||
|
{'title': 'Flatland', 'authors':['Abbott']},
|
||||||
|
[title_test('Flatland', exact=False)]
|
||||||
|
),
|
||||||
|
])
|
||||||
|
|
||||||
|
@ -218,11 +218,11 @@ def test_identify_plugin(name, tests): # {{{
|
|||||||
'')+'-%s-cover.jpg'%sanitize_file_name2(mi.title.replace(' ',
|
'')+'-%s-cover.jpg'%sanitize_file_name2(mi.title.replace(' ',
|
||||||
'_')))
|
'_')))
|
||||||
with open(cover, 'wb') as f:
|
with open(cover, 'wb') as f:
|
||||||
f.write(cdata)
|
f.write(cdata[-1])
|
||||||
|
|
||||||
prints('Cover downloaded to:', cover)
|
prints('Cover downloaded to:', cover)
|
||||||
|
|
||||||
if len(cdata) < 10240:
|
if len(cdata[-1]) < 10240:
|
||||||
prints('Downloaded cover too small')
|
prints('Downloaded cover too small')
|
||||||
raise SystemExit(1)
|
raise SystemExit(1)
|
||||||
|
|
||||||
|
379
src/calibre/ebooks/mobi/debug.py
Normal file
379
src/calibre/ebooks/mobi/debug.py
Normal file
@ -0,0 +1,379 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
|
||||||
|
from __future__ import (unicode_literals, division, absolute_import,
|
||||||
|
print_function)
|
||||||
|
|
||||||
|
__license__ = 'GPL v3'
|
||||||
|
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||||
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
|
import struct, datetime
|
||||||
|
from calibre.utils.date import utc_tz
|
||||||
|
from calibre.ebooks.mobi.langcodes import main_language, sub_language
|
||||||
|
|
||||||
|
class PalmDOCAttributes(object):
|
||||||
|
|
||||||
|
class Attr(object):
|
||||||
|
|
||||||
|
def __init__(self, name, field, val):
|
||||||
|
self.name = name
|
||||||
|
self.val = val & field
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return '%s: %s'%(self.name, bool(self.val))
|
||||||
|
|
||||||
|
def __init__(self, raw):
|
||||||
|
self.val = struct.unpack(b'<H', raw)[0]
|
||||||
|
self.attributes = []
|
||||||
|
for name, field in [('Read Only', 0x02), ('Dirty AppInfoArea', 0x04),
|
||||||
|
('Backup this database', 0x08),
|
||||||
|
('Okay to install newer over existing copy, if present on PalmPilot', 0x10),
|
||||||
|
('Force the PalmPilot to reset after this database is installed', 0x12),
|
||||||
|
('Don\'t allow copy of file to be beamed to other Pilot',
|
||||||
|
0x14)]:
|
||||||
|
self.attributes.append(PalmDOCAttributes.Attr(name, field,
|
||||||
|
self.val))
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
attrs = '\n\t'.join([str(x) for x in self.attributes])
|
||||||
|
return 'PalmDOC Attributes: %s\n\t%s'%(bin(self.val), attrs)
|
||||||
|
|
||||||
|
class PalmDB(object):
    # Parsed PalmDB (PDB) container header: the first 78 bytes of a MOBI file.

    def __init__(self, raw):
        self.raw = raw

        if self.raw.startswith(b'TPZ'):
            raise ValueError('This is a Topaz file')

        self.name = self.raw[:32].replace(b'\x00', b'')
        self.attributes = PalmDOCAttributes(self.raw[32:34])
        self.version = struct.unpack(b'>H', self.raw[34:36])[0]

        # Palm timestamps are seconds counted from the 1904 epoch.
        palm_epoch = datetime.datetime(1904, 1, 1, tzinfo=utc_tz)

        def read_date(offset):
            # Return (raw seconds, absolute datetime) for a 4 byte timestamp.
            secs = struct.unpack(b'>I', self.raw[offset:offset+4])[0]
            return secs, palm_epoch + datetime.timedelta(seconds=secs)

        self.creation_date_raw, self.creation_date = read_date(36)
        self.modification_date_raw, self.modification_date = read_date(40)
        self.last_backup_date_raw, self.last_backup_date = read_date(44)

        self.modification_number = struct.unpack(b'>I', self.raw[48:52])[0]
        self.app_info_id = self.raw[52:56]
        self.sort_info_id = self.raw[56:60]
        self.type = self.raw[60:64]
        self.creator = self.raw[64:68]
        self.ident = self.type + self.creator
        if self.ident not in (b'BOOKMOBI', b'TEXTREAD'):
            raise ValueError('Unknown book ident: %r'%self.ident)
        self.uid_seed = self.raw[68:72]
        self.next_rec_list_id = self.raw[72:76]

        self.number_of_records, = struct.unpack(b'>H', self.raw[76:78])

    def __str__(self):
        # Human readable dump of every parsed header field.
        lines = [
            '*'*20 + ' PalmDB Header '+ '*'*20,
            'Name: %r'%self.name,
            str(self.attributes),
            'Version: %s'%self.version,
            'Creation date: %s (%s)'%(self.creation_date.isoformat(),
                self.creation_date_raw),
            'Modification date: %s (%s)'%(self.modification_date.isoformat(),
                self.modification_date_raw),
            'Backup date: %s (%s)'%(self.last_backup_date.isoformat(),
                self.last_backup_date_raw),
            'Modification number: %s'%self.modification_number,
            'App Info ID: %r'%self.app_info_id,
            'Sort Info ID: %r'%self.sort_info_id,
            'Type: %r'%self.type,
            'Creator: %r'%self.creator,
            'UID seed: %r'%self.uid_seed,
            'Next record list id: %r'%self.next_rec_list_id,
            'Number of records: %s'%self.number_of_records,
        ]
        return '\n'.join(lines)
|
||||||
|
|
||||||
|
class Record(object):
    # A single PDB record: its raw payload plus the directory entry fields.

    def __init__(self, raw, header):
        offset, flags, uid = header
        self.offset = offset
        self.flags = flags
        self.uid = uid
        self.raw = raw

    @property
    def header(self):
        # One-line summary of this record's directory entry.
        return 'Offset: %d Flags: %d UID: %d'%(
                self.offset, self.flags, self.uid)
|
||||||
|
|
||||||
|
class EXTHRecord(object):
    """A single EXTH metadata record: a numeric type plus its raw payload.

    The record type is mapped to a human readable name where known;
    unknown types fall back to the repr of the numeric type.
    """

    # Known EXTH type -> name mapping. Built once at class definition time
    # instead of rebuilding the dict literal on every instantiation.
    TYPE_NAMES = {
            1   : 'DRM Server id',
            2   : 'DRM Commerce id',
            3   : 'DRM ebookbase book id',
            100 : 'author',
            101 : 'publisher',
            102 : 'imprint',
            103 : 'description',
            104 : 'isbn',
            105 : 'subject',
            106 : 'publishingdate',
            107 : 'review',
            108 : 'contributor',
            109 : 'rights',
            110 : 'subjectcode',
            111 : 'type',
            112 : 'source',
            113 : 'asin',
            114 : 'versionnumber',
            115 : 'sample',
            116 : 'startreading',
            117 : 'adult',
            118 : 'retailprice',
            119 : 'retailpricecurrency',
            201 : 'coveroffset',
            202 : 'thumboffset',
            203 : 'hasfakecover',
            204 : 'Creator Software',
            205 : 'Creator Major Version', # '>I'
            206 : 'Creator Minor Version', # '>I'
            207 : 'Creator Build number', # '>I'
            208 : 'watermark',
            209 : 'tamper_proof_keys',
            300 : 'fontsignature',
            301 : 'clippinglimit', # percentage '>B'
            402 : 'publisherlimit',
            404 : 'TTS flag', # '>B' 1 - TTS disabled 0 - TTS enabled
            501 : 'cdetype', # 4 chars (PDOC or EBOK)
            502 : 'lastupdatetime',
            503 : 'updatedtitle',
    }

    def __init__(self, type_, data):
        self.type = type_
        self.data = data
        self.name = self.TYPE_NAMES.get(self.type, repr(self.type))

    def __str__(self):
        return '%s (%d): %r'%(self.name, self.type, self.data)
|
||||||
|
|
||||||
|
class EXTHHeader(object):
    """Parser for the EXTH metadata block that follows the MOBI header.

    Layout: b'EXTH', 4 byte total length, 4 byte record count, then the
    records themselves. Raises ValueError if the magic bytes are missing.
    """

    def __init__(self, raw):
        self.raw = raw
        if not self.raw.startswith(b'EXTH'):
            raise ValueError('EXTH header does not start with EXTH')
        self.length, = struct.unpack(b'>I', self.raw[4:8])
        self.count, = struct.unpack(b'>I', self.raw[8:12])

        pos = 12
        self.records = []
        # range instead of xrange: identical iteration on python 2 and also
        # works on python 3, where xrange does not exist.
        for i in range(self.count):
            pos = self.read_record(pos)

    def read_record(self, pos):
        # Each record is: 4 byte type, 4 byte length (length includes the
        # 8 byte record header itself), then the payload.
        type_, length = struct.unpack(b'>II', self.raw[pos:pos+8])
        data = self.raw[(pos+8):(pos+length)]
        self.records.append(EXTHRecord(type_, data))
        return pos + length

    def __str__(self):
        ans = ['*'*20 + ' EXTH Header '+ '*'*20]
        ans.append('EXTH header length: %d'%self.length)
        ans.append('Number of EXTH records: %d'%self.count)
        ans.append('EXTH records...')
        for r in self.records:
            ans.append(str(r))
        return '\n'.join(ans)
|
||||||
|
|
||||||
|
|
||||||
|
class MOBIHeader(object):
    # Decoded MOBI header taken from record 0 of the PalmDB container.

    def __init__(self, record0):
        self.raw = record0.raw

        def u16(start, end):
            # Big-endian unsigned 16 bit field.
            return struct.unpack(b'>H', self.raw[start:end])[0]

        def u32(start, end):
            # Big-endian unsigned 32 bit field.
            return struct.unpack(b'>I', self.raw[start:end])[0]

        self.compression_raw = self.raw[:2]
        self.compression = {1: 'No compression', 2: 'PalmDoc compression',
                17480: 'HUFF/CDIC compression'}.get(u16(0, 2),
                        repr(self.compression_raw))
        self.unused = self.raw[2:4]
        self.text_length = u32(4, 8)
        self.number_of_text_records, self.text_record_size = \
                struct.unpack(b'>HH', self.raw[8:12])
        self.encryption_type_raw = u16(12, 14)
        self.encryption_type = {0: 'No encryption',
                1: 'Old mobipocket encryption',
                2:'Mobipocket encryption'}.get(self.encryption_type_raw,
                        repr(self.encryption_type_raw))
        self.unknown = self.raw[14:16]

        self.identifier = self.raw[16:20]
        if self.identifier != b'MOBI':
            raise ValueError('Identifier %r unknown'%self.identifier)

        self.length = u32(20, 24)
        self.type_raw = u32(24, 28)
        self.type = {
                2 : 'Mobipocket book',
                3 : 'PalmDOC book',
                4 : 'Audio',
                257 : 'News',
                258 : 'News Feed',
                259 : 'News magazine',
                513 : 'PICS',
                514 : 'Word',
                515 : 'XLS',
                516 : 'PPT',
                517 : 'TEXT',
                518 : 'HTML',
            }.get(self.type_raw, repr(self.type_raw))

        self.encoding_raw = u32(28, 32)
        self.encoding = {
                1252 : 'cp1252',
                65001: 'utf-8',
            }.get(self.encoding_raw, repr(self.encoding_raw))
        self.uid = self.raw[32:36]
        # Historical quirk preserved: file_version is the raw 1-tuple from
        # struct.unpack (no [0]); the %d formatting in __str__ accepts it.
        self.file_version = struct.unpack(b'>I', self.raw[36:40])
        self.reserved = self.raw[40:80]
        self.first_non_book_record = u32(80, 84)
        self.fullname_offset = u32(84, 88)
        self.fullname_length = u32(88, 92)
        self.locale_raw = u32(92, 96)
        langid = self.locale_raw & 0xFF
        sublangid = (self.locale_raw >> 10) & 0xFF
        self.language = main_language.get(langid, 'ENGLISH')
        self.sublanguage = sub_language.get(sublangid, 'NEUTRAL')

        self.input_language = self.raw[96:100]
        # (sic) misspelt attribute name kept so existing callers keep working
        self.output_langauage = self.raw[100:104]
        self.min_version = u32(104, 108)
        self.first_image_index = u32(108, 112)
        self.huffman_record_offset = u32(112, 116)
        self.huffman_record_count = u32(116, 120)
        self.unknown2 = self.raw[120:128]
        self.exth_flags = u32(128, 132)
        self.has_exth = bool(self.exth_flags & 0x40)
        self.has_drm_data = self.length >= 184 and len(self.raw) >= 184
        if self.has_drm_data:
            self.unknown3 = self.raw[132:164]
            self.drm_offset = u32(164, 168)
            self.drm_count = u32(168, 172)
            self.drm_size = u32(172, 176)
            self.drm_flags = bin(u32(176, 180))
        self.has_extra_data_flags = self.length >= 244 and len(self.raw) >= 244
        if self.has_extra_data_flags:
            self.unknown4 = self.raw[180:242]
            self.extra_data_flags = bin(u16(242, 244))

        if self.has_exth:
            self.exth_offset = 16 + self.length
            self.exth = EXTHHeader(self.raw[self.exth_offset:])
            self.end_of_exth = self.exth_offset + self.exth.length
            self.bytes_after_exth = self.fullname_offset - self.end_of_exth

    def __str__(self):
        # Dump of all parsed fields; optional sections appended only when
        # present in the header.
        ans = [
            '*'*20 + ' MOBI Header '+ '*'*20,
            'Compression: %s'%self.compression,
            'Unused: %r'%self.unused,
            'Number of text records: %d'%self.number_of_text_records,
            'Text record size: %d'%self.text_record_size,
            'Encryption: %s'%self.encryption_type,
            'Unknown: %r'%self.unknown,
            'Identifier: %r'%self.identifier,
            'Header length: %d'% self.length,
            'Type: %s'%self.type,
            'Encoding: %s'%self.encoding,
            'UID: %r'%self.uid,
            'File version: %d'%self.file_version,
            'Reserved: %r'%self.reserved,
            'First non-book record: %d'% self.first_non_book_record,
            'Full name offset: %d'%self.fullname_offset,
            'Full name length: %d bytes'%self.fullname_length,
            'Langcode: %r'%self.locale_raw,
            'Language: %s'%self.language,
            'Sub language: %s'%self.sublanguage,
            'Input language: %r'%self.input_language,
            'Output language: %r'%self.output_langauage,
            'Min version: %d'%self.min_version,
            'First Image index: %d'%self.first_image_index,
            'Huffman record offset: %d'%self.huffman_record_offset,
            'Huffman record count: %d'%self.huffman_record_count,
            'Unknown2: %r'%self.unknown2,
            'EXTH flags: %r (%s)'%(self.exth_flags, self.has_exth),
        ]
        if self.has_drm_data:
            ans.extend([
                'Unknown3: %r'%self.unknown3,
                'DRM Offset: %s'%self.drm_offset,
                'DRM Count: %s'%self.drm_count,
                'DRM Size: %s'%self.drm_size,
                'DRM Flags: %r'%self.drm_flags,
            ])
        if self.has_extra_data_flags:
            ans.extend([
                'Unknown4: %r'%self.unknown4,
                'Extra data flags: %r'%self.extra_data_flags,
            ])

        ans = '\n'.join(ans)

        if self.has_exth:
            ans += '\n\n' + str(self.exth)
            ans += '\n\nBytes after EXTH: %d'%self.bytes_after_exth

        ans += '\nNumber of bytes after full name: %d' % (len(self.raw) - (self.fullname_offset +
            self.fullname_length))

        ans += '\nRecord 0 length: %d'%len(self.raw)
        return ans
|
||||||
|
|
||||||
|
class MOBIFile(object):
    # Splits a MOBI stream into its PalmDB header and records, then parses
    # the MOBI header out of record 0.

    def __init__(self, stream):
        self.raw = stream.read()

        self.palmdb = PalmDB(self.raw[:78])

        self.record_headers = []
        self.records = []
        num = self.palmdb.number_of_records
        pos = 78
        for i in range(num):
            # Each directory entry: 4 byte offset, 1 byte flags, 3 byte uid.
            offset, flags, b2, b1, b0 = struct.unpack(b'>LBBBB',
                    self.raw[pos:pos+8])
            uid = (b2 << 16) | (b1 << 8) | b0
            self.record_headers.append((offset, flags, uid))
            pos += 8

        def section_data(idx):
            # Payload of record idx runs up to the next record's offset
            # (or end of file for the final record).
            start = self.record_headers[idx][0]
            if idx == num - 1:
                end = len(self.raw)
            else:
                end = self.record_headers[idx + 1][0]
            return self.raw[start:end]

        for i in range(num):
            self.records.append(Record(section_data(i), self.record_headers[i]))

        self.mobi_header = MOBIHeader(self.records[0])

    def print_header(self):
        # Print the PalmDB header, the record directory and the MOBI header.
        print (str(self.palmdb).encode('utf-8'))
        print ()
        print ('Record headers:')
        for i, r in enumerate(self.records):
            print ('%6d. %s'%(i, r.header))

        print ()
        print (str(self.mobi_header).encode('utf-8'))
|
||||||
|
|
||||||
|
def inspect_mobi(path_or_stream):
    # Accept either an already-open binary stream or a filesystem path.
    if hasattr(path_or_stream, 'read'):
        stream = path_or_stream
    else:
        stream = open(path_or_stream, 'rb')
    MOBIFile(stream).print_header()
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    import sys
    # Dump all header information for the MOBI file named on the command line.
    mobi = MOBIFile(open(sys.argv[1], 'rb'))
    mobi.print_header()
|
||||||
|
|
@ -463,9 +463,9 @@ class MobiMLizer(object):
|
|||||||
text = COLLAPSE.sub(' ', elem.text)
|
text = COLLAPSE.sub(' ', elem.text)
|
||||||
valign = style['vertical-align']
|
valign = style['vertical-align']
|
||||||
not_baseline = valign in ('super', 'sub', 'text-top',
|
not_baseline = valign in ('super', 'sub', 'text-top',
|
||||||
'text-bottom') or (
|
'text-bottom', 'top', 'bottom') or (
|
||||||
isinstance(valign, (float, int)) and abs(valign) != 0)
|
isinstance(valign, (float, int)) and abs(valign) != 0)
|
||||||
issup = valign in ('super', 'text-top') or (
|
issup = valign in ('super', 'text-top', 'top') or (
|
||||||
isinstance(valign, (float, int)) and valign > 0)
|
isinstance(valign, (float, int)) and valign > 0)
|
||||||
vtag = 'sup' if issup else 'sub'
|
vtag = 'sup' if issup else 'sub'
|
||||||
if not_baseline and not ignore_valign and tag not in NOT_VTAGS and not isblock:
|
if not_baseline and not ignore_valign and tag not in NOT_VTAGS and not isblock:
|
||||||
@ -484,6 +484,7 @@ class MobiMLizer(object):
|
|||||||
parent = bstate.para if bstate.inline is None else bstate.inline
|
parent = bstate.para if bstate.inline is None else bstate.inline
|
||||||
if parent is not None:
|
if parent is not None:
|
||||||
vtag = etree.SubElement(parent, XHTML(vtag))
|
vtag = etree.SubElement(parent, XHTML(vtag))
|
||||||
|
vtag = etree.SubElement(vtag, XHTML('small'))
|
||||||
# Add anchors
|
# Add anchors
|
||||||
for child in vbstate.body:
|
for child in vbstate.body:
|
||||||
if child is not vbstate.para:
|
if child is not vbstate.para:
|
||||||
|
@ -310,6 +310,7 @@ class Serializer(object):
|
|||||||
if href not in id_offsets:
|
if href not in id_offsets:
|
||||||
self.logger.warn('Hyperlink target %r not found' % href)
|
self.logger.warn('Hyperlink target %r not found' % href)
|
||||||
href, _ = urldefrag(href)
|
href, _ = urldefrag(href)
|
||||||
|
if href in self.id_offsets:
|
||||||
ioff = self.id_offsets[href]
|
ioff = self.id_offsets[href]
|
||||||
for hoff in hoffs:
|
for hoff in hoffs:
|
||||||
buffer.seek(hoff)
|
buffer.seek(hoff)
|
||||||
@ -1510,7 +1511,7 @@ class MobiWriter(object):
|
|||||||
record0.write(exth)
|
record0.write(exth)
|
||||||
record0.write(title)
|
record0.write(title)
|
||||||
record0 = record0.getvalue()
|
record0 = record0.getvalue()
|
||||||
self._records[0] = record0 + ('\0' * (2452 - len(record0)))
|
self._records[0] = record0 + ('\0' * (1024*8))
|
||||||
|
|
||||||
def _build_exth(self):
|
def _build_exth(self):
|
||||||
oeb = self._oeb
|
oeb = self._oeb
|
||||||
|
@ -20,8 +20,9 @@ class RemoveAdobeMargins(object):
|
|||||||
self.oeb, self.opts, self.log = oeb, opts, log
|
self.oeb, self.opts, self.log = oeb, opts, log
|
||||||
|
|
||||||
for item in self.oeb.manifest:
|
for item in self.oeb.manifest:
|
||||||
if item.media_type in ('application/vnd.adobe-page-template+xml',
|
if (item.media_type in ('application/vnd.adobe-page-template+xml',
|
||||||
'application/vnd.adobe.page-template+xml'):
|
'application/vnd.adobe.page-template+xml') and
|
||||||
|
hasattr(item.data, 'xpath')):
|
||||||
self.log('Removing page margins specified in the'
|
self.log('Removing page margins specified in the'
|
||||||
' Adobe page template')
|
' Adobe page template')
|
||||||
for elem in item.data.xpath(
|
for elem in item.data.xpath(
|
||||||
|
@ -94,7 +94,7 @@ class EditMetadataAction(InterfaceAction):
|
|||||||
|
|
||||||
def bulk_metadata_downloaded(self, job):
|
def bulk_metadata_downloaded(self, job):
|
||||||
if job.failed:
|
if job.failed:
|
||||||
self.job_exception(job, dialog_title=_('Failed to download metadata'))
|
self.gui.job_exception(job, dialog_title=_('Failed to download metadata'))
|
||||||
return
|
return
|
||||||
from calibre.gui2.metadata.bulk_download2 import proceed
|
from calibre.gui2.metadata.bulk_download2 import proceed
|
||||||
proceed(self.gui, job)
|
proceed(self.gui, job)
|
||||||
|
@ -6,9 +6,8 @@ __copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
|
|||||||
__docformat__ = 'restructuredtext en'
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
import os, time
|
import os, time
|
||||||
from functools import partial
|
|
||||||
|
|
||||||
from PyQt4.Qt import Qt, QMenu
|
from PyQt4.Qt import Qt, QMenu, QAction, pyqtSignal
|
||||||
|
|
||||||
from calibre.constants import isosx
|
from calibre.constants import isosx
|
||||||
from calibre.gui2 import error_dialog, Dispatcher, question_dialog, config, \
|
from calibre.gui2 import error_dialog, Dispatcher, question_dialog, config, \
|
||||||
@ -18,6 +17,19 @@ from calibre.utils.config import prefs
|
|||||||
from calibre.ptempfile import PersistentTemporaryFile
|
from calibre.ptempfile import PersistentTemporaryFile
|
||||||
from calibre.gui2.actions import InterfaceAction
|
from calibre.gui2.actions import InterfaceAction
|
||||||
|
|
||||||
|
class HistoryAction(QAction):
|
||||||
|
|
||||||
|
view_historical = pyqtSignal(object)
|
||||||
|
|
||||||
|
def __init__(self, id_, title, parent):
|
||||||
|
QAction.__init__(self, title, parent)
|
||||||
|
self.id = id_
|
||||||
|
self.triggered.connect(self._triggered)
|
||||||
|
|
||||||
|
def _triggered(self):
|
||||||
|
self.view_historical.emit(self.id)
|
||||||
|
|
||||||
|
|
||||||
class ViewAction(InterfaceAction):
|
class ViewAction(InterfaceAction):
|
||||||
|
|
||||||
name = 'View'
|
name = 'View'
|
||||||
@ -28,18 +40,51 @@ class ViewAction(InterfaceAction):
|
|||||||
self.persistent_files = []
|
self.persistent_files = []
|
||||||
self.qaction.triggered.connect(self.view_book)
|
self.qaction.triggered.connect(self.view_book)
|
||||||
self.view_menu = QMenu()
|
self.view_menu = QMenu()
|
||||||
self.view_menu.addAction(_('View'), partial(self.view_book, False))
|
ac = self.view_specific_action = QAction(_('View specific format'),
|
||||||
ac = self.view_menu.addAction(_('View specific format'))
|
self.gui)
|
||||||
ac.setShortcut((Qt.ControlModifier if isosx else Qt.AltModifier)+Qt.Key_V)
|
|
||||||
self.qaction.setMenu(self.view_menu)
|
self.qaction.setMenu(self.view_menu)
|
||||||
|
ac.setShortcut((Qt.ControlModifier if isosx else Qt.AltModifier)+Qt.Key_V)
|
||||||
ac.triggered.connect(self.view_specific_format, type=Qt.QueuedConnection)
|
ac.triggered.connect(self.view_specific_format, type=Qt.QueuedConnection)
|
||||||
|
ac = self.view_action = QAction(self.qaction.icon(),
|
||||||
self.view_menu.addSeparator()
|
self.qaction.text(), self.gui)
|
||||||
|
ac.triggered.connect(self.view_book)
|
||||||
ac = self.create_action(spec=(_('Read a random book'), 'catalog.png',
|
ac = self.create_action(spec=(_('Read a random book'), 'catalog.png',
|
||||||
None, None), attr='action_pick_random')
|
None, None), attr='action_pick_random')
|
||||||
ac.triggered.connect(self.view_random)
|
ac.triggered.connect(self.view_random)
|
||||||
self.view_menu.addAction(ac)
|
ac = self.clear_history_action = QAction(
|
||||||
|
_('Clear recently viewed list'), self.gui)
|
||||||
|
ac.triggered.connect(self.clear_history)
|
||||||
|
|
||||||
|
def initialization_complete(self):
|
||||||
|
self.build_menus(self.gui.current_db)
|
||||||
|
|
||||||
|
def build_menus(self, db):
|
||||||
|
self.view_menu.clear()
|
||||||
|
self.view_menu.addAction(self.qaction)
|
||||||
|
self.view_menu.addAction(self.view_specific_action)
|
||||||
|
self.view_menu.addSeparator()
|
||||||
|
self.view_menu.addAction(self.action_pick_random)
|
||||||
|
self.history_actions = []
|
||||||
|
history = db.prefs.get('gui_view_history', [])
|
||||||
|
if history:
|
||||||
|
self.view_menu.addSeparator()
|
||||||
|
for id_, title in history:
|
||||||
|
ac = HistoryAction(id_, title, self.view_menu)
|
||||||
|
self.view_menu.addAction(ac)
|
||||||
|
ac.view_historical.connect(self.view_historical)
|
||||||
|
self.view_menu.addSeparator()
|
||||||
|
self.view_menu.addAction(self.clear_history_action)
|
||||||
|
|
||||||
|
def clear_history(self):
|
||||||
|
db = self.gui.current_db
|
||||||
|
db.prefs['gui_view_history'] = []
|
||||||
|
self.build_menus(db)
|
||||||
|
|
||||||
|
def view_historical(self, id_):
|
||||||
|
self._view_calibre_books([id_])
|
||||||
|
|
||||||
|
def library_changed(self, db):
|
||||||
|
self.build_menus(db)
|
||||||
|
|
||||||
def location_selected(self, loc):
|
def location_selected(self, loc):
|
||||||
enabled = loc == 'library'
|
enabled = loc == 'library'
|
||||||
@ -47,15 +92,17 @@ class ViewAction(InterfaceAction):
|
|||||||
action.setEnabled(enabled)
|
action.setEnabled(enabled)
|
||||||
|
|
||||||
def view_format(self, row, format):
|
def view_format(self, row, format):
|
||||||
fmt_path = self.gui.library_view.model().db.format_abspath(row, format)
|
id_ = self.gui.library_view.model().id(row)
|
||||||
if fmt_path:
|
self.view_format_by_id(id_, format)
|
||||||
self._view_file(fmt_path)
|
|
||||||
|
|
||||||
def view_format_by_id(self, id_, format):
|
def view_format_by_id(self, id_, format):
|
||||||
fmt_path = self.gui.library_view.model().db.format_abspath(id_, format,
|
db = self.gui.current_db
|
||||||
|
fmt_path = db.format_abspath(id_, format,
|
||||||
index_is_id=True)
|
index_is_id=True)
|
||||||
if fmt_path:
|
if fmt_path:
|
||||||
|
title = db.title(id_, index_is_id=True)
|
||||||
self._view_file(fmt_path)
|
self._view_file(fmt_path)
|
||||||
|
self.update_history([(id_, title)])
|
||||||
|
|
||||||
def book_downloaded_for_viewing(self, job):
|
def book_downloaded_for_viewing(self, job):
|
||||||
if job.failed:
|
if job.failed:
|
||||||
@ -162,6 +209,54 @@ class ViewAction(InterfaceAction):
|
|||||||
self.gui.iactions['Choose Library'].pick_random()
|
self.gui.iactions['Choose Library'].pick_random()
|
||||||
self._view_books([self.gui.library_view.currentIndex()])
|
self._view_books([self.gui.library_view.currentIndex()])
|
||||||
|
|
||||||
|
def _view_calibre_books(self, ids):
|
||||||
|
db = self.gui.current_db
|
||||||
|
views = []
|
||||||
|
for id_ in ids:
|
||||||
|
try:
|
||||||
|
formats = db.formats(id_, index_is_id=True)
|
||||||
|
except:
|
||||||
|
error_dialog(self.gui, _('Cannot view'),
|
||||||
|
_('This book no longer exists in your library'), show=True)
|
||||||
|
self.update_history([], remove=set([id_]))
|
||||||
|
continue
|
||||||
|
|
||||||
|
title = db.title(id_, index_is_id=True)
|
||||||
|
if not formats:
|
||||||
|
error_dialog(self.gui, _('Cannot view'),
|
||||||
|
_('%s has no available formats.')%(title,), show=True)
|
||||||
|
continue
|
||||||
|
|
||||||
|
formats = formats.upper().split(',')
|
||||||
|
|
||||||
|
fmt = formats[0]
|
||||||
|
for format in prefs['input_format_order']:
|
||||||
|
if format in formats:
|
||||||
|
fmt = format
|
||||||
|
break
|
||||||
|
views.append((id_, title))
|
||||||
|
self.view_format_by_id(id_, fmt)
|
||||||
|
|
||||||
|
self.update_history(views)
|
||||||
|
|
||||||
|
def update_history(self, views, remove=frozenset()):
|
||||||
|
db = self.gui.current_db
|
||||||
|
if views:
|
||||||
|
seen = set()
|
||||||
|
history = []
|
||||||
|
for id_, title in views + db.prefs.get('gui_view_history', []):
|
||||||
|
if title not in seen:
|
||||||
|
seen.add(title)
|
||||||
|
history.append((id_, title))
|
||||||
|
|
||||||
|
db.prefs['gui_view_history'] = history[:10]
|
||||||
|
self.build_menus(db)
|
||||||
|
if remove:
|
||||||
|
history = db.prefs.get('gui_view_history', [])
|
||||||
|
history = [x for x in history if x[0] not in remove]
|
||||||
|
db.prefs['gui_view_history'] = history[:10]
|
||||||
|
self.build_menus(db)
|
||||||
|
|
||||||
def _view_books(self, rows):
|
def _view_books(self, rows):
|
||||||
if not rows or len(rows) == 0:
|
if not rows or len(rows) == 0:
|
||||||
self._launch_viewer()
|
self._launch_viewer()
|
||||||
@ -171,28 +266,8 @@ class ViewAction(InterfaceAction):
|
|||||||
return
|
return
|
||||||
|
|
||||||
if self.gui.current_view() is self.gui.library_view:
|
if self.gui.current_view() is self.gui.library_view:
|
||||||
for row in rows:
|
ids = list(map(self.gui.library_view.model().id, rows))
|
||||||
if hasattr(row, 'row'):
|
self._view_calibre_books(ids)
|
||||||
row = row.row()
|
|
||||||
|
|
||||||
formats = self.gui.library_view.model().db.formats(row)
|
|
||||||
title = self.gui.library_view.model().db.title(row)
|
|
||||||
if not formats:
|
|
||||||
error_dialog(self.gui, _('Cannot view'),
|
|
||||||
_('%s has no available formats.')%(title,), show=True)
|
|
||||||
continue
|
|
||||||
|
|
||||||
formats = formats.upper().split(',')
|
|
||||||
|
|
||||||
|
|
||||||
in_prefs = False
|
|
||||||
for format in prefs['input_format_order']:
|
|
||||||
if format in formats:
|
|
||||||
in_prefs = True
|
|
||||||
self.view_format(row, format)
|
|
||||||
break
|
|
||||||
if not in_prefs:
|
|
||||||
self.view_format(row, formats[0])
|
|
||||||
else:
|
else:
|
||||||
paths = self.gui.current_view().model().paths(rows)
|
paths = self.gui.current_view().model().paths(rows)
|
||||||
for path in paths:
|
for path in paths:
|
||||||
|
@ -519,6 +519,8 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
|
|||||||
val = [] if fm['is_multiple'] else ['']
|
val = [] if fm['is_multiple'] else ['']
|
||||||
elif not fm['is_multiple']:
|
elif not fm['is_multiple']:
|
||||||
val = [val]
|
val = [val]
|
||||||
|
elif fm['datatype'] == 'composite':
|
||||||
|
val = [v.strip() for v in val.split(fm['is_multiple'])]
|
||||||
elif field == 'authors':
|
elif field == 'authors':
|
||||||
val = [v.replace('|', ',') for v in val]
|
val = [v.replace('|', ',') for v in val]
|
||||||
else:
|
else:
|
||||||
|
@ -247,6 +247,11 @@ class LayoutMixin(object): # {{{
|
|||||||
for x in ('cb', 'tb', 'bd'):
|
for x in ('cb', 'tb', 'bd'):
|
||||||
button = getattr(self, x+'_splitter').button
|
button = getattr(self, x+'_splitter').button
|
||||||
button.setIconSize(QSize(24, 24))
|
button.setIconSize(QSize(24, 24))
|
||||||
|
if isosx:
|
||||||
|
button.setStyleSheet('''
|
||||||
|
QToolButton { background: none; border:none; padding: 0px; }
|
||||||
|
QToolButton:checked { background: rgba(0, 0, 0, 25%); }
|
||||||
|
''')
|
||||||
self.status_bar.addPermanentWidget(button)
|
self.status_bar.addPermanentWidget(button)
|
||||||
self.status_bar.addPermanentWidget(self.jobs_button)
|
self.status_bar.addPermanentWidget(self.jobs_button)
|
||||||
self.setStatusBar(self.status_bar)
|
self.setStatusBar(self.status_bar)
|
||||||
|
@ -196,6 +196,10 @@ class SearchBar(QWidget): # {{{
|
|||||||
l.addWidget(x)
|
l.addWidget(x)
|
||||||
x.setToolTip(_("Reset Quick Search"))
|
x.setToolTip(_("Reset Quick Search"))
|
||||||
|
|
||||||
|
x = parent.highlight_only_button = QToolButton(self)
|
||||||
|
x.setIcon(QIcon(I('arrow-down.png')))
|
||||||
|
l.addWidget(x)
|
||||||
|
|
||||||
x = parent.search_options_button = QToolButton(self)
|
x = parent.search_options_button = QToolButton(self)
|
||||||
x.setIcon(QIcon(I('config.png')))
|
x.setIcon(QIcon(I('config.png')))
|
||||||
x.setObjectName("search_option_button")
|
x.setObjectName("search_option_button")
|
||||||
@ -408,6 +412,7 @@ class ToolBar(BaseToolBar): # {{{
|
|||||||
self.d_widget.layout().addWidget(self.donate_button)
|
self.d_widget.layout().addWidget(self.donate_button)
|
||||||
if isosx:
|
if isosx:
|
||||||
self.d_widget.setStyleSheet('QWidget, QToolButton {background-color: none; border: none; }')
|
self.d_widget.setStyleSheet('QWidget, QToolButton {background-color: none; border: none; }')
|
||||||
|
self.d_widget.layout().addWidget(QLabel(u'\u00a0'))
|
||||||
bar.addWidget(self.d_widget)
|
bar.addWidget(self.d_widget)
|
||||||
self.showing_donate = True
|
self.showing_donate = True
|
||||||
elif what in self.gui.iactions:
|
elif what in self.gui.iactions:
|
||||||
|
@ -310,10 +310,17 @@ class BooksModel(QAbstractTableModel): # {{{
|
|||||||
def sort(self, col, order, reset=True):
|
def sort(self, col, order, reset=True):
|
||||||
if not self.db:
|
if not self.db:
|
||||||
return
|
return
|
||||||
self.about_to_be_sorted.emit(self.db.id)
|
|
||||||
if not isinstance(order, bool):
|
if not isinstance(order, bool):
|
||||||
order = order == Qt.AscendingOrder
|
order = order == Qt.AscendingOrder
|
||||||
label = self.column_map[col]
|
label = self.column_map[col]
|
||||||
|
self._sort(label, order, reset)
|
||||||
|
|
||||||
|
def sort_by_named_field(self, field, order, reset=True):
|
||||||
|
if field in self.db.field_metadata.keys():
|
||||||
|
self._sort(field, order, reset)
|
||||||
|
|
||||||
|
def _sort(self, label, order, reset):
|
||||||
|
self.about_to_be_sorted.emit(self.db.id)
|
||||||
self.db.sort(label, order)
|
self.db.sort(label, order)
|
||||||
if reset:
|
if reset:
|
||||||
self.reset()
|
self.reset()
|
||||||
|
@ -236,6 +236,46 @@ class BooksView(QTableView): # {{{
|
|||||||
sm.select(idx, sm.Select|sm.Rows)
|
sm.select(idx, sm.Select|sm.Rows)
|
||||||
self.scroll_to_row(indices[0].row())
|
self.scroll_to_row(indices[0].row())
|
||||||
self.selected_ids = []
|
self.selected_ids = []
|
||||||
|
|
||||||
|
def sort_by_named_field(self, field, order, reset=True):
|
||||||
|
if field in self.column_map:
|
||||||
|
idx = self.column_map.index(field)
|
||||||
|
if order:
|
||||||
|
self.sortByColumn(idx, Qt.AscendingOrder)
|
||||||
|
else:
|
||||||
|
self.sortByColumn(idx, Qt.DescendingOrder)
|
||||||
|
else:
|
||||||
|
self._model.sort_by_named_field(field, order, reset)
|
||||||
|
|
||||||
|
def multisort(self, fields, reset=True, only_if_different=False):
|
||||||
|
if len(fields) == 0:
|
||||||
|
return
|
||||||
|
sh = self.cleanup_sort_history(self._model.sort_history,
|
||||||
|
ignore_column_map=True)
|
||||||
|
if only_if_different and len(sh) >= len(fields):
|
||||||
|
ret=True
|
||||||
|
for i,t in enumerate(fields):
|
||||||
|
if t[0] != sh[i][0]:
|
||||||
|
ret = False
|
||||||
|
break
|
||||||
|
if ret:
|
||||||
|
return
|
||||||
|
|
||||||
|
for n,d in reversed(fields):
|
||||||
|
if n in self._model.db.field_metadata.keys():
|
||||||
|
sh.insert(0, (n, d))
|
||||||
|
sh = self.cleanup_sort_history(sh, ignore_column_map=True)
|
||||||
|
self._model.sort_history = [tuple(x) for x in sh]
|
||||||
|
self._model.resort(reset=reset)
|
||||||
|
col = fields[0][0]
|
||||||
|
dir = Qt.AscendingOrder if fields[0][1] else Qt.DescendingOrder
|
||||||
|
if col in self.column_map:
|
||||||
|
col = self.column_map.index(col)
|
||||||
|
hdrs = self.horizontalHeader()
|
||||||
|
try:
|
||||||
|
hdrs.setSortIndicator(col, dir)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
# }}}
|
# }}}
|
||||||
|
|
||||||
# Ondevice column {{{
|
# Ondevice column {{{
|
||||||
@ -280,14 +320,14 @@ class BooksView(QTableView): # {{{
|
|||||||
state = self.get_state()
|
state = self.get_state()
|
||||||
self.write_state(state)
|
self.write_state(state)
|
||||||
|
|
||||||
def cleanup_sort_history(self, sort_history):
|
def cleanup_sort_history(self, sort_history, ignore_column_map=False):
|
||||||
history = []
|
history = []
|
||||||
for col, order in sort_history:
|
for col, order in sort_history:
|
||||||
if not isinstance(order, bool):
|
if not isinstance(order, bool):
|
||||||
continue
|
continue
|
||||||
if col == 'date':
|
if col == 'date':
|
||||||
col = 'timestamp'
|
col = 'timestamp'
|
||||||
if col in self.column_map:
|
if ignore_column_map or col in self.column_map:
|
||||||
if (not history or history[-1][0] != col):
|
if (not history or history[-1][0] != col):
|
||||||
history.append([col, order])
|
history.append([col, order])
|
||||||
return history
|
return history
|
||||||
@ -621,7 +661,7 @@ class BooksView(QTableView): # {{{
|
|||||||
h = self.horizontalHeader()
|
h = self.horizontalHeader()
|
||||||
for i in range(h.count()):
|
for i in range(h.count()):
|
||||||
if not h.isSectionHidden(i) and h.sectionViewportPosition(i) >= 0:
|
if not h.isSectionHidden(i) and h.sectionViewportPosition(i) >= 0:
|
||||||
self.scrollTo(self.model().index(row, i))
|
self.scrollTo(self.model().index(row, i), self.PositionAtCenter)
|
||||||
break
|
break
|
||||||
|
|
||||||
def set_current_row(self, row, select=True):
|
def set_current_row(self, row, select=True):
|
||||||
|
@ -846,7 +846,7 @@ class RatingEdit(QSpinBox): # {{{
|
|||||||
class TagsEdit(MultiCompleteLineEdit): # {{{
|
class TagsEdit(MultiCompleteLineEdit): # {{{
|
||||||
LABEL = _('Ta&gs:')
|
LABEL = _('Ta&gs:')
|
||||||
TOOLTIP = '<p>'+_('Tags categorize the book. This is particularly '
|
TOOLTIP = '<p>'+_('Tags categorize the book. This is particularly '
|
||||||
'useful while searching. <br><br>They can be any words'
|
'useful while searching. <br><br>They can be any words '
|
||||||
'or phrases, separated by commas.')
|
'or phrases, separated by commas.')
|
||||||
|
|
||||||
def __init__(self, parent):
|
def __init__(self, parent):
|
||||||
|
@ -54,6 +54,8 @@ def start_download(gui, ids, callback, identify, covers):
|
|||||||
_('Download metadata for %d books')%len(ids),
|
_('Download metadata for %d books')%len(ids),
|
||||||
download, (ids, gui.current_db, identify, covers), {}, callback)
|
download, (ids, gui.current_db, identify, covers), {}, callback)
|
||||||
gui.job_manager.run_threaded_job(job)
|
gui.job_manager.run_threaded_job(job)
|
||||||
|
gui.status_bar.show_message(_('Metadata download started'), 3000)
|
||||||
|
|
||||||
|
|
||||||
class ViewLog(QDialog): # {{{
|
class ViewLog(QDialog): # {{{
|
||||||
|
|
||||||
@ -75,7 +77,7 @@ class ViewLog(QDialog): # {{{
|
|||||||
self.copy_button.clicked.connect(self.copy_to_clipboard)
|
self.copy_button.clicked.connect(self.copy_to_clipboard)
|
||||||
l.addWidget(self.bb)
|
l.addWidget(self.bb)
|
||||||
self.setModal(False)
|
self.setModal(False)
|
||||||
self.resize(QSize(500, 400))
|
self.resize(QSize(700, 500))
|
||||||
self.setWindowTitle(_('Download log'))
|
self.setWindowTitle(_('Download log'))
|
||||||
self.setWindowIcon(QIcon(I('debug.png')))
|
self.setWindowIcon(QIcon(I('debug.png')))
|
||||||
self.show()
|
self.show()
|
||||||
@ -110,25 +112,27 @@ class ApplyDialog(QDialog):
|
|||||||
self.bb.accepted.connect(self.accept)
|
self.bb.accepted.connect(self.accept)
|
||||||
l.addWidget(self.bb)
|
l.addWidget(self.bb)
|
||||||
|
|
||||||
self.db = gui.current_db
|
self.gui = gui
|
||||||
self.id_map = list(id_map.iteritems())
|
self.id_map = list(id_map.iteritems())
|
||||||
self.current_idx = 0
|
self.current_idx = 0
|
||||||
|
|
||||||
self.failures = []
|
self.failures = []
|
||||||
|
self.ids = []
|
||||||
self.canceled = False
|
self.canceled = False
|
||||||
|
|
||||||
QTimer.singleShot(20, self.do_one)
|
QTimer.singleShot(20, self.do_one)
|
||||||
self.exec_()
|
|
||||||
|
|
||||||
def do_one(self):
|
def do_one(self):
|
||||||
if self.canceled:
|
if self.canceled:
|
||||||
return
|
return
|
||||||
i, mi = self.id_map[self.current_idx]
|
i, mi = self.id_map[self.current_idx]
|
||||||
|
db = self.gui.current_db
|
||||||
try:
|
try:
|
||||||
set_title = not mi.is_null('title')
|
set_title = not mi.is_null('title')
|
||||||
set_authors = not mi.is_null('authors')
|
set_authors = not mi.is_null('authors')
|
||||||
self.db.set_metadata(i, mi, commit=False, set_title=set_title,
|
db.set_metadata(i, mi, commit=False, set_title=set_title,
|
||||||
set_authors=set_authors)
|
set_authors=set_authors)
|
||||||
|
self.ids.append(i)
|
||||||
except:
|
except:
|
||||||
import traceback
|
import traceback
|
||||||
self.failures.append((i, traceback.format_exc()))
|
self.failures.append((i, traceback.format_exc()))
|
||||||
@ -156,9 +160,10 @@ class ApplyDialog(QDialog):
|
|||||||
return
|
return
|
||||||
if self.failures:
|
if self.failures:
|
||||||
msg = []
|
msg = []
|
||||||
|
db = self.gui.current_db
|
||||||
for i, tb in self.failures:
|
for i, tb in self.failures:
|
||||||
title = self.db.title(i, index_is_id=True)
|
title = db.title(i, index_is_id=True)
|
||||||
authors = self.db.authors(i, index_is_id=True)
|
authors = db.authors(i, index_is_id=True)
|
||||||
if authors:
|
if authors:
|
||||||
authors = [x.replace('|', ',') for x in authors.split(',')]
|
authors = [x.replace('|', ',') for x in authors.split(',')]
|
||||||
title += ' - ' + authors_to_string(authors)
|
title += ' - ' + authors_to_string(authors)
|
||||||
@ -169,6 +174,12 @@ class ApplyDialog(QDialog):
|
|||||||
' in your library. Click "Show Details" to see '
|
' in your library. Click "Show Details" to see '
|
||||||
'details.'), det_msg='\n\n'.join(msg), show=True)
|
'details.'), det_msg='\n\n'.join(msg), show=True)
|
||||||
self.accept()
|
self.accept()
|
||||||
|
if self.ids:
|
||||||
|
cr = self.gui.library_view.currentIndex().row()
|
||||||
|
self.gui.library_view.model().refresh_ids(
|
||||||
|
self.ids, cr)
|
||||||
|
if self.gui.cover_flow:
|
||||||
|
self.gui.cover_flow.dataChanged()
|
||||||
|
|
||||||
_amd = None
|
_amd = None
|
||||||
def apply_metadata(job, gui, q, result):
|
def apply_metadata(job, gui, q, result):
|
||||||
@ -177,7 +188,7 @@ def apply_metadata(job, gui, q, result):
|
|||||||
q.finished.disconnect()
|
q.finished.disconnect()
|
||||||
if result != q.Accepted:
|
if result != q.Accepted:
|
||||||
return
|
return
|
||||||
id_map, failed_ids = job.result
|
id_map, failed_ids, failed_covers, title_map = job.result
|
||||||
id_map = dict([(k, v) for k, v in id_map.iteritems() if k not in
|
id_map = dict([(k, v) for k, v in id_map.iteritems() if k not in
|
||||||
failed_ids])
|
failed_ids])
|
||||||
if not id_map:
|
if not id_map:
|
||||||
@ -207,23 +218,32 @@ def apply_metadata(job, gui, q, result):
|
|||||||
return
|
return
|
||||||
|
|
||||||
_amd = ApplyDialog(id_map, gui)
|
_amd = ApplyDialog(id_map, gui)
|
||||||
|
_amd.exec_()
|
||||||
|
|
||||||
def proceed(gui, job):
|
def proceed(gui, job):
|
||||||
id_map, failed_ids = job.result
|
gui.status_bar.show_message(_('Metadata download completed'), 3000)
|
||||||
|
id_map, failed_ids, failed_covers, title_map = job.result
|
||||||
fmsg = det_msg = ''
|
fmsg = det_msg = ''
|
||||||
if failed_ids:
|
if failed_ids or failed_covers:
|
||||||
fmsg = _('Could not download metadata for %d of the books. Click'
|
fmsg = '<p>'+_('Could not download metadata and/or covers for %d of the books. Click'
|
||||||
' "Show details" to see which books.')%len(failed_ids)
|
' "Show details" to see which books.')%len(failed_ids)
|
||||||
det_msg = '\n'.join([id_map[i].title for i in failed_ids])
|
det_msg = []
|
||||||
|
for i in failed_ids | failed_covers:
|
||||||
|
title = title_map[i]
|
||||||
|
if i in failed_ids:
|
||||||
|
title += (' ' + _('(Failed metadata)'))
|
||||||
|
if i in failed_covers:
|
||||||
|
title += (' ' + _('(Failed cover)'))
|
||||||
|
det_msg.append(title)
|
||||||
msg = '<p>' + _('Finished downloading metadata for <b>%d book(s)</b>. '
|
msg = '<p>' + _('Finished downloading metadata for <b>%d book(s)</b>. '
|
||||||
'Proceed with updating the metadata in your library?')%len(id_map)
|
'Proceed with updating the metadata in your library?')%len(id_map)
|
||||||
q = MessageBox(MessageBox.QUESTION, _('Download complete'),
|
q = MessageBox(MessageBox.QUESTION, _('Download complete'),
|
||||||
msg + fmsg, det_msg=det_msg, show_copy_button=bool(failed_ids),
|
msg + fmsg, det_msg='\n'.join(det_msg), show_copy_button=bool(failed_ids),
|
||||||
parent=gui)
|
parent=gui)
|
||||||
q.vlb = q.bb.addButton(_('View log'), q.bb.ActionRole)
|
q.vlb = q.bb.addButton(_('View log'), q.bb.ActionRole)
|
||||||
q.vlb.setIcon(QIcon(I('debug.png')))
|
q.vlb.setIcon(QIcon(I('debug.png')))
|
||||||
q.vlb.clicked.connect(partial(view_log, job, q))
|
q.vlb.clicked.connect(partial(view_log, job, q))
|
||||||
q.det_msg_toggle.setVisible(bool(failed_ids))
|
q.det_msg_toggle.setVisible(bool(failed_ids | failed_covers))
|
||||||
q.setModal(False)
|
q.setModal(False)
|
||||||
q.show()
|
q.show()
|
||||||
q.finished.connect(partial(apply_metadata, job, gui, q))
|
q.finished.connect(partial(apply_metadata, job, gui, q))
|
||||||
@ -242,12 +262,18 @@ def merge_result(oldmi, newmi):
|
|||||||
if (not newmi.is_null(f) and getattr(newmi, f) == getattr(oldmi, f)):
|
if (not newmi.is_null(f) and getattr(newmi, f) == getattr(oldmi, f)):
|
||||||
setattr(newmi, f, getattr(dummy, f))
|
setattr(newmi, f, getattr(dummy, f))
|
||||||
|
|
||||||
|
newmi.last_modified = oldmi.last_modified
|
||||||
|
|
||||||
|
return newmi
|
||||||
|
|
||||||
def download(ids, db, do_identify, covers,
|
def download(ids, db, do_identify, covers,
|
||||||
log=None, abort=None, notifications=None):
|
log=None, abort=None, notifications=None):
|
||||||
ids = list(ids)
|
ids = list(ids)
|
||||||
metadata = [db.get_metadata(i, index_is_id=True, get_user_categories=False)
|
metadata = [db.get_metadata(i, index_is_id=True, get_user_categories=False)
|
||||||
for i in ids]
|
for i in ids]
|
||||||
failed_ids = set()
|
failed_ids = set()
|
||||||
|
failed_covers = set()
|
||||||
|
title_map = {}
|
||||||
ans = {}
|
ans = {}
|
||||||
count = 0
|
count = 0
|
||||||
for i, mi in izip(ids, metadata):
|
for i, mi in izip(ids, metadata):
|
||||||
@ -255,6 +281,7 @@ def download(ids, db, do_identify, covers,
|
|||||||
log.error('Aborting...')
|
log.error('Aborting...')
|
||||||
break
|
break
|
||||||
title, authors, identifiers = mi.title, mi.authors, mi.identifiers
|
title, authors, identifiers = mi.title, mi.authors, mi.identifiers
|
||||||
|
title_map[i] = title
|
||||||
if do_identify:
|
if do_identify:
|
||||||
results = []
|
results = []
|
||||||
try:
|
try:
|
||||||
@ -265,22 +292,29 @@ def download(ids, db, do_identify, covers,
|
|||||||
if results:
|
if results:
|
||||||
mi = merge_result(mi, results[0])
|
mi = merge_result(mi, results[0])
|
||||||
identifiers = mi.identifiers
|
identifiers = mi.identifiers
|
||||||
|
if not mi.is_null('rating'):
|
||||||
|
# set_metadata expects a rating out of 10
|
||||||
|
mi.rating *= 2
|
||||||
else:
|
else:
|
||||||
log.error('Failed to download metadata for', title)
|
log.error('Failed to download metadata for', title)
|
||||||
failed_ids.add(mi)
|
failed_ids.add(i)
|
||||||
|
# We don't want set_metadata operating on anything but covers
|
||||||
|
mi = merge_result(mi, mi)
|
||||||
if covers:
|
if covers:
|
||||||
cdata = download_cover(log, title=title, authors=authors,
|
cdata = download_cover(log, title=title, authors=authors,
|
||||||
identifiers=identifiers)
|
identifiers=identifiers)
|
||||||
if cdata:
|
if cdata is not None:
|
||||||
with PersistentTemporaryFile('.jpg', 'downloaded-cover-') as f:
|
with PersistentTemporaryFile('.jpg', 'downloaded-cover-') as f:
|
||||||
f.write(cdata)
|
f.write(cdata[-1])
|
||||||
mi.cover = f.name
|
mi.cover = f.name
|
||||||
|
else:
|
||||||
|
failed_covers.add(i)
|
||||||
ans[i] = mi
|
ans[i] = mi
|
||||||
count += 1
|
count += 1
|
||||||
notifications.put((count/len(ids),
|
notifications.put((count/len(ids),
|
||||||
_('Downloaded %d of %d')%(count, len(ids))))
|
_('Downloaded %d of %d')%(count, len(ids))))
|
||||||
log('Download complete, with %d failures'%len(failed_ids))
|
log('Download complete, with %d failures'%len(failed_ids))
|
||||||
return (ans, failed_ids)
|
return (ans, failed_ids, failed_covers, title_map)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@ -56,7 +56,12 @@ class ConfigWidget(QWidget):
|
|||||||
self.setLayout(l)
|
self.setLayout(l)
|
||||||
|
|
||||||
self.gb = QGroupBox(_('Downloaded metadata fields'), self)
|
self.gb = QGroupBox(_('Downloaded metadata fields'), self)
|
||||||
l.addWidget(self.gb, 0, 0, 1, 2)
|
if plugin.config_help_message:
|
||||||
|
self.pchm = QLabel(plugin.config_help_message)
|
||||||
|
self.pchm.setWordWrap(True)
|
||||||
|
self.pchm.setOpenExternalLinks(True)
|
||||||
|
l.addWidget(self.pchm, 0, 0, 1, 2)
|
||||||
|
l.addWidget(self.gb, l.rowCount(), 0, 1, 2)
|
||||||
self.gb.l = QGridLayout()
|
self.gb.l = QGridLayout()
|
||||||
self.gb.setLayout(self.gb.l)
|
self.gb.setLayout(self.gb.l)
|
||||||
self.fields_view = v = QListView(self)
|
self.fields_view = v = QListView(self)
|
||||||
@ -81,7 +86,7 @@ class ConfigWidget(QWidget):
|
|||||||
widget.setValue(val)
|
widget.setValue(val)
|
||||||
elif opt.type == 'string':
|
elif opt.type == 'string':
|
||||||
widget = QLineEdit(self)
|
widget = QLineEdit(self)
|
||||||
widget.setText(val)
|
widget.setText(val if val else '')
|
||||||
elif opt.type == 'bool':
|
elif opt.type == 'bool':
|
||||||
widget = QCheckBox(opt.label, self)
|
widget = QCheckBox(opt.label, self)
|
||||||
widget.setChecked(bool(val))
|
widget.setChecked(bool(val))
|
||||||
|
@ -164,7 +164,8 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
|
|||||||
cc = self.custcols[c]
|
cc = self.custcols[c]
|
||||||
db.set_custom_column_metadata(cc['colnum'], name=cc['name'],
|
db.set_custom_column_metadata(cc['colnum'], name=cc['name'],
|
||||||
label=cc['label'],
|
label=cc['label'],
|
||||||
display = self.custcols[c]['display'])
|
display = self.custcols[c]['display'],
|
||||||
|
notify=False)
|
||||||
if '*must_restart' in self.custcols[c]:
|
if '*must_restart' in self.custcols[c]:
|
||||||
must_restart = True
|
must_restart = True
|
||||||
return must_restart
|
return must_restart
|
||||||
|
@ -41,6 +41,8 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
|
|||||||
'text':_('Yes/No'), 'is_multiple':False},
|
'text':_('Yes/No'), 'is_multiple':False},
|
||||||
10:{'datatype':'composite',
|
10:{'datatype':'composite',
|
||||||
'text':_('Column built from other columns'), 'is_multiple':False},
|
'text':_('Column built from other columns'), 'is_multiple':False},
|
||||||
|
11:{'datatype':'*composite',
|
||||||
|
'text':_('Column built from other columns, behaves like tags'), 'is_multiple':True},
|
||||||
}
|
}
|
||||||
|
|
||||||
def __init__(self, parent, editing, standard_colheads, standard_colnames):
|
def __init__(self, parent, editing, standard_colheads, standard_colnames):
|
||||||
@ -99,7 +101,9 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
|
|||||||
c = parent.custcols[col]
|
c = parent.custcols[col]
|
||||||
self.column_name_box.setText(c['label'])
|
self.column_name_box.setText(c['label'])
|
||||||
self.column_heading_box.setText(c['name'])
|
self.column_heading_box.setText(c['name'])
|
||||||
ct = c['datatype'] if not c['is_multiple'] else '*text'
|
ct = c['datatype']
|
||||||
|
if c['is_multiple']:
|
||||||
|
ct = '*' + ct
|
||||||
self.orig_column_number = c['colnum']
|
self.orig_column_number = c['colnum']
|
||||||
self.orig_column_name = col
|
self.orig_column_name = col
|
||||||
column_numbers = dict(map(lambda x:(self.column_types[x]['datatype'], x),
|
column_numbers = dict(map(lambda x:(self.column_types[x]['datatype'], x),
|
||||||
@ -109,7 +113,7 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
|
|||||||
if ct == 'datetime':
|
if ct == 'datetime':
|
||||||
if c['display'].get('date_format', None):
|
if c['display'].get('date_format', None):
|
||||||
self.date_format_box.setText(c['display'].get('date_format', ''))
|
self.date_format_box.setText(c['display'].get('date_format', ''))
|
||||||
elif ct == 'composite':
|
elif ct in ['composite', '*composite']:
|
||||||
self.composite_box.setText(c['display'].get('composite_template', ''))
|
self.composite_box.setText(c['display'].get('composite_template', ''))
|
||||||
sb = c['display'].get('composite_sort', 'text')
|
sb = c['display'].get('composite_sort', 'text')
|
||||||
vals = ['text', 'number', 'date', 'bool']
|
vals = ['text', 'number', 'date', 'bool']
|
||||||
@ -167,7 +171,7 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
|
|||||||
getattr(self, 'date_format_'+x).setVisible(col_type == 'datetime')
|
getattr(self, 'date_format_'+x).setVisible(col_type == 'datetime')
|
||||||
for x in ('box', 'default_label', 'label', 'sort_by', 'sort_by_label',
|
for x in ('box', 'default_label', 'label', 'sort_by', 'sort_by_label',
|
||||||
'make_category'):
|
'make_category'):
|
||||||
getattr(self, 'composite_'+x).setVisible(col_type == 'composite')
|
getattr(self, 'composite_'+x).setVisible(col_type in ['composite', '*composite'])
|
||||||
for x in ('box', 'default_label', 'label'):
|
for x in ('box', 'default_label', 'label'):
|
||||||
getattr(self, 'enum_'+x).setVisible(col_type == 'enumeration')
|
getattr(self, 'enum_'+x).setVisible(col_type == 'enumeration')
|
||||||
self.use_decorations.setVisible(col_type in ['text', 'composite', 'enumeration'])
|
self.use_decorations.setVisible(col_type in ['text', 'composite', 'enumeration'])
|
||||||
@ -187,8 +191,8 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
|
|||||||
'because these names are reserved for the index of a series column.'))
|
'because these names are reserved for the index of a series column.'))
|
||||||
col_heading = unicode(self.column_heading_box.text()).strip()
|
col_heading = unicode(self.column_heading_box.text()).strip()
|
||||||
col_type = self.column_types[self.column_type_box.currentIndex()]['datatype']
|
col_type = self.column_types[self.column_type_box.currentIndex()]['datatype']
|
||||||
if col_type == '*text':
|
if col_type[0] == '*':
|
||||||
col_type='text'
|
col_type = col_type[1:]
|
||||||
is_multiple = True
|
is_multiple = True
|
||||||
else:
|
else:
|
||||||
is_multiple = False
|
is_multiple = False
|
||||||
@ -249,11 +253,10 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
|
|||||||
elif col_type == 'text' and is_multiple:
|
elif col_type == 'text' and is_multiple:
|
||||||
display_dict = {'is_names': self.is_names.isChecked()}
|
display_dict = {'is_names': self.is_names.isChecked()}
|
||||||
|
|
||||||
if col_type in ['text', 'composite', 'enumeration']:
|
if col_type in ['text', 'composite', 'enumeration'] and not is_multiple:
|
||||||
display_dict['use_decorations'] = self.use_decorations.checkState()
|
display_dict['use_decorations'] = self.use_decorations.checkState()
|
||||||
|
|
||||||
if not self.editing_col:
|
if not self.editing_col:
|
||||||
db.field_metadata
|
|
||||||
self.parent.custcols[key] = {
|
self.parent.custcols[key] = {
|
||||||
'label':col,
|
'label':col,
|
||||||
'name':col_heading,
|
'name':col_heading,
|
||||||
|
@ -202,7 +202,8 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
|
|||||||
self.changed_signal.emit()
|
self.changed_signal.emit()
|
||||||
|
|
||||||
def refresh_gui(self, gui):
|
def refresh_gui(self, gui):
|
||||||
gui.emailer.calculate_rate_limit()
|
from calibre.gui2.email import gui_sendmail
|
||||||
|
gui_sendmail.calculate_rate_limit()
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
|
@ -10,7 +10,7 @@ __docformat__ = 'restructuredtext en'
|
|||||||
from operator import attrgetter
|
from operator import attrgetter
|
||||||
|
|
||||||
from PyQt4.Qt import (QAbstractTableModel, Qt, QAbstractListModel, QWidget,
|
from PyQt4.Qt import (QAbstractTableModel, Qt, QAbstractListModel, QWidget,
|
||||||
pyqtSignal, QVBoxLayout, QDialogButtonBox, QFrame, QLabel)
|
pyqtSignal, QVBoxLayout, QDialogButtonBox, QFrame, QLabel, QIcon)
|
||||||
|
|
||||||
from calibre.gui2.preferences import ConfigWidgetBase, test_widget
|
from calibre.gui2.preferences import ConfigWidgetBase, test_widget
|
||||||
from calibre.gui2.preferences.metadata_sources_ui import Ui_Form
|
from calibre.gui2.preferences.metadata_sources_ui import Ui_Form
|
||||||
@ -67,6 +67,13 @@ class SourcesModel(QAbstractTableModel): # {{{
|
|||||||
return self.enabled_overrides.get(plugin, orig)
|
return self.enabled_overrides.get(plugin, orig)
|
||||||
elif role == Qt.UserRole:
|
elif role == Qt.UserRole:
|
||||||
return plugin
|
return plugin
|
||||||
|
elif (role == Qt.DecorationRole and col == 0 and not
|
||||||
|
plugin.is_configured()):
|
||||||
|
return QIcon(I('list_remove.png'))
|
||||||
|
elif role == Qt.ToolTipRole:
|
||||||
|
if plugin.is_configured():
|
||||||
|
return _('This source is configured and ready to go')
|
||||||
|
return _('This source needs configuration')
|
||||||
return NONE
|
return NONE
|
||||||
|
|
||||||
def setData(self, index, val, role):
|
def setData(self, index, val, role):
|
||||||
@ -251,6 +258,7 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
|
|||||||
r('max_tags', msprefs)
|
r('max_tags', msprefs)
|
||||||
r('wait_after_first_identify_result', msprefs)
|
r('wait_after_first_identify_result', msprefs)
|
||||||
r('wait_after_first_cover_result', msprefs)
|
r('wait_after_first_cover_result', msprefs)
|
||||||
|
r('swap_author_names', msprefs)
|
||||||
|
|
||||||
self.configure_plugin_button.clicked.connect(self.configure_plugin)
|
self.configure_plugin_button.clicked.connect(self.configure_plugin)
|
||||||
self.sources_model = SourcesModel(self)
|
self.sources_model = SourcesModel(self)
|
||||||
|
@ -21,7 +21,7 @@
|
|||||||
<widget class="QStackedWidget" name="stack">
|
<widget class="QStackedWidget" name="stack">
|
||||||
<widget class="QWidget" name="page">
|
<widget class="QWidget" name="page">
|
||||||
<layout class="QGridLayout" name="gridLayout">
|
<layout class="QGridLayout" name="gridLayout">
|
||||||
<item row="0" column="0" rowspan="5">
|
<item row="0" column="0" rowspan="6">
|
||||||
<widget class="QGroupBox" name="groupBox">
|
<widget class="QGroupBox" name="groupBox">
|
||||||
<property name="title">
|
<property name="title">
|
||||||
<string>Metadata sources</string>
|
<string>Metadata sources</string>
|
||||||
@ -48,6 +48,16 @@
|
|||||||
</property>
|
</property>
|
||||||
</widget>
|
</widget>
|
||||||
</item>
|
</item>
|
||||||
|
<item>
|
||||||
|
<widget class="QLabel" name="label_5">
|
||||||
|
<property name="text">
|
||||||
|
<string>Sources with a red X next to their names must be configured before they will be used. </string>
|
||||||
|
</property>
|
||||||
|
<property name="wordWrap">
|
||||||
|
<bool>true</bool>
|
||||||
|
</property>
|
||||||
|
</widget>
|
||||||
|
</item>
|
||||||
<item>
|
<item>
|
||||||
<widget class="QPushButton" name="configure_plugin_button">
|
<widget class="QPushButton" name="configure_plugin_button">
|
||||||
<property name="text">
|
<property name="text">
|
||||||
@ -88,7 +98,14 @@
|
|||||||
</property>
|
</property>
|
||||||
</widget>
|
</widget>
|
||||||
</item>
|
</item>
|
||||||
<item row="2" column="1">
|
<item row="2" column="1" colspan="2">
|
||||||
|
<widget class="QCheckBox" name="opt_swap_author_names">
|
||||||
|
<property name="text">
|
||||||
|
<string>Swap author names from FN LN to LN, FN</string>
|
||||||
|
</property>
|
||||||
|
</widget>
|
||||||
|
</item>
|
||||||
|
<item row="3" column="1">
|
||||||
<widget class="QLabel" name="label_2">
|
<widget class="QLabel" name="label_2">
|
||||||
<property name="text">
|
<property name="text">
|
||||||
<string>Max. number of &tags to download:</string>
|
<string>Max. number of &tags to download:</string>
|
||||||
@ -98,10 +115,10 @@
|
|||||||
</property>
|
</property>
|
||||||
</widget>
|
</widget>
|
||||||
</item>
|
</item>
|
||||||
<item row="2" column="2">
|
<item row="3" column="2">
|
||||||
<widget class="QSpinBox" name="opt_max_tags"/>
|
<widget class="QSpinBox" name="opt_max_tags"/>
|
||||||
</item>
|
</item>
|
||||||
<item row="3" column="1">
|
<item row="4" column="1">
|
||||||
<widget class="QLabel" name="label_3">
|
<widget class="QLabel" name="label_3">
|
||||||
<property name="text">
|
<property name="text">
|
||||||
<string>Max. &time to wait after first match is found:</string>
|
<string>Max. &time to wait after first match is found:</string>
|
||||||
@ -111,14 +128,14 @@
|
|||||||
</property>
|
</property>
|
||||||
</widget>
|
</widget>
|
||||||
</item>
|
</item>
|
||||||
<item row="3" column="2">
|
<item row="4" column="2">
|
||||||
<widget class="QSpinBox" name="opt_wait_after_first_identify_result">
|
<widget class="QSpinBox" name="opt_wait_after_first_identify_result">
|
||||||
<property name="suffix">
|
<property name="suffix">
|
||||||
<string> secs</string>
|
<string> secs</string>
|
||||||
</property>
|
</property>
|
||||||
</widget>
|
</widget>
|
||||||
</item>
|
</item>
|
||||||
<item row="4" column="1">
|
<item row="5" column="1">
|
||||||
<widget class="QLabel" name="label_4">
|
<widget class="QLabel" name="label_4">
|
||||||
<property name="text">
|
<property name="text">
|
||||||
<string>Max. time to wait after first &cover is found:</string>
|
<string>Max. time to wait after first &cover is found:</string>
|
||||||
@ -128,7 +145,7 @@
|
|||||||
</property>
|
</property>
|
||||||
</widget>
|
</widget>
|
||||||
</item>
|
</item>
|
||||||
<item row="4" column="2">
|
<item row="5" column="2">
|
||||||
<widget class="QSpinBox" name="opt_wait_after_first_cover_result">
|
<widget class="QSpinBox" name="opt_wait_after_first_cover_result">
|
||||||
<property name="suffix">
|
<property name="suffix">
|
||||||
<string> secs</string>
|
<string> secs</string>
|
||||||
|
@ -171,10 +171,10 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
|
|||||||
return ConfigWidgetBase.commit(self)
|
return ConfigWidgetBase.commit(self)
|
||||||
|
|
||||||
def refresh_gui(self, gui):
|
def refresh_gui(self, gui):
|
||||||
|
gui.set_highlight_only_button_icon()
|
||||||
if self.muc_changed:
|
if self.muc_changed:
|
||||||
gui.tags_view.set_new_model()
|
gui.tags_view.set_new_model()
|
||||||
gui.search.search_as_you_type(config['search_as_you_type'])
|
gui.search.search_as_you_type(config['search_as_you_type'])
|
||||||
gui.library_view.model().set_highlight_only(config['highlight_search_matches'])
|
|
||||||
gui.search.do_search()
|
gui.search.do_search()
|
||||||
|
|
||||||
def clear_histories(self, *args):
|
def clear_histories(self, *args):
|
||||||
|
@ -10,7 +10,7 @@ import re
|
|||||||
|
|
||||||
from PyQt4.Qt import QComboBox, Qt, QLineEdit, QStringList, pyqtSlot, QDialog, \
|
from PyQt4.Qt import QComboBox, Qt, QLineEdit, QStringList, pyqtSlot, QDialog, \
|
||||||
pyqtSignal, QCompleter, QAction, QKeySequence, QTimer, \
|
pyqtSignal, QCompleter, QAction, QKeySequence, QTimer, \
|
||||||
QString
|
QString, QIcon
|
||||||
|
|
||||||
from calibre.gui2 import config
|
from calibre.gui2 import config
|
||||||
from calibre.gui2.dialogs.confirm_delete import confirm
|
from calibre.gui2.dialogs.confirm_delete import confirm
|
||||||
@ -383,6 +383,22 @@ class SearchBoxMixin(object): # {{{
|
|||||||
self.advanced_search_button.setStatusTip(self.advanced_search_button.toolTip())
|
self.advanced_search_button.setStatusTip(self.advanced_search_button.toolTip())
|
||||||
self.clear_button.setStatusTip(self.clear_button.toolTip())
|
self.clear_button.setStatusTip(self.clear_button.toolTip())
|
||||||
self.search_options_button.clicked.connect(self.search_options_button_clicked)
|
self.search_options_button.clicked.connect(self.search_options_button_clicked)
|
||||||
|
self.set_highlight_only_button_icon()
|
||||||
|
self.highlight_only_button.clicked.connect(self.highlight_only_clicked)
|
||||||
|
tt = _('Enable or disable search highlighting.') + '<br><br>'
|
||||||
|
tt += config.help('highlight_search_matches')
|
||||||
|
self.highlight_only_button.setToolTip(tt)
|
||||||
|
|
||||||
|
def highlight_only_clicked(self, state):
|
||||||
|
config['highlight_search_matches'] = not config['highlight_search_matches']
|
||||||
|
self.set_highlight_only_button_icon()
|
||||||
|
|
||||||
|
def set_highlight_only_button_icon(self):
|
||||||
|
if config['highlight_search_matches']:
|
||||||
|
self.highlight_only_button.setIcon(QIcon(I('highlight_only_on.png')))
|
||||||
|
else:
|
||||||
|
self.highlight_only_button.setIcon(QIcon(I('highlight_only_off.png')))
|
||||||
|
self.library_view.model().set_highlight_only(config['highlight_search_matches'])
|
||||||
|
|
||||||
def focus_search_box(self, *args):
|
def focus_search_box(self, *args):
|
||||||
self.search.setFocus(Qt.OtherFocusReason)
|
self.search.setFocus(Qt.OtherFocusReason)
|
||||||
@ -443,6 +459,7 @@ class SavedSearchBoxMixin(object): # {{{
|
|||||||
# rebuild the restrictions combobox using current saved searches
|
# rebuild the restrictions combobox using current saved searches
|
||||||
self.search_restriction.clear()
|
self.search_restriction.clear()
|
||||||
self.search_restriction.addItem('')
|
self.search_restriction.addItem('')
|
||||||
|
self.search_restriction.addItem(_('*Current search'))
|
||||||
if recount:
|
if recount:
|
||||||
self.tags_view.recount()
|
self.tags_view.recount()
|
||||||
for s in p:
|
for s in p:
|
||||||
|
@ -29,13 +29,32 @@ class SearchRestrictionMixin(object):
|
|||||||
self.search_restriction.setCurrentIndex(r)
|
self.search_restriction.setCurrentIndex(r)
|
||||||
self.apply_search_restriction(r)
|
self.apply_search_restriction(r)
|
||||||
|
|
||||||
|
def apply_text_search_restriction(self, search):
|
||||||
|
if not search:
|
||||||
|
self.search_restriction.setItemText(1, _('*Current search'))
|
||||||
|
self.search_restriction.setCurrentIndex(0)
|
||||||
|
else:
|
||||||
|
self.search_restriction.setCurrentIndex(1)
|
||||||
|
self.search_restriction.setItemText(1, search)
|
||||||
|
self._apply_search_restriction(search)
|
||||||
|
|
||||||
def apply_search_restriction(self, i):
|
def apply_search_restriction(self, i):
|
||||||
|
self.search_restriction.setItemText(1, _('*Current search'))
|
||||||
|
if i == 1:
|
||||||
|
restriction = unicode(self.search.currentText())
|
||||||
|
if not restriction:
|
||||||
|
self.search_restriction.setCurrentIndex(0)
|
||||||
|
else:
|
||||||
|
self.search_restriction.setItemText(1, restriction)
|
||||||
|
else:
|
||||||
r = unicode(self.search_restriction.currentText())
|
r = unicode(self.search_restriction.currentText())
|
||||||
if r is not None and r != '':
|
if r is not None and r != '':
|
||||||
restriction = 'search:"%s"'%(r)
|
restriction = 'search:"%s"'%(r)
|
||||||
else:
|
else:
|
||||||
restriction = ''
|
restriction = ''
|
||||||
|
self._apply_search_restriction(restriction)
|
||||||
|
|
||||||
|
def _apply_search_restriction(self, restriction):
|
||||||
self.saved_search.clear()
|
self.saved_search.clear()
|
||||||
# The order below is important. Set the restriction, force a '' search
|
# The order below is important. Set the restriction, force a '' search
|
||||||
# to apply it, reset the tag browser to take it into account, then set
|
# to apply it, reset the tag browser to take it into account, then set
|
||||||
|
@ -86,6 +86,7 @@ class TagsView(QTreeView): # {{{
|
|||||||
tag_item_renamed = pyqtSignal()
|
tag_item_renamed = pyqtSignal()
|
||||||
search_item_renamed = pyqtSignal()
|
search_item_renamed = pyqtSignal()
|
||||||
drag_drop_finished = pyqtSignal(object)
|
drag_drop_finished = pyqtSignal(object)
|
||||||
|
restriction_error = pyqtSignal()
|
||||||
|
|
||||||
def __init__(self, parent=None):
|
def __init__(self, parent=None):
|
||||||
QTreeView.__init__(self, parent=None)
|
QTreeView.__init__(self, parent=None)
|
||||||
@ -1117,9 +1118,13 @@ class TagsModel(QAbstractItemModel): # {{{
|
|||||||
|
|
||||||
# Get the categories
|
# Get the categories
|
||||||
if self.search_restriction:
|
if self.search_restriction:
|
||||||
|
try:
|
||||||
data = self.db.get_categories(sort=sort,
|
data = self.db.get_categories(sort=sort,
|
||||||
icon_map=self.category_icon_map,
|
icon_map=self.category_icon_map,
|
||||||
ids=self.db.search('', return_matches=True))
|
ids=self.db.search('', return_matches=True))
|
||||||
|
except:
|
||||||
|
data = self.db.get_categories(sort=sort, icon_map=self.category_icon_map)
|
||||||
|
self.tags_view.restriction_error.emit()
|
||||||
else:
|
else:
|
||||||
data = self.db.get_categories(sort=sort, icon_map=self.category_icon_map)
|
data = self.db.get_categories(sort=sort, icon_map=self.category_icon_map)
|
||||||
|
|
||||||
@ -1822,9 +1827,15 @@ class TagBrowserMixin(object): # {{{
|
|||||||
self.tags_view.tag_item_renamed.connect(self.do_tag_item_renamed)
|
self.tags_view.tag_item_renamed.connect(self.do_tag_item_renamed)
|
||||||
self.tags_view.search_item_renamed.connect(self.saved_searches_changed)
|
self.tags_view.search_item_renamed.connect(self.saved_searches_changed)
|
||||||
self.tags_view.drag_drop_finished.connect(self.drag_drop_finished)
|
self.tags_view.drag_drop_finished.connect(self.drag_drop_finished)
|
||||||
|
self.tags_view.restriction_error.connect(self.do_restriction_error,
|
||||||
|
type=Qt.QueuedConnection)
|
||||||
self.edit_categories.clicked.connect(lambda x:
|
self.edit_categories.clicked.connect(lambda x:
|
||||||
self.do_edit_user_categories())
|
self.do_edit_user_categories())
|
||||||
|
|
||||||
|
def do_restriction_error(self):
|
||||||
|
error_dialog(self.tags_view, _('Invalid search restriction'),
|
||||||
|
_('The current search restriction is invalid'), show=True)
|
||||||
|
|
||||||
def do_add_subcategory(self, on_category_key, new_category_name=None):
|
def do_add_subcategory(self, on_category_key, new_category_name=None):
|
||||||
'''
|
'''
|
||||||
Add a subcategory to the category 'on_category'. If new_category_name is
|
Add a subcategory to the category 'on_category'. If new_category_name is
|
||||||
|
@ -189,7 +189,11 @@ class ThreadedJobServer(Thread):
|
|||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
while self.keep_going:
|
while self.keep_going:
|
||||||
|
try:
|
||||||
self.run_once()
|
self.run_once()
|
||||||
|
except:
|
||||||
|
import traceback
|
||||||
|
traceback.print_exc()
|
||||||
time.sleep(0.1)
|
time.sleep(0.1)
|
||||||
|
|
||||||
def run_once(self):
|
def run_once(self):
|
||||||
|
@ -391,21 +391,38 @@ class ResultCache(SearchQueryParser): # {{{
|
|||||||
def build_numeric_relop_dict(self):
|
def build_numeric_relop_dict(self):
|
||||||
self.numeric_search_relops = {
|
self.numeric_search_relops = {
|
||||||
'=':[1, lambda r, q: r == q],
|
'=':[1, lambda r, q: r == q],
|
||||||
'>':[1, lambda r, q: r > q],
|
'>':[1, lambda r, q: r is not None and r > q],
|
||||||
'<':[1, lambda r, q: r < q],
|
'<':[1, lambda r, q: r is not None and r < q],
|
||||||
'!=':[2, lambda r, q: r != q],
|
'!=':[2, lambda r, q: r != q],
|
||||||
'>=':[2, lambda r, q: r >= q],
|
'>=':[2, lambda r, q: r is not None and r >= q],
|
||||||
'<=':[2, lambda r, q: r <= q]
|
'<=':[2, lambda r, q: r is not None and r <= q]
|
||||||
}
|
}
|
||||||
|
|
||||||
def get_numeric_matches(self, location, query, candidates, val_func = None):
|
def get_numeric_matches(self, location, query, candidates, val_func = None):
|
||||||
matches = set([])
|
matches = set([])
|
||||||
if len(query) == 0:
|
if len(query) == 0:
|
||||||
return matches
|
return matches
|
||||||
|
|
||||||
|
if val_func is None:
|
||||||
|
loc = self.field_metadata[location]['rec_index']
|
||||||
|
val_func = lambda item, loc=loc: item[loc]
|
||||||
|
dt = self.field_metadata[location]['datatype']
|
||||||
|
|
||||||
|
q = ''
|
||||||
|
val_func = lambda item, loc=loc: item[loc]
|
||||||
|
cast = adjust = lambda x: x
|
||||||
|
|
||||||
if query == 'false':
|
if query == 'false':
|
||||||
query = '0'
|
if dt == 'rating' or location == 'cover':
|
||||||
|
relop = lambda x,y: not bool(x)
|
||||||
|
else:
|
||||||
|
relop = lambda x,y: x is None
|
||||||
elif query == 'true':
|
elif query == 'true':
|
||||||
query = '!=0'
|
if dt == 'rating' or location == 'cover':
|
||||||
|
relop = lambda x,y: bool(x)
|
||||||
|
else:
|
||||||
|
relop = lambda x,y: x is not None
|
||||||
|
else:
|
||||||
relop = None
|
relop = None
|
||||||
for k in self.numeric_search_relops.keys():
|
for k in self.numeric_search_relops.keys():
|
||||||
if query.startswith(k):
|
if query.startswith(k):
|
||||||
@ -414,23 +431,15 @@ class ResultCache(SearchQueryParser): # {{{
|
|||||||
if relop is None:
|
if relop is None:
|
||||||
(p, relop) = self.numeric_search_relops['=']
|
(p, relop) = self.numeric_search_relops['=']
|
||||||
|
|
||||||
if val_func is None:
|
|
||||||
loc = self.field_metadata[location]['rec_index']
|
|
||||||
val_func = lambda item, loc=loc: item[loc]
|
|
||||||
|
|
||||||
dt = self.field_metadata[location]['datatype']
|
|
||||||
if dt == 'int':
|
if dt == 'int':
|
||||||
cast = (lambda x: int (x))
|
cast = lambda x: int (x)
|
||||||
adjust = lambda x: x
|
|
||||||
elif dt == 'rating':
|
elif dt == 'rating':
|
||||||
cast = (lambda x: int (x))
|
cast = lambda x: 0 if x is None else int (x)
|
||||||
adjust = lambda x: x/2
|
adjust = lambda x: x/2
|
||||||
elif dt in ('float', 'composite'):
|
elif dt in ('float', 'composite'):
|
||||||
cast = lambda x : float (x)
|
cast = lambda x : float (x)
|
||||||
adjust = lambda x: x
|
|
||||||
else: # count operation
|
else: # count operation
|
||||||
cast = (lambda x: int (x))
|
cast = (lambda x: int (x))
|
||||||
adjust = lambda x: x
|
|
||||||
|
|
||||||
if len(query) > 1:
|
if len(query) > 1:
|
||||||
mult = query[-1:].lower()
|
mult = query[-1:].lower()
|
||||||
@ -442,7 +451,8 @@ class ResultCache(SearchQueryParser): # {{{
|
|||||||
try:
|
try:
|
||||||
q = cast(query) * mult
|
q = cast(query) * mult
|
||||||
except:
|
except:
|
||||||
return matches
|
raise ParseException(query, len(query),
|
||||||
|
'Non-numeric value in query', self)
|
||||||
|
|
||||||
for id_ in candidates:
|
for id_ in candidates:
|
||||||
item = self._data[id_]
|
item = self._data[id_]
|
||||||
@ -451,10 +461,8 @@ class ResultCache(SearchQueryParser): # {{{
|
|||||||
try:
|
try:
|
||||||
v = cast(val_func(item))
|
v = cast(val_func(item))
|
||||||
except:
|
except:
|
||||||
v = 0
|
v = None
|
||||||
if not v:
|
if v:
|
||||||
v = 0
|
|
||||||
else:
|
|
||||||
v = adjust(v)
|
v = adjust(v)
|
||||||
if relop(v, q):
|
if relop(v, q):
|
||||||
matches.add(item[0])
|
matches.add(item[0])
|
||||||
@ -744,7 +752,7 @@ class ResultCache(SearchQueryParser): # {{{
|
|||||||
|
|
||||||
if loc not in exclude_fields: # time for text matching
|
if loc not in exclude_fields: # time for text matching
|
||||||
if is_multiple_cols[loc] is not None:
|
if is_multiple_cols[loc] is not None:
|
||||||
vals = item[loc].split(is_multiple_cols[loc])
|
vals = [v.strip() for v in item[loc].split(is_multiple_cols[loc])]
|
||||||
else:
|
else:
|
||||||
vals = [item[loc]] ### make into list to make _match happy
|
vals = [item[loc]] ### make into list to make _match happy
|
||||||
if _match(q, vals, matchkind):
|
if _match(q, vals, matchkind):
|
||||||
|
@ -182,7 +182,7 @@ class CustomColumns(object):
|
|||||||
else:
|
else:
|
||||||
is_category = False
|
is_category = False
|
||||||
if v['is_multiple']:
|
if v['is_multiple']:
|
||||||
is_m = '|'
|
is_m = ',' if v['datatype'] == 'composite' else '|'
|
||||||
else:
|
else:
|
||||||
is_m = None
|
is_m = None
|
||||||
tn = 'custom_column_{0}'.format(v['num'])
|
tn = 'custom_column_{0}'.format(v['num'])
|
||||||
@ -318,7 +318,7 @@ class CustomColumns(object):
|
|||||||
self.conn.commit()
|
self.conn.commit()
|
||||||
|
|
||||||
def set_custom_column_metadata(self, num, name=None, label=None,
|
def set_custom_column_metadata(self, num, name=None, label=None,
|
||||||
is_editable=None, display=None):
|
is_editable=None, display=None, notify=True):
|
||||||
changed = False
|
changed = False
|
||||||
if name is not None:
|
if name is not None:
|
||||||
self.conn.execute('UPDATE custom_columns SET name=? WHERE id=?',
|
self.conn.execute('UPDATE custom_columns SET name=? WHERE id=?',
|
||||||
@ -340,6 +340,9 @@ class CustomColumns(object):
|
|||||||
|
|
||||||
if changed:
|
if changed:
|
||||||
self.conn.commit()
|
self.conn.commit()
|
||||||
|
if notify:
|
||||||
|
self.notify('metadata', [])
|
||||||
|
|
||||||
return changed
|
return changed
|
||||||
|
|
||||||
def set_custom_bulk_multiple(self, ids, add=[], remove=[],
|
def set_custom_bulk_multiple(self, ids, add=[], remove=[],
|
||||||
@ -595,7 +598,7 @@ class CustomColumns(object):
|
|||||||
raise ValueError('%r is not a supported data type'%datatype)
|
raise ValueError('%r is not a supported data type'%datatype)
|
||||||
normalized = datatype not in ('datetime', 'comments', 'int', 'bool',
|
normalized = datatype not in ('datetime', 'comments', 'int', 'bool',
|
||||||
'float', 'composite')
|
'float', 'composite')
|
||||||
is_multiple = is_multiple and datatype in ('text',)
|
is_multiple = is_multiple and datatype in ('text', 'composite')
|
||||||
num = self.conn.execute(
|
num = self.conn.execute(
|
||||||
('INSERT INTO '
|
('INSERT INTO '
|
||||||
'custom_columns(label,name,datatype,is_multiple,editable,display,normalized)'
|
'custom_columns(label,name,datatype,is_multiple,editable,display,normalized)'
|
||||||
|
@ -854,6 +854,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
|||||||
mi.uuid = row[fm['uuid']]
|
mi.uuid = row[fm['uuid']]
|
||||||
mi.title_sort = row[fm['sort']]
|
mi.title_sort = row[fm['sort']]
|
||||||
mi.last_modified = row[fm['last_modified']]
|
mi.last_modified = row[fm['last_modified']]
|
||||||
|
mi.size = row[fm['size']]
|
||||||
formats = row[fm['formats']]
|
formats = row[fm['formats']]
|
||||||
if not formats:
|
if not formats:
|
||||||
formats = None
|
formats = None
|
||||||
@ -1223,7 +1224,12 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
|||||||
if field['datatype'] == 'composite':
|
if field['datatype'] == 'composite':
|
||||||
dex = field['rec_index']
|
dex = field['rec_index']
|
||||||
for book in self.data.iterall():
|
for book in self.data.iterall():
|
||||||
if book[dex] == id_:
|
if field['is_multiple']:
|
||||||
|
vals = [v.strip() for v in book[dex].split(field['is_multiple'])
|
||||||
|
if v.strip()]
|
||||||
|
if id_ in vals:
|
||||||
|
ans.add(book[0])
|
||||||
|
elif book[dex] == id_:
|
||||||
ans.add(book[0])
|
ans.add(book[0])
|
||||||
return ans
|
return ans
|
||||||
|
|
||||||
@ -1353,6 +1359,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
|||||||
cat = tb_cats[category]
|
cat = tb_cats[category]
|
||||||
if cat['datatype'] == 'composite' and \
|
if cat['datatype'] == 'composite' and \
|
||||||
cat['display'].get('make_category', False):
|
cat['display'].get('make_category', False):
|
||||||
|
tids[category] = {}
|
||||||
tcategories[category] = {}
|
tcategories[category] = {}
|
||||||
md.append((category, cat['rec_index'], cat['is_multiple'],
|
md.append((category, cat['rec_index'], cat['is_multiple'],
|
||||||
cat['datatype'] == 'composite'))
|
cat['datatype'] == 'composite'))
|
||||||
@ -1401,8 +1408,18 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
|||||||
prints('get_categories: item', val, 'is not in', cat, 'list!')
|
prints('get_categories: item', val, 'is not in', cat, 'list!')
|
||||||
else:
|
else:
|
||||||
vals = book[dex].split(mult)
|
vals = book[dex].split(mult)
|
||||||
|
if is_comp:
|
||||||
|
vals = [v.strip() for v in vals if v.strip()]
|
||||||
|
for val in vals:
|
||||||
|
if val not in tids:
|
||||||
|
tids[cat][val] = (val, val)
|
||||||
|
item = tcategories[cat].get(val, None)
|
||||||
|
if not item:
|
||||||
|
item = tag_class(val, val)
|
||||||
|
tcategories[cat][val] = item
|
||||||
|
item.c += 1
|
||||||
|
item.id = val
|
||||||
for val in vals:
|
for val in vals:
|
||||||
if not val: continue
|
|
||||||
try:
|
try:
|
||||||
(item_id, sort_val) = tids[cat][val] # let exceptions fly
|
(item_id, sort_val) = tids[cat][val] # let exceptions fly
|
||||||
item = tcategories[cat].get(val, None)
|
item = tcategories[cat].get(val, None)
|
||||||
|
@ -364,11 +364,11 @@ class FieldMetadata(dict):
|
|||||||
self._tb_cats[k]['display'] = {}
|
self._tb_cats[k]['display'] = {}
|
||||||
self._tb_cats[k]['is_editable'] = True
|
self._tb_cats[k]['is_editable'] = True
|
||||||
self._add_search_terms_to_map(k, v['search_terms'])
|
self._add_search_terms_to_map(k, v['search_terms'])
|
||||||
for x in ('timestamp', 'last_modified'):
|
self._tb_cats['timestamp']['display'] = {
|
||||||
self._tb_cats[x]['display'] = {
|
|
||||||
'date_format': tweaks['gui_timestamp_display_format']}
|
'date_format': tweaks['gui_timestamp_display_format']}
|
||||||
self._tb_cats['pubdate']['display'] = {
|
self._tb_cats['pubdate']['display'] = {
|
||||||
'date_format': tweaks['gui_pubdate_display_format']}
|
'date_format': tweaks['gui_pubdate_display_format']}
|
||||||
|
self._tb_cats['last_modified']['display'] = {'date_format': 'iso'}
|
||||||
self.custom_field_prefix = '#'
|
self.custom_field_prefix = '#'
|
||||||
self.get = self._tb_cats.get
|
self.get = self._tb_cats.get
|
||||||
|
|
||||||
|
@ -20,9 +20,9 @@ What formats does |app| support conversion to/from?
|
|||||||
|app| supports the conversion of many input formats to many output formats.
|
|app| supports the conversion of many input formats to many output formats.
|
||||||
It can convert every input format in the following list, to every output format.
|
It can convert every input format in the following list, to every output format.
|
||||||
|
|
||||||
*Input Formats:* CBZ, CBR, CBC, CHM, EPUB, FB2, HTML, LIT, LRF, MOBI, ODT, PDF, PRC**, PDB, PML, RB, RTF, SNB, TCR, TXT
|
*Input Formats:* CBZ, CBR, CBC, CHM, EPUB, FB2, HTML, HTMLZ, LIT, LRF, MOBI, ODT, PDF, PRC**, PDB, PML, RB, RTF, SNB, TCR, TXT, TXTZ
|
||||||
|
|
||||||
*Output Formats:* EPUB, FB2, OEB, LIT, LRF, MOBI, PDB, PML, RB, PDF, SNB, TCR, TXT
|
*Output Formats:* EPUB, FB2, OEB, LIT, LRF, MOBI, HTMLZ, PDB, PML, RB, PDF, SNB, TCR, TXT, TXTZ
|
||||||
|
|
||||||
** PRC is a generic format, |app| supports PRC files with TextRead and MOBIBook headers
|
** PRC is a generic format, |app| supports PRC files with TextRead and MOBIBook headers
|
||||||
|
|
||||||
@ -30,7 +30,7 @@ It can convert every input format in the following list, to every output format.
|
|||||||
|
|
||||||
What are the best source formats to convert?
|
What are the best source formats to convert?
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
In order of decreasing preference: LIT, MOBI, EPUB, HTML, PRC, RTF, PDB, TXT, PDF
|
In order of decreasing preference: LIT, MOBI, EPUB, FB2, HTML, PRC, RTF, PDB, TXT, PDF
|
||||||
|
|
||||||
Why does the PDF conversion lose some images/tables?
|
Why does the PDF conversion lose some images/tables?
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
@ -22,6 +22,8 @@ First start the |app| content server as shown below::
|
|||||||
|
|
||||||
calibre-server --url-prefix /calibre --port 8080
|
calibre-server --url-prefix /calibre --port 8080
|
||||||
|
|
||||||
|
The key parameter here is ``--url-prefix /calibre``. This causes the content server to serve all URLs prefixed by calibre. To see this in action, visit ``http://localhost:8080/calibre`` in your browser. You should see the normal content server website, but now it will run under /calibre.
|
||||||
|
|
||||||
Now suppose you are using Apache as your main server. First enable the proxy modules in apache, by adding the following to :file:`httpd.conf`::
|
Now suppose you are using Apache as your main server. First enable the proxy modules in apache, by adding the following to :file:`httpd.conf`::
|
||||||
|
|
||||||
LoadModule proxy_module modules/mod_proxy.so
|
LoadModule proxy_module modules/mod_proxy.so
|
||||||
|
@ -236,15 +236,16 @@ The following functions are available in addition to those described in single-f
|
|||||||
* ``format_date(x, date_format)`` -- format_date(val, format_string) -- format the value, which must be a date field, using the format_string, returning a string. The formatting codes are::
|
* ``format_date(x, date_format)`` -- format_date(val, format_string) -- format the value, which must be a date field, using the format_string, returning a string. The formatting codes are::
|
||||||
|
|
||||||
d : the day as number without a leading zero (1 to 31)
|
d : the day as number without a leading zero (1 to 31)
|
||||||
dd : the day as number with a leading zero (01 to 31) '
|
dd : the day as number with a leading zero (01 to 31)
|
||||||
ddd : the abbreviated localized day name (e.g. "Mon" to "Sun"). '
|
ddd : the abbreviated localized day name (e.g. "Mon" to "Sun").
|
||||||
dddd : the long localized day name (e.g. "Monday" to "Sunday"). '
|
dddd : the long localized day name (e.g. "Monday" to "Sunday").
|
||||||
M : the month as number without a leading zero (1 to 12). '
|
M : the month as number without a leading zero (1 to 12).
|
||||||
MM : the month as number with a leading zero (01 to 12) '
|
MM : the month as number with a leading zero (01 to 12)
|
||||||
MMM : the abbreviated localized month name (e.g. "Jan" to "Dec"). '
|
MMM : the abbreviated localized month name (e.g. "Jan" to "Dec").
|
||||||
MMMM : the long localized month name (e.g. "January" to "December"). '
|
MMMM : the long localized month name (e.g. "January" to "December").
|
||||||
yy : the year as two digit number (00 to 99). '
|
yy : the year as two digit number (00 to 99).
|
||||||
yyyy : the year as four digit number.'
|
yyyy : the year as four digit number.
|
||||||
|
iso : the date with time and timezone. Must be the only format present.
|
||||||
|
|
||||||
* ``eval(string)`` -- evaluates the string as a program, passing the local variables (those ``assign`` ed to). This permits using the template processor to construct complex results from local variables.
|
* ``eval(string)`` -- evaluates the string as a program, passing the local variables (those ``assign`` ed to). This permits using the template processor to construct complex results from local variables.
|
||||||
* ``multiply(x, y)`` -- returns x * y. Throws an exception if either x or y are not numbers.
|
* ``multiply(x, y)`` -- returns x * y. Throws an exception if either x or y are not numbers.
|
||||||
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user