mirror of https://github.com/kovidgoyal/calibre.git
commit 183b5a75fa
merge from trunk
@@ -19,6 +19,83 @@
 # new recipes:
 # - title:

- version: 0.7.55
  date: 2011-04-15

  new features:
    - title: "Add a menu bar. Useful if you use a lot of plugins and are running out of space in your toolbars. By default the menu bar is hidden (except on OS X). You can add actions to it via Preferences->Toolbars. As soon as you add actions, it will become visible."

    - title: "OS X: Make the main calibre window look a little more 'native' on OS X"

    - title: "Show recently viewed books in the View button's drop down menu"

    - title: "Add a button next to the search bar to toggle easily between highlight and restrict search modes"

    - title: "Allow the use of arbitrary searches as search restrictions, rather than just saved searches. Do this by using the special entry '*Current Search' in the Search Restriction dropdown."

    - title: "The Connect/share icon now changes color to indicate that the content server is running"
      tickets: [755444]

    - title: "Device drivers for Viewpad 7, Motorola Xoom and Asus Eee Note"

    - title: "Add tags-like composite custom columns."
      tickets: [759663]

    - title: "Add a new date format code 'iso'. Permits formatting dates to show the complete time (via Preferences->Tweaks)"

    - title: "Allow the use of data from the size column in the template language"
      tickets: [759645]

    - title: "Support reading/writing covers to txtz/htmlz files"

    - title: "Speedup for large library sorting when using composite custom columns"

    - title: "Move the 'boolean columns are tristate' tweak to Preferences->Behavior"

  bug fixes:
    - title: "Fix a regression in 0.7.54 that broke reading covers/metadata from cbz files."
      tickets: [756892]

    - title: "Fix tweak names and help text not being translatable"
      tickets: [756736]

    - title: "When the size of a book is less than 0.1MB but not zero, display the size as <0.1 instead of 0.0."
      tickets: [755768]

    - title: "HTMLZ Input: Fix handling of HTML files encoded in an encoding other than UTF-8"

    - title: "EPUB Input: Fix EPUB files with empty Adobe PAGE templates causing conversion to abort."
      tickets: [760390]

    - title: "Fix CHM input plugin not closing opened input file"
      tickets: [760589]

    - title: "MOBI Output: Make super/subscripts use a slightly smaller font when rendered on a Kindle. Also allow the use of vertical-align:top/bottom in the CSS to specify a super/subscript."
      tickets: [758667]

    - title: "LRF Input: Detect and work around LRF files that have deeply nested spans, instead of crashing."
      tickets: [759680]

    - title: "MOBI Output: Fix bug that would cause conversion to unnecessarily abort when malformed hyperlinks are present in the input document."
      tickets: [759313]

    - title: "Make true and false searches work correctly for numeric fields."

    - title: "MOBI Output: The Ignore margins setting no longer ignores blockquotes, only margins set via CSS on other elements."
      tickets: [758675]

    - title: "Fix regression that caused clicking auto send to also change the email address in Preferences->Email"

  improved recipes:
    - Wall Street Journal
    - Weblogs SL
    - Tabu.ro
    - Vecernje Novosti

  new recipes:
    - title: Hallo Assen and Dvhn
      author: Reijendert


- version: 0.7.54
  date: 2011-04-08
@@ -23,7 +23,7 @@ class BigOven(BasicNewsRecipe):
               , 'publisher' : publisher
               , 'language'  : language
               }


    def get_browser(self):
        br = BasicNewsRecipe.get_browser()
        if self.username is not None and self.password is not None:
@@ -36,29 +36,38 @@ class BigOven(BasicNewsRecipe):

    remove_attributes = ['style', 'font']

    remove_tags = [dict(name='div', attrs={'class':['ppy-caption']})
                  ,dict(name='div', attrs={'id':['float_corner']})
                  ]
    def get_article_url(self, article):
        url = article.get('feedburner_origlink',article.get('link', None))
        front, middle, end = url.partition('comhttp//www.bigoven.com')
        url = front + 'com' + end
        return url

    keep_only_tags = [dict(name='div', attrs={'id':['nosidebar_main']})]

    remove_tags_after = [dict(name='div', attrs={'class':['display-field']})]

    remove_tags = [dict(name='ul', attrs={'class':['tabs']})]

    preprocess_regexps = [
        (re.compile(r'Want detailed nutrition information?', re.DOTALL), lambda match: ''),
        (re.compile('\(You could win \$100 in our ', re.DOTALL), lambda match: ''),
        ]

    def preprocess_html(self, soup):
        for tag in soup.findAll(name='a', attrs={'class':['deflink']}):
            tag.replaceWith(tag.string)
        for tag in soup.findAll(name='a', text=re.compile(r'.*View Metric.*', re.DOTALL)):
            tag.parent.parent.extract()
        for tag in soup.findAll(name='a', text=re.compile(r'.*Add my own photo.*', re.DOTALL)):
            tag.parent.parent.extract()
        for tag in soup.findAll(name='div', attrs={'class':['container']}):
            if tag.find(name='h1'):
                continue
            if tag.find(name='h2', text=re.compile(r'.*Ingredients.*', re.DOTALL)):
                print 'tag found Ingred h2'
                continue
            if tag.find(name='h2', text=re.compile(r'Preparation.*', re.DOTALL)):
                print 'tag found Prep h2'
                continue
            tag.extract()
            tag.parent.parent.extract()
        for tag in soup.findAll(text=re.compile(r'.*Try BigOven Pro for Free.*', re.DOTALL)):
            tag.extract()
        for tag in soup.findAll(text=re.compile(r'.*Add my photo of this recipe.*', re.DOTALL)):
            tag.parent.extract()
        for tag in soup.findAll(name='a', text=re.compile(r'.*photo contest.*', re.DOTALL)):
            tag.parent.extract()
        for tag in soup.findAll(name='a', text='Remove ads'):
            tag.parent.parent.extract()
        for tag in soup.findAll(name='ol', attrs={'class':['recipe-tags']}):
            tag.parent.extract()
        return soup

    feeds = [(u'4 & 5 Star Rated Recipes', u'http://feeds.feedburner.com/Bigovencom-RecipeRaves?format=xml')]

    feeds = [(u'Recent Raves', u'http://www.bigoven.com/rss/recentraves'),
             (u'Recipe Of The Day', u'http://feeds.feedburner.com/bigovencom-RecipeOfTheDay')]

@@ -18,7 +18,6 @@ class IrishTimes(BasicNewsRecipe):
    oldest_article        = 1.0
    max_articles_per_feed = 100
    no_stylesheets        = True
    simultaneous_downloads= 5

    r = re.compile('.*(?P<url>http:\/\/(www.irishtimes.com)|(rss.feedsportal.com\/c)\/.*\.html?).*')
    remove_tags = [dict(name='div', attrs={'class':'footer'})]
@@ -26,17 +25,17 @@ class IrishTimes(BasicNewsRecipe):

    feeds = [
        ('Frontpage', 'http://www.irishtimes.com/feeds/rss/newspaper/index.rss'),
        ('Ireland', 'http://www.irishtimes.com/feeds/rss/newspaper/ireland.rss'),
        ('World', 'http://www.irishtimes.com/feeds/rss/newspaper/world.rss'),
        ('Finance', 'http://www.irishtimes.com/feeds/rss/newspaper/finance.rss'),
        ('Features', 'http://www.irishtimes.com/feeds/rss/newspaper/features.rss'),
        ('Sport', 'http://www.irishtimes.com/feeds/rss/newspaper/sport.rss'),
        ('Opinion', 'http://www.irishtimes.com/feeds/rss/newspaper/opinion.rss'),
        ('Letters', 'http://www.irishtimes.com/feeds/rss/newspaper/letters.rss'),
        ('Ireland', 'http://rss.feedsportal.com/c/851/f/10845/index.rss'),
        ('World', 'http://rss.feedsportal.com/c/851/f/10846/index.rss'),
        ('Finance', 'http://rss.feedsportal.com/c/851/f/10847/index.rss'),
        ('Features', 'http://rss.feedsportal.com/c/851/f/10848/index.rss'),
        ('Sport', 'http://rss.feedsportal.com/c/851/f/10849/index.rss'),
        ('Opinion', 'http://rss.feedsportal.com/c/851/f/10850/index.rss'),
        ('Letters', 'http://rss.feedsportal.com/c/851/f/10851/index.rss'),
        ('Magazine', 'http://www.irishtimes.com/feeds/rss/newspaper/magazine.rss'),
        ('Health', 'http://www.irishtimes.com/feeds/rss/newspaper/health.rss'),
        ('Education & Parenting', 'http://www.irishtimes.com/feeds/rss/newspaper/education.rss'),
        ('Motors', 'http://www.irishtimes.com/feeds/rss/newspaper/motors.rss'),
        ('Health', 'http://rss.feedsportal.com/c/851/f/10852/index.rss'),
        ('Education & Parenting', 'http://rss.feedsportal.com/c/851/f/10853/index.rss'),
        ('Motors', 'http://rss.feedsportal.com/c/851/f/10854/index.rss'),
        ('An Teanga Bheo', 'http://www.irishtimes.com/feeds/rss/newspaper/anteangabheo.rss'),
        ('Commercial Property', 'http://www.irishtimes.com/feeds/rss/newspaper/commercialproperty.rss'),
        ('Science Today', 'http://www.irishtimes.com/feeds/rss/newspaper/sciencetoday.rss'),
@@ -57,5 +56,3 @@ class IrishTimes(BasicNewsRecipe):

    def get_article_url(self, article):
        return article.link

@@ -1,12 +1,12 @@
__license__   = 'GPL v3'
__copyright__ = '2008-2010, Darko Miletic <darko.miletic at gmail.com>'
__copyright__ = '2008-2011, Darko Miletic <darko.miletic at gmail.com>'
'''
nspm.rs
'''

import re
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import NavigableString
from calibre.ebooks.BeautifulSoup import NavigableString, Tag

class Nspm(BasicNewsRecipe):
    title = 'Nova srpska politicka misao'
@@ -21,7 +21,6 @@ class Nspm(BasicNewsRecipe):
    INDEX = 'http://www.nspm.rs/?alphabet=l'
    encoding = 'utf-8'
    language = 'sr'
    delay = 2
    remove_empty_feeds = True
    publication_type = 'magazine'
    masthead_url = 'http://www.nspm.rs/templates/jsn_epic_pro/images/logol.jpg'
@@ -29,27 +28,21 @@ class Nspm(BasicNewsRecipe):
    @font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
    body{font-family: "Times New Roman", serif1, serif}
    .article_description{font-family: Arial, sans1, sans-serif}
    img{margin-top:0.5em; margin-bottom: 0.7em}
    img{margin-top:0.5em; margin-bottom: 0.7em; display: block}
    .author{color: #990000; font-weight: bold}
    .author,.createdate{font-size: 0.9em} """

    conversion_options = {
        'comment'          : description
      , 'tags'             : category
      , 'publisher'        : publisher
      , 'language'         : language
      , 'linearize_tables' : True
        'comment'      : description
      , 'tags'         : category
      , 'publisher'    : publisher
      , 'language'     : language
      , 'pretty_print' : True
    }

    preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')]
    keep_only_tags = [dict(attrs={'id':'jsn-mainbody'})]
    remove_tags = [
        dict(name=['link','object','embed','script','meta','base','iframe'])
       ,dict(attrs={'class':'buttonheading'})
    ]
    remove_tags_before = dict(attrs={'class':'contentheading'})
    remove_tags_after = dict(attrs={'class':'article_separator'})
    remove_attributes = ['width','height']
    remove_tags = [dict(name=['link','script','meta','base','img'])]
    remove_attributes = ['width','height','lang','xmlns:fb','xmlns:og','vspace','hspace','type','start','size']

    def get_browser(self):
        br = BasicNewsRecipe.get_browser()
@@ -57,21 +50,67 @@ class Nspm(BasicNewsRecipe):
        return br

    feeds = [
        (u'Rubrike'                 , u'http://www.nspm.rs/rubrike/feed/rss.html')
       ,(u'Debate'                  , u'http://www.nspm.rs/debate/feed/rss.html')
       ,(u'Reci i misli'            , u'http://www.nspm.rs/reci-i-misli/feed/rss.html')
        (u'Rubrike'                 , u'http://www.nspm.rs/rubrike/feed/rss.html'                 )
       ,(u'Debate'                  , u'http://www.nspm.rs/debate/feed/rss.html'                  )
       ,(u'Reci i misli'            , u'http://www.nspm.rs/reci-i-misli/feed/rss.html'            )
       ,(u'Samo smeh srbina spasava', u'http://www.nspm.rs/samo-smeh-srbina-spasava/feed/rss.html')
       ,(u'Polemike'                , u'http://www.nspm.rs/polemike/feed/rss.html')
       ,(u'Prikazi'                 , u'http://www.nspm.rs/prikazi/feed/rss.html')
       ,(u'Prenosimo'               , u'http://www.nspm.rs/prenosimo/feed/rss.html')
       ,(u'Hronika'                 , u'http://www.nspm.rs/tabela/hronika/feed/rss.html')
       ,(u'Polemike'                , u'http://www.nspm.rs/polemike/feed/rss.html'                )
       ,(u'Prikazi'                 , u'http://www.nspm.rs/prikazi/feed/rss.html'                 )
       ,(u'Prenosimo'               , u'http://www.nspm.rs/prenosimo/feed/rss.html'               )
       ,(u'Hronika'                 , u'http://www.nspm.rs/tabela/hronika/feed/rss.html'          )
    ]

    def preprocess_html(self, soup):
        for item in soup.body.findAll(style=True):
            del item['style']
        for item in soup.body.findAll('h1'):
            nh = NavigableString(item.a.string)
            item.a.extract()
            item.insert(0,nh)
        return self.adeify_images(soup)
        atitle = soup.body.find('a',attrs={'class':'contentpagetitle'})
        if atitle:
            cleanTitle = Tag(soup,'h1',[('class','contentpagetitle')])
            cnt = NavigableString(self.tag_to_string(atitle))
            cleanTitle.append(cnt)

        author = soup.body.find('span',attrs={'class':'author'})
        if author:
            author.extract()
            author.name = 'div'

        crdate = soup.body.find('td',attrs={'class':'createdate'})
        if crdate:
            cleanCrdate = Tag(soup,'div',[('class','createdate')])
            cnt = NavigableString(self.tag_to_string(crdate))
            cleanCrdate.append(cnt)

            #get the dependant element
            artText = Tag(soup,'div',[('class','text')])
            textHolderp = crdate.parent
            textHolder = textHolderp.nextSibling
            while textHolder and (not isinstance(textHolder,Tag) or (textHolder.name <> textHolderp.name)):
                textHolder = textHolder.nextSibling
            if textHolder.td:
                artText = textHolder.td
                artText.name = 'div'
                artText.attrs = []
                artText['class'] = 'text'
                artText.extract()

            soup.body.contents=[]

            soup.body.append(cleanTitle)
            soup.body.append(author)
            soup.body.append(cleanCrdate)
            soup.body.append(artText)

        for item in soup.findAll('a'):
            limg = item.find('img')
            if item.string is not None:
                str = item.string
                item.replaceWith(str)
            else:
                if limg:
                    item.name = 'div'
                    item.attrs = []
                else:
                    str = self.tag_to_string(item)
                    item.replaceWith(str)
        for item in soup.findAll('img'):
            if not item.has_key('alt'):
                item['alt'] = 'image'
        return soup
BIN resources/images/highlight_only_off.png (new file; binary file not shown; 810 B)
BIN resources/images/highlight_only_on.png (new file; binary file not shown; 396 B)
@@ -2,7 +2,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__   = 'calibre'
__version__   = '0.7.54'
__version__   = '0.7.55'
__author__    = "Kovid Goyal <kovid@kovidgoyal.net>"

import re, importlib
@@ -344,6 +344,7 @@ class iPadOutput(OutputProfile):
                border-spacing:1px;
                margin-left: 5%;
                margin-right: 5%;
                page-break-inside:avoid;
                width: 90%;
                -webkit-border-radius:4px;
                }
@@ -51,6 +51,8 @@ Run an embedded python interpreter.
                      'with sqlite3 works.')
    parser.add_option('-p', '--py-console', help='Run python console',
                      default=False, action='store_true')
    parser.add_option('-m', '--inspect-mobi',
                      help='Inspect the MOBI file at the specified path', default=None)

    return parser

@@ -227,6 +229,9 @@ def main(args=sys.argv):
        if len(args) > 1 and os.access(args[-1], os.R_OK):
            sql_dump = args[-1]
        reinit_db(opts.reinitialize_db, sql_dump=sql_dump)
    elif opts.inspect_mobi is not None:
        from calibre.ebooks.mobi.debug import inspect_mobi
        inspect_mobi(opts.inspect_mobi)
    else:
        from calibre import ipython
        ipython()
@@ -349,7 +349,7 @@ class ITUNES(DriverBase):
                        break
                    break
            if self.report_progress is not None:
                self.report_progress(j+1/task_count, _('Updating device metadata listing...'))
                self.report_progress((j+1)/task_count, _('Updating device metadata listing...'))

        if self.report_progress is not None:
            self.report_progress(1.0, _('Updating device metadata listing...'))
@@ -428,7 +428,7 @@ class ITUNES(DriverBase):
                    }

                if self.report_progress is not None:
                    self.report_progress(i+1/book_count, _('%d of %d') % (i+1, book_count))
                    self.report_progress((i+1)/book_count, _('%d of %d') % (i+1, book_count))
            self._purge_orphans(library_books, cached_books)

        elif iswindows:
@@ -466,7 +466,7 @@ class ITUNES(DriverBase):
                    }

                if self.report_progress is not None:
                    self.report_progress(i+1/book_count,
                    self.report_progress((i+1)/book_count,
                        _('%d of %d') % (i+1, book_count))
            self._purge_orphans(library_books, cached_books)

@@ -916,6 +916,8 @@ class ITUNES(DriverBase):
        """
        if DEBUG:
            self.log.info("ITUNES.reset()")
        if report_progress:
            self.set_progress_reporter(report_progress)

    def set_progress_reporter(self, report_progress):
        '''
@@ -924,6 +926,9 @@ class ITUNES(DriverBase):
        If it is called with -1 that means that the
        task does not have any progress information
        '''
        if DEBUG:
            self.log.info("ITUNES.set_progress_reporter()")

        self.report_progress = report_progress

    def set_plugboards(self, plugboards, pb_func):
@@ -1041,7 +1046,7 @@ class ITUNES(DriverBase):

                # Report progress
                if self.report_progress is not None:
                    self.report_progress(i+1/file_count, _('%d of %d') % (i+1, file_count))
                    self.report_progress((i+1)/file_count, _('%d of %d') % (i+1, file_count))

        elif iswindows:
            try:
@@ -1081,7 +1086,7 @@ class ITUNES(DriverBase):

                    # Report progress
                    if self.report_progress is not None:
                        self.report_progress(i+1/file_count, _('%d of %d') % (i+1, file_count))
                        self.report_progress((i+1)/file_count, _('%d of %d') % (i+1, file_count))
            finally:
                pythoncom.CoUninitialize()

@@ -3065,7 +3070,7 @@ class ITUNES_ASYNC(ITUNES):
                    }

                if self.report_progress is not None:
                    self.report_progress(i+1/book_count, _('%d of %d') % (i+1, book_count))
                    self.report_progress((i+1)/book_count, _('%d of %d') % (i+1, book_count))

            elif iswindows:
                try:
@@ -3104,7 +3109,7 @@ class ITUNES_ASYNC(ITUNES):
                    }

                if self.report_progress is not None:
                    self.report_progress(i+1/book_count,
                    self.report_progress((i+1)/book_count,
                        _('%d of %d') % (i+1, book_count))

            finally:
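The recurring one-character fix in the hunks above is an operator-precedence bug: in Python, / binds tighter than +, so i+1/book_count parses as i + (1/book_count) rather than the intended fraction (i+1)/book_count, and the reported progress value is wrong. A minimal standalone sketch of the difference (illustration only, not calibre code):

    from __future__ import division  # make / mean true division

    book_count = 4
    for i in range(book_count):
        wrong = i + 1 / book_count    # parses as i + (1/book_count): 0.25, 1.25, 2.25, 3.25
        right = (i + 1) / book_count  # intended fraction done:       0.25, 0.5, 0.75, 1.0
        print wrong, right

Under Python 2's default integer division the wrong form is worse still: 1/book_count is 0 for any book_count > 1, so the "fraction" collapses to the whole number i.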
@@ -203,6 +203,8 @@ class CollectionsBookList(BookList):
                val = [orig_val]
            elif fm['datatype'] == 'text' and fm['is_multiple']:
                val = orig_val
            elif fm['datatype'] == 'composite' and fm['is_multiple']:
                val = [v.strip() for v in val.split(fm['is_multiple'])]
            else:
                val = [val]

@@ -51,6 +51,7 @@ class CHMInput(InputFormatPlugin):
            mainpath = os.path.join(tdir, mainname)

            metadata = get_metadata_from_reader(self._chm_reader)
            self._chm_reader.CloseCHM()

        odi = options.debug_pipeline
        options.debug_pipeline = None
@@ -175,18 +175,18 @@ class EPUBInput(InputFormatPlugin):
            raise ValueError(
                'EPUB files with DTBook markup are not supported')

        not_for_spine = set()
        for y in opf.itermanifest():
            id_ = y.get('id', None)
            if id_ and y.get('media-type', None) in \
                    ('application/vnd.adobe-page-template+xml',):
                not_for_spine.add(id_)

        for x in list(opf.iterspine()):
            ref = x.get('idref', None)
            if ref is None:
            if ref is None or ref in not_for_spine:
                x.getparent().remove(x)
                continue
            for y in opf.itermanifest():
                if y.get('id', None) == ref and y.get('media-type', None) in \
                        ('application/vnd.adobe-page-template+xml',):
                    p = x.getparent()
                    if p is not None:
                        p.remove(x)
                    break

        with open('content.opf', 'wb') as nopf:
            nopf.write(opf.render())
@@ -10,6 +10,7 @@ import os

from calibre import walk
from calibre.customize.conversion import InputFormatPlugin
from calibre.ebooks.chardet import xml_to_unicode
from calibre.utils.zipfile import ZipFile

class HTMLZInput(InputFormatPlugin):
@@ -34,6 +35,13 @@ class HTMLZInput(InputFormatPlugin):
                html = tf.read()
                break

        # Encoding
        if options.input_encoding:
            ienc = options.input_encoding
        else:
            ienc = xml_to_unicode(html[:4096])[-1]
        html = html.decode(ienc, 'replace')

        # Run the HTML through the html processing plugin.
        from calibre.customize.ui import plugin_for_input_format
        html_input = plugin_for_input_format('html')
@@ -483,7 +483,7 @@ class Metadata(object):
                self_tags = self.get(x, [])
                self.set_user_metadata(x, meta) # get... did the deepcopy
                other_tags = other.get(x, [])
                if meta['is_multiple']:
                if meta['datatype'] == 'text' and meta['is_multiple']:
                    # Case-insensitive but case preserving merging
                    lotags = [t.lower() for t in other_tags]
                    lstags = [t.lower() for t in self_tags]
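The hunk above cuts off just as the interesting part begins, but the comment names the technique: tags from both records are merged by comparing lowercased values, while each kept tag retains its original capitalization. A minimal sketch of that idea (an illustration under that reading, not the actual calibre implementation):

    def merge_case_preserving(self_tags, other_tags):
        # Compare lowercased values, but keep the originally cased spelling
        # of whichever side contributed the tag first.
        seen = set(t.lower() for t in self_tags)
        merged = list(self_tags)
        for tag in other_tags:
            if tag.lower() not in seen:
                seen.add(tag.lower())
                merged.append(tag)
        return merged

    print merge_case_preserving(['Fiction', 'SF'], ['fiction', 'Space Opera'])
    # -> ['Fiction', 'SF', 'Space Opera']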
@@ -259,6 +259,7 @@ class MetadataUpdater(object):
        trail = len(new_record0.getvalue()) % 4
        pad = '\0' * (4 - trail) # Always pad w/ at least 1 byte
        new_record0.write(pad)
        new_record0.write('\0'*(1024*8))

        # Rebuild the stream, update the pdbrecords pointers
        self.patchSection(0,new_record0.getvalue())
@@ -24,6 +24,7 @@ msprefs.defaults['ignore_fields'] = []
msprefs.defaults['max_tags'] = 20
msprefs.defaults['wait_after_first_identify_result'] = 30 # seconds
msprefs.defaults['wait_after_first_cover_result'] = 60 # seconds
msprefs.defaults['swap_author_names'] = False

# Google covers are often poor quality (scans/errors) but they have high
# resolution, so they trump covers from better sources. So make sure they
@@ -365,6 +365,18 @@ def identify(log, abort, # {{{
    for r in results:
        r.tags = r.tags[:max_tags]

    if msprefs['swap_author_names']:
        for r in results:
            def swap_to_ln_fn(a):
                if ',' in a:
                    return a
                parts = a.split(None)
                if len(parts) <= 1:
                    return a
                surname = parts[-1]
                return '%s, %s' % (surname, ' '.join(parts[:-1]))
            r.authors = [swap_to_ln_fn(a) for a in r.authors]

    return results
# }}}

408 src/calibre/ebooks/mobi/debug.py (new file)
@@ -0,0 +1,408 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
        print_function)

__license__   = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import struct, datetime
from calibre.utils.date import utc_tz
from calibre.ebooks.mobi.langcodes import main_language, sub_language

class PalmDOCAttributes(object):

    class Attr(object):

        def __init__(self, name, field, val):
            self.name = name
            self.val = val & field

        def __str__(self):
            return '%s: %s'%(self.name, bool(self.val))

    def __init__(self, raw):
        self.val = struct.unpack(b'<H', raw)[0]
        self.attributes = []
        for name, field in [('Read Only', 0x02), ('Dirty AppInfoArea', 0x04),
                ('Backup this database', 0x08),
                ('Okay to install newer over existing copy, if present on PalmPilot', 0x10),
                ('Force the PalmPilot to reset after this database is installed', 0x12),
                ('Don\'t allow copy of file to be beamed to other Pilot',
                    0x14)]:
            self.attributes.append(PalmDOCAttributes.Attr(name, field,
                self.val))

    def __str__(self):
        attrs = '\n\t'.join([str(x) for x in self.attributes])
        return 'PalmDOC Attributes: %s\n\t%s'%(bin(self.val), attrs)

class PalmDB(object):

    def __init__(self, raw):
        self.raw = raw

        if self.raw.startswith(b'TPZ'):
            raise ValueError('This is a Topaz file')

        self.name = self.raw[:32].replace(b'\x00', b'')
        self.attributes = PalmDOCAttributes(self.raw[32:34])
        self.version = struct.unpack(b'>H', self.raw[34:36])[0]

        palm_epoch = datetime.datetime(1904, 1, 1, tzinfo=utc_tz)
        self.creation_date_raw = struct.unpack(b'>I', self.raw[36:40])[0]
        self.creation_date = (palm_epoch +
                datetime.timedelta(seconds=self.creation_date_raw))
        self.modification_date_raw = struct.unpack(b'>I', self.raw[40:44])[0]
        self.modification_date = (palm_epoch +
                datetime.timedelta(seconds=self.modification_date_raw))
        self.last_backup_date_raw = struct.unpack(b'>I', self.raw[44:48])[0]
        self.last_backup_date = (palm_epoch +
                datetime.timedelta(seconds=self.last_backup_date_raw))
        self.modification_number = struct.unpack(b'>I', self.raw[48:52])[0]
        self.app_info_id = self.raw[52:56]
        self.sort_info_id = self.raw[56:60]
        self.type = self.raw[60:64]
        self.creator = self.raw[64:68]
        self.ident = self.type + self.creator
        if self.ident not in (b'BOOKMOBI', b'TEXTREAD'):
            raise ValueError('Unknown book ident: %r'%self.ident)
        self.uid_seed = self.raw[68:72]
        self.next_rec_list_id = self.raw[72:76]

        self.number_of_records, = struct.unpack(b'>H', self.raw[76:78])

    def __str__(self):
        ans = ['*'*20 + ' PalmDB Header '+ '*'*20]
        ans.append('Name: %r'%self.name)
        ans.append(str(self.attributes))
        ans.append('Version: %s'%self.version)
        ans.append('Creation date: %s (%s)'%(self.creation_date.isoformat(),
            self.creation_date_raw))
        ans.append('Modification date: %s (%s)'%(self.modification_date.isoformat(),
            self.modification_date_raw))
        ans.append('Backup date: %s (%s)'%(self.last_backup_date.isoformat(),
            self.last_backup_date_raw))
        ans.append('Modification number: %s'%self.modification_number)
        ans.append('App Info ID: %r'%self.app_info_id)
        ans.append('Sort Info ID: %r'%self.sort_info_id)
        ans.append('Type: %r'%self.type)
        ans.append('Creator: %r'%self.creator)
        ans.append('UID seed: %r'%self.uid_seed)
        ans.append('Next record list id: %r'%self.next_rec_list_id)
        ans.append('Number of records: %s'%self.number_of_records)

        return '\n'.join(ans)

class Record(object):

    def __init__(self, raw, header):
        self.offset, self.flags, self.uid = header
        self.raw = raw

    @property
    def header(self):
        return 'Offset: %d Flags: %d UID: %d'%(self.offset, self.flags,
                self.uid)

class EXTHRecord(object):

    def __init__(self, type_, data):
        self.type = type_
        self.data = data
        self.name = {
                1 : 'DRM Server id',
                2 : 'DRM Commerce id',
                3 : 'DRM ebookbase book id',
                100 : 'author',
                101 : 'publisher',
                102 : 'imprint',
                103 : 'description',
                104 : 'isbn',
                105 : 'subject',
                106 : 'publishingdate',
                107 : 'review',
                108 : 'contributor',
                109 : 'rights',
                110 : 'subjectcode',
                111 : 'type',
                112 : 'source',
                113 : 'asin',
                114 : 'versionnumber',
                115 : 'sample',
                116 : 'startreading',
                117 : 'adult',
                118 : 'retailprice',
                119 : 'retailpricecurrency',
                201 : 'coveroffset',
                202 : 'thumboffset',
                203 : 'hasfakecover',
                204 : 'Creator Software',
                205 : 'Creator Major Version', # '>I'
                206 : 'Creator Minor Version', # '>I'
                207 : 'Creator Build Number', # '>I'
                208 : 'watermark',
                209 : 'tamper_proof_keys',
                300 : 'fontsignature',
                301 : 'clippinglimit', # percentage '>B'
                402 : 'publisherlimit',
                404 : 'TTS flag', # '>B' 1 - TTS disabled 0 - TTS enabled
                501 : 'cdetype', # 4 chars (PDOC or EBOK)
                502 : 'lastupdatetime',
                503 : 'updatedtitle',
        }.get(self.type, repr(self.type))

        if self.name in ('coveroffset', 'thumboffset', 'hasfakecover',
                'Creator Major Version', 'Creator Minor Version',
                'Creator Build Number', 'Creator Software', 'startreading'):
            self.data, = struct.unpack(b'>I', self.data)

    def __str__(self):
        return '%s (%d): %r'%(self.name, self.type, self.data)

class EXTHHeader(object):

    def __init__(self, raw):
        self.raw = raw
        if not self.raw.startswith(b'EXTH'):
            raise ValueError('EXTH header does not start with EXTH')
        self.length, = struct.unpack(b'>I', self.raw[4:8])
        self.count, = struct.unpack(b'>I', self.raw[8:12])

        pos = 12
        self.records = []
        for i in xrange(self.count):
            pos = self.read_record(pos)

    def read_record(self, pos):
        type_, length = struct.unpack(b'>II', self.raw[pos:pos+8])
        data = self.raw[(pos+8):(pos+length)]
        self.records.append(EXTHRecord(type_, data))
        return pos + length

    def __str__(self):
        ans = ['*'*20 + ' EXTH Header '+ '*'*20]
        ans.append('EXTH header length: %d'%self.length)
        ans.append('Number of EXTH records: %d'%self.count)
        ans.append('EXTH records...')
        for r in self.records:
            ans.append(str(r))
        return '\n'.join(ans)


class MOBIHeader(object):

    def __init__(self, record0):
        self.raw = record0.raw

        self.compression_raw = self.raw[:2]
        self.compression = {1: 'No compression', 2: 'PalmDoc compression',
                17480: 'HUFF/CDIC compression'}.get(struct.unpack(b'>H',
                    self.compression_raw)[0],
                    repr(self.compression_raw))
        self.unused = self.raw[2:4]
        self.text_length, = struct.unpack(b'>I', self.raw[4:8])
        self.number_of_text_records, self.text_record_size = \
                struct.unpack(b'>HH', self.raw[8:12])
        self.encryption_type_raw, = struct.unpack(b'>H', self.raw[12:14])
        self.encryption_type = {0: 'No encryption',
                1: 'Old mobipocket encryption',
                2:'Mobipocket encryption'}.get(self.encryption_type_raw,
                    repr(self.encryption_type_raw))
        self.unknown = self.raw[14:16]

        self.identifier = self.raw[16:20]
        if self.identifier != b'MOBI':
            raise ValueError('Identifier %r unknown'%self.identifier)

        self.length, = struct.unpack(b'>I', self.raw[20:24])
        self.type_raw, = struct.unpack(b'>I', self.raw[24:28])
        self.type = {
                2 : 'Mobipocket book',
                3 : 'PalmDOC book',
                4 : 'Audio',
                257 : 'News',
                258 : 'News Feed',
                259 : 'News magazine',
                513 : 'PICS',
                514 : 'Word',
                515 : 'XLS',
                516 : 'PPT',
                517 : 'TEXT',
                518 : 'HTML',
        }.get(self.type_raw, repr(self.type_raw))

        self.encoding_raw, = struct.unpack(b'>I', self.raw[28:32])
        self.encoding = {
                1252 : 'cp1252',
                65001: 'utf-8',
        }.get(self.encoding_raw, repr(self.encoding_raw))
        self.uid = self.raw[32:36]
        self.file_version = struct.unpack(b'>I', self.raw[36:40])
        self.reserved = self.raw[40:48]
        self.secondary_index_record, = struct.unpack(b'>I', self.raw[48:52])
        self.reserved2 = self.raw[52:80]
        self.first_non_book_record, = struct.unpack(b'>I', self.raw[80:84])
        self.fullname_offset, = struct.unpack(b'>I', self.raw[84:88])
        self.fullname_length, = struct.unpack(b'>I', self.raw[88:92])
        self.locale_raw, = struct.unpack(b'>I', self.raw[92:96])
        langcode = self.locale_raw
        langid = langcode & 0xFF
        sublangid = (langcode >> 10) & 0xFF
        self.language = main_language.get(langid, 'ENGLISH')
        self.sublanguage = sub_language.get(sublangid, 'NEUTRAL')

        self.input_language = self.raw[96:100]
        self.output_langauage = self.raw[100:104]
        self.min_version, = struct.unpack(b'>I', self.raw[104:108])
        self.first_image_index, = struct.unpack(b'>I', self.raw[108:112])
        self.huffman_record_offset, = struct.unpack(b'>I', self.raw[112:116])
        self.huffman_record_count, = struct.unpack(b'>I', self.raw[116:120])
        self.unknown2 = self.raw[120:128]
        self.exth_flags, = struct.unpack(b'>I', self.raw[128:132])
        self.has_exth = bool(self.exth_flags & 0x40)
        self.has_drm_data = self.length >= 174 and len(self.raw) >= 180
        if self.has_drm_data:
            self.unknown3 = self.raw[132:164]
            self.drm_offset, = struct.unpack(b'>I', self.raw[164:168])
            self.drm_count, = struct.unpack(b'>I', self.raw[168:172])
            self.drm_size, = struct.unpack(b'>I', self.raw[172:176])
            self.drm_flags = bin(struct.unpack(b'>I', self.raw[176:180])[0])
        self.has_extra_data_flags = self.length >= 232 and len(self.raw) >= 232+16
        self.has_fcis_flis = False
        if self.has_extra_data_flags:
            self.unknown4 = self.raw[180:192]
            self.first_content_record, self.last_content_record = \
                    struct.unpack(b'>HH', self.raw[192:196])
            self.unknown5, = struct.unpack(b'>I', self.raw[196:200])
            (self.fcis_number, self.fcis_count, self.flis_number,
                    self.flis_count) = struct.unpack(b'>IIII',
                            self.raw[200:216])
            self.unknown6 = self.raw[216:240]
            self.extra_data_flags = bin(struct.unpack(b'>I',
                self.raw[240:244])[0])
            self.primary_index_record, = struct.unpack(b'>I',
                    self.raw[244:248])

        if self.has_exth:
            self.exth_offset = 16 + self.length

            self.exth = EXTHHeader(self.raw[self.exth_offset:])

            self.end_of_exth = self.exth_offset + self.exth.length
            self.bytes_after_exth = self.fullname_offset - self.end_of_exth

    def __str__(self):
        ans = ['*'*20 + ' MOBI Header '+ '*'*20]
        ans.append('Compression: %s'%self.compression)
        ans.append('Unused: %r'%self.unused)
        ans.append('Number of text records: %d'%self.number_of_text_records)
        ans.append('Text record size: %d'%self.text_record_size)
        ans.append('Encryption: %s'%self.encryption_type)
        ans.append('Unknown: %r'%self.unknown)
        ans.append('Identifier: %r'%self.identifier)
        ans.append('Header length: %d'% self.length)
        ans.append('Type: %s'%self.type)
        ans.append('Encoding: %s'%self.encoding)
        ans.append('UID: %r'%self.uid)
        ans.append('File version: %d'%self.file_version)
        ans.append('Reserved: %r'%self.reserved)
        ans.append('Secondary index record: %d (null val: %d)'%(
            self.secondary_index_record, 0xffffffff))
        ans.append('Reserved2: %r'%self.reserved2)
        ans.append('First non-book record: %d'% self.first_non_book_record)
        ans.append('Full name offset: %d'%self.fullname_offset)
        ans.append('Full name length: %d bytes'%self.fullname_length)
        ans.append('Langcode: %r'%self.locale_raw)
        ans.append('Language: %s'%self.language)
        ans.append('Sub language: %s'%self.sublanguage)
        ans.append('Input language: %r'%self.input_language)
        ans.append('Output language: %r'%self.output_langauage)
        ans.append('Min version: %d'%self.min_version)
        ans.append('First Image index: %d'%self.first_image_index)
        ans.append('Huffman record offset: %d'%self.huffman_record_offset)
        ans.append('Huffman record count: %d'%self.huffman_record_count)
        ans.append('Unknown2: %r'%self.unknown2)
        ans.append('EXTH flags: %r (%s)'%(self.exth_flags, self.has_exth))
        if self.has_drm_data:
            ans.append('Unknown3: %r'%self.unknown3)
            ans.append('DRM Offset: %s'%self.drm_offset)
            ans.append('DRM Count: %s'%self.drm_count)
            ans.append('DRM Size: %s'%self.drm_size)
            ans.append('DRM Flags: %r'%self.drm_flags)
        if self.has_extra_data_flags:
            ans.append('Unknown4: %r'%self.unknown4)
            ans.append('First content record: %d'% self.first_content_record)
            ans.append('Last content record: %d'% self.last_content_record)
            ans.append('Unknown5: %d'% self.unknown5)
            ans.append('FCIS number: %d'% self.fcis_number)
            ans.append('FCIS count: %d'% self.fcis_count)
            ans.append('FLIS number: %d'% self.flis_number)
            ans.append('FLIS count: %d'% self.flis_count)
            ans.append('Unknown6: %r'% self.unknown6)
            ans.append('Extra data flags: %r'%self.extra_data_flags)
            ans.append('Primary index record: %d'%self.primary_index_record)

        ans = '\n'.join(ans)

        if self.has_exth:
            ans += '\n\n' + str(self.exth)
            ans += '\n\nBytes after EXTH: %d'%self.bytes_after_exth

        ans += '\nNumber of bytes after full name: %d' % (len(self.raw) - (self.fullname_offset +
            self.fullname_length))

        ans += '\nRecord 0 length: %d'%len(self.raw)
        return ans

class MOBIFile(object):

    def __init__(self, stream):
        self.raw = stream.read()

        self.palmdb = PalmDB(self.raw[:78])

        self.record_headers = []
        self.records = []
        for i in xrange(self.palmdb.number_of_records):
            pos = 78 + i * 8
            offset, a1, a2, a3, a4 = struct.unpack(b'>LBBBB', self.raw[pos:pos+8])
            flags, val = a1, a2 << 16 | a3 << 8 | a4
            self.record_headers.append((offset, flags, val))

        def section(section_number):
            if section_number == self.palmdb.number_of_records - 1:
                end_off = len(self.raw)
            else:
                end_off = self.record_headers[section_number + 1][0]
            off = self.record_headers[section_number][0]
            return self.raw[off:end_off]

        for i in range(self.palmdb.number_of_records):
            self.records.append(Record(section(i), self.record_headers[i]))

        self.mobi_header = MOBIHeader(self.records[0])


    def print_header(self):
        print (str(self.palmdb).encode('utf-8'))
        print ()
        print ('Record headers:')
        for i, r in enumerate(self.records):
            print ('%6d. %s'%(i, r.header))

        print ()
        print (str(self.mobi_header).encode('utf-8'))

def inspect_mobi(path_or_stream):
    stream = (path_or_stream if hasattr(path_or_stream, 'read') else
            open(path_or_stream, 'rb'))
    f = MOBIFile(stream)
    f.print_header()

if __name__ == '__main__':
    import sys
    f = MOBIFile(open(sys.argv[1], 'rb'))
    f.print_header()
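Tying this new module to the calibre-debug changes earlier in the commit: the inspector can be invoked from the command line via the new -m/--inspect-mobi switch, or imported directly, since inspect_mobi() accepts either a path or an open stream. A usage sketch (the .mobi path is a made-up example):

    # Command line (option added in this commit):
    #   calibre-debug --inspect-mobi /tmp/book.mobi
    # Programmatic use:
    from calibre.ebooks.mobi.debug import inspect_mobi
    inspect_mobi('/tmp/book.mobi')  # prints the PalmDB, record, MOBI and EXTH headers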
@@ -7,8 +7,6 @@ __copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.cam> and \
                Kovid Goyal <kovid@kovidgoyal.net>'

from collections import defaultdict
from itertools import count
from itertools import izip
import random
import re
from struct import pack
@@ -1511,7 +1509,7 @@ class MobiWriter(object):
        record0.write(exth)
        record0.write(title)
        record0 = record0.getvalue()
        self._records[0] = record0 + ('\0' * (2452 - len(record0)))
        self._records[0] = record0 + ('\0' * (1024*8))

    def _build_exth(self):
        oeb = self._oeb
@@ -1630,8 +1628,8 @@ class MobiWriter(object):
        self._write(title, pack('>HHIIIIII', 0, 0, now, now, 0, 0, 0, 0),
            'BOOK', 'MOBI', pack('>IIH', nrecords, 0, nrecords))
        offset = self._tell() + (8 * nrecords) + 2
        for id, record in izip(count(), self._records):
            self._write(pack('>I', offset), '\0', pack('>I', id)[1:])
        for i, record in enumerate(self._records):
            self._write(pack('>I', offset), '\0', pack('>I', 2*i)[1:])
            offset += len(record)
        self._write('\0\0')

@@ -20,8 +20,9 @@ class RemoveAdobeMargins(object):
        self.oeb, self.opts, self.log = oeb, opts, log

        for item in self.oeb.manifest:
            if item.media_type in ('application/vnd.adobe-page-template+xml',
                    'application/vnd.adobe.page-template+xml'):
            if (item.media_type in ('application/vnd.adobe-page-template+xml',
                    'application/vnd.adobe.page-template+xml') and
                    hasattr(item.data, 'xpath')):
                self.log('Removing page margins specified in the'
                        ' Adobe page template')
                for elem in item.data.xpath(
@@ -43,7 +43,7 @@ class ViewAction(InterfaceAction):
        ac = self.view_specific_action = QAction(_('View specific format'),
                self.gui)
        self.qaction.setMenu(self.view_menu)
        ac.setShortcut((Qt.ControlModifier if isosx else Qt.AltModifier)+Qt.Key_V)
        ac.setShortcut(Qt.AltModifier+Qt.Key_V)
        ac.triggered.connect(self.view_specific_format, type=Qt.QueuedConnection)
        ac = self.view_action = QAction(self.qaction.icon(),
                self.qaction.text(), self.gui)
@@ -519,6 +519,8 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
            val = [] if fm['is_multiple'] else ['']
        elif not fm['is_multiple']:
            val = [val]
        elif fm['datatype'] == 'composite':
            val = [v.strip() for v in val.split(fm['is_multiple'])]
        elif field == 'authors':
            val = [v.replace('|', ',') for v in val]
        else:
@@ -196,6 +196,10 @@ class SearchBar(QWidget): # {{{
        l.addWidget(x)
        x.setToolTip(_("Reset Quick Search"))

        x = parent.highlight_only_button = QToolButton(self)
        x.setIcon(QIcon(I('arrow-down.png')))
        l.addWidget(x)

        x = parent.search_options_button = QToolButton(self)
        x.setIcon(QIcon(I('config.png')))
        x.setObjectName("search_option_button")
@@ -310,10 +310,17 @@ class BooksModel(QAbstractTableModel): # {{{
    def sort(self, col, order, reset=True):
        if not self.db:
            return
        self.about_to_be_sorted.emit(self.db.id)
        if not isinstance(order, bool):
            order = order == Qt.AscendingOrder
        label = self.column_map[col]
        self._sort(label, order, reset)

    def sort_by_named_field(self, field, order, reset=True):
        if field in self.db.field_metadata.keys():
            self._sort(field, order, reset)

    def _sort(self, label, order, reset):
        self.about_to_be_sorted.emit(self.db.id)
        self.db.sort(label, order)
        if reset:
            self.reset()
@@ -236,6 +236,46 @@ class BooksView(QTableView): # {{{
                sm.select(idx, sm.Select|sm.Rows)
            self.scroll_to_row(indices[0].row())
        self.selected_ids = []

    def sort_by_named_field(self, field, order, reset=True):
        if field in self.column_map:
            idx = self.column_map.index(field)
            if order:
                self.sortByColumn(idx, Qt.AscendingOrder)
            else:
                self.sortByColumn(idx, Qt.DescendingOrder)
        else:
            self._model.sort_by_named_field(field, order, reset)

    def multisort(self, fields, reset=True, only_if_different=False):
        if len(fields) == 0:
            return
        sh = self.cleanup_sort_history(self._model.sort_history,
                                       ignore_column_map=True)
        if only_if_different and len(sh) >= len(fields):
            ret = True
            for i,t in enumerate(fields):
                if t[0] != sh[i][0]:
                    ret = False
                    break
            if ret:
                return

        for n,d in reversed(fields):
            if n in self._model.db.field_metadata.keys():
                sh.insert(0, (n, d))
        sh = self.cleanup_sort_history(sh, ignore_column_map=True)
        self._model.sort_history = [tuple(x) for x in sh]
        self._model.resort(reset=reset)
        col = fields[0][0]
        dir = Qt.AscendingOrder if fields[0][1] else Qt.DescendingOrder
        if col in self.column_map:
            col = self.column_map.index(col)
            hdrs = self.horizontalHeader()
            try:
                hdrs.setSortIndicator(col, dir)
            except:
                pass
    # }}}

    # Ondevice column {{{
@@ -280,14 +320,14 @@ class BooksView(QTableView): # {{{
        state = self.get_state()
        self.write_state(state)

    def cleanup_sort_history(self, sort_history):
    def cleanup_sort_history(self, sort_history, ignore_column_map=False):
        history = []
        for col, order in sort_history:
            if not isinstance(order, bool):
                continue
            if col == 'date':
                col = 'timestamp'
            if col in self.column_map:
            if ignore_column_map or col in self.column_map:
                if (not history or history[-1][0] != col):
                    history.append([col, order])
        return history
@@ -621,7 +661,7 @@ class BooksView(QTableView): # {{{
        h = self.horizontalHeader()
        for i in range(h.count()):
            if not h.isSectionHidden(i) and h.sectionViewportPosition(i) >= 0:
                self.scrollTo(self.model().index(row, i))
                self.scrollTo(self.model().index(row, i), self.PositionAtCenter)
                break

    def set_current_row(self, row, select=True):
@@ -846,7 +846,7 @@ class RatingEdit(QSpinBox): # {{{
class TagsEdit(MultiCompleteLineEdit): # {{{
    LABEL = _('Ta&gs:')
    TOOLTIP = '<p>'+_('Tags categorize the book. This is particularly '
            'useful while searching. <br><br>They can be any words'
            'useful while searching. <br><br>They can be any words '
            'or phrases, separated by commas.')

    def __init__(self, parent):
@@ -163,8 +163,9 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
            elif '*edited' in self.custcols[c]:
                cc = self.custcols[c]
                db.set_custom_column_metadata(cc['colnum'], name=cc['name'],
                                              label=cc['label'],
                                              display = self.custcols[c]['display'])
                                              label=cc['label'],
                                              display = self.custcols[c]['display'],
                                              notify=False)
                if '*must_restart' in self.custcols[c]:
                    must_restart = True
        return must_restart
@@ -41,6 +41,8 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
                'text':_('Yes/No'), 'is_multiple':False},
            10:{'datatype':'composite',
                'text':_('Column built from other columns'), 'is_multiple':False},
            11:{'datatype':'*composite',
                'text':_('Column built from other columns, behaves like tags'), 'is_multiple':True},
        }

    def __init__(self, parent, editing, standard_colheads, standard_colnames):
@@ -99,7 +101,9 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
            c = parent.custcols[col]
            self.column_name_box.setText(c['label'])
            self.column_heading_box.setText(c['name'])
            ct = c['datatype'] if not c['is_multiple'] else '*text'
            ct = c['datatype']
            if c['is_multiple']:
                ct = '*' + ct
            self.orig_column_number = c['colnum']
            self.orig_column_name = col
            column_numbers = dict(map(lambda x:(self.column_types[x]['datatype'], x),
@@ -109,7 +113,7 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
            if ct == 'datetime':
                if c['display'].get('date_format', None):
                    self.date_format_box.setText(c['display'].get('date_format', ''))
            elif ct == 'composite':
            elif ct in ['composite', '*composite']:
                self.composite_box.setText(c['display'].get('composite_template', ''))
                sb = c['display'].get('composite_sort', 'text')
                vals = ['text', 'number', 'date', 'bool']
@@ -167,7 +171,7 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
            getattr(self, 'date_format_'+x).setVisible(col_type == 'datetime')
        for x in ('box', 'default_label', 'label', 'sort_by', 'sort_by_label',
                  'make_category'):
            getattr(self, 'composite_'+x).setVisible(col_type == 'composite')
            getattr(self, 'composite_'+x).setVisible(col_type in ['composite', '*composite'])
        for x in ('box', 'default_label', 'label'):
            getattr(self, 'enum_'+x).setVisible(col_type == 'enumeration')
        self.use_decorations.setVisible(col_type in ['text', 'composite', 'enumeration'])
@@ -187,8 +191,8 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
                'because these names are reserved for the index of a series column.'))
        col_heading = unicode(self.column_heading_box.text()).strip()
        col_type = self.column_types[self.column_type_box.currentIndex()]['datatype']
        if col_type == '*text':
            col_type='text'
        if col_type[0] == '*':
            col_type = col_type[1:]
            is_multiple = True
        else:
            is_multiple = False
@@ -249,11 +253,10 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
        elif col_type == 'text' and is_multiple:
            display_dict = {'is_names': self.is_names.isChecked()}

        if col_type in ['text', 'composite', 'enumeration']:
        if col_type in ['text', 'composite', 'enumeration'] and not is_multiple:
            display_dict['use_decorations'] = self.use_decorations.checkState()

        if not self.editing_col:
            db.field_metadata
            self.parent.custcols[key] = {
                    'label':col,
                    'name':col_heading,
@@ -258,6 +258,7 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
        r('max_tags', msprefs)
        r('wait_after_first_identify_result', msprefs)
        r('wait_after_first_cover_result', msprefs)
        r('swap_author_names', msprefs)

        self.configure_plugin_button.clicked.connect(self.configure_plugin)
        self.sources_model = SourcesModel(self)
@@ -21,7 +21,7 @@
   <widget class="QStackedWidget" name="stack">
    <widget class="QWidget" name="page">
     <layout class="QGridLayout" name="gridLayout">
      <item row="0" column="0" rowspan="5">
      <item row="0" column="0" rowspan="6">
       <widget class="QGroupBox" name="groupBox">
        <property name="title">
         <string>Metadata sources</string>
@@ -98,7 +98,14 @@
        </property>
       </widget>
      </item>
      <item row="2" column="1">
      <item row="2" column="1" colspan="2">
       <widget class="QCheckBox" name="opt_swap_author_names">
        <property name="text">
         <string>Swap author names from FN LN to LN, FN</string>
        </property>
       </widget>
      </item>
      <item row="3" column="1">
       <widget class="QLabel" name="label_2">
        <property name="text">
         <string>Max. number of &tags to download:</string>
@@ -108,10 +115,10 @@
        </property>
       </widget>
      </item>
      <item row="2" column="2">
      <item row="3" column="2">
       <widget class="QSpinBox" name="opt_max_tags"/>
      </item>
      <item row="3" column="1">
      <item row="4" column="1">
       <widget class="QLabel" name="label_3">
        <property name="text">
         <string>Max. &time to wait after first match is found:</string>
@@ -121,14 +128,14 @@
        </property>
       </widget>
      </item>
      <item row="3" column="2">
      <item row="4" column="2">
       <widget class="QSpinBox" name="opt_wait_after_first_identify_result">
        <property name="suffix">
         <string> secs</string>
        </property>
       </widget>
      </item>
      <item row="4" column="1">
      <item row="5" column="1">
       <widget class="QLabel" name="label_4">
        <property name="text">
         <string>Max. time to wait after first &cover is found:</string>
@@ -138,7 +145,7 @@
        </property>
       </widget>
      </item>
      <item row="4" column="2">
      <item row="5" column="2">
       <widget class="QSpinBox" name="opt_wait_after_first_cover_result">
        <property name="suffix">
         <string> secs</string>
@@ -171,10 +171,10 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
        return ConfigWidgetBase.commit(self)

    def refresh_gui(self, gui):
        gui.set_highlight_only_button_icon()
        if self.muc_changed:
            gui.tags_view.set_new_model()
        gui.search.search_as_you_type(config['search_as_you_type'])
        gui.library_view.model().set_highlight_only(config['highlight_search_matches'])
        gui.search.do_search()

    def clear_histories(self, *args):
@@ -10,7 +10,7 @@ import re

from PyQt4.Qt import QComboBox, Qt, QLineEdit, QStringList, pyqtSlot, QDialog, \
                     pyqtSignal, QCompleter, QAction, QKeySequence, QTimer, \
                     QString
                     QString, QIcon

from calibre.gui2 import config
from calibre.gui2.dialogs.confirm_delete import confirm
@@ -383,6 +383,22 @@ class SearchBoxMixin(object): # {{{
        self.advanced_search_button.setStatusTip(self.advanced_search_button.toolTip())
        self.clear_button.setStatusTip(self.clear_button.toolTip())
        self.search_options_button.clicked.connect(self.search_options_button_clicked)
        self.set_highlight_only_button_icon()
        self.highlight_only_button.clicked.connect(self.highlight_only_clicked)
        tt = _('Enable or disable search highlighting.') + '<br><br>'
        tt += config.help('highlight_search_matches')
        self.highlight_only_button.setToolTip(tt)

    def highlight_only_clicked(self, state):
        config['highlight_search_matches'] = not config['highlight_search_matches']
        self.set_highlight_only_button_icon()

    def set_highlight_only_button_icon(self):
        if config['highlight_search_matches']:
            self.highlight_only_button.setIcon(QIcon(I('highlight_only_on.png')))
        else:
            self.highlight_only_button.setIcon(QIcon(I('highlight_only_off.png')))
        self.library_view.model().set_highlight_only(config['highlight_search_matches'])

    def focus_search_box(self, *args):
        self.search.setFocus(Qt.OtherFocusReason)
@@ -413,12 +413,12 @@ class ResultCache(SearchQueryParser): # {{{
            cast = adjust = lambda x: x

        if query == 'false':
            if dt == 'rating':
            if dt == 'rating' or location == 'cover':
                relop = lambda x,y: not bool(x)
            else:
                relop = lambda x,y: x is None
        elif query == 'true':
            if dt == 'rating':
            if dt == 'rating' or location == 'cover':
                relop = lambda x,y: bool(x)
            else:
                relop = lambda x,y: x is not None
@@ -592,7 +592,8 @@ class ResultCache(SearchQueryParser): # {{{
        candidates = self.universal_set()
        if len(candidates) == 0:
            return matches
        self.test_location_is_valid(location, query)
        if location not in self.all_search_locations:
            return matches

        if len(location) > 2 and location.startswith('@') and \
                location[1:] in self.db_prefs['grouped_search_terms']:
@@ -751,7 +752,7 @@ class ResultCache(SearchQueryParser): # {{{

        if loc not in exclude_fields: # time for text matching
            if is_multiple_cols[loc] is not None:
                vals = item[loc].split(is_multiple_cols[loc])
                vals = [v.strip() for v in item[loc].split(is_multiple_cols[loc])]
            else:
                vals = [item[loc]] ### make into list to make _match happy
            if _match(q, vals, matchkind):
@@ -182,7 +182,7 @@ class CustomColumns(object):
             else:
                 is_category = False
             if v['is_multiple']:
-                is_m = '|'
+                is_m = ',' if v['datatype'] == 'composite' else '|'
             else:
                 is_m = None
             tn = 'custom_column_{0}'.format(v['num'])
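So after this change the separator depends on the column type: composite columns, which render as comma-separated text, split on ',', while ordinary multi-valued text columns keep the internal '|' separator. A toy illustration with dict stand-ins for the column metadata rows:

    def separator_for(col):
        if not col['is_multiple']:
            return None
        return ',' if col['datatype'] == 'composite' else '|'

    print(separator_for({'is_multiple': True,  'datatype': 'composite'}))  # ','
    print(separator_for({'is_multiple': True,  'datatype': 'text'}))       # '|'
    print(separator_for({'is_multiple': False, 'datatype': 'text'}))       # None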
@@ -318,7 +318,7 @@ class CustomColumns(object):
         self.conn.commit()

     def set_custom_column_metadata(self, num, name=None, label=None,
-                                   is_editable=None, display=None):
+                                   is_editable=None, display=None, notify=True):
         changed = False
         if name is not None:
             self.conn.execute('UPDATE custom_columns SET name=? WHERE id=?',
@@ -340,6 +340,9 @@ class CustomColumns(object):

         if changed:
             self.conn.commit()
+        if notify:
+            self.notify('metadata', [])
+
         return changed

     def set_custom_bulk_multiple(self, ids, add=[], remove=[],
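The new notify keyword lets a caller make several metadata changes without emitting one GUI notification per call. A hypothetical batch update (the db object and column numbers are invented for illustration):

    # Rename two custom columns, then notify listeners once at the end.
    for num, new_name in ((1, 'Genre'), (2, 'Mood')):
        db.set_custom_column_metadata(num, name=new_name, notify=False)
    db.notify('metadata', [])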
@@ -595,7 +598,7 @@ class CustomColumns(object):
             raise ValueError('%r is not a supported data type'%datatype)
         normalized  = datatype not in ('datetime', 'comments', 'int', 'bool',
                 'float', 'composite')
-        is_multiple = is_multiple and datatype in ('text',)
+        is_multiple = is_multiple and datatype in ('text', 'composite')
         num = self.conn.execute(
                 ('INSERT INTO '
                 'custom_columns(label,name,datatype,is_multiple,editable,display,normalized)'
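With the relaxed check, a composite column can now be flagged multi-valued when it is created. A hypothetical call (the method name, template and display keys are assumptions for illustration; only the column fields follow the INSERT statement above):

    db.create_custom_column(label='mytags', name='My Tags',
                            datatype='composite', is_multiple=True,
                            display={'composite_template': '{tags}',
                                     'make_category': True})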
@@ -1224,7 +1224,12 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
             if field['datatype'] == 'composite':
                 dex = field['rec_index']
                 for book in self.data.iterall():
-                    if book[dex] == id_:
+                    if field['is_multiple']:
+                        vals = [v.strip() for v in book[dex].split(field['is_multiple'])
+                                if v.strip()]
+                        if id_ in vals:
+                            ans.add(book[0])
+                    elif book[dex] == id_:
                         ans.add(book[0])
             return ans

@@ -1354,6 +1359,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
             cat = tb_cats[category]
             if cat['datatype'] == 'composite' and \
                     cat['display'].get('make_category', False):
                 tids[category] = {}
                 tcategories[category] = {}
-            md.append((category, cat['rec_index'], cat['is_multiple']))
+            md.append((category, cat['rec_index'], cat['is_multiple'],
+                       cat['datatype'] == 'composite'))
@@ -1402,8 +1408,18 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
                             prints('get_categories: item', val, 'is not in', cat, 'list!')
                 else:
                     vals = book[dex].split(mult)
+                    if is_comp:
+                        vals = [v.strip() for v in vals if v.strip()]
+                        for val in vals:
+                            if val not in tids:
+                                tids[cat][val] = (val, val)
+                            item = tcategories[cat].get(val, None)
+                            if not item:
+                                item = tag_class(val, val)
+                                tcategories[cat][val] = item
+                            item.c += 1
+                            item.id = val
                 for val in vals:
                     if not val: continue
                     try:
                         (item_id, sort_val) = tids[cat][val] # let exceptions fly
                         item = tcategories[cat].get(val, None)
@@ -364,11 +364,11 @@ class FieldMetadata(dict):
             self._tb_cats[k]['display'] = {}
             self._tb_cats[k]['is_editable'] = True
             self._add_search_terms_to_map(k, v['search_terms'])
-        for x in ('timestamp', 'last_modified'):
-            self._tb_cats[x]['display'] = {
+        self._tb_cats['timestamp']['display'] = {
                         'date_format': tweaks['gui_timestamp_display_format']}
         self._tb_cats['pubdate']['display'] = {
                         'date_format': tweaks['gui_pubdate_display_format']}
+        self._tb_cats['last_modified']['display'] = {'date_format': 'iso'}
         self.custom_field_prefix = '#'
         self.get = self._tb_cats.get

@@ -236,15 +236,16 @@ The following functions are available in addition to those described in single-function mode:

 * ``format_date(x, date_format)`` -- format_date(val, format_string) -- format the value, which must be a date field, using the format_string, returning a string. The formatting codes are::

     d    : the day as number without a leading zero (1 to 31)
-    dd   : the day as number with a leading zero (01 to 31) '
-    ddd  : the abbreviated localized day name (e.g. "Mon" to "Sun"). '
-    dddd : the long localized day name (e.g. "Monday" to "Sunday"). '
-    M    : the month as number without a leading zero (1 to 12). '
-    MM   : the month as number with a leading zero (01 to 12) '
-    MMM  : the abbreviated localized month name (e.g. "Jan" to "Dec"). '
-    MMMM : the long localized month name (e.g. "January" to "December"). '
-    yy   : the year as two digit number (00 to 99). '
-    yyyy : the year as four digit number.'
+    dd   : the day as number with a leading zero (01 to 31)
+    ddd  : the abbreviated localized day name (e.g. "Mon" to "Sun").
+    dddd : the long localized day name (e.g. "Monday" to "Sunday").
+    M    : the month as number without a leading zero (1 to 12).
+    MM   : the month as number with a leading zero (01 to 12)
+    MMM  : the abbreviated localized month name (e.g. "Jan" to "Dec").
+    MMMM : the long localized month name (e.g. "January" to "December").
+    yy   : the year as two digit number (00 to 99).
+    yyyy : the year as four digit number.
+    iso  : the date with time and timezone. Must be the only format present.

 * ``eval(string)`` -- evaluates the string as a program, passing the local variables (those ``assign`` ed to). This permits using the template processor to construct complex results from local variables.
 * ``multiply(x, y)`` -- returns x * y. Throws an exception if either x or y are not numbers.
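For a feel of what these codes produce, here is a rough Python approximation of a few of them (calibre's actual template implementation differs; this is only to illustrate the output shapes):

    from datetime import datetime
    d = datetime(2011, 4, 15, 9, 30)
    print(d.strftime('%d %b %Y'))  # roughly 'dd MMM yyyy' -> '15 Apr 2011'
    print(d.isoformat())           # roughly the new 'iso' code -> '2011-04-15T09:30:00'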
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff