Merge from main branch

This commit is contained in:
Tom Scholl 2011-04-15 23:00:24 +00:00
commit cd547378b0
82 changed files with 83242 additions and 32963 deletions

View File

@ -19,6 +19,83 @@
# new recipes: # new recipes:
# - title: # - title:
- version: 0.7.55
date: 2011-04-15
new features:
- title: "Add a menu bar. Useful if you use a lot of plugins and are running out of space in your toolbars. By default the menu bar is hidden (except on OS X). You can add actions to it via Preferences->Toolbars. As soon as you add actions, it will become visible."
- title: "OS X: Make the main calibre window look a little more 'native' on OS X"
- title: "Show recently viewed books in the View button's drop down menu"
- title: "Add a button next to the search bar to toggle easily between highlight and restrict search modes"
- title: "Allow the use of arbitrary searches as search restrictions, rather than just saved searches. Do this by using the special entry '*Current Search' in the Search Restriction dropdown."
- title: "The Connect/share icon now changes color to indicate that the content server is running"
tickets: [755444]
- title: "Device drivers for Viewpad 7, Motorola Xoom and Asus Eee Note"
- title: "Add tags like composite custom column."
tickets: [759663]
- title: "Add a new date format code 'iso'. Permits formatting dates to see the complete time (via Preferences->Tweaks)"
- title: "Allow the use of data from the size column in the template language"
tickets: [759645]
- title: "Support reading/writing covers to txtz/htmlz files"
- title: "Speedup for large library sorting when using composite custom columns"
- title: "Move the boolean columns are tristate tweak to Preferences->Behavior"
bug fixes:
- title: "Fix a regression in 0.7.54 that broke reading covers/metadata from cbz files."
tickets: [756892]
- title: "Fix tweak names and help not translatable"
tickets: [756736]
    - title: "When the size of a book is less than 0.1MB but not zero, display the size as <0.1 instead of 0.0."
tickets: [755768]
- title: "HTMLZ input: Fix handling of HTML files encoded in an encoding other than UTF-8"
- title: "EPUB Input: Fix EPUB files with empty Adobe PAGE templates causing conversion to abort."
tickets: [760390]
- title: "Fix CHM input plugin not closing opened input file"
tickets: [760589]
- title: "MOBI Output: Make super/subscripts use a slightly smaller font when rendered on a Kindle. Also allow the use of vertical-align:top/bottom in the CSS to specify a super/subscript."
tickets: [758667]
- title: "LRF Input: Detect and workaround LRF files that have deeply nested spans, instead of crashing."
tickets: [759680]
    - title: "MOBI Output: Fix bug that would cause conversion to unnecessarily abort when malformed hyperlinks are present in the input document."
tickets: [759313]
- title: "Make true and false searches work correctly for numeric fields."
- title: "MOBI Output: The Ignore margins setting no longer ignores blockquotes, only margins set via CSS on other elements."
tickets: [758675]
- title: "Fix regression that caused clicking auto send to also change the email address in Preferences->Email"
improved recipes:
- Wall Street Journal
- Weblogs SL
- Tabu.ro
- Vecernje Novosti
new recipes:
- title: Hallo Assen and Dvhn
author: Reijendert
- version: 0.7.54 - version: 0.7.54
date: 2011-04-08 date: 2011-04-08

View File

@ -36,29 +36,38 @@ class BigOven(BasicNewsRecipe):
remove_attributes = ['style', 'font'] remove_attributes = ['style', 'font']
remove_tags = [dict(name='div', attrs={'class':['ppy-caption']}) def get_article_url(self, article):
,dict(name='div', attrs={'id':['float_corner']}) url = article.get('feedburner_origlink',article.get('link', None))
] front, middle, end = url.partition('comhttp//www.bigoven.com')
url = front + 'com' + end
return url
keep_only_tags = [dict(name='div', attrs={'id':['nosidebar_main']})]
remove_tags_after = [dict(name='div', attrs={'class':['display-field']})]
remove_tags = [dict(name='ul', attrs={'class':['tabs']})]
preprocess_regexps = [
(re.compile(r'Want detailed nutrition information?', re.DOTALL), lambda match: ''),
(re.compile('\(You could win \$100 in our ', re.DOTALL), lambda match: ''),
]
def preprocess_html(self, soup): def preprocess_html(self, soup):
for tag in soup.findAll(name='a', attrs={'class':['deflink']}):
tag.replaceWith(tag.string)
for tag in soup.findAll(name='a', text=re.compile(r'.*View Metric.*', re.DOTALL)): for tag in soup.findAll(name='a', text=re.compile(r'.*View Metric.*', re.DOTALL)):
tag.parent.parent.extract() tag.parent.parent.extract()
for tag in soup.findAll(name='a', text=re.compile(r'.*Add my own photo.*', re.DOTALL)): for tag in soup.findAll(text=re.compile(r'.*Try BigOven Pro for Free.*', re.DOTALL)):
tag.parent.parent.extract() tag.extract()
for tag in soup.findAll(name='div', attrs={'class':['container']}): for tag in soup.findAll(text=re.compile(r'.*Add my photo of this recipe.*', re.DOTALL)):
if tag.find(name='h1'): tag.parent.extract()
continue for tag in soup.findAll(name='a', text=re.compile(r'.*photo contest.*', re.DOTALL)):
if tag.find(name='h2', text=re.compile(r'.*Ingredients.*', re.DOTALL)): tag.parent.extract()
print 'tag found Ingred h2' for tag in soup.findAll(name='a', text='Remove ads'):
continue tag.parent.parent.extract()
if tag.find(name='h2', text=re.compile(r'Preparation.*', re.DOTALL)): for tag in soup.findAll(name='ol', attrs={'class':['recipe-tags']}):
print 'tag found Prep h2' tag.parent.extract()
continue
tag.extract()
return soup return soup
feeds = [(u'4 & 5 Star Rated Recipes', u'http://feeds.feedburner.com/Bigovencom-RecipeRaves?format=xml')] feeds = [(u'Recent Raves', u'http://www.bigoven.com/rss/recentraves'),
(u'Recipe Of The Day', u'http://feeds.feedburner.com/bigovencom-RecipeOfTheDay')]

View File

@ -18,7 +18,6 @@ class IrishTimes(BasicNewsRecipe):
oldest_article = 1.0 oldest_article = 1.0
max_articles_per_feed = 100 max_articles_per_feed = 100
no_stylesheets = True no_stylesheets = True
simultaneous_downloads= 5
r = re.compile('.*(?P<url>http:\/\/(www.irishtimes.com)|(rss.feedsportal.com\/c)\/.*\.html?).*') r = re.compile('.*(?P<url>http:\/\/(www.irishtimes.com)|(rss.feedsportal.com\/c)\/.*\.html?).*')
remove_tags = [dict(name='div', attrs={'class':'footer'})] remove_tags = [dict(name='div', attrs={'class':'footer'})]
@ -26,17 +25,17 @@ class IrishTimes(BasicNewsRecipe):
feeds = [ feeds = [
('Frontpage', 'http://www.irishtimes.com/feeds/rss/newspaper/index.rss'), ('Frontpage', 'http://www.irishtimes.com/feeds/rss/newspaper/index.rss'),
('Ireland', 'http://www.irishtimes.com/feeds/rss/newspaper/ireland.rss'), ('Ireland', 'http://rss.feedsportal.com/c/851/f/10845/index.rss'),
('World', 'http://www.irishtimes.com/feeds/rss/newspaper/world.rss'), ('World', 'http://rss.feedsportal.com/c/851/f/10846/index.rss'),
('Finance', 'http://www.irishtimes.com/feeds/rss/newspaper/finance.rss'), ('Finance', 'http://rss.feedsportal.com/c/851/f/10847/index.rss'),
('Features', 'http://www.irishtimes.com/feeds/rss/newspaper/features.rss'), ('Features', 'http://rss.feedsportal.com/c/851/f/10848/index.rss'),
('Sport', 'http://www.irishtimes.com/feeds/rss/newspaper/sport.rss'), ('Sport', 'http://rss.feedsportal.com/c/851/f/10849/index.rss'),
('Opinion', 'http://www.irishtimes.com/feeds/rss/newspaper/opinion.rss'), ('Opinion', 'http://rss.feedsportal.com/c/851/f/10850/index.rss'),
('Letters', 'http://www.irishtimes.com/feeds/rss/newspaper/letters.rss'), ('Letters', 'http://rss.feedsportal.com/c/851/f/10851/index.rss'),
('Magazine', 'http://www.irishtimes.com/feeds/rss/newspaper/magazine.rss'), ('Magazine', 'http://www.irishtimes.com/feeds/rss/newspaper/magazine.rss'),
('Health', 'http://www.irishtimes.com/feeds/rss/newspaper/health.rss'), ('Health', 'http://rss.feedsportal.com/c/851/f/10852/index.rss'),
('Education & Parenting', 'http://www.irishtimes.com/feeds/rss/newspaper/education.rss'), ('Education & Parenting', 'http://rss.feedsportal.com/c/851/f/10853/index.rss'),
('Motors', 'http://www.irishtimes.com/feeds/rss/newspaper/motors.rss'), ('Motors', 'http://rss.feedsportal.com/c/851/f/10854/index.rss'),
('An Teanga Bheo', 'http://www.irishtimes.com/feeds/rss/newspaper/anteangabheo.rss'), ('An Teanga Bheo', 'http://www.irishtimes.com/feeds/rss/newspaper/anteangabheo.rss'),
('Commercial Property', 'http://www.irishtimes.com/feeds/rss/newspaper/commercialproperty.rss'), ('Commercial Property', 'http://www.irishtimes.com/feeds/rss/newspaper/commercialproperty.rss'),
('Science Today', 'http://www.irishtimes.com/feeds/rss/newspaper/sciencetoday.rss'), ('Science Today', 'http://www.irishtimes.com/feeds/rss/newspaper/sciencetoday.rss'),
@ -57,5 +56,3 @@ class IrishTimes(BasicNewsRecipe):
def get_article_url(self, article): def get_article_url(self, article):
return article.link return article.link

View File

@ -1,12 +1,12 @@
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '2008-2010, Darko Miletic <darko.miletic at gmail.com>' __copyright__ = '2008-2011, Darko Miletic <darko.miletic at gmail.com>'
''' '''
nspm.rs nspm.rs
''' '''
import re import re
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import NavigableString from calibre.ebooks.BeautifulSoup import NavigableString, Tag
class Nspm(BasicNewsRecipe): class Nspm(BasicNewsRecipe):
title = 'Nova srpska politicka misao' title = 'Nova srpska politicka misao'
@ -21,7 +21,6 @@ class Nspm(BasicNewsRecipe):
INDEX = 'http://www.nspm.rs/?alphabet=l' INDEX = 'http://www.nspm.rs/?alphabet=l'
encoding = 'utf-8' encoding = 'utf-8'
language = 'sr' language = 'sr'
delay = 2
remove_empty_feeds = True remove_empty_feeds = True
publication_type = 'magazine' publication_type = 'magazine'
masthead_url = 'http://www.nspm.rs/templates/jsn_epic_pro/images/logol.jpg' masthead_url = 'http://www.nspm.rs/templates/jsn_epic_pro/images/logol.jpg'
@ -29,27 +28,21 @@ class Nspm(BasicNewsRecipe):
@font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)} @font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
body{font-family: "Times New Roman", serif1, serif} body{font-family: "Times New Roman", serif1, serif}
.article_description{font-family: Arial, sans1, sans-serif} .article_description{font-family: Arial, sans1, sans-serif}
img{margin-top:0.5em; margin-bottom: 0.7em} img{margin-top:0.5em; margin-bottom: 0.7em; display: block}
.author{color: #990000; font-weight: bold} .author{color: #990000; font-weight: bold}
.author,.createdate{font-size: 0.9em} """ .author,.createdate{font-size: 0.9em} """
conversion_options = { conversion_options = {
'comment' : description 'comment' : description
, 'tags' : category , 'tags' : category
, 'publisher' : publisher , 'publisher' : publisher
, 'language' : language , 'language' : language
, 'linearize_tables' : True , 'pretty_print' : True
} }
preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')] preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')]
keep_only_tags = [dict(attrs={'id':'jsn-mainbody'})] remove_tags = [dict(name=['link','script','meta','base','img'])]
remove_tags = [ remove_attributes = ['width','height','lang','xmlns:fb','xmlns:og','vspace','hspace','type','start','size']
dict(name=['link','object','embed','script','meta','base','iframe'])
,dict(attrs={'class':'buttonheading'})
]
remove_tags_before = dict(attrs={'class':'contentheading'})
remove_tags_after = dict(attrs={'class':'article_separator'})
remove_attributes = ['width','height']
def get_browser(self): def get_browser(self):
br = BasicNewsRecipe.get_browser() br = BasicNewsRecipe.get_browser()
@ -57,21 +50,67 @@ class Nspm(BasicNewsRecipe):
return br return br
feeds = [ feeds = [
(u'Rubrike' , u'http://www.nspm.rs/rubrike/feed/rss.html') (u'Rubrike' , u'http://www.nspm.rs/rubrike/feed/rss.html' )
,(u'Debate' , u'http://www.nspm.rs/debate/feed/rss.html') ,(u'Debate' , u'http://www.nspm.rs/debate/feed/rss.html' )
,(u'Reci i misli' , u'http://www.nspm.rs/reci-i-misli/feed/rss.html') ,(u'Reci i misli' , u'http://www.nspm.rs/reci-i-misli/feed/rss.html' )
,(u'Samo smeh srbina spasava', u'http://www.nspm.rs/samo-smeh-srbina-spasava/feed/rss.html') ,(u'Samo smeh srbina spasava', u'http://www.nspm.rs/samo-smeh-srbina-spasava/feed/rss.html')
,(u'Polemike' , u'http://www.nspm.rs/polemike/feed/rss.html') ,(u'Polemike' , u'http://www.nspm.rs/polemike/feed/rss.html' )
,(u'Prikazi' , u'http://www.nspm.rs/prikazi/feed/rss.html') ,(u'Prikazi' , u'http://www.nspm.rs/prikazi/feed/rss.html' )
,(u'Prenosimo' , u'http://www.nspm.rs/prenosimo/feed/rss.html') ,(u'Prenosimo' , u'http://www.nspm.rs/prenosimo/feed/rss.html' )
,(u'Hronika' , u'http://www.nspm.rs/tabela/hronika/feed/rss.html') ,(u'Hronika' , u'http://www.nspm.rs/tabela/hronika/feed/rss.html' )
] ]
def preprocess_html(self, soup): def preprocess_html(self, soup):
for item in soup.body.findAll(style=True): atitle = soup.body.find('a',attrs={'class':'contentpagetitle'})
del item['style'] if atitle:
for item in soup.body.findAll('h1'): cleanTitle = Tag(soup,'h1',[('class','contentpagetitle')])
nh = NavigableString(item.a.string) cnt = NavigableString(self.tag_to_string(atitle))
item.a.extract() cleanTitle.append(cnt)
item.insert(0,nh)
return self.adeify_images(soup) author = soup.body.find('span',attrs={'class':'author'})
if author:
author.extract()
author.name = 'div'
crdate = soup.body.find('td',attrs={'class':'createdate'})
if crdate:
cleanCrdate = Tag(soup,'div',[('class','createdate')])
cnt = NavigableString(self.tag_to_string(crdate))
cleanCrdate.append(cnt)
#get the dependant element
artText = Tag(soup,'div',[('class','text')])
textHolderp = crdate.parent
textHolder = textHolderp.nextSibling
while textHolder and (not isinstance(textHolder,Tag) or (textHolder.name <> textHolderp.name)):
textHolder = textHolder.nextSibling
if textHolder.td:
artText = textHolder.td
artText.name = 'div'
artText.attrs = []
artText['class'] = 'text'
artText.extract()
soup.body.contents=[]
soup.body.append(cleanTitle)
soup.body.append(author)
soup.body.append(cleanCrdate)
soup.body.append(artText)
for item in soup.findAll('a'):
limg = item.find('img')
if item.string is not None:
str = item.string
item.replaceWith(str)
else:
if limg:
item.name = 'div'
item.attrs = []
else:
str = self.tag_to_string(item)
item.replaceWith(str)
for item in soup.findAll('img'):
if not item.has_key('alt'):
item['alt'] = 'image'
return soup

View File

@ -2,7 +2,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net' __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en' __docformat__ = 'restructuredtext en'
__appname__ = 'calibre' __appname__ = 'calibre'
__version__ = '0.7.54' __version__ = '0.7.55'
__author__ = "Kovid Goyal <kovid@kovidgoyal.net>" __author__ = "Kovid Goyal <kovid@kovidgoyal.net>"
import re, importlib import re, importlib

View File

@ -51,6 +51,8 @@ Run an embedded python interpreter.
'with sqlite3 works.') 'with sqlite3 works.')
parser.add_option('-p', '--py-console', help='Run python console', parser.add_option('-p', '--py-console', help='Run python console',
default=False, action='store_true') default=False, action='store_true')
parser.add_option('-m', '--inspect-mobi',
help='Inspect the MOBI file at the specified path', default=None)
return parser return parser
@ -227,6 +229,9 @@ def main(args=sys.argv):
if len(args) > 1 and os.access(args[-1], os.R_OK): if len(args) > 1 and os.access(args[-1], os.R_OK):
sql_dump = args[-1] sql_dump = args[-1]
reinit_db(opts.reinitialize_db, sql_dump=sql_dump) reinit_db(opts.reinitialize_db, sql_dump=sql_dump)
elif opts.inspect_mobi is not None:
from calibre.ebooks.mobi.debug import inspect_mobi
inspect_mobi(opts.inspect_mobi)
else: else:
from calibre import ipython from calibre import ipython
ipython() ipython()

View File

@ -175,18 +175,18 @@ class EPUBInput(InputFormatPlugin):
raise ValueError( raise ValueError(
'EPUB files with DTBook markup are not supported') 'EPUB files with DTBook markup are not supported')
not_for_spine = set()
for y in opf.itermanifest():
id_ = y.get('id', None)
if id_ and y.get('media-type', None) in \
('application/vnd.adobe-page-template+xml',):
not_for_spine.add(id_)
for x in list(opf.iterspine()): for x in list(opf.iterspine()):
ref = x.get('idref', None) ref = x.get('idref', None)
if ref is None: if ref is None or ref in not_for_spine:
x.getparent().remove(x) x.getparent().remove(x)
continue continue
for y in opf.itermanifest():
if y.get('id', None) == ref and y.get('media-type', None) in \
('application/vnd.adobe-page-template+xml',):
p = x.getparent()
if p is not None:
p.remove(x)
break
with open('content.opf', 'wb') as nopf: with open('content.opf', 'wb') as nopf:
nopf.write(opf.render()) nopf.write(opf.render())

View File

@ -10,6 +10,7 @@ import os
from calibre import walk from calibre import walk
from calibre.customize.conversion import InputFormatPlugin from calibre.customize.conversion import InputFormatPlugin
from calibre.ebooks.chardet import xml_to_unicode
from calibre.utils.zipfile import ZipFile from calibre.utils.zipfile import ZipFile
class HTMLZInput(InputFormatPlugin): class HTMLZInput(InputFormatPlugin):
@ -34,6 +35,13 @@ class HTMLZInput(InputFormatPlugin):
html = tf.read() html = tf.read()
break break
# Encoding
if options.input_encoding:
ienc = options.input_encoding
else:
ienc = xml_to_unicode(html[:4096])[-1]
html = html.decode(ienc, 'replace')
# Run the HTML through the html processing plugin. # Run the HTML through the html processing plugin.
from calibre.customize.ui import plugin_for_input_format from calibre.customize.ui import plugin_for_input_format
html_input = plugin_for_input_format('html') html_input = plugin_for_input_format('html')

View File

@ -259,6 +259,7 @@ class MetadataUpdater(object):
trail = len(new_record0.getvalue()) % 4 trail = len(new_record0.getvalue()) % 4
pad = '\0' * (4 - trail) # Always pad w/ at least 1 byte pad = '\0' * (4 - trail) # Always pad w/ at least 1 byte
new_record0.write(pad) new_record0.write(pad)
new_record0.write('\0'*(1024*8))
# Rebuild the stream, update the pdbrecords pointers # Rebuild the stream, update the pdbrecords pointers
self.patchSection(0,new_record0.getvalue()) self.patchSection(0,new_record0.getvalue())

View File

@ -24,6 +24,7 @@ msprefs.defaults['ignore_fields'] = []
msprefs.defaults['max_tags'] = 20 msprefs.defaults['max_tags'] = 20
msprefs.defaults['wait_after_first_identify_result'] = 30 # seconds msprefs.defaults['wait_after_first_identify_result'] = 30 # seconds
msprefs.defaults['wait_after_first_cover_result'] = 60 # seconds msprefs.defaults['wait_after_first_cover_result'] = 60 # seconds
msprefs.defaults['swap_author_names'] = False
# Google covers are often poor quality (scans/errors) but they have high # Google covers are often poor quality (scans/errors) but they have high
# resolution, so they trump covers from better sources. So make sure they # resolution, so they trump covers from better sources. So make sure they

View File

@ -365,6 +365,18 @@ def identify(log, abort, # {{{
for r in results: for r in results:
r.tags = r.tags[:max_tags] r.tags = r.tags[:max_tags]
if msprefs['swap_author_names']:
for r in results:
def swap_to_ln_fn(a):
if ',' in a:
return a
parts = a.split(None)
if len(parts) <= 1:
return a
surname = parts[-1]
return '%s, %s' % (surname, ' '.join(parts[:-1]))
r.authors = [swap_to_ln_fn(a) for a in r.authors]
return results return results
# }}} # }}}

View File

@ -0,0 +1,379 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import struct, datetime
from calibre.utils.date import utc_tz
from calibre.ebooks.mobi.langcodes import main_language, sub_language
class PalmDOCAttributes(object):
    """Decode the 16-bit attributes field of a PalmDB header into named flags."""

    class Attr(object):
        """A single named attribute bit; val is the raw value masked by field."""

        def __init__(self, name, field, val):
            self.name = name
            self.val = val & field

        def __str__(self):
            return '%s: %s'%(self.name, bool(self.val))

    def __init__(self, raw):
        # raw is the two byte little-endian attributes field at offset 32
        # of the PalmDB header.
        self.val = struct.unpack(b'<H', raw)[0]
        self.attributes = []
        # Bit masks per the Palm File Format Specification.  BUG FIX: the
        # original used 0x12 and 0x14 for the last two flags, which are not
        # single-bit masks (they alias the 0x10 bit); the correct bits are
        # 0x20 (reset after install) and 0x40 (disallow beaming).
        for name, field in [('Read Only', 0x02), ('Dirty AppInfoArea', 0x04),
                ('Backup this database', 0x08),
                ('Okay to install newer over existing copy, if present on PalmPilot', 0x10),
                ('Force the PalmPilot to reset after this database is installed', 0x20),
                ('Don\'t allow copy of file to be beamed to other Pilot',
                    0x40)]:
            self.attributes.append(PalmDOCAttributes.Attr(name, field,
                self.val))

    def __str__(self):
        attrs = '\n\t'.join([str(x) for x in self.attributes])
        return 'PalmDOC Attributes: %s\n\t%s'%(bin(self.val), attrs)
class PalmDB(object):
    """Parse the 78 byte PalmDB (PDB) header at the start of a MOBI file.

    All multi-byte integers in this header are big-endian, except for the
    attributes field which PalmDOCAttributes reads as little-endian.
    """

    def __init__(self, raw):
        # raw must be (at least) the first 78 bytes of the file
        self.raw = raw

        # Topaz books are also Palm database files but have a different
        # internal layout, so refuse them up front.
        if self.raw.startswith(b'TPZ'):
            raise ValueError('This is a Topaz file')

        # Bytes 0-31: NUL padded database name (all NULs stripped)
        self.name = self.raw[:32].replace(b'\x00', b'')
        # Bytes 32-33: attribute bit field
        self.attributes = PalmDOCAttributes(self.raw[32:34])
        # Bytes 34-35: file format version
        self.version = struct.unpack(b'>H', self.raw[34:36])[0]

        # Bytes 36-47: creation/modification/backup timestamps stored as
        # seconds since the Palm (Mac) epoch of 1904-01-01.
        # NOTE(review): some tools write these relative to the Unix epoch
        # instead (signalled by the high bit) — confirm if exact timestamps
        # ever matter for this debug output.
        palm_epoch = datetime.datetime(1904, 1, 1, tzinfo=utc_tz)
        self.creation_date_raw = struct.unpack(b'>I', self.raw[36:40])[0]
        self.creation_date = (palm_epoch +
                datetime.timedelta(seconds=self.creation_date_raw))
        self.modification_date_raw = struct.unpack(b'>I', self.raw[40:44])[0]
        self.modification_date = (palm_epoch +
                datetime.timedelta(seconds=self.modification_date_raw))
        self.last_backup_date_raw = struct.unpack(b'>I', self.raw[44:48])[0]
        self.last_backup_date = (palm_epoch +
                datetime.timedelta(seconds=self.last_backup_date_raw))
        # Bytes 48-51: modification number; 52-59: app/sort info ids
        self.modification_number = struct.unpack(b'>I', self.raw[48:52])[0]
        self.app_info_id = self.raw[52:56]
        self.sort_info_id = self.raw[56:60]

        # Bytes 60-67: type and creator four character codes; together they
        # identify the database as a MOBI or PalmDOC book.
        self.type = self.raw[60:64]
        self.creator = self.raw[64:68]
        self.ident = self.type + self.creator

        if self.ident not in (b'BOOKMOBI', b'TEXTREAD'):
            raise ValueError('Unknown book ident: %r'%self.ident)
        self.uid_seed = self.raw[68:72]
        self.next_rec_list_id = self.raw[72:76]

        # Bytes 76-77: number of entries in the record index that follows
        # this header.
        self.number_of_records, = struct.unpack(b'>H', self.raw[76:78])

    def __str__(self):
        """Multi-line human readable dump of every parsed header field."""
        ans = ['*'*20 + ' PalmDB Header '+ '*'*20]
        ans.append('Name: %r'%self.name)
        ans.append(str(self.attributes))
        ans.append('Version: %s'%self.version)
        ans.append('Creation date: %s (%s)'%(self.creation_date.isoformat(),
            self.creation_date_raw))
        ans.append('Modification date: %s (%s)'%(self.modification_date.isoformat(),
            self.modification_date_raw))
        ans.append('Backup date: %s (%s)'%(self.last_backup_date.isoformat(),
            self.last_backup_date_raw))
        ans.append('Modification number: %s'%self.modification_number)
        ans.append('App Info ID: %r'%self.app_info_id)
        ans.append('Sort Info ID: %r'%self.sort_info_id)
        ans.append('Type: %r'%self.type)
        ans.append('Creator: %r'%self.creator)
        ans.append('UID seed: %r'%self.uid_seed)
        ans.append('Next record list id: %r'%self.next_rec_list_id)
        ans.append('Number of records: %s'%self.number_of_records)

        return '\n'.join(ans)
class Record(object):
    """One PDB record: its payload bytes plus its record-index entry fields."""

    def __init__(self, raw, header):
        # header is the (offset, flags, uid) triple taken from the PDB
        # record index.
        self.offset, self.flags, self.uid = header
        self.raw = raw

    @property
    def header(self):
        """One-line summary of this record's index-table entry."""
        fields = (self.offset, self.flags, self.uid)
        return 'Offset: %d Flags: %d UID: %d' % fields
class EXTHRecord(object):
    """A single EXTH metadata record: a numeric type code plus payload bytes."""

    # Human readable names for the known EXTH record type codes.
    TYPE_NAMES = {
            1 : 'DRM Server id',
            2 : 'DRM Commerce id',
            3 : 'DRM ebookbase book id',
            100 : 'author',
            101 : 'publisher',
            102 : 'imprint',
            103 : 'description',
            104 : 'isbn',
            105 : 'subject',
            106 : 'publishingdate',
            107 : 'review',
            108 : 'contributor',
            109 : 'rights',
            110 : 'subjectcode',
            111 : 'type',
            112 : 'source',
            113 : 'asin',
            114 : 'versionnumber',
            115 : 'sample',
            116 : 'startreading',
            117 : 'adult',
            118 : 'retailprice',
            119 : 'retailpricecurrency',
            201 : 'coveroffset',
            202 : 'thumboffset',
            203 : 'hasfakecover',
            204 : 'Creator Software',
            205 : 'Creator Major Version', # '>I'
            206 : 'Creator Minor Version', # '>I'
            207 : 'Creator Build number', # '>I'
            208 : 'watermark',
            209 : 'tamper_proof_keys',
            300 : 'fontsignature',
            301 : 'clippinglimit', # percentage '>B'
            402 : 'publisherlimit',
            404 : 'TTS flag', # '>B' 1 - TTS disabled 0 - TTS enabled
            501 : 'cdetype', # 4 chars (PDOC or EBOK)
            502 : 'lastupdatetime',
            503 : 'updatedtitle',
    }

    def __init__(self, type_, data):
        self.type = type_
        self.data = data
        # Unknown type codes fall back to their repr so __str__ stays useful.
        self.name = self.TYPE_NAMES.get(self.type, repr(self.type))

    def __str__(self):
        return '%s (%d): %r'%(self.name, self.type, self.data)
class EXTHHeader(object):
    """Parse the EXTH metadata block that follows the MOBI header in record 0."""

    def __init__(self, raw):
        # raw must start at the b'EXTH' magic and extend at least through
        # the whole EXTH block.
        self.raw = raw
        if not self.raw.startswith(b'EXTH'):
            raise ValueError('EXTH header does not start with EXTH')
        # Bytes 4-7: total length of the EXTH block; 8-11: record count
        self.length, = struct.unpack(b'>I', self.raw[4:8])
        self.count, = struct.unpack(b'>I', self.raw[8:12])

        pos = 12
        self.records = []
        # Was ``for i in xrange(...)`` with an unused index; ``range`` is
        # equivalent here and keeps the module Python 3 safe.
        for _ in range(self.count):
            pos = self.read_record(pos)

    def read_record(self, pos):
        """Parse one EXTH record at *pos*; return the offset just past it."""
        type_, length = struct.unpack(b'>II', self.raw[pos:pos+8])
        # length includes the 8 byte type/length preamble
        data = self.raw[(pos+8):(pos+length)]
        self.records.append(EXTHRecord(type_, data))
        return pos + length

    def __str__(self):
        ans = ['*'*20 + ' EXTH Header '+ '*'*20]
        ans.append('EXTH header length: %d'%self.length)
        ans.append('Number of EXTH records: %d'%self.count)
        ans.append('EXTH records...')
        for r in self.records:
            ans.append(str(r))
        return '\n'.join(ans)
class MOBIHeader(object):
    """Decode the MOBI header stored in record 0 of the Palm database.

    All offsets below are relative to the start of record 0.  Optional
    trailing sections (DRM info, extra data flags, EXTH) are only parsed
    when the declared header length says they are present.
    """

    def __init__(self, record0):
        self.raw = record0.raw

        # Bytes 0-1: PalmDOC compression scheme
        self.compression_raw = self.raw[:2]
        self.compression = {1: 'No compression', 2: 'PalmDoc compression',
            17480: 'HUFF/CDIC compression'}.get(struct.unpack(b'>H',
                self.compression_raw)[0],
                repr(self.compression_raw))
        self.unused = self.raw[2:4]
        # Bytes 4-7: length of the uncompressed book text
        self.text_length, = struct.unpack(b'>I', self.raw[4:8])
        # Bytes 8-11: count and (max) size of the text records
        self.number_of_text_records, self.text_record_size = \
                struct.unpack(b'>HH', self.raw[8:12])
        self.encryption_type_raw, = struct.unpack(b'>H', self.raw[12:14])
        self.encryption_type = {0: 'No encryption',
                1: 'Old mobipocket encryption',
                2:'Mobipocket encryption'}.get(self.encryption_type_raw,
                        repr(self.encryption_type_raw))
        self.unknown = self.raw[14:16]

        # The MOBI header proper starts at offset 16 with the b'MOBI' magic
        self.identifier = self.raw[16:20]
        if self.identifier != b'MOBI':
            raise ValueError('Identifier %r unknown'%self.identifier)

        # Bytes 20-23: declared length of the MOBI header (from offset 16)
        self.length, = struct.unpack(b'>I', self.raw[20:24])
        self.type_raw, = struct.unpack(b'>I', self.raw[24:28])
        self.type = {
                2 : 'Mobipocket book',
                3 : 'PalmDOC book',
                4 : 'Audio',
                257 : 'News',
                258 : 'News Feed',
                259 : 'News magazine',
                513 : 'PICS',
                514 : 'Word',
                515 : 'XLS',
                516 : 'PPT',
                517 : 'TEXT',
                518 : 'HTML',
            }.get(self.type_raw, repr(self.type_raw))

        self.encoding_raw, = struct.unpack(b'>I', self.raw[28:32])
        self.encoding = {
                1252 : 'cp1252',
                65001: 'utf-8',
            }.get(self.encoding_raw, repr(self.encoding_raw))
        self.uid = self.raw[32:36]
        # BUG FIX: the original was missing the trailing comma here, which
        # left file_version as a 1-tuple instead of an int ('%d' formatting
        # happened to accept the 1-tuple, hiding the bug).
        self.file_version, = struct.unpack(b'>I', self.raw[36:40])
        self.reserved = self.raw[40:80]
        self.first_non_book_record, = struct.unpack(b'>I', self.raw[80:84])
        # Offset/length of the full book title stored after the headers
        self.fullname_offset, = struct.unpack(b'>I', self.raw[84:88])
        self.fullname_length, = struct.unpack(b'>I', self.raw[88:92])
        # Locale: low byte is the language id, bits 10-17 the sub-language
        self.locale_raw, = struct.unpack(b'>I', self.raw[92:96])
        langcode = self.locale_raw
        langid    = langcode & 0xFF
        sublangid = (langcode >> 10) & 0xFF
        self.language = main_language.get(langid, 'ENGLISH')
        self.sublanguage = sub_language.get(sublangid, 'NEUTRAL')
        self.input_language = self.raw[96:100]
        # NOTE: attribute name 'output_langauage' is misspelled (sic); kept
        # for compatibility with any external readers of this attribute.
        self.output_langauage = self.raw[100:104]
        self.min_version, = struct.unpack(b'>I', self.raw[104:108])
        self.first_image_index, = struct.unpack(b'>I', self.raw[108:112])
        # HUFF/CDIC compression dictionary records, if any
        self.huffman_record_offset, = struct.unpack(b'>I', self.raw[112:116])
        self.huffman_record_count, = struct.unpack(b'>I', self.raw[116:120])
        self.unknown2 = self.raw[120:128]
        self.exth_flags, = struct.unpack(b'>I', self.raw[128:132])
        # Bit 0x40 of the EXTH flags signals an EXTH block after the header
        self.has_exth = bool(self.exth_flags & 0x40)
        self.has_drm_data = self.length >= 184 and len(self.raw) >= 184
        if self.has_drm_data:
            self.unknown3 = self.raw[132:164]
            self.drm_offset, = struct.unpack(b'>I', self.raw[164:168])
            self.drm_count, = struct.unpack(b'>I', self.raw[168:172])
            self.drm_size, = struct.unpack(b'>I', self.raw[172:176])
            self.drm_flags = bin(struct.unpack(b'>I', self.raw[176:180])[0])
        self.has_extra_data_flags = self.length >= 244 and len(self.raw) >= 244
        if self.has_extra_data_flags:
            self.unknown4 = self.raw[180:242]
            self.extra_data_flags = bin(struct.unpack(b'>H',
                self.raw[242:244])[0])

        if self.has_exth:
            # The EXTH block starts immediately after the MOBI header
            self.exth_offset = 16 + self.length
            self.exth = EXTHHeader(self.raw[self.exth_offset:])
            self.end_of_exth = self.exth_offset + self.exth.length
            self.bytes_after_exth = self.fullname_offset - self.end_of_exth

    def __str__(self):
        """Multi-line human readable dump of every parsed header field."""
        ans = ['*'*20 + ' MOBI Header '+ '*'*20]
        ans.append('Compression: %s'%self.compression)
        ans.append('Unused: %r'%self.unused)
        ans.append('Number of text records: %d'%self.number_of_text_records)
        ans.append('Text record size: %d'%self.text_record_size)
        ans.append('Encryption: %s'%self.encryption_type)
        ans.append('Unknown: %r'%self.unknown)
        ans.append('Identifier: %r'%self.identifier)
        ans.append('Header length: %d'% self.length)
        ans.append('Type: %s'%self.type)
        ans.append('Encoding: %s'%self.encoding)
        ans.append('UID: %r'%self.uid)
        ans.append('File version: %d'%self.file_version)
        ans.append('Reserved: %r'%self.reserved)
        ans.append('First non-book record: %d'% self.first_non_book_record)
        ans.append('Full name offset: %d'%self.fullname_offset)
        ans.append('Full name length: %d bytes'%self.fullname_length)
        ans.append('Langcode: %r'%self.locale_raw)
        ans.append('Language: %s'%self.language)
        ans.append('Sub language: %s'%self.sublanguage)
        ans.append('Input language: %r'%self.input_language)
        ans.append('Output language: %r'%self.output_langauage)
        ans.append('Min version: %d'%self.min_version)
        ans.append('First Image index: %d'%self.first_image_index)
        ans.append('Huffman record offset: %d'%self.huffman_record_offset)
        ans.append('Huffman record count: %d'%self.huffman_record_count)
        ans.append('Unknown2: %r'%self.unknown2)
        ans.append('EXTH flags: %r (%s)'%(self.exth_flags, self.has_exth))
        if self.has_drm_data:
            ans.append('Unknown3: %r'%self.unknown3)
            ans.append('DRM Offset: %s'%self.drm_offset)
            ans.append('DRM Count: %s'%self.drm_count)
            ans.append('DRM Size: %s'%self.drm_size)
            ans.append('DRM Flags: %r'%self.drm_flags)
        if self.has_extra_data_flags:
            ans.append('Unknown4: %r'%self.unknown4)
            ans.append('Extra data flags: %r'%self.extra_data_flags)

        ans = '\n'.join(ans)

        if self.has_exth:
            ans += '\n\n' + str(self.exth)
            ans += '\n\nBytes after EXTH: %d'%self.bytes_after_exth
        ans += '\nNumber of bytes after full name: %d' % (len(self.raw) - (self.fullname_offset +
            self.fullname_length))

        ans += '\nRecord 0 length: %d'%len(self.raw)
        return ans
class MOBIFile(object):
    """Read an entire MOBI file: PalmDB header, record index and all records."""

    def __init__(self, stream):
        # The whole file is read into memory; MOBI files are small enough
        # for a debugging tool that this is fine.
        self.raw = stream.read()

        self.palmdb = PalmDB(self.raw[:78])

        self.record_headers = []
        self.records = []
        # The record index starts at offset 78; each entry is 8 bytes:
        # a 4 byte data offset, 1 byte of flags and a 3 byte unique id.
        # Was ``xrange`` (the sibling loop below already used ``range``);
        # ``range`` behaves identically here and is Python 3 safe.
        for i in range(self.palmdb.number_of_records):
            pos = 78 + i * 8
            offset, a1, a2, a3, a4 = struct.unpack(b'>LBBBB', self.raw[pos:pos+8])
            flags, val = a1, a2 << 16 | a3 << 8 | a4
            self.record_headers.append((offset, flags, val))

        def section(section_number):
            # A record's payload runs from its offset up to the next
            # record's offset (or end of file for the last record).
            if section_number == self.palmdb.number_of_records - 1:
                end_off = len(self.raw)
            else:
                end_off = self.record_headers[section_number + 1][0]
            off = self.record_headers[section_number][0]
            return self.raw[off:end_off]

        for i in range(self.palmdb.number_of_records):
            self.records.append(Record(section(i), self.record_headers[i]))

        self.mobi_header = MOBIHeader(self.records[0])

    def print_header(self):
        """Dump the PalmDB header, record index and MOBI header to stdout."""
        print (str(self.palmdb).encode('utf-8'))
        print ()
        print ('Record headers:')
        for i, r in enumerate(self.records):
            print ('%6d. %s'%(i, r.header))
        print ()
        print (str(self.mobi_header).encode('utf-8'))
def inspect_mobi(path_or_stream):
    """Print the headers of the MOBI file at *path_or_stream*.

    Accepts either an already-open binary stream (used as-is and left open
    for the caller to manage) or a filesystem path.
    """
    if hasattr(path_or_stream, 'read'):
        MOBIFile(path_or_stream).print_header()
    else:
        # BUG FIX: the original opened the file here but never closed it;
        # use a context manager so the handle is always released.
        with open(path_or_stream, 'rb') as stream:
            MOBIFile(stream).print_header()
if __name__ == '__main__':
    import sys
    # Delegate to inspect_mobi instead of duplicating its logic inline
    # (the original also leaked the opened file handle here).
    inspect_mobi(sys.argv[1])

View File

@ -1511,7 +1511,7 @@ class MobiWriter(object):
record0.write(exth) record0.write(exth)
record0.write(title) record0.write(title)
record0 = record0.getvalue() record0 = record0.getvalue()
self._records[0] = record0 + ('\0' * (2452 - len(record0))) self._records[0] = record0 + ('\0' * (1024*8))
def _build_exth(self): def _build_exth(self):
oeb = self._oeb oeb = self._oeb

View File

@ -310,7 +310,6 @@ class BooksModel(QAbstractTableModel): # {{{
def sort(self, col, order, reset=True): def sort(self, col, order, reset=True):
if not self.db: if not self.db:
return return
self.about_to_be_sorted.emit(self.db.id)
if not isinstance(order, bool): if not isinstance(order, bool):
order = order == Qt.AscendingOrder order = order == Qt.AscendingOrder
label = self.column_map[col] label = self.column_map[col]
@ -321,6 +320,7 @@ class BooksModel(QAbstractTableModel): # {{{
self._sort(field, order, reset) self._sort(field, order, reset)
def _sort(self, label, order, reset): def _sort(self, label, order, reset):
self.about_to_be_sorted.emit(self.db.id)
self.db.sort(label, order) self.db.sort(label, order)
if reset: if reset:
self.reset() self.reset()

View File

@ -246,6 +246,36 @@ class BooksView(QTableView): # {{{
self.sortByColumn(idx, Qt.DescendingOrder) self.sortByColumn(idx, Qt.DescendingOrder)
else: else:
self._model.sort_by_named_field(field, order, reset) self._model.sort_by_named_field(field, order, reset)
def multisort(self, fields, reset=True, only_if_different=False):
if len(fields) == 0:
return
sh = self.cleanup_sort_history(self._model.sort_history,
ignore_column_map=True)
if only_if_different and len(sh) >= len(fields):
ret=True
for i,t in enumerate(fields):
if t[0] != sh[i][0]:
ret = False
break
if ret:
return
for n,d in reversed(fields):
if n in self._model.db.field_metadata.keys():
sh.insert(0, (n, d))
sh = self.cleanup_sort_history(sh, ignore_column_map=True)
self._model.sort_history = [tuple(x) for x in sh]
self._model.resort(reset=reset)
col = fields[0][0]
dir = Qt.AscendingOrder if fields[0][1] else Qt.DescendingOrder
if col in self.column_map:
col = self.column_map.index(col)
hdrs = self.horizontalHeader()
try:
hdrs.setSortIndicator(col, dir)
except:
pass
# }}} # }}}
# Ondevice column {{{ # Ondevice column {{{
@ -290,14 +320,14 @@ class BooksView(QTableView): # {{{
state = self.get_state() state = self.get_state()
self.write_state(state) self.write_state(state)
def cleanup_sort_history(self, sort_history): def cleanup_sort_history(self, sort_history, ignore_column_map=False):
history = [] history = []
for col, order in sort_history: for col, order in sort_history:
if not isinstance(order, bool): if not isinstance(order, bool):
continue continue
if col == 'date': if col == 'date':
col = 'timestamp' col = 'timestamp'
if col in self.column_map: if ignore_column_map or col in self.column_map:
if (not history or history[-1][0] != col): if (not history or history[-1][0] != col):
history.append([col, order]) history.append([col, order])
return history return history
@ -631,7 +661,7 @@ class BooksView(QTableView): # {{{
h = self.horizontalHeader() h = self.horizontalHeader()
for i in range(h.count()): for i in range(h.count()):
if not h.isSectionHidden(i) and h.sectionViewportPosition(i) >= 0: if not h.isSectionHidden(i) and h.sectionViewportPosition(i) >= 0:
self.scrollTo(self.model().index(row, i)) self.scrollTo(self.model().index(row, i), self.PositionAtCenter)
break break
def set_current_row(self, row, select=True): def set_current_row(self, row, select=True):

View File

@ -258,6 +258,7 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
r('max_tags', msprefs) r('max_tags', msprefs)
r('wait_after_first_identify_result', msprefs) r('wait_after_first_identify_result', msprefs)
r('wait_after_first_cover_result', msprefs) r('wait_after_first_cover_result', msprefs)
r('swap_author_names', msprefs)
self.configure_plugin_button.clicked.connect(self.configure_plugin) self.configure_plugin_button.clicked.connect(self.configure_plugin)
self.sources_model = SourcesModel(self) self.sources_model = SourcesModel(self)

View File

@ -21,7 +21,7 @@
<widget class="QStackedWidget" name="stack"> <widget class="QStackedWidget" name="stack">
<widget class="QWidget" name="page"> <widget class="QWidget" name="page">
<layout class="QGridLayout" name="gridLayout"> <layout class="QGridLayout" name="gridLayout">
<item row="0" column="0" rowspan="5"> <item row="0" column="0" rowspan="6">
<widget class="QGroupBox" name="groupBox"> <widget class="QGroupBox" name="groupBox">
<property name="title"> <property name="title">
<string>Metadata sources</string> <string>Metadata sources</string>
@ -98,7 +98,14 @@
</property> </property>
</widget> </widget>
</item> </item>
<item row="2" column="1"> <item row="2" column="1" colspan="2">
<widget class="QCheckBox" name="opt_swap_author_names">
<property name="text">
<string>Swap author names from FN LN to LN, FN</string>
</property>
</widget>
</item>
<item row="3" column="1">
<widget class="QLabel" name="label_2"> <widget class="QLabel" name="label_2">
<property name="text"> <property name="text">
<string>Max. number of &amp;tags to download:</string> <string>Max. number of &amp;tags to download:</string>
@ -108,10 +115,10 @@
</property> </property>
</widget> </widget>
</item> </item>
<item row="2" column="2"> <item row="3" column="2">
<widget class="QSpinBox" name="opt_max_tags"/> <widget class="QSpinBox" name="opt_max_tags"/>
</item> </item>
<item row="3" column="1"> <item row="4" column="1">
<widget class="QLabel" name="label_3"> <widget class="QLabel" name="label_3">
<property name="text"> <property name="text">
<string>Max. &amp;time to wait after first match is found:</string> <string>Max. &amp;time to wait after first match is found:</string>
@ -121,14 +128,14 @@
</property> </property>
</widget> </widget>
</item> </item>
<item row="3" column="2"> <item row="4" column="2">
<widget class="QSpinBox" name="opt_wait_after_first_identify_result"> <widget class="QSpinBox" name="opt_wait_after_first_identify_result">
<property name="suffix"> <property name="suffix">
<string> secs</string> <string> secs</string>
</property> </property>
</widget> </widget>
</item> </item>
<item row="4" column="1"> <item row="5" column="1">
<widget class="QLabel" name="label_4"> <widget class="QLabel" name="label_4">
<property name="text"> <property name="text">
<string>Max. time to wait after first &amp;cover is found:</string> <string>Max. time to wait after first &amp;cover is found:</string>
@ -138,7 +145,7 @@
</property> </property>
</widget> </widget>
</item> </item>
<item row="4" column="2"> <item row="5" column="2">
<widget class="QSpinBox" name="opt_wait_after_first_cover_result"> <widget class="QSpinBox" name="opt_wait_after_first_cover_result">
<property name="suffix"> <property name="suffix">
<string> secs</string> <string> secs</string>

View File

@ -413,12 +413,12 @@ class ResultCache(SearchQueryParser): # {{{
cast = adjust = lambda x: x cast = adjust = lambda x: x
if query == 'false': if query == 'false':
if dt == 'rating': if dt == 'rating' or location == 'cover':
relop = lambda x,y: not bool(x) relop = lambda x,y: not bool(x)
else: else:
relop = lambda x,y: x is None relop = lambda x,y: x is None
elif query == 'true': elif query == 'true':
if dt == 'rating': if dt == 'rating' or location == 'cover':
relop = lambda x,y: bool(x) relop = lambda x,y: bool(x)
else: else:
relop = lambda x,y: x is not None relop = lambda x,y: x is not None
@ -592,7 +592,8 @@ class ResultCache(SearchQueryParser): # {{{
candidates = self.universal_set() candidates = self.universal_set()
if len(candidates) == 0: if len(candidates) == 0:
return matches return matches
self.test_location_is_valid(location, query) if location not in self.all_search_locations:
return matches
if len(location) > 2 and location.startswith('@') and \ if len(location) > 2 and location.startswith('@') and \
location[1:] in self.db_prefs['grouped_search_terms']: location[1:] in self.db_prefs['grouped_search_terms']:

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -19,8 +19,8 @@ If this module is run, it will perform a series of unit tests.
import sys, string, operator import sys, string, operator
from calibre.utils.pyparsing import CaselessKeyword, Group, Forward, \ from calibre.utils.pyparsing import CaselessKeyword, Group, Forward, \
CharsNotIn, Suppress, OneOrMore, MatchFirst, alphas, alphanums, \ CharsNotIn, Suppress, OneOrMore, MatchFirst, CaselessLiteral, \
Optional, ParseException, QuotedString, Word Optional, NoMatch, ParseException, QuotedString
from calibre.constants import preferred_encoding from calibre.constants import preferred_encoding
from calibre.utils.icu import sort_key from calibre.utils.icu import sort_key
@ -128,9 +128,12 @@ class SearchQueryParser(object):
self._tests_failed = False self._tests_failed = False
self.optimize = optimize self.optimize = optimize
# Define a token # Define a token
self.standard_locations = locations standard_locations = map(lambda x : CaselessLiteral(x)+Suppress(':'),
location = Optional(Word(alphas+'#', bodyChars=alphanums+'_')+Suppress(':'), locations)
default='all') location = NoMatch()
for l in standard_locations:
location |= l
location = Optional(location, default='all')
word_query = CharsNotIn(string.whitespace + '()') word_query = CharsNotIn(string.whitespace + '()')
#quoted_query = Suppress('"')+CharsNotIn('"')+Suppress('"') #quoted_query = Suppress('"')+CharsNotIn('"')+Suppress('"')
quoted_query = QuotedString('"', escChar='\\') quoted_query = QuotedString('"', escChar='\\')
@ -247,14 +250,7 @@ class SearchQueryParser(object):
raise ParseException(query, len(query), 'undefined saved search', self) raise ParseException(query, len(query), 'undefined saved search', self)
return self._get_matches(location, query, candidates) return self._get_matches(location, query, candidates)
def test_location_is_valid(self, location, query):
if location not in self.standard_locations:
raise ParseException(query, len(query),
_('No column exists with lookup name ') + location, self)
def _get_matches(self, location, query, candidates): def _get_matches(self, location, query, candidates):
location = location.lower()
self.test_location_is_valid(location, query)
if self.optimize: if self.optimize:
return self.get_matches(location, query, candidates=candidates) return self.get_matches(location, query, candidates=candidates)
else: else: