Merge from trunk

JimmXinu 2013-04-23 11:31:35 -05:00
commit 466fc4a522
38 changed files with 670 additions and 4088 deletions

View File

@ -79,13 +79,6 @@ License: GPL2+
The full text of the GPL is distributed as in
/usr/share/common-licenses/GPL-2 on Debian systems.
Files: src/pyPdf/*
Copyright: Copyright (c) 2006, Mathieu Fenniak
Copyright: Copyright (c) 2007, Ashish Kulkarni <kulkarni.ashish@gmail.com>
License: BSD
The full text of the BSD license is distributed as in
/usr/share/common-licenses/BSD on Debian systems.
Files: src/calibre/utils/lzx/*
Copyright: Copyright (C) 2002, Matthew T. Russotto
Copyright: Copyright (C) 2008, Marshall T. Vandegrift <llasram@gmail.com>
@ -100,49 +93,6 @@ License: BSD
The full text of the BSD license is distributed as in
/usr/share/common-licenses/BSD on Debian systems.
Files: src/calibre/utils/pyparsing.py
Copyright: Copyright (c) 2003-2008, Paul T. McGuire
License: MIT
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Files: src/calibre/utils/PythonMagickWand.py
Copyright: (c) 2007 - Achim Domma - domma@procoders.net
License: MIT
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Files: src/calibre/utils/msdes/d3des.h:
Files: src/calibre/utils/msdes/des.c:
Copyright: Copyright (C) 1988,1989,1990,1991,1992, Richard Outerbridge

View File

@ -1,7 +1,7 @@
__license__ = 'GPL v3'
__copyright__ = '2010-2012, Darko Miletic <darko.miletic at gmail.com>'
__copyright__ = '2010-2013, Darko Miletic <darko.miletic at gmail.com>'
'''
www.ft.com/uk-edition
www.ft.com/intl/uk-edition
'''
import datetime
@ -29,7 +29,7 @@ class FinancialTimes(BasicNewsRecipe):
masthead_url = 'http://im.media.ft.com/m/img/masthead_main.jpg'
LOGIN = 'https://registration.ft.com/registration/barrier/login'
LOGIN2 = 'http://media.ft.com/h/subs3.html'
INDEX = 'http://www.ft.com/uk-edition'
INDEX = 'http://www.ft.com/intl/uk-edition'
PREFIX = 'http://www.ft.com'
conversion_options = {

View File

@ -1,20 +1,21 @@
__license__ = 'GPL v3'
__copyright__ = '2013, Darko Miletic <darko.miletic at gmail.com>'
__copyright__ = '2010-2013, Darko Miletic <darko.miletic at gmail.com>'
'''
http://www.ft.com/intl/us-edition
www.ft.com/intl/international-edition
'''
import datetime
from calibre.ptempfile import PersistentTemporaryFile
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe
from collections import OrderedDict
class FinancialTimes(BasicNewsRecipe):
title = 'Financial Times (US) printed edition'
title = 'Financial Times (International) printed edition'
__author__ = 'Darko Miletic'
description = "The Financial Times (FT) is one of the world's leading business news and information organisations, recognised internationally for its authority, integrity and accuracy."
publisher = 'The Financial Times Ltd.'
category = 'news, finances, politics, UK, World'
category = 'news, finances, politics, World'
oldest_article = 2
language = 'en'
max_articles_per_feed = 250
@ -28,7 +29,7 @@ class FinancialTimes(BasicNewsRecipe):
masthead_url = 'http://im.media.ft.com/m/img/masthead_main.jpg'
LOGIN = 'https://registration.ft.com/registration/barrier/login'
LOGIN2 = 'http://media.ft.com/h/subs3.html'
INDEX = 'http://www.ft.com/intl/us-edition'
INDEX = 'http://www.ft.com/intl/international-edition'
PREFIX = 'http://www.ft.com'
conversion_options = {
@ -93,7 +94,7 @@ class FinancialTimes(BasicNewsRecipe):
try:
urlverified = self.browser.open_novisit(url).geturl() # resolve redirect.
except:
continue
continue
title = self.tag_to_string(item)
date = strftime(self.timefmt)
articles.append({
@ -105,29 +106,30 @@ class FinancialTimes(BasicNewsRecipe):
return articles
def parse_index(self):
feeds = []
feeds = OrderedDict()
soup = self.index_to_soup(self.INDEX)
dates= self.tag_to_string(soup.find('div', attrs={'class':'btm-links'}).find('div'))
self.timefmt = ' [%s]'%dates
wide = soup.find('div',attrs={'class':'wide'})
if not wide:
return feeds
allsections = wide.findAll(attrs={'class':lambda x: x and 'footwell' in x.split()})
if not allsections:
return feeds
count = 0
for item in allsections:
count = count + 1
if self.test and count > 2:
return feeds
fitem = item.h3
if not fitem:
fitem = item.h4
ftitle = self.tag_to_string(fitem)
self.report_progress(0, _('Fetching feed')+' %s...'%(ftitle))
feedarts = self.get_artlinks(item.ul)
feeds.append((ftitle,feedarts))
return feeds
#dates= self.tag_to_string(soup.find('div', attrs={'class':'btm-links'}).find('div'))
#self.timefmt = ' [%s]'%dates
section_title = 'Untitled'
for column in soup.findAll('div', attrs = {'class':'feedBoxes clearfix'}):
for section in column. findAll('div', attrs = {'class':'feedBox'}):
sectiontitle=self.tag_to_string(section.find('h4'))
if '...' not in sectiontitle: section_title=sectiontitle
for article in section.ul.findAll('li'):
articles = []
title=self.tag_to_string(article.a)
url=article.a['href']
articles.append({'title':title, 'url':url, 'description':'', 'date':''})
if articles:
if section_title not in feeds:
feeds[section_title] = []
feeds[section_title] += articles
ans = [(key, val) for key, val in feeds.iteritems()]
return ans
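# How the rewritten parse_index is organised (assuming the usual
# BasicNewsRecipe contract): feeds is an OrderedDict keyed by section title, so
# sections keep the order in which they first appear on the index page, and
#   ans = [(key, val) for key, val in feeds.iteritems()]
# converts it into the list of (section title, list of article dicts) pairs
# that parse_index() is expected to return.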
def preprocess_html(self, soup):
items = ['promo-box','promo-title',
@ -174,9 +176,6 @@ class FinancialTimes(BasicNewsRecipe):
count += 1
tfile = PersistentTemporaryFile('_fa.html')
tfile.write(html)
tfile.close()
tfile.close()
self.temp_files.append(tfile)
return tfile.name
def cleanup(self):
self.browser.open('https://registration.ft.com/registration/login/logout?location=')

View File

@ -4,7 +4,7 @@ class AdvancedUserRecipe1366025923(BasicNewsRecipe):
title = u'Lightspeed Magazine'
language = 'en'
__author__ = 'Jose Pinto'
oldest_article = 7
oldest_article = 31
max_articles_per_feed = 100
auto_cleanup = True
use_embedded_content = False

View File

@ -36,6 +36,9 @@ from BeautifulSoup import BeautifulSoup
Changed order of regex to speed up processing
Version 1.9.3 23-05-2012
Updated Cover image
Version 1.9.4 19-04-2013
Added regex filter for mailto
Updated for new layout of metro-site
'''
class AdvancedUserRecipe1306097511(BasicNewsRecipe):
@ -43,7 +46,7 @@ class AdvancedUserRecipe1306097511(BasicNewsRecipe):
oldest_article = 1.2
max_articles_per_feed = 25
__author__ = u'DrMerry'
description = u'Metro Nederland'
description = u'Metro Nederland v1.9.4 2013-04-19'
language = u'nl'
simultaneous_downloads = 5
masthead_url = 'http://blog.metronieuws.nl/wp-content/themes/metro/images/header.gif'
@ -68,13 +71,17 @@ class AdvancedUserRecipe1306097511(BasicNewsRecipe):
#(re.compile('(</?)h2', re.DOTALL|re.IGNORECASE),lambda match:'\1em')
]
remove_tags_before= dict(id='date')
remove_tags_after = [dict(name='div', attrs={'class':['column-1-3','gallery-text']})]#id='share-and-byline')]
remove_tags_before= dict(id='subwrapper')
remove_tags_after = dict(name='div', attrs={'class':['body-area','article-main-area']})
#name='div', attrs={'class':['subwrapper']})]
#'column-1-3','gallery-text']})]#id='share-and-byline')]
filter_regexps = [r'mailto:.*']
remove_tags = [
dict(name=['iframe','script','noscript','style']),
dict(name='div', attrs={'class':['column-4-5','column-1-5','ad-msg','col-179 ','col-373 ','clear','ad','navigation',re.compile('share-tools(-top)?'),'tools','metroCommentFormWrap','article-tools-below-title','related-links','padding-top-15',re.compile('^promo.*?$'),'teaser-component',re.compile('fb(-comments|_iframe_widget)'),'promos','header-links','promo-2']}),
dict(id=['column-1-5-bottom','column-4-5',re.compile('^ad(\d+|adcomp.*?)?$'),'adadcomp-4','margin-5','sidebar',re.compile('^article-\d'),'comments','gallery-1']),
dict(name='div', attrs={'class':['aside clearfix','aside clearfix middle-col-line','comments','share-tools','article-right-column','column-4-5','column-1-5','ad-msg','col-179 ','col-373 ','clear','ad','navigation',re.compile('share-tools(-top)?'),'tools','metroCommentFormWrap','article-tools-below-title','related-links','padding-top-15',re.compile('^promo.*?$'),'teaser-component',re.compile('fb(-comments|_iframe_widget)'),'promos','header-links','promo-2']}),
dict(id=['article-2','googleads','column-1-5-bottom','column-4-5',re.compile('^ad(\d+|adcomp.*?)?$'),'adadcomp-4','margin-5','sidebar',re.compile('^article-\d'),'comments','gallery-1','sharez_container','ts-container','topshares','ts-title']),
dict(name='a', attrs={'name':'comments'}),
#dict(name='div', attrs={'data-href'}),
dict(name='img', attrs={'class':'top-line','title':'volledig scherm'}),

View File

@ -0,0 +1,27 @@
from calibre.web.feeds.news import BasicNewsRecipe
class HindustanTimes(BasicNewsRecipe):
title = u'Voice of America'
language = 'en'
__author__ = 'Krittika Goyal'
oldest_article = 15 #days
max_articles_per_feed = 25
#encoding = 'cp1252'
use_embedded_content = False
no_stylesheets = True
auto_cleanup = True
feeds = [
('All Zones',
'http://learningenglish.voanews.com/rss/?count=20'),
('World',
'http://learningenglish.voanews.com/rss/?count=20&zoneid=957'),
('USA',
'http://learningenglish.voanews.com/rss/?count=20&zoneid=958'),
('Health',
'http://learningenglish.voanews.com/rss/?count=20&zoneid=955'),
]

View File

@ -38,7 +38,7 @@ class Check(Command):
if cache.get(y, 0) == mtime:
continue
if (f.endswith('.py') and f not in (
'feedparser.py', 'pyparsing.py', 'markdown.py') and
'feedparser.py', 'markdown.py') and
'prs500/driver.py' not in y):
yield y, mtime
if f.endswith('.coffee'):

View File

@ -10253,7 +10253,7 @@ msgstr ""
#. name for inh
msgid "Ingush"
msgstr "Engelsk"
msgstr "Ingush"
#. name for inj
msgid "Inga; Jungle"

View File

@ -1448,7 +1448,6 @@ class StoreGoogleBooksStore(StoreBase):
headquarters = 'US'
formats = ['EPUB', 'PDF', 'TXT']
affiliate = True
class StoreGutenbergStore(StoreBase):
name = 'Project Gutenberg'

View File

@ -114,6 +114,19 @@ class Cache(object):
if self.dirtied_cache:
self.dirtied_sequence = max(self.dirtied_cache.itervalues())+1
@write_api
def initialize_template_cache(self):
self.formatter_template_cache = {}
@write_api
def refresh(self):
self._initialize_template_cache()
for field in self.fields.itervalues():
if hasattr(field, 'clear_cache'):
field.clear_cache() # Clear the composite cache
if hasattr(field, 'table'):
field.table.read(self.backend) # Reread data from metadata.db
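# Presumably (following calibre's @read_api/@write_api convention) refresh()
# already holds the write lock when it runs, so the underscore-prefixed
# self._initialize_template_cache() call above would be the lock-free variant
# generated by the decorator rather than a separately defined private method.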
@property
def field_metadata(self):
return self.backend.field_metadata

View File

@ -6,12 +6,13 @@ from __future__ import (unicode_literals, division, absolute_import,
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os
import os, traceback
from functools import partial
from calibre.db.backend import DB
from calibre.db.cache import Cache
from calibre.db.view import View
from calibre.utils.date import utcnow
class LibraryDatabase(object):
@ -29,6 +30,7 @@ class LibraryDatabase(object):
progress_callback=lambda x, y:True, restore_all_prefs=False):
self.is_second_db = is_second_db # TODO: Use is_second_db
self.listeners = set([])
backend = self.backend = DB(library_path, default_prefs=default_prefs,
read_only=read_only, restore_all_prefs=restore_all_prefs,
@ -50,6 +52,8 @@ class LibraryDatabase(object):
setattr(self, prop, partial(self.get_property,
loc=self.FIELD_MAP[fm]))
self.last_update_check = self.last_modified()
def close(self):
self.backend.close()
@ -71,9 +75,22 @@ class LibraryDatabase(object):
def library_id(self):
return self.backend.library_id
@property
def library_path(self):
return self.backend.library_path
@property
def dbpath(self):
return self.backend.dbpath
def last_modified(self):
return self.backend.last_modified()
def check_if_modified(self):
if self.last_modified() > self.last_update_check:
self.refresh()
self.last_update_check = utcnow()
@property
def custom_column_num_map(self):
return self.backend.custom_column_num_map
@ -86,9 +103,48 @@ class LibraryDatabase(object):
def FIELD_MAP(self):
return self.backend.FIELD_MAP
@property
def formatter_template_cache(self):
return self.data.cache.formatter_template_cache
def initialize_template_cache(self):
self.data.cache.initialize_template_cache()
def all_ids(self):
for book_id in self.data.cache.all_book_ids():
yield book_id
def refresh(self, field=None, ascending=True):
self.data.cache.refresh()
self.data.refresh(field=field, ascending=ascending)
def add_listener(self, listener):
'''
Add a listener. Will be called on change events with two arguments.
Event name and list of affected ids.
'''
self.listeners.add(listener)
def notify(self, event, ids=[]):
'Notify all listeners'
for listener in self.listeners:
try:
listener(event, ids)
except:
traceback.print_exc()
continue
# }}}
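# A hypothetical usage sketch for the listener API above (on_change and the
# 'metadata' event name are illustrative, not taken from this file): a listener
# is any callable accepting (event, ids).
#
#   def on_change(event, ids):
#       print 'changed:', event, sorted(ids)
#
#   db.add_listener(on_change)
#   db.notify('metadata', [1, 2])   # on_change receives ('metadata', [1, 2])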
def path(self, index, index_is_id=False):
'Return the relative path to the directory containing this books files as a unicode string.'
book_id = index if index_is_id else self.data.index_to_id(index)
return self.data.cache.field_for('path', book_id).replace('/', os.sep)
def abspath(self, index, index_is_id=False, create_dirs=True):
'Return the absolute path to the directory containing this books files as a unicode string.'
path = os.path.join(self.library_path, self.path(index, index_is_id=index_is_id))
if create_dirs and not os.path.exists(path):
os.makedirs(path)
return path

View File

@ -195,13 +195,13 @@ class DateSearch(object): # {{{
try:
qd = now() - timedelta(int(num))
except:
raise ParseException(query, len(query), 'Number conversion error')
raise ParseException(_('Number conversion error: {0}').format(num))
field_count = 3
else:
try:
qd = parse_date(query, as_utc=False)
except:
raise ParseException(query, len(query), 'Date conversion error')
raise ParseException(_('Date conversion error: {0}').format(query))
if '-' in query:
field_count = query.count('-') + 1
else:
@ -285,8 +285,8 @@ class NumericSearch(object): # {{{
try:
q = cast(query) * mult
except:
raise ParseException(query, len(query),
'Non-numeric value in query: %r'%query)
raise ParseException(
_('Non-numeric value in query: {0}').format(query))
for val, book_ids in field_iter():
if val is None:
@ -351,8 +351,8 @@ class KeyPairSearch(object): # {{{
if ':' in query:
q = [q.strip() for q in query.split(':')]
if len(q) != 2:
raise ParseException(query, len(query),
'Invalid query format for colon-separated search')
raise ParseException(
_('Invalid query format for colon-separated search: {0}').format(query))
keyq, valq = q
keyq_mkind, keyq = _matchkind(keyq)
valq_mkind, valq = _matchkind(valq)
@ -465,7 +465,8 @@ class Parser(SearchQueryParser):
if invert:
matches = self.all_book_ids - matches
return matches
raise ParseException(query, len(query), 'Recursive query group detected')
raise ParseException(
_('Recursive query group detected: {0}').format(query))
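# Note: ParseException here appears to be calibre's own (the GUI files in this
# commit switch their import from calibre.utils.pyparsing to
# calibre.utils.search_query_parser), so it takes a single translated message
# instead of pyparsing's (string, location, message) arguments that the old
# calls supplied.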
# If the user has asked to restrict searching over all field, apply
# that restriction

View File

@ -16,7 +16,7 @@ class LegacyTest(BaseTest):
'Test library wide properties'
def get_props(db):
props = ('user_version', 'is_second_db', 'library_id', 'field_metadata',
'custom_column_label_map', 'custom_column_num_map')
'custom_column_label_map', 'custom_column_num_map', 'library_path', 'dbpath')
fprops = ('last_modified', )
ans = {x:getattr(db, x) for x in props}
ans.update({x:getattr(db, x)() for x in fprops})
@ -51,6 +51,11 @@ class LegacyTest(BaseTest):
if label in {'tags', 'formats'}:
# Order is random in the old db for these
ans[label] = tuple(set(x.split(',')) if x else x for x in ans[label])
if label == 'series_sort':
# The old db code did not take book language into account
# when generating series_sort values (the first book has
# lang=deu)
ans[label] = ans[label][1:]
return ans
old = self.init_old()
@ -64,3 +69,31 @@ class LegacyTest(BaseTest):
# }}}
def test_refresh(self): # {{{
' Test refreshing the view after a change to metadata.db '
db = self.init_legacy()
db2 = self.init_legacy()
self.assertEqual(db2.data.cache.set_field('title', {1:'xxx'}), set([1]))
db2.close()
del db2
self.assertNotEqual(db.title(1, index_is_id=True), 'xxx')
db.check_if_modified()
self.assertEqual(db.title(1, index_is_id=True), 'xxx')
# }}}
def test_legacy_getters(self): # {{{
old = self.init_old()
getters = ('path', 'abspath', 'title', 'authors', 'series',
'publisher', 'author_sort', 'authors', 'comments',
'comment', 'publisher', 'rating', 'series_index', 'tags',
'timestamp', 'uuid', 'pubdate', 'ondevice',
'metadata_last_modified', 'languages')
oldvals = {g:tuple(getattr(old, g)(x) for x in xrange(3)) + tuple(getattr(old, g)(x, True) for x in (1,2,3)) for g in getters}
old.close()
db = self.init_legacy()
newvals = {g:tuple(getattr(db, g)(x) for x in xrange(3)) + tuple(getattr(db, g)(x, True) for x in (1,2,3)) for g in getters}
for x in (oldvals, newvals):
x['tags'] = tuple(set(y.split(',')) if y else y for y in x['tags'])
self.assertEqual(oldvals, newvals)
# }}}

View File

@ -294,3 +294,11 @@ class View(object):
self.marked_ids = dict(izip(id_dict.iterkeys(), imap(unicode,
id_dict.itervalues())))
def refresh(self, field=None, ascending=True):
self._map = tuple(self.cache.all_book_ids())
self._map_filtered = tuple(self._map)
if field is not None:
self.sort(field, ascending)
if self.search_restriction or self.base_restriction:
self.search('', return_matches=False)

View File

@ -71,6 +71,7 @@ class ANDROID(USBMS):
0x42f7 : [0x216],
0x4365 : [0x216],
0x4366 : [0x216],
0x4371 : [0x216],
},
# Freescale
0x15a2 : {
@ -239,7 +240,7 @@ class ANDROID(USBMS):
'ADVANCED', 'SGH-I727', 'USB_FLASH_DRIVER', 'ANDROID',
'S5830I_CARD', 'MID7042', 'LINK-CREATE', '7035', 'VIEWPAD_7E',
'NOVO7', 'MB526', '_USB#WYK7MSF8KE', 'TABLET_PC', 'F', 'MT65XX_MS',
'ICS', 'E400', '__FILE-STOR_GADG', 'ST80208-1', 'GT-S5660M_CARD']
'ICS', 'E400', '__FILE-STOR_GADG', 'ST80208-1', 'GT-S5660M_CARD', 'XT894']
WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
'FILE-STOR_GADGET', 'SGH-T959_CARD', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD',
@ -250,7 +251,7 @@ class ANDROID(USBMS):
'FILE-CD_GADGET', 'GT-I9001_CARD', 'USB_2.0', 'XT875',
'UMS_COMPOSITE', 'PRO', '.KOBO_VOX', 'SGH-T989_CARD', 'SGH-I727',
'USB_FLASH_DRIVER', 'ANDROID', 'MID7042', '7035', 'VIEWPAD_7E',
'NOVO7', 'ADVANCED', 'TABLET_PC', 'F', 'E400_SD_CARD', 'ST80208-1']
'NOVO7', 'ADVANCED', 'TABLET_PC', 'F', 'E400_SD_CARD', 'ST80208-1', 'XT894']
OSX_MAIN_MEM = 'Android Device Main Memory'

View File

@ -35,11 +35,11 @@ class KOBO(USBMS):
gui_name = 'Kobo Reader'
description = _('Communicate with the Kobo Reader')
author = 'Timothy Legge and David Forrester'
version = (2, 0, 7)
version = (2, 0, 8)
dbversion = 0
fwversion = 0
supported_dbversion = 75
supported_dbversion = 80
has_kepubs = False
supported_platforms = ['windows', 'osx', 'linux']
@ -419,7 +419,7 @@ class KOBO(USBMS):
# If all this succeeds we need to delete the images files via the ImageID
return ImageID
def delete_images(self, ImageID):
def delete_images(self, ImageID, book_path):
if ImageID != None:
path_prefix = '.kobo/images/'
path = self._main_prefix + path_prefix + ImageID
@ -449,7 +449,7 @@ class KOBO(USBMS):
ImageID = self.delete_via_sql(ContentID, ContentType)
#print " We would now delete the Images for" + ImageID
self.delete_images(ImageID)
self.delete_images(ImageID, path)
if os.path.exists(path):
# Delete the ebook
@ -1199,15 +1199,21 @@ class KOBO(USBMS):
class KOBOTOUCH(KOBO):
name = 'KoboTouch'
gui_name = 'Kobo Touch'
gui_name = 'Kobo Touch/Glo/Mini/Aura HD'
author = 'David Forrester'
description = 'Communicate with the Kobo Touch, Glo and Mini firmware. Based on the existing Kobo driver by %s.' % (KOBO.author)
description = 'Communicate with the Kobo Touch, Glo, Mini and Aura HD ereaders. Based on the existing Kobo driver by %s.' % (KOBO.author)
# icon = I('devices/kobotouch.jpg')
supported_dbversion = 75
min_supported_dbversion = 53
min_dbversion_series = 65
min_dbversion_archive = 71
supported_dbversion = 80
min_supported_dbversion = 53
min_dbversion_series = 65
min_dbversion_archive = 71
min_dbversion_images_on_sdcard = 77
max_supported_fwversion = (2,5,1)
min_fwversion_images_on_sdcard = (2,4,1)
has_kepubs = True
booklist_class = KTCollectionsBookList
book_class = Book
@ -1291,12 +1297,13 @@ class KOBOTOUCH(KOBO):
TIMESTAMP_STRING = "%Y-%m-%dT%H:%M:%SZ"
GLO_PRODUCT_ID = [0x4173]
MINI_PRODUCT_ID = [0x4183]
TOUCH_PRODUCT_ID = [0x4163]
PRODUCT_ID = GLO_PRODUCT_ID + MINI_PRODUCT_ID + TOUCH_PRODUCT_ID
AURA_HD_PRODUCT_ID = [0x4193]
GLO_PRODUCT_ID = [0x4173]
MINI_PRODUCT_ID = [0x4183]
TOUCH_PRODUCT_ID = [0x4163]
PRODUCT_ID = AURA_HD_PRODUCT_ID + GLO_PRODUCT_ID + MINI_PRODUCT_ID + TOUCH_PRODUCT_ID
BCD = [0x0110, 0x0326]
BCD = [0x0110, 0x0326]
# Image file name endings. Made up of: image size, min_dbversion, max_dbversion,
COVER_FILE_ENDINGS = {
@ -1313,6 +1320,11 @@ class KOBOTOUCH(KOBO):
# ' - N3_LIBRARY_LIST.parsed':[(60,90),0, 53,],
# ' - N3_LIBRARY_SHELF.parsed': [(40,60),0, 52,],
}
AURA_HD_COVER_FILE_ENDINGS = {
' - N3_FULL.parsed': [(1080,1440), 0, 99,True,], # Used for screensaver, home screen
' - N3_LIBRARY_FULL.parsed':[(355, 471), 0, 99,False,], # Used for Details screen
' - N3_LIBRARY_GRID.parsed':[(149, 198), 0, 99,False,], # Used for library lists
}
#Following are the sizes used with pre2.1.4 firmware
# COVER_FILE_ENDINGS = {
# ' - N3_LIBRARY_FULL.parsed':[(355,530),0, 99,], # Used for Details screen
@ -1328,6 +1340,10 @@ class KOBOTOUCH(KOBO):
super(KOBOTOUCH, self).initialize()
self.bookshelvelist = []
def get_device_information(self, end_session=True):
self.set_device_name()
return super(KOBOTOUCH, self).get_device_information(end_session)
def books(self, oncard=None, end_session=True):
debug_print("KoboTouch:books - oncard='%s'"%oncard)
from calibre.ebooks.metadata.meta import path_to_ext
@ -1354,14 +1370,13 @@ class KOBOTOUCH(KOBO):
# Determine the firmware version
try:
with open(self.normalize_path(self._main_prefix + '.kobo/version'),
'rb') as f:
with open(self.normalize_path(self._main_prefix + '.kobo/version'), 'rb') as f:
self.fwversion = f.readline().split(',')[2]
self.fwversion = tuple((int(x) for x in self.fwversion.split('.')))
except:
self.fwversion = 'unknown'
self.fwversion = (0,0,0)
if self.fwversion != '1.0' and self.fwversion != '1.4':
self.has_kepubs = True
debug_print('Kobo device: %s' % self.gui_name)
debug_print('Version of driver:', self.version, 'Has kepubs:', self.has_kepubs)
debug_print('Version of firmware:', self.fwversion, 'Has kepubs:', self.has_kepubs)
@ -1374,7 +1389,7 @@ class KOBOTOUCH(KOBO):
debug_print(opts.extra_customization)
if opts.extra_customization:
debugging_title = opts.extra_customization[self.OPT_DEBUGGING_TITLE]
debug_print("KoboTouch:books - set_debugging_title to", debugging_title )
debug_print("KoboTouch:books - set_debugging_title to '%s'" % debugging_title )
bl.set_debugging_title(debugging_title)
debug_print("KoboTouch:books - length bl=%d"%len(bl))
need_sync = self.parse_metadata_cache(bl, prefix, self.METADATA_CACHE)
@ -1466,6 +1481,7 @@ class KOBOTOUCH(KOBO):
if show_debug:
self.debug_index = idx
debug_print("KoboTouch:update_booklist - idx=%d"%idx)
debug_print("KoboTouch:update_booklist - lpath=%s"%lpath)
debug_print('KoboTouch:update_booklist - bl[idx].device_collections=', bl[idx].device_collections)
debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map)
debug_print('KoboTouch:update_booklist - bookshelves=', bookshelves)
@ -1477,7 +1493,7 @@ class KOBOTOUCH(KOBO):
bl_cache[lpath] = None
if ImageID is not None:
imagename = self.imagefilename_from_imageID(ImageID)
imagename = self.imagefilename_from_imageID(prefix, ImageID)
if imagename is not None:
bl[idx].thumbnail = ImageWrapper(imagename)
if (ContentType == '6' and MimeType != 'application/x-kobo-epub+zip'):
@ -1717,12 +1733,14 @@ class KOBOTOUCH(KOBO):
debug_print("KoboTouch:books - end - oncard='%s'"%oncard)
return bl
def imagefilename_from_imageID(self, ImageID):
def imagefilename_from_imageID(self, prefix, ImageID):
show_debug = self.is_debugging_title(ImageID)
path = self.images_path(prefix)
path = self.normalize_path(path.replace('/', os.sep))
for ending, cover_options in self.cover_file_endings().items():
fpath = self._main_prefix + '.kobo/images/' + ImageID + ending
fpath = self.normalize_path(fpath.replace('/', os.sep))
fpath = path + ImageID + ending
if os.path.exists(fpath):
if show_debug:
debug_print("KoboTouch:imagefilename_from_imageID - have cover image fpath=%s" % (fpath))
@ -1764,7 +1782,7 @@ class KOBOTOUCH(KOBO):
if not self.copying_covers():
imageID = self.imageid_from_contentid(contentID)
self.delete_images(imageID)
self.delete_images(imageID, fname)
connection.commit()
cursor.close()
@ -1821,11 +1839,11 @@ class KOBOTOUCH(KOBO):
return imageId
def delete_images(self, ImageID):
def delete_images(self, ImageID, book_path):
debug_print("KoboTouch:delete_images - ImageID=", ImageID)
if ImageID != None:
path_prefix = '.kobo/images/'
path = self._main_prefix + path_prefix + ImageID
path = self.images_path(book_path)
path = path + ImageID
for ending in self.cover_file_endings().keys():
fpath = path + ending
@ -1872,12 +1890,14 @@ class KOBOTOUCH(KOBO):
def get_content_type_from_extension(self, extension):
debug_print("KoboTouch:get_content_type_from_extension - start")
# With new firmware, ContentType appears to be 6 for all types of sideloaded books.
if self.fwversion.startswith('2.'):
if self.fwversion >= (1,9,17) or extension == '.kobo' or extension == '.mobi':
debug_print("KoboTouch:get_content_type_from_extension - V2 firmware")
ContentType = 6
# For older firmware, it depends on the type of file.
elif extension == '.kobo' or extension == '.mobi':
ContentType = 6
else:
debug_print("KoboTouch:get_content_type_from_extension - calling super")
ContentType = super(KOBOTOUCH, self).get_content_type_from_extension(extension)
ContentType = 901
return ContentType
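# The tuple comparison above presumably works because books() now parses the
# firmware version into a tuple of ints (self.fwversion = tuple(int(x) ...),
# with a (0,0,0) fallback), replacing the plain string that the old
# startswith('2.') check relied on.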
def update_device_database_collections(self, booklists, collections_attributes, oncard):
@ -1920,7 +1940,7 @@ class KOBOTOUCH(KOBO):
delete_empty_shelves = opts.extra_customization[self.OPT_DELETE_BOOKSHELVES] and self.supports_bookshelves()
update_series_details = opts.extra_customization[self.OPT_UPDATE_SERIES_DETAILS] and self.supports_series()
debugging_title = opts.extra_customization[self.OPT_DEBUGGING_TITLE]
debug_print("KoboTouch:update_device_database_collections - set_debugging_title to", debugging_title )
debug_print("KoboTouch:update_device_database_collections - set_debugging_title to '%s'" % debugging_title )
booklists.set_debugging_title(debugging_title)
else:
delete_empty_shelves = False
@ -2088,8 +2108,8 @@ class KOBOTOUCH(KOBO):
# debug_print('KoboTouch: not uploading cover')
return
# Don't upload covers if book is on the SD card
if self._card_a_prefix and path.startswith(self._card_a_prefix):
# Only upload covers to SD card if that is supported
if self._card_a_prefix and path.startswith(self._card_a_prefix) and not self.supports_covers_on_sdcard():
return
if not opts.extra_customization[self.OPT_UPLOAD_GRAYSCALE_COVERS]:
@ -2111,6 +2131,16 @@ class KOBOTOUCH(KOBO):
ImageID = ImageID.replace('.', '_')
return ImageID
def images_path(self, path):
if self._card_a_prefix and path.startswith(self._card_a_prefix) and self.supports_covers_on_sdcard():
path_prefix = 'koboExtStorage/images/'
path = self._card_a_prefix + path_prefix
else:
path_prefix = '.kobo/images/'
path = self._main_prefix + path_prefix
return path
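# images_path() now appears to be the single place that decides where cover
# files live: imagefilename_from_imageID, delete_images and _upload_cover all
# route through it, so covers for books on a supported SD card resolve under
# koboExtStorage/images/ rather than main memory's .kobo/images/.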
def _upload_cover(self, path, filename, metadata, filepath, uploadgrayscale, keep_cover_aspect=False):
from calibre.utils.magick.draw import save_cover_data_to, identify_data
debug_print("KoboTouch:_upload_cover - filename='%s' uploadgrayscale='%s' "%(filename, uploadgrayscale))
@ -2151,8 +2181,8 @@ class KOBOTOUCH(KOBO):
cursor.close()
if ImageID != None:
path_prefix = '.kobo/images/'
path = self._main_prefix + path_prefix + ImageID
path = self.images_path(path) + ImageID
if show_debug:
debug_print("KoboTouch:_upload_cover - About to loop over cover endings")
@ -2496,6 +2526,8 @@ class KOBOTOUCH(KOBO):
return opts
def isAuraHD(self):
return self.detected_device.idProduct in self.AURA_HD_PRODUCT_ID
def isGlo(self):
return self.detected_device.idProduct in self.GLO_PRODUCT_ID
def isMini(self):
@ -2504,7 +2536,21 @@ class KOBOTOUCH(KOBO):
return self.detected_device.idProduct in self.TOUCH_PRODUCT_ID
def cover_file_endings(self):
return self.GLO_COVER_FILE_ENDINGS if self.isGlo() else self.COVER_FILE_ENDINGS
return self.GLO_COVER_FILE_ENDINGS if self.isGlo() else self.AURA_HD_COVER_FILE_ENDINGS if self.isAuraHD() else self.COVER_FILE_ENDINGS
def set_device_name(self):
device_name = self.gui_name
if self.isAuraHD():
device_name = 'Kobo Aura HD'
elif self.isGlo():
device_name = 'Kobo Glo'
elif self.isMini():
device_name = 'Kobo Mini'
elif self.isTouch():
device_name = 'Kobo Touch'
self.__class__.gui_name = device_name
return device_name
def copying_covers(self):
opts = self.settings()
@ -2524,6 +2570,44 @@ class KOBOTOUCH(KOBO):
def supports_kobo_archive(self):
return self.dbversion >= self.min_dbversion_archive
def supports_covers_on_sdcard(self):
return self.dbversion >= 77 and self.fwversion >= self.min_fwversion_images_on_sdcard
def modify_database_check(self, function):
# Checks to see whether the database version is supported
# and whether the user has chosen to support the firmware version
# debug_print("KoboTouch:modify_database_check - self.fwversion <= self.max_supported_fwversion=", self.fwversion > self.max_supported_fwversion)
if self.dbversion > self.supported_dbversion or self.fwversion > self.max_supported_fwversion:
# Unsupported database
opts = self.settings()
if not opts.extra_customization[self.OPT_SUPPORT_NEWER_FIRMWARE]:
debug_print('The database has been upgraded past supported version')
self.report_progress(1.0, _('Removing books from device...'))
from calibre.devices.errors import UserFeedback
raise UserFeedback(_("Kobo database version unsupported - See details"),
_('Your Kobo is running an updated firmware/database version.'
' As calibre does not know about this updated firmware,'
' database editing is disabled, to prevent corruption.'
' You can still send books to your Kobo with calibre, '
' but deleting books and managing collections is disabled.'
' If you are willing to experiment and know how to reset'
' your Kobo to Factory defaults, you can override this'
' check by right clicking the device icon in calibre and'
' selecting "Configure this device" and then the '
' "Attempt to support newer firmware" option.'
' Doing so may require you to perform a factory reset of'
' your Kobo.'
),
UserFeedback.WARN)
return False
else:
# The user chose to edit the database anyway
return True
else:
# Supported database version
return True
@classmethod
def is_debugging_title(cls, title):

View File

@ -95,7 +95,6 @@ class PDNOVEL(USBMS):
SUPPORTS_SUB_DIRS = False
DELETE_EXTS = ['.jpg', '.jpeg', '.png']
def upload_cover(self, path, filename, metadata, filepath):
coverdata = getattr(metadata, 'thumbnail', None)
if coverdata and coverdata[2]:
@ -226,9 +225,9 @@ class TREKSTOR(USBMS):
VENDOR_ID = [0x1e68]
PRODUCT_ID = [0x0041, 0x0042, 0x0052, 0x004e, 0x0056,
0x0067, # This is for the Pyrus Mini
0x003e, # This is for the EBOOK_PLAYER_5M https://bugs.launchpad.net/bugs/792091
0x5cL, # This is for the 4ink http://www.mobileread.com/forums/showthread.php?t=191318
0x0067, # This is for the Pyrus Mini
0x003e, # This is for the EBOOK_PLAYER_5M https://bugs.launchpad.net/bugs/792091
0x5cL, # This is for the 4ink http://www.mobileread.com/forums/showthread.php?t=191318
]
BCD = [0x0002, 0x100]
@ -427,8 +426,8 @@ class WAYTEQ(USBMS):
EBOOK_DIR_MAIN = 'Documents'
SCAN_FROM_ROOT = True
VENDOR_NAME = 'ROCKCHIP'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'RK28_SDK_DEMO'
VENDOR_NAME = ['ROCKCHIP', 'CBR']
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['RK28_SDK_DEMO', 'EINK_EBOOK_READE']
SUPPORTS_SUB_DIRS = True
def get_gui_name(self):
@ -445,7 +444,8 @@ class WAYTEQ(USBMS):
return self.EBOOK_DIR_CARD_A
def windows_sort_drives(self, drives):
if len(drives) < 2: return drives
if len(drives) < 2:
return drives
main = drives.get('main', None)
carda = drives.get('carda', None)
if main and carda:
@ -455,7 +455,8 @@ class WAYTEQ(USBMS):
def linux_swap_drives(self, drives):
# See https://bugs.launchpad.net/bugs/1151901
if len(drives) < 2 or not drives[1] or not drives[2]: return drives
if len(drives) < 2 or not drives[1] or not drives[2]:
return drives
drives = list(drives)
t = drives[0]
drives[0] = drives[1]
@ -463,7 +464,8 @@ class WAYTEQ(USBMS):
return tuple(drives)
def osx_sort_names(self, names):
if len(names) < 2: return names
if len(names) < 2:
return names
main = names.get('main', None)
card = names.get('carda', None)

View File

@ -58,8 +58,8 @@ class PICO(NEWSMY):
gui_name = 'Pico'
description = _('Communicate with the Pico reader.')
VENDOR_NAME = ['TECLAST', 'IMAGIN', 'LASER-', '']
WINDOWS_MAIN_MEM = ['USBDISK__USER', 'EB720']
VENDOR_NAME = ['TECLAST', 'IMAGIN', 'LASER-', 'LASER', '']
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['USBDISK__USER', 'EB720', 'EBOOK-EB720']
EBOOK_DIR_MAIN = 'Books'
FORMATS = ['EPUB', 'FB2', 'TXT', 'LRC', 'PDB', 'PDF', 'HTML', 'WTXT']
SCAN_FROM_ROOT = True

View File

@ -188,7 +188,6 @@ class EPUBInput(InputFormatPlugin):
raise DRMError(os.path.basename(path))
self.encrypted_fonts = self._encrypted_font_uris
if len(parts) > 1 and parts[0]:
delta = '/'.join(parts[:-1])+'/'
for elem in opf.itermanifest():

View File

@ -4,12 +4,15 @@ __copyright__ = '2010, Fabian Grassl <fg@jusmeum.de>'
__docformat__ = 'restructuredtext en'
import os, re, shutil
from os.path import dirname, abspath, relpath, exists, basename
from os.path import dirname, abspath, relpath as _relpath, exists, basename
from calibre.customize.conversion import OutputFormatPlugin, OptionRecommendation
from calibre import CurrentDir
from calibre.ptempfile import PersistentTemporaryDirectory
def relpath(*args):
return _relpath(*args).replace(os.sep, '/')
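# A hedged reading of this wrapper: os.path.relpath returns os.sep-separated
# paths (backslashes on Windows), while the generated HTML needs forward
# slashes in its links, so every relpath() call in this plugin is normalised
# to '/' here.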
class HTMLOutput(OutputFormatPlugin):
name = 'HTML Output'

View File

@ -1,7 +1,6 @@
'''
Basic support for manipulating OEB 1.x/2.0 content and metadata.
'''
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
@ -11,7 +10,7 @@ import os, re, uuid, logging
from collections import defaultdict
from itertools import count
from urlparse import urldefrag, urlparse, urlunparse, urljoin
from urllib import unquote as urlunquote
from urllib import unquote
from lxml import etree, html
from calibre.constants import filesystem_encoding, __version__
@ -40,11 +39,11 @@ CALIBRE_NS = 'http://calibre.kovidgoyal.net/2009/metadata'
RE_NS = 'http://exslt.org/regular-expressions'
MBP_NS = 'http://www.mobipocket.com'
XPNSMAP = {'h' : XHTML_NS, 'o1' : OPF1_NS, 'o2' : OPF2_NS,
'd09': DC09_NS, 'd10': DC10_NS, 'd11': DC11_NS,
'xsi': XSI_NS, 'dt' : DCTERMS_NS, 'ncx': NCX_NS,
'svg': SVG_NS, 'xl' : XLINK_NS, 're': RE_NS,
'mbp': MBP_NS, 'calibre': CALIBRE_NS }
XPNSMAP = {'h': XHTML_NS, 'o1': OPF1_NS, 'o2': OPF2_NS,
'd09': DC09_NS, 'd10': DC10_NS, 'd11': DC11_NS,
'xsi': XSI_NS, 'dt': DCTERMS_NS, 'ncx': NCX_NS,
'svg': SVG_NS, 'xl': XLINK_NS, 're': RE_NS,
'mbp': MBP_NS, 'calibre': CALIBRE_NS}
OPF1_NSMAP = {'dc': DC11_NS, 'oebpackage': OPF1_NS}
OPF2_NSMAP = {'opf': OPF2_NS, 'dc': DC11_NS, 'dcterms': DCTERMS_NS,
@ -142,7 +141,6 @@ def iterlinks(root, find_links_in_css=True):
if attr in link_attrs:
yield (el, attr, attribs[attr], 0)
if not find_links_in_css:
continue
if tag == XHTML('style') and el.text:
@ -363,7 +361,9 @@ URL_SAFE = set('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
URL_UNSAFE = [ASCII_CHARS - URL_SAFE, UNIBYTE_CHARS - URL_SAFE]
def urlquote(href):
"""Quote URL-unsafe characters, allowing IRI-safe characters."""
""" Quote URL-unsafe characters, allowing IRI-safe characters.
That is, this function returns valid IRIs not valid URIs. In particular,
IRIs can contain non-ascii characters. """
result = []
unsafe = 0 if isinstance(href, unicode) else 1
unsafe = URL_UNSAFE[unsafe]
@ -373,6 +373,19 @@ def urlquote(href):
result.append(char)
return ''.join(result)
def urlunquote(href):
# unquote must run on a bytestring and will return a bytestring
# If it runs on a unicode object, it returns a double encoded unicode
# string: unquote(u'%C3%A4') != unquote(b'%C3%A4').decode('utf-8')
# and the latter is correct
want_unicode = isinstance(href, unicode)
if want_unicode:
href = href.encode('utf-8')
href = unquote(href)
if want_unicode:
href = href.decode('utf-8')
return href
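# Concrete illustration of the comment above (Python 2 semantics, which this
# module targets): percent-decoding must run on bytes, and only the result
# should be decoded as UTF-8.
#
#   from urllib import unquote
#   unquote(u'%C3%A4')                  # -> u'\xc3\xa4'  (double encoded, wrong)
#   unquote(b'%C3%A4').decode('utf-8')  # -> u'\xe4'      (correct)
#   urlunquote(u'%C3%A4')               # -> u'\xe4'      (helper handles both cases)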
def urlnormalize(href):
"""Convert a URL into normalized form, with all and only URL-unsafe
characters URL quoted.
@ -469,7 +482,7 @@ class DirContainer(object):
return
def _unquote(self, path):
# urlunquote must run on a bytestring and will return a bytestring
# unquote must run on a bytestring and will return a bytestring
# If it runs on a unicode object, it returns a double encoded unicode
# string: unquote(u'%C3%A4') != unquote(b'%C3%A4').decode('utf-8')
# and the latter is correct
@ -497,7 +510,7 @@ class DirContainer(object):
return False
try:
path = os.path.join(self.rootdir, self._unquote(path))
except ValueError: #Happens if path contains quoted special chars
except ValueError: # Happens if path contains quoted special chars
return False
try:
return os.path.isfile(path)
@ -577,12 +590,13 @@ class Metadata(object):
allowed = self.allowed
if allowed is not None and term not in allowed:
raise AttributeError(
'attribute %r not valid for metadata term %r' \
'attribute %r not valid for metadata term %r'
% (self.attr(term), barename(obj.term)))
return self.attr(term)
def __get__(self, obj, cls):
if obj is None: return None
if obj is None:
return None
return obj.attrib.get(self.term_attr(obj), '')
def __set__(self, obj, value):
@ -628,8 +642,8 @@ class Metadata(object):
self.value = value
return property(fget=fget, fset=fset)
scheme = Attribute(lambda term: 'scheme' if \
term == OPF('meta') else OPF('scheme'),
scheme = Attribute(lambda term: 'scheme' if
term == OPF('meta') else OPF('scheme'),
[DC('identifier'), OPF('meta')])
file_as = Attribute(OPF('file-as'), [DC('creator'), DC('contributor'),
DC('title')])
@ -882,7 +896,6 @@ class Manifest(object):
return self._parse_xhtml(convert_markdown(data, title=title))
def _parse_css(self, data):
from cssutils import CSSParser, log, resolveImports
log.setLevel(logging.WARN)
@ -935,7 +948,7 @@ class Manifest(object):
data = self._loader(getattr(self, 'html_input_href',
self.href))
if not isinstance(data, basestring):
pass # already parsed
pass # already parsed
elif self.media_type.lower() in OEB_DOCS:
data = self._parse_xhtml(data)
elif self.media_type.lower()[-4:] in ('+xml', '/xml'):
@ -1022,7 +1035,8 @@ class Manifest(object):
target, frag = urldefrag(href)
target = target.split('/')
for index in xrange(min(len(base), len(target))):
if base[index] != target[index]: break
if base[index] != target[index]:
break
else:
index += 1
relhref = (['..'] * (len(base) - index)) + target[index:]

View File

@ -46,10 +46,11 @@ def is_raster_image(media_type):
return media_type and media_type.lower() in {
'image/png', 'image/jpeg', 'image/jpg', 'image/gif'}
COVER_TYPES = { 'coverimagestandard', 'other.ms-coverimage-standard',
'other.ms-titleimage-standard', 'other.ms-titleimage',
'other.ms-coverimage', 'other.ms-thumbimage-standard',
'other.ms-thumbimage', 'thumbimagestandard', 'cover'}
COVER_TYPES = {
'coverimagestandard', 'other.ms-coverimage-standard',
'other.ms-titleimage-standard', 'other.ms-titleimage',
'other.ms-coverimage', 'other.ms-thumbimage-standard',
'other.ms-thumbimage', 'thumbimagestandard', 'cover'}
def find_cover_image(container):
'Find a raster image marked as a cover in the OPF'
@ -92,7 +93,8 @@ def find_cover_page(container):
def find_cover_image_in_page(container, cover_page):
root = container.parsed(cover_page)
body = XPath('//h:body')(root)
if len(body) != 1: return
if len(body) != 1:
return
body = body[0]
images = []
for img in XPath('descendant::h:img[@src]|descendant::svg:svg/descendant::svg:image')(body):
@ -152,7 +154,7 @@ def create_epub_cover(container, cover_path):
ar = 'xMidYMid meet' if keep_aspect else 'none'
templ = CoverManager.SVG_TEMPLATE.replace('__ar__', ar)
templ = templ.replace('__viewbox__', '0 0 %d %d'%(width, height))
templ = templ.replace('__width__', str(width))
templ = templ.replace('__width__', str(width))
templ = templ.replace('__height__', str(height))
titlepage_item = container.generate_item('titlepage.xhtml',
id_prefix='titlepage')
@ -179,7 +181,7 @@ def create_epub_cover(container, cover_path):
guide = container.opf_get_or_create('guide')
container.insert_into_xml(guide, guide.makeelement(
OPF('reference'), type='cover', title=_('Cover'),
href=container.name_to_href(titlepage)))
href=container.name_to_href(titlepage, base=container.opf_name)))
metadata = container.opf_get_or_create('metadata')
meta = metadata.makeelement(OPF('meta'), name='cover')
meta.set('content', raster_cover_item.get('id'))

View File

@ -148,7 +148,6 @@ class OEBReader(object):
if not has_aut:
m.add('creator', self.oeb.translate(__('Unknown')), role='aut')
def _manifest_prune_invalid(self):
'''
Remove items from manifest that contain invalid data. This prevents
@ -197,6 +196,8 @@ class OEBReader(object):
item.media_type[-4:] in ('/xml', '+xml')):
hrefs = [r[2] for r in iterlinks(data)]
for href in hrefs:
if isinstance(href, bytes):
href = href.decode('utf-8')
href, _ = urldefrag(href)
if not href:
continue
@ -293,7 +294,7 @@ class OEBReader(object):
continue
try:
href = item.abshref(urlnormalize(href))
except ValueError: # Malformed URL
except ValueError: # Malformed URL
continue
if href not in manifest.hrefs:
continue
@ -394,9 +395,9 @@ class OEBReader(object):
authorElement = xpath(child,
'descendant::calibre:meta[@name = "author"]')
if authorElement :
if authorElement:
author = authorElement[0].text
else :
else:
author = None
descriptionElement = xpath(child,
@ -406,7 +407,7 @@ class OEBReader(object):
method='text', encoding=unicode).strip()
if not description:
description = None
else :
else:
description = None
index_image = xpath(child,
@ -497,7 +498,8 @@ class OEBReader(object):
titles = []
headers = []
for item in self.oeb.spine:
if not item.linear: continue
if not item.linear:
continue
html = item.data
title = ''.join(xpath(html, '/h:html/h:head/h:title/text()'))
title = COLLAPSE_RE.sub(' ', title.strip())
@ -515,17 +517,21 @@ class OEBReader(object):
if len(titles) > len(set(titles)):
use = headers
for title, item in izip(use, self.oeb.spine):
if not item.linear: continue
if not item.linear:
continue
toc.add(title, item.href)
return True
def _toc_from_opf(self, opf, item):
self.oeb.auto_generated_toc = False
if self._toc_from_ncx(item): return
if self._toc_from_ncx(item):
return
# Prefer HTML to tour based TOC, since several LIT files
# have good HTML TOCs but bad tour based TOCs
if self._toc_from_html(opf): return
if self._toc_from_tour(opf): return
if self._toc_from_html(opf):
return
if self._toc_from_tour(opf):
return
self._toc_from_spine(opf)
self.oeb.auto_generated_toc = True
@ -589,8 +595,10 @@ class OEBReader(object):
return True
def _pages_from_opf(self, opf, item):
if self._pages_from_ncx(opf, item): return
if self._pages_from_page_map(opf): return
if self._pages_from_ncx(opf, item):
return
if self._pages_from_page_map(opf):
return
return
def _cover_from_html(self, hcover):

View File

@ -47,6 +47,8 @@ class ManifestTrimmer(object):
item.data is not None:
hrefs = [r[2] for r in iterlinks(item.data)]
for href in hrefs:
if isinstance(href, bytes):
href = href.decode('utf-8')
try:
href = item.abshref(urlnormalize(href))
except:

View File

@ -51,7 +51,7 @@ class Links(object):
for link in self.links:
path, href, frag = link[0]
page, rect = link[1:]
combined_path = os.path.abspath(os.path.join(os.path.dirname(path), *href.split('/')))
combined_path = os.path.abspath(os.path.join(os.path.dirname(path), *unquote(href).split('/')))
is_local = not href or combined_path in self.anchors
annot = Dictionary({
'Type':Name('Annot'), 'Subtype':Name('Link'),

View File

@ -75,7 +75,7 @@ class GroupModel(QAbstractListModel):
def get_preferred_input_format_for_book(db, book_id):
recs = load_specifics(db, book_id)
if recs:
return recs.get('gui_preferred_input_format', None)
return recs.get('gui_preferred_input_format', None)
def get_available_formats_for_book(db, book_id):
available_formats = db.formats(book_id, index_is_id=True)
@ -147,6 +147,7 @@ class Config(ResizableDialog, Ui_Dialog):
self.connect(self.groups, SIGNAL('entered(QModelIndex)'),
self.show_group_help)
rb = self.buttonBox.button(self.buttonBox.RestoreDefaults)
rb.setText(_('Restore &Defaults'))
self.connect(rb, SIGNAL('clicked()'), self.restore_defaults)
self.groups.setMouseTracking(True)
geom = gprefs.get('convert_single_dialog_geom', None)
@ -188,7 +189,6 @@ class Config(ResizableDialog, Ui_Dialog):
return cls(self.stack, self.plumber.get_option_by_name,
self.plumber.get_option_help, self.db, self.book_id)
self.mw = widget_factory(MetadataWidget)
self.setWindowTitle(_('Convert')+ ' ' + unicode(self.mw.title.text()))
lf = widget_factory(LookAndFeelWidget)
@ -209,7 +209,8 @@ class Config(ResizableDialog, Ui_Dialog):
self.plumber.get_option_help, self.db, self.book_id)
while True:
c = self.stack.currentWidget()
if not c: break
if not c:
break
self.stack.removeWidget(c)
widgets = [self.mw, lf, hw, ps, sd, toc, sr]
@ -234,7 +235,6 @@ class Config(ResizableDialog, Ui_Dialog):
except:
pass
def setup_input_output_formats(self, db, book_id, preferred_input_format,
preferred_output_format):
if preferred_output_format:

View File

@ -12,7 +12,7 @@ from PyQt4.Qt import (QAbstractTableModel, Qt, pyqtSignal, QIcon, QImage,
QModelIndex, QVariant, QDateTime, QColor, QPixmap)
from calibre.gui2 import NONE, UNDEFINED_QDATETIME, error_dialog
from calibre.utils.pyparsing import ParseException
from calibre.utils.search_query_parser import ParseException
from calibre.ebooks.metadata import fmt_sidx, authors_to_string, string_to_authors
from calibre.ebooks.metadata.book.base import SafeFormat
from calibre.ptempfile import PersistentTemporaryFile

View File

@ -21,7 +21,7 @@ from PyQt4.Qt import (
QDialog, QVBoxLayout, QLabel, QDialogButtonBox, QStyle, QStackedWidget,
QWidget, QTableView, QGridLayout, QFontInfo, QPalette, QTimer, pyqtSignal,
QAbstractTableModel, QVariant, QSize, QListView, QPixmap, QModelIndex,
QAbstractListModel, QColor, QRect, QTextBrowser, QStringListModel)
QAbstractListModel, QColor, QRect, QTextBrowser, QStringListModel, QMenu, QCursor)
from PyQt4.QtWebKit import QWebView
from calibre.customize.ui import metadata_plugins
@ -40,7 +40,7 @@ from calibre.utils.ipc.simple_worker import fork_job, WorkerError
from calibre.ptempfile import TemporaryDirectory
# }}}
class RichTextDelegate(QStyledItemDelegate): # {{{
class RichTextDelegate(QStyledItemDelegate): # {{{
def __init__(self, parent=None, max_width=160):
QStyledItemDelegate.__init__(self, parent)
@ -77,7 +77,7 @@ class RichTextDelegate(QStyledItemDelegate): # {{{
painter.restore()
# }}}
class CoverDelegate(QStyledItemDelegate): # {{{
class CoverDelegate(QStyledItemDelegate): # {{{
needs_redraw = pyqtSignal()
@ -143,7 +143,7 @@ class CoverDelegate(QStyledItemDelegate): # {{{
# }}}
class ResultsModel(QAbstractTableModel): # {{{
class ResultsModel(QAbstractTableModel): # {{{
COLUMNS = (
'#', _('Title'), _('Published'), _('Has cover'), _('Has summary')
@ -182,7 +182,6 @@ class ResultsModel(QAbstractTableModel): # {{{
p = book.publisher if book.publisher else ''
return '<b>%s</b><br><i>%s</i>' % (d, p)
def data(self, index, role):
row, col = index.row(), index.column()
try:
@ -233,7 +232,7 @@ class ResultsModel(QAbstractTableModel): # {{{
# }}}
class ResultsView(QTableView): # {{{
class ResultsView(QTableView): # {{{
show_details_signal = pyqtSignal(object)
book_selected = pyqtSignal(object)
@ -316,7 +315,7 @@ class ResultsView(QTableView): # {{{
# }}}
class Comments(QWebView): # {{{
class Comments(QWebView): # {{{
def __init__(self, parent=None):
QWebView.__init__(self, parent)
@ -384,7 +383,7 @@ class Comments(QWebView): # {{{
return QSize(800, 300)
# }}}
class IdentifyWorker(Thread): # {{{
class IdentifyWorker(Thread): # {{{
def __init__(self, log, abort, title, authors, identifiers, caches):
Thread.__init__(self)
@ -441,7 +440,7 @@ class IdentifyWorker(Thread): # {{{
# }}}
class IdentifyWidget(QWidget): # {{{
class IdentifyWidget(QWidget): # {{{
rejected = pyqtSignal()
results_found = pyqtSignal()
@ -552,12 +551,11 @@ class IdentifyWidget(QWidget): # {{{
self.results_view.show_results(self.worker.results)
self.results_found.emit()
def cancel(self):
self.abort.set()
# }}}
class CoverWorker(Thread): # {{{
class CoverWorker(Thread): # {{{
def __init__(self, log, abort, title, authors, identifiers, caches):
Thread.__init__(self)
@ -609,7 +607,8 @@ class CoverWorker(Thread): # {{{
def scan_once(self, tdir, seen):
for x in list(os.listdir(tdir)):
if x in seen: continue
if x in seen:
continue
if x.endswith('.cover') and os.path.exists(os.path.join(tdir,
x+'.done')):
name = x.rpartition('.')[0]
@ -635,7 +634,7 @@ class CoverWorker(Thread): # {{{
# }}}
class CoversModel(QAbstractListModel): # {{{
class CoversModel(QAbstractListModel): # {{{
def __init__(self, current_cover, parent=None):
QAbstractListModel.__init__(self, parent)
@ -770,7 +769,7 @@ class CoversModel(QAbstractListModel): # {{{
# }}}
class CoversView(QListView): # {{{
class CoversView(QListView): # {{{
chosen = pyqtSignal()
@ -793,6 +792,8 @@ class CoversView(QListView): # {{{
type=Qt.QueuedConnection)
self.doubleClicked.connect(self.chosen, type=Qt.QueuedConnection)
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.show_context_menu)
def select(self, num):
current = self.model().index(num)
@ -814,9 +815,24 @@ class CoversView(QListView): # {{{
else:
self.select(self.m.index_from_pointer(pointer).row())
def show_context_menu(self, point):
idx = self.currentIndex()
if idx and idx.isValid() and not idx.data(Qt.UserRole).toPyObject():
m = QMenu()
m.addAction(QIcon(I('view.png')), _('View this cover at full size'), self.show_cover)
m.exec_(QCursor.pos())
def show_cover(self):
idx = self.currentIndex()
pmap = self.model().cover_pixmap(idx)
if pmap is not None:
from calibre.gui2.viewer.image_popup import ImageView
d = ImageView(self, pmap, unicode(idx.data(Qt.DisplayRole).toString()), geom_name='metadata_download_cover_popup_geom')
d(use_exec=True)
# }}}
class CoversWidget(QWidget): # {{{
class CoversWidget(QWidget): # {{{
chosen = pyqtSignal()
finished = pyqtSignal()
@ -922,7 +938,7 @@ class CoversWidget(QWidget): # {{{
# }}}
class LogViewer(QDialog): # {{{
class LogViewer(QDialog): # {{{
def __init__(self, log, parent=None):
QDialog.__init__(self, parent)
@ -970,7 +986,7 @@ class LogViewer(QDialog): # {{{
# }}}
class FullFetch(QDialog): # {{{
class FullFetch(QDialog): # {{{
def __init__(self, current_cover=None, parent=None):
QDialog.__init__(self, parent)
@ -1085,7 +1101,7 @@ class FullFetch(QDialog): # {{{
return self.exec_()
# }}}
class CoverFetch(QDialog): # {{{
class CoverFetch(QDialog): # {{{
def __init__(self, current_cover=None, parent=None):
QDialog.__init__(self, parent)

View File

@ -164,7 +164,7 @@ Author matching is exact.</string>
<item>
<widget class="QLabel" name="label_3">
<property name="text">
<string>Ignore files with the following extensions when automatically adding </string>
<string>&lt;b&gt;Ignore&lt;/b&gt; files with the following extensions when automatically adding </string>
</property>
<property name="wordWrap">
<bool>true</bool>

View File

@ -129,7 +129,7 @@
<item row="6" column="0">
<widget class="QLabel" name="label_16">
<property name="text">
<string>Max. OPDS &amp;ungrouped items:</string>
<string>Max. &amp;ungrouped items:</string>
</property>
<property name="buddy">
<cstring>opt_max_opds_ungrouped_items</cstring>

View File

@ -13,7 +13,7 @@ from PyQt4.Qt import (
from calibre.gui2 import error_dialog, question_dialog
from calibre.gui2.widgets import ComboBoxWithHelp
from calibre.utils.icu import sort_key
from calibre.utils.pyparsing import ParseException
from calibre.utils.search_query_parser import ParseException
from calibre.utils.search_query_parser import saved_searches
class SelectNames(QDialog): # {{{
@ -299,7 +299,7 @@ class SearchRestrictionMixin(object):
def __init__(self):
self.checked = QIcon(I('ok.png'))
self.empty = QIcon()
self.empty = QIcon(I('blank.png'))
self.search_based_vl_name = None
self.search_based_vl = None
@ -315,21 +315,24 @@ class SearchRestrictionMixin(object):
self.search_restriction.setVisible(False)
self.search_count.setText(_("(all books)"))
self.ar_menu = QMenu(_('Additional restriction'))
self.edit_menu = QMenu(_('Edit Virtual Library'))
self.rm_menu = QMenu(_('Remove Virtual Library'))
def add_virtual_library(self, db, name, search):
virt_libs = db.prefs.get('virtual_libraries', {})
virt_libs[name] = search
db.prefs.set('virtual_libraries', virt_libs)
def do_create_edit(self, editing=None):
def do_create_edit(self, name=None):
db = self.library_view.model().db
virt_libs = db.prefs.get('virtual_libraries', {})
cd = CreateVirtualLibrary(self, virt_libs.keys(), editing=editing)
cd = CreateVirtualLibrary(self, virt_libs.keys(), editing=name)
if cd.exec_() == cd.Accepted:
if editing:
self._remove_vl(editing, reapply=False)
if name:
self._remove_vl(name, reapply=False)
self.add_virtual_library(db, cd.library_name, cd.library_search)
if not editing or editing == db.data.get_base_restriction_name():
if not name or name == db.data.get_base_restriction_name():
self.apply_virtual_library(cd.library_name)
def virtual_library_clicked(self):
@ -337,16 +340,14 @@ class SearchRestrictionMixin(object):
m.clear()
a = m.addAction(_('Create Virtual Library'))
a.triggered.connect(partial(self.do_create_edit, editing=None))
a.triggered.connect(partial(self.do_create_edit, name=None))
self.edit_menu = a = QMenu()
a.setTitle(_('Edit Virtual Library'))
a.aboutToShow.connect(partial(self.build_virtual_library_list, remove=False))
a = self.edit_menu
self.build_virtual_library_list(a, self.do_create_edit)
m.addMenu(a)
self.rm_menu = a = QMenu()
a.setTitle(_('Remove Virtual Library'))
a.aboutToShow.connect(partial(self.build_virtual_library_list, remove=True))
a = self.rm_menu
self.build_virtual_library_list(a, self.remove_vl_triggered)
m.addMenu(a)
m.addSeparator()
@ -356,7 +357,7 @@ class SearchRestrictionMixin(object):
a = self.ar_menu
a.clear()
a.setIcon(self.checked if db.data.get_search_restriction_name() else self.empty)
a.aboutToShow.connect(self.build_search_restriction_list)
self.build_search_restriction_list()
m.addMenu(a)
m.addSeparator()
@ -426,24 +427,24 @@ class SearchRestrictionMixin(object):
self._apply_search_restriction(db.data.get_search_restriction(),
db.data.get_search_restriction_name())
def build_virtual_library_list(self, remove=False):
def build_virtual_library_list(self, menu, handler):
db = self.library_view.model().db
virt_libs = db.prefs.get('virtual_libraries', {})
if remove:
m = self.rm_menu
else:
m = self.edit_menu
m.clear()
menu.clear()
menu.setIcon(self.empty)
def add_action(name, search):
a = m.addAction(name)
if remove:
a.triggered.connect(partial(self.remove_vl_triggered, name=name))
else:
a.triggered.connect(partial(self.do_create_edit, editing=name))
a = menu.addAction(name)
a.triggered.connect(partial(handler, name=name))
a.setIcon(self.empty)
for n in sorted(virt_libs.keys(), key=sort_key):
add_action(n, virt_libs[n])
libs = sorted(virt_libs.keys(), key=sort_key)
if libs:
menu.setEnabled(True)
for n in libs:
add_action(n, virt_libs[n])
else:
menu.setEnabled(False)
def remove_vl_triggered(self, name=None):
if not question_dialog(self, _('Are you sure?'),

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
store_version = 1 # Needed for dynamic plugin loading
store_version = 2 # Needed for dynamic plugin loading
__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
@@ -25,25 +25,7 @@ from calibre.gui2.store.web_store_dialog import WebStoreDialog
class GoogleBooksStore(BasicStoreConfig, StorePlugin):
def open(self, parent=None, detail_item=None, external=False):
aff_id = {
'lid': '41000000033185143',
'pubid': '21000000000352219',
'ganpub': 'k352219',
'ganclk': 'GOOG_1335334761',
}
# Use Kovid's affiliate id 30% of the time.
if random.randint(1, 10) in (1, 2, 3):
aff_id = {
'lid': '41000000031855266',
'pubid': '21000000000352583',
'ganpub': 'k352583',
'ganclk': 'GOOG_1335335464',
}
url = 'http://gan.doubleclick.net/gan_click?lid=%(lid)s&pubid=%(pubid)s' % aff_id
if detail_item:
detail_item += '&ganpub=%(ganpub)s&ganclk=%(ganclk)s' % aff_id
url = 'http://books.google.com/books'
if external or self.config.get('open_external', False):
open_url(QUrl(url_slash_cleaner(detail_item if detail_item else url)))
else:

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
store_version = 1 # Needed for dynamic plugin loading
store_version = 2 # Needed for dynamic plugin loading
__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
@@ -31,10 +31,10 @@ class KoboStore(BasicStoreConfig, StorePlugin):
if random.randint(1, 10) in (1, 2, 3):
pub_id = '0dsO3kDu/AU'
murl = 'http://click.linksynergy.com/fs-bin/click?id=%s&offerid=268429.4&type=3&subid=0' % pub_id
murl = 'http://click.linksynergy.com/fs-bin/click?id=%s&subid=&offerid=280046.1&type=10&tmpid=9310&RD_PARM1=http%%3A%%2F%%2Fkobo.com' % pub_id
if detail_item:
purl = 'http://click.linksynergy.com/link?id=%s&offerid=268429&type=2&murl=%s' % (pub_id, urllib.quote_plus(detail_item))
purl = 'http://click.linksynergy.com/link?id=%s&offerid=280046&type=2&murl=%s' % (pub_id, urllib.quote_plus(detail_item))
url = purl
else:
purl = None

View File

@@ -15,16 +15,17 @@ from calibre.gui2 import choose_save_file, gprefs
class ImageView(QDialog):
def __init__(self, parent, current_img, current_url):
def __init__(self, parent, current_img, current_url, geom_name='viewer_image_popup_geometry'):
QDialog.__init__(self)
dw = QApplication.instance().desktop()
self.avail_geom = dw.availableGeometry(parent)
self.current_img = current_img
self.current_url = current_url
self.factor = 1.0
self.geom_name = geom_name
self.label = l = QLabel()
l.setBackgroundRole(QPalette.Base);
l.setBackgroundRole(QPalette.Base)
l.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
l.setScaledContents(True)
@@ -88,21 +89,27 @@ class ImageView(QDialog):
self.label.setPixmap(pm)
self.label.adjustSize()
def __call__(self):
def __call__(self, use_exec=False):
geom = self.avail_geom
self.label.setPixmap(self.current_img)
self.label.adjustSize()
self.resize(QSize(int(geom.width()/2.5), geom.height()-50))
geom = gprefs.get('viewer_image_popup_geometry', None)
geom = gprefs.get(self.geom_name, None)
if geom is not None:
self.restoreGeometry(geom)
self.current_image_name = unicode(self.current_url.toString()).rpartition('/')[-1]
try:
self.current_image_name = unicode(self.current_url.toString()).rpartition('/')[-1]
except AttributeError:
self.current_image_name = self.current_url
title = _('View Image: %s')%self.current_image_name
self.setWindowTitle(title)
self.show()
if use_exec:
self.exec_()
else:
self.show()
def done(self, e):
gprefs['viewer_image_popup_geometry'] = bytearray(self.saveGeometry())
gprefs[self.geom_name] = bytearray(self.saveGeometry())
return QDialog.done(self, e)
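# Editor's illustration (not part of this commit): with the new keyword
# arguments a caller can keep a separate saved geometry and run the popup
# modally; the geom_name value here is a made-up example:
#   d = ImageView(parent, pixmap, url, geom_name='book_details_image_popup')
#   d(use_exec=True)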
def wheelEvent(self, event):

View File

@@ -14,7 +14,7 @@ from threading import Thread
from calibre.utils.config import tweaks, prefs
from calibre.utils.date import parse_date, now, UNDEFINED_DATE, clean_date_for_sort
from calibre.utils.search_query_parser import SearchQueryParser
from calibre.utils.pyparsing import ParseException
from calibre.utils.search_query_parser import ParseException
from calibre.utils.localization import (canonicalize_lang, lang_map, get_udc)
from calibre.db.search import CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH, _match
from calibre.ebooks.metadata import title_sort, author_to_author_sort
@@ -366,25 +366,18 @@ class ResultCache(SearchQueryParser): # {{{
elif query in self.local_thismonth:
qd = now()
field_count = 2
elif query.endswith(self.local_daysago):
elif query.endswith(self.local_daysago) or query.endswith(self.untrans_daysago):
num = query[0:-self.local_daysago_len]
try:
qd = now() - timedelta(int(num))
except:
raise ParseException(query, len(query), 'Number conversion error', self)
field_count = 3
elif query.endswith(self.untrans_daysago):
num = query[0:-self.untrans_daysago_len]
try:
qd = now() - timedelta(int(num))
except:
raise ParseException(query, len(query), 'Number conversion error', self)
raise ParseException(_('Number conversion error: {0}').format(num))
field_count = 3
else:
try:
qd = parse_date(query, as_utc=False)
except:
raise ParseException(query, len(query), 'Date conversion error', self)
raise ParseException(_('Date conversion error: {0}').format(query))
if '-' in query:
field_count = query.count('-') + 1
else:
@@ -460,8 +453,7 @@ class ResultCache(SearchQueryParser): # {{{
try:
q = cast(query) * mult
except:
raise ParseException(query, len(query),
'Non-numeric value in query', self)
raise ParseException(_('Non-numeric value in query: {0}').format(query))
for id_ in candidates:
item = self._data[id_]
@@ -505,8 +497,8 @@ class ResultCache(SearchQueryParser): # {{{
if query.find(':') >= 0:
q = [q.strip() for q in query.split(':')]
if len(q) != 2:
raise ParseException(query, len(query),
'Invalid query format for colon-separated search', self)
raise ParseException(
_('Invalid query format for colon-separated search: {0}').format(query))
(keyq, valq) = q
keyq_mkind, keyq = self._matchkind(keyq)
valq_mkind, valq = self._matchkind(valq)
@@ -655,7 +647,7 @@ class ResultCache(SearchQueryParser): # {{{
if invert:
matches = self.universal_set() - matches
return matches
raise ParseException(query, len(query), 'Recursive query group detected', self)
raise ParseException(_('Recursive query group detected: {0}').format(query))
# apply the limit if appropriate
if location == 'all' and prefs['limit_search_columns'] and \

File diff suppressed because it is too large

View File

@@ -16,11 +16,8 @@ methods :method:`SearchQueryParser.universal_set` and
If this module is run, it will perform a series of unit tests.
'''
import sys, operator, weakref
import sys, operator, weakref, re
from calibre.utils.pyparsing import (CaselessKeyword, Group, Forward,
CharsNotIn, Suppress, OneOrMore, MatchFirst, CaselessLiteral,
Optional, NoMatch, ParseException, QuotedString)
from calibre.constants import preferred_encoding
from calibre.utils.icu import sort_key
from calibre import prints
@@ -96,6 +93,151 @@ def saved_searches():
global ss
return ss
'''
Parse a search expression into a series of potentially recursive operations.
Note that the interpreter wants binary operators, not n-ary ops. This is why we
recurse instead of iterating when building sequences of the same op.
The syntax is more than a bit twisted. In particular, the handling of colons
in the base token requires semantic analysis.
Also note that the query string is lowercased before analysis. This is OK because
calibre's searches are all case-insensitive.
Grammar:
prog ::= or_expression
or_expression ::= and_expression [ 'or' or_expression ]
and_expression ::= not_expression [ [ 'and' ] and_expression ]
not_expression ::= [ 'not' ] location_expression
location_expression ::= base_token | ( '(' or_expression ')' )
base_token ::= a sequence of letters and colons, perhaps quoted
'''
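# Editor's illustration (not part of this commit): assuming 'tags' and 'author'
# are registered lookup locations, the Parser below is expected to turn a query
# into nested lists matching the grammar above, e.g.
#   Parser().parse(u'tags:fiction and not author:"jane doe"',
#                  ['all', 'author', 'tags'])
#   -> ['and', ['token', 'tags', 'fiction'],
#       ['not', ['token', 'author', 'jane doe']]]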
class Parser(object):
def __init__(self):
self.current_token = 0
self.tokens = None
OPCODE = 1
WORD = 2
QUOTED_WORD = 3
EOF = 4
# Had to translate named constants to numeric values because the class-level
# names (OPCODE, WORD, QUOTED_WORD) are not visible from inside these lambdas
lex_scanner = re.Scanner([
(r'[()]', lambda x,t: (1, t)),
(r'[^ "()]+', lambda x,t: (2, unicode(t))),
(r'".*?((?<!\\)")', lambda x,t: (3, t[1:-1])),
(r'\s', None)
], flags=re.DOTALL)
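# Editor's illustration (not part of this commit): on an already-lowercased
# query the scanner is assumed to yield (token_type, text) pairs, e.g.
#   Parser.lex_scanner.scan(u'author:"john smith" or (tags:fiction)')[0]
#   -> [(2, u'author:'), (3, u'john smith'), (2, u'or'),
#       (1, u'('), (2, u'tags:fiction'), (1, u')')]
# with 1 = OPCODE (parentheses), 2 = WORD and 3 = QUOTED_WORD.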
def token(self, advance=False):
if self.is_eof():
return None
res = self.tokens[self.current_token][1]
if advance:
self.current_token += 1
return res
def token_type(self):
if self.is_eof():
return self.EOF
return self.tokens[self.current_token][0]
def is_eof(self):
return self.current_token >= len(self.tokens)
def advance(self):
self.current_token += 1
def parse(self, expr, locations):
self.locations = locations
self.tokens = self.lex_scanner.scan(icu_lower(expr))[0]
self.current_token = 0
prog = self.or_expression()
if not self.is_eof():
raise ParseException(_('Extra characters at end of search'))
#prints(self.tokens, '\n', prog)
return prog
def or_expression(self):
lhs = self.and_expression()
if self.token() == 'or':
self.advance()
return ['or', lhs, self.or_expression()]
return lhs
def and_expression(self):
lhs = self.not_expression()
if self.token() == 'and':
self.advance()
return ['and', lhs, self.and_expression()]
# Account for the optional 'and'
if self.token_type() in [self.WORD, self.QUOTED_WORD] and self.token() != 'or':
return ['and', lhs, self.and_expression()]
return lhs
def not_expression(self):
if self.token() == 'not':
self.advance()
return ['not', self.not_expression()]
return self.location_expression()
def location_expression(self):
if self.token() == '(':
self.advance()
res = self.or_expression()
if self.token(advance=True) != ')':
raise ParseException(_('missing )'))
return res
if self.token_type() not in [ self.WORD, self.QUOTED_WORD ]:
raise ParseException(_('Invalid syntax. Expected a lookup name or a word'))
return self.base_token()
def base_token(self):
if self.token_type() == self.QUOTED_WORD:
return ['token', 'all', self.token(advance=True)]
words = self.token(advance=True).split(':')
# The complexity here comes from having colon-separated search
# values. That forces us to check that the first "word" in a colon-
# separated group is a valid location. If not, then the token must
# be reconstructed. We also have the problem that locations can be
# followed by quoted strings that appear as the next token, and that
# tokens can be a sequence of colons.
# We have a location if there is more than one word and the first
# word is in locations. This check could produce a "wrong" answer if
# the search string is something like 'author: "foo"' because it
# will be interpreted as 'author:"foo"'. I am choosing to accept the
# possible error. The expression should be written '"author:" foo'
if len(words) > 1 and words[0] in self.locations:
loc = words[0]
words = words[1:]
if len(words) == 1 and self.token_type() == self.QUOTED_WORD:
return ['token', loc, self.token(advance=True)]
return ['token', loc, ':'.join(words)]
return ['token', 'all', ':'.join(words)]
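# Editor's illustration (not part of this commit), assuming 'author' is a
# registered location:
#   author:"foo"    -> ['token', 'author', 'foo']
#   author: "foo"   -> ['token', 'author', 'foo']   (same tokens, as noted above)
#   "author:" foo   -> ['and', ['token', 'all', 'author:'],
#                       ['token', 'all', 'foo']]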
class ParseException(Exception):
@property
def msg(self):
if len(self.args) > 0:
return self.args[0]
return ""
class SearchQueryParser(object):
'''
Parses a search query.
@@ -134,70 +276,15 @@ class SearchQueryParser(object):
def __init__(self, locations, test=False, optimize=False):
self.sqp_initialize(locations, test=test, optimize=optimize)
self.parser = Parser()
def sqp_change_locations(self, locations):
self.sqp_initialize(locations, optimize=self.optimize)
def sqp_initialize(self, locations, test=False, optimize=False):
self.locations = locations
self._tests_failed = False
self.optimize = optimize
# Define a token
standard_locations = map(lambda x : CaselessLiteral(x)+Suppress(':'),
locations)
location = NoMatch()
for l in standard_locations:
location |= l
location = Optional(location, default='all')
word_query = CharsNotIn(u'\t\r\n\u00a0 ' + u'()')
#quoted_query = Suppress('"')+CharsNotIn('"')+Suppress('"')
quoted_query = QuotedString('"', escChar='\\')
query = quoted_query | word_query
Token = Group(location + query).setResultsName('token')
if test:
print 'Testing Token parser:'
Token.validate()
failed = SearchQueryParser.run_tests(Token, 'token',
(
('tag:asd', ['tag', 'asd']),
(u'ddsä', ['all', u'ddsä']),
('"one \\"two"', ['all', 'one "two']),
('title:"one \\"1.5\\" two"', ['title', 'one "1.5" two']),
('title:abc"def', ['title', 'abc"def']),
)
)
Or = Forward()
Parenthesis = Group(
Suppress('(') + Or + Suppress(')')
).setResultsName('parenthesis') | Token
Not = Forward()
Not << (Group(
Suppress(CaselessKeyword("not")) + Not
).setResultsName("not") | Parenthesis)
And = Forward()
And << (Group(
Not + Suppress(CaselessKeyword("and")) + And
).setResultsName("and") | Group(
Not + OneOrMore(~MatchFirst(list(map(CaselessKeyword,
('and', 'or')))) + And)
).setResultsName("and") | Not)
Or << (Group(
And + Suppress(CaselessKeyword("or")) + Or
).setResultsName("or") | And)
if test:
#Or.validate()
self._tests_failed = bool(failed)
self._parser = Or
self._parser.setDebug(False)
def parse(self, query):
# empty the list of searches used for recursion testing
@@ -213,10 +300,9 @@ class SearchQueryParser(object):
def _parse(self, query, candidates=None):
self.recurse_level += 1
try:
res = self._parser.parseString(query)[0]
res = self.parser.parse(query, self.locations)
except RuntimeError:
import repr
raise ParseException('Failed to parse query, recursion limit reached: %s'%repr(query))
raise ParseException(_('Failed to parse query, recursion limit reached: %s')%repr(query))
if candidates is None:
candidates = self.universal_set()
t = self.evaluate(res, candidates)
@@ -227,7 +313,7 @@ class SearchQueryParser(object):
return getattr(self, 'evaluate_'+group_name)
def evaluate(self, parse_result, candidates):
return self.method(parse_result.getName())(parse_result, candidates)
return self.method(parse_result[0])(parse_result[1:], candidates)
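# Editor's illustration (not part of this commit): with the list-based parse
# trees, ['token', 'all', 'foo'] is assumed to dispatch to
# evaluate_token(['all', 'foo'], candidates) and ['and', lhs, rhs] to
# evaluate_and([lhs, rhs], candidates).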
def evaluate_and(self, argument, candidates):
# RHS checks only those items matched by LHS
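# Editor's note (not part of this commit): the body is cut from this hunk, but
# for ['and', lhs, rhs] the right-hand side is assumed to be evaluated only
# against the set already matched by the left-hand side, keeping the candidate
# sets small.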
@@ -249,8 +335,8 @@ class SearchQueryParser(object):
# return self.universal_set().difference(self.evaluate(argument[0]))
return candidates.difference(self.evaluate(argument[0], candidates))
def evaluate_parenthesis(self, argument, candidates):
return self.evaluate(argument[0], candidates)
# def evaluate_parenthesis(self, argument, candidates):
# return self.evaluate(argument[0], candidates)
def evaluate_token(self, argument, candidates):
location = argument[0]
@@ -260,12 +346,16 @@ class SearchQueryParser(object):
query = query[1:]
try:
if query in self.searches_seen:
raise ParseException(query, len(query), 'undefined saved search', self)
raise ParseException(_('Recursive saved search: {0}').format(query))
if self.recurse_level > 5:
self.searches_seen.add(query)
return self._parse(saved_searches().lookup(query), candidates)
except ParseException as e:
raise e
except: # convert all exceptions (e.g., missing key) to a parse error
raise ParseException(query, len(query), 'undefined saved search', self)
import traceback
traceback.print_exc()
raise ParseException(_('Unknown error in saved search: {0}').format(query))
return self._get_matches(location, query, candidates)
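# Editor's note (not part of this commit): a saved search that refers back to
# itself, e.g. one named 'loop' stored as 'search:loop', is assumed to be
# caught here: once recurse_level passes 5 the name enters searches_seen and
# the next lookup raises
# ParseException(_('Recursive saved search: {0}').format(query)).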
def _get_matches(self, location, query, candidates):

View File

@@ -19,7 +19,7 @@ from calibre.web.feeds.recipes.collection import \
SchedulerConfig, download_builtin_recipe, update_custom_recipe, \
add_custom_recipe, remove_custom_recipe, get_custom_recipe, \
get_builtin_recipe
from calibre.utils.pyparsing import ParseException
from calibre.utils.search_query_parser import ParseException
class NewsTreeItem(object):