Mirror of https://github.com/kovidgoyal/calibre.git

Commit e8b0c0e674: Merge from trunk
@@ -19,6 +19,81 @@
 # new recipes:
 #   - title:

+- version: 0.9.15
+  date: 2013-01-18
+
+  new features:
+    - title: "Linux MTP driver: Detect devices that have MTP interfaces even if their USB ids are not known"
+
+    - title: "Content server: Allow picking a random book by clicking the 'Random book' link on the start page. You can also refresh the random book page to get a new random book"
+
+    - title: "E-book viewer: Add an option to hide the toolbars in the viewer window (Preferences->Miscellaneous->Show controls in the viewer preferences). You can unhide them by right clicking in the viewer window."
+
+    - title: "Kobo driver: Speedup initial connect by avoiding unnecessary update of series metadata in some situations."
+      tickets: [1099190]
+
+    - title: "Get Books: Allow the store plugins to be dynamically loaded so that future website changes of a store don't require a calibre update to fix Get Books."
+
+    - title: "Wireless driver: Always replace the file when resending a previously sent book to the device, even if the title/author have changed."
+
+    - title: "Add PocketBook Pro 912 driver."
+      tickets: [1099571]
+
+    - title: "When creating/exporting epub and mobi files, add the calibre book identifier as a special field in the book's metadata. This allows third party tools to identify the book record in calibre to which the file belongs."
+
+    - title: "Wireless driver: Add support for using the book uuid as the filename"
+
+    - title: "Remove the experimental tag from the subset fonts feature, since there has been only one reported problem (now fixed) with it in the two months since it was released"
+
+  bug fixes:
+    - title: "Get Books: Update the amazon, waterstones and libri.de plugins to account for website changes"
+
+    - title: "MOBI Input: Do not choke on MOBI files with incorrectly encoded titles."
+      tickets: [1100601]
+
+    - title: "Font subsetting: Fix a bug in the parsing of the GSUB table that could cause some ligatures to not be included in the subset font"
+
+    - title: "E-book viewer: Fix TOC links without anchors not scrolling to the top of the current flow"
+
+    - title: "LIT Input: Handle lit files that set an incorrect XML mimetype for their text."
+      tickets: [1099621]
+
+    - title: "Catalogs: Fix 'X' being dropped from ISBNs on export"
+      tickets: [1098325]
+
+    - title: "Fix an error when editing a date in the main book list while all visible dates are blank."
+      tickets: [1098675]
+
+    - title: "Fix calibre-smtp using incorrect escaping for non-ascii attachment filenames"
+      tickets: [1098478]
+
+    - title: "Conversion: When subsetting fonts, handle multiple @font-face rules referring to the same physical font"
+
+    - title: "Content server: Update metadata when serving azw3 files"
+
+    - title: "CHM Input: Handle chm files that contain files with URL-unsafe filenames."
+      tickets: [1100610]
+
+    - title: "Content server: Fix custom icons for top level categories being incorrect."
+      tickets: [1095016]
+
+    - title: "Kobo driver: When resending a file to the device, update the filesize in the Kobo db to prevent the device from deleting the file."
+      tickets: [1100607]
+
+  improved recipes:
+    - The Chronicle of Higher Education
+    - Smithsonian Magazine
+    - Philosophy Now
+    - The Economist
+    - Business Week Magazine
+
+  new recipes:
+    - title: Asco de Vida
+      author: Krittika Goyal
+
+    - title: Schattenblick
+      author: ThB
+
 - version: 0.9.14
   date: 2013-01-11
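The "calibre book identifier" entry above means a generated EPUB or MOBI now carries the id of its library record in the book's metadata, so external tools can map a file back to the calibre record it came from. A minimal sketch of how a third-party tool might read it from an EPUB, assuming the id is written as a dc:identifier element whose scheme mentions calibre (the exact attribute spelling should be verified against a file produced by this calibre version):

    import re, zipfile

    def calibre_id_from_epub(path):
        # An EPUB is a zip archive; META-INF/container.xml names the OPF package file.
        with zipfile.ZipFile(path) as zf:
            container = zf.read('META-INF/container.xml').decode('utf-8')
            opf_name = re.search(r'full-path="([^"]+)"', container).group(1)
            opf = zf.read(opf_name).decode('utf-8')
        # Hypothetical pattern: an identifier element tagged with a calibre scheme.
        m = re.search(r'<dc:identifier[^>]*calibre[^>]*>([^<]*)</dc:identifier>', opf)
        return m.group(1) if m else None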
recipes/asco_de_vida.recipe (new file, 20 lines)
@@ -0,0 +1,20 @@
from calibre.web.feeds.news import BasicNewsRecipe

class HindustanTimes(BasicNewsRecipe):
    title = u'Asco de vida'
    language = 'es'
    __author__ = 'Krittika Goyal'
    oldest_article = 1  # days
    max_articles_per_feed = 25
    #encoding = 'cp1252'
    use_embedded_content = False

    no_stylesheets = True
    keep_only_tags = dict(name='div', attrs={'class':'box story'})

    feeds = [
        ('News',
         'http://feeds2.feedburner.com/AscoDeVida'),
    ]
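A recipe like this is an ordinary Python file, so it can be exercised without installing it by handing it straight to ebook-convert; the --test flag limits the download to a couple of articles per feed:

    ebook-convert recipes/asco_de_vida.recipe test.epub --test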
@@ -28,6 +28,8 @@ class Barrons(BasicNewsRecipe):

     ## Don't grab articles more than 7 days old
     oldest_article = 7
+    use_javascript_to_login = True
+    requires_version = (0, 9, 16)

     extra_css = '''
                 .datestamp{font-family:Verdana,Geneva,Kalimati,sans-serif; font-size:x-small;}
@@ -40,7 +42,7 @@ class Barrons(BasicNewsRecipe):
                 .insettipUnit{font-size: x-small;}
               '''
     remove_tags = [
-                    dict(name ='div', attrs={'class':['tabContainer artTabbedNav','rssToolBox hidden','articleToolbox']}),
+                    dict(name ='div', attrs={'class':['sTools sTools-t', 'tabContainer artTabbedNav','rssToolBox hidden','articleToolbox']}),
                     dict(name = 'a', attrs ={'class':'insetClose'})
                   ]
@@ -60,21 +62,17 @@ class Barrons(BasicNewsRecipe):
              ]
         ]

-    def get_browser(self):
-        br = BasicNewsRecipe.get_browser()
-        if self.username is not None and self.password is not None:
-            br.open('http://commerce.barrons.com/auth/login')
-            br.select_form(name='login_form')
-            br['user'] = self.username
-            br['password'] = self.password
-            br.submit()
-        return br
+    def javascript_login(self, br, username, password):
+        br.visit('http://commerce.barrons.com/auth/login')
+        f = br.select_form(nr=0)
+        f['username'] = username
+        f['password'] = password
+        br.submit(timeout=120)

     ## Use the print version of a page when available.
     def print_version(self, url):
         main, sep, rest = url.rpartition('?')
-        return main + '#printmode'
+        return main + '#text.print'

     def postprocess_html(self, soup, first):
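The Barron's change above is the recipe-side half of the new dynamic-login machinery in this release: with use_javascript_to_login set, calibre appears to hand the recipe a JavaScript-capable browser instead of the old mechanize one, and calls javascript_login() with the credentials. A minimal skeleton modeled directly on the diff (class name and URL are placeholders):

    from calibre.web.feeds.news import BasicNewsRecipe

    class SomeJSLoginRecipe(BasicNewsRecipe):
        use_javascript_to_login = True   # opt in to the JS-browser login path
        requires_version = (0, 9, 16)    # the hook lands in calibre >= 0.9.16

        def javascript_login(self, br, username, password):
            # br is a JavaScript-capable browser, not a mechanize.Browser
            br.visit('http://example.com/auth/login')  # placeholder URL
            f = br.select_form(nr=0)                   # first form on the page
            f['username'] = username
            f['password'] = password
            br.submit(timeout=120)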
@@ -38,7 +38,7 @@ class BusinessWeekMagazine(BasicNewsRecipe):
             title=self.tag_to_string(div.a).strip()
             url=div.a['href']
             soup0 = self.index_to_soup(url)
-            urlprint=soup0.find('li', attrs={'class':'print'}).a['href']
+            urlprint=soup0.find('li', attrs={'class':'print tracked'}).a['href']
             articles.append({'title':title, 'url':urlprint, 'description':'', 'date':''})

@@ -55,7 +55,7 @@ class BusinessWeekMagazine(BasicNewsRecipe):
             title=self.tag_to_string(div.a).strip()
             url=div.a['href']
             soup0 = self.index_to_soup(url)
-            urlprint=soup0.find('li', attrs={'class':'print'}).a['href']
+            urlprint=soup0.find('li', attrs={'class':'print tracked'}).a['href']
             articles.append({'title':title, 'url':urlprint, 'description':desc, 'date':''})

         if articles:
recipes/conowego_pl.recipe (mode change: Executable file → Normal file)
@@ -41,10 +41,11 @@ class Economist(BasicNewsRecipe):
     remove_tags = [
             dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent']),
             dict(attrs={'class':['dblClkTrk', 'ec-article-info',
-                'share_inline_header', 'related-items']}),
+                'share_inline_header', 'related-items',
+                'main-content-container']}),
             {'class': lambda x: x and 'share-links-header' in x},
     ]
-    keep_only_tags = [dict(id='ec-article-body')]
+    keep_only_tags = [dict(name='article')]
     no_stylesheets = True
     preprocess_regexps = [(re.compile('</html>.*', re.DOTALL),
         lambda x:'</html>')]
@@ -41,10 +41,11 @@ class Economist(BasicNewsRecipe):
     remove_tags = [
             dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent']),
             dict(attrs={'class':['dblClkTrk', 'ec-article-info',
-                'share_inline_header', 'related-items']}),
+                'share_inline_header', 'related-items',
+                'main-content-container']}),
             {'class': lambda x: x and 'share-links-header' in x},
     ]
-    keep_only_tags = [dict(id='ec-article-body')]
+    keep_only_tags = [dict(name='article')]
     no_stylesheets = True
     preprocess_regexps = [(re.compile('</html>.*', re.DOTALL),
         lambda x:'</html>')]
recipes/linux_journal.recipe (mode change: Executable file → Normal file)
@@ -8,13 +8,16 @@ class AdvancedUserRecipe1306097511(BasicNewsRecipe):
     title = u'Metro UK'
     description = 'News as provided by The Metro -UK'
     #timefmt = ''
-    __author__ = 'Dave Asbury'
-    #last update 9/6/12
-    cover_url = 'http://profile.ak.fbcdn.net/hprofile-ak-snc4/276636_117118184990145_2132092232_n.jpg'
     oldest_article = 1
+    __author__ = 'fleclerc & Dave Asbury'
+    #last update 20/1/13
+    #cover_url = 'http://profile.ak.fbcdn.net/hprofile-ak-snc4/276636_117118184990145_2132092232_n.jpg'
+
+    cover_url = 'https://twimg0-a.akamaihd.net/profile_images/1638332595/METRO_LETTERS-01.jpg'
     remove_empty_feeds = True
     remove_javascript = True
     auto_cleanup = True
     max_articles_per_feed = 12
     ignore_duplicate_articles = {'title', 'url'}
     encoding = 'UTF-8'

     language = 'en_GB'
@@ -18,6 +18,8 @@ class MichelleMalkin(BasicNewsRecipe):

     remove_javascript = True
     no_stylesheets = True
+    auto_cleanup = True
+    use_embedded_content = False

     conversion_options = {
@@ -29,16 +31,16 @@ class MichelleMalkin(BasicNewsRecipe):
     }

-    keep_only_tags = [
-        dict(name='div', attrs={'class':'article'})
-    ]
+    #keep_only_tags = [
+    #    dict(name='div', attrs={'class':'article'})
+    #]

-    remove_tags = [
-        dict(name=['iframe', 'embed', 'object']),
-        dict(name='div', attrs={'id':['comments', 'commentForm']}),
-        dict(name='div', attrs={'class':['postCategories', 'comments', 'blogInfo', 'postInfo']})
-    ]
+    #remove_tags = [
+    #    dict(name=['iframe', 'embed', 'object']),
+    #    dict(name='div', attrs={'id':['comments', 'commentForm']}),
+    #    dict(name='div', attrs={'class':['postCategories', 'comments', 'blogInfo', 'postInfo']})
+    #]

     feeds = [(u'http://feeds.feedburner.com/michellemalkin/posts')]
@@ -124,19 +124,19 @@ class NYTimes(BasicNewsRecipe):
     if headlinesOnly:
         title='New York Times Headlines'
         description = 'Headlines from the New York Times'
-        needs_subscription = True
+        needs_subscription = 'optional'
     elif webEdition:
         title='New York Times (Web)'
         description = 'New York Times on the Web'
-        needs_subscription = True
+        needs_subscription = 'optional'
     elif replaceKindleVersion:
         title='The New York Times'
         description = 'Today\'s New York Times'
-        needs_subscription = True
+        needs_subscription = 'optional'
     else:
         title='New York Times'
         description = 'Today\'s New York Times'
-        needs_subscription = True
+        needs_subscription = 'optional'

     def decode_url_date(self,url):
         urlitems = url.split('/')
@@ -7,12 +7,16 @@ class AdvancedUserRecipe1282093204(BasicNewsRecipe):

     oldest_article = 1
     max_articles_per_feed = 15
     use_embedded_content = False
+
+    no_stylesheets = True
+    auto_cleanup = True
+
     masthead_url = 'http://farm5.static.flickr.com/4118/4929686950_0e22e2c88a.jpg'

     feeds = [
         (u'News-Bill McClellan', u'http://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=news%2Flocal%2fcolumns%2Fbill-mclellan&f=rss&t=article'),
         (u'News-Columns', u'http://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=news%2Flocal%2Fcolumns*&l=50&f=rss&t=article'),
-        (u'News-Crime & Courtshttp://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=news%2Flocal%2Fcrime-and-courts&l=50&f=rss&t=article'),
+        (u'News-Crime & Courts', 'http://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=news%2Flocal%2Fcrime-and-courts&l=50&f=rss&t=article'),
         (u'News-Deb Peterson', u'http://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=news%2Flocal%2fcolumns%2Fdeb-peterson&f=rss&t=article'),
         (u'News-Education', u'http://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=news%2Flocal%2feducation&f=rss&t=article'),
         (u'News-Government & Politics', u'http://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=news%2Flocal%2fgovt-and-politics&f=rss&t=article'),
@@ -62,9 +66,9 @@ class AdvancedUserRecipe1282093204(BasicNewsRecipe):
         (u'Entertainment-House-O-Fun', u'http://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=entertainment%2Fhouse-o-fun&l=100&f=rss&t=article'),
         (u'Entertainment-Kevin C. Johnson', u'http://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=entertainment%2Fmusic%2Fkevin-johnson&l=100&f=rss&t=article')
     ]
-    remove_empty_feeds = True
-    remove_tags = [dict(name='div', attrs={'id':'blox-logo'}),dict(name='a')]
-    keep_only_tags = [dict(name='h1'), dict(name='p', attrs={'class':'byline'}), dict(name="div", attrs={'id':'blox-story-text'})]
+    #remove_empty_feeds = True
+    #remove_tags = [dict(name='div', attrs={'id':'blox-logo'}),dict(name='a')]
+    #keep_only_tags = [dict(name='h1'), dict(name='p', attrs={'class':'byline'}), dict(name="div", attrs={'id':'blox-story-text'})]
     extra_css = 'p {text-align: left;}'
@@ -7,28 +7,15 @@ class AdvancedUserRecipe1289990851(BasicNewsRecipe):
     language = 'en_CA'
     __author__ = 'Nexus'
     no_stylesheets = True
+    auto_cleanup = True
     use_embedded_content = False
-    INDEX = 'http://tsn.ca/nhl/story/?id=nhl'
-    keep_only_tags = [dict(name='div', attrs={'id':['tsnColWrap']}),
-                      dict(name='div', attrs={'id':['tsnStory']})]
-    remove_tags = [dict(name='div', attrs={'id':'tsnRelated'}),
-                   dict(name='div', attrs={'class':'textSize'})]
-
-    def parse_index(self):
-        feeds = []
-        soup = self.index_to_soup(self.INDEX)
-        feed_parts = soup.findAll('div', attrs={'class': 'feature'})
-        for feed_part in feed_parts:
-            articles = []
-            if not feed_part.h2:
-                continue
-            feed_title = feed_part.h2.string
-            article_parts = feed_part.findAll('a')
-            for article_part in article_parts:
-                article_title = article_part.string
-                article_date = ''
-                article_url = 'http://tsn.ca/' + article_part['href']
-                articles.append({'title': article_title, 'url': article_url, 'description':'', 'date':article_date})
-            if articles:
-                feeds.append((feed_title, articles))
-        return feeds
+    #keep_only_tags = [dict(name='div', attrs={'id':['tsnColWrap']}),
+    #                  dict(name='div', attrs={'id':['tsnStory']})]
+    #remove_tags = [dict(name='div', attrs={'id':'tsnRelated'}),
+    #               dict(name='div', attrs={'class':'textSize'})]

+    feeds = [
+        ('News',
+         'http://www.tsn.ca/datafiles/rss/Stories.xml'),
+    ]
@@ -9,14 +9,14 @@ msgstr ""
 "Project-Id-Version: calibre\n"
 "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
 "POT-Creation-Date: 2011-11-25 14:01+0000\n"
-"PO-Revision-Date: 2012-12-28 09:13+0000\n"
+"PO-Revision-Date: 2013-01-12 08:34+0000\n"
 "Last-Translator: Jellby <Unknown>\n"
 "Language-Team: Español; Castellano <>\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2012-12-29 05:00+0000\n"
-"X-Generator: Launchpad (build 16378)\n"
+"X-Launchpad-Export-Date: 2013-01-13 04:37+0000\n"
+"X-Generator: Launchpad (build 16420)\n"

 #. name for aaa
 msgid "Ghotuo"
@@ -9652,7 +9652,7 @@ msgstr "Haruku"

 #. name for hrm
 msgid "Miao; Horned"
-msgstr ""
+msgstr "Miao blanco"

 #. name for hro
 msgid "Haroi"
@@ -9756,7 +9756,7 @@ msgstr ""

 #. name for huj
 msgid "Miao; Northern Guiyang"
-msgstr ""
+msgstr "Miao de Guiyang septentrional"

 #. name for huk
 msgid "Hulung"
@@ -16280,7 +16280,7 @@ msgstr ""

 #. name for mmr
 msgid "Miao; Western Xiangxi"
-msgstr ""
+msgstr "Miao de Xiangxi occidental"

 #. name for mmt
 msgid "Malalamai"
@@ -17064,7 +17064,7 @@ msgstr ""

 #. name for muq
 msgid "Miao; Eastern Xiangxi"
-msgstr ""
+msgstr "Miao de Xiangxi oriental"

 #. name for mur
 msgid "Murle"
@@ -22836,7 +22836,7 @@ msgstr ""

 #. name for sfm
 msgid "Miao; Small Flowery"
-msgstr ""
+msgstr "Pequeño miao florido"

 #. name for sfs
 msgid "South African Sign Language"
@@ -4,7 +4,7 @@ __license__ = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
 __docformat__ = 'restructuredtext en'
 __appname__ = u'calibre'
-numeric_version = (0, 9, 14)
+numeric_version = (0, 9, 15)
 __version__ = u'.'.join(map(unicode, numeric_version))
 __author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"
@@ -79,6 +79,42 @@ def debug():
     global DEBUG
     DEBUG = True

+_cache_dir = None
+
+def _get_cache_dir():
+    confcache = os.path.join(config_dir, u'caches')
+    if isportable:
+        return confcache
+    if os.environ.has_key('CALIBRE_CACHE_DIRECTORY'):
+        return os.path.abspath(os.environ['CALIBRE_CACHE_DIRECTORY'])
+
+    if iswindows:
+        w = plugins['winutil'][0]
+        candidate = os.path.join(w.special_folder_path(w.CSIDL_LOCAL_APPDATA), u'%s-cache'%__appname__)
+    elif isosx:
+        candidate = os.path.join(os.path.expanduser(u'~/Library/Caches'), __appname__)
+    else:
+        candidate = os.environ.get('XDG_CACHE_HOME', u'~/.cache')
+        candidate = os.path.join(os.path.expanduser(candidate),
+                                    __appname__)
+    if isinstance(candidate, bytes):
+        try:
+            candidate = candidate.decode(filesystem_encoding)
+        except ValueError:
+            candidate = confcache
+    if not os.path.exists(candidate):
+        try:
+            os.makedirs(candidate)
+        except:
+            candidate = confcache
+    return candidate
+
+def cache_dir():
+    global _cache_dir
+    if _cache_dir is None:
+        _cache_dir = _get_cache_dir()
+    return _cache_dir
+
 # plugins {{{

 class Plugins(collections.Mapping):
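The new cache_dir() memoizes one per-platform cache location (CSIDL_LOCAL_APPDATA on Windows, ~/Library/Caches on OS X, XDG_CACHE_HOME elsewhere), with the CALIBRE_CACHE_DIRECTORY environment variable as an override and the config-dir caches folder as the fallback. A hypothetical caller that keeps its own subdirectory there:

    import os
    from calibre.constants import cache_dir

    thumbs = os.path.join(cache_dir(), 'thumbnails')  # hypothetical subdirectory
    if not os.path.exists(thumbs):
        os.makedirs(thumbs)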
@@ -1413,7 +1413,6 @@ class StoreEmpikStore(StoreBase):

     headquarters = 'PL'
     formats = ['EPUB', 'MOBI', 'PDF']
-    affiliate = True

 class StoreEscapeMagazineStore(StoreBase):
     name = 'EscapeMagazine'
@@ -7,16 +7,19 @@ __license__ = 'GPL v3'
 __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
 __docformat__ = 'restructuredtext en'

-import os
+import os, traceback
 from collections import defaultdict
 from functools import wraps, partial

+from calibre.db.categories import get_categories
 from calibre.db.locking import create_locks, RecordLock
 from calibre.db.fields import create_field
+from calibre.db.search import Search
 from calibre.db.tables import VirtualTable
 from calibre.db.lazy import FormatMetadata, FormatsList
 from calibre.ebooks.metadata.book.base import Metadata
 from calibre.utils.date import now
+from calibre.utils.icu import sort_key

 def api(f):
     f.is_cache_api = True
@@ -50,6 +53,7 @@ class Cache(object):
         self.record_lock = RecordLock(self.read_lock)
         self.format_metadata_cache = defaultdict(dict)
         self.formatter_template_cache = {}
+        self._search_api = Search(self.field_metadata.get_search_terms())

         # Implement locking for all simple read/write API methods
         # An unlocked version of the method is stored with the name starting
@@ -65,6 +69,36 @@ class Cache(object):
             lock = self.read_lock if ira else self.write_lock
             setattr(self, name, wrap_simple(lock, func))

+        self.initialize_dynamic()
+
+    def initialize_dynamic(self):
+        # Reconstruct the user categories, putting them into field_metadata
+        # Assumption is that someone else will fix them if they change.
+        self.field_metadata.remove_dynamic_categories()
+        for user_cat in sorted(self.pref('user_categories', {}).iterkeys(), key=sort_key):
+            cat_name = '@' + user_cat  # add the '@' to avoid name collision
+            self.field_metadata.add_user_category(label=cat_name, name=user_cat)
+
+        # add grouped search term user categories
+        muc = frozenset(self.pref('grouped_search_make_user_categories', []))
+        for cat in sorted(self.pref('grouped_search_terms', {}).iterkeys(), key=sort_key):
+            if cat in muc:
+                # There is a chance that these can be duplicates of an existing
+                # user category. Print the exception and continue.
+                try:
+                    self.field_metadata.add_user_category(label=u'@' + cat, name=cat)
+                except:
+                    traceback.print_exc()
+
+        # TODO: Saved searches
+        # if len(saved_searches().names()):
+        #     self.field_metadata.add_search_category(label='search', name=_('Searches'))
+
+        self.field_metadata.add_grouped_search_terms(
+                self.pref('grouped_search_terms', {}))
+
+        self._search_api.change_locations(self.field_metadata.get_search_terms())
+
     @property
     def field_metadata(self):
         return self.backend.field_metadata
@@ -260,20 +294,20 @@ class Cache(object):
         Return all the books associated with the item identified by
         ``item_id``, where the item belongs to the field ``name``.

-        Returned value is a tuple of book ids, or the empty tuple if the item
+        Returned value is a set of book ids, or the empty set if the item
         or the field does not exist.
         '''
         try:
             return self.fields[name].books_for(item_id)
         except (KeyError, IndexError):
-            return ()
+            return set()

     @read_api
-    def all_book_ids(self):
+    def all_book_ids(self, type=frozenset):
         '''
         Frozen set of all known book ids.
         '''
-        return frozenset(self.fields['uuid'])
+        return type(self.fields['uuid'])

     @read_api
     def all_field_ids(self, name):
@@ -316,6 +350,10 @@ class Cache(object):
                 self.format_metadata_cache[book_id][fmt] = ans
         return ans

+    @read_api
+    def pref(self, name, default=None):
+        return self.backend.prefs.get(name, default)
+
     @api
     def get_metadata(self, book_id,
             get_cover=False, get_user_categories=True, cover_as_data=False):
@@ -378,17 +416,19 @@ class Cache(object):
         all_book_ids = frozenset(self._all_book_ids() if ids_to_sort is None
                 else ids_to_sort)
         get_metadata = partial(self._get_metadata, get_user_categories=False)
+        lang_map = self.fields['languages'].book_value_map

         fm = {'title':'sort', 'authors':'author_sort'}

         def sort_key(field):
             'Handle series type fields'
-            ans = self.fields[fm.get(field, field)].sort_keys_for_books(get_metadata,
-                    all_book_ids)
             idx = field + '_index'
-            if idx in self.fields:
-                idx_ans = self.fields[idx].sort_keys_for_books(get_metadata,
-                        all_book_ids)
+            is_series = idx in self.fields
+            ans = self.fields[fm.get(field, field)].sort_keys_for_books(
+                get_metadata, lang_map, all_book_ids,)
+            if is_series:
+                idx_ans = self.fields[idx].sort_keys_for_books(
+                    get_metadata, lang_map, all_book_ids)
                 ans = {k:(v, idx_ans[k]) for k, v in ans.iteritems()}
             return ans
@@ -401,6 +441,16 @@ class Cache(object):
         else:
             return sorted(all_book_ids, key=partial(SortKey, fields, sort_keys))

+    @read_api
+    def search(self, query, restriction, virtual_fields=None):
+        return self._search_api(self, query, restriction,
+                                virtual_fields=virtual_fields)
+
+    @read_api
+    def get_categories(self, sort='name', book_ids=None, icon_map=None):
+        return get_categories(self, sort=sort, book_ids=book_ids,
+                              icon_map=icon_map)
+
     # }}}

 class SortKey(object):
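For reference, the @api/@read_api decorators only tag methods (the hunk above shows api() setting f.is_cache_api); the constructor loop then rebinds each tagged method behind the appropriate lock via wrap_simple, which is not shown in this diff. A sketch of what such a wrapper amounts to, assuming the lock objects returned by create_locks() are context managers:

    from functools import wraps

    def wrap_simple(lock, func):
        @wraps(func)
        def call_func_with_lock(*args, **kwargs):
            # acquire the shared (read) or exclusive (write) lock around the call
            with lock:
                return func(*args, **kwargs)
        return call_func_with_lock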
src/calibre/db/categories.py (new file, 118 lines)
@@ -0,0 +1,118 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

from functools import partial
from operator import attrgetter

from calibre.library.field_metadata import TagsIcons
from calibre.utils.config_base import tweaks
from calibre.utils.icu import sort_key

CATEGORY_SORTS = {'name', 'popularity', 'rating'}

class Tag(object):

    def __init__(self, name, id=None, count=0, state=0, avg=0, sort=None,
                 tooltip=None, icon=None, category=None, id_set=None,
                 is_editable=True, is_searchable=True, use_sort_as_name=False):
        self.name = self.original_name = name
        self.id = id
        self.count = count
        self.state = state
        self.is_hierarchical = ''
        self.is_editable = is_editable
        self.is_searchable = is_searchable
        self.id_set = id_set if id_set is not None else set([])
        self.avg_rating = avg/2.0 if avg is not None else 0
        self.sort = sort
        self.use_sort_as_name = use_sort_as_name
        if self.avg_rating > 0:
            if tooltip:
                tooltip = tooltip + ': '
            tooltip = _('%(tt)sAverage rating is %(rating)3.1f')%dict(
                    tt=tooltip, rating=self.avg_rating)
        self.tooltip = tooltip
        self.icon = icon
        self.category = category

    def __unicode__(self):
        return u'%s:%s:%s:%s:%s:%s'%(self.name, self.count, self.id, self.state,
                                     self.category, self.tooltip)

    def __str__(self):
        return unicode(self).encode('utf-8')

    def __repr__(self):
        return str(self)

def find_categories(field_metadata):
    for category, cat in field_metadata.iteritems():
        if (cat['is_category'] and cat['kind'] not in {'user', 'search'} and
                category != 'news'):
            yield (category, cat['is_multiple'].get('cache_to_list', None), False)
        elif (cat['datatype'] == 'composite' and
              cat['display'].get('make_category', False)):
            yield (category, cat['is_multiple'].get('cache_to_list', None), True)

def create_tag_class(category, fm, icon_map):
    cat = fm[category]
    icon = None
    tooltip = None if category in {'formats', 'identifiers'} else ('(' + category + ')')
    label = fm.key_to_label(category)
    if icon_map:
        if not fm.is_custom_field(category):
            if category in icon_map:
                icon = icon_map[label]
        else:
            icon = icon_map['custom:']
            icon_map[category] = icon
    is_editable = category not in {'news', 'rating', 'languages', 'formats',
                                   'identifiers'}

    if (tweaks['categories_use_field_for_author_name'] == 'author_sort' and
            (category == 'authors' or
                (cat['display'].get('is_names', False) and
                 cat['is_custom'] and cat['is_multiple'] and
                 cat['datatype'] == 'text'))):
        use_sort_as_name = True
    else:
        use_sort_as_name = False

    return partial(Tag, use_sort_as_name=use_sort_as_name, icon=icon,
                   tooltip=tooltip, is_editable=is_editable,
                   category=category)

def get_categories(dbcache, sort='name', book_ids=None, icon_map=None):
    if icon_map is not None and type(icon_map) != TagsIcons:
        raise TypeError('icon_map passed to get_categories must be of type TagIcons')
    if sort not in CATEGORY_SORTS:
        raise ValueError('sort ' + sort + ' not a valid value')

    fm = dbcache.field_metadata
    book_rating_map = dbcache.fields['rating'].book_value_map
    lang_map = dbcache.fields['languages'].book_value_map

    categories = {}
    book_ids = frozenset(book_ids) if book_ids else book_ids
    for category, is_multiple, is_composite in find_categories(fm):
        tag_class = create_tag_class(category, fm, icon_map)
        cats = dbcache.fields[category].get_categories(
            tag_class, book_rating_map, lang_map, book_ids)
        if sort == 'popularity':
            key = attrgetter('count')
        elif sort == 'rating':
            key = attrgetter('avg_rating')
        else:
            key = lambda x: sort_key(x.sort or x.name)
        cats.sort(key=key)
        categories[category] = cats

    return categories
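Callers do not normally import this module directly; get_categories is exposed as the @read_api Cache.get_categories added in the cache.py hunk above. A hypothetical use, where cache is a calibre.db.cache.Cache instance:

    cats = cache.get_categories(sort='popularity')
    for tag in cats.get('tags', []):
        # each entry is a Tag carrying the count and average rating of the
        # books that have it
        print(tag.name, tag.count, tag.avg_rating)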
@@ -9,32 +9,41 @@ __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
 __docformat__ = 'restructuredtext en'

 from threading import Lock
 from collections import defaultdict, Counter

 from calibre.db.tables import ONE_ONE, MANY_ONE, MANY_MANY
 from calibre.ebooks.metadata import title_sort
 from calibre.utils.config_base import tweaks
 from calibre.utils.icu import sort_key
 from calibre.utils.date import UNDEFINED_DATE
 from calibre.utils.localization import calibre_langcode_to_name

 class Field(object):

+    is_many = False
+
     def __init__(self, name, table):
         self.name, self.table = name, table
-        self.has_text_data = self.metadata['datatype'] in ('text', 'comments',
-                'series', 'enumeration')
-        self.table_type = self.table.table_type
+        dt = self.metadata['datatype']
+        self.has_text_data = dt in {'text', 'comments', 'series', 'enumeration'}
+        self.table_type = self.table.table_type
+        self._sort_key = (sort_key if dt in ('text', 'series', 'enumeration') else lambda x: x)
         self._default_sort_key = ''
-        if self.metadata['datatype'] in ('int', 'float', 'rating'):
+        if dt in {'int', 'float', 'rating'}:
             self._default_sort_key = 0
-        elif self.metadata['datatype'] == 'bool':
+        elif dt == 'bool':
             self._default_sort_key = None
-        elif self.metadata['datatype'] == 'datetime':
+        elif dt == 'datetime':
             self._default_sort_key = UNDEFINED_DATE
         if self.name == 'languages':
             self._sort_key = lambda x:sort_key(calibre_langcode_to_name(x))
         self.is_multiple = (bool(self.metadata['is_multiple']) or self.name ==
                 'formats')
+        self.category_formatter = type(u'')
+        if dt == 'rating':
+            self.category_formatter = lambda x:'\u2605'*int(x/2)
+        elif name == 'languages':
+            self.category_formatter = calibre_langcode_to_name

     @property
     def metadata(self):
@@ -58,7 +67,7 @@ class Field(object):
     def books_for(self, item_id):
         '''
         Return the ids of all books associated with the item identified by
-        item_id as a tuple. An empty tuple is returned if no books are found.
+        item_id as a set. An empty set is returned if no books are found.
         '''
         raise NotImplementedError()
@@ -72,7 +81,7 @@ class Field(object):
         '''
         return iter(())

-    def sort_keys_for_books(self, get_metadata, all_book_ids):
+    def sort_keys_for_books(self, get_metadata, lang_map, all_book_ids):
         '''
         Return a mapping of book_id -> sort_key. The sort key is suitable for
         use in sorting the list of all books by this field, via the python cmp
@@ -81,6 +90,34 @@ class Field(object):
         '''
         raise NotImplementedError()

+    def iter_searchable_values(self, get_metadata, candidates, default_value=None):
+        '''
+        Return a generator that yields items of the form (value, set of books
+        ids that have this value). Here, value is a searchable value. Returned
+        books_ids are restricted to the set of ids in candidates.
+        '''
+        raise NotImplementedError()
+
+    def get_categories(self, tag_class, book_rating_map, lang_map, book_ids=None):
+        ans = []
+        if not self.is_many:
+            return ans
+
+        special_sort = hasattr(self, 'category_sort_value')
+        for item_id, item_book_ids in self.table.col_book_map.iteritems():
+            if book_ids is not None:
+                item_book_ids = item_book_ids.intersection(book_ids)
+            if item_book_ids:
+                ratings = tuple(r for r in (book_rating_map.get(book_id, 0) for
+                                            book_id in item_book_ids) if r > 0)
+                avg = sum(ratings)/len(ratings) if ratings else 0
+                name = self.category_formatter(self.table.id_map[item_id])
+                sval = (self.category_sort_value(item_id, item_book_ids, lang_map)
+                        if special_sort else name)
+                c = tag_class(name, id=item_id, sort=sval, avg=avg,
+                              id_set=item_book_ids, count=len(item_book_ids))
+                ans.append(c)
+        return ans
+
 class OneToOneField(Field):
@@ -91,15 +128,20 @@ class OneToOneField(Field):
         return (book_id,)

     def books_for(self, item_id):
-        return (item_id,)
+        return {item_id}

     def __iter__(self):
         return self.table.book_col_map.iterkeys()

-    def sort_keys_for_books(self, get_metadata, all_book_ids):
+    def sort_keys_for_books(self, get_metadata, lang_map, all_book_ids):
         return {id_ : self._sort_key(self.table.book_col_map.get(id_,
             self._default_sort_key)) for id_ in all_book_ids}

+    def iter_searchable_values(self, get_metadata, candidates, default_value=None):
+        cbm = self.table.book_col_map
+        for book_id in candidates:
+            yield cbm.get(book_id, default_value), {book_id}
+
 class CompositeField(OneToOneField):

     def __init__(self, *args, **kwargs):
@@ -133,10 +175,16 @@ class CompositeField(OneToOneField):
             ans = mi.get('#'+self.metadata['label'])
         return ans

-    def sort_keys_for_books(self, get_metadata, all_book_ids):
+    def sort_keys_for_books(self, get_metadata, lang_map, all_book_ids):
         return {id_ : sort_key(self.get_value_with_cache(id_, get_metadata)) for id_ in
                 all_book_ids}

+    def iter_searchable_values(self, get_metadata, candidates, default_value=None):
+        val_map = defaultdict(set)
+        for book_id in candidates:
+            val_map[self.get_value_with_cache(book_id, get_metadata)].add(book_id)
+        for val, book_ids in val_map.iteritems():
+            yield val, book_ids
+
 class OnDeviceField(OneToOneField):
@@ -170,12 +218,21 @@ class OnDeviceField(OneToOneField):
     def __iter__(self):
         return iter(())

-    def sort_keys_for_books(self, get_metadata, all_book_ids):
+    def sort_keys_for_books(self, get_metadata, lang_map, all_book_ids):
         return {id_ : self.for_book(id_) for id_ in
                 all_book_ids}

+    def iter_searchable_values(self, get_metadata, candidates, default_value=None):
+        val_map = defaultdict(set)
+        for book_id in candidates:
+            val_map[self.for_book(book_id, default_value=default_value)].add(book_id)
+        for val, book_ids in val_map.iteritems():
+            yield val, book_ids
+
 class ManyToOneField(Field):

+    is_many = True
+
     def for_book(self, book_id, default_value=None):
         ids = self.table.book_col_map.get(book_id, None)
         if ids is not None:
@@ -191,12 +248,12 @@ class ManyToOneField(Field):
         return (id_,)

     def books_for(self, item_id):
-        return self.table.col_book_map.get(item_id, ())
+        return self.table.col_book_map.get(item_id, set())

     def __iter__(self):
         return self.table.id_map.iterkeys()

-    def sort_keys_for_books(self, get_metadata, all_book_ids):
+    def sort_keys_for_books(self, get_metadata, lang_map, all_book_ids):
         ans = {id_ : self.table.book_col_map.get(id_, None)
                 for id_ in all_book_ids}
         sk_map = {cid : (self._default_sort_key if cid is None else
@@ -204,8 +261,23 @@ class ManyToOneField(Field):
                 for cid in ans.itervalues()}
         return {id_ : sk_map[cid] for id_, cid in ans.iteritems()}

+    def iter_searchable_values(self, get_metadata, candidates, default_value=None):
+        cbm = self.table.col_book_map
+        empty = set()
+        for item_id, val in self.table.id_map.iteritems():
+            book_ids = cbm.get(item_id, empty).intersection(candidates)
+            if book_ids:
+                yield val, book_ids
+
+    @property
+    def book_value_map(self):
+        return {book_id:self.table.id_map[item_id] for book_id, item_id in
+                self.table.book_col_map.iteritems()}
+
 class ManyToManyField(Field):

+    is_many = True
+
     def __init__(self, *args, **kwargs):
         Field.__init__(self, *args, **kwargs)
         self.alphabetical_sort = self.name != 'authors'
@@ -222,12 +294,12 @@ class ManyToManyField(Field):
         return self.table.book_col_map.get(book_id, ())

     def books_for(self, item_id):
-        return self.table.col_book_map.get(item_id, ())
+        return self.table.col_book_map.get(item_id, set())

     def __iter__(self):
         return self.table.id_map.iterkeys()

-    def sort_keys_for_books(self, get_metadata, all_book_ids):
+    def sort_keys_for_books(self, get_metadata, lang_map, all_book_ids):
         ans = {id_ : self.table.book_col_map.get(id_, ())
                 for id_ in all_book_ids}
         all_cids = set()
@@ -239,6 +311,26 @@ class ManyToManyField(Field):
                 (self._default_sort_key,))
                 for id_, cids in ans.iteritems()}

+    def iter_searchable_values(self, get_metadata, candidates, default_value=None):
+        cbm = self.table.col_book_map
+        empty = set()
+        for item_id, val in self.table.id_map.iteritems():
+            book_ids = cbm.get(item_id, empty).intersection(candidates)
+            if book_ids:
+                yield val, book_ids
+
+    def iter_counts(self, candidates):
+        val_map = defaultdict(set)
+        cbm = self.table.book_col_map
+        for book_id in candidates:
+            val_map[len(cbm.get(book_id, ()))].add(book_id)
+        for count, book_ids in val_map.iteritems():
+            yield count, book_ids
+
+    @property
+    def book_value_map(self):
+        return {book_id:tuple(self.table.id_map[item_id] for item_id in item_ids)
+                for book_id, item_ids in self.table.book_col_map.iteritems()}
+
 class IdentifiersField(ManyToManyField):
@@ -248,7 +340,7 @@ class IdentifiersField(ManyToManyField):
             ids = default_value
         return ids

-    def sort_keys_for_books(self, get_metadata, all_book_ids):
+    def sort_keys_for_books(self, get_metadata, lang_map, all_book_ids):
         'Sort by identifier keys'
         ans = {id_ : self.table.book_col_map.get(id_, ())
                 for id_ in all_book_ids}
@@ -256,6 +348,23 @@ class IdentifiersField(ManyToManyField):
                 (self._default_sort_key,))
                 for id_, cids in ans.iteritems()}

+    def iter_searchable_values(self, get_metadata, candidates, default_value=()):
+        bcm = self.table.book_col_map
+        for book_id in candidates:
+            val = bcm.get(book_id, default_value)
+            if val:
+                yield val, {book_id}
+
+    def get_categories(self, tag_class, book_rating_map, lang_map, book_ids=None):
+        ans = []
+        for id_key, item_book_ids in self.table.col_book_map.iteritems():
+            if book_ids is not None:
+                item_book_ids = item_book_ids.intersection(book_ids)
+            if item_book_ids:
+                c = tag_class(id_key, id_set=item_book_ids, count=len(item_book_ids))
+                ans.append(c)
+        return ans
+
 class AuthorsField(ManyToManyField):
@@ -266,6 +375,9 @@ class AuthorsField(ManyToManyField):
             'link' : self.table.alink_map[author_id],
         }

+    def category_sort_value(self, item_id, book_ids, lang_map):
+        return self.table.asort_map[item_id]
+
 class FormatsField(ManyToManyField):

     def for_book(self, book_id, default_value=None):
@@ -274,6 +386,61 @@ class FormatsField(ManyToManyField):
     def format_fname(self, book_id, fmt):
         return self.table.fname_map[book_id][fmt.upper()]

+    def iter_searchable_values(self, get_metadata, candidates, default_value=None):
+        val_map = defaultdict(set)
+        cbm = self.table.book_col_map
+        for book_id in candidates:
+            vals = cbm.get(book_id, ())
+            for val in vals:
+                val_map[val].add(book_id)
+
+        for val, book_ids in val_map.iteritems():
+            yield val, book_ids
+
+    def get_categories(self, tag_class, book_rating_map, lang_map, book_ids=None):
+        ans = []
+        for fmt, item_book_ids in self.table.col_book_map.iteritems():
+            if book_ids is not None:
+                item_book_ids = item_book_ids.intersection(book_ids)
+            if item_book_ids:
+                c = tag_class(fmt, id_set=item_book_ids, count=len(item_book_ids))
+                ans.append(c)
+        return ans
+
+class SeriesField(ManyToOneField):
+
+    def sort_key_for_series(self, book_id, lang_map, series_sort_order):
+        sid = self.table.book_col_map.get(book_id, None)
+        if sid is None:
+            return self._default_sort_key
+        lang = lang_map.get(book_id, None) or None
+        if lang:
+            lang = lang[0]
+        return self._sort_key(title_sort(self.table.id_map[sid],
+                                         order=series_sort_order, lang=lang))
+
+    def sort_keys_for_books(self, get_metadata, lang_map, all_book_ids):
+        sso = tweaks['title_series_sorting']
+        return {book_id:self.sort_key_for_series(book_id, lang_map, sso) for book_id
+                in all_book_ids}
+
+    def category_sort_value(self, item_id, book_ids, lang_map):
+        lang = None
+        tss = tweaks['title_series_sorting']
+        if tss != 'strictly_alphabetic':
+            c = Counter()
+            for book_id in book_ids:
+                l = lang_map.get(book_id, None)
+                if l:
+                    c[l[0]] += 1
+            if c:
+                lang = c.most_common(1)[0][0]
+        val = self.table.id_map[item_id]
+        return title_sort(val, order=tss, lang=lang)
+
 def create_field(name, table):
     cls = {
         ONE_ONE : OneToOneField,
@@ -290,5 +457,7 @@ def create_field(name, table):
         cls = IdentifiersField
     elif table.metadata['datatype'] == 'composite':
         cls = CompositeField
+    elif table.metadata['datatype'] == 'series':
+        cls = SeriesField
     return cls(name, table)
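The iter_searchable_values contract introduced here (yield (value, set of book ids) pairs, restricted to the candidates set) is exactly what the new search code in the next file consumes through Parser.field_iter. A toy illustration of a consumer, with made-up data:

    # hypothetical: a field whose table maps {1: 'EPUB', 2: 'MOBI', 3: 'EPUB'}
    matches = set()
    for value, book_ids in field.iter_searchable_values(get_metadata, {1, 2, 3}):
        if value == 'EPUB':
            matches |= book_ids   # accumulate the ids carrying the value
    # matches is now {1, 3}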
src/calibre/db/search.py (new file, 700 lines)
@@ -0,0 +1,700 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import re
from functools import partial
from datetime import timedelta

from calibre.utils.config_base import prefs
from calibre.utils.date import parse_date, UNDEFINED_DATE, now
from calibre.utils.icu import primary_find
from calibre.utils.localization import lang_map, canonicalize_lang
from calibre.utils.search_query_parser import SearchQueryParser, ParseException

# TODO: Thread safety of saved searches
CONTAINS_MATCH = 0
EQUALS_MATCH = 1
REGEXP_MATCH = 2

# Utils {{{

def force_to_bool(val):
    if isinstance(val, (str, unicode)):
        try:
            val = icu_lower(val)
            if not val:
                val = None
            elif val in [_('yes'), _('checked'), 'true', 'yes']:
                val = True
            elif val in [_('no'), _('unchecked'), 'false', 'no']:
                val = False
            else:
                val = bool(int(val))
        except:
            val = None
    return val

def _matchkind(query):
    matchkind = CONTAINS_MATCH
    if (len(query) > 1):
        if query.startswith('\\'):
            query = query[1:]
        elif query.startswith('='):
            matchkind = EQUALS_MATCH
            query = query[1:]
        elif query.startswith('~'):
            matchkind = REGEXP_MATCH
            query = query[1:]

    if matchkind != REGEXP_MATCH:
        # leave case in regexps because it can be significant e.g. \S \W \D
        query = icu_lower(query)
    return matchkind, query

def _match(query, value, matchkind, use_primary_find_in_search=True):
    if query.startswith('..'):
        query = query[1:]
        sq = query[1:]
        internal_match_ok = True
    else:
        internal_match_ok = False
    for t in value:
        try:  ### ignore regexp exceptions, required because search-ahead tries before typing is finished
            t = icu_lower(t)
            if (matchkind == EQUALS_MATCH):
                if internal_match_ok:
                    if query == t:
                        return True
                    comps = [c.strip() for c in t.split('.') if c.strip()]
                    for comp in comps:
                        if sq == comp:
                            return True
                elif query[0] == '.':
                    if t.startswith(query[1:]):
                        ql = len(query) - 1
                        if (len(t) == ql) or (t[ql:ql+1] == '.'):
                            return True
                elif query == t:
                    return True
            elif matchkind == REGEXP_MATCH:
                if re.search(query, t, re.I|re.UNICODE):
                    return True
            elif matchkind == CONTAINS_MATCH:
                if use_primary_find_in_search:
                    if primary_find(query, t)[0] != -1:
                        return True
                elif query in t:
                    return True
        except re.error:
            pass
    return False
# }}}
class DateSearch(object):  # {{{

    def __init__(self):
        self.operators = {
            '='  : (1, self.eq),
            '!=' : (2, self.ne),
            '>'  : (1, self.gt),
            '>=' : (2, self.ge),
            '<'  : (1, self.lt),
            '<=' : (2, self.le),
        }
        self.local_today = {'_today', 'today', icu_lower(_('today'))}
        self.local_yesterday = {'_yesterday', 'yesterday', icu_lower(_('yesterday'))}
        self.local_thismonth = {'_thismonth', 'thismonth', icu_lower(_('thismonth'))}
        self.daysago_pat = re.compile(r'(%s|daysago|_daysago)$'%_('daysago'))

    def eq(self, dbdate, query, field_count):
        if dbdate.year == query.year:
            if field_count == 1:
                return True
            if dbdate.month == query.month:
                if field_count == 2:
                    return True
                return dbdate.day == query.day
        return False

    def ne(self, *args):
        return not self.eq(*args)

    def gt(self, dbdate, query, field_count):
        if dbdate.year > query.year:
            return True
        if field_count > 1 and dbdate.year == query.year:
            if dbdate.month > query.month:
                return True
            return (field_count == 3 and dbdate.month == query.month and
                    dbdate.day > query.day)
        return False

    def le(self, *args):
        return not self.gt(*args)

    def lt(self, dbdate, query, field_count):
        if dbdate.year < query.year:
            return True
        if field_count > 1 and dbdate.year == query.year:
            if dbdate.month < query.month:
                return True
            return (field_count == 3 and dbdate.month == query.month and
                    dbdate.day < query.day)
        return False

    def ge(self, *args):
        return not self.lt(*args)

    def __call__(self, query, field_iter):
        matches = set()
        if len(query) < 2:
            return matches

        if query == 'false':
            for v, book_ids in field_iter():
                if isinstance(v, (str, unicode)):
                    v = parse_date(v)
                if v is None or v <= UNDEFINED_DATE:
                    matches |= book_ids
            return matches

        if query == 'true':
            for v, book_ids in field_iter():
                if isinstance(v, (str, unicode)):
                    v = parse_date(v)
                if v is not None and v > UNDEFINED_DATE:
                    matches |= book_ids
            return matches

        relop = None
        for k, op in self.operators.iteritems():
            if query.startswith(k):
                p, relop = op
                query = query[p:]
        if relop is None:
            relop = self.operators['='][-1]

        if query in self.local_today:
            qd = now()
            field_count = 3
        elif query in self.local_yesterday:
            qd = now() - timedelta(1)
            field_count = 3
        elif query in self.local_thismonth:
            qd = now()
            field_count = 2
        else:
            m = self.daysago_pat.search(query)
            if m is not None:
                num = query[:-len(m.group(1))]
                try:
                    qd = now() - timedelta(int(num))
                except:
                    raise ParseException(query, len(query), 'Number conversion error')
                field_count = 3
            else:
                try:
                    qd = parse_date(query, as_utc=False)
                except:
                    raise ParseException(query, len(query), 'Date conversion error')
                if '-' in query:
                    field_count = query.count('-') + 1
                else:
                    field_count = query.count('/') + 1

        for v, book_ids in field_iter():
            if isinstance(v, (str, unicode)):
                v = parse_date(v)
            if v is not None and relop(v, qd, field_count):
                matches |= book_ids

        return matches
# }}}
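One subtlety worth spelling out: field_count encodes how precise the typed date is (one more than the number of separators), and the comparison methods only descend to that depth. A quick worked example based on the code above:

    # ds = DateSearch(); from datetime import date
    # query '2013-01' has one '-', so field_count = 2 and eq() compares
    # year and month only: any day in January 2013 matches.
    #   ds.eq(date(2013, 1, 7), date(2013, 1, 1), 2)  -> True
    # query '2013-01-07' has field_count = 3 and must match the exact day:
    #   ds.eq(date(2013, 1, 8), date(2013, 1, 7), 3)  -> False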
class NumericSearch(object):  # {{{

    def __init__(self):
        self.operators = {
            '=' : (1, lambda r, q: r == q),
            '>' : (1, lambda r, q: r is not None and r > q),
            '<' : (1, lambda r, q: r is not None and r < q),
            '!=': (2, lambda r, q: r != q),
            '>=': (2, lambda r, q: r is not None and r >= q),
            '<=': (2, lambda r, q: r is not None and r <= q)
        }

    def __call__(self, query, field_iter, location, datatype, candidates, is_many=False):
        matches = set()
        if not query:
            return matches

        q = ''
        cast = adjust = lambda x: x
        dt = datatype

        if is_many and query in {'true', 'false'}:
            valcheck = lambda x: True
            if datatype == 'rating':
                valcheck = lambda x: x is not None and x > 0
            found = set()
            for val, book_ids in field_iter():
                if valcheck(val):
                    found |= book_ids
            return found if query == 'true' else candidates - found

        if query == 'false':
            if location == 'cover':
                relop = lambda x,y: not bool(x)
            else:
                relop = lambda x,y: x is None
        elif query == 'true':
            if location == 'cover':
                relop = lambda x,y: bool(x)
            else:
                relop = lambda x,y: x is not None
        else:
            relop = None
            for k, op in self.operators.iteritems():
                if query.startswith(k):
                    p, relop = op
                    query = query[p:]
            if relop is None:
                p, relop = self.operators['=']

            cast = int
            if dt == 'rating':
                cast = lambda x: 0 if x is None else int(x)
                adjust = lambda x: x/2
            elif dt in ('float', 'composite'):
                cast = float

            mult = 1.0
            if len(query) > 1:
                mult = query[-1].lower()
                mult = {'k': 1024.,'m': 1024.**2, 'g': 1024.**3}.get(mult, 1.0)
                if mult != 1.0:
                    query = query[:-1]
            else:
                mult = 1.0

            try:
                q = cast(query) * mult
            except:
                raise ParseException(query, len(query),
                        'Non-numeric value in query: %r'%query)

        for val, book_ids in field_iter():
            if val is None:
                continue
            try:
                v = cast(val)
            except:
                v = None
            if v:
                v = adjust(v)
            if relop(v, q):
                matches |= book_ids
        return matches

# }}}

class BooleanSearch(object):  # {{{

    def __init__(self):
        self.local_no = icu_lower(_('no'))
        self.local_yes = icu_lower(_('yes'))
        self.local_unchecked = icu_lower(_('unchecked'))
        self.local_checked = icu_lower(_('checked'))
        self.local_empty = icu_lower(_('empty'))
        self.local_blank = icu_lower(_('blank'))
        self.local_bool_values = {
            self.local_no, self.local_unchecked, '_no', 'false', 'no',
            self.local_yes, self.local_checked, '_yes', 'true', 'yes',
            self.local_empty, self.local_blank, '_empty', 'empty'}

    def __call__(self, query, field_iter, bools_are_tristate):
        matches = set()
        if query not in self.local_bool_values:
            raise ParseException(_('Invalid boolean query "{0}"').format(query))
        for val, book_ids in field_iter():
            val = force_to_bool(val)
            if not bools_are_tristate:
                if val is None or not val:  # item is None or set to false
                    if query in {self.local_no, self.local_unchecked, 'no', '_no', 'false'}:
                        matches |= book_ids
                else:  # item is explicitly set to true
                    if query in {self.local_yes, self.local_checked, 'yes', '_yes', 'true'}:
                        matches |= book_ids
            else:
                if val is None:
                    if query in {self.local_empty, self.local_blank, 'empty', '_empty', 'false'}:
                        matches |= book_ids
                elif not val:  # is not None and false
                    if query in {self.local_no, self.local_unchecked, 'no', '_no', 'true'}:
                        matches |= book_ids
                else:  # item is not None and true
                    if query in {self.local_yes, self.local_checked, 'yes', '_yes', 'true'}:
                        matches |= book_ids
        return matches

# }}}
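The size-suffix handling in NumericSearch means a numeric query can carry k/m/g multipliers: the trailing letter is stripped and the parsed number is scaled by 1024, 1024**2 or 1024**3 before the comparison. A hypothetical walk-through:

    # query '>1.5m' against a file-size field:
    #   relop comes from operators['>'], leaving query = '1.5m'
    #   'm' -> mult = 1024.**2, query = '1.5', q = 1.5 * 1048576 = 1572864.0
    #   a book whose size casts to 2000000 matches, since 2000000 > 1572864.0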
class KeyPairSearch(object):  # {{{

    def __call__(self, query, field_iter, candidates, use_primary_find):
        matches = set()
        if ':' in query:
            q = [q.strip() for q in query.split(':')]
            if len(q) != 2:
                raise ParseException(query, len(query),
                        'Invalid query format for colon-separated search')
            keyq, valq = q
            keyq_mkind, keyq = _matchkind(keyq)
            valq_mkind, valq = _matchkind(valq)
        else:
            keyq = keyq_mkind = ''
            valq_mkind, valq = _matchkind(query)

        if valq in {'true', 'false'}:
            found = set()
            if keyq:
                for val, book_ids in field_iter():
                    if val and val.get(keyq, False):
                        found |= book_ids
            else:
                for val, book_ids in field_iter():
                    if val:
                        found |= book_ids
            return found if valq == 'true' else candidates - found

        for m, book_ids in field_iter():
            for key, val in m.iteritems():
                if (keyq and not _match(keyq, (key,), keyq_mkind,
                        use_primary_find_in_search=use_primary_find)):
                    continue
                if (valq and not _match(valq, (val,), valq_mkind,
                        use_primary_find_in_search=use_primary_find)):
                    continue
                matches |= book_ids
                break

        return matches

# }}}
class Parser(SearchQueryParser):
|
||||
|
||||
def __init__(self, dbcache, all_book_ids, gst, date_search, num_search,
|
||||
bool_search, keypair_search, limit_search_columns, limit_search_columns_to,
|
||||
locations, virtual_fields):
|
||||
self.dbcache, self.all_book_ids = dbcache, all_book_ids
|
||||
self.all_search_locations = frozenset(locations)
|
||||
self.grouped_search_terms = gst
|
||||
self.date_search, self.num_search = date_search, num_search
|
||||
self.bool_search, self.keypair_search = bool_search, keypair_search
|
||||
self.limit_search_columns, self.limit_search_columns_to = (
|
||||
limit_search_columns, limit_search_columns_to)
|
||||
self.virtual_fields = virtual_fields or {}
|
||||
if 'marked' not in self.virtual_fields:
|
||||
self.virtual_fields['marked'] = self
|
||||
super(Parser, self).__init__(locations, optimize=True)
|
||||
|
||||
@property
|
||||
def field_metadata(self):
|
||||
return self.dbcache.field_metadata
|
||||
|
||||
def universal_set(self):
|
||||
return self.all_book_ids
|
||||
|
||||
def field_iter(self, name, candidates):
|
||||
get_metadata = partial(self.dbcache._get_metadata, get_user_categories=False)
|
||||
try:
|
||||
field = self.dbcache.fields[name]
|
||||
except KeyError:
|
||||
field = self.virtual_fields[name]
|
||||
return field.iter_searchable_values(get_metadata, candidates)
|
||||
|
||||
def iter_searchable_values(self, *args, **kwargs):
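        # Stub used when the parser itself stands in for a virtual field
        # (see 'marked' above) that has nothing searchable: yields no values.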
        for x in []:
            yield x, set()

    def get_matches(self, location, query, candidates=None,
                    allow_recursion=True):
        # If candidates is not None, it must not be modified. Changing its
        # value will break query optimization in the search parser
        matches = set()

        if candidates is None:
            candidates = self.all_book_ids
        if not candidates or not query or not query.strip():
            return matches
        if location not in self.all_search_locations:
            return matches

        if (len(location) > 2 and location.startswith('@') and
                location[1:] in self.grouped_search_terms):
            location = location[1:]

        # get metadata key associated with the search term. Eliminates
        # dealing with plurals and other aliases
        original_location = location
        location = self.field_metadata.search_term_to_field_key(
            icu_lower(location.strip()))
        # grouped search terms
        if isinstance(location, list):
            if allow_recursion:
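                # A grouped search term matches 'false' only when none of its
                # member fields match, so run the 'true' search and invert.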
                if query.lower() == 'false':
                    invert = True
                    query = 'true'
                else:
                    invert = False
                for loc in location:
                    c = candidates.copy()
                    m = self.get_matches(loc, query,
                            candidates=c, allow_recursion=False)
                    matches |= m
                    c -= m
                    if len(c) == 0:
                        break
                if invert:
                    matches = self.all_book_ids - matches
                return matches
            raise ParseException(query, len(query), 'Recursive query group detected')

        # If the user has asked to restrict searching over all fields, apply
        # that restriction
        if (location == 'all' and self.limit_search_columns and
                self.limit_search_columns_to):
            terms = set()
            for l in self.limit_search_columns_to:
                l = icu_lower(l.strip())
                if l and l != 'all' and l in self.all_search_locations:
                    terms.add(l)
            if terms:
                c = candidates.copy()
                for l in terms:
                    try:
                        m = self.get_matches(l, query,
                                candidates=c, allow_recursion=allow_recursion)
                        matches |= m
                        c -= m
                        if len(c) == 0:
                            break
                    except:
                        pass
                return matches

        upf = prefs['use_primary_find_in_search']

        if location in self.field_metadata:
            fm = self.field_metadata[location]
            dt = fm['datatype']

            # take care of dates special case
            if (dt == 'datetime' or (
                    dt == 'composite' and
                    fm['display'].get('composite_sort', '') == 'date')):
                if location == 'date':
                    location = 'timestamp'
                return self.date_search(
                    icu_lower(query), partial(self.field_iter, location, candidates))

            # take care of numbers special case
            if (dt in ('rating', 'int', 'float') or
                    (dt == 'composite' and
                    fm['display'].get('composite_sort', '') == 'number')):
                field = self.dbcache.fields[location]
                return self.num_search(
                    icu_lower(query), partial(self.field_iter, location, candidates),
                    location, dt, candidates, is_many=field.is_many)

            # take care of the 'count' operator for is_multiples
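            # (e.g. tags:#>5 matches books that have more than five tags)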
            if (fm['is_multiple'] and
                    len(query) > 1 and query[0] == '#' and query[1] in '=<>!'):
                return self.num_search(icu_lower(query[1:]), partial(
                        self.dbcache.fields[location].iter_counts, candidates),
                    location, dt, candidates)

            # take care of boolean special case
            if dt == 'bool':
                return self.bool_search(icu_lower(query),
                        partial(self.field_iter, location, candidates),
                        self.dbcache.pref('bools_are_tristate'))

            # special case: colon-separated fields such as identifiers. isbn
            # is a special case within the case
            if fm.get('is_csp', False):
                field_iter = partial(self.field_iter, location, candidates)
                if location == 'identifiers' and original_location == 'isbn':
                    return self.keypair_search('=isbn:'+query, field_iter,
                            candidates, upf)
                return self.keypair_search(query, field_iter, candidates, upf)

        # check for user categories
        if len(location) >= 2 and location.startswith('@'):
            return self.get_user_category_matches(location[1:], icu_lower(query), candidates)

        # Everything else (and 'all' matches)
        matchkind, query = _matchkind(query)
        all_locs = set()
        text_fields = set()
        field_metadata = {}

        for x, fm in self.field_metadata.iteritems():
            if x.startswith('@'): continue
            if fm['search_terms'] and x != 'series_sort':
                all_locs.add(x)
                field_metadata[x] = fm
                if fm['datatype'] in {'composite', 'text', 'comments', 'series', 'enumeration'}:
                    text_fields.add(x)

        locations = all_locs if location == 'all' else {location}

        current_candidates = set(candidates)

        try:
            rating_query = int(float(query)) * 2
        except:
            rating_query = None

        try:
            int_query = int(float(query))
        except:
            int_query = None

        try:
            float_query = float(query)
        except:
            float_query = None

        for location in locations:
            current_candidates -= matches
            q = query
            if location == 'languages':
                q = canonicalize_lang(query)
                if q is None:
                    lm = lang_map()
                    rm = {v.lower():k for k,v in lm.iteritems()}
                    q = rm.get(query, query)

            if matchkind == CONTAINS_MATCH and q in {'true', 'false'}:
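                # A bare true/false query on a text field is a presence test:
                # 'true' matches books where the field has a non-empty value.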
                found = set()
                for val, book_ids in self.field_iter(location, current_candidates):
                    if val and (not hasattr(val, 'strip') or val.strip()):
                        found |= book_ids
                matches |= (found if q == 'true' else (current_candidates-found))
                continue

            dt = field_metadata.get(location, {}).get('datatype', None)
            if dt == 'rating':
                if rating_query is not None:
                    for val, book_ids in self.field_iter(location, current_candidates):
                        if val == rating_query:
                            matches |= book_ids
                continue

            if dt == 'float':
                if float_query is not None:
                    for val, book_ids in self.field_iter(location, current_candidates):
                        if val == float_query:
                            matches |= book_ids
                continue

            if dt == 'int':
                if int_query is not None:
                    for val, book_ids in self.field_iter(location, current_candidates):
                        if val == int_query:
                            matches |= book_ids
                continue

            if location in text_fields:
                for val, book_ids in self.field_iter(location, current_candidates):
                    if val is not None:
                        if isinstance(val, basestring):
                            val = (val,)
                        if _match(q, val, matchkind, use_primary_find_in_search=upf):
                            matches |= book_ids

        return matches

    def get_user_category_matches(self, location, query, candidates):
        matches = set()
        if len(query) < 2:
            return matches

        user_cats = self.dbcache.pref('user_categories')
        c = set(candidates)
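
        # A query prefixed with '.' also matches items from the
        # sub-categories of this user category.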
        if query.startswith('.'):
            check_subcats = True
            query = query[1:]
        else:
            check_subcats = False

        for key in user_cats:
            if key == location or (check_subcats and key.startswith(location + '.')):
                for (item, category, ign) in user_cats[key]:
                    s = self.get_matches(category, '=' + item, candidates=c)
                    c -= s
                    matches |= s
        if query == 'false':
            return candidates - matches
        return matches


class Search(object):

    def __init__(self, all_search_locations=()):
        self.all_search_locations = all_search_locations
        self.date_search = DateSearch()
        self.num_search = NumericSearch()
        self.bool_search = BooleanSearch()
        self.keypair_search = KeyPairSearch()
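        # These searchers are created once and reused for every search; all
        # per-search state lives in the Parser constructed in __call__() below.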

    def change_locations(self, newlocs):
        self.all_search_locations = newlocs

    def __call__(self, dbcache, query, search_restriction, virtual_fields=None):
        '''
        Return the set of ids of all records that match the specified
        query and restriction
        '''
        q = ''
        if not query or not query.strip():
            q = search_restriction
        else:
            q = query
            if search_restriction:
                q = u'(%s) and (%s)' % (search_restriction, query)

        all_book_ids = dbcache.all_book_ids(type=set)
        if not q:
            return all_book_ids

        if not isinstance(q, type(u'')):
            q = q.decode('utf-8')

        # We construct a new parser instance per search as pyparsing is not
        # thread safe. On my desktop, constructing a SearchQueryParser instance
        # takes 0.000975 seconds and restoring it from a pickle takes
        # 0.000974 seconds.
        sqp = Parser(
            dbcache, all_book_ids, dbcache.pref('grouped_search_terms'),
            self.date_search, self.num_search, self.bool_search,
            self.keypair_search,
            prefs['limit_search_columns'],
            prefs['limit_search_columns_to'], self.all_search_locations,
            virtual_fields)

        try:
            ret = sqp.parse(q)
        finally:
            sqp.dbcache = None
        return ret
@@ -132,13 +132,10 @@ class ManyToOneTable(Table):
                'SELECT book, {0} FROM {1}'.format(
                    self.metadata['link_column'], self.link_table)):
            if row[1] not in self.col_book_map:
                self.col_book_map[row[1]] = []
            self.col_book_map[row[1]].append(row[0])
                self.col_book_map[row[1]] = set()
            self.col_book_map[row[1]].add(row[0])
            self.book_col_map[row[0]] = row[1]

        for key in tuple(self.col_book_map.iterkeys()):
            self.col_book_map[key] = tuple(self.col_book_map[key])

class ManyToManyTable(ManyToOneTable):

    '''
@@ -148,26 +145,25 @@ class ManyToManyTable(ManyToOneTable):
    '''

    table_type = MANY_MANY
    selectq = 'SELECT book, {0} FROM {1}'

    def read_maps(self, db):
        for row in db.conn.execute(
                'SELECT book, {0} FROM {1}'.format(
                    self.metadata['link_column'], self.link_table)):
                self.selectq.format(self.metadata['link_column'], self.link_table)):
            if row[1] not in self.col_book_map:
                self.col_book_map[row[1]] = []
            self.col_book_map[row[1]].append(row[0])
                self.col_book_map[row[1]] = set()
            self.col_book_map[row[1]].add(row[0])
            if row[0] not in self.book_col_map:
                self.book_col_map[row[0]] = []
            self.book_col_map[row[0]].append(row[1])

        for key in tuple(self.col_book_map.iterkeys()):
            self.col_book_map[key] = tuple(self.col_book_map[key])

        for key in tuple(self.book_col_map.iterkeys()):
            self.book_col_map[key] = tuple(self.book_col_map[key])

class AuthorsTable(ManyToManyTable):

    selectq = 'SELECT book, {0} FROM {1} ORDER BY id'

    def read_id_maps(self, db):
        self.alink_map = {}
        self.asort_map = {}
@@ -189,8 +185,8 @@ class FormatsTable(ManyToManyTable):
            if row[1] is not None:
                fmt = row[1].upper()
                if fmt not in self.col_book_map:
                    self.col_book_map[fmt] = []
                self.col_book_map[fmt].append(row[0])
                    self.col_book_map[fmt] = set()
                self.col_book_map[fmt].add(row[0])
                if row[0] not in self.book_col_map:
                    self.book_col_map[row[0]] = []
                self.book_col_map[row[0]].append(fmt)
@@ -198,9 +194,6 @@ class FormatsTable(ManyToManyTable):
                    self.fname_map[row[0]] = {}
                self.fname_map[row[0]][fmt] = row[2]

        for key in tuple(self.col_book_map.iterkeys()):
            self.col_book_map[key] = tuple(self.col_book_map[key])

        for key in tuple(self.book_col_map.iterkeys()):
            self.book_col_map[key] = tuple(sorted(self.book_col_map[key]))

@@ -213,15 +206,12 @@ class IdentifiersTable(ManyToManyTable):
        for row in db.conn.execute('SELECT book, type, val FROM identifiers'):
            if row[1] is not None and row[2] is not None:
                if row[1] not in self.col_book_map:
                    self.col_book_map[row[1]] = []
                self.col_book_map[row[1]].append(row[0])
                    self.col_book_map[row[1]] = set()
                self.col_book_map[row[1]].add(row[0])
                if row[0] not in self.book_col_map:
                    self.book_col_map[row[0]] = {}
                self.book_col_map[row[0]][row[1]] = row[2]

        for key in tuple(self.col_book_map.iterkeys()):
            self.col_book_map[key] = tuple(self.col_book_map[key])

class LanguagesTable(ManyToManyTable):

    def read_id_maps(self, db):
@@ -7,8 +7,8 @@ __license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'


import unittest, os, shutil
from future_builtins import map

class BaseTest(unittest.TestCase):

@@ -39,7 +39,11 @@ class BaseTest(unittest.TestCase):
                'ondevice_col', 'last_modified'}.union(allfk1)
        for attr in all_keys:
            if attr == 'user_metadata': continue
            if attr == 'format_metadata': continue # TODO: Not implemented yet
            attr1, attr2 = getattr(mi1, attr), getattr(mi2, attr)
            if attr == 'formats':
                continue # TODO: Not implemented yet
                attr1, attr2 = map(lambda x:tuple(x) if x else (), (attr1, attr2))
            self.assertEqual(attr1, attr2,
                    '%s not the same: %r != %r'%(attr, attr1, attr2))
            if attr.startswith('#'):
Binary file not shown.
@@ -10,7 +10,7 @@ __docformat__ = 'restructuredtext en'
import shutil, unittest, tempfile, datetime
from cStringIO import StringIO

from calibre.utils.date import local_tz
from calibre.utils.date import utc_tz
from calibre.db.tests.base import BaseTest

class ReadingTest(BaseTest):
@@ -37,12 +37,12 @@ class ReadingTest(BaseTest):
                'tags': (),
                'formats':(),
                'identifiers': {},
                'timestamp': datetime.datetime(2011, 9, 7, 13, 54, 41,
                    tzinfo=local_tz),
                'pubdate': datetime.datetime(2011, 9, 7, 13, 54, 41,
                    tzinfo=local_tz),
                'last_modified': datetime.datetime(2011, 9, 7, 13, 54, 41,
                    tzinfo=local_tz),
                'timestamp': datetime.datetime(2011, 9, 7, 19, 54, 41,
                    tzinfo=utc_tz),
                'pubdate': datetime.datetime(2011, 9, 7, 19, 54, 41,
                    tzinfo=utc_tz),
                'last_modified': datetime.datetime(2011, 9, 7, 19, 54, 41,
                    tzinfo=utc_tz),
                'publisher': None,
                'languages': (),
                'comments': None,
@@ -63,23 +63,23 @@ class ReadingTest(BaseTest):
                'sort': 'One',
                'authors': ('Author One',),
                'author_sort': 'One, Author',
                'series' : 'Series One',
                'series' : 'A Series One',
                'series_index': 1.0,
                'tags':('Tag Two', 'Tag One'),
                'tags':('Tag One', 'Tag Two'),
                'formats': (),
                'rating': 4.0,
                'identifiers': {'test':'one'},
                'timestamp': datetime.datetime(2011, 9, 5, 15, 6,
                    tzinfo=local_tz),
                'pubdate': datetime.datetime(2011, 9, 5, 15, 6,
                    tzinfo=local_tz),
                'timestamp': datetime.datetime(2011, 9, 5, 21, 6,
                    tzinfo=utc_tz),
                'pubdate': datetime.datetime(2011, 9, 5, 21, 6,
                    tzinfo=utc_tz),
                'publisher': 'Publisher One',
                'languages': ('eng',),
                'comments': '<p>Comments One</p>',
                '#enum':'One',
                '#authors':('Custom One', 'Custom Two'),
                '#date':datetime.datetime(2011, 9, 5, 0, 0,
                    tzinfo=local_tz),
                '#date':datetime.datetime(2011, 9, 5, 6, 0,
                    tzinfo=utc_tz),
                '#rating':2.0,
                '#series':'My Series One',
                '#series_index': 1.0,
@@ -92,23 +92,23 @@ class ReadingTest(BaseTest):
                'sort': 'Title Two',
                'authors': ('Author Two', 'Author One'),
                'author_sort': 'Two, Author & One, Author',
                'series' : 'Series One',
                'series' : 'A Series One',
                'series_index': 2.0,
                'rating': 6.0,
                'tags': ('Tag One',),
                'formats':(),
                'identifiers': {'test':'two'},
                'timestamp': datetime.datetime(2011, 9, 6, 0, 0,
                    tzinfo=local_tz),
                'pubdate': datetime.datetime(2011, 8, 5, 0, 0,
                    tzinfo=local_tz),
                'timestamp': datetime.datetime(2011, 9, 6, 6, 0,
                    tzinfo=utc_tz),
                'pubdate': datetime.datetime(2011, 8, 5, 6, 0,
                    tzinfo=utc_tz),
                'publisher': 'Publisher Two',
                'languages': ('deu',),
                'comments': '<p>Comments Two</p>',
                '#enum':'Two',
                '#authors':('My Author Two',),
                '#date':datetime.datetime(2011, 9, 1, 0, 0,
                    tzinfo=local_tz),
                '#date':datetime.datetime(2011, 9, 1, 6, 0,
                    tzinfo=utc_tz),
                '#rating':4.0,
                '#series':'My Series Two',
                '#series_index': 3.0,
@@ -130,30 +130,31 @@ class ReadingTest(BaseTest):
        'Test sorting'
        cache = self.init_cache(self.library_path)
        for field, order in {
            'title' : [2, 1, 3],
            'authors': [2, 1, 3],
            'series' : [3, 2, 1],
            'tags' : [3, 1, 2],
            'rating' : [3, 2, 1],
            # 'identifiers': [3, 2, 1], There is no stable sort since 1 and
            # 2 have the same identifier keys
            # TODO: Add an empty book to the db and ensure that empty
            # fields sort the same as they do in db2
            'timestamp': [2, 1, 3],
            'pubdate' : [1, 2, 3],
            'publisher': [3, 2, 1],
            'last_modified': [2, 1, 3],
            'languages': [3, 2, 1],
            'comments': [3, 2, 1],
            '#enum' : [3, 2, 1],
            '#authors' : [3, 2, 1],
            '#date': [3, 1, 2],
            '#rating':[3, 2, 1],
            '#series':[3, 2, 1],
            '#tags':[3, 2, 1],
            '#yesno':[3, 1, 2],
            '#comments':[3, 2, 1],
        }.iteritems():
            'title' : [2, 1, 3],
            'authors': [2, 1, 3],
            'series' : [3, 1, 2],
            'tags' : [3, 1, 2],
            'rating' : [3, 2, 1],
            # 'identifiers': [3, 2, 1], There is no stable sort since 1 and
            # 2 have the same identifier keys
            # 'last_modified': [3, 2, 1], There is no stable sort as two
            # records have the exact same value
            'timestamp': [2, 1, 3],
            'pubdate' : [1, 2, 3],
            'publisher': [3, 2, 1],
            'languages': [3, 2, 1],
            'comments': [3, 2, 1],
            '#enum' : [3, 2, 1],
            '#authors' : [3, 2, 1],
            '#date': [3, 1, 2],
            '#rating':[3, 2, 1],
            '#series':[3, 2, 1],
            '#tags':[3, 2, 1],
            '#yesno':[3, 1, 2],
            '#comments':[3, 2, 1],
            # TODO: Add an empty book to the db and ensure that empty
            # fields sort the same as they do in db2
        }.iteritems():
            x = list(reversed(order))
            self.assertEqual(order, cache.multisort([(field, True)],
                ids_to_sort=x),
@@ -190,6 +191,68 @@ class ReadingTest(BaseTest):

    # }}}

    def test_searching(self): # {{{
        'Test searching returns the same data for both backends'
        from calibre.library.database2 import LibraryDatabase2
        old = LibraryDatabase2(self.library_path)
        oldvals = {query:set(old.search_getting_ids(query, '')) for query in (
            # Date tests
            'date:9/6/2011', 'date:true', 'date:false', 'pubdate:9/2011',
            '#date:true', 'date:<100daysago', 'date:>9/6/2011',
            '#date:>9/1/2011', '#date:=2011',

            # Number tests
            'rating:3', 'rating:>2', 'rating:=2', 'rating:true',
            'rating:false', 'rating:>4', 'tags:#<2', 'tags:#>7',
            'cover:false', 'cover:true', '#float:>11', '#float:<1k',
            '#float:10.01', 'series_index:1', 'series_index:<3', 'id:1',
            'id:>2',

            # Bool tests
            '#yesno:true', '#yesno:false', '#yesno:yes', '#yesno:no',
            '#yesno:empty',

            # Keypair tests
            'identifiers:true', 'identifiers:false', 'identifiers:test',
            'identifiers:test:false', 'identifiers:test:one',
            'identifiers:t:n', 'identifiers:=test:=two', 'identifiers:x:y',
            'identifiers:z',

            # Text tests
            'title:="Title One"', 'title:~title', '#enum:=one', '#enum:tw',
            '#enum:false', '#enum:true', 'series:one', 'tags:one', 'tags:true',
            'tags:false', '2', 'one', '20.02', '"publisher one"',
            '"my comments one"',

            # User categories
            '@Good Authors:One', '@Good Series.good tags:two',

            # TODO: Tests for searching the size and #formats columns and
            # cover:true|false
        )}
        old = None

        cache = self.init_cache(self.library_path)
        for query, ans in oldvals.iteritems():
            nr = cache.search(query, '')
            self.assertEqual(ans, nr,
                'Old result: %r != New result: %r for search: %s'%(
                    ans, nr, query))

    # }}}

    def test_get_categories(self): # {{{
        'Check that get_categories() returns the same data for both backends'
        from calibre.library.database2 import LibraryDatabase2
        old = LibraryDatabase2(self.library_path)
        old_categories = old.get_categories()
        cache = self.init_cache(self.library_path)
        import pprint
        pprint.pprint(old_categories)
        pprint.pprint(cache.get_categories())

    # }}}

def tests():
    return unittest.TestLoader().loadTestsFromTestCase(ReadingTest)

@@ -141,7 +141,7 @@ class ANDROID(USBMS):

            # LG
            0x1004 : {
                    0x61c5 : [0x100, 0x226, 0x227, 0x9999],
                    0x61c5 : [0x100, 0x226, 0x227, 0x229, 0x9999],
                    0x61cc : [0x226, 0x227, 0x9999, 0x100],
                    0x61ce : [0x226, 0x227, 0x9999, 0x100],
                    0x618e : [0x226, 0x227, 0x9999, 0x100],
@@ -235,7 +235,7 @@ class ANDROID(USBMS):
            'ADVANCED', 'SGH-I727', 'USB_FLASH_DRIVER', 'ANDROID',
            'S5830I_CARD', 'MID7042', 'LINK-CREATE', '7035', 'VIEWPAD_7E',
            'NOVO7', 'MB526', '_USB#WYK7MSF8KE', 'TABLET_PC', 'F', 'MT65XX_MS',
            'ICS']
            'ICS', 'E400']
    WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
            'FILE-STOR_GADGET', 'SGH-T959_CARD', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
            'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD',
@@ -246,7 +246,7 @@ class ANDROID(USBMS):
            'FILE-CD_GADGET', 'GT-I9001_CARD', 'USB_2.0', 'XT875',
            'UMS_COMPOSITE', 'PRO', '.KOBO_VOX', 'SGH-T989_CARD', 'SGH-I727',
            'USB_FLASH_DRIVER', 'ANDROID', 'MID7042', '7035', 'VIEWPAD_7E',
            'NOVO7', 'ADVANCED', 'TABLET_PC', 'F']
            'NOVO7', 'ADVANCED', 'TABLET_PC', 'F', 'E400_SD_CARD']

    OSX_MAIN_MEM = 'Android Device Main Memory'

@@ -7,7 +7,7 @@ __docformat__ = 'restructuredtext en'

import cStringIO, ctypes, datetime, os, platform, re, shutil, sys, tempfile, time

from calibre.constants import __appname__, __version__, DEBUG
from calibre.constants import __appname__, __version__, DEBUG, cache_dir
from calibre import fit_image, confirm_config_name, strftime as _strftime
from calibre.constants import isosx, iswindows
from calibre.devices.errors import OpenFeedback, UserFeedback
@@ -289,9 +289,7 @@ class ITUNES(DriverBase):

    # Properties
    cached_books = {}
    cache_dir = os.path.join(config_dir, 'caches', 'itunes')
    calibre_library_path = prefs['library_path']
    archive_path = os.path.join(cache_dir, "thumbs.zip")
    description_prefix = "added by calibre"
    ejected = False
    iTunes = None
@@ -309,6 +307,14 @@ class ITUNES(DriverBase):
    update_msg = None
    update_needed = False

    @property
    def cache_dir(self):
        return os.path.join(cache_dir(), 'itunes')

    @property
    def archive_path(self):
        return os.path.join(self.cache_dir, "thumbs.zip")

    # Public methods
    def add_books_to_metadata(self, locations, metadata, booklists):
        '''

@@ -33,7 +33,7 @@ class KOBO(USBMS):
    gui_name = 'Kobo Reader'
    description = _('Communicate with the Kobo Reader')
    author = 'Timothy Legge and David Forrester'
    version = (2, 0, 4)
    version = (2, 0, 5)

    dbversion = 0
    fwversion = 0
@@ -1706,6 +1706,7 @@ class KOBOTOUCH(KOBO):
    def upload_books(self, files, names, on_card=None, end_session=True,
                     metadata=None):
        debug_print('KoboTouch:upload_books - %d books'%(len(files)))
        debug_print('KoboTouch:upload_books - files=', files)

        result = super(KOBOTOUCH, self).upload_books(files, names, on_card, end_session, metadata)
        # debug_print('KoboTouch:upload_books - result=', result)
@@ -1717,7 +1718,7 @@ class KOBOTOUCH(KOBO):
                    '.kobo/KoboReader.sqlite'))) as connection:
                connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
                cursor = connection.cursor()
                query = "DELETE FROM content WHERE ContentID = ? AND Accessibility = 1 AND IsDownloaded = 'false'"
                cleanup_query = "DELETE FROM content WHERE ContentID = ? AND Accessibility = 1 AND IsDownloaded = 'false'"

                for fname, cycle in result:
                    show_debug = self.is_debugging_title(fname)
@@ -1726,9 +1727,11 @@ class KOBOTOUCH(KOBO):
                        debug_print('KoboTouch:upload_books: fname=', fname)
                        debug_print('KoboTouch:upload_books: contentID=', contentID)

                    t = (contentID,)
                    cleanup_values = (contentID,)
                    # debug_print('KoboTouch:upload_books: Delete record left if deleted on Touch')
                    cursor.execute(query, t)
                    cursor.execute(cleanup_query, cleanup_values)

                    self.set_filesize_in_device_database(connection, contentID, fname)

                connection.commit()

@@ -2183,6 +2186,43 @@ class KOBOTOUCH(KOBO):
            connection.commit()
            cursor.close()

    def set_filesize_in_device_database(self, connection, contentID, fpath):
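        # Bring the ___FileSize column in the device database in line with
        # the actual file on disk; the record is only updated when the
        # stored and on-disk sizes differ.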
        show_debug = self.is_debugging_title(fpath)
        if show_debug:
            debug_print('KoboTouch:set_filesize_in_device_database contentID="%s"'%contentID)

        test_query = 'SELECT ___FileSize ' \
                     'FROM content ' \
                     'WHERE ContentID = ? ' \
                     ' AND ContentType = 6'
        test_values = (contentID, )

        updatequery = 'UPDATE content ' \
                      'SET ___FileSize = ? ' \
                      'WHERE ContentId = ? ' \
                      'AND ContentType = 6'

        cursor = connection.cursor()
        cursor.execute(test_query, test_values)
        result = cursor.fetchone()
        if result is None:
            if show_debug:
                debug_print(' Did not find a record - new book on device')
        elif os.path.exists(fpath):
            file_size = os.stat(self.normalize_path(fpath)).st_size
            if show_debug:
                debug_print(' Found a record - will update - ___FileSize=', result[0], ' file_size=', file_size)
            if file_size != int(result[0]):
                update_values = (file_size, contentID, )
                cursor.execute(updatequery, update_values)
                if show_debug:
                    debug_print(' Size updated.')

        connection.commit()
        cursor.close()

        # debug_print("KoboTouch:set_filesize_in_device_database - end")

    def delete_empty_bookshelves(self, connection):
        debug_print("KoboTouch:delete_empty_bookshelves - start")

@@ -2353,10 +2393,17 @@ class KOBOTOUCH(KOBO):
            debug_print('KoboTouch:set_series book.series="%s"'%book.series)
            debug_print('KoboTouch:set_series book.series_index=', book.series_index)

        if book.series == book.kobo_series and book.series_index == book.kobo_series_number:
            if show_debug:
                debug_print('KoboTouch:set_series - series info the same - not changing')
            return
        if book.series == book.kobo_series:
            kobo_series_number = None
            if book.kobo_series_number is not None:
                try:
                    kobo_series_number = float(book.kobo_series_number)
                except:
                    kobo_series_number = None
            if kobo_series_number == book.series_index:
                if show_debug:
                    debug_print('KoboTouch:set_series - series info the same - not changing')
                return

        update_query = 'UPDATE content SET Series=?, SeriesNumber==? where BookID is Null and ContentID = ?'
        if book.series is None:

@@ -195,7 +195,7 @@ class PRST1(USBMS):
        for i, row in enumerate(cursor):
            try:
                comp_date = int(os.path.getmtime(self.normalize_path(prefix + row[0])) * 1000);
            except (OSError, IOError):
            except (OSError, IOError, TypeError):
                # In case the db has incorrect path info
                continue
            device_date = int(row[1]);

@@ -300,19 +300,21 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
              'particular IP address. The driver will listen only on the '
              'entered address, and this address will be the one advertized '
              'over mDNS (bonjour).') + '</p>',
        _('Replace books with the same calibre identifier') + ':::<p>' +
        _('Use this option to overwrite a book on the device if that book '
          'has the same calibre identifier as the book being sent. The file name of the '
          'book will not change even if the save template produces a '
          'different result. Using this option in most cases prevents '
          'having multiple copies of a book on the device.') + '</p>',
        ]
    EXTRA_CUSTOMIZATION_DEFAULT = [
                False,
                '',
                '',
                '',
                False, '',
                '', '',
                False, '9090',
                False,
                '',
                '',
                '',
                True,
                ''
                False, '',
                '', '',
                True, '',
                True
    ]
    OPT_AUTOSTART = 0
    OPT_PASSWORD = 2
@@ -322,6 +324,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
    OPT_COLLECTIONS = 8
    OPT_AUTODISCONNECT = 10
    OPT_FORCE_IP_ADDRESS = 11
    OPT_OVERWRITE_BOOKS_UUID = 12

    def __init__(self, path):
@@ -386,8 +389,14 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
        ext = os.path.splitext(fname)[1]

        try:
            # If we have already seen this book's UUID, use the existing path
            if self.settings().extra_customization[self.OPT_OVERWRITE_BOOKS_UUID]:
                existing_book = self._uuid_already_on_device(mdata.uuid, ext)
                if existing_book and existing_book.lpath:
                    return existing_book.lpath

            # If the device asked for it, try to use the UUID as the file name.
            # Fall back to the template if the UUID doesn't exist.
            # Fall back to the ch if the UUID doesn't exist.
            if self.client_wants_uuid_file_names and mdata.uuid:
                return (mdata.uuid + ext)
        except:
@@ -679,12 +688,24 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
            return not v_thumb or v_thumb[1] == b_thumb[1]
        return False

    def _uuid_already_on_device(self, uuid, ext):
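        # known_uuids is keyed by uuid + extension, so different formats of
        # the same book are tracked as separate entries.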
        try:
            return self.known_uuids.get(uuid + ext, None)
        except:
            return None

    def _set_known_metadata(self, book, remove=False):
        lpath = book.lpath
        ext = os.path.splitext(lpath)[1]
        uuid = book.get('uuid', None)
        if remove:
            self.known_metadata.pop(lpath, None)
            if uuid and ext:
                self.known_uuids.pop(uuid+ext, None)
        else:
            self.known_metadata[lpath] = book.deepcopy()
            new_book = self.known_metadata[lpath] = book.deepcopy()
            if uuid and ext:
                self.known_uuids[uuid+ext] = new_book

    def _close_device_socket(self):
        if self.device_socket is not None:
@@ -865,10 +886,12 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
            self._debug('extension path lengths', self.exts_path_lengths)

            self.THUMBNAIL_HEIGHT = result.get('coverHeight', self.DEFAULT_THUMBNAIL_HEIGHT)
            self._debug('cover height', self.THUMBNAIL_HEIGHT)
            if 'coverWidth' in result:
                # Setting this field forces the aspect ratio
                self.THUMBNAIL_WIDTH = result.get('coverWidth',
                        (self.DEFAULT_THUMBNAIL_HEIGHT/3) * 4)
                self._debug('cover width', self.THUMBNAIL_WIDTH)
            elif hasattr(self, 'THUMBNAIL_WIDTH'):
                delattr(self, 'THUMBNAIL_WIDTH')

@@ -898,6 +921,9 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
                    self.connection_attempts[peer] = 0
                except:
                    pass

            self.known_metadata = {}
            self.known_uuids = {}
            return True
        except socket.timeout:
            self._close_device_socket()
@@ -999,14 +1025,6 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
                if '_series_sort_' in result:
                    del result['_series_sort_']
                book = self.json_codec.raw_to_book(result, SDBook, self.PREFIX)

                # If the thumbnail is the wrong size, zero the last mod date
                # so the metadata will be resent
                thumbnail = book.get('thumbnail', None)
                if thumbnail and not (thumbnail[0] == self.THUMBNAIL_HEIGHT or
                        thumbnail[1] == self.THUMBNAIL_HEIGHT):
                    book.set('last_modified', UNDEFINED_DATE)

                bl.add_book(book, replace_metadata=True)
                if '_new_book_' in result:
                    book.set('_new_book_', True)
@@ -1062,7 +1080,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):

        if count:
            for i,book in enumerate(books_to_send):
                self._debug('sending metadata for book', book.lpath)
                self._debug('sending metadata for book', book.lpath, book.title)
                self._set_known_metadata(book)
                opcode, result = self._call_client(
                        'SEND_BOOK_METADATA',
@@ -1097,6 +1115,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
        for i, infile in enumerate(files):
            mdata, fname = metadata.next(), names.next()
            lpath = self._create_upload_path(mdata, fname, create_dirs=False)
            self._debug('lpath', lpath)
            if not hasattr(infile, 'read'):
                infile = USBMS.normalize_path(infile)
            book = SDBook(self.PREFIX, lpath, other=mdata)
@@ -1258,6 +1277,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
        self.device_socket = None
        self.json_codec = JsonCodec()
        self.known_metadata = {}
        self.known_uuids = {}
        self.debug_time = time.time()
        self.debug_start_time = time.time()
        self.max_book_packet_len = 0

@@ -100,7 +100,7 @@ class CHMReader(CHMFile):
    def ExtractFiles(self, output_dir=os.getcwdu(), debug_dump=False):
        html_files = set([])
        try:
            x = self.GetEncoding()
            x = self.get_encoding()
            codecs.lookup(x)
            enc = x
        except:

@@ -7,8 +7,6 @@ import os

from calibre.customize.conversion import InputFormatPlugin
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.localization import get_lang
from calibre.utils.filenames import ascii_filename
from calibre.constants import filesystem_encoding

class CHMInput(InputFormatPlugin):
@@ -57,22 +55,39 @@ class CHMInput(InputFormatPlugin):
            mainpath = os.path.join(tdir, mainname)

            metadata = get_metadata_from_reader(self._chm_reader)
            encoding = self._chm_reader.get_encoding() or options.input_encoding or 'cp1252'
            self._chm_reader.CloseCHM()
            #print tdir
            #from calibre import ipython
            #ipython()
            # print tdir, mainpath
            # from calibre import ipython
            # ipython()

        options.debug_pipeline = None
        options.input_encoding = 'utf-8'
        # try a custom conversion:
        #oeb = self._create_oebbook(mainpath, tdir, options, log, metadata)
        # try using html converter:
        htmlpath = self._create_html_root(mainpath, log)
        htmlpath, toc = self._create_html_root(mainpath, log, encoding)
        oeb = self._create_oebbook_html(htmlpath, tdir, options, log, metadata)
        options.debug_pipeline = odi
        #log.debug('DEBUG: Not removing tempdir %s' % tdir)
        if toc.count() > 1:
            oeb.toc = self.parse_html_toc(oeb.spine[0])
            oeb.manifest.remove(oeb.spine[0])
            oeb.auto_generated_toc = False
        return oeb

    def parse_html_toc(self, item):
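        # The root HTML generated by _create_html_root() is a tree of nested
        # <div> elements whose first <a> child carries each entry's title and
        # href; walk it recursively to rebuild the TOC.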
        from calibre.ebooks.oeb.base import TOC, XPath
        dx = XPath('./h:div')
        ax = XPath('./h:a[1]')

        def do_node(parent, div):
            for child in dx(div):
                a = ax(child)[0]
                c = parent.add(a.text, a.attrib['href'])
                do_node(c, child)

        toc = TOC()
        root = XPath('//h:div[1]')(item.data)[0]
        do_node(toc, root)
        return toc

    def _create_oebbook_html(self, htmlpath, basedir, opts, log, mi):
        # use HTMLInput plugin to generate book
        from calibre.customize.builtins import HTMLInput
@@ -81,104 +96,71 @@ class CHMInput(InputFormatPlugin):
        oeb = htmlinput.create_oebbook(htmlpath, basedir, opts, log, mi)
        return oeb

    def _create_oebbook(self, hhcpath, basedir, opts, log, mi):
        import uuid
    def _create_html_root(self, hhcpath, log, encoding):
        from lxml import html
        from calibre.ebooks.conversion.plumber import create_oebbook
        from calibre.ebooks.oeb.base import DirContainer
        oeb = create_oebbook(log, None, opts,
                encoding=opts.input_encoding, populate=False)
        self.oeb = oeb

        metadata = oeb.metadata
        if mi.title:
            metadata.add('title', mi.title)
        if mi.authors:
            for a in mi.authors:
                metadata.add('creator', a, attrib={'role':'aut'})
        if mi.publisher:
            metadata.add('publisher', mi.publisher)
        if mi.isbn:
            metadata.add('identifier', mi.isbn, attrib={'scheme':'ISBN'})
        if not metadata.language:
            oeb.logger.warn(u'Language not specified')
            metadata.add('language', get_lang().replace('_', '-'))
        if not metadata.creator:
            oeb.logger.warn('Creator not specified')
            metadata.add('creator', _('Unknown'))
        if not metadata.title:
            oeb.logger.warn('Title not specified')
            metadata.add('title', _('Unknown'))

        bookid = str(uuid.uuid4())
        metadata.add('identifier', bookid, id='uuid_id', scheme='uuid')
        for ident in metadata.identifier:
            if 'id' in ident.attrib:
                self.oeb.uid = metadata.identifier[0]
                break

        from urllib import unquote as _unquote
        from calibre.ebooks.oeb.base import urlquote
        from calibre.ebooks.chardet import xml_to_unicode
        hhcdata = self._read_file(hhcpath)
        hhcdata = hhcdata.decode(encoding)
        hhcdata = xml_to_unicode(hhcdata, verbose=True,
                strip_encoding_pats=True, resolve_entities=True)[0]
        hhcroot = html.fromstring(hhcdata)
        chapters = self._process_nodes(hhcroot)
        toc = self._process_nodes(hhcroot)
        #print "============================="
        #print "Printing hhcroot"
        #print etree.tostring(hhcroot, pretty_print=True)
        #print "============================="
        log.debug('Found %d section nodes' % len(chapters))

        if len(chapters) > 0:
            path0 = chapters[0][1]
            subpath = os.path.dirname(path0)
            htmlpath = os.path.join(basedir, subpath)

            oeb.container = DirContainer(htmlpath, log)
            for chapter in chapters:
                title = chapter[0]
                basename = os.path.basename(chapter[1])
                self._add_item(oeb, title, basename)

            oeb.container = DirContainer(htmlpath, oeb.log)
        return oeb

    def _create_html_root(self, hhcpath, log):
        from lxml import html
        hhcdata = self._read_file(hhcpath)
        hhcroot = html.fromstring(hhcdata)
        chapters = self._process_nodes(hhcroot)
        #print "============================="
        #print "Printing hhcroot"
        #print etree.tostring(hhcroot, pretty_print=True)
        #print "============================="
        log.debug('Found %d section nodes' % len(chapters))
        log.debug('Found %d section nodes' % toc.count())
        htmlpath = os.path.splitext(hhcpath)[0] + ".html"
        base = os.path.dirname(os.path.abspath(htmlpath))

        def unquote(x):
            if isinstance(x, unicode):
                x = x.encode('utf-8')
            return _unquote(x).decode('utf-8')

        def unquote_path(x):
            y = unquote(x)
            if (not os.path.exists(os.path.join(base, x)) and
                    os.path.exists(os.path.join(base, y))):
                x = y
            return x

        def donode(item, parent, base, subpath):
            for child in item:
                title = child.title
                if not title: continue
                raw = unquote_path(child.href or '')
                rsrcname = os.path.basename(raw)
                rsrcpath = os.path.join(subpath, rsrcname)
                if (not os.path.exists(os.path.join(base, rsrcpath)) and
                        os.path.exists(os.path.join(base, raw))):
                    rsrcpath = raw

                if '%' not in rsrcpath:
                    rsrcpath = urlquote(rsrcpath)
                if not raw:
                    rsrcpath = ''
                c = DIV(A(title, href=rsrcpath))
                donode(child, c, base, subpath)
                parent.append(c)

        with open(htmlpath, 'wb') as f:
            if chapters:
                f.write('<html><head><meta http-equiv="Content-type"'
                        ' content="text/html;charset=UTF-8" /></head><body>\n')
                path0 = chapters[0][1]
            if toc.count() > 1:
                from lxml.html.builder import HTML, BODY, DIV, A
                path0 = toc[0].href
                path0 = unquote_path(path0)
                subpath = os.path.dirname(path0)
                base = os.path.dirname(f.name)

                for chapter in chapters:
                    title = chapter[0]
                    rsrcname = os.path.basename(chapter[1])
                    rsrcpath = os.path.join(subpath, rsrcname)
                    if (not os.path.exists(os.path.join(base, rsrcpath)) and
                            os.path.exists(os.path.join(base, chapter[1]))):
                        rsrcpath = chapter[1]

                    # title should already be url encoded
                    url = "<br /><a href=" + rsrcpath + ">" + title + " </a>\n"
                    if isinstance(url, unicode):
                        url = url.encode('utf-8')
                    f.write(url)

                f.write("</body></html>")
                root = DIV()
                donode(toc, root, base, subpath)
                raw = html.tostring(HTML(BODY(root)), encoding='utf-8',
                        pretty_print=True)
                f.write(raw)
            else:
                f.write(hhcdata)
            return htmlpath

        return htmlpath, toc

    def _read_file(self, name):
        f = open(name, 'rb')
@@ -186,41 +168,27 @@ class CHMInput(InputFormatPlugin):
        f.close()
        return data

    def _visit_node(self, node, chapters, depth):
        # check that node is a normal node (not a comment, DOCTYPE, etc.)
        # (normal nodes have string tags)
        if isinstance(node.tag, basestring):
            from calibre.ebooks.chm.reader import match_string

            chapter_path = None
            if match_string(node.tag, 'object') and match_string(node.attrib['type'], 'text/sitemap'):
                chapter_title = None
                for child in node:
                    if match_string(child.tag,'param') and match_string(child.attrib['name'], 'name'):
                        chapter_title = child.attrib['value']
                    if match_string(child.tag,'param') and match_string(child.attrib['name'],'local'):
                        chapter_path = child.attrib['value']
                if chapter_title is not None and chapter_path is not None:
                    chapter = [chapter_title, chapter_path, depth]
                    chapters.append(chapter)
            if node.tag=="UL":
                depth = depth + 1
            if node.tag=="/UL":
                depth = depth - 1
    def add_node(self, node, toc, ancestor_map):
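        # Each object node of type 'text/sitemap' becomes a TOC entry; the
        # entry is attached under the node created for its nearest enclosing
        # li/object, which preserves the nesting of the .hhc file.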
|
||||
from calibre.ebooks.chm.reader import match_string
|
||||
if match_string(node.attrib['type'], 'text/sitemap'):
|
||||
p = node.xpath('ancestor::ul[1]/ancestor::li[1]/object[1]')
|
||||
parent = p[0] if p else None
|
||||
toc = ancestor_map.get(parent, toc)
|
||||
title = href = u''
|
||||
for param in node.xpath('./param'):
|
||||
if match_string(param.attrib['name'], 'name'):
|
||||
title = param.attrib['value']
|
||||
elif match_string(param.attrib['name'], 'local'):
|
||||
href = param.attrib['value']
|
||||
child = toc.add(title or _('Unknown'), href)
|
||||
ancestor_map[node] = child
|
||||
|
||||
def _process_nodes(self, root):
|
||||
chapters = []
|
||||
depth = 0
|
||||
for node in root.iter():
|
||||
self._visit_node(node, chapters, depth)
|
||||
return chapters
|
||||
from calibre.ebooks.oeb.base import TOC
|
||||
toc = TOC()
|
||||
ancestor_map = {}
|
||||
for node in root.xpath('//object'):
|
||||
self.add_node(node, toc, ancestor_map)
|
||||
return toc
|
||||
|
||||
def _add_item(self, oeb, title, path):
|
||||
bname = os.path.basename(path)
|
||||
id, href = oeb.manifest.generate(id='html',
|
||||
href=ascii_filename(bname))
|
||||
item = oeb.manifest.add(id, href, 'text/html')
|
||||
item.html_input_href = bname
|
||||
oeb.spine.add(item, True)
|
||||
oeb.toc.add(title, item.href)
|
||||
|
||||
|
@ -515,6 +515,7 @@ class HTMLPreProcessor(object):
|
||||
if not getattr(self.extra_opts, 'keep_ligatures', False):
|
||||
html = _ligpat.sub(lambda m:LIGATURES[m.group()], html)
|
||||
|
||||
user_sr_rules = {}
|
||||
# Function for processing search and replace
|
||||
def do_search_replace(search_pattern, replace_txt):
|
||||
try:
|
||||
@ -522,6 +523,7 @@ class HTMLPreProcessor(object):
|
||||
if not replace_txt:
|
||||
replace_txt = ''
|
||||
rules.insert(0, (search_re, replace_txt))
|
||||
user_sr_rules[(search_re, replace_txt)] = search_pattern
|
||||
except Exception as e:
|
||||
self.log.error('Failed to parse %r regexp because %s' %
|
||||
(search, as_unicode(e)))
|
||||
@ -587,7 +589,16 @@ class HTMLPreProcessor(object):
|
||||
#dump(html, 'pre-preprocess')
|
||||
|
||||
for rule in rules + end_rules:
|
||||
html = rule[0].sub(rule[1], html)
|
||||
try:
|
||||
html = rule[0].sub(rule[1], html)
|
||||
except re.error as e:
|
||||
if rule in user_sr_rules:
|
||||
self.log.error(
|
||||
'User supplied search & replace rule: %s -> %s '
|
||||
'failed with error: %s, ignoring.'%(
|
||||
user_sr_rules[rule], rule[1], e))
|
||||
else:
|
||||
raise
|
||||
|
||||
if is_pdftohtml and length > -1:
|
||||
# Dehyphenate
|
||||
|
@ -200,7 +200,7 @@ class Source(Plugin):
|
||||
#: during the identify phase
|
||||
touched_fields = frozenset()
|
||||
|
||||
#: Set this to True if your plugin return HTML formatted comments
|
||||
#: Set this to True if your plugin returns HTML formatted comments
|
||||
has_html_comments = False
|
||||
|
||||
#: Setting this to True means that the browser object will add
|
||||
|
@ -194,12 +194,11 @@ class TOC(list):
|
||||
content = content_path(np)
|
||||
if content and text:
|
||||
content = content[0]
|
||||
src = get_attr(content, attr='src')
|
||||
if src:
|
||||
purl = urlparse(content.get('src'))
|
||||
href, fragment = unquote(purl[2]), unquote(purl[5])
|
||||
nd = dest.add_item(href, fragment, text)
|
||||
nd.play_order = play_order
|
||||
# if get_attr(content, attr='src'):
|
||||
purl = urlparse(content.get('src'))
|
||||
href, fragment = unquote(purl[2]), unquote(purl[5])
|
||||
nd = dest.add_item(href, fragment, text)
|
||||
nd.play_order = play_order
|
||||
|
||||
for c in np_path(np):
|
||||
process_navpoint(c, nd)
|
||||
|
@ -13,6 +13,7 @@ from calibre.utils.date import parse_date
|
||||
from calibre.ebooks.mobi import MobiError
|
||||
from calibre.ebooks.metadata import MetaInformation, check_isbn
|
||||
from calibre.ebooks.mobi.langcodes import main_language, sub_language, mobi2iana
|
||||
from calibre.utils.cleantext import clean_ascii_chars
|
||||
from calibre.utils.localization import canonicalize_lang
|
||||
|
||||
NULL_INDEX = 0xffffffff
|
||||
@ -31,6 +32,8 @@ class EXTHHeader(object): # {{{
|
||||
self.kf8_header = None
|
||||
self.uuid = self.cdetype = None
|
||||
|
||||
self.decode = lambda x : clean_ascii_chars(x.decode(codec, 'replace'))
|
||||
|
||||
while left > 0:
|
||||
left -= 1
|
||||
idx, size = struct.unpack('>LL', raw[pos:pos + 8])
|
||||
@ -66,7 +69,7 @@ class EXTHHeader(object): # {{{
|
||||
# title contains non ASCII chars or non filename safe chars
|
||||
# they are messed up in the PDB header
|
||||
try:
|
||||
title = content.decode(codec)
|
||||
title = self.decode(content)
|
||||
except:
|
||||
pass
|
||||
elif idx == 524: # Lang code
|
||||
@ -80,31 +83,30 @@ class EXTHHeader(object): # {{{
|
||||
#else:
|
||||
# print 'unknown record', idx, repr(content)
|
||||
if title:
|
||||
self.mi.title = replace_entities(title)
|
||||
self.mi.title = replace_entities(clean_ascii_chars(title))
|
||||
|
||||
def process_metadata(self, idx, content, codec):
|
||||
if idx == 100:
|
||||
if self.mi.is_null('authors'):
|
||||
self.mi.authors = []
|
||||
au = content.decode(codec, 'ignore').strip()
|
||||
au = self.decode(content).strip()
|
||||
self.mi.authors.append(au)
|
||||
if self.mi.is_null('author_sort') and re.match(r'\S+?\s*,\s+\S+', au.strip()):
|
||||
self.mi.author_sort = au.strip()
|
||||
elif idx == 101:
|
||||
self.mi.publisher = content.decode(codec, 'ignore').strip()
|
||||
self.mi.publisher = self.decode(content).strip()
|
||||
if self.mi.publisher in {'Unknown', _('Unknown')}:
|
||||
self.mi.publisher = None
|
||||
elif idx == 103:
|
||||
self.mi.comments = content.decode(codec, 'ignore')
|
||||
self.mi.comments = self.decode(content).strip()
|
||||
elif idx == 104:
|
||||
raw = check_isbn(content.decode(codec, 'ignore').strip().replace('-', ''))
|
||||
raw = check_isbn(self.decode(content).strip().replace('-', ''))
|
||||
if raw:
|
||||
self.mi.isbn = raw
|
||||
elif idx == 105:
|
||||
if not self.mi.tags:
|
||||
self.mi.tags = []
|
||||
self.mi.tags.extend([x.strip() for x in content.decode(codec,
|
||||
'ignore').split(';')])
|
||||
self.mi.tags.extend([x.strip() for x in self.decode(content).split(';')])
|
||||
self.mi.tags = list(set(self.mi.tags))
|
||||
elif idx == 106:
|
||||
try:
|
||||
@ -112,7 +114,7 @@ class EXTHHeader(object): # {{{
|
||||
except:
|
||||
pass
|
||||
elif idx == 108:
|
||||
self.mi.book_producer = content.decode(codec, 'ignore').strip()
|
||||
self.mi.book_producer = self.decode(content).strip()
|
||||
elif idx == 112: # dc:source set in some EBSP amazon samples
|
||||
try:
|
||||
content = content.decode(codec).strip()
|
||||
|
@ -249,7 +249,10 @@ class MobiReader(object):
|
||||
head.insert(0, m)
|
||||
if not title:
|
||||
title = head.makeelement('title', {})
|
||||
title.text = self.book_header.title
|
||||
try:
|
||||
title.text = self.book_header.title
|
||||
except ValueError:
|
||||
title.text = clean_ascii_chars(self.book_header.title)
|
||||
title.tail = '\n\t'
|
||||
head.insert(0, title)
|
||||
head.text = '\n\t'
|
||||
|
@ -98,6 +98,9 @@ _self_closing_pat = re.compile(
|
||||
def close_self_closing_tags(raw):
|
||||
return _self_closing_pat.sub(r'<\g<tag>\g<arg>></\g<tag>>', raw)
|
||||
|
||||
def uuid_id():
|
||||
return 'u'+unicode(uuid.uuid4())
|
||||
|
||||
def iterlinks(root, find_links_in_css=True):
|
||||
'''
|
||||
Iterate over all links in a OEB Document.
|
||||
@ -1528,7 +1531,7 @@ class TOC(object):
|
||||
if parent is None:
|
||||
parent = etree.Element(NCX('navMap'))
|
||||
for node in self.nodes:
|
||||
id = node.id or unicode(uuid.uuid4())
|
||||
id = node.id or uuid_id()
|
||||
po = node.play_order
|
||||
if po == 0:
|
||||
po = 1
|
||||
@ -1634,10 +1637,10 @@ class PageList(object):
|
||||
return self.pages.remove(page)
|
||||
|
||||
def to_ncx(self, parent=None):
|
||||
plist = element(parent, NCX('pageList'), id=str(uuid.uuid4()))
|
||||
plist = element(parent, NCX('pageList'), id=uuid_id())
|
||||
values = dict((t, count(1)) for t in ('front', 'normal', 'special'))
|
||||
for page in self.pages:
|
||||
id = page.id or unicode(uuid.uuid4())
|
||||
id = page.id or uuid_id()
|
||||
type = page.type
|
||||
value = str(values[type].next())
|
||||
attrib = {'id': id, 'value': value, 'type': type, 'playOrder': '0'}
|
||||
|
@ -373,16 +373,12 @@ class OEBReader(object):
|
||||
if not title:
|
||||
self._toc_from_navpoint(item, toc, child)
|
||||
continue
|
||||
if not href:
|
||||
gc = xpath(child, 'ncx:navPoint')
|
||||
if not gc:
|
||||
# This node is useless
|
||||
continue
|
||||
href = 'missing.html'
|
||||
|
||||
href = item.abshref(urlnormalize(href[0]))
|
||||
if (not href or not href[0]) and not xpath(child, 'ncx:navPoint'):
|
||||
# This node is useless
|
||||
continue
|
||||
href = item.abshref(urlnormalize(href[0])) if href and href[0] else ''
|
||||
path, _ = urldefrag(href)
|
||||
if path not in self.oeb.manifest.hrefs:
|
||||
if href and path not in self.oeb.manifest.hrefs:
|
||||
self.logger.warn('TOC reference %r not found' % href)
|
||||
gc = xpath(child, 'ncx:navPoint')
|
||||
if not gc:
|
||||
|
@ -18,7 +18,7 @@ from calibre import guess_type
|
||||
from calibre.ebooks.oeb.base import (XHTML, XHTML_NS, CSS_MIME, OEB_STYLES,
|
||||
namespace, barename, XPath)
|
||||
from calibre.ebooks.oeb.stylizer import Stylizer
|
||||
from calibre.utils.filenames import ascii_filename
|
||||
from calibre.utils.filenames import ascii_filename, ascii_text
|
||||
|
||||
COLLAPSE = re.compile(r'[ \t\r\n\v]+')
|
||||
STRIPNUM = re.compile(r'[-0-9]+$')
|
||||
@ -437,7 +437,7 @@ class CSSFlattener(object):
|
||||
items.sort()
|
||||
css = u';\n'.join(u'%s: %s' % (key, val) for key, val in items)
|
||||
classes = node.get('class', '').strip() or 'calibre'
|
||||
klass = STRIPNUM.sub('', classes.split()[0].replace('_', ''))
|
||||
klass = ascii_text(STRIPNUM.sub('', classes.split()[0].replace('_', '')))
|
||||
if css in styles:
|
||||
match = styles[css]
|
||||
else:
|
||||
|
@@ -1661,9 +1661,11 @@ class DeviceMixin(object): # {{{
         update_metadata = device_prefs['manage_device_metadata'] == 'on_connect'

+        get_covers = False
+        desired_thumbnail_height = 0
         if update_metadata and self.device_manager.is_device_connected:
             if self.device_manager.device.WANTS_UPDATED_THUMBNAILS:
                 get_covers = True
                 desired_thumbnail_height = self.device_manager.device.THUMBNAIL_HEIGHT

         # Force a reset if the caches are not initialized
         if reset or not hasattr(self, 'db_book_title_cache'):

@@ -1698,17 +1700,28 @@ class DeviceMixin(object): # {{{
         # will be used by books_on_device to indicate matches. While we are
         # going by, update the metadata for a book if automatic management is on

+        def update_book(id_, book) :
+            if not update_metadata:
+                return
+            mi = db.get_metadata(id_, index_is_id=True, get_cover=get_covers)
+            book.smart_update(mi, replace_metadata=True)
+            if get_covers:
+                if book.cover and os.access(book.cover, os.R_OK):
+                    book.thumbnail = self.cover_to_thumbnail(open(book.cover, 'rb').read())
+                else:
+                    book.thumbnail = self.default_thumbnail
+
         for booklist in booklists:
             for book in booklist:
                 book.in_library = None
                 if getattr(book, 'uuid', None) in self.db_book_uuid_cache:
                     id_ = db_book_uuid_cache[book.uuid]
-                    if (update_metadata and
-                            db.metadata_last_modified(id_, index_is_id=True) !=
-                            getattr(book, 'last_modified', None)):
-                        mi = db.get_metadata(id_, index_is_id=True,
-                                get_cover=get_covers)
-                        book.smart_update(mi, replace_metadata=True)
+                    if (db.metadata_last_modified(id_, index_is_id=True) !=
+                            getattr(book, 'last_modified', None)
+                            or (not book.thumbnail
+                                or max(book.thumbnail[0], book.thumbnail[1]) !=
+                                desired_thumbnail_height)):
+                        update_book(id_, book)
                     book.in_library = 'UUID'
                     # ensure that the correct application_id is set
                     book.application_id = id_

@@ -1721,23 +1734,15 @@ class DeviceMixin(object): # {{{
                 # will match if any of the db_id, author, or author_sort
                 # also match.
                 if getattr(book, 'application_id', None) in d['db_ids']:
-                    if update_metadata:
-                        id_ = getattr(book, 'application_id', None)
-                        book.smart_update(db.get_metadata(id_,
-                                index_is_id=True,
-                                get_cover=get_covers),
-                                replace_metadata=True)
+                    id_ = getattr(book, 'application_id', None)
+                    update_book(id_, book)
                     book.in_library = 'APP_ID'
                     # app_id already matches a db_id. No need to set it.
                     continue
                 # Sonys know their db_id independent of the application_id
                 # in the metadata cache. Check that as well.
                 if getattr(book, 'db_id', None) in d['db_ids']:
-                    if update_metadata:
-                        book.smart_update(db.get_metadata(book.db_id,
-                                index_is_id=True,
-                                get_cover=get_covers),
-                                replace_metadata=True)
+                    update_book(book.db_id, book)
                     book.in_library = 'DB_ID'
                     book.application_id = book.db_id
                     continue

@@ -1752,20 +1757,12 @@ class DeviceMixin(object): # {{{
                 book_authors = clean_string(authors_to_string(book.authors))
                 if book_authors in d['authors']:
                     id_ = d['authors'][book_authors]
-                    if update_metadata:
-                        book.smart_update(db.get_metadata(id_,
-                                index_is_id=True,
-                                get_cover=get_covers),
-                                replace_metadata=True)
+                    update_book(id_, book)
                     book.in_library = 'AUTHOR'
                     book.application_id = id_
                 elif book_authors in d['author_sort']:
                     id_ = d['author_sort'][book_authors]
-                    if update_metadata:
-                        book.smart_update(db.get_metadata(id_,
-                                index_is_id=True,
-                                get_cover=get_covers),
-                                replace_metadata=True)
+                    update_book(id_, book)
                     book.in_library = 'AUTH_SORT'
                     book.application_id = id_
                 else:

@@ -1779,12 +1776,6 @@ class DeviceMixin(object): # {{{

         if update_metadata:
-            if self.device_manager.is_device_connected:
-                if self.device_manager.device.WANTS_UPDATED_THUMBNAILS:
-                    for blist in booklists:
-                        for book in blist:
-                            if book.cover and os.access(book.cover, os.R_OK):
-                                book.thumbnail = \
-                                    self.cover_to_thumbnail(open(book.cover, 'rb').read())
             plugboards = self.library_view.model().db.prefs.get('plugboards', {})
             self.device_manager.sync_booklists(
                     FunctionDispatcher(self.metadata_synced), booklists,
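Note: the device.py hunks above collapse four near-identical smart_update call sites (UUID, APP_ID, DB_ID, AUTHOR/AUTH_SORT matches) into the single update_book closure, which also owns the new thumbnail refresh logic. A minimal standalone sketch of the refactor pattern, with plain dicts standing in for calibre's db and book objects (all names illustrative):

    def make_updater(update_metadata, get_metadata):
        # The flags are captured once; every match branch calls the same
        # helper, so metadata/cover handling lives in exactly one place.
        def update_book(book_id, book):
            if not update_metadata:
                return
            book.update(get_metadata(book_id))
        return update_book

    db = {1: {'title': 'A Book', 'authors': ['Someone']}}
    update_book = make_updater(True, lambda i: db[i])
    book = {}
    update_book(1, book)
    print(book)  # {'title': 'A Book', 'authors': ['Someone']}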
@@ -519,6 +519,7 @@ class PluginUpdaterDialog(SizePersistedDialog):
         self.description.setFrameStyle(QFrame.Panel | QFrame.Sunken)
         self.description.setAlignment(Qt.AlignTop | Qt.AlignLeft)
         self.description.setMinimumHeight(40)
+        self.description.setWordWrap(True)
         layout.addWidget(self.description)

         self.button_box = QDialogButtonBox(QDialogButtonBox.Close)
@@ -16,12 +16,12 @@ from calibre.utils.pyparsing import ParseException
 from calibre.ebooks.metadata import fmt_sidx, authors_to_string, string_to_authors
 from calibre.ebooks.metadata.book.base import SafeFormat
 from calibre.ptempfile import PersistentTemporaryFile
-from calibre.utils.config import tweaks, device_prefs
+from calibre.utils.config import tweaks, device_prefs, prefs
 from calibre.utils.date import dt_factory, qt_to_dt, as_local_time
 from calibre.utils.icu import sort_key
 from calibre.utils.search_query_parser import SearchQueryParser
-from calibre.library.caches import (_match, CONTAINS_MATCH, EQUALS_MATCH,
-        REGEXP_MATCH, MetadataBackup, force_to_bool)
+from calibre.db.search import _match, CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH
+from calibre.library.caches import (MetadataBackup, force_to_bool)
 from calibre.library.save_to_disk import find_plugboard
 from calibre import strftime, isbytestring
 from calibre.constants import filesystem_encoding, DEBUG

@@ -1037,6 +1037,7 @@ class OnDeviceSearch(SearchQueryParser): # {{{
         }
         for x in ('author', 'format'):
             q[x+'s'] = q[x]
+        upf = prefs['use_primary_find_in_search']
         for index, row in enumerate(self.model.db):
             for locvalue in locations:
                 accessor = q[locvalue]

@@ -1063,7 +1064,7 @@ class OnDeviceSearch(SearchQueryParser): # {{{
                             vals = accessor(row).split(',')
                         else:
                             vals = [accessor(row)]
-                        if _match(query, vals, m):
+                        if _match(query, vals, m, use_primary_find_in_search=upf):
                             matches.add(index)
                             break
                 except ValueError: # Unicode errors
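Note: rather than consulting a module-level global, the search now reads the preference once (upf = prefs['use_primary_find_in_search']) and hands it to _match as a keyword argument. A standalone sketch of what "primary find" buys you, with unicodedata standing in for calibre's ICU-backed primary_find (illustrative only):

    import unicodedata

    def _primary(s):
        # Rough stand-in for ICU primary strength: case-fold and strip
        # combining marks so 'Pérez' and 'perez' compare equal.
        s = unicodedata.normalize('NFKD', s.lower())
        return u''.join(c for c in s if not unicodedata.combining(c))

    def contains_match(query, vals, use_primary_find_in_search=False):
        if use_primary_find_in_search:
            q = _primary(query)
            return any(q in _primary(v) for v in vals)
        return any(query.lower() in v.lower() for v in vals)

    print(contains_match(u'perez', [u'G. P\xe9rez'], use_primary_find_in_search=True))  # True
    print(contains_match(u'perez', [u'G. P\xe9rez']))  # False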
@@ -31,7 +31,7 @@ from calibre.utils.logging import GUILog as Log
 from calibre.ebooks.metadata.sources.identify import urls_from_identifiers
 from calibre.ebooks.metadata.book.base import Metadata
 from calibre.ebooks.metadata.opf2 import OPF
-from calibre.gui2 import error_dialog, NONE, rating_font
+from calibre.gui2 import error_dialog, NONE, rating_font, gprefs
 from calibre.utils.date import (utcnow, fromordinal, format_date,
         UNDEFINED_DATE, as_utc)
 from calibre.library.comments import comments_to_html

@@ -264,6 +264,15 @@ class ResultsView(QTableView): # {{{
         sm = self.selectionModel()
         sm.select(idx, sm.ClearAndSelect|sm.Rows)

+    def resize_delegate(self):
+        self.rt_delegate.max_width = int(self.width()/2.1)
+        self.resizeColumnsToContents()
+
+    def resizeEvent(self, ev):
+        ret = super(ResultsView, self).resizeEvent(ev)
+        self.resize_delegate()
+        return ret
+
     def currentChanged(self, current, previous):
         ret = QTableView.currentChanged(self, current, previous)
         self.show_details(current)

@@ -385,7 +394,7 @@ class IdentifyWorker(Thread): # {{{

     def sample_results(self):
         m1 = Metadata('The Great Gatsby', ['Francis Scott Fitzgerald'])
-        m2 = Metadata('The Great Gatsby', ['F. Scott Fitzgerald'])
+        m2 = Metadata('The Great Gatsby - An extra long title to test resizing', ['F. Scott Fitzgerald'])
         m1.has_cached_cover_url = True
         m2.has_cached_cover_url = False
         m1.comments = 'Some comments '*10

@@ -963,12 +972,16 @@ class FullFetch(QDialog): # {{{
         self.covers_widget.chosen.connect(self.ok_clicked)
         self.stack.addWidget(self.covers_widget)

+        self.resize(850, 600)
+        geom = gprefs.get('metadata_single_gui_geom', None)
+        if geom is not None and geom:
+            self.restoreGeometry(geom)
+
         # Workaround for Qt 4.8.0 bug that causes the frame of the window to go
         # off the top of the screen if a max height is not set for the
         # QWebView. Seems to only happen on windows, but keep it for all
         # platforms just in case.
-        self.identify_widget.comments_view.setMaximumHeight(500)
-        self.resize(850, 600)
+        self.identify_widget.comments_view.setMaximumHeight(self.height()-100)

         self.finished.connect(self.cleanup)

@@ -995,12 +1008,14 @@ class FullFetch(QDialog): # {{{
         self.covers_widget.reset_covers()

     def accept(self):
+        gprefs['metadata_single_gui_geom'] = bytearray(self.saveGeometry())
         if self.stack.currentIndex() == 1:
             return QDialog.accept(self)
         # Prevent the usual dialog accept mechanisms from working
         pass

     def reject(self):
+        gprefs['metadata_single_gui_geom'] = bytearray(self.saveGeometry())
         self.identify_widget.cancel()
         self.covers_widget.cancel()
         return QDialog.reject(self)
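Note: the FullFetch hunks wire up geometry persistence: __init__ applies a default size and then restores any saved geometry, and both exit paths (accept and reject) save it back under the 'metadata_single_gui_geom' key. A minimal PyQt4 sketch of the same round trip, with a plain dict standing in for calibre's gprefs (illustrative):

    from PyQt4.Qt import QDialog

    prefs = {}  # stand-in for calibre's gprefs store

    class GeomDialog(QDialog):
        KEY = 'my_dialog_geom'

        def __init__(self, parent=None):
            QDialog.__init__(self, parent)
            self.resize(850, 600)  # sensible default first
            geom = prefs.get(self.KEY, None)
            if geom is not None and geom:
                self.restoreGeometry(geom)

        def _save_geom(self):
            # bytearray() turns the QByteArray into something a
            # pickle/JSON-based prefs store can hold.
            prefs[self.KEY] = bytearray(self.saveGeometry())

        def accept(self):
            self._save_geom()
            return QDialog.accept(self)

        def reject(self):
            self._save_geom()
            return QDialog.reject(self)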
@@ -413,6 +413,7 @@ class RulesModel(QAbstractListModel): # {{{
         rules = list(prefs['column_color_rules'])
         self.rules = []
         for col, template in rules:
+            if col not in self.fm: continue
             try:
                 rule = rule_from_template(self.fm, template)
             except:
@@ -10,8 +10,8 @@ from PyQt4.Qt import (Qt, QAbstractItemModel, QIcon, QVariant, QModelIndex, QSiz
 from calibre.gui2 import NONE
 from calibre.customize.ui import is_disabled, disable_plugin, enable_plugin
-from calibre.library.caches import _match, CONTAINS_MATCH, EQUALS_MATCH, \
-    REGEXP_MATCH
+from calibre.db.search import _match, CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH
+from calibre.utils.config_base import prefs
 from calibre.utils.icu import sort_key
 from calibre.utils.search_query_parser import SearchQueryParser

@@ -60,13 +60,13 @@ class Matches(QAbstractItemModel):
             index = self.createIndex(i, 0)
             data = QVariant(True)
             self.setData(index, data, Qt.CheckStateRole)

     def enable_none(self):
         for i in xrange(len(self.matches)):
             index = self.createIndex(i, 0)
             data = QVariant(False)
             self.setData(index, data, Qt.CheckStateRole)

     def enable_invert(self):
         for i in xrange(len(self.matches)):
             self.toggle_plugin(self.createIndex(i, 0))

@@ -243,6 +243,7 @@ class SearchFilter(SearchQueryParser):
             'name': lambda x : x.name.lower(),
         }
         q['formats'] = q['format']
+        upf = prefs['use_primary_find_in_search']
         for sr in self.srs:
             for locvalue in locations:
                 accessor = q[locvalue]

@@ -276,7 +277,7 @@ class SearchFilter(SearchQueryParser):
                         vals = accessor(sr).split(',')
                     else:
                         vals = [accessor(sr)]
-                    if _match(query, vals, m):
+                    if _match(query, vals, m, use_primary_find_in_search=upf):
                         matches.add(sr)
                         break
                 except ValueError: # Unicode errors
@@ -7,6 +7,7 @@ __license__ = 'GPL 3'
 __copyright__ = '2011, John Schember <john@nachtimwald.com>'
 __docformat__ = 'restructuredtext en'

+import random
 from contextlib import closing

 from lxml import html
@@ -25,17 +25,12 @@ from calibre.gui2.store.web_store_dialog import WebStoreDialog
 class EmpikStore(BasicStoreConfig, StorePlugin):

     def open(self, parent=None, detail_item=None, external=False):
-        plain_url = 'http://www.empik.com/ebooki'
-        url = 'https://ssl.afiliant.com/affskrypt,,2f9de2,,23c7f,,,?u=(' + plain_url + ')'
-        detail_url = None
-
-        if detail_item:
-            detail_url = 'https://ssl.afiliant.com/affskrypt,,2f9de2,,23c7f,,,?u=(' + detail_item + ')'
+        url = 'http://www.empik.com/ebooki'

         if external or self.config.get('open_external', False):
-            open_url(QUrl(url_slash_cleaner(detail_url if detail_url else url)))
+            open_url(QUrl(url_slash_cleaner(detail_item if detail_item else url)))
         else:
-            d = WebStoreDialog(self.gui, url, parent, detail_url)
+            d = WebStoreDialog(self.gui, url, parent, detail_item)
             d.setWindowTitle(self.name)
             d.set_tags(self.config.get('tags', ''))
             d.exec_()
@@ -6,10 +6,12 @@ __license__ = 'GPL 3'
 __copyright__ = '2011, John Schember <john@nachtimwald.com>'
 __docformat__ = 'restructuredtext en'

+import os
 from threading import Lock

 from PyQt4.Qt import (QUrl, QCoreApplication)

+from calibre.constants import cache_dir
 from calibre.gui2 import open_url
 from calibre.gui2.store import StorePlugin
 from calibre.gui2.store.basic_config import BasicStoreConfig

@@ -26,6 +28,16 @@ class MobileReadStore(BasicStoreConfig, StorePlugin):
         StorePlugin.__init__(self, *args, **kwargs)
         self.lock = Lock()

+    @property
+    def cache(self):
+        if not hasattr(self, '_mr_cache'):
+            from calibre.utils.config import JSONConfig
+            self._mr_cache = JSONConfig('mobileread_get_books')
+            self._mr_cache.file_path = os.path.join(cache_dir(),
+                    'mobileread_get_books.json')
+            self._mr_cache.refresh()
+        return self._mr_cache
+
     def open(self, parent=None, detail_item=None, external=False):
         url = 'http://www.mobileread.com/'

@@ -61,7 +73,7 @@ class MobileReadStore(BasicStoreConfig, StorePlugin):
                      suppress_progress=False):
         if self.lock.acquire(False):
             try:
-                update_thread = CacheUpdateThread(self.config, self.seralize_books, timeout)
+                update_thread = CacheUpdateThread(self.cache, self.seralize_books, timeout)
                 if not suppress_progress:
                     progress = CacheProgressDialog(parent)
                     progress.set_message(_('Updating MobileRead book cache...'))

@@ -85,7 +97,7 @@ class MobileReadStore(BasicStoreConfig, StorePlugin):
             self.lock.release()

     def get_book_list(self):
-        return self.deseralize_books(self.config.get('book_list', []))
+        return self.deseralize_books(self.cache.get('book_list', []))

     def seralize_books(self, books):
         sbooks = []
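Note: the cache property above is a lazy initializer: the JSONConfig store is created on first access, re-pointed at a file under cache_dir(), and reused afterwards, which keeps the large MobileRead book list out of the normal config file. A standalone sketch of the idiom, with json/tempfile standing in for JSONConfig and cache_dir() (illustrative):

    import json, os, tempfile

    class Store(object):

        @property
        def cache(self):
            # Built on first use only; later accesses return the same dict.
            if not hasattr(self, '_cache'):
                self._cache_path = os.path.join(tempfile.gettempdir(),
                        'store_cache.json')
                try:
                    with open(self._cache_path, 'rb') as f:
                        self._cache = json.load(f)
                except (EnvironmentError, ValueError):
                    self._cache = {}
            return self._cache

    s = Store()
    s.cache['book_list'] = ['book one', 'book two']
    print(s.cache.get('book_list', []))  # ['book one', 'book two']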
@@ -11,13 +11,13 @@ from operator import attrgetter
 from PyQt4.Qt import (Qt, QAbstractItemModel, QModelIndex, QVariant, pyqtSignal)

 from calibre.gui2 import NONE
-from calibre.library.caches import _match, CONTAINS_MATCH, EQUALS_MATCH, \
-    REGEXP_MATCH
+from calibre.db.search import _match, CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH
+from calibre.utils.config_base import prefs
 from calibre.utils.icu import sort_key
 from calibre.utils.search_query_parser import SearchQueryParser

 class BooksModel(QAbstractItemModel):

     total_changed = pyqtSignal(int)

     HEADERS = [_('Title'), _('Author(s)'), _('Format')]

@@ -37,8 +37,8 @@ class BooksModel(QAbstractItemModel):
             return self.books[row]
         else:
             return None

     def search(self, filter):
         self.filter = filter.strip()
         if not self.filter:
             self.books = self.all_books

@@ -50,7 +50,7 @@ class BooksModel(QAbstractItemModel):
         self.layoutChanged.emit()
         self.sort(self.sort_col, self.sort_order)
         self.total_changed.emit(self.rowCount())

     def index(self, row, column, parent=QModelIndex()):
         return self.createIndex(row, column)

@@ -64,7 +64,7 @@ class BooksModel(QAbstractItemModel):
     def columnCount(self, *args):
         return len(self.HEADERS)

     def headerData(self, section, orientation, role):
         if role != Qt.DisplayRole:
             return NONE

@@ -112,7 +112,7 @@ class BooksModel(QAbstractItemModel):

 class SearchFilter(SearchQueryParser):

     USABLE_LOCATIONS = [
         'all',
         'author',

@@ -161,6 +161,7 @@ class SearchFilter(SearchQueryParser):
         }
         for x in ('author', 'format'):
             q[x+'s'] = q[x]
+        upf = prefs['use_primary_find_in_search']
         for sr in self.srs:
             for locvalue in locations:
                 accessor = q[locvalue]

@@ -182,7 +183,7 @@ class SearchFilter(SearchQueryParser):
                         m = matchkind

                     vals = [accessor(sr)]
-                    if _match(query, vals, m):
+                    if _match(query, vals, m, use_primary_find_in_search=upf):
                         matches.add(sr)
                         break
                 except ValueError: # Unicode errors
@@ -70,6 +70,7 @@ def config(defaults=None):
     c.add_opt('bottom_margin', default=20)
     c.add_opt('text_color', default=None)
     c.add_opt('background_color', default=None)
+    c.add_opt('show_controls', default=True)

     fonts = c.add_group('FONTS', _('Font options'))
     fonts('serif_family', default='Times New Roman' if iswindows else 'Liberation Serif',

@@ -221,6 +222,7 @@ class ConfigDialog(QDialog, Ui_Dialog):
         for x in ('text', 'background'):
             setattr(self, 'current_%s_color'%x, getattr(opts, '%s_color'%x))
         self.update_sample_colors()
+        self.opt_show_controls.setChecked(opts.show_controls)

     def change_color(self, which, reset=False):
         if reset:

@@ -292,6 +294,7 @@ class ConfigDialog(QDialog, Ui_Dialog):
                 self.opt_override_book_margins.isChecked())
         c.set('text_color', self.current_text_color)
         c.set('background_color', self.current_background_color)
+        c.set('show_controls', self.opt_show_controls.isChecked())
         for x in ('top', 'bottom', 'side'):
             c.set(x+'_margin', int(getattr(self, 'opt_%s_margin'%x).value()))
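Note: a new viewer option touches three places in config.py: the default (c.add_opt), loading the stored value into the dialog widget, and writing the widget state back on save. A tiny standalone sketch of that round trip with a dict for the config and a dict for the widget (illustrative):

    DEFAULTS = {'show_controls': True}   # c.add_opt('show_controls', default=True)

    def load_into_dialog(config, widget):
        # mirrors self.opt_show_controls.setChecked(opts.show_controls)
        widget['checked'] = config.get('show_controls', DEFAULTS['show_controls'])

    def save_from_dialog(config, widget):
        # mirrors c.set('show_controls', self.opt_show_controls.isChecked())
        config['show_controls'] = widget['checked']

    config, widget = {}, {}
    load_into_dialog(config, widget)
    widget['checked'] = False            # user unticks the box
    save_from_dialog(config, widget)
    print(config)  # {'show_controls': False}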
@@ -347,8 +347,8 @@ QToolBox::tab:hover {
       <rect>
        <x>0</x>
        <y>0</y>
-       <width>811</width>
-       <height>352</height>
+       <width>352</width>
+       <height>176</height>
       </rect>
      </property>
      <attribute name="label">

@@ -573,8 +573,8 @@ QToolBox::tab:hover {
       <rect>
        <x>0</x>
        <y>0</y>
-       <width>352</width>
-       <height>123</height>
+       <width>811</width>
+       <height>352</height>
       </rect>
      </property>
      <attribute name="label">

@@ -605,20 +605,27 @@ QToolBox::tab:hover {
       </property>
      </widget>
     </item>
-    <item row="2" column="0" colspan="2">
+    <item row="3" column="0" colspan="2">
      <widget class="QCheckBox" name="opt_remember_window_size">
       <property name="text">
        <string>Remember last used &amp;window size and layout</string>
       </property>
      </widget>
    </item>
-   <item row="3" column="0" colspan="2">
+   <item row="4" column="0" colspan="2">
     <widget class="QCheckBox" name="opt_remember_current_page">
      <property name="text">
       <string>Remember the &amp;current page when quitting</string>
     </property>
    </widget>
   </item>
+  <item row="2" column="0" colspan="2">
+   <widget class="QCheckBox" name="opt_show_controls">
+    <property name="text">
+     <string>Show &amp;controls in the viewer window</string>
+    </property>
+   </widget>
+  </item>
  </layout>
 </widget>
 </widget>
@@ -33,6 +33,7 @@ class Document(QWebPage): # {{{

     page_turn = pyqtSignal(object)
     mark_element = pyqtSignal(QWebElement)
+    settings_changed = pyqtSignal()

     def set_font_settings(self, opts):
         settings = self.settings()

@@ -57,6 +58,7 @@ class Document(QWebPage): # {{{
         self.set_font_settings(opts)
         self.set_user_stylesheet(opts)
         self.misc_config(opts)
+        self.settings_changed.emit()
         self.after_load()

     def __init__(self, shortcuts, parent=None, debug_javascript=False):

@@ -153,6 +155,7 @@ class Document(QWebPage): # {{{
         self.cols_per_screen = opts.cols_per_screen
         self.side_margin = opts.side_margin
         self.top_margin, self.bottom_margin = opts.top_margin, opts.bottom_margin
+        self.show_controls = opts.show_controls

     def fit_images(self):
         if self.do_fit_images and not self.in_paged_mode:

@@ -676,7 +679,7 @@ class DocumentView(QWebView): # {{{

         if not text and img.isNull() and self.manager is not None:
             menu.addSeparator()
-            if self.document.in_fullscreen_mode and self.manager is not None:
+            if (not self.document.show_controls or self.document.in_fullscreen_mode) and self.manager is not None:
                 menu.addAction(self.manager.toggle_toolbar_action)
             menu.addAction(self.manager.action_full_screen)
@@ -303,6 +303,7 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
         self.toggle_toolbar_action = QAction(_('Show/hide controls'), self)
+        self.toggle_toolbar_action.setCheckable(True)
         self.toggle_toolbar_action.triggered.connect(self.toggle_toolbars)
         self.toolbar_hidden = None
         self.addAction(self.toggle_toolbar_action)
         self.full_screen_label_anim = QPropertyAnimation(
                 self.full_screen_label, 'size')

@@ -359,7 +360,10 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
         # continue to function even when the toolbars are hidden
         self.addAction(action)

+        self.view.document.settings_changed.connect(self.settings_changed)
+
         self.restore_state()
+        self.settings_changed()
         self.action_toggle_paged_mode.toggled[bool].connect(self.toggle_paged_mode)
         if (start_in_fullscreen or self.view.document.start_in_fullscreen):
             self.action_full_screen.trigger()

@@ -373,6 +377,11 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
         if at_start: return
         self.reload()

+    def settings_changed(self):
+        for x in ('', '2'):
+            x = getattr(self, 'tool_bar'+x)
+            x.setVisible(self.view.document.show_controls)
+
     def reload(self):
         if hasattr(self, 'current_index') and self.current_index > -1:
             self.view.document.page_position.save(overwrite=False)

@@ -575,8 +584,7 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
             self.vertical_scrollbar.setVisible(True)
             self.window_mode_changed = 'normal'
             self.esc_full_screen_action.setEnabled(False)
-            self.tool_bar.setVisible(True)
-            self.tool_bar2.setVisible(True)
+            self.settings_changed()
             self.full_screen_label.setVisible(False)
             if hasattr(self, '_original_frame_margins'):
                 om = self._original_frame_margins

@@ -758,7 +766,7 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
                 self.view.scroll_to(frag)
             else:
                 # Scroll to top
-                self.view.scroll_to('#')
+                self.view.scroll_to(0)
             if self.view.document.ypos == oldpos:
                 # If we are coming from goto_next_section() call this will
                 # cause another goto next section call with the next toc
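Note: the viewer now funnels all toolbar visibility through one slot, settings_changed(), driven by the Document's new settings_changed signal; the fullscreen-exit path calls the same slot instead of force-showing the toolbars. A standalone sketch of the observer pattern involved, with a plain callback list standing in for the pyqtSignal (illustrative):

    class Document(object):
        def __init__(self):
            self.show_controls = True
            self._settings_changed_slots = []  # stand-in for pyqtSignal

        def connect_settings_changed(self, slot):
            self._settings_changed_slots.append(slot)

        def load_options(self, show_controls):
            self.show_controls = show_controls
            for slot in self._settings_changed_slots:
                slot()  # emit

    class Viewer(object):
        def __init__(self, doc):
            self.doc = doc
            self.toolbars = {'tool_bar': True, 'tool_bar2': True}
            doc.connect_settings_changed(self.settings_changed)

        def settings_changed(self):
            # One place decides visibility for every toolbar.
            for name in self.toolbars:
                self.toolbars[name] = self.doc.show_controls

    doc = Document()
    viewer = Viewer(doc)
    doc.load_options(False)
    print(viewer.toolbars)  # {'tool_bar': False, 'tool_bar2': False}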
@@ -56,7 +56,7 @@ class TOCItem(QStandardItem):
         self.title = text
         self.parent = parent
         QStandardItem.__init__(self, text if text else '')
-        self.abspath = toc.abspath
+        self.abspath = toc.abspath if toc.href else None
         self.fragment = toc.fragment
         all_items.append(self)
         self.bold_font = QFont(self.font())

@@ -70,11 +70,13 @@ class TOCItem(QStandardItem):
             if si == self.abspath:
                 spos = i
                 break
-        try:
-            am = getattr(spine[i], 'anchor_map', {})
-        except UnboundLocalError:
-            # Spine was empty?
-            am = {}
+        am = {}
+        if self.abspath is not None:
+            try:
+                am = getattr(spine[i], 'anchor_map', {})
+            except UnboundLocalError:
+                # Spine was empty?
+                pass
         frag = self.fragment if (self.fragment and self.fragment in am) else None
         self.starts_at = spos
         self.start_anchor = frag
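Note: the TOCItem fix has two halves: entries without an href get abspath = None up front, and the anchor-map lookup only touches the spine when abspath is set, so an href-less entry can no longer pick up whichever spine item the loop variable happened to end on. A condensed sketch of the corrected control flow (names illustrative):

    def anchor_map_for(abspath, spine):
        am = {}
        if abspath is not None:
            for item in spine:
                if item['path'] == abspath:
                    am = item.get('anchor_map', {})
                    break
        return am

    spine = [{'path': 'ch1.html', 'anchor_map': {'sec1': 0.5}}]
    print(anchor_map_for('ch1.html', spine))  # {'sec1': 0.5}
    print(anchor_map_for(None, spine))        # {} -- no stale spine lookup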
@@ -6,7 +6,7 @@ __license__ = 'GPL v3'
 __copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
 __docformat__ = 'restructuredtext en'

-import re, itertools, time, traceback, locale
+import itertools, time, traceback, locale
 from itertools import repeat, izip, imap
 from datetime import timedelta
 from threading import Thread

@@ -16,10 +16,10 @@ from calibre.utils.date import parse_date, now, UNDEFINED_DATE, clean_date_for_s
 from calibre.utils.search_query_parser import SearchQueryParser
 from calibre.utils.pyparsing import ParseException
 from calibre.utils.localization import (canonicalize_lang, lang_map, get_udc)
+from calibre.db.search import CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH, _match
 from calibre.ebooks.metadata import title_sort, author_to_author_sort
 from calibre.ebooks.metadata.opf2 import metadata_to_opf
 from calibre import prints
-from calibre.utils.icu import primary_find

 class MetadataBackup(Thread): # {{{
     '''

@@ -118,7 +118,6 @@ class MetadataBackup(Thread): # {{{

 # }}}

-### Global utility function for get_match here and in gui2/library.py
 # This is a global for performance
 pref_use_primary_find_in_search = False

@@ -127,47 +126,6 @@ def set_use_primary_find_in_search(toWhat):
     global pref_use_primary_find_in_search
     pref_use_primary_find_in_search = toWhat

-CONTAINS_MATCH = 0
-EQUALS_MATCH = 1
-REGEXP_MATCH = 2
-def _match(query, value, matchkind):
-    if query.startswith('..'):
-        query = query[1:]
-        sq = query[1:]
-        internal_match_ok = True
-    else:
-        internal_match_ok = False
-    for t in value:
-        try: ### ignore regexp exceptions, required because search-ahead tries before typing is finished
-            t = icu_lower(t)
-            if (matchkind == EQUALS_MATCH):
-                if internal_match_ok:
-                    if query == t:
-                        return True
-                    comps = [c.strip() for c in t.split('.') if c.strip()]
-                    for comp in comps:
-                        if sq == comp:
-                            return True
-                elif query[0] == '.':
-                    if t.startswith(query[1:]):
-                        ql = len(query) - 1
-                        if (len(t) == ql) or (t[ql:ql+1] == '.'):
-                            return True
-                elif query == t:
-                    return True
-            elif matchkind == REGEXP_MATCH:
-                if re.search(query, t, re.I|re.UNICODE):
-                    return True
-            elif matchkind == CONTAINS_MATCH:
-                if pref_use_primary_find_in_search:
-                    if primary_find(query, t)[0] != -1:
-                        return True
-                elif query in t:
-                    return True
-        except re.error:
-            pass
-    return False

 def force_to_bool(val):
     if isinstance(val, (str, unicode)):
         try:

@@ -576,7 +534,8 @@ class ResultCache(SearchQueryParser): # {{{
                     continue
                 k = parts[:1]
                 v = parts[1:]
-                if keyq and not _match(keyq, k, keyq_mkind):
+                if keyq and not _match(keyq, k, keyq_mkind,
+                        use_primary_find_in_search=pref_use_primary_find_in_search):
                     continue
                 if valq:
                     if valq == 'true':

@@ -586,7 +545,8 @@ class ResultCache(SearchQueryParser): # {{{
                         if v:
                             add_if_nothing_matches = False
                         continue
-                    elif not _match(valq, v, valq_mkind):
+                    elif not _match(valq, v, valq_mkind,
+                            use_primary_find_in_search=pref_use_primary_find_in_search):
                         continue
                 matches.add(id_)

@@ -851,7 +811,8 @@ class ResultCache(SearchQueryParser): # {{{
                         vals = [v.strip() for v in item[loc].split(is_multiple_cols[loc])]
                     else:
                         vals = [item[loc]] ### make into list to make _match happy
-                    if _match(q, vals, matchkind):
+                    if _match(q, vals, matchkind,
+                            use_primary_find_in_search=pref_use_primary_find_in_search):
                         matches.add(item[0])
                         continue
                 current_candidates -= matches
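Note: the forty-odd removed lines are the old _match implementation; it now lives in calibre.db.search, and the pref_use_primary_find_in_search module global is passed explicitly as the use_primary_find_in_search keyword at every remaining call site. A trimmed standalone sketch of the three match kinds (primary-find handling elided; illustrative, not the exact calibre code):

    import re

    CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH = 0, 1, 2

    def match(query, values, matchkind, use_primary_find_in_search=False):
        # The preference arrives as an argument now, not via a module global.
        for t in values:
            t = t.lower()
            if matchkind == EQUALS_MATCH:
                if query == t:
                    return True
            elif matchkind == REGEXP_MATCH:
                if re.search(query, t, re.I | re.UNICODE):
                    return True
            elif matchkind == CONTAINS_MATCH:
                if query in t:
                    return True
        return False

    print(match('gatsby', ['The Great Gatsby'], CONTAINS_MATCH))  # True
    print(match('^the', ['The Great Gatsby'], REGEXP_MATCH))      # True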
@@ -9,7 +9,7 @@ from xml.sax.saxutils import escape

 from calibre import (prepare_string_for_xml, strftime, force_unicode,
         isbytestring)
-from calibre.constants import isosx
+from calibre.constants import isosx, cache_dir
 from calibre.customize.conversion import DummyReporter
 from calibre.customize.ui import output_profiles
 from calibre.ebooks.BeautifulSoup import BeautifulSoup, BeautifulStoneSoup, Tag, NavigableString

@@ -18,7 +18,6 @@ from calibre.ebooks.metadata import author_to_author_sort
 from calibre.library.catalogs import AuthorSortMismatchException, EmptyCatalogException, \
     InvalidGenresSourceFieldException
 from calibre.ptempfile import PersistentTemporaryDirectory
-from calibre.utils.config import config_dir
 from calibre.utils.date import format_date, is_date_undefined, now as nowf
 from calibre.utils.filenames import ascii_text, shorten_components_to
 from calibre.utils.icu import capitalize, collation_order, sort_key

@@ -109,7 +108,7 @@ class CatalogBuilder(object):
         self.plugin = plugin
         self.reporter = report_progress
         self.stylesheet = stylesheet
-        self.cache_dir = os.path.join(config_dir, 'caches', 'catalog')
+        self.cache_dir = os.path.join(cache_dir(), 'catalog')
         self.catalog_path = PersistentTemporaryDirectory("_epub_mobi_catalog", prefix='')
         self.content_dir = os.path.join(self.catalog_path, "content")
         self.excluded_tags = self.get_excluded_tags()
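Note: the catalog cache moves from a hand-built path under config_dir to the shared cache_dir() helper, separating regenerable cache data from configuration that users back up. A one-screen sketch of the pattern with tempfile standing in for calibre.constants.cache_dir (illustrative):

    import os, tempfile

    def cache_dir():
        # Stand-in: one well-known, creatable location for all caches.
        d = os.path.join(tempfile.gettempdir(), 'app-cache')
        if not os.path.exists(d):
            os.makedirs(d)
        return d

    catalog_cache = os.path.join(cache_dir(), 'catalog')
    print(catalog_cache)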
@@ -44,47 +44,13 @@ from calibre.utils.recycle_bin import delete_file, delete_tree
 from calibre.utils.formatter_functions import load_user_template_functions
 from calibre.db.errors import NoSuchFormat
 from calibre.db.lazy import FormatMetadata, FormatsList
+from calibre.db.categories import Tag
 from calibre.utils.localization import (canonicalize_lang,
         calibre_langcode_to_name)

 copyfile = os.link if hasattr(os, 'link') else shutil.copyfile
 SPOOL_SIZE = 30*1024*1024

-class Tag(object):
-
-    def __init__(self, name, id=None, count=0, state=0, avg=0, sort=None,
-                 tooltip=None, icon=None, category=None, id_set=None,
-                 is_editable = True, is_searchable=True, use_sort_as_name=False):
-        self.name = self.original_name = name
-        self.id = id
-        self.count = count
-        self.state = state
-        self.is_hierarchical = ''
-        self.is_editable = is_editable
-        self.is_searchable = is_searchable
-        self.id_set = id_set if id_set is not None else set([])
-        self.avg_rating = avg/2.0 if avg is not None else 0
-        self.sort = sort
-        self.use_sort_as_name = use_sort_as_name
-        if self.avg_rating > 0:
-            if tooltip:
-                tooltip = tooltip + ': '
-            tooltip = _('%(tt)sAverage rating is %(rating)3.1f')%dict(
-                    tt=tooltip, rating=self.avg_rating)
-        self.tooltip = tooltip
-        self.icon = icon
-        self.category = category
-
-    def __unicode__(self):
-        return u'%s:%s:%s:%s:%s:%s'%(self.name, self.count, self.id, self.state,
-                                     self.category, self.tooltip)
-
-    def __str__(self):
-        return unicode(self).encode('utf-8')
-
-    def __repr__(self):
-        return str(self)
-
 class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
     '''
     An ebook metadata database that stores references to ebook files on disk.

@@ -1220,7 +1186,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
             loc.append(_('Card A'))
         if b is not None:
             loc.append(_('Card B'))
-        return ', '.join(loc) + ((' (%s books)'%count) if count > 1 else '')
+        return ', '.join(loc) + ((_(' (%s books)')%count) if count > 1 else '')

     def set_book_on_device_func(self, func):
         self.book_on_device_func = func
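Note: database2.py stops defining its own Tag and imports the shared class from calibre.db.categories, so the old and new database backends agree on one definition. The second hunk is an i18n fix: the whole format string, placeholder included, must pass through _() or translators never see it. A tiny sketch of the difference, with a no-op _ standing in for the gettext lookup (illustrative):

    def _(s):
        return s  # stand-in for the gettext lookup

    count = 3
    # Before: the literal never reaches the translation catalog.
    before = ' (%s books)' % count
    # After: translators get ' (%s books)' as a single translatable unit.
    after = _(' (%s books)') % count
    print(after)  # ' (3 books)'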
@@ -372,8 +372,10 @@ class BrowseServer(object):
             if meta['is_custom'] and category not in displayed_custom_fields:
                 continue
             # get the icon files
-            if category in self.icon_map:
-                icon = '_'+quote(self.icon_map[category])
+            main_cat = (category.partition('.')[0]) if hasattr(category,
+                    'partition') else category
+            if main_cat in self.icon_map:
+                icon = '_'+quote(self.icon_map[main_cat])
             elif category in category_icon_map:
                 icon = category_icon_map[category]
             elif meta['is_custom']:

@@ -894,7 +896,8 @@ class BrowseServer(object):
     @Endpoint()
     def browse_random(self, *args, **kwargs):
         import random
-        book_id = random.choice(tuple(self.db.all_ids()))
+        book_id = random.choice(self.db.search_getting_ids(
+            '', self.search_restriction))
         ans = self.browse_render_details(book_id)
         return self.browse_template('').format(
                 title='', script='book();', main=ans)
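Note: choosing from all_ids() could hand the random-book page a book hidden by the server's search restriction; searching with an empty query plus the restriction keeps the pick inside the visible set. A standalone sketch of the behaviour (illustrative names, not the calibre API):

    import random

    def search_getting_ids(query, restriction, library):
        # Stand-in: empty query + restriction = everything the user may see.
        effective = query or restriction
        return [book_id for book_id, tags in library.items()
                if not effective or effective in tags]

    library = {1: ['public'], 2: ['private'], 3: ['public']}
    allowed = search_getting_ids('', 'public', library)
    print(random.choice(allowed))  # always 1 or 3, never 2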
@@ -77,7 +77,7 @@ def build_navigation(start, num, total, url_base): # {{{
     right_buttons = TD(CLASS('button', style='text-align:right'))

     if start > 1:
-        for t,s in [('First', 1), ('Previous', max(start-(num+1),1))]:
+        for t,s in [('First', 1), ('Previous', max(start-num,1))]:
             left_buttons.append(A(t, href='%s;start=%d'%(url_base, s)))

     if total > start + num:
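Note: with 1-based start offsets and num items per page, the previous page begins at start - num; the old start-(num+1) stepped back one row too far on every click. A quick check of the arithmetic:

    num = 25
    start = 51                             # third page shows items 51..75
    prev_old = max(start - (num + 1), 1)   # 25: lands one short of page two
    prev_new = max(start - num, 1)         # 26: exactly the start of page two
    print((prev_old, prev_new))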
40 file diffs suppressed because they are too large.
Some files were not shown because too many files have changed in this diff.