0.9.15+, KG changed location for iTunes cache

GRiker 2013-01-23 13:06:33 -07:00
commit 1291cfcd12
47 changed files with 1541 additions and 555 deletions

View File

@ -28,6 +28,8 @@ class Barrons(BasicNewsRecipe):
## Don't grab articles more than 7 days old
oldest_article = 7
use_javascript_to_login = True
requires_version = (0, 9, 16)
extra_css = '''
.datestamp{font-family:Verdana,Geneva,Kalimati,sans-serif; font-size:x-small;}
@ -40,7 +42,7 @@ class Barrons(BasicNewsRecipe):
.insettipUnit{font-size: x-small;}
'''
remove_tags = [
dict(name ='div', attrs={'class':['tabContainer artTabbedNav','rssToolBox hidden','articleToolbox']}),
dict(name ='div', attrs={'class':['sTools sTools-t', 'tabContainer artTabbedNav','rssToolBox hidden','articleToolbox']}),
dict(name = 'a', attrs ={'class':'insetClose'})
]
@ -60,21 +62,17 @@ class Barrons(BasicNewsRecipe):
]
]
def get_browser(self):
br = BasicNewsRecipe.get_browser()
if self.username is not None and self.password is not None:
br.open('http://commerce.barrons.com/auth/login')
br.select_form(name='login_form')
br['user'] = self.username
br['password'] = self.password
br.submit()
return br
def javascript_login(self, br, username, password):
br.visit('http://commerce.barrons.com/auth/login')
f = br.select_form(nr=0)
f['username'] = username
f['password'] = password
br.submit(timeout=120)
## Use the print version of a page when available.
def print_version(self, url):
main, sep, rest = url.rpartition('?')
return main + '#printmode'
return main + '#text.print'
def postprocess_html(self, soup, first):

recipes/conowego_pl.recipe  Executable file → Normal file (0 lines changed)
View File

View File

(image file changed: 605 B before, 605 B after; preview not shown)

recipes/linux_journal.recipe  Executable file → Normal file (0 lines changed)
View File

View File

@ -8,13 +8,16 @@ class AdvancedUserRecipe1306097511(BasicNewsRecipe):
title = u'Metro UK'
description = 'News as provided by The Metro -UK'
#timefmt = ''
__author__ = 'Dave Asbury'
#last update 9/6/12
cover_url = 'http://profile.ak.fbcdn.net/hprofile-ak-snc4/276636_117118184990145_2132092232_n.jpg'
oldest_article = 1
__author__ = 'fleclerc & Dave Asbury'
#last update 20/1/13
#cover_url = 'http://profile.ak.fbcdn.net/hprofile-ak-snc4/276636_117118184990145_2132092232_n.jpg'
cover_url = 'https://twimg0-a.akamaihd.net/profile_images/1638332595/METRO_LETTERS-01.jpg'
remove_empty_feeds = True
remove_javascript = True
auto_cleanup = True
max_articles_per_feed = 12
ignore_duplicate_articles = {'title', 'url'}
encoding = 'UTF-8'
language = 'en_GB'

View File

@ -18,6 +18,8 @@ class MichelleMalkin(BasicNewsRecipe):
remove_javascript = True
no_stylesheets = True
auto_cleanup = True
use_embedded_content = False
conversion_options = {
@ -29,16 +31,16 @@ class MichelleMalkin(BasicNewsRecipe):
}
keep_only_tags = [
dict(name='div', attrs={'class':'article'})
]
#keep_only_tags = [
#dict(name='div', attrs={'class':'article'})
#]
remove_tags = [
dict(name=['iframe', 'embed', 'object']),
dict(name='div', attrs={'id':['comments', 'commentForm']}),
dict(name='div', attrs={'class':['postCategories', 'comments', 'blogInfo', 'postInfo']})
#remove_tags = [
#dict(name=['iframe', 'embed', 'object']),
#dict(name='div', attrs={'id':['comments', 'commentForm']}),
#dict(name='div', attrs={'class':['postCategories', 'comments', 'blogInfo', 'postInfo']})
]
#]
feeds = [(u'http://feeds.feedburner.com/michellemalkin/posts')]

View File

@ -7,12 +7,16 @@ class AdvancedUserRecipe1282093204(BasicNewsRecipe):
oldest_article = 1
max_articles_per_feed = 15
use_embedded_content = False
no_stylesheets = True
auto_cleanup = True
masthead_url = 'http://farm5.static.flickr.com/4118/4929686950_0e22e2c88a.jpg'
feeds = [
(u'News-Bill McClellan', u'http://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=news%2Flocal%2fcolumns%2Fbill-mclellan&f=rss&t=article'),
(u'News-Columns', u'http://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=news%2Flocal%2Fcolumns*&l=50&f=rss&t=article'),
(u'News-Crime & Courtshttp://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=news%2Flocal%2Fcrime-and-courts&l=50&f=rss&t=article'),
(u'News-Crime & Courts', 'http://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=news%2Flocal%2Fcrime-and-courts&l=50&f=rss&t=article'),
(u'News-Deb Peterson', u'http://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=news%2Flocal%2fcolumns%2Fdeb-peterson&f=rss&t=article'),
(u'News-Education', u'http://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=news%2Flocal%2feducation&f=rss&t=article'),
(u'News-Government & Politics', u'http://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=news%2Flocal%2fgovt-and-politics&f=rss&t=article'),
@ -62,9 +66,9 @@ class AdvancedUserRecipe1282093204(BasicNewsRecipe):
(u'Entertainment-House-O-Fun', u'http://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=entertainment%2Fhouse-o-fun&l=100&f=rss&t=article'),
(u'Entertainment-Kevin C. Johnson', u'http://www2.stltoday.com/search/?q=&d1=&d2=&s=start_time&sd=desc&c=entertainment%2Fmusic%2Fkevin-johnson&l=100&f=rss&t=article')
]
remove_empty_feeds = True
remove_tags = [dict(name='div', attrs={'id':'blox-logo'}),dict(name='a')]
keep_only_tags = [dict(name='h1'), dict(name='p', attrs={'class':'byline'}), dict(name="div", attrs={'id':'blox-story-text'})]
#remove_empty_feeds = True
#remove_tags = [dict(name='div', attrs={'id':'blox-logo'}),dict(name='a')]
#keep_only_tags = [dict(name='h1'), dict(name='p', attrs={'class':'byline'}), dict(name="div", attrs={'id':'blox-story-text'})]
extra_css = 'p {text-align: left;}'

View File

@ -7,28 +7,15 @@ class AdvancedUserRecipe1289990851(BasicNewsRecipe):
language = 'en_CA'
__author__ = 'Nexus'
no_stylesheets = True
auto_cleanup = True
use_embedded_content = False
INDEX = 'http://tsn.ca/nhl/story/?id=nhl'
keep_only_tags = [dict(name='div', attrs={'id':['tsnColWrap']}),
dict(name='div', attrs={'id':['tsnStory']})]
remove_tags = [dict(name='div', attrs={'id':'tsnRelated'}),
dict(name='div', attrs={'class':'textSize'})]
def parse_index(self):
feeds = []
soup = self.index_to_soup(self.INDEX)
feed_parts = soup.findAll('div', attrs={'class': 'feature'})
for feed_part in feed_parts:
articles = []
if not feed_part.h2:
continue
feed_title = feed_part.h2.string
article_parts = feed_part.findAll('a')
for article_part in article_parts:
article_title = article_part.string
article_date = ''
article_url = 'http://tsn.ca/' + article_part['href']
articles.append({'title': article_title, 'url': article_url, 'description':'', 'date':article_date})
if articles:
feeds.append((feed_title, articles))
return feeds
#keep_only_tags = [dict(name='div', attrs={'id':['tsnColWrap']}),
#dict(name='div', attrs={'id':['tsnStory']})]
#remove_tags = [dict(name='div', attrs={'id':'tsnRelated'}),
#dict(name='div', attrs={'class':'textSize'})]
feeds = [
('News',
'http://www.tsn.ca/datafiles/rss/Stories.xml'),
]

View File

@ -79,6 +79,42 @@ def debug():
global DEBUG
DEBUG = True
_cache_dir = None
def _get_cache_dir():
confcache = os.path.join(config_dir, u'caches')
if isportable:
return confcache
if os.environ.has_key('CALIBRE_CACHE_DIRECTORY'):
return os.path.abspath(os.environ['CALIBRE_CACHE_DIRECTORY'])
if iswindows:
w = plugins['winutil'][0]
candidate = os.path.join(w.special_folder_path(w.CSIDL_LOCAL_APPDATA), u'%s-cache'%__appname__)
elif isosx:
candidate = os.path.join(os.path.expanduser(u'~/Library/Caches'), __appname__)
else:
candidate = os.environ.get('XDG_CACHE_HOME', u'~/.cache')
candidate = os.path.join(os.path.expanduser(candidate),
__appname__)
if isinstance(candidate, bytes):
try:
candidate = candidate.decode(filesystem_encoding)
except ValueError:
candidate = confcache
if not os.path.exists(candidate):
try:
os.makedirs(candidate)
except:
candidate = confcache
return candidate
def cache_dir():
global _cache_dir
if _cache_dir is None:
_cache_dir = _get_cache_dir()
return _cache_dir
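
The pair of helpers above gives every component one platform-aware cache root (Windows local app data, ~/Library/Caches on OS X, XDG_CACHE_HOME elsewhere, with CALIBRE_CACHE_DIRECTORY as an override and the config-dir cache as the fallback). As an illustration only, this is roughly how a plugin that used to cache under the config directory can derive its folder from the new helper, assuming the module shown here is calibre.constants; the subdirectory name and fallback below are placeholders, and the real iTunes driver change is further down in this commit:

import os
from calibre.constants import cache_dir  # the helper added in this hunk

def plugin_cache_path(subdir):
    # Per-plugin cache folder under calibre's cache root, e.g. .../calibre-cache/itunes
    path = os.path.join(cache_dir(), subdir)
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except EnvironmentError:
            path = cache_dir()  # fall back to the shared cache root
    return path

# e.g. thumbs_zip = os.path.join(plugin_cache_path('itunes'), 'thumbs.zip')
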
# plugins {{{
class Plugins(collections.Mapping):

View File

@ -7,16 +7,18 @@ __license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import os, traceback
from collections import defaultdict
from functools import wraps, partial
from calibre.db.locking import create_locks, RecordLock
from calibre.db.fields import create_field
from calibre.db.search import Search
from calibre.db.tables import VirtualTable
from calibre.db.lazy import FormatMetadata, FormatsList
from calibre.ebooks.metadata.book.base import Metadata
from calibre.utils.date import now
from calibre.utils.icu import sort_key
def api(f):
f.is_cache_api = True
@ -50,6 +52,7 @@ class Cache(object):
self.record_lock = RecordLock(self.read_lock)
self.format_metadata_cache = defaultdict(dict)
self.formatter_template_cache = {}
self._search_api = Search(self.field_metadata.get_search_terms())
# Implement locking for all simple read/write API methods
# An unlocked version of the method is stored with the name starting
@ -65,6 +68,36 @@ class Cache(object):
lock = self.read_lock if ira else self.write_lock
setattr(self, name, wrap_simple(lock, func))
self.initialize_dynamic()
def initialize_dynamic(self):
# Reconstruct the user categories, putting them into field_metadata
# Assumption is that someone else will fix them if they change.
self.field_metadata.remove_dynamic_categories()
for user_cat in sorted(self.pref('user_categories', {}).iterkeys(), key=sort_key):
cat_name = '@' + user_cat # add the '@' to avoid name collision
self.field_metadata.add_user_category(label=cat_name, name=user_cat)
# add grouped search term user categories
muc = frozenset(self.pref('grouped_search_make_user_categories', []))
for cat in sorted(self.pref('grouped_search_terms', {}).iterkeys(), key=sort_key):
if cat in muc:
# There is a chance that these can be duplicates of an existing
# user category. Print the exception and continue.
try:
self.field_metadata.add_user_category(label=u'@' + cat, name=cat)
except:
traceback.print_exc()
# TODO: Saved searches
# if len(saved_searches().names()):
# self.field_metadata.add_search_category(label='search', name=_('Searches'))
self.field_metadata.add_grouped_search_terms(
self.pref('grouped_search_terms', {}))
self._search_api.change_locations(self.field_metadata.get_search_terms())
@property
def field_metadata(self):
return self.backend.field_metadata
@ -269,11 +302,11 @@ class Cache(object):
return ()
@read_api
def all_book_ids(self):
def all_book_ids(self, type=frozenset):
'''
Frozen set of all known book ids.
'''
return frozenset(self.fields['uuid'])
return type(self.fields['uuid'])
@read_api
def all_field_ids(self, name):
@ -316,6 +349,10 @@ class Cache(object):
self.format_metadata_cache[book_id][fmt] = ans
return ans
@read_api
def pref(self, name, default=None):
return self.backend.prefs.get(name, default)
@api
def get_metadata(self, book_id,
get_cover=False, get_user_categories=True, cover_as_data=False):
@ -378,17 +415,21 @@ class Cache(object):
all_book_ids = frozenset(self._all_book_ids() if ids_to_sort is None
else ids_to_sort)
get_metadata = partial(self._get_metadata, get_user_categories=False)
def get_lang(book_id):
ans = self._field_for('languages', book_id)
return ans[0] if ans else None
fm = {'title':'sort', 'authors':'author_sort'}
def sort_key(field):
'Handle series type fields'
ans = self.fields[fm.get(field, field)].sort_keys_for_books(get_metadata,
all_book_ids)
idx = field + '_index'
if idx in self.fields:
idx_ans = self.fields[idx].sort_keys_for_books(get_metadata,
all_book_ids)
is_series = idx in self.fields
ans = self.fields[fm.get(field, field)].sort_keys_for_books(
get_metadata, get_lang, all_book_ids,)
if is_series:
idx_ans = self.fields[idx].sort_keys_for_books(
get_metadata, get_lang, all_book_ids)
ans = {k:(v, idx_ans[k]) for k, v in ans.iteritems()}
return ans
@ -401,6 +442,11 @@ class Cache(object):
else:
return sorted(all_book_ids, key=partial(SortKey, fields, sort_keys))
@read_api
def search(self, query, restriction, virtual_fields=None):
return self._search_api(self, query, restriction,
virtual_fields=virtual_fields)
# }}}
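
With the Search object attached in __init__ and exposed through the read_api decorator, callers get thread-safe searching from the cache itself. A minimal sketch of the calling convention (the cache instance is assumed to come from the usual calibre.db initialisation; queries are placeholders):

def find_books(cache, query, restriction=u''):
    # cache: an initialized calibre.db.cache.Cache
    # Returns the set of book ids matching query; a non-empty restriction
    # is ANDed with the query by the Search object (see search.py below).
    return cache.search(query, restriction)

# Example queries mirroring the new test cases:
#   find_books(cache, u'rating:>2')
#   find_books(cache, u'identifiers:=test:=two', u'languages:eng')
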
class SortKey(object):

View File

@ -9,14 +9,19 @@ __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from threading import Lock
from collections import defaultdict
from calibre.db.tables import ONE_ONE, MANY_ONE, MANY_MANY
from calibre.ebooks.metadata import title_sort
from calibre.utils.config_base import tweaks
from calibre.utils.icu import sort_key
from calibre.utils.date import UNDEFINED_DATE
from calibre.utils.localization import calibre_langcode_to_name
class Field(object):
is_many = False
def __init__(self, name, table):
self.name, self.table = name, table
self.has_text_data = self.metadata['datatype'] in ('text', 'comments',
@ -72,7 +77,7 @@ class Field(object):
'''
return iter(())
def sort_keys_for_books(self, get_metadata, all_book_ids):
def sort_keys_for_books(self, get_metadata, get_lang, all_book_ids):
'''
Return a mapping of book_id -> sort_key. The sort key is suitable for
use in sorting the list of all books by this field, via the python cmp
@ -81,6 +86,13 @@ class Field(object):
'''
raise NotImplementedError()
def iter_searchable_values(self, get_metadata, candidates, default_value=None):
'''
Return a generator that yields items of the form (value, set of book
ids that have this value). Here, value is a searchable value. Returned
book ids are restricted to the set of ids in candidates.
'''
raise NotImplementedError()
class OneToOneField(Field):
@ -96,10 +108,15 @@ class OneToOneField(Field):
def __iter__(self):
return self.table.book_col_map.iterkeys()
def sort_keys_for_books(self, get_metadata, all_book_ids):
def sort_keys_for_books(self, get_metadata, get_lang, all_book_ids):
return {id_ : self._sort_key(self.table.book_col_map.get(id_,
self._default_sort_key)) for id_ in all_book_ids}
def iter_searchable_values(self, get_metadata, candidates, default_value=None):
cbm = self.table.book_col_map
for book_id in candidates:
yield cbm.get(book_id, default_value), {book_id}
class CompositeField(OneToOneField):
def __init__(self, *args, **kwargs):
@ -133,10 +150,16 @@ class CompositeField(OneToOneField):
ans = mi.get('#'+self.metadata['label'])
return ans
def sort_keys_for_books(self, get_metadata, all_book_ids):
def sort_keys_for_books(self, get_metadata, get_lang, all_book_ids):
return {id_ : sort_key(self.get_value_with_cache(id_, get_metadata)) for id_ in
all_book_ids}
def iter_searchable_values(self, get_metadata, candidates, default_value=None):
val_map = defaultdict(set)
for book_id in candidates:
val_map[self.get_value_with_cache(book_id, get_metadata)].add(book_id)
for val, book_ids in val_map.iteritems():
yield val, book_ids
class OnDeviceField(OneToOneField):
@ -170,12 +193,21 @@ class OnDeviceField(OneToOneField):
def __iter__(self):
return iter(())
def sort_keys_for_books(self, get_metadata, all_book_ids):
def sort_keys_for_books(self, get_metadata, get_lang, all_book_ids):
return {id_ : self.for_book(id_) for id_ in
all_book_ids}
def iter_searchable_values(self, get_metadata, candidates, default_value=None):
val_map = defaultdict(set)
for book_id in candidates:
val_map[self.for_book(book_id, default_value=default_value)].add(book_id)
for val, book_ids in val_map.iteritems():
yield val, book_ids
class ManyToOneField(Field):
is_many = True
def for_book(self, book_id, default_value=None):
ids = self.table.book_col_map.get(book_id, None)
if ids is not None:
@ -196,7 +228,7 @@ class ManyToOneField(Field):
def __iter__(self):
return self.table.id_map.iterkeys()
def sort_keys_for_books(self, get_metadata, all_book_ids):
def sort_keys_for_books(self, get_metadata, get_lang, all_book_ids):
ans = {id_ : self.table.book_col_map.get(id_, None)
for id_ in all_book_ids}
sk_map = {cid : (self._default_sort_key if cid is None else
@ -204,8 +236,17 @@ class ManyToOneField(Field):
for cid in ans.itervalues()}
return {id_ : sk_map[cid] for id_, cid in ans.iteritems()}
def iter_searchable_values(self, get_metadata, candidates, default_value=None):
cbm = self.table.col_book_map
for item_id, val in self.table.id_map.iteritems():
book_ids = set(cbm.get(item_id, ())).intersection(candidates)
if book_ids:
yield val, book_ids
class ManyToManyField(Field):
is_many = True
def __init__(self, *args, **kwargs):
Field.__init__(self, *args, **kwargs)
self.alphabetical_sort = self.name != 'authors'
@ -227,7 +268,7 @@ class ManyToManyField(Field):
def __iter__(self):
return self.table.id_map.iterkeys()
def sort_keys_for_books(self, get_metadata, all_book_ids):
def sort_keys_for_books(self, get_metadata, get_lang, all_book_ids):
ans = {id_ : self.table.book_col_map.get(id_, ())
for id_ in all_book_ids}
all_cids = set()
@ -239,6 +280,20 @@ class ManyToManyField(Field):
(self._default_sort_key,))
for id_, cids in ans.iteritems()}
def iter_searchable_values(self, get_metadata, candidates, default_value=None):
cbm = self.table.col_book_map
for item_id, val in self.table.id_map.iteritems():
book_ids = set(cbm.get(item_id, ())).intersection(candidates)
if book_ids:
yield val, book_ids
def iter_counts(self, candidates):
val_map = defaultdict(set)
cbm = self.table.book_col_map
for book_id in candidates:
val_map[len(cbm.get(book_id, ()))].add(book_id)
for count, book_ids in val_map.iteritems():
yield count, book_ids
class IdentifiersField(ManyToManyField):
@ -248,7 +303,7 @@ class IdentifiersField(ManyToManyField):
ids = default_value
return ids
def sort_keys_for_books(self, get_metadata, all_book_ids):
def sort_keys_for_books(self, get_metadata, get_lang, all_book_ids):
'Sort by identifier keys'
ans = {id_ : self.table.book_col_map.get(id_, ())
for id_ in all_book_ids}
@ -256,6 +311,12 @@ class IdentifiersField(ManyToManyField):
(self._default_sort_key,))
for id_, cids in ans.iteritems()}
def iter_searchable_values(self, get_metadata, candidates, default_value=()):
bcm = self.table.book_col_map
for book_id in candidates:
val = bcm.get(book_id, default_value)
if val:
yield val, {book_id}
class AuthorsField(ManyToManyField):
@ -274,6 +335,32 @@ class FormatsField(ManyToManyField):
def format_fname(self, book_id, fmt):
return self.table.fname_map[book_id][fmt.upper()]
def iter_searchable_values(self, get_metadata, candidates, default_value=None):
val_map = defaultdict(set)
cbm = self.table.book_col_map
for book_id in candidates:
vals = cbm.get(book_id, ())
for val in vals:
val_map[val].add(book_id)
for val, book_ids in val_map.iteritems():
yield val, book_ids
class SeriesField(ManyToOneField):
def sort_key_for_series(self, book_id, get_lang, series_sort_order):
sid = self.table.book_col_map.get(book_id, None)
if sid is None:
return self._default_sort_key
return self._sort_key(title_sort(self.table.id_map[sid],
order=series_sort_order,
lang=get_lang(book_id)))
def sort_keys_for_books(self, get_metadata, get_lang, all_book_ids):
sso = tweaks['title_series_sorting']
return {book_id:self.sort_key_for_series(book_id, get_lang, sso) for book_id
in all_book_ids}
def create_field(name, table):
cls = {
ONE_ONE : OneToOneField,
@ -290,5 +377,7 @@ def create_field(name, table):
cls = IdentifiersField
elif table.metadata['datatype'] == 'composite':
cls = CompositeField
elif table.metadata['datatype'] == 'series':
cls = SeriesField
return cls(name, table)
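
Every field type now implements the same searchable-values contract: a generator of (value, set of book ids) pairs restricted to a candidate set, which is what the new search code iterates over. A standalone sketch of that contract using a plain dict in place of the table layer (names and data are illustrative):

from collections import defaultdict

def iter_searchable_values(book_col_map, candidates, default_value=None):
    # Mirrors the one-to-one case: one (value, {book_id}) pair per candidate book.
    for book_id in candidates:
        yield book_col_map.get(book_id, default_value), {book_id}

book_col_map = {1: 'epub', 2: 'mobi', 3: 'epub'}
grouped = defaultdict(set)
for value, book_ids in iter_searchable_values(book_col_map, {1, 2, 3}):
    grouped[value] |= book_ids
assert dict(grouped) == {'epub': {1, 3}, 'mobi': {2}}
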

src/calibre/db/search.py  Normal file (700 lines added)
View File

@ -0,0 +1,700 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re
from functools import partial
from datetime import timedelta
from calibre.utils.config_base import prefs
from calibre.utils.date import parse_date, UNDEFINED_DATE, now
from calibre.utils.icu import primary_find
from calibre.utils.localization import lang_map, canonicalize_lang
from calibre.utils.search_query_parser import SearchQueryParser, ParseException
# TODO: Thread safety of saved searches
CONTAINS_MATCH = 0
EQUALS_MATCH = 1
REGEXP_MATCH = 2
# Utils {{{
def force_to_bool(val):
if isinstance(val, (str, unicode)):
try:
val = icu_lower(val)
if not val:
val = None
elif val in [_('yes'), _('checked'), 'true', 'yes']:
val = True
elif val in [_('no'), _('unchecked'), 'false', 'no']:
val = False
else:
val = bool(int(val))
except:
val = None
return val
def _matchkind(query):
matchkind = CONTAINS_MATCH
if (len(query) > 1):
if query.startswith('\\'):
query = query[1:]
elif query.startswith('='):
matchkind = EQUALS_MATCH
query = query[1:]
elif query.startswith('~'):
matchkind = REGEXP_MATCH
query = query[1:]
if matchkind != REGEXP_MATCH:
# leave case in regexps because it can be significant e.g. \S \W \D
query = icu_lower(query)
return matchkind, query
def _match(query, value, matchkind, use_primary_find_in_search=True):
if query.startswith('..'):
query = query[1:]
sq = query[1:]
internal_match_ok = True
else:
internal_match_ok = False
for t in value:
try: ### ignore regexp exceptions, required because search-ahead tries before typing is finished
t = icu_lower(t)
if (matchkind == EQUALS_MATCH):
if internal_match_ok:
if query == t:
return True
comps = [c.strip() for c in t.split('.') if c.strip()]
for comp in comps:
if sq == comp:
return True
elif query[0] == '.':
if t.startswith(query[1:]):
ql = len(query) - 1
if (len(t) == ql) or (t[ql:ql+1] == '.'):
return True
elif query == t:
return True
elif matchkind == REGEXP_MATCH:
if re.search(query, t, re.I|re.UNICODE):
return True
elif matchkind == CONTAINS_MATCH:
if use_primary_find_in_search:
if primary_find(query, t)[0] != -1:
return True
elif query in t:
return True
except re.error:
pass
return False
# }}}
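
The helpers above define the query-prefix convention used throughout the parser: a leading '=' means exact match, '~' means regular-expression match, a leading backslash escapes one of those characters, and anything else is a containment match. A standalone rendering of that rule (simplified: plain lower-casing instead of icu_lower):

CONTAINS, EQUALS, REGEXP = 'contains', 'equals', 'regexp'

def match_kind(query):
    kind = CONTAINS
    if len(query) > 1:
        if query.startswith('\\'):
            query = query[1:]
        elif query.startswith('='):
            kind, query = EQUALS, query[1:]
        elif query.startswith('~'):
            kind, query = REGEXP, query[1:]
    if kind != REGEXP:
        # case is preserved for regexps because \S, \W, \D are significant
        query = query.lower()
    return kind, query

assert match_kind('=History') == (EQUALS, 'history')
assert match_kind('~^Hist') == (REGEXP, '^Hist')
assert match_kind('dragon') == (CONTAINS, 'dragon')
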
class DateSearch(object): # {{{
def __init__(self):
self.operators = {
'=' : (1, self.eq),
'!=' : (2, self.ne),
'>' : (1, self.gt),
'>=' : (2, self.ge),
'<' : (1, self.lt),
'<=' : (2, self.le),
}
self.local_today = { '_today', 'today', icu_lower(_('today')) }
self.local_yesterday = { '_yesterday', 'yesterday', icu_lower(_('yesterday')) }
self.local_thismonth = { '_thismonth', 'thismonth', icu_lower(_('thismonth')) }
self.daysago_pat = re.compile(r'(%s|daysago|_daysago)$'%_('daysago'))
def eq(self, dbdate, query, field_count):
if dbdate.year == query.year:
if field_count == 1:
return True
if dbdate.month == query.month:
if field_count == 2:
return True
return dbdate.day == query.day
return False
def ne(self, *args):
return not self.eq(*args)
def gt(self, dbdate, query, field_count):
if dbdate.year > query.year:
return True
if field_count > 1 and dbdate.year == query.year:
if dbdate.month > query.month:
return True
return (field_count == 3 and dbdate.month == query.month and
dbdate.day > query.day)
return False
def le(self, *args):
return not self.gt(*args)
def lt(self, dbdate, query, field_count):
if dbdate.year < query.year:
return True
if field_count > 1 and dbdate.year == query.year:
if dbdate.month < query.month:
return True
return (field_count == 3 and dbdate.month == query.month and
dbdate.day < query.day)
return False
def ge(self, *args):
return not self.lt(*args)
def __call__(self, query, field_iter):
matches = set()
if len(query) < 2:
return matches
if query == 'false':
for v, book_ids in field_iter():
if isinstance(v, (str, unicode)):
v = parse_date(v)
if v is None or v <= UNDEFINED_DATE:
matches |= book_ids
return matches
if query == 'true':
for v, book_ids in field_iter():
if isinstance(v, (str, unicode)):
v = parse_date(v)
if v is not None and v > UNDEFINED_DATE:
matches |= book_ids
return matches
relop = None
for k, op in self.operators.iteritems():
if query.startswith(k):
p, relop = op
query = query[p:]
if relop is None:
relop = self.operators['='][-1]
if query in self.local_today:
qd = now()
field_count = 3
elif query in self.local_yesterday:
qd = now() - timedelta(1)
field_count = 3
elif query in self.local_thismonth:
qd = now()
field_count = 2
else:
m = self.daysago_pat.search(query)
if m is not None:
num = query[:-len(m.group(1))]
try:
qd = now() - timedelta(int(num))
except:
raise ParseException(query, len(query), 'Number conversion error')
field_count = 3
else:
try:
qd = parse_date(query, as_utc=False)
except:
raise ParseException(query, len(query), 'Date conversion error')
if '-' in query:
field_count = query.count('-') + 1
else:
field_count = query.count('/') + 1
for v, book_ids in field_iter():
if isinstance(v, (str, unicode)):
v = parse_date(v)
if v is not None and relop(v, qd, field_count):
matches |= book_ids
return matches
# }}}
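
In DateSearch, field_count records how many date components the user actually typed (derived from the number of '-' or '/' separators), and the comparison methods only look that deep: 1 compares years, 2 years and months, 3 full dates. A standalone check of the eq() behaviour (the dates are arbitrary examples):

from datetime import datetime

def dates_equal(dbdate, query, field_count):
    # Same shape as DateSearch.eq above
    if dbdate.year == query.year:
        if field_count == 1:
            return True
        if dbdate.month == query.month:
            if field_count == 2:
                return True
            return dbdate.day == query.day
    return False

stored = datetime(2011, 9, 6)
assert dates_equal(stored, datetime(2011, 1, 1), 1)       # query 'date:2011'
assert dates_equal(stored, datetime(2011, 9, 1), 2)       # query 'date:9/2011'
assert not dates_equal(stored, datetime(2011, 9, 7), 3)   # query 'date:9/7/2011'
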
class NumericSearch(object): # {{{
def __init__(self):
self.operators = {
'=':( 1, lambda r, q: r == q ),
'>':( 1, lambda r, q: r is not None and r > q ),
'<':( 1, lambda r, q: r is not None and r < q ),
'!=':( 2, lambda r, q: r != q ),
'>=':( 2, lambda r, q: r is not None and r >= q ),
'<=':( 2, lambda r, q: r is not None and r <= q )
}
def __call__(self, query, field_iter, location, datatype, candidates, is_many=False):
matches = set()
if not query:
return matches
q = ''
cast = adjust = lambda x: x
dt = datatype
if is_many and query in {'true', 'false'}:
valcheck = lambda x: True
if datatype == 'rating':
valcheck = lambda x: x is not None and x > 0
found = set()
for val, book_ids in field_iter():
if valcheck(val):
found |= book_ids
return found if query == 'true' else candidates - found
if query == 'false':
if location == 'cover':
relop = lambda x,y: not bool(x)
else:
relop = lambda x,y: x is None
elif query == 'true':
if location == 'cover':
relop = lambda x,y: bool(x)
else:
relop = lambda x,y: x is not None
else:
relop = None
for k, op in self.operators.iteritems():
if query.startswith(k):
p, relop = op
query = query[p:]
if relop is None:
p, relop = self.operators['=']
cast = int
if dt == 'rating':
cast = lambda x: 0 if x is None else int(x)
adjust = lambda x: x/2
elif dt in ('float', 'composite'):
cast = float
mult = 1.0
if len(query) > 1:
mult = query[-1].lower()
mult = {'k': 1024.,'m': 1024.**2, 'g': 1024.**3}.get(mult, 1.0)
if mult != 1.0:
query = query[:-1]
else:
mult = 1.0
try:
q = cast(query) * mult
except:
raise ParseException(query, len(query),
'Non-numeric value in query: %r'%query)
for val, book_ids in field_iter():
if val is None:
continue
try:
v = cast(val)
except:
v = None
if v:
v = adjust(v)
if relop(v, q):
matches |= book_ids
return matches
# }}}
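
NumericSearch accepts a trailing k/m/g multiplier on the query value (used for size-like columns, as in the new '#float:<1k' test) and halves stored ratings before comparing, since ratings are stored doubled. A standalone rendering of the query-parsing half (the cast argument stands in for the datatype-specific cast chosen above):

def parse_numeric_query(query, cast=float):
    # A trailing k/m/g multiplies the numeric value, as in '#float:<1k'
    mult = 1.0
    if len(query) > 1:
        mult = {'k': 1024.0, 'm': 1024.0 ** 2, 'g': 1024.0 ** 3}.get(query[-1].lower(), 1.0)
        if mult != 1.0:
            query = query[:-1]
    return cast(query) * mult

assert parse_numeric_query('1k') == 1024.0
assert parse_numeric_query('10.01') == 10.01
assert parse_numeric_query('3', cast=int) == 3
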
class BooleanSearch(object): # {{{
def __init__(self):
self.local_no = icu_lower(_('no'))
self.local_yes = icu_lower(_('yes'))
self.local_unchecked = icu_lower(_('unchecked'))
self.local_checked = icu_lower(_('checked'))
self.local_empty = icu_lower(_('empty'))
self.local_blank = icu_lower(_('blank'))
self.local_bool_values = {
self.local_no, self.local_unchecked, '_no', 'false', 'no',
self.local_yes, self.local_checked, '_yes', 'true', 'yes',
self.local_empty, self.local_blank, '_empty', 'empty'}
def __call__(self, query, field_iter, bools_are_tristate):
matches = set()
if query not in self.local_bool_values:
raise ParseException(_('Invalid boolean query "{0}"').format(query))
for val, book_ids in field_iter():
val = force_to_bool(val)
if not bools_are_tristate:
if val is None or not val: # item is None or set to false
if query in { self.local_no, self.local_unchecked, 'no', '_no', 'false' }:
matches |= book_ids
else: # item is explicitly set to true
if query in { self.local_yes, self.local_checked, 'yes', '_yes', 'true' }:
matches |= book_ids
else:
if val is None:
if query in { self.local_empty, self.local_blank, 'empty', '_empty', 'false' }:
matches |= book_ids
elif not val: # is not None and false
if query in { self.local_no, self.local_unchecked, 'no', '_no', 'true' }:
matches |= book_ids
else: # item is not None and true
if query in { self.local_yes, self.local_checked, 'yes', '_yes', 'true' }:
matches |= book_ids
return matches
# }}}
class KeyPairSearch(object): # {{{
def __call__(self, query, field_iter, candidates, use_primary_find):
matches = set()
if ':' in query:
q = [q.strip() for q in query.split(':')]
if len(q) != 2:
raise ParseException(query, len(query),
'Invalid query format for colon-separated search')
keyq, valq = q
keyq_mkind, keyq = _matchkind(keyq)
valq_mkind, valq = _matchkind(valq)
else:
keyq = keyq_mkind = ''
valq_mkind, valq = _matchkind(query)
keyq_mkind
if valq in {'true', 'false'}:
found = set()
if keyq:
for val, book_ids in field_iter():
if val and val.get(keyq, False):
found |= book_ids
else:
for val, book_ids in field_iter():
if val:
found |= book_ids
return found if valq == 'true' else candidates - found
for m, book_ids in field_iter():
for key, val in m.iteritems():
if (keyq and not _match(keyq, (key,), keyq_mkind,
use_primary_find_in_search=use_primary_find)):
continue
if (valq and not _match(valq, (val,), valq_mkind,
use_primary_find_in_search=use_primary_find)):
continue
matches |= book_ids
break
return matches
# }}}
class Parser(SearchQueryParser):
def __init__(self, dbcache, all_book_ids, gst, date_search, num_search,
bool_search, keypair_search, limit_search_columns, limit_search_columns_to,
locations, virtual_fields):
self.dbcache, self.all_book_ids = dbcache, all_book_ids
self.all_search_locations = frozenset(locations)
self.grouped_search_terms = gst
self.date_search, self.num_search = date_search, num_search
self.bool_search, self.keypair_search = bool_search, keypair_search
self.limit_search_columns, self.limit_search_columns_to = (
limit_search_columns, limit_search_columns_to)
self.virtual_fields = virtual_fields or {}
if 'marked' not in self.virtual_fields:
self.virtual_fields['marked'] = self
super(Parser, self).__init__(locations, optimize=True)
@property
def field_metadata(self):
return self.dbcache.field_metadata
def universal_set(self):
return self.all_book_ids
def field_iter(self, name, candidates):
get_metadata = partial(self.dbcache._get_metadata, get_user_categories=False)
try:
field = self.dbcache.fields[name]
except KeyError:
field = self.virtual_fields[name]
return field.iter_searchable_values(get_metadata, candidates)
def iter_searchable_values(self, *args, **kwargs):
for x in []:
yield x, set()
def get_matches(self, location, query, candidates=None,
allow_recursion=True):
# If candidates is not None, it must not be modified. Changing its
# value will break query optimization in the search parser
matches = set()
if candidates is None:
candidates = self.all_book_ids
if not candidates or not query or not query.strip():
return matches
if location not in self.all_search_locations:
return matches
if (len(location) > 2 and location.startswith('@') and
location[1:] in self.grouped_search_terms):
location = location[1:]
# get metadata key associated with the search term. Eliminates
# dealing with plurals and other aliases
original_location = location
location = self.field_metadata.search_term_to_field_key(
icu_lower(location.strip()))
# grouped search terms
if isinstance(location, list):
if allow_recursion:
if query.lower() == 'false':
invert = True
query = 'true'
else:
invert = False
for loc in location:
c = candidates.copy()
m = self.get_matches(loc, query,
candidates=c, allow_recursion=False)
matches |= m
c -= m
if len(c) == 0:
break
if invert:
matches = self.all_book_ids - matches
return matches
raise ParseException(query, len(query), 'Recursive query group detected')
# If the user has asked to restrict searching over all fields, apply
# that restriction
if (location == 'all' and self.limit_search_columns and
self.limit_search_columns_to):
terms = set()
for l in self.limit_search_columns_to:
l = icu_lower(l.strip())
if l and l != 'all' and l in self.all_search_locations:
terms.add(l)
if terms:
c = candidates.copy()
for l in terms:
try:
m = self.get_matches(l, query,
candidates=c, allow_recursion=allow_recursion)
matches |= m
c -= m
if len(c) == 0:
break
except:
pass
return matches
upf = prefs['use_primary_find_in_search']
if location in self.field_metadata:
fm = self.field_metadata[location]
dt = fm['datatype']
# take care of dates special case
if (dt == 'datetime' or (
dt == 'composite' and
fm['display'].get('composite_sort', '') == 'date')):
if location == 'date':
location = 'timestamp'
return self.date_search(
icu_lower(query), partial(self.field_iter, location, candidates))
# take care of numbers special case
if (dt in ('rating', 'int', 'float') or
(dt == 'composite' and
fm['display'].get('composite_sort', '') == 'number')):
field = self.dbcache.fields[location]
return self.num_search(
icu_lower(query), partial(self.field_iter, location, candidates),
location, dt, candidates, is_many=field.is_many)
# take care of the 'count' operator for is_multiples
if (fm['is_multiple'] and
len(query) > 1 and query[0] == '#' and query[1] in '=<>!'):
return self.num_search(icu_lower(query[1:]), partial(
self.dbcache.fields[location].iter_counts, candidates),
location, dt, candidates)
# take care of boolean special case
if dt == 'bool':
return self.bool_search(icu_lower(query),
partial(self.field_iter, location, candidates),
self.dbcache.pref('bools_are_tristate'))
# special case: colon-separated fields such as identifiers. isbn
# is a special case within the case
if fm.get('is_csp', False):
field_iter = partial(self.field_iter, location, candidates)
if location == 'identifiers' and original_location == 'isbn':
return self.keypair_search('=isbn:'+query, field_iter,
candidates, upf)
return self.keypair_search(query, field_iter, candidates, upf)
# check for user categories
if len(location) >= 2 and location.startswith('@'):
return self.get_user_category_matches(location[1:], icu_lower(query), candidates)
# Everything else (and 'all' matches)
matchkind, query = _matchkind(query)
all_locs = set()
text_fields = set()
field_metadata = {}
for x, fm in self.field_metadata.iteritems():
if x.startswith('@'): continue
if fm['search_terms'] and x != 'series_sort':
all_locs.add(x)
field_metadata[x] = fm
if fm['datatype'] in { 'composite', 'text', 'comments', 'series', 'enumeration' }:
text_fields.add(x)
locations = all_locs if location == 'all' else {location}
current_candidates = set(candidates)
try:
rating_query = int(float(query)) * 2
except:
rating_query = None
try:
int_query = int(float(query))
except:
int_query = None
try:
float_query = float(query)
except:
float_query = None
for location in locations:
current_candidates -= matches
q = query
if location == 'languages':
q = canonicalize_lang(query)
if q is None:
lm = lang_map()
rm = {v.lower():k for k,v in lm.iteritems()}
q = rm.get(query, query)
if matchkind == CONTAINS_MATCH and q in {'true', 'false'}:
found = set()
for val, book_ids in self.field_iter(location, current_candidates):
if val and (not hasattr(val, 'strip') or val.strip()):
found |= book_ids
matches |= (found if q == 'true' else (current_candidates-found))
continue
dt = field_metadata.get(location, {}).get('datatype', None)
if dt == 'rating':
if rating_query is not None:
for val, book_ids in self.field_iter(location, current_candidates):
if val == rating_query:
matches |= book_ids
continue
if dt == 'float':
if float_query is not None:
for val, book_ids in self.field_iter(location, current_candidates):
if val == float_query:
matches |= book_ids
continue
if dt == 'int':
if int_query is not None:
for val, book_ids in self.field_iter(location, current_candidates):
if val == int_query:
matches |= book_ids
continue
if location in text_fields:
for val, book_ids in self.field_iter(location, current_candidates):
if val is not None:
if isinstance(val, basestring):
val = (val,)
if _match(q, val, matchkind, use_primary_find_in_search=upf):
matches |= book_ids
return matches
def get_user_category_matches(self, location, query, candidates):
matches = set()
if len(query) < 2:
return matches
user_cats = self.dbcache.pref('user_categories')
c = set(candidates)
if query.startswith('.'):
check_subcats = True
query = query[1:]
else:
check_subcats = False
for key in user_cats:
if key == location or (check_subcats and key.startswith(location + '.')):
for (item, category, ign) in user_cats[key]:
s = self.get_matches(category, '=' + item, candidates=c)
c -= s
matches |= s
if query == 'false':
return candidates - matches
return matches
class Search(object):
def __init__(self, all_search_locations=()):
self.all_search_locations = all_search_locations
self.date_search = DateSearch()
self.num_search = NumericSearch()
self.bool_search = BooleanSearch()
self.keypair_search = KeyPairSearch()
def change_locations(self, newlocs):
self.all_search_locations = newlocs
def __call__(self, dbcache, query, search_restriction, virtual_fields=None):
'''
Return the set of ids of all records that match the specified
query and restriction
'''
q = ''
if not query or not query.strip():
q = search_restriction
else:
q = query
if search_restriction:
q = u'(%s) and (%s)' % (search_restriction, query)
all_book_ids = dbcache.all_book_ids(type=set)
if not q:
return all_book_ids
if not isinstance(q, type(u'')):
q = q.decode('utf-8')
# We construct a new parser instance per search as pyparsing is not
# thread safe. On my desktop, constructing a SearchQueryParser instance
# takes 0.000975 seconds and restoring it from a pickle takes
# 0.000974 seconds.
sqp = Parser(
dbcache, all_book_ids, dbcache.pref('grouped_search_terms'),
self.date_search, self.num_search, self.bool_search,
self.keypair_search,
prefs[ 'limit_search_columns' ],
prefs[ 'limit_search_columns_to' ], self.all_search_locations,
virtual_fields)
try:
ret = sqp.parse(q)
finally:
sqp.dbcache = None
return ret
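
Search.__call__ folds an active restriction into the query before parsing, so a restriction behaves exactly like an extra AND clause around whatever the user typed. A standalone rendering of that composition step (the query strings are placeholders):

def compose_query(query, restriction):
    # Mirrors the restriction handling at the top of Search.__call__
    if not query or not query.strip():
        return restriction
    if restriction:
        return u'(%s) and (%s)' % (restriction, query)
    return query

assert compose_query(u'tags:fiction', u'') == u'tags:fiction'
assert compose_query(u'', u'languages:eng') == u'languages:eng'
assert compose_query(u'tags:fiction', u'languages:eng') == u'(languages:eng) and (tags:fiction)'
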

View File

@ -148,11 +148,11 @@ class ManyToManyTable(ManyToOneTable):
'''
table_type = MANY_MANY
selectq = 'SELECT book, {0} FROM {1}'
def read_maps(self, db):
for row in db.conn.execute(
'SELECT book, {0} FROM {1}'.format(
self.metadata['link_column'], self.link_table)):
self.selectq.format(self.metadata['link_column'], self.link_table)):
if row[1] not in self.col_book_map:
self.col_book_map[row[1]] = []
self.col_book_map[row[1]].append(row[0])
@ -168,6 +168,8 @@ class ManyToManyTable(ManyToOneTable):
class AuthorsTable(ManyToManyTable):
selectq = 'SELECT book, {0} FROM {1} ORDER BY id'
def read_id_maps(self, db):
self.alink_map = {}
self.asort_map = {}

View File

@ -7,8 +7,8 @@ __license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import unittest, os, shutil
from future_builtins import map
class BaseTest(unittest.TestCase):
@ -39,7 +39,10 @@ class BaseTest(unittest.TestCase):
'ondevice_col', 'last_modified'}.union(allfk1)
for attr in all_keys:
if attr == 'user_metadata': continue
if attr == 'format_metadata': continue # TODO: Not implemented yet
attr1, attr2 = getattr(mi1, attr), getattr(mi2, attr)
if attr == 'formats':
attr1, attr2 = map(lambda x:tuple(x) if x else (), (attr1, attr2))
self.assertEqual(attr1, attr2,
'%s not the same: %r != %r'%(attr, attr1, attr2))
if attr.startswith('#'):

Binary file not shown.

View File

@ -10,7 +10,7 @@ __docformat__ = 'restructuredtext en'
import shutil, unittest, tempfile, datetime
from cStringIO import StringIO
from calibre.utils.date import local_tz
from calibre.utils.date import utc_tz
from calibre.db.tests.base import BaseTest
class ReadingTest(BaseTest):
@ -37,12 +37,12 @@ class ReadingTest(BaseTest):
'tags': (),
'formats':(),
'identifiers': {},
'timestamp': datetime.datetime(2011, 9, 7, 13, 54, 41,
tzinfo=local_tz),
'pubdate': datetime.datetime(2011, 9, 7, 13, 54, 41,
tzinfo=local_tz),
'last_modified': datetime.datetime(2011, 9, 7, 13, 54, 41,
tzinfo=local_tz),
'timestamp': datetime.datetime(2011, 9, 7, 19, 54, 41,
tzinfo=utc_tz),
'pubdate': datetime.datetime(2011, 9, 7, 19, 54, 41,
tzinfo=utc_tz),
'last_modified': datetime.datetime(2011, 9, 7, 19, 54, 41,
tzinfo=utc_tz),
'publisher': None,
'languages': (),
'comments': None,
@ -63,23 +63,23 @@ class ReadingTest(BaseTest):
'sort': 'One',
'authors': ('Author One',),
'author_sort': 'One, Author',
'series' : 'Series One',
'series' : 'A Series One',
'series_index': 1.0,
'tags':('Tag Two', 'Tag One'),
'tags':('Tag One', 'Tag Two'),
'formats': (),
'rating': 4.0,
'identifiers': {'test':'one'},
'timestamp': datetime.datetime(2011, 9, 5, 15, 6,
tzinfo=local_tz),
'pubdate': datetime.datetime(2011, 9, 5, 15, 6,
tzinfo=local_tz),
'timestamp': datetime.datetime(2011, 9, 5, 21, 6,
tzinfo=utc_tz),
'pubdate': datetime.datetime(2011, 9, 5, 21, 6,
tzinfo=utc_tz),
'publisher': 'Publisher One',
'languages': ('eng',),
'comments': '<p>Comments One</p>',
'#enum':'One',
'#authors':('Custom One', 'Custom Two'),
'#date':datetime.datetime(2011, 9, 5, 0, 0,
tzinfo=local_tz),
'#date':datetime.datetime(2011, 9, 5, 6, 0,
tzinfo=utc_tz),
'#rating':2.0,
'#series':'My Series One',
'#series_index': 1.0,
@ -92,23 +92,23 @@ class ReadingTest(BaseTest):
'sort': 'Title Two',
'authors': ('Author Two', 'Author One'),
'author_sort': 'Two, Author & One, Author',
'series' : 'Series One',
'series' : 'A Series One',
'series_index': 2.0,
'rating': 6.0,
'tags': ('Tag One',),
'formats':(),
'identifiers': {'test':'two'},
'timestamp': datetime.datetime(2011, 9, 6, 0, 0,
tzinfo=local_tz),
'pubdate': datetime.datetime(2011, 8, 5, 0, 0,
tzinfo=local_tz),
'timestamp': datetime.datetime(2011, 9, 6, 6, 0,
tzinfo=utc_tz),
'pubdate': datetime.datetime(2011, 8, 5, 6, 0,
tzinfo=utc_tz),
'publisher': 'Publisher Two',
'languages': ('deu',),
'comments': '<p>Comments Two</p>',
'#enum':'Two',
'#authors':('My Author Two',),
'#date':datetime.datetime(2011, 9, 1, 0, 0,
tzinfo=local_tz),
'#date':datetime.datetime(2011, 9, 1, 6, 0,
tzinfo=utc_tz),
'#rating':4.0,
'#series':'My Series Two',
'#series_index': 3.0,
@ -130,30 +130,31 @@ class ReadingTest(BaseTest):
'Test sorting'
cache = self.init_cache(self.library_path)
for field, order in {
'title' : [2, 1, 3],
'authors': [2, 1, 3],
'series' : [3, 2, 1],
'tags' : [3, 1, 2],
'rating' : [3, 2, 1],
# 'identifiers': [3, 2, 1], There is no stable sort since 1 and
# 2 have the same identifier keys
# TODO: Add an empty book to the db and ensure that empty
# fields sort the same as they do in db2
'timestamp': [2, 1, 3],
'pubdate' : [1, 2, 3],
'publisher': [3, 2, 1],
'last_modified': [2, 1, 3],
'languages': [3, 2, 1],
'comments': [3, 2, 1],
'#enum' : [3, 2, 1],
'#authors' : [3, 2, 1],
'#date': [3, 1, 2],
'#rating':[3, 2, 1],
'#series':[3, 2, 1],
'#tags':[3, 2, 1],
'#yesno':[3, 1, 2],
'#comments':[3, 2, 1],
}.iteritems():
'title' : [2, 1, 3],
'authors': [2, 1, 3],
'series' : [3, 1, 2],
'tags' : [3, 1, 2],
'rating' : [3, 2, 1],
# 'identifiers': [3, 2, 1], There is no stable sort since 1 and
# 2 have the same identifier keys
# 'last_modified': [3, 2, 1], There is no stable sort as two
# records have the exact same value
'timestamp': [2, 1, 3],
'pubdate' : [1, 2, 3],
'publisher': [3, 2, 1],
'languages': [3, 2, 1],
'comments': [3, 2, 1],
'#enum' : [3, 2, 1],
'#authors' : [3, 2, 1],
'#date': [3, 1, 2],
'#rating':[3, 2, 1],
'#series':[3, 2, 1],
'#tags':[3, 2, 1],
'#yesno':[3, 1, 2],
'#comments':[3, 2, 1],
# TODO: Add an empty book to the db and ensure that empty
# fields sort the same as they do in db2
}.iteritems():
x = list(reversed(order))
self.assertEqual(order, cache.multisort([(field, True)],
ids_to_sort=x),
@ -190,6 +191,56 @@ class ReadingTest(BaseTest):
# }}}
def test_searching(self): # {{{
'Test searching returns the same data for both backends'
from calibre.library.database2 import LibraryDatabase2
old = LibraryDatabase2(self.library_path)
oldvals = {query:set(old.search_getting_ids(query, '')) for query in (
# Date tests
'date:9/6/2011', 'date:true', 'date:false', 'pubdate:9/2011',
'#date:true', 'date:<100daysago', 'date:>9/6/2011',
'#date:>9/1/2011', '#date:=2011',
# Number tests
'rating:3', 'rating:>2', 'rating:=2', 'rating:true',
'rating:false', 'rating:>4', 'tags:#<2', 'tags:#>7',
'cover:false', 'cover:true', '#float:>11', '#float:<1k',
'#float:10.01', 'series_index:1', 'series_index:<3', 'id:1',
'id:>2',
# Bool tests
'#yesno:true', '#yesno:false', '#yesno:yes', '#yesno:no',
'#yesno:empty',
# Keypair tests
'identifiers:true', 'identifiers:false', 'identifiers:test',
'identifiers:test:false', 'identifiers:test:one',
'identifiers:t:n', 'identifiers:=test:=two', 'identifiers:x:y',
'identifiers:z',
# Text tests
'title:="Title One"', 'title:~title', '#enum:=one', '#enum:tw',
'#enum:false', '#enum:true', 'series:one', 'tags:one', 'tags:true',
'tags:false', '2', 'one', '20.02', '"publisher one"',
'"my comments one"',
# User categories
'@Good Authors:One', '@Good Series.good tags:two',
# TODO: Tests for searching the size and #formats columns and
# cover:true|false
)}
old = None
cache = self.init_cache(self.library_path)
for query, ans in oldvals.iteritems():
nr = cache.search(query, '')
self.assertEqual(ans, nr,
'Old result: %r != New result: %r for search: %s'%(
ans, nr, query))
# }}}
def tests():
return unittest.TestLoader().loadTestsFromTestCase(ReadingTest)

View File

@ -291,9 +291,7 @@ class ITUNES(DriverBase):
# Properties
cached_books = {}
cache_dir = os.path.join(config_dir, 'caches', 'itunes')
calibre_library_path = prefs['library_path']
archive_path = os.path.join(cache_dir, "thumbs.zip")
description_prefix = "added by calibre"
ejected = False
iTunes = None
@ -887,6 +885,8 @@ class ITUNES(DriverBase):
logger().info(" BCD: %s" % ['0x%x' % x for x in sorted(self.BCD)])
logger().info(" PRODUCT_ID: %s" % ['0x%x' % x for x in sorted(self.PRODUCT_ID)])
self.cache_dir = os.path.join(cache_dir(), 'itunes')
self.archive_path = os.path.join(self.cache_dir, "thumbs.zip")
# Confirm/create thumbs archive
if not os.path.exists(self.cache_dir):
if DEBUG:

View File

@ -195,7 +195,7 @@ class PRST1(USBMS):
for i, row in enumerate(cursor):
try:
comp_date = int(os.path.getmtime(self.normalize_path(prefix + row[0])) * 1000);
except (OSError, IOError):
except (OSError, IOError, TypeError):
# In case the db has incorrect path info
continue
device_date = int(row[1]);

View File

@ -886,10 +886,12 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
self._debug('extension path lengths', self.exts_path_lengths)
self.THUMBNAIL_HEIGHT = result.get('coverHeight', self.DEFAULT_THUMBNAIL_HEIGHT)
self._debug('cover height', self.THUMBNAIL_HEIGHT)
if 'coverWidth' in result:
# Setting this field forces the aspect ratio
self.THUMBNAIL_WIDTH = result.get('coverWidth',
(self.DEFAULT_THUMBNAIL_HEIGHT/3) * 4)
self._debug('cover width', self.THUMBNAIL_WIDTH)
elif hasattr(self, 'THUMBNAIL_WIDTH'):
delattr(self, 'THUMBNAIL_WIDTH')
@ -1023,14 +1025,6 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
if '_series_sort_' in result:
del result['_series_sort_']
book = self.json_codec.raw_to_book(result, SDBook, self.PREFIX)
# If the thumbnail is the wrong size, zero the last mod date
# so the metadata will be resent
thumbnail = book.get('thumbnail', None)
if thumbnail and not (thumbnail[0] == self.THUMBNAIL_HEIGHT or
thumbnail[1] == self.THUMBNAIL_HEIGHT):
book.set('last_modified', UNDEFINED_DATE)
bl.add_book(book, replace_metadata=True)
if '_new_book_' in result:
book.set('_new_book_', True)
@ -1086,7 +1080,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
if count:
for i,book in enumerate(books_to_send):
self._debug('sending metadata for book', book.lpath)
self._debug('sending metadata for book', book.lpath, book.title)
self._set_known_metadata(book)
opcode, result = self._call_client(
'SEND_BOOK_METADATA',

View File

@ -100,7 +100,7 @@ class CHMReader(CHMFile):
def ExtractFiles(self, output_dir=os.getcwdu(), debug_dump=False):
html_files = set([])
try:
x = self.GetEncoding()
x = self.get_encoding()
codecs.lookup(x)
enc = x
except:

View File

@ -7,8 +7,6 @@ import os
from calibre.customize.conversion import InputFormatPlugin
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.localization import get_lang
from calibre.utils.filenames import ascii_filename
from calibre.constants import filesystem_encoding
class CHMInput(InputFormatPlugin):
@ -57,6 +55,7 @@ class CHMInput(InputFormatPlugin):
mainpath = os.path.join(tdir, mainname)
metadata = get_metadata_from_reader(self._chm_reader)
encoding = self._chm_reader.get_encoding() or options.input_encoding or 'cp1252'
self._chm_reader.CloseCHM()
# print tdir, mainpath
# from calibre import ipython
@ -64,15 +63,31 @@ class CHMInput(InputFormatPlugin):
options.debug_pipeline = None
options.input_encoding = 'utf-8'
# try a custom conversion:
#oeb = self._create_oebbook(mainpath, tdir, options, log, metadata)
# try using html converter:
htmlpath = self._create_html_root(mainpath, log)
htmlpath, toc = self._create_html_root(mainpath, log, encoding)
oeb = self._create_oebbook_html(htmlpath, tdir, options, log, metadata)
options.debug_pipeline = odi
#log.debug('DEBUG: Not removing tempdir %s' % tdir)
if toc.count() > 1:
oeb.toc = self.parse_html_toc(oeb.spine[0])
oeb.manifest.remove(oeb.spine[0])
oeb.auto_generated_toc = False
return oeb
def parse_html_toc(self, item):
from calibre.ebooks.oeb.base import TOC, XPath
dx = XPath('./h:div')
ax = XPath('./h:a[1]')
def do_node(parent, div):
for child in dx(div):
a = ax(child)[0]
c = parent.add(a.text, a.attrib['href'])
do_node(c, child)
toc = TOC()
root = XPath('//h:div[1]')(item.data)[0]
do_node(toc, root)
return toc
def _create_oebbook_html(self, htmlpath, basedir, opts, log, mi):
# use HTMLInput plugin to generate book
from calibre.customize.builtins import HTMLInput
@ -81,78 +96,22 @@ class CHMInput(InputFormatPlugin):
oeb = htmlinput.create_oebbook(htmlpath, basedir, opts, log, mi)
return oeb
def _create_oebbook(self, hhcpath, basedir, opts, log, mi):
import uuid
from lxml import html
from calibre.ebooks.conversion.plumber import create_oebbook
from calibre.ebooks.oeb.base import DirContainer
oeb = create_oebbook(log, None, opts,
encoding=opts.input_encoding, populate=False)
self.oeb = oeb
metadata = oeb.metadata
if mi.title:
metadata.add('title', mi.title)
if mi.authors:
for a in mi.authors:
metadata.add('creator', a, attrib={'role':'aut'})
if mi.publisher:
metadata.add('publisher', mi.publisher)
if mi.isbn:
metadata.add('identifier', mi.isbn, attrib={'scheme':'ISBN'})
if not metadata.language:
oeb.logger.warn(u'Language not specified')
metadata.add('language', get_lang().replace('_', '-'))
if not metadata.creator:
oeb.logger.warn('Creator not specified')
metadata.add('creator', _('Unknown'))
if not metadata.title:
oeb.logger.warn('Title not specified')
metadata.add('title', _('Unknown'))
bookid = str(uuid.uuid4())
metadata.add('identifier', bookid, id='uuid_id', scheme='uuid')
for ident in metadata.identifier:
if 'id' in ident.attrib:
self.oeb.uid = metadata.identifier[0]
break
hhcdata = self._read_file(hhcpath)
hhcroot = html.fromstring(hhcdata)
chapters = self._process_nodes(hhcroot)
#print "============================="
#print "Printing hhcroot"
#print etree.tostring(hhcroot, pretty_print=True)
#print "============================="
log.debug('Found %d section nodes' % len(chapters))
if len(chapters) > 0:
path0 = chapters[0][1]
subpath = os.path.dirname(path0)
htmlpath = os.path.join(basedir, subpath)
oeb.container = DirContainer(htmlpath, log)
for chapter in chapters:
title = chapter[0]
basename = os.path.basename(chapter[1])
self._add_item(oeb, title, basename)
oeb.container = DirContainer(htmlpath, oeb.log)
return oeb
def _create_html_root(self, hhcpath, log):
def _create_html_root(self, hhcpath, log, encoding):
from lxml import html
from urllib import unquote as _unquote
from calibre.ebooks.oeb.base import urlquote
from calibre.ebooks.chardet import xml_to_unicode
hhcdata = self._read_file(hhcpath)
hhcdata = hhcdata.decode(encoding)
hhcdata = xml_to_unicode(hhcdata, verbose=True,
strip_encoding_pats=True, resolve_entities=True)[0]
hhcroot = html.fromstring(hhcdata)
chapters = self._process_nodes(hhcroot)
toc = self._process_nodes(hhcroot)
#print "============================="
#print "Printing hhcroot"
#print etree.tostring(hhcroot, pretty_print=True)
#print "============================="
log.debug('Found %d section nodes' % len(chapters))
log.debug('Found %d section nodes' % toc.count())
htmlpath = os.path.splitext(hhcpath)[0] + ".html"
base = os.path.dirname(os.path.abspath(htmlpath))
@ -168,37 +127,40 @@ class CHMInput(InputFormatPlugin):
x = y
return x
def donode(item, parent, base, subpath):
for child in item:
title = child.title
if not title: continue
raw = unquote_path(child.href or '')
rsrcname = os.path.basename(raw)
rsrcpath = os.path.join(subpath, rsrcname)
if (not os.path.exists(os.path.join(base, rsrcpath)) and
os.path.exists(os.path.join(base, raw))):
rsrcpath = raw
if '%' not in rsrcpath:
rsrcpath = urlquote(rsrcpath)
if not raw:
rsrcpath = ''
c = DIV(A(title, href=rsrcpath))
donode(child, c, base, subpath)
parent.append(c)
with open(htmlpath, 'wb') as f:
if chapters:
f.write('<html><head><meta http-equiv="Content-type"'
' content="text/html;charset=UTF-8" /></head><body>\n')
path0 = chapters[0][1]
if toc.count() > 1:
from lxml.html.builder import HTML, BODY, DIV, A
path0 = toc[0].href
path0 = unquote_path(path0)
subpath = os.path.dirname(path0)
base = os.path.dirname(f.name)
for chapter in chapters:
title = chapter[0]
raw = unquote_path(chapter[1])
rsrcname = os.path.basename(raw)
rsrcpath = os.path.join(subpath, rsrcname)
if (not os.path.exists(os.path.join(base, rsrcpath)) and
os.path.exists(os.path.join(base, raw))):
rsrcpath = raw
# title should already be url encoded
if '%' not in rsrcpath:
rsrcpath = urlquote(rsrcpath)
url = "<br /><a href=" + rsrcpath + ">" + title + " </a>\n"
if isinstance(url, unicode):
url = url.encode('utf-8')
f.write(url)
f.write("</body></html>")
root = DIV()
donode(toc, root, base, subpath)
raw = html.tostring(HTML(BODY(root)), encoding='utf-8',
pretty_print=True)
f.write(raw)
else:
f.write(hhcdata)
return htmlpath
return htmlpath, toc
def _read_file(self, name):
f = open(name, 'rb')
@ -206,41 +168,27 @@ class CHMInput(InputFormatPlugin):
f.close()
return data
def _visit_node(self, node, chapters, depth):
# check that node is a normal node (not a comment, DOCTYPE, etc.)
# (normal nodes have string tags)
if isinstance(node.tag, basestring):
from calibre.ebooks.chm.reader import match_string
chapter_path = None
if match_string(node.tag, 'object') and match_string(node.attrib['type'], 'text/sitemap'):
chapter_title = None
for child in node:
if match_string(child.tag,'param') and match_string(child.attrib['name'], 'name'):
chapter_title = child.attrib['value']
if match_string(child.tag,'param') and match_string(child.attrib['name'],'local'):
chapter_path = child.attrib['value']
if chapter_title is not None and chapter_path is not None:
chapter = [chapter_title, chapter_path, depth]
chapters.append(chapter)
if node.tag=="UL":
depth = depth + 1
if node.tag=="/UL":
depth = depth - 1
def add_node(self, node, toc, ancestor_map):
from calibre.ebooks.chm.reader import match_string
if match_string(node.attrib['type'], 'text/sitemap'):
p = node.xpath('ancestor::ul[1]/ancestor::li[1]/object[1]')
parent = p[0] if p else None
toc = ancestor_map.get(parent, toc)
title = href = u''
for param in node.xpath('./param'):
if match_string(param.attrib['name'], 'name'):
title = param.attrib['value']
elif match_string(param.attrib['name'], 'local'):
href = param.attrib['value']
child = toc.add(title or _('Unknown'), href)
ancestor_map[node] = child
def _process_nodes(self, root):
chapters = []
depth = 0
for node in root.iter():
self._visit_node(node, chapters, depth)
return chapters
from calibre.ebooks.oeb.base import TOC
toc = TOC()
ancestor_map = {}
for node in root.xpath('//object'):
self.add_node(node, toc, ancestor_map)
return toc
def _add_item(self, oeb, title, path):
bname = os.path.basename(path)
id, href = oeb.manifest.generate(id='html',
href=ascii_filename(bname))
item = oeb.manifest.add(id, href, 'text/html')
item.html_input_href = bname
oeb.spine.add(item, True)
oeb.toc.add(title, item.href)
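
The rewritten CHM handling above replaces the old flat chapter list with a nested TOC: each <object type="text/sitemap"> node contributes an entry, and its parent entry is found by walking up to the enclosing <li> of the enclosing <ul> and taking that item's first object. A standalone sketch of the same ancestor-mapping technique, using plain dicts instead of calibre's TOC class (the markup and names are illustrative):

from lxml import html

HHC = '''<body><ul>
  <li><object type="text/sitemap">
        <param name="Name" value="Chapter 1"/>
        <param name="Local" value="ch1.html"/>
      </object>
      <ul>
        <li><object type="text/sitemap">
              <param name="Name" value="Section 1.1"/>
              <param name="Local" value="ch1s1.html"/>
            </object></li>
      </ul>
  </li>
</ul></body>'''

def build_toc(root):
    toc = {'title': None, 'href': None, 'children': []}
    ancestor_map = {}
    for node in root.xpath('//object[@type="text/sitemap"]'):
        # Parent entry is the first object of the li that encloses this node's ul
        p = node.xpath('ancestor::ul[1]/ancestor::li[1]/object[1]')
        parent = ancestor_map.get(p[0] if p else None, toc)
        entry = {'title': '', 'href': '', 'children': []}
        for param in node.xpath('./param'):
            name = (param.get('name') or '').lower()
            if name == 'name':
                entry['title'] = param.get('value', '')
            elif name == 'local':
                entry['href'] = param.get('value', '')
        parent['children'].append(entry)
        ancestor_map[node] = entry
    return toc

toc = build_toc(html.fromstring(HHC))
assert toc['children'][0]['title'] == 'Chapter 1'
assert toc['children'][0]['children'][0]['href'] == 'ch1s1.html'
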

View File

@ -515,6 +515,7 @@ class HTMLPreProcessor(object):
if not getattr(self.extra_opts, 'keep_ligatures', False):
html = _ligpat.sub(lambda m:LIGATURES[m.group()], html)
user_sr_rules = {}
# Function for processing search and replace
def do_search_replace(search_pattern, replace_txt):
try:
@ -522,6 +523,7 @@ class HTMLPreProcessor(object):
if not replace_txt:
replace_txt = ''
rules.insert(0, (search_re, replace_txt))
user_sr_rules[(search_re, replace_txt)] = search_pattern
except Exception as e:
self.log.error('Failed to parse %r regexp because %s' %
(search, as_unicode(e)))
@ -587,7 +589,16 @@ class HTMLPreProcessor(object):
#dump(html, 'pre-preprocess')
for rule in rules + end_rules:
html = rule[0].sub(rule[1], html)
try:
html = rule[0].sub(rule[1], html)
except re.error as e:
if rule in user_sr_rules:
self.log.error(
'User supplied search & replace rule: %s -> %s '
'failed with error: %s, ignoring.'%(
user_sr_rules[rule], rule[1], e))
else:
raise
if is_pdftohtml and length > -1:
# Dehyphenate
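
The hunk above tags rules that came from user search/replace expressions so that a bad replacement pattern is reported and skipped instead of aborting the whole conversion. A standalone sketch of the same guard (the rule data and logger are placeholders):

import re

def apply_rules(html, rules, user_sr_rules, log):
    for pattern, replacement in rules:
        try:
            html = pattern.sub(replacement, html)
        except re.error as e:
            if (pattern, replacement) in user_sr_rules:
                log('User supplied search & replace rule: %s -> %s failed '
                    'with error: %s, ignoring.' % (
                        user_sr_rules[(pattern, replacement)], replacement, e))
            else:
                raise
    return html

# A replacement referring to a group that does not exist fails only at
# substitution time, which is exactly the case the try/except covers:
rules = [(re.compile(r'(\d+)'), r'\2')]
user_sr_rules = {rules[0]: r'(\d+)'}
messages = []
apply_rules('page 12', rules, user_sr_rules, messages.append)
assert messages and 'ignoring' in messages[0]
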

View File

@ -200,7 +200,7 @@ class Source(Plugin):
#: during the identify phase
touched_fields = frozenset()
#: Set this to True if your plugin return HTML formatted comments
#: Set this to True if your plugin returns HTML formatted comments
has_html_comments = False
#: Setting this to True means that the browser object will add

View File

@ -194,12 +194,11 @@ class TOC(list):
content = content_path(np)
if content and text:
content = content[0]
src = get_attr(content, attr='src')
if src:
purl = urlparse(content.get('src'))
href, fragment = unquote(purl[2]), unquote(purl[5])
nd = dest.add_item(href, fragment, text)
nd.play_order = play_order
# if get_attr(content, attr='src'):
purl = urlparse(content.get('src'))
href, fragment = unquote(purl[2]), unquote(purl[5])
nd = dest.add_item(href, fragment, text)
nd.play_order = play_order
for c in np_path(np):
process_navpoint(c, nd)

View File

@ -13,6 +13,7 @@ from calibre.utils.date import parse_date
from calibre.ebooks.mobi import MobiError
from calibre.ebooks.metadata import MetaInformation, check_isbn
from calibre.ebooks.mobi.langcodes import main_language, sub_language, mobi2iana
from calibre.utils.cleantext import clean_ascii_chars
from calibre.utils.localization import canonicalize_lang
NULL_INDEX = 0xffffffff
@ -31,6 +32,8 @@ class EXTHHeader(object): # {{{
self.kf8_header = None
self.uuid = self.cdetype = None
self.decode = lambda x : clean_ascii_chars(x.decode(codec, 'replace'))
while left > 0:
left -= 1
idx, size = struct.unpack('>LL', raw[pos:pos + 8])
@ -66,7 +69,7 @@ class EXTHHeader(object): # {{{
# title contains non ASCII chars or non filename safe chars
# they are messed up in the PDB header
try:
title = content.decode(codec)
title = self.decode(content)
except:
pass
elif idx == 524: # Lang code
@ -80,31 +83,30 @@ class EXTHHeader(object): # {{{
#else:
# print 'unknown record', idx, repr(content)
if title:
self.mi.title = replace_entities(title)
self.mi.title = replace_entities(clean_ascii_chars(title))
def process_metadata(self, idx, content, codec):
if idx == 100:
if self.mi.is_null('authors'):
self.mi.authors = []
au = content.decode(codec, 'ignore').strip()
au = self.decode(content).strip()
self.mi.authors.append(au)
if self.mi.is_null('author_sort') and re.match(r'\S+?\s*,\s+\S+', au.strip()):
self.mi.author_sort = au.strip()
elif idx == 101:
self.mi.publisher = content.decode(codec, 'ignore').strip()
self.mi.publisher = self.decode(content).strip()
if self.mi.publisher in {'Unknown', _('Unknown')}:
self.mi.publisher = None
elif idx == 103:
self.mi.comments = content.decode(codec, 'ignore')
self.mi.comments = self.decode(content).strip()
elif idx == 104:
raw = check_isbn(content.decode(codec, 'ignore').strip().replace('-', ''))
raw = check_isbn(self.decode(content).strip().replace('-', ''))
if raw:
self.mi.isbn = raw
elif idx == 105:
if not self.mi.tags:
self.mi.tags = []
self.mi.tags.extend([x.strip() for x in content.decode(codec,
'ignore').split(';')])
self.mi.tags.extend([x.strip() for x in self.decode(content).split(';')])
self.mi.tags = list(set(self.mi.tags))
elif idx == 106:
try:
@ -112,7 +114,7 @@ class EXTHHeader(object): # {{{
except:
pass
elif idx == 108:
self.mi.book_producer = content.decode(codec, 'ignore').strip()
self.mi.book_producer = self.decode(content).strip()
elif idx == 112: # dc:source set in some EBSP amazon samples
try:
content = content.decode(codec).strip()

View File

@ -98,6 +98,9 @@ _self_closing_pat = re.compile(
def close_self_closing_tags(raw):
return _self_closing_pat.sub(r'<\g<tag>\g<arg>></\g<tag>>', raw)
def uuid_id():
return 'u'+unicode(uuid.uuid4())
def iterlinks(root, find_links_in_css=True):
'''
Iterate over all links in a OEB Document.
@ -1528,7 +1531,7 @@ class TOC(object):
if parent is None:
parent = etree.Element(NCX('navMap'))
for node in self.nodes:
id = node.id or unicode(uuid.uuid4())
id = node.id or uuid_id()
po = node.play_order
if po == 0:
po = 1
@ -1634,10 +1637,10 @@ class PageList(object):
return self.pages.remove(page)
def to_ncx(self, parent=None):
plist = element(parent, NCX('pageList'), id=str(uuid.uuid4()))
plist = element(parent, NCX('pageList'), id=uuid_id())
values = dict((t, count(1)) for t in ('front', 'normal', 'special'))
for page in self.pages:
id = page.id or unicode(uuid.uuid4())
id = page.id or uuid_id()
type = page.type
value = str(values[type].next())
attrib = {'id': id, 'value': value, 'type': type, 'playOrder': '0'}

View File

@ -373,16 +373,12 @@ class OEBReader(object):
if not title:
self._toc_from_navpoint(item, toc, child)
continue
if not href:
gc = xpath(child, 'ncx:navPoint')
if not gc:
# This node is useless
continue
href = 'missing.html'
href = item.abshref(urlnormalize(href[0]))
if (not href or not href[0]) and not xpath(child, 'ncx:navPoint'):
# This node is useless
continue
href = item.abshref(urlnormalize(href[0])) if href and href[0] else ''
path, _ = urldefrag(href)
if path not in self.oeb.manifest.hrefs:
if href and path not in self.oeb.manifest.hrefs:
self.logger.warn('TOC reference %r not found' % href)
gc = xpath(child, 'ncx:navPoint')
if not gc:

View File

@ -18,7 +18,7 @@ from calibre import guess_type
from calibre.ebooks.oeb.base import (XHTML, XHTML_NS, CSS_MIME, OEB_STYLES,
namespace, barename, XPath)
from calibre.ebooks.oeb.stylizer import Stylizer
from calibre.utils.filenames import ascii_filename
from calibre.utils.filenames import ascii_filename, ascii_text
COLLAPSE = re.compile(r'[ \t\r\n\v]+')
STRIPNUM = re.compile(r'[-0-9]+$')
@ -437,7 +437,7 @@ class CSSFlattener(object):
items.sort()
css = u';\n'.join(u'%s: %s' % (key, val) for key, val in items)
classes = node.get('class', '').strip() or 'calibre'
klass = STRIPNUM.sub('', classes.split()[0].replace('_', ''))
klass = ascii_text(STRIPNUM.sub('', classes.split()[0].replace('_', '')))
if css in styles:
match = styles[css]
else:

View File

@ -1661,9 +1661,11 @@ class DeviceMixin(object): # {{{
update_metadata = device_prefs['manage_device_metadata'] == 'on_connect'
get_covers = False
desired_thumbnail_height = 0
if update_metadata and self.device_manager.is_device_connected:
if self.device_manager.device.WANTS_UPDATED_THUMBNAILS:
get_covers = True
desired_thumbnail_height = self.device_manager.device.THUMBNAIL_HEIGHT
# Force a reset if the caches are not initialized
if reset or not hasattr(self, 'db_book_title_cache'):
@ -1698,17 +1700,28 @@ class DeviceMixin(object): # {{{
# will be used by books_on_device to indicate matches. While we are
# going by, update the metadata for a book if automatic management is on
def update_book(id_, book) :
if not update_metadata:
return
mi = db.get_metadata(id_, index_is_id=True, get_cover=get_covers)
book.smart_update(mi, replace_metadata=True)
if get_covers:
if book.cover and os.access(book.cover, os.R_OK):
book.thumbnail = self.cover_to_thumbnail(open(book.cover, 'rb').read())
else:
book.thumbnail = self.default_thumbnail
for booklist in booklists:
for book in booklist:
book.in_library = None
if getattr(book, 'uuid', None) in self.db_book_uuid_cache:
id_ = db_book_uuid_cache[book.uuid]
if (update_metadata and
db.metadata_last_modified(id_, index_is_id=True) !=
getattr(book, 'last_modified', None)):
mi = db.get_metadata(id_, index_is_id=True,
get_cover=get_covers)
book.smart_update(mi, replace_metadata=True)
if (db.metadata_last_modified(id_, index_is_id=True) !=
getattr(book, 'last_modified', None)
or (not book.thumbnail
or max(book.thumbnail[0], book.thumbnail[1]) !=
desired_thumbnail_height)):
update_book(id_, book)
book.in_library = 'UUID'
# ensure that the correct application_id is set
book.application_id = id_
@ -1721,23 +1734,15 @@ class DeviceMixin(object): # {{{
# will match if any of the db_id, author, or author_sort
# also match.
if getattr(book, 'application_id', None) in d['db_ids']:
if update_metadata:
id_ = getattr(book, 'application_id', None)
book.smart_update(db.get_metadata(id_,
index_is_id=True,
get_cover=get_covers),
replace_metadata=True)
id_ = getattr(book, 'application_id', None)
update_book(id_, book)
book.in_library = 'APP_ID'
# app_id already matches a db_id. No need to set it.
continue
# Sonys know their db_id independent of the application_id
# in the metadata cache. Check that as well.
if getattr(book, 'db_id', None) in d['db_ids']:
if update_metadata:
book.smart_update(db.get_metadata(book.db_id,
index_is_id=True,
get_cover=get_covers),
replace_metadata=True)
update_book(book.db_id, book)
book.in_library = 'DB_ID'
book.application_id = book.db_id
continue
@ -1752,20 +1757,12 @@ class DeviceMixin(object): # {{{
book_authors = clean_string(authors_to_string(book.authors))
if book_authors in d['authors']:
id_ = d['authors'][book_authors]
if update_metadata:
book.smart_update(db.get_metadata(id_,
index_is_id=True,
get_cover=get_covers),
replace_metadata=True)
update_book(id_, book)
book.in_library = 'AUTHOR'
book.application_id = id_
elif book_authors in d['author_sort']:
id_ = d['author_sort'][book_authors]
if update_metadata:
book.smart_update(db.get_metadata(id_,
index_is_id=True,
get_cover=get_covers),
replace_metadata=True)
update_book(id_, book)
book.in_library = 'AUTH_SORT'
book.application_id = id_
else:
@ -1779,12 +1776,6 @@ class DeviceMixin(object): # {{{
if update_metadata:
if self.device_manager.is_device_connected:
if self.device_manager.device.WANTS_UPDATED_THUMBNAILS:
for blist in booklists:
for book in blist:
if book.cover and os.access(book.cover, os.R_OK):
book.thumbnail = \
self.cover_to_thumbnail(open(book.cover, 'rb').read())
plugboards = self.library_view.model().db.prefs.get('plugboards', {})
self.device_manager.sync_booklists(
FunctionDispatcher(self.metadata_synced), booklists,

View File

@ -16,12 +16,12 @@ from calibre.utils.pyparsing import ParseException
from calibre.ebooks.metadata import fmt_sidx, authors_to_string, string_to_authors
from calibre.ebooks.metadata.book.base import SafeFormat
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.config import tweaks, device_prefs
from calibre.utils.config import tweaks, device_prefs, prefs
from calibre.utils.date import dt_factory, qt_to_dt, as_local_time
from calibre.utils.icu import sort_key
from calibre.utils.search_query_parser import SearchQueryParser
from calibre.library.caches import (_match, CONTAINS_MATCH, EQUALS_MATCH,
REGEXP_MATCH, MetadataBackup, force_to_bool)
from calibre.db.search import _match, CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH
from calibre.library.caches import (MetadataBackup, force_to_bool)
from calibre.library.save_to_disk import find_plugboard
from calibre import strftime, isbytestring
from calibre.constants import filesystem_encoding, DEBUG
@ -1037,6 +1037,7 @@ class OnDeviceSearch(SearchQueryParser): # {{{
}
for x in ('author', 'format'):
q[x+'s'] = q[x]
upf = prefs['use_primary_find_in_search']
for index, row in enumerate(self.model.db):
for locvalue in locations:
accessor = q[locvalue]
@ -1063,7 +1064,7 @@ class OnDeviceSearch(SearchQueryParser): # {{{
vals = accessor(row).split(',')
else:
vals = [accessor(row)]
if _match(query, vals, m):
if _match(query, vals, m, use_primary_find_in_search=upf):
matches.add(index)
break
except ValueError: # Unicode errors

View File

@ -31,7 +31,7 @@ from calibre.utils.logging import GUILog as Log
from calibre.ebooks.metadata.sources.identify import urls_from_identifiers
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.opf2 import OPF
from calibre.gui2 import error_dialog, NONE, rating_font
from calibre.gui2 import error_dialog, NONE, rating_font, gprefs
from calibre.utils.date import (utcnow, fromordinal, format_date,
UNDEFINED_DATE, as_utc)
from calibre.library.comments import comments_to_html
@ -264,6 +264,15 @@ class ResultsView(QTableView): # {{{
sm = self.selectionModel()
sm.select(idx, sm.ClearAndSelect|sm.Rows)
def resize_delegate(self):
self.rt_delegate.max_width = int(self.width()/2.1)
self.resizeColumnsToContents()
def resizeEvent(self, ev):
ret = super(ResultsView, self).resizeEvent(ev)
self.resize_delegate()
return ret
def currentChanged(self, current, previous):
ret = QTableView.currentChanged(self, current, previous)
self.show_details(current)
@ -385,7 +394,7 @@ class IdentifyWorker(Thread): # {{{
def sample_results(self):
m1 = Metadata('The Great Gatsby', ['Francis Scott Fitzgerald'])
m2 = Metadata('The Great Gatsby', ['F. Scott Fitzgerald'])
m2 = Metadata('The Great Gatsby - An extra long title to test resizing', ['F. Scott Fitzgerald'])
m1.has_cached_cover_url = True
m2.has_cached_cover_url = False
m1.comments = 'Some comments '*10
@ -963,12 +972,16 @@ class FullFetch(QDialog): # {{{
self.covers_widget.chosen.connect(self.ok_clicked)
self.stack.addWidget(self.covers_widget)
self.resize(850, 600)
geom = gprefs.get('metadata_single_gui_geom', None)
if geom is not None and geom:
self.restoreGeometry(geom)
# Workaround for Qt 4.8.0 bug that causes the frame of the window to go
# off the top of the screen if a max height is not set for the
# QWebView. Seems to only happen on windows, but keep it for all
# platforms just in case.
self.identify_widget.comments_view.setMaximumHeight(500)
self.resize(850, 600)
self.identify_widget.comments_view.setMaximumHeight(self.height()-100)
self.finished.connect(self.cleanup)
@ -995,12 +1008,14 @@ class FullFetch(QDialog): # {{{
self.covers_widget.reset_covers()
def accept(self):
gprefs['metadata_single_gui_geom'] = bytearray(self.saveGeometry())
if self.stack.currentIndex() == 1:
return QDialog.accept(self)
# Prevent the usual dialog accept mechanisms from working
pass
def reject(self):
gprefs['metadata_single_gui_geom'] = bytearray(self.saveGeometry())
self.identify_widget.cancel()
self.covers_widget.cancel()
return QDialog.reject(self)

View File

@ -413,6 +413,7 @@ class RulesModel(QAbstractListModel): # {{{
rules = list(prefs['column_color_rules'])
self.rules = []
for col, template in rules:
if col not in self.fm: continue
try:
rule = rule_from_template(self.fm, template)
except:

View File

@ -10,8 +10,8 @@ from PyQt4.Qt import (Qt, QAbstractItemModel, QIcon, QVariant, QModelIndex, QSiz
from calibre.gui2 import NONE
from calibre.customize.ui import is_disabled, disable_plugin, enable_plugin
from calibre.library.caches import _match, CONTAINS_MATCH, EQUALS_MATCH, \
REGEXP_MATCH
from calibre.db.search import _match, CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH
from calibre.utils.config_base import prefs
from calibre.utils.icu import sort_key
from calibre.utils.search_query_parser import SearchQueryParser
@ -60,13 +60,13 @@ class Matches(QAbstractItemModel):
index = self.createIndex(i, 0)
data = QVariant(True)
self.setData(index, data, Qt.CheckStateRole)
def enable_none(self):
for i in xrange(len(self.matches)):
index = self.createIndex(i, 0)
data = QVariant(False)
self.setData(index, data, Qt.CheckStateRole)
def enable_invert(self):
for i in xrange(len(self.matches)):
self.toggle_plugin(self.createIndex(i, 0))
@ -243,6 +243,7 @@ class SearchFilter(SearchQueryParser):
'name': lambda x : x.name.lower(),
}
q['formats'] = q['format']
upf = prefs['use_primary_find_in_search']
for sr in self.srs:
for locvalue in locations:
accessor = q[locvalue]
@ -276,7 +277,7 @@ class SearchFilter(SearchQueryParser):
vals = accessor(sr).split(',')
else:
vals = [accessor(sr)]
if _match(query, vals, m):
if _match(query, vals, m, use_primary_find_in_search=upf):
matches.add(sr)
break
except ValueError: # Unicode errors

View File

@ -6,10 +6,12 @@ __license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import os
from threading import Lock
from PyQt4.Qt import (QUrl, QCoreApplication)
from calibre.constants import cache_dir
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
@ -26,6 +28,16 @@ class MobileReadStore(BasicStoreConfig, StorePlugin):
StorePlugin.__init__(self, *args, **kwargs)
self.lock = Lock()
@property
def cache(self):
if not hasattr(self, '_mr_cache'):
from calibre.utils.config import JSONConfig
self._mr_cache = JSONConfig('mobileread_get_books')
self._mr_cache.file_path = os.path.join(cache_dir(),
'mobileread_get_books.json')
self._mr_cache.refresh()
return self._mr_cache
def open(self, parent=None, detail_item=None, external=False):
url = 'http://www.mobileread.com/'
@ -61,7 +73,7 @@ class MobileReadStore(BasicStoreConfig, StorePlugin):
suppress_progress=False):
if self.lock.acquire(False):
try:
update_thread = CacheUpdateThread(self.config, self.seralize_books, timeout)
update_thread = CacheUpdateThread(self.cache, self.seralize_books, timeout)
if not suppress_progress:
progress = CacheProgressDialog(parent)
progress.set_message(_('Updating MobileRead book cache...'))
@ -85,7 +97,7 @@ class MobileReadStore(BasicStoreConfig, StorePlugin):
self.lock.release()
def get_book_list(self):
return self.deseralize_books(self.config.get('book_list', []))
return self.deseralize_books(self.cache.get('book_list', []))
def seralize_books(self, books):
sbooks = []

View File

@ -11,13 +11,13 @@ from operator import attrgetter
from PyQt4.Qt import (Qt, QAbstractItemModel, QModelIndex, QVariant, pyqtSignal)
from calibre.gui2 import NONE
from calibre.library.caches import _match, CONTAINS_MATCH, EQUALS_MATCH, \
REGEXP_MATCH
from calibre.db.search import _match, CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH
from calibre.utils.config_base import prefs
from calibre.utils.icu import sort_key
from calibre.utils.search_query_parser import SearchQueryParser
class BooksModel(QAbstractItemModel):
total_changed = pyqtSignal(int)
HEADERS = [_('Title'), _('Author(s)'), _('Format')]
@ -37,8 +37,8 @@ class BooksModel(QAbstractItemModel):
return self.books[row]
else:
return None
def search(self, filter):
def search(self, filter):
self.filter = filter.strip()
if not self.filter:
self.books = self.all_books
@ -50,7 +50,7 @@ class BooksModel(QAbstractItemModel):
self.layoutChanged.emit()
self.sort(self.sort_col, self.sort_order)
self.total_changed.emit(self.rowCount())
def index(self, row, column, parent=QModelIndex()):
return self.createIndex(row, column)
@ -64,7 +64,7 @@ class BooksModel(QAbstractItemModel):
def columnCount(self, *args):
return len(self.HEADERS)
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole:
return NONE
@ -112,7 +112,7 @@ class BooksModel(QAbstractItemModel):
class SearchFilter(SearchQueryParser):
USABLE_LOCATIONS = [
'all',
'author',
@ -161,6 +161,7 @@ class SearchFilter(SearchQueryParser):
}
for x in ('author', 'format'):
q[x+'s'] = q[x]
upf = prefs['use_primary_find_in_search']
for sr in self.srs:
for locvalue in locations:
accessor = q[locvalue]
@ -182,7 +183,7 @@ class SearchFilter(SearchQueryParser):
m = matchkind
vals = [accessor(sr)]
if _match(query, vals, m):
if _match(query, vals, m, use_primary_find_in_search=upf):
matches.add(sr)
break
except ValueError: # Unicode errors

View File

@ -56,7 +56,7 @@ class TOCItem(QStandardItem):
self.title = text
self.parent = parent
QStandardItem.__init__(self, text if text else '')
self.abspath = toc.abspath
self.abspath = toc.abspath if toc.href else None
self.fragment = toc.fragment
all_items.append(self)
self.bold_font = QFont(self.font())
@ -70,11 +70,13 @@ class TOCItem(QStandardItem):
if si == self.abspath:
spos = i
break
try:
am = getattr(spine[i], 'anchor_map', {})
except UnboundLocalError:
# Spine was empty?
am = {}
am = {}
if self.abspath is not None:
try:
am = getattr(spine[i], 'anchor_map', {})
except UnboundLocalError:
# Spine was empty?
pass
frag = self.fragment if (self.fragment and self.fragment in am) else None
self.starts_at = spos
self.start_anchor = frag

View File

@ -6,7 +6,7 @@ __license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re, itertools, time, traceback, locale
import itertools, time, traceback, locale
from itertools import repeat, izip, imap
from datetime import timedelta
from threading import Thread
@ -16,10 +16,10 @@ from calibre.utils.date import parse_date, now, UNDEFINED_DATE, clean_date_for_s
from calibre.utils.search_query_parser import SearchQueryParser
from calibre.utils.pyparsing import ParseException
from calibre.utils.localization import (canonicalize_lang, lang_map, get_udc)
from calibre.db.search import CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH, _match
from calibre.ebooks.metadata import title_sort, author_to_author_sort
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre import prints
from calibre.utils.icu import primary_find
class MetadataBackup(Thread): # {{{
'''
@ -118,7 +118,6 @@ class MetadataBackup(Thread): # {{{
# }}}
### Global utility function for get_match here and in gui2/library.py
# This is a global for performance
pref_use_primary_find_in_search = False
@ -127,47 +126,6 @@ def set_use_primary_find_in_search(toWhat):
global pref_use_primary_find_in_search
pref_use_primary_find_in_search = toWhat
CONTAINS_MATCH = 0
EQUALS_MATCH = 1
REGEXP_MATCH = 2
def _match(query, value, matchkind):
if query.startswith('..'):
query = query[1:]
sq = query[1:]
internal_match_ok = True
else:
internal_match_ok = False
for t in value:
try: ### ignore regexp exceptions, required because search-ahead tries before typing is finished
t = icu_lower(t)
if (matchkind == EQUALS_MATCH):
if internal_match_ok:
if query == t:
return True
comps = [c.strip() for c in t.split('.') if c.strip()]
for comp in comps:
if sq == comp:
return True
elif query[0] == '.':
if t.startswith(query[1:]):
ql = len(query) - 1
if (len(t) == ql) or (t[ql:ql+1] == '.'):
return True
elif query == t:
return True
elif matchkind == REGEXP_MATCH:
if re.search(query, t, re.I|re.UNICODE):
return True
elif matchkind == CONTAINS_MATCH:
if pref_use_primary_find_in_search:
if primary_find(query, t)[0] != -1:
return True
elif query in t:
return True
except re.error:
pass
return False
def force_to_bool(val):
if isinstance(val, (str, unicode)):
try:
@ -576,7 +534,8 @@ class ResultCache(SearchQueryParser): # {{{
continue
k = parts[:1]
v = parts[1:]
if keyq and not _match(keyq, k, keyq_mkind):
if keyq and not _match(keyq, k, keyq_mkind,
use_primary_find_in_search=pref_use_primary_find_in_search):
continue
if valq:
if valq == 'true':
@ -586,7 +545,8 @@ class ResultCache(SearchQueryParser): # {{{
if v:
add_if_nothing_matches = False
continue
elif not _match(valq, v, valq_mkind):
elif not _match(valq, v, valq_mkind,
use_primary_find_in_search=pref_use_primary_find_in_search):
continue
matches.add(id_)
@ -851,7 +811,8 @@ class ResultCache(SearchQueryParser): # {{{
vals = [v.strip() for v in item[loc].split(is_multiple_cols[loc])]
else:
vals = [item[loc]] ### make into list to make _match happy
if _match(q, vals, matchkind):
if _match(q, vals, matchkind,
use_primary_find_in_search=pref_use_primary_find_in_search):
matches.add(item[0])
continue
current_candidates -= matches

View File

@ -9,7 +9,7 @@ from xml.sax.saxutils import escape
from calibre import (prepare_string_for_xml, strftime, force_unicode,
isbytestring)
from calibre.constants import isosx
from calibre.constants import isosx, cache_dir
from calibre.customize.conversion import DummyReporter
from calibre.customize.ui import output_profiles
from calibre.ebooks.BeautifulSoup import BeautifulSoup, BeautifulStoneSoup, Tag, NavigableString
@ -18,7 +18,6 @@ from calibre.ebooks.metadata import author_to_author_sort
from calibre.library.catalogs import AuthorSortMismatchException, EmptyCatalogException, \
InvalidGenresSourceFieldException
from calibre.ptempfile import PersistentTemporaryDirectory
from calibre.utils.config import config_dir
from calibre.utils.date import format_date, is_date_undefined, now as nowf
from calibre.utils.filenames import ascii_text, shorten_components_to
from calibre.utils.icu import capitalize, collation_order, sort_key
@ -109,7 +108,7 @@ class CatalogBuilder(object):
self.plugin = plugin
self.reporter = report_progress
self.stylesheet = stylesheet
self.cache_dir = os.path.join(config_dir, 'caches', 'catalog')
self.cache_dir = os.path.join(cache_dir(), 'catalog')
self.catalog_path = PersistentTemporaryDirectory("_epub_mobi_catalog", prefix='')
self.content_dir = os.path.join(self.catalog_path, "content")
self.excluded_tags = self.get_excluded_tags()

View File

@ -1220,7 +1220,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
loc.append(_('Card A'))
if b is not None:
loc.append(_('Card B'))
return ', '.join(loc) + ((' (%s books)'%count) if count > 1 else '')
return ', '.join(loc) + ((_(' (%s books)')%count) if count > 1 else '')
def set_book_on_device_func(self, func):
self.book_on_device_func = func

View File

@ -5,8 +5,8 @@
msgid ""
msgstr ""
"Project-Id-Version: calibre 0.9.15\n"
"POT-Creation-Date: 2013-01-18 09:12+IST\n"
"PO-Revision-Date: 2013-01-18 09:12+IST\n"
"POT-Creation-Date: 2013-01-22 10:10+IST\n"
"PO-Revision-Date: 2013-01-22 10:10+IST\n"
"Last-Translator: Automatically generated\n"
"Language-Team: LANGUAGE\n"
"MIME-Version: 1.0\n"
@ -21,9 +21,9 @@ msgid "Does absolutely nothing"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/customize/__init__.py:59
#: /home/kovid/work/calibre/src/calibre/db/cache.py:106
#: /home/kovid/work/calibre/src/calibre/db/cache.py:109
#: /home/kovid/work/calibre/src/calibre/db/cache.py:120
#: /home/kovid/work/calibre/src/calibre/db/cache.py:139
#: /home/kovid/work/calibre/src/calibre/db/cache.py:142
#: /home/kovid/work/calibre/src/calibre/db/cache.py:153
#: /home/kovid/work/calibre/src/calibre/devices/android/driver.py:379
#: /home/kovid/work/calibre/src/calibre/devices/android/driver.py:380
#: /home/kovid/work/calibre/src/calibre/devices/hanvon/driver.py:114
@ -42,8 +42,7 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/devices/prst1/driver.py:469
#: /home/kovid/work/calibre/src/calibre/devices/usbms/driver.py:480
#: /home/kovid/work/calibre/src/calibre/ebooks/chm/metadata.py:57
#: /home/kovid/work/calibre/src/calibre/ebooks/conversion/plugins/chm_input.py:109
#: /home/kovid/work/calibre/src/calibre/ebooks/conversion/plugins/chm_input.py:112
#: /home/kovid/work/calibre/src/calibre/ebooks/conversion/plugins/chm_input.py:183
#: /home/kovid/work/calibre/src/calibre/ebooks/conversion/plugins/comic_input.py:189
#: /home/kovid/work/calibre/src/calibre/ebooks/conversion/plugins/fb2_input.py:99
#: /home/kovid/work/calibre/src/calibre/ebooks/conversion/plugins/fb2_input.py:101
@ -106,10 +105,10 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/ebooks/metadata/sources/ozon.py:130
#: /home/kovid/work/calibre/src/calibre/ebooks/metadata/sources/worker.py:26
#: /home/kovid/work/calibre/src/calibre/ebooks/metadata/txt.py:18
#: /home/kovid/work/calibre/src/calibre/ebooks/mobi/reader/headers.py:27
#: /home/kovid/work/calibre/src/calibre/ebooks/mobi/reader/headers.py:95
#: /home/kovid/work/calibre/src/calibre/ebooks/mobi/reader/headers.py:154
#: /home/kovid/work/calibre/src/calibre/ebooks/mobi/reader/headers.py:193
#: /home/kovid/work/calibre/src/calibre/ebooks/mobi/reader/headers.py:28
#: /home/kovid/work/calibre/src/calibre/ebooks/mobi/reader/headers.py:98
#: /home/kovid/work/calibre/src/calibre/ebooks/mobi/reader/headers.py:156
#: /home/kovid/work/calibre/src/calibre/ebooks/mobi/reader/headers.py:195
#: /home/kovid/work/calibre/src/calibre/ebooks/mobi/reader/mobi6.py:618
#: /home/kovid/work/calibre/src/calibre/ebooks/mobi/utils.py:316
#: /home/kovid/work/calibre/src/calibre/ebooks/mobi/writer2/indexer.py:463
@ -155,11 +154,11 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/email.py:193
#: /home/kovid/work/calibre/src/calibre/gui2/email.py:208
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:439
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1103
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1319
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1322
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1325
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1413
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1104
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1320
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1323
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1326
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1414
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/basic_widgets.py:85
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/basic_widgets.py:250
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/basic_widgets.py:261
@ -884,7 +883,7 @@ msgstr ""
msgid "Path to library too long. Must be less than %d characters."
msgstr ""
#: /home/kovid/work/calibre/src/calibre/db/cache.py:134
#: /home/kovid/work/calibre/src/calibre/db/cache.py:167
#: /home/kovid/work/calibre/src/calibre/ebooks/metadata/book/base.py:666
#: /home/kovid/work/calibre/src/calibre/gui2/custom_column_widgets.py:67
#: /home/kovid/work/calibre/src/calibre/gui2/custom_column_widgets.py:678
@ -894,23 +893,88 @@ msgstr ""
msgid "Yes"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/db/fields.py:163
#: /home/kovid/work/calibre/src/calibre/db/fields.py:186
#: /home/kovid/work/calibre/src/calibre/library/database2.py:1218
msgid "Main"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/db/fields.py:165
#: /home/kovid/work/calibre/src/calibre/db/fields.py:188
#: /home/kovid/work/calibre/src/calibre/gui2/layout.py:77
#: /home/kovid/work/calibre/src/calibre/library/database2.py:1220
msgid "Card A"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/db/fields.py:167
#: /home/kovid/work/calibre/src/calibre/db/fields.py:190
#: /home/kovid/work/calibre/src/calibre/gui2/layout.py:79
#: /home/kovid/work/calibre/src/calibre/library/database2.py:1222
msgid "Card B"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/db/search.py:33
#: /home/kovid/work/calibre/src/calibre/db/search.py:313
#: /home/kovid/work/calibre/src/calibre/library/caches.py:135
#: /home/kovid/work/calibre/src/calibre/library/caches.py:577
msgid "checked"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/db/search.py:33
#: /home/kovid/work/calibre/src/calibre/db/search.py:311
#: /home/kovid/work/calibre/src/calibre/library/caches.py:135
#: /home/kovid/work/calibre/src/calibre/library/caches.py:575
#: /home/kovid/work/calibre/src/calibre/library/save_to_disk.py:229
msgid "yes"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/db/search.py:35
#: /home/kovid/work/calibre/src/calibre/db/search.py:310
#: /home/kovid/work/calibre/src/calibre/library/caches.py:137
#: /home/kovid/work/calibre/src/calibre/library/caches.py:574
#: /home/kovid/work/calibre/src/calibre/library/save_to_disk.py:229
msgid "no"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/db/search.py:35
#: /home/kovid/work/calibre/src/calibre/db/search.py:312
#: /home/kovid/work/calibre/src/calibre/library/caches.py:137
#: /home/kovid/work/calibre/src/calibre/library/caches.py:576
msgid "unchecked"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/db/search.py:110
#: /home/kovid/work/calibre/src/calibre/library/caches.py:313
msgid "today"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/db/search.py:111
#: /home/kovid/work/calibre/src/calibre/library/caches.py:314
msgid "yesterday"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/db/search.py:112
#: /home/kovid/work/calibre/src/calibre/library/caches.py:315
msgid "thismonth"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/db/search.py:113
#: /home/kovid/work/calibre/src/calibre/library/caches.py:316
msgid "daysago"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/db/search.py:314
#: /home/kovid/work/calibre/src/calibre/library/caches.py:578
msgid "empty"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/db/search.py:315
#: /home/kovid/work/calibre/src/calibre/library/caches.py:579
msgid "blank"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/db/search.py:324
#: /home/kovid/work/calibre/src/calibre/library/caches.py:591
msgid "Invalid boolean query \"{0}\""
msgstr ""
#: /home/kovid/work/calibre/src/calibre/debug.py:70
#: /home/kovid/work/calibre/src/calibre/gui2/main.py:47
msgid "Cause a running calibre instance, if any, to be shutdown. Note that if there are running jobs, they will be silently aborted, so use with care."
@ -1123,8 +1187,8 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/devices/bambook/driver.py:268
#: /home/kovid/work/calibre/src/calibre/devices/bambook/driver.py:324
#: /home/kovid/work/calibre/src/calibre/devices/mtp/driver.py:391
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:1134
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:1136
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:1128
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:1130
#: /home/kovid/work/calibre/src/calibre/devices/usbms/driver.py:277
#: /home/kovid/work/calibre/src/calibre/devices/usbms/driver.py:279
msgid "Transferring books to device..."
@ -1135,8 +1199,8 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/devices/kobo/driver.py:491
#: /home/kovid/work/calibre/src/calibre/devices/kobo/driver.py:525
#: /home/kovid/work/calibre/src/calibre/devices/mtp/driver.py:430
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:1147
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:1158
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:1141
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:1152
#: /home/kovid/work/calibre/src/calibre/devices/usbms/driver.py:301
#: /home/kovid/work/calibre/src/calibre/devices/usbms/driver.py:332
msgid "Adding books to device metadata listing..."
@ -1158,8 +1222,8 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/devices/bambook/driver.py:374
#: /home/kovid/work/calibre/src/calibre/devices/kobo/driver.py:479
#: /home/kovid/work/calibre/src/calibre/devices/kobo/driver.py:486
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:1190
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:1196
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:1202
#: /home/kovid/work/calibre/src/calibre/devices/usbms/driver.py:366
#: /home/kovid/work/calibre/src/calibre/devices/usbms/driver.py:371
msgid "Removing books from device metadata listing..."
@ -1668,7 +1732,7 @@ msgid "Communicate with MTP devices"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/devices/mtp/driver.py:167
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:950
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:952
#: /home/kovid/work/calibre/src/calibre/devices/usbms/driver.py:95
msgid "Get device information..."
msgstr ""
@ -1967,17 +2031,17 @@ msgstr ""
msgid "Too many connection attempts from %s"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:1312
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:1306
#, python-format
msgid "Invalid port in options: %s"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:1320
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:1314
#, python-format
msgid "Failed to connect to port %d. Try a different value."
msgstr ""
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:1332
#: /home/kovid/work/calibre/src/calibre/devices/smart_device_app/driver.py:1326
msgid "Failed to allocate a random port"
msgstr ""
@ -3443,7 +3507,7 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/quickview.py:85
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/template_dialog.py:222
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:83
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1108
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1109
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:150
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/metadata_sources.py:162
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:39
@ -3456,7 +3520,7 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/ebooks/metadata/book/base.py:770
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:85
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1109
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1110
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/models.py:23
msgid "Author(s)"
msgstr ""
@ -3501,7 +3565,7 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/catalogs/epub_mobi_builder.py:982
#: /home/kovid/work/calibre/src/calibre/library/catalogs/epub_mobi_builder.py:1228
#: /home/kovid/work/calibre/src/calibre/library/field_metadata.py:201
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:802
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:804
msgid "Tags"
msgstr ""
@ -3742,7 +3806,7 @@ msgstr ""
msgid "Downloads metadata and covers from OZON.ru"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/ebooks/mobi/reader/headers.py:58
#: /home/kovid/work/calibre/src/calibre/ebooks/mobi/reader/headers.py:61
msgid "Sample Book"
msgstr ""
@ -3778,7 +3842,7 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/ebooks/oeb/base.py:1281
#: /home/kovid/work/calibre/src/calibre/ebooks/oeb/transforms/htmltoc.py:15
#: /home/kovid/work/calibre/src/calibre/gui2/viewer/main_ui.py:221
#: /home/kovid/work/calibre/src/calibre/gui2/viewer/toc.py:217
#: /home/kovid/work/calibre/src/calibre/gui2/viewer/toc.py:219
msgid "Table of Contents"
msgstr ""
@ -3863,7 +3927,7 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/create_custom_column.py:71
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/metadata_sources.py:160
#: /home/kovid/work/calibre/src/calibre/library/field_metadata.py:176
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:800
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:802
msgid "Rating"
msgstr ""
@ -4985,8 +5049,8 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/actions/edit_metadata.py:101
#: /home/kovid/work/calibre/src/calibre/gui2/dnd.py:84
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:518
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:830
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:527
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:839
msgid "Download failed"
msgstr ""
@ -5018,7 +5082,7 @@ msgid "Download complete"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/actions/edit_metadata.py:123
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:892
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:901
msgid "Download log"
msgstr ""
@ -5249,7 +5313,7 @@ msgid "Click the show details button to see which ones."
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/actions/show_book_details.py:16
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:807
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:809
msgid "Show book details"
msgstr ""
@ -5799,7 +5863,7 @@ msgid "Click to open"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/book_details.py:180
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:856
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:858
msgid "Ids"
msgstr ""
@ -5809,7 +5873,7 @@ msgid "Book %(sidx)s of <span class=\"series_name\">%(series)s</span>"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/book_details.py:233
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1112
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1113
msgid "Collections"
msgstr ""
@ -8315,7 +8379,7 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/device_drivers/mtp_config.py:421
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/message_box.py:141
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:885
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:894
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/tweaks.py:344
#: /home/kovid/work/calibre/src/calibre/gui2/viewer/main_ui.py:227
msgid "Copy to clipboard"
@ -8815,7 +8879,7 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/delete_matching_from_device.py:77
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:87
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1110
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1111
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/create_custom_column.py:35
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/create_custom_column.py:76
#: /home/kovid/work/calibre/src/calibre/library/field_metadata.py:365
@ -8931,7 +8995,7 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/edit_authors_dialog.py:122
#: /home/kovid/work/calibre/src/calibre/gui2/lrf_renderer/main.py:160
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:527
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:536
#: /home/kovid/work/calibre/src/calibre/gui2/viewer/main.py:729
msgid "No matches found"
msgstr ""
@ -9110,8 +9174,8 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/message_box.py:196
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/message_box.py:251
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:950
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:1059
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:959
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:1074
#: /home/kovid/work/calibre/src/calibre/gui2/proceed.py:48
msgid "View log"
msgstr ""
@ -11539,13 +11603,13 @@ msgid "Modified"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:819
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1455
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1456
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:335
msgid "The lookup/search name is \"{0}\""
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:825
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1457
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1458
msgid "This book's UUID is \"{0}\""
msgstr ""
@ -11574,20 +11638,20 @@ msgstr ""
msgid "Could not set data, click Show Details to see why."
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1107
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1108
msgid "In Library"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1111
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1112
#: /home/kovid/work/calibre/src/calibre/library/field_metadata.py:355
msgid "Size"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1437
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1438
msgid "Marked for deletion"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1440
#: /home/kovid/work/calibre/src/calibre/gui2/library/models.py:1441
msgid "Double click to <b>edit</b> me<br><br>"
msgstr ""
@ -11690,7 +11754,7 @@ msgid "Previous Page"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/lrf_renderer/main_ui.py:133
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:947
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:956
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_store_dialog_ui.py:62
#: /home/kovid/work/calibre/src/calibre/gui2/viewer/main_ui.py:215
msgid "Back"
@ -12131,7 +12195,7 @@ msgid "Edit Metadata"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single.py:63
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:940
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:949
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:108
#: /home/kovid/work/calibre/src/calibre/web/feeds/templates.py:219
#: /home/kovid/work/calibre/src/calibre/web/feeds/templates.py:410
@ -12288,62 +12352,62 @@ msgid ""
"cover stage, and vice versa."
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:292
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:301
msgid "See at"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:446
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:455
msgid "calibre is downloading metadata from: "
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:468
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:477
msgid "Please wait"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:500
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:509
msgid "Query: "
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:519
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:528
msgid "Failed to download metadata. Click Show Details to see details"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:528
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:537
msgid "Failed to find any books that match your search. Try making the search <b>less specific</b>. For example, use only the author's last name and a single distinctive word from the title.<p>To see the full log, click Show Details."
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:636
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:645
msgid "Current cover"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:639
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:648
msgid "Searching..."
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:800
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:809
#, python-format
msgid "Downloading covers for <b>%s</b>, please wait..."
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:831
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:840
msgid "Failed to download any covers, click \"Show details\" for details."
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:837
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:846
#, python-format
msgid "Could not find any covers for <b>%s</b>"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:839
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:848
#, python-format
msgid "Found <b>%(num)d</b> covers of %(title)s. Pick the one you like best."
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:928
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:937
msgid "Downloading metadata..."
msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:1043
#: /home/kovid/work/calibre/src/calibre/gui2/metadata/single_download.py:1058
msgid "Downloading cover..."
msgstr ""
@ -16693,56 +16757,6 @@ msgid ""
"<p>Stanza should see your calibre collection automatically. If not, try adding the URL http://myhostname:8080 as a new catalog in the Stanza reader on your iPhone. Here myhostname should be the fully qualified hostname or the IP address of the computer calibre is running on."
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/caches.py:177
#: /home/kovid/work/calibre/src/calibre/library/caches.py:617
msgid "checked"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/caches.py:177
#: /home/kovid/work/calibre/src/calibre/library/caches.py:615
#: /home/kovid/work/calibre/src/calibre/library/save_to_disk.py:229
msgid "yes"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/caches.py:179
#: /home/kovid/work/calibre/src/calibre/library/caches.py:614
#: /home/kovid/work/calibre/src/calibre/library/save_to_disk.py:229
msgid "no"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/caches.py:179
#: /home/kovid/work/calibre/src/calibre/library/caches.py:616
msgid "unchecked"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/caches.py:355
msgid "today"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/caches.py:356
msgid "yesterday"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/caches.py:357
msgid "thismonth"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/caches.py:358
msgid "daysago"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/caches.py:618
msgid "empty"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/caches.py:619
msgid "blank"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/caches.py:631
msgid "Invalid boolean query \"{0}\""
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/catalogs/bibtex.py:36
#, python-format
msgid ""
@ -17766,6 +17780,11 @@ msgstr ""
msgid "creating custom column "
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/database2.py:1223
#, python-format
msgid " (%s books)"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/database2.py:3698
#, python-format
msgid "<p>Migrating old database to ebook library in %s<br><center>"
@ -17985,19 +18004,19 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/ajax.py:317
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:355
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:649
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:651
msgid "All books"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/ajax.py:318
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:354
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:648
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:650
#: /home/kovid/work/calibre/src/calibre/library/server/opds.py:584
msgid "Newest"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:65
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:518
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:520
msgid "Loading, please wait"
msgstr ""
@ -18050,65 +18069,65 @@ msgstr ""
msgid "Random book"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:403
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:472
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:405
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:474
msgid "Browse books by"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:408
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:410
msgid "Choose a category to browse by:"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:543
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:545
msgid "Browsing by"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:544
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:546
msgid "Up"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:684
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:686
msgid "in"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:687
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:689
msgid "Books in"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:781
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:783
msgid "Other formats"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:788
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:790
#, python-format
msgid "Read %(title)s in the %(fmt)s format"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:793
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:795
msgid "Get"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:806
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:808
msgid "Details"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:808
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:810
msgid "Permalink"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:809
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:811
msgid "A permanent link to this book"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:821
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:823
msgid "This book has been deleted"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:927
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:929
msgid "in search"
msgstr ""
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:929
#: /home/kovid/work/calibre/src/calibre/library/server/browse.py:931
msgid "Matching books"
msgstr ""

View File

@ -32,6 +32,10 @@ class Browser(B):
B.set_cookiejar(self, *args, **kwargs)
self._clone_actions['set_cookiejar'] = ('set_cookiejar', args, kwargs)
def copy_cookies_from_jsbrowser(self, jsbrowser):
for cookie in jsbrowser.cookies:
self.cookiejar.set_cookie(cookie)
@property
def cookiejar(self):
return self._clone_actions['set_cookiejar'][1][0]

View File

@ -28,6 +28,7 @@
import array
import string
import sys
import codecs
import calibre.utils.chm.chmlib as chmlib
from calibre.constants import plugins
@ -184,7 +185,7 @@ locale_table = {
0x0420 : ('iso8859_6', "Urdu", "Arabic"),
0x0443 : ('iso8859_9', "Uzbek_Latin", "Turkish"),
0x0843 : ('cp1251', "Uzbek_Cyrillic", "Cyrillic"),
0x042a : (None, "Vietnamese", "Vietnamese")
0x042a : ('cp1258', "Vietnamese", "Vietnamese")
}
class CHMFile:
@ -434,6 +435,19 @@ class CHMFile:
else:
return None
def get_encoding(self):
ans = self.GetEncoding()
if ans is None:
lcid = self.GetLCID()
if lcid is not None:
ans = lcid[0]
if ans:
try:
codecs.lookup(ans)
except:
ans = None
return ans
def GetDWORD(self, buff, idx=0):
'''Internal method.
Reads a double word (4 bytes) from a buffer.

View File

@ -9,6 +9,7 @@ __docformat__ = 'restructuredtext en'
from future_builtins import map
from calibre.utils.fonts.utils import get_all_font_names
from calibre.utils.fonts.sfnt.container import UnsupportedFont
class FontMetrics(object):
@ -19,6 +20,10 @@ class FontMetrics(object):
'''
def __init__(self, sfnt):
for table in (b'head', b'hhea', b'hmtx', b'cmap', b'OS/2', b'post',
b'name'):
if table not in sfnt:
raise UnsupportedFont('This font has no %s table'%table)
self.sfnt = sfnt
self.head = self.sfnt[b'head']

View File

@ -332,6 +332,12 @@ class BasicNewsRecipe(Recipe):
#: ignore_duplicate_articles = {'title', 'url'}
ignore_duplicate_articles = None
#: If you set this to True, then calibre will use javascript to log in to the
#: website. This is needed for some websites that require javascript
#: to log in. If you set this to True you must implement the
#: :meth:`javascript_login` method, to do the actual logging in.
use_javascript_to_login = False
# See the built-in profiles for examples of these settings.
def short_title(self):
@ -404,8 +410,7 @@ class BasicNewsRecipe(Recipe):
'''
return url
@classmethod
def get_browser(cls, *args, **kwargs):
def get_browser(self, *args, **kwargs):
'''
Return a browser instance used to fetch documents from the web. By default
it returns a `mechanize <http://wwwsearch.sourceforge.net/mechanize/>`_
@ -427,9 +432,47 @@ class BasicNewsRecipe(Recipe):
return br
'''
br = browser(*args, **kwargs)
br.addheaders += [('Accept', '*/*')]
return br
if self.use_javascript_to_login:
if getattr(self, 'browser', None) is not None:
return self.clone_browser(self.browser)
from calibre.web.jsbrowser.browser import Browser
br = Browser()
with br:
self.javascript_login(br, self.username, self.password)
kwargs['user_agent'] = br.user_agent
ans = browser(*args, **kwargs)
ans.copy_cookies_from_jsbrowser(br)
return ans
else:
br = browser(*args, **kwargs)
br.addheaders += [('Accept', '*/*')]
return br
def javascript_login(self, browser, username, password):
'''
This method is used to log in to a website that uses javascript for its
login form. After the login is complete, the cookies returned from the
website are copied to a normal (non-javascript) browser and the
download proceeds using those cookies.
An example implementation::
def javascript_login(self, browser, username, password):
browser.visit('http://some-page-that-has-a-login')
form = browser.select_form(nr=0) # Select the first form on the page
form['username'] = username
form['password'] = password
browser.submit(timeout=120) # Submit the form and wait at most two minutes for loading to complete
Note that you can also select forms with CSS2 selectors, like this::
browser.select_form('form#login_form')
browser.select_form('form[name="someform"]')
'''
raise NotImplementedError('You must implement the javascript_login()'
' method if you set use_javascript_to_login'
' to True')
def clone_browser(self, br):
'''
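
In a recipe, the two pieces are the class attribute and the override; a hypothetical recipe might look like this (the URL, selector and field names are assumptions for illustration)::

    from calibre.web.feeds.news import BasicNewsRecipe

    class SomePaywalledSite(BasicNewsRecipe):
        title = 'Some Paywalled Site'
        use_javascript_to_login = True

        def javascript_login(self, browser, username, password):
            browser.visit('http://example.com/login')    # assumed login page
            form = browser.select_form('form#login')     # assumed CSS2 selector
            form['user'] = username                      # assumed field names
            form['pass'] = password
            browser.submit(timeout=120)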

View File

@ -10,8 +10,6 @@ UTF-8 encoding with any charset declarations removed.
import sys, socket, os, urlparse, re, time, copy, urllib2, threading, traceback, imghdr
from urllib import url2pathname, quote
from httplib import responses
from PIL import Image
from cStringIO import StringIO
from base64 import b64decode
from calibre import browser, relpath, unicode_path
@ -21,6 +19,8 @@ from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag
from calibre.ebooks.chardet import xml_to_unicode
from calibre.utils.config import OptionParser
from calibre.utils.logging import Log
from calibre.utils.magick import Image
from calibre.utils.magick.draw import identify_data
class FetchError(Exception):
pass
@ -374,8 +374,8 @@ class RecursiveFetcher(object):
fname = ascii_filename('img'+str(c))
if isinstance(fname, unicode):
fname = fname.encode('ascii', 'replace')
imgpath = os.path.join(diskpath, fname+'.jpg')
if (imghdr.what(None, data) is None and b'<svg' in data[:1024]):
itype = imghdr.what(None, data)
if itype is None and b'<svg' in data[:1024]:
# SVG image
imgpath = os.path.join(diskpath, fname+'.svg')
with self.imagemap_lock:
@ -385,11 +385,18 @@ class RecursiveFetcher(object):
tag['src'] = imgpath
else:
try:
im = Image.open(StringIO(data)).convert('RGBA')
if itype not in {'png', 'jpg', 'jpeg'}:
itype = 'png' if itype == 'gif' else 'jpg'
im = Image()
im.load(data)
data = im.export(itype)
else:
identify_data(data)
imgpath = os.path.join(diskpath, fname+'.'+itype)
with self.imagemap_lock:
self.imagemap[iurl] = imgpath
with open(imgpath, 'wb') as x:
im.save(x, 'JPEG')
x.write(data)
tag['src'] = imgpath
except:
traceback.print_exc()
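
The switch from PIL to calibre.utils.magick changes the decision logic: the type reported by imghdr is now trusted, png/jpeg data is written out untouched, and only other formats are converted (gif to png, everything else to jpg), with SVG special-cased earlier. A standalone restatement of that decision (not the fetcher code itself)::

    import imghdr

    def choose_target(data):
        # Returns (extension, needs_conversion) using the same rules as above.
        itype = imghdr.what(None, data)
        if itype is None and b'<svg' in data[:1024]:
            return 'svg', False
        if itype in ('png', 'jpg', 'jpeg'):
            return itype, False
        return ('png' if itype == 'gif' else 'jpg'), True

    # imghdr recognises a GIF from its magic bytes alone:
    print(choose_target(b'GIF89a' + b'\x00' * 20))   # ('png', True)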

View File

@ -16,7 +16,7 @@ from PyQt4.Qt import (QObject, QNetworkAccessManager, QNetworkDiskCache,
from PyQt4.QtWebKit import QWebPage, QWebSettings, QWebView, QWebElement
from calibre import USER_AGENT, prints, get_proxies, get_proxy_info
from calibre.constants import ispy3, config_dir
from calibre.constants import ispy3, cache_dir
from calibre.utils.logging import ThreadSafeLog
from calibre.gui2 import must_use_qt
from calibre.web.jsbrowser.forms import FormsMixin
@ -44,7 +44,7 @@ class WebPage(QWebPage): # {{{
settings = self.settings()
if enable_developer_tools:
settings.setAttribute(QWebSettings.DeveloperExtrasEnabled, True)
QWebSettings.enablePersistentStorage(os.path.join(config_dir, 'caches',
QWebSettings.enablePersistentStorage(os.path.join(cache_dir(),
'webkit-persistence'))
QWebSettings.setMaximumPagesInCache(0)
@ -135,8 +135,7 @@ class NetworkAccessManager(QNetworkAccessManager): # {{{
self.log = log
if use_disk_cache:
self.cache = QNetworkDiskCache(self)
self.cache.setCacheDirectory(os.path.join(config_dir, 'caches',
'jsbrowser'))
self.cache.setCacheDirectory(os.path.join(cache_dir(), 'jsbrowser'))
self.setCache(self.cache)
self.sslErrors.connect(self.on_ssl_errors)
self.pf = ProxyFactory(log)
@ -303,6 +302,10 @@ class Browser(QObject, FormsMixin):
self.nam = NetworkAccessManager(log, use_disk_cache=use_disk_cache, parent=self)
self.page.setNetworkAccessManager(self.nam)
@property
def user_agent(self):
return self.page.user_agent
def _wait_for_load(self, timeout, url=None):
loop = QEventLoop(self)
start_time = time.time()
@ -422,3 +425,9 @@ class Browser(QObject, FormsMixin):
pass
self.nam = self.page = None
def __enter__(self):
pass
def __exit__(self, *args):
self.close()
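
The context-manager support means the page and network manager are always released, even if an exception escapes. A minimal sketch of the intended usage (the URL is a placeholder; a Qt application must be available, as the module already requires)::

    from calibre.web.jsbrowser.browser import Browser

    br = Browser()
    with br:                               # __enter__ is a no-op, __exit__ calls close()
        br.visit('http://example.com')     # placeholder URL
        print(br.user_agent)               # property added in this commit
    # after the block the QWebPage and network manager have been dropped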

View File

@ -11,6 +11,7 @@ import unittest, pprint, threading, time
import cherrypy
from calibre import browser
from calibre.web.jsbrowser.browser import Browser
from calibre.library.server.utils import (cookie_max_age_to_expires,
cookie_time_fmt)
@ -105,6 +106,12 @@ class Server(object):
import traceback
traceback.print_exc()
@cherrypy.expose
def receive_cookies(self):
self.received_cookies = {n:(c.value, dict(c)) for n, c in
dict(cherrypy.request.cookie).iteritems()}
return pprint.pformat(self.received_cookies)
class Test(unittest.TestCase):
@classmethod
@ -202,6 +209,26 @@ class Test(unittest.TestCase):
if fexp:
self.assertEqual(fexp, cexp)
def test_cookie_copy(self):
'Test copying of cookies from jsbrowser to mechanize'
self.assertEqual(self.browser.visit('http://127.0.0.1:%d/cookies'%self.port),
True)
sent_cookies = self.server.sent_cookies.copy()
self.browser.visit('http://127.0.0.1:%d/receive_cookies'%self.port)
orig_rc = self.server.received_cookies.copy()
br = browser(user_agent=self.browser.user_agent)
br.copy_cookies_from_jsbrowser(self.browser)
br.open('http://127.0.0.1:%d/receive_cookies'%self.port)
for name, vals in sent_cookies.iteritems():
val = vals[0]
try:
rval = self.server.received_cookies[name][0]
except:
self.fail('The cookie: %s was not received by the server' % name)
self.assertEqual(val, rval,
'The received value for the cookie: %s, %s != %s'%(
name, rval, val))
self.assertEqual(orig_rc, self.server.received_cookies)
def tests():
return unittest.TestLoader().loadTestsFromTestCase(Test)
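
The new test can be run on its own with the stock unittest runner; assuming the module is importable as calibre.web.jsbrowser.test, something like::

    import unittest
    from calibre.web.jsbrowser.test import tests

    unittest.TextTestRunner(verbosity=2).run(tests())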