Merge branch 'kovidgoyal/master'

Charles Haley 2013-07-12 08:04:45 +02:00
commit 433dcea35a
9 changed files with 294 additions and 9 deletions

View File

@@ -20,6 +20,45 @@
# new recipes:
#   - title:

- version: 0.9.39
  date: 2013-07-12

  new features:
    - title: "Bulk metadata edit: Add a checkbox to prevent the refreshing of the book list after the bulk edit. This means that the book list will not be resorted and any existing search/virtual library will not be refreshed. Useful if you have a large library, as the refresh can be slow."

    - title: "Allow manually marking a book in the calibre library as being on the device. To do so, click the device icon in calibre, then right click on the book you want marked and choose 'Match book to library'. Once you are done marking all the books, right click the device icon and choose 'Update cached metadata'."

    - title: "Driver for Coby Kyros MID1126"
      tickets: [1199410]

    - title: "When adding formats to an existing book, by right clicking the add books button, ask for confirmation if some formats will be overwritten."

    - title: "Add a tweak to restrict the list of output formats available in the conversion dialog. Go to Preferences->Tweaks to change it."

  bug fixes:
    - title: "Amazon metadata download: Update plugin to deal with the new amazon.com website"

    - title: "Edelweiss metadata download plugin: Workaround for advanced search being broken at the Edelweiss website."

    - title: "Invalid data in the device database on Sony readers could cause errors when sorting device collections; ignore those errors."

    - title: "DOCX Input: Fix no page break being inserted before the last section."
      tickets: [1198414]

    - title: "Metadata download dialog: Have the OK button enabled in the results screen as well."
      tickets: [1198288]

    - title: "Get Books: Update empik store plugin"

  improved recipes:
    - Houston Chronicle
    - cracked.com
    - mediapart.fr

  new recipes:
    - title: Glenn Brenwald and Ludwig von Mises Institute
      author: anywho

- version: 0.9.38
  date: 2013-07-05
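
The conversion-formats tweak announced under new features above is edited as plain Python in Preferences->Tweaks. A minimal sketch of what such an entry might look like; the tweak name restrict_output_formats is an assumption, not taken from this changelog:

# Hypothetical tweak entry; the name 'restrict_output_formats' is assumed.
# None (the default) leaves every output format available in the dialog.
restrict_output_formats = None
# To offer only EPUB and MOBI in the conversion dialog, use e.g.:
# restrict_output_formats = ['EPUB', 'MOBI']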

View File

@@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
-numeric_version = (0, 9, 38)
+numeric_version = (0, 9, 39)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

View File

@@ -100,3 +100,66 @@ def recursive_import(db, root, single_book_per_directory=True,
            break
    return duplicates

def add_catalog(cache, path, title):
    from calibre.ebooks.metadata.book.base import Metadata
    from calibre.ebooks.metadata.meta import get_metadata
    from calibre.utils.date import utcnow

    fmt = os.path.splitext(path)[1][1:].lower()
    with lopen(path, 'rb') as stream, cache.write_lock:
        matches = cache._search('title:="%s" and tags:="%s"' % (title.replace('"', '\\"'), _('Catalog')), None)
        db_id = None
        if matches:
            db_id = list(matches)[0]
        try:
            mi = get_metadata(stream, fmt)
            mi.authors = ['calibre']
        except:
            mi = Metadata(title, ['calibre'])
        mi.title, mi.authors = title, ['calibre']
        mi.tags = [_('Catalog')]
        mi.pubdate = mi.timestamp = utcnow()
        if fmt == 'mobi':
            mi.cover, mi.cover_data = None, (None, None)
        if db_id is None:
            db_id = cache._create_book_entry(mi, apply_import_tags=False)
        else:
            cache._set_metadata(db_id, mi)
        cache._add_format(db_id, fmt, stream)

    return db_id

def add_news(cache, path, arg):
    from calibre.ebooks.metadata.meta import get_metadata
    from calibre.utils.date import utcnow

    fmt = os.path.splitext(getattr(path, 'name', path))[1][1:].lower()
    stream = path if hasattr(path, 'read') else lopen(path, 'rb')
    stream.seek(0)
    mi = get_metadata(stream, fmt, use_libprs_metadata=False,
            force_read_metadata=True)
    # Force the author to calibre as the auto delete of old news checks for
    # both the author==calibre and the tag News
    mi.authors = ['calibre']
    stream.seek(0)

    with cache.write_lock:
        if mi.series_index is None:
            mi.series_index = cache._get_next_series_num_for(mi.series)
        mi.tags = [_('News')]
        if arg['add_title_tag']:
            mi.tags += [arg['title']]
        if arg['custom_tags']:
            mi.tags += arg['custom_tags']
        if mi.pubdate is None:
            mi.pubdate = utcnow()
        if mi.timestamp is None:
            mi.timestamp = utcnow()
        db_id = cache._create_book_entry(mi, apply_import_tags=False)
        cache._add_format(db_id, fmt, stream)
    if not hasattr(path, 'read'):
        stream.close()
    return db_id
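
A minimal usage sketch for the two helpers above; 'cache' is assumed to be an open calibre.db.cache.Cache instance and the paths are illustrative:

# Illustrative only: 'cache' is an open calibre.db.cache.Cache; both
# helpers return the id of the book that was created or updated.
catalog_id = add_catalog(cache, '/tmp/my_catalog.epub', 'My Catalog')

# add_news expects a dict with 'title', 'add_title_tag' and 'custom_tags' keys.
news_id = add_news(cache, '/tmp/daily.mobi',
                   {'title': 'Daily News', 'add_title_tag': True,
                    'custom_tags': ('one', 'two')})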

View File

@@ -1175,5 +1175,46 @@ class DB(object):
                        self.rmtree(parent, permanent=permanent)
        self.conn.executemany(
            'DELETE FROM books WHERE id=?', [(x,) for x in path_map])

    def add_custom_data(self, name, val_map, delete_first):
        if delete_first:
            self.conn.execute('DELETE FROM books_plugin_data WHERE name=?', (name, ))
        self.conn.executemany(
            'INSERT OR REPLACE INTO books_plugin_data (book, name, val) VALUES (?, ?, ?)',
            [(book_id, name, json.dumps(val, default=to_json))
             for book_id, val in val_map.iteritems()])

    def get_custom_book_data(self, name, book_ids, default=None):
        book_ids = frozenset(book_ids)

        def safe_load(val):
            try:
                return json.loads(val, object_hook=from_json)
            except:
                return default

        if len(book_ids) == 1:
            bid = next(iter(book_ids))
            ans = {book_id:safe_load(val) for book_id, val in
                   self.conn.execute('SELECT book, val FROM books_plugin_data WHERE book=? AND name=?', (bid, name))}
            return ans or {bid:default}

        ans = {}
        for book_id, val in self.conn.execute(
                'SELECT book, val FROM books_plugin_data WHERE name=?', (name,)):
            if not book_ids or book_id in book_ids:
                val = safe_load(val)
                ans[book_id] = val
        return ans

    def delete_custom_book_data(self, name, book_ids):
        if book_ids:
            self.conn.executemany('DELETE FROM books_plugin_data WHERE book=? AND name=?',
                                  [(book_id, name) for book_id in book_ids])
        else:
            self.conn.execute('DELETE FROM books_plugin_data WHERE name=?', (name,))

    def get_ids_for_custom_book_data(self, name):
        return frozenset(r[0] for r in self.conn.execute('SELECT book FROM books_plugin_data WHERE name=?', (name,)))

    # }}}
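
The methods above serialize values to JSON and store one row per (book, name) pair. A self-contained sketch of that round trip; the exact books_plugin_data schema is an assumption inferred from the SQL above, not taken from this diff:

from __future__ import print_function
import json
import sqlite3

conn = sqlite3.connect(':memory:')
# Assumed table shape; the UNIQUE constraint is implied by the
# INSERT OR REPLACE used in add_custom_data above.
conn.execute('''CREATE TABLE books_plugin_data (
    id INTEGER PRIMARY KEY,
    book INTEGER NOT NULL,
    name TEXT NOT NULL,
    val TEXT NOT NULL,
    UNIQUE(book, name))''')
conn.execute('INSERT OR REPLACE INTO books_plugin_data (book, name, val) '
             'VALUES (?, ?, ?)', (1, 'myplugin', json.dumps({'rating': 3})))
for book_id, val in conn.execute(
        'SELECT book, val FROM books_plugin_data WHERE name=?', ('myplugin',)):
    print(book_id, json.loads(val))  # -> 1 {'rating': 3}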

View File

@@ -1147,6 +1147,36 @@ class Cache(object):
        else:
            table.remove_books(book_ids, self.backend)

    @write_api
    def add_custom_book_data(self, name, val_map, delete_first=False):
        ''' Add data for name where val_map is a map of book_ids to values. If
        delete_first is True, all previously stored data for name will be
        removed. '''
        missing = frozenset(val_map) - self._all_book_ids()
        if missing:
            raise ValueError('add_custom_book_data: no such book_ids: %s' % missing)
        self.backend.add_custom_data(name, val_map, delete_first)

    @read_api
    def get_custom_book_data(self, name, book_ids=(), default=None):
        ''' Get data for name. By default returns data for all book_ids, pass
        in a list of book ids if you only want some data. Returns a map of
        book_id to values. If a particular value could not be decoded, uses
        default for it. '''
        return self.backend.get_custom_book_data(name, book_ids, default)

    @write_api
    def delete_custom_book_data(self, name, book_ids=()):
        ''' Delete data for name. By default deletes all data, if you only want
        to delete data for some book ids, pass in a list of book ids. '''
        self.backend.delete_custom_book_data(name, book_ids)

    @read_api
    def get_ids_for_custom_book_data(self, name):
        ''' Return the set of book ids for which name has data. '''
        return self.backend.get_ids_for_custom_book_data(name)

    # }}}

class SortKey(object): # {{{
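
A hypothetical round trip through the new Cache API above; 'cache' is assumed to be an open Cache with books 1 and 2 in the library, and the name 'myplugin:notes' is made up:

cache.add_custom_book_data('myplugin:notes', {1: 'alpha', 2: 'beta'})
print(cache.get_custom_book_data('myplugin:notes'))                 # {1: 'alpha', 2: 'beta'}
print(cache.get_custom_book_data('myplugin:notes', book_ids=(1,)))  # {1: 'alpha'}
print(cache.get_ids_for_custom_book_data('myplugin:notes'))         # frozenset([1, 2])
cache.delete_custom_book_data('myplugin:notes', book_ids=(2,))      # just book 2
cache.delete_custom_book_data('myplugin:notes')                     # everything left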

View File

@@ -11,7 +11,9 @@ from functools import partial
from future_builtins import zip

from calibre.db import _get_next_series_num_for_list, _get_series_values
-from calibre.db.adding import find_books_in_directory, import_book_directory_multiple, import_book_directory, recursive_import
+from calibre.db.adding import (
+    find_books_in_directory, import_book_directory_multiple,
+    import_book_directory, recursive_import, add_catalog, add_news)
from calibre.db.backend import DB
from calibre.db.cache import Cache
from calibre.db.categories import CATEGORY_SORTS

@@ -205,6 +207,56 @@ class LibraryDatabase(object):
    def recursive_import(self, root, single_book_per_directory=True,
            callback=None, added_ids=None):
        return recursive_import(self, root, single_book_per_directory=single_book_per_directory, callback=callback, added_ids=added_ids)

    def add_catalog(self, path, title):
        return add_catalog(self.new_api, path, title)

    def add_news(self, path, arg):
        return add_news(self.new_api, path, arg)

    def add_format(self, index, fmt, stream, index_is_id=False, path=None, notify=True, replace=True, copy_function=None):
        ''' path and copy_function are ignored by the new API '''
        book_id = index if index_is_id else self.data.index_to_id(index)
        try:
            return self.new_api.add_format(book_id, fmt, stream, replace=replace, run_hooks=False, dbapi=self)
        except:
            raise
        else:
            self.notify('metadata', [book_id])

    def add_format_with_hooks(self, index, fmt, fpath, index_is_id=False, path=None, notify=True, replace=True):
        ''' path is ignored by the new API '''
        book_id = index if index_is_id else self.data.index_to_id(index)
        try:
            return self.new_api.add_format(book_id, fmt, fpath, replace=replace, run_hooks=True, dbapi=self)
        except:
            raise
        else:
            self.notify('metadata', [book_id])

    # }}}

    # Custom data {{{
    def add_custom_book_data(self, book_id, name, val):
        self.new_api.add_custom_book_data(name, {book_id:val})

    def add_multiple_custom_book_data(self, name, val_map, delete_first=False):
        self.new_api.add_custom_book_data(name, val_map, delete_first=delete_first)

    def get_custom_book_data(self, book_id, name, default=None):
        return self.new_api.get_custom_book_data(name, book_ids={book_id}, default=default).get(book_id, default)

    def get_all_custom_book_data(self, name, default=None):
        return self.new_api.get_custom_book_data(name, default=default)

    def delete_custom_book_data(self, book_id, name):
        self.new_api.delete_custom_book_data(name, book_ids=(book_id,))

    def delete_all_custom_book_data(self, name):
        self.new_api.delete_custom_book_data(name)

    def get_ids_for_custom_book_data(self, name):
        return list(self.new_api.get_ids_for_custom_book_data(name))
    # }}}

    # Private interface {{{
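
How the legacy per-book wrappers in the Custom data section above map onto the bulk new_api calls, as a sketch; 'db' is assumed to be a LibraryDatabase and book ids 1 and 2 to exist:

db.add_custom_book_data(1, 'myplugin', 'value')            # one book at a time
db.add_multiple_custom_book_data('myplugin', {1: 'a', 2: 'b'})
print(db.get_custom_book_data(1, 'myplugin', default=None))
print(db.get_all_custom_book_data('myplugin'))             # book_id -> value map
db.delete_custom_book_data(1, 'myplugin')                  # one book
db.delete_all_custom_book_data('myplugin')                 # everything for name
print(db.get_ids_for_custom_book_data('myplugin'))         # now []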

View File

@@ -526,7 +526,7 @@ class Parser(SearchQueryParser):
        if dt == 'bool':
            return self.bool_search(icu_lower(query),
                    partial(self.field_iter, location, candidates),
-                    self.dbcache.pref('bools_are_tristate'))
+                    self.dbcache._pref('bools_are_tristate'))

        # special case: colon-separated fields such as identifiers. isbn
        # is a special case within the case

@@ -630,7 +630,7 @@ class Parser(SearchQueryParser):
        if len(query) < 2:
            return matches

-        user_cats = self.dbcache.pref('user_categories')
+        user_cats = self.dbcache._pref('user_categories')
        c = set(candidates)

        if query.startswith('.'):

@@ -674,7 +674,7 @@ class Search(object):
        if search_restriction:
            q = u'(%s) and (%s)' % (search_restriction, query)

-        all_book_ids = dbcache.all_book_ids(type=set)
+        all_book_ids = dbcache._all_book_ids(type=set)
        if not q:
            return all_book_ids

@@ -686,7 +686,7 @@ class Search(object):
        # takes 0.000975 seconds and restoring it from a pickle takes
        # 0.000974 seconds.
        sqp = Parser(
-            dbcache, all_book_ids, dbcache.pref('grouped_search_terms'),
+            dbcache, all_book_ids, dbcache._pref('grouped_search_terms'),
            self.date_search, self.num_search, self.bool_search,
            self.keypair_search,
            prefs['limit_search_columns'],

View File

@@ -7,6 +7,7 @@ __license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'

import inspect
from io import BytesIO
from repr import repr
from functools import partial
from tempfile import NamedTemporaryFile

@@ -166,6 +167,11 @@ class LegacyTest(BaseTest):
            book_id = T(kwargs={'preserve_uuid':True})(self)
            self.assertEqual(legacy.uuid(book_id, index_is_id=True), old.uuid(book_id, index_is_id=True))
            self.assertEqual(legacy.new_api.formats(book_id), ('AFF',))

            T = partial(ET, 'add_format', old=old, legacy=legacy)
            T((0, 'AFF', BytesIO(b'fffff')))(self)
            T((0, 'AFF', BytesIO(b'fffff')))(self)
            T((0, 'AFF', BytesIO(b'fffff')), {'replace':True})(self)
        with NamedTemporaryFile(suffix='.opf') as f:
            f.write(b'zzzz')
            f.flush()
@@ -178,6 +184,27 @@ class LegacyTest(BaseTest):
            T()
            T({'add_duplicates':False})
            T({'force_id':1000})

        with NamedTemporaryFile(suffix='.txt') as f:
            f.write(b'tttttt')
            f.seek(0)
            bid = legacy.add_catalog(f.name, 'My Catalog')
            self.assertEqual(old.add_catalog(f.name, 'My Catalog'), bid)
            cache = legacy.new_api
            self.assertEqual(cache.formats(bid), ('TXT',))
            self.assertEqual(cache.field_for('title', bid), 'My Catalog')
            self.assertEqual(cache.field_for('authors', bid), ('calibre',))
            self.assertEqual(cache.field_for('tags', bid), (_('Catalog'),))
            self.assertTrue(bid < legacy.add_catalog(f.name, 'Something else'))
            self.assertEqual(legacy.add_catalog(f.name, 'My Catalog'), bid)
            self.assertEqual(old.add_catalog(f.name, 'My Catalog'), bid)

            bid = legacy.add_news(f.name, {'title':'Events', 'add_title_tag':True, 'custom_tags':('one', 'two')})
            self.assertEqual(cache.formats(bid), ('TXT',))
            self.assertEqual(cache.field_for('authors', bid), ('calibre',))
            self.assertEqual(cache.field_for('tags', bid), (_('News'), 'Events', 'one', 'two'))

        old.close()
    # }}}

    def test_legacy_coverage(self): # {{{
@@ -189,6 +216,8 @@ class LegacyTest(BaseTest):
        SKIP_ATTRS = {
            'TCat_Tag', '_add_newbook_tag', '_clean_identifier', '_library_id_', '_set_authors',
            '_set_title', '_set_custom', '_update_author_in_cache',
            # Feeds are now stored in the config folder
            'get_feeds', 'get_feed', 'update_feed', 'remove_feeds', 'add_feed', 'set_feeds',
        }
        SKIP_ARGSPEC = {
            '__init__', 'get_next_series_num_for', 'has_book', 'author_sort_from_authors',

@@ -220,7 +249,38 @@ class LegacyTest(BaseTest):
        if missing:
            pc = len(missing)/total
-            raise AssertionError('{0:.1%} of API ({2} attrs) are missing. For example: {1}'.format(pc, missing[0], len(missing)))
+            raise AssertionError('{0:.1%} of API ({2} attrs) are missing. For example: {1}'.format(pc, ', '.join(missing[:5]), len(missing)))
    # }}}
    def test_legacy_custom_data(self): # {{{
        'Test the API for custom data storage'
        legacy, old = self.init_legacy(self.cloned_library), self.init_old(self.cloned_library)
        for name in ('name1', 'name2', 'name3'):
            T = partial(ET, 'add_custom_book_data', old=old, legacy=legacy)
            T((1, name, 'val1'))(self)
            T((2, name, 'val2'))(self)
            T((3, name, 'val3'))(self)
            T = partial(ET, 'get_ids_for_custom_book_data', old=old, legacy=legacy)
            T((name,))(self)
            T = partial(ET, 'get_custom_book_data', old=old, legacy=legacy)
            T((1, name, object()))
            T((9, name, object()))
            T = partial(ET, 'get_all_custom_book_data', old=old, legacy=legacy)
            T((name, object()))
            T((name+'!', object()))
            T = partial(ET, 'delete_custom_book_data', old=old, legacy=legacy)
            T((name, 1))
            T = partial(ET, 'get_all_custom_book_data', old=old, legacy=legacy)
            T((name, object()))
            T = partial(ET, 'delete_all_custom_book_data', old=old, legacy=legacy)
            T((name,))
            T = partial(ET, 'get_all_custom_book_data', old=old, legacy=legacy)
            T((name, object()))

        T = partial(ET, 'add_multiple_custom_book_data', old=old, legacy=legacy)
        T(('n', {1:'val1', 2:'val2'}))(self)
        T = partial(ET, 'get_all_custom_book_data', old=old, legacy=legacy)
        T(('n', object()))
        old.close()
    # }}}

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)

-store_version = 2  # Needed for dynamic plugin loading
+store_version = 3  # Needed for dynamic plugin loading

__license__ = 'GPL 3'
__copyright__ = '2011-2013, Tomasz Długosz <tomek3d@gmail.com>'

@@ -51,7 +51,7 @@ class EmpikStore(BasicStoreConfig, StorePlugin):
                if not id:
                    continue

-                cover_url = ''.join(data.xpath('.//div[@class="productBox-450Pic"]/a/img/@data-original'))
+                cover_url = ''.join(data.xpath('.//div[@class="productBox-450Pic"]/a/img/@src'))
                title = ''.join(data.xpath('.//a[@class="productBox-450Title"]/text()'))
                title = re.sub(r' \(ebook\)', '', title)
                author = ''.join(data.xpath('.//div[@class="productBox-450Author"]/a/text()'))