diff --git a/Changelog.yaml b/Changelog.yaml
index db25f77a8d..2cbe422226 100644
--- a/Changelog.yaml
+++ b/Changelog.yaml
@@ -20,6 +20,55 @@
# new recipes:
# - title:
+- version: 0.9.40
+ date: 2013-07-19
+
+ new features:
+ - title: "EPUB Output: Add an option to insert an inline Table of Contents into the main text."
+ tickets: [1201006]
+
+ - title: "Driver for LG Android phone"
+ tickets: [1202013]
+
+ - title: "When matching books in the library against the device manually, pre-fill the search field with the book title"
+ tickets: [1200826]
+
+ bug fixes:
+ - title: "PDF Input: Fix a regression that caused some images to be flipped when converting PDF files that use image rotation operators."
+ tickets: [1201083]
+
+ - title: "Fix regression that caused incorrect font size in dropcaps generated by the DOCX input plugin"
+
+ - title: "Get Books: Fix searching for title and author returning some extra matches, if the title starts with an article like the, a or an."
+ tickets: [1200012]
+
+ - title: "PDF Output: Fix extra blank page being inserted at the start of the chapter when converting some epub files from feedbooks"
+
+ - title: "PDF Output: Workaround bug in WebKit's getBoundingClientRect() method that could cause links to occasionally point to incorrect locations."
+ tickets: [1202390]
+
+ - title: "E-book viewer: Fix a bug that could cause the reported position to be incorrect immediately after opening a previously opened book. This also fixes the Back button not working if a link is clicked on the page immediately after opening the book."
+
+ - title: "Fix memory card not being detected for Elonex 621 on Windows"
+
+ - title: "Fix regression in last release that broke auto-conversion of ebooks when sending to device/sending by email."
+ tickets: [1200864]
+
+ - title: "Get Books: Update amazon plugins for website changes"
+
+ - title: "Allow using non-ascii chars in email passwords."
+ tickets: [1202825]
+
+ improved recipes:
+ - Galaxy's Edge
+
+ new recipes:
+ - title: Il Foglio
+ author: faber1971
+
+ - title: Le Monde Diplomatique and Acrimed
+ author: Gaetan Lehmann
+
- version: 0.9.39
date: 2013-07-12
diff --git a/manual/faq.rst b/manual/faq.rst
index e5a6342cf8..46d675da13 100644
--- a/manual/faq.rst
+++ b/manual/faq.rst
@@ -840,6 +840,19 @@ If you still cannot get the installer to work and you are on windows, you can us
My antivirus program claims |app| is a virus/trojan?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. note::
+ As of July 2013, McAfee SiteAdvisor has started warning that
+ http://calibre-ebook.com is unsafe, with no stated reason or justification.
+ McAfee is wrong; the mistake has been reported to them by several people,
+ but they have not corrected it. McAfee SiteAdvisor is a notoriously
+ unreliable service; see for example
+ `this page `_ or
+ `this page `_ or
+ `this Wikipedia entry `_.
+ We strongly urge you to stop using McAfee products and find a more competent
+ security provider to give your business to. Instructions on how to
+ `uninstall McAfee SiteAdvisor `_ are available.
+
The first thing to check is that you are downloading |app| from the official
website: http://calibre-ebook.com. |app| is a very popular program
and unscrupulous people try to setup websites offering it for download to fool
diff --git a/recipes/galaxys_edge.recipe b/recipes/galaxys_edge.recipe
index e6e1dd7475..4406db4620 100644
--- a/recipes/galaxys_edge.recipe
+++ b/recipes/galaxys_edge.recipe
@@ -14,19 +14,12 @@ class GalaxyEdge(BasicNewsRecipe):
auto_cleanup = True
- #keep_only_tags = [dict(id='content')]
- #remove_tags = [dict(attrs={'class':['article-links', 'breadcr']}),
- #dict(id=['email-section', 'right-column', 'printfooter', 'topover',
- #'slidebox', 'th_footer'])]
-
extra_css = '.photo-caption { font-size: smaller }'
def parse_index(self):
soup = self.index_to_soup('http://www.galaxysedge.com/')
- main = soup.find('table', attrs={'width':'911'})
- toc = main.find('td', attrs={'width':'225'})
-
-
+ main = soup.find('table', attrs={'width':'944'})
+ toc = main.find('td', attrs={'width':'204'})
current_section = None
current_articles = []
@@ -68,41 +61,7 @@ class GalaxyEdge(BasicNewsRecipe):
current_articles.append({'title': title, 'url':url,
'description':'', 'date':''})
if current_articles and current_section:
- feeds.append((current_section, current_articles))
+ feeds.append((current_section, current_articles))
return feeds
-
-
-
- #def preprocess_raw_html(self, raw, url):
- #return raw.replace('', '').replace('', '')
-
- #def postprocess_html(self, soup, first_fetch):
- #for t in soup.findAll(['table', 'tr', 'td','center']):
- #t.name = 'div'
- #return soup
-
- #def parse_index(self):
- #today = time.strftime('%Y-%m-%d')
- #soup = self.index_to_soup(
- #'http://www.thehindu.com/todays-paper/tp-index/?date=' + today)
- #div = soup.find(id='left-column')
- #feeds = []
- #current_section = None
- #current_articles = []
- #for x in div.findAll(['h3', 'div']):
- #if current_section and x.get('class', '') == 'tpaper':
- #a = x.find('a', href=True)
- #if a is not None:
- #current_articles.append({'url':a['href']+'?css=print',
- #'title':self.tag_to_string(a), 'date': '',
- #'description':''})
- #if x.name == 'h3':
- #if current_section and current_articles:
- #feeds.append((current_section, current_articles))
- #current_section = self.tag_to_string(x)
- #current_articles = []
- #return feeds
-
-
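The selector update above tracks a redesign of the site: the recipe now keys off a 944-pixel layout table and a 204-pixel ToC cell instead of the old 911/225 values. A quick way to sanity-check selectors like these outside calibre is a standalone session with the BeautifulSoup 3 package (the parser that calibre's index_to_soup wraps); this is a hedged sketch assuming network access, with the URL and attribute values taken from the recipe itself:

    import urllib2
    from BeautifulSoup import BeautifulSoup  # BeautifulSoup 3, as bundled with calibre

    html = urllib2.urlopen('http://www.galaxysedge.com/').read()
    soup = BeautifulSoup(html)
    main = soup.find('table', attrs={'width': '944'})
    toc = main.find('td', attrs={'width': '204'}) if main else None
    print 'ToC cell found:', toc is not None
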
diff --git a/recipes/il_foglio.recipe b/recipes/il_foglio.recipe
new file mode 100644
index 0000000000..9d5e8aa2e6
--- /dev/null
+++ b/recipes/il_foglio.recipe
@@ -0,0 +1,16 @@
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class AdvancedUserRecipe1373969939(BasicNewsRecipe):
+ title = u'Il Foglio - Editoriali'
+ oldest_article = 1
+ max_articles_per_feed = 10
+ auto_cleanup = False
+ keep_only_tags = [
+ dict(name='div', attrs={'class':'sec_item'})
+ ]
+ feeds = [(u'Il Foglio - Editoriali', u'http://feed43.com/8814237344800115.xml')]
+ no_stylesheets = True
+ __author__ = 'faber1971'
+ description = 'Leading articles from an Italian newspaper - v1.00 (16 July, 2013)'
+ language = 'it'
+ masthead_url = 'http://www.ilfoglio.it/media/img/interface/logo_testata_small.gif'
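A recipe like this is normally smoke-tested from the command line before submission, using calibre's recipe conversion in test mode, which fetches only a couple of articles per feed; the invocation below follows the workflow from calibre's news-recipe tutorial:

    ebook-convert il_foglio.recipe .epub --test -vv
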
diff --git a/resources/compiled_coffeescript.zip b/resources/compiled_coffeescript.zip
index e092b53157..ddf0db08e7 100644
Binary files a/resources/compiled_coffeescript.zip and b/resources/compiled_coffeescript.zip differ
diff --git a/setup/file_hosting_servers.rst b/setup/file_hosting_servers.rst
index c72998958e..5b494cf066 100644
--- a/setup/file_hosting_servers.rst
+++ b/setup/file_hosting_servers.rst
@@ -22,6 +22,7 @@ mkdir -p /root/staging /root/work/vim /srv/download /srv/manual
scp .zshrc .vimrc server:
scp -r ~/work/vim/zsh-syntax-highlighting server:work/vim
+scp -r ~/work/vim/zsh-history-substring-search server:work/vim
If the server has a backup hard-disk, mount it at /mnt/backup and edit /etc/fstab so that it is auto-mounted.
Then, add the following to crontab::
diff --git a/src/calibre/constants.py b/src/calibre/constants.py
index 18b4e3d238..1e0b2a1a83 100644
--- a/src/calibre/constants.py
+++ b/src/calibre/constants.py
@@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
-numeric_version = (0, 9, 39)
+numeric_version = (0, 9, 40)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal "
diff --git a/src/calibre/db/__init__.py b/src/calibre/db/__init__.py
index 47e44335ea..a07fb8b5a3 100644
--- a/src/calibre/db/__init__.py
+++ b/src/calibre/db/__init__.py
@@ -54,6 +54,70 @@ def _get_series_values(val):
pass
return (val, None)
+def get_data_as_dict(self, prefix=None, authors_as_string=False, ids=None):
+ '''
+ Return all metadata stored in the database as a dict. Includes paths to
+ the cover and each format.
+
+ :param prefix: The prefix for all paths. By default, the prefix is the absolute path
+ to the library folder.
+ :param ids: Set of ids to return the data for. If None, return data for
+ all entries in the database.
+ '''
+ import os
+ from calibre.ebooks.metadata import authors_to_string
+ backend = getattr(self, 'backend', self) # Works with both the new and the legacy interface
+ if prefix is None:
+ prefix = backend.library_path
+ fdata = backend.custom_column_num_map
+
+ FIELDS = set(['title', 'sort', 'authors', 'author_sort', 'publisher',
+ 'rating', 'timestamp', 'size', 'tags', 'comments', 'series',
+ 'series_index', 'uuid', 'pubdate', 'last_modified', 'identifiers',
+ 'languages']).union(set(fdata))
+ for x, data in fdata.iteritems():
+ if data['datatype'] == 'series':
+ FIELDS.add('%d_index'%x)
+ data = []
+ for record in self.data:
+ if record is None:
+ continue
+ db_id = record[self.FIELD_MAP['id']]
+ if ids is not None and db_id not in ids:
+ continue
+ x = {}
+ for field in FIELDS:
+ x[field] = record[self.FIELD_MAP[field]]
+ data.append(x)  # x is filled in further below; the list stores a reference
+ x['id'] = db_id
+ x['formats'] = []
+ isbn = self.isbn(db_id, index_is_id=True)
+ x['isbn'] = isbn if isbn else ''
+ if not x['authors']:
+ x['authors'] = _('Unknown')
+ x['authors'] = [i.replace('|', ',') for i in x['authors'].split(',')]
+ if authors_as_string:
+ x['authors'] = authors_to_string(x['authors'])
+ x['tags'] = [i.replace('|', ',').strip() for i in x['tags'].split(',')] if x['tags'] else []
+ path = os.path.join(prefix, self.path(record[self.FIELD_MAP['id']], index_is_id=True))
+ x['cover'] = os.path.join(path, 'cover.jpg')
+ if not record[self.FIELD_MAP['cover']]:
+ x['cover'] = None
+ formats = self.formats(record[self.FIELD_MAP['id']], index_is_id=True)
+ if formats:
+ for fmt in formats.split(','):
+ path = self.format_abspath(x['id'], fmt, index_is_id=True)
+ if path is None:
+ continue
+ if prefix != self.library_path:
+ path = os.path.relpath(path, self.library_path)
+ path = os.path.join(prefix, path)
+ x['formats'].append(path)
+ x['fmt_'+fmt.lower()] = path
+ x['available_formats'] = [i.upper() for i in formats.split(',')]
+
+ return data
+
'''
Rewrite of the calibre database backend.
@@ -107,15 +171,13 @@ Various things that require other things before they can be migrated:
1. From initialize_dynamic(): set_saved_searches,
load_user_template_functions. Also add custom
columns/categories/searches info into
- self.field_metadata. Finally, implement metadata dirtied
- functionality.
+ self.field_metadata.
2. Catching DatabaseException and sqlite.Error when creating new
libraries/switching/on calibre startup.
- 3. From refresh in the legacy interface: Remember to flush the composite
- column template cache.
+ 3. Port library/restore.py
4. Replace the metadatabackup thread with the new implementation when using the new backend.
- 5. In the new API refresh() does not re-read from disk. That might break a
- few things, for example content server reloading on db change as well as
- dump/restore of db?
- 6. grep the sources for TODO
+ 5. grep the sources for TODO
+ 6. Check that content server reloading on metadata.db change, metadata
+ backup, refresh gui on calibredb add and moving libraries all work (check
+ them on windows as well for file locking issues)
'''
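A hedged usage sketch for the get_data_as_dict() helper added above. It assumes the function is bound as a method on a legacy LibraryDatabase instance named db (the legacy.py hunk further down imports it for exactly that purpose); the book ids are illustrative:

    data = db.get_data_as_dict(ids={1, 2, 3}, authors_as_string=True)
    for book in data:
        print book['id'], book['title'], book['authors']
        print '  formats:', book.get('available_formats', [])
        print '  cover:', book['cover']
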
diff --git a/src/calibre/db/backend.py b/src/calibre/db/backend.py
index 0ebc9679b7..4f33a917fa 100644
--- a/src/calibre/db/backend.py
+++ b/src/calibre/db/backend.py
@@ -8,7 +8,7 @@ __copyright__ = '2011, Kovid Goyal '
__docformat__ = 'restructuredtext en'
# Imports {{{
-import os, shutil, uuid, json, glob, time, cPickle
+import os, shutil, uuid, json, glob, time, cPickle, hashlib
from functools import partial
import apsw
@@ -17,7 +17,9 @@ from calibre import isbytestring, force_unicode, prints
from calibre.constants import (iswindows, filesystem_encoding,
preferred_encoding)
from calibre.ptempfile import PersistentTemporaryFile
+from calibre.db import SPOOL_SIZE
from calibre.db.schema_upgrades import SchemaUpgrade
+from calibre.db.errors import NoSuchFormat
from calibre.library.field_metadata import FieldMetadata
from calibre.ebooks.metadata import title_sort, author_to_author_sort
from calibre.utils.icu import sort_key
@@ -40,6 +42,8 @@ Differences in semantics from pysqlite:
3. There is no executescript
'''
+CUSTOM_DATA_TYPES = frozenset(['rating', 'text', 'comments', 'datetime',
+ 'int', 'float', 'bool', 'series', 'composite', 'enumeration'])
class DynamicFilter(object): # {{{
@@ -547,6 +551,7 @@ class DB(object):
# Load metadata for custom columns
self.custom_column_label_map, self.custom_column_num_map = {}, {}
+ self.custom_column_num_to_label_map = {}
triggers = []
remove = []
custom_tables = self.custom_tables
@@ -584,6 +589,7 @@ class DB(object):
self.custom_column_num_map[data['num']] = \
self.custom_column_label_map[data['label']] = data
+ self.custom_column_num_to_label_map[data['num']] = data['label']
# Create Foreign Key triggers
if data['normalized']:
@@ -783,6 +789,194 @@ class DB(object):
self._conn = Connection(self.dbpath)
return self._conn
+ def custom_field_name(self, label=None, num=None):
+ if label is not None:
+ return self.field_metadata.custom_field_prefix + label
+ return self.field_metadata.custom_field_prefix + self.custom_column_num_to_label_map[num]
+
+ def custom_field_metadata(self, label=None, num=None):
+ if label is not None:
+ return self.custom_column_label_map[label]
+ return self.custom_column_num_map[num]
+
+ def set_custom_column_metadata(self, num, name=None, label=None, is_editable=None, display=None):
+ changed = False
+ if name is not None:
+ self.conn.execute('UPDATE custom_columns SET name=? WHERE id=?', (name, num))
+ changed = True
+ if label is not None:
+ self.conn.execute('UPDATE custom_columns SET label=? WHERE id=?', (label, num))
+ changed = True
+ if is_editable is not None:
+ self.conn.execute('UPDATE custom_columns SET editable=? WHERE id=?', (bool(is_editable), num))
+ self.custom_column_num_map[num]['is_editable'] = bool(is_editable)
+ changed = True
+ if display is not None:
+ self.conn.execute('UPDATE custom_columns SET display=? WHERE id=?', (json.dumps(display), num))
+ changed = True
+ return changed
+
+ def create_custom_column(self, label, name, datatype, is_multiple, editable=True, display={}): # {{{
+ import re
+ if not label:
+ raise ValueError(_('No label was provided'))
+ if re.match('^\w*$', label) is None or not label[0].isalpha() or label.lower() != label:
+ raise ValueError(_('The label must contain only lower case letters, digits and underscores, and start with a letter'))
+ if datatype not in CUSTOM_DATA_TYPES:
+ raise ValueError('%r is not a supported data type'%datatype)
+ normalized = datatype not in ('datetime', 'comments', 'int', 'bool',
+ 'float', 'composite')
+ is_multiple = is_multiple and datatype in ('text', 'composite')
+ self.conn.execute(
+ ('INSERT INTO '
+ 'custom_columns(label,name,datatype,is_multiple,editable,display,normalized)'
+ 'VALUES (?,?,?,?,?,?,?)'),
+ (label, name, datatype, is_multiple, editable, json.dumps(display), normalized))
+ num = self.conn.last_insert_rowid()
+
+ if datatype in ('rating', 'int'):
+ dt = 'INT'
+ elif datatype in ('text', 'comments', 'series', 'composite', 'enumeration'):
+ dt = 'TEXT'
+ elif datatype in ('float',):
+ dt = 'REAL'
+ elif datatype == 'datetime':
+ dt = 'timestamp'
+ elif datatype == 'bool':
+ dt = 'BOOL'
+ collate = 'COLLATE NOCASE' if dt == 'TEXT' else ''
+ table, lt = self.custom_table_names(num)
+ if normalized:
+ if datatype == 'series':
+ s_index = 'extra REAL,'
+ else:
+ s_index = ''
+ lines = [
+ '''\
+ CREATE TABLE %s(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ value %s NOT NULL %s,
+ UNIQUE(value));
+ '''%(table, dt, collate),
+
+ 'CREATE INDEX %s_idx ON %s (value %s);'%(table, table, collate),
+
+ '''\
+ CREATE TABLE %s(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ book INTEGER NOT NULL,
+ value INTEGER NOT NULL,
+ %s
+ UNIQUE(book, value)
+ );'''%(lt, s_index),
+
+ 'CREATE INDEX %s_aidx ON %s (value);'%(lt,lt),
+ 'CREATE INDEX %s_bidx ON %s (book);'%(lt,lt),
+
+ '''\
+ CREATE TRIGGER fkc_update_{lt}_a
+ BEFORE UPDATE OF book ON {lt}
+ BEGIN
+ SELECT CASE
+ WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
+ THEN RAISE(ABORT, 'Foreign key violation: book not in books')
+ END;
+ END;
+ CREATE TRIGGER fkc_update_{lt}_b
+ BEFORE UPDATE OF author ON {lt}
+ BEGIN
+ SELECT CASE
+ WHEN (SELECT id from {table} WHERE id=NEW.value) IS NULL
+ THEN RAISE(ABORT, 'Foreign key violation: value not in {table}')
+ END;
+ END;
+ CREATE TRIGGER fkc_insert_{lt}
+ BEFORE INSERT ON {lt}
+ BEGIN
+ SELECT CASE
+ WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
+ THEN RAISE(ABORT, 'Foreign key violation: book not in books')
+ WHEN (SELECT id from {table} WHERE id=NEW.value) IS NULL
+ THEN RAISE(ABORT, 'Foreign key violation: value not in {table}')
+ END;
+ END;
+ CREATE TRIGGER fkc_delete_{lt}
+ AFTER DELETE ON {table}
+ BEGIN
+ DELETE FROM {lt} WHERE value=OLD.id;
+ END;
+
+ CREATE VIEW tag_browser_{table} AS SELECT
+ id,
+ value,
+ (SELECT COUNT(id) FROM {lt} WHERE value={table}.id) count,
+ (SELECT AVG(r.rating)
+ FROM {lt},
+ books_ratings_link as bl,
+ ratings as r
+ WHERE {lt}.value={table}.id and bl.book={lt}.book and
+ r.id = bl.rating and r.rating <> 0) avg_rating,
+ value AS sort
+ FROM {table};
+
+ CREATE VIEW tag_browser_filtered_{table} AS SELECT
+ id,
+ value,
+ (SELECT COUNT({lt}.id) FROM {lt} WHERE value={table}.id AND
+ books_list_filter(book)) count,
+ (SELECT AVG(r.rating)
+ FROM {lt},
+ books_ratings_link as bl,
+ ratings as r
+ WHERE {lt}.value={table}.id AND bl.book={lt}.book AND
+ r.id = bl.rating AND r.rating <> 0 AND
+ books_list_filter(bl.book)) avg_rating,
+ value AS sort
+ FROM {table};
+
+ '''.format(lt=lt, table=table),
+
+ ]
+ else:
+ lines = [
+ '''\
+ CREATE TABLE %s(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ book INTEGER,
+ value %s NOT NULL %s,
+ UNIQUE(book));
+ '''%(table, dt, collate),
+
+ 'CREATE INDEX %s_idx ON %s (book);'%(table, table),
+
+ '''\
+ CREATE TRIGGER fkc_insert_{table}
+ BEFORE INSERT ON {table}
+ BEGIN
+ SELECT CASE
+ WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
+ THEN RAISE(ABORT, 'Foreign key violation: book not in books')
+ END;
+ END;
+ CREATE TRIGGER fkc_update_{table}
+ BEFORE UPDATE OF book ON {table}
+ BEGIN
+ SELECT CASE
+ WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
+ THEN RAISE(ABORT, 'Foreign key violation: book not in books')
+ END;
+ END;
+ '''.format(table=table),
+ ]
+ script = ' \n'.join(lines)
+ self.conn.execute(script)
+ return num
+ # }}}
+
+ def delete_custom_column(self, label=None, num=None):
+ data = self.custom_field_metadata(label, num)
+ self.conn.execute('UPDATE custom_columns SET mark_for_delete=1 WHERE id=?', (data['num'],))
+
def close(self):
if self._conn is not None:
self._conn.close()
@@ -926,6 +1120,19 @@ class DB(object):
shutil.copyfile(candidates[0], fmt_path)
return fmt_path
+ def format_hash(self, book_id, fmt, fname, path):
+ path = self.format_abspath(book_id, fmt, fname, path)
+ if path is None:
+ raise NoSuchFormat('Record %d has no fmt: %s'%(book_id, fmt))
+ sha = hashlib.sha256()
+ with lopen(path, 'rb') as f:
+ while True:
+ raw = f.read(SPOOL_SIZE)
+ sha.update(raw)
+ if len(raw) < SPOOL_SIZE:
+ break
+ return sha.hexdigest()
+
def format_metadata(self, book_id, fmt, fname, path):
path = self.format_abspath(book_id, fmt, fname, path)
ans = {}
@@ -948,6 +1155,13 @@ class DB(object):
import traceback
traceback.print_exc()
+ def cover_last_modified(self, path):
+ path = os.path.abspath(os.path.join(self.library_path, path, 'cover.jpg'))
+ try:
+ return utcfromtimestamp(os.stat(path).st_mtime)
+ except EnvironmentError:
+ pass # Cover doesn't exist
+
def copy_cover_to(self, path, dest, windows_atomic_move=None, use_hardlink=False):
path = os.path.abspath(os.path.join(self.library_path, path, 'cover.jpg'))
if windows_atomic_move is not None:
@@ -1238,5 +1452,59 @@ class DB(object):
options = [(book_id, fmt.upper(), buffer(cPickle.dumps(data, -1))) for book_id, data in options.iteritems()]
self.conn.executemany('INSERT OR REPLACE INTO conversion_options(book,format,data) VALUES (?,?,?)', options)
+ def get_top_level_move_items(self, all_paths):
+ items = set(os.listdir(self.library_path))
+ paths = set(all_paths)
+ paths.update({'metadata.db', 'metadata_db_prefs_backup.json'})
+ path_map = {x:x for x in paths}
+ if not self.is_case_sensitive:
+ for x in items:
+ path_map[x.lower()] = x
+ items = set(path_map)
+ paths = {x.lower() for x in paths}
+ items = items.intersection(paths)
+ return items, path_map
+
+ def move_library_to(self, all_paths, newloc, progress=lambda x: x):
+ if not os.path.exists(newloc):
+ os.makedirs(newloc)
+ old_dirs = set()
+ items, path_map = self.get_top_level_move_items(all_paths)
+ for x in items:
+ src = os.path.join(self.library_path, x)
+ dest = os.path.join(newloc, path_map[x])
+ if os.path.isdir(src):
+ if os.path.exists(dest):
+ shutil.rmtree(dest)
+ shutil.copytree(src, dest)
+ old_dirs.add(src)
+ else:
+ if os.path.exists(dest):
+ os.remove(dest)
+ shutil.copyfile(src, dest)
+ x = path_map[x]
+ if not isinstance(x, unicode):
+ x = x.decode(filesystem_encoding, 'replace')
+ progress(x)
+
+ dbpath = os.path.join(newloc, os.path.basename(self.dbpath))
+ opath = self.dbpath
+ self.conn.close()
+ self.library_path, self.dbpath = newloc, dbpath
+ if self._conn is not None:
+ self._conn.close()
+ self._conn = None
+ self.conn  # property access re-opens the connection at the new dbpath
+ try:
+ os.unlink(opath)
+ except:
+ pass
+ for loc in old_dirs:
+ try:
+ shutil.rmtree(loc)
+ except:
+ pass
+
# }}}
+
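The new DB.format_hash() above hashes a format file in fixed-size chunks, so even very large books are never read into memory at once. A self-contained sketch of the same pattern; CHUNK is an assumed stand-in for calibre's SPOOL_SIZE constant:

    import hashlib

    CHUNK = 30 * 1024 * 1024  # assumption: a SPOOL_SIZE-like buffer size

    def file_sha256(path, chunk=CHUNK):
        sha = hashlib.sha256()
        with open(path, 'rb') as f:
            while True:
                raw = f.read(chunk)
                sha.update(raw)
                if len(raw) < chunk:  # a short read means EOF was reached
                    break
        return sha.hexdigest()
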
diff --git a/src/calibre/db/cache.py b/src/calibre/db/cache.py
index 119e166c49..c2d094eef0 100644
--- a/src/calibre/db/cache.py
+++ b/src/calibre/db/cache.py
@@ -7,7 +7,7 @@ __license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal '
__docformat__ = 'restructuredtext en'
-import os, traceback, random, shutil
+import os, traceback, random, shutil, re
from io import BytesIO
from collections import defaultdict
from functools import wraps, partial
@@ -25,12 +25,12 @@ from calibre.db.tables import VirtualTable
from calibre.db.write import get_series_values
from calibre.db.lazy import FormatMetadata, FormatsList
from calibre.ebooks import check_ebook_format
-from calibre.ebooks.metadata import string_to_authors, author_to_author_sort
+from calibre.ebooks.metadata import string_to_authors, author_to_author_sort, get_title_sort_pat
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.ptempfile import (base_dir, PersistentTemporaryFile,
SpooledTemporaryFile)
-from calibre.utils.config import prefs
+from calibre.utils.config import prefs, tweaks
from calibre.utils.date import now as nowf, utcnow, UNDEFINED_DATE
from calibre.utils.icu import sort_key
@@ -89,7 +89,7 @@ class Cache(object):
self.formatter_template_cache = {}
self.dirtied_cache = {}
self.dirtied_sequence = 0
- self._search_api = Search(self.field_metadata.get_search_terms())
+ self._search_api = Search(self, 'saved_searches', self.field_metadata.get_search_terms())
# Implement locking for all simple read/write API methods
# An unlocked version of the method is stored with the name starting
@@ -127,9 +127,8 @@ class Cache(object):
except:
traceback.print_exc()
- # TODO: Saved searches
- # if len(saved_searches().names()):
- # self.field_metadata.add_search_category(label='search', name=_('Searches'))
+ if len(self._search_api.get_saved_searches().names()):
+ self.field_metadata.add_search_category(label='search', name=_('Searches'))
self.field_metadata.add_grouped_search_terms(
self._pref('grouped_search_terms', {}))
@@ -141,16 +140,28 @@ class Cache(object):
if self.dirtied_cache:
self.dirtied_sequence = max(self.dirtied_cache.itervalues())+1
+ @property
+ def prefs(self):
+ 'For internal use only (used by SavedSearchQueries). For thread-safe access to the preferences, use the pref() and set_pref() methods.'
+ return self.backend.prefs
+
@write_api
def initialize_template_cache(self):
self.formatter_template_cache = {}
@write_api
- def refresh(self):
- self._initialize_template_cache()
+ def clear_caches(self, book_ids=None):
+ self._initialize_template_cache() # Clear the formatter template cache
+ for field in self.fields.itervalues():
+ if hasattr(field, 'clear_caches'):
+ field.clear_caches(book_ids=book_ids) # Clear the composite cache and ondevice caches
+ self.format_metadata_cache.clear()
+
+ @write_api
+ def reload_from_db(self, clear_caches=True):
+ if clear_caches:
+ self._clear_caches()
for field in self.fields.itervalues():
- if hasattr(field, 'clear_cache'):
- field.clear_cache() # Clear the composite cache
if hasattr(field, 'table'):
field.table.read(self.backend) # Reread data from metadata.db
@@ -394,6 +405,12 @@ class Cache(object):
def get_item_name(self, field, item_id):
return self.fields[field].table.id_map[item_id]
+ @read_api
+ def get_item_id(self, field, item_name):
+ ' Return the item id for item_name (case-insensitive) '
+ rmap = {icu_lower(v) if isinstance(v, unicode) else v:k for k, v in self.fields[field].table.id_map.iteritems()}
+ return rmap.get(icu_lower(item_name) if isinstance(item_name, unicode) else item_name, None)
+
@read_api
def author_data(self, author_ids=None):
'''
@@ -408,7 +425,16 @@ class Cache(object):
return {aid:af.author_data(aid) for aid in author_ids if aid in af.table.id_map}
@read_api
- def format_metadata(self, book_id, fmt, allow_cache=True):
+ def format_hash(self, book_id, fmt):
+ try:
+ name = self.fields['formats'].format_fname(book_id, fmt)
+ path = self._field_for('path', book_id).replace('/', os.sep)
+ except:
+ raise NoSuchFormat('Record %d has no fmt: %s'%(book_id, fmt))
+ return self.backend.format_hash(book_id, fmt, name, path)
+
+ @api
+ def format_metadata(self, book_id, fmt, allow_cache=True, update_db=False):
if not fmt:
return {}
fmt = fmt.upper()
@@ -416,18 +442,30 @@ class Cache(object):
x = self.format_metadata_cache[book_id].get(fmt, None)
if x is not None:
return x
- try:
- name = self.fields['formats'].format_fname(book_id, fmt)
- path = self._field_for('path', book_id).replace('/', os.sep)
- except:
- return {}
+ with self.read_lock:
+ try:
+ name = self.fields['formats'].format_fname(book_id, fmt)
+ path = self._field_for('path', book_id).replace('/', os.sep)
+ except:
+ return {}
+
+ ans = {}
+ if path and name:
+ ans = self.backend.format_metadata(book_id, fmt, name, path)
+ self.format_metadata_cache[book_id][fmt] = ans
+ if update_db and 'size' in ans:
+ with self.write_lock:
+ max_size = self.fields['formats'].table.update_fmt(book_id, fmt, name, ans['size'], self.backend)
+ self.fields['size'].table.update_sizes({book_id: max_size})
- ans = {}
- if path and name:
- ans = self.backend.format_metadata(book_id, fmt, name, path)
- self.format_metadata_cache[book_id][fmt] = ans
return ans
+ @read_api
+ def format_files(self, book_id):
+ field = self.fields['formats']
+ fmts = field.table.book_col_map.get(book_id, ())
+ return {fmt:field.format_fname(book_id, fmt) for fmt in fmts}
+
@read_api
def pref(self, name, default=None):
return self.backend.prefs.get(name, default)
@@ -498,6 +536,14 @@ class Cache(object):
ret = i
return ret
+ @read_api
+ def cover_last_modified(self, book_id):
+ try:
+ path = self._field_for('path', book_id).replace('/', os.sep)
+ except AttributeError:
+ return
+ return self.backend.cover_last_modified(path)
+
@read_api
def copy_cover_to(self, book_id, dest, use_hardlink=False):
'''
@@ -524,6 +570,7 @@ class Cache(object):
the path is different from the current path (taking case sensitivity
into account).
'''
+ fmt = (fmt or '').upper()
try:
name = self.fields['formats'].format_fname(book_id, fmt)
path = self._field_for('path', book_id).replace('/', os.sep)
@@ -544,6 +591,7 @@ class Cache(object):
Apart from the viewer, I don't believe any of the others do any file
I/O with the results of this call.
'''
+ fmt = (fmt or '').upper()
try:
name = self.fields['formats'].format_fname(book_id, fmt)
path = self._field_for('path', book_id).replace('/', os.sep)
@@ -555,6 +603,7 @@ class Cache(object):
@read_api
def has_format(self, book_id, fmt):
'Return True iff the format exists on disk'
+ fmt = (fmt or '').upper()
try:
name = self.fields['formats'].format_fname(book_id, fmt)
path = self._field_for('path', book_id).replace('/', os.sep)
@@ -562,6 +611,31 @@ class Cache(object):
return False
return self.backend.has_format(book_id, fmt, name, path)
+ @api
+ def save_original_format(self, book_id, fmt):
+ fmt = fmt.upper()
+ if 'ORIGINAL' in fmt:
+ raise ValueError('Cannot save original of an original fmt')
+ fmtfile = self.format(book_id, fmt, as_file=True)
+ if fmtfile is None:
+ return False
+ with fmtfile:
+ nfmt = 'ORIGINAL_'+fmt
+ return self.add_format(book_id, nfmt, fmtfile, run_hooks=False)
+
+ @api
+ def restore_original_format(self, book_id, original_fmt):
+ original_fmt = original_fmt.upper()
+ fmtfile = self.format(book_id, original_fmt, as_file=True)
+ if fmtfile is not None:
+ fmt = original_fmt.partition('_')[2]
+ with self.write_lock:
+ with fmtfile:
+ self._add_format(book_id, fmt, fmtfile, run_hooks=False)
+ self._remove_formats({book_id:(original_fmt,)})
+ return True
+ return False
+
@read_api
def formats(self, book_id, verify_formats=True):
'''
@@ -601,6 +675,7 @@ class Cache(object):
this means that repeated calls yield the same
temp file (which is re-created each time)
'''
+ fmt = (fmt or '').upper()
ext = ('.'+fmt.lower()) if fmt else ''
if as_path:
if preserve_filename:
@@ -696,9 +771,8 @@ class Cache(object):
return sorted(all_book_ids, key=partial(SortKey, fields, sort_keys))
@read_api
- def search(self, query, restriction, virtual_fields=None):
- return self._search_api(self, query, restriction,
- virtual_fields=virtual_fields)
+ def search(self, query, restriction='', virtual_fields=None, book_ids=None):
+ return self._search_api(self, query, restriction, virtual_fields=virtual_fields, book_ids=book_ids)
@read_api
def get_categories(self, sort='name', book_ids=None, icon_map=None):
@@ -761,7 +835,7 @@ class Cache(object):
if dirtied and self.composites:
for name in self.composites:
- self.fields[name].pop_cache(dirtied)
+ self.fields[name].clear_caches(book_ids=dirtied)
if dirtied and update_path and do_path_update:
self._update_path(dirtied, mark_as_dirtied=False)
@@ -1068,16 +1142,16 @@ class Cache(object):
self._update_last_modified(tuple(formats_map.iterkeys()))
@read_api
- def get_next_series_num_for(self, series):
+ def get_next_series_num_for(self, series, field='series'):
books = ()
- sf = self.fields['series']
+ sf = self.fields[field]
if series:
q = icu_lower(series)
- for val, book_ids in sf.iter_searchable_values(self._get_metadata, frozenset(self.all_book_ids())):
+ for val, book_ids in sf.iter_searchable_values(self._get_metadata, frozenset(self._all_book_ids())):
if q == icu_lower(val):
books = book_ids
break
- series_indices = sorted(self._field_for('series_index', book_id) for book_id in books)
+ series_indices = sorted(self._field_for(sf.index_field.name, book_id) for book_id in books)
return _get_next_series_num_for_list(tuple(series_indices), unwrap=False)
@read_api
@@ -1181,6 +1255,42 @@ class Cache(object):
else:
table.remove_books(book_ids, self.backend)
+ @read_api
+ def author_sort_strings_for_books(self, book_ids):
+ val_map = {}
+ for book_id in book_ids:
+ authors = self._field_ids_for('authors', book_id)
+ adata = self._author_data(authors)
+ val_map[book_id] = tuple(adata[aid]['sort'] for aid in authors)
+ return val_map
+
+ @write_api
+ def rename_items(self, field, item_id_to_new_name_map, change_index=True):
+ f = self.fields[field]
+ try:
+ func = f.table.rename_item
+ except AttributeError:
+ raise ValueError('Cannot rename items for one-one fields: %s' % field)
+ affected_books = set()
+ moved_books = set()
+ id_map = {}
+ for item_id, new_name in item_id_to_new_name_map.iteritems():
+ books, new_id = func(item_id, new_name, self.backend)
+ affected_books.update(books)
+ id_map[item_id] = new_id
+ if new_id != item_id:
+ moved_books.update(books)
+ if affected_books:
+ if field == 'authors':
+ self._set_field('author_sort',
+ {k:' & '.join(v) for k, v in self._author_sort_strings_for_books(affected_books).iteritems()})
+ self._update_path(affected_books, mark_as_dirtied=False)
+ elif change_index and hasattr(f, 'index_field') and tweaks['series_index_auto_increment'] != 'no_change':
+ for book_id in moved_books:
+ self._set_field(f.index_field.name, {book_id:self._get_next_series_num_for(self._field_for(field, book_id), field=field)})
+ self._mark_as_dirty(affected_books)
+ return affected_books, id_map
+
@write_api
def remove_items(self, field, item_ids):
''' Delete all items in the specified field with the specified ids. Returns the set of affected book ids. '''
@@ -1239,6 +1349,177 @@ class Cache(object):
''' options must be a map of the form {book_id:conversion_options} '''
return self.backend.set_conversion_options(options, fmt)
+ @write_api
+ def refresh_format_cache(self):
+ self.fields['formats'].table.read(self.backend)
+ self.format_metadata_cache.clear()
+
+ @write_api
+ def refresh_ondevice(self):
+ self.fields['ondevice'].clear_caches()
+
+ @read_api
+ def tags_older_than(self, tag, delta=None, must_have_tag=None, must_have_authors=None):
+ '''
+ Return the ids of all books having the tag ``tag`` that are older
+ than the specified time. Tag comparison is case insensitive.
+
+ :param delta: A timedelta object or None. If None, then all ids with
+ the tag are returned.
+ :param must_have_tag: If not None, the list of matches will be
+ restricted to books that have this tag.
+ :param must_have_authors: A list of authors. If not None, the list of
+ matches will be restricted to books that have these authors (case
+ insensitive).
+ '''
+ tag_map = {icu_lower(v):k for k, v in self._get_id_map('tags').iteritems()}
+ tag = icu_lower(tag.strip())
+ mht = icu_lower(must_have_tag.strip()) if must_have_tag else None
+ tag_id, mht_id = tag_map.get(tag, None), tag_map.get(mht, None)
+ ans = set()
+ if mht_id is None and mht:
+ return ans
+ if tag_id is not None:
+ tagged_books = self._books_for_field('tags', tag_id)
+ if mht_id is not None and tagged_books:
+ tagged_books = tagged_books.intersection(self._books_for_field('tags', mht_id))
+ if tagged_books:
+ if must_have_authors is not None:
+ amap = {icu_lower(v):k for k, v in self._get_id_map('authors').iteritems()}
+ books = None
+ for author in must_have_authors:
+ abooks = self._books_for_field('authors', amap.get(icu_lower(author), None))
+ books = abooks if books is None else books.intersection(abooks)
+ if not books:
+ break
+ tagged_books = tagged_books.intersection(books or set())
+ if delta is None:
+ ans = tagged_books
+ else:
+ now = nowf()
+ for book_id in tagged_books:
+ ts = self._field_for('timestamp', book_id)
+ if (now - ts) > delta:
+ ans.add(book_id)
+ return ans
+
+ @write_api
+ def set_sort_for_authors(self, author_id_to_sort_map, update_books=True):
+ self.fields['authors'].table.set_sort_names(author_id_to_sort_map, self.backend)
+ changed_books = set()
+ if update_books:
+ val_map = {}
+ for author_id in author_id_to_sort_map:
+ books = self._books_for_field('authors', author_id)
+ changed_books |= books
+ for book_id in books:
+ authors = self._field_ids_for('authors', book_id)
+ adata = self._author_data(authors)
+ sorts = [adata[x]['sort'] for x in authors]
+ val_map[book_id] = ' & '.join(sorts)
+ if val_map:
+ self._set_field('author_sort', val_map)
+ self._mark_as_dirty(changed_books)
+ return changed_books
+
+ @write_api
+ def set_link_for_authors(self, author_id_to_link_map):
+ self.fields['authors'].table.set_links(author_id_to_link_map, self.backend)
+ changed_books = set()
+ for author_id in author_id_to_link_map:
+ changed_books |= self._books_for_field('authors', author_id)
+ self._mark_as_dirty(changed_books)
+ return changed_books
+
+ @read_api
+ def lookup_by_uuid(self, uuid):
+ return self.fields['uuid'].table.lookup_by_uuid(uuid)
+
+ @write_api
+ def delete_custom_column(self, label=None, num=None):
+ self.backend.delete_custom_column(label, num)
+
+ @write_api
+ def create_custom_column(self, label, name, datatype, is_multiple, editable=True, display={}):
+ self.backend.create_custom_column(label, name, datatype, is_multiple, editable=editable, display=display)
+
+ @write_api
+ def set_custom_column_metadata(self, num, name=None, label=None, is_editable=None, display=None):
+ return self.backend.set_custom_column_metadata(num, name=name, label=label, is_editable=is_editable, display=display)
+
+ @read_api
+ def get_books_for_category(self, category, item_id_or_composite_value):
+ f = self.fields[category]
+ if hasattr(f, 'get_books_for_val'):
+ # Composite field
+ return f.get_books_for_val(item_id_or_composite_value, self._get_metadata, self._all_book_ids())
+ return self._books_for_field(f.name, item_id_or_composite_value)
+
+ @read_api
+ def find_identical_books(self, mi, search_restriction='', book_ids=None):
+ ''' Finds books that have a superset of the authors in mi and the same
+ title (title is fuzzy matched) '''
+ fuzzy_title_patterns = [(re.compile(pat, re.IGNORECASE) if
+ isinstance(pat, basestring) else pat, repl) for pat, repl in
+ [
+ (r'[\[\](){}<>\'";,:#]', ''),
+ (get_title_sort_pat(), ''),
+ (r'[-._]', ' '),
+ (r'\s+', ' ')
+ ]
+ ]
+
+ def fuzzy_title(title):
+ title = icu_lower(title.strip())
+ for pat, repl in fuzzy_title_patterns:
+ title = pat.sub(repl, title)
+ return title
+
+ identical_book_ids = set()
+ if mi.authors:
+ try:
+ quathors = mi.authors[:20] # Too many authors causes parsing of
+ # the search expression to fail
+ query = ' and '.join('authors:"=%s"'%(a.replace('"', '')) for a in quathors)
+ qauthors = mi.authors[20:]
+ except ValueError:
+ return identical_book_ids
+ try:
+ book_ids = self._search(query, restriction=search_restriction, book_ids=book_ids)
+ except:
+ traceback.print_exc()
+ return identical_book_ids
+ if qauthors and book_ids:
+ matches = set()
+ qauthors = {icu_lower(x) for x in qauthors}
+ for book_id in book_ids:
+ aut = self._field_for('authors', book_id)
+ if aut:
+ aut = {icu_lower(x) for x in aut}
+ if aut.issuperset(qauthors):
+ matches.add(book_id)
+ book_ids = matches
+
+ for book_id in book_ids:
+ fbook_title = self._field_for('title', book_id)
+ fbook_title = fuzzy_title(fbook_title)
+ mbook_title = fuzzy_title(mi.title)
+ if fbook_title == mbook_title:
+ identical_book_ids.add(book_id)
+ return identical_book_ids
+
+ @read_api
+ def get_top_level_move_items(self):
+ all_paths = {self._field_for('path', book_id).partition('/')[0] for book_id in self._all_book_ids()}
+ return self.backend.get_top_level_move_items(all_paths)
+
+ @write_api
+ def move_library_to(self, newloc, progress=None):
+ if progress is None:
+ progress = lambda x:x
+ all_paths = {self._field_for('path', book_id).partition('/')[0] for book_id in self._all_book_ids()}
+ self.backend.move_library_to(all_paths, newloc, progress=progress)
+
# }}}
class SortKey(object): # {{{
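The new find_identical_books() above fuzzy-matches titles by stripping punctuation, leading articles and separator characters before comparing. A minimal standalone sketch of that normalization; the article pattern is a simplified stand-in for calibre's get_title_sort_pat(), and plain lower() stands in for icu_lower:

    import re

    _fuzzy_pats = [
        (re.compile(r'[\[\](){}<>\'";,:#]'), ''),
        (re.compile(r'^(a|the|an)\s+', re.IGNORECASE), ''),  # stand-in for get_title_sort_pat()
        (re.compile(r'[-._]'), ' '),
        (re.compile(r'\s+'), ' '),
    ]

    def fuzzy_title(title):
        title = title.strip().lower()
        for pat, repl in _fuzzy_pats:
            title = pat.sub(repl, title)
        return title

    assert fuzzy_title('The Time-Machine') == fuzzy_title('time machine')
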
diff --git a/src/calibre/db/categories.py b/src/calibre/db/categories.py
index 3f7bbb9e61..df6c1402d2 100644
--- a/src/calibre/db/categories.py
+++ b/src/calibre/db/categories.py
@@ -16,7 +16,6 @@ from calibre.ebooks.metadata import author_to_author_sort
from calibre.library.field_metadata import TagsIcons
from calibre.utils.config_base import tweaks
from calibre.utils.icu import sort_key
-from calibre.utils.search_query_parser import saved_searches
CATEGORY_SORTS = ('name', 'popularity', 'rating') # This has to be a tuple not a set
@@ -229,7 +228,7 @@ def get_categories(dbcache, sort='name', book_ids=None, icon_map=None):
icon = None
if icon_map and 'search' in icon_map:
icon = icon_map['search']
- ss = saved_searches()
+ ss = dbcache._search_api.get_saved_searches()
for srch in ss.names():
items.append(Tag(srch, tooltip=ss.lookup(srch),
sort=srch, icon=icon, category='search',
diff --git a/src/calibre/db/fields.py b/src/calibre/db/fields.py
index 20d0d75ff4..e028ff5d99 100644
--- a/src/calibre/db/fields.py
+++ b/src/calibre/db/fields.py
@@ -11,7 +11,7 @@ __docformat__ = 'restructuredtext en'
from threading import Lock
from collections import defaultdict, Counter
-from calibre.db.tables import ONE_ONE, MANY_ONE, MANY_MANY
+from calibre.db.tables import ONE_ONE, MANY_ONE, MANY_MANY, null
from calibre.db.write import Writer
from calibre.ebooks.metadata import title_sort
from calibre.utils.config_base import tweaks
@@ -163,14 +163,13 @@ class CompositeField(OneToOneField):
self._render_cache[book_id] = ans
return ans
- def clear_cache(self):
+ def clear_caches(self, book_ids=None):
with self._lock:
- self._render_cache = {}
-
- def pop_cache(self, book_ids):
- with self._lock:
- for book_id in book_ids:
- self._render_cache.pop(book_id, None)
+ if book_ids is None:
+ self._render_cache.clear()
+ else:
+ for book_id in book_ids:
+ self._render_cache.pop(book_id, None)
def get_value_with_cache(self, book_id, get_metadata):
with self._lock:
@@ -212,17 +211,41 @@ class CompositeField(OneToOneField):
ans.append(c)
return ans
+ def get_books_for_val(self, value, get_metadata, book_ids):
+ is_multiple = self.table.metadata['is_multiple'].get('cache_to_list', None)
+ ans = set()
+ for book_id in book_ids:
+ val = self.get_value_with_cache(book_id, get_metadata)
+ vals = {x.strip() for x in val.split(is_multiple)} if is_multiple else [val]
+ if value in vals:
+ ans.add(book_id)
+ return ans
+
class OnDeviceField(OneToOneField):
def __init__(self, name, table):
self.name = name
self.book_on_device_func = None
self.is_multiple = False
+ self.cache = {}
+ self._lock = Lock()
+
+ def clear_caches(self, book_ids=None):
+ with self._lock:
+ if book_ids is None:
+ self.cache.clear()
+ else:
+ for book_id in book_ids:
+ self.cache.pop(book_id, None)
def book_on_device(self, book_id):
- if callable(self.book_on_device_func):
- return self.book_on_device_func(book_id)
- return None
+ with self._lock:
+ ans = self.cache.get(book_id, null)
+ if ans is null and callable(self.book_on_device_func):
+ ans = self.book_on_device_func(book_id)
+ with self._lock:
+ self.cache[book_id] = ans
+ return None if ans is null else ans
def set_book_on_device_func(self, func):
self.book_on_device_func = func
@@ -312,7 +335,11 @@ class ManyToManyField(Field):
def for_book(self, book_id, default_value=None):
ids = self.table.book_col_map.get(book_id, ())
if ids:
- ans = tuple(self.table.id_map[i] for i in ids)
+ ans = (self.table.id_map[i] for i in ids)
+ if self.table.sort_alpha:
+ ans = tuple(sorted(ans, key=sort_key))
+ else:
+ ans = tuple(ans)
else:
ans = default_value
return ans
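OnDeviceField.book_on_device() above caches lookups under a lock, using the null sentinel imported from calibre.db.tables so that a cached None result is distinguishable from "not cached yet". A generic sketch of that sentinel-cache pattern:

    from threading import Lock

    null = object()  # sentinel: distinct from every real result, including None

    class CachedCall(object):
        def __init__(self, func):
            self.func, self.cache, self.lock = func, {}, Lock()

        def __call__(self, key):
            with self.lock:
                ans = self.cache.get(key, null)
            if ans is null:
                ans = self.func(key)  # compute outside the lock, like the field does
                with self.lock:
                    self.cache[key] = ans
            return ans
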
diff --git a/src/calibre/db/legacy.py b/src/calibre/db/legacy.py
index 9d9e93c02b..838ccdfe21 100644
--- a/src/calibre/db/legacy.py
+++ b/src/calibre/db/legacy.py
@@ -9,17 +9,32 @@ __copyright__ = '2013, Kovid Goyal '
import os, traceback, types
from future_builtins import zip
-from calibre import force_unicode
-from calibre.db import _get_next_series_num_for_list, _get_series_values
+from calibre import force_unicode, isbytestring
+from calibre.constants import preferred_encoding
+from calibre.db import _get_next_series_num_for_list, _get_series_values, get_data_as_dict
from calibre.db.adding import (
find_books_in_directory, import_book_directory_multiple,
import_book_directory, recursive_import, add_catalog, add_news)
from calibre.db.backend import DB
from calibre.db.cache import Cache
+from calibre.db.errors import NoSuchFormat
from calibre.db.categories import CATEGORY_SORTS
from calibre.db.view import View
from calibre.db.write import clean_identifier
from calibre.utils.date import utcnow
+from calibre.utils.search_query_parser import set_saved_searches
+
+def cleanup_tags(tags):
+ tags = [x.strip().replace(',', ';') for x in tags if x.strip()]
+ tags = [x.decode(preferred_encoding, 'replace')
+ if isbytestring(x) else x for x in tags]
+ tags = [u' '.join(x.split()) for x in tags]
+ ans, seen = [], set([])
+ for tag in tags:
+ if tag.lower() not in seen:
+ seen.add(tag.lower())
+ ans.append(tag)
+ return ans
class LibraryDatabase(object):
@@ -49,11 +64,18 @@ class LibraryDatabase(object):
cache = self.new_api = Cache(backend)
cache.init()
self.data = View(cache)
+ self.id = self.data.index_to_id
+ for x in ('get_property', 'count', 'refresh_ids', 'set_marked_ids',
+ 'multisort', 'search', 'search_getting_ids'):
+ setattr(self, x, getattr(self.data, x))
- self.get_property = self.data.get_property
+ self.is_case_sensitive = getattr(backend, 'is_case_sensitive', False)
+ self.custom_field_name = backend.custom_field_name
self.last_update_check = self.last_modified()
- self.book_on_device_func = None
+
+ if not self.is_second_db:
+ set_saved_searches(self, 'saved_searches')
def close(self):
self.backend.close()
@@ -66,6 +88,10 @@ class LibraryDatabase(object):
delattr(self, x)
# Library wide properties {{{
+ @property
+ def prefs(self):
+ return self.new_api.backend.prefs
+
@property
def field_metadata(self):
return self.backend.field_metadata
@@ -91,9 +117,12 @@ class LibraryDatabase(object):
def check_if_modified(self):
if self.last_modified() > self.last_update_check:
- self.refresh()
+ self.new_api.reload_from_db()
self.last_update_check = utcnow()
+ def get_saved_searches(self):
+ return self.new_api._search_api.get_saved_searches()
+
@property
def custom_column_num_map(self):
return self.backend.custom_column_num_map
@@ -117,16 +146,29 @@ class LibraryDatabase(object):
for book_id in self.data.cache.all_book_ids():
yield book_id
+ def is_empty(self):
+ with self.new_api.read_lock:
+ return not bool(self.new_api.fields['title'].table.book_col_map)
+
def get_usage_count_by_id(self, field):
return [[k, v] for k, v in self.new_api.get_usage_count_by_id(field).iteritems()]
def field_id_map(self, field):
return [(k, v) for k, v in self.new_api.get_id_map(field).iteritems()]
+ def get_custom_items_with_ids(self, label=None, num=None):
+ try:
+ return [[k, v] for k, v in self.new_api.get_id_map(self.custom_field_name(label, num)).iteritems()]
+ except ValueError:
+ return []
+
def refresh(self, field=None, ascending=True):
- self.data.cache.refresh()
self.data.refresh(field=field, ascending=ascending)
+ def get_id_from_uuid(self, uuid):
+ if uuid:
+ return self.new_api.lookup_by_uuid(uuid)
+
def add_listener(self, listener):
'''
Add a listener. Will be called on change events with two arguments.
@@ -147,7 +189,7 @@ class LibraryDatabase(object):
def path(self, index, index_is_id=False):
'Return the relative path to the directory containing this books files as a unicode string.'
- book_id = index if index_is_id else self.data.index_to_id(index)
+ book_id = index if index_is_id else self.id(index)
return self.new_api.field_for('path', book_id).replace('/', os.sep)
def abspath(self, index, index_is_id=False, create_dirs=True):
@@ -210,7 +252,7 @@ class LibraryDatabase(object):
def add_format(self, index, fmt, stream, index_is_id=False, path=None, notify=True, replace=True, copy_function=None):
''' path and copy_function are ignored by the new API '''
- book_id = index if index_is_id else self.data.index_to_id(index)
+ book_id = index if index_is_id else self.id(index)
try:
return self.new_api.add_format(book_id, fmt, stream, replace=replace, run_hooks=False, dbapi=self)
except:
@@ -220,7 +262,7 @@ class LibraryDatabase(object):
def add_format_with_hooks(self, index, fmt, fpath, index_is_id=False, path=None, notify=True, replace=True):
''' path is ignored by the new API '''
- book_id = index if index_is_id else self.data.index_to_id(index)
+ book_id = index if index_is_id else self.id(index)
try:
return self.new_api.add_format(book_id, fmt, fpath, replace=replace, run_hooks=True, dbapi=self)
except:
@@ -253,49 +295,78 @@ class LibraryDatabase(object):
return list(self.new_api.get_ids_for_custom_book_data(name))
# }}}
+ def sort(self, field, ascending, subsort=False):
+ self.multisort([(field, ascending)])
+
def get_field(self, index, key, default=None, index_is_id=False):
- book_id = index if index_is_id else self.data.index_to_id(index)
+ book_id = index if index_is_id else self.id(index)
mi = self.new_api.get_metadata(book_id, get_cover=key == 'cover')
return mi.get(key, default)
+ def cover_last_modified(self, index, index_is_id=False):
+ book_id = index if index_is_id else self.id(index)
+ return self.new_api.cover_last_modified(book_id) or self.last_modified()
+
+ def cover(self, index, index_is_id=False, as_file=False, as_image=False, as_path=False):
+ book_id = index if index_is_id else self.id(index)
+ return self.new_api.cover(book_id, as_file=as_file, as_image=as_image, as_path=as_path)
+
+ def copy_cover_to(self, index, dest, index_is_id=False, windows_atomic_move=None, use_hardlink=False):
+ book_id = index if index_is_id else self.id(index)
+ return self.new_api.copy_cover_to(book_id, dest, use_hardlink=use_hardlink)
+
+ def copy_format_to(self, index, fmt, dest, index_is_id=False, windows_atomic_move=None, use_hardlink=False):
+ book_id = index if index_is_id else self.id(index)
+ return self.new_api.copy_format_to(book_id, fmt, dest, use_hardlink=use_hardlink)
+
+ def delete_book(self, book_id, notify=True, commit=True, permanent=False, do_clean=True):
+ self.new_api.remove_books((book_id,), permanent=permanent)
+ if notify:
+ self.notify('delete', [book_id])
+
+ def dirtied(self, book_ids, commit=True):
+ self.new_api.mark_as_dirty(book_ids)
+
+ def dump_metadata(self, book_ids=None, remove_from_dirtied=True, commit=True, callback=None):
+ self.new_api.dump_metadata(book_ids=book_ids, remove_from_dirtied=remove_from_dirtied, callback=callback)
+
def authors_sort_strings(self, index, index_is_id=False):
- book_id = index if index_is_id else self.data.index_to_id(index)
- with self.new_api.read_lock:
- authors = self.new_api._field_ids_for('authors', book_id)
- adata = self.new_api._author_data(authors)
- return [adata[aid]['sort'] for aid in authors]
+ book_id = index if index_is_id else self.id(index)
+ return list(self.new_api.author_sort_strings_for_books((book_id,))[book_id])
def author_sort_from_book(self, index, index_is_id=False):
return ' & '.join(self.authors_sort_strings(index, index_is_id=index_is_id))
def authors_with_sort_strings(self, index, index_is_id=False):
- book_id = index if index_is_id else self.data.index_to_id(index)
+ book_id = index if index_is_id else self.id(index)
with self.new_api.read_lock:
authors = self.new_api._field_ids_for('authors', book_id)
adata = self.new_api._author_data(authors)
return [(aid, adata[aid]['name'], adata[aid]['sort'], adata[aid]['link']) for aid in authors]
+ def set_sort_field_for_author(self, old_id, new_sort, commit=True, notify=False):
+ changed_books = self.new_api.set_sort_for_authors({old_id:new_sort})
+ if notify:
+ self.notify('metadata', list(changed_books))
+
+ def set_link_field_for_author(self, aid, link, commit=True, notify=False):
+ changed_books = self.new_api.set_link_for_authors({aid:link})
+ if notify:
+ self.notify('metadata', list(changed_books))
+
def book_on_device(self, book_id):
- if callable(self.book_on_device_func):
- return self.book_on_device_func(book_id)
- return None
+ with self.new_api.read_lock:
+ return self.new_api.fields['ondevice'].book_on_device(book_id)
def book_on_device_string(self, book_id):
- loc = []
- count = 0
- on = self.book_on_device(book_id)
- if on is not None:
- m, a, b, count = on[:4]
- if m is not None:
- loc.append(_('Main'))
- if a is not None:
- loc.append(_('Card A'))
- if b is not None:
- loc.append(_('Card B'))
- return ', '.join(loc) + ((_(' (%s books)')%count) if count > 1 else '')
+ return self.new_api.field_for('ondevice', book_id)
def set_book_on_device_func(self, func):
- self.book_on_device_func = func
+ self.new_api.fields['ondevice'].set_book_on_device_func(func)
+
+ @property
+ def book_on_device_func(self):
+ return self.new_api.fields['ondevice'].book_on_device_func
def books_in_series(self, series_id):
with self.new_api.read_lock:
@@ -304,7 +375,7 @@ class LibraryDatabase(object):
return sorted(book_ids, key=lambda x:ff('series_index', x))
def books_in_series_of(self, index, index_is_id=False):
- book_id = index if index_is_id else self.data.index_to_id(index)
+ book_id = index if index_is_id else self.id(index)
series_ids = self.new_api.field_ids_for('series', book_id)
if not series_ids:
return []
@@ -335,7 +406,7 @@ class LibraryDatabase(object):
self.new_api.delete_conversion_options((book_id,), fmt=fmt)
def set(self, index, field, val, allow_case_change=False):
- book_id = self.data.index_to_id(index)
+ book_id = self.id(index)
try:
return self.new_api.set_field(field, {book_id:val}, allow_case_change=allow_case_change)
finally:
@@ -371,6 +442,266 @@ class LibraryDatabase(object):
if notify:
self.notify('metadata', [book_id])
+ def remove_all_tags(self, ids, notify=False, commit=True):
+ self.new_api.set_field('tags', {book_id:() for book_id in ids})
+ if notify:
+ self.notify('metadata', ids)
+
+ def _do_bulk_modify(self, field, ids, add, remove, notify):
+ add = cleanup_tags(add)
+ remove = cleanup_tags(remove)
+ remove = set(remove) - set(add)
+ if not ids or (not add and not remove):
+ return
+
+ remove = {icu_lower(x) for x in remove}
+ with self.new_api.write_lock:
+ val_map = {}
+ for book_id in ids:
+ tags = list(self.new_api._field_for(field, book_id))
+ existing = {icu_lower(x) for x in tags}
+ tags.extend(t for t in add if icu_lower(t) not in existing)
+ tags = tuple(t for t in tags if icu_lower(t) not in remove)
+ val_map[book_id] = tags
+ self.new_api._set_field(field, val_map, allow_case_change=False)
+
+ if notify:
+ self.notify('metadata', ids)
+
+ def bulk_modify_tags(self, ids, add=[], remove=[], notify=False):
+ self._do_bulk_modify('tags', ids, add, remove, notify)
+
+ def set_custom_bulk_multiple(self, ids, add=[], remove=[], label=None, num=None, notify=False):
+ data = self.backend.custom_field_metadata(label, num)
+ if not data['editable']:
+ raise ValueError('Column %r is not editable'%data['label'])
+ if data['datatype'] != 'text' or not data['is_multiple']:
+ raise ValueError('Column %r is not text/multiple'%data['label'])
+ field = self.custom_field_name(label, num)
+ self._do_bulk_modify(field, ids, add, remove, notify)
+
+ def unapply_tags(self, book_id, tags, notify=True):
+ self.bulk_modify_tags((book_id,), remove=tags, notify=notify)
+
+ def is_tag_used(self, tag):
+ return icu_lower(tag) in {icu_lower(x) for x in self.new_api.all_field_names('tags')}
+
+ def delete_tag(self, tag):
+ self.delete_tags((tag,))
+
+ def delete_tags(self, tags):
+ with self.new_api.write_lock:
+ tag_map = {icu_lower(v):k for k, v in self.new_api._get_id_map('tags').iteritems()}
+ tag_ids = (tag_map.get(icu_lower(tag), None) for tag in tags)
+ tag_ids = tuple(tid for tid in tag_ids if tid is not None)
+ if tag_ids:
+ self.new_api._remove_items('tags', tag_ids)
+
+ def has_id(self, book_id):
+ return book_id in self.new_api.all_book_ids()
+
+ def format(self, index, fmt, index_is_id=False, as_file=False, mode='r+b', as_path=False, preserve_filename=False):
+ book_id = index if index_is_id else self.id(index)
+ return self.new_api.format(book_id, fmt, as_file=as_file, as_path=as_path, preserve_filename=preserve_filename)
+
+ def format_abspath(self, index, fmt, index_is_id=False):
+ book_id = index if index_is_id else self.id(index)
+ return self.new_api.format_abspath(book_id, fmt)
+
+ def format_path(self, index, fmt, index_is_id=False):
+ book_id = index if index_is_id else self.id(index)
+ ans = self.new_api.format_abspath(book_id, fmt)
+ if ans is None:
+ raise NoSuchFormat('Record %d has no format: %s'%(book_id, fmt))
+ return ans
+
+ def format_files(self, index, index_is_id=False):
+ book_id = index if index_is_id else self.id(index)
+ return [(v, k) for k, v in self.new_api.format_files(book_id).iteritems()]
+
+ def format_metadata(self, book_id, fmt, allow_cache=True, update_db=False, commit=False):
+ return self.new_api.format_metadata(book_id, fmt, allow_cache=allow_cache, update_db=update_db)
+
+ def format_last_modified(self, book_id, fmt):
+ m = self.format_metadata(book_id, fmt)
+ if m:
+ return m['mtime']
+
+ def formats(self, index, index_is_id=False, verify_formats=True):
+ book_id = index if index_is_id else self.id(index)
+ ans = self.new_api.formats(book_id, verify_formats=verify_formats)
+ if ans:
+ return ','.join(ans)
+
+ def has_format(self, index, fmt, index_is_id=False):
+ book_id = index if index_is_id else self.id(index)
+ return self.new_api.has_format(book_id, fmt)
+
+ def refresh_format_cache(self):
+ self.new_api.refresh_format_cache()
+
+ def refresh_ondevice(self):
+ self.new_api.refresh_ondevice()
+
+ def tags_older_than(self, tag, delta, must_have_tag=None, must_have_authors=None):
+ for book_id in sorted(self.new_api.tags_older_than(tag, delta=delta, must_have_tag=must_have_tag, must_have_authors=must_have_authors)):
+ yield book_id
+
+ def sizeof_format(self, index, fmt, index_is_id=False):
+ book_id = index if index_is_id else self.id(index)
+ return self.new_api.format_metadata(book_id, fmt).get('size', None)
+
+ def get_metadata(self, index, index_is_id=False, get_cover=False, get_user_categories=True, cover_as_data=False):
+ book_id = index if index_is_id else self.id(index)
+ return self.new_api.get_metadata(book_id, get_cover=get_cover, get_user_categories=get_user_categories, cover_as_data=cover_as_data)
+
+ def rename_series(self, old_id, new_name, change_index=True):
+ self.new_api.rename_items('series', {old_id:new_name}, change_index=change_index)
+
+ def get_custom(self, index, label=None, num=None, index_is_id=False):
+ book_id = index if index_is_id else self.id(index)
+ ans = self.new_api.field_for(self.custom_field_name(label, num), book_id)
+ if isinstance(ans, tuple):
+ ans = list(ans)
+ return ans
+
+ def get_custom_extra(self, index, label=None, num=None, index_is_id=False):
+ data = self.backend.custom_field_metadata(label, num)
+ # add future datatypes with an extra column here
+ if data['datatype'] != 'series':
+ return None
+ book_id = index if index_is_id else self.id(index)
+ return self.new_api.field_for(self.custom_field_name(label, num) + '_index', book_id)
+
+ def get_custom_and_extra(self, index, label=None, num=None, index_is_id=False):
+ book_id = index if index_is_id else self.id(index)
+ data = self.backend.custom_field_metadata(label, num)
+ ans = self.new_api.field_for(self.custom_field_name(label, num), book_id)
+ if isinstance(ans, tuple):
+ ans = list(ans)
+ if data['datatype'] != 'series':
+ return (ans, None)
+ return (ans, self.new_api.field_for(self.custom_field_name(label, num) + '_index', book_id))
+
+ def get_next_cc_series_num_for(self, series, label=None, num=None):
+ data = self.backend.custom_field_metadata(label, num)
+ if data['datatype'] != 'series':
+ return None
+ return self.new_api.get_next_series_num_for(series, field=self.custom_field_name(label, num))
+
+ def is_item_used_in_multiple(self, item, label=None, num=None):
+ existing_tags = self.all_custom(label=label, num=num)
+ return icu_lower(item) in {icu_lower(t) for t in existing_tags}
+
+ def delete_custom_item_using_id(self, item_id, label=None, num=None):
+ self.new_api.remove_items(self.custom_field_name(label, num), (item_id,))
+
+ def rename_custom_item(self, old_id, new_name, label=None, num=None):
+ self.new_api.rename_items(self.custom_field_name(label, num), {old_id:new_name}, change_index=False)
+
+ def delete_item_from_multiple(self, item, label=None, num=None):
+ field = self.custom_field_name(label, num)
+ existing = self.new_api.get_id_map(field)
+ rmap = {icu_lower(v):k for k, v in existing.iteritems()}
+ item_id = rmap.get(icu_lower(item), None)
+ if item_id is None:
+ return []
+ return list(self.new_api.remove_items(field, (item_id,)))
+
+ def set_custom(self, book_id, val, label=None, num=None, append=False,
+ notify=True, extra=None, commit=True, allow_case_change=False):
+ field = self.custom_field_name(label, num)
+ data = self.backend.custom_field_metadata(label, num)
+ if data['datatype'] == 'composite':
+ return set()
+ if not data['editable']:
+ raise ValueError('Column %r is not editable'%data['label'])
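+ # A value not present in the enumeration is silently ignored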
+ if data['datatype'] == 'enumeration' and (
+ val and val not in data['display']['enum_values']):
+ return set()
+ with self.new_api.write_lock:
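+ # In append mode, merge the new values into the existing ones, skipping case-insensitive duplicates, then write once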
+ if append and data['is_multiple']:
+ current = self.new_api._field_for(field, book_id)
+ existing = {icu_lower(x) for x in current}
+ val = current + tuple(x for x in self.new_api.fields[field].writer.adapter(val) if icu_lower(x) not in existing)
+ affected_books = self.new_api._set_field(field, {book_id:val}, allow_case_change=allow_case_change)
+ if data['datatype'] == 'series':
+ extra = 1.0 if extra is None else extra
+ self.new_api._set_field(field + '_index', {book_id:extra})
+ if notify and affected_books:
+ self.notify('metadata', list(affected_books))
+ return affected_books
+
+ def set_custom_bulk(self, ids, val, label=None, num=None,
+ append=False, notify=True, extras=None):
+ if extras is not None and len(extras) != len(ids):
+ raise ValueError('Length of ids and extras is not the same')
+ field = self.custom_field_name(label, num)
+ data = self.backend.custom_field_metadata(label, num)
+ if data['datatype'] == 'composite':
+ return set()
+ if data['datatype'] == 'enumeration' and (
+ val and val not in data['display']['enum_values']):
+ return
+ if not data['editable']:
+ raise ValueError('Column %r is not editable'%data['label'])
+
+ if append:
+ for book_id in ids:
+ self.set_custom(book_id, val, label=label, num=num, append=True, notify=False)
+ else:
+ with self.new_api.write_lock:
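+ # Apply the same value to every book; extras, if given, carries the per-book series index values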
+ self.new_api._set_field(field, {book_id:val for book_id in ids}, allow_case_change=False)
+ if extras is not None:
+ self.new_api._set_field(field + '_index', {book_id:val for book_id, val in zip(ids, extras)})
+ if notify:
+ self.notify('metadata', list(ids))
+
+ def delete_custom_column(self, label=None, num=None):
+ self.new_api.delete_custom_column(label, num)
+
+ def create_custom_column(self, label, name, datatype, is_multiple, editable=True, display={}):
+ self.new_api.create_custom_column(label, name, datatype, is_multiple, editable=editable, display=display)
+
+ def set_custom_column_metadata(self, num, name=None, label=None, is_editable=None, display=None, notify=True):
+ changed = self.new_api.set_custom_column_metadata(num, name=name, label=label, is_editable=is_editable, display=display)
+ if changed and notify:
+ self.notify('metadata', [])
+
+ def remove_cover(self, book_id, notify=True, commit=True):
+ self.new_api.set_cover({book_id:None})
+ if notify:
+ self.notify('cover', [book_id])
+
+ def set_cover(self, book_id, data, notify=True, commit=True):
+ self.new_api.set_cover({book_id:data})
+ if notify:
+ self.notify('cover', [book_id])
+
+ def original_fmt(self, book_id, fmt):
+ nfmt = ('ORIGINAL_%s'%fmt).upper()
+ return nfmt if self.new_api.has_format(book_id, nfmt) else fmt
+
+ def save_original_format(self, book_id, fmt, notify=True):
+ ret = self.new_api.save_original_format(book_id, fmt)
+ if ret and notify:
+ self.notify('metadata', [book_id])
+ return ret
+
+ def restore_original_format(self, book_id, original_fmt, notify=True):
+ ret = self.new_api.restore_original_format(book_id, original_fmt)
+ if ret and notify:
+ self.notify('metadata', [book_id])
+ return ret
+
+ def remove_format(self, index, fmt, index_is_id=False, notify=True, commit=True, db_only=False):
+ book_id = index if index_is_id else self.id(index)
+ self.new_api.remove_formats({book_id:(fmt,)}, db_only=db_only)
+ if notify:
+ self.notify('metadata', [book_id])
+
# Private interface {{{
def __iter__(self):
for row in self.data.iterall():
@@ -387,20 +718,43 @@ class LibraryDatabase(object):
MT = lambda func: types.MethodType(func, None, LibraryDatabase)
# Legacy getter API {{{
-for prop in ('author_sort', 'authors', 'comment', 'comments', 'publisher',
+for prop in ('author_sort', 'authors', 'comment', 'comments', 'publisher', 'max_size',
'rating', 'series', 'series_index', 'tags', 'title', 'title_sort',
'timestamp', 'uuid', 'pubdate', 'ondevice', 'metadata_last_modified', 'languages',):
def getter(prop):
fm = {'comment':'comments', 'metadata_last_modified':
- 'last_modified', 'title_sort':'sort'}.get(prop, prop)
+ 'last_modified', 'title_sort':'sort', 'max_size':'size'}.get(prop, prop)
def func(self, index, index_is_id=False):
return self.get_property(index, index_is_id=index_is_id, loc=self.FIELD_MAP[fm])
return func
setattr(LibraryDatabase, prop, MT(getter(prop)))
+for prop in ('series', 'publisher'):
+ def getter(field):
+ def func(self, index, index_is_id=False):
+ book_id = index if index_is_id else self.id(index)
+ ans = self.new_api.field_ids_for(field, book_id)
+ try:
+ return ans[0]
+ except IndexError:
+ pass
+ return func
+ setattr(LibraryDatabase, prop + '_id', MT(getter(prop)))
+
+LibraryDatabase.format_hash = MT(lambda self, book_id, fmt:self.new_api.format_hash(book_id, fmt))
+LibraryDatabase.index = MT(lambda self, book_id, cache=False:self.data.id_to_index(book_id))
LibraryDatabase.has_cover = MT(lambda self, book_id:self.new_api.field_for('cover', book_id))
+LibraryDatabase.get_tags = MT(lambda self, book_id:set(self.new_api.field_for('tags', book_id)))
+LibraryDatabase.get_categories = MT(lambda self, sort='name', ids=None, icon_map=None:self.new_api.get_categories(sort=sort, book_ids=ids, icon_map=icon_map))
LibraryDatabase.get_identifiers = MT(
- lambda self, index, index_is_id=False: self.new_api.field_for('identifiers', index if index_is_id else self.data.index_to_id(index)))
+ lambda self, index, index_is_id=False: self.new_api.field_for('identifiers', index if index_is_id else self.id(index)))
+LibraryDatabase.isbn = MT(
+ lambda self, index, index_is_id=False: self.get_identifiers(index, index_is_id=index_is_id).get('isbn', None))
+LibraryDatabase.get_books_for_category = MT(
+ lambda self, category, id_:self.new_api.get_books_for_category(category, id_))
+LibraryDatabase.get_data_as_dict = MT(get_data_as_dict)
+LibraryDatabase.find_identical_books = MT(lambda self, mi:self.new_api.find_identical_books(mi))
+LibraryDatabase.get_top_level_move_items = MT(lambda self:self.new_api.get_top_level_move_items())
# }}}
# Legacy setter API {{{
@@ -435,6 +789,20 @@ for field in (
return ret if field == 'languages' else retval
return func
setattr(LibraryDatabase, 'set_%s' % field.replace('!', ''), MT(setter(field)))
+
+for field in ('authors', 'tags', 'publisher'):
+ def renamer(field):
+ def func(self, old_id, new_name):
+ id_map = self.new_api.rename_items(field, {old_id:new_name})[1]
+ if field == 'authors':
+ return id_map[old_id]
+ return func
+ fname = field[:-1] if field in {'tags', 'authors'} else field
+ setattr(LibraryDatabase, 'rename_%s' % fname, MT(renamer(field)))
+
+LibraryDatabase.update_last_modified = MT(
+ lambda self, book_ids, commit=False, now=None: self.new_api.update_last_modified(book_ids, now=now))
+
# }}}
# Legacy API to get information about many-(one, many) fields {{{
@@ -445,7 +813,8 @@ for field in ('authors', 'tags', 'publisher', 'series'):
return func
name = field[:-1] if field in {'authors', 'tags'} else field
setattr(LibraryDatabase, 'all_%s_names' % name, MT(getter(field)))
- LibraryDatabase.all_formats = MT(lambda self:self.new_api.all_field_names('formats'))
+LibraryDatabase.all_formats = MT(lambda self:self.new_api.all_field_names('formats'))
+LibraryDatabase.all_custom = MT(lambda self, label=None, num=None:self.new_api.all_field_names(self.custom_field_name(label, num)))
for func, field in {'all_authors':'authors', 'all_titles':'title', 'all_tags2':'tags', 'all_series':'series', 'all_publishers':'publisher'}.iteritems():
def getter(field):
@@ -458,6 +827,8 @@ LibraryDatabase.all_tags = MT(lambda self: list(self.all_tag_names()))
LibraryDatabase.get_all_identifier_types = MT(lambda self: list(self.new_api.fields['identifiers'].table.all_identifier_types()))
LibraryDatabase.get_authors_with_ids = MT(
lambda self: [[aid, adata['name'], adata['sort'], adata['link']] for aid, adata in self.new_api.author_data().iteritems()])
+LibraryDatabase.get_author_id = MT(
+ lambda self, author: {icu_lower(v):k for k, v in self.new_api.get_id_map('authors').iteritems()}.get(icu_lower(author), None))
for field in ('tags', 'series', 'publishers', 'ratings', 'languages'):
def getter(field):
@@ -515,6 +886,7 @@ for meth in ('get_next_series_num_for', 'has_book', 'author_sort_from_authors'):
return func
setattr(LibraryDatabase, meth, MT(getter(meth)))
+LibraryDatabase.move_library_to = MT(lambda self, newloc, progress=None:self.new_api.move_library_to(newloc, progress=progress))
# Cleaning is not required anymore
LibraryDatabase.clean = LibraryDatabase.clean_custom = MT(lambda self:None)
LibraryDatabase.clean_standard_field = MT(lambda self, field, commit=False:None)
@@ -524,3 +896,8 @@ LibraryDatabase.commit = MT(lambda self:None)
del MT
+
+
+
+
+
diff --git a/src/calibre/db/search.py b/src/calibre/db/search.py
index 7b4ad90bc3..013678e3b3 100644
--- a/src/calibre/db/search.py
+++ b/src/calibre/db/search.py
@@ -15,7 +15,7 @@ from calibre.utils.config_base import prefs
from calibre.utils.date import parse_date, UNDEFINED_DATE, now
from calibre.utils.icu import primary_find
from calibre.utils.localization import lang_map, canonicalize_lang
-from calibre.utils.search_query_parser import SearchQueryParser, ParseException
+from calibre.utils.search_query_parser import SearchQueryParser, ParseException, SavedSearchQueries
CONTAINS_MATCH = 0
EQUALS_MATCH = 1
@@ -392,7 +392,7 @@ class Parser(SearchQueryParser):
def __init__(self, dbcache, all_book_ids, gst, date_search, num_search,
bool_search, keypair_search, limit_search_columns, limit_search_columns_to,
- locations, virtual_fields):
+ locations, virtual_fields, get_saved_searches):
self.dbcache, self.all_book_ids = dbcache, all_book_ids
self.all_search_locations = frozenset(locations)
self.grouped_search_terms = gst
@@ -403,7 +403,7 @@ class Parser(SearchQueryParser):
self.virtual_fields = virtual_fields or {}
if 'marked' not in self.virtual_fields:
self.virtual_fields['marked'] = self
- super(Parser, self).__init__(locations, optimize=True)
+ super(Parser, self).__init__(locations, optimize=True, get_saved_searches=get_saved_searches)
@property
def field_metadata(self):
@@ -651,17 +651,21 @@ class Parser(SearchQueryParser):
class Search(object):
- def __init__(self, all_search_locations=()):
+ def __init__(self, db, opt_name, all_search_locations=()):
self.all_search_locations = all_search_locations
self.date_search = DateSearch()
self.num_search = NumericSearch()
self.bool_search = BooleanSearch()
self.keypair_search = KeyPairSearch()
+ self.saved_searches = SavedSearchQueries(db, opt_name)
+
+ def get_saved_searches(self):
+ return self.saved_searches
def change_locations(self, newlocs):
self.all_search_locations = newlocs
- def __call__(self, dbcache, query, search_restriction, virtual_fields=None):
+ def __call__(self, dbcache, query, search_restriction, virtual_fields=None, book_ids=None):
'''
Return the set of ids of all records that match the specified
query and restriction
@@ -674,28 +678,26 @@ class Search(object):
if search_restriction:
q = u'(%s) and (%s)' % (search_restriction, query)
- all_book_ids = dbcache._all_book_ids(type=set)
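+ # Allow the caller to restrict the search to a subset of books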
+ all_book_ids = dbcache._all_book_ids(type=set) if book_ids is None else set(book_ids)
if not q:
return all_book_ids
if not isinstance(q, type(u'')):
q = q.decode('utf-8')
- # We construct a new parser instance per search as pyparsing is not
- # thread safe. On my desktop, constructing a SearchQueryParser instance
- # takes 0.000975 seconds and restoring it from a pickle takes
- # 0.000974 seconds.
+ # We construct a new parser instance per search as the parser is not
+ # thread safe.
sqp = Parser(
dbcache, all_book_ids, dbcache._pref('grouped_search_terms'),
self.date_search, self.num_search, self.bool_search,
self.keypair_search,
prefs['limit_search_columns'],
prefs['limit_search_columns_to'], self.all_search_locations,
- virtual_fields)
+ virtual_fields, self.get_saved_searches)
try:
ret = sqp.parse(q)
finally:
- sqp.dbcache = None
+ sqp.dbcache = sqp.get_saved_searches = None
return ret
diff --git a/src/calibre/db/tables.py b/src/calibre/db/tables.py
index 46c4554586..9b9ff4e9e0 100644
--- a/src/calibre/db/tables.py
+++ b/src/calibre/db/tables.py
@@ -44,6 +44,7 @@ class Table(object):
def __init__(self, name, metadata, link_table=None):
self.name, self.metadata = name, metadata
+ self.sort_alpha = metadata.get('is_multiple', False) and metadata.get('display', {}).get('sort_alpha', False)
# self.unserialize() maps values from the db to python objects
self.unserialize = \
@@ -137,6 +138,9 @@ class UUIDTable(OneToOneTable):
clean.add(val)
return clean
+ def lookup_by_uuid(self, uuid):
+ return self.uuid_to_id_map.get(uuid, None)
+
class CompositeTable(OneToOneTable):
def read(self, db):
@@ -219,6 +223,31 @@ class ManyToOneTable(Table):
db.conn.executemany('DELETE FROM {0} WHERE id=?'.format(self.metadata['table']), item_ids)
return affected_books
+ def rename_item(self, item_id, new_name, db):
+ rmap = {icu_lower(v):k for k, v in self.id_map.iteritems()}
+ existing_item = rmap.get(icu_lower(new_name), None)
+ table, col, lcol = self.metadata['table'], self.metadata['column'], self.metadata['link_column']
+ affected_books = self.col_book_map.get(item_id, set())
+ new_id = item_id
+ if existing_item is None or existing_item == item_id:
+ # A simple rename will do the trick
+ self.id_map[item_id] = new_name
+ db.conn.execute('UPDATE {0} SET {1}=? WHERE id=?'.format(table, col), (new_name, item_id))
+ else:
+ # We have to replace
+ new_id = existing_item
+ self.id_map.pop(item_id, None)
+ books = self.col_book_map.pop(item_id, set())
+ for book_id in books:
+ self.book_col_map[book_id] = existing_item
+ self.col_book_map[existing_item].update(books)
+ # For custom series this means that the series index can
+ # potentially have duplicates/be incorrect, but there is no way to
+ # handle that in this context.
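+ # Repoint all links at the surviving item, then delete the now unused item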
+ db.conn.execute('UPDATE {0} SET {1}=? WHERE {1}=?; DELETE FROM {2} WHERE id=?'.format(
+ self.link_table, lcol, table), (existing_item, item_id, item_id))
+ return affected_books, new_id
+
class ManyToManyTable(ManyToOneTable):
'''
@@ -280,6 +309,34 @@ class ManyToManyTable(ManyToOneTable):
db.conn.executemany('DELETE FROM {0} WHERE id=?'.format(self.metadata['table']), item_ids)
return affected_books
+ def rename_item(self, item_id, new_name, db):
+ rmap = {icu_lower(v):k for k, v in self.id_map.iteritems()}
+ existing_item = rmap.get(icu_lower(new_name), None)
+ table, col, lcol = self.metadata['table'], self.metadata['column'], self.metadata['link_column']
+ affected_books = self.col_book_map.get(item_id, set())
+ new_id = item_id
+ if existing_item is None or existing_item == item_id:
+ # A simple rename will do the trick
+ self.id_map[item_id] = new_name
+ db.conn.execute('UPDATE {0} SET {1}=? WHERE id=?'.format(table, col), (new_name, item_id))
+ else:
+ # We have to replace
+ new_id = existing_item
+ self.id_map.pop(item_id, None)
+ books = self.col_book_map.pop(item_id, set())
+ # Replacing item_id with existing_item could cause the same id to
+ # appear twice in the book list. Handle that by removing the
+ # existing item from the book list before replacing.
+ for book_id in books:
+ self.book_col_map[book_id] = tuple((existing_item if x == item_id else x) for x in self.book_col_map.get(book_id, ()) if x != existing_item)
+ self.col_book_map[existing_item].update(books)
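+ # Remove the link rows that would be duplicated by the rename before repointing the rest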
+ db.conn.executemany('DELETE FROM {0} WHERE book=? AND {1}=?'.format(self.link_table, lcol), [
+ (book_id, existing_item) for book_id in books])
+ db.conn.execute('UPDATE {0} SET {1}=? WHERE {1}=?; DELETE FROM {2} WHERE id=?'.format(
+ self.link_table, lcol, table), (existing_item, item_id, item_id))
+ return affected_books, new_id
+
+
class AuthorsTable(ManyToManyTable):
def read_id_maps(self, db):
@@ -293,10 +350,17 @@ class AuthorsTable(ManyToManyTable):
self.alink_map[row[0]] = row[3]
def set_sort_names(self, aus_map, db):
+ aus_map = {aid:(a or '').strip() for aid, a in aus_map.iteritems()}
self.asort_map.update(aus_map)
db.conn.executemany('UPDATE authors SET sort=? WHERE id=?',
[(v, k) for k, v in aus_map.iteritems()])
+ def set_links(self, link_map, db):
+ link_map = {aid:(l or '').strip() for aid, l in link_map.iteritems()}
+ self.alink_map.update(link_map)
+ db.conn.executemany('UPDATE authors SET link=? WHERE id=?',
+ [(v, k) for k, v in link_map.iteritems()])
+
def remove_books(self, book_ids, db):
clean = ManyToManyTable.remove_books(self, book_ids, db)
for item_id in clean:
@@ -304,6 +368,17 @@ class AuthorsTable(ManyToManyTable):
self.asort_map.pop(item_id, None)
return clean
+ def rename_item(self, item_id, new_name, db):
+ ret = ManyToManyTable.rename_item(self, item_id, new_name, db)
+ if item_id not in self.id_map:
+ self.alink_map.pop(item_id, None)
+ self.asort_map.pop(item_id, None)
+ else:
+ # Was a simple rename, update the author sort value
+ self.set_sort_names({item_id:author_to_author_sort(new_name)}, db)
+
+ return ret
+
def remove_items(self, item_ids, db):
raise ValueError('Direct removal of authors is not allowed')
@@ -367,6 +442,9 @@ class FormatsTable(ManyToManyTable):
def remove_items(self, item_ids, db):
raise NotImplementedError('Cannot delete a format directly')
+ def rename_item(self, item_id, new_name, db):
+ raise NotImplementedError('Cannot rename formats')
+
def update_fmt(self, book_id, fmt, fname, size, db):
fmts = list(self.book_col_map.get(book_id, []))
try:
@@ -420,6 +498,9 @@ class IdentifiersTable(ManyToManyTable):
def remove_items(self, item_ids, db):
raise NotImplementedError('Direct deletion of identifiers is not implemented')
+ def rename_item(self, item_id, new_name, db):
+ raise NotImplementedError('Cannot rename identifiers')
+
def all_identifier_types(self):
return frozenset(k for k, v in self.col_book_map.iteritems() if v)
diff --git a/src/calibre/db/tests/add_remove.py b/src/calibre/db/tests/add_remove.py
index 76349df1c5..0047a0ec4f 100644
--- a/src/calibre/db/tests/add_remove.py
+++ b/src/calibre/db/tests/add_remove.py
@@ -251,4 +251,21 @@ class AddRemoveTest(BaseTest):
# }}}
+ def test_original_fmt(self): # {{{
+ ' Test management of original fmt '
+ af, ae, at = self.assertFalse, self.assertEqual, self.assertTrue
+ db = self.init_cache()
+ fmts = db.formats(1)
+ af(db.has_format(1, 'ORIGINAL_FMT1'))
+ at(db.save_original_format(1, 'FMT1'))
+ at(db.has_format(1, 'ORIGINAL_FMT1'))
+ raw = db.format(1, 'FMT1')
+ ae(raw, db.format(1, 'ORIGINAL_FMT1'))
+ db.add_format(1, 'FMT1', BytesIO(b'replacedfmt'))
+ self.assertNotEqual(db.format(1, 'FMT1'), db.format(1, 'ORIGINAL_FMT1'))
+ at(db.restore_original_format(1, 'ORIGINAL_FMT1'))
+ ae(raw, db.format(1, 'FMT1'))
+ af(db.has_format(1, 'ORIGINAL_FMT1'))
+ ae(set(fmts), set(db.formats(1, verify_formats=False)))
+ # }}}
diff --git a/src/calibre/db/tests/base.py b/src/calibre/db/tests/base.py
index b94faf6b28..dd87ab1583 100644
--- a/src/calibre/db/tests/base.py
+++ b/src/calibre/db/tests/base.py
@@ -21,6 +21,11 @@ class BaseTest(unittest.TestCase):
longMessage = True
maxDiff = None
+ @classmethod
+ def setUpClass(cls):
+ from calibre.utils.config_base import reset_tweaks_to_default
+ reset_tweaks_to_default()
+
def setUp(self):
self.library_path = self.mkdtemp()
self.create_db(self.library_path)
diff --git a/src/calibre/db/tests/filesystem.py b/src/calibre/db/tests/filesystem.py
index 168eec53a4..5367f62235 100644
--- a/src/calibre/db/tests/filesystem.py
+++ b/src/calibre/db/tests/filesystem.py
@@ -79,4 +79,21 @@ class FilesystemTest(BaseTest):
f.close()
self.assertNotEqual(cache.field_for('title', 1), 'Moved', 'Title was changed despite file lock')
+ def test_library_move(self):
+ ' Test moving of library '
+ from calibre.ptempfile import TemporaryDirectory
+ cache = self.init_cache()
+ self.assertIn('metadata.db', cache.get_top_level_move_items()[0])
+ all_ids = cache.all_book_ids()
+ fmt1 = cache.format(1, 'FMT1')
+ cov = cache.cover(1)
+ with TemporaryDirectory('moved_lib') as tdir:
+ cache.move_library_to(tdir)
+ self.assertIn('moved_lib', cache.backend.library_path)
+ self.assertIn('moved_lib', cache.backend.dbpath)
+ self.assertEqual(fmt1, cache.format(1, 'FMT1'))
+ self.assertEqual(cov, cache.cover(1))
+ cache.reload_from_db()
+ self.assertEqual(all_ids, cache.all_book_ids())
+ cache.backend.close()
diff --git a/src/calibre/db/tests/legacy.py b/src/calibre/db/tests/legacy.py
index b0ef9fbe1e..3e1a1457b3 100644
--- a/src/calibre/db/tests/legacy.py
+++ b/src/calibre/db/tests/legacy.py
@@ -11,9 +11,11 @@ from io import BytesIO
from repr import repr
from functools import partial
from tempfile import NamedTemporaryFile
+from operator import itemgetter
from calibre.db.tests.base import BaseTest
+# Utils {{{
class ET(object):
def __init__(self, func_name, args, kwargs={}, old=None, legacy=None):
@@ -47,15 +49,17 @@ def run_funcs(self, db, ndb, funcs):
meth(*args)
else:
fmt = lambda x:x
- if meth[0] in {'!', '@', '#', '+'}:
+ if meth[0] in {'!', '@', '#', '+', '$', '-', '%'}:
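+ # The leading marker picks a result normalizer: '-' discards the return value, '+' takes the formatter as the last argument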
if meth[0] != '+':
- fmt = {'!':dict, '@':lambda x:frozenset(x or ()), '#':lambda x:set((x or '').split(','))}[meth[0]]
+ fmt = {'!':dict, '@':lambda x:frozenset(x or ()), '#':lambda x:set((x or '').split(',')),
+ '$':lambda x:set(tuple(y) for y in x), '-':lambda x:None, '%':lambda x: set((x or '').split(','))}[meth[0]]
else:
fmt = args[-1]
args = args[:-1]
meth = meth[1:]
res1, res2 = fmt(getattr(db, meth)(*args)), fmt(getattr(ndb, meth)(*args))
self.assertEqual(res1, res2, 'The method: %s() returned different results for argument %s' % (meth, args))
+# }}}
class LegacyTest(BaseTest):
@@ -152,15 +156,44 @@ class LegacyTest(BaseTest):
# }}}
def test_legacy_direct(self): # {{{
- 'Test methods that are directly equivalent in the old and new interface'
+ 'Test read-only methods that are directly equivalent in the old and new interface'
from calibre.ebooks.metadata.book.base import Metadata
+ from datetime import timedelta
ndb = self.init_legacy(self.cloned_library)
db = self.init_old()
+ newstag = ndb.new_api.get_item_id('tags', 'news')
+
+ self.assertEqual(dict(db.prefs), dict(ndb.prefs))
for meth, args in {
+ 'find_identical_books': [(Metadata('title one', ['author one']),), (Metadata('unknown'),), (Metadata('xxxx'),)],
+ 'get_top_level_move_items': [()],
+ 'get_books_for_category': [('tags', newstag), ('#formats', 'FMT1')],
'get_next_series_num_for': [('A Series One',)],
+ 'get_id_from_uuid':[('ddddd',), (db.uuid(1, True),)],
+ 'cover':[(0,), (1,), (2,)],
+ 'get_author_id': [('author one',), ('unknown',), ('xxxxx',)],
+ 'series_id': [(0,), (1,), (2,)],
+ 'publisher_id': [(0,), (1,), (2,)],
+ '@tags_older_than': [
+ ('News', None), ('Tag One', None), ('xxxx', None), ('Tag One', None, 'News'), ('News', None, 'xxxx'),
+ ('News', None, None, ['xxxxxxx']), ('News', None, 'Tag One', ['Author Two', 'Author One']),
+ ('News', timedelta(0), None, None), ('News', timedelta(100000)),
+ ],
+ 'format':[(1, 'FMT1', True), (2, 'FMT1', True), (0, 'xxxxxx')],
+ 'has_format':[(1, 'FMT1', True), (2, 'FMT1', True), (0, 'xxxxxx')],
+ 'sizeof_format':[(1, 'FMT1', True), (2, 'FMT1', True), (0, 'xxxxxx')],
+ '@format_files':[(0,),(1,),(2,)],
+ 'formats':[(0,),(1,),(2,)],
+ 'max_size':[(0,),(1,),(2,)],
+ 'format_hash':[(1, 'FMT1'),(1, 'FMT2'), (2, 'FMT1')],
'author_sort_from_authors': [(['Author One', 'Author Two', 'Unknown'],)],
'has_book':[(Metadata('title one'),), (Metadata('xxxx1111'),)],
+ 'has_id':[(1,), (2,), (3,), (9999,)],
+ 'id':[(1,), (2,), (0,),],
+ 'index':[(1,), (2,), (3,), ],
+ 'is_empty':[()],
+ 'count':[()],
'all_author_names':[()],
'all_tag_names':[()],
'all_series_names':[()],
@@ -198,15 +231,29 @@ class LegacyTest(BaseTest):
'books_in_series_of':[(0,), (1,), (2,)],
'books_with_same_title':[(Metadata(db.title(0)),), (Metadata(db.title(1)),), (Metadata('1234'),)],
}.iteritems():
+ fmt = lambda x: x
+ if meth[0] in {'!', '@'}:
+ fmt = {'!':dict, '@':frozenset}[meth[0]]
+ meth = meth[1:]
+ elif meth == 'get_authors_with_ids':
+ fmt = lambda val:{x[0]:tuple(x[1:]) for x in val}
for a in args:
- fmt = lambda x: x
- if meth[0] in {'!', '@'}:
- fmt = {'!':dict, '@':frozenset}[meth[0]]
- meth = meth[1:]
- elif meth == 'get_authors_with_ids':
- fmt = lambda val:{x[0]:tuple(x[1:]) for x in val}
self.assertEqual(fmt(getattr(db, meth)(*a)), fmt(getattr(ndb, meth)(*a)),
'The method: %s() returned different results for argument %s' % (meth, a))
+ d1, d2 = BytesIO(), BytesIO()
+ db.copy_cover_to(1, d1, True)
+ ndb.copy_cover_to(1, d2, True)
+ self.assertTrue(d1.getvalue() == d2.getvalue())
+ d1, d2 = BytesIO(), BytesIO()
+ db.copy_format_to(1, 'FMT1', d1, True)
+ ndb.copy_format_to(1, 'FMT1', d2, True)
+ self.assertTrue(d1.getvalue() == d2.getvalue())
+ old = db.get_data_as_dict(prefix='test-prefix')
+ new = ndb.get_data_as_dict(prefix='test-prefix')
+ for o, n in zip(old, new):
+ o = {type('')(k) if isinstance(k, bytes) else k:set(v) if isinstance(v, list) else v for k, v in o.iteritems()}
+ n = {k:set(v) if isinstance(v, list) else v for k, v in n.iteritems()}
+ self.assertEqual(o, n)
db.close()
# }}}
@@ -251,7 +298,7 @@ class LegacyTest(BaseTest):
# }}}
def test_legacy_adding_books(self): # {{{
- 'Test various adding books methods'
+ 'Test the various methods for adding and deleting books'
from calibre.ebooks.metadata.book.base import Metadata
legacy, old = self.init_legacy(self.cloned_library), self.init_old(self.cloned_library)
mi = Metadata('Added Book0', authors=('Added Author',))
@@ -308,6 +355,24 @@ class LegacyTest(BaseTest):
self.assertEqual(cache.field_for('authors', bid), ('calibre',))
self.assertEqual(cache.field_for('tags', bid), (_('News'), 'Events', 'one', 'two'))
+ self.assertTrue(legacy.cover(1, index_is_id=True))
+ origcov = legacy.cover(1, index_is_id=True)
+ self.assertTrue(legacy.has_cover(1))
+ legacy.remove_cover(1)
+ self.assertFalse(legacy.has_cover(1))
+ self.assertFalse(legacy.cover(1, index_is_id=True))
+ legacy.set_cover(3, origcov)
+ self.assertEqual(legacy.cover(3, index_is_id=True), origcov)
+ self.assertTrue(legacy.has_cover(3))
+
+ self.assertTrue(legacy.format(1, 'FMT1', index_is_id=True))
+ legacy.remove_format(1, 'FMT1', index_is_id=True)
+ self.assertIsNone(legacy.format(1, 'FMT1', index_is_id=True))
+
+ legacy.delete_book(1)
+ old.delete_book(1)
+ self.assertNotIn(1, legacy.all_ids())
+ legacy.dump_metadata((2,3))
old.close()
# }}}
@@ -325,14 +390,18 @@ class LegacyTest(BaseTest):
# Obsolete/broken methods
'author_id', # replaced by get_author_id
'books_for_author', # broken
- 'books_in_old_database', # unused
+ 'books_in_old_database', 'sizeof_old_database', # unused
+ 'migrate_old', # no longer supported
+ 'remove_unused_series', # superseded by clean API
# Internal API
'clean_user_categories', 'cleanup_tags', 'books_list_filter', 'conn', 'connect', 'construct_file_name',
'construct_path_name', 'clear_dirtied', 'commit_dirty_cache', 'initialize_database', 'initialize_dynamic',
'run_import_plugins', 'vacuum', 'set_path', 'row', 'row_factory', 'rows', 'rmtree', 'series_index_pat',
'import_old_database', 'dirtied_lock', 'dirtied_cache', 'dirty_queue_length', 'dirty_books_referencing',
- 'windows_check_if_files_in_use', 'get_metadata_for_dump', 'get_a_dirtied_book',
+ 'windows_check_if_files_in_use', 'get_metadata_for_dump', 'get_a_dirtied_book', 'dirtied_sequence',
+ 'format_filename_cache', 'format_metadata_cache', 'filter', 'create_version1', 'normpath', 'custom_data_adapters',
+ 'custom_table_names', 'custom_columns_in_meta', 'custom_tables',
}
SKIP_ARGSPEC = {
'__init__',
@@ -404,6 +473,49 @@ class LegacyTest(BaseTest):
def test_legacy_setters(self): # {{{
'Test setter methods that are directly equivalent in the old and new interface'
from calibre.ebooks.metadata.book.base import Metadata
+ from calibre.utils.date import now
+ n = now()
+ ndb = self.init_legacy(self.cloned_library)
+ amap = ndb.new_api.get_id_map('authors')
+ sorts = [(aid, 's%d' % aid) for aid in amap]
+ db = self.init_old(self.cloned_library)
+ run_funcs(self, db, ndb, (
+ ('+format_metadata', 1, 'FMT1', itemgetter('size')),
+ ('+format_metadata', 1, 'FMT2', itemgetter('size')),
+ ('+format_metadata', 2, 'FMT1', itemgetter('size')),
+ ('get_tags', 0), ('get_tags', 1), ('get_tags', 2),
+ ('is_tag_used', 'News'), ('is_tag_used', 'xchkjgfh'),
+ ('bulk_modify_tags', (1,), ['t1'], ['News']),
+ ('bulk_modify_tags', (2,), ['t1'], ['Tag One', 'Tag Two']),
+ ('bulk_modify_tags', (3,), ['t1', 't2', 't3']),
+ (db.clean,),
+ ('@all_tags',),
+ ('@tags', 0), ('@tags', 1), ('@tags', 2),
+
+ ('unapply_tags', 1, ['t1']),
+ ('unapply_tags', 2, ['xxxx']),
+ ('unapply_tags', 3, ['t2', 't3']),
+ (db.clean,),
+ ('@all_tags',),
+ ('@tags', 0), ('@tags', 1), ('@tags', 2),
+
+ ('update_last_modified', (1,), True, n), ('update_last_modified', (3,), True, n),
+ ('metadata_last_modified', 1, True), ('metadata_last_modified', 3, True),
+ ('set_sort_field_for_author', sorts[0][0], sorts[0][1]),
+ ('set_sort_field_for_author', sorts[1][0], sorts[1][1]),
+ ('set_sort_field_for_author', sorts[2][0], sorts[2][1]),
+ ('set_link_field_for_author', sorts[0][0], sorts[0][1]),
+ ('set_link_field_for_author', sorts[1][0], sorts[1][1]),
+ ('set_link_field_for_author', sorts[2][0], sorts[2][1]),
+ (db.refresh,),
+ ('author_sort', 0), ('author_sort', 1), ('author_sort', 2),
+ ))
+ omi = [db.get_metadata(x) for x in (0, 1, 2)]
+ nmi = [ndb.get_metadata(x) for x in (0, 1, 2)]
+ self.assertEqual([x.author_sort_map for x in omi], [x.author_sort_map for x in nmi])
+ self.assertEqual([x.author_link_map for x in omi], [x.author_link_map for x in nmi])
+ db.close()
+
ndb = self.init_legacy(self.cloned_library)
db = self.init_old(self.cloned_library)
@@ -412,7 +524,7 @@ class LegacyTest(BaseTest):
('set_author_sort', 3, 'new_aus'),
('set_comment', 1, ''), ('set_comment', 2, None), ('set_comment', 3, 'a comment\n'),
('set_has_cover', 1, True), ('set_has_cover', 2, True), ('set_has_cover', 3, 1),
- ('set_identifiers', 2, {'test':'', 'a':'b'}), ('set_identifiers', 3, {'id':'1', 'url':'http://acme.com'}), ('set_identifiers', 1, {}),
+ ('set_identifiers', 2, {'test':'', 'a':'b'}), ('set_identifiers', 3, {'id':'1', 'isbn':'9783161484100'}), ('set_identifiers', 1, {}),
('set_languages', 1, ('en',)),
('set_languages', 2, ()),
('set_languages', 3, ('deu', 'spa', 'fra')),
@@ -438,6 +550,7 @@ class LegacyTest(BaseTest):
('series', 0), ('series', 1), ('series', 2),
('series_index', 0), ('series_index', 1), ('series_index', 2),
('uuid', 0), ('uuid', 1), ('uuid', 2),
+ ('isbn', 0), ('isbn', 1), ('isbn', 2),
('@tags', 0), ('@tags', 1), ('@tags', 2),
('@all_tags',),
('@get_all_identifier_types',),
@@ -479,5 +592,162 @@ class LegacyTest(BaseTest):
('#tags', 0), ('#tags', 1), ('#tags', 2),
('authors', 0), ('authors', 1), ('authors', 2),
('publisher', 0), ('publisher', 1), ('publisher', 2),
+ ('delete_tag', 'T1'), ('delete_tag', 'T2'), ('delete_tag', 'Tag one'), ('delete_tag', 'News'),
+ (db.clean,), (db.refresh,),
+ ('@all_tags',),
+ ('#tags', 0), ('#tags', 1), ('#tags', 2),
))
+ db.close()
+
+ ndb = self.init_legacy(self.cloned_library)
+ db = self.init_old(self.cloned_library)
+ run_funcs(self, db, ndb, (
+ ('remove_all_tags', (1, 2, 3)),
+ (db.clean,),
+ ('@all_tags',),
+ ('@tags', 0), ('@tags', 1), ('@tags', 2),
+ ))
+ db.close()
+
+ ndb = self.init_legacy(self.cloned_library)
+ db = self.init_old(self.cloned_library)
+ a = {v:k for k, v in ndb.new_api.get_id_map('authors').iteritems()}['Author One']
+ t = {v:k for k, v in ndb.new_api.get_id_map('tags').iteritems()}['Tag One']
+ s = {v:k for k, v in ndb.new_api.get_id_map('series').iteritems()}['A Series One']
+ p = {v:k for k, v in ndb.new_api.get_id_map('publisher').iteritems()}['Publisher One']
+ run_funcs(self, db, ndb, (
+ ('rename_author', a, 'Author Two'),
+ ('rename_tag', t, 'News'),
+ ('rename_series', s, 'ss'),
+ ('rename_publisher', p, 'publisher one'),
+ (db.clean,),
+ (db.refresh,),
+ ('@all_tags',),
+ ('tags', 0), ('tags', 1), ('tags', 2),
+ ('series', 0), ('series', 1), ('series', 2),
+ ('publisher', 0), ('publisher', 1), ('publisher', 2),
+ ('series_index', 0), ('series_index', 1), ('series_index', 2),
+ ('authors', 0), ('authors', 1), ('authors', 2),
+ ('author_sort', 0), ('author_sort', 1), ('author_sort', 2),
+ ))
+ db.close()
+
+ # }}}
+
+ def test_legacy_custom(self): # {{{
+ 'Test the legacy API for custom columns'
+ ndb = self.init_legacy(self.cloned_library)
+ db = self.init_old(self.cloned_library)
+ # Test getting
+ run_funcs(self, db, ndb, (
+ ('all_custom', 'series'), ('all_custom', 'tags'), ('all_custom', 'rating'), ('all_custom', 'authors'), ('all_custom', None, 7),
+ ('get_next_cc_series_num_for', 'My Series One', 'series'), ('get_next_cc_series_num_for', 'My Series Two', 'series'),
+ ('is_item_used_in_multiple', 'My Tag One', 'tags'),
+ ('is_item_used_in_multiple', 'My Series One', 'series'),
+ ('$get_custom_items_with_ids', 'series'), ('$get_custom_items_with_ids', 'tags'), ('$get_custom_items_with_ids', 'float'),
+ ('$get_custom_items_with_ids', 'rating'), ('$get_custom_items_with_ids', 'authors'), ('$get_custom_items_with_ids', None, 7),
+ ))
+ for label in ('tags', 'series', 'authors', 'comments', 'rating', 'date', 'yesno', 'isbn', 'enum', 'formats', 'float', 'comp_tags'):
+ for func in ('get_custom', 'get_custom_extra', 'get_custom_and_extra'):
+ run_funcs(self, db, ndb, [(func, idx, label) for idx in range(3)])
+
+ # Test renaming/deleting
+ t = {v:k for k, v in ndb.new_api.get_id_map('#tags').iteritems()}['My Tag One']
+ t2 = {v:k for k, v in ndb.new_api.get_id_map('#tags').iteritems()}['My Tag Two']
+ a = {v:k for k, v in ndb.new_api.get_id_map('#authors').iteritems()}['My Author Two']
+ a2 = {v:k for k, v in ndb.new_api.get_id_map('#authors').iteritems()}['Custom One']
+ s = {v:k for k, v in ndb.new_api.get_id_map('#series').iteritems()}['My Series One']
+ run_funcs(self, db, ndb, (
+ ('delete_custom_item_using_id', t, 'tags'),
+ ('delete_custom_item_using_id', a, 'authors'),
+ ('rename_custom_item', t2, 't2', 'tags'),
+ ('rename_custom_item', a2, 'custom one', 'authors'),
+ ('rename_custom_item', s, 'My Series Two', 'series'),
+ ('delete_item_from_multiple', 'custom two', 'authors'),
+ (db.clean,),
+ (db.refresh,),
+ ('all_custom', 'series'), ('all_custom', 'tags'), ('all_custom', 'authors'),
+ ))
+ for label in ('tags', 'authors', 'series'):
+ run_funcs(self, db, ndb, [('get_custom_and_extra', idx, label) for idx in range(3)])
+ db.close()
+
+ ndb = self.init_legacy(self.cloned_library)
+ db = self.init_old(self.cloned_library)
+ # Test setting
+ run_funcs(self, db, ndb, (
+ ('-set_custom', 1, 't1 & t2', 'authors'),
+ ('-set_custom', 1, 't3 & t4', 'authors', None, True),
+ ('-set_custom', 3, 'test one & test Two', 'authors'),
+ ('-set_custom', 1, 'ijfkghkjdf', 'enum'),
+ ('-set_custom', 3, 'One', 'enum'),
+ ('-set_custom', 3, 'xxx', 'formats'),
+ ('-set_custom', 1, 'my tag two', 'tags', None, False, False, None, True, True),
+ (db.clean,), (db.refresh,),
+ ('all_custom', 'series'), ('all_custom', 'tags'), ('all_custom', 'authors'),
+ ))
+ for label in ('tags', 'series', 'authors', 'comments', 'rating', 'date', 'yesno', 'isbn', 'enum', 'formats', 'float', 'comp_tags'):
+ for func in ('get_custom', 'get_custom_extra', 'get_custom_and_extra'):
+ run_funcs(self, db, ndb, [(func, idx, label) for idx in range(3)])
+ db.close()
+
+ ndb = self.init_legacy(self.cloned_library)
+ db = self.init_old(self.cloned_library)
+ # Test setting bulk
+ run_funcs(self, db, ndb, (
+ ('set_custom_bulk', (1,2,3), 't1 & t2', 'authors'),
+ ('set_custom_bulk', (1,2,3), 'a series', 'series', None, False, False, (9, 10, 11)),
+ ('set_custom_bulk', (1,2,3), 't1', 'tags', None, True),
+ (db.clean,), (db.refresh,),
+ ('all_custom', 'series'), ('all_custom', 'tags'), ('all_custom', 'authors'),
+ ))
+ for label in ('tags', 'series', 'authors', 'comments', 'rating', 'date', 'yesno', 'isbn', 'enum', 'formats', 'float', 'comp_tags'):
+ for func in ('get_custom', 'get_custom_extra', 'get_custom_and_extra'):
+ run_funcs(self, db, ndb, [(func, idx, label) for idx in range(3)])
+ db.close()
+
+ ndb = self.init_legacy(self.cloned_library)
+ db = self.init_old(self.cloned_library)
+ # Test bulk multiple
+ run_funcs(self, db, ndb, (
+ ('set_custom_bulk_multiple', (1,2,3), ['t1'], ['My Tag One'], 'tags'),
+ (db.clean,), (db.refresh,),
+ ('all_custom', 'tags'),
+ ('get_custom', 0, 'tags'), ('get_custom', 1, 'tags'), ('get_custom', 2, 'tags'),
+ ))
+ db.close()
+
+ o = self.cloned_library
+ n = self.cloned_library
+ ndb, db = self.init_legacy(n), self.init_old(o)
+ ndb.create_custom_column('created', 'Created', 'text', True, True, {'moose':'cat'})
+ db.create_custom_column('created', 'Created', 'text', True, True, {'moose':'cat'})
+ db.close()
+ ndb, db = self.init_legacy(n), self.init_old(o)
+ self.assertEqual(db.custom_column_label_map['created'], ndb.backend.custom_field_metadata('created'))
+ num = db.custom_column_label_map['created']['num']
+ ndb.set_custom_column_metadata(num, is_editable=False, name='Crikey', display={})
+ db.set_custom_column_metadata(num, is_editable=False, name='Crikey', display={})
+ db.close()
+ ndb, db = self.init_legacy(n), self.init_old(o)
+ self.assertEqual(db.custom_column_label_map['created'], ndb.backend.custom_field_metadata('created'))
+ db.close()
+ ndb = self.init_legacy(n)
+ ndb.delete_custom_column('created')
+ ndb = self.init_legacy(n)
+ self.assertRaises(KeyError, ndb.custom_field_name, num=num)
+ # }}}
+
+ def test_legacy_original_fmt(self): # {{{
+ db, ndb = self.init_old(), self.init_legacy()
+ run_funcs(self, db, ndb, (
+ ('original_fmt', 1, 'FMT1'),
+ ('save_original_format', 1, 'FMT1'),
+ ('original_fmt', 1, 'FMT1'),
+ ('restore_original_format', 1, 'ORIGINAL_FMT1'),
+ ('original_fmt', 1, 'FMT1'),
+ ('%formats', 1, True),
+ ))
+ db.close()
+
# }}}
diff --git a/src/calibre/db/tests/reading.py b/src/calibre/db/tests/reading.py
index 24d80d33c7..fcf309ea66 100644
--- a/src/calibre/db/tests/reading.py
+++ b/src/calibre/db/tests/reading.py
@@ -149,8 +149,6 @@ class ReadingTest(BaseTest):
'#tags':[3, 2, 1],
'#yesno':[3, 1, 2],
'#comments':[3, 2, 1],
- # TODO: Add an empty book to the db and ensure that empty
- # fields sort the same as they do in db2
}.iteritems():
x = list(reversed(order))
self.assertEqual(order, cache.multisort([(field, True)],
diff --git a/src/calibre/db/tests/writing.py b/src/calibre/db/tests/writing.py
index c4918b4c4b..26f73964df 100644
--- a/src/calibre/db/tests/writing.py
+++ b/src/calibre/db/tests/writing.py
@@ -474,3 +474,72 @@ class WritingTest(BaseTest):
for bid in c.all_book_ids():
self.assertIn(c.field_for('#series', bid), (None, 'My Series One'))
# }}}
+
+ def test_rename_items(self): # {{{
+ ' Test renaming of many-(many,one) items '
+ cl = self.cloned_library
+ cache = self.init_cache(cl)
+ # Check that renaming authors updates author sort and path
+ a = {v:k for k, v in cache.get_id_map('authors').iteritems()}['Unknown']
+ self.assertEqual(cache.rename_items('authors', {a:'New Author'})[0], {3})
+ a = {v:k for k, v in cache.get_id_map('authors').iteritems()}['Author One']
+ self.assertEqual(cache.rename_items('authors', {a:'Author Two'})[0], {1, 2})
+ for c in (cache, self.init_cache(cl)):
+ self.assertEqual(c.all_field_names('authors'), {'New Author', 'Author Two'})
+ self.assertEqual(c.field_for('author_sort', 3), 'Author, New')
+ self.assertIn('New Author/', c.field_for('path', 3))
+ self.assertEqual(c.field_for('authors', 1), ('Author Two',))
+ self.assertEqual(c.field_for('author_sort', 1), 'Two, Author')
+
+ t = {v:k for k, v in cache.get_id_map('tags').iteritems()}['Tag One']
+ # Test case change
+ self.assertEqual(cache.rename_items('tags', {t:'tag one'}), ({1, 2}, {t:t}))
+ for c in (cache, self.init_cache(cl)):
+ self.assertEqual(c.all_field_names('tags'), {'tag one', 'Tag Two', 'News'})
+ self.assertEqual(set(c.field_for('tags', 1)), {'tag one', 'News'})
+ self.assertEqual(set(c.field_for('tags', 2)), {'tag one', 'Tag Two'})
+ # Test new name
+ self.assertEqual(cache.rename_items('tags', {t:'t1'})[0], {1,2})
+ for c in (cache, self.init_cache(cl)):
+ self.assertEqual(c.all_field_names('tags'), {'t1', 'Tag Two', 'News'})
+ self.assertEqual(set(c.field_for('tags', 1)), {'t1', 'News'})
+ self.assertEqual(set(c.field_for('tags', 2)), {'t1', 'Tag Two'})
+ # Test rename to existing
+ self.assertEqual(cache.rename_items('tags', {t:'Tag Two'})[0], {1,2})
+ for c in (cache, self.init_cache(cl)):
+ self.assertEqual(c.all_field_names('tags'), {'Tag Two', 'News'})
+ self.assertEqual(set(c.field_for('tags', 1)), {'Tag Two', 'News'})
+ self.assertEqual(set(c.field_for('tags', 2)), {'Tag Two'})
+ # Test on a custom column
+ t = {v:k for k, v in cache.get_id_map('#tags').iteritems()}['My Tag One']
+ self.assertEqual(cache.rename_items('#tags', {t:'My Tag Two'})[0], {2})
+ for c in (cache, self.init_cache(cl)):
+ self.assertEqual(c.all_field_names('#tags'), {'My Tag Two'})
+ self.assertEqual(set(c.field_for('#tags', 2)), {'My Tag Two'})
+
+ # Test a Many-one field
+ s = {v:k for k, v in cache.get_id_map('series').iteritems()}['A Series One']
+ # Test case change
+ self.assertEqual(cache.rename_items('series', {s:'a series one'}), ({1, 2}, {s:s}))
+ for c in (cache, self.init_cache(cl)):
+ self.assertEqual(c.all_field_names('series'), {'a series one'})
+ self.assertEqual(c.field_for('series', 1), 'a series one')
+ self.assertEqual(c.field_for('series_index', 1), 2.0)
+
+ # Test new name
+ self.assertEqual(cache.rename_items('series', {s:'series'})[0], {1, 2})
+ for c in (cache, self.init_cache(cl)):
+ self.assertEqual(c.all_field_names('series'), {'series'})
+ self.assertEqual(c.field_for('series', 1), 'series')
+ self.assertEqual(c.field_for('series', 2), 'series')
+ self.assertEqual(c.field_for('series_index', 1), 2.0)
+
+ s = {v:k for k, v in cache.get_id_map('#series').iteritems()}['My Series One']
+ # Test custom column with rename to existing
+ self.assertEqual(cache.rename_items('#series', {s:'My Series Two'})[0], {2})
+ for c in (cache, self.init_cache(cl)):
+ self.assertEqual(c.all_field_names('#series'), {'My Series Two'})
+ self.assertEqual(c.field_for('#series', 2), 'My Series Two')
+ self.assertEqual(c.field_for('#series_index', 1), 3.0)
+ self.assertEqual(c.field_for('#series_index', 2), 4.0)
+ # }}}
diff --git a/src/calibre/db/view.py b/src/calibre/db/view.py
index ecd5182232..bb9131e212 100644
--- a/src/calibre/db/view.py
+++ b/src/calibre/db/view.py
@@ -10,6 +10,7 @@ __docformat__ = 'restructuredtext en'
import weakref
from functools import partial
from itertools import izip, imap
+from future_builtins import map
from calibre.ebooks.metadata import title_sort
from calibre.utils.config_base import tweaks
@@ -119,6 +120,9 @@ class View(object):
self._map = tuple(sorted(self.cache.all_book_ids()))
self._map_filtered = tuple(self._map)
+ def count(self):
+ return len(self._map)
+
def get_property(self, id_or_index, index_is_id=False, loc=-1):
book_id = id_or_index if index_is_id else self._map_filtered[id_or_index]
return self._field_getters[loc](book_id)
@@ -161,6 +165,10 @@ class View(object):
def index_to_id(self, idx):
return self._map_filtered[idx]
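+ # id_to_index searches the full map, so it works even for books hidden by the current search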
+ def id_to_index(self, book_id):
+ return self._map.index(book_id)
+ row = index_to_id
+
def _get(self, field, idx, index_is_id=True, default_value=None, fmt=lambda x:x):
id_ = idx if index_is_id else self.index_to_id(idx)
if index_is_id and id_ not in self.cache.all_book_ids():
@@ -304,8 +312,17 @@ class View(object):
def refresh(self, field=None, ascending=True):
self._map = tuple(self.cache.all_book_ids())
self._map_filtered = tuple(self._map)
+ self.cache.clear_caches()
if field is not None:
self.sort(field, ascending)
if self.search_restriction or self.base_restriction:
self.search('', return_matches=False)
+ def refresh_ids(self, db, ids):
+ self.cache.clear_caches(book_ids=ids)
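+ # A ValueError from id_to_index means one of the ids is not in the view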
+ try:
+ return list(map(self.id_to_index, ids))
+ except ValueError:
+ pass
+ return None
+
diff --git a/src/calibre/devices/android/driver.py b/src/calibre/devices/android/driver.py
index 1880324fdc..a0eb021289 100644
--- a/src/calibre/devices/android/driver.py
+++ b/src/calibre/devices/android/driver.py
@@ -151,6 +151,7 @@ class ANDROID(USBMS):
0x61ce : [0x226, 0x227, 0x9999, 0x100],
0x618e : [0x226, 0x227, 0x9999, 0x100],
0x6205 : [0x226, 0x227, 0x9999, 0x100],
+ 0x6234 : [0x231],
},
# Archos
@@ -254,7 +255,7 @@ class ANDROID(USBMS):
'UMS_COMPOSITE', 'PRO', '.KOBO_VOX', 'SGH-T989_CARD', 'SGH-I727',
'USB_FLASH_DRIVER', 'ANDROID', 'MID7042', '7035', 'VIEWPAD_7E',
'NOVO7', 'ADVANCED', 'TABLET_PC', 'F', 'E400_SD_CARD', 'ST80208-1', 'XT894',
- '_USB', 'PROD_TAB13-201', 'URFPAD2', 'MID1126',
+ '_USB', 'PROD_TAB13-201', 'URFPAD2', 'MID1126', 'ANDROID_PLATFORM',
]
OSX_MAIN_MEM = 'Android Device Main Memory'
diff --git a/src/calibre/devices/eb600/driver.py b/src/calibre/devices/eb600/driver.py
index f647c28a75..e51633f3a1 100644
--- a/src/calibre/devices/eb600/driver.py
+++ b/src/calibre/devices/eb600/driver.py
@@ -86,7 +86,7 @@ class COOL_ER(EB600):
FORMATS = ['epub', 'mobi', 'prc', 'pdf', 'txt']
VENDOR_NAME = 'COOL-ER'
- WINDOWS_MAIN_MEM = 'EREADER'
+ WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'EREADER'
OSX_MAIN_MEM = 'COOL-ER eReader Media'
diff --git a/src/calibre/devices/misc.py b/src/calibre/devices/misc.py
index e35db8f03d..13ae870fd1 100644
--- a/src/calibre/devices/misc.py
+++ b/src/calibre/devices/misc.py
@@ -229,7 +229,8 @@ class TREKSTOR(USBMS):
0x0067, # This is for the Pyrus Mini
0x006f, # This is for the Pyrus Maxi
0x003e, # This is for the EBOOK_PLAYER_5M https://bugs.launchpad.net/bugs/792091
- 0x5cL, # This is for the 4ink http://www.mobileread.com/forums/showthread.php?t=191318
+ 0x05cL, # This is for the 4ink http://www.mobileread.com/forums/showthread.php?t=191318
+ 0x006c, # This is for the 4ink http://www.mobileread.com/forums/showthread.php?t=218273
]
BCD = [0x0002, 0x100, 0x0222]
diff --git a/src/calibre/ebooks/conversion/plugins/epub_output.py b/src/calibre/ebooks/conversion/plugins/epub_output.py
index f09f2560b0..5569be4304 100644
--- a/src/calibre/ebooks/conversion/plugins/epub_output.py
+++ b/src/calibre/ebooks/conversion/plugins/epub_output.py
@@ -105,14 +105,23 @@ class EPUBOutput(OutputFormatPlugin):
' EPUB, putting all files into the top level.')
),
+ OptionRecommendation(name='epub_inline_toc', recommended_value=False,
+ help=_('Insert an inline Table of Contents that will appear as part of the main book content.')
+ ),
+
+ OptionRecommendation(name='epub_toc_at_end', recommended_value=False,
+ help=_('Put the inserted inline Table of Contents at the end of the book instead of the start.')
+ ),
+
+ OptionRecommendation(name='toc_title', recommended_value=None,
+ help=_('Title for any generated in-line table of contents.')
+ ),
])
recommendations = set([('pretty_print', True, OptionRecommendation.HIGH)])
-
-
- def workaround_webkit_quirks(self): # {{{
+ def workaround_webkit_quirks(self): # {{{
from calibre.ebooks.oeb.base import XPath
for x in self.oeb.spine:
root = x.data
@@ -128,13 +137,13 @@ class EPUBOutput(OutputFormatPlugin):
pre.tag = 'div'
# }}}
- def upshift_markup(self): # {{{
+ def upshift_markup(self): # {{{
'Upgrade markup to comply with XHTML 1.1 where possible'
from calibre.ebooks.oeb.base import XPath, XML
for x in self.oeb.spine:
root = x.data
if (not root.get(XML('lang'))) and (root.get('lang')):
- root.set(XML('lang'), root.get('lang'))
+ root.set(XML('lang'), root.get('lang'))
body = XPath('//h:body')(root)
if body:
body = body[0]
@@ -159,12 +168,17 @@ class EPUBOutput(OutputFormatPlugin):
else:
seen_names.add(name)
-
# }}}
-
def convert(self, oeb, output_path, input_plugin, opts, log):
self.log, self.opts, self.oeb = log, opts, oeb
+ if self.opts.epub_inline_toc:
+ from calibre.ebooks.mobi.writer8.toc import TOCAdder
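+ # Reuse the MOBI inline ToC generator; it operates on the shared OEB tree, so it works for EPUB as well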
+ opts.mobi_toc_at_start = not opts.epub_toc_at_end
+ opts.mobi_passthrough = False
+ opts.no_inline_toc = False
+ TOCAdder(oeb, opts, replace_previous_inline_toc=True, ignore_existing_toc=True)
+
if self.opts.epub_flatten:
from calibre.ebooks.oeb.transforms.filenames import FlatFilenames
FlatFilenames()(oeb, opts)
@@ -234,7 +248,7 @@ class EPUBOutput(OutputFormatPlugin):
oeb_output = plugin_for_output_format('oeb')
oeb_output.convert(oeb, tdir, input_plugin, opts, log)
opf = [x for x in os.listdir(tdir) if x.endswith('.opf')][0]
- self.condense_ncx([os.path.join(tdir, x) for x in os.listdir(tdir)\
+ self.condense_ncx([os.path.join(tdir, x) for x in os.listdir(tdir)
if x.endswith('.ncx')][0])
encryption = None
if encrypted_fonts:
@@ -261,7 +275,7 @@ class EPUBOutput(OutputFormatPlugin):
zf.extractall(path=opts.extract_to)
self.log.info('EPUB extracted to', opts.extract_to)
- def encrypt_fonts(self, uris, tdir, uuid): # {{{
+ def encrypt_fonts(self, uris, tdir, uuid): # {{{
from binascii import unhexlify
key = re.sub(r'[^a-fA-F0-9]', '', uuid)
@@ -301,14 +315,14 @@ class EPUBOutput(OutputFormatPlugin):
'''%(uri.replace('"', '\\"')))
if fonts:
- ans = '''<encryption
- xmlns="urn:oasis:names:tc:opendocument:xmlns:container"
- xmlns:enc="http://www.w3.org/2001/04/xmlenc#"
- xmlns:deenc="http://ns.adobe.com/digitaleditions/enc">
+ ans = '''<encryption
+ xmlns="urn:oasis:names:tc:opendocument:xmlns:container"
+ xmlns:enc="http://www.w3.org/2001/04/xmlenc#"
+ xmlns:deenc="http://ns.adobe.com/digitaleditions/enc">
 '''
- ans += (u'\n'.join(fonts)).encode('utf-8')
- ans += '\n</encryption>'
- return ans
+ ans += (u'\n'.join(fonts)).encode('utf-8')
+ ans += '\n</encryption>'
+ return ans
# }}}
def condense_ncx(self, ncx_path):
@@ -323,7 +337,7 @@ class EPUBOutput(OutputFormatPlugin):
compressed = etree.tostring(tree.getroot(), encoding='utf-8')
open(ncx_path, 'wb').write(compressed)
- def workaround_ade_quirks(self): # {{{
+ def workaround_ade_quirks(self): # {{{
'''
Perform various markup transforms to get the output to render correctly
in the quirky ADE.
@@ -462,7 +476,7 @@ class EPUBOutput(OutputFormatPlugin):
# }}}
- def workaround_sony_quirks(self): # {{{
+ def workaround_sony_quirks(self): # {{{
'''
Perform toc link transforms to alleviate slow loading.
'''
diff --git a/src/calibre/ebooks/metadata/sources/worker.py b/src/calibre/ebooks/metadata/sources/worker.py
index 1c83f965e1..ebe764c68f 100644
--- a/src/calibre/ebooks/metadata/sources/worker.py
+++ b/src/calibre/ebooks/metadata/sources/worker.py
@@ -50,14 +50,13 @@ def merge_result(oldmi, newmi, ensure_fields=None):
return newmi
def main(do_identify, covers, metadata, ensure_fields, tdir):
- os.chdir(tdir)
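+ # Write results into tdir explicitly instead of changing the process working directory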
failed_ids = set()
failed_covers = set()
all_failed = True
log = GUILog()
for book_id, mi in metadata.iteritems():
- mi = OPF(BytesIO(mi), basedir=os.getcwdu(),
+ mi = OPF(BytesIO(mi), basedir=tdir,
populate_spine=False).to_book_metadata()
title, authors, identifiers = mi.title, mi.authors, mi.identifiers
cdata = None
@@ -77,7 +76,7 @@ def main(do_identify, covers, metadata, ensure_fields, tdir):
if not mi.is_null('rating'):
# set_metadata expects a rating out of 10
mi.rating *= 2
- with open('%d.mi'%book_id, 'wb') as f:
+ with open(os.path.join(tdir, '%d.mi'%book_id), 'wb') as f:
f.write(metadata_to_opf(mi, default_lang='und'))
else:
log.error('Failed to download metadata for', title)
@@ -89,11 +88,11 @@ def main(do_identify, covers, metadata, ensure_fields, tdir):
if cdata is None:
failed_covers.add(book_id)
else:
- with open('%d.cover'%book_id, 'wb') as f:
+ with open(os.path.join(tdir, '%d.cover'%book_id), 'wb') as f:
f.write(cdata[-1])
all_failed = False
- with open('%d.log'%book_id, 'wb') as f:
+ with open(os.path.join(tdir, '%d.log'%book_id), 'wb') as f:
f.write(log.plain_text.encode('utf-8'))
return failed_ids, failed_covers, all_failed
diff --git a/src/calibre/ebooks/mobi/writer8/toc.py b/src/calibre/ebooks/mobi/writer8/toc.py
index 7bae35ae98..640e8bec5f 100644
--- a/src/calibre/ebooks/mobi/writer8/toc.py
+++ b/src/calibre/ebooks/mobi/writer8/toc.py
@@ -34,9 +34,17 @@ TEMPLATE = '''