Merge branch 'kovidgoyal/master'
This commit is contained in: commit a761e5a9d6
@@ -20,6 +20,55 @@
# new recipes:
# - title:

- version: 0.9.40
  date: 2013-07-19

  new features:
    - title: "EPUB Output: Add an option to insert an inline Table of Contents into the main text."
      tickets: [1201006]

    - title: "Driver for LG Android phone"
      tickets: [1202013]

    - title: "When matching books in the library against the device manually, pre-fill the search field with the book title"
      tickets: [1200826]

  bug fixes:
    - title: "PDF Input: Fix a regression that caused some images to be flipped when converting PDF files that use image rotation operators."
      tickets: [1201083]

    - title: "Fix regression that caused incorrect font size in drop caps generated by the DOCX input plugin"

    - title: "Get Books: Fix searching for title and author returning some extra matches, if the title starts with an article like the, a or an."
      tickets: [1200012]

    - title: "PDF Output: Fix extra blank page being inserted at the start of the chapter when converting some EPUB files from feedbooks"

    - title: "PDF Output: Work around a bug in WebKit's getBoundingClientRect() method that could cause links to occasionally point to incorrect locations."
      tickets: [1202390]

    - title: "E-book viewer: Fix a bug that could cause the reported position to be incorrect immediately after opening a previously opened book. This also fixes the Back button not working if a link is clicked on the page immediately after opening the book."

    - title: "Fix memory card not being detected for Elonex 621 on Windows"

    - title: "Fix regression in the last release that broke auto-conversion of ebooks when sending to device/sending by email."
      tickets: [1200864]

    - title: "Get Books: Update Amazon plugins for website changes"

    - title: "Allow using non-ASCII characters in email passwords."
      tickets: [1202825]

  improved recipes:
    - Galaxy's Edge

  new recipes:
    - title: Il Foglio
      author: faber1971

    - title: Le Monde Diplomatique and Acrimed
      author: Gaetan Lehmann

- version: 0.9.39
  date: 2013-07-12

@@ -840,6 +840,19 @@ If you still cannot get the installer to work and you are on windows, you can us

My antivirus program claims |app| is a virus/trojan?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. note::
    As of July 2013, McAfee SiteAdvisor has started warning that
    http://calibre-ebook.com is unsafe, with no stated reason or justification.
    McAfee is wrong; the mistake has been reported to them by several people,
    but they have not corrected it. McAfee SiteAdvisor is a notoriously
    unreliable service, see for example
    `this page <http://www.naturalnews.com/041170_McAfee_Site_Advisor_false_information.html>`_ or
    `this page <http://www.snapfiles.com/siteadvisor.html>`_ or
    `this Wikipedia entry <http://en.wikipedia.org/wiki/McAfee_SiteAdvisor#Criticism>`_.
    We strongly urge you to stop using McAfee products and find a more
    competent security provider to give your business to.
    Instructions on how to `uninstall McAfee SiteAdvisor <http://service.mcafee.com/faqdocument.aspx?id=TS100162>`_.

The first thing to check is that you are downloading |app| from the official
website: `<http://calibre-ebook.com/download>`_. |app| is a very popular program
and unscrupulous people try to set up websites offering it for download to fool
@@ -14,19 +14,12 @@ class GalaxyEdge(BasicNewsRecipe):

    auto_cleanup = True

    #keep_only_tags = [dict(id='content')]
    #remove_tags = [dict(attrs={'class':['article-links', 'breadcr']}),
        #dict(id=['email-section', 'right-column', 'printfooter', 'topover',
            #'slidebox', 'th_footer'])]

    extra_css = '.photo-caption { font-size: smaller }'

    def parse_index(self):
        soup = self.index_to_soup('http://www.galaxysedge.com/')
        main = soup.find('table', attrs={'width':'911'})
        toc = main.find('td', attrs={'width':'225'})

        main = soup.find('table', attrs={'width':'944'})
        toc = main.find('td', attrs={'width':'204'})

        current_section = None
        current_articles = []
@@ -72,37 +65,3 @@ class GalaxyEdge(BasicNewsRecipe):

        return feeds


    #def preprocess_raw_html(self, raw, url):
        #return raw.replace('<body><p>', '<p>').replace('</p></body>', '</p>')

    #def postprocess_html(self, soup, first_fetch):
        #for t in soup.findAll(['table', 'tr', 'td','center']):
            #t.name = 'div'
        #return soup

    #def parse_index(self):
        #today = time.strftime('%Y-%m-%d')
        #soup = self.index_to_soup(
            #'http://www.thehindu.com/todays-paper/tp-index/?date=' + today)
        #div = soup.find(id='left-column')
        #feeds = []
        #current_section = None
        #current_articles = []
        #for x in div.findAll(['h3', 'div']):
            #if current_section and x.get('class', '') == 'tpaper':
                #a = x.find('a', href=True)
                #if a is not None:
                    #current_articles.append({'url':a['href']+'?css=print',
                        #'title':self.tag_to_string(a), 'date': '',
                        #'description':''})
            #if x.name == 'h3':
                #if current_section and current_articles:
                    #feeds.append((current_section, current_articles))
                #current_section = self.tag_to_string(x)
                #current_articles = []
        #return feeds

16  recipes/il_foglio.recipe  Normal file
@@ -0,0 +1,16 @@
from calibre.web.feeds.news import BasicNewsRecipe

class AdvancedUserRecipe1373969939(BasicNewsRecipe):
    title = u'Il Foglio - Editoriali'
    oldest_article = 1
    max_articles_per_feed = 10
    auto_cleanup = False
    keep_only_tags = [
        dict(name='div', attrs={'class':'sec_item'})
    ]
    feeds = [(u'Il Foglio - Editoriali', u'http://feed43.com/8814237344800115.xml')]
    no_stylesheets = True
    __author__ = 'faber1971'
    description = 'Leading articles from an Italian newspaper - v1.00 (16 July, 2013)'
    language = 'it'
    masthead_url = 'http://www.ilfoglio.it/media/img/interface/logo_testata_small.gif'
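
A recipe like this can be test-built straight from the command line; calibre's
ebook-convert accepts a .recipe file as input, and its --test mode fetches only
a couple of articles per feed, which is a quick way to verify keep_only_tags
and the feed URL (the output name is illustrative):

ebook-convert il_foglio.recipe il_foglio.epub --test -vv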

Binary file not shown.
@@ -22,6 +22,7 @@ mkdir -p /root/staging /root/work/vim /srv/download /srv/manual

scp .zshrc .vimrc server:
scp -r ~/work/vim/zsh-syntax-highlighting server:work/vim
scp -r ~/work/vim/zsh-history-substring-search server:work/vim

If the server has a backup hard disk, mount it at /mnt/backup and edit /etc/fstab so that it is auto-mounted.
Then, add the following to crontab::
@@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
numeric_version = (0, 9, 39)
numeric_version = (0, 9, 40)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

@@ -54,6 +54,70 @@ def _get_series_values(val):
        pass
    return (val, None)

def get_data_as_dict(self, prefix=None, authors_as_string=False, ids=None):
    '''
    Return all metadata stored in the database as a dict. Includes paths to
    the cover and each format.

    :param prefix: The prefix for all paths. By default, the prefix is the
        absolute path to the library folder.
    :param ids: Set of ids to return the data for. If None, return data for
        all entries in the database.
    '''
    import os
    from calibre.ebooks.metadata import authors_to_string
    backend = getattr(self, 'backend', self)  # Works with both old and legacy interfaces
    if prefix is None:
        prefix = backend.library_path
    fdata = backend.custom_column_num_map

    FIELDS = set(['title', 'sort', 'authors', 'author_sort', 'publisher',
        'rating', 'timestamp', 'size', 'tags', 'comments', 'series',
        'series_index', 'uuid', 'pubdate', 'last_modified', 'identifiers',
        'languages']).union(set(fdata))
    for x, data in fdata.iteritems():
        if data['datatype'] == 'series':
            FIELDS.add('%d_index'%x)
    data = []
    for record in self.data:
        if record is None:
            continue
        db_id = record[self.FIELD_MAP['id']]
        if ids is not None and db_id not in ids:
            continue
        x = {}
        for field in FIELDS:
            x[field] = record[self.FIELD_MAP[field]]
        data.append(x)
        x['id'] = db_id
        x['formats'] = []
        isbn = self.isbn(db_id, index_is_id=True)
        x['isbn'] = isbn if isbn else ''
        if not x['authors']:
            x['authors'] = _('Unknown')
        x['authors'] = [i.replace('|', ',') for i in x['authors'].split(',')]
        if authors_as_string:
            x['authors'] = authors_to_string(x['authors'])
        x['tags'] = [i.replace('|', ',').strip() for i in x['tags'].split(',')] if x['tags'] else []
        path = os.path.join(prefix, self.path(record[self.FIELD_MAP['id']], index_is_id=True))
        x['cover'] = os.path.join(path, 'cover.jpg')
        if not record[self.FIELD_MAP['cover']]:
            x['cover'] = None
        formats = self.formats(record[self.FIELD_MAP['id']], index_is_id=True)
        if formats:
            for fmt in formats.split(','):
                path = self.format_abspath(x['id'], fmt, index_is_id=True)
                if path is None:
                    continue
                if prefix != self.library_path:
                    path = os.path.relpath(path, self.library_path)
                    path = os.path.join(prefix, path)
                x['formats'].append(path)
                x['fmt_'+fmt.lower()] = path
            x['available_formats'] = [i.upper() for i in formats.split(',')]

    return data
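
For orientation, a minimal usage sketch. It assumes `db` is an initialized
library database object onto which this function has been grafted as a method
(the legacy interface later in this commit imports it for exactly that
purpose); the keys come from the FIELDS set above:

books = db.get_data_as_dict(authors_as_string=True)
for book in books:
    print book['id'], book['title'], book.get('available_formats', [])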

'''
Rewrite of the calibre database backend.

@@ -107,15 +171,13 @@ Various things that require other things before they can be migrated:
    1. From initialize_dynamic(): set_saved_searches,
       load_user_template_functions. Also add custom
       columns/categories/searches info into
       self.field_metadata. Finally, implement metadata dirtied
       functionality.
       self.field_metadata.
    2. Catching DatabaseException and sqlite.Error when creating new
       libraries/switching/on calibre startup.
    3. From refresh in the legacy interface: Remember to flush the composite
       column template cache.
    3. Port library/restore.py
    4. Replace the metadatabackup thread with the new implementation when using the new backend.
    5. In the new API refresh() does not re-read from disk. That might break a
       few things, for example content server reloading on db change as well as
       dump/restore of db?
    6. grep the sources for TODO
    5. grep the sources for TODO
    6. Check that content server reloading on metadata.db change, metadata
       backup, refresh gui on calibredb add and moving libraries all work (check
       them on windows as well for file locking issues)
'''

@@ -8,7 +8,7 @@ __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

# Imports {{{
import os, shutil, uuid, json, glob, time, cPickle
import os, shutil, uuid, json, glob, time, cPickle, hashlib
from functools import partial

import apsw
@@ -17,7 +17,9 @@ from calibre import isbytestring, force_unicode, prints
from calibre.constants import (iswindows, filesystem_encoding,
        preferred_encoding)
from calibre.ptempfile import PersistentTemporaryFile
from calibre.db import SPOOL_SIZE
from calibre.db.schema_upgrades import SchemaUpgrade
from calibre.db.errors import NoSuchFormat
from calibre.library.field_metadata import FieldMetadata
from calibre.ebooks.metadata import title_sort, author_to_author_sort
from calibre.utils.icu import sort_key
@@ -40,6 +42,8 @@ Differences in semantics from pysqlite:
    3. There is no executescript

'''
CUSTOM_DATA_TYPES = frozenset(['rating', 'text', 'comments', 'datetime',
    'int', 'float', 'bool', 'series', 'composite', 'enumeration'])


class DynamicFilter(object):  # {{{
@@ -547,6 +551,7 @@ class DB(object):

        # Load metadata for custom columns
        self.custom_column_label_map, self.custom_column_num_map = {}, {}
        self.custom_column_num_to_label_map = {}
        triggers = []
        remove = []
        custom_tables = self.custom_tables
@@ -584,6 +589,7 @@ class DB(object):

            self.custom_column_num_map[data['num']] = \
                self.custom_column_label_map[data['label']] = data
            self.custom_column_num_to_label_map[data['num']] = data['label']

            # Create Foreign Key triggers
            if data['normalized']:
@@ -783,6 +789,194 @@ class DB(object):
            self._conn = Connection(self.dbpath)
        return self._conn

    def custom_field_name(self, label=None, num=None):
        if label is not None:
            return self.field_metadata.custom_field_prefix + label
        return self.field_metadata.custom_field_prefix + self.custom_column_num_to_label_map[num]

    def custom_field_metadata(self, label=None, num=None):
        if label is not None:
            return self.custom_column_label_map[label]
        return self.custom_column_num_map[num]

    def set_custom_column_metadata(self, num, name=None, label=None, is_editable=None, display=None):
        changed = False
        if name is not None:
            self.conn.execute('UPDATE custom_columns SET name=? WHERE id=?', (name, num))
            changed = True
        if label is not None:
            self.conn.execute('UPDATE custom_columns SET label=? WHERE id=?', (label, num))
            changed = True
        if is_editable is not None:
            self.conn.execute('UPDATE custom_columns SET editable=? WHERE id=?', (bool(is_editable), num))
            self.custom_column_num_map[num]['is_editable'] = bool(is_editable)
            changed = True
        if display is not None:
            self.conn.execute('UPDATE custom_columns SET display=? WHERE id=?', (json.dumps(display), num))
            changed = True
        return changed

    def create_custom_column(self, label, name, datatype, is_multiple, editable=True, display={}):  # {{{
        import re
        if not label:
            raise ValueError(_('No label was provided'))
        if re.match('^\w*$', label) is None or not label[0].isalpha() or label.lower() != label:
            raise ValueError(_('The label must contain only lower case letters, digits and underscores, and start with a letter'))
        if datatype not in CUSTOM_DATA_TYPES:
            raise ValueError('%r is not a supported data type'%datatype)
        normalized = datatype not in ('datetime', 'comments', 'int', 'bool',
            'float', 'composite')
        is_multiple = is_multiple and datatype in ('text', 'composite')
        self.conn.execute(
                ('INSERT INTO '
                'custom_columns(label,name,datatype,is_multiple,editable,display,normalized)'
                'VALUES (?,?,?,?,?,?,?)'),
                (label, name, datatype, is_multiple, editable, json.dumps(display), normalized))
        num = self.conn.last_insert_rowid()

        if datatype in ('rating', 'int'):
            dt = 'INT'
        elif datatype in ('text', 'comments', 'series', 'composite', 'enumeration'):
            dt = 'TEXT'
        elif datatype in ('float',):
            dt = 'REAL'
        elif datatype == 'datetime':
            dt = 'timestamp'
        elif datatype == 'bool':
            dt = 'BOOL'
        collate = 'COLLATE NOCASE' if dt == 'TEXT' else ''
        table, lt = self.custom_table_names(num)
        if normalized:
            if datatype == 'series':
                s_index = 'extra REAL,'
            else:
                s_index = ''
            lines = [
                '''\
                CREATE TABLE %s(
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    value %s NOT NULL %s,
                    UNIQUE(value));
                '''%(table, dt, collate),

                'CREATE INDEX %s_idx ON %s (value %s);'%(table, table, collate),

                '''\
                CREATE TABLE %s(
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    book INTEGER NOT NULL,
                    value INTEGER NOT NULL,
                    %s
                    UNIQUE(book, value)
                    );'''%(lt, s_index),

                'CREATE INDEX %s_aidx ON %s (value);'%(lt,lt),
                'CREATE INDEX %s_bidx ON %s (book);'%(lt,lt),

                '''\
                CREATE TRIGGER fkc_update_{lt}_a
                        BEFORE UPDATE OF book ON {lt}
                        BEGIN
                            SELECT CASE
                                WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
                                THEN RAISE(ABORT, 'Foreign key violation: book not in books')
                            END;
                        END;
                CREATE TRIGGER fkc_update_{lt}_b
                        BEFORE UPDATE OF author ON {lt}
                        BEGIN
                            SELECT CASE
                                WHEN (SELECT id from {table} WHERE id=NEW.value) IS NULL
                                THEN RAISE(ABORT, 'Foreign key violation: value not in {table}')
                            END;
                        END;
                CREATE TRIGGER fkc_insert_{lt}
                        BEFORE INSERT ON {lt}
                        BEGIN
                            SELECT CASE
                                WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
                                THEN RAISE(ABORT, 'Foreign key violation: book not in books')
                                WHEN (SELECT id from {table} WHERE id=NEW.value) IS NULL
                                THEN RAISE(ABORT, 'Foreign key violation: value not in {table}')
                            END;
                        END;
                CREATE TRIGGER fkc_delete_{lt}
                        AFTER DELETE ON {table}
                        BEGIN
                            DELETE FROM {lt} WHERE value=OLD.id;
                        END;

                CREATE VIEW tag_browser_{table} AS SELECT
                    id,
                    value,
                    (SELECT COUNT(id) FROM {lt} WHERE value={table}.id) count,
                    (SELECT AVG(r.rating)
                     FROM {lt},
                          books_ratings_link as bl,
                          ratings as r
                     WHERE {lt}.value={table}.id and bl.book={lt}.book and
                           r.id = bl.rating and r.rating <> 0) avg_rating,
                    value AS sort
                FROM {table};

                CREATE VIEW tag_browser_filtered_{table} AS SELECT
                    id,
                    value,
                    (SELECT COUNT({lt}.id) FROM {lt} WHERE value={table}.id AND
                        books_list_filter(book)) count,
                    (SELECT AVG(r.rating)
                     FROM {lt},
                          books_ratings_link as bl,
                          ratings as r
                     WHERE {lt}.value={table}.id AND bl.book={lt}.book AND
                           r.id = bl.rating AND r.rating <> 0 AND
                           books_list_filter(bl.book)) avg_rating,
                    value AS sort
                FROM {table};

                '''.format(lt=lt, table=table),

            ]
        else:
            lines = [
                '''\
                CREATE TABLE %s(
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    book INTEGER,
                    value %s NOT NULL %s,
                    UNIQUE(book));
                '''%(table, dt, collate),

                'CREATE INDEX %s_idx ON %s (book);'%(table, table),

                '''\
                CREATE TRIGGER fkc_insert_{table}
                        BEFORE INSERT ON {table}
                        BEGIN
                            SELECT CASE
                                WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
                                THEN RAISE(ABORT, 'Foreign key violation: book not in books')
                            END;
                        END;
                CREATE TRIGGER fkc_update_{table}
                        BEFORE UPDATE OF book ON {table}
                        BEGIN
                            SELECT CASE
                                WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
                                THEN RAISE(ABORT, 'Foreign key violation: book not in books')
                            END;
                        END;
                '''.format(table=table),
            ]
        script = ' \n'.join(lines)
        self.conn.execute(script)
        return num
    # }}}
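
To make the datatype-to-SQL mapping above concrete, a hypothetical call
creating a multi-valued text column (the label and the `db` instance are
illustrative, not part of this commit):

# Hypothetical sketch: 'db' is assumed to be an initialized DB instance.
num = db.create_custom_column(
    label='genres',      # must be lower case and start with a letter
    name='Genres',       # display name shown in the GUI
    datatype='text',     # stored as TEXT with COLLATE NOCASE
    is_multiple=True)    # a book may have many values
# datatype 'text' is normalized, so this takes the first branch above and
# creates a value table plus a book<->value link table with FK triggers.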

    def delete_custom_column(self, label=None, num=None):
        data = self.custom_field_metadata(label, num)
        self.conn.execute('UPDATE custom_columns SET mark_for_delete=1 WHERE id=?', (data['num'],))

    def close(self):
        if self._conn is not None:
            self._conn.close()
@@ -926,6 +1120,19 @@ class DB(object):
            shutil.copyfile(candidates[0], fmt_path)
            return fmt_path

    def format_hash(self, book_id, fmt, fname, path):
        path = self.format_abspath(book_id, fmt, fname, path)
        if path is None:
            raise NoSuchFormat('Record %d has no fmt: %s'%(book_id, fmt))
        sha = hashlib.sha256()
        with lopen(path, 'rb') as f:
            while True:
                raw = f.read(SPOOL_SIZE)
                sha.update(raw)
                if len(raw) < SPOOL_SIZE:
                    break
        return sha.hexdigest()
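
The loop above streams the file through SHA-256 in SPOOL_SIZE chunks rather
than reading it into memory at once. The same pattern, as a self-contained
sketch with an arbitrary chunk size:

import hashlib

def file_sha256(path, chunk_size=512 * 1024):  # chunk size is illustrative
    sha = hashlib.sha256()
    with open(path, 'rb') as f:
        while True:
            raw = f.read(chunk_size)
            sha.update(raw)
            if len(raw) < chunk_size:  # a short read means EOF was reached
                break
    return sha.hexdigest()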

    def format_metadata(self, book_id, fmt, fname, path):
        path = self.format_abspath(book_id, fmt, fname, path)
        ans = {}
@@ -948,6 +1155,13 @@ class DB(object):
                import traceback
                traceback.print_exc()

    def cover_last_modified(self, path):
        path = os.path.abspath(os.path.join(self.library_path, path, 'cover.jpg'))
        try:
            return utcfromtimestamp(os.stat(path).st_mtime)
        except EnvironmentError:
            pass  # Cover doesn't exist

    def copy_cover_to(self, path, dest, windows_atomic_move=None, use_hardlink=False):
        path = os.path.abspath(os.path.join(self.library_path, path, 'cover.jpg'))
        if windows_atomic_move is not None:
@@ -1238,5 +1452,59 @@ class DB(object):
        options = [(book_id, fmt.upper(), buffer(cPickle.dumps(data, -1))) for book_id, data in options.iteritems()]
        self.conn.executemany('INSERT OR REPLACE INTO conversion_options(book,format,data) VALUES (?,?,?)', options)

    def get_top_level_move_items(self, all_paths):
        items = set(os.listdir(self.library_path))
        paths = set(all_paths)
        paths.update({'metadata.db', 'metadata_db_prefs_backup.json'})
        path_map = {x:x for x in paths}
        if not self.is_case_sensitive:
            for x in items:
                path_map[x.lower()] = x
            items = set(path_map)
            paths = {x.lower() for x in paths}
        items = items.intersection(paths)
        return items, path_map

    def move_library_to(self, all_paths, newloc, progress=lambda x: x):
        if not os.path.exists(newloc):
            os.makedirs(newloc)
        old_dirs = set()
        items, path_map = self.get_top_level_move_items(all_paths)
        for x in items:
            src = os.path.join(self.library_path, x)
            dest = os.path.join(newloc, path_map[x])
            if os.path.isdir(src):
                if os.path.exists(dest):
                    shutil.rmtree(dest)
                shutil.copytree(src, dest)
                old_dirs.add(src)
            else:
                if os.path.exists(dest):
                    os.remove(dest)
                shutil.copyfile(src, dest)
            x = path_map[x]
            if not isinstance(x, unicode):
                x = x.decode(filesystem_encoding, 'replace')
            progress(x)

        dbpath = os.path.join(newloc, os.path.basename(self.dbpath))
        opath = self.dbpath
        self.conn.close()
        self.library_path, self.dbpath = newloc, dbpath
        if self._conn is not None:
            self._conn.close()
            self._conn = None
        self.conn  # Accessing the property reopens the connection at the new location
        try:
            os.unlink(opath)
        except:
            pass
        for loc in old_dirs:
            try:
                shutil.rmtree(loc)
            except:
                pass

    # }}}

@@ -7,7 +7,7 @@ __license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import os, traceback, random, shutil
import os, traceback, random, shutil, re
from io import BytesIO
from collections import defaultdict
from functools import wraps, partial
@@ -25,12 +25,12 @@ from calibre.db.tables import VirtualTable
from calibre.db.write import get_series_values
from calibre.db.lazy import FormatMetadata, FormatsList
from calibre.ebooks import check_ebook_format
from calibre.ebooks.metadata import string_to_authors, author_to_author_sort
from calibre.ebooks.metadata import string_to_authors, author_to_author_sort, get_title_sort_pat
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.ptempfile import (base_dir, PersistentTemporaryFile,
                               SpooledTemporaryFile)
from calibre.utils.config import prefs
from calibre.utils.config import prefs, tweaks
from calibre.utils.date import now as nowf, utcnow, UNDEFINED_DATE
from calibre.utils.icu import sort_key

@@ -89,7 +89,7 @@ class Cache(object):
        self.formatter_template_cache = {}
        self.dirtied_cache = {}
        self.dirtied_sequence = 0
        self._search_api = Search(self.field_metadata.get_search_terms())
        self._search_api = Search(self, 'saved_searches', self.field_metadata.get_search_terms())

        # Implement locking for all simple read/write API methods
        # An unlocked version of the method is stored with the name starting
@@ -127,9 +127,8 @@ class Cache(object):
            except:
                traceback.print_exc()

        # TODO: Saved searches
        # if len(saved_searches().names()):
        #     self.field_metadata.add_search_category(label='search', name=_('Searches'))
        if len(self._search_api.get_saved_searches().names()):
            self.field_metadata.add_search_category(label='search', name=_('Searches'))

        self.field_metadata.add_grouped_search_terms(
            self._pref('grouped_search_terms', {}))
@@ -141,16 +140,28 @@ class Cache(object):
        if self.dirtied_cache:
            self.dirtied_sequence = max(self.dirtied_cache.itervalues())+1

    @property
    def prefs(self):
        'For internal use only (used by SavedSearchQueries). For thread-safe access to the preferences, use the pref() and set_pref() methods.'
        return self.backend.prefs

    @write_api
    def initialize_template_cache(self):
        self.formatter_template_cache = {}

    @write_api
    def refresh(self):
        self._initialize_template_cache()
    def clear_caches(self, book_ids=None):
        self._initialize_template_cache()  # Clear the formatter template cache
        for field in self.fields.itervalues():
            if hasattr(field, 'clear_caches'):
                field.clear_caches(book_ids=book_ids)  # Clear the composite cache and ondevice caches
        self.format_metadata_cache.clear()

    @write_api
    def reload_from_db(self, clear_caches=True):
        if clear_caches:
            self._clear_caches()
        for field in self.fields.itervalues():
            if hasattr(field, 'clear_cache'):
                field.clear_cache()  # Clear the composite cache
            if hasattr(field, 'table'):
                field.table.read(self.backend)  # Reread data from metadata.db
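
The @read_api/@write_api decorators used throughout this class take the
cache's shared lock and also keep an unlocked variant of each method under an
underscore-prefixed name (per the comment in __init__ above), so locked
methods can call one another without deadlocking. A much simplified sketch of
that shape, not the actual implementation:

from threading import RLock

class MiniCache(object):
    def __init__(self):
        self.lock = RLock()
        self.data = {}
        # Install locked wrappers, keeping the unlocked original as _name,
        # mirroring how Cache exposes e.g. clear_caches/_clear_caches.
        for name in ('get', 'put'):
            unlocked = getattr(self, name)
            setattr(self, '_' + name, unlocked)
            setattr(self, name, self._wrap(unlocked))

    def _wrap(self, func):
        def locked(*args, **kwargs):
            with self.lock:
                return func(*args, **kwargs)
        return locked

    def get(self, key):
        return self.data.get(key)

    def put(self, key, val):
        self.data[key] = val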

@@ -394,6 +405,12 @@ class Cache(object):
    def get_item_name(self, field, item_id):
        return self.fields[field].table.id_map[item_id]

    @read_api
    def get_item_id(self, field, item_name):
        ' Return the item id for item_name (case-insensitive) '
        rmap = {icu_lower(v) if isinstance(v, unicode) else v:k for k, v in self.fields[field].table.id_map.iteritems()}
        return rmap.get(icu_lower(item_name) if isinstance(item_name, unicode) else item_name, None)

    @read_api
    def author_data(self, author_ids=None):
        '''
@@ -408,7 +425,16 @@ class Cache(object):
        return {aid:af.author_data(aid) for aid in author_ids if aid in af.table.id_map}

    @read_api
    def format_metadata(self, book_id, fmt, allow_cache=True):
    def format_hash(self, book_id, fmt):
        try:
            name = self.fields['formats'].format_fname(book_id, fmt)
            path = self._field_for('path', book_id).replace('/', os.sep)
        except:
            raise NoSuchFormat('Record %d has no fmt: %s'%(book_id, fmt))
        return self.backend.format_hash(book_id, fmt, name, path)

    @api
    def format_metadata(self, book_id, fmt, allow_cache=True, update_db=False):
        if not fmt:
            return {}
        fmt = fmt.upper()
@@ -416,6 +442,7 @@ class Cache(object):
            x = self.format_metadata_cache[book_id].get(fmt, None)
            if x is not None:
                return x
        with self.read_lock:
            try:
                name = self.fields['formats'].format_fname(book_id, fmt)
                path = self._field_for('path', book_id).replace('/', os.sep)
@@ -426,8 +453,19 @@ class Cache(object):
            if path and name:
                ans = self.backend.format_metadata(book_id, fmt, name, path)
                self.format_metadata_cache[book_id][fmt] = ans
        if update_db and 'size' in ans:
            with self.write_lock:
                max_size = self.fields['formats'].table.update_fmt(book_id, fmt, name, ans['size'], self.backend)
                self.fields['size'].table.update_sizes({book_id: max_size})

        return ans

    @read_api
    def format_files(self, book_id):
        field = self.fields['formats']
        fmts = field.table.book_col_map.get(book_id, ())
        return {fmt:field.format_fname(book_id, fmt) for fmt in fmts}

    @read_api
    def pref(self, name, default=None):
        return self.backend.prefs.get(name, default)
@@ -498,6 +536,14 @@ class Cache(object):
                ret = i
        return ret

    @read_api
    def cover_last_modified(self, book_id):
        try:
            path = self._field_for('path', book_id).replace('/', os.sep)
        except AttributeError:
            return
        return self.backend.cover_last_modified(path)

    @read_api
    def copy_cover_to(self, book_id, dest, use_hardlink=False):
        '''
@@ -524,6 +570,7 @@ class Cache(object):
        the path is different from the current path (taking case sensitivity
        into account).
        '''
        fmt = (fmt or '').upper()
        try:
            name = self.fields['formats'].format_fname(book_id, fmt)
            path = self._field_for('path', book_id).replace('/', os.sep)
@@ -544,6 +591,7 @@ class Cache(object):
        Apart from the viewer, I don't believe any of the others do any file
        I/O with the results of this call.
        '''
        fmt = (fmt or '').upper()
        try:
            name = self.fields['formats'].format_fname(book_id, fmt)
            path = self._field_for('path', book_id).replace('/', os.sep)
@@ -555,6 +603,7 @@ class Cache(object):
    @read_api
    def has_format(self, book_id, fmt):
        'Return True iff the format exists on disk'
        fmt = (fmt or '').upper()
        try:
            name = self.fields['formats'].format_fname(book_id, fmt)
            path = self._field_for('path', book_id).replace('/', os.sep)
@@ -562,6 +611,31 @@ class Cache(object):
            return False
        return self.backend.has_format(book_id, fmt, name, path)

    @api
    def save_original_format(self, book_id, fmt):
        fmt = fmt.upper()
        if 'ORIGINAL' in fmt:
            raise ValueError('Cannot save original of an original fmt')
        fmtfile = self.format(book_id, fmt, as_file=True)
        if fmtfile is None:
            return False
        with fmtfile:
            nfmt = 'ORIGINAL_'+fmt
            return self.add_format(book_id, nfmt, fmtfile, run_hooks=False)

    @api
    def restore_original_format(self, book_id, original_fmt):
        original_fmt = original_fmt.upper()
        fmtfile = self.format(book_id, original_fmt, as_file=True)
        if fmtfile is not None:
            fmt = original_fmt.partition('_')[2]
            with self.write_lock:
                with fmtfile:
                    self._add_format(book_id, fmt, fmtfile, run_hooks=False)
                self._remove_formats({book_id:(original_fmt,)})
                return True
        return False

    @read_api
    def formats(self, book_id, verify_formats=True):
        '''
@@ -601,6 +675,7 @@ class Cache(object):
            this means that repeated calls yield the same
            temp file (which is re-created each time)
        '''
        fmt = (fmt or '').upper()
        ext = ('.'+fmt.lower()) if fmt else ''
        if as_path:
            if preserve_filename:
@@ -696,9 +771,8 @@ class Cache(object):
        return sorted(all_book_ids, key=partial(SortKey, fields, sort_keys))

    @read_api
    def search(self, query, restriction, virtual_fields=None):
        return self._search_api(self, query, restriction,
                                virtual_fields=virtual_fields)
    def search(self, query, restriction='', virtual_fields=None, book_ids=None):
        return self._search_api(self, query, restriction, virtual_fields=virtual_fields, book_ids=book_ids)

    @read_api
    def get_categories(self, sort='name', book_ids=None, icon_map=None):
@@ -761,7 +835,7 @@ class Cache(object):

        if dirtied and self.composites:
            for name in self.composites:
                self.fields[name].pop_cache(dirtied)
                self.fields[name].clear_caches(book_ids=dirtied)

        if dirtied and update_path and do_path_update:
            self._update_path(dirtied, mark_as_dirtied=False)
@@ -1068,16 +1142,16 @@ class Cache(object):
        self._update_last_modified(tuple(formats_map.iterkeys()))

    @read_api
    def get_next_series_num_for(self, series):
    def get_next_series_num_for(self, series, field='series'):
        books = ()
        sf = self.fields['series']
        sf = self.fields[field]
        if series:
            q = icu_lower(series)
            for val, book_ids in sf.iter_searchable_values(self._get_metadata, frozenset(self.all_book_ids())):
            for val, book_ids in sf.iter_searchable_values(self._get_metadata, frozenset(self._all_book_ids())):
                if q == icu_lower(val):
                    books = book_ids
                    break
        series_indices = sorted(self._field_for('series_index', book_id) for book_id in books)
        series_indices = sorted(self._field_for(sf.index_field.name, book_id) for book_id in books)
        return _get_next_series_num_for_list(tuple(series_indices), unwrap=False)

    @read_api
@@ -1181,6 +1255,42 @@ class Cache(object):
        else:
            table.remove_books(book_ids, self.backend)

    @read_api
    def author_sort_strings_for_books(self, book_ids):
        val_map = {}
        for book_id in book_ids:
            authors = self._field_ids_for('authors', book_id)
            adata = self._author_data(authors)
            val_map[book_id] = tuple(adata[aid]['sort'] for aid in authors)
        return val_map

    @write_api
    def rename_items(self, field, item_id_to_new_name_map, change_index=True):
        f = self.fields[field]
        try:
            func = f.table.rename_item
        except AttributeError:
            raise ValueError('Cannot rename items for one-one fields: %s' % field)
        affected_books = set()
        moved_books = set()
        id_map = {}
        for item_id, new_name in item_id_to_new_name_map.iteritems():
            books, new_id = func(item_id, new_name, self.backend)
            affected_books.update(books)
            id_map[item_id] = new_id
            if new_id != item_id:
                moved_books.update(books)
        if affected_books:
            if field == 'authors':
                self._set_field('author_sort',
                    {k:' & '.join(v) for k, v in self._author_sort_strings_for_books(affected_books).iteritems()})
                self._update_path(affected_books, mark_as_dirtied=False)
            elif change_index and hasattr(f, 'index_field') and tweaks['series_index_auto_increment'] != 'no_change':
                for book_id in moved_books:
                    self._set_field(f.index_field.name, {book_id:self._get_next_series_num_for(self._field_for(field, book_id), field=field)})
            self._mark_as_dirty(affected_books)
        return affected_books, id_map

    @write_api
    def remove_items(self, field, item_ids):
        ''' Delete all items in the specified field with the specified ids. Returns the set of affected book ids. '''
@@ -1239,6 +1349,177 @@ class Cache(object):
        ''' options must be a map of the form {book_id:conversion_options} '''
        return self.backend.set_conversion_options(options, fmt)

    @write_api
    def refresh_format_cache(self):
        self.fields['formats'].table.read(self.backend)
        self.format_metadata_cache.clear()

    @write_api
    def refresh_ondevice(self):
        self.fields['ondevice'].clear_caches()

    @read_api
    def tags_older_than(self, tag, delta=None, must_have_tag=None, must_have_authors=None):
        '''
        Return the ids of all books having the tag ``tag`` that are older than
        the specified time. Tag comparison is case insensitive.

        :param delta: A timedelta object or None. If None, then all ids with
            the tag are returned.
        :param must_have_tag: If not None the list of matches will be
            restricted to books that have this tag
        :param must_have_authors: A list of authors. If not None the list of
            matches will be restricted to books that have these authors (case
            insensitive).
        '''
        tag_map = {icu_lower(v):k for k, v in self._get_id_map('tags').iteritems()}
        tag = icu_lower(tag.strip())
        mht = icu_lower(must_have_tag.strip()) if must_have_tag else None
        tag_id, mht_id = tag_map.get(tag, None), tag_map.get(mht, None)
        ans = set()
        if mht_id is None and mht:
            return ans
        if tag_id is not None:
            tagged_books = self._books_for_field('tags', tag_id)
            if mht_id is not None and tagged_books:
                tagged_books = tagged_books.intersection(self._books_for_field('tags', mht_id))
            if tagged_books:
                if must_have_authors is not None:
                    amap = {icu_lower(v):k for k, v in self._get_id_map('authors').iteritems()}
                    books = None
                    for author in must_have_authors:
                        abooks = self._books_for_field('authors', amap.get(icu_lower(author), None))
                        books = abooks if books is None else books.intersection(abooks)
                        if not books:
                            break
                    tagged_books = tagged_books.intersection(books or set())
                if delta is None:
                    ans = tagged_books
                else:
                    now = nowf()
                    for book_id in tagged_books:
                        ts = self._field_for('timestamp', book_id)
                        if (now - ts) > delta:
                            ans.add(book_id)
        return ans
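
For example, a hypothetical cleanup that deletes news downloads more than a
week old (the tag name is illustrative):

from datetime import timedelta

old_news = cache.tags_older_than('News', delta=timedelta(days=7))
cache.remove_books(old_news)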

    @write_api
    def set_sort_for_authors(self, author_id_to_sort_map, update_books=True):
        self.fields['authors'].table.set_sort_names(author_id_to_sort_map, self.backend)
        changed_books = set()
        if update_books:
            val_map = {}
            for author_id in author_id_to_sort_map:
                books = self._books_for_field('authors', author_id)
                changed_books |= books
                for book_id in books:
                    authors = self._field_ids_for('authors', book_id)
                    adata = self._author_data(authors)
                    sorts = [adata[x]['sort'] for x in authors]
                    val_map[book_id] = ' & '.join(sorts)
            if val_map:
                self._set_field('author_sort', val_map)
        self._mark_as_dirty(changed_books)
        return changed_books

    @write_api
    def set_link_for_authors(self, author_id_to_link_map):
        self.fields['authors'].table.set_links(author_id_to_link_map, self.backend)
        changed_books = set()
        for author_id in author_id_to_link_map:
            changed_books |= self._books_for_field('authors', author_id)
        self._mark_as_dirty(changed_books)
        return changed_books

    @read_api
    def lookup_by_uuid(self, uuid):
        return self.fields['uuid'].table.lookup_by_uuid(uuid)

    @write_api
    def delete_custom_column(self, label=None, num=None):
        self.backend.delete_custom_column(label, num)

    @write_api
    def create_custom_column(self, label, name, datatype, is_multiple, editable=True, display={}):
        self.backend.create_custom_column(label, name, datatype, is_multiple, editable=editable, display=display)

    @write_api
    def set_custom_column_metadata(self, num, name=None, label=None, is_editable=None, display=None):
        return self.backend.set_custom_column_metadata(num, name=name, label=label, is_editable=is_editable, display=display)

    @read_api
    def get_books_for_category(self, category, item_id_or_composite_value):
        f = self.fields[category]
        if hasattr(f, 'get_books_for_val'):
            # Composite field
            return f.get_books_for_val(item_id_or_composite_value, self._get_metadata, self._all_book_ids())
        return self._books_for_field(f.name, item_id_or_composite_value)

    @read_api
    def find_identical_books(self, mi, search_restriction='', book_ids=None):
        ''' Finds books that have a superset of the authors in mi and the same
        title (title is fuzzy matched) '''
        fuzzy_title_patterns = [(re.compile(pat, re.IGNORECASE) if
            isinstance(pat, basestring) else pat, repl) for pat, repl in
            [
                (r'[\[\](){}<>\'";,:#]', ''),
                (get_title_sort_pat(), ''),
                (r'[-._]', ' '),
                (r'\s+', ' ')
            ]
        ]

        def fuzzy_title(title):
            title = icu_lower(title.strip())
            for pat, repl in fuzzy_title_patterns:
                title = pat.sub(repl, title)
            return title

        identical_book_ids = set()
        if mi.authors:
            try:
                quathors = mi.authors[:20]  # Too many authors causes parsing of
                                            # the search expression to fail
                query = ' and '.join('authors:"=%s"'%(a.replace('"', '')) for a in quathors)
                qauthors = mi.authors[20:]
            except ValueError:
                return identical_book_ids
            try:
                book_ids = self._search(query, restriction=search_restriction, book_ids=book_ids)
            except:
                traceback.print_exc()
                return identical_book_ids
            if qauthors and book_ids:
                matches = set()
                qauthors = {icu_lower(x) for x in qauthors}
                for book_id in book_ids:
                    aut = self._field_for('authors', book_id)
                    if aut:
                        aut = {icu_lower(x) for x in aut}
                        if aut.issuperset(qauthors):
                            matches.add(book_id)
                book_ids = matches

            for book_id in book_ids:
                fbook_title = self._field_for('title', book_id)
                fbook_title = fuzzy_title(fbook_title)
                mbook_title = fuzzy_title(mi.title)
                if fbook_title == mbook_title:
                    identical_book_ids.add(book_id)
        return identical_book_ids
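
To illustrate the fuzzy title matching, a standalone version of the same
normalization; the simplified leading-article pattern here stands in for
get_title_sort_pat(), which is locale-aware in calibre:

import re

patterns = [
    (re.compile(r'[\[\](){}<>\'";,:#]'), ''),
    (re.compile(r'^(a|the|an)\s+', re.IGNORECASE), ''),  # stand-in pattern
    (re.compile(r'[-._]'), ' '),
    (re.compile(r'\s+'), ' '),
]

def fuzzy_title(title):
    title = title.strip().lower()
    for pat, repl in patterns:
        title = pat.sub(repl, title)
    return title

assert fuzzy_title('The Game-of-Thrones') == fuzzy_title('game of thrones')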

    @read_api
    def get_top_level_move_items(self):
        all_paths = {self._field_for('path', book_id).partition('/')[0] for book_id in self._all_book_ids()}
        return self.backend.get_top_level_move_items(all_paths)

    @write_api
    def move_library_to(self, newloc, progress=None):
        if progress is None:
            progress = lambda x:x
        all_paths = {self._field_for('path', book_id).partition('/')[0] for book_id in self._all_book_ids()}
        self.backend.move_library_to(all_paths, newloc, progress=progress)

    # }}}

class SortKey(object):  # {{{

@@ -16,7 +16,6 @@ from calibre.ebooks.metadata import author_to_author_sort
from calibre.library.field_metadata import TagsIcons
from calibre.utils.config_base import tweaks
from calibre.utils.icu import sort_key
from calibre.utils.search_query_parser import saved_searches

CATEGORY_SORTS = ('name', 'popularity', 'rating')  # This has to be a tuple not a set

@@ -229,7 +228,7 @@ def get_categories(dbcache, sort='name', book_ids=None, icon_map=None):
        icon = None
        if icon_map and 'search' in icon_map:
            icon = icon_map['search']
        ss = saved_searches()
        ss = dbcache._search_api.get_saved_searches()
        for srch in ss.names():
            items.append(Tag(srch, tooltip=ss.lookup(srch),
                sort=srch, icon=icon, category='search',

@@ -11,7 +11,7 @@ __docformat__ = 'restructuredtext en'
from threading import Lock
from collections import defaultdict, Counter

from calibre.db.tables import ONE_ONE, MANY_ONE, MANY_MANY
from calibre.db.tables import ONE_ONE, MANY_ONE, MANY_MANY, null
from calibre.db.write import Writer
from calibre.ebooks.metadata import title_sort
from calibre.utils.config_base import tweaks
@@ -163,12 +163,11 @@ class CompositeField(OneToOneField):
            self._render_cache[book_id] = ans
        return ans

    def clear_cache(self):
        with self._lock:
            self._render_cache = {}

    def pop_cache(self, book_ids):
    def clear_caches(self, book_ids=None):
        with self._lock:
            if book_ids is None:
                self._render_cache.clear()
            else:
                for book_id in book_ids:
                    self._render_cache.pop(book_id, None)

@@ -212,17 +211,41 @@ class CompositeField(OneToOneField):
            ans.append(c)
        return ans

    def get_books_for_val(self, value, get_metadata, book_ids):
        is_multiple = self.table.metadata['is_multiple'].get('cache_to_list', None)
        ans = set()
        for book_id in book_ids:
            val = self.get_value_with_cache(book_id, get_metadata)
            vals = {x.strip() for x in val.split(is_multiple)} if is_multiple else [val]
            if value in vals:
                ans.add(book_id)
        return ans

class OnDeviceField(OneToOneField):

    def __init__(self, name, table):
        self.name = name
        self.book_on_device_func = None
        self.is_multiple = False
        self.cache = {}
        self._lock = Lock()

    def clear_caches(self, book_ids=None):
        with self._lock:
            if book_ids is None:
                self.cache.clear()
            else:
                for book_id in book_ids:
                    self.cache.pop(book_id, None)

    def book_on_device(self, book_id):
        if callable(self.book_on_device_func):
            return self.book_on_device_func(book_id)
        return None
        with self._lock:
            ans = self.cache.get(book_id, null)
        if ans is null and callable(self.book_on_device_func):
            ans = self.book_on_device_func(book_id)
            with self._lock:
                self.cache[book_id] = ans
        return None if ans is null else ans
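
The `null` sentinel lets the cache distinguish "never computed" from a cached
result that is legitimately None. The same pattern in miniature:

_missing = object()  # module-level sentinel, playing the role of `null`

class Memo(object):
    def __init__(self, func):
        self.func, self.cache = func, {}

    def __call__(self, key):
        ans = self.cache.get(key, _missing)
        if ans is _missing:  # not cached yet; None is a valid cached value
            ans = self.cache[key] = self.func(key)
        return ans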

    def set_book_on_device_func(self, func):
        self.book_on_device_func = func
@@ -312,7 +335,11 @@ class ManyToManyField(Field):
    def for_book(self, book_id, default_value=None):
        ids = self.table.book_col_map.get(book_id, ())
        if ids:
            ans = tuple(self.table.id_map[i] for i in ids)
            ans = (self.table.id_map[i] for i in ids)
            if self.table.sort_alpha:
                ans = tuple(sorted(ans, key=sort_key))
            else:
                ans = tuple(ans)
        else:
            ans = default_value
        return ans

@@ -9,17 +9,32 @@ __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os, traceback, types
from future_builtins import zip

from calibre import force_unicode
from calibre.db import _get_next_series_num_for_list, _get_series_values
from calibre import force_unicode, isbytestring
from calibre.constants import preferred_encoding
from calibre.db import _get_next_series_num_for_list, _get_series_values, get_data_as_dict
from calibre.db.adding import (
    find_books_in_directory, import_book_directory_multiple,
    import_book_directory, recursive_import, add_catalog, add_news)
from calibre.db.backend import DB
from calibre.db.cache import Cache
from calibre.db.errors import NoSuchFormat
from calibre.db.categories import CATEGORY_SORTS
from calibre.db.view import View
from calibre.db.write import clean_identifier
from calibre.utils.date import utcnow
from calibre.utils.search_query_parser import set_saved_searches

def cleanup_tags(tags):
    tags = [x.strip().replace(',', ';') for x in tags if x.strip()]
    tags = [x.decode(preferred_encoding, 'replace')
            if isbytestring(x) else x for x in tags]
    tags = [u' '.join(x.split()) for x in tags]
    ans, seen = [], set([])
    for tag in tags:
        if tag.lower() not in seen:
            seen.add(tag.lower())
            ans.append(tag)
    return ans
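
A quick illustration of what this normalization does (commas become
semicolons, runs of whitespace collapse, case-insensitive duplicates drop):

>>> cleanup_tags(['  Sci,Fi ', 'sci;fi', 'History  of   Art', ''])
[u'Sci;Fi', u'History of Art']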
|
||||
|
||||
class LibraryDatabase(object):
|
||||
|
||||
@ -49,11 +64,18 @@ class LibraryDatabase(object):
|
||||
cache = self.new_api = Cache(backend)
|
||||
cache.init()
|
||||
self.data = View(cache)
|
||||
self.id = self.data.index_to_id
|
||||
for x in ('get_property', 'count', 'refresh_ids', 'set_marked_ids',
|
||||
'multisort', 'search', 'search_getting_ids'):
|
||||
setattr(self, x, getattr(self.data, x))
|
||||
|
||||
self.get_property = self.data.get_property
|
||||
self.is_case_sensitive = getattr(backend, 'is_case_sensitive', False)
|
||||
self.custom_field_name = backend.custom_field_name
|
||||
|
||||
self.last_update_check = self.last_modified()
|
||||
self.book_on_device_func = None
|
||||
|
||||
if not self.is_second_db:
|
||||
set_saved_searches(self, 'saved_searches')
|
||||
|
||||
def close(self):
|
||||
self.backend.close()
|
||||
@ -66,6 +88,10 @@ class LibraryDatabase(object):
|
||||
delattr(self, x)
|
||||
|
||||
# Library wide properties {{{
|
||||
@property
|
||||
def prefs(self):
|
||||
return self.new_api.backend.prefs
|
||||
|
||||
@property
|
||||
def field_metadata(self):
|
||||
return self.backend.field_metadata
|
||||
@ -91,9 +117,12 @@ class LibraryDatabase(object):
|
||||
|
||||
def check_if_modified(self):
|
||||
if self.last_modified() > self.last_update_check:
|
||||
self.refresh()
|
||||
self.new_api.reload_from_db()
|
||||
self.last_update_check = utcnow()
|
||||
|
||||
def get_saved_searches(self):
|
||||
return self.new_api._search_api.get_saved_searches()
|
||||
|
||||
@property
|
||||
def custom_column_num_map(self):
|
||||
return self.backend.custom_column_num_map
|
||||
@ -117,16 +146,29 @@ class LibraryDatabase(object):
|
||||
for book_id in self.data.cache.all_book_ids():
|
||||
yield book_id
|
||||
|
||||
def is_empty(self):
|
||||
with self.new_api.read_lock:
|
||||
return not bool(self.new_api.fields['title'].table.book_col_map)
|
||||
|
||||
def get_usage_count_by_id(self, field):
|
||||
return [[k, v] for k, v in self.new_api.get_usage_count_by_id(field).iteritems()]
|
||||
|
||||
def field_id_map(self, field):
|
||||
return [(k, v) for k, v in self.new_api.get_id_map(field).iteritems()]
|
||||
|
||||
def get_custom_items_with_ids(self, label=None, num=None):
|
||||
try:
|
||||
return [[k, v] for k, v in self.new_api.get_id_map(self.custom_field_name(label, num)).iteritems()]
|
||||
except ValueError:
|
||||
return []
|
||||
|
||||
def refresh(self, field=None, ascending=True):
|
||||
self.data.cache.refresh()
|
||||
self.data.refresh(field=field, ascending=ascending)
|
||||
|
||||
def get_id_from_uuid(self, uuid):
|
||||
if uuid:
|
||||
return self.new_api.lookup_by_uuid(uuid)
|
||||
|
||||
def add_listener(self, listener):
|
||||
'''
|
||||
Add a listener. Will be called on change events with two arguments.
|
||||
@ -147,7 +189,7 @@ class LibraryDatabase(object):
|
||||
|
||||
def path(self, index, index_is_id=False):
|
||||
'Return the relative path to the directory containing this books files as a unicode string.'
|
||||
book_id = index if index_is_id else self.data.index_to_id(index)
|
||||
book_id = index if index_is_id else self.id(index)
|
||||
return self.new_api.field_for('path', book_id).replace('/', os.sep)
|
||||
|
||||
def abspath(self, index, index_is_id=False, create_dirs=True):
|
||||
@ -210,7 +252,7 @@ class LibraryDatabase(object):
|
||||
|
||||
def add_format(self, index, fmt, stream, index_is_id=False, path=None, notify=True, replace=True, copy_function=None):
|
||||
''' path and copy_function are ignored by the new API '''
|
||||
book_id = index if index_is_id else self.data.index_to_id(index)
|
||||
book_id = index if index_is_id else self.id(index)
|
||||
try:
|
||||
return self.new_api.add_format(book_id, fmt, stream, replace=replace, run_hooks=False, dbapi=self)
|
||||
except:
|
||||
@ -220,7 +262,7 @@ class LibraryDatabase(object):
|
||||
|
||||
def add_format_with_hooks(self, index, fmt, fpath, index_is_id=False, path=None, notify=True, replace=True):
|
||||
''' path is ignored by the new API '''
|
||||
book_id = index if index_is_id else self.data.index_to_id(index)
|
||||
book_id = index if index_is_id else self.id(index)
|
||||
try:
|
||||
return self.new_api.add_format(book_id, fmt, fpath, replace=replace, run_hooks=True, dbapi=self)
|
||||
except:
|
||||
@ -253,49 +295,78 @@ class LibraryDatabase(object):
|
||||
return list(self.new_api.get_ids_for_custom_book_data(name))
|
||||
# }}}
|
||||
|
||||
def sort(self, field, ascending, subsort=False):
|
||||
self.multisort([(field, ascending)])
|
||||
|
||||
def get_field(self, index, key, default=None, index_is_id=False):
|
||||
book_id = index if index_is_id else self.data.index_to_id(index)
|
||||
book_id = index if index_is_id else self.id(index)
|
||||
mi = self.new_api.get_metadata(book_id, get_cover=key == 'cover')
|
||||
return mi.get(key, default)
|
||||
|
||||
def cover_last_modified(self, index, index_is_id=False):
|
||||
book_id = index if index_is_id else self.id(index)
|
||||
return self.new_api.cover_last_modified(book_id) or self.last_modified()
|
||||
|
||||
def cover(self, index, index_is_id=False, as_file=False, as_image=False, as_path=False):
|
||||
book_id = index if index_is_id else self.id(index)
|
||||
return self.new_api.cover(book_id, as_file=as_file, as_image=as_image, as_path=as_path)
|
||||
|
||||
def copy_cover_to(self, index, dest, index_is_id=False, windows_atomic_move=None, use_hardlink=False):
|
||||
book_id = index if index_is_id else self.id(index)
|
||||
return self.new_api.copy_cover_to(book_id, dest, use_hardlink=use_hardlink)
|
||||
|
||||
def copy_format_to(self, index, fmt, dest, index_is_id=False, windows_atomic_move=None, use_hardlink=False):
|
||||
book_id = index if index_is_id else self.id(index)
|
||||
return self.new_api.copy_format_to(book_id, fmt, dest, use_hardlink=use_hardlink)
|
||||
|
||||
def delete_book(self, book_id, notify=True, commit=True, permanent=False, do_clean=True):
|
||||
self.new_api.remove_books((book_id,), permanent=permanent)
|
||||
if notify:
|
||||
self.notify('delete', [id])
|
||||
|
||||
    def dirtied(self, book_ids, commit=True):
        self.new_api.mark_as_dirty(book_ids)

    def dump_metadata(self, book_ids=None, remove_from_dirtied=True, commit=True, callback=None):
        self.new_api.dump_metadata(book_ids=book_ids, remove_from_dirtied=remove_from_dirtied, callback=callback)

    def authors_sort_strings(self, index, index_is_id=False):
        book_id = index if index_is_id else self.data.index_to_id(index)
        with self.new_api.read_lock:
            authors = self.new_api._field_ids_for('authors', book_id)
            adata = self.new_api._author_data(authors)
            return [adata[aid]['sort'] for aid in authors]
        book_id = index if index_is_id else self.id(index)
        return list(self.new_api.author_sort_strings_for_books((book_id,))[book_id])

    def author_sort_from_book(self, index, index_is_id=False):
        return ' & '.join(self.authors_sort_strings(index, index_is_id=index_is_id))

    def authors_with_sort_strings(self, index, index_is_id=False):
        book_id = index if index_is_id else self.data.index_to_id(index)
        book_id = index if index_is_id else self.id(index)
        with self.new_api.read_lock:
            authors = self.new_api._field_ids_for('authors', book_id)
            adata = self.new_api._author_data(authors)
            return [(aid, adata[aid]['name'], adata[aid]['sort'], adata[aid]['link']) for aid in authors]

    def set_sort_field_for_author(self, old_id, new_sort, commit=True, notify=False):
        changed_books = self.new_api.set_sort_for_authors({old_id:new_sort})
        if notify:
            self.notify('metadata', list(changed_books))

    def set_link_field_for_author(self, aid, link, commit=True, notify=False):
        changed_books = self.new_api.set_link_for_authors({aid:link})
        if notify:
            self.notify('metadata', list(changed_books))

    def book_on_device(self, book_id):
        if callable(self.book_on_device_func):
            return self.book_on_device_func(book_id)
        return None
        with self.new_api.read_lock:
            return self.new_api.fields['ondevice'].book_on_device(book_id)

    def book_on_device_string(self, book_id):
        loc = []
        count = 0
        on = self.book_on_device(book_id)
        if on is not None:
            m, a, b, count = on[:4]
            if m is not None:
                loc.append(_('Main'))
            if a is not None:
                loc.append(_('Card A'))
            if b is not None:
                loc.append(_('Card B'))
        return ', '.join(loc) + ((_(' (%s books)')%count) if count > 1 else '')
        return self.new_api.field_for('ondevice', book_id)
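    # Worked illustration of the two paths above (values assumed): with the
    # book present on main memory and card A and count == 3, the legacy
    # formatting produced 'Main, Card A (3 books)'; the replacement simply
    # returns the cached 'ondevice' field for the book.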

    def set_book_on_device_func(self, func):
        self.book_on_device_func = func
        self.new_api.fields['ondevice'].set_book_on_device_func(func)

    @property
    def book_on_device_func(self):
        return self.new_api.fields['ondevice'].book_on_device_func

    def books_in_series(self, series_id):
        with self.new_api.read_lock:
@@ -304,7 +375,7 @@ class LibraryDatabase(object):
        return sorted(book_ids, key=lambda x:ff('series_index', x))

    def books_in_series_of(self, index, index_is_id=False):
        book_id = index if index_is_id else self.data.index_to_id(index)
        book_id = index if index_is_id else self.id(index)
        series_ids = self.new_api.field_ids_for('series', book_id)
        if not series_ids:
            return []
@@ -335,7 +406,7 @@ class LibraryDatabase(object):
        self.new_api.delete_conversion_options((book_id,), fmt=fmt)

    def set(self, index, field, val, allow_case_change=False):
        book_id = self.data.index_to_id(index)
        book_id = self.id(index)
        try:
            return self.new_api.set_field(field, {book_id:val}, allow_case_change=allow_case_change)
        finally:
@@ -371,6 +442,266 @@ class LibraryDatabase(object):
        if notify:
            self.notify('metadata', [book_id])

    def remove_all_tags(self, ids, notify=False, commit=True):
        self.new_api.set_field('tags', {book_id:() for book_id in ids})
        if notify:
            self.notify('metadata', ids)

    def _do_bulk_modify(self, field, ids, add, remove, notify):
        add = cleanup_tags(add)
        remove = cleanup_tags(remove)
        remove = set(remove) - set(add)
        if not ids or (not add and not remove):
            return

        remove = {icu_lower(x) for x in remove}
        with self.new_api.write_lock:
            val_map = {}
            for book_id in ids:
                tags = list(self.new_api._field_for(field, book_id))
                existing = {icu_lower(x) for x in tags}
                tags.extend(t for t in add if icu_lower(t) not in existing)
                tags = tuple(t for t in tags if icu_lower(t) not in remove)
                val_map[book_id] = tags
            self.new_api._set_field(field, val_map, allow_case_change=False)

        if notify:
            self.notify('metadata', ids)
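    # A minimal sketch of the case-insensitive merge implemented above, with
    # plain str.lower standing in for icu_lower (names are illustrative, not
    # part of the API):
    #
    #   def merge_tags(tags, add, remove):
    #       remove = {t.lower() for t in set(remove) - set(add)}
    #       existing = {t.lower() for t in tags}
    #       tags = list(tags) + [t for t in add if t.lower() not in existing]
    #       return tuple(t for t in tags if t.lower() not in remove)
    #
    #   merge_tags(('News', 'Fiction'), ['fiction', 'Space'], ['News'])
    #   # -> ('Fiction', 'Space')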

    def bulk_modify_tags(self, ids, add=[], remove=[], notify=False):
        self._do_bulk_modify('tags', ids, add, remove, notify)

    def set_custom_bulk_multiple(self, ids, add=[], remove=[], label=None, num=None, notify=False):
        data = self.backend.custom_field_metadata(label, num)
        if not data['editable']:
            raise ValueError('Column %r is not editable'%data['label'])
        if data['datatype'] != 'text' or not data['is_multiple']:
            raise ValueError('Column %r is not text/multiple'%data['label'])
        field = self.custom_field_name(label, num)
        self._do_bulk_modify(field, ids, add, remove, notify)

    def unapply_tags(self, book_id, tags, notify=True):
        self.bulk_modify_tags((book_id,), remove=tags, notify=notify)

    def is_tag_used(self, tag):
        return icu_lower(tag) in {icu_lower(x) for x in self.new_api.all_field_names('tags')}

    def delete_tag(self, tag):
        self.delete_tags((tag,))

    def delete_tags(self, tags):
        with self.new_api.write_lock:
            tag_map = {icu_lower(v):k for k, v in self.new_api._get_id_map('tags').iteritems()}
            tag_ids = (tag_map.get(icu_lower(tag), None) for tag in tags)
            tag_ids = tuple(tid for tid in tag_ids if tid is not None)
            if tag_ids:
                self.new_api._remove_items('tags', tag_ids)
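    # Usage sketch (tag names assumed): the lookup above is case-insensitive
    # and names with no matching item are skipped rather than raising:
    #
    #   db.delete_tags(('news', 'NoSuchTag'))  # removes only the 'News' item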

    def has_id(self, book_id):
        return book_id in self.new_api.all_book_ids()

    def format(self, index, fmt, index_is_id=False, as_file=False, mode='r+b', as_path=False, preserve_filename=False):
        book_id = index if index_is_id else self.id(index)
        return self.new_api.format(book_id, fmt, as_file=as_file, as_path=as_path, preserve_filename=preserve_filename)

    def format_abspath(self, index, fmt, index_is_id=False):
        book_id = index if index_is_id else self.id(index)
        return self.new_api.format_abspath(book_id, fmt)

    def format_path(self, index, fmt, index_is_id=False):
        book_id = index if index_is_id else self.id(index)
        ans = self.new_api.format_abspath(book_id, fmt)
        if ans is None:
            raise NoSuchFormat('Record %d has no format: %s'%(book_id, fmt))
        return ans

    def format_files(self, index, index_is_id=False):
        book_id = index if index_is_id else self.id(index)
        return [(v, k) for k, v in self.new_api.format_files(book_id).iteritems()]

    def format_metadata(self, book_id, fmt, allow_cache=True, update_db=False, commit=False):
        return self.new_api.format_metadata(book_id, fmt, allow_cache=allow_cache, update_db=update_db)

    def format_last_modified(self, book_id, fmt):
        m = self.format_metadata(book_id, fmt)
        if m:
            return m['mtime']

    def formats(self, index, index_is_id=False, verify_formats=True):
        book_id = index if index_is_id else self.id(index)
        ans = self.new_api.formats(book_id, verify_formats=verify_formats)
        if ans:
            return ','.join(ans)

    def has_format(self, index, fmt, index_is_id=False):
        book_id = index if index_is_id else self.id(index)
        return self.new_api.has_format(book_id, fmt)

    def refresh_format_cache(self):
        self.new_api.refresh_format_cache()

    def refresh_ondevice(self):
        self.new_api.refresh_ondevice()

    def tags_older_than(self, tag, delta, must_have_tag=None, must_have_authors=None):
        for book_id in sorted(self.new_api.tags_older_than(tag, delta=delta, must_have_tag=must_have_tag, must_have_authors=must_have_authors)):
            yield book_id

    def sizeof_format(self, index, fmt, index_is_id=False):
        book_id = index if index_is_id else self.id(index)
        return self.new_api.format_metadata(book_id, fmt).get('size', None)

    def get_metadata(self, index, index_is_id=False, get_cover=False, get_user_categories=True, cover_as_data=False):
        book_id = index if index_is_id else self.id(index)
        return self.new_api.get_metadata(book_id, get_cover=get_cover, get_user_categories=get_user_categories, cover_as_data=cover_as_data)

    def rename_series(self, old_id, new_name, change_index=True):
        self.new_api.rename_items('series', {old_id:new_name}, change_index=change_index)

    def get_custom(self, index, label=None, num=None, index_is_id=False):
        book_id = index if index_is_id else self.id(index)
        ans = self.new_api.field_for(self.custom_field_name(label, num), book_id)
        if isinstance(ans, tuple):
            ans = list(ans)
        return ans

    def get_custom_extra(self, index, label=None, num=None, index_is_id=False):
        data = self.backend.custom_field_metadata(label, num)
        # add future datatypes with an extra column here
        if data['datatype'] != 'series':
            return None
        book_id = index if index_is_id else self.id(index)
        return self.new_api.field_for(self.custom_field_name(label, num) + '_index', book_id)

    def get_custom_and_extra(self, index, label=None, num=None, index_is_id=False):
        book_id = index if index_is_id else self.id(index)
        data = self.backend.custom_field_metadata(label, num)
        ans = self.new_api.field_for(self.custom_field_name(label, num), book_id)
        if isinstance(ans, tuple):
            ans = list(ans)
        if data['datatype'] != 'series':
            return (ans, None)
        return (ans, self.new_api.field_for(self.custom_field_name(label, num) + '_index', book_id))

    def get_next_cc_series_num_for(self, series, label=None, num=None):
        data = self.backend.custom_field_metadata(label, num)
        if data['datatype'] != 'series':
            return None
        return self.new_api.get_next_series_num_for(series, field=self.custom_field_name(label, num))

    def is_item_used_in_multiple(self, item, label=None, num=None):
        existing_tags = self.all_custom(label=label, num=num)
        return icu_lower(item) in {icu_lower(t) for t in existing_tags}

    def delete_custom_item_using_id(self, item_id, label=None, num=None):
        self.new_api.remove_items(self.custom_field_name(label, num), (item_id,))

    def rename_custom_item(self, old_id, new_name, label=None, num=None):
        self.new_api.rename_items(self.custom_field_name(label, num), {old_id:new_name}, change_index=False)

    def delete_item_from_multiple(self, item, label=None, num=None):
        field = self.custom_field_name(label, num)
        existing = self.new_api.get_id_map(field)
        rmap = {icu_lower(v):k for k, v in existing.iteritems()}
        item_id = rmap.get(icu_lower(item), None)
        if item_id is None:
            return []
        return list(self.new_api.remove_items(field, (item_id,)))

    def set_custom(self, book_id, val, label=None, num=None, append=False,
                   notify=True, extra=None, commit=True, allow_case_change=False):
        field = self.custom_field_name(label, num)
        data = self.backend.custom_field_metadata(label, num)
        if data['datatype'] == 'composite':
            return set()
        if not data['editable']:
            raise ValueError('Column %r is not editable'%data['label'])
        if data['datatype'] == 'enumeration' and (
                val and val not in data['display']['enum_values']):
            return set()
        with self.new_api.write_lock:
            if append and data['is_multiple']:
                current = self.new_api._field_for(field, book_id)
                existing = {icu_lower(x) for x in current}
                val = current + tuple(x for x in self.new_api.fields[field].writer.adapter(val) if icu_lower(x) not in existing)
                affected_books = self.new_api._set_field(field, {book_id:val}, allow_case_change=allow_case_change)
            else:
                affected_books = self.new_api._set_field(field, {book_id:val}, allow_case_change=allow_case_change)
            if data['datatype'] == 'series':
                extra = 1.0 if extra is None else extra
                self.new_api._set_field(field + '_index', {book_id:extra})
        if notify and affected_books:
            self.notify('metadata', list(affected_books))
        return affected_books
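    # A hedged sketch of the append branch for a multiple-valued custom
    # column (label and values assumed for illustration):
    #
    #   db.set_custom(book_id, 'scifi, Space', label='genre', append=True)
    #   # with current value ('SciFi',) only 'Space' is added, since 'scifi'
    #   # already exists case-insensitively; for a series column the
    #   # companion '_index' value defaults to 1.0 when extra is None.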

    def set_custom_bulk(self, ids, val, label=None, num=None,
                        append=False, notify=True, extras=None):
        if extras is not None and len(extras) != len(ids):
            raise ValueError('Length of ids and extras is not the same')
        field = self.custom_field_name(label, num)
        data = self.backend.custom_field_metadata(label, num)
        if data['datatype'] == 'composite':
            return set()
        if data['datatype'] == 'enumeration' and (
                val and val not in data['display']['enum_values']):
            return
        if not data['editable']:
            raise ValueError('Column %r is not editable'%data['label'])

        if append:
            for book_id in ids:
                self.set_custom(book_id, val, label=label, num=num, append=True, notify=False)
        else:
            with self.new_api.write_lock:
                self.new_api._set_field(field, {book_id:val for book_id in ids}, allow_case_change=False)
                if extras is not None:
                    self.new_api._set_field(field + '_index', {book_id:val for book_id, val in zip(ids, extras)})
        if notify:
            self.notify('metadata', list(ids))

    def delete_custom_column(self, label=None, num=None):
        self.new_api.delete_custom_column(label, num)

    def create_custom_column(self, label, name, datatype, is_multiple, editable=True, display={}):
        self.new_api.create_custom_column(label, name, datatype, is_multiple, editable=editable, display=display)

    def set_custom_column_metadata(self, num, name=None, label=None, is_editable=None, display=None, notify=True):
        changed = self.new_api.set_custom_column_metadata(num, name=name, label=label, is_editable=is_editable, display=display)
        if changed and notify:
            self.notify('metadata', [])

    def remove_cover(self, book_id, notify=True, commit=True):
        self.new_api.set_cover({book_id:None})
        if notify:
            self.notify('cover', [book_id])

    def set_cover(self, book_id, data, notify=True, commit=True):
        self.new_api.set_cover({book_id:data})
        if notify:
            self.notify('cover', [book_id])

    def original_fmt(self, book_id, fmt):
        nfmt = ('ORIGINAL_%s'%fmt).upper()
        return nfmt if self.new_api.has_format(book_id, nfmt) else fmt
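    # Naming convention sketch: a saved original of an EPUB is stored as the
    # ORIGINAL_EPUB format, so original_fmt(book_id, 'EPUB') returns
    # 'ORIGINAL_EPUB' while a backup exists and 'EPUB' otherwise.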

    def save_original_format(self, book_id, fmt, notify=True):
        ret = self.new_api.save_original_format(book_id, fmt)
        if ret and notify:
            self.notify('metadata', [book_id])
        return ret

    def restore_original_format(self, book_id, original_fmt, notify=True):
        ret = self.new_api.restore_original_format(book_id, original_fmt)
        if ret and notify:
            self.notify('metadata', [book_id])
        return ret

    def remove_format(self, index, fmt, index_is_id=False, notify=True, commit=True, db_only=False):
        book_id = index if index_is_id else self.id(index)
        self.new_api.remove_formats({book_id:(fmt,)}, db_only=db_only)
        if notify:
            self.notify('metadata', [book_id])

    # Private interface {{{
    def __iter__(self):
        for row in self.data.iterall():
@@ -387,20 +718,43 @@ class LibraryDatabase(object):
MT = lambda func: types.MethodType(func, None, LibraryDatabase)

# Legacy getter API {{{
for prop in ('author_sort', 'authors', 'comment', 'comments', 'publisher',
for prop in ('author_sort', 'authors', 'comment', 'comments', 'publisher', 'max_size',
             'rating', 'series', 'series_index', 'tags', 'title', 'title_sort',
             'timestamp', 'uuid', 'pubdate', 'ondevice', 'metadata_last_modified', 'languages',):
    def getter(prop):
        fm = {'comment':'comments', 'metadata_last_modified':
              'last_modified', 'title_sort':'sort'}.get(prop, prop)
              'last_modified', 'title_sort':'sort', 'max_size':'size'}.get(prop, prop)
        def func(self, index, index_is_id=False):
            return self.get_property(index, index_is_id=index_is_id, loc=self.FIELD_MAP[fm])
        return func
    setattr(LibraryDatabase, prop, MT(getter(prop)))

for prop in ('series', 'publisher'):
    def getter(field):
        def func(self, index, index_is_id=False):
            book_id = index if index_is_id else self.id(index)
            ans = self.new_api.field_ids_for(field, book_id)
            try:
                return ans[0]
            except IndexError:
                pass
        return func
    setattr(LibraryDatabase, prop + '_id', MT(getter(prop)))

LibraryDatabase.format_hash = MT(lambda self, book_id, fmt:self.new_api.format_hash(book_id, fmt))
LibraryDatabase.index = MT(lambda self, book_id, cache=False:self.data.id_to_index(book_id))
LibraryDatabase.has_cover = MT(lambda self, book_id:self.new_api.field_for('cover', book_id))
LibraryDatabase.get_tags = MT(lambda self, book_id:set(self.new_api.field_for('tags', book_id)))
LibraryDatabase.get_categories = MT(lambda self, sort='name', ids=None, icon_map=None:self.new_api.get_categories(sort=sort, book_ids=ids, icon_map=icon_map))
LibraryDatabase.get_identifiers = MT(
    lambda self, index, index_is_id=False: self.new_api.field_for('identifiers', index if index_is_id else self.data.index_to_id(index)))
    lambda self, index, index_is_id=False: self.new_api.field_for('identifiers', index if index_is_id else self.id(index)))
LibraryDatabase.isbn = MT(
    lambda self, index, index_is_id=False: self.get_identifiers(index, index_is_id=index_is_id).get('isbn', None))
LibraryDatabase.get_books_for_category = MT(
    lambda self, category, id_:self.new_api.get_books_for_category(category, id_))
LibraryDatabase.get_data_as_dict = MT(get_data_as_dict)
LibraryDatabase.find_identical_books = MT(lambda self, mi:self.new_api.find_identical_books(mi))
LibraryDatabase.get_top_level_move_items = MT(lambda self:self.new_api.get_top_level_move_items())
# }}}
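# A hedged illustration of what the loops above generate (assumes a
# LibraryDatabase instance db): each name becomes a bound method that reads
# the mapped field, and series/publisher additionally get *_id variants.
#
#   db.title(0)                        # title at view row 0
#   db.authors(1, index_is_id=True)    # authors for book id 1
#   db.series_id(1, index_is_id=True)  # id of its series item, or None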

# Legacy setter API {{{
@@ -435,6 +789,20 @@ for field in (
            return ret if field == 'languages' else retval
        return func
    setattr(LibraryDatabase, 'set_%s' % field.replace('!', ''), MT(setter(field)))

for field in ('authors', 'tags', 'publisher'):
    def renamer(field):
        def func(self, old_id, new_name):
            id_map = self.new_api.rename_items(field, {old_id:new_name})[1]
            if field == 'authors':
                return id_map[old_id]
        return func
    fname = field[:-1] if field in {'tags', 'authors'} else field
    setattr(LibraryDatabase, 'rename_%s' % fname, MT(renamer(field)))

LibraryDatabase.update_last_modified = MT(
    lambda self, book_ids, commit=False, now=None: self.new_api.update_last_modified(book_ids, now=now))

# }}}

# Legacy API to get information about many-(one, many) fields {{{
@@ -445,7 +813,8 @@ for field in ('authors', 'tags', 'publisher', 'series'):
        return func
    name = field[:-1] if field in {'authors', 'tags'} else field
    setattr(LibraryDatabase, 'all_%s_names' % name, MT(getter(field)))
LibraryDatabase.all_formats = MT(lambda self:self.new_api.all_field_names('formats'))
LibraryDatabase.all_formats = MT(lambda self:self.new_api.all_field_names('formats'))
LibraryDatabase.all_custom = MT(lambda self, label=None, num=None:self.new_api.all_field_names(self.custom_field_name(label, num)))

for func, field in {'all_authors':'authors', 'all_titles':'title', 'all_tags2':'tags', 'all_series':'series', 'all_publishers':'publisher'}.iteritems():
    def getter(field):
@@ -458,6 +827,8 @@ LibraryDatabase.all_tags = MT(lambda self: list(self.all_tag_names()))
LibraryDatabase.get_all_identifier_types = MT(lambda self: list(self.new_api.fields['identifiers'].table.all_identifier_types()))
LibraryDatabase.get_authors_with_ids = MT(
    lambda self: [[aid, adata['name'], adata['sort'], adata['link']] for aid, adata in self.new_api.author_data().iteritems()])
LibraryDatabase.get_author_id = MT(
    lambda self, author: {icu_lower(v):k for k, v in self.new_api.get_id_map('authors').iteritems()}.get(icu_lower(author), None))

for field in ('tags', 'series', 'publishers', 'ratings', 'languages'):
    def getter(field):
@@ -515,6 +886,7 @@ for meth in ('get_next_series_num_for', 'has_book', 'author_sort_from_authors'):
        return func
    setattr(LibraryDatabase, meth, MT(getter(meth)))

LibraryDatabase.move_library_to = MT(lambda self, newloc, progress=None:self.new_api.move_library_to(newloc, progress=progress))
# Cleaning is not required anymore
LibraryDatabase.clean = LibraryDatabase.clean_custom = MT(lambda self:None)
LibraryDatabase.clean_standard_field = MT(lambda self, field, commit=False:None)
@@ -524,3 +896,8 @@ LibraryDatabase.commit = MT(lambda self:None)

del MT

@@ -15,7 +15,7 @@ from calibre.utils.config_base import prefs
from calibre.utils.date import parse_date, UNDEFINED_DATE, now
from calibre.utils.icu import primary_find
from calibre.utils.localization import lang_map, canonicalize_lang
from calibre.utils.search_query_parser import SearchQueryParser, ParseException
from calibre.utils.search_query_parser import SearchQueryParser, ParseException, SavedSearchQueries

CONTAINS_MATCH = 0
EQUALS_MATCH = 1
@@ -392,7 +392,7 @@ class Parser(SearchQueryParser):

    def __init__(self, dbcache, all_book_ids, gst, date_search, num_search,
                 bool_search, keypair_search, limit_search_columns, limit_search_columns_to,
                 locations, virtual_fields):
                 locations, virtual_fields, get_saved_searches):
        self.dbcache, self.all_book_ids = dbcache, all_book_ids
        self.all_search_locations = frozenset(locations)
        self.grouped_search_terms = gst
@@ -403,7 +403,7 @@ class Parser(SearchQueryParser):
        self.virtual_fields = virtual_fields or {}
        if 'marked' not in self.virtual_fields:
            self.virtual_fields['marked'] = self
        super(Parser, self).__init__(locations, optimize=True)
        super(Parser, self).__init__(locations, optimize=True, get_saved_searches=get_saved_searches)

    @property
    def field_metadata(self):
@@ -651,17 +651,21 @@ class Parser(SearchQueryParser):

class Search(object):

    def __init__(self, all_search_locations=()):
    def __init__(self, db, opt_name, all_search_locations=()):
        self.all_search_locations = all_search_locations
        self.date_search = DateSearch()
        self.num_search = NumericSearch()
        self.bool_search = BooleanSearch()
        self.keypair_search = KeyPairSearch()
        self.saved_searches = SavedSearchQueries(db, opt_name)

    def get_saved_searches(self):
        return self.saved_searches

    def change_locations(self, newlocs):
        self.all_search_locations = newlocs

    def __call__(self, dbcache, query, search_restriction, virtual_fields=None):
    def __call__(self, dbcache, query, search_restriction, virtual_fields=None, book_ids=None):
        '''
        Return the set of ids of all records that match the specified
        query and restriction
@@ -674,28 +678,26 @@ class Search(object):
        if search_restriction:
            q = u'(%s) and (%s)' % (search_restriction, query)

        all_book_ids = dbcache._all_book_ids(type=set)
        all_book_ids = dbcache._all_book_ids(type=set) if book_ids is None else set(book_ids)
        if not q:
            return all_book_ids

        if not isinstance(q, type(u'')):
            q = q.decode('utf-8')

        # We construct a new parser instance per search as pyparsing is not
        # thread safe. On my desktop, constructing a SearchQueryParser instance
        # takes 0.000975 seconds and restoring it from a pickle takes
        # 0.000974 seconds.
        # We construct a new parser instance per search as the parse is not
        # thread safe.
        sqp = Parser(
            dbcache, all_book_ids, dbcache._pref('grouped_search_terms'),
            self.date_search, self.num_search, self.bool_search,
            self.keypair_search,
            prefs['limit_search_columns'],
            prefs['limit_search_columns_to'], self.all_search_locations,
            virtual_fields)
            virtual_fields, self.get_saved_searches)

        try:
            ret = sqp.parse(q)
        finally:
            sqp.dbcache = None
            sqp.dbcache = sqp.get_saved_searches = None
        return ret
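    # A hedged usage sketch (constructor arguments are assumptions for
    # illustration): a Search object is built once with a backend and the
    # pref name under which saved searches are stored, then called per query.
    #
    #   search = Search(backend, 'saved_searches', locations)
    #   matched_ids = search(cache, 'tags:"=News"', search_restriction='')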

@@ -44,6 +44,7 @@ class Table(object):

    def __init__(self, name, metadata, link_table=None):
        self.name, self.metadata = name, metadata
        self.sort_alpha = metadata.get('is_multiple', False) and metadata.get('display', {}).get('sort_alpha', False)

        # self.unserialize() maps values from the db to python objects
        self.unserialize = \
@@ -137,6 +138,9 @@ class UUIDTable(OneToOneTable):
            clean.add(val)
        return clean

    def lookup_by_uuid(self, uuid):
        return self.uuid_to_id_map.get(uuid, None)

class CompositeTable(OneToOneTable):

    def read(self, db):
@@ -219,6 +223,31 @@ class ManyToOneTable(Table):
        db.conn.executemany('DELETE FROM {0} WHERE id=?'.format(self.metadata['table']), item_ids)
        return affected_books

    def rename_item(self, item_id, new_name, db):
        rmap = {icu_lower(v):k for k, v in self.id_map.iteritems()}
        existing_item = rmap.get(icu_lower(new_name), None)
        table, col, lcol = self.metadata['table'], self.metadata['column'], self.metadata['link_column']
        affected_books = self.col_book_map.get(item_id, set())
        new_id = item_id
        if existing_item is None or existing_item == item_id:
            # A simple rename will do the trick
            self.id_map[item_id] = new_name
            db.conn.execute('UPDATE {0} SET {1}=? WHERE id=?'.format(table, col), (new_name, item_id))
        else:
            # We have to replace
            new_id = existing_item
            self.id_map.pop(item_id, None)
            books = self.col_book_map.pop(item_id, set())
            for book_id in books:
                self.book_col_map[book_id] = existing_item
            self.col_book_map[existing_item].update(books)
            # For custom series this means that the series index can
            # potentially have duplicates/be incorrect, but there is no way to
            # handle that in this context.
            db.conn.execute('UPDATE {0} SET {1}=? WHERE {1}=?; DELETE FROM {2} WHERE id=?'.format(
                self.link_table, lcol, table), (existing_item, item_id, item_id))
        return affected_books, new_id
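    # Sketch of the two branches above with hypothetical ids: renaming item 7
    # to an unused name rewrites row 7 in place, while renaming it to the
    # name already held by item 3 merges 7 into 3.
    #
    #   affected, new_id = table.rename_item(7, 'Fantasy', db)
    #   # unused name        -> new_id == 7 (simple UPDATE)
    #   # name owned by id 3 -> new_id == 3 (links repointed, row 7 deleted)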

class ManyToManyTable(ManyToOneTable):

    '''
@@ -280,6 +309,34 @@ class ManyToManyTable(ManyToOneTable):
        db.conn.executemany('DELETE FROM {0} WHERE id=?'.format(self.metadata['table']), item_ids)
        return affected_books

    def rename_item(self, item_id, new_name, db):
        rmap = {icu_lower(v):k for k, v in self.id_map.iteritems()}
        existing_item = rmap.get(icu_lower(new_name), None)
        table, col, lcol = self.metadata['table'], self.metadata['column'], self.metadata['link_column']
        affected_books = self.col_book_map.get(item_id, set())
        new_id = item_id
        if existing_item is None or existing_item == item_id:
            # A simple rename will do the trick
            self.id_map[item_id] = new_name
            db.conn.execute('UPDATE {0} SET {1}=? WHERE id=?'.format(table, col), (new_name, item_id))
        else:
            # We have to replace
            new_id = existing_item
            self.id_map.pop(item_id, None)
            books = self.col_book_map.pop(item_id, set())
            # Replacing item_id with existing_item could cause the same id to
            # appear twice in the book list. Handle that by removing existing
            # item from the book list before replacing.
            for book_id in books:
                self.book_col_map[book_id] = tuple((existing_item if x == item_id else x) for x in self.book_col_map.get(book_id, ()) if x != existing_item)
            self.col_book_map[existing_item].update(books)
            db.conn.executemany('DELETE FROM {0} WHERE book=? AND {1}=?'.format(self.link_table, lcol), [
                (book_id, existing_item) for book_id in books])
            db.conn.execute('UPDATE {0} SET {1}=? WHERE {1}=?; DELETE FROM {2} WHERE id=?'.format(
                self.link_table, lcol, table), (existing_item, item_id, item_id))
        return affected_books, new_id


class AuthorsTable(ManyToManyTable):

    def read_id_maps(self, db):
@@ -293,10 +350,17 @@ class AuthorsTable(ManyToManyTable):
            self.alink_map[row[0]] = row[3]

    def set_sort_names(self, aus_map, db):
        aus_map = {aid:(a or '').strip() for aid, a in aus_map.iteritems()}
        self.asort_map.update(aus_map)
        db.conn.executemany('UPDATE authors SET sort=? WHERE id=?',
            [(v, k) for k, v in aus_map.iteritems()])

    def set_links(self, link_map, db):
        link_map = {aid:(l or '').strip() for aid, l in link_map.iteritems()}
        self.alink_map.update(link_map)
        db.conn.executemany('UPDATE authors SET link=? WHERE id=?',
            [(v, k) for k, v in link_map.iteritems()])

    def remove_books(self, book_ids, db):
        clean = ManyToManyTable.remove_books(self, book_ids, db)
        for item_id in clean:
@@ -304,6 +368,17 @@ class AuthorsTable(ManyToManyTable):
            self.asort_map.pop(item_id, None)
        return clean

    def rename_item(self, item_id, new_name, db):
        ret = ManyToManyTable.rename_item(self, item_id, new_name, db)
        if item_id not in self.id_map:
            self.alink_map.pop(item_id, None)
            self.asort_map.pop(item_id, None)
        else:
            # Was a simple rename, update the author sort value
            self.set_sort_names({item_id:author_to_author_sort(new_name)}, db)

        return ret

    def remove_items(self, item_ids, db):
        raise ValueError('Direct removal of authors is not allowed')

@@ -367,6 +442,9 @@ class FormatsTable(ManyToManyTable):
    def remove_items(self, item_ids, db):
        raise NotImplementedError('Cannot delete a format directly')

    def rename_item(self, item_id, new_name, db):
        raise NotImplementedError('Cannot rename formats')

    def update_fmt(self, book_id, fmt, fname, size, db):
        fmts = list(self.book_col_map.get(book_id, []))
        try:
@@ -420,6 +498,9 @@ class IdentifiersTable(ManyToManyTable):
    def remove_items(self, item_ids, db):
        raise NotImplementedError('Direct deletion of identifiers is not implemented')

    def rename_item(self, item_id, new_name, db):
        raise NotImplementedError('Cannot rename identifiers')

    def all_identifier_types(self):
        return frozenset(k for k, v in self.col_book_map.iteritems() if v)
@@ -251,4 +251,21 @@ class AddRemoveTest(BaseTest):

    # }}}

    def test_original_fmt(self): # {{{
        ' Test management of original fmt '
        af, ae, at = self.assertFalse, self.assertEqual, self.assertTrue
        db = self.init_cache()
        fmts = db.formats(1)
        af(db.has_format(1, 'ORIGINAL_FMT1'))
        at(db.save_original_format(1, 'FMT1'))
        at(db.has_format(1, 'ORIGINAL_FMT1'))
        raw = db.format(1, 'FMT1')
        ae(raw, db.format(1, 'ORIGINAL_FMT1'))
        db.add_format(1, 'FMT1', BytesIO(b'replacedfmt'))
        self.assertNotEqual(db.format(1, 'FMT1'), db.format(1, 'ORIGINAL_FMT1'))
        at(db.restore_original_format(1, 'ORIGINAL_FMT1'))
        ae(raw, db.format(1, 'FMT1'))
        af(db.has_format(1, 'ORIGINAL_FMT1'))
        ae(set(fmts), set(db.formats(1, verify_formats=False)))
    # }}}

@@ -21,6 +21,11 @@ class BaseTest(unittest.TestCase):
    longMessage = True
    maxDiff = None

    @classmethod
    def setUpClass(cls):
        from calibre.utils.config_base import reset_tweaks_to_default
        reset_tweaks_to_default()

    def setUp(self):
        self.library_path = self.mkdtemp()
        self.create_db(self.library_path)
@@ -79,4 +79,21 @@ class FilesystemTest(BaseTest):
        f.close()
        self.assertNotEqual(cache.field_for('title', 1), 'Moved', 'Title was changed despite file lock')

    def test_library_move(self):
        ' Test moving of library '
        from calibre.ptempfile import TemporaryDirectory
        cache = self.init_cache()
        self.assertIn('metadata.db', cache.get_top_level_move_items()[0])
        all_ids = cache.all_book_ids()
        fmt1 = cache.format(1, 'FMT1')
        cov = cache.cover(1)
        with TemporaryDirectory('moved_lib') as tdir:
            cache.move_library_to(tdir)
            self.assertIn('moved_lib', cache.backend.library_path)
            self.assertIn('moved_lib', cache.backend.dbpath)
            self.assertEqual(fmt1, cache.format(1, 'FMT1'))
            self.assertEqual(cov, cache.cover(1))
            cache.reload_from_db()
            self.assertEqual(all_ids, cache.all_book_ids())
            cache.backend.close()
@@ -11,9 +11,11 @@ from io import BytesIO
from repr import repr
from functools import partial
from tempfile import NamedTemporaryFile
from operator import itemgetter

from calibre.db.tests.base import BaseTest

# Utils {{{
class ET(object):

    def __init__(self, func_name, args, kwargs={}, old=None, legacy=None):
@@ -47,15 +49,17 @@ def run_funcs(self, db, ndb, funcs):
            meth(*args)
        else:
            fmt = lambda x:x
            if meth[0] in {'!', '@', '#', '+'}:
            if meth[0] in {'!', '@', '#', '+', '$', '-', '%'}:
                if meth[0] != '+':
                    fmt = {'!':dict, '@':lambda x:frozenset(x or ()), '#':lambda x:set((x or '').split(','))}[meth[0]]
                    fmt = {'!':dict, '@':lambda x:frozenset(x or ()), '#':lambda x:set((x or '').split(',')),
                           '$':lambda x:set(tuple(y) for y in x), '-':lambda x:None, '%':lambda x: set((x or '').split(','))}[meth[0]]
                else:
                    fmt = args[-1]
                    args = args[:-1]
                meth = meth[1:]
            res1, res2 = fmt(getattr(db, meth)(*args)), fmt(getattr(ndb, meth)(*args))
            self.assertEqual(res1, res2, 'The method: %s() returned different results for argument %s' % (meth, args))
# }}}
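# Sketch of the prefix protocol above (entries are illustrative): the first
# character of a method name picks the normalisation applied to both results
# before comparison; '+' takes the normaliser from the last argument and '-'
# discards return values entirely.
#
#   run_funcs(self, db, ndb, (
#       ('@tags', 0),                                         # frozenset
#       ('+format_metadata', 1, 'FMT1', itemgetter('size')),  # custom fmt
#       ('-set_custom', 1, 'x', 'enum'),                      # ignore result
#   ))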

class LegacyTest(BaseTest):

@@ -152,15 +156,44 @@ class LegacyTest(BaseTest):
    # }}}

    def test_legacy_direct(self): # {{{
        'Test methods that are directly equivalent in the old and new interface'
        'Test read-only methods that are directly equivalent in the old and new interface'
        from calibre.ebooks.metadata.book.base import Metadata
        from datetime import timedelta
        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old()
        newstag = ndb.new_api.get_item_id('tags', 'news')

        self.assertEqual(dict(db.prefs), dict(ndb.prefs))

        for meth, args in {
            'find_identical_books': [(Metadata('title one', ['author one']),), (Metadata('unknown'),), (Metadata('xxxx'),)],
            'get_top_level_move_items': [()],
            'get_books_for_category': [('tags', newstag), ('#formats', 'FMT1')],
            'get_next_series_num_for': [('A Series One',)],
            'get_id_from_uuid':[('ddddd',), (db.uuid(1, True),)],
            'cover':[(0,), (1,), (2,)],
            'get_author_id': [('author one',), ('unknown',), ('xxxxx',)],
            'series_id': [(0,), (1,), (2,)],
            'publisher_id': [(0,), (1,), (2,)],
            '@tags_older_than': [
                ('News', None), ('Tag One', None), ('xxxx', None), ('Tag One', None, 'News'), ('News', None, 'xxxx'),
                ('News', None, None, ['xxxxxxx']), ('News', None, 'Tag One', ['Author Two', 'Author One']),
                ('News', timedelta(0), None, None), ('News', timedelta(100000)),
            ],
            'format':[(1, 'FMT1', True), (2, 'FMT1', True), (0, 'xxxxxx')],
            'has_format':[(1, 'FMT1', True), (2, 'FMT1', True), (0, 'xxxxxx')],
            'sizeof_format':[(1, 'FMT1', True), (2, 'FMT1', True), (0, 'xxxxxx')],
            '@format_files':[(0,),(1,),(2,)],
            'formats':[(0,),(1,),(2,)],
            'max_size':[(0,),(1,),(2,)],
            'format_hash':[(1, 'FMT1'),(1, 'FMT2'), (2, 'FMT1')],
            'author_sort_from_authors': [(['Author One', 'Author Two', 'Unknown'],)],
            'has_book':[(Metadata('title one'),), (Metadata('xxxx1111'),)],
            'has_id':[(1,), (2,), (3,), (9999,)],
            'id':[(1,), (2,), (0,),],
            'index':[(1,), (2,), (3,), ],
            'is_empty':[()],
            'count':[()],
            'all_author_names':[()],
            'all_tag_names':[()],
            'all_series_names':[()],
@@ -198,15 +231,29 @@ class LegacyTest(BaseTest):
            'books_in_series_of':[(0,), (1,), (2,)],
            'books_with_same_title':[(Metadata(db.title(0)),), (Metadata(db.title(1)),), (Metadata('1234'),)],
        }.iteritems():
            for a in args:
            fmt = lambda x: x
            if meth[0] in {'!', '@'}:
                fmt = {'!':dict, '@':frozenset}[meth[0]]
                meth = meth[1:]
            elif meth == 'get_authors_with_ids':
                fmt = lambda val:{x[0]:tuple(x[1:]) for x in val}
            for a in args:
                self.assertEqual(fmt(getattr(db, meth)(*a)), fmt(getattr(ndb, meth)(*a)),
                    'The method: %s() returned different results for argument %s' % (meth, a))
        d1, d2 = BytesIO(), BytesIO()
        db.copy_cover_to(1, d1, True)
        ndb.copy_cover_to(1, d2, True)
        self.assertTrue(d1.getvalue() == d2.getvalue())
        d1, d2 = BytesIO(), BytesIO()
        db.copy_format_to(1, 'FMT1', d1, True)
        ndb.copy_format_to(1, 'FMT1', d2, True)
        self.assertTrue(d1.getvalue() == d2.getvalue())
        old = db.get_data_as_dict(prefix='test-prefix')
        new = ndb.get_data_as_dict(prefix='test-prefix')
        for o, n in zip(old, new):
            o = {type('')(k) if isinstance(k, bytes) else k:set(v) if isinstance(v, list) else v for k, v in o.iteritems()}
            n = {k:set(v) if isinstance(v, list) else v for k, v in n.iteritems()}
            self.assertEqual(o, n)
        db.close()
    # }}}
@@ -251,7 +298,7 @@ class LegacyTest(BaseTest):
    # }}}

    def test_legacy_adding_books(self): # {{{
        'Test various adding books methods'
        'Test various adding/deleting books methods'
        from calibre.ebooks.metadata.book.base import Metadata
        legacy, old = self.init_legacy(self.cloned_library), self.init_old(self.cloned_library)
        mi = Metadata('Added Book0', authors=('Added Author',))
@@ -308,6 +355,24 @@ class LegacyTest(BaseTest):
        self.assertEqual(cache.field_for('authors', bid), ('calibre',))
        self.assertEqual(cache.field_for('tags', bid), (_('News'), 'Events', 'one', 'two'))

        self.assertTrue(legacy.cover(1, index_is_id=True))
        origcov = legacy.cover(1, index_is_id=True)
        self.assertTrue(legacy.has_cover(1))
        legacy.remove_cover(1)
        self.assertFalse(legacy.has_cover(1))
        self.assertFalse(legacy.cover(1, index_is_id=True))
        legacy.set_cover(3, origcov)
        self.assertEqual(legacy.cover(3, index_is_id=True), origcov)
        self.assertTrue(legacy.has_cover(3))

        self.assertTrue(legacy.format(1, 'FMT1', index_is_id=True))
        legacy.remove_format(1, 'FMT1', index_is_id=True)
        self.assertIsNone(legacy.format(1, 'FMT1', index_is_id=True))

        legacy.delete_book(1)
        old.delete_book(1)
        self.assertNotIn(1, legacy.all_ids())
        legacy.dump_metadata((2,3))
        old.close()
    # }}}

@@ -325,14 +390,18 @@ class LegacyTest(BaseTest):
            # Obsolete/broken methods
            'author_id', # replaced by get_author_id
            'books_for_author', # broken
            'books_in_old_database', # unused
            'books_in_old_database', 'sizeof_old_database', # unused
            'migrate_old', # no longer supported
            'remove_unused_series', # superseded by clean API

            # Internal API
            'clean_user_categories', 'cleanup_tags', 'books_list_filter', 'conn', 'connect', 'construct_file_name',
            'construct_path_name', 'clear_dirtied', 'commit_dirty_cache', 'initialize_database', 'initialize_dynamic',
            'run_import_plugins', 'vacuum', 'set_path', 'row', 'row_factory', 'rows', 'rmtree', 'series_index_pat',
            'import_old_database', 'dirtied_lock', 'dirtied_cache', 'dirty_queue_length', 'dirty_books_referencing',
            'windows_check_if_files_in_use', 'get_metadata_for_dump', 'get_a_dirtied_book',
            'windows_check_if_files_in_use', 'get_metadata_for_dump', 'get_a_dirtied_book', 'dirtied_sequence',
            'format_filename_cache', 'format_metadata_cache', 'filter', 'create_version1', 'normpath', 'custom_data_adapters',
            'custom_table_names', 'custom_columns_in_meta', 'custom_tables',
        }
        SKIP_ARGSPEC = {
            '__init__',
@@ -404,6 +473,49 @@ class LegacyTest(BaseTest):
    def test_legacy_setters(self): # {{{
        'Test methods that are directly equivalent in the old and new interface'
        from calibre.ebooks.metadata.book.base import Metadata
        from calibre.utils.date import now
        n = now()
        ndb = self.init_legacy(self.cloned_library)
        amap = ndb.new_api.get_id_map('authors')
        sorts = [(aid, 's%d' % aid) for aid in amap]
        db = self.init_old(self.cloned_library)
        run_funcs(self, db, ndb, (
            ('+format_metadata', 1, 'FMT1', itemgetter('size')),
            ('+format_metadata', 1, 'FMT2', itemgetter('size')),
            ('+format_metadata', 2, 'FMT1', itemgetter('size')),
            ('get_tags', 0), ('get_tags', 1), ('get_tags', 2),
            ('is_tag_used', 'News'), ('is_tag_used', 'xchkjgfh'),
            ('bulk_modify_tags', (1,), ['t1'], ['News']),
            ('bulk_modify_tags', (2,), ['t1'], ['Tag One', 'Tag Two']),
            ('bulk_modify_tags', (3,), ['t1', 't2', 't3']),
            (db.clean,),
            ('@all_tags',),
            ('@tags', 0), ('@tags', 1), ('@tags', 2),

            ('unapply_tags', 1, ['t1']),
            ('unapply_tags', 2, ['xxxx']),
            ('unapply_tags', 3, ['t2', 't3']),
            (db.clean,),
            ('@all_tags',),
            ('@tags', 0), ('@tags', 1), ('@tags', 2),

            ('update_last_modified', (1,), True, n), ('update_last_modified', (3,), True, n),
            ('metadata_last_modified', 1, True), ('metadata_last_modified', 3, True),
            ('set_sort_field_for_author', sorts[0][0], sorts[0][1]),
            ('set_sort_field_for_author', sorts[1][0], sorts[1][1]),
            ('set_sort_field_for_author', sorts[2][0], sorts[2][1]),
            ('set_link_field_for_author', sorts[0][0], sorts[0][1]),
            ('set_link_field_for_author', sorts[1][0], sorts[1][1]),
            ('set_link_field_for_author', sorts[2][0], sorts[2][1]),
            (db.refresh,),
            ('author_sort', 0), ('author_sort', 1), ('author_sort', 2),
        ))
        omi = [db.get_metadata(x) for x in (0, 1, 2)]
        nmi = [ndb.get_metadata(x) for x in (0, 1, 2)]
        self.assertEqual([x.author_sort_map for x in omi], [x.author_sort_map for x in nmi])
        self.assertEqual([x.author_link_map for x in omi], [x.author_link_map for x in nmi])
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)

@@ -412,7 +524,7 @@ class LegacyTest(BaseTest):
            ('set_author_sort', 3, 'new_aus'),
            ('set_comment', 1, ''), ('set_comment', 2, None), ('set_comment', 3, '<p>a comment</p>'),
            ('set_has_cover', 1, True), ('set_has_cover', 2, True), ('set_has_cover', 3, 1),
            ('set_identifiers', 2, {'test':'', 'a':'b'}), ('set_identifiers', 3, {'id':'1', 'url':'http://acme.com'}), ('set_identifiers', 1, {}),
            ('set_identifiers', 2, {'test':'', 'a':'b'}), ('set_identifiers', 3, {'id':'1', 'isbn':'9783161484100'}), ('set_identifiers', 1, {}),
            ('set_languages', 1, ('en',)),
            ('set_languages', 2, ()),
            ('set_languages', 3, ('deu', 'spa', 'fra')),
@@ -438,6 +550,7 @@ class LegacyTest(BaseTest):
            ('series', 0), ('series', 1), ('series', 2),
            ('series_index', 0), ('series_index', 1), ('series_index', 2),
            ('uuid', 0), ('uuid', 1), ('uuid', 2),
            ('isbn', 0), ('isbn', 1), ('isbn', 2),
            ('@tags', 0), ('@tags', 1), ('@tags', 2),
            ('@all_tags',),
            ('@get_all_identifier_types',),
@@ -479,5 +592,162 @@ class LegacyTest(BaseTest):
            ('#tags', 0), ('#tags', 1), ('#tags', 2),
            ('authors', 0), ('authors', 1), ('authors', 2),
            ('publisher', 0), ('publisher', 1), ('publisher', 2),
            ('delete_tag', 'T1'), ('delete_tag', 'T2'), ('delete_tag', 'Tag one'), ('delete_tag', 'News'),
            (db.clean,), (db.refresh,),
            ('@all_tags',),
            ('#tags', 0), ('#tags', 1), ('#tags', 2),
        ))
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        run_funcs(self, db, ndb, (
            ('remove_all_tags', (1, 2, 3)),
            (db.clean,),
            ('@all_tags',),
            ('@tags', 0), ('@tags', 1), ('@tags', 2),
        ))
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        a = {v:k for k, v in ndb.new_api.get_id_map('authors').iteritems()}['Author One']
        t = {v:k for k, v in ndb.new_api.get_id_map('tags').iteritems()}['Tag One']
        s = {v:k for k, v in ndb.new_api.get_id_map('series').iteritems()}['A Series One']
        p = {v:k for k, v in ndb.new_api.get_id_map('publisher').iteritems()}['Publisher One']
        run_funcs(self, db, ndb, (
            ('rename_author', a, 'Author Two'),
            ('rename_tag', t, 'News'),
            ('rename_series', s, 'ss'),
            ('rename_publisher', p, 'publisher one'),
            (db.clean,),
            (db.refresh,),
            ('@all_tags',),
            ('tags', 0), ('tags', 1), ('tags', 2),
            ('series', 0), ('series', 1), ('series', 2),
            ('publisher', 0), ('publisher', 1), ('publisher', 2),
            ('series_index', 0), ('series_index', 1), ('series_index', 2),
            ('authors', 0), ('authors', 1), ('authors', 2),
            ('author_sort', 0), ('author_sort', 1), ('author_sort', 2),
        ))
        db.close()

    # }}}

    def test_legacy_custom(self): # {{{
        'Test the legacy API for custom columns'
        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        # Test getting
        run_funcs(self, db, ndb, (
            ('all_custom', 'series'), ('all_custom', 'tags'), ('all_custom', 'rating'), ('all_custom', 'authors'), ('all_custom', None, 7),
            ('get_next_cc_series_num_for', 'My Series One', 'series'), ('get_next_cc_series_num_for', 'My Series Two', 'series'),
            ('is_item_used_in_multiple', 'My Tag One', 'tags'),
            ('is_item_used_in_multiple', 'My Series One', 'series'),
            ('$get_custom_items_with_ids', 'series'), ('$get_custom_items_with_ids', 'tags'), ('$get_custom_items_with_ids', 'float'),
            ('$get_custom_items_with_ids', 'rating'), ('$get_custom_items_with_ids', 'authors'), ('$get_custom_items_with_ids', None, 7),
        ))
        for label in ('tags', 'series', 'authors', 'comments', 'rating', 'date', 'yesno', 'isbn', 'enum', 'formats', 'float', 'comp_tags'):
            for func in ('get_custom', 'get_custom_extra', 'get_custom_and_extra'):
                run_funcs(self, db, ndb, [(func, idx, label) for idx in range(3)])

        # Test renaming/deleting
        t = {v:k for k, v in ndb.new_api.get_id_map('#tags').iteritems()}['My Tag One']
        t2 = {v:k for k, v in ndb.new_api.get_id_map('#tags').iteritems()}['My Tag Two']
        a = {v:k for k, v in ndb.new_api.get_id_map('#authors').iteritems()}['My Author Two']
        a2 = {v:k for k, v in ndb.new_api.get_id_map('#authors').iteritems()}['Custom One']
        s = {v:k for k, v in ndb.new_api.get_id_map('#series').iteritems()}['My Series One']
        run_funcs(self, db, ndb, (
            ('delete_custom_item_using_id', t, 'tags'),
            ('delete_custom_item_using_id', a, 'authors'),
            ('rename_custom_item', t2, 't2', 'tags'),
            ('rename_custom_item', a2, 'custom one', 'authors'),
            ('rename_custom_item', s, 'My Series Two', 'series'),
            ('delete_item_from_multiple', 'custom two', 'authors'),
            (db.clean,),
            (db.refresh,),
            ('all_custom', 'series'), ('all_custom', 'tags'), ('all_custom', 'authors'),
        ))
        for label in ('tags', 'authors', 'series'):
            run_funcs(self, db, ndb, [('get_custom_and_extra', idx, label) for idx in range(3)])
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        # Test setting
        run_funcs(self, db, ndb, (
            ('-set_custom', 1, 't1 & t2', 'authors'),
            ('-set_custom', 1, 't3 & t4', 'authors', None, True),
            ('-set_custom', 3, 'test one & test Two', 'authors'),
            ('-set_custom', 1, 'ijfkghkjdf', 'enum'),
            ('-set_custom', 3, 'One', 'enum'),
            ('-set_custom', 3, 'xxx', 'formats'),
            ('-set_custom', 1, 'my tag two', 'tags', None, False, False, None, True, True),
            (db.clean,), (db.refresh,),
            ('all_custom', 'series'), ('all_custom', 'tags'), ('all_custom', 'authors'),
        ))
        for label in ('tags', 'series', 'authors', 'comments', 'rating', 'date', 'yesno', 'isbn', 'enum', 'formats', 'float', 'comp_tags'):
            for func in ('get_custom', 'get_custom_extra', 'get_custom_and_extra'):
                run_funcs(self, db, ndb, [(func, idx, label) for idx in range(3)])
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        # Test setting bulk
        run_funcs(self, db, ndb, (
            ('set_custom_bulk', (1,2,3), 't1 & t2', 'authors'),
            ('set_custom_bulk', (1,2,3), 'a series', 'series', None, False, False, (9, 10, 11)),
            ('set_custom_bulk', (1,2,3), 't1', 'tags', None, True),
            (db.clean,), (db.refresh,),
            ('all_custom', 'series'), ('all_custom', 'tags'), ('all_custom', 'authors'),
        ))
        for label in ('tags', 'series', 'authors', 'comments', 'rating', 'date', 'yesno', 'isbn', 'enum', 'formats', 'float', 'comp_tags'):
            for func in ('get_custom', 'get_custom_extra', 'get_custom_and_extra'):
                run_funcs(self, db, ndb, [(func, idx, label) for idx in range(3)])
        db.close()

        ndb = self.init_legacy(self.cloned_library)
        db = self.init_old(self.cloned_library)
        # Test bulk multiple
        run_funcs(self, db, ndb, (
            ('set_custom_bulk_multiple', (1,2,3), ['t1'], ['My Tag One'], 'tags'),
            (db.clean,), (db.refresh,),
            ('all_custom', 'tags'),
            ('get_custom', 0, 'tags'), ('get_custom', 1, 'tags'), ('get_custom', 2, 'tags'),
        ))
        db.close()

        o = self.cloned_library
        n = self.cloned_library
        ndb, db = self.init_legacy(n), self.init_old(o)
        ndb.create_custom_column('created', 'Created', 'text', True, True, {'moose':'cat'})
        db.create_custom_column('created', 'Created', 'text', True, True, {'moose':'cat'})
        db.close()
        ndb, db = self.init_legacy(n), self.init_old(o)
        self.assertEqual(db.custom_column_label_map['created'], ndb.backend.custom_field_metadata('created'))
        num = db.custom_column_label_map['created']['num']
        ndb.set_custom_column_metadata(num, is_editable=False, name='Crikey', display={})
        db.set_custom_column_metadata(num, is_editable=False, name='Crikey', display={})
        db.close()
        ndb, db = self.init_legacy(n), self.init_old(o)
        self.assertEqual(db.custom_column_label_map['created'], ndb.backend.custom_field_metadata('created'))
        db.close()
        ndb = self.init_legacy(n)
        ndb.delete_custom_column('created')
        ndb = self.init_legacy(n)
        self.assertRaises(KeyError, ndb.custom_field_name, num=num)
    # }}}

    def test_legacy_original_fmt(self): # {{{
        db, ndb = self.init_old(), self.init_legacy()
        run_funcs(self, db, ndb, (
            ('original_fmt', 1, 'FMT1'),
            ('save_original_format', 1, 'FMT1'),
            ('original_fmt', 1, 'FMT1'),
            ('restore_original_format', 1, 'ORIGINAL_FMT1'),
            ('original_fmt', 1, 'FMT1'),
            ('%formats', 1, True),
        ))
        db.close()

    # }}}

@@ -149,8 +149,6 @@ class ReadingTest(BaseTest):
            '#tags':[3, 2, 1],
            '#yesno':[3, 1, 2],
            '#comments':[3, 2, 1],
            # TODO: Add an empty book to the db and ensure that empty
            # fields sort the same as they do in db2
        }.iteritems():
            x = list(reversed(order))
            self.assertEqual(order, cache.multisort([(field, True)],
@@ -474,3 +474,72 @@ class WritingTest(BaseTest):
        for bid in c.all_book_ids():
            self.assertIn(c.field_for('#series', bid), (None, 'My Series One'))
    # }}}

    def test_rename_items(self): # {{{
        ' Test renaming of many-(many,one) items '
        cl = self.cloned_library
        cache = self.init_cache(cl)
        # Check that renaming authors updates author sort and path
        a = {v:k for k, v in cache.get_id_map('authors').iteritems()}['Unknown']
        self.assertEqual(cache.rename_items('authors', {a:'New Author'})[0], {3})
        a = {v:k for k, v in cache.get_id_map('authors').iteritems()}['Author One']
        self.assertEqual(cache.rename_items('authors', {a:'Author Two'})[0], {1, 2})
        for c in (cache, self.init_cache(cl)):
            self.assertEqual(c.all_field_names('authors'), {'New Author', 'Author Two'})
            self.assertEqual(c.field_for('author_sort', 3), 'Author, New')
            self.assertIn('New Author/', c.field_for('path', 3))
            self.assertEqual(c.field_for('authors', 1), ('Author Two',))
            self.assertEqual(c.field_for('author_sort', 1), 'Two, Author')

        t = {v:k for k, v in cache.get_id_map('tags').iteritems()}['Tag One']
        # Test case change
        self.assertEqual(cache.rename_items('tags', {t:'tag one'}), ({1, 2}, {t:t}))
        for c in (cache, self.init_cache(cl)):
            self.assertEqual(c.all_field_names('tags'), {'tag one', 'Tag Two', 'News'})
            self.assertEqual(set(c.field_for('tags', 1)), {'tag one', 'News'})
            self.assertEqual(set(c.field_for('tags', 2)), {'tag one', 'Tag Two'})
        # Test new name
        self.assertEqual(cache.rename_items('tags', {t:'t1'})[0], {1,2})
        for c in (cache, self.init_cache(cl)):
            self.assertEqual(c.all_field_names('tags'), {'t1', 'Tag Two', 'News'})
            self.assertEqual(set(c.field_for('tags', 1)), {'t1', 'News'})
            self.assertEqual(set(c.field_for('tags', 2)), {'t1', 'Tag Two'})
        # Test rename to existing
        self.assertEqual(cache.rename_items('tags', {t:'Tag Two'})[0], {1,2})
        for c in (cache, self.init_cache(cl)):
            self.assertEqual(c.all_field_names('tags'), {'Tag Two', 'News'})
            self.assertEqual(set(c.field_for('tags', 1)), {'Tag Two', 'News'})
            self.assertEqual(set(c.field_for('tags', 2)), {'Tag Two'})
        # Test on a custom column
        t = {v:k for k, v in cache.get_id_map('#tags').iteritems()}['My Tag One']
        self.assertEqual(cache.rename_items('#tags', {t:'My Tag Two'})[0], {2})
        for c in (cache, self.init_cache(cl)):
            self.assertEqual(c.all_field_names('#tags'), {'My Tag Two'})
            self.assertEqual(set(c.field_for('#tags', 2)), {'My Tag Two'})

        # Test a Many-one field
        s = {v:k for k, v in cache.get_id_map('series').iteritems()}['A Series One']
        # Test case change
        self.assertEqual(cache.rename_items('series', {s:'a series one'}), ({1, 2}, {s:s}))
        for c in (cache, self.init_cache(cl)):
            self.assertEqual(c.all_field_names('series'), {'a series one'})
            self.assertEqual(c.field_for('series', 1), 'a series one')
            self.assertEqual(c.field_for('series_index', 1), 2.0)

        # Test new name
        self.assertEqual(cache.rename_items('series', {s:'series'})[0], {1, 2})
        for c in (cache, self.init_cache(cl)):
            self.assertEqual(c.all_field_names('series'), {'series'})
            self.assertEqual(c.field_for('series', 1), 'series')
            self.assertEqual(c.field_for('series', 2), 'series')
            self.assertEqual(c.field_for('series_index', 1), 2.0)

        s = {v:k for k, v in cache.get_id_map('#series').iteritems()}['My Series One']
        # Test custom column with rename to existing
        self.assertEqual(cache.rename_items('#series', {s:'My Series Two'})[0], {2})
        for c in (cache, self.init_cache(cl)):
            self.assertEqual(c.all_field_names('#series'), {'My Series Two'})
            self.assertEqual(c.field_for('#series', 2), 'My Series Two')
            self.assertEqual(c.field_for('#series_index', 1), 3.0)
            self.assertEqual(c.field_for('#series_index', 2), 4.0)
    # }}}

@@ -10,6 +10,7 @@ __docformat__ = 'restructuredtext en'
import weakref
from functools import partial
from itertools import izip, imap
from future_builtins import map

from calibre.ebooks.metadata import title_sort
from calibre.utils.config_base import tweaks
@@ -119,6 +120,9 @@ class View(object):
        self._map = tuple(sorted(self.cache.all_book_ids()))
        self._map_filtered = tuple(self._map)

    def count(self):
        return len(self._map)

    def get_property(self, id_or_index, index_is_id=False, loc=-1):
        book_id = id_or_index if index_is_id else self._map_filtered[id_or_index]
        return self._field_getters[loc](book_id)
@@ -161,6 +165,10 @@ class View(object):
    def index_to_id(self, idx):
        return self._map_filtered[idx]

    def id_to_index(self, book_id):
        return self._map.index(book_id)
    row = index_to_id

    def _get(self, field, idx, index_is_id=True, default_value=None, fmt=lambda x:x):
        id_ = idx if index_is_id else self.index_to_id(idx)
        if index_is_id and id_ not in self.cache.all_book_ids():
@@ -304,8 +312,17 @@ class View(object):
    def refresh(self, field=None, ascending=True):
        self._map = tuple(self.cache.all_book_ids())
        self._map_filtered = tuple(self._map)
        self.cache.clear_caches()
        if field is not None:
            self.sort(field, ascending)
        if self.search_restriction or self.base_restriction:
            self.search('', return_matches=False)

    def refresh_ids(self, db, ids):
        self.cache.clear_caches(book_ids=ids)
        try:
            return list(map(self.id_to_index, ids))
        except ValueError:
            pass
        return None
|
||||
|
||||
|
@@ -151,6 +151,7 @@ class ANDROID(USBMS):
                0x61ce : [0x226, 0x227, 0x9999, 0x100],
                0x618e : [0x226, 0x227, 0x9999, 0x100],
                0x6205 : [0x226, 0x227, 0x9999, 0x100],
+               0x6234 : [0x231],
                },

        # Archos
@@ -254,7 +255,7 @@ class ANDROID(USBMS):
        'UMS_COMPOSITE', 'PRO', '.KOBO_VOX', 'SGH-T989_CARD', 'SGH-I727',
        'USB_FLASH_DRIVER', 'ANDROID', 'MID7042', '7035', 'VIEWPAD_7E',
        'NOVO7', 'ADVANCED', 'TABLET_PC', 'F', 'E400_SD_CARD', 'ST80208-1', 'XT894',
-       '_USB', 'PROD_TAB13-201', 'URFPAD2', 'MID1126',
+       '_USB', 'PROD_TAB13-201', 'URFPAD2', 'MID1126', 'ANDROID_PLATFORM',
        ]

    OSX_MAIN_MEM = 'Android Device Main Memory'

@@ -86,7 +86,7 @@ class COOL_ER(EB600):
    FORMATS = ['epub', 'mobi', 'prc', 'pdf', 'txt']

    VENDOR_NAME = 'COOL-ER'
-   WINDOWS_MAIN_MEM = 'EREADER'
+   WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'EREADER'

    OSX_MAIN_MEM = 'COOL-ER eReader Media'

@@ -229,7 +229,8 @@ class TREKSTOR(USBMS):
            0x0067, # This is for the Pyrus Mini
            0x006f, # This is for the Pyrus Maxi
            0x003e, # This is for the EBOOK_PLAYER_5M https://bugs.launchpad.net/bugs/792091
-           0x5cL,  # This is for the 4ink http://www.mobileread.com/forums/showthread.php?t=191318
+           0x05cL, # This is for the 4ink http://www.mobileread.com/forums/showthread.php?t=191318
+           0x006c, # This is for the 4ink http://www.mobileread.com/forums/showthread.php?t=218273
            ]
    BCD = [0x0002, 0x100, 0x0222]
@@ -105,13 +105,22 @@ class EPUBOutput(OutputFormatPlugin):
                ' EPUB, putting all files into the top level.')
        ),

+        OptionRecommendation(name='epub_inline_toc', recommended_value=False,
+            help=_('Insert an inline Table of Contents that will appear as part of the main book content.')
+        ),
+
+        OptionRecommendation(name='epub_toc_at_end', recommended_value=False,
+            help=_('Put the inserted inline Table of Contents at the end of the book instead of the start.')
+        ),
+
+        OptionRecommendation(name='toc_title', recommended_value=None,
+            help=_('Title for any generated in-line table of contents.')
+        ),
    ])

    recommendations = set([('pretty_print', True, OptionRecommendation.HIGH)])

    def workaround_webkit_quirks(self): # {{{
        from calibre.ebooks.oeb.base import XPath
        for x in self.oeb.spine:

@@ -159,12 +168,17 @@ class EPUBOutput(OutputFormatPlugin):
        else:
            seen_names.add(name)
    # }}}

    def convert(self, oeb, output_path, input_plugin, opts, log):
        self.log, self.opts, self.oeb = log, opts, oeb

+        if self.opts.epub_inline_toc:
+            from calibre.ebooks.mobi.writer8.toc import TOCAdder
+            opts.mobi_toc_at_start = not opts.epub_toc_at_end
+            opts.mobi_passthrough = False
+            opts.no_inline_toc = False
+            TOCAdder(oeb, opts, replace_previous_inline_toc=True, ignore_existing_toc=True)
+
        if self.opts.epub_flatten:
            from calibre.ebooks.oeb.transforms.filenames import FlatFilenames
            FlatFilenames()(oeb, opts)

@@ -234,7 +248,7 @@ class EPUBOutput(OutputFormatPlugin):
            oeb_output = plugin_for_output_format('oeb')
            oeb_output.convert(oeb, tdir, input_plugin, opts, log)
            opf = [x for x in os.listdir(tdir) if x.endswith('.opf')][0]
-            self.condense_ncx([os.path.join(tdir, x) for x in os.listdir(tdir)\
+            self.condense_ncx([os.path.join(tdir, x) for x in os.listdir(tdir)
                    if x.endswith('.ncx')][0])
            encryption = None
            if encrypted_fonts:
@@ -50,14 +50,13 @@ def merge_result(oldmi, newmi, ensure_fields=None):
    return newmi

def main(do_identify, covers, metadata, ensure_fields, tdir):
-    os.chdir(tdir)
    failed_ids = set()
    failed_covers = set()
    all_failed = True
    log = GUILog()

    for book_id, mi in metadata.iteritems():
-        mi = OPF(BytesIO(mi), basedir=os.getcwdu(),
+        mi = OPF(BytesIO(mi), basedir=tdir,
                populate_spine=False).to_book_metadata()
        title, authors, identifiers = mi.title, mi.authors, mi.identifiers
        cdata = None

@@ -77,7 +76,7 @@ def main(do_identify, covers, metadata, ensure_fields, tdir):
            if not mi.is_null('rating'):
                # set_metadata expects a rating out of 10
                mi.rating *= 2
-            with open('%d.mi'%book_id, 'wb') as f:
+            with open(os.path.join(tdir, '%d.mi'%book_id), 'wb') as f:
                f.write(metadata_to_opf(mi, default_lang='und'))
        else:
            log.error('Failed to download metadata for', title)

@@ -89,11 +88,11 @@ def main(do_identify, covers, metadata, ensure_fields, tdir):
            if cdata is None:
                failed_covers.add(book_id)
            else:
-                with open('%d.cover'%book_id, 'wb') as f:
+                with open(os.path.join(tdir, '%d.cover'%book_id), 'wb') as f:
                    f.write(cdata[-1])
                all_failed = False

-        with open('%d.log'%book_id, 'wb') as f:
+        with open(os.path.join(tdir, '%d.log'%book_id), 'wb') as f:
            f.write(log.plain_text.encode('utf-8'))

    return failed_ids, failed_covers, all_failed
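The pattern in the hunks above, dropping os.chdir(tdir) and joining tdir into every output path instead, is what makes this worker safe to run next to code that depends on the process working directory. A minimal illustrative sketch of the idea (hypothetical names, not calibre code):

    import os, tempfile

    def write_result(tdir, book_id, payload):
        # Join the target directory explicitly instead of relying on a
        # process-wide os.chdir(), which would affect every other thread.
        path = os.path.join(tdir, '%d.log' % book_id)
        with open(path, 'wb') as f:
            f.write(payload)
        return path

    tdir = tempfile.mkdtemp()
    print(write_result(tdir, 1, b'example'))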
@@ -34,9 +34,17 @@ TEMPLATE = '''
</html>
'''

+def find_previous_calibre_inline_toc(oeb):
+    if 'toc' in oeb.guide:
+        href = urlnormalize(oeb.guide['toc'].href.partition('#')[0])
+        if href in oeb.manifest.hrefs:
+            item = oeb.manifest.hrefs[href]
+            if (hasattr(item.data, 'xpath') and XPath('//h:body[@id="calibre_generated_inline_toc"]')(item.data)):
+                return item
+
class TOCAdder(object):

-    def __init__(self, oeb, opts):
+    def __init__(self, oeb, opts, replace_previous_inline_toc=False, ignore_existing_toc=False):
        self.oeb, self.opts, self.log = oeb, opts, oeb.log
        self.title = opts.toc_title or DEFAULT_TITLE
        self.at_start = opts.mobi_toc_at_start

@@ -44,6 +52,12 @@ class TOCAdder(object):
        self.added_toc_guide_entry = False
        self.has_toc = oeb.toc and oeb.toc.count() > 1

+        self.tocitem = tocitem = None
+        if replace_previous_inline_toc:  # the raw commit tests the function object here, which is always true; the new flag is clearly what was meant
+            tocitem = self.tocitem = find_previous_calibre_inline_toc(oeb)
+        if ignore_existing_toc and 'toc' in oeb.guide:
+            oeb.guide.remove('toc')
+
        if 'toc' in oeb.guide:
            # Remove spurious toc entry from guide if it is not in spine or it
            # does not have any hyperlinks

@@ -81,13 +95,19 @@ class TOCAdder(object):
        for child in self.oeb.toc:
            self.process_toc_node(child, parent)

+        if tocitem is not None:
+            href = tocitem.href
+            if oeb.spine.index(tocitem) > -1:
+                oeb.spine.remove(tocitem)
+            tocitem.data = root
+        else:
            id, href = oeb.manifest.generate('contents', 'contents.xhtml')
-            item = self.generated_item = oeb.manifest.add(id, href, XHTML_MIME,
+            tocitem = self.generated_item = oeb.manifest.add(id, href, XHTML_MIME,
                    data=root)
            if self.at_start:
-                oeb.spine.insert(0, item, linear=True)
+                oeb.spine.insert(0, tocitem, linear=True)
            else:
-                oeb.spine.add(item, linear=False)
+                oeb.spine.add(tocitem, linear=False)

        oeb.guide.add('toc', 'Table of Contents', href)

@@ -95,7 +115,10 @@ class TOCAdder(object):
        li = parent.makeelement(XHTML('li'))
        li.tail = '\n'+ ('\t'*level)
        parent.append(li)
-        a = parent.makeelement(XHTML('a'), href=toc.href or '#')
+        href = toc.href
+        if self.tocitem is not None and href:
+            href = self.tocitem.relhref(toc.href)
+        a = parent.makeelement(XHTML('a'), href=href or '#')
        a.text = toc.title
        li.append(a)
        if toc.count() > 0:

@@ -115,3 +138,4 @@ class TOCAdder(object):
        self.oeb.guide.remove('toc')
        self.added_toc_guide_entry = False
@@ -50,6 +50,8 @@ class BookIndexing
        this.last_check = [null, null]

    cache_valid: (anchors) ->
+        if not anchors
+            return false
        for a in anchors
            if not Object.prototype.hasOwnProperty.call(this.cache, a)
                return false

@@ -65,6 +67,8 @@ class BookIndexing
            return this.cache

        ans = {}
+        if not anchors
+            return ans
        for anchor in anchors
            elem = document.getElementById(anchor)
            if elem == null
@@ -294,8 +294,20 @@ class PagedDisplay
        return Math.floor(xpos/this.page_width)

    column_location: (elem) ->
-        # Return the location of elem relative to its containing column
+        # Return the location of elem relative to its containing column.
+        # WARNING: This method may cause the viewport to scroll (to workaround
+        # a bug in WebKit).
        br = elem.getBoundingClientRect()
-        [left, top] = calibre_utils.viewport_to_document(br.left, br.top, elem.ownerDocument)
+        # Because of a bug in WebKit's getBoundingClientRect() in column
+        # mode, this position can be inaccurate, see
+        # https://bugs.launchpad.net/calibre/+bug/1202390 for a test case.
+        # The usual symptom of the inaccuracy is br.top is highly negative.
+        if br.top < -100
+            # We have to actually scroll the element into view to get its
+            # position
+            elem.scrollIntoView()
+            [left, top] = calibre_utils.viewport_to_document(elem.scrollLeft, elem.scrollTop, elem.ownerDocument)
+        else
+            [left, top] = calibre_utils.viewport_to_document(br.left, br.top, elem.ownerDocument)
        c = this.column_at(left)
        width = Math.min(br.right, (c+1)*this.page_width) - br.left
@@ -371,11 +371,13 @@ class CSSFlattener(object):
        is_drop_cap = (cssdict.get('float', None) == 'left' and 'font-size' in
                cssdict and len(node) == 0 and node.text and
                len(node.text) == 1)
-        is_drop_cap = is_drop_cap or (
-            # The docx input plugin generates drop caps that look like this
-            len(node) == 1 and not node.text and len(node[0]) == 0 and
-            node[0].text and not node[0].tail and len(node[0].text) == 1 and
-            'line-height' in cssdict and 'font-size' in cssdict)
+        # Detect drop caps generated by the docx input plugin
+        if (node.tag and node.tag.endswith('}p') and len(node) == 0 and node.text and len(node.text.strip()) == 1 and
+                not node.tail and 'line-height' in cssdict and 'font-size' in cssdict):
+            dp = node.getparent()
+            if dp.tag and dp.tag.endswith('}div') and len(dp) == 1 and not dp.text:
+                if stylizer.style(dp).cssdict().get('float', None) == 'left':
+                    is_drop_cap = True
        if not self.context.disable_font_rescaling and not is_drop_cap:
            _sbase = self.sbase if self.sbase is not None else \
                    self.context.source.fbase
@@ -10,7 +10,7 @@ import re, uuid

from lxml import etree
from urlparse import urlparse
-from collections import OrderedDict
+from collections import OrderedDict, Counter

from calibre.ebooks.oeb.base import XPNSMAP, TOC, XHTML, xml2text, barename
from calibre.ebooks import ConversionError

@@ -22,6 +22,26 @@ def XPath(x):
        raise ConversionError(
                'The syntax of the XPath expression %s is invalid.' % repr(x))

+def isspace(x):
+    return not x or x.replace(u'\xa0', u'').isspace()
+
+def at_start(elem):
+    ' Return True if there is no content before elem '
+    body = XPath('ancestor-or-self::h:body')(elem)
+    if not body:
+        return True
+    body = body[0]
+    ancestors = frozenset(XPath('ancestor::*')(elem))
+    for x in body.iter():
+        if x is elem:
+            return True
+        if getattr(x, 'tag', None) and x.tag.rpartition('}')[-1] in {'img', 'svg'}:
+            return False
+        if isspace(getattr(x, 'text', None)) and (x in ancestors or isspace(getattr(x, 'tail', None))):
+            continue
+        return False
+    return False
+
class DetectStructure(object):

    def __call__(self, oeb, opts):

@@ -51,7 +71,7 @@ class DetectStructure(object):
            regexp = re.compile(opts.toc_filter)
            for node in list(self.oeb.toc.iter()):
                if not node.title or regexp.search(node.title) is not None:
-                    self.log('Filtering', node.title if node.title else\
+                    self.log('Filtering', node.title if node.title else
                            'empty node', 'from TOC')
                    self.oeb.toc.remove(node)

@@ -92,7 +112,8 @@ class DetectStructure(object):
                    'Invalid start reading at XPath expression, ignoring: %s'%expr)
            return
        for item in self.oeb.spine:
-            if not hasattr(item.data, 'xpath'): continue
+            if not hasattr(item.data, 'xpath'):
+                continue
            matches = expr(item.data)
            if matches:
                elem = matches[0]

@@ -129,15 +150,25 @@ class DetectStructure(object):
        chapter_mark = self.opts.chapter_mark
        page_break_before = 'display: block; page-break-before: always'
        page_break_after = 'display: block; page-break-after: always'
+        c = Counter()
        for item, elem in self.detected_chapters:
+            c[item] += 1
            text = xml2text(elem).strip()
            text = re.sub(r'\s+', ' ', text.strip())
            self.log('\tDetected chapter:', text[:50])
            if chapter_mark == 'none':
                continue
-            elif chapter_mark == 'rule':
+            if chapter_mark == 'rule':
                mark = etree.Element(XHTML('hr'))
            elif chapter_mark == 'pagebreak':
+                if c[item] < 3 and at_start(elem):
+                    # For the first two elements in this item, check if they
+                    # are at the start of the file, in which case inserting a
+                    # page break is unnecessary and can lead to extra blank
+                    # pages in the PDF Output plugin. We need to use two as
+                    # feedbooks epubs match both a heading tag and its
+                    # containing div with the default chapter expression.
+                    continue
                mark = etree.Element(XHTML('div'), style=page_break_after)
            else: # chapter_mark == 'both':
                mark = etree.Element(XHTML('hr'), style=page_break_before)

@@ -182,8 +213,6 @@ class DetectStructure(object):
                    self.log('Maximum TOC links reached, stopping.')
                    return

    def elem_to_link(self, item, elem, counter):
        text = xml2text(elem).strip()
        if not text:

@@ -197,7 +226,6 @@ class DetectStructure(object):
        href = '#'.join((item.href, id))
        return text, href

    def add_leveled_toc_items(self):
        added = OrderedDict()
        added2 = OrderedDict()

@@ -223,7 +251,7 @@ class DetectStructure(object):
                    node = self.oeb.toc.add(text, _href,
                            play_order=self.oeb.toc.next_play_order())
                    added[elem] = node
-                    #node.add(_('Top'), _href)
+                    # node.add(_('Top'), _href)

        if self.opts.level2_toc is not None and added:
            for elem in find_matches(self.opts.level2_toc, document.data):

@@ -263,3 +291,4 @@ class DetectStructure(object):
                            play_order=self.oeb.toc.next_play_order())
                    break
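The at_start() helper added above walks the body in document order and reports whether any real content precedes a candidate element, which is what lets the pagebreak logic skip redundant breaks at the top of a file. A standalone sketch of the same idea (hypothetical helper, not calibre code; assumes lxml is available):

    from lxml import etree

    XHTML_NS = 'http://www.w3.org/1999/xhtml'
    doc = etree.fromstring(
        '<html xmlns="%s"><body><div><h1>One</h1></div>'
        '<p>some text</p><div><h1>Two</h1></div></body></html>' % XHTML_NS)

    def nothing_before(elem):
        # Walk the body in document order; stop at elem, bail out on any
        # non-whitespace text or tail seen before it.
        body = doc.find('{%s}body' % XHTML_NS)
        ancestors = set(elem.iterancestors())
        for x in body.iter():
            if x is elem:
                return True
            if x in ancestors or x is body:
                continue  # containers of elem are not content before it
            if (x.text and x.text.strip()) or (x.tail and x.tail.strip()):
                return False
        return False

    h1_one, h1_two = doc.findall('.//{%s}h1' % XHTML_NS)
    print(nothing_before(h1_one), nothing_before(h1_two))  # True False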
@@ -118,7 +118,7 @@ def flip_image(img, flip):
    im.save(img)

def flip_images(raw):
-    for match in re.finditer(b'<IMG[^>]+/?>', raw):
+    for match in re.finditer(b'<IMG[^>]+/?>', raw, flags=re.I):
        img = match.group()
        m = re.search(br'class="(x|y|xy)flip"', img)
        if m is None: continue

@@ -127,7 +127,6 @@ def flip_images(raw):
        if src is None: continue
        img = src.group(1)
        if not os.path.exists(img): continue
-        print ('Flipping image %s: %s'%(img, flip))
        flip_image(img, flip)
    raw = re.sub(br'<STYLE.+?</STYLE>\s*', b'', raw, flags=re.I|re.DOTALL)
    return raw

@@ -353,6 +353,7 @@ class PDFWriter(QObject):
            paged_display.layout();
            paged_display.fit_images();
            py_bridge.value = book_indexing.all_links_and_anchors();
+            window.scrollTo(0, 0); // This is needed as getting anchor positions could have caused the viewport to scroll
            '''%(self.margin_top, 0, self.margin_bottom))

        amap = self.bridge_value

@@ -119,7 +119,7 @@ class BulkConfig(Config):

    def setup_output_formats(self, db, preferred_output_format):
        if preferred_output_format:
-            preferred_output_format = preferred_output_format.lower()
+            preferred_output_format = preferred_output_format.upper()
        output_formats = get_output_formats(preferred_output_format)
        preferred_output_format = preferred_output_format if \
                preferred_output_format and preferred_output_format \

@@ -21,6 +21,7 @@ class PluginWidget(Widget, Ui_Form):
        Widget.__init__(self, parent,
                ['dont_split_on_page_breaks', 'flow_size',
                    'no_default_epub_cover', 'no_svg_cover',
+                    'epub_inline_toc', 'epub_toc_at_end', 'toc_title',
                    'preserve_cover_aspect_ratio', 'epub_flatten']
                )
        for i in range(2):
@@ -6,7 +6,7 @@
   <rect>
    <x>0</x>
    <y>0</y>
-   <width>400</width>
+   <width>644</width>
    <height>300</height>
   </rect>
  </property>

@@ -14,27 +14,6 @@
   <string>Form</string>
  </property>
  <layout class="QGridLayout" name="gridLayout">
-  <item row="0" column="0">
-   <widget class="QCheckBox" name="opt_dont_split_on_page_breaks">
-    <property name="text">
-     <string>Do not &split on page breaks</string>
-    </property>
-   </widget>
-  </item>
-  <item row="1" column="0">
-   <widget class="QCheckBox" name="opt_no_default_epub_cover">
-    <property name="text">
-     <string>No default &cover</string>
-    </property>
-   </widget>
-  </item>
-  <item row="2" column="0">
-   <widget class="QCheckBox" name="opt_no_svg_cover">
-    <property name="text">
-     <string>No &SVG cover</string>
-    </property>
-   </widget>
-  </item>
  <item row="2" column="1">
   <widget class="QCheckBox" name="opt_preserve_cover_aspect_ratio">
    <property name="text">

@@ -42,7 +21,7 @@
    </property>
   </widget>
  </item>
- <item row="3" column="0">
+ <item row="5" column="0">
   <widget class="QLabel" name="label">
    <property name="text">
     <string>Split files &larger than:</string>

@@ -52,7 +31,7 @@
    </property>
   </widget>
  </item>
- <item row="3" column="1">
+ <item row="5" column="1">
   <widget class="QSpinBox" name="opt_flow_size">
    <property name="suffix">
     <string> KB</string>

@@ -68,7 +47,7 @@
    </property>
   </widget>
  </item>
- <item row="4" column="0">
+ <item row="6" column="0">
   <spacer name="verticalSpacer">
    <property name="orientation">
     <enum>Qt::Vertical</enum>

@@ -81,6 +60,41 @@
    </property>
   </spacer>
  </item>
+ <item row="1" column="0">
+  <widget class="QCheckBox" name="opt_no_default_epub_cover">
+   <property name="text">
+    <string>No default &cover</string>
+   </property>
+  </widget>
+ </item>
+ <item row="2" column="0">
+  <widget class="QCheckBox" name="opt_no_svg_cover">
+   <property name="text">
+    <string>No &SVG cover</string>
+   </property>
+  </widget>
+ </item>
+ <item row="3" column="0">
+  <widget class="QCheckBox" name="opt_epub_inline_toc">
+   <property name="text">
+    <string>Insert inline &Table of Contents</string>
+   </property>
+  </widget>
+ </item>
+ <item row="0" column="0" colspan="2">
+  <widget class="QCheckBox" name="opt_dont_split_on_page_breaks">
+   <property name="text">
+    <string>Do not &split on page breaks</string>
+   </property>
+  </widget>
+ </item>
+ <item row="3" column="1">
+  <widget class="QCheckBox" name="opt_epub_toc_at_end">
+   <property name="text">
+    <string>Put inserted Table of Contents at the &end of the book</string>
+   </property>
+  </widget>
+ </item>
  <item row="1" column="1">
   <widget class="QCheckBox" name="opt_epub_flatten">
    <property name="text">

@@ -88,6 +102,19 @@
    </property>
   </widget>
  </item>
+ <item row="4" column="0">
+  <widget class="QLabel" name="label_2">
+   <property name="text">
+    <string>&Title for inserted ToC:</string>
+   </property>
+   <property name="buddy">
+    <cstring>opt_toc_title</cstring>
+   </property>
+  </widget>
+ </item>
+ <item row="4" column="1">
+  <widget class="QLineEdit" name="opt_toc_title"/>
+ </item>
 </layout>
</widget>
<resources/>
@@ -252,7 +252,7 @@ class Config(ResizableDialog, Ui_Dialog):
    def setup_input_output_formats(self, db, book_id, preferred_input_format,
            preferred_output_format):
        if preferred_output_format:
-            preferred_output_format = preferred_output_format.lower()
+            preferred_output_format = preferred_output_format.upper()
        output_formats = get_output_formats(preferred_output_format)
        input_format, input_formats = get_input_format_for_book(db, book_id,
                preferred_input_format)

@@ -7,7 +7,6 @@ from PyQt4.QtCore import SIGNAL
from PyQt4.QtGui import QDialog

from calibre.gui2.dialogs.saved_search_editor_ui import Ui_SavedSearchEditor
-from calibre.utils.search_query_parser import saved_searches
from calibre.utils.icu import sort_key
from calibre.gui2 import error_dialog
from calibre.gui2.dialogs.confirm_delete import confirm

@@ -15,6 +14,7 @@ from calibre.gui2.dialogs.confirm_delete import confirm
class SavedSearchEditor(QDialog, Ui_SavedSearchEditor):

    def __init__(self, parent, initial_search=None):
+        from calibre.gui2.ui import saved_searches
        QDialog.__init__(self, parent)
        Ui_SavedSearchEditor.__init__(self)
        self.setupUi(self)

@@ -98,6 +98,7 @@ class SavedSearchEditor(QDialog, Ui_SavedSearchEditor):
        self.search_text.setPlainText('')

    def accept(self):
+        from calibre.gui2.ui import saved_searches
        if self.current_search_name:
            self.searches[self.current_search_name] = unicode(self.search_text.toPlainText())
        for name in saved_searches().names():
@@ -113,7 +113,7 @@ class Sendmail(object):
                    verbose=1,
                    relay=opts.relay_host,
                    username=opts.relay_username,
-                    password=unhexlify(opts.relay_password), port=opts.relay_port,
+                    password=unhexlify(opts.relay_password).decode('utf-8'), port=opts.relay_port,
                    encryption=opts.encryption,
                    debug_output=log.debug)
        finally:
@@ -18,7 +18,6 @@ from calibre.gui2 import config, error_dialog
from calibre.gui2.dialogs.confirm_delete import confirm
from calibre.gui2.dialogs.saved_search_editor import SavedSearchEditor
from calibre.gui2.dialogs.search import SearchDialog
-from calibre.utils.search_query_parser import saved_searches

class SearchLineEdit(QLineEdit): # {{{
    key_pressed = pyqtSignal(object)

@@ -309,6 +308,7 @@ class SavedSearchBox(QComboBox): # {{{
        self.saved_search_selected(self.currentText())

    def saved_search_selected(self, qname):
+        from calibre.gui2.ui import saved_searches
        qname = unicode(qname)
        if qname is None or not qname.strip():
            self.search_box.clear()

@@ -322,12 +322,14 @@ class SavedSearchBox(QComboBox): # {{{
        self.setToolTip(saved_searches().lookup(qname))

    def initialize_saved_search_names(self):
+        from calibre.gui2.ui import saved_searches
        qnames = saved_searches().names()
        self.addItems(qnames)
        self.setCurrentIndex(-1)

    # SIGNALed from the main UI
    def save_search_button_clicked(self):
+        from calibre.gui2.ui import saved_searches
        name = unicode(self.currentText())
        if not name.strip():
            name = unicode(self.search_box.text()).replace('"', '')

@@ -346,6 +348,7 @@ class SavedSearchBox(QComboBox): # {{{
        self.changed.emit()

    def delete_current_search(self):
+        from calibre.gui2.ui import saved_searches
        idx = self.currentIndex()
        if idx <= 0:
            error_dialog(self, _('Delete current search'),

@@ -365,6 +368,7 @@ class SavedSearchBox(QComboBox): # {{{

    # SIGNALed from the main UI
    def copy_search_button_clicked(self):
+        from calibre.gui2.ui import saved_searches
        idx = self.currentIndex()
        if idx < 0:
            return

@@ -17,7 +17,6 @@ from calibre.gui2.widgets import ComboBoxWithHelp
from calibre.utils.config_base import tweaks
from calibre.utils.icu import sort_key
from calibre.utils.search_query_parser import ParseException
-from calibre.utils.search_query_parser import saved_searches

class SelectNames(QDialog): # {{{

@@ -179,6 +178,7 @@ class CreateVirtualLibrary(QDialog): # {{{
        self.resize(self.sizeHint()+QSize(150, 25))

    def search_text_changed(self, txt):
+        from calibre.gui2.ui import saved_searches
        searches = [_('Saved searches recognized in the expression:')]
        txt = unicode(txt)
        while txt:

@@ -234,6 +234,7 @@ class CreateVirtualLibrary(QDialog): # {{{
            self.vl_text.setText(self.original_search)

    def link_activated(self, url):
+        from calibre.gui2.ui import saved_searches
        db = self.gui.current_db
        f, txt = unicode(url).partition('.')[0::2]
        if f == 'search':

@@ -475,6 +476,7 @@ class SearchRestrictionMixin(object):
        return name[0:MAX_VIRTUAL_LIBRARY_NAME_LENGTH].strip()

    def build_search_restriction_list(self):
+        from calibre.gui2.ui import saved_searches
        m = self.ar_menu
        m.clear()

@@ -229,14 +229,14 @@ class Matches(QAbstractItemModel):
            if col == 1:
                return QVariant('<p>%s</p>' % result.title)
            elif col == 2:
-                return QVariant('<p>' + _('Detected price as: %s. Check with the store before making a purchase to verify this price is correct. This price often does not include promotions the store may be running.') % result.price + '</p>')
+                return QVariant('<p>' + _('Detected price as: %s. Check with the store before making a purchase to verify this price is correct. This price often does not include promotions the store may be running.') % result.price + '</p>')  # noqa
            elif col == 3:
                if result.drm == SearchResult.DRM_LOCKED:
-                    return QVariant('<p>' + _('This book as been detected as having DRM restrictions. This book may not work with your reader and you will have limitations placed upon you as to what you can do with this book. Check with the store before making any purchases to ensure you can actually read this book.') + '</p>')
+                    return QVariant('<p>' + _('This book as been detected as having DRM restrictions. This book may not work with your reader and you will have limitations placed upon you as to what you can do with this book. Check with the store before making any purchases to ensure you can actually read this book.') + '</p>')  # noqa
                elif result.drm == SearchResult.DRM_UNLOCKED:
-                    return QVariant('<p>' + _('This book has been detected as being DRM Free. You should be able to use this book on any device provided it is in a format calibre supports for conversion. However, before making a purchase double check the DRM status with the store. The store may not be disclosing the use of DRM.') + '</p>')
+                    return QVariant('<p>' + _('This book has been detected as being DRM Free. You should be able to use this book on any device provided it is in a format calibre supports for conversion. However, before making a purchase double check the DRM status with the store. The store may not be disclosing the use of DRM.') + '</p>')  # noqa
                else:
-                    return QVariant('<p>' + _('The DRM status of this book could not be determined. There is a very high likelihood that this book is actually DRM restricted.') + '</p>')
+                    return QVariant('<p>' + _('The DRM status of this book could not be determined. There is a very high likelihood that this book is actually DRM restricted.') + '</p>')  # noqa
            elif col == 4:
                return QVariant('<p>%s</p>' % result.formats)
            elif col == 5:

@@ -337,7 +337,7 @@ class SearchFilter(SearchQueryParser):

    def _match(self, query, value, matchkind):
        for t in value:
-            try: ### ignore regexp exceptions, required because search-ahead tries before typing is finished
+            try:  # ignore regexp exceptions, required because search-ahead tries before typing is finished
                t = icu_lower(t)
                if matchkind == self.EQUALS_MATCH:
                    if query == t:

@@ -375,7 +375,7 @@ class SearchFilter(SearchQueryParser):
        elif query.startswith('~'):
            matchkind = self.REGEXP_MATCH
            query = query[1:]
-        if matchkind != self.REGEXP_MATCH: ### leave case in regexps because it can be significant e.g. \S \W \D
+        if matchkind != self.REGEXP_MATCH:  # leave case in regexps because it can be significant e.g. \S \W \D
            query = query.lower()

        if location not in self.USABLE_LOCATIONS:

@@ -439,9 +439,9 @@ class SearchFilter(SearchQueryParser):
            if locvalue in ('affiliate', 'drm', 'download', 'downloads'):
                continue
            try:
-                ### Can't separate authors because comma is used for name sep and author sep
-                ### Exact match might not get what you want. For that reason, turn author
-                ### exactmatch searches into contains searches.
+                # Can't separate authors because comma is used for name sep and author sep
+                # Exact match might not get what you want. For that reason, turn author
+                # exactmatch searches into contains searches.
                if locvalue == 'author' and matchkind == self.EQUALS_MATCH:
                    m = self.CONTAINS_MATCH
                else:

@@ -452,6 +452,7 @@ class SearchFilter(SearchQueryParser):
                elif locvalue in ('author2', 'title2'):
                    m = self.IN_MATCH
                    vals = re.sub(r'(^|\s)(and|not|or|a|the|is|of|,)(\s|$)', ' ', accessor(sr)).split(' ')
+                    vals = [x for x in vals if x]
                    final_query = query.lower()
                else:
                    vals = [accessor(sr)]

@@ -462,3 +463,4 @@ class SearchFilter(SearchQueryParser):
            import traceback
            traceback.print_exc()
        return matches

@@ -236,11 +236,11 @@ class SearchDialog(QDialog, Ui_Dialog):
        query = re.sub(r'%s:"[^"]"' % loc, '', query)
        query = re.sub(r'%s:[^\s]*' % loc, '', query)
        # Remove logic.
-        query = re.sub(r'(^|\s)(and|not|or|a|the|is|of)(\s|$)', ' ', query)
+        query = re.sub(r'(^|\s|")(and|not|or|a|the|is|of)(\s|$|")', r' ', query)
        # Remove "
        query = query.replace('"', '')
        # Remove excess whitespace.
-        query = re.sub(r'\s{2,}', ' ', query)
+        query = re.sub(r'\s+', ' ', query)
        query = query.strip()
        return query.encode('utf-8')

@@ -21,7 +21,6 @@ from calibre.utils.icu import sort_key, lower, strcmp, collation_order
from calibre.library.field_metadata import TagsIcons, category_icon_map
from calibre.gui2.dialogs.confirm_delete import confirm
from calibre.utils.formatter import EvalFormatter
-from calibre.utils.search_query_parser import saved_searches

TAG_SEARCH_STATES = {'clear': 0, 'mark_plus': 1, 'mark_plusplus': 2,
                     'mark_minus': 3, 'mark_minusminus': 4}

@@ -879,7 +878,7 @@ class TagsModel(QAbstractItemModel): # {{{
            traceback.print_exc()
        self.db.data.change_search_locations(self.db.field_metadata.get_search_terms())

-        if len(saved_searches().names()):
+        if len(self.db.get_saved_searches().names()):
            tb_cats.add_search_category(label='search', name=_('Searches'))

        if self.filter_categories_by:

@@ -1005,11 +1004,11 @@ class TagsModel(QAbstractItemModel): # {{{
                        _('Author names cannot contain & characters.')).exec_()
                return False
        if key == 'search':
-            if val in saved_searches().names():
+            if val in self.db.get_saved_searches().names():
                error_dialog(self.gui_parent, _('Duplicate search name'),
                    _('The saved search name %s is already used.')%val).exec_()
                return False
-            saved_searches().rename(unicode(item.data(role).toString()), val)
+            self.db.get_saved_searches().rename(unicode(item.data(role).toString()), val)
            item.tag.name = val
            self.search_item_renamed.emit() # Does a refresh
        else:

@@ -20,7 +20,6 @@ from calibre.constants import config_dir
from calibre.gui2.tag_browser.model import (TagTreeItem, TAG_SEARCH_STATES,
        TagsModel)
from calibre.gui2 import config, gprefs, choose_files, pixmap_to_data
-from calibre.utils.search_query_parser import saved_searches
from calibre.utils.icu import sort_key

class TagDelegate(QStyledItemDelegate): # {{{

@@ -355,6 +354,7 @@ class TagsView(QTreeView): # {{{
            self.delete_user_category.emit(key)
            return
        if action == 'delete_search':
+            from calibre.gui2.ui import saved_searches
            saved_searches().delete(key)
            self.rebuild_saved_searches.emit()
            return

@@ -98,6 +98,16 @@ _gui = None
def get_gui():
    return _gui

+def saved_searches():
+    'Return the saved searches defined in the currently open library'
+    try:
+        return _gui.library_view.model().db.get_saved_searches()
+    except AttributeError:
+        # Happens during initialization of the gui
+        from calibre.utils.search_query_parser import saved_searches
+        return saved_searches()
+
class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
        TagBrowserMixin, CoverFlowMixin, LibraryViewMixin, SearchBoxMixin,
        SavedSearchBoxMixin, SearchRestrictionMixin, LayoutMixin, UpdateMixin,

@@ -532,6 +542,11 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
            self.raise_()
            self.activateWindow()
        elif msg.startswith('refreshdb:'):
+            db = self.library_view.model().db
+            if hasattr(db, 'new_api'):
+                db.new_api.reload_from_db()
+                self.library_view.model().resort()
+            else:
-            self.library_view.model().refresh()
-            self.library_view.model().research()
+                self.library_view.model().refresh()
+                self.library_view.model().research()
            self.tags_view.recount()
@@ -772,12 +772,14 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
        self.scrolled(self.view.scroll_fraction)

    def internal_link_clicked(self, frac):
+        self.update_page_number()  # Ensure page number is accurate as it is used for history
        self.history.add(self.pos.value())

    def link_clicked(self, url):
        path = os.path.abspath(unicode(url.toLocalFile()))
        frag = None
        if path in self.iterator.spine:
+            self.update_page_number()  # Ensure page number is accurate as it is used for history
            self.history.add(self.pos.value())
            path = self.iterator.spine[self.iterator.spine.index(path)]
            if url.hasFragment():

@@ -913,6 +915,14 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
        else:
            self.view.document.page_position.restore()
        self.view.document.after_resize()
+        # For some reason scroll_fraction returns incorrect results in paged
+        # mode for some time after a resize is finished. No way of knowing
+        # exactly how long, so we update it in a second, in the hopes that it
+        # will be enough *most* of the time.
+        QTimer.singleShot(1000, self.update_page_number)
+
+    def update_page_number(self):
+        self.set_page_number(self.view.document.scroll_fraction)

    def close_progress_indicator(self):
        self.pi.stop()
@@ -32,7 +32,7 @@ class TestEmail(QDialog, TE_Dialog):
        self.to.setText(pa)
        if opts.relay_host:
            self.label.setText(_('Using: %(un)s:%(pw)s@%(host)s:%(port)s and %(enc)s encryption')%
-                    dict(un=opts.relay_username, pw=unhexlify(opts.relay_password),
+                    dict(un=opts.relay_username, pw=unhexlify(opts.relay_password).decode('utf-8'),
                        host=opts.relay_host, port=opts.relay_port, enc=opts.encryption))

    def test(self, *args):

@@ -129,7 +129,7 @@ class SendEmail(QWidget, Ui_Form):
        self.relay_username.setText(opts.relay_username)
        self.relay_username.textChanged.connect(self.changed)
        if opts.relay_password:
-            self.relay_password.setText(unhexlify(opts.relay_password))
+            self.relay_password.setText(unhexlify(opts.relay_password).decode('utf-8'))
        self.relay_password.textChanged.connect(self.changed)
        getattr(self, 'relay_'+opts.encryption.lower()).setChecked(True)
        self.relay_tls.toggled.connect(self.changed)

@@ -169,7 +169,7 @@ class SendEmail(QWidget, Ui_Form):
            sendmail(msg, from_=opts.from_, to=[to],
                    verbose=3, timeout=30, relay=opts.relay_host,
                    username=opts.relay_username,
-                    password=unhexlify(opts.relay_password),
+                    password=unhexlify(opts.relay_password).decode('utf-8'),
                    encryption=opts.encryption, port=opts.relay_port)
        except:
            import traceback

@@ -248,7 +248,7 @@ class SendEmail(QWidget, Ui_Form):
        conf.set('relay_host', host if host else None)
        conf.set('relay_port', self.relay_port.value())
        conf.set('relay_username', username if username else None)
-        conf.set('relay_password', hexlify(password))
+        conf.set('relay_password', hexlify(password.encode('utf-8')))
        conf.set('encryption', enc_method)
        return True
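All four hunks above fix the same Python 2 pitfall behind the non-ASCII email password bug: binascii.hexlify on a unicode password implicitly encodes it as ASCII and fails for anything outside that range. Encoding to UTF-8 before hexlify, and decoding after unhexlify, makes the round trip lossless. A minimal sketch of the idea (Python 2, not calibre code):

    # -*- coding: utf-8 -*-
    from binascii import hexlify, unhexlify

    password = u'p\xe4ssw\xf6rd'                 # a non-ASCII password
    stored = hexlify(password.encode('utf-8'))   # safe: hexlify only ever sees bytes
    restored = unhexlify(stored).decode('utf-8')
    assert restored == password
    # hexlify(password) without the encode would raise UnicodeEncodeError here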
@@ -1028,10 +1028,8 @@ def command_saved_searches(args, dbpath):
        print
        prints(_('Error: You must specify an action (add|remove|list)'), file=sys.stderr)
        return 1
-    from calibre.utils.search_query_parser import saved_searches
    db = get_db(dbpath, opts)
-    db
-    ss = saved_searches()
+    ss = db.get_saved_searches()
    if args[0] == 'list':
        for name in ss.names():
            prints(_('Name:'), name)
@@ -959,7 +959,7 @@ ALTER TABLE books ADD COLUMN isbn TEXT DEFAULT "" COLLATE NOCASE;
    def max_size(self, index, index_is_id=False):
        if index_is_id:
            return self.conn.get('SELECT size FROM meta WHERE id=?', (index,), all=False)
-        return self.data[index][6]
+        return self.data[index][4]

    def cover(self, index, index_is_id=False):
        '''Cover as a data string or None'''
@@ -7,14 +7,14 @@ __docformat__ = 'restructuredtext en'
The database used to store ebook metadata
'''
import os, sys, shutil, cStringIO, glob, time, functools, traceback, re, \
-        json, uuid, hashlib, copy
+        json, uuid, hashlib, copy, types
from collections import defaultdict
import threading, random
from itertools import repeat

from calibre import prints, force_unicode
from calibre.ebooks.metadata import (title_sort, author_to_author_sort,
-        string_to_authors, authors_to_string, get_title_sort_pat)
+        string_to_authors, get_title_sort_pat)
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.library.database import LibraryDatabase
from calibre.library.field_metadata import FieldMetadata, TagsIcons

@@ -41,7 +41,7 @@ from calibre.ebooks import check_ebook_format
from calibre.utils.magick.draw import save_cover_data_to
from calibre.utils.recycle_bin import delete_file, delete_tree
from calibre.utils.formatter_functions import load_user_template_functions
-from calibre.db import _get_next_series_num_for_list, _get_series_values
+from calibre.db import _get_next_series_num_for_list, _get_series_values, get_data_as_dict
from calibre.db.adding import find_books_in_directory, import_book_directory_multiple, import_book_directory, recursive_import
from calibre.db.errors import NoSuchFormat
from calibre.db.lazy import FormatMetadata, FormatsList

@@ -135,6 +135,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
            read_only=False, is_second_db=False, progress_callback=None,
            restore_all_prefs=False):
        self.is_second_db = is_second_db
+        self.get_data_as_dict = types.MethodType(get_data_as_dict, self, LibraryDatabase2)
        try:
            if isbytestring(library_path):
                library_path = library_path.decode(filesystem_encoding)

@@ -536,6 +537,9 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
        if self.user_version == 0:
            self.user_version = 1

+    def get_saved_searches(self):
+        return saved_searches()
+
    def last_modified(self):
        ''' Return last modified time as a UTC datetime object'''
        return utcfromtimestamp(os.stat(self.dbpath).st_mtime)

@@ -1570,6 +1574,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
            with lopen(opath, 'rb') as f:
                self.add_format(book_id, fmt, f, index_is_id=True, notify=False)
            self.remove_format(book_id, original_fmt, index_is_id=True, notify=notify)
+            return True
+        return False

    def delete_book(self, id, notify=True, commit=True, permanent=False,
            do_clean=True):

@@ -3558,7 +3564,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
            path = self.path(x, index_is_id=True)
            path = path.split(os.sep)[0]
            paths.add(path)
-        paths.add('metadata.db')
+        paths.update({'metadata.db', 'metadata_db_prefs_backup.json'})
        path_map = {}
        for x in paths:
            path_map[x] = x

@@ -3570,7 +3576,9 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
        items = items.intersection(paths)
        return items, path_map

-    def move_library_to(self, newloc, progress=lambda x: x):
+    def move_library_to(self, newloc, progress=None):
+        if progress is None:
+            progress = lambda x:x
        if not os.path.exists(newloc):
            os.makedirs(newloc)
        old_dirs = set([])

@@ -3617,67 +3625,6 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
        for i in iter(self):
            yield i[x]

-    def get_data_as_dict(self, prefix=None, authors_as_string=False, ids=None):
-        '''
-        Return all metadata stored in the database as a dict. Includes paths to
-        the cover and each format.
-
-        :param prefix: The prefix for all paths. By default, the prefix is the absolute path
-        to the library folder.
-        :param ids: Set of ids to return the data for. If None return data for
-        all entries in database.
-        '''
-        if prefix is None:
-            prefix = self.library_path
-        fdata = self.custom_column_num_map
-
-        FIELDS = set(['title', 'sort', 'authors', 'author_sort', 'publisher',
-            'rating', 'timestamp', 'size', 'tags', 'comments', 'series',
-            'series_index', 'uuid', 'pubdate', 'last_modified', 'identifiers',
-            'languages']).union(set(fdata))
-        for x, data in fdata.iteritems():
-            if data['datatype'] == 'series':
-                FIELDS.add('%d_index'%x)
-        data = []
-        for record in self.data:
-            if record is None:
-                continue
-            db_id = record[self.FIELD_MAP['id']]
-            if ids is not None and db_id not in ids:
-                continue
-            x = {}
-            for field in FIELDS:
-                x[field] = record[self.FIELD_MAP[field]]
-            data.append(x)
-            x['id'] = db_id
-            x['formats'] = []
-            isbn = self.isbn(db_id, index_is_id=True)
-            x['isbn'] = isbn if isbn else ''
-            if not x['authors']:
-                x['authors'] = _('Unknown')
-            x['authors'] = [i.replace('|', ',') for i in x['authors'].split(',')]
-            if authors_as_string:
-                x['authors'] = authors_to_string(x['authors'])
-            x['tags'] = [i.replace('|', ',').strip() for i in x['tags'].split(',')] if x['tags'] else []
-            path = os.path.join(prefix, self.path(record[self.FIELD_MAP['id']], index_is_id=True))
-            x['cover'] = os.path.join(path, 'cover.jpg')
-            if not record[self.FIELD_MAP['cover']]:
-                x['cover'] = None
-            formats = self.formats(record[self.FIELD_MAP['id']], index_is_id=True)
-            if formats:
-                for fmt in formats.split(','):
-                    path = self.format_abspath(x['id'], fmt, index_is_id=True)
-                    if path is None:
-                        continue
-                    if prefix != self.library_path:
-                        path = os.path.relpath(path, self.library_path)
-                        path = os.path.join(prefix, path)
-                    x['formats'].append(path)
-                    x['fmt_'+fmt.lower()] = path
-                x['available_formats'] = [i.upper() for i in formats.split(',')]
-
-        return data
-
    def migrate_old(self, db, progress):
        from PyQt4.QtCore import QCoreApplication
        header = _(u'<p>Migrating old database to ebook library in %s<br><center>')%self.library_path
@@ -25,7 +25,6 @@ from calibre.library.server.opds import OPDSServer
from calibre.library.server.cache import Cache
from calibre.library.server.browse import BrowseServer
from calibre.library.server.ajax import AjaxServer
-from calibre.utils.search_query_parser import saved_searches
from calibre import prints, as_unicode

@@ -210,7 +209,7 @@ class LibraryServer(ContentServer, MobileServer, XMLServer, OPDSServer, Cache,
        if sr:
            if sr in virt_libs:
                sr = virt_libs[sr]
-            elif sr not in saved_searches().names():
+            elif sr not in self.db.get_saved_searches().names():
                prints('WARNING: Content server: search restriction ',
                        sr, ' does not exist')
                sr = ''
@@ -474,4 +474,11 @@ def write_tweaks(raw):

tweaks = read_tweaks()

+def reset_tweaks_to_default():
+    global tweaks
+    default_tweaks = P('default_tweaks.py', data=True,
+            allow_user_override=False)
+    dl, dg = {}, {}
+    exec default_tweaks in dg, dl
+    tweaks = dl
@@ -26,6 +26,10 @@ def what(file, h=None):
    finally:
        if f:
            f.close()
+    # There exist some jpeg files with no headers, only the starting two bits
+    # If we cannot identify as anything else, identify as jpeg.
+    if h[:2] == b'\xff\xd8':
+        return 'jpeg'
    return None
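The fallback above keys on the two-byte JPEG start-of-image marker (0xFF 0xD8), which is present even when the JFIF/Exif application headers are missing. A tiny sketch of the same check outside calibre (hypothetical helper name):

    def looks_like_jpeg(data):
        # JPEG streams begin with the SOI (start of image) marker FF D8,
        # even when the usual application headers are absent.
        return data[:2] == b'\xff\xd8'

    print(looks_like_jpeg(b'\xff\xd8\xff\xe0' + b'\x00' * 16))  # True
    print(looks_like_jpeg(b'\x89PNG\r\n\x1a\n'))                # False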
@@ -40,7 +40,7 @@ class SavedSearchQueries(object):
        self.queries = {}
        try:
            self._db = weakref.ref(db)
-        except:
+        except TypeError:
            # db could be None
            self._db = lambda : None
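Narrowing the bare except to TypeError works because that is exactly what weakref.ref raises when handed an object, such as None, that cannot be weakly referenced. A small sketch demonstrating the behaviour (not calibre code):

    import weakref

    class Library(object):  # stand-in for a db object
        pass

    ref = weakref.ref(Library())  # fine: instances are weak-referenceable
    try:
        weakref.ref(None)         # None is not
    except TypeError as e:
        print('fallback engaged:', e)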
@@ -292,9 +292,10 @@ class SearchQueryParser(object):
                failed.append(test[0])
        return failed

-    def __init__(self, locations, test=False, optimize=False):
+    def __init__(self, locations, test=False, optimize=False, get_saved_searches=None):
        self.sqp_initialize(locations, test=test, optimize=optimize)
        self.parser = Parser()
+        self.get_saved_searches = saved_searches if get_saved_searches is None else get_saved_searches

    def sqp_change_locations(self, locations):
        self.sqp_initialize(locations, optimize=self.optimize)

@@ -367,7 +368,7 @@ class SearchQueryParser(object):
                    raise ParseException(_('Recursive saved search: {0}').format(query))
                if self.recurse_level > 5:
                    self.searches_seen.add(query)
-                return self._parse(saved_searches().lookup(query), candidates)
+                return self._parse(self.get_saved_searches().lookup(query), candidates)
            except ParseException as e:
                raise e
            except: # convert all exceptions (e.g., missing key) to a parse error
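The get_saved_searches hook turns the parser's hard dependency on a module-level registry into an injected callable, which is what lets each open library supply its own saved searches. A minimal sketch of the pattern (hypothetical names, not calibre code):

    class QueryParser(object):
        def __init__(self, get_saved_searches=None):
            # Fall back to a module-level default only when no per-library
            # lookup is injected.
            self.get_saved_searches = get_saved_searches or (lambda: GLOBAL_SEARCHES)

        def expand(self, name):
            return self.get_saved_searches().get(name, '')

    GLOBAL_SEARCHES = {'unread': 'tags:"=unread"'}
    per_library = {'unread': 'tags:"=to-read"'}

    print(QueryParser().expand('unread'))                       # global lookup
    print(QueryParser(lambda: per_library).expand('unread'))    # injected lookup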