Database backend code re-organization

Kovid Goyal 2010-04-12 15:58:19 +05:30
parent 20ca860d13
commit 3d5aca03eb
9 changed files with 831 additions and 783 deletions
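In short, database2.py is split up: the in-memory caches (CoverCache, ResultCache, _match and the match-kind constants) move to the new calibre.library.caches, the schema upgrade logic to calibre.library.schema_upgrades, and the custom column scaffolding to calibre.library.custom_columns, while FIELD_MAP becomes an attribute of the database object. For callers the visible change is mostly import paths; a minimal sketch distilled from the hunks below:

    # before
    from calibre.library.database2 import CONTAINS_MATCH, EQUALS_MATCH, _match, CoverCache
    # after
    from calibre.library.caches import CONTAINS_MATCH, EQUALS_MATCH, _match, CoverCache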

View File

@@ -5,7 +5,7 @@ from PyQt4.QtGui import QDialog
from calibre.gui2.dialogs.search_ui import Ui_Dialog
from calibre.gui2 import qstring_to_unicode
-from calibre.library.database2 import CONTAINS_MATCH, EQUALS_MATCH
+from calibre.library.caches import CONTAINS_MATCH, EQUALS_MATCH
class SearchDialog(QDialog, Ui_Dialog):

View File

@@ -17,7 +17,7 @@ from PyQt4.QtCore import QAbstractTableModel, QVariant, Qt, pyqtSignal, \
from calibre import strftime
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.pyparsing import ParseException
-from calibre.library.database2 import FIELD_MAP, _match, CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH
+from calibre.library.caches import _match, CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH
from calibre.gui2 import NONE, TableView, qstring_to_unicode, config, \
error_dialog
from calibre.gui2.widgets import EnLineEdit, TagsLineEdit
@@ -560,16 +560,16 @@ class BooksModel(QAbstractTableModel):
def build_data_convertors(self):
-tidx = FIELD_MAP['title']
-aidx = FIELD_MAP['authors']
-sidx = FIELD_MAP['size']
-ridx = FIELD_MAP['rating']
-pidx = FIELD_MAP['publisher']
-tmdx = FIELD_MAP['timestamp']
-pddx = FIELD_MAP['pubdate']
-srdx = FIELD_MAP['series']
-tgdx = FIELD_MAP['tags']
-siix = FIELD_MAP['series_index']
+tidx = self.db.FIELD_MAP['title']
+aidx = self.db.FIELD_MAP['authors']
+sidx = self.db.FIELD_MAP['size']
+ridx = self.db.FIELD_MAP['rating']
+pidx = self.db.FIELD_MAP['publisher']
+tmdx = self.db.FIELD_MAP['timestamp']
+pddx = self.db.FIELD_MAP['pubdate']
+srdx = self.db.FIELD_MAP['series']
+tgdx = self.db.FIELD_MAP['tags']
+siix = self.db.FIELD_MAP['series_index']
def authors(r):
au = self.db.data[r][aidx]
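Note the pattern running through this hunk: FIELD_MAP is no longer a module-level constant imported from calibre.library.database2 but an attribute built on each LibraryDatabase2 instance, so model code now reaches it through its db reference. A minimal sketch of the access change:

    tidx = FIELD_MAP['title']          # before: module constant
    tidx = self.db.FIELD_MAP['title']  # after: attribute of the open database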

View File

@@ -57,7 +57,8 @@ from calibre.gui2.dialogs.choose_format import ChooseFormatDialog
from calibre.gui2.dialogs.book_info import BookInfo
from calibre.ebooks import BOOK_EXTENSIONS
from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag, NavigableString
-from calibre.library.database2 import LibraryDatabase2, CoverCache
+from calibre.library.database2 import LibraryDatabase2
+from calibre.library.caches import CoverCache
from calibre.gui2.dialogs.confirm_delete import confirm
class SaveMenu(QMenu):

View File

@@ -0,0 +1,430 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import collections, glob, os, re, itertools, functools
from itertools import repeat
from PyQt4.QtCore import QThread, QReadWriteLock
from PyQt4.QtGui import QImage
from calibre.utils.search_query_parser import SearchQueryParser
from calibre.utils.date import parse_date
class CoverCache(QThread):
def __init__(self, library_path, parent=None):
QThread.__init__(self, parent)
self.library_path = library_path
self.id_map = None
self.id_map_lock = QReadWriteLock()
self.load_queue = collections.deque()
self.load_queue_lock = QReadWriteLock(QReadWriteLock.Recursive)
self.cache = {}
self.cache_lock = QReadWriteLock()
self.id_map_stale = True
self.keep_running = True
def build_id_map(self):
self.id_map_lock.lockForWrite()
self.id_map = {}
for f in glob.glob(os.path.join(self.library_path, '*', '* (*)', 'cover.jpg')):
c = os.path.basename(os.path.dirname(f))
try:
id = int(re.search(r'\((\d+)\)', c[c.rindex('('):]).group(1))
self.id_map[id] = f
except:
continue
self.id_map_lock.unlock()
self.id_map_stale = False
def set_cache(self, ids):
self.cache_lock.lockForWrite()
already_loaded = set([])
for id in self.cache.keys():
if id in ids:
already_loaded.add(id)
else:
self.cache.pop(id)
self.cache_lock.unlock()
ids = [i for i in ids if i not in already_loaded]
self.load_queue_lock.lockForWrite()
self.load_queue = collections.deque(ids)
self.load_queue_lock.unlock()
def run(self):
while self.keep_running:
if self.id_map is None or self.id_map_stale:
self.build_id_map()
while True: # Load images from the load queue
self.load_queue_lock.lockForWrite()
try:
id = self.load_queue.popleft()
except IndexError:
break
finally:
self.load_queue_lock.unlock()
self.cache_lock.lockForRead()
need = True
if id in self.cache.keys():
need = False
self.cache_lock.unlock()
if not need:
continue
path = None
self.id_map_lock.lockForRead()
if id in self.id_map.keys():
path = self.id_map[id]
else:
self.id_map_stale = True
self.id_map_lock.unlock()
if path and os.access(path, os.R_OK):
try:
img = QImage()
data = open(path, 'rb').read()
img.loadFromData(data)
if img.isNull():
continue
except:
continue
self.cache_lock.lockForWrite()
self.cache[id] = img
self.cache_lock.unlock()
self.sleep(1)
def stop(self):
self.keep_running = False
def cover(self, id):
val = None
if self.cache_lock.tryLockForRead(50):
val = self.cache.get(id, None)
self.cache_lock.unlock()
return val
def clear_cache(self):
self.cache_lock.lockForWrite()
self.cache = {}
self.cache_lock.unlock()
def refresh(self, ids):
self.cache_lock.lockForWrite()
for id in ids:
self.cache.pop(id, None)
self.cache_lock.unlock()
self.load_queue_lock.lockForWrite()
for id in ids:
self.load_queue.appendleft(id)
self.load_queue_lock.unlock()
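# Usage sketch (illustrative, not part of this commit): the GUI owns the
# thread, roughly cache = CoverCache(library_path); cache.start(); later
# cache.cover(book_id) returns the cached QImage or None (it only tries
# the read lock for 50ms), and cache.stop() ends the run() loop.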
### Global utility function for get_match here and in gui2/library.py
CONTAINS_MATCH = 0
EQUALS_MATCH = 1
REGEXP_MATCH = 2
def _match(query, value, matchkind):
for t in value:
t = t.lower()
try: ### ignore regexp exceptions, required because search-ahead tries before typing is finished
if ((matchkind == EQUALS_MATCH and query == t) or
(matchkind == REGEXP_MATCH and re.search(query, t, re.I)) or ### search unanchored
(matchkind == CONTAINS_MATCH and query in t)):
return True
except re.error:
pass
return False
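# For example, assuming the caller has already lowercased the query:
# _match('tolstoy', ['Leo Tolstoy'], CONTAINS_MATCH) -> True, while
# EQUALS_MATCH must match the whole value and REGEXP_MATCH runs an
# unanchored, case-insensitive re.search over each value.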
class ResultCache(SearchQueryParser):
'''
Stores sorted and filtered metadata in memory.
'''
def build_relop_dict(self):
'''
Because the database dates have time in them, we can't use direct
comparisons even when field_count == 3. The query has time = 0, but
the database object has time == something. As such, a complete compare
will almost never be correct.
'''
def relop_eq(db, query, field_count):
if db.year == query.year:
if field_count == 1:
return True
if db.month == query.month:
if field_count == 2:
return True
return db.day == query.day
return False
def relop_gt(db, query, field_count):
if db.year > query.year:
return True
if field_count > 1 and db.year == query.year:
if db.month > query.month:
return True
return field_count == 3 and db.month == query.month and db.day > query.day
return False
def relop_lt(db, query, field_count):
if db.year < query.year:
return True
if field_count > 1 and db.year == query.year:
if db.month < query.month:
return True
return field_count == 3 and db.month == query.month and db.day < query.day
return False
def relop_ne(db, query, field_count):
return not relop_eq(db, query, field_count)
def relop_ge(db, query, field_count):
return not relop_lt(db, query, field_count)
def relop_le(db, query, field_count):
return not relop_gt(db, query, field_count)
self.search_relops = {'=':[1, relop_eq], '>':[1, relop_gt], '<':[1, relop_lt], \
'!=':[2, relop_ne], '>=':[2, relop_ge], '<=':[2, relop_le]}
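# e.g. the query 'date:>=2010-04' reaches get_matches below with
# field_count == 2 (one '-' plus 1), so relop_ge compares only year
# and month and ignores the day and time parts.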
def __init__(self, FIELD_MAP):
self.FIELD_MAP = FIELD_MAP
self._map = self._map_filtered = self._data = []
self.first_sort = True
SearchQueryParser.__init__(self)
self.build_relop_dict()
def __getitem__(self, row):
return self._data[self._map_filtered[row]]
def __len__(self):
return len(self._map_filtered)
def __iter__(self):
for id in self._map_filtered:
yield self._data[id]
def universal_set(self):
return set([i[0] for i in self._data if i is not None])
def get_matches(self, location, query):
matches = set([])
if query and query.strip():
location = location.lower().strip()
### take care of dates special case
if location in ('pubdate', 'date'):
if len(query) < 2:
return matches
relop = None
for k in self.search_relops.keys():
if query.startswith(k):
(p, relop) = self.search_relops[k]
query = query[p:]
if relop is None:
return matches
loc = self.FIELD_MAP[{'date':'timestamp', 'pubdate':'pubdate'}[location]]
qd = parse_date(query)
field_count = query.count('-') + 1
for item in self._data:
if item is None: continue
if relop(item[loc], qd, field_count):
matches.add(item[0])
return matches
### everything else
matchkind = CONTAINS_MATCH
if (len(query) > 1):
if query.startswith('\\'):
query = query[1:]
elif query.startswith('='):
matchkind = EQUALS_MATCH
query = query[1:]
elif query.startswith('~'):
matchkind = REGEXP_MATCH
query = query[1:]
if matchkind != REGEXP_MATCH: ### leave case in regexps because it can be significant e.g. \S \W \D
query = query.lower()
if not isinstance(query, unicode):
query = query.decode('utf-8')
if location in ('tag', 'author', 'format', 'comment'):
location += 's'
all = ('title', 'authors', 'publisher', 'tags', 'comments', 'series', 'formats', 'isbn', 'rating', 'cover')
MAP = {}
for x in all:
MAP[x] = self.FIELD_MAP[x]
EXCLUDE_FIELDS = [MAP['rating'], MAP['cover']]
SPLITABLE_FIELDS = [MAP['authors'], MAP['tags'], MAP['formats']]
location = [location] if location != 'all' else list(MAP.keys())
for i, loc in enumerate(location):
location[i] = MAP[loc]
try:
rating_query = int(query) * 2
except:
rating_query = None
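# Ratings are stored on a 0-10 scale (half-star units), hence the
# doubling: a query of rating:3 must equal a stored value of 6.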
for loc in location:
if loc == MAP['authors']:
q = query.replace(',', '|'); ### DB stores authors with commas changed to bars, so change query
else:
q = query
for item in self._data:
if item is None: continue
if not item[loc]:
if query == 'false':
if isinstance(item[loc], basestring):
if item[loc].strip() != '':
continue
matches.add(item[0])
continue
continue ### item is empty. No possible matches below
if q == 'true':
if isinstance(item[loc], basestring):
if item[loc].strip() == '':
continue
matches.add(item[0])
continue
if rating_query and loc == MAP['rating'] and rating_query == int(item[loc]):
matches.add(item[0])
continue
if loc not in EXCLUDE_FIELDS:
if loc in SPLITABLE_FIELDS:
vals = item[loc].split(',') ### check individual tags/authors/formats, not the long string
else:
vals = [item[loc]] ### make into list to make _match happy
if _match(q, vals, matchkind):
matches.add(item[0])
continue
return matches
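# End-to-end sketch (values illustrative): parse(u'author:=Grimes')
# dispatches here as get_matches('author', '=Grimes'); the '=' prefix
# selects EQUALS_MATCH, 'author' is normalized to 'authors', and each
# author in the comma-separated field value is tested via _match().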
def remove(self, id):
self._data[id] = None
if id in self._map:
self._map.remove(id)
if id in self._map_filtered:
self._map_filtered.remove(id)
def set(self, row, col, val, row_is_id=False):
id = row if row_is_id else self._map_filtered[row]
self._data[id][col] = val
def get(self, row, col, row_is_id=False):
id = row if row_is_id else self._map_filtered[row]
return self._data[id][col]
def index(self, id, cache=False):
x = self._map if cache else self._map_filtered
return x.index(id)
def row(self, id):
return self.index(id)
def has_id(self, id):
try:
return self._data[id] is not None
except IndexError:
pass
return False
def refresh_ids(self, db, ids):
'''
Refresh the data in the cache for books identified by ids.
Returns a list of affected rows or None if the rows are filtered.
'''
for id in ids:
try:
self._data[id] = db.conn.get('SELECT * from meta2 WHERE id=?',
(id,))[0]
self._data[id].append(db.has_cover(id, index_is_id=True))
except IndexError:
return None
try:
return map(self.row, ids)
except ValueError:
pass
return None
def books_added(self, ids, db):
if not ids:
return
self._data.extend(repeat(None, max(ids)-len(self._data)+2))
for id in ids:
self._data[id] = db.conn.get('SELECT * from meta2 WHERE id=?', (id,))[0]
self._data[id].append(db.has_cover(id, index_is_id=True))
self._map[0:0] = ids
self._map_filtered[0:0] = ids
def books_deleted(self, ids):
for id in ids:
self._data[id] = None
if id in self._map: self._map.remove(id)
if id in self._map_filtered: self._map_filtered.remove(id)
def count(self):
return len(self._map)
def refresh(self, db, field=None, ascending=True):
temp = db.conn.get('SELECT * FROM meta2')
self._data = list(itertools.repeat(None, temp[-1][0]+2)) if temp else []
for r in temp:
self._data[r[0]] = r
for item in self._data:
if item is not None:
item.append(db.has_cover(item[0], index_is_id=True))
self._map = [i[0] for i in self._data if i is not None]
if field is not None:
self.sort(field, ascending)
self._map_filtered = list(self._map)
def seriescmp(self, x, y):
try:
ans = cmp(self._data[x][9].lower(), self._data[y][9].lower())
except AttributeError: # Some entries may be None
ans = cmp(self._data[x][9], self._data[y][9])
if ans != 0: return ans
return cmp(self._data[x][10], self._data[y][10])
def cmp(self, loc, x, y, asstr=True, subsort=False):
try:
ans = cmp(self._data[x][loc].lower(), self._data[y][loc].lower()) if \
asstr else cmp(self._data[x][loc], self._data[y][loc])
except AttributeError: # Some entries may be None
ans = cmp(self._data[x][loc], self._data[y][loc])
if subsort and ans == 0:
return cmp(self._data[x][11].lower(), self._data[y][11].lower())
return ans
def sort(self, field, ascending, subsort=False):
field = field.lower().strip()
if field in ('author', 'tag', 'comment'):
field += 's'
if field == 'date': field = 'timestamp'
elif field == 'title': field = 'sort'
elif field == 'authors': field = 'author_sort'
if self.first_sort:
subsort = True
self.first_sort = False
fcmp = self.seriescmp if field == 'series' else \
functools.partial(self.cmp, self.FIELD_MAP[field], subsort=subsort,
asstr=field not in ('size', 'rating', 'timestamp'))
self._map.sort(cmp=fcmp, reverse=not ascending)
self._map_filtered = [id for id in self._map if id in self._map_filtered]
def search(self, query):
if not query or not query.strip():
self._map_filtered = list(self._map)
return
matches = sorted(self.parse(query))
self._map_filtered = [id for id in self._map if id in matches]
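Taken together with the database2.py hunks below, ResultCache is created and wired up by LibraryDatabase2 itself, so driving it looks roughly like this (a sketch; the path and query are illustrative):

    db = LibraryDatabase2('/path/to/library')
    db.refresh('timestamp', False)     # ResultCache.refresh(db, field, ascending)
    db.search(u'tag:=fiction')         # ResultCache.search via SearchQueryParser
    for record in db.data:             # iterates the filtered, sorted rows
        print record[db.FIELD_MAP['title']]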

View File

@@ -0,0 +1,46 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
class CustomColumns(object):
CUSTOM_DATA_TYPES = frozenset(['rating', 'text', 'comments', 'datetime',
'int', 'float', 'bool'])
def __init__(self):
return
# Delete marked custom columns
for num in self.conn.get(
'SELECT id FROM custom_columns WHERE mark_for_delete=1'):
dt, lt = self.custom_table_names(num)
self.conn.executescript('''\
DROP TABLE IF EXISTS %s;
DROP TABLE IF EXISTS %s;
'''%(dt, lt)
)
self.conn.execute('DELETE FROM custom_columns WHERE mark_for_delete=1')
self.conn.commit()
def custom_table_names(self, num):
return 'custom_column_%d'%num, 'books_custom_column_%d_link'%num
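# e.g. custom_table_names(3) -> ('custom_column_3', 'books_custom_column_3_link')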
@property
def custom_tables(self):
return set([x[0] for x in self.conn.get(
'SELECT name FROM sqlite_master WHERE type="table" AND '
'(name GLOB "custom_column_*" OR name GLOB books_customcolumn_*)')])
def create_custom_table(self, label, name, datatype, is_multiple,
sort_alpha):
if datatype not in self.CUSTOM_DATA_TYPES:
raise ValueError('%r is not a supported data type'%datatype)

View File

@@ -6,11 +6,9 @@ __docformat__ = 'restructuredtext en'
'''
The database used to store ebook metadata
'''
-import os, re, sys, shutil, cStringIO, glob, collections, textwrap, \
-itertools, functools, traceback
+import os, sys, shutil, cStringIO, glob, functools, traceback
from itertools import repeat
from math import floor
-from PyQt4.QtCore import QThread, QReadWriteLock
try:
from PIL import Image as PILImage
PILImage
@@ -22,8 +20,10 @@ from PyQt4.QtGui import QImage
from calibre.ebooks.metadata import title_sort
from calibre.library.database import LibraryDatabase
+from calibre.library.schema_upgrades import SchemaUpgrade
+from calibre.library.caches import ResultCache
+from calibre.library.custom_columns import CustomColumns
from calibre.library.sqlite import connect, IntegrityError, DBThread
-from calibre.utils.search_query_parser import SearchQueryParser
from calibre.ebooks.metadata import string_to_authors, authors_to_string, \
MetaInformation, authors_to_sort_string
from calibre.ebooks.metadata.meta import get_metadata, metadata_from_formats
@@ -32,7 +32,7 @@ from calibre.ptempfile import PersistentTemporaryFile
from calibre.customize.ui import run_plugins_on_import
from calibre.utils.filenames import ascii_filename
-from calibre.utils.date import utcnow, now as nowf, utcfromtimestamp, parse_date
+from calibre.utils.date import utcnow, now as nowf, utcfromtimestamp
from calibre.ebooks import BOOK_EXTENSIONS, check_ebook_format
if iswindows:
@@ -56,423 +56,6 @@ def delete_tree(path, permanent=False):
copyfile = os.link if hasattr(os, 'link') else shutil.copyfile
FIELD_MAP = {'id':0, 'title':1, 'authors':2, 'publisher':3, 'rating':4, 'timestamp':5,
'size':6, 'tags':7, 'comments':8, 'series':9, 'series_index':10,
'sort':11, 'author_sort':12, 'formats':13, 'isbn':14, 'path':15,
'lccn':16, 'pubdate':17, 'flags':18, 'uuid':19, 'cover':20}
INDEX_MAP = dict(zip(FIELD_MAP.values(), FIELD_MAP.keys()))
class CoverCache(QThread):
def __init__(self, library_path, parent=None):
QThread.__init__(self, parent)
self.library_path = library_path
self.id_map = None
self.id_map_lock = QReadWriteLock()
self.load_queue = collections.deque()
self.load_queue_lock = QReadWriteLock(QReadWriteLock.Recursive)
self.cache = {}
self.cache_lock = QReadWriteLock()
self.id_map_stale = True
self.keep_running = True
def build_id_map(self):
self.id_map_lock.lockForWrite()
self.id_map = {}
for f in glob.glob(os.path.join(self.library_path, '*', '* (*)', 'cover.jpg')):
c = os.path.basename(os.path.dirname(f))
try:
id = int(re.search(r'\((\d+)\)', c[c.rindex('('):]).group(1))
self.id_map[id] = f
except:
continue
self.id_map_lock.unlock()
self.id_map_stale = False
def set_cache(self, ids):
self.cache_lock.lockForWrite()
already_loaded = set([])
for id in self.cache.keys():
if id in ids:
already_loaded.add(id)
else:
self.cache.pop(id)
self.cache_lock.unlock()
ids = [i for i in ids if i not in already_loaded]
self.load_queue_lock.lockForWrite()
self.load_queue = collections.deque(ids)
self.load_queue_lock.unlock()
def run(self):
while self.keep_running:
if self.id_map is None or self.id_map_stale:
self.build_id_map()
while True: # Load images from the load queue
self.load_queue_lock.lockForWrite()
try:
id = self.load_queue.popleft()
except IndexError:
break
finally:
self.load_queue_lock.unlock()
self.cache_lock.lockForRead()
need = True
if id in self.cache.keys():
need = False
self.cache_lock.unlock()
if not need:
continue
path = None
self.id_map_lock.lockForRead()
if id in self.id_map.keys():
path = self.id_map[id]
else:
self.id_map_stale = True
self.id_map_lock.unlock()
if path and os.access(path, os.R_OK):
try:
img = QImage()
data = open(path, 'rb').read()
img.loadFromData(data)
if img.isNull():
continue
except:
continue
self.cache_lock.lockForWrite()
self.cache[id] = img
self.cache_lock.unlock()
self.sleep(1)
def stop(self):
self.keep_running = False
def cover(self, id):
val = None
if self.cache_lock.tryLockForRead(50):
val = self.cache.get(id, None)
self.cache_lock.unlock()
return val
def clear_cache(self):
self.cache_lock.lockForWrite()
self.cache = {}
self.cache_lock.unlock()
def refresh(self, ids):
self.cache_lock.lockForWrite()
for id in ids:
self.cache.pop(id, None)
self.cache_lock.unlock()
self.load_queue_lock.lockForWrite()
for id in ids:
self.load_queue.appendleft(id)
self.load_queue_lock.unlock()
### Global utility function for get_match here and in gui2/library.py
CONTAINS_MATCH = 0
EQUALS_MATCH = 1
REGEXP_MATCH = 2
def _match(query, value, matchkind):
for t in value:
t = t.lower()
try: ### ignore regexp exceptions, required because search-ahead tries before typing is finished
if ((matchkind == EQUALS_MATCH and query == t) or
(matchkind == REGEXP_MATCH and re.search(query, t, re.I)) or ### search unanchored
(matchkind == CONTAINS_MATCH and query in t)):
return True
except re.error:
pass
return False
class ResultCache(SearchQueryParser):
'''
Stores sorted and filtered metadata in memory.
'''
def build_relop_dict(self):
'''
Because the database dates have time in them, we can't use direct
comparisons even when field_count == 3. The query has time = 0, but
the database object has time == something. As such, a complete compare
will almost never be correct.
'''
def relop_eq(db, query, field_count):
if db.year == query.year:
if field_count == 1:
return True
if db.month == query.month:
if field_count == 2:
return True
return db.day == query.day
return False
def relop_gt(db, query, field_count):
if db.year > query.year:
return True
if field_count > 1 and db.year == query.year:
if db.month > query.month:
return True
return field_count == 3 and db.month == query.month and db.day > query.day
return False
def relop_lt(db, query, field_count):
if db.year < query.year:
return True
if field_count > 1 and db.year == query.year:
if db.month < query.month:
return True
return field_count == 3 and db.month == query.month and db.day < query.day
return False
def relop_ne(db, query, field_count):
return not relop_eq(db, query, field_count)
def relop_ge(db, query, field_count):
return not relop_lt(db, query, field_count)
def relop_le(db, query, field_count):
return not relop_gt(db, query, field_count)
self.search_relops = {'=':[1, relop_eq], '>':[1, relop_gt], '<':[1, relop_lt], \
'!=':[2, relop_ne], '>=':[2, relop_ge], '<=':[2, relop_le]}
def __init__(self):
self._map = self._map_filtered = self._data = []
self.first_sort = True
SearchQueryParser.__init__(self)
self.build_relop_dict()
def __getitem__(self, row):
return self._data[self._map_filtered[row]]
def __len__(self):
return len(self._map_filtered)
def __iter__(self):
for id in self._map_filtered:
yield self._data[id]
def universal_set(self):
return set([i[0] for i in self._data if i is not None])
def get_matches(self, location, query):
matches = set([])
if query and query.strip():
location = location.lower().strip()
### take care of dates special case
if location in ('pubdate', 'date'):
if len(query) < 2:
return matches
relop = None
for k in self.search_relops.keys():
if query.startswith(k):
(p, relop) = self.search_relops[k]
query = query[p:]
if relop is None:
return matches
loc = FIELD_MAP[{'date':'timestamp', 'pubdate':'pubdate'}[location]]
qd = parse_date(query)
field_count = query.count('-') + 1
for item in self._data:
if item is None: continue
if relop(item[loc], qd, field_count):
matches.add(item[0])
return matches
### everything else
matchkind = CONTAINS_MATCH
if (len(query) > 1):
if query.startswith('\\'):
query = query[1:]
elif query.startswith('='):
matchkind = EQUALS_MATCH
query = query[1:]
elif query.startswith('~'):
matchkind = REGEXP_MATCH
query = query[1:]
if matchkind != REGEXP_MATCH: ### leave case in regexps because it can be significant e.g. \S \W \D
query = query.lower()
if not isinstance(query, unicode):
query = query.decode('utf-8')
if location in ('tag', 'author', 'format', 'comment'):
location += 's'
all = ('title', 'authors', 'publisher', 'tags', 'comments', 'series', 'formats', 'isbn', 'rating', 'cover')
MAP = {}
for x in all:
MAP[x] = FIELD_MAP[x]
EXCLUDE_FIELDS = [MAP['rating'], MAP['cover']]
SPLITABLE_FIELDS = [MAP['authors'], MAP['tags'], MAP['formats']]
location = [location] if location != 'all' else list(MAP.keys())
for i, loc in enumerate(location):
location[i] = MAP[loc]
try:
rating_query = int(query) * 2
except:
rating_query = None
for loc in location:
if loc == MAP['authors']:
q = query.replace(',', '|'); ### DB stores authors with commas changed to bars, so change query
else:
q = query
for item in self._data:
if item is None: continue
if not item[loc]:
if query == 'false':
if isinstance(item[loc], basestring):
if item[loc].strip() != '':
continue
matches.add(item[0])
continue
continue ### item is empty. No possible matches below
if q == 'true':
if isinstance(item[loc], basestring):
if item[loc].strip() == '':
continue
matches.add(item[0])
continue
if rating_query and loc == MAP['rating'] and rating_query == int(item[loc]):
matches.add(item[0])
continue
if loc not in EXCLUDE_FIELDS:
if loc in SPLITABLE_FIELDS:
vals = item[loc].split(',') ### check individual tags/authors/formats, not the long string
else:
vals = [item[loc]] ### make into list to make _match happy
if _match(q, vals, matchkind):
matches.add(item[0])
continue
return matches
def remove(self, id):
self._data[id] = None
if id in self._map:
self._map.remove(id)
if id in self._map_filtered:
self._map_filtered.remove(id)
def set(self, row, col, val, row_is_id=False):
id = row if row_is_id else self._map_filtered[row]
self._data[id][col] = val
def get(self, row, col, row_is_id=False):
id = row if row_is_id else self._map_filtered[row]
return self._data[id][col]
def index(self, id, cache=False):
x = self._map if cache else self._map_filtered
return x.index(id)
def row(self, id):
return self.index(id)
def has_id(self, id):
try:
return self._data[id] is not None
except IndexError:
pass
return False
def refresh_ids(self, db, ids):
'''
Refresh the data in the cache for books identified by ids.
Returns a list of affected rows or None if the rows are filtered.
'''
for id in ids:
try:
self._data[id] = db.conn.get('SELECT * from meta2 WHERE id=?',
(id,))[0]
self._data[id].append(db.has_cover(id, index_is_id=True))
except IndexError:
return None
try:
return map(self.row, ids)
except ValueError:
pass
return None
def books_added(self, ids, db):
if not ids:
return
self._data.extend(repeat(None, max(ids)-len(self._data)+2))
for id in ids:
self._data[id] = db.conn.get('SELECT * from meta2 WHERE id=?', (id,))[0]
self._data[id].append(db.has_cover(id, index_is_id=True))
self._map[0:0] = ids
self._map_filtered[0:0] = ids
def books_deleted(self, ids):
for id in ids:
self._data[id] = None
if id in self._map: self._map.remove(id)
if id in self._map_filtered: self._map_filtered.remove(id)
def count(self):
return len(self._map)
def refresh(self, db, field=None, ascending=True):
temp = db.conn.get('SELECT * FROM meta2')
self._data = list(itertools.repeat(None, temp[-1][0]+2)) if temp else []
for r in temp:
self._data[r[0]] = r
for item in self._data:
if item is not None:
item.append(db.has_cover(item[0], index_is_id=True))
self._map = [i[0] for i in self._data if i is not None]
if field is not None:
self.sort(field, ascending)
self._map_filtered = list(self._map)
def seriescmp(self, x, y):
try:
ans = cmp(self._data[x][9].lower(), self._data[y][9].lower())
except AttributeError: # Some entries may be None
ans = cmp(self._data[x][9], self._data[y][9])
if ans != 0: return ans
return cmp(self._data[x][10], self._data[y][10])
def cmp(self, loc, x, y, asstr=True, subsort=False):
try:
ans = cmp(self._data[x][loc].lower(), self._data[y][loc].lower()) if \
asstr else cmp(self._data[x][loc], self._data[y][loc])
except AttributeError: # Some entries may be None
ans = cmp(self._data[x][loc], self._data[y][loc])
if subsort and ans == 0:
return cmp(self._data[x][11].lower(), self._data[y][11].lower())
return ans
def sort(self, field, ascending, subsort=False):
field = field.lower().strip()
if field in ('author', 'tag', 'comment'):
field += 's'
if field == 'date': field = 'timestamp'
elif field == 'title': field = 'sort'
elif field == 'authors': field = 'author_sort'
if self.first_sort:
subsort = True
self.first_sort = False
fcmp = self.seriescmp if field == 'series' else \
functools.partial(self.cmp, FIELD_MAP[field], subsort=subsort,
asstr=field not in ('size', 'rating', 'timestamp'))
self._map.sort(cmp=fcmp, reverse=not ascending)
self._map_filtered = [id for id in self._map if id in self._map_filtered]
def search(self, query):
if not query or not query.strip():
self._map_filtered = list(self._map)
return
matches = sorted(self.parse(query))
self._map_filtered = [id for id in self._map if id in matches]
class Tag(object):
@@ -494,11 +77,12 @@ class Tag(object):
return str(self)
-class LibraryDatabase2(LibraryDatabase):
+class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
'''
An ebook metadata database that stores references to ebook files on disk.
'''
PATH_LIMIT = 40 if 'win32' in sys.platform else 100
@dynamic_property
def user_version(self):
doc = 'The user version of this database'
@@ -538,28 +122,10 @@ class LibraryDatabase2(LibraryDatabase):
self.connect()
self.is_case_sensitive = not iswindows and not isosx and \
not os.path.exists(self.dbpath.replace('metadata.db', 'MeTAdAtA.dB'))
-# Upgrade database
-while True:
-uv = self.user_version
-meth = getattr(self, 'upgrade_version_%d'%uv, None)
-if meth is None:
-break
-else:
-print 'Upgrading database to version %d...'%(uv+1)
-meth()
-self.user_version = uv+1
+SchemaUpgrade.__init__(self)
+CustomColumns.__init__(self)
self.initialize_dynamic()
def custom_table_names(self, num):
return 'custom_column_%d'%num, 'books_custom_column_%d_link'%num
@property
def custom_tables(self):
return set([x[0] for x in self.conn.get(
'SELECT name FROM sqlite_master WHERE type="table" AND '
'(name GLOB "custom_column_*" OR name GLOB books_customcolumn_*)')])
def initialize_dynamic(self):
template = '''\
(SELECT {query} FROM books_{table}_link AS link INNER JOIN
@@ -594,6 +160,13 @@ class LibraryDatabase2(LibraryDatabase):
line = template.format(col=col[0], table=col[1],
link_col=col[2], query=col[3])
lines.append(line)
+self.FIELD_MAP = {'id':0, 'title':1, 'authors':2, 'publisher':3, 'rating':4, 'timestamp':5,
+'size':6, 'tags':7, 'comments':8, 'series':9, 'series_index':10,
+'sort':11, 'author_sort':12, 'formats':13, 'isbn':14, 'path':15,
+'lccn':16, 'pubdate':17, 'flags':18, 'uuid':19, 'cover':20}
script = '''
DROP VIEW IF EXISTS meta2;
CREATE TEMP VIEW meta2 AS
@@ -603,44 +176,8 @@ class LibraryDatabase2(LibraryDatabase):
'''.format(', \n'.join(lines))
self.conn.executescript(script.format(''))
self.conn.commit()
"""
# Delete marked custom columns
for num in self.conn.get(
'SELECT id FROM custom_columns WHERE delete=1'):
dt, lt = self.custom_table_names(num)
self.conn.executescript('''\
DROP TABLE IF EXISTS %s;
DROP TABLE IF EXISTS %s;
'''%(dt, lt)
)
self.conn.execute('DELETE FROM custom_columns WHERE delete=1')
self.conn.commit()
columns = []
remove = set([])
tables = self.custom_tables
for num, label, is_multiple in self.conn.get(
'SELECT id,label,is_multiple from custom_columns'):
data_table, link_table = self.custom_table_names(num)
if data_table in tables and link_table in tables:
col = 'concat(name)' if is_multiple else 'name'
columns.append(('(SELECT {col} FROM {dt} WHERE '
'{dt}.id IN (SELECT custom FROM '
'{lt} WHERE book=books.id)) '
'custom_{label}').format(num=num, label=label, col=col,
dt=data_table, lt=link_table))
else:
from calibre import prints
prints(u'WARNING: Custom column %s is missing, removing its entry!'%label)
remove.add(num)
for num in remove:
self.conn.execute('DELETE FROM custom_columns WHERE id=%d'%num)
self.conn.executescript(script)
self.conn.commit()
"""
-self.data = ResultCache()
+self.data = ResultCache(self.FIELD_MAP)
self.search = self.data.search
self.refresh = functools.partial(self.data.refresh, self)
self.sort = self.data.sort
@@ -663,255 +200,13 @@ class LibraryDatabase2(LibraryDatabase):
'publisher', 'rating', 'series', 'series_index', 'tags',
'title', 'timestamp', 'uuid', 'pubdate'):
setattr(self, prop, functools.partial(get_property,
-loc=FIELD_MAP['comments' if prop == 'comment' else prop]))
+loc=self.FIELD_MAP['comments' if prop == 'comment' else prop]))
def initialize_database(self):
metadata_sqlite = open(P('metadata_sqlite.sql'), 'rb').read()
self.conn.executescript(metadata_sqlite)
self.user_version = 1
def upgrade_version_1(self):
'''
Normalize indices.
'''
self.conn.executescript(textwrap.dedent('''\
DROP INDEX authors_idx;
CREATE INDEX authors_idx ON books (author_sort COLLATE NOCASE, sort COLLATE NOCASE);
DROP INDEX series_idx;
CREATE INDEX series_idx ON series (name COLLATE NOCASE);
CREATE INDEX series_sort_idx ON books (series_index, id);
'''))
def upgrade_version_2(self):
''' Fix Foreign key constraints for deleting from link tables. '''
script = textwrap.dedent('''\
DROP TRIGGER IF EXISTS fkc_delete_books_%(ltable)s_link;
CREATE TRIGGER fkc_delete_on_%(table)s
BEFORE DELETE ON %(table)s
BEGIN
SELECT CASE
WHEN (SELECT COUNT(id) FROM books_%(ltable)s_link WHERE %(ltable_col)s=OLD.id) > 0
THEN RAISE(ABORT, 'Foreign key violation: %(table)s is still referenced')
END;
END;
DELETE FROM %(table)s WHERE (SELECT COUNT(id) FROM books_%(ltable)s_link WHERE %(ltable_col)s=%(table)s.id) < 1;
''')
self.conn.executescript(script%dict(ltable='authors', table='authors', ltable_col='author'))
self.conn.executescript(script%dict(ltable='publishers', table='publishers', ltable_col='publisher'))
self.conn.executescript(script%dict(ltable='tags', table='tags', ltable_col='tag'))
self.conn.executescript(script%dict(ltable='series', table='series', ltable_col='series'))
def upgrade_version_3(self):
' Add path to result cache '
self.conn.executescript('''
DROP VIEW meta;
CREATE VIEW meta AS
SELECT id, title,
(SELECT concat(name) FROM authors WHERE authors.id IN (SELECT author from books_authors_link WHERE book=books.id)) authors,
(SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
(SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
timestamp,
(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
(SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
(SELECT text FROM comments WHERE book=books.id) comments,
(SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
series_index,
sort,
author_sort,
(SELECT concat(format) FROM data WHERE data.book=books.id) formats,
isbn,
path
FROM books;
''')
def upgrade_version_4(self):
'Rationalize books table'
self.conn.executescript('''
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE
books_backup(id,title,sort,timestamp,series_index,author_sort,isbn,path);
INSERT INTO books_backup SELECT id,title,sort,timestamp,series_index,author_sort,isbn,path FROM books;
DROP TABLE books;
CREATE TABLE books ( id INTEGER PRIMARY KEY AUTOINCREMENT,
title TEXT NOT NULL DEFAULT 'Unknown' COLLATE NOCASE,
sort TEXT COLLATE NOCASE,
timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
pubdate TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
series_index REAL NOT NULL DEFAULT 1.0,
author_sort TEXT COLLATE NOCASE,
isbn TEXT DEFAULT "" COLLATE NOCASE,
lccn TEXT DEFAULT "" COLLATE NOCASE,
path TEXT NOT NULL DEFAULT "",
flags INTEGER NOT NULL DEFAULT 1
);
INSERT INTO
books (id,title,sort,timestamp,pubdate,series_index,author_sort,isbn,path)
SELECT id,title,sort,timestamp,timestamp,series_index,author_sort,isbn,path FROM books_backup;
DROP TABLE books_backup;
DROP VIEW meta;
CREATE VIEW meta AS
SELECT id, title,
(SELECT concat(name) FROM authors WHERE authors.id IN (SELECT author from books_authors_link WHERE book=books.id)) authors,
(SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
(SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
timestamp,
(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
(SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
(SELECT text FROM comments WHERE book=books.id) comments,
(SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
series_index,
sort,
author_sort,
(SELECT concat(format) FROM data WHERE data.book=books.id) formats,
isbn,
path,
lccn,
pubdate,
flags
FROM books;
''')
def upgrade_version_5(self):
'Update indexes/triggers for new books table'
self.conn.executescript('''
BEGIN TRANSACTION;
CREATE INDEX authors_idx ON books (author_sort COLLATE NOCASE);
CREATE INDEX books_idx ON books (sort COLLATE NOCASE);
CREATE TRIGGER books_delete_trg
AFTER DELETE ON books
BEGIN
DELETE FROM books_authors_link WHERE book=OLD.id;
DELETE FROM books_publishers_link WHERE book=OLD.id;
DELETE FROM books_ratings_link WHERE book=OLD.id;
DELETE FROM books_series_link WHERE book=OLD.id;
DELETE FROM books_tags_link WHERE book=OLD.id;
DELETE FROM data WHERE book=OLD.id;
DELETE FROM comments WHERE book=OLD.id;
DELETE FROM conversion_options WHERE book=OLD.id;
END;
CREATE TRIGGER books_insert_trg
AFTER INSERT ON books
BEGIN
UPDATE books SET sort=title_sort(NEW.title) WHERE id=NEW.id;
END;
CREATE TRIGGER books_update_trg
AFTER UPDATE ON books
BEGIN
UPDATE books SET sort=title_sort(NEW.title) WHERE id=NEW.id;
END;
UPDATE books SET sort=title_sort(title) WHERE sort IS NULL;
END TRANSACTION;
'''
)
def upgrade_version_6(self):
'Show authors in order'
self.conn.executescript('''
BEGIN TRANSACTION;
DROP VIEW meta;
CREATE VIEW meta AS
SELECT id, title,
(SELECT sortconcat(bal.id, name) FROM books_authors_link AS bal JOIN authors ON(author = authors.id) WHERE book = books.id) authors,
(SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
(SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
timestamp,
(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
(SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
(SELECT text FROM comments WHERE book=books.id) comments,
(SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
series_index,
sort,
author_sort,
(SELECT concat(format) FROM data WHERE data.book=books.id) formats,
isbn,
path,
lccn,
pubdate,
flags
FROM books;
END TRANSACTION;
''')
def upgrade_version_7(self):
'Add uuid column'
self.conn.executescript('''
BEGIN TRANSACTION;
ALTER TABLE books ADD COLUMN uuid TEXT;
DROP TRIGGER IF EXISTS books_insert_trg;
DROP TRIGGER IF EXISTS books_update_trg;
UPDATE books SET uuid=uuid4();
CREATE TRIGGER books_insert_trg AFTER INSERT ON books
BEGIN
UPDATE books SET sort=title_sort(NEW.title),uuid=uuid4() WHERE id=NEW.id;
END;
CREATE TRIGGER books_update_trg AFTER UPDATE ON books
BEGIN
UPDATE books SET sort=title_sort(NEW.title) WHERE id=NEW.id;
END;
DROP VIEW meta;
CREATE VIEW meta AS
SELECT id, title,
(SELECT sortconcat(bal.id, name) FROM books_authors_link AS bal JOIN authors ON(author = authors.id) WHERE book = books.id) authors,
(SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
(SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
timestamp,
(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
(SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
(SELECT text FROM comments WHERE book=books.id) comments,
(SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
series_index,
sort,
author_sort,
(SELECT concat(format) FROM data WHERE data.book=books.id) formats,
isbn,
path,
lccn,
pubdate,
flags,
uuid
FROM books;
END TRANSACTION;
''')
def upgrade_version_8(self):
'Add Tag Browser views'
def create_tag_browser_view(table_name, column_name):
self.conn.executescript('''
DROP VIEW IF EXISTS tag_browser_{tn};
CREATE VIEW tag_browser_{tn} AS SELECT
id,
name,
(SELECT COUNT(id) FROM books_{tn}_link WHERE {cn}={tn}.id) count
FROM {tn};
'''.format(tn=table_name, cn=column_name))
for tn in ('authors', 'tags', 'publishers', 'series'):
cn = tn[:-1]
if tn == 'series':
cn = tn
create_tag_browser_view(tn, cn)
"""def upgrade_version_9(self):
'Add custom columns'
self.conn.executescript('''
CREATE TABLE custom_columns (
id INTEGER PRIMARY KEY AUTOINCREMENT,
label TEXT NOT NULL,
name TEXT NOT NULL,
datatype TEXT NOT NULL,
delete BOOL DEFAULT 0,
UNIQUE(label)
);
''')"""
def last_modified(self):
''' Return last modified time as a UTC datetime object'''
return utcfromtimestamp(os.stat(self.dbpath).st_mtime)
@@ -924,7 +219,7 @@ class LibraryDatabase2(LibraryDatabase):
def path(self, index, index_is_id=False):
'Return the relative path to the directory containing this books files as a unicode string.'
row = self.data._data[index] if index_is_id else self.data[index]
-return row[FIELD_MAP['path']].replace('/', os.sep)
+return row[self.FIELD_MAP['path']].replace('/', os.sep)
def abspath(self, index, index_is_id=False):
@@ -1011,7 +306,7 @@ class LibraryDatabase2(LibraryDatabase):
self.add_format(id, format, stream, index_is_id=True, path=tpath)
self.conn.execute('UPDATE books SET path=? WHERE id=?', (path, id))
self.conn.commit()
-self.data.set(id, FIELD_MAP['path'], path, row_is_id=True)
+self.data.set(id, self.FIELD_MAP['path'], path, row_is_id=True)
# Delete not needed directories
if current_path and os.path.exists(spath):
if self.normpath(spath) != self.normpath(tpath):
@@ -1315,10 +610,10 @@ class LibraryDatabase2(LibraryDatabase):
now = nowf()
for r in self.data._data:
if r is not None:
-if (now - r[FIELD_MAP['timestamp']]) > delta:
-tags = r[FIELD_MAP['tags']]
+if (now - r[self.FIELD_MAP['timestamp']]) > delta:
+tags = r[self.FIELD_MAP['tags']]
if tags and tag in tags.lower():
-yield r[FIELD_MAP['id']]
+yield r[self.FIELD_MAP['id']]
def get_next_series_num_for(self, series):
series_id = self.conn.get('SELECT id from series WHERE name=?',
@@ -1434,10 +729,10 @@ class LibraryDatabase2(LibraryDatabase):
self.conn.execute('UPDATE books SET author_sort=? WHERE id=?',
(ss, id))
self.conn.commit()
-self.data.set(id, FIELD_MAP['authors'],
+self.data.set(id, self.FIELD_MAP['authors'],
','.join([a.replace(',', '|') for a in authors]),
row_is_id=True)
-self.data.set(id, FIELD_MAP['author_sort'], ss, row_is_id=True)
+self.data.set(id, self.FIELD_MAP['author_sort'], ss, row_is_id=True)
self.set_path(id, True)
if notify:
self.notify('metadata', [id])
@@ -1448,8 +743,8 @@ class LibraryDatabase2(LibraryDatabase):
if not isinstance(title, unicode):
title = title.decode(preferred_encoding, 'replace')
self.conn.execute('UPDATE books SET title=? WHERE id=?', (title, id))
-self.data.set(id, FIELD_MAP['title'], title, row_is_id=True)
-self.data.set(id, FIELD_MAP['sort'], title_sort(title), row_is_id=True)
+self.data.set(id, self.FIELD_MAP['title'], title, row_is_id=True)
+self.data.set(id, self.FIELD_MAP['sort'], title_sort(title), row_is_id=True)
self.set_path(id, True)
self.conn.commit()
if notify:
@@ -1458,7 +753,7 @@ class LibraryDatabase2(LibraryDatabase):
def set_timestamp(self, id, dt, notify=True):
if dt:
self.conn.execute('UPDATE books SET timestamp=? WHERE id=?', (dt, id))
-self.data.set(id, FIELD_MAP['timestamp'], dt, row_is_id=True)
+self.data.set(id, self.FIELD_MAP['timestamp'], dt, row_is_id=True)
self.conn.commit()
if notify:
self.notify('metadata', [id])
@@ -1466,7 +761,7 @@ class LibraryDatabase2(LibraryDatabase):
def set_pubdate(self, id, dt, notify=True):
if dt:
self.conn.execute('UPDATE books SET pubdate=? WHERE id=?', (dt, id))
-self.data.set(id, FIELD_MAP['pubdate'], dt, row_is_id=True)
+self.data.set(id, self.FIELD_MAP['pubdate'], dt, row_is_id=True)
self.conn.commit()
if notify:
self.notify('metadata', [id])
@@ -1485,7 +780,7 @@ class LibraryDatabase2(LibraryDatabase):
aid = self.conn.execute('INSERT INTO publishers(name) VALUES (?)', (publisher,)).lastrowid
self.conn.execute('INSERT INTO books_publishers_link(book, publisher) VALUES (?,?)', (id, aid))
self.conn.commit()
-self.data.set(id, FIELD_MAP['publisher'], publisher, row_is_id=True)
+self.data.set(id, self.FIELD_MAP['publisher'], publisher, row_is_id=True)
if notify:
self.notify('metadata', [id])
@@ -1536,7 +831,7 @@ class LibraryDatabase2(LibraryDatabase):
(id, tid))
self.conn.commit()
tags = ','.join(self.get_tags(id))
-self.data.set(id, FIELD_MAP['tags'], tags, row_is_id=True)
+self.data.set(id, self.FIELD_MAP['tags'], tags, row_is_id=True)
if notify:
self.notify('metadata', [id])
@@ -1595,7 +890,7 @@ class LibraryDatabase2(LibraryDatabase):
self.data.set(row, 9, series)
except ValueError:
pass
-self.data.set(id, FIELD_MAP['series'], series, row_is_id=True)
+self.data.set(id, self.FIELD_MAP['series'], series, row_is_id=True)
if notify:
self.notify('metadata', [id])
@@ -1608,7 +903,7 @@ class LibraryDatabase2(LibraryDatabase):
idx = 1.0
self.conn.execute('UPDATE books SET series_index=? WHERE id=?', (idx, id))
self.conn.commit()
-self.data.set(id, FIELD_MAP['series_index'], idx, row_is_id=True)
+self.data.set(id, self.FIELD_MAP['series_index'], idx, row_is_id=True)
if notify:
self.notify('metadata', [id])
@@ -1619,7 +914,7 @@ class LibraryDatabase2(LibraryDatabase):
rat = rat if rat else self.conn.execute('INSERT INTO ratings(rating) VALUES (?)', (rating,)).lastrowid
self.conn.execute('INSERT INTO books_ratings_link(book, rating) VALUES (?,?)', (id, rat))
self.conn.commit()
-self.data.set(id, FIELD_MAP['rating'], rating, row_is_id=True)
+self.data.set(id, self.FIELD_MAP['rating'], rating, row_is_id=True)
if notify:
self.notify('metadata', [id])
@@ -1627,21 +922,21 @@ class LibraryDatabase2(LibraryDatabase):
self.conn.execute('DELETE FROM comments WHERE book=?', (id,))
self.conn.execute('INSERT INTO comments(book,text) VALUES (?,?)', (id, text))
self.conn.commit()
-self.data.set(id, FIELD_MAP['comments'], text, row_is_id=True)
+self.data.set(id, self.FIELD_MAP['comments'], text, row_is_id=True)
if notify:
self.notify('metadata', [id])
def set_author_sort(self, id, sort, notify=True):
self.conn.execute('UPDATE books SET author_sort=? WHERE id=?', (sort, id))
self.conn.commit()
-self.data.set(id, FIELD_MAP['author_sort'], sort, row_is_id=True)
+self.data.set(id, self.FIELD_MAP['author_sort'], sort, row_is_id=True)
if notify:
self.notify('metadata', [id])
def set_isbn(self, id, isbn, notify=True):
self.conn.execute('UPDATE books SET isbn=? WHERE id=?', (isbn, id))
self.conn.commit()
-self.data.set(id, FIELD_MAP['isbn'], isbn, row_is_id=True)
+self.data.set(id, self.FIELD_MAP['isbn'], isbn, row_is_id=True)
if notify:
self.notify('metadata', [id])
@@ -1890,7 +1185,7 @@ class LibraryDatabase2(LibraryDatabase):
yield record
def all_ids(self):
-x = FIELD_MAP['id']
+x = self.FIELD_MAP['id']
for i in iter(self):
yield i[x]
@@ -1912,12 +1207,12 @@ class LibraryDatabase2(LibraryDatabase):
data = []
for record in self.data:
if record is None: continue
-db_id = record[FIELD_MAP['id']]
+db_id = record[self.FIELD_MAP['id']]
if ids is not None and db_id not in ids:
continue
x = {}
for field in FIELDS:
-x[field] = record[FIELD_MAP[field]]
+x[field] = record[self.FIELD_MAP[field]]
data.append(x)
x['id'] = db_id
x['formats'] = []
@@ -1927,11 +1222,11 @@ class LibraryDatabase2(LibraryDatabase):
if authors_as_string:
x['authors'] = authors_to_string(x['authors'])
x['tags'] = [i.replace('|', ',').strip() for i in x['tags'].split(',')] if x['tags'] else []
-path = os.path.join(prefix, self.path(record[FIELD_MAP['id']], index_is_id=True))
+path = os.path.join(prefix, self.path(record[self.FIELD_MAP['id']], index_is_id=True))
x['cover'] = os.path.join(path, 'cover.jpg')
if not self.has_cover(x['id'], index_is_id=True):
x['cover'] = None
-formats = self.formats(record[FIELD_MAP['id']], index_is_id=True)
+formats = self.formats(record[self.FIELD_MAP['id']], index_is_id=True)
if formats:
for fmt in formats.split(','):
path = self.format_abspath(x['id'], fmt, index_is_id=True)
@@ -2129,7 +1424,7 @@ books_series_link feeds
us = self.data.universal_set()
total = float(len(us))
for i, id in enumerate(us):
-formats = self.data.get(id, FIELD_MAP['formats'], row_is_id=True)
+formats = self.data.get(id, self.FIELD_MAP['formats'], row_is_id=True)
if not formats:
formats = []
else:

View File

@@ -0,0 +1,271 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
class SchemaUpgrade(object):
def __init__(self):
# Upgrade database
while True:
uv = self.user_version
meth = getattr(self, 'upgrade_version_%d'%uv, None)
if meth is None:
break
else:
print 'Upgrading database to version %d...'%(uv+1)
meth()
self.user_version = uv+1
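# e.g. a library at user_version 3 runs upgrade_version_3 through
# upgrade_version_8 in turn, bumping user_version after each, and
# stops as soon as no upgrade_version_N method exists.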
def upgrade_version_1(self):
'''
Normalize indices.
'''
self.conn.executescript('''\
DROP INDEX authors_idx;
CREATE INDEX authors_idx ON books (author_sort COLLATE NOCASE, sort COLLATE NOCASE);
DROP INDEX series_idx;
CREATE INDEX series_idx ON series (name COLLATE NOCASE);
CREATE INDEX series_sort_idx ON books (series_index, id);
''')
def upgrade_version_2(self):
''' Fix Foreign key constraints for deleting from link tables. '''
script = '''\
DROP TRIGGER IF EXISTS fkc_delete_books_%(ltable)s_link;
CREATE TRIGGER fkc_delete_on_%(table)s
BEFORE DELETE ON %(table)s
BEGIN
SELECT CASE
WHEN (SELECT COUNT(id) FROM books_%(ltable)s_link WHERE %(ltable_col)s=OLD.id) > 0
THEN RAISE(ABORT, 'Foreign key violation: %(table)s is still referenced')
END;
END;
DELETE FROM %(table)s WHERE (SELECT COUNT(id) FROM books_%(ltable)s_link WHERE %(ltable_col)s=%(table)s.id) < 1;
'''
self.conn.executescript(script%dict(ltable='authors', table='authors', ltable_col='author'))
self.conn.executescript(script%dict(ltable='publishers', table='publishers', ltable_col='publisher'))
self.conn.executescript(script%dict(ltable='tags', table='tags', ltable_col='tag'))
self.conn.executescript(script%dict(ltable='series', table='series', ltable_col='series'))
def upgrade_version_3(self):
' Add path to result cache '
self.conn.executescript('''
DROP VIEW meta;
CREATE VIEW meta AS
SELECT id, title,
(SELECT concat(name) FROM authors WHERE authors.id IN (SELECT author from books_authors_link WHERE book=books.id)) authors,
(SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
(SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
timestamp,
(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
(SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
(SELECT text FROM comments WHERE book=books.id) comments,
(SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
series_index,
sort,
author_sort,
(SELECT concat(format) FROM data WHERE data.book=books.id) formats,
isbn,
path
FROM books;
''')
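Note that concat in this view is not a SQLite built-in: calibre registers custom aggregate functions on each connection. A sketch of how such an aggregate can be registered with the standard sqlite3 module (this Concatenate class is illustrative; calibre's own implementation lives in its sqlite wrapper and may differ):

    import sqlite3

    class Concatenate(object):
        'Aggregate that joins non-NULL values with a separator'
        def __init__(self, sep=','):
            self.sep = sep
            self.ans = []
        def step(self, value):
            if value is not None:
                self.ans.append(value)
        def finalize(self):
            return self.sep.join(self.ans)

    con = sqlite3.connect(':memory:')
    con.create_aggregate('concat', 1, Concatenate)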
def upgrade_version_4(self):
'Rationalize books table'
self.conn.executescript('''
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE
books_backup(id,title,sort,timestamp,series_index,author_sort,isbn,path);
INSERT INTO books_backup SELECT id,title,sort,timestamp,series_index,author_sort,isbn,path FROM books;
DROP TABLE books;
CREATE TABLE books ( id INTEGER PRIMARY KEY AUTOINCREMENT,
title TEXT NOT NULL DEFAULT 'Unknown' COLLATE NOCASE,
sort TEXT COLLATE NOCASE,
timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
pubdate TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
series_index REAL NOT NULL DEFAULT 1.0,
author_sort TEXT COLLATE NOCASE,
isbn TEXT DEFAULT "" COLLATE NOCASE,
lccn TEXT DEFAULT "" COLLATE NOCASE,
path TEXT NOT NULL DEFAULT "",
flags INTEGER NOT NULL DEFAULT 1
);
INSERT INTO
books (id,title,sort,timestamp,pubdate,series_index,author_sort,isbn,path)
SELECT id,title,sort,timestamp,timestamp,series_index,author_sort,isbn,path FROM books_backup;
DROP TABLE books_backup;
DROP VIEW meta;
CREATE VIEW meta AS
SELECT id, title,
(SELECT concat(name) FROM authors WHERE authors.id IN (SELECT author from books_authors_link WHERE book=books.id)) authors,
(SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
(SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
timestamp,
(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
(SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
(SELECT text FROM comments WHERE book=books.id) comments,
(SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
series_index,
sort,
author_sort,
(SELECT concat(format) FROM data WHERE data.book=books.id) formats,
isbn,
path,
lccn,
pubdate,
flags
FROM books;
''')
def upgrade_version_5(self):
'Update indexes/triggers for new books table'
self.conn.executescript('''
BEGIN TRANSACTION;
CREATE INDEX authors_idx ON books (author_sort COLLATE NOCASE);
CREATE INDEX books_idx ON books (sort COLLATE NOCASE);
CREATE TRIGGER books_delete_trg
AFTER DELETE ON books
BEGIN
DELETE FROM books_authors_link WHERE book=OLD.id;
DELETE FROM books_publishers_link WHERE book=OLD.id;
DELETE FROM books_ratings_link WHERE book=OLD.id;
DELETE FROM books_series_link WHERE book=OLD.id;
DELETE FROM books_tags_link WHERE book=OLD.id;
DELETE FROM data WHERE book=OLD.id;
DELETE FROM comments WHERE book=OLD.id;
DELETE FROM conversion_options WHERE book=OLD.id;
END;
CREATE TRIGGER books_insert_trg
AFTER INSERT ON books
BEGIN
UPDATE books SET sort=title_sort(NEW.title) WHERE id=NEW.id;
END;
CREATE TRIGGER books_update_trg
AFTER UPDATE ON books
BEGIN
UPDATE books SET sort=title_sort(NEW.title) WHERE id=NEW.id;
END;
UPDATE books SET sort=title_sort(title) WHERE sort IS NULL;
END TRANSACTION;
'''
)
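The title_sort() called by these triggers is likewise supplied from Python rather than being a SQLite built-in. Assuming the helper from calibre's metadata code, the registration would look roughly like:

    from calibre.ebooks.metadata import title_sort
    con.create_function('title_sort', 1, title_sort)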
def upgrade_version_6(self):
'Show authors in order'
self.conn.executescript('''
BEGIN TRANSACTION;
DROP VIEW meta;
CREATE VIEW meta AS
SELECT id, title,
(SELECT sortconcat(bal.id, name) FROM books_authors_link AS bal JOIN authors ON(author = authors.id) WHERE book = books.id) authors,
(SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
(SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
timestamp,
(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
(SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
(SELECT text FROM comments WHERE book=books.id) comments,
(SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
series_index,
sort,
author_sort,
(SELECT concat(format) FROM data WHERE data.book=books.id) formats,
isbn,
path,
lccn,
pubdate,
flags
FROM books;
END TRANSACTION;
''')
def upgrade_version_7(self):
'Add uuid column'
self.conn.executescript('''
BEGIN TRANSACTION;
ALTER TABLE books ADD COLUMN uuid TEXT;
DROP TRIGGER IF EXISTS books_insert_trg;
DROP TRIGGER IF EXISTS books_update_trg;
UPDATE books SET uuid=uuid4();
CREATE TRIGGER books_insert_trg AFTER INSERT ON books
BEGIN
UPDATE books SET sort=title_sort(NEW.title),uuid=uuid4() WHERE id=NEW.id;
END;
CREATE TRIGGER books_update_trg AFTER UPDATE ON books
BEGIN
UPDATE books SET sort=title_sort(NEW.title) WHERE id=NEW.id;
END;
DROP VIEW meta;
CREATE VIEW meta AS
SELECT id, title,
(SELECT sortconcat(bal.id, name) FROM books_authors_link AS bal JOIN authors ON(author = authors.id) WHERE book = books.id) authors,
(SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
(SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
timestamp,
(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
(SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
(SELECT text FROM comments WHERE book=books.id) comments,
(SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
series_index,
sort,
author_sort,
(SELECT concat(format) FROM data WHERE data.book=books.id) formats,
isbn,
path,
lccn,
pubdate,
flags,
uuid
FROM books;
END TRANSACTION;
''')
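As with title_sort(), the uuid4() the new insert trigger calls is a Python-side SQL function; a one-line sketch of a plausible registration:

    import uuid
    con.create_function('uuid4', 0, lambda: str(uuid.uuid4()))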
def upgrade_version_8(self):
'Add Tag Browser views'
def create_tag_browser_view(table_name, column_name):
self.conn.executescript('''
DROP VIEW IF EXISTS tag_browser_{tn};
CREATE VIEW tag_browser_{tn} AS SELECT
id,
name,
(SELECT COUNT(id) FROM books_{tn}_link WHERE {cn}={tn}.id) count
FROM {tn};
'''.format(tn=table_name, cn=column_name))
for tn in ('authors', 'tags', 'publishers', 'series'):
cn = tn[:-1]
if tn == 'series':
cn = tn
create_tag_browser_view(tn, cn)
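The loop derives the link-table column by stripping the trailing 's' ('authors' becomes 'author'), with 'series' kept as-is since it is its own singular. For table_name='authors' the helper emits:

    DROP VIEW IF EXISTS tag_browser_authors;
    CREATE VIEW tag_browser_authors AS SELECT
        id,
        name,
        (SELECT COUNT(id) FROM books_authors_link WHERE author=authors.id) count
    FROM authors;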
"""
def upgrade_version_9(self):
'Add custom columns'
self.conn.executescript('''
CREATE TABLE custom_columns (
id INTEGER PRIMARY KEY AUTOINCREMENT,
label TEXT NOT NULL,
name TEXT NOT NULL,
datatype TEXT NOT NULL,
mark_for_delete BOOL DEFAULT 0 NOT NULL,
flag BOOL DEFAULT 0 NOT NULL,
editable BOOL DEFAULT 1 NOT NULL,
UNIQUE(label)
);
CREATE INDEX custom_columns_idx ON custom_columns (label);
CREATE INDEX formats_idx ON data (format);
''')
"""

View File

@@ -25,7 +25,7 @@ from calibre.utils.genshi.template import MarkupTemplate
from calibre import fit_image, guess_type, prepare_string_for_xml, \
        strftime as _strftime
from calibre.library import server_config as config
-from calibre.library.database2 import LibraryDatabase2, FIELD_MAP
+from calibre.library.database2 import LibraryDatabase2
from calibre.utils.config import config_dir
from calibre.utils.mdns import publish as publish_zeroconf, \
        stop_server as stop_zeroconf, get_external_ip
@@ -512,18 +512,18 @@ class LibraryServer(object):
        if field == 'series':
            items.sort(cmp=self.seriescmp, reverse=not order)
        else:
-           field = FIELD_MAP[field]
+           field = self.db.FIELD_MAP[field]
            getter = operator.itemgetter(field)
            items.sort(cmp=lambda x, y: cmpf(getter(x), getter(y)), reverse=not order)

    def seriescmp(self, x, y):
-       si = FIELD_MAP['series']
+       si = self.db.FIELD_MAP['series']
        try:
            ans = cmp(x[si].lower(), y[si].lower())
        except AttributeError: # Some entries may be None
            ans = cmp(x[si], y[si])
        if ans != 0: return ans
-       return cmp(x[FIELD_MAP['series_index']], y[FIELD_MAP['series_index']])
+       return cmp(x[self.db.FIELD_MAP['series_index']], y[self.db.FIELD_MAP['series_index']])

    def last_modified(self, updated):
@@ -585,11 +585,11 @@ class LibraryServer(object):
        next_link = ('<link rel="next" title="Next" '
                'type="application/atom+xml" href="/stanza/?sortby=%s&amp;offset=%d"/>\n'
                ) % (sortby, next_offset)
-       return self.STANZA.generate(subtitle=subtitle, data=entries, FM=FIELD_MAP,
+       return self.STANZA.generate(subtitle=subtitle, data=entries, FM=self.db.FIELD_MAP,
                updated=updated, id='urn:calibre:main', next_link=next_link).render('xml')

    def stanza_main(self, updated):
-       return self.STANZA_MAIN.generate(subtitle='', data=[], FM=FIELD_MAP,
+       return self.STANZA_MAIN.generate(subtitle='', data=[], FM=self.db.FIELD_MAP,
                updated=updated, id='urn:calibre:main').render('xml')

    @expose
@@ -626,15 +626,18 @@ class LibraryServer(object):
        # Sort the record list
        if sortby == "bytitle" or authorid or tagid:
-           record_list.sort(lambda x, y: cmp(title_sort(x[FIELD_MAP['title']]),
-               title_sort(y[FIELD_MAP['title']])))
+           record_list.sort(lambda x, y:
+               cmp(title_sort(x[self.db.FIELD_MAP['title']]),
+                   title_sort(y[self.db.FIELD_MAP['title']])))
        elif seriesid:
-           record_list.sort(lambda x, y: cmp(x[FIELD_MAP['series_index']], y[FIELD_MAP['series_index']]))
+           record_list.sort(lambda x, y:
+               cmp(x[self.db.FIELD_MAP['series_index']],
+                   y[self.db.FIELD_MAP['series_index']]))
        else: # Sort by date
            record_list = reversed(record_list)

-       fmts = FIELD_MAP['formats']
+       fmts = self.db.FIELD_MAP['formats']
        pat = re.compile(r'EPUB|PDB', re.IGNORECASE)
        record_list = [x for x in record_list if x[0] in ids and
                pat.search(x[fmts] if x[fmts] else '') is not None]
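These cmp-style sorts are Python 2 idioms: both list.sort(cmp=...) and the cmp() built-in are gone in Python 3. For illustration only, the title branch written with a key function, equivalent under the same FIELD_MAP assumption:

    fm = self.db.FIELD_MAP
    record_list.sort(key=lambda x: title_sort(x[fm['title']]))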
@@ -656,10 +659,10 @@ class LibraryServer(object):
                ) % '&amp;'.join(q)

        for record in nrecord_list:
-           r = record[FIELD_MAP['formats']]
+           r = record[self.db.FIELD_MAP['formats']]
            r = r.upper() if r else ''

-           z = record[FIELD_MAP['authors']]
+           z = record[self.db.FIELD_MAP['authors']]
            if not z:
                z = _('Unknown')
            authors = ' & '.join([i.replace('|', ',') for i in
@@ -667,19 +670,19 @@ class LibraryServer(object):
            # Setup extra description
            extra = []
-           rating = record[FIELD_MAP['rating']]
+           rating = record[self.db.FIELD_MAP['rating']]
            if rating > 0:
                rating = ''.join(repeat('&#9733;', rating))
                extra.append('RATING: %s<br />'%rating)
-           tags = record[FIELD_MAP['tags']]
+           tags = record[self.db.FIELD_MAP['tags']]
            if tags:
                extra.append('TAGS: %s<br />'%\
                        prepare_string_for_xml(', '.join(tags.split(','))))
-           series = record[FIELD_MAP['series']]
+           series = record[self.db.FIELD_MAP['series']]
            if series:
                extra.append('SERIES: %s [%s]<br />'%\
                        (prepare_string_for_xml(series),
-                       fmt_sidx(float(record[FIELD_MAP['series_index']]))))
+                       fmt_sidx(float(record[self.db.FIELD_MAP['series_index']]))))

            fmt = 'epub' if 'EPUB' in r else 'pdb'
            mimetype = guess_type('dummy.'+fmt)[0]
@@ -692,17 +695,17 @@ class LibraryServer(object):
                    authors=authors,
                    tags=tags,
                    series=series,
-                   FM=FIELD_MAP,
+                   FM=self.db.FIELD_MAP,
                    extra='\n'.join(extra),
                    mimetype=mimetype,
                    fmt=fmt,
-                   urn=record[FIELD_MAP['uuid']],
+                   urn=record[self.db.FIELD_MAP['uuid']],
                    timestamp=strftime('%Y-%m-%dT%H:%M:%S+00:00', record[5])
                    )
            books.append(self.STANZA_ENTRY.generate(**data)\
                    .render('xml').decode('utf8'))

-       return self.STANZA.generate(subtitle='', data=books, FM=FIELD_MAP,
+       return self.STANZA.generate(subtitle='', data=books, FM=self.db.FIELD_MAP,
                next_link=next_link, updated=updated, id='urn:calibre:main').render('xml')
@@ -741,7 +744,7 @@ class LibraryServer(object):
            authors = '|'.join([i.replace('|', ',') for i in aus.split(',')])
            record[10] = fmt_sidx(float(record[10]))
            ts, pd = strftime('%Y/%m/%d %H:%M:%S', record[5]), \
-               strftime('%Y/%m/%d %H:%M:%S', record[FIELD_MAP['pubdate']])
+               strftime('%Y/%m/%d %H:%M:%S', record[self.db.FIELD_MAP['pubdate']])
            books.append(book.generate(r=record, authors=authors, timestamp=ts,
                pubdate=pd).render('xml').decode('utf-8'))
        updated = self.db.last_modified()
@@ -788,7 +791,7 @@ class LibraryServer(object):
            authors = '|'.join([i.replace('|', ',') for i in aus.split(',')])
            record[10] = fmt_sidx(float(record[10]))
            ts, pd = strftime('%Y/%m/%d %H:%M:%S', record[5]), \
-               strftime('%Y/%m/%d %H:%M:%S', record[FIELD_MAP['pubdate']])
+               strftime('%Y/%m/%d %H:%M:%S', record[self.db.FIELD_MAP['pubdate']])
            books.append(book.generate(r=record, authors=authors, timestamp=ts,
                pubdate=pd).render('xml').decode('utf-8'))
        updated = self.db.last_modified()

View File

@@ -20,7 +20,9 @@ from calibre.utils.date import parse_date, isoformat
global_lock = RLock()

def convert_timestamp(val):
-   return parse_date(val, as_utc=False)
+   if val:
+       return parse_date(val, as_utc=False)
+   return None

def adapt_datetime(dt):
    return isoformat(dt, sep=' ')
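With the new guard, convert_timestamp() no longer hands NULL column values to parse_date(). A converter like this is typically registered with the sqlite3 module so that columns declared TIMESTAMP come back as datetime objects; a sketch of the usual wiring (dbpath is a placeholder here, and calibre's own connection wrapper may register things differently):

    import sqlite3
    sqlite3.register_converter('timestamp', convert_timestamp)
    con = sqlite3.connect(dbpath, detect_types=sqlite3.PARSE_DECLTYPES)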