diff --git a/src/calibre/ebooks/metadata/book/__init__.py b/src/calibre/ebooks/metadata/book/__init__.py
index 033a78d611..fae858aabd 100644
--- a/src/calibre/ebooks/metadata/book/__init__.py
+++ b/src/calibre/ebooks/metadata/book/__init__.py
@@ -18,14 +18,14 @@ SOCIAL_METADATA_FIELDS = frozenset([
'series_index', # A floating point number
# Of the form { scheme1:value1, scheme2:value2}
# For example: {'isbn':'123456789', 'doi':'xxxx', ... }
- 'classifiers',
+ 'identifiers',
])
'''
-The list of names that convert to classifiers when in get and set.
+The list of names that convert to identifiers when in get and set.
'''
-TOP_LEVEL_CLASSIFIERS = frozenset([
+TOP_LEVEL_IDENTIFIERS = frozenset([
'isbn',
])
@@ -108,7 +108,7 @@ STANDARD_METADATA_FIELDS = SOCIAL_METADATA_FIELDS.union(
SC_FIELDS_NOT_COPIED = frozenset(['title', 'title_sort', 'authors',
'author_sort', 'author_sort_map',
'cover_data', 'tags', 'language',
- 'classifiers'])
+ 'identifiers'])
# Metadata fields that smart update should copy only if the source is not None
SC_FIELDS_COPY_NOT_NULL = frozenset(['lpath', 'size', 'comments', 'thumbnail'])
diff --git a/src/calibre/ebooks/metadata/book/base.py b/src/calibre/ebooks/metadata/book/base.py
index b47cc373a7..e3e9917491 100644
--- a/src/calibre/ebooks/metadata/book/base.py
+++ b/src/calibre/ebooks/metadata/book/base.py
@@ -12,7 +12,7 @@ from calibre.constants import DEBUG
from calibre.ebooks.metadata.book import SC_COPYABLE_FIELDS
from calibre.ebooks.metadata.book import SC_FIELDS_COPY_NOT_NULL
from calibre.ebooks.metadata.book import STANDARD_METADATA_FIELDS
-from calibre.ebooks.metadata.book import TOP_LEVEL_CLASSIFIERS
+from calibre.ebooks.metadata.book import TOP_LEVEL_IDENTIFIERS
from calibre.ebooks.metadata.book import ALL_METADATA_FIELDS
from calibre.library.field_metadata import FieldMetadata
from calibre.utils.date import isoformat, format_date
@@ -24,7 +24,7 @@ NULL_VALUES = {
'user_metadata': {},
'cover_data' : (None, None),
'tags' : [],
- 'classifiers' : {},
+ 'identifiers' : {},
'languages' : [],
'device_collections': [],
'author_sort_map': {},
@@ -96,8 +96,8 @@ class Metadata(object):
def __getattribute__(self, field):
_data = object.__getattribute__(self, '_data')
- if field in TOP_LEVEL_CLASSIFIERS:
- return _data.get('classifiers').get(field, None)
+ if field in TOP_LEVEL_IDENTIFIERS:
+ return _data.get('identifiers').get(field, None)
if field in STANDARD_METADATA_FIELDS:
return _data.get(field, None)
try:
@@ -123,8 +123,8 @@ class Metadata(object):
def __setattr__(self, field, val, extra=None):
_data = object.__getattribute__(self, '_data')
- if field in TOP_LEVEL_CLASSIFIERS:
- _data['classifiers'].update({field: val})
+ if field in TOP_LEVEL_IDENTIFIERS:
+ _data['identifiers'].update({field: val})
elif field in STANDARD_METADATA_FIELDS:
if val is None:
val = NULL_VALUES.get(field, None)
@@ -176,17 +176,21 @@ class Metadata(object):
def set(self, field, val, extra=None):
self.__setattr__(field, val, extra)
- def get_classifiers(self):
+ def get_identifiers(self):
'''
- Return a copy of the classifiers dictionary.
+ Return a copy of the identifiers dictionary.
The dict is small, and the penalty for using a reference where a copy is
needed is large. Also, we don't want any manipulations of the returned
dict to show up in the book.
'''
- return copy.deepcopy(object.__getattribute__(self, '_data')['classifiers'])
+ ans = object.__getattribute__(self,
+ '_data')['identifiers']
+ if not ans:
+ ans = {}
+ return copy.deepcopy(ans)
- def set_classifiers(self, classifiers):
- object.__getattribute__(self, '_data')['classifiers'] = classifiers
+ def set_identifiers(self, identifiers):
+ object.__getattribute__(self, '_data')['identifiers'] = identifiers
# field-oriented interface. Intended to be the same as in LibraryDatabase
@@ -229,7 +233,7 @@ class Metadata(object):
if v is not None:
result[attr] = v
# separate these because it uses the self.get(), not _data.get()
- for attr in TOP_LEVEL_CLASSIFIERS:
+ for attr in TOP_LEVEL_IDENTIFIERS:
v = self.get(attr, None)
if v is not None:
result[attr] = v
@@ -400,8 +404,8 @@ class Metadata(object):
self.set_all_user_metadata(other.get_all_user_metadata(make_copy=True))
for x in SC_FIELDS_COPY_NOT_NULL:
copy_not_none(self, other, x)
- if callable(getattr(other, 'get_classifiers', None)):
- self.set_classifiers(other.get_classifiers())
+ if callable(getattr(other, 'get_identifiers', None)):
+ self.set_identifiers(other.get_identifiers())
# language is handled below
else:
for attr in SC_COPYABLE_FIELDS:
@@ -456,15 +460,15 @@ class Metadata(object):
if len(other_comments.strip()) > len(my_comments.strip()):
self.comments = other_comments
- # Copy all the non-none classifiers
- if callable(getattr(other, 'get_classifiers', None)):
- d = self.get_classifiers()
- s = other.get_classifiers()
+ # Copy all the non-none identifiers
+ if callable(getattr(other, 'get_identifiers', None)):
+ d = self.get_identifiers()
+ s = other.get_identifiers()
d.update([v for v in s.iteritems() if v[1] is not None])
- self.set_classifiers(d)
+ self.set_identifiers(d)
else:
- # other structure not Metadata. Copy the top-level classifiers
- for attr in TOP_LEVEL_CLASSIFIERS:
+ # other structure not Metadata. Copy the top-level identifiers
+ for attr in TOP_LEVEL_IDENTIFIERS:
copy_not_none(self, other, attr)
other_lang = getattr(other, 'language', None)
diff --git a/src/calibre/ebooks/metadata/book/json_codec.py b/src/calibre/ebooks/metadata/book/json_codec.py
index c02d4e953d..f434800edf 100644
--- a/src/calibre/ebooks/metadata/book/json_codec.py
+++ b/src/calibre/ebooks/metadata/book/json_codec.py
@@ -119,6 +119,8 @@ class JsonCodec(object):
for item in js:
book = book_class(prefix, item.get('lpath', None))
for key in item.keys():
+                if key == 'classifiers':
+                    key = 'identifiers'; item[key] = item.pop('classifiers')
meta = self.decode_metadata(key, item[key])
if key == 'user_metadata':
book.set_all_user_metadata(meta)
diff --git a/src/calibre/ebooks/metadata/opf2.py b/src/calibre/ebooks/metadata/opf2.py
index d34a563110..9c59692628 100644
--- a/src/calibre/ebooks/metadata/opf2.py
+++ b/src/calibre/ebooks/metadata/opf2.py
@@ -596,6 +596,9 @@ class OPF(object): # {{{
ans = MetaInformation(self)
for n, v in self._user_metadata_.items():
ans.set_user_metadata(n, v)
+
+ ans.set_identifiers(self.get_identifiers())
+
return ans
def write_user_metadata(self):
@@ -855,6 +858,21 @@ class OPF(object): # {{{
return property(fget=fget, fset=fset)
+ def get_identifiers(self):
+ identifiers = {}
+ for x in self.XPath(
+ 'descendant::*[local-name() = "identifier" and text()]')(
+ self.metadata):
+ for attr, val in x.attrib.iteritems():
+ if attr.endswith('scheme'):
+ typ = icu_lower(val)
+ val = etree.tostring(x, with_tail=False, encoding=unicode,
+ method='text').strip()
+ if val and typ not in ('calibre', 'uuid'):
+ identifiers[typ] = val
+ break
+ return identifiers
+
@dynamic_property
def application_id(self):
@@ -1166,8 +1184,8 @@ class OPFCreator(Metadata):
a(DC_ELEM('description', self.comments))
if self.publisher:
a(DC_ELEM('publisher', self.publisher))
- if self.isbn:
- a(DC_ELEM('identifier', self.isbn, opf_attrs={'scheme':'ISBN'}))
+ for key, val in self.get_identifiers().iteritems():
+ a(DC_ELEM('identifier', val, opf_attrs={'scheme':icu_upper(key)}))
if self.rights:
a(DC_ELEM('rights', self.rights))
if self.tags:
@@ -1291,8 +1309,8 @@ def metadata_to_opf(mi, as_string=True):
factory(DC('description'), mi.comments)
if mi.publisher:
factory(DC('publisher'), mi.publisher)
- if mi.isbn:
- factory(DC('identifier'), mi.isbn, scheme='ISBN')
+ for key, val in mi.get_identifiers().iteritems():
+ factory(DC('identifier'), val, scheme=icu_upper(key))
if mi.rights:
factory(DC('rights'), mi.rights)
factory(DC('language'), mi.language if mi.language and mi.language.lower()
@@ -1342,7 +1360,7 @@ def test_m2o():
mi.language = 'en'
mi.comments = 'what a fun book\n\n'
mi.publisher = 'publisher'
- mi.isbn = 'boooo'
+ mi.set_identifiers({'isbn':'booo', 'dummy':'dummy'})
mi.tags = ['a', 'b']
mi.series = 's"c\'l&<>'
mi.series_index = 3.34
@@ -1350,7 +1368,7 @@ def test_m2o():
mi.timestamp = nowf()
mi.publication_type = 'ooooo'
mi.rights = 'yes'
- mi.cover = 'asd.jpg'
+ mi.cover = os.path.abspath('asd.jpg')
opf = metadata_to_opf(mi)
print opf
newmi = MetaInformation(OPF(StringIO(opf)))
@@ -1363,6 +1381,9 @@ def test_m2o():
o, n = getattr(mi, attr), getattr(newmi, attr)
if o != n and o.strip() != n.strip():
print 'FAILED:', attr, getattr(mi, attr), '!=', getattr(newmi, attr)
+ if mi.get_identifiers() != newmi.get_identifiers():
+ print 'FAILED:', 'identifiers', mi.get_identifiers(),
+ print '!=', newmi.get_identifiers()
class OPFTest(unittest.TestCase):
@@ -1378,6 +1399,7 @@ class OPFTest(unittest.TestCase):
Next
OneTwo
123456789
+                <dc:identifier opf:scheme="dummy">dummy</dc:identifier>
@@ -1405,6 +1427,8 @@ class OPFTest(unittest.TestCase):
self.assertEqual(opf.rating, 4)
self.assertEqual(opf.publication_type, 'test')
self.assertEqual(list(opf.itermanifest())[0].get('href'), 'a ~ b')
+ self.assertEqual(opf.get_identifiers(), {'isbn':'123456789',
+ 'dummy':'dummy'})
def testWriting(self):
for test in [('title', 'New & Title'), ('authors', ['One', 'Two']),
@@ -1461,5 +1485,5 @@ def test_user_metadata():
if __name__ == '__main__':
#test_user_metadata()
- #test_m2o()
+ test_m2o()
test()
diff --git a/src/calibre/library/cli.py b/src/calibre/library/cli.py
index e93be187f9..359f5876fd 100644
--- a/src/calibre/library/cli.py
+++ b/src/calibre/library/cli.py
@@ -20,7 +20,7 @@ from calibre.utils.date import isoformat
FIELDS = set(['title', 'authors', 'author_sort', 'publisher', 'rating',
'timestamp', 'size', 'tags', 'comments', 'series', 'series_index',
- 'formats', 'isbn', 'uuid', 'pubdate', 'cover'])
+ 'formats', 'isbn', 'uuid', 'pubdate', 'cover', 'last_modified'])
def send_message(msg=''):
prints('Notifying calibre of the change')
diff --git a/src/calibre/library/database2.py b/src/calibre/library/database2.py
index 1762fd16d2..8c509e7ceb 100644
--- a/src/calibre/library/database2.py
+++ b/src/calibre/library/database2.py
@@ -6,7 +6,8 @@ __docformat__ = 'restructuredtext en'
'''
The database used to store ebook metadata
'''
-import os, sys, shutil, cStringIO, glob, time, functools, traceback, re, json
+import os, sys, shutil, cStringIO, glob, time, functools, traceback, re, \
+ json, uuid
import threading, random
from itertools import repeat
from math import ceil
@@ -94,6 +95,31 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
return property(doc=doc, fget=fget, fset=fset)
+ @dynamic_property
+ def library_id(self):
+ doc = ('The UUID for this library. As long as the user only operates'
+ ' on libraries with calibre, it will be unique')
+
+ def fget(self):
+ if self._library_id_ is None:
+ ans = self.conn.get('SELECT uuid FROM library_id', all=False)
+ if ans is None:
+ ans = str(uuid.uuid4())
+ self.library_id = ans
+ else:
+ self._library_id_ = ans
+ return self._library_id_
+
+ def fset(self, val):
+ self._library_id_ = unicode(val)
+ self.conn.executescript('''
+ DELETE FROM library_id;
+ INSERT INTO library_id (uuid) VALUES ("%s");
+ '''%self._library_id_)
+ self.conn.commit()
+
+ return property(doc=doc, fget=fget, fset=fset)
+
def connect(self):
if 'win32' in sys.platform and len(self.library_path) + 4*self.PATH_LIMIT + 10 > 259:
raise ValueError('Path to library too long. Must be less than %d characters.'%(259-4*self.PATH_LIMIT-10))
@@ -120,6 +146,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
def __init__(self, library_path, row_factory=False, default_prefs=None,
read_only=False):
self.field_metadata = FieldMetadata()
+ self._library_id_ = None
# Create the lock to be used to guard access to the metadata writer
# queues. This must be an RLock, not a Lock
self.dirtied_lock = threading.RLock()
@@ -148,6 +175,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
self.is_case_sensitive = not iswindows and not isosx and \
not os.path.exists(self.dbpath.replace('metadata.db', 'MeTAdAtA.dB'))
SchemaUpgrade.__init__(self)
+ # Guarantee that the library_id is set
+ self.library_id
# if we are to copy the prefs and structure from some other DB, then
# we need to do it before we call initialize_dynamic
@@ -293,14 +322,14 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
'sort',
'author_sort',
'(SELECT group_concat(format) FROM data WHERE data.book=books.id) formats',
- 'isbn',
'path',
- 'lccn',
'pubdate',
- 'flags',
'uuid',
'has_cover',
- ('au_map', 'authors', 'author', 'aum_sortconcat(link.id, authors.name, authors.sort)')
+ ('au_map', 'authors', 'author',
+ 'aum_sortconcat(link.id, authors.name, authors.sort)'),
+ 'last_modified',
+ '(SELECT identifiers_concat(type, val) FROM identifiers WHERE identifiers.book=books.id) identifiers',
]
lines = []
for col in columns:
@@ -318,8 +347,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
self.FIELD_MAP = {'id':0, 'title':1, 'authors':2, 'timestamp':3,
'size':4, 'rating':5, 'tags':6, 'comments':7, 'series':8,
'publisher':9, 'series_index':10, 'sort':11, 'author_sort':12,
- 'formats':13, 'isbn':14, 'path':15, 'lccn':16, 'pubdate':17,
- 'flags':18, 'uuid':19, 'cover':20, 'au_map':21}
+ 'formats':13, 'path':14, 'pubdate':15, 'uuid':16, 'cover':17,
+ 'au_map':18, 'last_modified':19, 'identifiers':20}
for k,v in self.FIELD_MAP.iteritems():
self.field_metadata.set_field_record_index(k, v, prefer_custom=False)
@@ -391,11 +420,16 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
self.has_id = self.data.has_id
self.count = self.data.count
- for prop in ('author_sort', 'authors', 'comment', 'comments', 'isbn',
- 'publisher', 'rating', 'series', 'series_index', 'tags',
- 'title', 'timestamp', 'uuid', 'pubdate', 'ondevice'):
+ for prop in (
+ 'author_sort', 'authors', 'comment', 'comments',
+ 'publisher', 'rating', 'series', 'series_index', 'tags',
+ 'title', 'timestamp', 'uuid', 'pubdate', 'ondevice',
+ 'metadata_last_modified',
+ ):
+ fm = {'comment':'comments', 'metadata_last_modified':
+ 'last_modified'}.get(prop, prop)
setattr(self, prop, functools.partial(self.get_property,
- loc=self.FIELD_MAP['comments' if prop == 'comment' else prop]))
+ loc=self.FIELD_MAP[fm]))
setattr(self, 'title_sort', functools.partial(self.get_property,
loc=self.FIELD_MAP['sort']))
@@ -681,8 +715,20 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
if commit:
self.conn.commit()
+ def update_last_modified(self, book_ids, commit=False, now=None):
+ if now is None:
+ now = nowf()
+ if book_ids:
+ self.conn.executemany(
+ 'UPDATE books SET last_modified=? WHERE id=?',
+ [(now, book) for book in book_ids])
+ for book_id in book_ids:
+ self.data.set(book_id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
+ if commit:
+ self.conn.commit()
+
def dirtied(self, book_ids, commit=True):
- changed = False
+ self.update_last_modified(book_ids)
for book in book_ids:
with self.dirtied_lock:
# print 'dirtied: check id', book
@@ -691,21 +737,18 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
self.dirtied_sequence += 1
continue
# print 'book not already dirty'
- try:
- self.conn.execute(
- 'INSERT INTO metadata_dirtied (book) VALUES (?)',
- (book,))
- changed = True
- except IntegrityError:
- # Already in table
- pass
+
+ self.conn.execute(
+ 'INSERT OR IGNORE INTO metadata_dirtied (book) VALUES (?)',
+ (book,))
self.dirtied_cache[book] = self.dirtied_sequence
self.dirtied_sequence += 1
+
# If the commit doesn't happen, then the DB table will be wrong. This
# could lead to a problem because on restart, we won't put the book back
# into the dirtied_cache. We deal with this by writing the dirtied_cache
# back to the table on GUI exit. Not perfect, but probably OK
- if commit and changed:
+ if book_ids and commit:
self.conn.commit()
def get_a_dirtied_book(self):
@@ -803,8 +846,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
if mi.series:
mi.series_index = row[fm['series_index']]
mi.rating = row[fm['rating']]
- mi.isbn = row[fm['isbn']]
id = idx if index_is_id else self.id(idx)
+ mi.set_identifiers(self.get_identifiers(id, index_is_id=True))
mi.application_id = id
mi.id = id
for key, meta in self.field_metadata.custom_iteritems():
@@ -911,10 +954,14 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
except (IOError, OSError):
time.sleep(0.2)
save_cover_data_to(data, path)
- self.conn.execute('UPDATE books SET has_cover=1 WHERE id=?', (id,))
+ now = nowf()
+ self.conn.execute(
+ 'UPDATE books SET has_cover=1,last_modified=? WHERE id=?',
+ (now, id))
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['cover'], True, row_is_id=True)
+ self.data.set(id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
if notify:
self.notify('cover', [id])
@@ -923,8 +970,12 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
def set_has_cover(self, id, val):
dval = 1 if val else 0
- self.conn.execute('UPDATE books SET has_cover=? WHERE id=?', (dval, id,))
+ now = nowf()
+ self.conn.execute(
+ 'UPDATE books SET has_cover=?,last_modified=? WHERE id=?',
+ (dval, now, id))
self.data.set(id, self.FIELD_MAP['cover'], val, row_is_id=True)
+ self.data.set(id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
def book_on_device(self, id):
if callable(self.book_on_device_func):
@@ -1222,7 +1273,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
for category in tb_cats.keys():
cat = tb_cats[category]
if not cat['is_category'] or cat['kind'] in ['user', 'search'] \
- or category in ['news', 'formats']:
+ or category in ['news', 'formats', 'identifiers']:
continue
# Get the ids for the item values
if not cat['is_custom']:
@@ -1652,8 +1703,6 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
doit(self.set_tags, id, mi.tags, notify=False, commit=False)
if mi.comments:
doit(self.set_comment, id, mi.comments, notify=False, commit=False)
- if mi.isbn and mi.isbn.strip():
- doit(self.set_isbn, id, mi.isbn, notify=False, commit=False)
if mi.series_index:
doit(self.set_series_index, id, mi.series_index, notify=False,
commit=False)
@@ -1663,6 +1712,15 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
doit(self.set_timestamp, id, mi.timestamp, notify=False,
commit=False)
+ mi_idents = mi.get_identifiers()
+ if mi_idents:
+ identifiers = self.get_identifiers(id, index_is_id=True)
+ for key, val in mi_idents.iteritems():
+ if val and val.strip(): # Don't delete an existing identifier
+ identifiers[icu_lower(key)] = val
+ self.set_identifiers(id, identifiers, notify=False, commit=False)
+
+
user_mi = mi.get_all_user_metadata(make_copy=False)
for key in user_mi.iterkeys():
if key in self.field_metadata and \
@@ -2441,14 +2499,84 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
if notify:
self.notify('metadata', [id])
- def set_isbn(self, id, isbn, notify=True, commit=True):
- self.conn.execute('UPDATE books SET isbn=? WHERE id=?', (isbn, id))
- self.dirtied([id], commit=False)
+ def isbn(self, idx, index_is_id=False):
+ row = self.data._data[idx] if index_is_id else self.data[idx]
+ if row is not None:
+ raw = row[self.FIELD_MAP['identifiers']]
+ if raw:
+ for x in raw.split(','):
+ if x.startswith('isbn:'):
+ return x[5:].strip()
+
+ def get_identifiers(self, idx, index_is_id=False):
+ ans = {}
+ row = self.data._data[idx] if index_is_id else self.data[idx]
+ if row is not None:
+ raw = row[self.FIELD_MAP['identifiers']]
+ if raw:
+ for x in raw.split(','):
+ key, _, val = x.partition(':')
+ key, val = key.strip(), val.strip()
+ if key and val:
+ ans[key] = val
+
+ return ans
+
+ def _clean_identifier(self, typ, val):
+ typ = icu_lower(typ).strip().replace(':', '').replace(',', '')
+ val = val.strip().replace(',', '|').replace(':', '|')
+ return typ, val
+
+ def set_identifier(self, id_, typ, val, notify=True, commit=True):
+ 'If val is empty, deletes identifier of type typ'
+ typ, val = self._clean_identifier(typ, val)
+ identifiers = self.get_identifiers(id_, index_is_id=True)
+ if not typ:
+ return
+ changed = False
+ if not val and typ in identifiers:
+ identifiers.pop(typ)
+ changed = True
+ self.conn.execute(
+ 'DELETE from identifiers WHERE book=? AND type=?',
+ (id_, typ))
+ if val and identifiers.get(typ, None) != val:
+ changed = True
+ identifiers[typ] = val
+ self.conn.execute(
+ 'INSERT OR REPLACE INTO identifiers (book, type, val) VALUES (?, ?, ?)',
+ (id_, typ, val))
+ if changed:
+ raw = ','.join(['%s:%s'%(k, v) for k, v in
+ identifiers.iteritems()])
+ self.data.set(id_, self.FIELD_MAP['identifiers'], raw,
+ row_is_id=True)
+ if commit:
+ self.conn.commit()
+ if notify:
+ self.notify('metadata', [id_])
+
+ def set_identifiers(self, id_, identifiers, notify=True, commit=True):
+ cleaned = {}
+ for typ, val in identifiers.iteritems():
+ typ, val = self._clean_identifier(typ, val)
+ if val:
+ cleaned[typ] = val
+ self.conn.execute('DELETE FROM identifiers WHERE book=?', (id_,))
+ self.conn.executemany(
+ 'INSERT INTO identifiers (book, type, val) VALUES (?, ?, ?)',
+ [(id_, k, v) for k, v in cleaned.iteritems()])
+ raw = ','.join(['%s:%s'%(k, v) for k, v in
+ cleaned.iteritems()])
+ self.data.set(id_, self.FIELD_MAP['identifiers'], raw,
+ row_is_id=True)
if commit:
self.conn.commit()
- self.data.set(id, self.FIELD_MAP['isbn'], isbn, row_is_id=True)
if notify:
- self.notify('metadata', [id])
+ self.notify('metadata', [id_])
+
+ def set_isbn(self, id_, isbn, notify=True, commit=True):
+ self.set_identifier(id_, 'isbn', isbn, notify=notify, commit=commit)
def add_catalog(self, path, title):
format = os.path.splitext(path)[1][1:].lower()
@@ -2746,7 +2874,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
prefix = self.library_path
FIELDS = set(['title', 'authors', 'author_sort', 'publisher', 'rating',
'timestamp', 'size', 'tags', 'comments', 'series', 'series_index',
- 'isbn', 'uuid', 'pubdate'])
+ 'uuid', 'pubdate', 'last_modified'])
for x in self.custom_column_num_map:
FIELDS.add(x)
data = []
@@ -2761,6 +2889,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
data.append(x)
x['id'] = db_id
x['formats'] = []
+ isbn = self.isbn(db_id, index_is_id=True)
+ x['isbn'] = isbn if isbn else ''
if not x['authors']:
x['authors'] = _('Unknown')
x['authors'] = [i.replace('|', ',') for i in x['authors'].split(',')]
diff --git a/src/calibre/library/field_metadata.py b/src/calibre/library/field_metadata.py
index aff2803452..b0d604dc57 100644
--- a/src/calibre/library/field_metadata.py
+++ b/src/calibre/library/field_metadata.py
@@ -119,6 +119,15 @@ class FieldMetadata(dict):
'search_terms':['formats', 'format'],
'is_custom':False,
'is_category':True}),
+ ('identifiers', {'table':None,
+ 'column':None,
+ 'datatype':'text',
+ 'is_multiple':',',
+ 'kind':'field',
+ 'name':_('Identifiers'),
+ 'search_terms':['identifiers', 'identifier'],
+ 'is_custom':False,
+ 'is_category':True}),
('publisher', {'table':'publishers',
'column':'name',
'link_column':'publisher',
@@ -296,6 +305,15 @@ class FieldMetadata(dict):
'search_terms':['date'],
'is_custom':False,
'is_category':False}),
+ ('last_modified', {'table':None,
+ 'column':None,
+ 'datatype':'datetime',
+ 'is_multiple':None,
+ 'kind':'field',
+ 'name':_('Date'),
+ 'search_terms':['last_modified'],
+ 'is_custom':False,
+ 'is_category':False}),
('title', {'table':None,
'column':None,
'datatype':'text',
@@ -335,7 +353,8 @@ class FieldMetadata(dict):
self._tb_cats[k]['display'] = {}
self._tb_cats[k]['is_editable'] = True
self._add_search_terms_to_map(k, v['search_terms'])
- self._tb_cats['timestamp']['display'] = {
+ for x in ('timestamp', 'last_modified'):
+ self._tb_cats[x]['display'] = {
'date_format': tweaks['gui_timestamp_display_format']}
self._tb_cats['pubdate']['display'] = {
'date_format': tweaks['gui_pubdate_display_format']}
diff --git a/src/calibre/library/restore.py b/src/calibre/library/restore.py
index 76f3c0333d..e03edd449a 100644
--- a/src/calibre/library/restore.py
+++ b/src/calibre/library/restore.py
@@ -13,6 +13,7 @@ from calibre.ptempfile import TemporaryDirectory
from calibre.ebooks.metadata.opf2 import OPF
from calibre.library.database2 import LibraryDatabase2
from calibre.constants import filesystem_encoding
+from calibre.utils.date import utcfromtimestamp
from calibre import isbytestring
NON_EBOOK_EXTENSIONS = frozenset([
@@ -211,8 +212,8 @@ class Restore(Thread):
force_id=book['id'])
if book['mi'].uuid:
db.set_uuid(book['id'], book['mi'].uuid, commit=False, notify=False)
- db.conn.execute('UPDATE books SET path=? WHERE id=?', (book['path'],
- book['id']))
+ db.conn.execute('UPDATE books SET path=?,last_modified=? WHERE id=?', (book['path'],
+ utcfromtimestamp(book['timestamp']), book['id']))
for fmt, size, name in book['formats']:
db.conn.execute('''
diff --git a/src/calibre/library/schema_upgrades.py b/src/calibre/library/schema_upgrades.py
index 0b7a3f5350..d1f22d379b 100644
--- a/src/calibre/library/schema_upgrades.py
+++ b/src/calibre/library/schema_upgrades.py
@@ -8,6 +8,8 @@ __docformat__ = 'restructuredtext en'
import os
+from calibre.utils.date import isoformat, DEFAULT_DATE
+
class SchemaUpgrade(object):
def __init__(self):
@@ -468,4 +470,116 @@ class SchemaUpgrade(object):
'''
self.conn.executescript(script)
+ def upgrade_version_18(self):
+ '''
+ Add a library UUID.
+ Add an identifiers table.
+ Add a languages table.
+ Add a last_modified column.
+ NOTE: You cannot downgrade after this update, if you do
+ any changes you make to book isbns will be lost.
+ '''
+ script = '''
+ DROP TABLE IF EXISTS library_id;
+ CREATE TABLE library_id ( id INTEGER PRIMARY KEY,
+ uuid TEXT NOT NULL,
+ UNIQUE(uuid)
+ );
+
+ DROP TABLE IF EXISTS identifiers;
+ CREATE TABLE identifiers ( id INTEGER PRIMARY KEY,
+ book INTEGER NON NULL,
+ type TEXT NON NULL DEFAULT "isbn" COLLATE NOCASE,
+ val TEXT NON NULL COLLATE NOCASE,
+ UNIQUE(book, type)
+ );
+
+ DROP TABLE IF EXISTS languages;
+ CREATE TABLE languages ( id INTEGER PRIMARY KEY,
+ lang_code TEXT NON NULL COLLATE NOCASE,
+ UNIQUE(lang_code)
+ );
+
+ DROP TABLE IF EXISTS books_languages_link;
+ CREATE TABLE books_languages_link ( id INTEGER PRIMARY KEY,
+ book INTEGER NOT NULL,
+ lang_code INTEGER NOT NULL,
+ item_order INTEGER NOT NULL DEFAULT 0,
+ UNIQUE(book, lang_code)
+ );
+
+ DROP TRIGGER IF EXISTS fkc_delete_on_languages;
+ CREATE TRIGGER fkc_delete_on_languages
+ BEFORE DELETE ON languages
+ BEGIN
+ SELECT CASE
+ WHEN (SELECT COUNT(id) FROM books_languages_link WHERE lang_code=OLD.id) > 0
+ THEN RAISE(ABORT, 'Foreign key violation: language is still referenced')
+ END;
+ END;
+
+ DROP TRIGGER IF EXISTS fkc_delete_on_languages_link;
+ CREATE TRIGGER fkc_delete_on_languages_link
+ BEFORE INSERT ON books_languages_link
+ BEGIN
+ SELECT CASE
+ WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
+ THEN RAISE(ABORT, 'Foreign key violation: book not in books')
+ WHEN (SELECT id from languages WHERE id=NEW.lang_code) IS NULL
+ THEN RAISE(ABORT, 'Foreign key violation: lang_code not in languages')
+ END;
+ END;
+
+ DROP TRIGGER IF EXISTS fkc_update_books_languages_link_a;
+ CREATE TRIGGER fkc_update_books_languages_link_a
+ BEFORE UPDATE OF book ON books_languages_link
+ BEGIN
+ SELECT CASE
+ WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
+ THEN RAISE(ABORT, 'Foreign key violation: book not in books')
+ END;
+ END;
+ DROP TRIGGER IF EXISTS fkc_update_books_languages_link_b;
+ CREATE TRIGGER fkc_update_books_languages_link_b
+ BEFORE UPDATE OF lang_code ON books_languages_link
+ BEGIN
+ SELECT CASE
+ WHEN (SELECT id from languages WHERE id=NEW.lang_code) IS NULL
+ THEN RAISE(ABORT, 'Foreign key violation: lang_code not in languages')
+ END;
+ END;
+
+ DROP INDEX IF EXISTS books_languages_link_aidx;
+ CREATE INDEX books_languages_link_aidx ON books_languages_link (lang_code);
+ DROP INDEX IF EXISTS books_languages_link_bidx;
+ CREATE INDEX books_languages_link_bidx ON books_languages_link (book);
+ DROP INDEX IF EXISTS languages_idx;
+ CREATE INDEX languages_idx ON languages (lang_code COLLATE NOCASE);
+
+ DROP TRIGGER IF EXISTS books_delete_trg;
+ CREATE TRIGGER books_delete_trg
+ AFTER DELETE ON books
+ BEGIN
+ DELETE FROM books_authors_link WHERE book=OLD.id;
+ DELETE FROM books_publishers_link WHERE book=OLD.id;
+ DELETE FROM books_ratings_link WHERE book=OLD.id;
+ DELETE FROM books_series_link WHERE book=OLD.id;
+ DELETE FROM books_tags_link WHERE book=OLD.id;
+ DELETE FROM books_languages_link WHERE book=OLD.id;
+ DELETE FROM data WHERE book=OLD.id;
+ DELETE FROM comments WHERE book=OLD.id;
+ DELETE FROM conversion_options WHERE book=OLD.id;
+ DELETE FROM books_plugin_data WHERE book=OLD.id;
+ DELETE FROM identifiers WHERE book=OLD.id;
+ END;
+
+ INSERT INTO identifiers (book, val) SELECT id,isbn FROM books WHERE isbn;
+
+ ALTER TABLE books ADD COLUMN last_modified TIMESTAMP NOT NULL DEFAULT "%s";
+
+ '''%isoformat(DEFAULT_DATE, sep=' ')
+ # Sqlite does not support non constant default values in alter
+ # statements
+ self.conn.executescript(script)
+
diff --git a/src/calibre/library/sqlite.py b/src/calibre/library/sqlite.py
index 622d6b8459..a57eb6b1f9 100644
--- a/src/calibre/library/sqlite.py
+++ b/src/calibre/library/sqlite.py
@@ -87,6 +87,18 @@ class SortedConcatenate(object):
class SafeSortedConcatenate(SortedConcatenate):
sep = '|'
+class IdentifiersConcat(object):
+ '''String concatenation aggregator for the identifiers map'''
+ def __init__(self):
+ self.ans = []
+
+ def step(self, key, val):
+ self.ans.append(u'%s:%s'%(key, val))
+
+ def finalize(self):
+ return ','.join(self.ans)
+
+
class AumSortedConcatenate(object):
'''String concatenation aggregator for the author sort map'''
def __init__(self):
@@ -170,13 +182,13 @@ class DBThread(Thread):
detect_types=sqlite.PARSE_DECLTYPES|sqlite.PARSE_COLNAMES)
self.conn.execute('pragma cache_size=5000')
encoding = self.conn.execute('pragma encoding').fetchone()[0]
- c_ext_loaded = load_c_extensions(self.conn)
+ self.conn.create_aggregate('sortconcat', 2, SortedConcatenate)
+ self.conn.create_aggregate('sort_concat', 2, SafeSortedConcatenate)
+ self.conn.create_aggregate('identifiers_concat', 2, IdentifiersConcat)
+ load_c_extensions(self.conn)
self.conn.row_factory = sqlite.Row if self.row_factory else lambda cursor, row : list(row)
self.conn.create_aggregate('concat', 1, Concatenate)
self.conn.create_aggregate('aum_sortconcat', 3, AumSortedConcatenate)
- if not c_ext_loaded:
- self.conn.create_aggregate('sortconcat', 2, SortedConcatenate)
- self.conn.create_aggregate('sort_concat', 2, SafeSortedConcatenate)
self.conn.create_collation('PYNOCASE', partial(pynocase,
encoding=encoding))
self.conn.create_function('title_sort', 1, title_sort)
diff --git a/src/calibre/library/sqlite_custom.c b/src/calibre/library/sqlite_custom.c
index 650c474c2c..dee17c79d4 100644
--- a/src/calibre/library/sqlite_custom.c
+++ b/src/calibre/library/sqlite_custom.c
@@ -77,6 +77,7 @@ static void sort_concat_free(SortConcatList *list) {
free(list->vals[i]->val);
free(list->vals[i]);
}
+ free(list->vals);
}
static int sort_concat_cmp(const void *a_, const void *b_) {
@@ -142,11 +143,102 @@ static void sort_concat_finalize2(sqlite3_context *context) {
// }}}
+// identifiers_concat {{{
+
+typedef struct {
+ char *val;
+ size_t length;
+} IdentifiersConcatItem;
+
+typedef struct {
+ IdentifiersConcatItem **vals;
+ size_t count;
+ size_t length;
+} IdentifiersConcatList;
+
+static void identifiers_concat_step(sqlite3_context *context, int argc, sqlite3_value **argv) {
+ const char *key, *val;
+ size_t len = 0;
+ IdentifiersConcatList *list;
+
+ assert(argc == 2);
+
+ list = (IdentifiersConcatList*) sqlite3_aggregate_context(context, sizeof(*list));
+ if (list == NULL) return;
+
+ if (list->vals == NULL) {
+ list->vals = (IdentifiersConcatItem**)calloc(100, sizeof(IdentifiersConcatItem*));
+ if (list->vals == NULL) return;
+ list->length = 100;
+ list->count = 0;
+ }
+
+ if (list->count == list->length) {
+        list->vals = (IdentifiersConcatItem**)realloc(list->vals, (list->length + 100) * sizeof(IdentifiersConcatItem*));
+ if (list->vals == NULL) return;
+ list->length = list->length + 100;
+ }
+
+ list->vals[list->count] = (IdentifiersConcatItem*)calloc(1, sizeof(IdentifiersConcatItem));
+ if (list->vals[list->count] == NULL) return;
+
+ key = (char*) sqlite3_value_text(argv[0]);
+ val = (char*) sqlite3_value_text(argv[1]);
+ if (key == NULL || val == NULL) {return;}
+ len = strlen(key) + strlen(val) + 1;
+
+ list->vals[list->count]->val = (char*)calloc(len+1, sizeof(char));
+ if (list->vals[list->count]->val == NULL) return;
+ snprintf(list->vals[list->count]->val, len+1, "%s:%s", key, val);
+ list->vals[list->count]->length = len;
+
+ list->count = list->count + 1;
+
+}
+
+
+static void identifiers_concat_finalize(sqlite3_context *context) {
+ IdentifiersConcatList *list;
+ IdentifiersConcatItem *item;
+ char *ans, *pos;
+ size_t sz = 0, i;
+
+ list = (IdentifiersConcatList*) sqlite3_aggregate_context(context, sizeof(*list));
+ if (list == NULL || list->vals == NULL || list->count < 1) return;
+
+ for (i = 0; i < list->count; i++) {
+ sz += list->vals[i]->length;
+ }
+ sz += list->count; // Space for commas
+ ans = (char*)calloc(sz+2, sizeof(char));
+ if (ans == NULL) return;
+
+ pos = ans;
+
+ for (i = 0; i < list->count; i++) {
+ item = list->vals[i];
+ if (item == NULL || item->val == NULL) continue;
+ memcpy(pos, item->val, item->length);
+ pos += item->length;
+ *pos = ',';
+ pos += 1;
+ free(item->val);
+ free(item);
+ }
+ *(pos-1) = 0; // Remove trailing comma
+ sqlite3_result_text(context, ans, -1, SQLITE_TRANSIENT);
+ free(ans);
+ free(list->vals);
+}
+
+// }}}
+
MYEXPORT int sqlite3_extension_init(
sqlite3 *db, char **pzErrMsg, const sqlite3_api_routines *pApi){
SQLITE_EXTENSION_INIT2(pApi);
sqlite3_create_function(db, "sortconcat", 2, SQLITE_UTF8, NULL, NULL, sort_concat_step, sort_concat_finalize);
sqlite3_create_function(db, "sort_concat", 2, SQLITE_UTF8, NULL, NULL, sort_concat_step, sort_concat_finalize2);
+ sqlite3_create_function(db, "identifiers_concat", 2, SQLITE_UTF8, NULL, NULL, identifiers_concat_step, identifiers_concat_finalize);
return 0;
}
diff --git a/src/calibre/utils/date.py b/src/calibre/utils/date.py
index 31c770bea5..eaf68df904 100644
--- a/src/calibre/utils/date.py
+++ b/src/calibre/utils/date.py
@@ -45,6 +45,7 @@ utc_tz = _utc_tz = tzutc()
local_tz = _local_tz = SafeLocalTimeZone()
UNDEFINED_DATE = datetime(101,1,1, tzinfo=utc_tz)
+DEFAULT_DATE = datetime(2000,1,1, tzinfo=utc_tz)
def is_date_undefined(qt_or_dt):
d = qt_or_dt