Merge from trunk

This commit is contained in:
Charles Haley 2011-03-01 08:03:10 +00:00
commit 4715f600a3
21 changed files with 520 additions and 107 deletions

View File

@ -5,8 +5,9 @@
"strcat": "def evaluate(self, formatter, kwargs, mi, locals, *args):\n i = 0\n res = ''\n for i in range(0, len(args)):\n res += args[i]\n return res\n",
"substr": "def evaluate(self, formatter, kwargs, mi, locals, str_, start_, end_):\n return str_[int(start_): len(str_) if int(end_) == 0 else int(end_)]\n",
"ifempty": "def evaluate(self, formatter, kwargs, mi, locals, val, value_if_empty):\n if val:\n return val\n else:\n return value_if_empty\n",
"select": "def evaluate(self, formatter, kwargs, mi, locals, val, key):\n if not val:\n return ''\n vals = [v.strip() for v in val.split(',')]\n for v in vals:\n if v.startswith(key+':'):\n return v[len(key)+1:]\n return ''\n",
"field": "def evaluate(self, formatter, kwargs, mi, locals, name):\n return formatter.get_value(name, [], kwargs)\n",
"capitalize": "def evaluate(self, formatter, kwargs, mi, locals, val):\n return capitalize(val)\n",
"subtract": "def evaluate(self, formatter, kwargs, mi, locals, x, y):\n x = float(x if x else 0)\n y = float(y if y else 0)\n return unicode(x - y)\n",
"list_item": "def evaluate(self, formatter, kwargs, mi, locals, val, index, sep):\n if not val:\n return ''\n index = int(index)\n val = val.split(sep)\n try:\n return val[index]\n except:\n return ''\n",
"shorten": "def evaluate(self, formatter, kwargs, mi, locals,\n val, leading, center_string, trailing):\n l = max(0, int(leading))\n t = max(0, int(trailing))\n if len(val) > l + len(center_string) + t:\n return val[0:l] + center_string + ('' if t == 0 else val[-t:])\n else:\n return val\n",
"re": "def evaluate(self, formatter, kwargs, mi, locals, val, pattern, replacement):\n return re.sub(pattern, replacement, val)\n",
@ -19,11 +20,13 @@
"test": "def evaluate(self, formatter, kwargs, mi, locals, val, value_if_set, value_not_set):\n if val:\n return value_if_set\n else:\n return value_not_set\n",
"eval": "def evaluate(self, formatter, kwargs, mi, locals, template):\n from formatter import eval_formatter\n template = template.replace('[[', '{').replace(']]', '}')\n return eval_formatter.safe_format(template, locals, 'EVAL', None)\n",
"multiply": "def evaluate(self, formatter, kwargs, mi, locals, x, y):\n x = float(x if x else 0)\n y = float(y if y else 0)\n return unicode(x * y)\n",
"subtract": "def evaluate(self, formatter, kwargs, mi, locals, x, y):\n x = float(x if x else 0)\n y = float(y if y else 0)\n return unicode(x - y)\n",
"format_date": "def evaluate(self, formatter, kwargs, mi, locals, val, format_string):\n print val\n if not val:\n return ''\n try:\n dt = parse_date(val)\n s = format_date(dt, format_string)\n except:\n s = 'BAD DATE'\n return s\n",
"capitalize": "def evaluate(self, formatter, kwargs, mi, locals, val):\n return capitalize(val)\n",
"count": "def evaluate(self, formatter, kwargs, mi, locals, val, sep):\n return unicode(len(val.split(sep)))\n",
"lowercase": "def evaluate(self, formatter, kwargs, mi, locals, val):\n return val.lower()\n",
"assign": "def evaluate(self, formatter, kwargs, mi, locals, target, value):\n locals[target] = value\n return value\n",
"switch": "def evaluate(self, formatter, kwargs, mi, locals, val, *args):\n if (len(args) % 2) != 1:\n raise ValueError(_('switch requires an odd number of arguments'))\n i = 0\n while i < len(args):\n if i + 1 >= len(args):\n return args[i]\n if re.search(args[i], val):\n return args[i+1]\n i += 2\n",
"strcmp": "def evaluate(self, formatter, kwargs, mi, locals, x, y, lt, eq, gt):\n v = strcmp(x, y)\n if v < 0:\n return lt\n if v == 0:\n return eq\n return gt\n",
"raw_field": "def evaluate(self, formatter, kwargs, mi, locals, name):\n return unicode(getattr(mi, name, None))\n",
"cmp": "def evaluate(self, formatter, kwargs, mi, locals, x, y, lt, eq, gt):\n x = float(x if x else 0)\n y = float(y if y else 0)\n if x < y:\n return lt\n if x == y:\n return eq\n return gt\n"
}

View File

@ -260,6 +260,8 @@ class DevicePlugin(Plugin):
Ask device for device information. See L{DeviceInfoQuery}.
:return: (device name, device version, software version on device, mime type)
The tuple can optionally have a fifth element, which is a
drive information dictionary. See usbms.driver for an example.
"""
raise NotImplementedError()

View File

@ -21,7 +21,7 @@ from calibre.devices.usbms.device import Device
from calibre.devices.usbms.books import BookList, Book
from calibre.ebooks.metadata.book.json_codec import JsonCodec
from calibre.utils.config import from_json, to_json
from calibre.utils.date import now
from calibre.utils.date import now, isoformat
BASE_TIME = None
def debug_print(*args):
@ -61,12 +61,13 @@ class USBMS(CLI, Device):
if dinfo.get('device_store_uuid', None) is None:
dinfo['device_store_uuid'] = unicode(uuid.uuid4())
if dinfo.get('device_name') is None:
dinfo['device_name'] = self.get_gui_name() + '_' + location_code
dinfo['device_name'] = self.get_gui_name()
if name is not None:
dinfo['device_name'] = name
dinfo['location_code'] = location_code
dinfo['last_library_uuid'] = getattr(self, 'current_library_uuid', None)
dinfo['calibre_version'] = '.'.join([unicode(i) for i in numeric_version])
dinfo['date_last_connected'] = unicode(now())
dinfo['date_last_connected'] = isoformat(now())
dinfo['prefix'] = prefix.replace('\\', '/')
return dinfo

View File

@ -28,7 +28,7 @@ class ParserError(ValueError):
BOOK_EXTENSIONS = ['lrf', 'rar', 'zip', 'rtf', 'lit', 'txt', 'txtz', 'htm', 'xhtm',
'html', 'xhtml', 'pdf', 'pdb', 'pdr', 'prc', 'mobi', 'azw', 'doc',
'epub', 'fb2', 'djvu', 'lrx', 'cbr', 'cbz', 'cbc', 'oebzip',
'rb', 'imp', 'odt', 'chm', 'tpz', 'azw1', 'pml', 'mbp', 'tan', 'snb']
'rb', 'imp', 'odt', 'chm', 'tpz', 'azw1', 'pml', 'pmlz', 'mbp', 'tan', 'snb']
class HTMLRenderer(object):

View File

@ -18,14 +18,14 @@ SOCIAL_METADATA_FIELDS = frozenset([
'series_index', # A floating point number
# Of the form { scheme1:value1, scheme2:value2}
# For example: {'isbn':'123456789', 'doi':'xxxx', ... }
'classifiers',
'identifiers',
])
'''
The list of names that convert to classifiers when in get and set.
The list of names that convert to identifiers when in get and set.
'''
TOP_LEVEL_CLASSIFIERS = frozenset([
TOP_LEVEL_IDENTIFIERS = frozenset([
'isbn',
])
@ -108,7 +108,7 @@ STANDARD_METADATA_FIELDS = SOCIAL_METADATA_FIELDS.union(
SC_FIELDS_NOT_COPIED = frozenset(['title', 'title_sort', 'authors',
'author_sort', 'author_sort_map',
'cover_data', 'tags', 'language',
'classifiers'])
'identifiers'])
# Metadata fields that smart update should copy only if the source is not None
SC_FIELDS_COPY_NOT_NULL = frozenset(['lpath', 'size', 'comments', 'thumbnail'])

View File

@ -12,7 +12,7 @@ from calibre.constants import DEBUG
from calibre.ebooks.metadata.book import SC_COPYABLE_FIELDS
from calibre.ebooks.metadata.book import SC_FIELDS_COPY_NOT_NULL
from calibre.ebooks.metadata.book import STANDARD_METADATA_FIELDS
from calibre.ebooks.metadata.book import TOP_LEVEL_CLASSIFIERS
from calibre.ebooks.metadata.book import TOP_LEVEL_IDENTIFIERS
from calibre.ebooks.metadata.book import ALL_METADATA_FIELDS
from calibre.library.field_metadata import FieldMetadata
from calibre.utils.date import isoformat, format_date
@ -24,7 +24,7 @@ NULL_VALUES = {
'user_metadata': {},
'cover_data' : (None, None),
'tags' : [],
'classifiers' : {},
'identifiers' : {},
'languages' : [],
'device_collections': [],
'author_sort_map': {},
@ -96,8 +96,8 @@ class Metadata(object):
def __getattribute__(self, field):
_data = object.__getattribute__(self, '_data')
if field in TOP_LEVEL_CLASSIFIERS:
return _data.get('classifiers').get(field, None)
if field in TOP_LEVEL_IDENTIFIERS:
return _data.get('identifiers').get(field, None)
if field in STANDARD_METADATA_FIELDS:
return _data.get(field, None)
try:
@ -123,8 +123,8 @@ class Metadata(object):
def __setattr__(self, field, val, extra=None):
_data = object.__getattribute__(self, '_data')
if field in TOP_LEVEL_CLASSIFIERS:
_data['classifiers'].update({field: val})
if field in TOP_LEVEL_IDENTIFIERS:
_data['identifiers'].update({field: val})
elif field in STANDARD_METADATA_FIELDS:
if val is None:
val = NULL_VALUES.get(field, None)
@ -176,17 +176,21 @@ class Metadata(object):
def set(self, field, val, extra=None):
self.__setattr__(field, val, extra)
def get_classifiers(self):
def get_identifiers(self):
'''
Return a copy of the classifiers dictionary.
Return a copy of the identifiers dictionary.
The dict is small, and the penalty for using a reference where a copy is
needed is large. Also, we don't want any manipulations of the returned
dict to show up in the book.
'''
return copy.deepcopy(object.__getattribute__(self, '_data')['classifiers'])
ans = object.__getattribute__(self,
'_data')['identifiers']
if not ans:
ans = {}
return copy.deepcopy(ans)
def set_classifiers(self, classifiers):
object.__getattribute__(self, '_data')['classifiers'] = classifiers
def set_identifiers(self, identifiers):
object.__getattribute__(self, '_data')['identifiers'] = identifiers
# field-oriented interface. Intended to be the same as in LibraryDatabase
@ -229,7 +233,7 @@ class Metadata(object):
if v is not None:
result[attr] = v
# separate these because it uses the self.get(), not _data.get()
for attr in TOP_LEVEL_CLASSIFIERS:
for attr in TOP_LEVEL_IDENTIFIERS:
v = self.get(attr, None)
if v is not None:
result[attr] = v
@ -400,8 +404,8 @@ class Metadata(object):
self.set_all_user_metadata(other.get_all_user_metadata(make_copy=True))
for x in SC_FIELDS_COPY_NOT_NULL:
copy_not_none(self, other, x)
if callable(getattr(other, 'get_classifiers', None)):
self.set_classifiers(other.get_classifiers())
if callable(getattr(other, 'get_identifiers', None)):
self.set_identifiers(other.get_identifiers())
# language is handled below
else:
for attr in SC_COPYABLE_FIELDS:
@ -456,15 +460,15 @@ class Metadata(object):
if len(other_comments.strip()) > len(my_comments.strip()):
self.comments = other_comments
# Copy all the non-none classifiers
if callable(getattr(other, 'get_classifiers', None)):
d = self.get_classifiers()
s = other.get_classifiers()
# Copy all the non-none identifiers
if callable(getattr(other, 'get_identifiers', None)):
d = self.get_identifiers()
s = other.get_identifiers()
d.update([v for v in s.iteritems() if v[1] is not None])
self.set_classifiers(d)
self.set_identifiers(d)
else:
# other structure not Metadata. Copy the top-level classifiers
for attr in TOP_LEVEL_CLASSIFIERS:
# other structure not Metadata. Copy the top-level identifiers
for attr in TOP_LEVEL_IDENTIFIERS:
copy_not_none(self, other, attr)
other_lang = getattr(other, 'language', None)

View File

@ -119,6 +119,8 @@ class JsonCodec(object):
for item in js:
book = book_class(prefix, item.get('lpath', None))
for key in item.keys():
if key == 'classifiers':
key = 'identifiers'
meta = self.decode_metadata(key, item[key])
if key == 'user_metadata':
book.set_all_user_metadata(meta)

View File

@ -596,6 +596,9 @@ class OPF(object): # {{{
ans = MetaInformation(self)
for n, v in self._user_metadata_.items():
ans.set_user_metadata(n, v)
ans.set_identifiers(self.get_identifiers())
return ans
def write_user_metadata(self):
@ -855,6 +858,21 @@ class OPF(object): # {{{
return property(fget=fget, fset=fset)
def get_identifiers(self):
    '''
    Return a dict mapping lower-cased identifier scheme -> value, built
    from every ``<identifier>`` element with text content in the OPF
    metadata. The internal schemes 'calibre' and 'uuid' are skipped.
    '''
    identifiers = {}
    expr = 'descendant::*[local-name() = "identifier" and text()]'
    for elem in self.XPath(expr)(self.metadata):
        # An identifier element declares its scheme via an attribute
        # whose name ends with 'scheme' (namespace prefix may vary)
        scheme = None
        for attr, aval in elem.attrib.iteritems():
            if attr.endswith('scheme'):
                scheme = icu_lower(aval)
                break
        if scheme is None:
            continue
        text = etree.tostring(elem, with_tail=False, encoding=unicode,
                method='text').strip()
        if text and scheme not in ('calibre', 'uuid'):
            identifiers[scheme] = text
    return identifiers
@dynamic_property
def application_id(self):
@ -1166,8 +1184,8 @@ class OPFCreator(Metadata):
a(DC_ELEM('description', self.comments))
if self.publisher:
a(DC_ELEM('publisher', self.publisher))
if self.isbn:
a(DC_ELEM('identifier', self.isbn, opf_attrs={'scheme':'ISBN'}))
for key, val in self.get_identifiers().iteritems():
a(DC_ELEM('identifier', val, opf_attrs={'scheme':icu_upper(key)}))
if self.rights:
a(DC_ELEM('rights', self.rights))
if self.tags:
@ -1291,8 +1309,8 @@ def metadata_to_opf(mi, as_string=True):
factory(DC('description'), mi.comments)
if mi.publisher:
factory(DC('publisher'), mi.publisher)
if mi.isbn:
factory(DC('identifier'), mi.isbn, scheme='ISBN')
for key, val in mi.get_identifiers().iteritems():
factory(DC('identifier'), val, scheme=icu_upper(key))
if mi.rights:
factory(DC('rights'), mi.rights)
factory(DC('language'), mi.language if mi.language and mi.language.lower()
@ -1342,7 +1360,7 @@ def test_m2o():
mi.language = 'en'
mi.comments = 'what a fun book\n\n'
mi.publisher = 'publisher'
mi.isbn = 'boooo'
mi.set_identifiers({'isbn':'booo', 'dummy':'dummy'})
mi.tags = ['a', 'b']
mi.series = 's"c\'l&<>'
mi.series_index = 3.34
@ -1350,7 +1368,7 @@ def test_m2o():
mi.timestamp = nowf()
mi.publication_type = 'ooooo'
mi.rights = 'yes'
mi.cover = 'asd.jpg'
mi.cover = os.path.abspath('asd.jpg')
opf = metadata_to_opf(mi)
print opf
newmi = MetaInformation(OPF(StringIO(opf)))
@ -1363,6 +1381,9 @@ def test_m2o():
o, n = getattr(mi, attr), getattr(newmi, attr)
if o != n and o.strip() != n.strip():
print 'FAILED:', attr, getattr(mi, attr), '!=', getattr(newmi, attr)
if mi.get_identifiers() != newmi.get_identifiers():
print 'FAILED:', 'identifiers', mi.get_identifiers(),
print '!=', newmi.get_identifiers()
class OPFTest(unittest.TestCase):
@ -1378,6 +1399,7 @@ class OPFTest(unittest.TestCase):
<creator opf:role="aut">Next</creator>
<dc:subject>One</dc:subject><dc:subject>Two</dc:subject>
<dc:identifier scheme="ISBN">123456789</dc:identifier>
<dc:identifier scheme="dummy">dummy</dc:identifier>
<meta name="calibre:series" content="A one book series" />
<meta name="calibre:rating" content="4"/>
<meta name="calibre:publication_type" content="test"/>
@ -1405,6 +1427,8 @@ class OPFTest(unittest.TestCase):
self.assertEqual(opf.rating, 4)
self.assertEqual(opf.publication_type, 'test')
self.assertEqual(list(opf.itermanifest())[0].get('href'), 'a ~ b')
self.assertEqual(opf.get_identifiers(), {'isbn':'123456789',
'dummy':'dummy'})
def testWriting(self):
for test in [('title', 'New & Title'), ('authors', ['One', 'Two']),
@ -1461,5 +1485,5 @@ def test_user_metadata():
if __name__ == '__main__':
#test_user_metadata()
#test_m2o()
test_m2o()
test()

View File

@ -296,7 +296,7 @@ class DeviceManager(Thread): # {{{
def _get_device_information(self):
info = self.device.get_device_information(end_session=False)
if len(info) < 5:
list(info).append({})
info = tuple(list(info) + [{}])
info = [i.replace('\x00', '').replace('\x01', '') if isinstance(i, basestring) else i
for i in info]
cp = self.device.card_prefix(end_session=False)

View File

@ -296,7 +296,7 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
traceback.print_exc()
if ac.plugin_path is None:
raise
self.device_manager.set_current_library_uuid('THIS IS A UUID')
self.device_manager.set_current_library_uuid(db.library_id)
if show_gui and self.gui_debug is not None:
info_dialog(self, _('Debug mode'), '<p>' +
@ -462,7 +462,7 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
self.memory_view.reset()
self.card_a_view.reset()
self.card_b_view.reset()
self.device_manager.set_current_library_uuid('THIS IS A UUID')
self.device_manager.set_current_library_uuid(db.library_id)
def set_window_title(self):

View File

@ -13,7 +13,7 @@ from PyQt4.Qt import QIcon, QFont, QLabel, QListWidget, QAction, \
QRegExp, QSettings, QSize, QSplitter, \
QPainter, QLineEdit, QComboBox, QPen, \
QMenu, QStringListModel, QCompleter, QStringList, \
QTimer, QRect
QTimer, QRect, QFontDatabase
from calibre.gui2 import NONE, error_dialog, pixmap_to_data, gprefs
from calibre.gui2.filename_pattern_ui import Ui_Form
@ -299,8 +299,6 @@ class ImageView(QWidget):
# }}}
class FontFamilyModel(QAbstractListModel):
def __init__(self, *args):
@ -312,6 +310,9 @@ class FontFamilyModel(QAbstractListModel):
self.families = []
print 'WARNING: Could not load fonts'
traceback.print_exc()
# Restrict to Qt families as Qt tends to crash
qt_families = set([unicode(x) for x in QFontDatabase().families()])
self.families = list(qt_families.intersection(set(self.families)))
self.families.sort()
self.families[:0] = [_('None')]

View File

@ -20,7 +20,8 @@ from calibre.utils.date import isoformat
FIELDS = set(['title', 'authors', 'author_sort', 'publisher', 'rating',
'timestamp', 'size', 'tags', 'comments', 'series', 'series_index',
'formats', 'isbn', 'uuid', 'pubdate', 'cover'])
'formats', 'isbn', 'uuid', 'pubdate', 'cover', 'last_modified',
'identifiers'])
def send_message(msg=''):
prints('Notifying calibre of the change')

View File

@ -6,7 +6,8 @@ __docformat__ = 'restructuredtext en'
'''
The database used to store ebook metadata
'''
import os, sys, shutil, cStringIO, glob, time, functools, traceback, re, json
import os, sys, shutil, cStringIO, glob, time, functools, traceback, re, \
json, uuid
import threading, random
from itertools import repeat
from math import ceil
@ -94,6 +95,31 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
return property(doc=doc, fget=fget, fset=fset)
@dynamic_property
def library_id(self):
    doc = ('The UUID for this library. As long as the user only operates'
            ' on libraries with calibre, it will be unique')

    def fget(self):
        # Lazily load the UUID from the library_id table; generate and
        # persist a fresh one the first time this library is opened.
        if self._library_id_ is None:
            ans = self.conn.get('SELECT uuid FROM library_id', all=False)
            if ans is None:
                ans = str(uuid.uuid4())
                self.library_id = ans  # goes through fset, which commits
            else:
                self._library_id_ = ans
        return self._library_id_

    def fset(self, val):
        self._library_id_ = unicode(val)
        # Use bound parameters instead of %s interpolation so a value
        # containing quote characters cannot break (or inject into) the SQL
        self.conn.execute('DELETE FROM library_id')
        self.conn.execute('INSERT INTO library_id (uuid) VALUES (?)',
                (self._library_id_,))
        self.conn.commit()

    return property(doc=doc, fget=fget, fset=fset)
def connect(self):
if 'win32' in sys.platform and len(self.library_path) + 4*self.PATH_LIMIT + 10 > 259:
raise ValueError('Path to library too long. Must be less than %d characters.'%(259-4*self.PATH_LIMIT-10))
@ -120,6 +146,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
def __init__(self, library_path, row_factory=False, default_prefs=None,
read_only=False):
self.field_metadata = FieldMetadata()
self._library_id_ = None
# Create the lock to be used to guard access to the metadata writer
# queues. This must be an RLock, not a Lock
self.dirtied_lock = threading.RLock()
@ -148,6 +175,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
self.is_case_sensitive = not iswindows and not isosx and \
not os.path.exists(self.dbpath.replace('metadata.db', 'MeTAdAtA.dB'))
SchemaUpgrade.__init__(self)
# Guarantee that the library_id is set
self.library_id
# if we are to copy the prefs and structure from some other DB, then
# we need to do it before we call initialize_dynamic
@ -293,14 +322,14 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
'sort',
'author_sort',
'(SELECT group_concat(format) FROM data WHERE data.book=books.id) formats',
'isbn',
'path',
'lccn',
'pubdate',
'flags',
'uuid',
'has_cover',
('au_map', 'authors', 'author', 'aum_sortconcat(link.id, authors.name, authors.sort)')
('au_map', 'authors', 'author',
'aum_sortconcat(link.id, authors.name, authors.sort)'),
'last_modified',
'(SELECT identifiers_concat(type, val) FROM identifiers WHERE identifiers.book=books.id) identifiers',
]
lines = []
for col in columns:
@ -318,8 +347,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
self.FIELD_MAP = {'id':0, 'title':1, 'authors':2, 'timestamp':3,
'size':4, 'rating':5, 'tags':6, 'comments':7, 'series':8,
'publisher':9, 'series_index':10, 'sort':11, 'author_sort':12,
'formats':13, 'isbn':14, 'path':15, 'lccn':16, 'pubdate':17,
'flags':18, 'uuid':19, 'cover':20, 'au_map':21}
'formats':13, 'path':14, 'pubdate':15, 'uuid':16, 'cover':17,
'au_map':18, 'last_modified':19, 'identifiers':20}
for k,v in self.FIELD_MAP.iteritems():
self.field_metadata.set_field_record_index(k, v, prefer_custom=False)
@ -391,11 +420,16 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
self.has_id = self.data.has_id
self.count = self.data.count
for prop in ('author_sort', 'authors', 'comment', 'comments', 'isbn',
for prop in (
'author_sort', 'authors', 'comment', 'comments',
'publisher', 'rating', 'series', 'series_index', 'tags',
'title', 'timestamp', 'uuid', 'pubdate', 'ondevice'):
'title', 'timestamp', 'uuid', 'pubdate', 'ondevice',
'metadata_last_modified',
):
fm = {'comment':'comments', 'metadata_last_modified':
'last_modified'}.get(prop, prop)
setattr(self, prop, functools.partial(self.get_property,
loc=self.FIELD_MAP['comments' if prop == 'comment' else prop]))
loc=self.FIELD_MAP[fm]))
setattr(self, 'title_sort', functools.partial(self.get_property,
loc=self.FIELD_MAP['sort']))
@ -681,8 +715,20 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
if commit:
self.conn.commit()
def update_last_modified(self, book_ids, commit=False, now=None):
    '''
    Stamp the last_modified column for the given books, updating both the
    database and the in-memory cache.

    :param now: timestamp to use; defaults to the current time via nowf()
    '''
    if now is None:
        now = nowf()
    if book_ids:
        lm_col = self.FIELD_MAP['last_modified']
        params = [(now, book_id) for book_id in book_ids]
        self.conn.executemany(
            'UPDATE books SET last_modified=? WHERE id=?', params)
        # Keep the cached rows in sync with the database
        for book_id in book_ids:
            self.data.set(book_id, lm_col, now, row_is_id=True)
    if commit:
        self.conn.commit()
def dirtied(self, book_ids, commit=True):
changed = False
self.update_last_modified(book_ids)
for book in book_ids:
with self.dirtied_lock:
# print 'dirtied: check id', book
@ -691,21 +737,18 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
self.dirtied_sequence += 1
continue
# print 'book not already dirty'
try:
self.conn.execute(
'INSERT INTO metadata_dirtied (book) VALUES (?)',
'INSERT OR IGNORE INTO metadata_dirtied (book) VALUES (?)',
(book,))
changed = True
except IntegrityError:
# Already in table
pass
self.dirtied_cache[book] = self.dirtied_sequence
self.dirtied_sequence += 1
# If the commit doesn't happen, then the DB table will be wrong. This
# could lead to a problem because on restart, we won't put the book back
# into the dirtied_cache. We deal with this by writing the dirtied_cache
# back to the table on GUI exit. Not perfect, but probably OK
if commit and changed:
if book_ids and commit:
self.conn.commit()
def get_a_dirtied_book(self):
@ -803,8 +846,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
if mi.series:
mi.series_index = row[fm['series_index']]
mi.rating = row[fm['rating']]
mi.isbn = row[fm['isbn']]
id = idx if index_is_id else self.id(idx)
mi.set_identifiers(self.get_identifiers(id, index_is_id=True))
mi.application_id = id
mi.id = id
for key, meta in self.field_metadata.custom_iteritems():
@ -911,10 +954,14 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
except (IOError, OSError):
time.sleep(0.2)
save_cover_data_to(data, path)
self.conn.execute('UPDATE books SET has_cover=1 WHERE id=?', (id,))
now = nowf()
self.conn.execute(
'UPDATE books SET has_cover=1,last_modified=? WHERE id=?',
(now, id))
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['cover'], True, row_is_id=True)
self.data.set(id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
if notify:
self.notify('cover', [id])
@ -923,8 +970,12 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
def set_has_cover(self, id, val):
dval = 1 if val else 0
self.conn.execute('UPDATE books SET has_cover=? WHERE id=?', (dval, id,))
now = nowf()
self.conn.execute(
'UPDATE books SET has_cover=?,last_modified=? WHERE id=?',
(dval, now, id))
self.data.set(id, self.FIELD_MAP['cover'], val, row_is_id=True)
self.data.set(id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
def book_on_device(self, id):
if callable(self.book_on_device_func):
@ -1222,7 +1273,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
for category in tb_cats.keys():
cat = tb_cats[category]
if not cat['is_category'] or cat['kind'] in ['user', 'search'] \
or category in ['news', 'formats']:
or category in ['news', 'formats', 'identifiers']:
continue
# Get the ids for the item values
if not cat['is_custom']:
@ -1652,8 +1703,6 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
doit(self.set_tags, id, mi.tags, notify=False, commit=False)
if mi.comments:
doit(self.set_comment, id, mi.comments, notify=False, commit=False)
if mi.isbn and mi.isbn.strip():
doit(self.set_isbn, id, mi.isbn, notify=False, commit=False)
if mi.series_index:
doit(self.set_series_index, id, mi.series_index, notify=False,
commit=False)
@ -1663,6 +1712,15 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
doit(self.set_timestamp, id, mi.timestamp, notify=False,
commit=False)
mi_idents = mi.get_identifiers()
if mi_idents:
identifiers = self.get_identifiers(id, index_is_id=True)
for key, val in mi_idents.iteritems():
if val and val.strip(): # Don't delete an existing identifier
identifiers[icu_lower(key)] = val
self.set_identifiers(id, identifiers, notify=False, commit=False)
user_mi = mi.get_all_user_metadata(make_copy=False)
for key in user_mi.iterkeys():
if key in self.field_metadata and \
@ -2441,14 +2499,84 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
if notify:
self.notify('metadata', [id])
def set_isbn(self, id, isbn, notify=True, commit=True):
self.conn.execute('UPDATE books SET isbn=? WHERE id=?', (isbn, id))
self.dirtied([id], commit=False)
def isbn(self, idx, index_is_id=False):
    '''
    Return the book's ISBN, extracted from the serialized identifiers
    cache field ('type:val' pairs joined by commas), or None if the book
    has no isbn identifier.
    '''
    row = self.data._data[idx] if index_is_id else self.data[idx]
    if row is None:
        return None
    raw = row[self.FIELD_MAP['identifiers']]
    if not raw:
        return None
    for part in raw.split(','):
        if part.startswith('isbn:'):
            return part[len('isbn:'):].strip()
    return None
def get_identifiers(self, idx, index_is_id=False):
    '''
    Return a dict of identifier type -> value for the book, parsed from
    the comma separated 'type:val' pairs cached in the identifiers field.
    Malformed pairs (empty type or value) are silently skipped.
    '''
    ans = {}
    row = self.data._data[idx] if index_is_id else self.data[idx]
    if row is None:
        return ans
    raw = row[self.FIELD_MAP['identifiers']]
    if not raw:
        return ans
    for pair in raw.split(','):
        typ, _, val = pair.partition(':')
        typ = typ.strip()
        val = val.strip()
        if typ and val:
            ans[typ] = val
    return ans
def _clean_identifier(self, typ, val):
    '''
    Normalize an identifier pair for storage: the type is lower-cased and
    the ':' and ',' delimiters (used when serializing the identifiers
    cache field) are removed from it; in the value they are replaced with
    '|' so the serialized form stays parseable.
    '''
    typ = icu_lower(typ).strip()
    for ch in (':', ','):
        typ = typ.replace(ch, '')
    val = val.strip()
    for ch in (',', ':'):
        val = val.replace(ch, '|')
    return typ, val
def set_identifier(self, id_, typ, val, notify=True, commit=True):
'If val is empty, deletes identifier of type typ'
typ, val = self._clean_identifier(typ, val)
identifiers = self.get_identifiers(id_, index_is_id=True)
if not typ:
return
changed = False
if not val and typ in identifiers:
identifiers.pop(typ)
changed = True
self.conn.execute(
'DELETE from identifiers WHERE book=? AND type=?',
(id_, typ))
if val and identifiers.get(typ, None) != val:
changed = True
identifiers[typ] = val
self.conn.execute(
'INSERT OR REPLACE INTO identifiers (book, type, val) VALUES (?, ?, ?)',
(id_, typ, val))
if changed:
raw = ','.join(['%s:%s'%(k, v) for k, v in
identifiers.iteritems()])
self.data.set(id_, self.FIELD_MAP['identifiers'], raw,
row_is_id=True)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['isbn'], isbn, row_is_id=True)
if notify:
self.notify('metadata', [id])
self.notify('metadata', [id_])
def set_identifiers(self, id_, identifiers, notify=True, commit=True):
    '''
    Replace all identifiers of book id_ with the given type -> value
    mapping. Types/values are normalized via _clean_identifier and pairs
    with an empty value are dropped. Updates both the identifiers table
    and the cached comma separated 'type:val' field.
    '''
    cleaned = {}
    for raw_typ, raw_val in identifiers.iteritems():
        typ, val = self._clean_identifier(raw_typ, raw_val)
        if val:
            cleaned[typ] = val
    # Full replace: wipe the existing rows, then insert the cleaned set
    self.conn.execute('DELETE FROM identifiers WHERE book=?', (id_,))
    self.conn.executemany(
        'INSERT INTO identifiers (book, type, val) VALUES (?, ?, ?)',
        [(id_, typ, val) for typ, val in cleaned.iteritems()])
    serialized = ','.join('%s:%s'%(typ, val) for typ, val in
        cleaned.iteritems())
    self.data.set(id_, self.FIELD_MAP['identifiers'], serialized,
            row_is_id=True)
    if commit:
        self.conn.commit()
    if notify:
        self.notify('metadata', [id_])
def set_isbn(self, id_, isbn, notify=True, commit=True):
    '''
    Convenience wrapper: the ISBN is stored as the identifier of type
    'isbn'. Per set_identifier, an empty isbn deletes any existing one.
    '''
    self.set_identifier(id_, 'isbn', isbn, commit=commit, notify=notify)
def add_catalog(self, path, title):
format = os.path.splitext(path)[1][1:].lower()
@ -2746,7 +2874,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
prefix = self.library_path
FIELDS = set(['title', 'authors', 'author_sort', 'publisher', 'rating',
'timestamp', 'size', 'tags', 'comments', 'series', 'series_index',
'isbn', 'uuid', 'pubdate'])
'uuid', 'pubdate', 'last_modified', 'identifiers'])
for x in self.custom_column_num_map:
FIELDS.add(x)
data = []
@ -2761,6 +2889,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
data.append(x)
x['id'] = db_id
x['formats'] = []
isbn = self.isbn(db_id, index_is_id=True)
x['isbn'] = isbn if isbn else ''
if not x['authors']:
x['authors'] = _('Unknown')
x['authors'] = [i.replace('|', ',') for i in x['authors'].split(',')]

View File

@ -119,6 +119,15 @@ class FieldMetadata(dict):
'search_terms':['formats', 'format'],
'is_custom':False,
'is_category':True}),
('identifiers', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':',',
'kind':'field',
'name':_('Identifiers'),
'search_terms':['identifiers', 'identifier'],
'is_custom':False,
'is_category':True}),
('publisher', {'table':'publishers',
'column':'name',
'link_column':'publisher',
@ -296,6 +305,15 @@ class FieldMetadata(dict):
'search_terms':['date'],
'is_custom':False,
'is_category':False}),
('last_modified', {'table':None,
'column':None,
'datatype':'datetime',
'is_multiple':None,
'kind':'field',
'name':_('Date'),
'search_terms':['last_modified'],
'is_custom':False,
'is_category':False}),
('title', {'table':None,
'column':None,
'datatype':'text',
@ -335,7 +353,8 @@ class FieldMetadata(dict):
self._tb_cats[k]['display'] = {}
self._tb_cats[k]['is_editable'] = True
self._add_search_terms_to_map(k, v['search_terms'])
self._tb_cats['timestamp']['display'] = {
for x in ('timestamp', 'last_modified'):
self._tb_cats[x]['display'] = {
'date_format': tweaks['gui_timestamp_display_format']}
self._tb_cats['pubdate']['display'] = {
'date_format': tweaks['gui_pubdate_display_format']}

View File

@ -13,6 +13,7 @@ from calibre.ptempfile import TemporaryDirectory
from calibre.ebooks.metadata.opf2 import OPF
from calibre.library.database2 import LibraryDatabase2
from calibre.constants import filesystem_encoding
from calibre.utils.date import utcfromtimestamp
from calibre import isbytestring
NON_EBOOK_EXTENSIONS = frozenset([
@ -211,8 +212,8 @@ class Restore(Thread):
force_id=book['id'])
if book['mi'].uuid:
db.set_uuid(book['id'], book['mi'].uuid, commit=False, notify=False)
db.conn.execute('UPDATE books SET path=? WHERE id=?', (book['path'],
book['id']))
db.conn.execute('UPDATE books SET path=?,last_modified=? WHERE id=?', (book['path'],
utcfromtimestamp(book['timestamp']), book['id']))
for fmt, size, name in book['formats']:
db.conn.execute('''

View File

@ -8,6 +8,8 @@ __docformat__ = 'restructuredtext en'
import os
from calibre.utils.date import isoformat, DEFAULT_DATE
class SchemaUpgrade(object):
def __init__(self):
@ -468,4 +470,116 @@ class SchemaUpgrade(object):
'''
self.conn.executescript(script)
def upgrade_version_18(self):
'''
Upgrade to schema version 18.

Add a library UUID.
Add an identifiers table.
Add a languages table.
Add a last_modified column.
NOTE: You cannot downgrade after this update; if you do,
any changes you make to book isbns will be lost (they now
live in the identifiers table, not the books.isbn column).
'''
# Single SQL script run atomically via executescript():
# - library_id: holds the one library-wide UUID
# - identifiers: per-book (type, val) map; existing ISBNs are
#   migrated into it by the INSERT near the end
# - languages + books_languages_link, with triggers that emulate
#   foreign-key constraints (SQLite FKs were off by default) and
#   indices for both link directions
# - books_delete_trg: rebuilt so deleting a book also removes its
#   rows from the new identifiers and languages link tables
# - last_modified column added to books
# NOTE(review): 'NON NULL' (sic) below is not a NOT NULL constraint --
# SQLite parses it as part of the declared column type. Left as-is,
# since changing the string would change the resulting schema.
script = '''
DROP TABLE IF EXISTS library_id;
CREATE TABLE library_id ( id INTEGER PRIMARY KEY,
uuid TEXT NOT NULL,
UNIQUE(uuid)
);
DROP TABLE IF EXISTS identifiers;
CREATE TABLE identifiers  ( id     INTEGER PRIMARY KEY,
book   INTEGER NON NULL,
type   TEXT NON NULL DEFAULT "isbn" COLLATE NOCASE,
val    TEXT NON NULL COLLATE NOCASE,
UNIQUE(book, type)
);
DROP TABLE IF EXISTS languages;
CREATE TABLE languages    ( id        INTEGER PRIMARY KEY,
lang_code TEXT NON NULL COLLATE NOCASE,
UNIQUE(lang_code)
);
DROP TABLE IF EXISTS books_languages_link;
CREATE TABLE books_languages_link ( id INTEGER PRIMARY KEY,
book INTEGER NOT NULL,
lang_code INTEGER NOT NULL,
item_order INTEGER NOT NULL DEFAULT 0,
UNIQUE(book, lang_code)
);
DROP TRIGGER IF EXISTS fkc_delete_on_languages;
CREATE TRIGGER fkc_delete_on_languages
BEFORE DELETE ON languages
BEGIN
SELECT CASE
WHEN (SELECT COUNT(id) FROM books_languages_link WHERE lang_code=OLD.id) > 0
THEN RAISE(ABORT, 'Foreign key violation: language is still referenced')
END;
END;
DROP TRIGGER IF EXISTS fkc_delete_on_languages_link;
CREATE TRIGGER fkc_delete_on_languages_link
BEFORE INSERT ON books_languages_link
BEGIN
SELECT CASE
WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: book not in books')
WHEN (SELECT id from languages WHERE id=NEW.lang_code) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: lang_code not in languages')
END;
END;
DROP TRIGGER IF EXISTS fkc_update_books_languages_link_a;
CREATE TRIGGER fkc_update_books_languages_link_a
BEFORE UPDATE OF book ON books_languages_link
BEGIN
SELECT CASE
WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: book not in books')
END;
END;
DROP TRIGGER IF EXISTS fkc_update_books_languages_link_b;
CREATE TRIGGER fkc_update_books_languages_link_b
BEFORE UPDATE OF lang_code ON books_languages_link
BEGIN
SELECT CASE
WHEN (SELECT id from languages WHERE id=NEW.lang_code) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: lang_code not in languages')
END;
END;
DROP INDEX IF EXISTS books_languages_link_aidx;
CREATE INDEX books_languages_link_aidx ON books_languages_link (lang_code);
DROP INDEX IF EXISTS books_languages_link_bidx;
CREATE INDEX books_languages_link_bidx ON books_languages_link (book);
DROP INDEX IF EXISTS languages_idx;
CREATE INDEX languages_idx ON languages (lang_code COLLATE NOCASE);
DROP TRIGGER IF EXISTS books_delete_trg;
CREATE TRIGGER books_delete_trg
AFTER DELETE ON books
BEGIN
DELETE FROM books_authors_link WHERE book=OLD.id;
DELETE FROM books_publishers_link WHERE book=OLD.id;
DELETE FROM books_ratings_link WHERE book=OLD.id;
DELETE FROM books_series_link WHERE book=OLD.id;
DELETE FROM books_tags_link WHERE book=OLD.id;
DELETE FROM books_languages_link WHERE book=OLD.id;
DELETE FROM data WHERE book=OLD.id;
DELETE FROM comments WHERE book=OLD.id;
DELETE FROM conversion_options WHERE book=OLD.id;
DELETE FROM books_plugin_data WHERE book=OLD.id;
DELETE FROM identifiers WHERE book=OLD.id;
END;
INSERT INTO identifiers (book, val) SELECT id,isbn FROM books WHERE isbn;
ALTER TABLE books ADD COLUMN last_modified TIMESTAMP NOT NULL DEFAULT "%s";
'''%isoformat(DEFAULT_DATE, sep=' ')
# Sqlite does not support non constant default values in alter
# statements
self.conn.executescript(script)

View File

@ -90,7 +90,7 @@ class XMLServer(object):
kwargs[x] = serialize(record[FM[x]])
for x in ('formats', 'series', 'tags', 'publisher',
'comments'):
'comments', 'identifiers'):
y = record[FM[x]]
if x == 'tags':
y = format_tag_string(y, ',', ignore_max=True)

View File

@ -8,6 +8,7 @@ Wrapper for multi-threaded access to a single sqlite database connection. Serial
all calls.
'''
import sqlite3 as sqlite, traceback, time, uuid, sys, os
import repr as reprlib
from sqlite3 import IntegrityError, OperationalError
from threading import Thread
from Queue import Queue
@ -20,6 +21,7 @@ from calibre.utils.date import parse_date, isoformat
from calibre import isbytestring, force_unicode
from calibre.constants import iswindows, DEBUG
from calibre.utils.icu import strcmp
from calibre import prints
global_lock = RLock()
@ -87,6 +89,18 @@ class SortedConcatenate(object):
# Same aggregation as SortedConcatenate, but joins with '|' so the result
# can be split unambiguously even when values contain commas.
class SafeSortedConcatenate(SortedConcatenate):
sep = '|'
class IdentifiersConcat(object):
    '''String concatenation aggregator for the identifiers map.

    Pure-python fallback for the identifiers_concat C aggregate: collects
    (key, val) pairs row by row and renders them as 'key:val' entries
    joined by commas.
    '''

    def __init__(self):
        # Fragments in the order sqlite delivered them
        self.parts = []

    def step(self, key, val):
        # Invoked once per aggregated row
        self.parts.append(u'{0}:{1}'.format(key, val))

    def finalize(self):
        # Final aggregate value, e.g. u'isbn:123,doi:xyz'
        return u','.join(self.parts)
class AumSortedConcatenate(object):
'''String concatenation aggregator for the author sort map'''
def __init__(self):
@ -170,13 +184,13 @@ class DBThread(Thread):
detect_types=sqlite.PARSE_DECLTYPES|sqlite.PARSE_COLNAMES)
self.conn.execute('pragma cache_size=5000')
encoding = self.conn.execute('pragma encoding').fetchone()[0]
c_ext_loaded = load_c_extensions(self.conn)
self.conn.create_aggregate('sortconcat', 2, SortedConcatenate)
self.conn.create_aggregate('sort_concat', 2, SafeSortedConcatenate)
self.conn.create_aggregate('identifiers_concat', 2, IdentifiersConcat)
load_c_extensions(self.conn)
self.conn.row_factory = sqlite.Row if self.row_factory else lambda cursor, row : list(row)
self.conn.create_aggregate('concat', 1, Concatenate)
self.conn.create_aggregate('aum_sortconcat', 3, AumSortedConcatenate)
if not c_ext_loaded:
self.conn.create_aggregate('sortconcat', 2, SortedConcatenate)
self.conn.create_aggregate('sort_concat', 2, SafeSortedConcatenate)
self.conn.create_collation('PYNOCASE', partial(pynocase,
encoding=encoding))
self.conn.create_function('title_sort', 1, title_sort)
@ -208,17 +222,21 @@ class DBThread(Thread):
except Exception, err:
ok, res = False, (err, traceback.format_exc())
else:
func = getattr(self.conn, func)
bfunc = getattr(self.conn, func)
try:
for i in range(3):
try:
ok, res = True, func(*args, **kwargs)
ok, res = True, bfunc(*args, **kwargs)
break
except OperationalError, err:
# Retry if unable to open db file
if 'unable to open' not in str(err) or i == 2:
e = str(err)
if 'unable to open' not in e or i == 2:
if 'unable to open' in e:
prints('Unable to open database for func',
func, reprlib.repr(args),
reprlib.repr(kwargs))
raise
traceback.print_exc()
time.sleep(0.5)
except Exception, err:
ok, res = False, (err, traceback.format_exc())

View File

@ -77,6 +77,7 @@ static void sort_concat_free(SortConcatList *list) {
free(list->vals[i]->val);
free(list->vals[i]);
}
free(list->vals);
}
static int sort_concat_cmp(const void *a_, const void *b_) {
@ -142,11 +143,102 @@ static void sort_concat_finalize2(sqlite3_context *context) {
// }}}
// identifiers_concat {{{
// One rendered "key:val" entry produced by identifiers_concat_step
typedef struct {
char *val;       // heap-allocated, NUL-terminated "key:val" string
size_t length;   // strlen(val), cached so finalize need not re-scan
} IdentifiersConcatItem;

// Per-aggregate accumulator stored in the sqlite3 aggregate context
typedef struct {
IdentifiersConcatItem **vals;  // growable array of collected items
size_t count;                  // number of items currently stored
size_t length;                 // capacity of vals, in elements
} IdentifiersConcatList;
// Aggregate step for identifiers_concat(key, val): appends a heap
// allocated "key:val" item to the per-aggregate list. Allocation
// failures silently drop the row (matching sqlite's tolerant style);
// the list itself lives in sqlite3_aggregate_context storage.
static void identifiers_concat_step(sqlite3_context *context, int argc, sqlite3_value **argv) {
    const char *key, *val;
    size_t len = 0;
    IdentifiersConcatItem **tmp;
    IdentifiersConcatList *list;

    assert(argc == 2);

    list = (IdentifiersConcatList*) sqlite3_aggregate_context(context, sizeof(*list));
    if (list == NULL) return;

    if (list->vals == NULL) {
        // First call for this aggregate: start with room for 100 items
        list->vals = (IdentifiersConcatItem**)calloc(100, sizeof(IdentifiersConcatItem*));
        if (list->vals == NULL) return;
        list->length = 100;
        list->count = 0;
    }

    if (list->count == list->length) {
        // BUG FIX: realloc takes a size in *bytes*; the old code passed
        // list->length + 100 (an element count), growing the buffer by
        // only 100 bytes and corrupting the heap past 100 identifiers.
        // Also use a temporary so a failed realloc does not lose (and
        // leak) the still-valid original buffer.
        tmp = (IdentifiersConcatItem**)realloc(list->vals,
                (list->length + 100) * sizeof(IdentifiersConcatItem*));
        if (tmp == NULL) return;
        list->vals = tmp;
        list->length = list->length + 100;
    }

    key = (char*) sqlite3_value_text(argv[0]);
    val = (char*) sqlite3_value_text(argv[1]);
    // Check for NULLs *before* allocating the item, so a NULL key/val
    // row no longer leaks a freshly calloc'd IdentifiersConcatItem.
    if (key == NULL || val == NULL) {return;}

    list->vals[list->count] = (IdentifiersConcatItem*)calloc(1, sizeof(IdentifiersConcatItem));
    if (list->vals[list->count] == NULL) return;

    len = strlen(key) + strlen(val) + 1; // +1 for the ':' separator
    list->vals[list->count]->val = (char*)calloc(len+1, sizeof(char));
    if (list->vals[list->count]->val == NULL) {
        // Undo the item allocation so finalize never sees a half-built entry
        free(list->vals[list->count]);
        list->vals[list->count] = NULL;
        return;
    }
    snprintf(list->vals[list->count]->val, len+1, "%s:%s", key, val);
    list->vals[list->count]->length = len;

    list->count = list->count + 1;
}
// Aggregate finalizer: joins all collected "key:val" items with commas,
// hands the result to sqlite (which copies it, via SQLITE_TRANSIENT),
// and frees every per-item allocation plus the item array itself.
static void identifiers_concat_finalize(sqlite3_context *context) {
IdentifiersConcatList *list;
IdentifiersConcatItem *item;
char *ans, *pos;
size_t sz = 0, i;

list = (IdentifiersConcatList*) sqlite3_aggregate_context(context, sizeof(*list));
// Nothing aggregated: leave the result as SQL NULL
if (list == NULL || list->vals == NULL || list->count < 1) return;

// First pass: total payload size of all items
for (i = 0; i < list->count; i++) {
sz += list->vals[i]->length;
}
sz += list->count; // Space for commas
ans = (char*)calloc(sz+2, sizeof(char));
if (ans == NULL) return;

// Second pass: copy each item followed by a comma, freeing as we go
pos = ans;
for (i = 0; i < list->count; i++) {
item = list->vals[i];
if (item == NULL || item->val == NULL) continue;
memcpy(pos, item->val, item->length);
pos += item->length;
*pos = ',';
pos += 1;
free(item->val);
free(item);
}

*(pos-1) = 0; // Remove trailing comma
// SQLITE_TRANSIENT makes sqlite take its own copy, so ans can be freed
sqlite3_result_text(context, ans, -1, SQLITE_TRANSIENT);
free(ans);
free(list->vals);
}
// }}}
// Extension entry point, called by sqlite when the shared library is
// loaded: registers the custom aggregate functions on this connection.
// Note sortconcat and sort_concat share a step and differ only in the
// finalizer; identifiers_concat mirrors the python IdentifiersConcat
// aggregator.
MYEXPORT int sqlite3_extension_init(
sqlite3 *db, char **pzErrMsg, const sqlite3_api_routines *pApi){
SQLITE_EXTENSION_INIT2(pApi);
sqlite3_create_function(db, "sortconcat", 2, SQLITE_UTF8, NULL, NULL, sort_concat_step, sort_concat_finalize);
sqlite3_create_function(db, "sort_concat", 2, SQLITE_UTF8, NULL, NULL, sort_concat_step, sort_concat_finalize2);
sqlite3_create_function(db, "identifiers_concat", 2, SQLITE_UTF8, NULL, NULL, identifiers_concat_step, identifiers_concat_finalize);
return 0;
}

View File

@ -1,23 +1,6 @@
{% extends "!layout.html" %}
{% block extrahead %}
{% if not embedded %}
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-20736318-1']);
_gaq.push(['_setDomainName', '.calibre-ebook.com']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
{% endif %}
<style type="text/css">
.float-left-img { float: left; margin-right: 1em; margin-bottom: 1em }
.float-right-img { float: right; margin-left: 1em; margin-bottom: 1em }
@ -52,6 +35,23 @@
</div>
{%- endif %}
</div>
{% if not embedded %}
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-20736318-1']);
_gaq.push(['_setDomainName', '.calibre-ebook.com']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
{%- endif %}
{% endblock %}
{% block sidebarlogo %}

View File

@ -45,6 +45,7 @@ utc_tz = _utc_tz = tzutc()
local_tz = _local_tz = SafeLocalTimeZone()
UNDEFINED_DATE = datetime(101,1,1, tzinfo=utc_tz)
DEFAULT_DATE = datetime(2000,1,1, tzinfo=utc_tz)
def is_date_undefined(qt_or_dt):
d = qt_or_dt