Framework for custom columns

Kovid Goyal 2010-04-13 15:40:50 +05:30
parent fe2d463ecd
commit 5d32694762
5 changed files with 588 additions and 42 deletions

View File

@@ -119,7 +119,7 @@ def send_message(msg=''):
def get_parser(usage):
parser = OptionParser(usage)
go = parser.add_option_group('GLOBAL OPTIONS')
- go.add_option('--library-path', default=None, help=_('Path to the calibre library. Default is to use the path stored in the settings.'))
+ go.add_option('--library-path', '--with-library', default=None, help=_('Path to the calibre library. Default is to use the path stored in the settings.'))
return parser
@@ -129,7 +129,7 @@ def get_db(dbpath, options):
dbpath = os.path.abspath(dbpath)
return LibraryDatabase2(dbpath)
- def do_list(db, fields, sort_by, ascending, search_text, line_width, separator,
+ def do_list(db, fields, afields, sort_by, ascending, search_text, line_width, separator,
prefix, output_format, subtitle='Books in the calibre database'):
if sort_by:
db.sort(sort_by, ascending)
@@ -138,6 +138,9 @@ def do_list(db, fields, sort_by, ascending, search_text, line_width, separator,
authors_to_string = output_format in ['stanza', 'text']
data = db.get_data_as_dict(prefix, authors_as_string=authors_to_string)
fields = ['id'] + fields
+ title_fields = fields
+ fields = [db.custom_column_label_map[x[1:]]['num'] if x[0]=='*'
+ else x for x in fields]
if output_format == 'text':
for f in data:
fmts = [x for x in f['formats'] if x is not None]
@@ -152,7 +155,7 @@ def do_list(db, fields, sort_by, ascending, search_text, line_width, separator,
record[f] = record[f].replace('\n', ' ')
for i in data:
for j, field in enumerate(fields):
- widths[j] = max(widths[j], len(unicode(i[str(field)])))
+ widths[j] = max(widths[j], len(unicode(i[field])))
screen_width = terminal_controller.COLS if line_width < 0 else line_width
if not screen_width:
@@ -171,7 +174,8 @@ def do_list(db, fields, sort_by, ascending, search_text, line_width, separator,
break
widths = list(base_widths)
- titles = map(lambda x, y: '%-*s%s'%(x-len(separator), y, separator), widths, fields)
+ titles = map(lambda x, y: '%-*s%s'%(x-len(separator), y, separator),
+ widths, title_fields)
print terminal_controller.GREEN + ''.join(titles)+terminal_controller.NORMAL
wrappers = map(lambda x: TextWrapper(x-1), widths)
@@ -202,7 +206,12 @@ def do_list(db, fields, sort_by, ascending, search_text, line_width, separator,
return template.generate(id="urn:calibre:main", data=data, subtitle=subtitle,
sep=os.sep, quote=quote, updated=db.last_modified()).render('xml')
- def list_option_parser():
+ def list_option_parser(db=None):
+ fields = set(FIELDS)
+ if db is not None:
+ for f in db.custom_column_label_map:
+ fields.add('*'+f)
parser = get_parser(_(
'''\
%prog list [options]
@@ -211,7 +220,12 @@ List the books available in the calibre database.
'''
))
parser.add_option('-f', '--fields', default='title,authors',
- help=_('The fields to display when listing books in the database. Should be a comma separated list of fields.\nAvailable fields: %s\nDefault: %%default. The special field "all" can be used to select all fields. Only has effect in the text output format.')%','.join(FIELDS))
+ help=_('The fields to display when listing books in the'
+ ' database. Should be a comma separated list of'
+ ' fields.\nAvailable fields: %s\nDefault: %%default. The'
+ ' special field "all" can be used to select all fields.'
+ ' Only has effect in the text output'
+ ' format.')%','.join(sorted(fields)))
parser.add_option('--sort-by', default=None,
help=_('The field by which to sort the results.\nAvailable fields: %s\nDefault: %%default')%','.join(FIELDS))
parser.add_option('--ascending', default=False, action='store_true',
@@ -229,25 +243,35 @@ List the books available in the calibre database.
def command_list(args, dbpath):
- parser = list_option_parser()
+ pre = get_parser('')
+ pargs = [x for x in args if x in ('--with-library', '--library-path')
+ or not x.startswith('-')]
+ opts = pre.parse_args(sys.argv[:1] + pargs)[0]
+ db = get_db(dbpath, opts)
+ parser = list_option_parser(db=db)
opts, args = parser.parse_args(sys.argv[:1] + args)
+ afields = set(FIELDS)
+ if db is not None:
+ for f in db.custom_column_label_map:
+ afields.add('*'+f)
fields = [str(f.strip().lower()) for f in opts.fields.split(',')]
if 'all' in fields:
- fields = sorted(list(FIELDS))
- if not set(fields).issubset(FIELDS):
+ fields = sorted(list(afields))
+ if not set(fields).issubset(afields):
parser.print_help()
print
- print >>sys.stderr, _('Invalid fields. Available fields:'), ','.join(sorted(FIELDS))
+ prints(_('Invalid fields. Available fields:'),
+ ','.join(sorted(afields)), file=sys.stderr)
return 1
- db = get_db(dbpath, opts)
- if not opts.sort_by in FIELDS and opts.sort_by is not None:
+ if not opts.sort_by in afields and opts.sort_by is not None:
parser.print_help()
print
- print >>sys.stderr, _('Invalid sort field. Available fields:'), ','.join(FIELDS)
+ prints(_('Invalid sort field. Available fields:'), ','.join(afields),
+ file=sys.stderr)
return 1
- print do_list(db, fields, opts.sort_by, opts.ascending, opts.search, opts.line_width, opts.separator,
+ print do_list(db, fields, afields, opts.sort_by, opts.ascending, opts.search, opts.line_width, opts.separator,
opts.prefix, opts.output_format)
return 0
@@ -589,6 +613,44 @@ def command_export(args, dbpath):
do_export(get_db(dbpath, opts), ids, dir, opts)
return 0
def do_add_custom_column(db, label, name, datatype, is_multiple, display):
num = db.create_custom_column(label, name, datatype, is_multiple, display=display)
prints('Custom column created with id: %d'%num)
def add_custom_column_option_parser():
from calibre.library.custom_columns import CustomColumns
parser = get_parser(_('''\
%prog add_custom_column [options] label name datatype
Create a custom column. label is the machine friendly name of the column. Should
not contain spaces or colons. name is the human friendly name of the column.
datatype is one of: {0}
''').format(', '.join(CustomColumns.CUSTOM_DATA_TYPES)))
parser.add_option('--is-multiple', default=False, action='store_true',
help=_('This column stores tag like data (i.e. '
'multiple comma separated values). Only '
'applies if datatype is text.'))
parser.add_option('--display', default='{}',
help=_('A dictionary of options to customize how '
'the data in this column will be interpreted.'))
return parser
def command_add_custom_column(args, dbpath):
import json
parser = add_custom_column_option_parser()
opts, args = parser.parse_args(args)
if len(args) < 3:
parser.print_help()
print
print >>sys.stderr, _('You must specify label, name and datatype')
return 1
do_add_custom_column(get_db(dbpath, opts), args[0], args[1], args[2],
opts.is_multiple, json.loads(opts.display))
return 0
def catalog_option_parser(args):
from calibre.customize.ui import available_catalog_formats, plugin_for_catalog_format
from calibre.utils.logging import Log
@@ -693,8 +755,107 @@ def command_catalog(args, dbpath):
# end of GR additions
def do_set_custom(db, col, id_, val, append):
db.set_custom(id_, val, label=col, append=append)
prints('Data set to: %r'%db.get_custom(id_, label=col, index_is_id=True))
def set_custom_option_parser():
parser = get_parser(_(
'''
%prog set_custom [options] column id value
Set the value of a custom column for the book identified by id.
You can get a list of ids using the list command.
You can get a list of custom column names using the custom_columns
command.
'''))
parser.add_option('-a', '--append', default=False, action='store_true',
help=_('If the column stores multiple values, append the specified '
'values to the existing ones, instead of replacing them.'))
return parser
def command_set_custom(args, dbpath):
parser = set_custom_option_parser()
opts, args = parser.parse_args(args)
if len(args) < 3:
parser.print_help()
print
print >>sys.stderr, _('Error: You must specify a field name, id and value')
return 1
do_set_custom(get_db(dbpath, opts), args[0], int(args[1]), args[2],
opts.append)
return 0
def do_custom_columns(db, details):
from pprint import pformat
cols = db.custom_column_label_map
for col, data in cols.items():
if details:
prints(col)
print
prints(pformat(data))
print '\n'
else:
prints(col, '(%d)'%data['num'])
def custom_columns_option_parser():
parser = get_parser(_(
'''
%prog custom_columns [options]
List available custom columns. Shows column labels and ids.
'''))
parser.add_option('-d', '--details', default=False, action='store_true',
help=_('Show details for each column.'))
return parser
def command_custom_columns(args, dbpath):
parser = custom_columns_option_parser()
opts, args = parser.parse_args(args)
do_custom_columns(get_db(dbpath, opts), opts.details)
return 0
def do_remove_custom_column(db, label, force):
if not force:
q = raw_input(_('You will lose all data in the column: %r.'
' Are you sure (y/n)? ')%label)
if q.lower().strip() != 'y':
return
db.delete_custom_column(label=label)
prints('Column %r removed.'%label)
def remove_custom_column_option_parser():
parser = get_parser(_(
'''
%prog remove_custom_column [options] label
Remove the custom column identified by label. You can see available
columns with the custom_columns command.
'''))
parser.add_option('-f', '--force', default=False, action='store_true',
help=_('Do not ask for confirmation'))
return parser
def command_remove_custom_column(args, dbpath):
parser = remove_custom_column_option_parser()
opts, args = parser.parse_args(args)
if len(args) < 1:
parser.print_help()
print
prints(_('Error: You must specify a column label'), file=sys.stderr)
return 1
do_remove_custom_column(get_db(dbpath, opts), args[0], opts.force)
return 0
COMMANDS = ('list', 'add', 'remove', 'add_format', 'remove_format',
- 'show_metadata', 'set_metadata', 'export', 'catalog')
+ 'show_metadata', 'set_metadata', 'export', 'catalog',
+ 'add_custom_column', 'custom_columns', 'remove_custom_column', 'set_custom')
def option_parser():
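Taken together, these hunks give the command-line tool (%prog in the usage strings) a complete custom-column workflow: add_custom_column label name datatype creates a column, set_custom column id value fills it for a book, custom_columns lists the defined columns, remove_custom_column label deletes one, and list can show custom fields by prefixing their label with '*' (e.g. --fields title,authors,*genre, where 'genre' is an illustrative label). The sketch below drives the same workflow through the CustomColumns API these commands call; it is a minimal sketch, and the library path, column label and book id are assumptions, not part of the commit.

# Minimal sketch using the CustomColumns API added in this commit;
# the import path, library path, label and book id are illustrative.
from calibre.library.database2 import LibraryDatabase2   # assumed import path

db = LibraryDatabase2('/path/to/library')              # hypothetical library path
num = db.create_custom_column('genre', 'Genre',        # label, human friendly name
                              'text', True)            # datatype, is_multiple
db.set_custom(123, 'horror, vampire', label='genre')   # adapt_text splits the comma separated values
print db.get_custom(123, label='genre', index_is_id=True)
db.delete_custom_column(label='genre')                 # only marks it; dropped on the next open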

View File

@@ -6,29 +6,18 @@ __license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
+ import json
+ from functools import partial
+ from calibre import prints
+ from calibre.constants import preferred_encoding
+ from calibre.utils.date import parse_date
class CustomColumns(object):
CUSTOM_DATA_TYPES = frozenset(['rating', 'text', 'comments', 'datetime',
'int', 'float', 'bool'])
- def __init__(self):
- return
- # Delete marked custom columns
- for num in self.conn.get(
- 'SELECT id FROM custom_columns WHERE mark_for_delete=1'):
- dt, lt = self.custom_table_names(num)
- self.conn.executescript('''\
- DROP TABLE IF EXISTS %s;
- DROP TABLE IF EXISTS %s;
- '''%(dt, lt)
- )
- self.conn.execute('DELETE FROM custom_columns WHERE mark_for_delete=1')
- self.conn.commit()
def custom_table_names(self, num):
return 'custom_column_%d'%num, 'books_custom_column_%d_link'%num
@@ -36,11 +25,387 @@ class CustomColumns(object):
def custom_tables(self):
return set([x[0] for x in self.conn.get(
'SELECT name FROM sqlite_master WHERE type="table" AND '
- '(name GLOB "custom_column_*" OR name GLOB books_customcolumn_*)')])
+ '(name GLOB "custom_column_*" OR name GLOB "books_custom_column_*")')])
- def create_custom_table(self, label, name, datatype, is_multiple,
- sort_alpha):
+ def __init__(self):
# Delete marked custom columns
for record in self.conn.get(
'SELECT id FROM custom_columns WHERE mark_for_delete=1'):
num = record[0]
table, lt = self.custom_table_names(num)
self.conn.executescript('''\
DROP INDEX IF EXISTS {table}_idx;
DROP INDEX IF EXISTS {lt}_aidx;
DROP INDEX IF EXISTS {lt}_bidx;
DROP TRIGGER IF EXISTS fkc_update_{lt}_a;
DROP TRIGGER IF EXISTS fkc_update_{lt}_b;
DROP TRIGGER IF EXISTS fkc_insert_{lt};
DROP TRIGGER IF EXISTS fkc_delete_{lt};
DROP TRIGGER IF EXISTS fkc_insert_{table};
DROP TRIGGER IF EXISTS fkc_delete_{table};
DROP VIEW IF EXISTS tag_browser_{table};
DROP TABLE IF EXISTS {table};
DROP TABLE IF EXISTS {lt};
'''.format(table=table, lt=lt)
)
self.conn.execute('DELETE FROM custom_columns WHERE mark_for_delete=1')
self.conn.commit()
# Load metadata for custom columns
self.custom_column_label_map, self.custom_column_num_map = {}, {}
triggers = []
remove = []
custom_tables = self.custom_tables
for record in self.conn.get(
'SELECT label,name,datatype,editable,display,normalized,id,is_multiple FROM custom_columns'):
data = {
'label':record[0],
'name':record[1],
'datatype':record[2],
'editable':record[3],
'display':json.loads(record[4]),
'normalized':record[5],
'num':record[6],
'is_multiple':record[7],
}
table, lt = self.custom_table_names(data['num'])
if table not in custom_tables or (data['normalized'] and lt not in
custom_tables):
remove.append(data)
continue
self.custom_column_label_map[data['label']] = data['num']
self.custom_column_num_map[data['num']] = \
self.custom_column_label_map[data['label']] = data
# Create Foreign Key triggers
if data['normalized']:
trigger = 'DELETE FROM %s WHERE book=OLD.id;'%lt
else:
trigger = 'DELETE FROM %s WHERE book=OLD.id;'%table
triggers.append(trigger)
if remove:
for data in remove:
prints('WARNING: Custom column %r not found, removing.' %
data['label'])
self.conn.execute('DELETE FROM custom_columns WHERE id=?',
(data['num'],))
self.conn.commit()
if triggers:
self.conn.execute('''\
CREATE TEMP TRIGGER custom_books_delete_trg
AFTER DELETE ON books
BEGIN
%s
END;
'''%(' \n'.join(triggers)))
self.conn.commit()
# Setup data adapters
def adapt_text(x, d):
if d['is_multiple']:
if x is None:
return []
if isinstance(x, (str, unicode, bytes)):
x = x.split(',')
x = [y.strip() for y in x if y.strip()]
x = [y.decode(preferred_encoding, 'replace') if not isinstance(y,
unicode) else y for y in x]
return [u' '.join(y.split()) for y in x]
else:
return x if x is None or isinstance(x, unicode) else \
x.decode(preferred_encoding, 'replace')
def adapt_datetime(x, d):
if isinstance(x, (str, unicode, bytes)):
x = parse_date(x, assume_utc=False, as_utc=False)
return x
def adapt_bool(x, d):
if isinstance(x, (str, unicode, bytes)):
x = bool(int(x))
return x
self.custom_data_adapters = {
'float': lambda x,d : x if x is None else float(x),
'int': lambda x,d : x if x is None else int(x),
'rating':lambda x,d : x if x is None else min(10., max(0., float(x))),
'bool': adapt_bool,
'comments': lambda x,d: adapt_text(x, {'is_multiple':False}),
'datetime' : adapt_datetime,
'text':adapt_text
}
def get_custom(self, idx, label=None, num=None, index_is_id=False):
if label is not None:
data = self.custom_column_label_map[label]
if num is not None:
data = self.custom_column_num_map[num]
row = self.data._data[idx] if index_is_id else self.data[idx]
ans = row[self.FIELD_MAP[data['num']]]
if data['is_multiple'] and data['datatype'] == 'text':
ans = ans.split('|') if ans else []
if data['display'].get('sort_alpha', False):
ans.sort(cmp=lambda x,y:cmp(x.lower(), y.lower()))
return ans
def all_custom(self, label=None, num=None):
if label is not None:
data = self.custom_column_label_map[label]
if num is not None:
data = self.custom_column_num_map[num]
table, lt = self.custom_table_names(data['num'])
if data['normalized']:
ans = self.conn.get('SELECT value FROM %s'%table)
else:
ans = self.conn.get('SELECT DISTINCT value FROM %s'%table)
ans = set([x[0] for x in ans])
return ans
def delete_custom_column(self, label=None, num=None):
data = None
if label is not None:
data = self.custom_column_label_map[label]
if num is not None:
data = self.custom_column_num_map[num]
if data is None:
raise ValueError('No such column')
self.conn.execute(
'UPDATE custom_columns SET mark_for_delete=1 WHERE id=?',
(data['num'],))
self.conn.commit()
def set_custom(self, id_, val, label=None, num=None, append=False, notify=True):
if label is not None:
data = self.custom_column_label_map[label]
if num is not None:
data = self.custom_column_num_map[num]
if not data['editable']:
raise ValueError('Column %r is not editable'%data['label'])
table, lt = self.custom_table_names(data['num'])
getter = partial(self.get_custom, id_, num=data['num'],
index_is_id=True)
val = self.custom_data_adapters[data['datatype']](val, data)
if data['normalized']:
if not append or not data['is_multiple']:
self.conn.execute('DELETE FROM %s WHERE book=?'%lt, (id_,))
self.conn.execute(
'''DELETE FROM %s WHERE (SELECT COUNT(id) FROM %s WHERE
value=%s.id) < 1''' % (table, lt, table))
self.data._data[id_][self.FIELD_MAP[data['num']]] = None
set_val = val if data['is_multiple'] else [val]
existing = getter()
if not existing:
existing = []
for x in set(set_val) - set(existing):
if x is None:
continue
existing = self.all_custom(num=data['num'])
lx = [t.lower() if hasattr(t, 'lower') else t for t in existing]
try:
idx = lx.index(x.lower() if hasattr(x, 'lower') else x)
except ValueError:
idx = -1
if idx > -1:
ex = existing[idx]
xid = self.conn.get(
'SELECT id FROM %s WHERE value=?'%table, (ex,), all=False)
if ex != x:
self.conn.execute(
'UPDATE %s SET value=? WHERE id=?', (x, xid))
else:
xid = self.conn.execute(
'INSERT INTO %s(value) VALUES(?)'%table, (x,)).lastrowid
if not self.conn.get(
'SELECT book FROM %s WHERE book=? AND value=?'%lt,
(id_, xid), all=False):
self.conn.execute(
'INSERT INTO %s(book, value) VALUES (?,?)'%lt,
(id_, xid))
self.conn.commit()
nval = self.conn.get(
'SELECT custom_%s FROM meta2 WHERE id=?'%data['num'],
(id_,), all=False)
self.data.set(id_, self.FIELD_MAP[data['num']], nval,
row_is_id=True)
else:
self.conn.execute('DELETE FROM %s WHERE book=?'%table, (id_,))
if val is not None:
self.conn.execute(
'INSERT INTO %s(book,value) VALUES (?,?)'%table,
(id_, val))
self.conn.commit()
nval = self.conn.get(
'SELECT custom_%s FROM meta2 WHERE id=?'%data['num'],
(id_,), all=False)
self.data.set(id_, self.FIELD_MAP[data['num']], nval,
row_is_id=True)
if notify:
self.notify('metadata', [id_])
return nval
def clean_custom(self):
st = ('DELETE FROM {table} WHERE (SELECT COUNT(id) FROM {lt} WHERE'
' {lt}.value={table}.id) < 1;')
statements = []
for data in self.custom_column_num_map.values():
if data['normalized']:
table, lt = self.custom_table_names(data['num'])
statements.append(st.format(lt=lt, table=table))
if statements:
self.conn.executescript(' \n'.join(statements))
self.conn.commit()
def custom_columns_in_meta(self):
lines = {}
for data in self.custom_column_label_map.values():
display = data['display']
table, lt = self.custom_table_names(data['num'])
if data['normalized']:
query = '%s.value'
if data['is_multiple']:
query = 'group_concat(%s.value, "|")'
if not display.get('sort_alpha', False):
query = 'sort_concat(link.id, %s.value)'
line = '''(SELECT {query} FROM {lt} AS link INNER JOIN
{table} ON(link.value={table}.id) WHERE link.book=books.id)
custom_{num}
'''.format(query=query%table, lt=lt, table=table, num=data['num'])
else:
line = '''
(SELECT value FROM {table} WHERE book=books.id) custom_{num}
'''.format(table=table, num=data['num'])
lines[data['num']] = line
return lines
def create_custom_column(self, label, name, datatype, is_multiple,
editable=True, display={}):
if datatype not in self.CUSTOM_DATA_TYPES:
raise ValueError('%r is not a supported data type'%datatype)
normalized = datatype not in ('datetime', 'comments', 'int', 'bool')
is_multiple = is_multiple and datatype in ('text',)
num = self.conn.execute(
('INSERT INTO '
'custom_columns(label,name,datatype,is_multiple,editable,display,normalized)'
'VALUES (?,?,?,?,?,?,?)'),
(label, name, datatype, is_multiple, editable,
json.dumps(display), normalized)).lastrowid
if datatype in ('rating', 'int'):
dt = 'INT'
elif datatype in ('text', 'comments'):
dt = 'TEXT'
elif datatype in ('float',):
dt = 'REAL'
elif datatype == 'datetime':
dt = 'timestamp'
elif datatype == 'bool':
dt = 'BOOL'
collate = 'COLLATE NOCASE' if dt == 'TEXT' else ''
table, lt = self.custom_table_names(num)
if normalized:
lines = [
'''\
CREATE TABLE %s(
id INTEGER PRIMARY KEY AUTOINCREMENT,
value %s NOT NULL %s,
UNIQUE(value));
'''%(table, dt, collate),
'CREATE INDEX %s_idx ON %s (value %s);'%(table, table, collate),
'''\
CREATE TABLE %s(
id INTEGER PRIMARY KEY AUTOINCREMENT,
book INTEGER NOT NULL,
value INTEGER NOT NULL,
UNIQUE(book, value)
);'''%lt,
'CREATE INDEX %s_aidx ON %s (value);'%(lt,lt),
'CREATE INDEX %s_bidx ON %s (book);'%(lt,lt),
'''\
CREATE TRIGGER fkc_update_{lt}_a
BEFORE UPDATE OF book ON {lt}
BEGIN
SELECT CASE
WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: book not in books')
END;
END;
CREATE TRIGGER fkc_update_{lt}_b
BEFORE UPDATE OF author ON {lt}
BEGIN
SELECT CASE
WHEN (SELECT id from {table} WHERE id=NEW.value) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: value not in {table}')
END;
END;
CREATE TRIGGER fkc_insert_{lt}
BEFORE INSERT ON {lt}
BEGIN
SELECT CASE
WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: book not in books')
WHEN (SELECT id from {table} WHERE id=NEW.value) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: value not in {table}')
END;
END;
CREATE TRIGGER fkc_delete_{lt}
AFTER DELETE ON {table}
BEGIN
DELETE FROM {lt} WHERE value=OLD.id;
END;
CREATE VIEW tag_browser_{table} AS SELECT
id,
value,
(SELECT COUNT(id) FROM {lt} WHERE value={table}.id) count
FROM {table};
'''.format(lt=lt, table=table),
]
else:
lines = [
'''\
CREATE TABLE %s(
id INTEGER PRIMARY KEY AUTOINCREMENT,
book INTEGER,
value %s NOT NULL %s,
UNIQUE(book));
'''%(table, dt, collate),
'CREATE INDEX %s_idx ON %s (book);'%(table, table),
'''\
CREATE TRIGGER fkc_insert_{table}
BEFORE INSERT ON {table}
BEGIN
SELECT CASE
WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: book not in books')
END;
END;
CREATE TRIGGER fkc_update_{table}
BEFORE UPDATE OF book ON {table}
BEGIN
SELECT CASE
WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: book not in books')
END;
END;
'''.format(table=table),
]
script = ' \n'.join(lines)
self.conn.executescript(script)
self.conn.commit()
return num
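For a normalized, multi-valued column the values live in custom_column_N with a books_custom_column_N_link table joining them to books, and the meta2 view (wired up in the LibraryDatabase2 diff below) exposes them as a single '|'-joined custom_N field that get_custom() splits back into a list. A minimal sketch of reading such a column both ways, assuming a column labelled 'genre' already exists; the import path, library path and book id are illustrative:

# Sketch only: the import path, 'genre', the library path and book id 123 are assumptions.
from calibre.library.database2 import LibraryDatabase2

db = LibraryDatabase2('/path/to/library')
num = db.custom_column_label_map['genre']['num']
raw = db.conn.get('SELECT custom_%d FROM meta2 WHERE id=?'%num,
                  (123,), all=False)                        # e.g. u'horror|vampire'
vals = db.get_custom(123, label='genre', index_is_id=True)  # [u'horror', u'vampire']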

View File

@@ -161,11 +161,17 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
link_col=col[2], query=col[3])
lines.append(line)
+ custom_map = self.custom_columns_in_meta()
+ custom_cols = list(sorted(custom_map.keys()))
+ lines.extend([custom_map[x] for x in custom_cols])
self.FIELD_MAP = {'id':0, 'title':1, 'authors':2, 'publisher':3, 'rating':4, 'timestamp':5,
'size':6, 'tags':7, 'comments':8, 'series':9, 'series_index':10,
'sort':11, 'author_sort':12, 'formats':13, 'isbn':14, 'path':15,
- 'lccn':16, 'pubdate':17, 'flags':18, 'uuid':19, 'cover':20}
+ 'lccn':16, 'pubdate':17, 'flags':18, 'uuid':19}
+ for i, col in enumerate(custom_cols):
+ self.FIELD_MAP[col] = 19+1+i
script = '''
DROP VIEW IF EXISTS meta2;
@@ -174,7 +180,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
{0}
FROM books;
'''.format(', \n'.join(lines))
- self.conn.executescript(script.format(''))
+ self.conn.executescript(script)
self.conn.commit()
self.data = ResultCache(self.FIELD_MAP)
@@ -558,6 +564,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
self.conn.execute(st%dict(ltable='publishers', table='publishers', ltable_col='publisher'))
self.conn.execute(st%dict(ltable='tags', table='tags', ltable_col='tag'))
self.conn.execute(st%dict(ltable='series', table='series', ltable_col='series'))
+ self.clean_custom()
self.conn.commit()
def get_recipes(self):
@@ -1204,6 +1211,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
FIELDS = set(['title', 'authors', 'author_sort', 'publisher', 'rating',
'timestamp', 'size', 'tags', 'comments', 'series', 'series_index',
'isbn', 'uuid', 'pubdate'])
+ for x in self.custom_column_num_map:
+ FIELDS.add(x)
data = []
for record in self.data:
if record is None: continue

View File

@@ -250,7 +250,6 @@ class SchemaUpgrade(object):
cn = tn
create_tag_browser_view(tn, cn)
- """
def upgrade_version_9(self):
'Add custom columns'
self.conn.executescript('''
@@ -260,12 +259,13 @@
name TEXT NOT NULL,
datatype TEXT NOT NULL,
mark_for_delete BOOL DEFAULT 0 NOT NULL,
- flag BOOL DEFAULT 0 NOT NULL,
editable BOOL DEFAULT 1 NOT NULL,
+ display TEXT DEFAULT "{}" NOT NULL,
+ is_multiple BOOL DEFAULT 0 NOT NULL,
+ normalized BOOL NOT NULL,
UNIQUE(label)
);
CREATE INDEX custom_columns_idx ON custom_columns (label);
CREATE INDEX formats_idx ON data (format);
''')
- """

View File

@@ -30,6 +30,13 @@ def adapt_datetime(dt):
sqlite.register_adapter(datetime, adapt_datetime)
sqlite.register_converter('timestamp', convert_timestamp)
+ def convert_bool(val):
+ return bool(int(val))
+ sqlite.register_adapter(bool, lambda x : 1 if x else 0)
+ sqlite.register_converter('bool', convert_bool)
class Concatenate(object):
'''String concatenation aggregator for sqlite'''
def __init__(self, sep=','):
@@ -47,8 +54,8 @@ class Concatenate(object):
class SortedConcatenate(object):
'''String concatenation aggregator for sqlite, sorted by supplied index'''
- def __init__(self, sep=','):
- self.sep = sep
+ sep = ','
+ def __init__(self):
self.ans = {}
def step(self, ndx, value):
@@ -60,6 +67,9 @@ class SortedConcatenate(object):
return None
return self.sep.join(map(self.ans.get, sorted(self.ans.keys())))
+ class SafeSortedConcatenate(SortedConcatenate):
+ sep = '|'
class Connection(sqlite.Connection):
def get(self, *args, **kw):
@@ -92,6 +102,7 @@ class DBThread(Thread):
self.conn.row_factory = sqlite.Row if self.row_factory else lambda cursor, row : list(row)
self.conn.create_aggregate('concat', 1, Concatenate)
self.conn.create_aggregate('sortconcat', 2, SortedConcatenate)
+ self.conn.create_aggregate('sort_concat', 2, SafeSortedConcatenate)
self.conn.create_function('title_sort', 1, title_sort)
self.conn.create_function('uuid4', 0, lambda : str(uuid.uuid4()))
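SafeSortedConcatenate is the same index-sorted aggregator with a '|' separator, matching the split performed in CustomColumns.get_custom(), and it is registered as the sort_concat SQL aggregate used by custom_columns_in_meta(). A quick sketch of its behaviour, driving the class directly; the import path is assumed and the values are made up:

# Illustration only; import path and values are assumptions.
from calibre.library.sqlite import SafeSortedConcatenate

agg = SafeSortedConcatenate()   # sep = '|'
agg.step(2, u'vampire')         # (index, value) pairs may arrive in any order
agg.step(1, u'horror')
print agg.finalize()            # -> horror|vampire, joined in index order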