diff --git a/Changelog.yaml b/Changelog.yaml index dfc9b9efe6..b297823841 100644 --- a/Changelog.yaml +++ b/Changelog.yaml @@ -19,6 +19,10 @@ # new recipes: # - title: +# - title: "Launch of a new website that catalogues DRM free books. http://drmfree.calibre-ebook.com" +# description: "A growing catalogue of DRM free books. Books that you actually own after buying instead of renting." +# type: major + - version: 0.7.47 date: 2011-02-25 @@ -88,8 +92,8 @@ - title: "Various Romanian news sources" author: Silviu Coatara - - title: "Osnews.pl and SwiatKindle" - author: Mori + - title: "Osnews.pl and SwiatCzytnikow" + author: Tomasz Dlugosz - title: "Roger Ebert Journal" author: Shane Erstad diff --git a/resources/images/minusminus.png b/resources/images/minusminus.png new file mode 100644 index 0000000000..71225be8d7 Binary files /dev/null and b/resources/images/minusminus.png differ diff --git a/resources/images/news/bucataras.png b/resources/images/news/bucataras.png new file mode 100644 index 0000000000..fae90e17c4 Binary files /dev/null and b/resources/images/news/bucataras.png differ diff --git a/resources/images/news/credit_slips.png b/resources/images/news/credit_slips.png new file mode 100644 index 0000000000..50ac1dc02e Binary files /dev/null and b/resources/images/news/credit_slips.png differ diff --git a/resources/images/news/historiaro.png b/resources/images/news/historiaro.png new file mode 100644 index 0000000000..c9e616c876 Binary files /dev/null and b/resources/images/news/historiaro.png differ diff --git a/resources/images/news/lwn_weekly.png b/resources/images/news/lwn_weekly.png new file mode 100644 index 0000000000..0fc654add9 Binary files /dev/null and b/resources/images/news/lwn_weekly.png differ diff --git a/resources/images/news/nytimes_sports.png b/resources/images/news/nytimes_sports.png new file mode 100644 index 0000000000..b587be8de0 Binary files /dev/null and b/resources/images/news/nytimes_sports.png differ diff --git 
a/resources/images/news/nytimes_tech.png b/resources/images/news/nytimes_tech.png new file mode 100644 index 0000000000..64ff8b5eb2 Binary files /dev/null and b/resources/images/news/nytimes_tech.png differ diff --git a/resources/images/plusplus.png b/resources/images/plusplus.png new file mode 100644 index 0000000000..db918365d0 Binary files /dev/null and b/resources/images/plusplus.png differ diff --git a/resources/recipes/bucataras.recipe b/resources/recipes/bucataras.recipe new file mode 100644 index 0000000000..b069ecc5b0 --- /dev/null +++ b/resources/recipes/bucataras.recipe @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +#!/usr/bin/env python + +__license__ = 'GPL v3' +__copyright__ = u'2011, Silviu Cotoar\u0103' +''' +bucataras.ro +''' + +from calibre.web.feeds.news import BasicNewsRecipe + +class Bucataras(BasicNewsRecipe): + title = u'Bucataras' + __author__ = u'Silviu Cotoar\u0103' + description = '' + publisher = 'Bucataras' + oldest_article = 5 + language = 'ro' + max_articles_per_feed = 100 + no_stylesheets = True + use_embedded_content = False + category = 'Ziare,Bucatarie,Retete' + encoding = 'utf-8' + cover_url = 'http://www.bucataras.ro/templates/default/images/pink/logo.jpg' + + conversion_options = { + 'comments' : description + ,'tags' : category + ,'language' : language + ,'publisher' : publisher + } + + keep_only_tags = [ + dict(name='h1', attrs={'class':'titlu'}) + , dict(name='div', attrs={'class':'contentL'}) + , dict(name='div', attrs={'class':'contentBottom'}) + + ] + + remove_tags = [ + dict(name='div', attrs={'class':['sociale']}) + , dict(name='div', attrs={'class':['contentR']}) + , dict(name='a', attrs={'target':['_self']}) + , dict(name='div', attrs={'class':['comentarii']}) + ] + + remove_tags_after = [ + dict(name='div', attrs={'class':['comentarii']}) + ] + + feeds = [ + (u'Feeds', u'http://www.bucataras.ro/rss/retete/') + ] + + def preprocess_html(self, soup): + return self.adeify_images(soup) diff --git 
a/resources/recipes/buffalo_news.recipe b/resources/recipes/buffalo_news.recipe new file mode 100644 index 0000000000..51985a3c51 --- /dev/null +++ b/resources/recipes/buffalo_news.recipe @@ -0,0 +1,58 @@ +__license__ = 'GPL v3' +__author__ = 'Todd Chapman' +__copyright__ = 'Todd Chapman' +__version__ = 'v0.2' +__date__ = '2 March 2011' + +''' +http://www.buffalonews.com/RSS/ +''' + +from calibre.web.feeds.news import BasicNewsRecipe + +class AdvancedUserRecipe1298680852(BasicNewsRecipe): + title = u'Buffalo News' + oldest_article = 2 + language = 'en' + __author__ = 'ChappyOnIce' + max_articles_per_feed = 20 + encoding = 'utf-8' + masthead_url = 'http://www.buffalonews.com/buffalonews/skins/buffalonews/images/masthead/the_buffalo_news_logo.png' + remove_javascript = True + extra_css = 'body {text-align: justify;}\n \ + p {text-indent: 20px;}' + + keep_only_tags = [ + dict(name='div', attrs={'class':['main-content-left']}) + ] + + remove_tags = [ + dict(name='div', attrs={'id':['commentCount']}), + dict(name='div', attrs={'class':['story-list-links']}) + ] + + remove_tags_after = dict(name='div', attrs={'class':['body storyContent']}) + + feeds = [(u'City of Buffalo', u'http://www.buffalonews.com/city/communities/buffalo/?widget=rssfeed&view=feed&contentId=77944'), + (u'Southern Erie County', u'http://www.buffalonews.com/city/communities/southern-erie/?widget=rssfeed&view=feed&contentId=77944'), + (u'Eastern Erie County', u'http://www.buffalonews.com/city/communities/eastern-erie/?widget=rssfeed&view=feed&contentId=77944'), + (u'Southern Tier', u'http://www.buffalonews.com/city/communities/southern-tier/?widget=rssfeed&view=feed&contentId=77944'), + (u'Niagara County', u'http://www.buffalonews.com/city/communities/niagara-county/?widget=rssfeed&view=feed&contentId=77944'), + (u'Business', u'http://www.buffalonews.com/business/?widget=rssfeed&view=feed&contentId=77944'), + (u'MoneySmart', 
u'http://www.buffalonews.com/business/moneysmart/?widget=rssfeed&view=feed&contentId=77944'), + (u'Bills & NFL', u'http://www.buffalonews.com/sports/bills-nfl/?widget=rssfeed&view=feed&contentId=77944'), + (u'Sabres & NHL', u'http://www.buffalonews.com/sports/sabres-nhl/?widget=rssfeed&view=feed&contentId=77944'), + (u'Bob DiCesare', u'http://www.buffalonews.com/sports/columns/bob-dicesare/?widget=rssfeed&view=feed&contentId=77944'), + (u'Bucky Gleason', u'http://www.buffalonews.com/sports/columns/bucky-gleason/?widget=rssfeed&view=feed&contentId=77944'), + (u'Mark Gaughan', u'http://www.buffalonews.com/sports/bills-nfl/inside-the-nfl/?widget=rssfeed&view=feed&contentId=77944'), + (u'Mike Harrington', u'http://www.buffalonews.com/sports/columns/mike-harrington/?widget=rssfeed&view=feed&contentId=77944'), + (u'Jerry Sullivan', u'http://www.buffalonews.com/sports/columns/jerry-sullivan/?widget=rssfeed&view=feed&contentId=77944'), + (u'Other Sports Columns', u'http://www.buffalonews.com/sports/columns/other-sports-columns/?widget=rssfeed&view=feed&contentId=77944'), + (u'Life', u'http://www.buffalonews.com/life/?widget=rssfeed&view=feed&contentId=77944'), + (u'Bruce Andriatch', u'http://www.buffalonews.com/city/columns/bruce-andriatch/?widget=rssfeed&view=feed&contentId=77944'), + (u'Donn Esmonde', u'http://www.buffalonews.com/city/columns/donn-esmonde/?widget=rssfeed&view=feed&contentId=77944'), + (u'Rod Watson', u'http://www.buffalonews.com/city/columns/rod-watson/?widget=rssfeed&view=feed&contentId=77944'), + (u'Entertainment', u'http://www.buffalonews.com/entertainment/?widget=rssfeed&view=feed&contentId=77944'), + (u'Off Main Street', u'http://www.buffalonews.com/city/columns/off-main-street/?widget=rssfeed&view=feed&contentId=77944'), + (u'Editorials', u'http://www.buffalonews.com/editorial-page/buffalo-news-editorials/?widget=rssfeed&view=feed&contentId=77944') + ] diff --git a/resources/recipes/credit_slips.recipe b/resources/recipes/credit_slips.recipe index 
19e19ca2fb..d4fb3a94c0 100644 --- a/resources/recipes/credit_slips.recipe +++ b/resources/recipes/credit_slips.recipe @@ -1,35 +1,44 @@ #!/usr/bin/env python __license__ = 'GPL 3' -__copyright__ = 'zotzot' +__copyright__ = 'zotzo' __docformat__ = 'restructuredtext en' from calibre.web.feeds.news import BasicNewsRecipe class CreditSlips(BasicNewsRecipe): - __license__ = 'GPL v3' - __author__ = 'zotzot' language = 'en' - version = 1 + __author__ = 'zotzot' + version = 2 title = u'Credit Slips.org' publisher = u'Bankr-L' category = u'Economic blog' - description = u'All things about credit.' - cover_url = 'http://bit.ly/hyZSTr' - oldest_article = 50 + description = u'A discussion on credit and bankruptcy' + cover_url = 'http://bit.ly/eAKNCB' + oldest_article = 15 max_articles_per_feed = 100 use_embedded_content = True + no_stylesheets = True + remove_javascript = True + + conversion_options = { + 'comments': description, + 'tags': category, + 'language': 'en', + 'publisher': publisher, + } feeds = [ -(u'Credit Slips', u'http://www.creditslips.org/creditslips/atom.xml') -] - conversion_options = { -'comments': description, -'tags': category, -'language': 'en', -'publisher': publisher -} - extra_css = ''' - body{font-family:verdana,arial,helvetica,geneva,sans-serif;} - img {float: left; margin-right: 0.5em;} - ''' + (u'Credit Slips', u'http://www.creditslips.org/creditslips/atom.xml') + ] + + extra_css = ''' + .author {font-family:Helvetica,sans-serif; font-weight:normal;font-size:small;} + h1 {font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:large;} + p {font-family:Helvetica,Arial,sans-serif;font-size:small;} + body {font-family:Helvetica,Arial,sans-serif;font-size:small;} + ''' + + def populate_article_metadata(self, article, soup, first): + h2 = soup.find('h2') + h2.replaceWith(h2.prettify() + '
Posted by ' + article.author + '
') diff --git a/resources/recipes/dotpod.recipe b/resources/recipes/dotpod.recipe new file mode 100644 index 0000000000..b04945e6d4 --- /dev/null +++ b/resources/recipes/dotpod.recipe @@ -0,0 +1,27 @@ +__license__ = 'GPL v3' +__copyright__ = '2011-2011, Federico Escalada'+_('Quick create:')
+ for col, name in [('isbn', _('ISBN')), ('formats', _('Formats')),
+ ('last_modified', _('Modified Date')), ('yesno', _('Yes/No')),
+ ('tags', _('Tags')), ('series', _('Series')), ('rating',
+ _('Rating'))]:
+ text += ' %s,'%(col, name)
+ text = text[:-1]
+ self.shortcuts.setText(text)
+
self.parent = parent
self.editing_col = editing
self.standard_colheads = standard_colheads
@@ -69,6 +80,9 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
self.datatype_changed()
self.exec_()
return
+ self.setWindowTitle(_('Edit a custom column'))
+ self.heading_label.setText(_('Edit a custom column'))
+ self.shortcuts.setVisible(False)
idx = parent.opt_columns.currentRow()
if idx < 0:
self.simple_error(_('No column selected'),
@@ -99,6 +113,32 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
self.datatype_changed()
self.exec_()
+ def shortcut_activated(self, url):
+ which = unicode(url).split(':')[-1]
+ self.column_type_box.setCurrentIndex({
+ 'yesno': 9,
+ 'tags' : 1,
+ 'series': 3,
+ 'rating': 8,
+ }.get(which, 10))
+ self.column_name_box.setText(which)
+ self.column_heading_box.setText({
+ 'isbn':'ISBN',
+ 'formats':_('Formats'),
+ 'yesno':_('Yes/No'),
+ 'tags': _('My Tags'),
+ 'series': _('My Series'),
+ 'rating': _('My Rating'),
+ 'last_modified':_('Modified Date')}[which])
+ if self.composite_box.isVisible():
+ self.composite_box.setText(
+ {
+ 'isbn': '{identifiers:select(isbn)}',
+ 'formats': '{formats}',
+ 'last_modified':'''{last_modified:'format_date($, "%d %m, %Y")'}'''
+ }[which])
+
+
def datatype_changed(self, *args):
try:
col_type = self.column_types[self.column_type_box.currentIndex()]['datatype']
diff --git a/src/calibre/gui2/preferences/create_custom_column.ui b/src/calibre/gui2/preferences/create_custom_column.ui
index d4e85a24c9..9df7107d9b 100644
--- a/src/calibre/gui2/preferences/create_custom_column.ui
+++ b/src/calibre/gui2/preferences/create_custom_column.ui
@@ -9,8 +9,8 @@
' +
@@ -495,6 +496,7 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
self.memory_view.reset()
self.card_a_view.reset()
self.card_b_view.reset()
+ self.device_manager.set_current_library_uuid(db.library_id)
def set_window_title(self):
diff --git a/src/calibre/gui2/widgets.py b/src/calibre/gui2/widgets.py
index f6c4cce3ef..5cacf32bb2 100644
--- a/src/calibre/gui2/widgets.py
+++ b/src/calibre/gui2/widgets.py
@@ -11,9 +11,9 @@ from PyQt4.Qt import QIcon, QFont, QLabel, QListWidget, QAction, \
QPixmap, QSplitterHandle, QToolButton, \
QAbstractListModel, QVariant, Qt, SIGNAL, pyqtSignal, \
QRegExp, QSettings, QSize, QSplitter, \
- QPainter, QLineEdit, QComboBox, QPen, \
+ QPainter, QLineEdit, QComboBox, QPen, QGraphicsScene, \
QMenu, QStringListModel, QCompleter, QStringList, \
- QTimer, QRect
+ QTimer, QRect, QFontDatabase, QGraphicsView
from calibre.gui2 import NONE, error_dialog, pixmap_to_data, gprefs
from calibre.gui2.filename_pattern_ui import Ui_Form
@@ -181,22 +181,16 @@ class FormatList(QListWidget):
else:
return QListWidget.keyPressEvent(self, event)
-
-class ImageView(QWidget):
-
- BORDER_WIDTH = 1
- cover_changed = pyqtSignal(object)
-
- def __init__(self, parent=None):
- QWidget.__init__(self, parent)
- self._pixmap = QPixmap(self)
- self.setMinimumSize(QSize(150, 200))
- self.setAcceptDrops(True)
- self.draw_border = True
-
- # Drag 'n drop {{{
+class ImageDropMixin(object): # {{{
+ '''
+ Adds support for dropping images onto widgets and a contect menu for
+ copy/pasting images.
+ '''
DROPABBLE_EXTENSIONS = IMAGE_EXTENSIONS
+ def __init__(self):
+ self.setAcceptDrops(True)
+
@classmethod
def paths_from_event(cls, event):
'''
@@ -223,14 +217,58 @@ class ImageView(QWidget):
pmap = QPixmap()
pmap.load(path)
if not pmap.isNull():
- self.setPixmap(pmap)
+ self.handle_image_drop(path, pmap)
event.accept()
- self.cover_changed.emit(open(path, 'rb').read())
break
+ def handle_image_drop(self, path, pmap):
+ self.set_pixmap(pmap)
+ self.cover_changed.emit(open(path, 'rb').read())
+
def dragMoveEvent(self, event):
event.acceptProposedAction()
- # }}}
+
+ def get_pixmap(self):
+ return self.pixmap()
+
+ def set_pixmap(self, pmap):
+ self.setPixmap(pmap)
+
+ def contextMenuEvent(self, ev):
+ cm = QMenu(self)
+ copy = cm.addAction(_('Copy Image'))
+ paste = cm.addAction(_('Paste Image'))
+ if not QApplication.instance().clipboard().mimeData().hasImage():
+ paste.setEnabled(False)
+ copy.triggered.connect(self.copy_to_clipboard)
+ paste.triggered.connect(self.paste_from_clipboard)
+ cm.exec_(ev.globalPos())
+
+ def copy_to_clipboard(self):
+ QApplication.instance().clipboard().setPixmap(self.get_pixmap())
+
+ def paste_from_clipboard(self):
+ cb = QApplication.instance().clipboard()
+ pmap = cb.pixmap()
+ if pmap.isNull() and cb.supportsSelection():
+ pmap = cb.pixmap(cb.Selection)
+ if not pmap.isNull():
+ self.set_pixmap(pmap)
+ self.cover_changed.emit(
+ pixmap_to_data(pmap))
+# }}}
+
+class ImageView(QWidget, ImageDropMixin):
+
+ BORDER_WIDTH = 1
+ cover_changed = pyqtSignal(object)
+
+ def __init__(self, parent=None):
+ QWidget.__init__(self, parent)
+ self._pixmap = QPixmap(self)
+ self.setMinimumSize(QSize(150, 200))
+ ImageDropMixin.__init__(self)
+ self.draw_border = True
def setPixmap(self, pixmap):
if not isinstance(pixmap, QPixmap):
@@ -272,34 +310,23 @@ class ImageView(QWidget):
p.drawRect(target)
p.end()
+class CoverView(QGraphicsView, ImageDropMixin):
- # Clipboard copy/paste # {{{
- def contextMenuEvent(self, ev):
- cm = QMenu(self)
- copy = cm.addAction(_('Copy Image'))
- paste = cm.addAction(_('Paste Image'))
- if not QApplication.instance().clipboard().mimeData().hasImage():
- paste.setEnabled(False)
- copy.triggered.connect(self.copy_to_clipboard)
- paste.triggered.connect(self.paste_from_clipboard)
- cm.exec_(ev.globalPos())
-
- def copy_to_clipboard(self):
- QApplication.instance().clipboard().setPixmap(self.pixmap())
-
- def paste_from_clipboard(self):
- cb = QApplication.instance().clipboard()
- pmap = cb.pixmap()
- if pmap.isNull() and cb.supportsSelection():
- pmap = cb.pixmap(cb.Selection)
- if not pmap.isNull():
- self.setPixmap(pmap)
- self.cover_changed.emit(
- pixmap_to_data(pmap))
- # }}}
+ cover_changed = pyqtSignal(object)
+ def __init__(self, *args, **kwargs):
+ QGraphicsView.__init__(self, *args, **kwargs)
+ ImageDropMixin.__init__(self)
+ def get_pixmap(self):
+ for item in self.scene().items():
+ if hasattr(item, 'pixmap'):
+ return item.pixmap()
+ def set_pixmap(self, pmap):
+ self.scene = QGraphicsScene()
+ self.scene.addPixmap(pmap)
+ self.setScene(self.scene)
class FontFamilyModel(QAbstractListModel):
@@ -312,6 +339,9 @@ class FontFamilyModel(QAbstractListModel):
self.families = []
print 'WARNING: Could not load fonts'
traceback.print_exc()
+ # Restrict to Qt families as Qt tends to crash
+ qt_families = set([unicode(x) for x in QFontDatabase().families()])
+ self.families = list(qt_families.intersection(set(self.families)))
self.families.sort()
self.families[:0] = [_('None')]
diff --git a/src/calibre/gui2/wizard/__init__.py b/src/calibre/gui2/wizard/__init__.py
index 5f9f1828fa..c629b10b5d 100644
--- a/src/calibre/gui2/wizard/__init__.py
+++ b/src/calibre/gui2/wizard/__init__.py
@@ -51,7 +51,7 @@ class Device(object):
@classmethod
def set_output_format(cls):
if cls.output_format:
- prefs.set('output_format', cls.output_format)
+ prefs.set('output_format', cls.output_format.lower())
@classmethod
def commit(cls):
diff --git a/src/calibre/library/caches.py b/src/calibre/library/caches.py
index e626d446d2..dafeddaf86 100644
--- a/src/calibre/library/caches.py
+++ b/src/calibre/library/caches.py
@@ -121,11 +121,16 @@ CONTAINS_MATCH = 0
EQUALS_MATCH = 1
REGEXP_MATCH = 2
def _match(query, value, matchkind):
+ if query.startswith('..'):
+ query = query[1:]
+ prefix_match_ok = False
+ else:
+ prefix_match_ok = True
for t in value:
t = icu_lower(t)
try: ### ignore regexp exceptions, required because search-ahead tries before typing is finished
if (matchkind == EQUALS_MATCH):
- if query[0] == '.':
+ if prefix_match_ok and query[0] == '.':
if t.startswith(query[1:]):
ql = len(query) - 1
if (len(t) == ql) or (t[ql:ql+1] == '.'):
@@ -418,32 +423,91 @@ class ResultCache(SearchQueryParser): # {{{
return matches
def get_user_category_matches(self, location, query, candidates):
- res = set([])
- if self.db_prefs is None:
- return res
+ matches = set([])
+ if self.db_prefs is None or len(query) < 2:
+ return matches
user_cats = self.db_prefs.get('user_categories', [])
c = set(candidates)
- l = location.rfind('.')
- if l > 0:
- alt_loc = location[0:l]
- alt_item = location[l+1:]
+
+ if query.startswith('.'):
+ check_subcats = True
+ query = query[1:]
else:
- alt_loc = None
+ check_subcats = False
+
for key in user_cats:
- if key == location or key.startswith(location + '.'):
+ if key == location or (check_subcats and key.startswith(location + '.')):
for (item, category, ign) in user_cats[key]:
s = self.get_matches(category, '=' + item, candidates=c)
c -= s
- res |= s
- elif key == alt_loc:
- for (item, category, ign) in user_cats[key]:
- if item == alt_item:
- s = self.get_matches(category, '=' + item, candidates=c)
- c -= s
- res |= s
+ matches |= s
if query == 'false':
- return candidates - res
- return res
+ return candidates - matches
+ return matches
+
+ def get_keypair_matches(self, location, query, candidates):
+ matches = set([])
+ if query.find(':') >= 0:
+ q = [q.strip() for q in query.split(':')]
+ if len(q) != 2:
+ raise ParseException(query, len(query),
+ 'Invalid query format for colon-separated search', self)
+ (keyq, valq) = q
+ keyq_mkind, keyq = self._matchkind(keyq)
+ valq_mkind, valq = self._matchkind(valq)
+ else:
+ keyq = keyq_mkind = ''
+ valq_mkind, valq = self._matchkind(query)
+
+ loc = self.field_metadata[location]['rec_index']
+ split_char = self.field_metadata[location]['is_multiple']
+ for id_ in candidates:
+ item = self._data[id_]
+ if item is None:
+ continue
+
+ if item[loc] is None:
+ if valq == 'false':
+ matches.add(id_)
+ continue
+
+ pairs = [p.strip() for p in item[loc].split(split_char)]
+ for pair in pairs:
+ parts = pair.split(':')
+ if len(parts) != 2:
+ continue
+ k = parts[:1]
+ v = parts[1:]
+ if keyq and not _match(keyq, k, keyq_mkind):
+ continue
+ if valq:
+ if valq == 'true':
+ if not v:
+ continue
+ elif valq == 'false':
+ if v:
+ continue
+ elif not _match(valq, v, valq_mkind):
+ continue
+ matches.add(id_)
+ return matches
+
+ def _matchkind(self, query):
+ matchkind = CONTAINS_MATCH
+ if (len(query) > 1):
+ if query.startswith('\\'):
+ query = query[1:]
+ elif query.startswith('='):
+ matchkind = EQUALS_MATCH
+ query = query[1:]
+ elif query.startswith('~'):
+ matchkind = REGEXP_MATCH
+ query = query[1:]
+
+ if matchkind != REGEXP_MATCH:
+ # leave case in regexps because it can be significant e.g. \S \W \D
+ query = icu_lower(query)
+ return matchkind, query
def get_matches(self, location, query, candidates=None,
allow_recursion=True):
@@ -460,6 +524,7 @@ class ResultCache(SearchQueryParser): # {{{
if query and query.strip():
# get metadata key associated with the search term. Eliminates
# dealing with plurals and other aliases
+ original_location = location
location = self.field_metadata.search_term_to_field_key(icu_lower(location.strip()))
# grouped search terms
if isinstance(location, list):
@@ -510,24 +575,20 @@ class ResultCache(SearchQueryParser): # {{{
return self.get_numeric_matches(location, query[1:],
candidates, val_func=vf)
+ # special case: colon-separated fields such as identifiers. isbn
+ # is a special case within the case
+ if fm.get('is_csp', False):
+ if location == 'identifiers' and original_location == 'isbn':
+ return self.get_keypair_matches('identifiers',
+ '=isbn:'+query, candidates)
+ return self.get_keypair_matches(location, query, candidates)
+
# check for user categories
if len(location) >= 2 and location.startswith('@'):
return self.get_user_category_matches(location[1:], query.lower(),
candidates)
# everything else, or 'all' matches
- matchkind = CONTAINS_MATCH
- if (len(query) > 1):
- if query.startswith('\\'):
- query = query[1:]
- elif query.startswith('='):
- matchkind = EQUALS_MATCH
- query = query[1:]
- elif query.startswith('~'):
- matchkind = REGEXP_MATCH
- query = query[1:]
- if matchkind != REGEXP_MATCH:
- # leave case in regexps because it can be significant e.g. \S \W \D
- query = icu_lower(query)
+ matchkind, query = self._matchkind(query)
if not isinstance(query, unicode):
query = query.decode('utf-8')
diff --git a/src/calibre/library/cli.py b/src/calibre/library/cli.py
index e93be187f9..f062aecc26 100644
--- a/src/calibre/library/cli.py
+++ b/src/calibre/library/cli.py
@@ -20,7 +20,8 @@ from calibre.utils.date import isoformat
FIELDS = set(['title', 'authors', 'author_sort', 'publisher', 'rating',
'timestamp', 'size', 'tags', 'comments', 'series', 'series_index',
- 'formats', 'isbn', 'uuid', 'pubdate', 'cover'])
+ 'formats', 'isbn', 'uuid', 'pubdate', 'cover', 'last_modified',
+ 'identifiers'])
def send_message(msg=''):
prints('Notifying calibre of the change')
diff --git a/src/calibre/library/custom_columns.py b/src/calibre/library/custom_columns.py
index 358daf9de6..dec55f2b02 100644
--- a/src/calibre/library/custom_columns.py
+++ b/src/calibre/library/custom_columns.py
@@ -188,7 +188,7 @@ class CustomColumns(object):
table=tn, column='value', datatype=v['datatype'],
colnum=v['num'], name=v['name'], display=v['display'],
is_multiple=is_m, is_category=is_category,
- is_editable=v['editable'])
+ is_editable=v['editable'], is_csp=False)
def get_custom(self, idx, label=None, num=None, index_is_id=False):
if label is not None:
diff --git a/src/calibre/library/database2.py b/src/calibre/library/database2.py
index 4be2ba4340..bb46411fc9 100644
--- a/src/calibre/library/database2.py
+++ b/src/calibre/library/database2.py
@@ -6,7 +6,8 @@ __docformat__ = 'restructuredtext en'
'''
The database used to store ebook metadata
'''
-import os, sys, shutil, cStringIO, glob, time, functools, traceback, re, json
+import os, sys, shutil, cStringIO, glob, time, functools, traceback, re, \
+ json, uuid
import threading, random
from itertools import repeat
from math import ceil
@@ -94,6 +95,31 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
return property(doc=doc, fget=fget, fset=fset)
+ @dynamic_property
+ def library_id(self):
+ doc = ('The UUID for this library. As long as the user only operates'
+ ' on libraries with calibre, it will be unique')
+
+ def fget(self):
+ if self._library_id_ is None:
+ ans = self.conn.get('SELECT uuid FROM library_id', all=False)
+ if ans is None:
+ ans = str(uuid.uuid4())
+ self.library_id = ans
+ else:
+ self._library_id_ = ans
+ return self._library_id_
+
+ def fset(self, val):
+ self._library_id_ = unicode(val)
+ self.conn.executescript('''
+ DELETE FROM library_id;
+ INSERT INTO library_id (uuid) VALUES ("%s");
+ '''%self._library_id_)
+ self.conn.commit()
+
+ return property(doc=doc, fget=fget, fset=fset)
+
def connect(self):
if 'win32' in sys.platform and len(self.library_path) + 4*self.PATH_LIMIT + 10 > 259:
raise ValueError('Path to library too long. Must be less than %d characters.'%(259-4*self.PATH_LIMIT-10))
@@ -120,6 +146,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
def __init__(self, library_path, row_factory=False, default_prefs=None,
read_only=False):
self.field_metadata = FieldMetadata()
+ self._library_id_ = None
# Create the lock to be used to guard access to the metadata writer
# queues. This must be an RLock, not a Lock
self.dirtied_lock = threading.RLock()
@@ -148,6 +175,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
self.is_case_sensitive = not iswindows and not isosx and \
not os.path.exists(self.dbpath.replace('metadata.db', 'MeTAdAtA.dB'))
SchemaUpgrade.__init__(self)
+ # Guarantee that the library_id is set
+ self.library_id
# if we are to copy the prefs and structure from some other DB, then
# we need to do it before we call initialize_dynamic
@@ -293,14 +322,14 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
'sort',
'author_sort',
'(SELECT group_concat(format) FROM data WHERE data.book=books.id) formats',
- 'isbn',
'path',
- 'lccn',
'pubdate',
- 'flags',
'uuid',
'has_cover',
- ('au_map', 'authors', 'author', 'aum_sortconcat(link.id, authors.name, authors.sort)')
+ ('au_map', 'authors', 'author',
+ 'aum_sortconcat(link.id, authors.name, authors.sort)'),
+ 'last_modified',
+ '(SELECT identifiers_concat(type, val) FROM identifiers WHERE identifiers.book=books.id) identifiers',
]
lines = []
for col in columns:
@@ -318,8 +347,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
self.FIELD_MAP = {'id':0, 'title':1, 'authors':2, 'timestamp':3,
'size':4, 'rating':5, 'tags':6, 'comments':7, 'series':8,
'publisher':9, 'series_index':10, 'sort':11, 'author_sort':12,
- 'formats':13, 'isbn':14, 'path':15, 'lccn':16, 'pubdate':17,
- 'flags':18, 'uuid':19, 'cover':20, 'au_map':21}
+ 'formats':13, 'path':14, 'pubdate':15, 'uuid':16, 'cover':17,
+ 'au_map':18, 'last_modified':19, 'identifiers':20}
for k,v in self.FIELD_MAP.iteritems():
self.field_metadata.set_field_record_index(k, v, prefer_custom=False)
@@ -391,11 +420,16 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
self.has_id = self.data.has_id
self.count = self.data.count
- for prop in ('author_sort', 'authors', 'comment', 'comments', 'isbn',
- 'publisher', 'rating', 'series', 'series_index', 'tags',
- 'title', 'timestamp', 'uuid', 'pubdate', 'ondevice'):
+ for prop in (
+ 'author_sort', 'authors', 'comment', 'comments',
+ 'publisher', 'rating', 'series', 'series_index', 'tags',
+ 'title', 'timestamp', 'uuid', 'pubdate', 'ondevice',
+ 'metadata_last_modified',
+ ):
+ fm = {'comment':'comments', 'metadata_last_modified':
+ 'last_modified'}.get(prop, prop)
setattr(self, prop, functools.partial(self.get_property,
- loc=self.FIELD_MAP['comments' if prop == 'comment' else prop]))
+ loc=self.FIELD_MAP[fm]))
setattr(self, 'title_sort', functools.partial(self.get_property,
loc=self.FIELD_MAP['sort']))
@@ -681,8 +715,20 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
if commit:
self.conn.commit()
+ def update_last_modified(self, book_ids, commit=False, now=None):
+ if now is None:
+ now = nowf()
+ if book_ids:
+ self.conn.executemany(
+ 'UPDATE books SET last_modified=? WHERE id=?',
+ [(now, book) for book in book_ids])
+ for book_id in book_ids:
+ self.data.set(book_id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
+ if commit:
+ self.conn.commit()
+
def dirtied(self, book_ids, commit=True):
- changed = False
+ self.update_last_modified(book_ids)
for book in book_ids:
with self.dirtied_lock:
# print 'dirtied: check id', book
@@ -691,21 +737,18 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
self.dirtied_sequence += 1
continue
# print 'book not already dirty'
- try:
- self.conn.execute(
- 'INSERT INTO metadata_dirtied (book) VALUES (?)',
- (book,))
- changed = True
- except IntegrityError:
- # Already in table
- pass
+
+ self.conn.execute(
+ 'INSERT OR IGNORE INTO metadata_dirtied (book) VALUES (?)',
+ (book,))
self.dirtied_cache[book] = self.dirtied_sequence
self.dirtied_sequence += 1
+
# If the commit doesn't happen, then the DB table will be wrong. This
# could lead to a problem because on restart, we won't put the book back
# into the dirtied_cache. We deal with this by writing the dirtied_cache
# back to the table on GUI exit. Not perfect, but probably OK
- if commit and changed:
+ if book_ids and commit:
self.conn.commit()
def get_a_dirtied_book(self):
@@ -790,6 +833,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
mi.pubdate = row[fm['pubdate']]
mi.uuid = row[fm['uuid']]
mi.title_sort = row[fm['sort']]
+ mi.metadata_last_modified = row[fm['last_modified']]
formats = row[fm['formats']]
if not formats:
formats = None
@@ -803,8 +847,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
if mi.series:
mi.series_index = row[fm['series_index']]
mi.rating = row[fm['rating']]
- mi.isbn = row[fm['isbn']]
id = idx if index_is_id else self.id(idx)
+ mi.set_identifiers(self.get_identifiers(id, index_is_id=True))
mi.application_id = id
mi.id = id
for key, meta in self.field_metadata.custom_iteritems():
@@ -911,10 +955,14 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
except (IOError, OSError):
time.sleep(0.2)
save_cover_data_to(data, path)
- self.conn.execute('UPDATE books SET has_cover=1 WHERE id=?', (id,))
+ now = nowf()
+ self.conn.execute(
+ 'UPDATE books SET has_cover=1,last_modified=? WHERE id=?',
+ (now, id))
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['cover'], True, row_is_id=True)
+ self.data.set(id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
if notify:
self.notify('cover', [id])
@@ -923,8 +971,12 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
def set_has_cover(self, id, val):
dval = 1 if val else 0
- self.conn.execute('UPDATE books SET has_cover=? WHERE id=?', (dval, id,))
+ now = nowf()
+ self.conn.execute(
+ 'UPDATE books SET has_cover=?,last_modified=? WHERE id=?',
+ (dval, now, id))
self.data.set(id, self.FIELD_MAP['cover'], val, row_is_id=True)
+ self.data.set(id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
def book_on_device(self, id):
if callable(self.book_on_device_func):
@@ -1195,7 +1247,11 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
i += 1
else:
new_cats['.'.join(comps)] = user_cats[k]
- self.prefs.set('user_categories', new_cats)
+ try:
+ if new_cats != user_cats:
+ self.prefs.set('user_categories', new_cats)
+ except:
+ pass
return new_cats
def get_categories(self, sort='name', ids=None, icon_map=None):
@@ -1218,7 +1274,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
for category in tb_cats.keys():
cat = tb_cats[category]
if not cat['is_category'] or cat['kind'] in ['user', 'search'] \
- or category in ['news', 'formats']:
+ or category in ['news', 'formats'] or cat.get('is_csp',
+ False):
continue
# Get the ids for the item values
if not cat['is_custom']:
@@ -1500,18 +1557,30 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
############# End get_categories
- def tags_older_than(self, tag, delta):
+ def tags_older_than(self, tag, delta, must_have_tag=None):
+ '''
+ Return the ids of all books having the tag ``tag`` that are older
+ than the specified time. Tag comparison is case insensitive.
+
+ :param delta: A timedelta object or None. If None, then all ids with
+ the tag are returned.
+ :param must_have_tag: If not None the list of matches will be
+ restricted to books that have this tag
+ '''
tag = tag.lower().strip()
+ mht = must_have_tag.lower().strip() if must_have_tag else None
now = nowf()
tindex = self.FIELD_MAP['timestamp']
gindex = self.FIELD_MAP['tags']
+ iindex = self.FIELD_MAP['id']
for r in self.data._data:
if r is not None:
- if (now - r[tindex]) > delta:
+ if delta is None or (now - r[tindex]) > delta:
tags = r[gindex]
- if tags and tag in [x.strip() for x in
- tags.lower().split(',')]:
- yield r[self.FIELD_MAP['id']]
+ if tags:
+ tags = [x.strip() for x in tags.lower().split(',')]
+ if tag in tags and (mht is None or mht in tags):
+ yield r[iindex]
def get_next_series_num_for(self, series):
series_id = self.conn.get('SELECT id from series WHERE name=?',
@@ -1636,8 +1705,6 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
doit(self.set_tags, id, mi.tags, notify=False, commit=False)
if mi.comments:
doit(self.set_comment, id, mi.comments, notify=False, commit=False)
- if mi.isbn and mi.isbn.strip():
- doit(self.set_isbn, id, mi.isbn, notify=False, commit=False)
if mi.series_index:
doit(self.set_series_index, id, mi.series_index, notify=False,
commit=False)
@@ -1647,6 +1714,15 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
doit(self.set_timestamp, id, mi.timestamp, notify=False,
commit=False)
+ mi_idents = mi.get_identifiers()
+ if mi_idents:
+ identifiers = self.get_identifiers(id, index_is_id=True)
+ for key, val in mi_idents.iteritems():
+ if val and val.strip(): # Don't delete an existing identifier
+ identifiers[icu_lower(key)] = val
+ self.set_identifiers(id, identifiers, notify=False, commit=False)
+
+
user_mi = mi.get_all_user_metadata(make_copy=False)
for key in user_mi.iterkeys():
if key in self.field_metadata and \
@@ -2425,14 +2501,84 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
if notify:
self.notify('metadata', [id])
- def set_isbn(self, id, isbn, notify=True, commit=True):
- self.conn.execute('UPDATE books SET isbn=? WHERE id=?', (isbn, id))
- self.dirtied([id], commit=False)
+ def isbn(self, idx, index_is_id=False):
+ row = self.data._data[idx] if index_is_id else self.data[idx]
+ if row is not None:
+ raw = row[self.FIELD_MAP['identifiers']]
+ if raw:
+ for x in raw.split(','):
+ if x.startswith('isbn:'):
+ return x[5:].strip()
+
+ def get_identifiers(self, idx, index_is_id=False):
+ ans = {}
+ row = self.data._data[idx] if index_is_id else self.data[idx]
+ if row is not None:
+ raw = row[self.FIELD_MAP['identifiers']]
+ if raw:
+ for x in raw.split(','):
+ key, _, val = x.partition(':')
+ key, val = key.strip(), val.strip()
+ if key and val:
+ ans[key] = val
+
+ return ans
+
+ def _clean_identifier(self, typ, val):
+ typ = icu_lower(typ).strip().replace(':', '').replace(',', '')
+ val = val.strip().replace(',', '|').replace(':', '|')
+ return typ, val
+
+ def set_identifier(self, id_, typ, val, notify=True, commit=True):
+ 'If val is empty, deletes identifier of type typ'
+ typ, val = self._clean_identifier(typ, val)
+ identifiers = self.get_identifiers(id_, index_is_id=True)
+ if not typ:
+ return
+ changed = False
+ if not val and typ in identifiers:
+ identifiers.pop(typ)
+ changed = True
+ self.conn.execute(
+ 'DELETE from identifiers WHERE book=? AND type=?',
+ (id_, typ))
+ if val and identifiers.get(typ, None) != val:
+ changed = True
+ identifiers[typ] = val
+ self.conn.execute(
+ 'INSERT OR REPLACE INTO identifiers (book, type, val) VALUES (?, ?, ?)',
+ (id_, typ, val))
+ if changed:
+ raw = ','.join(['%s:%s'%(k, v) for k, v in
+ identifiers.iteritems()])
+ self.data.set(id_, self.FIELD_MAP['identifiers'], raw,
+ row_is_id=True)
+ if commit:
+ self.conn.commit()
+ if notify:
+ self.notify('metadata', [id_])
+
+ def set_identifiers(self, id_, identifiers, notify=True, commit=True):
+ cleaned = {}
+ for typ, val in identifiers.iteritems():
+ typ, val = self._clean_identifier(typ, val)
+ if val:
+ cleaned[typ] = val
+ self.conn.execute('DELETE FROM identifiers WHERE book=?', (id_,))
+ self.conn.executemany(
+ 'INSERT INTO identifiers (book, type, val) VALUES (?, ?, ?)',
+ [(id_, k, v) for k, v in cleaned.iteritems()])
+ raw = ','.join(['%s:%s'%(k, v) for k, v in
+ cleaned.iteritems()])
+ self.data.set(id_, self.FIELD_MAP['identifiers'], raw,
+ row_is_id=True)
if commit:
self.conn.commit()
- self.data.set(id, self.FIELD_MAP['isbn'], isbn, row_is_id=True)
if notify:
- self.notify('metadata', [id])
+ self.notify('metadata', [id_])
+
+ def set_isbn(self, id_, isbn, notify=True, commit=True):
+ self.set_identifier(id_, 'isbn', isbn, notify=notify, commit=commit)
def add_catalog(self, path, title):
format = os.path.splitext(path)[1][1:].lower()
@@ -2730,7 +2876,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
prefix = self.library_path
FIELDS = set(['title', 'authors', 'author_sort', 'publisher', 'rating',
'timestamp', 'size', 'tags', 'comments', 'series', 'series_index',
- 'isbn', 'uuid', 'pubdate'])
+ 'uuid', 'pubdate', 'last_modified', 'identifiers'])
for x in self.custom_column_num_map:
FIELDS.add(x)
data = []
@@ -2745,6 +2891,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
data.append(x)
x['id'] = db_id
x['formats'] = []
+ isbn = self.isbn(db_id, index_is_id=True)
+ x['isbn'] = isbn if isbn else ''
if not x['authors']:
x['authors'] = _('Unknown')
x['authors'] = [i.replace('|', ',') for i in x['authors'].split(',')]
diff --git a/src/calibre/library/field_metadata.py b/src/calibre/library/field_metadata.py
index aff2803452..d89322954f 100644
--- a/src/calibre/library/field_metadata.py
+++ b/src/calibre/library/field_metadata.py
@@ -80,6 +80,8 @@ class FieldMetadata(dict):
rec_index: the index of the field in the db metadata record.
+ is_csp: field contains colon-separated pairs. Must also be a text field with is_multiple set
+
'''
VALID_DATA_TYPES = frozenset([None, 'rating', 'text', 'comments', 'datetime',
@@ -98,7 +100,8 @@ class FieldMetadata(dict):
'name':_('Authors'),
'search_terms':['authors', 'author'],
'is_custom':False,
- 'is_category':True}),
+ 'is_category':True,
+ 'is_csp': False}),
('series', {'table':'series',
'column':'name',
'link_column':'series',
@@ -109,7 +112,8 @@ class FieldMetadata(dict):
'name':_('Series'),
'search_terms':['series'],
'is_custom':False,
- 'is_category':True}),
+ 'is_category':True,
+ 'is_csp': False}),
('formats', {'table':None,
'column':None,
'datatype':'text',
@@ -118,7 +122,8 @@ class FieldMetadata(dict):
'name':_('Formats'),
'search_terms':['formats', 'format'],
'is_custom':False,
- 'is_category':True}),
+ 'is_category':True,
+ 'is_csp': False}),
('publisher', {'table':'publishers',
'column':'name',
'link_column':'publisher',
@@ -129,7 +134,8 @@ class FieldMetadata(dict):
'name':_('Publishers'),
'search_terms':['publisher'],
'is_custom':False,
- 'is_category':True}),
+ 'is_category':True,
+ 'is_csp': False}),
('rating', {'table':'ratings',
'column':'rating',
'link_column':'rating',
@@ -140,7 +146,8 @@ class FieldMetadata(dict):
'name':_('Ratings'),
'search_terms':['rating'],
'is_custom':False,
- 'is_category':True}),
+ 'is_category':True,
+ 'is_csp': False}),
('news', {'table':'news',
'column':'name',
'category_sort':'name',
@@ -150,7 +157,8 @@ class FieldMetadata(dict):
'name':_('News'),
'search_terms':[],
'is_custom':False,
- 'is_category':True}),
+ 'is_category':True,
+ 'is_csp': False}),
('tags', {'table':'tags',
'column':'name',
'link_column': 'tag',
@@ -161,7 +169,18 @@ class FieldMetadata(dict):
'name':_('Tags'),
'search_terms':['tags', 'tag'],
'is_custom':False,
- 'is_category':True}),
+ 'is_category':True,
+ 'is_csp': False}),
+ ('identifiers', {'table':None,
+ 'column':None,
+ 'datatype':'text',
+ 'is_multiple':',',
+ 'kind':'field',
+ 'name':_('Identifiers'),
+ 'search_terms':['identifiers', 'identifier', 'isbn'],
+ 'is_custom':False,
+ 'is_category':True,
+ 'is_csp': True}),
('author_sort',{'table':None,
'column':None,
'datatype':'text',
@@ -170,7 +189,8 @@ class FieldMetadata(dict):
'name':None,
'search_terms':['author_sort'],
'is_custom':False,
- 'is_category':False}),
+ 'is_category':False,
+ 'is_csp': False}),
('au_map', {'table':None,
'column':None,
'datatype':'text',
@@ -179,7 +199,8 @@ class FieldMetadata(dict):
'name':None,
'search_terms':[],
'is_custom':False,
- 'is_category':False}),
+ 'is_category':False,
+ 'is_csp': False}),
('comments', {'table':None,
'column':None,
'datatype':'text',
@@ -187,7 +208,9 @@ class FieldMetadata(dict):
'kind':'field',
'name':_('Comments'),
'search_terms':['comments', 'comment'],
- 'is_custom':False, 'is_category':False}),
+ 'is_custom':False,
+ 'is_category':False,
+ 'is_csp': False}),
('cover', {'table':None,
'column':None,
'datatype':'int',
@@ -196,16 +219,8 @@ class FieldMetadata(dict):
'name':None,
'search_terms':['cover'],
'is_custom':False,
- 'is_category':False}),
- ('flags', {'table':None,
- 'column':None,
- 'datatype':'text',
- 'is_multiple':None,
- 'kind':'field',
- 'name':None,
- 'search_terms':[],
- 'is_custom':False,
- 'is_category':False}),
+ 'is_category':False,
+ 'is_csp': False}),
('id', {'table':None,
'column':None,
'datatype':'int',
@@ -214,25 +229,18 @@ class FieldMetadata(dict):
'name':None,
'search_terms':[],
'is_custom':False,
- 'is_category':False}),
- ('isbn', {'table':None,
+ 'is_category':False,
+ 'is_csp': False}),
+ ('last_modified', {'table':None,
'column':None,
- 'datatype':'text',
+ 'datatype':'datetime',
'is_multiple':None,
'kind':'field',
- 'name':None,
- 'search_terms':['isbn'],
+ 'name':_('Date'),
+ 'search_terms':['last_modified'],
'is_custom':False,
- 'is_category':False}),
- ('lccn', {'table':None,
- 'column':None,
- 'datatype':'text',
- 'is_multiple':None,
- 'kind':'field',
- 'name':None,
- 'search_terms':[],
- 'is_custom':False,
- 'is_category':False}),
+ 'is_category':False,
+ 'is_csp': False}),
('ondevice', {'table':None,
'column':None,
'datatype':'text',
@@ -241,7 +249,8 @@ class FieldMetadata(dict):
'name':_('On Device'),
'search_terms':['ondevice'],
'is_custom':False,
- 'is_category':False}),
+ 'is_category':False,
+ 'is_csp': False}),
('path', {'table':None,
'column':None,
'datatype':'text',
@@ -250,7 +259,8 @@ class FieldMetadata(dict):
'name':None,
'search_terms':[],
'is_custom':False,
- 'is_category':False}),
+ 'is_category':False,
+ 'is_csp': False}),
('pubdate', {'table':None,
'column':None,
'datatype':'datetime',
@@ -259,7 +269,8 @@ class FieldMetadata(dict):
'name':_('Published'),
'search_terms':['pubdate'],
'is_custom':False,
- 'is_category':False}),
+ 'is_category':False,
+ 'is_csp': False}),
('series_index',{'table':None,
'column':None,
'datatype':'float',
@@ -268,7 +279,8 @@ class FieldMetadata(dict):
'name':None,
'search_terms':['series_index'],
'is_custom':False,
- 'is_category':False}),
+ 'is_category':False,
+ 'is_csp': False}),
('sort', {'table':None,
'column':None,
'datatype':'text',
@@ -277,7 +289,8 @@ class FieldMetadata(dict):
'name':_('Title Sort'),
'search_terms':['title_sort'],
'is_custom':False,
- 'is_category':False}),
+ 'is_category':False,
+ 'is_csp': False}),
('size', {'table':None,
'column':None,
'datatype':'float',
@@ -286,7 +299,8 @@ class FieldMetadata(dict):
'name':_('Size (MB)'),
'search_terms':['size'],
'is_custom':False,
- 'is_category':False}),
+ 'is_category':False,
+ 'is_csp': False}),
('timestamp', {'table':None,
'column':None,
'datatype':'datetime',
@@ -295,7 +309,8 @@ class FieldMetadata(dict):
'name':_('Date'),
'search_terms':['date'],
'is_custom':False,
- 'is_category':False}),
+ 'is_category':False,
+ 'is_csp': False}),
('title', {'table':None,
'column':None,
'datatype':'text',
@@ -304,7 +319,8 @@ class FieldMetadata(dict):
'name':_('Title'),
'search_terms':['title'],
'is_custom':False,
- 'is_category':False}),
+ 'is_category':False,
+ 'is_csp': False}),
('uuid', {'table':None,
'column':None,
'datatype':'text',
@@ -313,7 +329,8 @@ class FieldMetadata(dict):
'name':None,
'search_terms':[],
'is_custom':False,
- 'is_category':False}),
+ 'is_category':False,
+ 'is_csp': False}),
]
# }}}
@@ -335,7 +352,8 @@ class FieldMetadata(dict):
self._tb_cats[k]['display'] = {}
self._tb_cats[k]['is_editable'] = True
self._add_search_terms_to_map(k, v['search_terms'])
- self._tb_cats['timestamp']['display'] = {
+ for x in ('timestamp', 'last_modified'):
+ self._tb_cats[x]['display'] = {
'date_format': tweaks['gui_timestamp_display_format']}
self._tb_cats['pubdate']['display'] = {
'date_format': tweaks['gui_pubdate_display_format']}
@@ -441,7 +459,8 @@ class FieldMetadata(dict):
return l
def add_custom_field(self, label, table, column, datatype, colnum, name,
- display, is_editable, is_multiple, is_category):
+ display, is_editable, is_multiple, is_category,
+ is_csp=False):
key = self.custom_field_prefix + label
if key in self._tb_cats:
raise ValueError('Duplicate custom field [%s]'%(label))
@@ -454,7 +473,7 @@ class FieldMetadata(dict):
'colnum':colnum, 'display':display,
'is_custom':True, 'is_category':is_category,
'link_column':'value','category_sort':'value',
- 'is_editable': is_editable,}
+ 'is_csp' : is_csp, 'is_editable': is_editable,}
self._add_search_terms_to_map(key, [key])
self.custom_label_to_key_map[label] = key
if datatype == 'series':
@@ -466,7 +485,7 @@ class FieldMetadata(dict):
'colnum':None, 'display':{},
'is_custom':False, 'is_category':False,
'link_column':None, 'category_sort':None,
- 'is_editable': False,}
+ 'is_editable': False, 'is_csp': False}
self._add_search_terms_to_map(key, [key])
self.custom_label_to_key_map[label+'_index'] = key
@@ -515,7 +534,7 @@ class FieldMetadata(dict):
'datatype':None, 'is_multiple':None,
'kind':'user', 'name':name,
'search_terms':st, 'is_custom':False,
- 'is_category':True}
+ 'is_category':True, 'is_csp': False}
self._add_search_terms_to_map(label, st)
def add_search_category(self, label, name):
@@ -524,8 +543,8 @@ class FieldMetadata(dict):
self._tb_cats[label] = {'table':None, 'column':None,
'datatype':None, 'is_multiple':None,
'kind':'search', 'name':name,
- 'search_terms':[], 'is_custom':False,
- 'is_category':True}
+ 'search_terms':[], 'is_custom':False,
+ 'is_category':True, 'is_csp': False}
def set_field_record_index(self, label, index, prefer_custom=False):
if prefer_custom:
diff --git a/src/calibre/library/prefs.py b/src/calibre/library/prefs.py
index 233c717897..4ef1dcb35a 100644
--- a/src/calibre/library/prefs.py
+++ b/src/calibre/library/prefs.py
@@ -49,8 +49,7 @@ class DBPrefs(dict):
if self.disable_setting:
return
raw = self.to_raw(val)
- self.db.conn.execute('DELETE FROM preferences WHERE key=?', (key,))
- self.db.conn.execute('INSERT INTO preferences (key,val) VALUES (?,?)', (key,
+ self.db.conn.execute('INSERT OR REPLACE INTO preferences (key,val) VALUES (?,?)', (key,
raw))
self.db.conn.commit()
dict.__setitem__(self, key, val)
diff --git a/src/calibre/library/restore.py b/src/calibre/library/restore.py
index 76f3c0333d..e03edd449a 100644
--- a/src/calibre/library/restore.py
+++ b/src/calibre/library/restore.py
@@ -13,6 +13,7 @@ from calibre.ptempfile import TemporaryDirectory
from calibre.ebooks.metadata.opf2 import OPF
from calibre.library.database2 import LibraryDatabase2
from calibre.constants import filesystem_encoding
+from calibre.utils.date import utcfromtimestamp
from calibre import isbytestring
NON_EBOOK_EXTENSIONS = frozenset([
@@ -211,8 +212,8 @@ class Restore(Thread):
force_id=book['id'])
if book['mi'].uuid:
db.set_uuid(book['id'], book['mi'].uuid, commit=False, notify=False)
- db.conn.execute('UPDATE books SET path=? WHERE id=?', (book['path'],
- book['id']))
+ db.conn.execute('UPDATE books SET path=?,last_modified=? WHERE id=?', (book['path'],
+ utcfromtimestamp(book['timestamp']), book['id']))
for fmt, size, name in book['formats']:
db.conn.execute('''
diff --git a/src/calibre/library/schema_upgrades.py b/src/calibre/library/schema_upgrades.py
index 0b7a3f5350..d1f22d379b 100644
--- a/src/calibre/library/schema_upgrades.py
+++ b/src/calibre/library/schema_upgrades.py
@@ -8,6 +8,8 @@ __docformat__ = 'restructuredtext en'
import os
+from calibre.utils.date import isoformat, DEFAULT_DATE
+
class SchemaUpgrade(object):
def __init__(self):
@@ -468,4 +470,116 @@ class SchemaUpgrade(object):
'''
self.conn.executescript(script)
+ def upgrade_version_18(self):
+ '''
+ Add a library UUID.
+ Add an identifiers table.
+ Add a languages table.
+ Add a last_modified column.
+ NOTE: You cannot downgrade after this update; if you do,
+ any changes you make to book isbns will be lost.
+ '''
+ script = '''
+ DROP TABLE IF EXISTS library_id;
+ CREATE TABLE library_id ( id INTEGER PRIMARY KEY,
+ uuid TEXT NOT NULL,
+ UNIQUE(uuid)
+ );
+
+ DROP TABLE IF EXISTS identifiers;
+ CREATE TABLE identifiers ( id INTEGER PRIMARY KEY,
+ book INTEGER NON NULL,
+ type TEXT NON NULL DEFAULT "isbn" COLLATE NOCASE,
+ val TEXT NON NULL COLLATE NOCASE,
+ UNIQUE(book, type)
+ );
+
+ DROP TABLE IF EXISTS languages;
+ CREATE TABLE languages ( id INTEGER PRIMARY KEY,
+ lang_code TEXT NON NULL COLLATE NOCASE,
+ UNIQUE(lang_code)
+ );
+
+ DROP TABLE IF EXISTS books_languages_link;
+ CREATE TABLE books_languages_link ( id INTEGER PRIMARY KEY,
+ book INTEGER NOT NULL,
+ lang_code INTEGER NOT NULL,
+ item_order INTEGER NOT NULL DEFAULT 0,
+ UNIQUE(book, lang_code)
+ );
+
+ DROP TRIGGER IF EXISTS fkc_delete_on_languages;
+ CREATE TRIGGER fkc_delete_on_languages
+ BEFORE DELETE ON languages
+ BEGIN
+ SELECT CASE
+ WHEN (SELECT COUNT(id) FROM books_languages_link WHERE lang_code=OLD.id) > 0
+ THEN RAISE(ABORT, 'Foreign key violation: language is still referenced')
+ END;
+ END;
+
+ DROP TRIGGER IF EXISTS fkc_delete_on_languages_link;
+ CREATE TRIGGER fkc_delete_on_languages_link
+ BEFORE INSERT ON books_languages_link
+ BEGIN
+ SELECT CASE
+ WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
+ THEN RAISE(ABORT, 'Foreign key violation: book not in books')
+ WHEN (SELECT id from languages WHERE id=NEW.lang_code) IS NULL
+ THEN RAISE(ABORT, 'Foreign key violation: lang_code not in languages')
+ END;
+ END;
+
+ DROP TRIGGER IF EXISTS fkc_update_books_languages_link_a;
+ CREATE TRIGGER fkc_update_books_languages_link_a
+ BEFORE UPDATE OF book ON books_languages_link
+ BEGIN
+ SELECT CASE
+ WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
+ THEN RAISE(ABORT, 'Foreign key violation: book not in books')
+ END;
+ END;
+ DROP TRIGGER IF EXISTS fkc_update_books_languages_link_b;
+ CREATE TRIGGER fkc_update_books_languages_link_b
+ BEFORE UPDATE OF lang_code ON books_languages_link
+ BEGIN
+ SELECT CASE
+ WHEN (SELECT id from languages WHERE id=NEW.lang_code) IS NULL
+ THEN RAISE(ABORT, 'Foreign key violation: lang_code not in languages')
+ END;
+ END;
+
+ DROP INDEX IF EXISTS books_languages_link_aidx;
+ CREATE INDEX books_languages_link_aidx ON books_languages_link (lang_code);
+ DROP INDEX IF EXISTS books_languages_link_bidx;
+ CREATE INDEX books_languages_link_bidx ON books_languages_link (book);
+ DROP INDEX IF EXISTS languages_idx;
+ CREATE INDEX languages_idx ON languages (lang_code COLLATE NOCASE);
+
+ DROP TRIGGER IF EXISTS books_delete_trg;
+ CREATE TRIGGER books_delete_trg
+ AFTER DELETE ON books
+ BEGIN
+ DELETE FROM books_authors_link WHERE book=OLD.id;
+ DELETE FROM books_publishers_link WHERE book=OLD.id;
+ DELETE FROM books_ratings_link WHERE book=OLD.id;
+ DELETE FROM books_series_link WHERE book=OLD.id;
+ DELETE FROM books_tags_link WHERE book=OLD.id;
+ DELETE FROM books_languages_link WHERE book=OLD.id;
+ DELETE FROM data WHERE book=OLD.id;
+ DELETE FROM comments WHERE book=OLD.id;
+ DELETE FROM conversion_options WHERE book=OLD.id;
+ DELETE FROM books_plugin_data WHERE book=OLD.id;
+ DELETE FROM identifiers WHERE book=OLD.id;
+ END;
+
+ INSERT INTO identifiers (book, val) SELECT id,isbn FROM books WHERE isbn;
+
+ ALTER TABLE books ADD COLUMN last_modified TIMESTAMP NOT NULL DEFAULT "%s";
+
+ '''%isoformat(DEFAULT_DATE, sep=' ')
+ # Sqlite does not support non constant default values in alter
+ # statements
+ self.conn.executescript(script)
+
diff --git a/src/calibre/library/server/browse.py b/src/calibre/library/server/browse.py
index 7dfedcb6ff..0e132c7d11 100644
--- a/src/calibre/library/server/browse.py
+++ b/src/calibre/library/server/browse.py
@@ -673,6 +673,8 @@ class BrowseServer(object):
categories = [categories]
dbtags = []
for category in categories:
+ if category not in ccache:
+ continue
dbtag = None
for tag in ccache[key]:
if tag.name == category:
diff --git a/src/calibre/library/server/xml.py b/src/calibre/library/server/xml.py
index efbceb9771..14955dc541 100644
--- a/src/calibre/library/server/xml.py
+++ b/src/calibre/library/server/xml.py
@@ -89,13 +89,16 @@ class XMLServer(object):
for x in ('id', 'title', 'sort', 'author_sort', 'rating', 'size'):
kwargs[x] = serialize(record[FM[x]])
- for x in ('isbn', 'formats', 'series', 'tags', 'publisher',
- 'comments'):
+ for x in ('formats', 'series', 'tags', 'publisher',
+ 'comments', 'identifiers'):
y = record[FM[x]]
if x == 'tags':
y = format_tag_string(y, ',', ignore_max=True)
kwargs[x] = serialize(y) if y else ''
+ isbn = self.db.isbn(record[FM['id']], index_is_id=True)
+ kwargs['isbn'] = serialize(isbn if isbn else '')
+
kwargs['safe_title'] = ascii_filename(kwargs['title'])
c = kwargs.pop('comments')
diff --git a/src/calibre/library/sqlite.py b/src/calibre/library/sqlite.py
index 622d6b8459..1b595435ce 100644
--- a/src/calibre/library/sqlite.py
+++ b/src/calibre/library/sqlite.py
@@ -8,6 +8,7 @@ Wrapper for multi-threaded access to a single sqlite database connection. Serial
all calls.
'''
import sqlite3 as sqlite, traceback, time, uuid, sys, os
+import repr as reprlib
from sqlite3 import IntegrityError, OperationalError
from threading import Thread
from Queue import Queue
@@ -20,6 +21,7 @@ from calibre.utils.date import parse_date, isoformat
from calibre import isbytestring, force_unicode
from calibre.constants import iswindows, DEBUG
from calibre.utils.icu import strcmp
+from calibre import prints
global_lock = RLock()
@@ -87,6 +89,18 @@ class SortedConcatenate(object):
class SafeSortedConcatenate(SortedConcatenate):
sep = '|'
+class IdentifiersConcat(object):
+ '''String concatenation aggregator for the identifiers map'''
+ def __init__(self):
+ self.ans = []
+
+ def step(self, key, val):
+ self.ans.append(u'%s:%s'%(key, val))
+
+ def finalize(self):
+ return ','.join(self.ans)
+
+
class AumSortedConcatenate(object):
'''String concatenation aggregator for the author sort map'''
def __init__(self):
@@ -170,13 +184,13 @@ class DBThread(Thread):
detect_types=sqlite.PARSE_DECLTYPES|sqlite.PARSE_COLNAMES)
self.conn.execute('pragma cache_size=5000')
encoding = self.conn.execute('pragma encoding').fetchone()[0]
- c_ext_loaded = load_c_extensions(self.conn)
+ self.conn.create_aggregate('sortconcat', 2, SortedConcatenate)
+ self.conn.create_aggregate('sort_concat', 2, SafeSortedConcatenate)
+ self.conn.create_aggregate('identifiers_concat', 2, IdentifiersConcat)
+ load_c_extensions(self.conn)
self.conn.row_factory = sqlite.Row if self.row_factory else lambda cursor, row : list(row)
self.conn.create_aggregate('concat', 1, Concatenate)
self.conn.create_aggregate('aum_sortconcat', 3, AumSortedConcatenate)
- if not c_ext_loaded:
- self.conn.create_aggregate('sortconcat', 2, SortedConcatenate)
- self.conn.create_aggregate('sort_concat', 2, SafeSortedConcatenate)
self.conn.create_collation('PYNOCASE', partial(pynocase,
encoding=encoding))
self.conn.create_function('title_sort', 1, title_sort)
@@ -208,17 +222,21 @@ class DBThread(Thread):
except Exception, err:
ok, res = False, (err, traceback.format_exc())
else:
- func = getattr(self.conn, func)
+ bfunc = getattr(self.conn, func)
try:
for i in range(3):
try:
- ok, res = True, func(*args, **kwargs)
+ ok, res = True, bfunc(*args, **kwargs)
break
except OperationalError, err:
# Retry if unable to open db file
- if 'unable to open' not in str(err) or i == 2:
+ e = str(err)
+ if 'unable to open' not in e or i == 2:
+ if 'unable to open' in e:
+ prints('Unable to open database for func',
+ func, reprlib.repr(args),
+ reprlib.repr(kwargs))
raise
- traceback.print_exc()
time.sleep(0.5)
except Exception, err:
ok, res = False, (err, traceback.format_exc())
diff --git a/src/calibre/library/sqlite_custom.c b/src/calibre/library/sqlite_custom.c
index 650c474c2c..dee17c79d4 100644
--- a/src/calibre/library/sqlite_custom.c
+++ b/src/calibre/library/sqlite_custom.c
@@ -77,6 +77,7 @@ static void sort_concat_free(SortConcatList *list) {
free(list->vals[i]->val);
free(list->vals[i]);
}
+ free(list->vals);
}
static int sort_concat_cmp(const void *a_, const void *b_) {
@@ -142,11 +143,102 @@ static void sort_concat_finalize2(sqlite3_context *context) {
// }}}
+// identifiers_concat {{{
+
+typedef struct {
+ char *val;
+ size_t length;
+} IdentifiersConcatItem;
+
+typedef struct {
+ IdentifiersConcatItem **vals;
+ size_t count;
+ size_t length;
+} IdentifiersConcatList;
+
+static void identifiers_concat_step(sqlite3_context *context, int argc, sqlite3_value **argv) {
+ const char *key, *val;
+ size_t len = 0;
+ IdentifiersConcatList *list;
+
+ assert(argc == 2);
+
+ list = (IdentifiersConcatList*) sqlite3_aggregate_context(context, sizeof(*list));
+ if (list == NULL) return;
+
+ if (list->vals == NULL) {
+ list->vals = (IdentifiersConcatItem**)calloc(100, sizeof(IdentifiersConcatItem*));
+ if (list->vals == NULL) return;
+ list->length = 100;
+ list->count = 0;
+ }
+
+ if (list->count == list->length) {
+ list->vals = (IdentifiersConcatItem**)realloc(list->vals, list->length + 100);
+ if (list->vals == NULL) return;
+ list->length = list->length + 100;
+ }
+
+ list->vals[list->count] = (IdentifiersConcatItem*)calloc(1, sizeof(IdentifiersConcatItem));
+ if (list->vals[list->count] == NULL) return;
+
+ key = (char*) sqlite3_value_text(argv[0]);
+ val = (char*) sqlite3_value_text(argv[1]);
+ if (key == NULL || val == NULL) {return;}
+ len = strlen(key) + strlen(val) + 1;
+
+ list->vals[list->count]->val = (char*)calloc(len+1, sizeof(char));
+ if (list->vals[list->count]->val == NULL) return;
+ snprintf(list->vals[list->count]->val, len+1, "%s:%s", key, val);
+ list->vals[list->count]->length = len;
+
+ list->count = list->count + 1;
+
+}
+
+
+static void identifiers_concat_finalize(sqlite3_context *context) {
+ IdentifiersConcatList *list;
+ IdentifiersConcatItem *item;
+ char *ans, *pos;
+ size_t sz = 0, i;
+
+ list = (IdentifiersConcatList*) sqlite3_aggregate_context(context, sizeof(*list));
+ if (list == NULL || list->vals == NULL || list->count < 1) return;
+
+ for (i = 0; i < list->count; i++) {
+ sz += list->vals[i]->length;
+ }
+ sz += list->count; // Space for commas
+ ans = (char*)calloc(sz+2, sizeof(char));
+ if (ans == NULL) return;
+
+ pos = ans;
+
+ for (i = 0; i < list->count; i++) {
+ item = list->vals[i];
+ if (item == NULL || item->val == NULL) continue;
+ memcpy(pos, item->val, item->length);
+ pos += item->length;
+ *pos = ',';
+ pos += 1;
+ free(item->val);
+ free(item);
+ }
+ *(pos-1) = 0; // Remove trailing comma
+ sqlite3_result_text(context, ans, -1, SQLITE_TRANSIENT);
+ free(ans);
+ free(list->vals);
+}
+
+// }}}
+
MYEXPORT int sqlite3_extension_init(
sqlite3 *db, char **pzErrMsg, const sqlite3_api_routines *pApi){
SQLITE_EXTENSION_INIT2(pApi);
sqlite3_create_function(db, "sortconcat", 2, SQLITE_UTF8, NULL, NULL, sort_concat_step, sort_concat_finalize);
sqlite3_create_function(db, "sort_concat", 2, SQLITE_UTF8, NULL, NULL, sort_concat_step, sort_concat_finalize2);
+ sqlite3_create_function(db, "identifiers_concat", 2, SQLITE_UTF8, NULL, NULL, identifiers_concat_step, identifiers_concat_finalize);
return 0;
}
diff --git a/src/calibre/manual/faq.rst b/src/calibre/manual/faq.rst
index cb7f4d62ff..84f99414a8 100644
--- a/src/calibre/manual/faq.rst
+++ b/src/calibre/manual/faq.rst
@@ -81,7 +81,7 @@ Device Integration
What devices does |app| support?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-At the moment |app| has full support for the SONY PRS line, Barnes & Noble Nook, Cybook Gen 3/Opus, Amazon Kindle line, Entourage Edge, Longshine ShineBook, Ectaco Jetbook, BeBook/BeBook Mini, Irex Illiad/DR1000, Foxit eSlick, PocketBook 360, Italica, eClicto, Iriver Story, Airis dBook, Hanvon N515, Binatone Readme, Teclast K3, SpringDesign Alex, Kobo Reader, various Android phones and the iPhone/iPad. In addition, using the :guilabel:`Save to disk` function you can use it with any ebook reader that exports itself as a USB disk.
+At the moment |app| has full support for the SONY PRS line, Barnes & Noble Nook line, Cybook Gen 3/Opus, Amazon Kindle line, Entourage Edge, Longshine ShineBook, Ectaco Jetbook, BeBook/BeBook Mini, Irex Illiad/DR1000, Foxit eSlick, PocketBook line, Italica, eClicto, Iriver Story, Airis dBook, Hanvon N515, Binatone Readme, Teclast K3 and clones, SpringDesign Alex, Kobo Reader, various Android phones and the iPhone/iPad. In addition, using the :guilabel:`Connect to folder` function you can use it with any ebook reader that exports itself as a USB disk.
How can I help get my device supported in |app|?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -350,7 +350,7 @@ Why doesn't |app| have a column for foo?
|app| is designed to have columns for the most frequently and widely used fields. In addition, you can add any columns you like. Columns can be added via :guilabel:`Preferences->Interface->Add your own columns`.
Watch the tutorial `UI Power tips