From beb817b5cbd10ce4ed6b97d8c4af32b0b3aeea23 Mon Sep 17 00:00:00 2001 From: Charles Haley <> Date: Fri, 21 May 2010 19:19:51 +0100 Subject: [PATCH 01/22] Build enough of custom_column metadata support to implement reserved column names. Fix search to not go pink when the timer expires, and to not be always pink for devices. --- src/calibre/devices/metadata_serializer.py | 90 +++++++++++++++++++ src/calibre/devices/usbms/books.py | 25 +----- src/calibre/devices/usbms/driver.py | 7 +- .../dialogs/config/create_custom_column.py | 3 + src/calibre/gui2/library/models.py | 2 +- src/calibre/gui2/search_box.py | 5 +- 6 files changed, 104 insertions(+), 28 deletions(-) create mode 100644 src/calibre/devices/metadata_serializer.py diff --git a/src/calibre/devices/metadata_serializer.py b/src/calibre/devices/metadata_serializer.py new file mode 100644 index 0000000000..651ba1d678 --- /dev/null +++ b/src/calibre/devices/metadata_serializer.py @@ -0,0 +1,90 @@ +''' +Created on 21 May 2010 + +@author: charles +''' + +from calibre.constants import filesystem_encoding, preferred_encoding +from calibre import isbytestring +import json + +class MetadataSerializer(object): + + SERIALIZED_ATTRS = [ + 'lpath', 'title', 'authors', 'mime', 'size', 'tags', 'author_sort', + 'title_sort', 'comments', 'category', 'publisher', 'series', + 'series_index', 'rating', 'isbn', 'language', 'application_id', + 'book_producer', 'lccn', 'lcc', 'ddc', 'rights', 'publication_type', + 'uuid', + ] + + def to_json(self): + json = {} + for attr in self.SERIALIZED_ATTRS: + val = getattr(self, attr) + if isbytestring(val): + enc = filesystem_encoding if attr == 'lpath' else preferred_encoding + val = val.decode(enc, 'replace') + elif isinstance(val, (list, tuple)): + val = [x.decode(preferred_encoding, 'replace') if + isbytestring(x) else x for x in val] + json[attr] = val + return json + + def read_json(self, cache_file): + with open(cache_file, 'rb') as f: + js = json.load(f, encoding='utf-8') + return js + + def write_json(self, js, cache_file): + with open(cache_file, 'wb') as f: + json.dump(js, f, indent=2, encoding='utf-8') + + def string_to_value(self, string, col_metadata, column_label=None): + ''' + if column_label is none, col_metadata must be a dict containing custom + column metadata for one column. If column_label is not none, then + col_metadata must be a dict of custom column metadata, with column + labels as keys. Metadata for standard columns is always assumed to be in + the col_metadata dict. If column_label is not standard and is not in + col_metadata, check if it matches a custom column. If so, use that + column metadata. See get_column_metadata below. + ''' + pass + + def value_to_display(self, value, col_metadata, column_label=None): + pass + + def value_to_string (self, value, col_metadata, column_label=None): + pass + + def get_column_metadata(self, column_label = None, from_book=None): + ''' + if column_label is None, then from_book must not be None. Returns the + complete set of custom column metadata for that book. + + If column_label is not None, return the column metadata for the given + column. This works even if the label is for a built-in column. If + from_book is None, then column_label must be a current custom column + label or a standard label. If from_book is not None, then the column + metadata from that metadata set is returned if it exists, otherwise the + standard metadata for that column is returned. 
If neither is found, + return {} + ''' + pass + + def get_custom_column_labels(self, book): + ''' + returns a list of custom column attributes in the book metadata. + ''' + pass + + def get_standard_column_labels(self): + ''' + returns a list of standard attributes that should be in any book's + metadata + ''' + pass + +metadata_serializer = MetadataSerializer() + diff --git a/src/calibre/devices/usbms/books.py b/src/calibre/devices/usbms/books.py index 6e8811432a..8d79981ad7 100644 --- a/src/calibre/devices/usbms/books.py +++ b/src/calibre/devices/usbms/books.py @@ -9,20 +9,14 @@ import os, re, time, sys from calibre.ebooks.metadata import MetaInformation from calibre.devices.mime import mime_type_ext from calibre.devices.interface import BookList as _BookList -from calibre.constants import filesystem_encoding, preferred_encoding +from calibre.devices.metadata_serializer import MetadataSerializer +from calibre.constants import preferred_encoding from calibre import isbytestring -class Book(MetaInformation): +class Book(MetaInformation, MetadataSerializer): BOOK_ATTRS = ['lpath', 'size', 'mime', 'device_collections'] - JSON_ATTRS = [ - 'lpath', 'title', 'authors', 'mime', 'size', 'tags', 'author_sort', - 'title_sort', 'comments', 'category', 'publisher', 'series', - 'series_index', 'rating', 'isbn', 'language', 'application_id', - 'book_producer', 'lccn', 'lcc', 'ddc', 'rights', 'publication_type', - 'uuid', - ] def __init__(self, prefix, lpath, size=None, other=None): from calibre.ebooks.metadata.meta import path_to_ext @@ -82,19 +76,6 @@ class Book(MetaInformation): val = getattr(other, attr, None) setattr(self, attr, val) - def to_json(self): - json = {} - for attr in self.JSON_ATTRS: - val = getattr(self, attr) - if isbytestring(val): - enc = filesystem_encoding if attr == 'lpath' else preferred_encoding - val = val.decode(enc, 'replace') - elif isinstance(val, (list, tuple)): - val = [x.decode(preferred_encoding, 'replace') if - isbytestring(x) else x for x in val] - json[attr] = val - return json - class BookList(_BookList): def supports_collections(self): diff --git a/src/calibre/devices/usbms/driver.py b/src/calibre/devices/usbms/driver.py index 97c212775a..3c30827dbc 100644 --- a/src/calibre/devices/usbms/driver.py +++ b/src/calibre/devices/usbms/driver.py @@ -17,6 +17,7 @@ from itertools import cycle from calibre import prints, isbytestring from calibre.constants import filesystem_encoding +from calibre.devices.metadata_serializer import metadata_serializer as ms from calibre.devices.usbms.cli import CLI from calibre.devices.usbms.device import Device from calibre.devices.usbms.books import BookList, Book @@ -260,8 +261,7 @@ class USBMS(CLI, Device): os.makedirs(self.normalize_path(prefix)) js = [item.to_json() for item in booklists[listid] if hasattr(item, 'to_json')] - with open(self.normalize_path(os.path.join(prefix, self.METADATA_CACHE)), 'wb') as f: - json.dump(js, f, indent=2, encoding='utf-8') + ms.write_json(js, self.normalize_path(os.path.join(prefix, self.METADATA_CACHE))) write_prefix(self._main_prefix, 0) write_prefix(self._card_a_prefix, 1) write_prefix(self._card_b_prefix, 2) @@ -293,8 +293,7 @@ class USBMS(CLI, Device): cache_file = cls.normalize_path(os.path.join(prefix, name)) if os.access(cache_file, os.R_OK): try: - with open(cache_file, 'rb') as f: - js = json.load(f, encoding='utf-8') + js = ms.read_json(cache_file) for item in js: book = cls.book_class(prefix, item.get('lpath', None)) for key in item.keys(): diff --git 
a/src/calibre/gui2/dialogs/config/create_custom_column.py b/src/calibre/gui2/dialogs/config/create_custom_column.py index 5b470123a4..296a868fbf 100644 --- a/src/calibre/gui2/dialogs/config/create_custom_column.py +++ b/src/calibre/gui2/dialogs/config/create_custom_column.py @@ -8,6 +8,7 @@ from functools import partial from PyQt4.QtCore import SIGNAL from PyQt4.Qt import QDialog, Qt, QListWidgetItem, QVariant +from calibre.devices.metadata_serializer import metadata_serializer from calibre.gui2.dialogs.config.create_custom_column_ui import Ui_QCreateCustomColumn from calibre.gui2 import error_dialog @@ -102,6 +103,8 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn): return self.simple_error('', _('No lookup name was provided')) if not col_heading: return self.simple_error('', _('No column heading was provided')) + if col in metadata_serializer.SERIALIZED_ATTRS: + return self.simple_error('', _('The lookup name %s is reserved and cannot be used')%col) bad_col = False if col in self.parent.custcols: if not self.editing_col or self.parent.custcols[col]['num'] != self.orig_column_number: diff --git a/src/calibre/gui2/library/models.py b/src/calibre/gui2/library/models.py index 0fc2c7f7ed..bc0367b766 100644 --- a/src/calibre/gui2/library/models.py +++ b/src/calibre/gui2/library/models.py @@ -883,7 +883,7 @@ class DeviceBooksModel(BooksModel): # {{{ self.reset() self.last_search = text if self.last_search: - self.searched.emit(False) + self.searched.emit(True) def sort(self, col, order, reset=True): diff --git a/src/calibre/gui2/search_box.py b/src/calibre/gui2/search_box.py index 230debd598..575f5563d6 100644 --- a/src/calibre/gui2/search_box.py +++ b/src/calibre/gui2/search_box.py @@ -136,12 +136,12 @@ class SearchBox2(QComboBox): def text_edited_slot(self, text): if self.as_you_type: text = unicode(text) - self.prev_text = text self.timer = self.startTimer(self.__class__.INTERVAL) def timerEvent(self, event): self.killTimer(event.timerId()) if event.timerId() == self.timer: + self.timer = None self.do_search() @property @@ -190,6 +190,9 @@ class SearchBox2(QComboBox): def set_search_string(self, txt): self.normalize_state() self.setEditText(txt) + if self.timer is not None: # Turn off any timers that got started in setEditText + self.killTimer(self.timer) + self.timer = None self.search.emit(txt, False) self.line_edit.end(False) self.initial_state = False From 7969c9ead533418dfced6e439dfd4eed53fc6868 Mon Sep 17 00:00:00 2001 From: Charles Haley <> Date: Sat, 22 May 2010 08:50:22 +0100 Subject: [PATCH 02/22] Back out metadata/json changes I made --- src/calibre/devices/metadata_serializer.py | 90 ---------------------- src/calibre/devices/usbms/books.py | 25 +++++- src/calibre/devices/usbms/driver.py | 7 +- 3 files changed, 26 insertions(+), 96 deletions(-) delete mode 100644 src/calibre/devices/metadata_serializer.py diff --git a/src/calibre/devices/metadata_serializer.py b/src/calibre/devices/metadata_serializer.py deleted file mode 100644 index 651ba1d678..0000000000 --- a/src/calibre/devices/metadata_serializer.py +++ /dev/null @@ -1,90 +0,0 @@ -''' -Created on 21 May 2010 - -@author: charles -''' - -from calibre.constants import filesystem_encoding, preferred_encoding -from calibre import isbytestring -import json - -class MetadataSerializer(object): - - SERIALIZED_ATTRS = [ - 'lpath', 'title', 'authors', 'mime', 'size', 'tags', 'author_sort', - 'title_sort', 'comments', 'category', 'publisher', 'series', - 'series_index', 'rating', 'isbn', 'language', 'application_id', - 
'book_producer', 'lccn', 'lcc', 'ddc', 'rights', 'publication_type', - 'uuid', - ] - - def to_json(self): - json = {} - for attr in self.SERIALIZED_ATTRS: - val = getattr(self, attr) - if isbytestring(val): - enc = filesystem_encoding if attr == 'lpath' else preferred_encoding - val = val.decode(enc, 'replace') - elif isinstance(val, (list, tuple)): - val = [x.decode(preferred_encoding, 'replace') if - isbytestring(x) else x for x in val] - json[attr] = val - return json - - def read_json(self, cache_file): - with open(cache_file, 'rb') as f: - js = json.load(f, encoding='utf-8') - return js - - def write_json(self, js, cache_file): - with open(cache_file, 'wb') as f: - json.dump(js, f, indent=2, encoding='utf-8') - - def string_to_value(self, string, col_metadata, column_label=None): - ''' - if column_label is none, col_metadata must be a dict containing custom - column metadata for one column. If column_label is not none, then - col_metadata must be a dict of custom column metadata, with column - labels as keys. Metadata for standard columns is always assumed to be in - the col_metadata dict. If column_label is not standard and is not in - col_metadata, check if it matches a custom column. If so, use that - column metadata. See get_column_metadata below. - ''' - pass - - def value_to_display(self, value, col_metadata, column_label=None): - pass - - def value_to_string (self, value, col_metadata, column_label=None): - pass - - def get_column_metadata(self, column_label = None, from_book=None): - ''' - if column_label is None, then from_book must not be None. Returns the - complete set of custom column metadata for that book. - - If column_label is not None, return the column metadata for the given - column. This works even if the label is for a built-in column. If - from_book is None, then column_label must be a current custom column - label or a standard label. If from_book is not None, then the column - metadata from that metadata set is returned if it exists, otherwise the - standard metadata for that column is returned. If neither is found, - return {} - ''' - pass - - def get_custom_column_labels(self, book): - ''' - returns a list of custom column attributes in the book metadata. 
- ''' - pass - - def get_standard_column_labels(self): - ''' - returns a list of standard attributes that should be in any book's - metadata - ''' - pass - -metadata_serializer = MetadataSerializer() - diff --git a/src/calibre/devices/usbms/books.py b/src/calibre/devices/usbms/books.py index 8d79981ad7..6e8811432a 100644 --- a/src/calibre/devices/usbms/books.py +++ b/src/calibre/devices/usbms/books.py @@ -9,14 +9,20 @@ import os, re, time, sys from calibre.ebooks.metadata import MetaInformation from calibre.devices.mime import mime_type_ext from calibre.devices.interface import BookList as _BookList -from calibre.devices.metadata_serializer import MetadataSerializer -from calibre.constants import preferred_encoding +from calibre.constants import filesystem_encoding, preferred_encoding from calibre import isbytestring -class Book(MetaInformation, MetadataSerializer): +class Book(MetaInformation): BOOK_ATTRS = ['lpath', 'size', 'mime', 'device_collections'] + JSON_ATTRS = [ + 'lpath', 'title', 'authors', 'mime', 'size', 'tags', 'author_sort', + 'title_sort', 'comments', 'category', 'publisher', 'series', + 'series_index', 'rating', 'isbn', 'language', 'application_id', + 'book_producer', 'lccn', 'lcc', 'ddc', 'rights', 'publication_type', + 'uuid', + ] def __init__(self, prefix, lpath, size=None, other=None): from calibre.ebooks.metadata.meta import path_to_ext @@ -76,6 +82,19 @@ class Book(MetaInformation, MetadataSerializer): val = getattr(other, attr, None) setattr(self, attr, val) + def to_json(self): + json = {} + for attr in self.JSON_ATTRS: + val = getattr(self, attr) + if isbytestring(val): + enc = filesystem_encoding if attr == 'lpath' else preferred_encoding + val = val.decode(enc, 'replace') + elif isinstance(val, (list, tuple)): + val = [x.decode(preferred_encoding, 'replace') if + isbytestring(x) else x for x in val] + json[attr] = val + return json + class BookList(_BookList): def supports_collections(self): diff --git a/src/calibre/devices/usbms/driver.py b/src/calibre/devices/usbms/driver.py index 3c30827dbc..97c212775a 100644 --- a/src/calibre/devices/usbms/driver.py +++ b/src/calibre/devices/usbms/driver.py @@ -17,7 +17,6 @@ from itertools import cycle from calibre import prints, isbytestring from calibre.constants import filesystem_encoding -from calibre.devices.metadata_serializer import metadata_serializer as ms from calibre.devices.usbms.cli import CLI from calibre.devices.usbms.device import Device from calibre.devices.usbms.books import BookList, Book @@ -261,7 +260,8 @@ class USBMS(CLI, Device): os.makedirs(self.normalize_path(prefix)) js = [item.to_json() for item in booklists[listid] if hasattr(item, 'to_json')] - ms.write_json(js, self.normalize_path(os.path.join(prefix, self.METADATA_CACHE))) + with open(self.normalize_path(os.path.join(prefix, self.METADATA_CACHE)), 'wb') as f: + json.dump(js, f, indent=2, encoding='utf-8') write_prefix(self._main_prefix, 0) write_prefix(self._card_a_prefix, 1) write_prefix(self._card_b_prefix, 2) @@ -293,7 +293,8 @@ class USBMS(CLI, Device): cache_file = cls.normalize_path(os.path.join(prefix, name)) if os.access(cache_file, os.R_OK): try: - js = ms.read_json(cache_file) + with open(cache_file, 'rb') as f: + js = json.load(f, encoding='utf-8') for item in js: book = cls.book_class(prefix, item.get('lpath', None)) for key in item.keys(): From 0cec5cbf832591f4421be837865f786622569b67 Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Sun, 23 May 2010 11:57:16 -0600 Subject: [PATCH 03/22] Separate out driver for The Book as it uses 
a different ebook directory than the N516 --- src/calibre/customize/builtins.py | 3 ++- src/calibre/devices/hanvon/driver.py | 12 +++++++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/calibre/customize/builtins.py b/src/calibre/customize/builtins.py index 34c2de7515..6bc94d30b0 100644 --- a/src/calibre/customize/builtins.py +++ b/src/calibre/customize/builtins.py @@ -450,7 +450,7 @@ from calibre.devices.eslick.driver import ESLICK from calibre.devices.nuut2.driver import NUUT2 from calibre.devices.iriver.driver import IRIVER_STORY from calibre.devices.binatone.driver import README -from calibre.devices.hanvon.driver import N516, EB511, ALEX, AZBOOKA +from calibre.devices.hanvon.driver import N516, EB511, ALEX, AZBOOKA, THEBOOK from calibre.devices.edge.driver import EDGE from calibre.devices.teclast.driver import TECLAST_K3, NEWSMY, IPAPYRUS from calibre.devices.sne.driver import SNE @@ -530,6 +530,7 @@ plugins += [ EB600, README, N516, + THEBOOK, EB511, ELONEX, TECLAST_K3, diff --git a/src/calibre/devices/hanvon/driver.py b/src/calibre/devices/hanvon/driver.py index aa384910cd..7a0de3064e 100644 --- a/src/calibre/devices/hanvon/driver.py +++ b/src/calibre/devices/hanvon/driver.py @@ -24,7 +24,7 @@ class N516(USBMS): VENDOR_ID = [0x0525] PRODUCT_ID = [0xa4a5] - BCD = [0x323, 0x326, 0x399] + BCD = [0x323, 0x326] VENDOR_NAME = 'INGENIC' WINDOWS_MAIN_MEM = '_FILE-STOR_GADGE' @@ -34,6 +34,16 @@ class N516(USBMS): EBOOK_DIR_MAIN = 'e_book' SUPPORTS_SUB_DIRS = True +class THEBOOK(N516): + name = 'The Book driver' + gui_name = 'The Book' + description = _('Communicate with The Book reader.') + author = 'Kovid Goyal' + + BCD = [0x399] + MAIN_MEMORY_VOLUME_LABEL = 'The Book Main Memory' + EBOOK_DIR_MAIN = 'My books' + class ALEX(N516): name = 'Alex driver' From d7fa2363a878f6b9e5676d6ea7a83cb241e9bacd Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Sun, 23 May 2010 12:28:24 -0600 Subject: [PATCH 04/22] Timing infrastructure for the content server --- src/calibre/library/server/utils.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/src/calibre/library/server/utils.py b/src/calibre/library/server/utils.py index 1732da540c..7dc0884e1a 100644 --- a/src/calibre/library/server/utils.py +++ b/src/calibre/library/server/utils.py @@ -5,7 +5,9 @@ __license__ = 'GPL v3' __copyright__ = '2010, Kovid Goyal ' __docformat__ = 'restructuredtext en' -from calibre import strftime as _strftime +import time + +from calibre import strftime as _strftime, prints from calibre.utils.date import now as nowf @@ -20,6 +22,19 @@ def expose(func): return cherrypy.expose(do) +def timeit(func): + + def do(self, *args, **kwargs): + if self.opts.develop: + start = time.time() + ans = func(self, *args, **kwargs) + if self.opts.develop: + prints('Function', func.__name__, 'called with args:', args, kwargs) + prints('\tTime:', func.__name__, time.time()-start) + return ans + + return do + def strftime(fmt='%Y/%m/%d %H:%M:%S', dt=None): if not hasattr(dt, 'timetuple'): dt = nowf() From 3b557de4c724384587f99ff2ad2f496e3db46011 Mon Sep 17 00:00:00 2001 From: Charles Haley <> Date: Sun, 23 May 2010 21:34:19 +0100 Subject: [PATCH 05/22] First pass at converting db2.get_categories to return a complete dict --- src/calibre/gui2/tag_view.py | 8 +-- src/calibre/library/custom_columns.py | 4 +- src/calibre/library/database2.py | 61 +++++++++------- src/calibre/utils/ordered_dict.py | 100 ++++++++++++++++++++++++++ 4 files changed, 141 insertions(+), 32 deletions(-) create mode 
100644 src/calibre/utils/ordered_dict.py diff --git a/src/calibre/gui2/tag_view.py b/src/calibre/gui2/tag_view.py index 8a01b6ad27..0fb72e071b 100644 --- a/src/calibre/gui2/tag_view.py +++ b/src/calibre/gui2/tag_view.py @@ -199,8 +199,8 @@ class TagsModel(QAbstractItemModel): # {{{ categories_orig = [_('Authors'), _('Series'), _('Formats'), _('Publishers'), _('Ratings'), _('News'), _('Tags')] - row_map_orig = ['author', 'series', 'format', 'publisher', 'rating', - 'news', 'tag'] + row_map_orig = ['authors', 'series', 'formats', 'publishers', 'ratings', + 'news', 'tags'] tags_categories_start= 7 search_keys=['search', _('Searches')] @@ -264,8 +264,8 @@ class TagsModel(QAbstractItemModel): # {{{ self.cat_icon_map.append(self.cat_icon_map_orig[i]) # Clean up the author's tags, getting rid of the '|' characters - if data['author'] is not None: - for t in data['author']: + if data['authors'] is not None: + for t in data['authors']: t.name = t.name.replace('|', ',') # Now do the user-defined categories. There is a time/space tradeoff here. diff --git a/src/calibre/library/custom_columns.py b/src/calibre/library/custom_columns.py index a8375c6b5c..b6ada01b8c 100644 --- a/src/calibre/library/custom_columns.py +++ b/src/calibre/library/custom_columns.py @@ -144,8 +144,8 @@ class CustomColumns(object): for i, v in self.custom_column_num_map.items(): if v['normalized']: tn = 'custom_column_{0}'.format(i) - self.tag_browser_categories[tn] = [v['label'], 'value'] - self.tag_browser_datatype[v['label']] = v['datatype'] + self.tag_browser_categories[v['label']] = {'table':tn, 'column':'value', 'type':v['datatype'], 'name':v['name']} + #self.tag_browser_datatype[v['label']] = v['datatype'] def get_custom(self, idx, label=None, num=None, index_is_id=False): if label is not None: diff --git a/src/calibre/library/database2.py b/src/calibre/library/database2.py index ed56d35bdc..12398de918 100644 --- a/src/calibre/library/database2.py +++ b/src/calibre/library/database2.py @@ -33,6 +33,7 @@ from calibre.customize.ui import run_plugins_on_import from calibre.utils.filenames import ascii_filename from calibre.utils.date import utcnow, now as nowf, utcfromtimestamp +from calibre.utils.ordered_dict import OrderedDict from calibre.ebooks import BOOK_EXTENSIONS, check_ebook_format if iswindows: @@ -123,22 +124,25 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns): if isinstance(self.dbpath, unicode): self.dbpath = self.dbpath.encode(filesystem_encoding) - self.tag_browser_categories = { - 'tags' : ['tag', 'name'], - 'series' : ['series', 'name'], - 'publishers': ['publisher', 'name'], - 'authors' : ['author', 'name'], - 'news' : ['news', 'name'], - 'ratings' : ['rating', 'rating'] - } - self.tag_browser_datatype = { - 'tag' : 'textmult', - 'series' : None, - 'publisher' : 'text', - 'author' : 'text', - 'news' : None, - 'rating' : 'rating', - } + # Order as has been customary in the tags pane. 
+ self.tag_browser_categories = OrderedDict([ + ('authors', {'table':'authors', 'column':'name', 'type':'text', 'name':_('Authors')}), + ('series', {'table':'series', 'column':'name', 'type':None, 'name':_('Series')}), + ('formats', {'table':None, 'column':None, 'type':None, 'name':_('Formats')}), + ('publishers',{'table':'publishers', 'column':'name', 'type':'text', 'name':_('Publishers')}), + ('ratings', {'table':'ratings', 'column':'rating', 'type':'rating', 'name':_('Ratings')}), + ('news', {'table':'news', 'column':'name', 'type':None, 'name':_('News')}), + ('tags', {'table':'tags', 'column':'name', 'type':'textmult', 'name':_('Tags')}), + ]) + +# self.tag_browser_datatype = { +# 'tag' : 'textmult', +# 'series' : None, +# 'publisher' : 'text', +# 'author' : 'text', +# 'news' : None, +# 'rating' : 'rating', +# } self.tag_browser_formatters = {'rating': lambda x:u'\u2605'*int(round(x/2.))} @@ -653,17 +657,22 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns): self.books_list_filter.change([] if not ids else ids) categories = {} - for tn, cn in self.tag_browser_categories.items(): + for category in self.tag_browser_categories.keys(): + tn = self.tag_browser_categories[category]['table'] + categories[category] = [] #reserve the position in the ordered list + if tn is None: + continue + cn = self.tag_browser_categories[category]['column'] if ids is None: - query = 'SELECT id, {0}, count FROM tag_browser_{1}'.format(cn[1], tn) + query = 'SELECT id, {0}, count FROM tag_browser_{1}'.format(cn, tn) else: - query = 'SELECT id, {0}, count FROM tag_browser_filtered_{1}'.format(cn[1], tn) + query = 'SELECT id, {0}, count FROM tag_browser_filtered_{1}'.format(cn, tn) if sort_on_count: query += ' ORDER BY count DESC' else: - query += ' ORDER BY {0} ASC'.format(cn[1]) + query += ' ORDER BY {0} ASC'.format(cn) data = self.conn.get(query) - category = cn[0] + # category = cn[0] icon, tooltip = None, '' if icon_map: if category in icon_map: @@ -671,14 +680,14 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns): else: icon = icon_map['*custom'] tooltip = self.custom_column_label_map[category]['name'] - datatype = self.tag_browser_datatype[category] + datatype = self.tag_browser_categories[category]['type'] formatter = self.tag_browser_formatters.get(datatype, lambda x: x) categories[category] = [Tag(formatter(r[1]), count=r[2], id=r[0], icon=icon, tooltip = tooltip) for r in data if r[2] > 0 and (datatype != 'rating' or len(formatter(r[1])) > 0)] - categories['format'] = [] + categories['formats'] = [] for fmt in self.conn.get('SELECT DISTINCT format FROM data'): fmt = fmt[0] if ids is not None: @@ -693,13 +702,13 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns): WHERE format="%s"'''%fmt, all=False) if count > 0: - categories['format'].append(Tag(fmt, count=count)) + categories['formats'].append(Tag(fmt, count=count)) if sort_on_count: - categories['format'].sort(cmp=lambda x,y:cmp(x.count, y.count), + categories['formats'].sort(cmp=lambda x,y:cmp(x.count, y.count), reverse=True) else: - categories['format'].sort(cmp=lambda x,y:cmp(x.name, y.name)) + categories['formats'].sort(cmp=lambda x,y:cmp(x.name, y.name)) return categories def tags_older_than(self, tag, delta): diff --git a/src/calibre/utils/ordered_dict.py b/src/calibre/utils/ordered_dict.py new file mode 100644 index 0000000000..95a0af9e76 --- /dev/null +++ b/src/calibre/utils/ordered_dict.py @@ -0,0 +1,100 @@ +from UserDict import DictMixin + +class OrderedDict(dict, DictMixin): 
+ + def __init__(self, *args, **kwds): + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__end + except AttributeError: + self.clear() + self.update(*args, **kwds) + + def clear(self): + self.__end = end = [] + end += [None, end, end] # sentinel node for doubly linked list + self.__map = {} # key --> [key, prev, next] + dict.clear(self) + + def __setitem__(self, key, value): + if key not in self: + end = self.__end + curr = end[1] + curr[2] = end[1] = self.__map[key] = [key, curr, end] + dict.__setitem__(self, key, value) + + def __delitem__(self, key): + dict.__delitem__(self, key) + key, prev, next = self.__map.pop(key) + prev[2] = next + next[1] = prev + + def __iter__(self): + end = self.__end + curr = end[2] + while curr is not end: + yield curr[0] + curr = curr[2] + + def __reversed__(self): + end = self.__end + curr = end[1] + while curr is not end: + yield curr[0] + curr = curr[1] + + def popitem(self, last=True): + if not self: + raise KeyError('dictionary is empty') + if last: + key = reversed(self).next() + else: + key = iter(self).next() + value = self.pop(key) + return key, value + + def __reduce__(self): + items = [[k, self[k]] for k in self] + tmp = self.__map, self.__end + del self.__map, self.__end + inst_dict = vars(self).copy() + self.__map, self.__end = tmp + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def keys(self): + return list(self) + + setdefault = DictMixin.setdefault + update = DictMixin.update + pop = DictMixin.pop + values = DictMixin.values + items = DictMixin.items + iterkeys = DictMixin.iterkeys + itervalues = DictMixin.itervalues + iteritems = DictMixin.iteritems + + def __repr__(self): + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + + def copy(self): + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + d = cls() + for key in iterable: + d[key] = value + return d + + def __eq__(self, other): + if isinstance(other, OrderedDict): + return len(self)==len(other) and self.items() == other.items() + return dict.__eq__(self, other) + + def __ne__(self, other): + return not self == other From c948360dcf0d804d57856699fce3f3e36cf9c37b Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Sun, 23 May 2010 19:02:10 -0600 Subject: [PATCH 06/22] Fix #5598 (ePub as a supported type for Palm Pre) --- src/calibre/devices/misc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/calibre/devices/misc.py b/src/calibre/devices/misc.py index b0d5718d7f..9d58bbcae6 100644 --- a/src/calibre/devices/misc.py +++ b/src/calibre/devices/misc.py @@ -17,7 +17,7 @@ class PALMPRE(USBMS): supported_platforms = ['windows', 'osx', 'linux'] # Ordered list of supported formats - FORMATS = ['mobi', 'prc', 'pdb', 'txt'] + FORMATS = ['epub', 'mobi', 'prc', 'pdb', 'txt'] VENDOR_ID = [0x0830] PRODUCT_ID = [0x8004, 0x8002, 0x0101] From 359c0cd40e06a4ce261efcc85bb44fce4bd87eab Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Sun, 23 May 2010 19:13:44 -0600 Subject: [PATCH 07/22] Start refactoring of Content Server to use Routes for URL dispatching and etree instead of genshi for templating. OPDS feeds are currently broken. 
--- resources/content_server/gui.js | 2 +- src/calibre/library/server/base.py | 44 +- src/calibre/library/server/cache.py | 18 + src/calibre/library/server/content.py | 14 +- src/calibre/library/server/mobile.py | 6 +- src/calibre/library/server/opds.py | 165 ++-- src/calibre/library/server/utils.py | 23 +- src/calibre/library/server/xml.py | 101 ++- src/routes/__init__.py | 142 +++ src/routes/base.py | 4 + src/routes/lru.py | 70 ++ src/routes/mapper.py | 1161 +++++++++++++++++++++++++ src/routes/middleware.py | 146 ++++ src/routes/route.py | 742 ++++++++++++++++ src/routes/util.py | 503 +++++++++++ 15 files changed, 3013 insertions(+), 128 deletions(-) create mode 100644 src/calibre/library/server/cache.py create mode 100644 src/routes/__init__.py create mode 100644 src/routes/base.py create mode 100644 src/routes/lru.py create mode 100644 src/routes/mapper.py create mode 100644 src/routes/middleware.py create mode 100644 src/routes/route.py create mode 100644 src/routes/util.py diff --git a/resources/content_server/gui.js b/resources/content_server/gui.js index ba2b0af940..9c20037207 100644 --- a/resources/content_server/gui.js +++ b/resources/content_server/gui.js @@ -123,7 +123,7 @@ function fetch_library_books(start, num, timeout, sort, order, search) { current_library_request = $.ajax({ type: "GET", - url: "library", + url: "xml", data: data, cache: false, timeout: timeout, //milliseconds diff --git a/src/calibre/library/server/base.py b/src/calibre/library/server/base.py index 666ce52ffc..a8d4ae899c 100644 --- a/src/calibre/library/server/base.py +++ b/src/calibre/library/server/base.py @@ -14,14 +14,46 @@ import cherrypy from calibre.constants import __appname__, __version__ from calibre.utils.date import fromtimestamp from calibre.library.server import listen_on, log_access_file, log_error_file +from calibre.library.server.utils import expose from calibre.utils.mdns import publish as publish_zeroconf, \ stop_server as stop_zeroconf, get_external_ip from calibre.library.server.content import ContentServer from calibre.library.server.mobile import MobileServer from calibre.library.server.xml import XMLServer from calibre.library.server.opds import OPDSServer +from calibre.library.server.cache import Cache -class LibraryServer(ContentServer, MobileServer, XMLServer, OPDSServer): + +class DispatchController(object): # {{{ + + def __init__(self): + self.dispatcher = cherrypy.dispatch.RoutesDispatcher() + self.funcs = [] + self.seen = set([]) + + def __call__(self, name, route, func, **kwargs): + if name in self.seen: + raise NameError('Route name: '+ repr(name) + ' already used') + self.seen.add(name) + kwargs['action'] = 'f_%d'%len(self.funcs) + self.dispatcher.connect(name, route, self, **kwargs) + self.funcs.append(expose(func)) + + def __getattr__(self, attr): + if not attr.startswith('f_'): + raise AttributeError(attr + ' not found') + num = attr.rpartition('_')[-1] + try: + num = int(num) + except: + raise AttributeError(attr + ' not found') + if num < 0 or num >= len(self.funcs): + raise AttributeError(attr + ' not found') + return self.funcs[num] + +# }}} + +class LibraryServer(ContentServer, MobileServer, XMLServer, OPDSServer, Cache): server_name = __appname__ + '/' + __version__ @@ -88,8 +120,16 @@ class LibraryServer(ContentServer, MobileServer, XMLServer, OPDSServer): def start(self): self.is_running = False + d = DispatchController() + for x in self.__class__.__bases__: + if hasattr(x, 'add_routes'): + x.add_routes(self, d) + root_conf = self.config.get('/', {}) + 
root_conf['request.dispatch'] = d.dispatcher + self.config['/'] = root_conf + self.setup_loggers() - cherrypy.tree.mount(self, '', config=self.config) + cherrypy.tree.mount(root=None, config=self.config) try: try: cherrypy.engine.start() diff --git a/src/calibre/library/server/cache.py b/src/calibre/library/server/cache.py new file mode 100644 index 0000000000..89dc140434 --- /dev/null +++ b/src/calibre/library/server/cache.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python +# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai + +__license__ = 'GPL v3' +__copyright__ = '2010, Kovid Goyal ' +__docformat__ = 'restructuredtext en' + +from calibre.utils.date import utcnow + +class Cache(object): + + @property + def categories_cache(self): + old = getattr(self, '_category_cache', None) + if old is None or old[0] <= self.db.last_modified(): + categories = self.db.get_categories() + self._category_cache = (utcnow(), categories) + return self._category_cache[1] diff --git a/src/calibre/library/server/content.py b/src/calibre/library/server/content.py index d1a695cee1..8638035c88 100644 --- a/src/calibre/library/server/content.py +++ b/src/calibre/library/server/content.py @@ -16,7 +16,7 @@ except ImportError: from calibre import fit_image, guess_type from calibre.utils.date import fromtimestamp -from calibre.library.server.utils import expose + class ContentServer(object): @@ -25,6 +25,13 @@ class ContentServer(object): a few utility methods. ''' + def add_routes(self, connect): + connect('root', '/', self.index) + connect('get', '/get/{what}/{id}', self.get, + conditions=dict(method=["GET", "HEAD"])) + connect('static', '/static/{name}', self.static, + conditions=dict(method=["GET", "HEAD"])) + # Utility methods {{{ def last_modified(self, updated): ''' @@ -68,8 +75,7 @@ class ContentServer(object): # }}} - @expose - def get(self, what, id, *args, **kwargs): + def get(self, what, id): 'Serves files, covers, thumbnails from the calibre database' try: id = int(id) @@ -87,7 +93,6 @@ class ContentServer(object): return self.get_cover(id) return self.get_format(id, what) - @expose def static(self, name): 'Serves static content' name = name.lower() @@ -108,7 +113,6 @@ class ContentServer(object): cherrypy.response.headers['Last-Modified'] = self.last_modified(lm) return open(path, 'rb').read() - @expose def index(self, **kwargs): 'The / URL' ua = cherrypy.request.headers.get('User-Agent', '').strip() diff --git a/src/calibre/library/server/mobile.py b/src/calibre/library/server/mobile.py index 9bec6cce35..afb31815d5 100644 --- a/src/calibre/library/server/mobile.py +++ b/src/calibre/library/server/mobile.py @@ -11,7 +11,7 @@ import __builtin__ import cherrypy from calibre.utils.genshi.template import MarkupTemplate -from calibre.library.server.utils import strftime, expose +from calibre.library.server.utils import strftime from calibre.ebooks.metadata import fmt_sidx # Templates {{{ @@ -173,7 +173,9 @@ class MobileServer(object): MOBILE_UA = re.compile('(?i)(?:iPhone|Opera Mini|NetFront|webOS|Mobile|Android|imode|DoCoMo|Minimo|Blackberry|MIDP|Symbian|HD2)') - @expose + def add_routes(self, connect): + connect('mobile', '/mobile', self.mobile) + def mobile(self, start='1', num='25', sort='date', search='', _=None, order='descending'): ''' diff --git a/src/calibre/library/server/opds.py b/src/calibre/library/server/opds.py index f7a7679813..359449a838 100644 --- a/src/calibre/library/server/opds.py +++ b/src/calibre/library/server/opds.py @@ -5,15 +5,102 @@ __license__ = 'GPL v3' __copyright__ = '2010, Kovid Goyal 
' __docformat__ = 'restructuredtext en' -import re +import re, hashlib from itertools import repeat +from functools import partial import cherrypy +from lxml import etree +from lxml.builder import ElementMaker from calibre.utils.genshi.template import MarkupTemplate from calibre.library.server.utils import strftime, expose from calibre.ebooks.metadata import fmt_sidx, title_sort from calibre import guess_type, prepare_string_for_xml +from calibre.constants import __appname__ + +# Vocabulary for building OPDS feeds {{{ +E = ElementMaker(namespace='http://www.w3.org/2005/Atom', + nsmap={ + None : 'http://www.w3.org/2005/Atom', + 'dc' : 'http://purl.org/dc/terms/', + 'opds' : 'http://opds-spec.org/2010/catalog', + }) + + +FEED = E.feed +TITLE = E.title +ID = E.id + +def UPDATED(dt, *args, **kwargs): + return E.updated(dt.strftime('%Y-%m-%dT%H:%M:%S+00:00'), *args, **kwargs) + +LINK = partial(E.link, type='application/atom+xml') +NAVLINK = partial(E.link, + type='application/atom+xml;type=feed;profile=opds-catalog') + +def SEARCH(base_href, *args, **kwargs): + kwargs['rel'] = 'search' + kwargs['title'] = 'Search' + kwargs['href'] = base_href+'/?search={searchTerms}' + return LINK(*args, **kwargs) + +def AUTHOR(name, uri=None): + args = [E.name(name)] + if uri is not None: + args.append(E.uri(uri)) + return E.author(*args) + +SUBTITLE = E.subtitle + +def NAVCATALOG_ENTRY(base_href, updated, title, description, query_data): + data = [u'%s=%s'%(key, val) for key, val in query_data.items()] + data = '&'.join(data) + href = base_href+'/?'+data + id_ = 'calibre-subcatalog:'+str(hashlib.sha1(href).hexdigest()) + return E.entry( + TITLE(title), + ID(id_), + UPDATED(updated), + E.content(description, type='text'), + NAVLINK(href=href) + ) + +# }}} + +class Feed(object): + + def __str__(self): + return etree.tostring(self.root, pretty_print=True, encoding='utf-8', + xml_declaration=True) + +class TopLevel(Feed): + + def __init__(self, + updated, # datetime object in UTC + categories, + id_ = 'urn:calibre:main', + base_href = '/stanza' + ): + self.base_href = base_href + subc = partial(NAVCATALOG_ENTRY, base_href, updated) + + subcatalogs = [subc('By '+title, + 'Books sorted by '+desc, {'sortby':q}) for title, desc, q in + categories] + + self.root = \ + FEED( + TITLE(__appname__ + ' ' + _('Library')), + ID(id_), + UPDATED(updated), + SEARCH(base_href), + AUTHOR(__appname__, uri='http://calibre-ebook.com'), + SUBTITLE(_('Books in your library')), + *subcatalogs + ) + + # Templates {{{ @@ -42,6 +129,7 @@ STANZA_SUBCATALOG_ENTRY=MarkupTemplate('''\ ''') +# Feed of books STANZA = MarkupTemplate('''\ @@ -63,62 +151,20 @@ STANZA = MarkupTemplate('''\ ''') -STANZA_MAIN = MarkupTemplate('''\ - - - calibre Library - $id - ${updated.strftime('%Y-%m-%dT%H:%M:%S+00:00')} - - - calibre - http://calibre-ebook.com - - - ${subtitle} - - - By Author - urn:uuid:fc000fa0-8c23-11de-a31d-0002a5d5c51b - ${updated.strftime('%Y-%m-%dT%H:%M:%S+00:00')} - - Books sorted by Author - - - By Title - urn:uuid:1df4fe40-8c24-11de-b4c6-0002a5d5c51b - ${updated.strftime('%Y-%m-%dT%H:%M:%S+00:00')} - - Books sorted by Title - - - By Newest - urn:uuid:3c6d4940-8c24-11de-a4d7-0002a5d5c51b - ${updated.strftime('%Y-%m-%dT%H:%M:%S+00:00')} - - Books sorted by Date - - - By Tag - urn:uuid:824921e8-db8a-4e61-7d38-f1ce41502853 - ${updated.strftime('%Y-%m-%dT%H:%M:%S+00:00')} - - Books sorted by Tags - - - By Series - urn:uuid:512a5e50-a88f-f6b8-82aa-8f129c719f61 - ${updated.strftime('%Y-%m-%dT%H:%M:%S+00:00')} - - Books sorted by Series - - 
-''') # }}} class OPDSServer(object): + def build_top_level(self, updated, base_href='/stanza'): + categories = self.categories_cache + categories = [(x.capitalize(), x.capitalize(), x) for x in + categories.keys()] + categories.append(('Title', 'Title', '|title|')) + categories.append(('Newest', 'Newest', '|newest|')) + + return TopLevel(updated, categories, base_href=base_href) + def get_matches(self, location, query): base = self.db.data.get_matches(location, query) epub = self.db.data.get_matches('format', '=epub') @@ -173,10 +219,6 @@ class OPDSServer(object): return STANZA.generate(subtitle=subtitle, data=entries, FM=self.db.FIELD_MAP, updated=updated, id='urn:calibre:main', next_link=next_link).render('xml') - def stanza_main(self, updated): - return STANZA_MAIN.generate(subtitle='', data=[], FM=self.db.FIELD_MAP, - updated=updated, id='urn:calibre:main').render('xml') - @expose def stanza(self, search=None, sortby=None, authorid=None, tagid=None, seriesid=None, offset=0): @@ -186,9 +228,11 @@ class OPDSServer(object): offset = int(offset) cherrypy.response.headers['Last-Modified'] = self.last_modified(updated) cherrypy.response.headers['Content-Type'] = 'text/xml' - # Main feed + + # Top Level feed if not sortby and not search and not authorid and not tagid and not seriesid: - return self.stanza_main(updated) + return str(self.build_top_level(updated)) + if sortby in ('byseries', 'byauthor', 'bytag'): return self.stanza_sortby_subcategory(updated, sortby, offset) @@ -296,5 +340,8 @@ class OPDSServer(object): next_link=next_link, updated=updated, id='urn:calibre:main').render('xml') - +if __name__ == '__main__': + from datetime import datetime + f = TopLevel(datetime.utcnow()) + print f diff --git a/src/calibre/library/server/utils.py b/src/calibre/library/server/utils.py index 7dc0884e1a..ad5aaac169 100644 --- a/src/calibre/library/server/utils.py +++ b/src/calibre/library/server/utils.py @@ -7,34 +7,33 @@ __docformat__ = 'restructuredtext en' import time +import cherrypy + from calibre import strftime as _strftime, prints from calibre.utils.date import now as nowf def expose(func): - import cherrypy - def do(self, *args, **kwargs): + def do(*args, **kwargs): + self = func.im_self + if self.opts.develop: + start = time.time() + dict.update(cherrypy.response.headers, {'Server':self.server_name}) if not self.embedded: self.db.check_if_modified() - return func(self, *args, **kwargs) - - return cherrypy.expose(do) - -def timeit(func): - - def do(self, *args, **kwargs): - if self.opts.develop: - start = time.time() - ans = func(self, *args, **kwargs) + ans = func(*args, **kwargs) if self.opts.develop: prints('Function', func.__name__, 'called with args:', args, kwargs) prints('\tTime:', func.__name__, time.time()-start) return ans + do.__name__ = func.__name__ + return do + def strftime(fmt='%Y/%m/%d %H:%M:%S', dt=None): if not hasattr(dt, 'timetuple'): dt = nowf() diff --git a/src/calibre/library/server/xml.py b/src/calibre/library/server/xml.py index e9f9a02548..036a2051bf 100644 --- a/src/calibre/library/server/xml.py +++ b/src/calibre/library/server/xml.py @@ -5,52 +5,26 @@ __license__ = 'GPL v3' __copyright__ = '2010, Kovid Goyal ' __docformat__ = 'restructuredtext en' -import copy, __builtin__ +import __builtin__ import cherrypy +from lxml.builder import ElementMaker +from lxml import etree -from calibre.utils.genshi.template import MarkupTemplate -from calibre.library.server.utils import strftime, expose +from calibre.library.server.utils import strftime from 
calibre.ebooks.metadata import fmt_sidx +from calibre.constants import preferred_encoding +from calibre import isbytestring -# Templates {{{ -BOOK = '''\ -${r[FM['comments']] if r[FM['comments']] else ''} - -''' - - -LIBRARY = MarkupTemplate('''\ - - - - ${Markup(book)} - - -''') - -# }}} +E = ElementMaker() class XMLServer(object): 'Serves XML and the Ajax based HTML frontend' - @expose - def library(self, start='0', num='50', sort=None, search=None, + def add_routes(self, connect): + connect('xml', '/xml', self.xml) + + def xml(self, start='0', num='50', sort=None, search=None, _=None, order='ascending'): ''' Serves metadata from the calibre database as XML. @@ -68,30 +42,63 @@ class XMLServer(object): num = int(num) except ValueError: raise cherrypy.HTTPError(400, 'num: %s is not an integer'%num) + order = order.lower().strip() == 'ascending' + ids = self.db.data.parse(search) if search and search.strip() else self.db.data.universal_set() - ids = sorted(ids) + FM = self.db.FIELD_MAP - items = copy.deepcopy([r for r in iter(self.db) if r[FM['id']] in ids]) + + items = [r for r in iter(self.db) if r[FM['id']] in ids] if sort is not None: self.sort(items, sort, order) - book, books = MarkupTemplate(BOOK), [] + + books = [] + + def serialize(x): + if isinstance(x, unicode): + return x + if isbytestring(x): + return x.decode(preferred_encoding, 'replace') + return unicode(x) + for record in items[start:start+num]: + kwargs = {} aus = record[FM['authors']] if record[FM['authors']] else __builtin__._('Unknown') authors = '|'.join([i.replace('|', ',') for i in aus.split(',')]) - record[FM['series_index']] = \ + kwargs['authors'] = authors + + kwargs['series_index'] = \ fmt_sidx(float(record[FM['series_index']])) - ts, pd = strftime('%Y/%m/%d %H:%M:%S', record[FM['timestamp']]), \ - strftime('%Y/%m/%d %H:%M:%S', record[FM['pubdate']]) - books.append(book.generate(r=record, authors=authors, timestamp=ts, - pubdate=pd, FM=FM).render('xml').decode('utf-8')) + + for x in ('timestamp', 'pubdate'): + kwargs[x] = strftime('%Y/%m/%d %H:%M:%S', record[FM[x]]) + + for x in ('id', 'title', 'sort', 'author_sort', 'rating', 'size'): + kwargs[x] = serialize(record[FM[x]]) + + for x in ('isbn', 'formats', 'series', 'tags', 'publisher', + 'comments'): + y = record[FM[x]] + kwargs[x] = serialize(y) if y else '' + + c = kwargs.pop('comments') + books.append(E.book(c, **kwargs)) + updated = self.db.last_modified() + kwargs = dict( + start = str(start), + updated=updated.strftime('%Y-%m-%dT%H:%M:%S+00:00'), + total=str(len(ids)), + num=str(len(books))) + ans = E.library(*books, **kwargs) cherrypy.response.headers['Content-Type'] = 'text/xml' cherrypy.response.headers['Last-Modified'] = self.last_modified(updated) - return LIBRARY.generate(books=books, start=start, updated=updated, - total=len(ids), FM=FM).render('xml') + + return etree.tostring(ans, encoding='utf-8', pretty_print=True, + xml_declaration=True) diff --git a/src/routes/__init__.py b/src/routes/__init__.py new file mode 100644 index 0000000000..d252c700e4 --- /dev/null +++ b/src/routes/__init__.py @@ -0,0 +1,142 @@ +"""Provides common classes and functions most users will want access to.""" +import threading, sys + +class _RequestConfig(object): + """ + RequestConfig thread-local singleton + + The Routes RequestConfig object is a thread-local singleton that should + be initialized by the web framework that is utilizing Routes. 
+ """ + __shared_state = threading.local() + + def __getattr__(self, name): + return getattr(self.__shared_state, name) + + def __setattr__(self, name, value): + """ + If the name is environ, load the wsgi envion with load_wsgi_environ + and set the environ + """ + if name == 'environ': + self.load_wsgi_environ(value) + return self.__shared_state.__setattr__(name, value) + return self.__shared_state.__setattr__(name, value) + + def __delattr__(self, name): + delattr(self.__shared_state, name) + + def load_wsgi_environ(self, environ): + """ + Load the protocol/server info from the environ and store it. + Also, match the incoming URL if there's already a mapper, and + store the resulting match dict in mapper_dict. + """ + if 'HTTPS' in environ or environ.get('wsgi.url_scheme') == 'https' \ + or environ.get('HTTP_X_FORWARDED_PROTO') == 'https': + self.__shared_state.protocol = 'https' + else: + self.__shared_state.protocol = 'http' + try: + self.mapper.environ = environ + except AttributeError: + pass + + # Wrap in try/except as common case is that there is a mapper + # attached to self + try: + if 'PATH_INFO' in environ: + mapper = self.mapper + path = environ['PATH_INFO'] + result = mapper.routematch(path) + if result is not None: + self.__shared_state.mapper_dict = result[0] + self.__shared_state.route = result[1] + else: + self.__shared_state.mapper_dict = None + self.__shared_state.route = None + except AttributeError: + pass + + if 'HTTP_X_FORWARDED_HOST' in environ: + self.__shared_state.host = environ['HTTP_X_FORWARDED_HOST'] + elif 'HTTP_HOST' in environ: + self.__shared_state.host = environ['HTTP_HOST'] + else: + self.__shared_state.host = environ['SERVER_NAME'] + if environ['wsgi.url_scheme'] == 'https': + if environ['SERVER_PORT'] != '443': + self.__shared_state.host += ':' + environ['SERVER_PORT'] + else: + if environ['SERVER_PORT'] != '80': + self.__shared_state.host += ':' + environ['SERVER_PORT'] + +def request_config(original=False): + """ + Returns the Routes RequestConfig object. + + To get the Routes RequestConfig: + + >>> from routes import * + >>> config = request_config() + + The following attributes must be set on the config object every request: + + mapper + mapper should be a Mapper instance thats ready for use + host + host is the hostname of the webapp + protocol + protocol is the protocol of the current request + mapper_dict + mapper_dict should be the dict returned by mapper.match() + redirect + redirect should be a function that issues a redirect, + and takes a url as the sole argument + prefix (optional) + Set if the application is moved under a URL prefix. Prefix + will be stripped before matching, and prepended on generation + environ (optional) + Set to the WSGI environ for automatic prefix support if the + webapp is underneath a 'SCRIPT_NAME' + + Setting the environ will use information in environ to try and + populate the host/protocol/mapper_dict options if you've already + set a mapper. + + **Using your own requst local** + + If you have your own request local object that you'd like to use instead + of the default thread local provided by Routes, you can configure Routes + to use it:: + + from routes import request_config() + config = request_config() + if hasattr(config, 'using_request_local'): + config.request_local = YourLocalCallable + config = request_config() + + Once you have configured request_config, its advisable you retrieve it + again to get the object you wanted. 
The variable you assign to + request_local is assumed to be a callable that will get the local config + object you wish. + + This example tests for the presence of the 'using_request_local' attribute + which will be present if you haven't assigned it yet. This way you can + avoid repeat assignments of the request specific callable. + + Should you want the original object, perhaps to change the callable its + using or stop this behavior, call request_config(original=True). + """ + obj = _RequestConfig() + try: + if obj.request_local and original is False: + return getattr(obj, 'request_local')() + except AttributeError: + obj.request_local = False + obj.using_request_local = False + return _RequestConfig() + +from routes.mapper import Mapper +from routes.util import redirect_to, url_for, URLGenerator +__all__=['Mapper', 'url_for', 'URLGenerator', 'redirect_to', 'request_config'] diff --git a/src/routes/base.py b/src/routes/base.py new file mode 100644 index 0000000000..f9e2f64973 --- /dev/null +++ b/src/routes/base.py @@ -0,0 +1,4 @@ +"""Route and Mapper core classes""" +from routes import request_config +from routes.mapper import Mapper +from routes.route import Route diff --git a/src/routes/lru.py b/src/routes/lru.py new file mode 100644 index 0000000000..9fb2329e44 --- /dev/null +++ b/src/routes/lru.py @@ -0,0 +1,70 @@ +"""LRU caching class and decorator""" +import threading + +_marker = object() + +class LRUCache(object): + def __init__(self, size): + """ Implements a psueudo-LRU algorithm (CLOCK) """ + if size < 1: + raise ValueError('size must be >1') + self.clock = [] + for i in xrange(0, size): + self.clock.append({'key':_marker, 'ref':False}) + self.size = size + self.maxpos = size - 1 + self.hand = 0 + self.data = {} + self.lock = threading.Lock() + + def __contains__(self, key): + return key in self.data + + def __getitem__(self, key, default=None): + try: + datum = self.data[key] + except KeyError: + return default + pos, val = datum + self.clock[pos]['ref'] = True + hand = pos + 1 + if hand > self.maxpos: + hand = 0 + self.hand = hand + return val + + def __setitem__(self, key, val, _marker=_marker): + hand = self.hand + maxpos = self.maxpos + clock = self.clock + data = self.data + lock = self.lock + + end = hand - 1 + if end < 0: + end = maxpos + + while 1: + current = clock[hand] + ref = current['ref'] + if ref is True: + current['ref'] = False + hand = hand + 1 + if hand > maxpos: + hand = 0 + elif ref is False or hand == end: + lock.acquire() + try: + oldkey = current['key'] + if oldkey in data: + del data[oldkey] + current['key'] = key + current['ref'] = True + data[key] = (hand, val) + hand += 1 + if hand > maxpos: + hand = 0 + self.hand = hand + finally: + lock.release() + break \ No newline at end of file diff --git a/src/routes/mapper.py b/src/routes/mapper.py new file mode 100644 index 0000000000..50f7482580 --- /dev/null +++ b/src/routes/mapper.py @@ -0,0 +1,1161 @@ +"""Mapper and Sub-Mapper""" +import re +import sys +import threading + +from routes import request_config +from routes.lru import LRUCache +from routes.util import controller_scan, MatchException, RoutesException +from routes.route import Route + + +COLLECTION_ACTIONS = ['index', 'create', 'new'] +MEMBER_ACTIONS = ['show', 'update', 'delete', 'edit'] + + +def strip_slashes(name): + """Remove slashes from the beginning and end of a part/URL.""" + if name.startswith('/'): + name = name[1:] + if name.endswith('/'): + name = name[:-1] + return name + + +class SubMapperParent(object): + """Base class for 
Mapper and SubMapper, both of which may be the parent + of SubMapper objects + """ + + def submapper(self, **kargs): + """Create a partial version of the Mapper with the designated + options set + + This results in a :class:`routes.mapper.SubMapper` object. + + If keyword arguments provided to this method also exist in the + keyword arguments provided to the submapper, their values will + be merged with the saved options going first. + + In addition to :class:`routes.route.Route` arguments, submapper + can also take a ``path_prefix`` argument which will be + prepended to the path of all routes that are connected. + + Example:: + + >>> map = Mapper(controller_scan=None) + >>> map.connect('home', '/', controller='home', action='splash') + >>> map.matchlist[0].name == 'home' + True + >>> m = map.submapper(controller='home') + >>> m.connect('index', '/index', action='index') + >>> map.matchlist[1].name == 'index' + True + >>> map.matchlist[1].defaults['controller'] == 'home' + True + + Optional ``collection_name`` and ``resource_name`` arguments are + used in the generation of route names by the ``action`` and + ``link`` methods. These in turn are used by the ``index``, + ``new``, ``create``, ``show``, ``edit``, ``update`` and + ``delete`` methods which may be invoked indirectly by listing + them in the ``actions`` argument. If the ``formatted`` argument + is set to ``True`` (the default), generated paths are given the + suffix '{.format}' which matches or generates an optional format + extension. + + Example:: + + >>> from routes.util import url_for + >>> map = Mapper(controller_scan=None) + >>> m = map.submapper(path_prefix='/entries', collection_name='entries', resource_name='entry', actions=['index', 'new']) + >>> url_for('entries') == '/entries' + True + >>> url_for('new_entry', format='xml') == '/entries/new.xml' + True + + """ + return SubMapper(self, **kargs) + + def collection(self, collection_name, resource_name, path_prefix=None, + member_prefix='/{id}', controller=None, + collection_actions=COLLECTION_ACTIONS, + member_actions = MEMBER_ACTIONS, member_options=None, + **kwargs): + """Create a submapper that represents a collection. + + This results in a :class:`routes.mapper.SubMapper` object, with a + ``member`` property of the same type that represents the collection's + member resources. + + Its interface is the same as the ``submapper`` together with + ``member_prefix``, ``member_actions`` and ``member_options`` + which are passed to the ``member` submatter as ``path_prefix``, + ``actions`` and keyword arguments respectively. 
+ + Example:: + + >>> from routes.util import url_for + >>> map = Mapper(controller_scan=None) + >>> c = map.collection('entries', 'entry') + >>> c.member.link('ping', method='POST') + >>> url_for('entries') == '/entries' + True + >>> url_for('edit_entry', id=1) == '/entries/1/edit' + True + >>> url_for('ping_entry', id=1) == '/entries/1/ping' + True + + """ + if controller is None: + controller = resource_name or collection_name + + if path_prefix is None: + path_prefix = '/' + collection_name + + collection = SubMapper(self, collection_name=collection_name, + resource_name=resource_name, + path_prefix=path_prefix, controller=controller, + actions=collection_actions, **kwargs) + + collection.member = SubMapper(collection, path_prefix=member_prefix, + actions=member_actions, + **(member_options or {})) + + return collection + + +class SubMapper(SubMapperParent): + """Partial mapper for use with_options""" + def __init__(self, obj, resource_name=None, collection_name=None, + actions=None, formatted=None, **kwargs): + self.kwargs = kwargs + self.obj = obj + self.collection_name = collection_name + self.member = None + self.resource_name = resource_name \ + or getattr(obj, 'resource_name', None) \ + or kwargs.get('controller', None) \ + or getattr(obj, 'controller', None) + if formatted is not None: + self.formatted = formatted + else: + self.formatted = getattr(obj, 'formatted', None) + if self.formatted is None: + self.formatted = True + + self.add_actions(actions or []) + + def connect(self, *args, **kwargs): + newkargs = {} + newargs = args + for key, value in self.kwargs.items(): + if key == 'path_prefix': + if len(args) > 1: + newargs = (args[0], self.kwargs[key] + args[1]) + else: + newargs = (self.kwargs[key] + args[0],) + elif key in kwargs: + if isinstance(value, dict): + newkargs[key] = dict(value, **kwargs[key]) # merge dicts + else: + newkargs[key] = value + kwargs[key] + else: + newkargs[key] = self.kwargs[key] + for key in kwargs: + if key not in self.kwargs: + newkargs[key] = kwargs[key] + return self.obj.connect(*newargs, **newkargs) + + def link(self, rel=None, name=None, action=None, method='GET', + formatted=None, **kwargs): + """Generates a named route for a subresource. + + Example:: + + >>> from routes.util import url_for + >>> map = Mapper(controller_scan=None) + >>> c = map.collection('entries', 'entry') + >>> c.link('recent', name='recent_entries') + >>> c.member.link('ping', method='POST', formatted=True) + >>> url_for('entries') == '/entries' + True + >>> url_for('recent_entries') == '/entries/recent' + True + >>> url_for('ping_entry', id=1) == '/entries/1/ping' + True + >>> url_for('ping_entry', id=1, format='xml') == '/entries/1/ping.xml' + True + + """ + if formatted or (formatted is None and self.formatted): + suffix = '{.format}' + else: + suffix = '' + + return self.connect(name or (rel + '_' + self.resource_name), + '/' + (rel or name) + suffix, + action=action or rel or name, + **_kwargs_with_conditions(kwargs, method)) + + def new(self, **kwargs): + """Generates the "new" link for a collection submapper.""" + return self.link(rel='new', **kwargs) + + def edit(self, **kwargs): + """Generates the "edit" link for a collection member submapper.""" + return self.link(rel='edit', **kwargs) + + def action(self, name=None, action=None, method='GET', formatted=None, + **kwargs): + """Generates a named route at the base path of a submapper. 
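+
+        The ``method`` argument is folded into the route's ``conditions``
+        via ``_kwargs_with_conditions`` (defined below). A rough sketch,
+        assuming the default ``formatted=True``::
+
+            map = Mapper(controller_scan=None)
+            m = map.submapper(path_prefix='/entries', controller='entry')
+            m.action(action='create', method='POST')
+            # connects the named route 'create_entry' at '/entries{.format}'
+            # with conditions={'method': 'POST'}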
+ + Example:: + + >>> from routes import url_for + >>> map = Mapper(controller_scan=None) + >>> c = map.submapper(path_prefix='/entries', controller='entry') + >>> c.action(action='index', name='entries', formatted=True) + >>> c.action(action='create', method='POST') + >>> url_for(controller='entry', action='index', method='GET') == '/entries' + True + >>> url_for(controller='entry', action='index', method='GET', format='xml') == '/entries.xml' + True + >>> url_for(controller='entry', action='create', method='POST') == '/entries' + True + + """ + if formatted or (formatted is None and self.formatted): + suffix = '{.format}' + else: + suffix = '' + return self.connect(name or (action + '_' + self.resource_name), + suffix, + action=action or name, + **_kwargs_with_conditions(kwargs, method)) + + def index(self, name=None, **kwargs): + """Generates the "index" action for a collection submapper.""" + return self.action(name=name or self.collection_name, + action='index', method='GET', **kwargs) + + def show(self, name = None, **kwargs): + """Generates the "show" action for a collection member submapper.""" + return self.action(name=name or self.resource_name, + action='show', method='GET', **kwargs) + + def create(self, **kwargs): + """Generates the "create" action for a collection submapper.""" + return self.action(action='create', method='POST', **kwargs) + + def update(self, **kwargs): + """Generates the "update" action for a collection member submapper.""" + return self.action(action='update', method='PUT', **kwargs) + + def delete(self, **kwargs): + """Generates the "delete" action for a collection member submapper.""" + return self.action(action='delete', method='DELETE', **kwargs) + + def add_actions(self, actions): + [getattr(self, action)() for action in actions] + + # Provided for those who prefer using the 'with' syntax in Python 2.5+ + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + pass + +# Create kwargs with a 'conditions' member generated for the given method +def _kwargs_with_conditions(kwargs, method): + if method and 'conditions' not in kwargs: + newkwargs = kwargs.copy() + newkwargs['conditions'] = {'method': method} + return newkwargs + else: + return kwargs + + + +class Mapper(SubMapperParent): + """Mapper handles URL generation and URL recognition in a web + application. + + Mapper is built handling dictionary's. It is assumed that the web + application will handle the dictionary returned by URL recognition + to dispatch appropriately. + + URL generation is done by passing keyword parameters into the + generate function, a URL is then returned. + + """ + def __init__(self, controller_scan=controller_scan, directory=None, + always_scan=False, register=True, explicit=True): + """Create a new Mapper instance + + All keyword arguments are optional. + + ``controller_scan`` + Function reference that will be used to return a list of + valid controllers used during URL matching. If + ``directory`` keyword arg is present, it will be passed + into the function during its call. This option defaults to + a function that will scan a directory for controllers. + + Alternatively, a list of controllers or None can be passed + in which are assumed to be the definitive list of + controller names valid when matching 'controller'. + + ``directory`` + Passed into controller_scan for the directory to scan. It + should be an absolute path if using the default + ``controller_scan`` function. 
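+
+            Example (hypothetical path)::
+
+                map = Mapper(directory='/path/to/myapp/controllers')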
+ + ``always_scan`` + Whether or not the ``controller_scan`` function should be + run during every URL match. This is typically a good idea + during development so the server won't need to be restarted + anytime a controller is added. + + ``register`` + Boolean used to determine if the Mapper should use + ``request_config`` to register itself as the mapper. Since + it's done on a thread-local basis, this is typically best + used during testing though it won't hurt in other cases. + + ``explicit`` + Boolean used to determine if routes should be connected + with implicit defaults of:: + + {'controller':'content','action':'index','id':None} + + When set to True, these defaults will not be added to route + connections and ``url_for`` will not use Route memory. + + Additional attributes that may be set after mapper + initialization (ie, map.ATTRIBUTE = 'something'): + + ``encoding`` + Used to indicate alternative encoding/decoding systems to + use with both incoming URL's, and during Route generation + when passed a Unicode string. Defaults to 'utf-8'. + + ``decode_errors`` + How to handle errors in the encoding, generally ignoring + any chars that don't convert should be sufficient. Defaults + to 'ignore'. + + ``minimization`` + Boolean used to indicate whether or not Routes should + minimize URL's and the generated URL's, or require every + part where it appears in the path. Defaults to True. + + ``hardcode_names`` + Whether or not Named Routes result in the default options + for the route being used *or* if they actually force url + generation to use the route. Defaults to False. + + """ + self.matchlist = [] + self.maxkeys = {} + self.minkeys = {} + self.urlcache = LRUCache(1600) + self._created_regs = False + self._created_gens = False + self._master_regexp = None + self.prefix = None + self.req_data = threading.local() + self.directory = directory + self.always_scan = always_scan + self.controller_scan = controller_scan + self._regprefix = None + self._routenames = {} + self.debug = False + self.append_slash = False + self.sub_domains = False + self.sub_domains_ignore = [] + self.domain_match = '[^\.\/]+?\.[^\.\/]+' + self.explicit = explicit + self.encoding = 'utf-8' + self.decode_errors = 'ignore' + self.hardcode_names = True + self.minimization = False + self.create_regs_lock = threading.Lock() + if register: + config = request_config() + config.mapper = self + + def __str__(self): + """Generates a tabular string representation.""" + def format_methods(r): + if r.conditions: + method = r.conditions.get('method', '') + return type(method) is str and method or ', '.join(method) + else: + return '' + + table = [('Route name', 'Methods', 'Path')] + \ + [(r.name or '', format_methods(r), r.routepath or '') + for r in self.matchlist] + + widths = [max(len(row[col]) for row in table) + for col in range(len(table[0]))] + + return '\n'.join( + ' '.join(row[col].ljust(widths[col]) + for col in range(len(widths))) + for row in table) + + def _envget(self): + try: + return self.req_data.environ + except AttributeError: + return None + def _envset(self, env): + self.req_data.environ = env + def _envdel(self): + del self.req_data.environ + environ = property(_envget, _envset, _envdel) + + def extend(self, routes, path_prefix=''): + """Extends the mapper routes with a list of Route objects + + If a path_prefix is provided, all the routes will have their + path prepended with the path_prefix. 
+ + Example:: + + >>> map = Mapper(controller_scan=None) + >>> map.connect('home', '/', controller='home', action='splash') + >>> map.matchlist[0].name == 'home' + True + >>> routes = [Route('index', '/index.htm', controller='home', + ... action='index')] + >>> map.extend(routes) + >>> len(map.matchlist) == 2 + True + >>> map.extend(routes, path_prefix='/subapp') + >>> len(map.matchlist) == 3 + True + >>> map.matchlist[2].routepath == '/subapp/index.htm' + True + + .. note:: + + This function does not merely extend the mapper with the + given list of routes, it actually creates new routes with + identical calling arguments. + + """ + for route in routes: + if path_prefix and route.minimization: + routepath = '/'.join([path_prefix, route.routepath]) + elif path_prefix: + routepath = path_prefix + route.routepath + else: + routepath = route.routepath + self.connect(route.name, routepath, **route._kargs) + + def connect(self, *args, **kargs): + """Create and connect a new Route to the Mapper. + + Usage: + + .. code-block:: python + + m = Mapper() + m.connect(':controller/:action/:id') + m.connect('date/:year/:month/:day', controller="blog", action="view") + m.connect('archives/:page', controller="blog", action="by_page", + requirements = { 'page':'\d{1,2}' }) + m.connect('category_list', 'archives/category/:section', controller='blog', action='category', + section='home', type='list') + m.connect('home', '', controller='blog', action='view', section='home') + + """ + routename = None + if len(args) > 1: + routename = args[0] + else: + args = (None,) + args + if '_explicit' not in kargs: + kargs['_explicit'] = self.explicit + if '_minimize' not in kargs: + kargs['_minimize'] = self.minimization + route = Route(*args, **kargs) + + # Apply encoding and errors if its not the defaults and the route + # didn't have one passed in. + if (self.encoding != 'utf-8' or self.decode_errors != 'ignore') and \ + '_encoding' not in kargs: + route.encoding = self.encoding + route.decode_errors = self.decode_errors + + if not route.static: + self.matchlist.append(route) + + if routename: + self._routenames[routename] = route + route.name = routename + if route.static: + return + exists = False + for key in self.maxkeys: + if key == route.maxkeys: + self.maxkeys[key].append(route) + exists = True + break + if not exists: + self.maxkeys[route.maxkeys] = [route] + self._created_gens = False + + def _create_gens(self): + """Create the generation hashes for route lookups""" + # Use keys temporailly to assemble the list to avoid excessive + # list iteration testing with "in" + controllerlist = {} + actionlist = {} + + # Assemble all the hardcoded/defaulted actions/controllers used + for route in self.matchlist: + if route.static: + continue + if route.defaults.has_key('controller'): + controllerlist[route.defaults['controller']] = True + if route.defaults.has_key('action'): + actionlist[route.defaults['action']] = True + + # Setup the lists of all controllers/actions we'll add each route + # to. We include the '*' in the case that a generate contains a + # controller/action that has no hardcodes + controllerlist = controllerlist.keys() + ['*'] + actionlist = actionlist.keys() + ['*'] + + # Go through our list again, assemble the controllers/actions we'll + # add each route to. If its hardcoded, we only add it to that dict key. + # Otherwise we add it to every hardcode since it can be changed. 
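+        # For illustration (hypothetical routes): a route hardcoding
+        # controller='pages' is filed only under the 'pages' controller key
+        # (for every known action plus '*'), while a fully dynamic
+        # :controller/:action route is filed under every controller and
+        # action key, including '*'.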
+ gendict = {} # Our generated two-deep hash + for route in self.matchlist: + if route.static: + continue + clist = controllerlist + alist = actionlist + if 'controller' in route.hardcoded: + clist = [route.defaults['controller']] + if 'action' in route.hardcoded: + alist = [unicode(route.defaults['action'])] + for controller in clist: + for action in alist: + actiondict = gendict.setdefault(controller, {}) + actiondict.setdefault(action, ([], {}))[0].append(route) + self._gendict = gendict + self._created_gens = True + + def create_regs(self, *args, **kwargs): + """Atomically creates regular expressions for all connected + routes + """ + self.create_regs_lock.acquire() + try: + self._create_regs(*args, **kwargs) + finally: + self.create_regs_lock.release() + + def _create_regs(self, clist=None): + """Creates regular expressions for all connected routes""" + if clist is None: + if self.directory: + clist = self.controller_scan(self.directory) + elif callable(self.controller_scan): + clist = self.controller_scan() + elif not self.controller_scan: + clist = [] + else: + clist = self.controller_scan + + for key, val in self.maxkeys.iteritems(): + for route in val: + route.makeregexp(clist) + + regexps = [] + routematches = [] + for route in self.matchlist: + if not route.static: + routematches.append(route) + regexps.append(route.makeregexp(clist, include_names=False)) + self._routematches = routematches + + # Create our regexp to strip the prefix + if self.prefix: + self._regprefix = re.compile(self.prefix + '(.*)') + + # Save the master regexp + regexp = '|'.join(['(?:%s)' % x for x in regexps]) + self._master_reg = regexp + self._master_regexp = re.compile(regexp) + self._created_regs = True + + def _match(self, url, environ): + """Internal Route matcher + + Matches a URL against a route, and returns a tuple of the match + dict and the route object if a match is successfull, otherwise + it returns empty. + + For internal use only. + + """ + if not self._created_regs and self.controller_scan: + self.create_regs() + elif not self._created_regs: + raise RoutesException("You must generate the regular expressions" + " before matching.") + + if self.always_scan: + self.create_regs() + + matchlog = [] + if self.prefix: + if re.match(self._regprefix, url): + url = re.sub(self._regprefix, r'\1', url) + if not url: + url = '/' + else: + return (None, None, matchlog) + + environ = environ or self.environ + sub_domains = self.sub_domains + sub_domains_ignore = self.sub_domains_ignore + domain_match = self.domain_match + debug = self.debug + + # Check to see if its a valid url against the main regexp + # Done for faster invalid URL elimination + valid_url = re.match(self._master_regexp, url) + if not valid_url: + return (None, None, matchlog) + + for route in self.matchlist: + if route.static: + if debug: + matchlog.append(dict(route=route, static=True)) + continue + match = route.match(url, environ, sub_domains, sub_domains_ignore, + domain_match) + if debug: + matchlog.append(dict(route=route, regexp=bool(match))) + if isinstance(match, dict) or match: + return (match, route, matchlog) + return (None, None, matchlog) + + def match(self, url=None, environ=None): + """Match a URL against against one of the routes contained. + + Will return None if no valid match is found. + + .. 
code-block:: python + + resultdict = m.match('/joe/sixpack') + + """ + if not url and not environ: + raise RoutesException('URL or environ must be provided') + + if not url: + url = environ['PATH_INFO'] + + result = self._match(url, environ) + if self.debug: + return result[0], result[1], result[2] + if isinstance(result[0], dict) or result[0]: + return result[0] + return None + + def routematch(self, url=None, environ=None): + """Match a URL against against one of the routes contained. + + Will return None if no valid match is found, otherwise a + result dict and a route object is returned. + + .. code-block:: python + + resultdict, route_obj = m.match('/joe/sixpack') + + """ + if not url and not environ: + raise RoutesException('URL or environ must be provided') + + if not url: + url = environ['PATH_INFO'] + result = self._match(url, environ) + if self.debug: + return result[0], result[1], result[2] + if isinstance(result[0], dict) or result[0]: + return result[0], result[1] + return None + + def generate(self, *args, **kargs): + """Generate a route from a set of keywords + + Returns the url text, or None if no URL could be generated. + + .. code-block:: python + + m.generate(controller='content',action='view',id=10) + + """ + # Generate ourself if we haven't already + if not self._created_gens: + self._create_gens() + + if self.append_slash: + kargs['_append_slash'] = True + + if not self.explicit: + if 'controller' not in kargs: + kargs['controller'] = 'content' + if 'action' not in kargs: + kargs['action'] = 'index' + + controller = kargs.get('controller', None) + action = kargs.get('action', None) + + # If the URL didn't depend on the SCRIPT_NAME, we'll cache it + # keyed by just by kargs; otherwise we need to cache it with + # both SCRIPT_NAME and kargs: + cache_key = unicode(args).encode('utf8') + \ + unicode(kargs).encode('utf8') + + if self.urlcache is not None: + if self.environ: + cache_key_script_name = '%s:%s' % ( + self.environ.get('SCRIPT_NAME', ''), cache_key) + else: + cache_key_script_name = cache_key + + # Check the url cache to see if it exists, use it if it does + for key in [cache_key, cache_key_script_name]: + if key in self.urlcache: + return self.urlcache[key] + + actionlist = self._gendict.get(controller) or self._gendict.get('*', {}) + if not actionlist and not args: + return None + (keylist, sortcache) = actionlist.get(action) or \ + actionlist.get('*', (None, {})) + if not keylist and not args: + return None + + keys = frozenset(kargs.keys()) + cacheset = False + cachekey = unicode(keys) + cachelist = sortcache.get(cachekey) + if args: + keylist = args + elif cachelist: + keylist = cachelist + else: + cacheset = True + newlist = [] + for route in keylist: + if len(route.minkeys - route.dotkeys - keys) == 0: + newlist.append(route) + keylist = newlist + + def keysort(a, b): + """Sorts two sets of sets, to order them ideally for + matching.""" + am = a.minkeys + a = a.maxkeys + b = b.maxkeys + + lendiffa = len(keys^a) + lendiffb = len(keys^b) + # If they both match, don't switch them + if lendiffa == 0 and lendiffb == 0: + return 0 + + # First, if a matches exactly, use it + if lendiffa == 0: + return -1 + + # Or b matches exactly, use it + if lendiffb == 0: + return 1 + + # Neither matches exactly, return the one with the most in + # common + if cmp(lendiffa, lendiffb) != 0: + return cmp(lendiffa, lendiffb) + + # Neither matches exactly, but if they both have just as much + # in common + if len(keys&b) == len(keys&a): + # Then we return the shortest of the two 
+ return cmp(len(a), len(b)) + + # Otherwise, we return the one that has the most in common + else: + return cmp(len(keys&b), len(keys&a)) + + keylist.sort(keysort) + if cacheset: + sortcache[cachekey] = keylist + + # Iterate through the keylist of sorted routes (or a single route if + # it was passed in explicitly for hardcoded named routes) + for route in keylist: + fail = False + for key in route.hardcoded: + kval = kargs.get(key) + if not kval: + continue + if isinstance(kval, str): + kval = kval.decode(self.encoding) + else: + kval = unicode(kval) + if kval != route.defaults[key] and not callable(route.defaults[key]): + fail = True + break + if fail: + continue + path = route.generate(**kargs) + if path: + if self.prefix: + path = self.prefix + path + external_static = route.static and route.external + if self.environ and self.environ.get('SCRIPT_NAME', '') != ''\ + and not route.absolute and not external_static: + path = self.environ['SCRIPT_NAME'] + path + key = cache_key_script_name + else: + key = cache_key + if self.urlcache is not None: + self.urlcache[key] = str(path) + return str(path) + else: + continue + return None + + def resource(self, member_name, collection_name, **kwargs): + """Generate routes for a controller resource + + The member_name name should be the appropriate singular version + of the resource given your locale and used with members of the + collection. The collection_name name will be used to refer to + the resource collection methods and should be a plural version + of the member_name argument. By default, the member_name name + will also be assumed to map to a controller you create. + + The concept of a web resource maps somewhat directly to 'CRUD' + operations. The overlying things to keep in mind is that + mapping a resource is about handling creating, viewing, and + editing that resource. + + All keyword arguments are optional. + + ``controller`` + If specified in the keyword args, the controller will be + the actual controller used, but the rest of the naming + conventions used for the route names and URL paths are + unchanged. + + ``collection`` + Additional action mappings used to manipulate/view the + entire set of resources provided by the controller. + + Example:: + + map.resource('message', 'messages', collection={'rss':'GET'}) + # GET /message/rss (maps to the rss action) + # also adds named route "rss_message" + + ``member`` + Additional action mappings used to access an individual + 'member' of this controllers resources. + + Example:: + + map.resource('message', 'messages', member={'mark':'POST'}) + # POST /message/1/mark (maps to the mark action) + # also adds named route "mark_message" + + ``new`` + Action mappings that involve dealing with a new member in + the controller resources. + + Example:: + + map.resource('message', 'messages', new={'preview':'POST'}) + # POST /message/new/preview (maps to the preview action) + # also adds a url named "preview_new_message" + + ``path_prefix`` + Prepends the URL path for the Route with the path_prefix + given. This is most useful for cases where you want to mix + resources or relations between resources. + + ``name_prefix`` + Perpends the route names that are generated with the + name_prefix given. Combined with the path_prefix option, + it's easy to generate route names and paths that represent + resources that are in relations. 
+ + Example:: + + map.resource('message', 'messages', controller='categories', + path_prefix='/category/:category_id', + name_prefix="category_") + # GET /category/7/message/1 + # has named route "category_message" + + ``parent_resource`` + A ``dict`` containing information about the parent + resource, for creating a nested resource. It should contain + the ``member_name`` and ``collection_name`` of the parent + resource. This ``dict`` will + be available via the associated ``Route`` object which can + be accessed during a request via + ``request.environ['routes.route']`` + + If ``parent_resource`` is supplied and ``path_prefix`` + isn't, ``path_prefix`` will be generated from + ``parent_resource`` as + "/:_id". + + If ``parent_resource`` is supplied and ``name_prefix`` + isn't, ``name_prefix`` will be generated from + ``parent_resource`` as "_". + + Example:: + + >>> from routes.util import url_for + >>> m = Mapper() + >>> m.resource('location', 'locations', + ... parent_resource=dict(member_name='region', + ... collection_name='regions')) + >>> # path_prefix is "regions/:region_id" + >>> # name prefix is "region_" + >>> url_for('region_locations', region_id=13) + '/regions/13/locations' + >>> url_for('region_new_location', region_id=13) + '/regions/13/locations/new' + >>> url_for('region_location', region_id=13, id=60) + '/regions/13/locations/60' + >>> url_for('region_edit_location', region_id=13, id=60) + '/regions/13/locations/60/edit' + + Overriding generated ``path_prefix``:: + + >>> m = Mapper() + >>> m.resource('location', 'locations', + ... parent_resource=dict(member_name='region', + ... collection_name='regions'), + ... path_prefix='areas/:area_id') + >>> # name prefix is "region_" + >>> url_for('region_locations', area_id=51) + '/areas/51/locations' + + Overriding generated ``name_prefix``:: + + >>> m = Mapper() + >>> m.resource('location', 'locations', + ... parent_resource=dict(member_name='region', + ... collection_name='regions'), + ... name_prefix='') + >>> # path_prefix is "regions/:region_id" + >>> url_for('locations', region_id=51) + '/regions/51/locations' + + """ + collection = kwargs.pop('collection', {}) + member = kwargs.pop('member', {}) + new = kwargs.pop('new', {}) + path_prefix = kwargs.pop('path_prefix', None) + name_prefix = kwargs.pop('name_prefix', None) + parent_resource = kwargs.pop('parent_resource', None) + + # Generate ``path_prefix`` if ``path_prefix`` wasn't specified and + # ``parent_resource`` was. Likewise for ``name_prefix``. Make sure + # that ``path_prefix`` and ``name_prefix`` *always* take precedence if + # they are specified--in particular, we need to be careful when they + # are explicitly set to "". 
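+        # For example (hypothetical resource): parent_resource=
+        # dict(member_name='region', collection_name='regions') yields
+        # path_prefix 'regions/:region_id' and name_prefix 'region_'
+        # unless either was passed explicitly (including as '').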
+ if parent_resource is not None: + if path_prefix is None: + path_prefix = '%s/:%s_id' % (parent_resource['collection_name'], + parent_resource['member_name']) + if name_prefix is None: + name_prefix = '%s_' % parent_resource['member_name'] + else: + if path_prefix is None: path_prefix = '' + if name_prefix is None: name_prefix = '' + + # Ensure the edit and new actions are in and GET + member['edit'] = 'GET' + new.update({'new': 'GET'}) + + # Make new dict's based off the old, except the old values become keys, + # and the old keys become items in a list as the value + def swap(dct, newdct): + """Swap the keys and values in the dict, and uppercase the values + from the dict during the swap.""" + for key, val in dct.iteritems(): + newdct.setdefault(val.upper(), []).append(key) + return newdct + collection_methods = swap(collection, {}) + member_methods = swap(member, {}) + new_methods = swap(new, {}) + + # Insert create, update, and destroy methods + collection_methods.setdefault('POST', []).insert(0, 'create') + member_methods.setdefault('PUT', []).insert(0, 'update') + member_methods.setdefault('DELETE', []).insert(0, 'delete') + + # If there's a path prefix option, use it with the controller + controller = strip_slashes(collection_name) + path_prefix = strip_slashes(path_prefix) + path_prefix = '/' + path_prefix + if path_prefix and path_prefix != '/': + path = path_prefix + '/' + controller + else: + path = '/' + controller + collection_path = path + new_path = path + "/new" + member_path = path + "/:(id)" + + options = { + 'controller': kwargs.get('controller', controller), + '_member_name': member_name, + '_collection_name': collection_name, + '_parent_resource': parent_resource, + '_filter': kwargs.get('_filter') + } + + def requirements_for(meth): + """Returns a new dict to be used for all route creation as the + route options""" + opts = options.copy() + if method != 'any': + opts['conditions'] = {'method':[meth.upper()]} + return opts + + # Add the routes for handling collection methods + for method, lst in collection_methods.iteritems(): + primary = (method != 'GET' and lst.pop(0)) or None + route_options = requirements_for(method) + for action in lst: + route_options['action'] = action + route_name = "%s%s_%s" % (name_prefix, action, collection_name) + self.connect("formatted_" + route_name, "%s/%s.:(format)" % \ + (collection_path, action), **route_options) + self.connect(route_name, "%s/%s" % (collection_path, action), + **route_options) + if primary: + route_options['action'] = primary + self.connect("%s.:(format)" % collection_path, **route_options) + self.connect(collection_path, **route_options) + + # Specifically add in the built-in 'index' collection method and its + # formatted version + self.connect("formatted_" + name_prefix + collection_name, + collection_path + ".:(format)", action='index', + conditions={'method':['GET']}, **options) + self.connect(name_prefix + collection_name, collection_path, + action='index', conditions={'method':['GET']}, **options) + + # Add the routes that deal with new resource methods + for method, lst in new_methods.iteritems(): + route_options = requirements_for(method) + for action in lst: + path = (action == 'new' and new_path) or "%s/%s" % (new_path, + action) + name = "new_" + member_name + if action != 'new': + name = action + "_" + name + route_options['action'] = action + formatted_path = (action == 'new' and new_path + '.:(format)') or \ + "%s/%s.:(format)" % (new_path, action) + self.connect("formatted_" + name_prefix + name, 
formatted_path, + **route_options) + self.connect(name_prefix + name, path, **route_options) + + requirements_regexp = '[^\/]+' + + # Add the routes that deal with member methods of a resource + for method, lst in member_methods.iteritems(): + route_options = requirements_for(method) + route_options['requirements'] = {'id':requirements_regexp} + if method not in ['POST', 'GET', 'any']: + primary = lst.pop(0) + else: + primary = None + for action in lst: + route_options['action'] = action + self.connect("formatted_%s%s_%s" % (name_prefix, action, + member_name), + "%s/%s.:(format)" % (member_path, action), **route_options) + self.connect("%s%s_%s" % (name_prefix, action, member_name), + "%s/%s" % (member_path, action), **route_options) + if primary: + route_options['action'] = primary + self.connect("%s.:(format)" % member_path, **route_options) + self.connect(member_path, **route_options) + + # Specifically add the member 'show' method + route_options = requirements_for('GET') + route_options['action'] = 'show' + route_options['requirements'] = {'id':requirements_regexp} + self.connect("formatted_" + name_prefix + member_name, + member_path + ".:(format)", **route_options) + self.connect(name_prefix + member_name, member_path, **route_options) + + def redirect(self, match_path, destination_path, *args, **kwargs): + """Add a redirect route to the mapper + + Redirect routes bypass the wrapped WSGI application and instead + result in a redirect being issued by the RoutesMiddleware. As + such, this method is only meaningful when using + RoutesMiddleware. + + By default, a 302 Found status code is used, this can be + changed by providing a ``_redirect_code`` keyword argument + which will then be used instead. Note that the entire status + code string needs to be present. + + When using keyword arguments, all arguments that apply to + matching will be used for the match, while generation specific + options will be used during generation. Thus all options + normally available to connected Routes may be used with + redirect routes as well. 
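+
+        As an illustrative sketch (hypothetical paths), matching options
+        such as ``requirements`` apply to the matching route, while
+        generation-only options such as ``_filter`` apply to the
+        destination route::
+
+            map = Mapper()
+            map.redirect('/archives/{id}', '/posts/{id}',
+                         requirements={'id': '\d+'},
+                         _redirect_code='301 Moved Permanently')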
+ + Example:: + + map = Mapper() + map.redirect('/legacyapp/archives/{url:.*}, '/archives/{url}) + map.redirect('/home/index', '/', _redirect_code='301 Moved Permanently') + + """ + both_args = ['_encoding', '_explicit', '_minimize'] + gen_args = ['_filter'] + + status_code = kwargs.pop('_redirect_code', '302 Found') + gen_dict, match_dict = {}, {} + + # Create the dict of args for the generation route + for key in both_args + gen_args: + if key in kwargs: + gen_dict[key] = kwargs[key] + gen_dict['_static'] = True + + # Create the dict of args for the matching route + for key in kwargs: + if key not in gen_args: + match_dict[key] = kwargs[key] + + self.connect(match_path, **match_dict) + match_route = self.matchlist[-1] + + self.connect('_redirect_%s' % id(match_route), destination_path, + **gen_dict) + match_route.redirect = True + match_route.redirect_status = status_code diff --git a/src/routes/middleware.py b/src/routes/middleware.py new file mode 100644 index 0000000000..d4c005ee78 --- /dev/null +++ b/src/routes/middleware.py @@ -0,0 +1,146 @@ +"""Routes WSGI Middleware""" +import re +import logging + +from webob import Request + +from routes.base import request_config +from routes.util import URLGenerator, url_for + +log = logging.getLogger('routes.middleware') + +class RoutesMiddleware(object): + """Routing middleware that handles resolving the PATH_INFO in + addition to optionally recognizing method overriding.""" + def __init__(self, wsgi_app, mapper, use_method_override=True, + path_info=True, singleton=True): + """Create a Route middleware object + + Using the use_method_override keyword will require Paste to be + installed, and your application should use Paste's WSGIRequest + object as it will properly handle POST issues with wsgi.input + should Routes check it. + + If path_info is True, then should a route var contain + path_info, the SCRIPT_NAME and PATH_INFO will be altered + accordingly. This should be used with routes like: + + .. 
code-block:: python + + map.connect('blog/*path_info', controller='blog', path_info='') + + """ + self.app = wsgi_app + self.mapper = mapper + self.singleton = singleton + self.use_method_override = use_method_override + self.path_info = path_info + log_debug = self.log_debug = logging.DEBUG >= log.getEffectiveLevel() + if self.log_debug: + log.debug("Initialized with method overriding = %s, and path " + "info altering = %s", use_method_override, path_info) + + def __call__(self, environ, start_response): + """Resolves the URL in PATH_INFO, and uses wsgi.routing_args + to pass on URL resolver results.""" + old_method = None + if self.use_method_override: + req = None + + # In some odd cases, there's no query string + try: + qs = environ['QUERY_STRING'] + except KeyError: + qs = '' + if '_method' in qs: + req = Request(environ) + req.errors = 'ignore' + if '_method' in req.GET: + old_method = environ['REQUEST_METHOD'] + environ['REQUEST_METHOD'] = req.GET['_method'].upper() + if self.log_debug: + log.debug("_method found in QUERY_STRING, altering request" + " method to %s", environ['REQUEST_METHOD']) + elif environ['REQUEST_METHOD'] == 'POST' and is_form_post(environ): + if req is None: + req = Request(environ) + req.errors = 'ignore' + if '_method' in req.POST: + old_method = environ['REQUEST_METHOD'] + environ['REQUEST_METHOD'] = req.POST['_method'].upper() + if self.log_debug: + log.debug("_method found in POST data, altering request " + "method to %s", environ['REQUEST_METHOD']) + + # Run the actual route matching + # -- Assignment of environ to config triggers route matching + if self.singleton: + config = request_config() + config.mapper = self.mapper + config.environ = environ + match = config.mapper_dict + route = config.route + else: + results = self.mapper.routematch(environ=environ) + if results: + match, route = results[0], results[1] + else: + match = route = None + + if old_method: + environ['REQUEST_METHOD'] = old_method + + if not match: + match = {} + if self.log_debug: + urlinfo = "%s %s" % (environ['REQUEST_METHOD'], environ['PATH_INFO']) + log.debug("No route matched for %s", urlinfo) + elif self.log_debug: + urlinfo = "%s %s" % (environ['REQUEST_METHOD'], environ['PATH_INFO']) + log.debug("Matched %s", urlinfo) + log.debug("Route path: '%s', defaults: %s", route.routepath, + route.defaults) + log.debug("Match dict: %s", match) + + url = URLGenerator(self.mapper, environ) + environ['wsgiorg.routing_args'] = ((url), match) + environ['routes.route'] = route + environ['routes.url'] = url + + if route and route.redirect: + route_name = '_redirect_%s' % id(route) + location = url(route_name, **match) + log.debug("Using redirect route, redirect to '%s' with status" + "code: %s", location, route.redirect_status) + start_response(route.redirect_status, + [('Content-Type', 'text/plain; charset=utf8'), + ('Location', location)]) + return [] + + # If the route included a path_info attribute and it should be used to + # alter the environ, we'll pull it out + if self.path_info and 'path_info' in match: + oldpath = environ['PATH_INFO'] + newpath = match.get('path_info') or '' + environ['PATH_INFO'] = newpath + if not environ['PATH_INFO'].startswith('/'): + environ['PATH_INFO'] = '/' + environ['PATH_INFO'] + environ['SCRIPT_NAME'] += re.sub(r'^(.*?)/' + re.escape(newpath) + '$', + r'\1', oldpath) + + response = self.app(environ, start_response) + + # Wrapped in try as in rare cases the attribute will be gone already + try: + del self.mapper.environ + except AttributeError: + pass + 
return response + +def is_form_post(environ): + """Determine whether the request is a POSTed html form""" + content_type = environ.get('CONTENT_TYPE', '').lower() + if ';' in content_type: + content_type = content_type.split(';', 1)[0] + return content_type in ('application/x-www-form-urlencoded', + 'multipart/form-data') diff --git a/src/routes/route.py b/src/routes/route.py new file mode 100644 index 0000000000..688d6e4cb9 --- /dev/null +++ b/src/routes/route.py @@ -0,0 +1,742 @@ +import re +import sys +import urllib + +if sys.version < '2.4': + from sets import ImmutableSet as frozenset + +from routes.util import _url_quote as url_quote, _str_encode + + +class Route(object): + """The Route object holds a route recognition and generation + routine. + + See Route.__init__ docs for usage. + + """ + # reserved keys that don't count + reserved_keys = ['requirements'] + + # special chars to indicate a natural split in the URL + done_chars = ('/', ',', ';', '.', '#') + + def __init__(self, name, routepath, **kargs): + """Initialize a route, with a given routepath for + matching/generation + + The set of keyword args will be used as defaults. + + Usage:: + + >>> from routes.base import Route + >>> newroute = Route(None, ':controller/:action/:id') + >>> sorted(newroute.defaults.items()) + [('action', 'index'), ('id', None)] + >>> newroute = Route(None, 'date/:year/:month/:day', + ... controller="blog", action="view") + >>> newroute = Route(None, 'archives/:page', controller="blog", + ... action="by_page", requirements = { 'page':'\d{1,2}' }) + >>> newroute.reqs + {'page': '\\\d{1,2}'} + + .. Note:: + Route is generally not called directly, a Mapper instance + connect method should be used to add routes. + + """ + self.routepath = routepath + self.sub_domains = False + self.prior = None + self.redirect = False + self.name = name + self._kargs = kargs + self.minimization = kargs.pop('_minimize', False) + self.encoding = kargs.pop('_encoding', 'utf-8') + self.reqs = kargs.get('requirements', {}) + self.decode_errors = 'replace' + + # Don't bother forming stuff we don't need if its a static route + self.static = kargs.pop('_static', False) + self.filter = kargs.pop('_filter', None) + self.absolute = kargs.pop('_absolute', False) + + # Pull out the member/collection name if present, this applies only to + # map.resource + self.member_name = kargs.pop('_member_name', None) + self.collection_name = kargs.pop('_collection_name', None) + self.parent_resource = kargs.pop('_parent_resource', None) + + # Pull out route conditions + self.conditions = kargs.pop('conditions', None) + + # Determine if explicit behavior should be used + self.explicit = kargs.pop('_explicit', False) + + # Since static need to be generated exactly, treat them as + # non-minimized + if self.static: + self.external = '://' in self.routepath + self.minimization = False + + # Strip preceding '/' if present, and not minimizing + if routepath.startswith('/') and self.minimization: + self.routepath = routepath[1:] + self._setup_route() + + def _setup_route(self): + # Build our routelist, and the keys used in the route + self.routelist = routelist = self._pathkeys(self.routepath) + routekeys = frozenset([key['name'] for key in routelist + if isinstance(key, dict)]) + self.dotkeys = frozenset([key['name'] for key in routelist + if isinstance(key, dict) and + key['type'] == '.']) + + if not self.minimization: + self.make_full_route() + + # Build a req list with all the regexp requirements for our args + self.req_regs = {} + for key, val in 
self.reqs.iteritems(): + self.req_regs[key] = re.compile('^' + val + '$') + # Update our defaults and set new default keys if needed. defaults + # needs to be saved + (self.defaults, defaultkeys) = self._defaults(routekeys, + self.reserved_keys, + self._kargs.copy()) + # Save the maximum keys we could utilize + self.maxkeys = defaultkeys | routekeys + + # Populate our minimum keys, and save a copy of our backward keys for + # quicker generation later + (self.minkeys, self.routebackwards) = self._minkeys(routelist[:]) + + # Populate our hardcoded keys, these are ones that are set and don't + # exist in the route + self.hardcoded = frozenset([key for key in self.maxkeys \ + if key not in routekeys and self.defaults[key] is not None]) + + # Cache our default keys + self._default_keys = frozenset(self.defaults.keys()) + + def make_full_route(self): + """Make a full routelist string for use with non-minimized + generation""" + regpath = '' + for part in self.routelist: + if isinstance(part, dict): + regpath += '%(' + part['name'] + ')s' + else: + regpath += part + self.regpath = regpath + + def make_unicode(self, s): + """Transform the given argument into a unicode string.""" + if isinstance(s, unicode): + return s + elif isinstance(s, str): + return s.decode(self.encoding) + elif callable(s): + return s + else: + return unicode(s) + + def _pathkeys(self, routepath): + """Utility function to walk the route, and pull out the valid + dynamic/wildcard keys.""" + collecting = False + current = '' + done_on = '' + var_type = '' + just_started = False + routelist = [] + for char in routepath: + if char in [':', '*', '{'] and not collecting and not self.static \ + or char in ['{'] and not collecting: + just_started = True + collecting = True + var_type = char + if char == '{': + done_on = '}' + just_started = False + if len(current) > 0: + routelist.append(current) + current = '' + elif collecting and just_started: + just_started = False + if char == '(': + done_on = ')' + else: + current = char + done_on = self.done_chars + ('-',) + elif collecting and char not in done_on: + current += char + elif collecting: + collecting = False + if var_type == '{': + if current[0] == '.': + var_type = '.' + current = current[1:] + else: + var_type = ':' + opts = current.split(':') + if len(opts) > 1: + current = opts[0] + self.reqs[current] = opts[1] + routelist.append(dict(type=var_type, name=current)) + if char in self.done_chars: + routelist.append(char) + done_on = var_type = current = '' + else: + current += char + if collecting: + routelist.append(dict(type=var_type, name=current)) + elif current: + routelist.append(current) + return routelist + + def _minkeys(self, routelist): + """Utility function to walk the route backwards + + Will also determine the minimum keys we can handle to generate + a working route. 
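+
+        As a rough illustration: for a minimized route 'date/:year/:month/:day'
+        with no defaults, all three variables are minimum keys; if ':day' had
+        a default it would not be a minimum key, since nothing required
+        follows it in the path and generation may omit it.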
+ + routelist is a list of the '/' split route path + defaults is a dict of all the defaults provided for the route + + """ + minkeys = [] + backcheck = routelist[:] + + # If we don't honor minimization, we need all the keys in the + # route path + if not self.minimization: + for part in backcheck: + if isinstance(part, dict): + minkeys.append(part['name']) + return (frozenset(minkeys), backcheck) + + gaps = False + backcheck.reverse() + for part in backcheck: + if not isinstance(part, dict) and part not in self.done_chars: + gaps = True + continue + elif not isinstance(part, dict): + continue + key = part['name'] + if self.defaults.has_key(key) and not gaps: + continue + minkeys.append(key) + gaps = True + return (frozenset(minkeys), backcheck) + + def _defaults(self, routekeys, reserved_keys, kargs): + """Creates default set with values stringified + + Put together our list of defaults, stringify non-None values + and add in our action/id default if they use it and didn't + specify it. + + defaultkeys is a list of the currently assumed default keys + routekeys is a list of the keys found in the route path + reserved_keys is a list of keys that are not + + """ + defaults = {} + # Add in a controller/action default if they don't exist + if 'controller' not in routekeys and 'controller' not in kargs \ + and not self.explicit: + kargs['controller'] = 'content' + if 'action' not in routekeys and 'action' not in kargs \ + and not self.explicit: + kargs['action'] = 'index' + defaultkeys = frozenset([key for key in kargs.keys() \ + if key not in reserved_keys]) + for key in defaultkeys: + if kargs[key] is not None: + defaults[key] = self.make_unicode(kargs[key]) + else: + defaults[key] = None + if 'action' in routekeys and not defaults.has_key('action') \ + and not self.explicit: + defaults['action'] = 'index' + if 'id' in routekeys and not defaults.has_key('id') \ + and not self.explicit: + defaults['id'] = None + newdefaultkeys = frozenset([key for key in defaults.keys() \ + if key not in reserved_keys]) + + return (defaults, newdefaultkeys) + + def makeregexp(self, clist, include_names=True): + """Create a regular expression for matching purposes + + Note: This MUST be called before match can function properly. + + clist should be a list of valid controller strings that can be + matched, for this reason makeregexp should be called by the web + framework after it knows all available controllers that can be + utilized. + + include_names indicates whether this should be a match regexp + assigned to itself using regexp grouping names, or if names + should be excluded for use in a single larger regexp to + determine if any routes match + + """ + if self.minimization: + reg = self.buildnextreg(self.routelist, clist, include_names)[0] + if not reg: + reg = '/' + reg = reg + '/?' + '$' + + if not reg.startswith('/'): + reg = '/' + reg + else: + reg = self.buildfullreg(clist, include_names) + + reg = '^' + reg + + if not include_names: + return reg + + self.regexp = reg + self.regmatch = re.compile(reg) + + def buildfullreg(self, clist, include_names=True): + """Build the regexp by iterating through the routelist and + replacing dicts with the appropriate regexp match""" + regparts = [] + for part in self.routelist: + if isinstance(part, dict): + var = part['name'] + if var == 'controller': + partmatch = '|'.join(map(re.escape, clist)) + elif part['type'] == ':': + partmatch = self.reqs.get(var) or '[^/]+?' + elif part['type'] == '.': + partmatch = self.reqs.get(var) or '[^/.]+?' 
+ else: + partmatch = self.reqs.get(var) or '.+?' + if include_names: + regpart = '(?P<%s>%s)' % (var, partmatch) + else: + regpart = '(?:%s)' % partmatch + if part['type'] == '.': + regparts.append('(?:\.%s)??' % regpart) + else: + regparts.append(regpart) + else: + regparts.append(re.escape(part)) + regexp = ''.join(regparts) + '$' + return regexp + + def buildnextreg(self, path, clist, include_names=True): + """Recursively build our regexp given a path, and a controller + list. + + Returns the regular expression string, and two booleans that + can be ignored as they're only used internally by buildnextreg. + + """ + if path: + part = path[0] + else: + part = '' + reg = '' + + # noreqs will remember whether the remainder has either a string + # match, or a non-defaulted regexp match on a key, allblank remembers + # if the rest could possible be completely empty + (rest, noreqs, allblank) = ('', True, True) + if len(path[1:]) > 0: + self.prior = part + (rest, noreqs, allblank) = self.buildnextreg(path[1:], clist, include_names) + + if isinstance(part, dict) and part['type'] in (':', '.'): + var = part['name'] + typ = part['type'] + partreg = '' + + # First we plug in the proper part matcher + if self.reqs.has_key(var): + if include_names: + partreg = '(?P<%s>%s)' % (var, self.reqs[var]) + else: + partreg = '(?:%s)' % self.reqs[var] + if typ == '.': + partreg = '(?:\.%s)??' % partreg + elif var == 'controller': + if include_names: + partreg = '(?P<%s>%s)' % (var, '|'.join(map(re.escape, clist))) + else: + partreg = '(?:%s)' % '|'.join(map(re.escape, clist)) + elif self.prior in ['/', '#']: + if include_names: + partreg = '(?P<' + var + '>[^' + self.prior + ']+?)' + else: + partreg = '(?:[^' + self.prior + ']+?)' + else: + if not rest: + if typ == '.': + exclude_chars = '/.' + else: + exclude_chars = '/' + if include_names: + partreg = '(?P<%s>[^%s]+?)' % (var, exclude_chars) + else: + partreg = '(?:[^%s]+?)' % exclude_chars + if typ == '.': + partreg = '(?:\.%s)??' % partreg + else: + end = ''.join(self.done_chars) + rem = rest + if rem[0] == '\\' and len(rem) > 1: + rem = rem[1] + elif rem.startswith('(\\') and len(rem) > 2: + rem = rem[2] + else: + rem = end + rem = frozenset(rem) | frozenset(['/']) + if include_names: + partreg = '(?P<%s>[^%s]+?)' % (var, ''.join(rem)) + else: + partreg = '(?:[^%s]+?)' % ''.join(rem) + + if self.reqs.has_key(var): + noreqs = False + if not self.defaults.has_key(var): + allblank = False + noreqs = False + + # Now we determine if its optional, or required. This changes + # depending on what is in the rest of the match. If noreqs is + # true, then its possible the entire thing is optional as there's + # no reqs or string matches. + if noreqs: + # The rest is optional, but now we have an optional with a + # regexp. Wrap to ensure that if we match anything, we match + # our regexp first. It's still possible we could be completely + # blank as we have a default + if self.reqs.has_key(var) and self.defaults.has_key(var): + reg = '(' + partreg + rest + ')?' + + # Or we have a regexp match with no default, so now being + # completely blank form here on out isn't possible + elif self.reqs.has_key(var): + allblank = False + reg = partreg + rest + + # If the character before this is a special char, it has to be + # followed by this + elif self.defaults.has_key(var) and \ + self.prior in (',', ';', '.'): + reg = partreg + rest + + # Or we have a default with no regexp, don't touch the allblank + elif self.defaults.has_key(var): + reg = partreg + '?' 
+ rest + + # Or we have a key with no default, and no reqs. Not possible + # to be all blank from here + else: + allblank = False + reg = partreg + rest + # In this case, we have something dangling that might need to be + # matched + else: + # If they can all be blank, and we have a default here, we know + # its safe to make everything from here optional. Since + # something else in the chain does have req's though, we have + # to make the partreg here required to continue matching + if allblank and self.defaults.has_key(var): + reg = '(' + partreg + rest + ')?' + + # Same as before, but they can't all be blank, so we have to + # require it all to ensure our matches line up right + else: + reg = partreg + rest + elif isinstance(part, dict) and part['type'] == '*': + var = part['name'] + if noreqs: + if include_names: + reg = '(?P<%s>.*)' % var + rest + else: + reg = '(?:.*)' + rest + if not self.defaults.has_key(var): + allblank = False + noreqs = False + else: + if allblank and self.defaults.has_key(var): + if include_names: + reg = '(?P<%s>.*)' % var + rest + else: + reg = '(?:.*)' + rest + elif self.defaults.has_key(var): + if include_names: + reg = '(?P<%s>.*)' % var + rest + else: + reg = '(?:.*)' + rest + else: + if include_names: + reg = '(?P<%s>.*)' % var + rest + else: + reg = '(?:.*)' + rest + allblank = False + noreqs = False + elif part and part[-1] in self.done_chars: + if allblank: + reg = re.escape(part[:-1]) + '(' + re.escape(part[-1]) + rest + reg += ')?' + else: + allblank = False + reg = re.escape(part) + rest + + # We have a normal string here, this is a req, and it prevents us from + # being all blank + else: + noreqs = False + allblank = False + reg = re.escape(part) + rest + + return (reg, noreqs, allblank) + + def match(self, url, environ=None, sub_domains=False, + sub_domains_ignore=None, domain_match=''): + """Match a url to our regexp. + + While the regexp might match, this operation isn't + guaranteed as there's other factors that can cause a match to + fail even though the regexp succeeds (Default that was relied + on wasn't given, requirement regexp doesn't pass, etc.). + + Therefore the calling function shouldn't assume this will + return a valid dict, the other possible return is False if a + match doesn't work out. + + """ + # Static routes don't match, they generate only + if self.static: + return False + + match = self.regmatch.match(url) + + if not match: + return False + + sub_domain = None + + if sub_domains and environ and 'HTTP_HOST' in environ: + host = environ['HTTP_HOST'].split(':')[0] + sub_match = re.compile('^(.+?)\.%s$' % domain_match) + subdomain = re.sub(sub_match, r'\1', host) + if subdomain not in sub_domains_ignore and host != subdomain: + sub_domain = subdomain + + if self.conditions: + if 'method' in self.conditions and environ and \ + environ['REQUEST_METHOD'] not in self.conditions['method']: + return False + + # Check sub-domains? 
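+            # 'sub_domain' in the conditions may be True (any sub-domain is
+            # required), a list of acceptable sub-domains, or a false value
+            # (in which case no sub-domain may be present).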
+ use_sd = self.conditions.get('sub_domain') + if use_sd and not sub_domain: + return False + elif not use_sd and 'sub_domain' in self.conditions and sub_domain: + return False + if isinstance(use_sd, list) and sub_domain not in use_sd: + return False + + matchdict = match.groupdict() + result = {} + extras = self._default_keys - frozenset(matchdict.keys()) + for key, val in matchdict.iteritems(): + if key != 'path_info' and self.encoding: + # change back into python unicode objects from the URL + # representation + try: + val = val and val.decode(self.encoding, self.decode_errors) + except UnicodeDecodeError: + return False + + if not val and key in self.defaults and self.defaults[key]: + result[key] = self.defaults[key] + else: + result[key] = val + for key in extras: + result[key] = self.defaults[key] + + # Add the sub-domain if there is one + if sub_domains: + result['sub_domain'] = sub_domain + + # If there's a function, call it with environ and expire if it + # returns False + if self.conditions and 'function' in self.conditions and \ + not self.conditions['function'](environ, result): + return False + + return result + + def generate_non_minimized(self, kargs): + """Generate a non-minimal version of the URL""" + # Iterate through the keys that are defaults, and NOT in the route + # path. If its not in kargs, or doesn't match, or is None, this + # route won't work + for k in self.maxkeys - self.minkeys: + if k not in kargs: + return False + elif self.make_unicode(kargs[k]) != \ + self.make_unicode(self.defaults[k]): + return False + + # Ensure that all the args in the route path are present and not None + for arg in self.minkeys: + if arg not in kargs or kargs[arg] is None: + if arg in self.dotkeys: + kargs[arg] = '' + else: + return False + + # Encode all the argument that the regpath can use + for k in kargs: + if k in self.maxkeys: + if k in self.dotkeys: + if kargs[k]: + kargs[k] = url_quote('.' + kargs[k], self.encoding) + else: + kargs[k] = url_quote(kargs[k], self.encoding) + + return self.regpath % kargs + + def generate_minimized(self, kargs): + """Generate a minimized version of the URL""" + routelist = self.routebackwards + urllist = [] + gaps = False + for part in routelist: + if isinstance(part, dict) and part['type'] in (':', '.'): + arg = part['name'] + + # For efficiency, check these just once + has_arg = kargs.has_key(arg) + has_default = self.defaults.has_key(arg) + + # Determine if we can leave this part off + # First check if the default exists and wasn't provided in the + # call (also no gaps) + if has_default and not has_arg and not gaps: + continue + + # Now check to see if there's a default and it matches the + # incoming call arg + if (has_default and has_arg) and self.make_unicode(kargs[arg]) == \ + self.make_unicode(self.defaults[arg]) and not gaps: + continue + + # We need to pull the value to append, if the arg is None and + # we have a default, use that + if has_arg and kargs[arg] is None and has_default and not gaps: + continue + + # Otherwise if we do have an arg, use that + elif has_arg: + val = kargs[arg] + + elif has_default and self.defaults[arg] is not None: + val = self.defaults[arg] + # Optional format parameter? + elif part['type'] == '.': + continue + # No arg at all? 
This won't work + else: + return False + + urllist.append(url_quote(val, self.encoding)) + if part['type'] == '.': + urllist.append('.') + + if has_arg: + del kargs[arg] + gaps = True + elif isinstance(part, dict) and part['type'] == '*': + arg = part['name'] + kar = kargs.get(arg) + if kar is not None: + urllist.append(url_quote(kar, self.encoding)) + gaps = True + elif part and part[-1] in self.done_chars: + if not gaps and part in self.done_chars: + continue + elif not gaps: + urllist.append(part[:-1]) + gaps = True + else: + gaps = True + urllist.append(part) + else: + gaps = True + urllist.append(part) + urllist.reverse() + url = ''.join(urllist) + return url + + def generate(self, _ignore_req_list=False, _append_slash=False, **kargs): + """Generate a URL from ourself given a set of keyword arguments + + Toss an exception if this + set of keywords would cause a gap in the url. + + """ + # Verify that our args pass any regexp requirements + if not _ignore_req_list: + for key in self.reqs.keys(): + val = kargs.get(key) + if val and not self.req_regs[key].match(self.make_unicode(val)): + return False + + # Verify that if we have a method arg, its in the method accept list. + # Also, method will be changed to _method for route generation + meth = kargs.get('method') + if meth: + if self.conditions and 'method' in self.conditions \ + and meth.upper() not in self.conditions['method']: + return False + kargs.pop('method') + + if self.minimization: + url = self.generate_minimized(kargs) + else: + url = self.generate_non_minimized(kargs) + + if url is False: + return url + + if not url.startswith('/') and not self.static: + url = '/' + url + extras = frozenset(kargs.keys()) - self.maxkeys + if extras: + if _append_slash and not url.endswith('/'): + url += '/' + fragments = [] + # don't assume the 'extras' set preserves order: iterate + # through the ordered kargs instead + for key in kargs: + if key not in extras: + continue + if key == 'action' or key == 'controller': + continue + val = kargs[key] + if isinstance(val, (tuple, list)): + for value in val: + fragments.append((key, _str_encode(value, self.encoding))) + else: + fragments.append((key, _str_encode(val, self.encoding))) + if fragments: + url += '?' + url += urllib.urlencode(fragments) + elif _append_slash and not url.endswith('/'): + url += '/' + return url diff --git a/src/routes/util.py b/src/routes/util.py new file mode 100644 index 0000000000..6c3f845015 --- /dev/null +++ b/src/routes/util.py @@ -0,0 +1,503 @@ +"""Utility functions for use in templates / controllers + +*PLEASE NOTE*: Many of these functions expect an initialized RequestConfig +object. This is expected to have been initialized for EACH REQUEST by the web +framework. + +""" +import os +import re +import urllib +from routes import request_config + + +class RoutesException(Exception): + """Tossed during Route exceptions""" + + +class MatchException(RoutesException): + """Tossed during URL matching exceptions""" + + +class GenerationException(RoutesException): + """Tossed during URL generation exceptions""" + + +def _screenargs(kargs, mapper, environ, force_explicit=False): + """ + Private function that takes a dict, and screens it against the current + request dict to determine what the dict should look like that is used. + This is responsible for the requests "memory" of the current. 
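A quick, hypothetical illustration of the memory merge described above, calling the private helper directly (the mapper flags, environ contents and routing values are assumptions made up for the example):

from routes import Mapper
from routes.util import _screenargs

mapper = Mapper(explicit=False)   # memory only applies to non-explicit mappers
environ = {'wsgiorg.routing_args':
           ((), {'controller': 'blog', 'action': 'view', 'id': 2})}

print _screenargs({'action': 'edit'}, mapper, environ)
# -> controller='blog', action='edit', id=2   (memory fills in the rest)
print _screenargs({'controller': '/admin'}, mapper, environ)
# -> controller='admin'                       (a leading '/' discards memory)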
+ """ + # Coerce any unicode args with the encoding + encoding = mapper.encoding + for key, val in kargs.iteritems(): + if isinstance(val, unicode): + kargs[key] = val.encode(encoding) + + if mapper.explicit and mapper.sub_domains and not force_explicit: + return _subdomain_check(kargs, mapper, environ) + elif mapper.explicit and not force_explicit: + return kargs + + controller_name = kargs.get('controller') + + if controller_name and controller_name.startswith('/'): + # If the controller name starts with '/', ignore route memory + kargs['controller'] = kargs['controller'][1:] + return kargs + elif controller_name and not kargs.has_key('action'): + # Fill in an action if we don't have one, but have a controller + kargs['action'] = 'index' + + route_args = environ.get('wsgiorg.routing_args') + if route_args: + memory_kargs = route_args[1].copy() + else: + memory_kargs = {} + + # Remove keys from memory and kargs if kargs has them as None + for key in [key for key in kargs.keys() if kargs[key] is None]: + del kargs[key] + if memory_kargs.has_key(key): + del memory_kargs[key] + + # Merge the new args on top of the memory args + memory_kargs.update(kargs) + + # Setup a sub-domain if applicable + if mapper.sub_domains: + memory_kargs = _subdomain_check(memory_kargs, mapper, environ) + return memory_kargs + + +def _subdomain_check(kargs, mapper, environ): + """Screen the kargs for a subdomain and alter it appropriately depending + on the current subdomain or lack therof.""" + if mapper.sub_domains: + subdomain = kargs.pop('sub_domain', None) + if isinstance(subdomain, unicode): + subdomain = str(subdomain) + + fullhost = environ.get('HTTP_HOST') or environ.get('SERVER_NAME') + + # In case environ defaulted to {} + if not fullhost: + return kargs + + hostmatch = fullhost.split(':') + host = hostmatch[0] + port = '' + if len(hostmatch) > 1: + port += ':' + hostmatch[1] + sub_match = re.compile('^.+?\.(%s)$' % mapper.domain_match) + domain = re.sub(sub_match, r'\1', host) + if subdomain and not host.startswith(subdomain) and \ + subdomain not in mapper.sub_domains_ignore: + kargs['_host'] = subdomain + '.' + domain + port + elif (subdomain in mapper.sub_domains_ignore or \ + subdomain is None) and domain != host: + kargs['_host'] = domain + port + return kargs + else: + return kargs + + +def _url_quote(string, encoding): + """A Unicode handling version of urllib.quote.""" + if encoding: + if isinstance(string, unicode): + s = string.encode(encoding) + elif isinstance(string, str): + # assume the encoding is already correct + s = string + else: + s = unicode(string).encode(encoding) + else: + s = str(string) + return urllib.quote(s, '/') + + +def _str_encode(string, encoding): + if encoding: + if isinstance(string, unicode): + s = string.encode(encoding) + elif isinstance(string, str): + # assume the encoding is already correct + s = string + else: + s = unicode(string).encode(encoding) + return s + + +def url_for(*args, **kargs): + """Generates a URL + + All keys given to url_for are sent to the Routes Mapper instance for + generation except for:: + + anchor specified the anchor name to be appened to the path + host overrides the default (current) host if provided + protocol overrides the default (current) protocol if provided + qualified creates the URL with the host/port information as + needed + + The URL is generated based on the rest of the keys. When generating a new + URL, values will be used from the current request's parameters (if + present). 
The following rules are used to determine when and how to keep + the current requests parameters: + + * If the controller is present and begins with '/', no defaults are used + * If the controller is changed, action is set to 'index' unless otherwise + specified + + For example, if the current request yielded a dict of + {'controller': 'blog', 'action': 'view', 'id': 2}, with the standard + ':controller/:action/:id' route, you'd get the following results:: + + url_for(id=4) => '/blog/view/4', + url_for(controller='/admin') => '/admin', + url_for(controller='admin') => '/admin/view/2' + url_for(action='edit') => '/blog/edit/2', + url_for(action='list', id=None) => '/blog/list' + + **Static and Named Routes** + + If there is a string present as the first argument, a lookup is done + against the named routes table to see if there's any matching routes. The + keyword defaults used with static routes will be sent in as GET query + arg's if a route matches. + + If no route by that name is found, the string is assumed to be a raw URL. + Should the raw URL begin with ``/`` then appropriate SCRIPT_NAME data will + be added if present, otherwise the string will be used as the url with + keyword args becoming GET query args. + + """ + anchor = kargs.get('anchor') + host = kargs.get('host') + protocol = kargs.get('protocol') + qualified = kargs.pop('qualified', None) + + # Remove special words from kargs, convert placeholders + for key in ['anchor', 'host', 'protocol']: + if kargs.get(key): + del kargs[key] + config = request_config() + route = None + static = False + encoding = config.mapper.encoding + url = '' + if len(args) > 0: + route = config.mapper._routenames.get(args[0]) + + # No named route found, assume the argument is a relative path + if not route: + static = True + url = args[0] + + if url.startswith('/') and hasattr(config, 'environ') \ + and config.environ.get('SCRIPT_NAME'): + url = config.environ.get('SCRIPT_NAME') + url + + if static: + if kargs: + url += '?' 
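Keyword arguments that remain at this point become GET query args; list and tuple values are emitted once per element. A standalone sketch of what the loop that follows produces (the path, the parameters and the 'utf-8' encoding are assumptions for the example):

import urllib

kargs = {'q': 'routes', 'tag': ['python', 'wsgi']}
query_args = []
for key, val in kargs.iteritems():
    vals = val if isinstance(val, (list, tuple)) else [val]
    for value in vals:
        query_args.append("%s=%s" % (urllib.quote(unicode(key).encode('utf-8')),
                                      urllib.quote(unicode(value).encode('utf-8'))))
print '/search?' + '&'.join(query_args)
# /search?q=routes&tag=python&tag=wsgi   (dict iteration order may vary)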
+ query_args = [] + for key, val in kargs.iteritems(): + if isinstance(val, (list, tuple)): + for value in val: + query_args.append("%s=%s" % ( + urllib.quote(unicode(key).encode(encoding)), + urllib.quote(unicode(value).encode(encoding)))) + else: + query_args.append("%s=%s" % ( + urllib.quote(unicode(key).encode(encoding)), + urllib.quote(unicode(val).encode(encoding)))) + url += '&'.join(query_args) + environ = getattr(config, 'environ', {}) + if 'wsgiorg.routing_args' not in environ: + environ = environ.copy() + mapper_dict = getattr(config, 'mapper_dict', None) + if mapper_dict is not None: + match_dict = mapper_dict.copy() + else: + match_dict = {} + environ['wsgiorg.routing_args'] = ((), match_dict) + + if not static: + route_args = [] + if route: + if config.mapper.hardcode_names: + route_args.append(route) + newargs = route.defaults.copy() + newargs.update(kargs) + + # If this route has a filter, apply it + if route.filter: + newargs = route.filter(newargs) + + if not route.static: + # Handle sub-domains + newargs = _subdomain_check(newargs, config.mapper, environ) + else: + newargs = _screenargs(kargs, config.mapper, environ) + anchor = newargs.pop('_anchor', None) or anchor + host = newargs.pop('_host', None) or host + protocol = newargs.pop('_protocol', None) or protocol + url = config.mapper.generate(*route_args, **newargs) + if anchor is not None: + url += '#' + _url_quote(anchor, encoding) + if host or protocol or qualified: + if not host and not qualified: + # Ensure we don't use a specific port, as changing the protocol + # means that we most likely need a new port + host = config.host.split(':')[0] + elif not host: + host = config.host + if not protocol: + protocol = config.protocol + if url is not None: + url = protocol + '://' + host + url + + if not isinstance(url, str) and url is not None: + raise GenerationException("url_for can only return a string, got " + "unicode instead: %s" % url) + if url is None: + raise GenerationException( + "url_for could not generate URL. Called with args: %s %s" % \ + (args, kargs)) + return url + + +class URLGenerator(object): + """The URL Generator generates URL's + + It is automatically instantiated by the RoutesMiddleware and put + into the ``wsgiorg.routing_args`` tuple accessible as:: + + url = environ['wsgiorg.routing_args'][0][0] + + Or via the ``routes.url`` key:: + + url = environ['routes.url'] + + The url object may be instantiated outside of a web context for use + in testing, however sub_domain support and fully qualified URL's + cannot be generated without supplying a dict that must contain the + key ``HTTP_HOST``. + + """ + def __init__(self, mapper, environ): + """Instantiate the URLGenerator + + ``mapper`` + The mapper object to use when generating routes. + ``environ`` + The environment dict used in WSGI, alternately, any dict + that contains at least an ``HTTP_HOST`` value. 
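A minimal out-of-request usage sketch (the route, host and call arguments below are illustrative assumptions, not part of this module):

from routes import Mapper
from routes.util import URLGenerator

mapper = Mapper()
mapper.connect('blog', '/blog/:action/:id', controller='blog')
url = URLGenerator(mapper, {'HTTP_HOST': 'example.com'})
print url('blog', action='view', id=2)    # named route lookup -> '/blog/view/2'
print url('/static/style.css', v=3)       # raw path           -> '/static/style.css?v=3'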
+ + """ + self.mapper = mapper + if 'SCRIPT_NAME' not in environ: + environ['SCRIPT_NAME'] = '' + self.environ = environ + + def __call__(self, *args, **kargs): + """Generates a URL + + All keys given to url_for are sent to the Routes Mapper instance for + generation except for:: + + anchor specified the anchor name to be appened to the path + host overrides the default (current) host if provided + protocol overrides the default (current) protocol if provided + qualified creates the URL with the host/port information as + needed + + """ + anchor = kargs.get('anchor') + host = kargs.get('host') + protocol = kargs.get('protocol') + qualified = kargs.pop('qualified', None) + + # Remove special words from kargs, convert placeholders + for key in ['anchor', 'host', 'protocol']: + if kargs.get(key): + del kargs[key] + + route = None + use_current = '_use_current' in kargs and kargs.pop('_use_current') + + static = False + encoding = self.mapper.encoding + url = '' + + more_args = len(args) > 0 + if more_args: + route = self.mapper._routenames.get(args[0]) + + if not route and more_args: + static = True + url = args[0] + if url.startswith('/') and self.environ.get('SCRIPT_NAME'): + url = self.environ.get('SCRIPT_NAME') + url + + if static: + if kargs: + url += '?' + query_args = [] + for key, val in kargs.iteritems(): + if isinstance(val, (list, tuple)): + for value in val: + query_args.append("%s=%s" % ( + urllib.quote(unicode(key).encode(encoding)), + urllib.quote(unicode(value).encode(encoding)))) + else: + query_args.append("%s=%s" % ( + urllib.quote(unicode(key).encode(encoding)), + urllib.quote(unicode(val).encode(encoding)))) + url += '&'.join(query_args) + if not static: + route_args = [] + if route: + if self.mapper.hardcode_names: + route_args.append(route) + newargs = route.defaults.copy() + newargs.update(kargs) + + # If this route has a filter, apply it + if route.filter: + newargs = route.filter(newargs) + if not route.static or (route.static and not route.external): + # Handle sub-domains, retain sub_domain if there is one + sub = newargs.get('sub_domain', None) + newargs = _subdomain_check(newargs, self.mapper, + self.environ) + # If the route requires a sub-domain, and we have it, restore + # it + if 'sub_domain' in route.defaults: + newargs['sub_domain'] = sub + + elif use_current: + newargs = _screenargs(kargs, self.mapper, self.environ, force_explicit=True) + elif 'sub_domain' in kargs: + newargs = _subdomain_check(kargs, self.mapper, self.environ) + else: + newargs = kargs + + anchor = anchor or newargs.pop('_anchor', None) + host = host or newargs.pop('_host', None) + protocol = protocol or newargs.pop('_protocol', None) + url = self.mapper.generate(*route_args, **newargs) + if anchor is not None: + url += '#' + _url_quote(anchor, encoding) + if host or protocol or qualified: + if 'routes.cached_hostinfo' not in self.environ: + cache_hostinfo(self.environ) + hostinfo = self.environ['routes.cached_hostinfo'] + + if not host and not qualified: + # Ensure we don't use a specific port, as changing the protocol + # means that we most likely need a new port + host = hostinfo['host'].split(':')[0] + elif not host: + host = hostinfo['host'] + if not protocol: + protocol = hostinfo['protocol'] + if url is not None: + if host[-1] != '/': + host += '/' + url = protocol + '://' + host + url.lstrip('/') + + if not isinstance(url, str) and url is not None: + raise GenerationException("Can only return a string, got " + "unicode instead: %s" % url) + if url is None: + raise 
GenerationException( + "Could not generate URL. Called with args: %s %s" % \ + (args, kargs)) + return url + + def current(self, *args, **kwargs): + """Generate a route that includes params used on the current + request + + The arguments for this method are identical to ``__call__`` + except that arguments set to None will remove existing route + matches of the same name from the set of arguments used to + construct a URL. + """ + return self(_use_current=True, *args, **kwargs) + + +def redirect_to(*args, **kargs): + """Issues a redirect based on the arguments. + + Redirect's *should* occur as a "302 Moved" header, however the web + framework may utilize a different method. + + All arguments are passed to url_for to retrieve the appropriate URL, then + the resulting URL it sent to the redirect function as the URL. + """ + target = url_for(*args, **kargs) + config = request_config() + return config.redirect(target) + + +def cache_hostinfo(environ): + """Processes the host information and stores a copy + + This work was previously done but wasn't stored in environ, nor is + it guaranteed to be setup in the future (Routes 2 and beyond). + + cache_hostinfo processes environ keys that may be present to + determine the proper host, protocol, and port information to use + when generating routes. + + """ + hostinfo = {} + if environ.get('HTTPS') or environ.get('wsgi.url_scheme') == 'https' \ + or environ.get('HTTP_X_FORWARDED_PROTO') == 'https': + hostinfo['protocol'] = 'https' + else: + hostinfo['protocol'] = 'http' + if environ.get('HTTP_X_FORWARDED_HOST'): + hostinfo['host'] = environ['HTTP_X_FORWARDED_HOST'] + elif environ.get('HTTP_HOST'): + hostinfo['host'] = environ['HTTP_HOST'] + else: + hostinfo['host'] = environ['SERVER_NAME'] + if environ.get('wsgi.url_scheme') == 'https': + if environ['SERVER_PORT'] != '443': + hostinfo['host'] += ':' + environ['SERVER_PORT'] + else: + if environ['SERVER_PORT'] != '80': + hostinfo['host'] += ':' + environ['SERVER_PORT'] + environ['routes.cached_hostinfo'] = hostinfo + return hostinfo + + +def controller_scan(directory=None): + """Scan a directory for python files and use them as controllers""" + if directory is None: + return [] + + def find_controllers(dirname, prefix=''): + """Locate controllers in a directory""" + controllers = [] + for fname in os.listdir(dirname): + filename = os.path.join(dirname, fname) + if os.path.isfile(filename) and \ + re.match('^[^_]{1,1}.*\.py$', fname): + controllers.append(prefix + fname[:-3]) + elif os.path.isdir(filename): + controllers.extend(find_controllers(filename, + prefix=prefix+fname+'/')) + return controllers + def longest_first(fst, lst): + """Compare the length of one string to another, shortest goes first""" + return cmp(len(lst), len(fst)) + controllers = find_controllers(directory) + controllers.sort(longest_first) + return controllers From 73753b67d882e733bf619011176d6b35580efa45 Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Sun, 23 May 2010 21:35:00 -0600 Subject: [PATCH 08/22] Move mobile server templates to lxml from genshi --- src/calibre/__init__.py | 14 ++ src/calibre/gui2/__init__.py | 13 -- src/calibre/gui2/widgets.py | 5 +- src/calibre/library/server/mobile.py | 249 +++++++++++++++++---------- 4 files changed, 171 insertions(+), 110 deletions(-) diff --git a/src/calibre/__init__.py b/src/calibre/__init__.py index e44f8d8ec6..ff4bab6a9a 100644 --- a/src/calibre/__init__.py +++ b/src/calibre/__init__.py @@ -451,6 +451,20 @@ def prepare_string_for_xml(raw, attribute=False): def 
isbytestring(obj): return isinstance(obj, (str, bytes)) +def human_readable(size): + """ Convert a size in bytes into a human readable form """ + divisor, suffix = 1, "B" + for i, candidate in enumerate(('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB')): + if size < 1024**(i+1): + divisor, suffix = 1024**(i), candidate + break + size = str(float(size)/divisor) + if size.find(".") > -1: + size = size[:size.find(".")+2] + if size.endswith('.0'): + size = size[:-2] + return size + " " + suffix + if isosx: import glob, shutil fdir = os.path.expanduser('~/.fonts') diff --git a/src/calibre/gui2/__init__.py b/src/calibre/gui2/__init__.py index 0cf565c928..3ee5e67b6b 100644 --- a/src/calibre/gui2/__init__.py +++ b/src/calibre/gui2/__init__.py @@ -229,19 +229,6 @@ def info_dialog(parent, title, msg, det_msg='', show=False): return d -def human_readable(size): - """ Convert a size in bytes into a human readable form """ - divisor, suffix = 1, "B" - for i, candidate in enumerate(('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB')): - if size < 1024**(i+1): - divisor, suffix = 1024**(i), candidate - break - size = str(float(size)/divisor) - if size.find(".") > -1: - size = size[:size.find(".")+2] - if size.endswith('.0'): - size = size[:-2] - return size + " " + suffix class Dispatcher(QObject): '''Convenience class to ensure that a function call always happens in the diff --git a/src/calibre/gui2/widgets.py b/src/calibre/gui2/widgets.py index 8083cd4ba0..093fa3fc5c 100644 --- a/src/calibre/gui2/widgets.py +++ b/src/calibre/gui2/widgets.py @@ -13,11 +13,10 @@ from PyQt4.Qt import QListView, QIcon, QFont, QLabel, QListWidget, \ QAbstractButton, QPainter, QLineEdit, QComboBox, \ QMenu, QStringListModel, QCompleter, QStringList -from calibre.gui2 import human_readable, NONE, \ - error_dialog, pixmap_to_data, dynamic +from calibre.gui2 import NONE, error_dialog, pixmap_to_data, dynamic from calibre.gui2.filename_pattern_ui import Ui_Form -from calibre import fit_image +from calibre import fit_image, human_readable from calibre.utils.fonts import fontconfig from calibre.ebooks import BOOK_EXTENSIONS from calibre.ebooks.metadata.meta import metadata_from_filename diff --git a/src/calibre/library/server/mobile.py b/src/calibre/library/server/mobile.py index afb31815d5..6a227a6366 100644 --- a/src/calibre/library/server/mobile.py +++ b/src/calibre/library/server/mobile.py @@ -5,34 +5,143 @@ __license__ = 'GPL v3' __copyright__ = '2010, Kovid Goyal ' __docformat__ = 'restructuredtext en' -import re, copy +import re import __builtin__ import cherrypy +from lxml import html +from lxml.html.builder import HTML, HEAD, TITLE, STYLE, LINK, DIV, IMG, BODY, \ + OPTION, SELECT, INPUT, FORM, SPAN, TABLE, TR, TD, A, HR -from calibre.utils.genshi.template import MarkupTemplate from calibre.library.server.utils import strftime from calibre.ebooks.metadata import fmt_sidx +from calibre.constants import __appname__ +from calibre import human_readable -# Templates {{{ -MOBILE_BOOK = '''\ - - - - - - - ${format.lower()}  - - ${r[FM['title']]}${(' ['+r[FM['series']]+'-'+r[FM['series_index']]+']') if r[FM['series']] else ''} by ${authors} - ${r[FM['size']]/1024}k - ${r[FM['publisher']] if r[FM['publisher']] else ''} ${pubdate} ${'['+r[FM['tags']]+']' if r[FM['tags']] else ''} - - -''' +def CLASS(*args, **kwargs): # class is a reserved word in Python + kwargs['class'] = ' '.join(args) + return kwargs -MOBILE = MarkupTemplate('''\ - - - - - - - - - -
- - - ${Markup(book)} - -
- - -''') - -# }}} class MobileServer(object): 'A view optimized for browsers in mobile devices' @@ -195,26 +251,31 @@ class MobileServer(object): except ValueError: raise cherrypy.HTTPError(400, 'num: %s is not an integer'%num) ids = self.db.data.parse(search) if search and search.strip() else self.db.data.universal_set() - ids = sorted(ids) FM = self.db.FIELD_MAP - items = copy.deepcopy([r for r in iter(self.db) if r[FM['id']] in ids]) + items = [r for r in iter(self.db) if r[FM['id']] in ids] if sort is not None: self.sort(items, sort, (order.lower().strip() == 'ascending')) - book, books = MarkupTemplate(MOBILE_BOOK), [] + books = [] for record in items[(start-1):(start-1)+num]: - if record[FM['formats']] is None: - record[FM['formats']] = '' - if record[FM['size']] is None: - record[FM['size']] = 0 + book = {'formats':record[FM['formats']], 'size':record[FM['size']]} + if not book['formats']: + book['formats'] = '' + if not book['size']: + book['size'] = 0 + book['size'] = human_readable(book['size']) + aus = record[FM['authors']] if record[FM['authors']] else __builtin__._('Unknown') authors = '|'.join([i.replace('|', ',') for i in aus.split(',')]) - record[FM['series_index']] = \ - fmt_sidx(float(record[FM['series_index']])) - ts, pd = strftime('%Y/%m/%d %H:%M:%S', record[FM['timestamp']]), \ - strftime('%Y/%m/%d %H:%M:%S', record[FM['pubdate']]) - books.append(book.generate(r=record, authors=authors, timestamp=ts, - pubdate=pd, FM=FM).render('xml').decode('utf-8')) + book['authors'] = authors + book['series_index'] = fmt_sidx(float(record[FM['series_index']])) + book['series'] = record[FM['series']] + book['tags'] = record[FM['tags']] + book['title'] = record[FM['title']] + for x in ('timestamp', 'pubdate'): + book[x] = strftime('%Y/%m/%d %H:%M:%S', record[FM[x]]) + book['id'] = record[FM['id']] + books.append(book) updated = self.db.last_modified() cherrypy.response.headers['Content-Type'] = 'text/html; charset=utf-8' @@ -223,8 +284,8 @@ class MobileServer(object): url_base = "/mobile?search=" + search+";order="+order+";sort="+sort+";num="+str(num) - return MOBILE.generate(books=books, start=start, updated=updated, - search=search, sort=sort, order=order, num=num, FM=FM, - total=len(ids), url_base=url_base).render('html') - + return html.tostring(build_index(books, num, search, sort, order, + start, len(ids), url_base), + encoding='utf-8', include_meta_content_type=True, + pretty_print=True) From c96e77fe4c76cca3d7643706fcf923863bf811ec Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Sun, 23 May 2010 21:37:36 -0600 Subject: [PATCH 09/22] Remove old OPF classes --- src/calibre/ebooks/metadata/opf.py | 538 ----------------------------- 1 file changed, 538 deletions(-) delete mode 100644 src/calibre/ebooks/metadata/opf.py diff --git a/src/calibre/ebooks/metadata/opf.py b/src/calibre/ebooks/metadata/opf.py deleted file mode 100644 index 9f1d12d6d1..0000000000 --- a/src/calibre/ebooks/metadata/opf.py +++ /dev/null @@ -1,538 +0,0 @@ -__license__ = 'GPL v3' -__copyright__ = '2008, Kovid Goyal ' -'''Read/Write metadata from Open Packaging Format (.opf) files.''' - -import re, os -import uuid -from urllib import unquote, quote - -from calibre.constants import __appname__, __version__ -from calibre.ebooks.metadata import MetaInformation, string_to_authors -from calibre.ebooks.BeautifulSoup import BeautifulStoneSoup, BeautifulSoup -from calibre.ebooks.lrf import entity_to_unicode -from calibre.ebooks.metadata import Resource, ResourceCollection -from calibre.ebooks.metadata.toc import TOC - 
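The mobile.py rewrite in the previous patch builds its pages with lxml's element factories instead of genshi markup templates. A tiny standalone sketch of that style, using the same CLASS helper the patch defines (the markup here is made up, not the server's actual output):

from lxml.html import tostring
from lxml.html.builder import HTML, HEAD, TITLE, BODY, DIV, TABLE, TR, TD

def CLASS(*args, **kwargs):   # 'class' is a reserved word in Python
    kwargs['class'] = ' '.join(args)
    return kwargs

row = TR(TD('Some Title'), TD('Some Author'))
page = HTML(
    HEAD(TITLE('calibre library')),
    BODY(DIV(TABLE(row, CLASS('booklist')))),
)
print tostring(page, pretty_print=True)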
-class OPFSoup(BeautifulStoneSoup): - - def __init__(self, raw): - BeautifulStoneSoup.__init__(self, raw, - convertEntities=BeautifulSoup.HTML_ENTITIES, - selfClosingTags=['item', 'itemref', 'reference']) - -class ManifestItem(Resource): - - @staticmethod - def from_opf_manifest_item(item, basedir): - if item.has_key('href'): - href = item['href'] - if unquote(href) == href: - try: - href = quote(href) - except KeyError: - pass - res = ManifestItem(href, basedir=basedir, is_path=False) - mt = item.get('media-type', '').strip() - if mt: - res.mime_type = mt - return res - - @dynamic_property - def media_type(self): - def fget(self): - return self.mime_type - def fset(self, val): - self.mime_type = val - return property(fget=fget, fset=fset) - - - def __unicode__(self): - return u''%(self.id, self.href(), self.media_type) - - def __str__(self): - return unicode(self).encode('utf-8') - - def __repr__(self): - return unicode(self) - - - def __getitem__(self, index): - if index == 0: - return self.href() - if index == 1: - return self.media_type - raise IndexError('%d out of bounds.'%index) - - -class Manifest(ResourceCollection): - - @staticmethod - def from_opf_manifest_element(manifest, dir): - m = Manifest() - for item in manifest.findAll(re.compile('item')): - try: - m.append(ManifestItem.from_opf_manifest_item(item, dir)) - id = item.get('id', '') - if not id: - id = 'id%d'%m.next_id - m[-1].id = id - m.next_id += 1 - except ValueError: - continue - return m - - @staticmethod - def from_paths(entries): - ''' - `entries`: List of (path, mime-type) If mime-type is None it is autodetected - ''' - m = Manifest() - for path, mt in entries: - mi = ManifestItem(path, is_path=True) - if mt: - mi.mime_type = mt - mi.id = 'id%d'%m.next_id - m.next_id += 1 - m.append(mi) - return m - - def __init__(self): - ResourceCollection.__init__(self) - self.next_id = 1 - - - def item(self, id): - for i in self: - if i.id == id: - return i - - def id_for_path(self, path): - path = os.path.normpath(os.path.abspath(path)) - for i in self: - if i.path and os.path.normpath(i.path) == path: - return i.id - - def path_for_id(self, id): - for i in self: - if i.id == id: - return i.path - -class Spine(ResourceCollection): - - class Item(Resource): - - def __init__(self, idfunc, *args, **kwargs): - Resource.__init__(self, *args, **kwargs) - self.is_linear = True - self.id = idfunc(self.path) - - @staticmethod - def from_opf_spine_element(spine, manifest): - s = Spine(manifest) - for itemref in spine.findAll(re.compile('itemref')): - if itemref.has_key('idref'): - r = Spine.Item(s.manifest.id_for_path, - s.manifest.path_for_id(itemref['idref']), is_path=True) - r.is_linear = itemref.get('linear', 'yes') == 'yes' - s.append(r) - return s - - @staticmethod - def from_paths(paths, manifest): - s = Spine(manifest) - for path in paths: - try: - s.append(Spine.Item(s.manifest.id_for_path, path, is_path=True)) - except: - continue - return s - - - - def __init__(self, manifest): - ResourceCollection.__init__(self) - self.manifest = manifest - - - def linear_items(self): - for r in self: - if r.is_linear: - yield r.path - - def nonlinear_items(self): - for r in self: - if not r.is_linear: - yield r.path - - def items(self): - for i in self: - yield i.path - - -class Guide(ResourceCollection): - - class Reference(Resource): - - @staticmethod - def from_opf_resource_item(ref, basedir): - title, href, type = ref.get('title', ''), ref['href'], ref['type'] - res = Guide.Reference(href, basedir, is_path=False) - res.title = title - 
res.type = type - return res - - def __repr__(self): - ans = '' - - - @staticmethod - def from_opf_guide(guide_elem, base_dir=os.getcwdu()): - coll = Guide() - for ref in guide_elem.findAll('reference'): - try: - ref = Guide.Reference.from_opf_resource_item(ref, base_dir) - coll.append(ref) - except: - continue - return coll - - def set_cover(self, path): - map(self.remove, [i for i in self if 'cover' in i.type.lower()]) - for type in ('cover', 'other.ms-coverimage-standard', 'other.ms-coverimage'): - self.append(Guide.Reference(path, is_path=True)) - self[-1].type = type - self[-1].title = '' - - -class standard_field(object): - - def __init__(self, name): - self.name = name - - def __get__(self, obj, typ=None): - return getattr(obj, 'get_'+self.name)() - - -class OPF(MetaInformation): - - MIMETYPE = 'application/oebps-package+xml' - ENTITY_PATTERN = re.compile(r'&(\S+?);') - - uid = standard_field('uid') - application_id = standard_field('application_id') - title = standard_field('title') - authors = standard_field('authors') - language = standard_field('language') - title_sort = standard_field('title_sort') - author_sort = standard_field('author_sort') - comments = standard_field('comments') - category = standard_field('category') - publisher = standard_field('publisher') - isbn = standard_field('isbn') - cover = standard_field('cover') - series = standard_field('series') - series_index = standard_field('series_index') - rating = standard_field('rating') - tags = standard_field('tags') - - def __init__(self): - raise NotImplementedError('Abstract base class') - - @dynamic_property - def package(self): - def fget(self): - return self.soup.find(re.compile('package')) - return property(fget=fget) - - @dynamic_property - def metadata(self): - def fget(self): - return self.package.find(re.compile('metadata')) - return property(fget=fget) - - - def get_title(self): - title = self.metadata.find('dc:title') - if title and title.string: - return self.ENTITY_PATTERN.sub(entity_to_unicode, title.string).strip() - return self.default_title.strip() - - def get_authors(self): - creators = self.metadata.findAll('dc:creator') - for elem in creators: - role = elem.get('role') - if not role: - role = elem.get('opf:role') - if not role: - role = 'aut' - if role == 'aut' and elem.string: - raw = self.ENTITY_PATTERN.sub(entity_to_unicode, elem.string) - return string_to_authors(raw) - return [] - - def get_author_sort(self): - creators = self.metadata.findAll('dc:creator') - for elem in creators: - role = elem.get('role') - if not role: - role = elem.get('opf:role') - if role == 'aut': - fa = elem.get('file-as') - return self.ENTITY_PATTERN.sub(entity_to_unicode, fa).strip() if fa else None - return None - - def get_title_sort(self): - title = self.package.find('dc:title') - if title: - if title.has_key('file-as'): - return title['file-as'].strip() - return None - - def get_comments(self): - comments = self.soup.find('dc:description') - if comments and comments.string: - return self.ENTITY_PATTERN.sub(entity_to_unicode, comments.string).strip() - return None - - def get_uid(self): - package = self.package - if package.has_key('unique-identifier'): - return package['unique-identifier'] - - def get_category(self): - category = self.soup.find('dc:type') - if category and category.string: - return self.ENTITY_PATTERN.sub(entity_to_unicode, category.string).strip() - return None - - def get_publisher(self): - publisher = self.soup.find('dc:publisher') - if publisher and publisher.string: - return 
self.ENTITY_PATTERN.sub(entity_to_unicode, publisher.string).strip() - return None - - def get_isbn(self): - for item in self.metadata.findAll('dc:identifier'): - scheme = item.get('scheme') - if not scheme: - scheme = item.get('opf:scheme') - if scheme is not None and scheme.lower() == 'isbn' and item.string: - return str(item.string).strip() - return None - - def get_language(self): - item = self.metadata.find('dc:language') - if not item: - return _('Unknown') - return ''.join(item.findAll(text=True)).strip() - - def get_application_id(self): - for item in self.metadata.findAll('dc:identifier'): - scheme = item.get('scheme', None) - if scheme is None: - scheme = item.get('opf:scheme', None) - if scheme in ['libprs500', 'calibre']: - return str(item.string).strip() - return None - - def get_cover(self): - guide = getattr(self, 'guide', []) - if not guide: - guide = [] - references = [ref for ref in guide if 'cover' in ref.type.lower()] - for candidate in ('cover', 'other.ms-coverimage-standard', 'other.ms-coverimage'): - matches = [r for r in references if r.type.lower() == candidate and r.path] - if matches: - return matches[0].path - - def possible_cover_prefixes(self): - isbn, ans = [], [] - for item in self.metadata.findAll('dc:identifier'): - scheme = item.get('scheme') - if not scheme: - scheme = item.get('opf:scheme') - isbn.append((scheme, item.string)) - for item in isbn: - ans.append(item[1].replace('-', '')) - return ans - - def get_series(self): - s = self.metadata.find('series') - if s is not None: - return str(s.string).strip() - return None - - def get_series_index(self): - s = self.metadata.find('series-index') - if s and s.string: - try: - return float(str(s.string).strip()) - except: - return None - return None - - def get_rating(self): - s = self.metadata.find('rating') - if s and s.string: - try: - return int(str(s.string).strip()) - except: - return None - return None - - def get_tags(self): - ans = [] - subs = self.soup.findAll('dc:subject') - for sub in subs: - val = sub.string - if val: - ans.append(val) - return [unicode(a).strip() for a in ans] - - -class OPFReader(OPF): - - def __init__(self, stream, dir=os.getcwdu()): - manage = False - if not hasattr(stream, 'read'): - manage = True - dir = os.path.dirname(stream) - stream = open(stream, 'rb') - self.default_title = stream.name if hasattr(stream, 'name') else 'Unknown' - if hasattr(stream, 'seek'): - stream.seek(0) - self.soup = OPFSoup(stream.read()) - if manage: - stream.close() - self.manifest = Manifest() - m = self.soup.find(re.compile('manifest')) - if m is not None: - self.manifest = Manifest.from_opf_manifest_element(m, dir) - self.spine = None - spine = self.soup.find(re.compile('spine')) - if spine is not None: - self.spine = Spine.from_opf_spine_element(spine, self.manifest) - - self.toc = TOC(base_path=dir) - self.toc.read_from_opf(self) - guide = self.soup.find(re.compile('guide')) - if guide is not None: - self.guide = Guide.from_opf_guide(guide, dir) - self.base_dir = dir - self.cover_data = (None, None) - - -class OPFCreator(MetaInformation): - - def __init__(self, base_path, *args, **kwargs): - ''' - Initialize. - @param base_path: An absolute path to the directory in which this OPF file - will eventually be. This is used by the L{create_manifest} method - to convert paths to files into relative paths. 
- ''' - MetaInformation.__init__(self, *args, **kwargs) - self.base_path = os.path.abspath(base_path) - if self.application_id is None: - self.application_id = str(uuid.uuid4()) - if not isinstance(self.toc, TOC): - self.toc = None - if not self.authors: - self.authors = [_('Unknown')] - if self.guide is None: - self.guide = Guide() - if self.cover: - self.guide.set_cover(self.cover) - - - def create_manifest(self, entries): - ''' - Create - - `entries`: List of (path, mime-type) If mime-type is None it is autodetected - ''' - entries = map(lambda x: x if os.path.isabs(x[0]) else - (os.path.abspath(os.path.join(self.base_path, x[0])), x[1]), - entries) - self.manifest = Manifest.from_paths(entries) - self.manifest.set_basedir(self.base_path) - - def create_manifest_from_files_in(self, files_and_dirs): - entries = [] - - def dodir(dir): - for spec in os.walk(dir): - root, files = spec[0], spec[-1] - for name in files: - path = os.path.join(root, name) - if os.path.isfile(path): - entries.append((path, None)) - - for i in files_and_dirs: - if os.path.isdir(i): - dodir(i) - else: - entries.append((i, None)) - - self.create_manifest(entries) - - def create_spine(self, entries): - ''' - Create the element. Must first call :method:`create_manifest`. - - `entries`: List of paths - ''' - entries = map(lambda x: x if os.path.isabs(x) else - os.path.abspath(os.path.join(self.base_path, x)), entries) - self.spine = Spine.from_paths(entries, self.manifest) - - def set_toc(self, toc): - ''' - Set the toc. You must call :method:`create_spine` before calling this - method. - - :param toc: A :class:`TOC` object - ''' - self.toc = toc - - def create_guide(self, guide_element): - self.guide = Guide.from_opf_guide(guide_element, self.base_path) - self.guide.set_basedir(self.base_path) - - def render(self, opf_stream, ncx_stream=None, ncx_manifest_entry=None): - from calibre.utils.genshi.template import MarkupTemplate - opf_template = open(P('templates/opf.xml'), 'rb').read() - template = MarkupTemplate(opf_template) - if self.manifest: - self.manifest.set_basedir(self.base_path) - if ncx_manifest_entry is not None: - if not os.path.isabs(ncx_manifest_entry): - ncx_manifest_entry = os.path.join(self.base_path, ncx_manifest_entry) - remove = [i for i in self.manifest if i.id == 'ncx'] - for item in remove: - self.manifest.remove(item) - self.manifest.append(ManifestItem(ncx_manifest_entry, self.base_path)) - self.manifest[-1].id = 'ncx' - self.manifest[-1].mime_type = 'application/x-dtbncx+xml' - if not self.guide: - self.guide = Guide() - if self.cover: - cover = self.cover - if not os.path.isabs(cover): - cover = os.path.abspath(os.path.join(self.base_path, cover)) - self.guide.set_cover(cover) - self.guide.set_basedir(self.base_path) - - opf = template.generate(__appname__=__appname__, mi=self, __version__=__version__).render('xml') - if not opf.startswith('\n'+opf - opf_stream.write(opf) - opf_stream.flush() - toc = getattr(self, 'toc', None) - if toc is not None and ncx_stream is not None: - toc.render(ncx_stream, self.application_id) - ncx_stream.flush() - From 82d3945702943d60f40a6663dd855175b2f5112a Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Sun, 23 May 2010 21:55:49 -0600 Subject: [PATCH 10/22] Remove --output-format from calibredb list as it is superseded by calibredb catalog --- src/calibre/library/cli.py | 194 +++++++++---------------------------- 1 file changed, 48 insertions(+), 146 deletions(-) diff --git a/src/calibre/library/cli.py b/src/calibre/library/cli.py index 
12b7944383..3f71c98238 100644 --- a/src/calibre/library/cli.py +++ b/src/calibre/library/cli.py @@ -9,99 +9,18 @@ Command line interface to the calibre database. import sys, os, cStringIO from textwrap import TextWrapper -from urllib import quote from calibre import terminal_controller, preferred_encoding, prints from calibre.utils.config import OptionParser, prefs from calibre.ebooks.metadata.meta import get_metadata from calibre.library.database2 import LibraryDatabase2 from calibre.ebooks.metadata.opf2 import OPFCreator, OPF -from calibre.utils.genshi.template import MarkupTemplate from calibre.utils.date import isoformat FIELDS = set(['title', 'authors', 'author_sort', 'publisher', 'rating', 'timestamp', 'size', 'tags', 'comments', 'series', 'series_index', 'formats', 'isbn', 'uuid', 'pubdate', 'cover']) -XML_TEMPLATE = '''\ - - - - - ${record['id']} - ${record['uuid']} - ${record['title']} - - - $author - - - ${record['publisher']} - ${record['rating']} - ${record['timestamp'].isoformat()} - ${record['pubdate'].isoformat()} - ${record['size']} - - - $tag - - - ${record['comments']} - ${record['series']} - ${record['isbn']} - ${record['cover'].replace(os.sep, '/')} - - - ${path.replace(os.sep, '/')} - - - - - -''' - -STANZA_TEMPLATE='''\ - - - calibre Library - - calibre - http://calibre-ebook.com - - $id - ${updated.isoformat()} - - ${subtitle} - - - - ${record['title']} - urn:calibre:${record['uuid']} - ${record['author_sort']} - ${record['timestamp'].isoformat()} - - - - -
- - - ${f.capitalize()}:${unicode(', '.join(record[f]) if f=='tags' else record[f])} - # ${str(record['series_index'])} -
- ${record['comments']} -
-''' - def send_message(msg=''): prints('Notifying calibre of the change') from calibre.utils.ipc import RC @@ -130,81 +49,67 @@ def get_db(dbpath, options): return LibraryDatabase2(dbpath) def do_list(db, fields, afields, sort_by, ascending, search_text, line_width, separator, - prefix, output_format, subtitle='Books in the calibre database'): + prefix, subtitle='Books in the calibre database'): if sort_by: db.sort(sort_by, ascending) if search_text: db.search(search_text) - authors_to_string = output_format in ['stanza', 'text'] - data = db.get_data_as_dict(prefix, authors_as_string=authors_to_string) + data = db.get_data_as_dict(prefix, authors_as_string=True) fields = ['id'] + fields title_fields = fields fields = [db.custom_column_label_map[x[1:]]['num'] if x[0]=='*' else x for x in fields] - if output_format == 'text': - for f in data: - fmts = [x for x in f['formats'] if x is not None] - f['formats'] = u'[%s]'%u','.join(fmts) - widths = list(map(lambda x : 0, fields)) - for record in data: - for f in record.keys(): - if hasattr(record[f], 'isoformat'): - record[f] = isoformat(record[f], as_utc=False) - else: - record[f] = unicode(record[f]) - record[f] = record[f].replace('\n', ' ') - for i in data: - for j, field in enumerate(fields): - widths[j] = max(widths[j], len(unicode(i[field]))) - screen_width = terminal_controller.COLS if line_width < 0 else line_width - if not screen_width: - screen_width = 80 - field_width = screen_width//len(fields) - base_widths = map(lambda x: min(x+1, field_width), widths) + for f in data: + fmts = [x for x in f['formats'] if x is not None] + f['formats'] = u'[%s]'%u','.join(fmts) + widths = list(map(lambda x : 0, fields)) + for record in data: + for f in record.keys(): + if hasattr(record[f], 'isoformat'): + record[f] = isoformat(record[f], as_utc=False) + else: + record[f] = unicode(record[f]) + record[f] = record[f].replace('\n', ' ') + for i in data: + for j, field in enumerate(fields): + widths[j] = max(widths[j], len(unicode(i[field]))) - while sum(base_widths) < screen_width: - adjusted = False - for i in range(len(widths)): - if base_widths[i] < widths[i]: - base_widths[i] += min(screen_width-sum(base_widths), widths[i]-base_widths[i]) - adjusted = True - break - if not adjusted: + screen_width = terminal_controller.COLS if line_width < 0 else line_width + if not screen_width: + screen_width = 80 + field_width = screen_width//len(fields) + base_widths = map(lambda x: min(x+1, field_width), widths) + + while sum(base_widths) < screen_width: + adjusted = False + for i in range(len(widths)): + if base_widths[i] < widths[i]: + base_widths[i] += min(screen_width-sum(base_widths), widths[i]-base_widths[i]) + adjusted = True break + if not adjusted: + break - widths = list(base_widths) - titles = map(lambda x, y: '%-*s%s'%(x-len(separator), y, separator), - widths, title_fields) - print terminal_controller.GREEN + ''.join(titles)+terminal_controller.NORMAL + widths = list(base_widths) + titles = map(lambda x, y: '%-*s%s'%(x-len(separator), y, separator), + widths, title_fields) + print terminal_controller.GREEN + ''.join(titles)+terminal_controller.NORMAL - wrappers = map(lambda x: TextWrapper(x-1), widths) - o = cStringIO.StringIO() + wrappers = map(lambda x: TextWrapper(x-1), widths) + o = cStringIO.StringIO() - for record in data: - text = [wrappers[i].wrap(unicode(record[field]).encode('utf-8')) for i, field in enumerate(fields)] - lines = max(map(len, text)) - for l in range(lines): - for i, field in enumerate(text): - ft = text[i][l] if l < 
len(text[i]) else '' - filler = '%*s'%(widths[i]-len(ft)-1, '') - o.write(ft) - o.write(filler+separator) - print >>o - return o.getvalue() - elif output_format == 'xml': - template = MarkupTemplate(XML_TEMPLATE) - return template.generate(data=data, os=os).render('xml') - elif output_format == 'stanza': - data = [i for i in data if i.has_key('fmt_epub')] - for x in data: - if isinstance(x['fmt_epub'], unicode): - x['fmt_epub'] = x['fmt_epub'].encode('utf-8') - if isinstance(x['cover'], unicode): - x['cover'] = x['cover'].encode('utf-8') - template = MarkupTemplate(STANZA_TEMPLATE) - return template.generate(id="urn:calibre:main", data=data, subtitle=subtitle, - sep=os.sep, quote=quote, updated=db.last_modified()).render('xml') + for record in data: + text = [wrappers[i].wrap(unicode(record[field]).encode('utf-8')) for i, field in enumerate(fields)] + lines = max(map(len, text)) + for l in range(lines): + for i, field in enumerate(text): + ft = text[i][l] if l < len(text[i]) else '' + filler = '%*s'%(widths[i]-len(ft)-1, '') + o.write(ft) + o.write(filler+separator) + print >>o + return o.getvalue() def list_option_parser(db=None): fields = set(FIELDS) @@ -236,9 +141,6 @@ List the books available in the calibre database. help=_('The maximum width of a single line in the output. Defaults to detecting screen size.')) parser.add_option('--separator', default=' ', help=_('The string used to separate fields. Default is a space.')) parser.add_option('--prefix', default=None, help=_('The prefix for all file paths. Default is the absolute path to the library folder.')) - of = ['text', 'xml', 'stanza'] - parser.add_option('--output-format', choices=of, default='text', - help=_('The format in which to output the data. Available choices: %s. Defaults is text.')%of) return parser @@ -272,7 +174,7 @@ def command_list(args, dbpath): return 1 print do_list(db, fields, afields, opts.sort_by, opts.ascending, opts.search, opts.line_width, opts.separator, - opts.prefix, opts.output_format) + opts.prefix) return 0 From 0a16be06e8d55f0e4d0ae3dce22946cc987d828f Mon Sep 17 00:00:00 2001 From: Charles Haley <> Date: Mon, 24 May 2010 15:00:47 +0100 Subject: [PATCH 11/22] 1) Move all tag category code to DB2. 
2) Fix bug where opening preferences resets the folder device menus even when connected --- src/calibre/ebooks/metadata/book/__init__.py | 10 +- src/calibre/gui2/__init__.py | 2 - src/calibre/gui2/dialogs/tag_categories.py | 8 +- src/calibre/gui2/tag_view.py | 88 +++-------- src/calibre/gui2/ui.py | 15 +- src/calibre/library/custom_columns.py | 12 +- src/calibre/library/database2.py | 151 +++++++++++++++---- src/calibre/utils/config.py | 4 +- 8 files changed, 179 insertions(+), 111 deletions(-) diff --git a/src/calibre/ebooks/metadata/book/__init__.py b/src/calibre/ebooks/metadata/book/__init__.py index 9a44a36489..2e47ee71e3 100644 --- a/src/calibre/ebooks/metadata/book/__init__.py +++ b/src/calibre/ebooks/metadata/book/__init__.py @@ -88,17 +88,25 @@ CALIBRE_METADATA_FIELDS = frozenset([ ] ) +CALIBRE_RESERVED_LABELS = frozenset([ + # reserved for saved searches + 'search', + ] +) + RESERVED_METADATA_FIELDS = SOCIAL_METADATA_FIELDS.union( PUBLICATION_METADATA_FIELDS).union( BOOK_STRUCTURE_FIELDS).union( USER_METADATA_FIELDS).union( DEVICE_METADATA_FIELDS).union( - CALIBRE_METADATA_FIELDS) + CALIBRE_METADATA_FIELDS).union( + CALIBRE_RESERVED_LABELS) assert len(RESERVED_METADATA_FIELDS) == sum(map(len, ( SOCIAL_METADATA_FIELDS, PUBLICATION_METADATA_FIELDS, BOOK_STRUCTURE_FIELDS, USER_METADATA_FIELDS, DEVICE_METADATA_FIELDS, CALIBRE_METADATA_FIELDS, + CALIBRE_RESERVED_LABELS ))) SERIALIZABLE_FIELDS = SOCIAL_METADATA_FIELDS.union( diff --git a/src/calibre/gui2/__init__.py b/src/calibre/gui2/__init__.py index 0cf565c928..478273dd0e 100644 --- a/src/calibre/gui2/__init__.py +++ b/src/calibre/gui2/__init__.py @@ -97,8 +97,6 @@ def _config(): help=_('Overwrite author and title with new metadata')) c.add_opt('enforce_cpu_limit', default=True, help=_('Limit max simultaneous jobs to number of CPUs')) - c.add_opt('user_categories', default={}, - help=_('User-created tag browser categories')) return ConfigProxy(c) diff --git a/src/calibre/gui2/dialogs/tag_categories.py b/src/calibre/gui2/dialogs/tag_categories.py index 0e15c06828..f49ae4ce83 100644 --- a/src/calibre/gui2/dialogs/tag_categories.py +++ b/src/calibre/gui2/dialogs/tag_categories.py @@ -7,7 +7,7 @@ from PyQt4.QtCore import SIGNAL, Qt from PyQt4.QtGui import QDialog, QIcon, QListWidgetItem from calibre.gui2.dialogs.tag_categories_ui import Ui_TagCategories -from calibre.gui2 import config +from calibre.utils.config import prefs from calibre.gui2.dialogs.confirm_delete import confirm from calibre.constants import islinux @@ -22,7 +22,7 @@ class Item: return 'name=%s, label=%s, index=%s, exists='%(self.name, self.label, self.index, self.exists) class TagCategories(QDialog, Ui_TagCategories): - category_labels_orig = ['', 'author', 'series', 'publisher', 'tag'] + category_labels_orig = ['', 'authors', 'series', 'publishers', 'tags'] def __init__(self, window, db, index=None): QDialog.__init__(self, window) @@ -64,7 +64,7 @@ class TagCategories(QDialog, Ui_TagCategories): self.all_items.append(t) self.all_items_dict[label+':'+n] = t - self.categories = dict.copy(config['user_categories']) + self.categories = dict.copy(prefs['user_categories']) if self.categories is None: self.categories = {} for cat in self.categories: @@ -181,7 +181,7 @@ class TagCategories(QDialog, Ui_TagCategories): def accept(self): self.save_category() - config['user_categories'] = self.categories + prefs['user_categories'] = self.categories QDialog.accept(self) def save_category(self): diff --git a/src/calibre/gui2/tag_view.py b/src/calibre/gui2/tag_view.py index 
0fb72e071b..ba93b818c2 100644 --- a/src/calibre/gui2/tag_view.py +++ b/src/calibre/gui2/tag_view.py @@ -201,29 +201,34 @@ class TagsModel(QAbstractItemModel): # {{{ _('Ratings'), _('News'), _('Tags')] row_map_orig = ['authors', 'series', 'formats', 'publishers', 'ratings', 'news', 'tags'] - tags_categories_start= 7 search_keys=['search', _('Searches')] + def __init__(self, db, parent=None): QAbstractItemModel.__init__(self, parent) - self.cat_icon_map_orig = list(map(QIcon, [I('user_profile.svg'), - I('series.svg'), I('book.svg'), I('publisher.png'), I('star.png'), - I('news.svg'), I('tags.svg')])) + + # must do this here because 'QPixmap: Must construct a QApplication + # before a QPaintDevice' + self.category_icon_map = {'authors': QIcon(I('user_profile.svg')), + 'series': QIcon(I('series.svg')), + 'formats':QIcon(I('book.svg')), + 'publishers': QIcon(I('publisher.png')), + 'ratings':QIcon(I('star.png')), + 'news':QIcon(I('news.svg')), + 'tags':QIcon(I('tags.svg')), + '*custom':QIcon(I('column.svg')), + '*user':QIcon(I('drawer.svg')), + 'search':QIcon(I('search.svg'))} self.icon_state_map = [None, QIcon(I('plus.svg')), QIcon(I('minus.svg'))] - self.custcol_icon = QIcon(I('column.svg')) - self.search_icon = QIcon(I('search.svg')) - self.usercat_icon = QIcon(I('drawer.svg')) - self.label_to_icon_map = dict(map(None, self.row_map_orig, self.cat_icon_map_orig)) - self.label_to_icon_map['*custom'] = self.custcol_icon self.db = db self.search_restriction = '' - self.user_categories = {} self.ignore_next_search = 0 data = self.get_node_tree(config['sort_by_popularity']) self.root_item = TagTreeItem() for i, r in enumerate(self.row_map): c = TagTreeItem(parent=self.root_item, - data=self.categories[i], category_icon=self.cat_icon_map[i]) + data=self.categories[i], + category_icon=self.category_icon_map[r]) for tag in data[r]: TagTreeItem(parent=c, data=tag, icon_map=self.icon_state_map) @@ -233,66 +238,19 @@ class TagsModel(QAbstractItemModel): # {{{ def get_node_tree(self, sort): self.row_map = [] self.categories = [] - # strip the icons after the 'standard' categories. We will put them back later - if self.tags_categories_start < len(self.row_map_orig): - self.cat_icon_map = self.cat_icon_map_orig[:self.tags_categories_start-len(self.row_map_orig)] - else: - self.cat_icon_map = self.cat_icon_map_orig[:] - self.user_categories = dict.copy(config['user_categories']) - column_map = config['column_map'] - - for i in range(0, self.tags_categories_start): # First the standard categories - self.row_map.append(self.row_map_orig[i]) - self.categories.append(self.categories_orig[i]) if len(self.search_restriction): - data = self.db.get_categories(sort_on_count=sort, icon_map=self.label_to_icon_map, + data = self.db.get_categories(sort_on_count=sort, icon_map=self.category_icon_map, ids=self.db.search(self.search_restriction, return_matches=True)) else: - data = self.db.get_categories(sort_on_count=sort, icon_map=self.label_to_icon_map) + data = self.db.get_categories(sort_on_count=sort, icon_map=self.category_icon_map) - for c in data: # now the custom columns - if c not in self.row_map_orig and c in column_map: - self.row_map.append(c) - self.categories.append(self.db.custom_column_label_map[c]['name']) - self.cat_icon_map.append(self.custcol_icon) + tb_categories = self.db.get_tag_browser_categories() + for category in tb_categories.iterkeys(): + if category in data: # They should always be there, but ... 
+ self.row_map.append(category) + self.categories.append(tb_categories[category]['name']) - # Now the rest of the normal tag categories - for i in range(self.tags_categories_start, len(self.row_map_orig)): - self.row_map.append(self.row_map_orig[i]) - self.categories.append(self.categories_orig[i]) - self.cat_icon_map.append(self.cat_icon_map_orig[i]) - - # Clean up the author's tags, getting rid of the '|' characters - if data['authors'] is not None: - for t in data['authors']: - t.name = t.name.replace('|', ',') - - # Now do the user-defined categories. There is a time/space tradeoff here. - # By converting the tags into a map, we can do the verification in the category - # loop much faster, at the cost of duplicating the categories lists. - taglist = {} - for c in self.row_map: - taglist[c] = dict(map(lambda t:(t.name, t), data[c])) - - for c in self.user_categories: - l = [] - for (name,label,ign) in self.user_categories[c]: - if label in taglist and name in taglist[label]: # use same node as the complete category - l.append(taglist[label][name]) - # else: do nothing, to eliminate nodes that have zero counts - if config['sort_by_popularity']: - data[c+'*'] = sorted(l, cmp=(lambda x, y: cmp(x.count, y.count))) - else: - data[c+'*'] = sorted(l, cmp=(lambda x, y: cmp(x.name.lower(), y.name.lower()))) - self.row_map.append(c+'*') - self.categories.append(c) - self.cat_icon_map.append(self.usercat_icon) - - data['search'] = self.get_search_nodes(self.search_icon) # Add the search category - self.row_map.append(self.search_keys[0]) - self.categories.append(self.search_keys[1]) - self.cat_icon_map.append(self.search_icon) return data def get_search_nodes(self, icon): diff --git a/src/calibre/gui2/ui.py b/src/calibre/gui2/ui.py index 36848e33cf..91b2353469 100644 --- a/src/calibre/gui2/ui.py +++ b/src/calibre/gui2/ui.py @@ -183,7 +183,7 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI): _('Error communicating with device'), ' ') self.device_error_dialog.setModal(Qt.NonModal) self.tb_wrapper = textwrap.TextWrapper(width=40) - self.device_connected = False + self.device_connected = None self.viewers = collections.deque() self.content_server = None self.system_tray_icon = SystemTrayIcon(QIcon(I('library.png')), self) @@ -675,6 +675,15 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI): self._sync_menu.fetch_annotations.connect(self.fetch_annotations) self._sync_menu.connect_to_folder.connect(self.connect_to_folder) self._sync_menu.disconnect_from_folder.connect(self.disconnect_from_folder) + if self.device_connected: + self._sync_menu.connect_to_folder_action.setEnabled(False) + if self.device_connected == 'folder': + self._sync_menu.disconnect_from_folder_action.setEnabled(True) + else: + self._sync_menu.disconnect_from_folder_action.setEnabled(False) + else: + self._sync_menu.connect_to_folder_action.setEnabled(True) + self._sync_menu.disconnect_from_folder_action.setEnabled(False) def add_spare_server(self, *args): self.spare_servers.append(Server(limit=int(config['worker_limit']/2.0))) @@ -944,7 +953,7 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI): self.status_bar.showMessage(_('Device: ')+\ self.device_manager.device.__class__.get_gui_name()+\ _(' detected.'), 3000) - self.device_connected = True + self.device_connected = 'device' if not is_folder_device else 'folder' self._sync_menu.enable_device_actions(True, self.device_manager.device.card_prefix(), self.device_manager.device) @@ -955,7 +964,7 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI): 
self._sync_menu.connect_to_folder_action.setEnabled(True) self._sync_menu.disconnect_from_folder_action.setEnabled(False) self.save_device_view_settings() - self.device_connected = False + self.device_connected = None self._sync_menu.enable_device_actions(False) self.location_view.model().update_devices() self.vanity.setText(self.vanity_template%\ diff --git a/src/calibre/library/custom_columns.py b/src/calibre/library/custom_columns.py index b6ada01b8c..36ea49763e 100644 --- a/src/calibre/library/custom_columns.py +++ b/src/calibre/library/custom_columns.py @@ -141,11 +141,15 @@ class CustomColumns(object): } # Create Tag Browser categories for custom columns - for i, v in self.custom_column_num_map.items(): + for k in sorted(self.custom_column_label_map.keys()): + v = self.custom_column_label_map[k] if v['normalized']: - tn = 'custom_column_{0}'.format(i) - self.tag_browser_categories[v['label']] = {'table':tn, 'column':'value', 'type':v['datatype'], 'name':v['name']} - #self.tag_browser_datatype[v['label']] = v['datatype'] + tn = 'custom_column_{0}'.format(v['num']) + self.tag_browser_categories[v['label']] = { + 'table':tn, 'column':'value', + 'type':v['datatype'], 'is_multiple':v['is_multiple'], + 'kind':'custom', 'name':v['name'] + } def get_custom(self, idx, label=None, num=None, index_is_id=False): if label is not None: diff --git a/src/calibre/library/database2.py b/src/calibre/library/database2.py index 12398de918..6ca73d9656 100644 --- a/src/calibre/library/database2.py +++ b/src/calibre/library/database2.py @@ -34,6 +34,8 @@ from calibre.customize.ui import run_plugins_on_import from calibre.utils.filenames import ascii_filename from calibre.utils.date import utcnow, now as nowf, utcfromtimestamp from calibre.utils.ordered_dict import OrderedDict +from calibre.utils.config import prefs +from calibre.utils.search_query_parser import saved_searches from calibre.ebooks import BOOK_EXTENSIONS, check_ebook_format if iswindows: @@ -125,26 +127,32 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns): self.dbpath = self.dbpath.encode(filesystem_encoding) # Order as has been customary in the tags pane. 
- self.tag_browser_categories = OrderedDict([ - ('authors', {'table':'authors', 'column':'name', 'type':'text', 'name':_('Authors')}), - ('series', {'table':'series', 'column':'name', 'type':None, 'name':_('Series')}), - ('formats', {'table':None, 'column':None, 'type':None, 'name':_('Formats')}), - ('publishers',{'table':'publishers', 'column':'name', 'type':'text', 'name':_('Publishers')}), - ('ratings', {'table':'ratings', 'column':'rating', 'type':'rating', 'name':_('Ratings')}), - ('news', {'table':'news', 'column':'name', 'type':None, 'name':_('News')}), - ('tags', {'table':'tags', 'column':'name', 'type':'textmult', 'name':_('Tags')}), - ]) - -# self.tag_browser_datatype = { -# 'tag' : 'textmult', -# 'series' : None, -# 'publisher' : 'text', -# 'author' : 'text', -# 'news' : None, -# 'rating' : 'rating', -# } - - self.tag_browser_formatters = {'rating': lambda x:u'\u2605'*int(round(x/2.))} + tag_browser_categories_items = [ + ('authors', {'table':'authors', 'column':'name', + 'type':'text', 'is_multiple':False, + 'kind':'standard', 'name':_('Authors')}), + ('series', {'table':'series', 'column':'name', + 'type':None, 'is_multiple':False, + 'kind':'standard', 'name':_('Series')}), + ('formats', {'table':None, 'column':None, + 'type':None, 'is_multiple':False, + 'kind':'standard', 'name':_('Formats')}), + ('publishers',{'table':'publishers', 'column':'name', + 'type':'text', 'is_multiple':False, + 'kind':'standard', 'name':_('Publishers')}), + ('ratings', {'table':'ratings', 'column':'rating', + 'type':'rating', 'is_multiple':False, + 'kind':'standard', 'name':_('Ratings')}), + ('news', {'table':'news', 'column':'name', + 'type':None, 'is_multiple':False, + 'kind':'standard', 'name':_('News')}), + ('tags', {'table':'tags', 'column':'name', + 'type':'text', 'is_multiple':True, + 'kind':'standard', 'name':_('Tags')}), + ] + self.tag_browser_categories = OrderedDict() + for k,v in tag_browser_categories_items: + self.tag_browser_categories[k] = v self.connect() self.is_case_sensitive = not iswindows and not isosx and \ @@ -653,14 +661,19 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns): def get_recipe(self, id): return self.conn.get('SELECT script FROM feeds WHERE id=?', (id,), all=False) + def get_tag_browser_categories(self): + return self.tag_browser_categories + def get_categories(self, sort_on_count=False, ids=None, icon_map=None): self.books_list_filter.change([] if not ids else ids) categories = {} + + #### First, build the standard and custom-column categories #### for category in self.tag_browser_categories.keys(): tn = self.tag_browser_categories[category]['table'] - categories[category] = [] #reserve the position in the ordered list - if tn is None: + categories[category] = [] #reserve the position in the ordered list + if tn is None: # Nothing to do for the moment continue cn = self.tag_browser_categories[category]['column'] if ids is None: @@ -672,22 +685,41 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns): else: query += ' ORDER BY {0} ASC'.format(cn) data = self.conn.get(query) - # category = cn[0] + + # icon_map is not None if get_categories is to store an icon and + # possibly a tooltip in the tag structure. 
icon, tooltip = None, '' if icon_map: - if category in icon_map: - icon = icon_map[category] - else: + if self.tag_browser_categories[category]['kind'] == 'standard': + if category in icon_map: + icon = icon_map[category] + elif self.tag_browser_categories[category]['kind'] == 'custom': icon = icon_map['*custom'] + icon_map[category] = icon_map['*custom'] tooltip = self.custom_column_label_map[category]['name'] + datatype = self.tag_browser_categories[category]['type'] - formatter = self.tag_browser_formatters.get(datatype, lambda x: x) + if datatype == 'rating': + item_zero_func = (lambda x: len(formatter(r[1])) > 0) + formatter = (lambda x:u'\u2605'*int(round(x/2.))) + elif category == 'authors': + item_zero_func = (lambda x: x[2] > 0) + # Clean up the authors strings to human-readable form + formatter = (lambda x: x.replace('|', ',')) + else: + item_zero_func = (lambda x: x[2] > 0) + formatter = (lambda x:x) + categories[category] = [Tag(formatter(r[1]), count=r[2], id=r[0], icon=icon, tooltip = tooltip) - for r in data - if r[2] > 0 and - (datatype != 'rating' or len(formatter(r[1])) > 0)] + for r in data if item_zero_func(r)] + + # We delayed computing the standard formats category because it does not + # use a view, but is computed dynamically categories['formats'] = [] + icon = None + if icon_map and 'formats' in icon_map: + icon = icon_map['formats'] for fmt in self.conn.get('SELECT DISTINCT format FROM data'): fmt = fmt[0] if ids is not None: @@ -702,13 +734,70 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns): WHERE format="%s"'''%fmt, all=False) if count > 0: - categories['formats'].append(Tag(fmt, count=count)) + categories['formats'].append(Tag(fmt, count=count, icon=icon)) if sort_on_count: categories['formats'].sort(cmp=lambda x,y:cmp(x.count, y.count), reverse=True) else: categories['formats'].sort(cmp=lambda x,y:cmp(x.name, y.name)) + + #### Now do the user-defined categories. #### + user_categories = dict.copy(prefs['user_categories']) + + # remove all user categories from tag_browser_categories. They can + # easily come and go. We will add all the existing ones in below. + for k in self.tag_browser_categories.keys(): + if self.tag_browser_categories[k]['kind'] in ['user', 'search']: + del self.tag_browser_categories[k] + + # We want to use same node in the user category as in the source + # category. To do that, we need to find the original Tag node. There is + # a time/space tradeoff here. By converting the tags into a map, we can + # do the verification in the category loop much faster, at the cost of + # temporarily duplicating the categories lists. 
+ taglist = {} + for c in categories.keys(): + taglist[c] = dict(map(lambda t:(t.name, t), categories[c])) + + for user_cat in sorted(user_categories.keys()): + items = [] + for (name,label,ign) in user_categories[user_cat]: + if label in taglist and name in taglist[label]: + items.append(taglist[label][name]) + # else: do nothing, to not include nodes w zero counts + if len(items): + cat_name = user_cat+'*' # add the * to avoid name collision + self.tag_browser_categories[cat_name] = { + 'table':None, 'column':None, + 'type':None, 'is_multiple':False, + 'kind':'user', 'name':user_cat} + # Not a problem if we accumulate entries in the icon map + if icon_map is not None: + icon_map[cat_name] = icon_map['*user'] + if sort_on_count: + categories[cat_name] = \ + sorted(items, cmp=(lambda x, y: cmp(y.count, x.count))) + else: + categories[cat_name] = \ + sorted(items, cmp=(lambda x, y: cmp(x.name.lower(), y.name.lower()))) + + #### Finally, the saved searches category #### + items = [] + icon = None + if icon_map and 'search' in icon_map: + icon = icon_map['search'] + for srch in saved_searches.names(): + items.append(Tag(srch, tooltip=saved_searches.lookup(srch), icon=icon)) + if len(items): + self.tag_browser_categories['search'] = { + 'table':None, 'column':None, + 'type':None, 'is_multiple':False, + 'kind':'search', 'name':_('Searches')} + if icon_map is not None: + icon_map['search'] = icon_map['search'] + categories['search'] = items + return categories def tags_older_than(self, tag, delta): diff --git a/src/calibre/utils/config.py b/src/calibre/utils/config.py index 559721c193..69eee4d1ed 100644 --- a/src/calibre/utils/config.py +++ b/src/calibre/utils/config.py @@ -694,8 +694,10 @@ def _prefs(): help=_('Add new formats to existing book records')) c.add_opt('installation_uuid', default=None, help='Installation UUID') - # this is here instead of the gui preferences because calibredb can execute searches + # these are here instead of the gui preferences because calibredb and + # calibre server can execute searches c.add_opt('saved_searches', default={}, help=_('List of named saved searches')) + c.add_opt('user_categories', default={}, help=_('User-created tag browser categories')) c.add_opt('migrated', default=False, help='For Internal use. 
Don\'t modify.') return c From 8d9ddba6cd3052185c4159040d0e4ad60c182583 Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Mon, 24 May 2010 08:56:43 -0600 Subject: [PATCH 12/22] No longer install a UDEV file as the PRS500 is not supported --- src/calibre/linux.py | 35 ----------------------------------- 1 file changed, 35 deletions(-) diff --git a/src/calibre/linux.py b/src/calibre/linux.py index ed806d58ac..26bbe0837b 100644 --- a/src/calibre/linux.py +++ b/src/calibre/linux.py @@ -132,7 +132,6 @@ class PostInstall: self.mime_resources = [] if islinux: self.setup_completion() - self.setup_udev_rules() self.install_man_pages() if islinux: self.setup_desktop_integration() @@ -286,40 +285,6 @@ class PostInstall: raise self.task_failed('Setting up completion failed') - def setup_udev_rules(self): - self.info('Trying to setup udev rules...') - try: - group_file = os.path.join(self.opts.staging_etc, 'group') - if not os.path.exists(group_file): - group_file = '/etc/group' - groups = open(group_file, 'rb').read() - group = 'plugdev' if 'plugdev' in groups else 'usb' - old_udev = '/etc/udev/rules.d/95-calibre.rules' - if not os.path.exists(old_udev): - old_udev = os.path.join(self.opts.staging_etc, 'udev/rules.d/95-calibre.rules') - if os.path.exists(old_udev): - try: - os.remove(old_udev) - except: - self.warn('Old udev rules found, please delete manually:', - old_udev) - if self.opts.staging_root == '/usr': - base = '/lib' - else: - base = os.path.join(self.opts.staging_root, 'lib') - base = os.path.join(base, 'udev', 'rules.d') - if not os.path.exists(base): - os.makedirs(base) - with open(os.path.join(base, '95-calibre.rules'), 'wb') as udev: - self.manifest.append(udev.name) - udev.write('''# Sony Reader PRS-500\n''' - '''SUBSYSTEMS=="usb", SYSFS{idProduct}=="029b", SYSFS{idVendor}=="054c", MODE="660", GROUP="%s"\n'''%(group,) - ) - except: - if self.opts.fatal_errors: - raise - self.task_failed('Setting up udev rules failed') - def install_man_pages(self): try: from calibre.utils.help2man import create_man_page From e0a727002b392c934f01ea799593deef503d467e Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Mon, 24 May 2010 09:07:58 -0600 Subject: [PATCH 13/22] Add The Book to the welcome wizard --- src/calibre/gui2/wizard/__init__.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/calibre/gui2/wizard/__init__.py b/src/calibre/gui2/wizard/__init__.py index 0a395e9eb8..b57867331f 100644 --- a/src/calibre/gui2/wizard/__init__.py +++ b/src/calibre/gui2/wizard/__init__.py @@ -106,6 +106,13 @@ class Booq(Device): output_format = 'EPUB' id = 'booq' +class TheBook(Device): + name = 'The Book' + manufacturer = 'Augen' + output_profile = 'prs505' + output_format = 'EPUB' + id = 'thebook' + class Avant(Booq): name = 'Booq Avant' From d91cd4419e5bc70a47965c6eae196893ec71b81e Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Mon, 24 May 2010 09:10:28 -0600 Subject: [PATCH 14/22] iPad image --- resources/images/devices/ipad.png | Bin 0 -> 17785 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 resources/images/devices/ipad.png diff --git a/resources/images/devices/ipad.png b/resources/images/devices/ipad.png new file mode 100644 index 0000000000000000000000000000000000000000..119d53dc9afbb1dbff06c4cbe0b2db043a9ed29a GIT binary patch literal 17785 zcmV)AK*Ya^P)pPPiaF#P*7-ZbZ>KLZ*U+IBfRsybQWXdwQbLP>6pAqfylh#{fb6;Z(vMMVS~$e@S=j*ftg6;Uhf59&ghTmgWD0l;*T zI709Y^p6lP1rIRMx#05C~cW=H_Aw*bJ-5DT&Z2n+x)QHX^p z00esgV8|mQcmRZ%02D^@S3L16t`O%c004NIvOKvYIYoh62rY33S640`D9%Y2D-rV&neh&#Q1i 
[base85 binary patch data for resources/images/devices/ipad.png; the encoded image bytes are elided in this extract]
zXO_Wqjfel>7r6MyyE!r~Fiu!HYdM@Nk3Ih+*?5f2M$HB-wQ_3O{u`TToO$sjKK1Lr z!KrMKgVQO8gXb-$Y;3Gkz3@j|D5iKv(;nI1pw>~Aj_|L3iFbYLcdPYN4b2F7;kbahlKc=!60 zNAEoyL&T!s<;z!@Y^`!(?FORjt&5-)1PB3wD;X~?@W)Rol?o5scOPbZLUryUQk1N% zZNO{`)M7e79G#M#$%N^wOiz ztJjwJ)Y_1H-u`}kaBQt!)2B{z&gv+7-q9q z2{_!wBz0~lt@se|q3TPabbzd~3|x5gRW>hdvz)ZzUA08%;;x)NRMeYpb%~ud>sK^T z&N5uF!0T^rpz3j@Wwtd27aJp_=8#H=yP*Zo;Ufn*eB=<9FI~I^?wCq{*4X_SUf}%s zvs}4yaTm3VP8J)W3O;UNu2F$01Tf-uWQ?KV#?bKXA&M!xqz))rLj;l7*H(?XnvRl$ zlhdYN!4rHD`BNeUu>@n0RP>fz%W+-YG@U=6jk`94xED&m;dkx(i03k$PWOd~s%msK zs=BWjesgn^|NrOKZe$*h@7;uYO%vJoda&_2k1dLF zUp}5cBeTNVjD_(chYlS=L@A0H>+9=mZEgICyQQzj)!gbcS(agq#g5108^?|w|8ucu zK_JgFj^A+yOH0d?Wr;C{d@y8td-G4+@P8WX%b`PuICkt9dgI30haY|HQPkLfrwiGB z3+}VojL~Ssv17+Lb@~*?jvY;zwEJ^0Yv22I?`1d`d{i%-KmXgOPM&<~+dla1Z592t zZb0ZIB6~l_ZM?qs=yT5T%F8cu`SJyd>6G(ty~S&1&iq+*j=l508 ze*UAM_>Z~gzI%V^#EBEXEoa|4hqZ=RUpezvu3f$UkKTCm&3m4C=4mdRKhJbB-Id?I zTkPHYySsw#L7%&6P4bpioPK;3I(6K=KN0pclI*D$x%<%2X4@ln`Fu-+%JTt-4jts- zcRtLck33qGRrz!8`-b=Z)ObAJmbczI-+ZBS{=d6=_1aHdyLPpxsw(7po|<%B>mYpU zh^Ty0!Q-Z}^t^6wU4Zc3M^T4eqJU7VXoo$-nd?o=!# Date: Mon, 24 May 2010 16:24:21 +0100 Subject: [PATCH 15/22] Add tooltips to top-level categories in tags_view --- src/calibre/gui2/tag_view.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/calibre/gui2/tag_view.py b/src/calibre/gui2/tag_view.py index ba93b818c2..5ff4fc23ba 100644 --- a/src/calibre/gui2/tag_view.py +++ b/src/calibre/gui2/tag_view.py @@ -126,7 +126,7 @@ class TagTreeItem(object): # {{{ TAG = 1 ROOT = 2 - def __init__(self, data=None, category_icon=None, icon_map=None, parent=None): + def __init__(self, data=None, category_icon=None, icon_map=None, parent=None, tooltip=None): self.parent = parent self.children = [] if self.parent is not None: @@ -144,6 +144,7 @@ class TagTreeItem(object): # {{{ elif self.type == self.TAG: icon_map[0] = data.icon self.tag, self.icon_state_map = data, list(map(QVariant, icon_map)) + self.tooltip = tooltip def __str__(self): if self.type == self.ROOT: @@ -175,6 +176,8 @@ class TagTreeItem(object): # {{{ return self.icon if role == Qt.FontRole: return self.bold_font + if role == Qt.ToolTipRole and self.tooltip is not None: + return QVariant(self.tooltip) return NONE def tag_data(self, role): @@ -228,7 +231,8 @@ class TagsModel(QAbstractItemModel): # {{{ for i, r in enumerate(self.row_map): c = TagTreeItem(parent=self.root_item, data=self.categories[i], - category_icon=self.category_icon_map[r]) + category_icon=self.category_icon_map[r], + tooltip=_('The lookup/search name is "{0}"').format(r)) for tag in data[r]: TagTreeItem(parent=c, data=tag, icon_map=self.icon_state_map) From b65821c073870f2ae8dd9ad1769e03bc56f350fc Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Mon, 24 May 2010 09:30:21 -0600 Subject: [PATCH 16/22] American Thinker by Walt Anthony --- resources/images/news/american_thinker.png | Bin 0 -> 705 bytes resources/recipes/american_thinker.recipe | 43 +++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 resources/images/news/american_thinker.png create mode 100644 resources/recipes/american_thinker.recipe diff --git a/resources/images/news/american_thinker.png b/resources/images/news/american_thinker.png new file mode 100644 index 0000000000000000000000000000000000000000..bbfdf08152d1882d4899ddb7b56705b4faba1c1c GIT binary patch literal 705 zcmV;y0zUnTP)I(=|HbNZ8B0hEl5Ez;@w&jWaWi>5uN!IE{4KEw_%t&`YjuWPHcOf$AoNDDp&f=q zN~27M-u`j+A3BL`y8y^d()`>z=07e_tu<(b3H2s~u|b~5 
zcO1lV$gS(=&?;u2G=#K0v~bW`gOS9ELZ&H+sQL=&S(X7kaQT6+YS6KkDiIx&nYEb#!W_zE{*UCChS2wx5!|nk|6oicJ$z!XCAgFWd?Bzc> zSg$VgWby`!b8qmR3=JiytZ3>@L!d2`Qn', re.DOTALL), lambda m: '')] def find_title(self, section): d = {'scope':'Scope', 'thetake':'The Take', 'features':'Features', From c92c3312ed91978a190b8c191e3462badeb3bc8e Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Mon, 24 May 2010 11:43:59 -0600 Subject: [PATCH 19/22] Replace use of genshi with lxml for templates in the news download subsystem --- src/calibre/web/feeds/templates.py | 332 ++++++++++++++--------------- 1 file changed, 159 insertions(+), 173 deletions(-) diff --git a/src/calibre/web/feeds/templates.py b/src/calibre/web/feeds/templates.py index 4b2156b6a1..4de7c42daa 100644 --- a/src/calibre/web/feeds/templates.py +++ b/src/calibre/web/feeds/templates.py @@ -2,207 +2,193 @@ __license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal ' -from calibre.utils.genshi.template import MarkupTemplate -from calibre import preferred_encoding, strftime +from lxml import html, etree +from lxml.html.builder import HTML, HEAD, TITLE, STYLE, DIV, BODY, \ + STRONG, BR, H1, SPAN, A, HR, UL, LI, H2, IMG, P as PT -class Template(MarkupTemplate): +from calibre import preferred_encoding, strftime, isbytestring + +def CLASS(*args, **kwargs): # class is a reserved word in Python + kwargs['class'] = ' '.join(args) + return kwargs + +class Template(object): + + IS_HTML = True def generate(self, *args, **kwargs): if not kwargs.has_key('style'): kwargs['style'] = '' for key in kwargs.keys(): - if isinstance(kwargs[key], basestring) and not isinstance(kwargs[key], unicode): - kwargs[key] = unicode(kwargs[key], 'utf-8', 'replace') - for arg in args: - if isinstance(arg, basestring) and not isinstance(arg, unicode): - arg = unicode(arg, 'utf-8', 'replace') + if isbytestring(kwargs[key]): + kwargs[key] = kwargs[key].decode('utf-8', 'replace') + if kwargs[key] is None: + kwargs[key] = u'' + args = list(args) + for i in range(len(args)): + if isbytestring(args[i]): + args[i] = args[i].decode('utf-8', 'replace') + if args[i] is None: + args[i] = u'' - return MarkupTemplate.generate(self, *args, **kwargs) + self._generate(*args, **kwargs) + + return self + + def render(self, *args, **kwargs): + if self.IS_HTML: + return html.tostring(self.root, encoding='utf-8', + include_meta_content_type=True, pretty_print=True) + return etree.tostring(self.root, encoding='utf-8', xml_declaration=True, + pretty_print=True) class NavBarTemplate(Template): - def __init__(self): - Template.__init__(self, u'''\ - - - - - - -
-        [removed genshi navbar markup; the HTML tags were stripped in this extract. Recoverable text: the footer 'This article was downloaded by ${__appname__} from ${url}' and the 'Next | Section menu | Main menu | Previous' navigation links.]
- - -''') - - def generate(self, bottom, feed, art, number_of_articles_in_feed, + def _generate(self, bottom, feed, art, number_of_articles_in_feed, two_levels, url, __appname__, prefix='', center=True, - extra_css=None): + extra_css=None, style=None): + head = HEAD(TITLE('navbar')) + if style: + head.append(STYLE(style, type='text/css')) + if extra_css: + head.append(STYLE(extra_css, type='text/css')) + if prefix and not prefix.endswith('/'): prefix += '/' - return Template.generate(self, bottom=bottom, art=art, feed=feed, - num=number_of_articles_in_feed, - two_levels=two_levels, url=url, - __appname__=__appname__, prefix=prefix, - center=center, extra_css=extra_css) + align = 'center' if center else 'left' + navbar = DIV(CLASS('calibre_navbar', 'calibre_rescale_70', + style='text-align:'+align)) + if bottom: + navbar.append(HR()) + text = 'This article was downloaded by ' + p = PT(text, STRONG(__appname__), A(url, href=url), style='text-align:left') + p[0].tail = ' from ' + navbar.append(BR()) + navbar.append(BR()) + else: + next = 'feed_%d'%(feed+1) if art == number_of_articles_in_feed - 1 \ + else 'article_%d'%(art+1) + up = '../..' if art == number_of_articles_in_feed - 1 else '..' + href = '%s%s/%s/index.html'%(prefix, up, next) + navbar.text = '| ' + navbar.append(A('Next', href=href)) + href = '%s../index.html#article_%d'%(prefix, art) + navbar.iterchildren(reversed=True).next().tail = ' | ' + navbar.append(A('Section Menu', href=href)) + href = '%s../../index.html#feed_%d'%(prefix, feed) + navbar.iterchildren(reversed=True).next().tail = ' | ' + navbar.append(A('Main Menu', href=href)) + if art > 0 and not bottom: + href = '%s../article_%d/index.html'%(prefix, art-1) + navbar.iterchildren(reversed=True).next().tail = ' | ' + navbar.append(A('Previous', href=href)) + navbar.iterchildren(reversed=True).next().tail = ' | ' + if not bottom: + navbar.append(HR()) + + self.root = HTML(head, BODY(navbar)) + + class IndexTemplate(Template): - def __init__(self): - Template.__init__(self, u'''\ - - - - - ${title} - - - - -
-        [removed genshi index-page markup; the HTML tags were stripped in this extract. Recoverable text: the ${title} heading and the ${date} line.]
- - -''') - - def generate(self, title, datefmt, feeds, extra_css=None): + def _generate(self, title, datefmt, feeds, extra_css=None, style=None): if isinstance(datefmt, unicode): datefmt = datefmt.encode(preferred_encoding) date = strftime(datefmt) - return Template.generate(self, title=title, date=date, feeds=feeds, - extra_css=extra_css) - + head = HEAD(TITLE(title)) + if style: + head.append(STYLE(style, type='text/css')) + if extra_css: + head.append(STYLE(extra_css, type='text/css')) + ul = UL(CLASS('calibre_feed_list')) + for i, feed in enumerate(feeds): + if feed: + li = LI(A(feed.title, CLASS('feed', 'calibre_rescale_120', + href='feed_%d/index.html'%i)), id='feed_%d'%i) + ul.append(li) + div = DIV( + H1(title, CLASS('calibre_recipe_title', 'calibre_rescale_180')), + PT(date, style='text-align:right'), + ul, + CLASS('calibre_rescale_100')) + self.root = HTML(head, BODY(div)) class FeedTemplate(Template): - def __init__(self): - Template.__init__(self, u'''\ - - - - - ${feed.title} - - - - -
-        [removed genshi feed-page markup; the HTML tags were stripped in this extract. Recoverable text: the ${feed.title} heading, the ${feed.image_alt} image, ${feed.description}, per-article ${article.title} and ${Markup(cutoff(article.text_summary))} entries, and the 'Up one level' link.]
- - -''') + self.root = HTML(head, body) - def generate(self, feed, cutoff, extra_css=None): - return Template.generate(self, feed=feed, cutoff=cutoff, - extra_css=extra_css) class EmbeddedContent(Template): - def __init__(self): - Template.__init__(self, u'''\ - - len(summary) else summary + head = HEAD(TITLE(article.title)) + if style: + head.append(STYLE(style, type='text/css')) + if extra_css: + head.append(STYLE(extra_css, type='text/css')) -> - - ${article.title} - + if isbytestring(text): + text = text.decode('utf-8', 'replace') + elements = html.fragments_fromstring(text) + self.root = HTML(head, + BODY(H2(article.title), DIV())) + div = self.root.find('body').find('div') + if elements and isinstance(elements[0], unicode): + div.text = elements[0] + elements = list(elements)[1:] + for elem in elements: + elem.getparent().remove(elem) + div.append(elem) - -
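# A minimal sketch, not part of the original patch: it shows what the
# html.fragments_fromstring() handling in EmbeddedContent._generate() above
# does with a typical article body. The sample string is invented; the check
# is widened to basestring here so the standalone example works for plain
# ASCII text as well.
from lxml import html
from lxml.html.builder import DIV

sample = u'lead-in text <p>first paragraph</p><p>second paragraph</p>'
fragments = html.fragments_fromstring(sample)
div = DIV()
if fragments and isinstance(fragments[0], basestring):
    # a bare leading string becomes the <div>'s text node
    div.text = fragments[0]
    fragments = list(fragments)[1:]
for frag in fragments:
    # detach each element from the temporary parse tree before re-homing it
    if frag.getparent() is not None:
        frag.getparent().remove(frag)
    div.append(frag)
# html.tostring(div) now yields the article body wrapped in a single <div>
# end of sketch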

-        [removed genshi embedded-content markup; the HTML tags were stripped in this extract. Recoverable text: the ${article.title} heading and the ${Markup(article.content ... article.summary)} body.]
- - -''') - - def generate(self, article): - return Template.generate(self, article=article) From cf90d5f1e55373520201316cf572ee0ebc1ecb71 Mon Sep 17 00:00:00 2001 From: Charles Haley <> Date: Mon, 24 May 2010 21:08:55 +0100 Subject: [PATCH 20/22] Small spelling mistake in default_tweaks.py --- resources/default_tweaks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resources/default_tweaks.py b/resources/default_tweaks.py index 5c15651f9c..e9ad64cee2 100644 --- a/resources/default_tweaks.py +++ b/resources/default_tweaks.py @@ -35,7 +35,7 @@ bool_custom_columns_are_tristate = 'yes' # Provide a set of columns to be sorted on when calibre starts -# The argument is None of saved sort history is to be used +# The argument is None if saved sort history is to be used # otherwise it is a list of column,order pairs. Column is the # lookup/search name, found using the tooltip for the column # Order is 0 for ascending, 1 for descending From 3bdf4e61f36507247db7dac00c4ee3797ab4a58d Mon Sep 17 00:00:00 2001 From: Charles Haley <> Date: Mon, 24 May 2010 21:27:27 +0100 Subject: [PATCH 21/22] Fix stupidities: 1) I left a bunch of unused declarations in tag_view.py 2) I needlessly made a copy of user_categories in DB2 --- src/calibre/gui2/tag_view.py | 13 ------------- src/calibre/library/database2.py | 2 +- 2 files changed, 1 insertion(+), 14 deletions(-) diff --git a/src/calibre/gui2/tag_view.py b/src/calibre/gui2/tag_view.py index 5ff4fc23ba..6d1bf5ab28 100644 --- a/src/calibre/gui2/tag_view.py +++ b/src/calibre/gui2/tag_view.py @@ -200,13 +200,6 @@ class TagTreeItem(object): # {{{ class TagsModel(QAbstractItemModel): # {{{ - categories_orig = [_('Authors'), _('Series'), _('Formats'), _('Publishers'), - _('Ratings'), _('News'), _('Tags')] - row_map_orig = ['authors', 'series', 'formats', 'publishers', 'ratings', - 'news', 'tags'] - search_keys=['search', _('Searches')] - - def __init__(self, db, parent=None): QAbstractItemModel.__init__(self, parent) @@ -257,12 +250,6 @@ class TagsModel(QAbstractItemModel): # {{{ return data - def get_search_nodes(self, icon): - l = [] - for i in saved_searches.names(): - l.append(Tag(i, tooltip=saved_searches.lookup(i), icon=icon)) - return l - def refresh(self): data = self.get_node_tree(config['sort_by_popularity']) # get category data for i, r in enumerate(self.row_map): diff --git a/src/calibre/library/database2.py b/src/calibre/library/database2.py index 6ca73d9656..8278386b8e 100644 --- a/src/calibre/library/database2.py +++ b/src/calibre/library/database2.py @@ -743,7 +743,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns): categories['formats'].sort(cmp=lambda x,y:cmp(x.name, y.name)) #### Now do the user-defined categories. #### - user_categories = dict.copy(prefs['user_categories']) + user_categories = prefs['user_categories'] # remove all user categories from tag_browser_categories. They can # easily come and go. We will add all the existing ones in below. 
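For reference, the user_categories preference read above is a plain dict kept in prefs. A minimal sketch of the shape the get_categories() loop shown earlier in this series expects, with invented names (the real contents depend entirely on what the user has created):

    # Illustrative only: each user category maps to a list of
    # (item name, source category label, ignored) triples, matching the
    # 'for (name,label,ign) in user_categories[user_cat]' loop in database2.py
    user_categories = {
        u'Favourites': [
            (u'Isaac Asimov', u'authors', 0),
            (u'Foundation',   u'series',  0),
        ],
    }
    for user_cat in sorted(user_categories.keys()):
        for (name, label, ign) in user_categories[user_cat]:
            pass  # look the item up in the already-built source category, as above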
From 907ae0c3724ca7f537feb94ac61edd2ee49e9329 Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Mon, 24 May 2010 14:28:20 -0600 Subject: [PATCH 22/22] Add parameter to ignore restriction when searching and fix bug when return matches with an empty query in ResultCache.search --- src/calibre/library/caches.py | 18 +- src/calibre/library/server/cache.py | 27 ++- src/calibre/library/server/opds.py | 312 ++++++---------------------- 3 files changed, 98 insertions(+), 259 deletions(-) diff --git a/src/calibre/library/caches.py b/src/calibre/library/caches.py index 17853b818f..5e6c10c27b 100644 --- a/src/calibre/library/caches.py +++ b/src/calibre/library/caches.py @@ -626,20 +626,24 @@ class ResultCache(SearchQueryParser): self._map.sort(cmp=fcmp, reverse=not ascending) self._map_filtered = [id for id in self._map if id in self._map_filtered] - def search(self, query, return_matches=False): + def search(self, query, return_matches=False, + ignore_search_restriction=False): if not query or not query.strip(): - q = self.search_restriction - else: - q = '%s (%s)' % (self.search_restriction, query) + q = '' + if not ignore_search_restriction: + q = self.search_restriction + elif not ignore_search_restriction: + q = u'%s (%s)' % (self.search_restriction, query) if not q: if return_matches: - return list(self.map) # when return_matches, do not update the maps! + return list(self._map) # when return_matches, do not update the maps! self._map_filtered = list(self._map) return [] matches = sorted(self.parse(q)) + ans = [id for id in self._map if id in matches] if return_matches: - return [id for id in self._map if id in matches] - self._map_filtered = [id for id in self._map if id in matches] + return ans + self._map_filtered = ans return [] def set_search_restriction(self, s): diff --git a/src/calibre/library/server/cache.py b/src/calibre/library/server/cache.py index 89dc140434..5c9be367d0 100644 --- a/src/calibre/library/server/cache.py +++ b/src/calibre/library/server/cache.py @@ -6,13 +6,28 @@ __copyright__ = '2010, Kovid Goyal ' __docformat__ = 'restructuredtext en' from calibre.utils.date import utcnow +from calibre.utils.ordered_dict import OrderedDict class Cache(object): - @property - def categories_cache(self): - old = getattr(self, '_category_cache', None) + def add_routes(self, c): + self._category_cache = OrderedDict() + self._search_cache = OrderedDict() + + def search_cache(self, search): + old = self._search_cache.get(search, None) if old is None or old[0] <= self.db.last_modified(): - categories = self.db.get_categories() - self._category_cache = (utcnow(), categories) - return self._category_cache[1] + matches = self.db.data.search(search) + self._search_cache[search] = frozenset(matches) + if len(self._search_cache) > 10: + self._search_cache.popitem(last=False) + + + def categories_cache(self, restrict_to=frozenset([])): + old = self._category_cache.get(frozenset(restrict_to), None) + if old is None or old[0] <= self.db.last_modified(): + categories = self.db.get_categories(ids=restrict_to) + self._category_cache[restrict_to] = (utcnow(), categories) + if len(self._category_cache) > 10: + self._category_cache.popitem(last=False) + return self._category_cache[restrict_to][1] diff --git a/src/calibre/library/server/opds.py b/src/calibre/library/server/opds.py index 359449a838..23ee58da7f 100644 --- a/src/calibre/library/server/opds.py +++ b/src/calibre/library/server/opds.py @@ -5,20 +5,20 @@ __license__ = 'GPL v3' __copyright__ = '2010, Kovid Goyal ' __docformat__ = 'restructuredtext 
en' -import re, hashlib -from itertools import repeat +import hashlib, binascii from functools import partial -import cherrypy from lxml import etree from lxml.builder import ElementMaker +import cherrypy -from calibre.utils.genshi.template import MarkupTemplate -from calibre.library.server.utils import strftime, expose -from calibre.ebooks.metadata import fmt_sidx, title_sort -from calibre import guess_type, prepare_string_for_xml from calibre.constants import __appname__ +BASE_HREFS = { + 0 : '/stanza', + 1 : '/opds', +} + # Vocabulary for building OPDS feeds {{{ E = ElementMaker(namespace='http://www.w3.org/2005/Atom', nsmap={ @@ -42,7 +42,7 @@ NAVLINK = partial(E.link, def SEARCH(base_href, *args, **kwargs): kwargs['rel'] = 'search' kwargs['title'] = 'Search' - kwargs['href'] = base_href+'/?search={searchTerms}' + kwargs['href'] = base_href+'/search/{searchTerms}' return LINK(*args, **kwargs) def AUTHOR(name, uri=None): @@ -53,11 +53,9 @@ def AUTHOR(name, uri=None): SUBTITLE = E.subtitle -def NAVCATALOG_ENTRY(base_href, updated, title, description, query_data): - data = [u'%s=%s'%(key, val) for key, val in query_data.items()] - data = '&'.join(data) - href = base_href+'/?'+data - id_ = 'calibre-subcatalog:'+str(hashlib.sha1(href).hexdigest()) +def NAVCATALOG_ENTRY(base_href, updated, title, description, query): + href = base_href+'/navcatalog/'+binascii.hexlify(query) + id_ = 'calibre-navcatalog:'+str(hashlib.sha1(href).hexdigest()) return E.entry( TITLE(title), ID(id_), @@ -79,14 +77,15 @@ class TopLevel(Feed): def __init__(self, updated, # datetime object in UTC categories, + version, id_ = 'urn:calibre:main', - base_href = '/stanza' ): + base_href = BASE_HREFS[version] self.base_href = base_href subc = partial(NAVCATALOG_ENTRY, base_href, updated) - subcatalogs = [subc('By '+title, - 'Books sorted by '+desc, {'sortby':q}) for title, desc, q in + subcatalogs = [subc(_('By ')+title, + _('Books sorted by ') + desc, q) for title, desc, q in categories] self.root = \ @@ -100,248 +99,69 @@ class TopLevel(Feed): *subcatalogs ) - - -# Templates {{{ - -STANZA_ENTRY=MarkupTemplate('''\ - - ${record[FM['title']]} - urn:calibre:${urn} - ${authors} - ${timestamp} - - - - -
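# A minimal sketch, not part of the original patch: it shows how the
# ElementMaker vocabulary above can build one navigation-catalog entry by
# hand. The base href and the 'Nauthors' query are invented for illustration.
import binascii, hashlib
from lxml import etree
from lxml.builder import ElementMaker

E = ElementMaker(namespace='http://www.w3.org/2005/Atom')
href = '/opds/navcatalog/' + binascii.hexlify('Nauthors')
entry = E.entry(
    E.title('By Authors'),
    E.id('calibre-navcatalog:' + hashlib.sha1(href).hexdigest()),
    E.content('Books sorted by author', type='text'),
)
# etree.tostring(entry, pretty_print=True) serializes the Atom entry
# end of sketch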
-        [removed STANZA_ENTRY content markup; the HTML tags were stripped in this extract. Recoverable text: ${Markup(extra)}${record[FM['comments']]}.]
-''') - -STANZA_SUBCATALOG_ENTRY=MarkupTemplate('''\ - - ${title} - urn:calibre:${id} - ${updated.strftime('%Y-%m-%dT%H:%M:%S+00:00')} - - ${count} books - -''') - -# Feed of books -STANZA = MarkupTemplate('''\ - - - calibre Library - $id - ${updated.strftime('%Y-%m-%dT%H:%M:%S+00:00')} - - ${Markup(next_link)} - - calibre - http://calibre-ebook.com - - - ${subtitle} - - - ${Markup(entry)} - - -''') - - -# }}} +STANZA_FORMATS = frozenset(['epub', 'pdb']) class OPDSServer(object): - def build_top_level(self, updated, base_href='/stanza'): - categories = self.categories_cache - categories = [(x.capitalize(), x.capitalize(), x) for x in - categories.keys()] - categories.append(('Title', 'Title', '|title|')) - categories.append(('Newest', 'Newest', '|newest|')) + def add_routes(self, connect): + for base in ('stanza', 'opds'): + version = 0 if base == 'stanza' else 1 + base_href = BASE_HREFS[version] + connect(base, base_href, self.opds, version=version) + connect('opdsnavcatalog_'+base, base_href+'/navcatalog/{which}', + self.opds_navcatalog, version=version) + connect('opdssearch_'+base, base_href+'/search/{terms}', + self.opds_search, version=version) - return TopLevel(updated, categories, base_href=base_href) + def get_opds_allowed_ids_for_version(self, version): + search = '' if version > 0 else ' '.join(['format:='+x for x in + STANZA_FORMATS]) + self.seach_cache(search) - def get_matches(self, location, query): - base = self.db.data.get_matches(location, query) - epub = self.db.data.get_matches('format', '=epub') - pdb = self.db.data.get_matches('format', '=pdb') - return base.intersection(epub.union(pdb)) + def opds_search(self, terms=None, version=0): + version = int(version) + if not terms or version not in BASE_HREFS: + raise cherrypy.HTTPError(404, 'Not found') - def stanza_sortby_subcategory(self, updated, sortby, offset): - pat = re.compile(r'\(.*\)') + def opds_navcatalog(self, which=None, version=0): + version = int(version) + if not which or version not in BASE_HREFS: + raise cherrypy.HTTPError(404, 'Not found') + which = binascii.unhexlify(which) + type_ = which[0] + which = which[1:] + if type_ == 'O': + return self.get_opds_all_books(which) + elif type_ == 'N': + return self.get_opds_navcatalog(which) + raise cherrypy.HTTPError(404, 'Not found') - def clean_author(x): - return pat.sub('', x).strip() - - def author_cmp(x, y): - x = x if ',' in x else clean_author(x).rpartition(' ')[-1] - y = y if ',' in y else clean_author(y).rpartition(' ')[-1] - return cmp(x.lower(), y.lower()) - - def get_author(x): - pref, ___, suff = clean_author(x).rpartition(' ') - return suff + (', '+pref) if pref else suff - - - what, subtitle = sortby[2:], '' - if sortby == 'byseries': - data = self.db.all_series() - data = [(x[0], x[1], len(self.get_matches('series', '='+x[1]))) for x in data] - subtitle = 'Books by series' - elif sortby == 'byauthor': - data = self.db.all_authors() - data = [(x[0], x[1], len(self.get_matches('authors', '='+x[1]))) for x in data] - subtitle = 'Books by author' - elif sortby == 'bytag': - data = self.db.all_tags2() - data = [(x[0], x[1], len(self.get_matches('tags', '='+x[1]))) for x in data] - subtitle = 'Books by tag' - fcmp = author_cmp if sortby == 'byauthor' else cmp - data = [x for x in data if x[2] > 0] - data.sort(cmp=lambda x, y: fcmp(x[1], y[1])) - next_offset = offset + self.max_stanza_items - rdata = data[offset:next_offset] - if next_offset >= len(data): - next_offset = -1 - gt = get_author if sortby == 'byauthor' else lambda x: x - entries = 
[STANZA_SUBCATALOG_ENTRY.generate(title=gt(title), id=id, - what=what, updated=updated, count=c).render('xml').decode('utf-8') for id, - title, c in rdata] - next_link = '' - if next_offset > -1: - next_link = ('\n' - ) % (sortby, next_offset) - return STANZA.generate(subtitle=subtitle, data=entries, FM=self.db.FIELD_MAP, - updated=updated, id='urn:calibre:main', next_link=next_link).render('xml') - - @expose - def stanza(self, search=None, sortby=None, authorid=None, tagid=None, - seriesid=None, offset=0): - 'Feeds to read calibre books on a ipod with stanza.' - books = [] + def opds(self, version=0): + version = int(version) + if version not in BASE_HREFS: + raise cherrypy.HTTPError(404, 'Not found') + categories = self.categories_cache( + self.get_opds_allowed_ids_for_version(version)) + category_meta = self.db.get_tag_browser_categories() + cats = [ + (_('Newest'), _('Date'), 'Onewest'), + (_('Title'), _('Title'), 'Otitle'), + ] + for category in categories: + if category == 'formats': + continue + meta = category_meta.get(category, None) + if meta is None: + continue + cats.append((meta['name'], meta['name'], 'N'+category)) updated = self.db.last_modified() - offset = int(offset) + cherrypy.response.headers['Last-Modified'] = self.last_modified(updated) cherrypy.response.headers['Content-Type'] = 'text/xml' - # Top Level feed - if not sortby and not search and not authorid and not tagid and not seriesid: - return str(self.build_top_level(updated)) + feed = TopLevel(updated, cats, version) - if sortby in ('byseries', 'byauthor', 'bytag'): - return self.stanza_sortby_subcategory(updated, sortby, offset) - - # Get matching ids - if authorid: - authorid=int(authorid) - au = self.db.author_name(authorid) - ids = self.get_matches('authors', au) - elif tagid: - tagid=int(tagid) - ta = self.db.tag_name(tagid) - ids = self.get_matches('tags', ta) - elif seriesid: - seriesid=int(seriesid) - se = self.db.series_name(seriesid) - ids = self.get_matches('series', se) - else: - ids = self.db.data.parse(search) if search and search.strip() else self.db.data.universal_set() - record_list = list(iter(self.db)) - - FM = self.db.FIELD_MAP - # Sort the record list - if sortby == "bytitle" or authorid or tagid: - record_list.sort(lambda x, y: - cmp(title_sort(x[FM['title']]), - title_sort(y[FM['title']]))) - elif seriesid: - record_list.sort(lambda x, y: - cmp(x[FM['series_index']], - y[FM['series_index']])) - else: # Sort by date - record_list = reversed(record_list) + return str(feed) - fmts = FM['formats'] - pat = re.compile(r'EPUB|PDB', re.IGNORECASE) - record_list = [x for x in record_list if x[FM['id']] in ids and - pat.search(x[fmts] if x[fmts] else '') is not None] - next_offset = offset + self.max_stanza_items - nrecord_list = record_list[offset:next_offset] - if next_offset >= len(record_list): - next_offset = -1 - - next_link = '' - if next_offset > -1: - q = ['offset=%d'%next_offset] - for x in ('search', 'sortby', 'authorid', 'tagid', 'seriesid'): - val = locals()[x] - if val is not None: - val = prepare_string_for_xml(unicode(val), True) - q.append('%s=%s'%(x, val)) - next_link = ('\n' - ) % '&'.join(q) - - for record in nrecord_list: - r = record[FM['formats']] - r = r.upper() if r else '' - - z = record[FM['authors']] - if not z: - z = _('Unknown') - authors = ' & '.join([i.replace('|', ',') for i in - z.split(',')]) - - # Setup extra description - extra = [] - rating = record[FM['rating']] - if rating > 0: - rating = ''.join(repeat('★', rating)) - extra.append('RATING: %s
'%rating) - tags = record[FM['tags']] - if tags: - extra.append('TAGS: %s
'%\ - prepare_string_for_xml(', '.join(tags.split(',')))) - series = record[FM['series']] - if series: - extra.append('SERIES: %s [%s]
'%\ - (prepare_string_for_xml(series), - fmt_sidx(float(record[FM['series_index']])))) - - fmt = 'epub' if 'EPUB' in r else 'pdb' - mimetype = guess_type('dummy.'+fmt)[0] - - # Create the sub-catalog, which is either a list of - # authors/tags/series or a list of books - data = dict( - record=record, - updated=updated, - authors=authors, - tags=tags, - series=series, - FM=FM, - extra='\n'.join(extra), - mimetype=mimetype, - fmt=fmt, - urn=record[FM['uuid']], - timestamp=strftime('%Y-%m-%dT%H:%M:%S+00:00', - record[FM['timestamp']]) - ) - books.append(STANZA_ENTRY.generate(**data)\ - .render('xml').decode('utf8')) - - return STANZA.generate(subtitle='', data=books, FM=FM, - next_link=next_link, updated=updated, id='urn:calibre:main').render('xml') - - -if __name__ == '__main__': - from datetime import datetime - f = TopLevel(datetime.utcnow()) - print f
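The navcatalog links built by the OPDS code above round-trip the category name through hexlify. A small sketch, with invented values, of the decode step performed by opds_navcatalog():

    import binascii

    which = binascii.hexlify('Nauthors')   # as embedded in the navcatalog href
    decoded = binascii.unhexlify(which)    # -> 'Nauthors'
    type_, category = decoded[0], decoded[1:]
    if type_ == 'N':
        pass  # serve the tag-browser category ('authors')
    elif type_ == 'O':
        pass  # serve an all-books listing ('newest' or 'title')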