Remove the old server code. Still need to port various bits of calibre that use it

parent fce59c91c7
commit bae7d21608
@@ -60,7 +60,7 @@ All the calibre python code is in the ``calibre`` package. This package contains
 
     * db - The database back-end. See :ref:`db_api` for the interface to the calibre library.
 
-    * Content server: ``library.server`` is the calibre Content server.
+    * Content server: ``srv`` is the calibre Content server.
 
     * gui2 - The Graphical User Interface. GUI initialization happens in ``gui2.main`` and ``gui2.ui``. The e-book-viewer is in ``gui2.viewer``. The e-book editor is in ``gui2.tweak_book``.
 

@@ -13,7 +13,6 @@ from calibre.constants import get_osx_version, isosx, iswindows
 from calibre.gui2 import info_dialog, question_dialog
 from calibre.gui2.actions import InterfaceAction
 from calibre.gui2.dialogs.smartdevice import SmartdeviceDialog
-from calibre.library.server import server_config as content_server_config
 from calibre.utils.config import tweaks
 from calibre.utils.icu import primary_sort_key
 from calibre.utils.smtp import config as email_config

@@ -80,11 +79,12 @@ class ShareConnMenu(QMenu):  # {{{
         if running:
             listen_on = (verify_ipV4_address(tweaks['server_listen_on']) or
                     get_external_ip())
-            try :
+            try:
+                from calibre.library.server import server_config as content_server_config
                 cs_port = content_server_config().parse().port
                 ip_text = _(' [%(ip)s, port %(port)d]')%dict(ip=listen_on,
                         port=cs_port)
-            except:
+            except Exception:
                 ip_text = ' [%s]'%listen_on
             text = _('Stop Content server') + ip_text
             self.toggle_server_action.setText(text)
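
The hunk above replaces the module-level import with one deferred inside the try block, so that once ``calibre.library.server`` is gone the ImportError is swallowed by the ``except Exception`` branch and the menu label simply falls back to the bare IP. A minimal sketch of that pattern, with a hypothetical helper name (the tweak and ``server_config`` names mirror the diff, the fallback strings are illustrative):

```python
# Sketch of the deferred-import-with-fallback pattern used in the hunk above.
# describe_server() is a made-up name; server_config is the (removed) helper.
def describe_server(listen_on):
    try:
        # Import lazily so a missing/removed module degrades gracefully
        # instead of breaking the GUI at import time.
        from calibre.library.server import server_config
        port = server_config().parse().port
        return ' [%s, port %d]' % (listen_on, port)
    except Exception:
        # ImportError (module removed) or any config error ends up here.
        return ' [%s]' % listen_on
```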

@@ -1,67 +0,0 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import os

from calibre.utils.config_base import Config, StringConfig, config_dir, tweaks


listen_on = tweaks['server_listen_on']


log_access_file = os.path.join(config_dir, 'server_access_log.txt')
log_error_file = os.path.join(config_dir, 'server_error_log.txt')


def server_config(defaults=None):
    desc = _('Settings to control the calibre content server')
    c = Config('server', desc) if defaults is None else StringConfig(defaults, desc)

    c.add_opt('port', ['-p', '--port'], default=8080,
              help=_('The port on which to listen. Default is %default'))
    c.add_opt('timeout', ['-t', '--timeout'], default=120,
              help=_('The server timeout in seconds. Default is %default'))
    c.add_opt('thread_pool', ['--thread-pool'], default=30,
              help=_('The max number of worker threads to use. Default is %default'))
    c.add_opt('password', ['--password'], default=None,
              help=_('Set a password to restrict access. By default access is unrestricted.'))
    c.add_opt('username', ['--username'], default='calibre',
              help=_('Username for access. By default, it is: %default'))
    c.add_opt('develop', ['--develop'], default=False,
              help=_('Development mode. Server logs to stdout, with more verbose logging and has much lower timeouts.'))  # noqa
    c.add_opt('max_cover', ['--max-cover'], default='600x800',
              help=_('The maximum size for displayed covers. Default is %default.'))
    c.add_opt('max_opds_items', ['--max-opds-items'], default=30,
              help=_('The maximum number of matches to return per OPDS query. '
                     'This affects Stanza, WordPlayer, etc. integration.'))
    c.add_opt('max_opds_ungrouped_items', ['--max-opds-ungrouped-items'],
              default=100,
              help=_('Group items in categories such as author/tags '
                     'by first letter when there are more than this number '
                     'of items. Default: %default. Set to a large number '
                     'to disable grouping.'))
    c.add_opt('url_prefix', ['--url-prefix'], default='',
              help=_('Prefix to prepend to all URLs. Useful for reverse'
                     'proxying to this server from Apache/nginx/etc.'))

    return c

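
For reference, the removed ``server_config()`` above returns a ``Config`` whose ``parse()`` yields an options object; this is the same call chain the GUI hunk earlier in this commit uses (``content_server_config().parse().port``). A minimal sketch of how callers consumed it, only runnable against a tree that still ships the old module:

```python
# Sketch: how the removed server_config() was typically consumed.
# Assumes the old calibre.library.server package is still importable.
from calibre.library.server import server_config

opts = server_config().parse()   # defaults unless overridden on the command line
print(opts.port, opts.username, opts.max_cover)   # e.g. 8080 calibre 600x800
```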
def custom_fields_to_display(db):
    ckeys = set(db.field_metadata.ignorable_field_keys())
    yes_fields = set(tweaks['content_server_will_display'])
    no_fields = set(tweaks['content_server_wont_display'])
    if '*' in yes_fields:
        yes_fields = ckeys
    if '*' in no_fields:
        no_fields = ckeys
    return frozenset(ckeys & (yes_fields - no_fields))


def main():
    from calibre.library.server.main import main
    return main()
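
The set arithmetic in ``custom_fields_to_display()`` is easier to see with concrete values; here is a small self-contained rehearsal of the same logic (the field names and tweak values are made up):

```python
# Rehearsal of the yes/no/'*' logic from custom_fields_to_display().
ckeys = {'#read', '#shelf', '#score'}   # ignorable (custom) fields in the library
yes_fields = {'*'}                      # tweak: content_server_will_display
no_fields = {'#score'}                  # tweak: content_server_wont_display

if '*' in yes_fields:
    yes_fields = ckeys
if '*' in no_fields:
    no_fields = ckeys
print(frozenset(ckeys & (yes_fields - no_fields)))   # -> {'#read', '#shelf'}
```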

@@ -1,644 +0,0 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)
from future_builtins import map

__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import json
from functools import wraps
from binascii import hexlify, unhexlify

import cherrypy

from calibre.utils.date import isoformat
from calibre.utils.config import prefs, tweaks
from calibre.ebooks.metadata import title_sort
from calibre.ebooks.metadata.book.json_codec import JsonCodec
from calibre.utils.icu import sort_key
from calibre.library.server import custom_fields_to_display
from calibre import force_unicode, isbytestring
from calibre.library.field_metadata import category_icon_map


class Endpoint(object):  # {{{

    'Manage mime-type json serialization, etc.'

    def __init__(self, mimetype='application/json; charset=utf-8',
                 set_last_modified=True):
        self.mimetype = mimetype
        self.set_last_modified = set_last_modified

    def __call__(eself, func):

        @wraps(func)
        def wrapper(self, *args, **kwargs):
            # Remove AJAX caching disabling jquery workaround arg
            # This arg is put into AJAX queries by jQuery to prevent
            # caching in the browser. We dont want it passed to the wrapped
            # function
            kwargs.pop('_', None)

            ans = func(self, *args, **kwargs)
            cherrypy.response.headers['Content-Type'] = eself.mimetype
            if eself.set_last_modified:
                updated = self.db.last_modified()
                cherrypy.response.headers['Last-Modified'] = \
                        self.last_modified(max(updated, self.build_time))
            if 'application/json' in eself.mimetype:
                ans = json.dumps(ans, indent=2,
                        ensure_ascii=False).encode('utf-8')
            return ans

        return wrapper
# }}}
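
The ``Endpoint`` class above is a decorator factory: ``__call__`` receives the handler and returns a wrapper that strips jQuery's ``_`` cache-buster, sets the response headers and JSON-encodes the result. A stripped-down sketch of the same shape, with cherrypy replaced by a plain return value so it can run standalone (``JSONEndpoint`` and ``Demo`` are made-up names):

```python
import json
from functools import wraps

class JSONEndpoint(object):
    # Simplified stand-in for the Endpoint decorator above (no cherrypy).
    def __init__(self, mimetype='application/json; charset=utf-8'):
        self.mimetype = mimetype

    def __call__(eself, func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            kwargs.pop('_', None)                 # drop the jQuery cache-buster
            ans = func(self, *args, **kwargs)
            headers = {'Content-Type': eself.mimetype}
            if 'application/json' in eself.mimetype:
                ans = json.dumps(ans, indent=2, ensure_ascii=False).encode('utf-8')
            return headers, ans
        return wrapper

class Demo(object):
    @JSONEndpoint()
    def ping(self):
        return {'ok': True}

print(Demo().ping(_='123456'))   # the cache-buster argument is ignored
```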

def category_icon(category, meta):  # {{{
    if category in category_icon_map:
        icon = category_icon_map[category]
    elif meta['is_custom']:
        icon = category_icon_map['custom:']
    elif meta['kind'] == 'user':
        icon = category_icon_map['user:']
    else:
        icon = 'blank.png'
    return icon
# }}}

# URL Encoding {{{


def encode_name(name):
    if isinstance(name, unicode):
        name = name.encode('utf-8')
    return hexlify(name)


def decode_name(name):
    return unhexlify(name).decode('utf-8')


def absurl(prefix, url):
    return prefix + url


def category_url(prefix, cid):
    return absurl(prefix, '/ajax/category/'+encode_name(cid))


def icon_url(prefix, name):
    return absurl(prefix, '/browse/icon/'+name)


def books_in_url(prefix, category, cid):
    return absurl(prefix, '/ajax/books_in/%s/%s'%(
        encode_name(category), encode_name(cid)))
# }}}
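
Category names are made URL-safe above by hex-encoding their UTF-8 bytes. A quick round trip shows the scheme used in the ``/ajax/category/{name}`` URLs (written with Python 3 byte handling, since the removed code is Python 2):

```python
from binascii import hexlify, unhexlify

def encode_name(name):            # same hex scheme as the removed helpers
    return hexlify(name.encode('utf-8')).decode('ascii')

def decode_name(name):
    return unhexlify(name).decode('utf-8')

token = encode_name('authors')    # -> '617574686f7273'
print('/ajax/category/' + token)
print(decode_name(token))         # -> 'authors'
```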

class AjaxServer(object):

    def __init__(self):
        self.ajax_json_codec = JsonCodec()

    def add_routes(self, connect):
        base_href = '/ajax'

        # Metadata for books
        connect('ajax_book', base_href+'/book/{book_id}', self.ajax_book)
        connect('ajax_books', base_href+'/books', self.ajax_books)

        # The list of top level categories
        connect('ajax_categories', base_href+'/categories',
                self.ajax_categories)

        # The list of sub-categories and items in each category
        connect('ajax_category', base_href+'/category/{name}',
                self.ajax_category)

        # List of books in specified category
        connect('ajax_books_in', base_href+'/books_in/{category}/{item}',
                self.ajax_books_in)

        # Search
        connect('ajax_search', base_href+'/search', self.ajax_search)

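
With those routes mounted (under ``url_prefix`` if one is configured), the endpoints are plain HTTP GETs returning JSON. A minimal client sketch; the host and port are assumptions about a locally running old-style content server:

```python
# Minimal client sketch for the routes registered above.
# Assumes an old cherrypy content server listening on localhost:8080.
import json
try:
    from urllib.request import urlopen     # Python 3
except ImportError:
    from urllib2 import urlopen            # Python 2

base = 'http://localhost:8080'
cats = json.loads(urlopen(base + '/ajax/categories').read().decode('utf-8'))
for c in cats[:5]:
    print(c['name'], c['url'], c['is_category'])
```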
    # Get book metadata {{{
    def ajax_book_to_json(self, book_id, get_category_urls=True,
                          device_compatible=False, device_for_template=None):
        mi = self.db.get_metadata(book_id, index_is_id=True)

        if not device_compatible:
            try:
                mi.rating = mi.rating/2.
            except:
                mi.rating = 0.0

        data = self.ajax_json_codec.encode_book_metadata(mi)
        for x in ('publication_type', 'size', 'db_id', 'lpath', 'mime',
                'rights', 'book_producer'):
            data.pop(x, None)

        data['cover'] = absurl(self.opts.url_prefix, u'/get/cover/%d'%book_id)
        data['thumbnail'] = absurl(self.opts.url_prefix, u'/get/thumb/%d'%book_id)

        if not device_compatible:
            mi.format_metadata = {k.lower():dict(v) for k, v in
                    mi.format_metadata.iteritems()}
            for v in mi.format_metadata.itervalues():
                mtime = v.get('mtime', None)
                if mtime is not None:
                    v['mtime'] = isoformat(mtime, as_utc=True)
            data['format_metadata'] = mi.format_metadata
            fmts = set(x.lower() for x in mi.format_metadata.iterkeys())
            pf = prefs['output_format'].lower()
            other_fmts = list(fmts)
            try:
                fmt = pf if pf in fmts else other_fmts[0]
            except:
                fmt = None
            if fmts and fmt:
                other_fmts = [x for x in fmts if x != fmt]
            data['formats'] = sorted(fmts)
            if fmt:
                data['main_format'] = {fmt: absurl(self.opts.url_prefix, u'/get/%s/%d'%(fmt, book_id))}
            else:
                data['main_format'] = None
            data['other_formats'] = {fmt: absurl(self.opts.url_prefix, u'/get/%s/%d'%(fmt, book_id)) for fmt
                    in other_fmts}

            if get_category_urls:
                category_urls = data['category_urls'] = {}
                ccache = self.categories_cache()
                for key in mi.all_field_keys():
                    fm = mi.metadata_for_field(key)
                    if (fm and fm['is_category'] and not fm['is_csp'] and
                            key != 'formats' and fm['datatype'] not in ['rating']):
                        categories = mi.get(key)
                        if isinstance(categories, basestring):
                            categories = [categories]
                        if categories is None:
                            categories = []
                        dbtags = {}
                        for category in categories:
                            for tag in ccache.get(key, []):
                                if tag.original_name == category:
                                    dbtags[category] = books_in_url(self.opts.url_prefix,
                                        tag.category if tag.category else key,
                                        tag.original_name if tag.id is None else
                                        unicode(tag.id))
                                    break
                        category_urls[key] = dbtags
        else:
            series = data.get('series', None)
            if series:
                tsorder = tweaks['save_template_title_series_sorting']
                series = title_sort(series, order=tsorder)
            else:
                series = ''
            data['_series_sort_'] = series
            if device_for_template:
                import posixpath
                from calibre.devices.utils import create_upload_path
                from calibre.utils.filenames import ascii_filename as sanitize
                from calibre.customize.ui import device_plugins

                for device_class in device_plugins():
                    if device_class.__class__.__name__ == device_for_template:
                        template = device_class.save_template()
                        data['_filename_'] = create_upload_path(mi, book_id,
                                template, sanitize, path_type=posixpath)
                        break

        return data, mi.last_modified
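
Beyond the fields produced by ``JsonCodec.encode_book_metadata()``, the method above adds a handful of server-specific keys. An illustrative (not captured) response fragment for a hypothetical book id 42 with an EPUB and a MOBI; the key names follow the assignments above, all values are invented:

```python
# Illustrative only -- shapes taken from the assignments above, values invented.
example = {
    'cover': '/get/cover/42',
    'thumbnail': '/get/thumb/42',
    'formats': ['epub', 'mobi'],
    'main_format': {'epub': '/get/epub/42'},
    'other_formats': {'mobi': '/get/mobi/42'},
    'format_metadata': {'epub': {'size': 123456, 'mtime': '2014-01-01T00:00:00+00:00'}},
    'category_urls': {'authors': {'Some Author': '/ajax/books_in/617574686f7273/3132'}},
}
```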

    @Endpoint(set_last_modified=False)
    def ajax_book(self, book_id, category_urls='true', id_is_uuid='false',
                  device_compatible='false', device_for_template=None):
        '''
        Return the metadata of the book as a JSON dictionary.

        If category_urls == 'true' the returned dictionary also contains a
        mapping of category names to URLs that return the list of books in the
        given category.
        '''
        cherrypy.response.timeout = 3600

        try:
            if id_is_uuid == 'true':
                book_id = self.db.get_id_from_uuid(book_id)
            else:
                book_id = int(book_id)
            data, last_modified = self.ajax_book_to_json(book_id,
                    get_category_urls=category_urls.lower()=='true',
                    device_compatible=device_compatible.lower()=='true',
                    device_for_template=device_for_template)
        except:
            raise cherrypy.HTTPError(404, 'No book with id: %r'%book_id)

        cherrypy.response.headers['Last-Modified'] = \
                self.last_modified(last_modified)

        return data

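
A typical call to the endpoint above, again assuming a server on ``localhost:8080`` (book id 1 is an assumption; pass ``id_is_uuid=true`` to look up by UUID instead, as described in the parameters):

```python
import json
try:
    from urllib.request import urlopen     # Python 3
except ImportError:
    from urllib2 import urlopen            # Python 2

# Fetch metadata for book id 1, including the per-category URL mapping.
url = 'http://localhost:8080/ajax/book/1?category_urls=true'
book = json.loads(urlopen(url).read().decode('utf-8'))
print(book.get('title'), book.get('formats'))
```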
    @Endpoint(set_last_modified=False)
    def ajax_books(self, ids=None, category_urls='true', id_is_uuid='false', device_for_template=None):
        '''
        Return the metadata for a list of books specified as a comma separated
        list of ids. The metadata is returned as a dictionary mapping ids to
        the metadata. The format for the metadata is the same as in
        ajax_book(). If no book is found for a given id, it is mapped to null
        in the dictionary.

        This endpoint can be used with either GET or POST requests, variable
        name is ids: /ajax/books?ids=1,2,3,4,5
        '''
        if ids is None:
            raise cherrypy.HTTPError(404, 'Must specify some ids')
        try:
            if id_is_uuid == 'true':
                ids = set(self.db.get_id_from_uuid(x) for x in ids.split(','))
            else:
                ids = set(int(x.strip()) for x in ids.split(','))
        except:
            raise cherrypy.HTTPError(404, 'ids must be a comma separated list'
                    ' of integers')
        ans = {}
        lm = None
        gcu = category_urls.lower()=='true'
        for book_id in ids:
            try:
                data, last_modified = self.ajax_book_to_json(book_id,
                        get_category_urls=gcu, device_for_template=device_for_template)
            except:
                ans[book_id] = None
            else:
                ans[book_id] = data
                if lm is None or last_modified > lm:
                    lm = last_modified

        cherrypy.response.timeout = 3600
        cherrypy.response.headers['Last-Modified'] = \
                self.last_modified(lm if lm is not None else
                    self.db.last_modified())

        return ans

    # }}}

    # Top level categories {{{
    @Endpoint()
    def ajax_categories(self):
        '''
        Return the list of top-level categories as a list of dictionaries. Each
        dictionary is of the form::

            {
                'name': Display Name,
                'url':URL that gives the JSON object corresponding to all entries in this category,
                'icon': URL to icon of this category,
                'is_category': False for the All Books and Newest categories, True for everything else
            }

        '''
        ans = {}
        categories = self.categories_cache()
        category_meta = self.db.field_metadata

        def getter(x):
            return category_meta[x]['name']

        displayed_custom_fields = custom_fields_to_display(self.db)

        for category in sorted(categories, key=lambda x: sort_key(getter(x))):
            if len(categories[category]) == 0:
                continue
            if category in ('formats', 'identifiers'):
                continue
            meta = category_meta.get(category, None)
            if meta is None:
                continue
            if category_meta.is_ignorable_field(category) and \
                    category not in displayed_custom_fields:
                continue
            display_name = meta['name']
            if category.startswith('@'):
                category = category.partition('.')[0]
                display_name = category[1:]
            url = force_unicode(category)
            icon = category_icon(category, meta)
            ans[url] = (display_name, icon)

        ans = [{'url':k, 'name':v[0], 'icon':v[1], 'is_category':True}
                for k, v in ans.iteritems()]
        ans.sort(key=lambda x: sort_key(x['name']))
        for name, url, icon in [
                (_('All books'), 'allbooks', 'book.png'),
                (_('Newest'), 'newest', 'forward.png'),
                ]:
            ans.insert(0, {'name':name, 'url':url, 'icon':icon,
                'is_category':False})

        for c in ans:
            c['url'] = category_url(self.opts.url_prefix, c['url'])
            c['icon'] = icon_url(self.opts.url_prefix, c['icon'])

        return ans
    # }}}

    # Items in the specified category {{{
    @Endpoint()
    def ajax_category(self, name, sort='title', num=100, offset=0,
                      sort_order='asc'):
        '''
        Return a dictionary describing the category specified by name. The
        dictionary looks like::

            {
                'category_name': Category display name,
                'base_url': Base URL for this category,
                'total_num': Total number of items in this category,
                'offset': The offset for the items returned in this result,
                'num': The number of items returned in this result,
                'sort': How the returned items are sorted,
                'sort_order': asc or desc
                'subcategories': List of sub categories of this category.
                'items': List of items in this category,
            }

        Each subcategory is a dictionary of the same form as those returned by
        ajax_categories().

        Each item is a dictionary of the form::

            {
                'name': Display name,
                'average_rating': Average rating for books in this item,
                'count': Number of books in this item,
                'url': URL to get list of books in this item,
                'has_children': If True this item contains sub categories, look
                    for an entry corresponding to this item in subcategories in the
                    main dictionary,
            }

        :param sort: How to sort the returned items. Choices are: name, rating,
            popularity
        :param sort_order: asc or desc

        To learn how to create subcategories see
        https://manual.calibre-ebook.com/sub_groups.html
        '''
        try:
            num = int(num)
        except:
            raise cherrypy.HTTPError(404, "Invalid num: %r"%num)
        try:
            offset = int(offset)
        except:
            raise cherrypy.HTTPError(404, "Invalid offset: %r"%offset)

        base_url = absurl(self.opts.url_prefix, '/ajax/category/'+name)

        if sort not in ('rating', 'name', 'popularity'):
            sort = 'name'

        if sort_order not in ('asc', 'desc'):
            sort_order = 'asc'

        try:
            dname = decode_name(name)
        except:
            raise cherrypy.HTTPError(404, 'Invalid encoding of category name'
                    ' %r'%name)

        if dname in ('newest', 'allbooks'):
            if dname == 'newest':
                sort, sort_order = 'timestamp', 'desc'
            raise cherrypy.InternalRedirect(
                '/ajax/books_in/%s/%s?sort=%s&sort_order=%s'%(
                    encode_name(dname), encode_name('0'), sort, sort_order))

        fm = self.db.field_metadata
        categories = self.categories_cache()
        hierarchical_categories = self.db.prefs['categories_using_hierarchy']

        subcategory = dname
        toplevel = subcategory.partition('.')[0]
        if toplevel == subcategory:
            subcategory = None
        if toplevel not in categories or toplevel not in fm:
            raise cherrypy.HTTPError(404, 'Category %r not found'%toplevel)

        # Find items and sub categories
        subcategories = []
        meta = fm[toplevel]
        item_names = {}
        children = set()

        if meta['kind'] == 'user':
            fullname = ((toplevel + '.' + subcategory) if subcategory is not
                    None else toplevel)
            try:
                # User categories cannot be applied to books, so this is the
                # complete set of items, no need to consider sub categories
                items = categories[fullname]
            except:
                raise cherrypy.HTTPError(404,
                        'User category %r not found'%fullname)

            parts = fullname.split('.')
            for candidate in categories:
                cparts = candidate.split('.')
                if len(cparts) == len(parts)+1 and cparts[:-1] == parts:
                    subcategories.append({'name':cparts[-1],
                        'url':candidate,
                        'icon':category_icon(toplevel, meta)})

            category_name = toplevel[1:].split('.')
            # When browsing by user categories we ignore hierarchical normal
            # columns, so children can be empty

        elif toplevel in hierarchical_categories:
            items = []

            category_names = [x.original_name.split('.') for x in categories[toplevel] if
                    '.' in x.original_name]

            if subcategory is None:
                children = set(x[0] for x in category_names)
                category_name = [meta['name']]
                items = [x for x in categories[toplevel] if '.' not in x.original_name]
            else:
                subcategory_parts = subcategory.split('.')[1:]
                category_name = [meta['name']] + subcategory_parts

                lsp = len(subcategory_parts)
                children = set('.'.join(x) for x in category_names if len(x) ==
                        lsp+1 and x[:lsp] == subcategory_parts)
                items = [x for x in categories[toplevel] if x.original_name in
                        children]
                item_names = {x:x.original_name.rpartition('.')[-1] for x in
                        items}
                # Only mark the subcategories that have children themselves as
                # subcategories
                children = set('.'.join(x[:lsp+1]) for x in category_names if len(x) >
                        lsp+1 and x[:lsp] == subcategory_parts)
                subcategories = [{'name':x.rpartition('.')[-1],
                    'url':toplevel+'.'+x,
                    'icon':category_icon(toplevel, meta)} for x in children]
        else:
            items = categories[toplevel]
            category_name = meta['name']

        for x in subcategories:
            x['url'] = category_url(self.opts.url_prefix, x['url'])
            x['icon'] = icon_url(self.opts.url_prefix, x['icon'])
            x['is_category'] = True

        sort_keygen = {
            'name': lambda x: sort_key(x.sort if x.sort else x.original_name),
            'popularity': lambda x: x.count,
            'rating': lambda x: x.avg_rating
        }
        items.sort(key=sort_keygen[sort], reverse=sort_order == 'desc')
        total_num = len(items)
        items = items[offset:offset+num]
        items = [{
            'name':item_names.get(x, x.original_name),
            'average_rating': x.avg_rating,
            'count': x.count,
            'url': books_in_url(self.opts.url_prefix,
                x.category if x.category else toplevel,
                x.original_name if x.id is None else unicode(x.id)),
            'has_children': x.original_name in children,
        } for x in items]

        return {
            'category_name': category_name,
            'base_url': base_url,
            'total_num': total_num,
            'offset':offset, 'num':len(items), 'sort':sort,
            'sort_order':sort_order,
            'subcategories':subcategories,
            'items':items,
        }

    # }}}
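
The hierarchical branch above works purely on dotted ``original_name`` values. A compact rehearsal of the ``children``/``has-children`` selection for a made-up tag hierarchy (the tag names are invented):

```python
# Rehearsal of the hierarchical-category selection above (names invented).
names = ['Fiction', 'Fiction.SF', 'Fiction.SF.Classic', 'Fiction.Fantasy', 'History']
category_names = [x.split('.') for x in names if '.' in x]

# Browsing the sub-category 'tags.Fiction' -> subcategory_parts == ['Fiction']
subcategory_parts = ['Fiction']
lsp = len(subcategory_parts)

# Direct children of 'Fiction' (depth lsp+1 with a matching prefix)
children = set('.'.join(x) for x in category_names
               if len(x) == lsp + 1 and x[:lsp] == subcategory_parts)
print(sorted(children))   # ['Fiction.Fantasy', 'Fiction.SF']

# Of those, only the ones that have children themselves are shown as sub-categories
has_kids = set('.'.join(x[:lsp + 1]) for x in category_names
               if len(x) > lsp + 1 and x[:lsp] == subcategory_parts)
print(sorted(has_kids))   # ['Fiction.SF']
```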

    # Books in the specified category {{{
    @Endpoint()
    def ajax_books_in(self, category, item, sort='title', num=25, offset=0,
                      sort_order='asc', get_additional_fields=''):
        '''
        Return the books (as list of ids) present in the specified category.
        '''
        try:
            dname, ditem = map(decode_name, (category, item))
        except:
            raise cherrypy.HTTPError(404, 'Invalid encoded param: %r'%category)

        try:
            num = int(num)
        except:
            raise cherrypy.HTTPError(404, "Invalid num: %r"%num)
        try:
            offset = int(offset)
        except:
            raise cherrypy.HTTPError(404, "Invalid offset: %r"%offset)

        if sort_order not in ('asc', 'desc'):
            sort_order = 'asc'

        sfield = self.db.data.sanitize_sort_field_name(sort)
        if sfield not in self.db.field_metadata.sortable_field_keys():
            raise cherrypy.HTTPError(404, '%s is not a valid sort field'%sort)

        if dname in ('allbooks', 'newest'):
            ids = self.search_cache('')
        elif dname == 'search':
            try:
                ids = self.search_cache('search:"%s"'%ditem)
            except:
                raise cherrypy.HTTPError(404, 'Search: %r not understood'%ditem)
        else:
            try:
                cid = int(ditem)
            except:
                raise cherrypy.HTTPError(404,
                        'Category id %r not an integer'%ditem)

            if dname == 'news':
                dname = 'tags'
            ids = self.db.get_books_for_category(dname, cid)
            all_ids = set(self.search_cache(''))
            # Implement restriction
            ids = ids.intersection(all_ids)

        ids = list(ids)
        self.db.data.multisort(fields=[(sfield, sort_order == 'asc')], subsort=True,
                only_ids=ids)
        total_num = len(ids)
        ids = ids[offset:offset+num]

        result = {
            'total_num': total_num, 'sort_order':sort_order,
            'offset':offset, 'num':len(ids), 'sort':sort,
            'base_url':absurl(self.opts.url_prefix, '/ajax/books_in/%s/%s'%(category, item)),
            'book_ids':ids
        }

        if get_additional_fields:
            additional_fields = {}
            for field in get_additional_fields.split(','):
                field = field.strip()
                if field:
                    flist = additional_fields[field] = []
                    for id_ in ids:
                        flist.append(self.db.new_api.field_for(field, id_,
                                default_value=None))
            if additional_fields:
                result['additional_fields'] = additional_fields
        return result

    # }}}
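
The ``{category}`` and ``{item}`` path segments are the hex-encoded names from the URL-encoding helpers earlier in this file. A sketch of fetching the books for a tag with database id 7; the server address, tag id and sort choice are assumptions:

```python
import json
from binascii import hexlify
try:
    from urllib.request import urlopen     # Python 3
except ImportError:
    from urllib2 import urlopen            # Python 2

def encode_name(name):                      # same hex scheme as the removed code
    return hexlify(name.encode('utf-8')).decode('ascii')

base = 'http://localhost:8080'
url = '%s/ajax/books_in/%s/%s?num=10&sort=timestamp&sort_order=desc' % (
        base, encode_name('tags'), encode_name('7'))
page = json.loads(urlopen(url).read().decode('utf-8'))
print(page['total_num'], page['book_ids'])
```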

    # Search {{{
    @Endpoint()
    def ajax_search(self, query='', sort='title', offset=0, num=25,
                    sort_order='asc'):
        '''
        Return the books (as list of ids) matching the specified search query.
        '''

        try:
            num = int(num)
        except:
            raise cherrypy.HTTPError(404, "Invalid num: %r"%num)
        try:
            offset = int(offset)
        except:
            raise cherrypy.HTTPError(404, "Invalid offset: %r"%offset)
        sfield = self.db.data.sanitize_sort_field_name(sort)
        if sfield not in self.db.field_metadata.sortable_field_keys():
            raise cherrypy.HTTPError(404, '%s is not a valid sort field'%sort)

        if isbytestring(query):
            query = query.decode('UTF-8')
        ids = list(self.search_for_books(query))
        self.db.data.multisort(fields=[(sfield, sort_order == 'asc')], subsort=True,
                only_ids=ids)
        total_num = len(ids)
        ids = ids[offset:offset+num]
        return {
            'total_num': total_num, 'sort_order':sort_order,
            'offset':offset, 'num':len(ids), 'sort':sort,
            'base_url':absurl(self.opts.url_prefix, '/ajax/search'),
            'query': query,
            'book_ids':ids
        }

    # }}}

@@ -1,327 +0,0 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import os
import logging
from logging.handlers import RotatingFileHandler

import cherrypy
from cherrypy.process.plugins import SimplePlugin

from calibre.constants import __appname__, __version__
from calibre.utils.date import fromtimestamp
from calibre.library.server import listen_on, log_access_file, log_error_file
from calibre.library.server.utils import expose, AuthController
from calibre.utils.mdns import publish as publish_zeroconf, \
        unpublish as unpublish_zeroconf, get_external_ip, verify_ipV4_address
from calibre.library.server.content import ContentServer
from calibre.library.server.mobile import MobileServer
from calibre.library.server.xml import XMLServer
from calibre.library.server.opds import OPDSServer
from calibre.library.server.cache import Cache
from calibre.library.server.browse import BrowseServer
from calibre.library.server.ajax import AjaxServer
from calibre import prints, as_unicode


class DispatchController(object):  # {{{

    def __init__(self, prefix, wsgi=False, auth_controller=None):
        self.dispatcher = cherrypy.dispatch.RoutesDispatcher()
        self.funcs = []
        self.seen = set()
        self.auth_controller = auth_controller
        self.prefix = prefix if prefix else ''
        if wsgi:
            self.prefix = ''

    def __call__(self, name, route, func, **kwargs):
        if name in self.seen:
            raise NameError('Route name: '+ repr(name) + ' already used')
        self.seen.add(name)
        kwargs['action'] = 'f_%d'%len(self.funcs)
        aw = kwargs.pop('android_workaround', False)
        if route != '/':
            route = self.prefix + route
            if isinstance(route, unicode):
                # Apparently the routes package chokes on unicode routes, see
                # https://www.mobileread.com/forums/showthread.php?t=235366
                route = route.encode('utf-8')
        elif self.prefix:
            self.dispatcher.connect(name+'prefix_extra', self.prefix, self,
                    **kwargs)
            self.dispatcher.connect(name+'prefix_extra_trailing',
                    self.prefix+'/', self, **kwargs)
        self.dispatcher.connect(name, route, self, **kwargs)
        if self.auth_controller is not None:
            func = self.auth_controller(func, aw)
        self.funcs.append(expose(func))

    def __getattr__(self, attr):
        if not attr.startswith('f_'):
            raise AttributeError(attr + ' not found')
        num = attr.rpartition('_')[-1]
        try:
            num = int(num)
        except:
            raise AttributeError(attr + ' not found')
        if num < 0 or num >= len(self.funcs):
            raise AttributeError(attr + ' not found')
        return self.funcs[num]

# }}}
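
``DispatchController`` is what the various ``add_routes(self, connect)`` methods removed in this commit receive as ``connect``: each call registers a route whose action is a generated ``f_<n>`` attribute resolved later by ``__getattr__``. A minimal stand-in for that name-to-function indirection, without cherrypy or the Routes package (``MiniDispatcher`` is a made-up name, for illustration only):

```python
# Simplified stand-in for DispatchController's name -> f_<n> indirection.
class MiniDispatcher(object):
    def __init__(self, prefix=''):
        self.prefix = prefix
        self.funcs = []
        self.routes = {}

    def __call__(self, name, route, func, **kwargs):
        action = 'f_%d' % len(self.funcs)       # same action-naming scheme
        self.routes[name] = (self.prefix + route, action)
        self.funcs.append(func)

    def __getattr__(self, attr):
        if attr.startswith('f_'):
            return self.funcs[int(attr.rpartition('_')[-1])]
        raise AttributeError(attr)

d = MiniDispatcher('/calibre')
d('ajax_book', '/ajax/book/{book_id}', lambda book_id: {'id': book_id})
route, action = d.routes['ajax_book']
print(route, getattr(d, action)(book_id=3))   # /calibre/ajax/book/{book_id} {'id': 3}
```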

class BonJour(SimplePlugin):  # {{{

    def __init__(self, engine, port=8080, prefix=''):
        SimplePlugin.__init__(self, engine)
        self.port = port
        self.prefix = prefix
        self.ip_address = '0.0.0.0'

    @property
    def mdns_services(self):
        return [
                ('Books in calibre', '_stanza._tcp', self.port,
                    {'path':self.prefix+'/stanza'}),
                ('Books in calibre', '_calibre._tcp', self.port,
                    {'path':self.prefix+'/opds'}),
                ]

    def start(self):
        zeroconf_ip_address = verify_ipV4_address(self.ip_address)
        try:
            for s in self.mdns_services:
                publish_zeroconf(*s, use_ip_address=zeroconf_ip_address)
        except:
            import traceback
            cherrypy.log.error('Failed to start BonJour:')
            cherrypy.log.error(traceback.format_exc())

    start.priority = 90

    def stop(self):
        try:
            for s in self.mdns_services:
                unpublish_zeroconf(*s)
        except:
            import traceback
            cherrypy.log.error('Failed to stop BonJour:')
            cherrypy.log.error(traceback.format_exc())

    stop.priority = 10


cherrypy.engine.bonjour = BonJour(cherrypy.engine)

# }}}

class LibraryServer(ContentServer, MobileServer, XMLServer, OPDSServer, Cache,
        BrowseServer, AjaxServer):

    server_name = __appname__ + '/' + __version__

    def __init__(self, db, opts, embedded=False, show_tracebacks=True,
                 wsgi=False):
        self.is_wsgi = bool(wsgi)
        self.opts = opts
        self.embedded = embedded
        self.state_callback = None
        self.start_failure_callback = None
        try:
            self.max_cover_width, self.max_cover_height = \
                        map(int, self.opts.max_cover.split('x'))
        except:
            self.max_cover_width = 1200
            self.max_cover_height = 1600
        path = P('content_server')
        self.build_time = fromtimestamp(os.stat(path).st_mtime)
        self.default_cover = open(P('content_server/default_cover.jpg'), 'rb').read()
        if not opts.url_prefix:
            opts.url_prefix = ''

        cherrypy.engine.bonjour.ip_address = listen_on
        cherrypy.engine.bonjour.port = opts.port
        cherrypy.engine.bonjour.prefix = opts.url_prefix

        Cache.__init__(self)

        self.set_database(db)

        st = 0.1 if opts.develop else 1

        cherrypy.config.update({
            'log.screen'             : opts.develop,
            'engine.autoreload.on'   : getattr(opts,
                                        'auto_reload', False),
            'tools.log_headers.on'   : opts.develop,
            'tools.encode.encoding'  : 'UTF-8',
            'checker.on'             : opts.develop,
            'request.show_tracebacks': show_tracebacks,
            'server.socket_host'     : listen_on,
            'server.socket_port'     : opts.port,
            'server.socket_timeout'  : opts.timeout,      # seconds
            'server.thread_pool'     : opts.thread_pool,  # number of threads
            'server.shutdown_timeout': st,                # minutes
        })
        if embedded or wsgi:
            cherrypy.config.update({'engine.SIGHUP'  : None,
                                    'engine.SIGTERM' : None,})
        self.config = {}
        self.is_running = False
        self.exception = None
        auth_controller = None
        self.users_dict = {}
        # self.config['/'] = {
        #     'tools.sessions.on' : True,
        #     'tools.sessions.timeout': 60,  # Session times out after 60 minutes
        # }

        if not wsgi:
            self.setup_loggers()
            cherrypy.engine.bonjour.subscribe()
            self.config['global'] = {
                'tools.gzip.on'        : True,
                'tools.gzip.mime_types': ['text/html', 'text/plain',
                    'text/xml', 'text/javascript', 'text/css'],
            }

            if opts.username and opts.password:
                self.users_dict[opts.username.strip()] = opts.password.strip()
                auth_controller = AuthController('Your calibre library',
                        self.users_dict)

        self.__dispatcher__ = DispatchController(self.opts.url_prefix,
                wsgi=wsgi, auth_controller=auth_controller)
        for x in self.__class__.__bases__:
            if hasattr(x, 'add_routes'):
                x.__init__(self)
                x.add_routes(self, self.__dispatcher__)
        root_conf = self.config.get('/', {})
        root_conf['request.dispatch'] = self.__dispatcher__.dispatcher
        self.config['/'] = root_conf

    def set_database(self, db):
        self.db = db
        virt_libs = db.prefs.get('virtual_libraries', {})
        sr = getattr(self.opts, 'restriction', None)
        if sr:
            if sr in virt_libs:
                sr = virt_libs[sr]
            elif sr not in self.db.saved_search_names():
                prints('WARNING: Content server: search restriction ',
                       sr, ' does not exist')
                sr = ''
            else:
                sr = 'search:"%s"'%sr
        else:
            sr = db.prefs.get('cs_virtual_lib_on_startup', '')
            if sr:
                if sr not in virt_libs:
                    prints('WARNING: Content server: virtual library ',
                           sr, ' does not exist')
                    sr = ''
                else:
                    sr = virt_libs[sr]
        self.search_restriction = sr
        self.reset_caches()

    def graceful(self):
        cherrypy.engine.graceful()

    def setup_loggers(self):
        access_file = log_access_file
        error_file = log_error_file
        log = cherrypy.log

        maxBytes = getattr(log, "rot_maxBytes", 10000000)
        backupCount = getattr(log, "rot_backupCount", 1000)

        # Make a new RotatingFileHandler for the error log.
        h = RotatingFileHandler(error_file, 'a', maxBytes, backupCount)
        h.setLevel(logging.DEBUG)
        h.setFormatter(cherrypy._cplogging.logfmt)
        log.error_log.addHandler(h)

        # Make a new RotatingFileHandler for the access log.
        h = RotatingFileHandler(access_file, 'a', maxBytes, backupCount)
        h.setLevel(logging.DEBUG)
        h.setFormatter(cherrypy._cplogging.logfmt)
        log.access_log.addHandler(h)

    def start_cherrypy(self):
        try:
            cherrypy.engine.start()
        except:
            ip = get_external_ip()
            if not ip or ip.startswith('127.'):
                raise
            cherrypy.log('Trying to bind to single interface: '+ip)
            # Change the host we listen on
            cherrypy.config.update({'server.socket_host' : ip})
            # This ensures that the change is actually applied
            cherrypy.server.socket_host = ip
            cherrypy.server.httpserver = cherrypy.server.instance = None

            cherrypy.engine.start()

    def start(self):
        self.is_running = False
        self.exception = None
        cherrypy.tree.mount(root=None, config=self.config)
        try:
            self.start_cherrypy()
        except Exception as e:
            self.exception = e
            import traceback
            traceback.print_exc()
            if callable(self.start_failure_callback):
                try:
                    self.start_failure_callback(as_unicode(e))
                except:
                    pass
            return

        try:
            self.is_running = True
            self.notify_listener()
            cherrypy.engine.block()
        except Exception as e:
            import traceback
            traceback.print_exc()
            self.exception = e
        finally:
            self.is_running = False
            self.notify_listener()

    def notify_listener(self):
        try:
            if callable(self.state_callback):
                self.state_callback(self.is_running)
        except:
            pass

    def exit(self):
        try:
            cherrypy.engine.exit()
        finally:
            cherrypy.server.httpserver = None
            self.is_running = False
            self.notify_listener()

    def threaded_exit(self):
        from threading import Thread
        t = Thread(target=self.exit)
        t.daemon = True
        t.start()

    def search_for_books(self, query):
        return self.db.search_getting_ids(
            (query or '').strip(), self.search_restriction,
            sort_results=False, use_virtual_library=False)
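
For context, this is roughly how the removed ``LibraryServer`` was embedded by callers: construct it with a library database and the parsed server options, then run ``start()`` (which blocks) on a worker thread. The import paths are the ones deleted in this commit, so the sketch only runs against an older calibre tree, and the exact location of ``LibraryServer`` in ``main.py`` is taken from the listing above:

```python
# Sketch: embedding the (now removed) cherrypy LibraryServer.
from threading import Thread

from calibre.library import db                         # opens the default library
from calibre.library.server import server_config
from calibre.library.server.main import LibraryServer  # defined in the removed main.py

opts = server_config().parse()
server = LibraryServer(db(), opts, embedded=True, show_tracebacks=False)
t = Thread(target=server.start)   # start() blocks in cherrypy.engine.block()
t.daemon = True
t.start()
# ... later, to shut down without blocking the caller:
# server.threaded_exit()
```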
@@ -1,983 +0,0 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import operator, os, json, re, time
from binascii import hexlify, unhexlify
from collections import OrderedDict

import cherrypy

from calibre.constants import filesystem_encoding, config_dir
from calibre import (isbytestring, force_unicode, prepare_string_for_xml, sanitize_file_name2)
from calibre.utils.filenames import ascii_filename
from calibre.utils.config import prefs, JSONConfig
from calibre.utils.icu import sort_key
from calibre.utils.img import scale_image
from calibre.library.comments import comments_to_html
from calibre.library.server import custom_fields_to_display
from calibre.library.field_metadata import category_icon_map
from calibre.library.server.utils import quote, unquote
from calibre.db.categories import Tag
from calibre.ebooks.metadata.sources.identify import urls_from_identifiers


def xml(*args, **kwargs):
    ans = prepare_string_for_xml(*args, **kwargs)
    return ans.replace('&apos;', '&#39;')


def render_book_list(ids, prefix, suffix=''):  # {{{
    pages = []
    num = len(ids)
    pos = 0
    delta = 25
    while ids:
        page = list(ids[:delta])
        pages.append((page, pos))
        ids = ids[delta:]
        pos += len(page)
    page_template = u'''\
            <div class="page" id="page{0}">
                <div class="load_data" title="{1}">
                    <span class="url" title="{prefix}/browse/booklist_page"></span>
                    <span class="start" title="{start}"></span>
                    <span class="end" title="{end}"></span>
                </div>
                <div class="loading"><img src="{prefix}/static/loading.gif" /> {2}</div>
                <div class="loaded"></div>
            </div>
            '''
    pagelist_template = u'''\
        <div class="pagelist">
            <ul>
                {pages}
            </ul>
        </div>
        '''
    rpages, lpages = [], []
    for i, x in enumerate(pages):
        pg, pos = x
        ld = xml(json.dumps(pg), True)
        start, end = pos+1, pos+len(pg)
        rpages.append(page_template.format(i, ld,
            xml(_('Loading, please wait')) + '…',
            start=start, end=end, prefix=prefix))
        lpages.append(' '*20 + (u'<li><a href="#" title="Books {start} to {end}"'
            ' onclick="gp_internal(\'{id}\'); return false;"> '
            '{start} to {end}</a></li>').format(start=start, end=end,
                id='page%d'%i))
    rpages = u'\n\n'.join(rpages)
    lpages = u'\n'.join(lpages)
    pagelist = pagelist_template.format(pages=lpages)

    templ = u'''\
            <h3>{0} {suffix}</h3>
            <div id="booklist">
                <div id="pagelist" title="{goto}">{pagelist}</div>
                <div class="listnav topnav">
                    {navbar}
                </div>
                {pages}
                <div class="listnav bottomnav">
                    {navbar}
                </div>
            </div>
            '''
    gp_start = gp_end = ''
    if len(pages) > 1:
        gp_start = '<a href="#" onclick="goto_page(); return false;" title="%s">' % \
                (_('Go to') + '…')
        gp_end = '</a>'
    navbar = u'''\
            <div class="navleft">
                <a href="#" onclick="first_page(); return false;">{first}</a>
                <a href="#" onclick="previous_page(); return false;">{previous}</a>
            </div>
            <div class="navmiddle">
                {gp_start}
                <span class="start">0</span> to <span class="end">0</span>
                {gp_end}of {num}
            </div>
            <div class="navright">
                <a href="#" onclick="next_page(); return false;">{next}</a>
                <a href="#" onclick="last_page(); return false;">{last}</a>
            </div>
            '''.format(first=_('First'), last=_('Last'), previous=_('Previous'),
                next=_('Next'), num=num, gp_start=gp_start, gp_end=gp_end)

    return templ.format(_('Browsing %d books')%num, suffix=suffix,
            pages=rpages, navbar=navbar, pagelist=pagelist,
            goto=xml(_('Go to'), True) + '…')

# }}}
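
``render_book_list()`` above splits the id list into fixed-size pages of 25 before rendering; the chunking step, run in isolation with an invented id list:

```python
# The page-chunking step from render_book_list(), in isolation.
ids = list(range(1, 61))      # 60 book ids (made up)
delta = 25
pages, pos = [], 0
while ids:
    page = ids[:delta]
    pages.append((page, pos))
    ids = ids[delta:]
    pos += len(page)
print([(len(p), start) for p, start in pages])   # [(25, 0), (25, 25), (10, 50)]
```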

def utf8(x):  # {{{
    if isinstance(x, unicode):
        x = x.encode('utf-8')
    return x
# }}}


def render_rating(rating, url_prefix, container='span', prefix=None):  # {{{
    if rating < 0.1:
        return '', ''
    added = 0
    if prefix is None:
        prefix = _('Average rating')
    rstring = xml(_('%(prefix)s: %(rating).1f stars')%dict(
        prefix=prefix, rating=rating if rating else 0.0),
        True)
    ans = ['<%s class="rating">' % (container)]
    for i in range(5):
        n = rating - added
        x = 'half'
        if n <= 0.1:
            x = 'off'
        elif n >= 0.9:
            x = 'on'
        ans.append(
            u'<img alt="{0}" title="{0}" src="{2}/static/star-{1}.png" />'.format(
                rstring, x, url_prefix))
        added += 1
    ans.append('</%s>'%container)
    return u''.join(ans), rstring

# }}}


def get_category_items(category, items, datatype, prefix):  # {{{

    def item(i):
        templ = (u'<div title="{4}" class="category-item">'
                '<div class="category-name">'
                '<a href="{5}{3}" title="{4}">{0}</a></div>'
                '<div>{1}</div>'
                '<div>{2}</div></div>')
        rating, rstring = render_rating(i.avg_rating, prefix)
        orig_name = i.sort if i.use_sort_as_name else i.name
        name = xml(orig_name)
        if datatype == 'rating':
            name = xml(_('%d stars')%int(i.avg_rating))
        id_ = i.id
        if id_ is None:
            id_ = hexlify(force_unicode(orig_name).encode('utf-8'))
        id_ = xml(str(id_))
        desc = ''
        if i.count > 0:
            desc += '[' + _('%d books')%i.count + ']'
        q = i.category
        if not q:
            q = category
        href = '/browse/matches/%s/%s'%(quote(q), quote(id_))
        return templ.format(xml(name), rating,
                xml(desc), xml(href, True), rstring, prefix)

    items = list(map(item, items))
    return '\n'.join(['<div class="category-container">'] + items + ['</div>'])

# }}}


class Endpoint(object):  # {{{
    'Manage encoding, mime-type, last modified, cookies, etc.'

    def __init__(self, mimetype='text/html; charset=utf-8', sort_type='category'):
        self.mimetype = mimetype
        self.sort_type = sort_type
        self.sort_kwarg = sort_type + '_sort'
        self.sort_cookie_name = 'calibre_browse_server_sort_'+self.sort_type

    def __call__(eself, func):

        def do(self, *args, **kwargs):
            if 'json' not in eself.mimetype:
                sort_val = None
                cookie = cherrypy.request.cookie
                if eself.sort_cookie_name in cookie:
                    sort_val = cookie[eself.sort_cookie_name].value
                kwargs[eself.sort_kwarg] = sort_val

            # Remove AJAX caching disabling jquery workaround arg
            kwargs.pop('_', None)

            ans = func(self, *args, **kwargs)
            cherrypy.response.headers['Content-Type'] = eself.mimetype
            updated = self.db.last_modified()
            cherrypy.response.headers['Last-Modified'] = \
                    self.last_modified(max(updated, self.build_time))
            ans = utf8(ans)
            return ans

        do.__name__ = func.__name__

        return do
# }}}
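
The star images in ``render_rating()`` are chosen per slot from the remaining fraction (``off`` at or below 0.1, ``on`` at or above 0.9, otherwise ``half``); for example, a 3.5-star average maps to:

```python
# Star selection logic from render_rating(), for a 3.5-star average.
rating, added, stars = 3.5, 0, []
for i in range(5):
    n = rating - added
    x = 'half'
    if n <= 0.1:
        x = 'off'
    elif n >= 0.9:
        x = 'on'
    stars.append(x)
    added += 1
print(stars)   # ['on', 'on', 'on', 'half', 'off']
```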
|
|
||||||
|
|
||||||
class BrowseServer(object):
|
|
||||||
|
|
||||||
def add_routes(self, connect):
|
|
||||||
base_href = '/browse'
|
|
||||||
connect('browse', base_href, self.browse_catalog)
|
|
||||||
connect('browse_catalog', base_href+'/category/{category}',
|
|
||||||
self.browse_catalog)
|
|
||||||
connect('browse_category_group',
|
|
||||||
base_href+'/category_group/{category}/{group}',
|
|
||||||
self.browse_category_group)
|
|
||||||
connect('browse_matches',
|
|
||||||
base_href+'/matches/{category}/{cid}',
|
|
||||||
self.browse_matches)
|
|
||||||
connect('browse_booklist_page',
|
|
||||||
base_href+'/booklist_page',
|
|
||||||
self.browse_booklist_page)
|
|
||||||
connect('browse_search', base_href+'/search',
|
|
||||||
self.browse_search)
|
|
||||||
connect('browse_details', base_href+'/details/{id}',
|
|
||||||
self.browse_details)
|
|
||||||
connect('browse_book', base_href+'/book/{id}',
|
|
||||||
self.browse_book)
|
|
||||||
connect('browse_random', base_href+'/random',
|
|
||||||
self.browse_random)
|
|
||||||
connect('browse_category_icon', base_href+'/icon/{name}',
|
|
||||||
self.browse_icon)
|
|
||||||
|
|
||||||
self.icon_map = JSONConfig('gui').get('tags_browser_category_icons', {})
|
|
||||||
|
|
||||||
# Templates {{{
|
|
||||||
def browse_template(self, sort, category=True, initial_search=''):
|
|
||||||
|
|
||||||
if not hasattr(self, '__browse_template__') or \
|
|
||||||
self.opts.develop:
|
|
||||||
self.__browse_template__ = \
|
|
||||||
P('content_server/browse/browse.html', data=True).decode('utf-8')
|
|
||||||
|
|
||||||
ans = self.__browse_template__
|
|
||||||
scn = 'calibre_browse_server_sort_'
|
|
||||||
|
|
||||||
if category:
|
|
||||||
sort_opts = [('rating', _('Average rating')), ('name',
|
|
||||||
_('Name')), ('popularity', _('Popularity'))]
|
|
||||||
scn += 'category'
|
|
||||||
else:
|
|
||||||
scn += 'list'
|
|
||||||
fm = self.db.field_metadata
|
|
||||||
sort_opts, added = [], set([])
|
|
||||||
displayed_custom_fields = custom_fields_to_display(self.db)
|
|
||||||
for x in fm.sortable_field_keys():
|
|
||||||
if x in ('ondevice', 'formats', 'sort'):
|
|
||||||
continue
|
|
||||||
if fm.is_ignorable_field(x) and x not in displayed_custom_fields:
|
|
||||||
continue
|
|
||||||
if x == 'comments' or fm[x]['datatype'] == 'comments':
|
|
||||||
continue
|
|
||||||
n = fm[x]['name']
|
|
||||||
if n not in added:
|
|
||||||
added.add(n)
|
|
||||||
sort_opts.append((x, n))
|
|
||||||
|
|
||||||
ans = ans.replace('{sort_select_label}', xml(_('Sort by')+':'))
|
|
||||||
ans = ans.replace('{sort_cookie_name}', scn)
|
|
||||||
ans = ans.replace('{prefix}', self.opts.url_prefix)
|
|
||||||
ans = ans.replace('{library}', _('library'))
|
|
||||||
ans = ans.replace('{home}', _('home'))
|
|
||||||
ans = ans.replace('{Search}', _('Search'))
|
|
||||||
opts = ['<option %svalue="%s">%s</option>' % (
|
|
||||||
'selected="selected" ' if k==sort else '',
|
|
||||||
xml(k), xml(nl), ) for k, nl in
|
|
||||||
sorted(sort_opts, key=lambda x: sort_key(operator.itemgetter(1)(x))) if k and nl]
|
|
||||||
ans = ans.replace('{sort_select_options}', ('\n'+' '*20).join(opts))
|
|
||||||
lp = self.db.library_path
|
|
||||||
if isbytestring(lp):
|
|
||||||
lp = force_unicode(lp, filesystem_encoding)
|
|
||||||
ans = ans.replace('{library_name}', xml(os.path.basename(lp)))
|
|
||||||
ans = ans.replace('{library_path}', xml(lp, True))
|
|
||||||
ans = ans.replace('{initial_search}', xml(initial_search, attribute=True))
|
|
||||||
return ans
|
|
||||||
|
|
||||||
@property
|
|
||||||
def browse_summary_template(self):
|
|
||||||
if not hasattr(self, '__browse_summary_template__') or \
|
|
||||||
self.opts.develop:
|
|
||||||
self.__browse_summary_template__ = \
|
|
||||||
P('content_server/browse/summary.html', data=True).decode('utf-8')
|
|
||||||
return self.__browse_summary_template__.replace('{prefix}',
|
|
||||||
self.opts.url_prefix)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def browse_details_template(self):
|
|
||||||
if not hasattr(self, '__browse_details_template__') or \
|
|
||||||
self.opts.develop:
|
|
||||||
self.__browse_details_template__ = \
|
|
||||||
P('content_server/browse/details.html', data=True).decode('utf-8')
|
|
||||||
return self.__browse_details_template__.replace('{prefix}',
|
|
||||||
self.opts.url_prefix)
|
|
||||||
|
|
||||||
# }}}
|
|
||||||
|
|
||||||

    # Catalogs {{{
    def browse_icon(self, name='blank.png'):
        cherrypy.response.headers['Content-Type'] = 'image/png'
        cherrypy.response.headers['Last-Modified'] = self.last_modified(self.build_time)

        if not hasattr(self, '__browse_icon_cache__'):
            self.__browse_icon_cache__ = {}
        if name not in self.__browse_icon_cache__:
            if name.startswith('_'):
                name = sanitize_file_name2(name[1:])
                try:
                    with open(os.path.join(config_dir, 'tb_icons', name), 'rb') as f:
                        data = f.read()
                except:
                    raise cherrypy.HTTPError(404, 'no icon named: %r'%name)
            else:
                try:
                    data = I(name, data=True)
                except:
                    raise cherrypy.HTTPError(404, 'no icon named: %r'%name)
            self.__browse_icon_cache__[name] = scale_image(data, 48, 48, as_png=True)[-1]
        return self.__browse_icon_cache__[name]

    def browse_toplevel(self):
        categories = self.categories_cache()
        category_meta = self.db.field_metadata
        cats = [
                (_('Newest'), 'newest', 'forward.png'),
                (_('All books'), 'allbooks', 'book.png'),
                (_('Random book'), 'randombook', 'random.png'),
                ]
        virt_libs = self.db.prefs.get('virtual_libraries', {})
        if virt_libs:
            cats.append((_('Virtual Libs.'), 'virt_libs', 'lt.png'))

        def getter(x):
            try:
                return category_meta[x]['name'].lower()
            except KeyError:
                return x

        displayed_custom_fields = custom_fields_to_display(self.db)
        uc_displayed = set()
        for category in sorted(categories, key=lambda x: sort_key(getter(x))):
            if len(categories[category]) == 0:
                continue
            if category in ('formats', 'identifiers'):
                continue
            meta = category_meta.get(category, None)
            if meta is None:
                continue
            if self.db.field_metadata.is_ignorable_field(category) and \
                        category not in displayed_custom_fields:
                continue
            # get the icon files
            main_cat = (category.partition('.')[0]) if hasattr(category,
                    'partition') else category
            if main_cat in self.icon_map:
                icon = '_'+quote(self.icon_map[main_cat])
            elif category in category_icon_map:
                icon = category_icon_map[category]
            elif meta['is_custom']:
                icon = category_icon_map['custom:']
            elif meta['kind'] == 'user':
                icon = category_icon_map['user:']
            else:
                icon = 'blank.png'

            if meta['kind'] == 'user':
                dot = category.find('.')
                if dot > 0:
                    cat = category[:dot]
                    if cat not in uc_displayed:
                        cats.append((meta['name'][:dot-1], cat, icon))
                        uc_displayed.add(cat)
                else:
                    cats.append((meta['name'], category, icon))
                    uc_displayed.add(category)
            else:
                cats.append((meta['name'], category, icon))

        cats = [(u'<li><a title="{2} {0}" href="{3}/browse/category/{1}"> </a>'
                 u'<img src="{3}{src}" alt="{0}" />'
                 u'<span class="label">{0}</span>'
                 u'</li>')
                .format(xml(x, True), xml(quote(y)), xml(_('Browse books by')),
                    self.opts.url_prefix, src='/browse/icon/'+z)
                for x, y, z in cats]

        main = u'<div class="toplevel"><h3>{0}</h3><ul>{1}</ul></div>'\
                .format(_('Choose a category to browse by:'), u'\n\n'.join(cats))
        return self.browse_template('name').format(title='',
                    script='toplevel();', main=main)

    def browse_sort_categories(self, items, sort):
        if sort not in ('rating', 'name', 'popularity'):
            sort = 'name'
        items.sort(key=lambda x: sort_key(getattr(x, 'sort', x.name)))
        if sort == 'popularity':
            items.sort(key=operator.attrgetter('count'), reverse=True)
        elif sort == 'rating':
            items.sort(key=operator.attrgetter('avg_rating'), reverse=True)
        return sort

    def browse_category(self, category, sort):
        categories = self.categories_cache()
        categories['virt_libs'] = {}
        if category not in categories:
            raise cherrypy.HTTPError(404, 'category not found')
        category_meta = self.db.field_metadata
        category_name = _('Virtual Libraries') if category == 'virt_libs' else category_meta[category]['name']
        datatype = 'text' if category == 'virt_libs' else category_meta[category]['datatype']

        # See if we have any sub-categories to display. As we find them, add
        # them to the displayed set to avoid showing the same item twice
        uc_displayed = set()
        cats = []
        for ucat in sorted(categories.keys(), key=sort_key):
            if len(categories[ucat]) == 0:
                continue
            if category == 'formats':
                continue
            meta = category_meta.get(ucat, None)
            if meta is None:
                continue
            if meta['kind'] != 'user':
                continue
            cat_len = len(category)
            if not (len(ucat) > cat_len and ucat.startswith(category+'.')):
                continue

            if ucat in self.icon_map:
                icon = '_'+quote(self.icon_map[ucat])
            else:
                icon = category_icon_map['user:']
            # we have a subcategory. Find any further dots (further subcats)
            cat_len += 1
            cat = ucat[cat_len:]
            dot = cat.find('.')
            if dot > 0:
                # More subcats
                cat = cat[:dot]
                if cat not in uc_displayed:
                    cats.append((cat, ucat[:cat_len+dot], icon))
                    uc_displayed.add(cat)
            else:
                # This is the end of the chain
                cats.append((cat, ucat, icon))
                uc_displayed.add(cat)

        cats = u'\n\n'.join(
                [(u'<li><a title="{2} {0}" href="{3}/browse/category/{1}"> </a>'
                 u'<img src="{3}{src}" alt="{0}" />'
                 u'<span class="label">{0}</span>'
                 u'</li>')
                .format(xml(x, True), xml(quote(y)), xml(_('Browse books by')),
                    self.opts.url_prefix, src='/browse/icon/'+z)
                for x, y, z in cats])
        if cats:
            cats = (u'\n<div class="toplevel">\n'
                     '{0}</div>').format(cats)
            script = 'toplevel();'
        else:
            script = 'true'

        # Now do the category items
        vls = self.db.prefs.get('virtual_libraries', {})
        categories['virt_libs'] = sorted([Tag(k) for k, v in vls.iteritems()], key=lambda x:sort_key(x.name))
        items = categories[category]

        sort = self.browse_sort_categories(items, sort)

        if not cats and len(items) == 1:
            # Only one item in category, go directly to book list
            html = get_category_items(category, items,
                    datatype, self.opts.url_prefix)
            href = re.search(r'<a href="([^"]+)"', html)
            if href is not None:
                # cherrypy does not auto unquote params when using
                # InternalRedirect
                raise cherrypy.InternalRedirect(unquote(href.group(1)))

        if len(items) <= self.opts.max_opds_ungrouped_items:
            script = 'false'
            items = get_category_items(category, items,
                    datatype, self.opts.url_prefix)
        else:
            getter = lambda x: unicode(getattr(x, 'sort', None) or x.name)
            starts = set([])
            for x in items:
                val = getter(x)
                if not val:
                    val = u'A'
                starts.add(val[0].upper())
            category_groups = OrderedDict()
            for x in sorted(starts):
                category_groups[x] = len([y for y in items if
                    getter(y).upper().startswith(x)])
            items = [(u'<h3 title="{0}"><a class="load_href" title="{0}"'
                      u' href="{4}{3}"><strong>{0}</strong> [{2}]</a></h3><div>'
                      u'<div class="loaded" style="display:none"></div>'
                      u'<div class="loading"><img alt="{1}" src="{4}/static/loading.gif" /><em>{1}</em></div>'
                      u'</div>').format(
                          xml(s, True),
                          xml(_('Loading, please wait'))+'…',
                          unicode(c),
                          xml(u'/browse/category_group/%s/%s'%(
                              hexlify(category.encode('utf-8')),
                              hexlify(s.encode('utf-8'))), True),
                          self.opts.url_prefix)
                    for s, c in category_groups.items()]
            items = '\n\n'.join(items)
            items = u'<div id="groups">\n{0}</div>'.format(items)

        if cats:
            script = 'toplevel();category(%s);'%script
        else:
            script = 'category(%s);'%script

        main = u'''
            <div class="category">
                <h3>{0}</h3>
                <a class="navlink" href="{3}/browse"
                    title="{2}">{2} ↑</a>
                {1}
            </div>
            '''.format(
                xml(_('Browsing by')+': ' + category_name), cats + items,
                xml(_('Up'), True), self.opts.url_prefix)

        return self.browse_template(sort).format(title=category_name,
                script=script, main=main)

    @Endpoint(mimetype='application/json; charset=utf-8')
    def browse_category_group(self, category=None, group=None, sort=None):
        if sort == 'null':
            sort = None
        if sort not in ('rating', 'name', 'popularity'):
            sort = 'name'
        try:
            category = unhexlify(category)
            if isbytestring(category):
                category = category.decode('utf-8')
        except:
            raise cherrypy.HTTPError(404, 'invalid category')

        categories = self.categories_cache()
        if category not in categories:
            raise cherrypy.HTTPError(404, 'category not found')

        category_meta = self.db.field_metadata
        try:
            datatype = category_meta[category]['datatype']
        except KeyError:
            datatype = 'text'

        try:
            group = unhexlify(group)
            if isbytestring(group):
                group = group.decode('utf-8')
        except:
            raise cherrypy.HTTPError(404, 'invalid group')

        items = categories[category]
        entries = []
        getter = lambda x: unicode(getattr(x, 'sort', None) or x.name)
        for x in items:
            val = getter(x)
            if not val:
                val = u'A'
            if val.upper().startswith(group):
                entries.append(x)

        sort = self.browse_sort_categories(entries, sort)
        entries = get_category_items(category, entries,
                datatype, self.opts.url_prefix)
        return json.dumps(entries, ensure_ascii=True)

    @Endpoint()
    def browse_catalog(self, category=None, category_sort=None):
        'Entry point for top-level, categories and sub-categories'
        prefix = '' if self.is_wsgi else self.opts.url_prefix
        if category is None:
            ans = self.browse_toplevel()
        # The following are fake categories used for the top-level view
        elif category == 'newest':
            raise cherrypy.InternalRedirect(prefix +
                    '/browse/matches/newest/dummy')
        elif category == 'allbooks':
            raise cherrypy.InternalRedirect(prefix +
                    '/browse/matches/allbooks/dummy')
        elif category == 'randombook':
            raise cherrypy.InternalRedirect(prefix +
                    '/browse/random')
        else:
            ans = self.browse_category(category, category_sort)

        return ans

    # }}}
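
browse_category_group receives its category and group names hex-encoded in the URL: browse_category builds the links with hexlify and the handler reverses them with unhexlify before looking anything up. A small round-trip sketch of that encoding, following the Python 2 idiom of the removed code; the category and group values are examples only:

# Round trip of the hex encoding used for /browse/category_group/<category>/<group>
from binascii import hexlify, unhexlify

category, group = u'tags', u'S'
encoded = '/browse/category_group/%s/%s' % (
    hexlify(category.encode('utf-8')), hexlify(group.encode('utf-8')))
print(encoded)  # /browse/category_group/74616773/53

# The handler reverses it before touching the database:
decoded = unhexlify(encoded.rsplit('/', 1)[-1]).decode('utf-8')
print(decoded)  # S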

    # Book Lists {{{

    def browse_sort_book_list(self, items, sort):
        fm = self.db.field_metadata
        keys = frozenset(fm.sortable_field_keys())
        if sort not in keys:
            sort = 'title'
        self.sort(items, 'title', True)
        if sort != 'title':
            ascending = fm[sort]['datatype'] not in ('rating', 'datetime',
                    'series')
            self.sort(items, sort, ascending)
        return sort

    @Endpoint(sort_type='list')
    def browse_matches(self, category=None, cid=None, list_sort=None):
        if list_sort:
            list_sort = unquote(list_sort)
        if not cid:
            raise cherrypy.HTTPError(404, 'invalid category id: %r'%cid)
        categories = self.categories_cache()

        if category not in categories and \
                category not in ('newest', 'allbooks', 'virt_libs'):
            raise cherrypy.HTTPError(404, 'category not found')
        fm = self.db.field_metadata
        try:
            category_name = fm[category]['name']
            dt = fm[category]['datatype']
        except:
            if category not in ('newest', 'allbooks', 'virt_libs'):
                raise
            category_name = {
                    'newest' : _('Newest'),
                    'allbooks' : _('All books'),
                    'virt_libs': _('Virtual Libraries'),
                    }[category]
            dt = None

        hide_sort = 'true' if dt == 'series' else 'false'
        if category == 'search':
            which = unhexlify(cid).decode('utf-8')
            try:
                ids = self.search_cache('search:"%s"'%which)
            except:
                raise cherrypy.HTTPError(404, 'Search: %r not understood'%which)
        else:
            all_ids = self.search_cache('')
            if category == 'newest':
                ids = all_ids
                hide_sort = 'true'
            elif category == 'allbooks':
                ids = all_ids
            elif category == 'virt_libs':
                which = unhexlify(cid).decode('utf-8')
                vls = self.db.prefs.get('virtual_libraries', {})
                ids = self.search_cache(vls[which])
                category_name = _('virtual library: ') + xml(which)
                if not ids:
                    msg = _('The virtual library <b>%s</b> has no books.') % prepare_string_for_xml(which)
                    if self.search_restriction:
                        msg += ' ' + _(
                            'This is probably because you have applied a virtual library'
                            ' to the content server in Preferences->Sharing over the net.'
                            ' This virtual library is applied globally and combined with'
                            ' the current virtual library.')
                    return self.browse_template('name').format(title='',
                        script='', main='<p>%s</p>'%msg)
            else:
                if fm.get(category, {'datatype':None})['datatype'] == 'composite':
                    cid = cid.decode('utf-8')
                q = category
                if q == 'news':
                    q = 'tags'
                ids = self.db.get_books_for_category(q, cid)
                ids = [x for x in ids if x in all_ids]

        items = [self.db.data.tablerow_for_id(x) for x in ids]
        if category == 'newest':
            list_sort = 'timestamp'
        if dt == 'series':
            list_sort = category
        sort = self.browse_sort_book_list(items, list_sort)
        ids = [x[0] for x in items]
        html = render_book_list(ids, self.opts.url_prefix,
                suffix=_('in') + ' ' + category_name)

        return self.browse_template(sort, category=False).format(
                title=_('Books in') + " " +category_name,
                script='booklist(%s);'%hide_sort, main=html)

    def browse_get_book_args(self, mi, id_, add_category_links=False):
        fmts = self.db.formats(id_, index_is_id=True)
        if not fmts:
            fmts = ''
        fmts = [x.lower() for x in fmts.split(',') if x]
        pf = prefs['output_format'].lower()
        try:
            fmt = pf if pf in fmts else fmts[0]
        except:
            fmt = None
        args = {'id':id_, 'mi':mi,
                }
        ccache = self.categories_cache() if add_category_links else {}
        ftitle = fauthors = ''
        for key in mi.all_field_keys():
            val = mi.format_field(key)[1]
            if not val:
                val = ''
            if key == 'title':
                ftitle = xml(val, True)
            elif key == 'authors':
                fauthors = xml(val, True)
            if add_category_links:
                added_key = False
                fm = mi.metadata_for_field(key)
                if val and fm and fm['is_category'] and not fm['is_csp'] and\
                        key != 'formats' and fm['datatype'] not in ['rating']:
                    categories = mi.get(key)
                    if isinstance(categories, basestring):
                        categories = [categories]
                    dbtags = []
                    for category in categories:
                        dbtag = None
                        for tag in ccache[key]:
                            if tag.name == category:
                                dbtag = tag
                                break
                        dbtags.append(dbtag)
                    if None not in dbtags:
                        vals = []
                        for tag in dbtags:
                            tval = ('<a title="Browse books by {3}: {0}"'
                                    ' href="{1}" class="details_category_link">{2}</a>')
                            href='%s/browse/matches/%s/%s' % \
                                (self.opts.url_prefix, quote(tag.category), quote(str(tag.id)))
                            vals.append(tval.format(xml(tag.name, True),
                                xml(href, True),
                                xml(val if len(dbtags) == 1 else tag.name),
                                xml(key, True)))
                        join = ' & ' if key == 'authors' or \
                                        (fm['is_custom'] and
                                         fm['display'].get('is_names', False)) \
                                     else ', '
                        args[key] = join.join(vals)
                        added_key = True
                if not added_key:
                    args[key] = xml(val, True)
            else:
                args[key] = xml(val, True)
        fname = quote(ascii_filename(ftitle) + ' - ' +
                ascii_filename(fauthors))
        return args, fmt, fmts, fname

    @Endpoint(mimetype='application/json; charset=utf-8')
    def browse_booklist_page(self, ids=None, sort=None):
        if sort == 'null':
            sort = None
        if ids is None:
            ids = json.dumps('[]')
        try:
            ids = json.loads(ids)
        except:
            raise cherrypy.HTTPError(404, 'invalid ids')
        summs = []
        for id_ in ids:
            try:
                id_ = int(id_)
                mi = self.db.get_metadata(id_, index_is_id=True)
            except:
                continue
            args, fmt, fmts, fname = self.browse_get_book_args(mi, id_)
            args['other_formats'] = ''
            args['fmt'] = fmt
            if fmts and fmt:
                other_fmts = [x for x in fmts if x.lower() != fmt.lower()]
                if other_fmts:
                    ofmts = [u'<a href="{4}/get/{0}/{1}_{2}.{0}" title="{3}">{3}</a>'
                            .format(f, fname, id_, f.upper(),
                                self.opts.url_prefix) for f in
                            other_fmts]
                    ofmts = ', '.join(ofmts)
                    args['other_formats'] = u'<strong>%s: </strong>' % \
                            _('Other formats') + ofmts

            args['details_href'] = self.opts.url_prefix + '/browse/details/'+str(id_)

            if fmt:
                href = self.opts.url_prefix + '/get/%s/%s_%d.%s'%(
                        fmt, fname, id_, fmt)
                rt = xml(_('Read %(title)s in the %(fmt)s format')%
                        {'title':args['title'], 'fmt':fmt.upper()}, True)

                args['get_button'] = \
                        '<a href="%s" class="read" title="%s">%s</a>' % \
                        (xml(href, True), rt, xml(_('Get')))
                args['get_url'] = xml(href, True)
            else:
                args['get_button'] = ''
                args['get_url'] = 'javascript:alert(\'%s\')' % xml(_(
                    'This book has no available formats to view'), True)
            args['comments'] = comments_to_html(mi.comments)
            args['stars'] = ''
            if mi.rating:
                args['stars'] = render_rating(mi.rating/2.0,
                        self.opts.url_prefix, prefix=_('Rating'))[0]
            if args['tags']:
                args['tags'] = u'<strong>%s: </strong>'%xml(_('Tags')) + \
                        args['tags']
            if args['series']:
                args['series'] = args['series']
            args['details'] = xml(_('Details'), True)
            args['details_tt'] = xml(_('Show book details'), True)
            args['permalink'] = xml(_('Permalink'), True)
            args['permalink_tt'] = xml(_('A permanent link to this book'), True)

            summs.append(self.browse_summary_template.format(**args))

        raw = json.dumps('\n'.join(summs), ensure_ascii=True)
        return raw

    def browse_render_details(self, id_, add_random_button=False, add_title=False):
        try:
            mi = self.db.get_metadata(id_, index_is_id=True)
        except:
            return _('This book has been deleted')
        else:
            args, fmt, fmts, fname = self.browse_get_book_args(mi, id_,
                    add_category_links=True)
            args['fmt'] = fmt
            if fmt:
                args['get_url'] = xml(self.opts.url_prefix + '/get/%s/%s_%d.%s'%(
                    fmt, fname, id_, fmt), True)
            else:
                args['get_url'] = 'javascript:alert(\'%s\')' % xml(_(
                    'This book has no available formats to view'), True)
            args['formats'] = ''
            if fmts:
                ofmts = [u'<a href="{4}/get/{0}/{1}_{2}.{0}" title="{3}">{3}</a>'
                        .format(xfmt, fname, id_, xfmt.upper(),
                            self.opts.url_prefix) for xfmt in fmts]
                ofmts = ', '.join(ofmts)
                args['formats'] = ofmts
            fields, comments = [], []
            displayed_custom_fields = custom_fields_to_display(self.db)
            for field, m in list(mi.get_all_standard_metadata(False).items()) + \
                    list(mi.get_all_user_metadata(False).items()):
                if self.db.field_metadata.is_ignorable_field(field) and \
                                field not in displayed_custom_fields:
                    continue
                if m['datatype'] == 'comments' or field == 'comments' or (
                        m['datatype'] == 'composite' and
                        m['display'].get('contains_html', False)):
                    val = mi.get(field, '')
                    if val and val.strip():
                        comments.append((m['name'], comments_to_html(val)))
                    continue
                if field in ('title', 'formats') or not args.get(field, False) \
                        or not m['name']:
                    continue
                if field == 'identifiers':
                    urls = urls_from_identifiers(mi.get(field, {}))
                    links = [u'<a class="details_category_link" target="_new" href="%s" title="%s:%s">%s</a>' % (url, id_typ, id_val, name)
                            for name, id_typ, id_val, url in urls]
                    links = u', '.join(links)
                    if links:
                        fields.append((field, m['name'], u'<strong>%s: </strong>%s'%(
                            _('Ids'), links)))
                    continue

                if m['datatype'] == 'rating':
                    r = u'<strong>%s: </strong>'%xml(m['name']) + \
                            render_rating(mi.get(field)/2.0, self.opts.url_prefix,
                                    prefix=m['name'])[0]
                else:
                    r = u'<strong>%s: </strong>'%xml(m['name']) + \
                            args[field]
                fields.append((field, m['name'], r))

            def fsort(x):
                num = {'authors':0, 'series':1, 'tags':2}.get(x[0], 100)
                return (num, sort_key(x[-1]))
            fields.sort(key=fsort)
            if add_title:
                fields.insert(0, ('title', 'Title', u'<strong>%s: </strong>%s' % (xml(_('Title')), xml(mi.title))))
            fields = [u'<div class="field">{0}</div>'.format(f[-1]) for f in
                    fields]
            fields = u'<div class="fields">%s</div>'%('\n\n'.join(fields))

            comments.sort(key=lambda x: x[0].lower())
            comments = [(u'<div class="field"><strong>%s: </strong>'
                         u'<div class="comment">%s</div></div>') % (xml(c[0]),
                             c[1]) for c in comments]
            comments = u'<div class="comments">%s</div>'%('\n\n'.join(comments))
            random = ''
            if add_random_button:
                href = '%s/browse/random?v=%s'%(
                    self.opts.url_prefix, time.time())
                random = '<a href="%s" id="random_button" title="%s">%s</a>' % (
                    xml(href, True), xml(_('Choose another random book'), True),
                    xml(_('Another random book')))

            return self.browse_details_template.format(
                    id=id_, title=xml(mi.title, True), fields=fields,
                    get_url=args['get_url'], fmt=args['fmt'],
                    formats=args['formats'], comments=comments, random=random)

    @Endpoint(mimetype='application/json; charset=utf-8')
    def browse_details(self, id=None):
        try:
            id_ = int(id)
        except:
            raise cherrypy.HTTPError(404, 'invalid id: %r'%id)

        ans = self.browse_render_details(id_)

        return json.dumps(ans, ensure_ascii=True)

    @Endpoint()
    def browse_random(self, *args, **kwargs):
        import random
        try:
            book_id = random.choice(self.search_for_books(''))
        except IndexError:
            raise cherrypy.HTTPError(404, 'This library has no books')
        ans = self.browse_render_details(book_id, add_random_button=True, add_title=True)
        return self.browse_template('').format(
                title=prepare_string_for_xml(self.db.title(book_id, index_is_id=True)), script='book();', main=ans)

    @Endpoint()
    def browse_book(self, id=None, category_sort=None):
        try:
            id_ = int(id)
        except:
            raise cherrypy.HTTPError(404, 'invalid id: %r'%id)

        ans = self.browse_render_details(id_, add_title=True)
        return self.browse_template('').format(
                title=prepare_string_for_xml(self.db.title(id_, index_is_id=True)), script='book();', main=ans)

    # }}}

    # Search {{{
    @Endpoint(sort_type='list')
    def browse_search(self, query='', list_sort=None):
        if isbytestring(query):
            query = query.decode('UTF-8')
        ids = self.search_for_books(query)
        items = [self.db.data.tablerow_for_id(x) for x in ids]
        sort = self.browse_sort_book_list(items, list_sort)
        ids = [x[0] for x in items]
        html = render_book_list(ids, self.opts.url_prefix,
                suffix=_('in search')+': '+xml(query))
        return self.browse_template(sort, category=False, initial_search=query).format(
                title=_('Matching books'),
                script='search_result();', main=html)

    # }}}
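
Both browse_sort_categories and browse_sort_book_list above lean on Python's stable sort: items are first ordered by name (or title) and then, when a key such as popularity or rating is requested, re-sorted by that key so that ties keep the name order. A small self-contained sketch of the same two-pass pattern; the Item type and sample data are made up:

# Two-pass stable sort in the style of the removed browse_sort_* helpers.
from collections import namedtuple

Item = namedtuple('Item', 'name count avg_rating')
items = [Item('Zebra', 5, 4.0), Item('Apple', 5, 3.0), Item('Mango', 9, 4.0)]

# First pass: alphabetical, so later ties preserve this order.
items.sort(key=lambda x: x.name.lower())
# Second pass: by popularity, descending; equal counts stay alphabetical.
items.sort(key=lambda x: x.count, reverse=True)

print([x.name for x in items])  # ['Mango', 'Apple', 'Zebra']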

@ -1,47 +0,0 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

from collections import OrderedDict

from calibre.utils.date import utcnow


class Cache(object):

    def __init__(self):
        self.reset_caches()

    def reset_caches(self):
        self._category_cache = OrderedDict()
        self._search_cache = OrderedDict()

    def search_cache(self, search):
        old = self._search_cache.pop(search, None)
        if old is None or old[0] <= self.db.last_modified():
            matches = self.search_for_books(search) or []
            self._search_cache[search] = (utcnow(), frozenset(matches))
            if len(self._search_cache) > 50:
                self._search_cache.popitem(last=False)
        else:
            self._search_cache[search] = old
        return self._search_cache[search][1]

    def categories_cache(self, restrict_to=frozenset([])):
        base_restriction = self.search_cache('')
        if restrict_to:
            restrict_to = frozenset(restrict_to).intersection(base_restriction)
        else:
            restrict_to = base_restriction
        old = self._category_cache.pop(frozenset(restrict_to), None)
        if old is None or old[0] <= self.db.last_modified():
            categories = self.db.get_categories(ids=restrict_to)
            self._category_cache[restrict_to] = (utcnow(), categories)
            if len(self._category_cache) > 20:
                self._category_cache.popitem(last=False)
        else:
            self._category_cache[frozenset(restrict_to)] = old
        return self._category_cache[restrict_to][1]
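
The Cache mixin above hand-rolls a small LRU cache: popping a key and re-inserting it moves it to the most-recently-used end of an OrderedDict, and popitem(last=False) evicts the oldest entry once the size cap is reached. A stripped-down sketch of the same idea; the lookup function and size limit here are invented for illustration:

# Bare-bones LRU cache in the style of Cache.search_cache.
from collections import OrderedDict

MAX_ENTRIES = 3
_cache = OrderedDict()

def lookup(key):
    return key.upper()  # stand-in for an expensive search

def cached_lookup(key):
    old = _cache.pop(key, None)     # remove so re-insertion moves it to the end
    value = lookup(key) if old is None else old
    _cache[key] = value             # now the most recently used entry
    if len(_cache) > MAX_ENTRIES:
        _cache.popitem(last=False)  # evict the least recently used entry
    return value

for k in ('a', 'b', 'c', 'a', 'd'):
    cached_lookup(k)
print(list(_cache))  # ['c', 'a', 'd'] -- 'b' was evicted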

@ -1,262 +0,0 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import re, os, posixpath

import cherrypy

from calibre import guess_type
from calibre.utils.date import fromtimestamp, as_utc
from calibre.utils.img import save_cover_data_to, scale_image
from calibre.library.caches import SortKeyGenerator
from calibre.library.save_to_disk import find_plugboard
from calibre.ebooks.metadata import authors_to_string
from calibre.utils.filenames import ascii_filename
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.utils.config import tweaks

plugboard_content_server_value = 'content_server'
plugboard_content_server_formats = ['epub', 'mobi', 'azw3']


class CSSortKeyGenerator(SortKeyGenerator):

    def __init__(self, fields, fm, db_prefs):
        SortKeyGenerator.__init__(self, fields, fm, None, db_prefs)

    def __call__(self, record):
        return self.itervals(record).next()


class ContentServer(object):

    '''
    Handles actually serving content files/covers/metadata. Also has
    a few utility methods.
    '''

    def add_routes(self, connect):
        connect('root', '/', self.index)
        connect('old', '/old', self.old)
        connect('get', '/get/{what}/{id}', self.get,
                conditions=dict(method=["GET", "HEAD"]),
                android_workaround=True)
        connect('static', '/static/{name:.*?}', self.static,
                conditions=dict(method=["GET", "HEAD"]))
        connect('favicon', '/favicon.png', self.favicon,
                conditions=dict(method=["GET", "HEAD"]))

    # Utility methods {{{
    def last_modified(self, updated):
        '''
        Generates a locale independent, english timestamp from a datetime
        object
        '''
        updated = as_utc(updated)
        lm = updated.strftime('day, %d month %Y %H:%M:%S GMT')
        day ={0:'Sun', 1:'Mon', 2:'Tue', 3:'Wed', 4:'Thu', 5:'Fri', 6:'Sat'}
        lm = lm.replace('day', day[int(updated.strftime('%w'))])
        month = {1:'Jan', 2:'Feb', 3:'Mar', 4:'Apr', 5:'May', 6:'Jun', 7:'Jul',
                8:'Aug', 9:'Sep', 10:'Oct', 11:'Nov', 12:'Dec'}
        return lm.replace('month', month[updated.month])

    def sort(self, items, field, order):
        field = self.db.data.sanitize_sort_field_name(field)
        if field not in self.db.field_metadata.sortable_field_keys():
            raise cherrypy.HTTPError(400, '%s is not a valid sort field'%field)
        keyg = CSSortKeyGenerator([(field, order)], self.db.field_metadata,
                self.db.prefs)
        items.sort(key=keyg, reverse=not order)

    # }}}

    def get(self, what, id):
        'Serves files, covers, thumbnails, metadata from the calibre database'
        try:
            id = int(id)
        except ValueError:
            id = id.rpartition('.')[0].rpartition('_')[-1]
            match = re.search(r'\d+', id)
            if not match:
                raise cherrypy.HTTPError(404, 'id:%s not an integer'%id)
            id = int(match.group())
        if not self.db.has_id(id):
            raise cherrypy.HTTPError(404, 'id:%d does not exist in database'%id)
        if what == 'thumb' or what.startswith('thumb_'):
            try:
                width, height = map(int, what.split('_')[1:])
            except:
                width, height = 60, 80
            return self.get_cover(id, thumbnail=True, thumb_width=width,
                    thumb_height=height)
        if what == 'cover':
            return self.get_cover(id)
        if what == 'opf':
            return self.get_metadata_as_opf(id)
        if what == 'json':
            raise cherrypy.InternalRedirect('/ajax/book/%d'%id)
        return self.get_format(id, what)
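
The get() handler above multiplexes a single /get/{what}/{id} route: `what` selects a cover, a sized thumbnail, an OPF document, a JSON redirect or a book format, and the id may also arrive embedded in a download filename. A short sketch of the URL shapes it accepts and of the filename fallback; book id 123 and the filename are invented examples:

# Illustrative /get/ URLs handled by ContentServer.get:
#   /get/cover/123                      full-size cover image
#   /get/thumb/123                      60x80 thumbnail (the default size)
#   /get/thumb_100_120/123              thumbnail with explicit width_height
#   /get/opf/123                        metadata as an OPF document
#   /get/json/123                       internally redirected to /ajax/book/123
#   /get/epub/123                       the EPUB format of the book
#   /get/epub/Title - Author_123.epub   id recovered from the filename, as below
import re

fname = 'Title - Author_123.epub'
book_id = int(re.search(r'\d+', fname.rpartition('.')[0].rpartition('_')[-1]).group())
print(book_id)  # 123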

    def static(self, name):
        'Serves static content'
        name = name.lower()
        fname = posixpath.basename(name)
        try:
            cherrypy.response.headers['Content-Type'] = {
                     'js'   : 'text/javascript',
                     'css'  : 'text/css',
                     'png'  : 'image/png',
                     'gif'  : 'image/gif',
                     'html' : 'text/html',
                     }[fname.rpartition('.')[-1].lower()]
        except KeyError:
            raise cherrypy.HTTPError(404, '%r not a valid resource type'%name)
        cherrypy.response.headers['Last-Modified'] = self.last_modified(self.build_time)
        basedir = os.path.abspath(P('content_server'))
        path = os.path.join(basedir, name.replace('/', os.sep))
        path = os.path.abspath(path)
        if not path.startswith(basedir):
            raise cherrypy.HTTPError(403, 'Access to %s is forbidden'%name)
        if not os.path.exists(path) or not os.path.isfile(path):
            raise cherrypy.HTTPError(404, '%s not found'%name)
        if self.opts.develop:
            lm = fromtimestamp(os.stat(path).st_mtime)
            cherrypy.response.headers['Last-Modified'] = self.last_modified(lm)
        with open(path, 'rb') as f:
            ans = f.read()
        if path.endswith('.css'):
            ans = ans.replace('/static/', self.opts.url_prefix + '/static/')
        return ans

    def favicon(self):
        data = I('lt.png', data=True)
        cherrypy.response.headers['Content-Type'] = 'image/png'
        cherrypy.response.headers['Last-Modified'] = self.last_modified(
                self.build_time)
        return data

    def index(self, **kwargs):
        'The / URL'
        ua = cherrypy.request.headers.get('User-Agent', '').strip()
        want_opds = \
            cherrypy.request.headers.get('Stanza-Device-Name', 919) != 919 or \
            cherrypy.request.headers.get('Want-OPDS-Catalog', 919) != 919 or \
            ua.startswith('Stanza')

        want_mobile = self.is_mobile_browser(ua)
        if self.opts.develop and not want_mobile:
            cherrypy.log('User agent: '+ua)

        if want_opds:
            return self.opds(version=0)

        if want_mobile:
            return self.mobile()

        return self.browse_catalog()

    def old(self, **kwargs):
        return self.static('index.html').replace('{prefix}',
                self.opts.url_prefix)

    # Actually get content from the database {{{
    def get_cover(self, id, thumbnail=False, thumb_width=60, thumb_height=80):
        try:
            cherrypy.response.headers['Content-Type'] = 'image/jpeg'
            cherrypy.response.timeout = 3600
            cover = self.db.cover(id, index_is_id=True)
            if cover is None:
                cover = self.default_cover
                updated = self.build_time
            else:
                updated = self.db.cover_last_modified(id, index_is_id=True)
            cherrypy.response.headers['Last-Modified'] = self.last_modified(updated)

            if thumbnail:
                quality = tweaks['content_server_thumbnail_compression_quality']
                if quality < 50:
                    quality = 50
                elif quality > 99:
                    quality = 99
                return scale_image(cover, thumb_width, thumb_height, compression_quality=quality)[-1]

            return save_cover_data_to(cover, None, minify_to=(self.max_cover_width, self.max_cover_height))
        except Exception as err:
            import traceback
            cherrypy.log.error('Failed to generate cover:')
            cherrypy.log.error(traceback.print_exc())
            raise cherrypy.HTTPError(404, 'Failed to generate cover: %r'%err)

    def get_metadata_as_opf(self, id_):
        cherrypy.response.headers['Content-Type'] = \
                'application/oebps-package+xml; charset=UTF-8'
        mi = self.db.get_metadata(id_, index_is_id=True)
        data = metadata_to_opf(mi)
        cherrypy.response.timeout = 3600
        cherrypy.response.headers['Last-Modified'] = \
                self.last_modified(mi.last_modified)

        return data

    def get_format(self, id, format):
        format = format.upper()
        fm = self.db.format_metadata(id, format, allow_cache=False)
        if not fm:
            raise cherrypy.HTTPError(404, 'book: %d does not have format: %s'%(id, format))
        update_metadata = format in {'MOBI', 'EPUB', 'AZW3'}
        mi = newmi = self.db.get_metadata(
            id, index_is_id=True, cover_as_data=True, get_cover=update_metadata)

        cherrypy.response.headers['Last-Modified'] = \
            self.last_modified(max(fm['mtime'], mi.last_modified))

        fmt = self.db.format(id, format, index_is_id=True, as_file=True,
                mode='rb')
        if fmt is None:
            raise cherrypy.HTTPError(404, 'book: %d does not have format: %s'%(id, format))
        mt = guess_type('dummy.'+format.lower())[0]
        if mt is None:
            mt = 'application/octet-stream'
        cherrypy.response.headers['Content-Type'] = mt

        if format.lower() in plugboard_content_server_formats:
            # Get any plugboards for the content server
            plugboards = self.db.prefs.get('plugboards', {})
            cpb = find_plugboard(plugboard_content_server_value,
                                 format.lower(), plugboards)
            if cpb:
                # Transform the metadata via the plugboard
                newmi = mi.deepcopy_metadata()
                newmi.template_to_attribute(mi, cpb)

        if update_metadata:
            # Write the updated file
            from calibre.ebooks.metadata.meta import set_metadata
            set_metadata(fmt, newmi, format.lower())
            fmt.seek(0)

        fmt.seek(0, 2)
        cherrypy.response.headers['Content-Length'] = fmt.tell()
        fmt.seek(0)

        ua = cherrypy.request.headers.get('User-Agent', '').strip()
        have_kobo_browser = self.is_kobo_browser(ua)
        file_extension = "kepub.epub" if have_kobo_browser and format.lower() == "kepub" else format

        au = authors_to_string(newmi.authors if newmi.authors else
                [_('Unknown')])
        title = newmi.title if newmi.title else _('Unknown')
        fname = u'%s - %s_%s.%s'%(title[:30], au[:30], id, file_extension.lower())
        fname = ascii_filename(fname).replace('"', '_')
        cherrypy.response.headers['Content-Disposition'] = \
                b'attachment; filename="%s"'%fname
        cherrypy.response.body = fmt
        cherrypy.response.timeout = 3600
        return fmt
    # }}}
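
last_modified() above builds the HTTP Last-Modified header by hand so the weekday and month names stay English regardless of the process locale, which strftime('%a'/'%b') would not guarantee. A quick standalone check of the same trick; the fixed datetime is only an example:

# Locale-independent HTTP date, following ContentServer.last_modified.
from datetime import datetime

updated = datetime(2014, 2, 3, 15, 4, 5)  # arbitrary example value

lm = updated.strftime('day, %d month %Y %H:%M:%S GMT')
day = {0: 'Sun', 1: 'Mon', 2: 'Tue', 3: 'Wed', 4: 'Thu', 5: 'Fri', 6: 'Sat'}
month = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun',
         7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'}
lm = lm.replace('day', day[int(updated.strftime('%w'))])
print(lm.replace('month', month[updated.month]))
# Mon, 03 Feb 2014 15:04:05 GMT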

@ -1,135 +0,0 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import sys, os
from threading import Thread

from calibre.library.server import server_config as config
from calibre.library.server.base import LibraryServer
from calibre.constants import iswindows, plugins
import cherrypy


def start_threaded_server(db, opts):
    server = LibraryServer(db, opts, embedded=True, show_tracebacks=False)
    server.thread = Thread(target=server.start)
    server.thread.setDaemon(True)
    server.thread.start()
    return server


def stop_threaded_server(server):
    server.exit()
    server.thread = None


def create_wsgi_app(path_to_library=None, prefix='', virtual_library=None):
    'WSGI entry point'
    from calibre.library import db
    cherrypy.config.update({'environment': 'embedded'})
    db = db(path_to_library)
    parser = option_parser()
    opts, args = parser.parse_args(['calibre-server'])
    opts.url_prefix = prefix
    opts.restriction = virtual_library
    server = LibraryServer(db, opts, wsgi=True, show_tracebacks=True)
    return cherrypy.Application(server, script_name=None, config=server.config)


def option_parser():
    parser = config().option_parser('%prog '+ _(
'''[options]

Start the calibre content server. The calibre content server
exposes your calibre library over the internet. The default interface
allows you to browse your calibre library by categories. You can also
access an interface optimized for mobile browsers at /mobile and an
OPDS based interface for use with reading applications at /opds.

The OPDS interface is advertised via Bonjour automatically.
'''
    ))
    parser.add_option('--with-library', default=None,
            help=_('Path to the library folder to serve with the content server'))
    parser.add_option('--pidfile', default=None,
            help=_('Write process PID to the specified file'))
    parser.add_option('--daemonize', default=False, action='store_true',
            help=_('Run process in background as a daemon. No effect on windows.'))
    parser.add_option('--restriction', '--virtual-library', default=None,
            help=_('Specifies a virtual library to be used for this invocation. '
                   'This option overrides any per-library settings specified'
                   ' in the GUI. For compatibility, if the value is not a '
                   'virtual library but is a saved search, that saved search is used.'
                   ' Also note that if you do not specify a restriction,'
                   ' the value specified in the GUI (if any) will be used.'))
    parser.add_option('--auto-reload', default=False, action='store_true',
            help=_('Auto reload server when source code changes. May not'
                ' work in all environments.'))
    return parser


def daemonize():
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError as e:
        print >>sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror)
        sys.exit(1)

    # decouple from parent environment
    os.chdir("/")
    os.setsid()
    os.umask(0)

    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # exit from second parent
            sys.exit(0)
    except OSError as e:
        print >>sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror)
        sys.exit(1)

    # Redirect standard file descriptors.
    try:
        plugins['speedup'][0].detach(os.devnull)
    except AttributeError:  # people running from source without updated binaries
        si = os.open(os.devnull, os.O_RDONLY)
        so = os.open(os.devnull, os.O_WRONLY)
        se = os.open(os.devnull, os.O_WRONLY)
        os.dup2(si, sys.stdin.fileno())
        os.dup2(so, sys.stdout.fileno())
        os.dup2(se, sys.stderr.fileno())


def main(args=sys.argv):
    from calibre.db.legacy import LibraryDatabase
    parser = option_parser()
    opts, args = parser.parse_args(args)
    if opts.daemonize and not iswindows:
        daemonize()
    if opts.pidfile is not None:
        from cherrypy.process.plugins import PIDFile
        PIDFile(cherrypy.engine, opts.pidfile).subscribe()
    cherrypy.log.screen = True
    from calibre.utils.config import prefs
    if opts.with_library is None:
        opts.with_library = prefs['library_path']
    if not opts.with_library:
        print('No saved library path. Use the --with-library option'
                ' to specify the path to the library you want to use.')
        return 1
    db = LibraryDatabase(os.path.expanduser(opts.with_library))
    server = LibraryServer(db, opts, show_tracebacks=opts.develop)
    server.start()
    return 0

if __name__ == '__main__':
    sys.exit(main())
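
create_wsgi_app() above returns a standard WSGI application, so the removed server could be embedded in any WSGI container. A minimal sketch of hosting it with the standard library's wsgiref, assuming the function kept the calibre.library.server.main module path of the removed file; the library path and prefix are placeholders:

# Hosting the removed WSGI entry point under wsgiref; values are placeholders.
from wsgiref.simple_server import make_server

from calibre.library.server.main import create_wsgi_app

application = create_wsgi_app(
    path_to_library='/home/user/Calibre Library',  # placeholder path
    prefix='/calibre')                             # optional URL prefix

httpd = make_server('0.0.0.0', 8080, application)
httpd.serve_forever()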
@ -1,312 +0,0 @@
|
|||||||
#!/usr/bin/env python2
|
|
||||||
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
|
|
||||||
|
|
||||||
__license__ = 'GPL v3'
|
|
||||||
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
|
|
||||||
__docformat__ = 'restructuredtext en'
|
|
||||||
|
|
||||||
import re, os
|
|
||||||
import __builtin__
|
|
||||||
from urllib import quote, urlencode
|
|
||||||
|
|
||||||
import cherrypy
|
|
||||||
from lxml import html
|
|
||||||
from lxml.html.builder import HTML, HEAD, TITLE, LINK, DIV, IMG, BODY, \
|
|
||||||
OPTION, SELECT, INPUT, FORM, SPAN, TABLE, TR, TD, A, HR, META
|
|
||||||
|
|
||||||
from calibre.library.server import custom_fields_to_display
|
|
||||||
from calibre.library.server.utils import strftime, format_tag_string
|
|
||||||
from calibre.ebooks.metadata import fmt_sidx
|
|
||||||
from calibre.constants import __appname__
|
|
||||||
from calibre import human_readable, isbytestring
|
|
||||||
from calibre.utils.cleantext import clean_xml_chars
|
|
||||||
from calibre.utils.date import utcfromtimestamp, as_local_time, is_date_undefined
|
|
||||||
from calibre.utils.filenames import ascii_filename
|
|
||||||
from calibre.utils.icu import sort_key
|
|
||||||
|
|
||||||
|
|
||||||
def CLASS(*args, **kwargs): # class is a reserved word in Python
|
|
||||||
kwargs['class'] = ' '.join(args)
|
|
||||||
return kwargs
|
|
||||||
|
|
||||||
|
|
||||||
def build_search_box(num, search, sort, order, prefix): # {{{
|
|
||||||
div = DIV(id='search_box')
|
|
||||||
form = FORM('Show ', method='get', action=prefix+'/mobile')
|
|
||||||
form.set('accept-charset', 'UTF-8')
|
|
||||||
|
|
||||||
div.append(form)
|
|
||||||
|
|
||||||
num_select = SELECT(name='num')
|
|
||||||
for option in (5, 10, 25, 100):
|
|
||||||
kwargs = {'value':str(option)}
|
|
||||||
if option == num:
|
|
||||||
kwargs['SELECTED'] = 'SELECTED'
|
|
||||||
num_select.append(OPTION(str(option), **kwargs))
|
|
||||||
num_select.tail = ' books matching '
|
|
||||||
form.append(num_select)
|
|
||||||
|
|
||||||
searchf = INPUT(name='search', id='s', value=search if search else '')
|
|
||||||
searchf.tail = ' sorted by '
|
|
||||||
form.append(searchf)
|
|
||||||
|
|
||||||
sort_select = SELECT(name='sort')
|
|
||||||
for option in ('date','author','title','rating','size','tags','series'):
|
|
||||||
kwargs = {'value':option}
|
|
||||||
if option == sort:
|
|
||||||
kwargs['SELECTED'] = 'SELECTED'
|
|
||||||
sort_select.append(OPTION(option, **kwargs))
|
|
||||||
form.append(sort_select)
|
|
||||||
|
|
||||||
order_select = SELECT(name='order')
|
|
||||||
for option in ('ascending','descending'):
|
|
||||||
kwargs = {'value':option}
|
|
||||||
if option == order:
|
|
||||||
kwargs['SELECTED'] = 'SELECTED'
|
|
||||||
order_select.append(OPTION(option, **kwargs))
|
|
||||||
form.append(order_select)
|
|
||||||
|
|
||||||
form.append(INPUT(id='go', type='submit', value='Search'))
|
|
||||||
|
|
||||||
return div
|
|
||||||
# }}}
|
|
||||||
|
|
||||||
|
|
||||||
def build_navigation(start, num, total, url_base): # {{{
|
|
||||||
end = min((start+num-1), total)
|
|
||||||
tagline = SPAN('Books %d to %d of %d'%(start, end, total),
|
|
||||||
style='display: block; text-align: center;')
|
|
||||||
left_buttons = TD(CLASS('button', style='text-align:left'))
|
|
||||||
right_buttons = TD(CLASS('button', style='text-align:right'))
|
|
||||||
|
|
||||||
if start > 1:
|
|
||||||
for t,s in [('First', 1), ('Previous', max(start-num,1))]:
|
|
||||||
left_buttons.append(A(t, href='%s&start=%d'%(url_base, s)))
|
|
||||||
|
|
||||||
if total > start + num:
|
|
||||||
for t,s in [('Next', start+num), ('Last', total-num+1)]:
|
|
||||||
right_buttons.append(A(t, href='%s&start=%d'%(url_base, s)))
|
|
||||||
|
|
||||||
buttons = TABLE(
|
|
||||||
TR(left_buttons, right_buttons),
|
|
||||||
CLASS('buttons'))
|
|
||||||
return DIV(tagline, buttons, CLASS('navigation'))
|
|
||||||
|
|
||||||
# }}}
|
|
||||||
|
|
||||||
|
|
||||||
def build_index(books, num, search, sort, order, start, total, url_base, CKEYS,
|
|
||||||
prefix, have_kobo_browser=False):
|
|
||||||
logo = DIV(IMG(src=prefix+'/static/calibre.png', alt=__appname__), id='logo')
|
|
||||||
|
|
||||||
search_box = build_search_box(num, search, sort, order, prefix)
|
|
||||||
navigation = build_navigation(start, num, total, prefix+url_base)
|
|
||||||
navigation2 = build_navigation(start, num, total, prefix+url_base)
|
|
||||||
bookt = TABLE(id='listing')
|
|
||||||
|
|
||||||
body = BODY(
|
|
||||||
logo,
|
|
||||||
search_box,
|
|
||||||
navigation,
|
|
||||||
HR(CLASS('spacer')),
|
|
||||||
bookt,
|
|
||||||
HR(CLASS('spacer')),
|
|
||||||
navigation2
|
|
||||||
)
|
|
||||||
|
|
||||||
# Book list {{{
|
|
||||||
for book in books:
|
|
||||||
thumbnail = TD(
|
|
||||||
IMG(type='image/jpeg', border='0',
|
|
||||||
src=prefix+'/get/thumb/%s' %
|
|
||||||
book['id']),
|
|
||||||
CLASS('thumbnail'))
|
|
||||||
|
|
||||||
data = TD()
|
|
||||||
for fmt in book['formats'].split(','):
|
|
||||||
if not fmt or fmt.lower().startswith('original_'):
|
|
||||||
continue
|
|
||||||
file_extension = "kepub.epub" if have_kobo_browser and fmt.lower() == "kepub" else fmt
|
|
||||||
a = quote(ascii_filename(book['authors']))
|
|
||||||
t = quote(ascii_filename(book['title']))
|
|
||||||
s = SPAN(
|
|
||||||
A(
|
|
||||||
fmt.lower(),
|
|
||||||
href=prefix+'/get/%s/%s-%s_%d.%s' % (fmt, a, t,
|
|
||||||
book['id'], file_extension.lower())
|
|
||||||
),
|
|
||||||
CLASS('button'))
|
|
||||||
s.tail = u''
|
|
||||||
data.append(s)
|
|
||||||
|
|
||||||
div = DIV(CLASS('data-container'))
|
|
||||||
data.append(div)
|
|
||||||
|
|
||||||
series = u'[%s - %s]'%(book['series'], book['series_index']) \
|
|
||||||
if book['series'] else ''
|
|
||||||
tags = u'Tags=[%s]'%book['tags'] if book['tags'] else ''
|
|
||||||
|
|
||||||
ctext = ''
|
|
||||||
for key in CKEYS:
|
|
||||||
val = book.get(key, None)
|
|
||||||
if val:
|
|
||||||
ctext += '%s=[%s] '%tuple(val.split(':#:'))
|
|
||||||
|
|
||||||
first = SPAN(u'\u202f%s %s by %s' % (clean_xml_chars(book['title']), clean_xml_chars(series),
|
|
||||||
clean_xml_chars(book['authors'])), CLASS('first-line'))
|
|
||||||
div.append(first)
|
|
||||||
second = SPAN(u'%s - %s %s %s' % (book['size'],
|
|
||||||
book['timestamp'],
|
|
||||||
tags, ctext), CLASS('second-line'))
|
|
||||||
div.append(second)
|
|
||||||
|
|
||||||
bookt.append(TR(thumbnail, data))
|
|
||||||
# }}}
|
|
||||||
|
|
||||||
body.append(DIV(
|
|
||||||
A(_('Switch to the full interface (non-mobile interface)'),
|
|
||||||
href=prefix+"/browse",
|
|
||||||
style="text-decoration: none; color: blue",
|
|
||||||
title=_('The full interface gives you many more features, '
|
|
||||||
'but it may not work well on a small screen')),
|
|
||||||
style="text-align:center"))
|
|
||||||
return HTML(
|
|
||||||
HEAD(
|
|
||||||
TITLE(__appname__ + ' Library'),
|
|
||||||
LINK(rel='icon', href='//calibre-ebook.com/favicon.ico',
|
|
||||||
type='image/x-icon'),
|
|
||||||
LINK(rel='stylesheet', type='text/css',
|
|
||||||
href=prefix+'/mobile/style.css'),
|
|
||||||
LINK(rel='apple-touch-icon', href="/static/calibre.png"),
|
|
||||||
META(name="robots", content="noindex")
|
|
||||||
), # End head
|
|
||||||
body
|
|
||||||
) # End html
|
|
||||||
|
|
||||||
|
|
||||||
class MobileServer(object):
|
|
||||||
'A view optimized for browsers in mobile devices'
|
|
||||||
|
|
||||||
    MOBILE_UA = re.compile('(?i)(?:iPhone|Opera Mini|NetFront|webOS|Mobile|Android|imode|DoCoMo|Minimo|Blackberry|MIDP|Symbian|HD2|Kindle)')

    def is_mobile_browser(self, ua):
        match = self.MOBILE_UA.search(ua)
        return match is not None and 'iPad' not in ua

    def is_kobo_browser(self, ua):
        return 'Kobo Touch' in ua

    def add_routes(self, connect):
        connect('mobile', '/mobile', self.mobile)
        connect('mobile_css', '/mobile/style.css', self.mobile_css)

    def mobile_css(self, *args, **kwargs):
        path = P('content_server/mobile.css')
        cherrypy.response.headers['Content-Type'] = 'text/css; charset=utf-8'
        updated = utcfromtimestamp(os.stat(path).st_mtime)
        cherrypy.response.headers['Last-Modified'] = self.last_modified(updated)
        with open(path, 'rb') as f:
            ans = f.read()
        return ans.replace('{prefix}', self.opts.url_prefix)

    def mobile(self, start='1', num='25', sort='date', search='',
               _=None, order='descending'):
        '''
        Serves metadata from the calibre database as XML.

        :param sort: Sort results by ``sort``. Can be one of `title,author,rating`.
        :param search: Filter results by ``search`` query. See :class:`SearchQueryParser` for query syntax
        :param start,num: Return the slice `[start:start+num]` of the sorted and filtered results
        :param _: Firefox seems to sometimes send this when using XMLHttpRequest with no caching
        '''
        try:
            start = int(start)
        except ValueError:
            raise cherrypy.HTTPError(400, 'start: %s is not an integer'%start)
        try:
            num = int(num)
        except ValueError:
            raise cherrypy.HTTPError(400, 'num: %s is not an integer'%num)
        if not search:
            search = ''
        if isbytestring(search):
            search = search.decode('UTF-8')
        ids = self.search_for_books(search)
        FM = self.db.FIELD_MAP
        items = [r for r in iter(self.db) if r[FM['id']] in ids]
        if sort is not None:
            self.sort(items, sort, (order.lower().strip() == 'ascending'))

        CFM = self.db.field_metadata
        CKEYS = [key for key in sorted(custom_fields_to_display(self.db),
                                       key=lambda x:sort_key(CFM[x]['name']))]
        # This method uses its own book dict, not the Metadata dict. The loop
        # below could be changed to use db.get_metadata instead of reading
        # info directly from the record made by the view, but it doesn't seem
        # worth it at the moment.
        books = []
        for record in items[(start-1):(start-1)+num]:
            book = {'formats':record[FM['formats']], 'size':record[FM['size']]}
            if not book['formats']:
                book['formats'] = ''
            if not book['size']:
                book['size'] = 0
            book['size'] = human_readable(book['size'])

            aus = record[FM['authors']] if record[FM['authors']] else __builtin__._('Unknown')
            aut_is = CFM['authors']['is_multiple']
            authors = aut_is['list_to_ui'].join([i.replace('|', ',') for i in aus.split(',')])
            book['authors'] = authors
            book['series_index'] = fmt_sidx(float(record[FM['series_index']]))
            book['series'] = record[FM['series']]
            book['tags'] = format_tag_string(record[FM['tags']], ',',
                                             no_tag_count=True)
            book['title'] = record[FM['title']]
            for x in ('timestamp', 'pubdate'):
                dval = record[FM[x]]
                if is_date_undefined(dval):
                    book[x] = ''
                else:
                    book[x] = strftime('%d %b, %Y', as_local_time(dval))
            book['id'] = record[FM['id']]
            books.append(book)
            for key in CKEYS:
                def concat(name, val):
                    return '%s:#:%s'%(name, unicode(val))
                mi = self.db.get_metadata(record[CFM['id']['rec_index']], index_is_id=True)
                name, val = mi.format_field(key)
                if not val:
                    continue
                datatype = CFM[key]['datatype']
                if datatype in ['comments']:
                    continue
                if datatype == 'text' and CFM[key]['is_multiple']:
                    book[key] = concat(name,
                                       format_tag_string(val,
                                           CFM[key]['is_multiple']['ui_to_list'],
                                           no_tag_count=True,
                                           joinval=CFM[key]['is_multiple']['list_to_ui']))
                else:
                    book[key] = concat(name, val)

        updated = self.db.last_modified()

        cherrypy.response.headers['Content-Type'] = 'text/html; charset=utf-8'
        cherrypy.response.headers['Last-Modified'] = self.last_modified(updated)

        q = {b'search':search.encode('utf-8'), b'order':order.encode('utf-8'), b'sort':sort.encode('utf-8'), b'num':str(num).encode('utf-8')}
        url_base = "/mobile?" + urlencode(q)
        ua = cherrypy.request.headers.get('User-Agent', '').strip()
        have_kobo_browser = self.is_kobo_browser(ua)

        raw = html.tostring(build_index(books, num, search, sort, order,
                                        start, len(ids), url_base, CKEYS,
                                        self.opts.url_prefix,
                                        have_kobo_browser=have_kobo_browser),
                            encoding='utf-8',
                            pretty_print=True)
        # tostring's include_meta_content_type is broken
        raw = raw.replace('<head>', '<head>\n'
                          '<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">')
        return raw
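Note: for reference, a minimal client sketch for the /mobile handler above, written in the same Python 2 style as the removed code. The host and port (localhost:8080, the old content-server default) and the parameter values are assumptions, not taken from the diff.

    # hypothetical client for the /mobile handler above (Python 2, stdlib only)
    import urllib, urllib2
    params = urllib.urlencode({'start': '1', 'num': '25', 'sort': 'date',
                               'order': 'descending', 'search': ''})
    # the response is the paginated HTML index produced by build_index()
    print urllib2.urlopen('http://localhost:8080/mobile?' + params).read()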
@ -1,660 +0,0 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import hashlib, binascii
from functools import partial
from collections import OrderedDict

from lxml import etree, html
from lxml.builder import ElementMaker
import cherrypy
import routes

from calibre.constants import __appname__
from calibre.ebooks.metadata import fmt_sidx, rating_to_stars
from calibre.library.comments import comments_to_html
from calibre.library.server import custom_fields_to_display
from calibre.library.server.utils import format_tag_string, Offsets
from calibre import guess_type, prepare_string_for_xml as xml
from calibre.utils.icu import sort_key
from calibre.utils.date import as_utc, is_date_undefined

BASE_HREFS = {
    0 : '/stanza',
    1 : '/opds',
}

STANZA_FORMATS = frozenset(['epub', 'pdb', 'pdf', 'cbr', 'cbz', 'djvu'])


def url_for(name, version, **kwargs):
    if not name.endswith('_'):
        name += '_'
    return routes.url_for(name+str(version), **kwargs)


def hexlify(x):
    if isinstance(x, unicode):
        x = x.encode('utf-8')
    return binascii.hexlify(x)


def unhexlify(x):
    return binascii.unhexlify(x).decode('utf-8')

# Vocabulary for building OPDS feeds {{{
DC_NS = 'http://purl.org/dc/terms/'
E = ElementMaker(namespace='http://www.w3.org/2005/Atom',
                 nsmap={
                     None   : 'http://www.w3.org/2005/Atom',
                     'dc'   : DC_NS,
                     'opds' : 'http://opds-spec.org/2010/catalog',
                 })


FEED = E.feed
TITLE = E.title
ID = E.id
ICON = E.icon


def UPDATED(dt, *args, **kwargs):
    return E.updated(as_utc(dt).strftime('%Y-%m-%dT%H:%M:%S+00:00'), *args, **kwargs)

LINK = partial(E.link, type='application/atom+xml')
NAVLINK = partial(E.link,
                  type='application/atom+xml;type=feed;profile=opds-catalog')


def SEARCH_LINK(base_href, *args, **kwargs):
    kwargs['rel'] = 'search'
    kwargs['title'] = 'Search'
    kwargs['href'] = base_href+'/search/{searchTerms}'
    return LINK(*args, **kwargs)


def AUTHOR(name, uri=None):
    args = [E.name(name)]
    if uri is not None:
        args.append(E.uri(uri))
    return E.author(*args)

SUBTITLE = E.subtitle


def NAVCATALOG_ENTRY(base_href, updated, title, description, query, version=0):
    href = base_href+'/navcatalog/'+hexlify(query)
    id_ = 'calibre-navcatalog:'+str(hashlib.sha1(href).hexdigest())
    return E.entry(
        TITLE(title),
        ID(id_),
        UPDATED(updated),
        E.content(description, type='text'),
        NAVLINK(href=href)
    )

START_LINK = partial(NAVLINK, rel='start')
UP_LINK = partial(NAVLINK, rel='up')
FIRST_LINK = partial(NAVLINK, rel='first')
LAST_LINK = partial(NAVLINK, rel='last')
NEXT_LINK = partial(NAVLINK, rel='next', title='Next')
PREVIOUS_LINK = partial(NAVLINK, rel='previous')


def html_to_lxml(raw):
    raw = u'<div>%s</div>'%raw
    root = html.fragment_fromstring(raw)
    root.set('xmlns', "http://www.w3.org/1999/xhtml")
    raw = etree.tostring(root, encoding=None)
    try:
        return etree.fromstring(raw)
    except:
        for x in root.iterdescendants():
            remove = []
            for attr in x.attrib:
                if ':' in attr:
                    remove.append(attr)
            for a in remove:
                del x.attrib[a]
        raw = etree.tostring(root, encoding=None)
        try:
            return etree.fromstring(raw)
        except:
            from calibre.ebooks.oeb.parse_utils import _html4_parse
            return _html4_parse(raw)


def CATALOG_ENTRY(item, item_kind, base_href, version, updated,
                  ignore_count=False, add_kind=False):
    id_ = 'calibre:category:'+item.name
    iid = 'N' + item.name
    if item.id is not None:
        iid = 'I' + str(item.id)
        iid += ':'+item_kind
    link = NAVLINK(href=base_href + '/' + hexlify(iid))
    count = (_('%d books') if item.count > 1 else _('%d book'))%item.count
    if ignore_count:
        count = ''
    if item.use_sort_as_name:
        name = item.sort
    else:
        name = item.name
    return E.entry(
        TITLE(name + ('' if not add_kind else ' (%s)'%item_kind)),
        ID(id_),
        UPDATED(updated),
        E.content(count, type='text'),
        link
    )


def CATALOG_GROUP_ENTRY(item, category, base_href, version, updated):
    id_ = 'calibre:category-group:'+category+':'+item.text
    iid = item.text
    link = NAVLINK(href=base_href + '/' + hexlify(iid))
    return E.entry(
        TITLE(item.text),
        ID(id_),
        UPDATED(updated),
        E.content(_('%d items')%item.count, type='text'),
        link
    )


def ACQUISITION_ENTRY(item, version, db, updated, CFM, CKEYS, prefix):
    FM = db.FIELD_MAP
    title = item[FM['title']]
    if not title:
        title = _('Unknown')
    authors = item[FM['authors']]
    if not authors:
        authors = _('Unknown')
    authors = ' & '.join([i.replace('|', ',') for i in
                          authors.split(',')])
    extra = []
    rating = item[FM['rating']]
    if rating > 0:
        rating = rating_to_stars(rating)
        extra.append(_('RATING: %s<br />')%rating)
    tags = item[FM['tags']]
    if tags:
        extra.append(_('TAGS: %s<br />')%xml(format_tag_string(tags, ',',
                                                               ignore_max=True,
                                                               no_tag_count=True)))
    series = item[FM['series']]
    if series:
        extra.append(_('SERIES: %(series)s [%(sidx)s]<br />')%
                     dict(series=xml(series),
                          sidx=fmt_sidx(float(item[FM['series_index']]))))
    mi = db.get_metadata(item[CFM['id']['rec_index']], index_is_id=True)
    for key in CKEYS:
        name, val = mi.format_field(key)
        if val:
            datatype = CFM[key]['datatype']
            if datatype == 'text' and CFM[key]['is_multiple']:
                extra.append('%s: %s<br />'%
                             (xml(name),
                              xml(format_tag_string(val,
                                  CFM[key]['is_multiple']['ui_to_list'],
                                  ignore_max=True, no_tag_count=True,
                                  joinval=CFM[key]['is_multiple']['list_to_ui']))))
            elif datatype == 'comments' or (CFM[key]['datatype'] == 'composite' and
                    CFM[key]['display'].get('contains_html', False)):
                extra.append('%s: %s<br />'%(xml(name), comments_to_html(unicode(val))))
            else:
                extra.append('%s: %s<br />'%(xml(name), xml(unicode(val))))
    comments = item[FM['comments']]
    if comments:
        comments = comments_to_html(comments)
        extra.append(comments)
    if extra:
        extra = html_to_lxml('\n'.join(extra))
    idm = 'calibre' if version == 0 else 'uuid'
    id_ = 'urn:%s:%s'%(idm, item[FM['uuid']])
    ans = E.entry(TITLE(title), E.author(E.name(authors)), ID(id_),
                  UPDATED(item[FM['last_modified']]), E.published(item[FM['timestamp']].isoformat()))
    if mi.pubdate and not is_date_undefined(mi.pubdate):
        ans.append(ans.makeelement('{%s}date' % DC_NS))
        ans[-1].text = mi.pubdate.isoformat()
    if len(extra):
        ans.append(E.content(extra, type='xhtml'))
    formats = item[FM['formats']]
    if formats:
        book_id = item[FM['id']]
        for fmt in formats.split(','):
            fmt = fmt.lower()
            mt = guess_type('a.'+fmt)[0]
            href = prefix + '/get/%s/%s'%(fmt, book_id)
            if mt:
                link = E.link(type=mt, href=href)
                if version > 0:
                    link.set('rel', "http://opds-spec.org/acquisition")
                    fm = db.format_metadata(book_id, fmt)
                    if fm:
                        link.set('length', str(fm['size']))
                        link.set('mtime', fm['mtime'].isoformat())
                ans.append(link)
    ans.append(E.link(type='image/jpeg', href=prefix+'/get/cover/%s'%item[FM['id']],
                      rel="x-stanza-cover-image" if version == 0 else
                      "http://opds-spec.org/cover"))
    ans.append(E.link(type='image/jpeg', href=prefix+'/get/thumb/%s'%item[FM['id']],
                      rel="x-stanza-cover-image-thumbnail" if version == 0 else
                      "http://opds-spec.org/thumbnail"))

    return ans


# }}}

default_feed_title = __appname__ + ' ' + _('Library')


class Feed(object):  # {{{

    def __init__(self, id_, updated, version, subtitle=None,
                 title=None,
                 up_link=None, first_link=None, last_link=None,
                 next_link=None, previous_link=None):
        self.base_href = url_for('opds', version)

        self.root = \
            FEED(
                TITLE(title or default_feed_title),
                AUTHOR(__appname__, uri='http://calibre-ebook.com'),
                ID(id_),
                ICON('/favicon.png'),
                UPDATED(updated),
                SEARCH_LINK(self.base_href),
                START_LINK(href=self.base_href)
            )
        if up_link:
            self.root.append(UP_LINK(href=up_link))
        if first_link:
            self.root.append(FIRST_LINK(href=first_link))
        if last_link:
            self.root.append(LAST_LINK(href=last_link))
        if next_link:
            self.root.append(NEXT_LINK(href=next_link))
        if previous_link:
            self.root.append(PREVIOUS_LINK(href=previous_link))
        if subtitle:
            self.root.insert(1, SUBTITLE(subtitle))

    def __str__(self):
        return etree.tostring(self.root, pretty_print=True, encoding='utf-8',
                              xml_declaration=True)
    # }}}


class TopLevel(Feed):  # {{{

    def __init__(self,
                 updated,  # datetime object in UTC
                 categories,
                 version,
                 id_='urn:calibre:main',
                 subtitle=_('Books in your library')
                 ):
        Feed.__init__(self, id_, updated, version, subtitle=subtitle)

        subc = partial(NAVCATALOG_ENTRY, self.base_href, updated,
                       version=version)
        subcatalogs = [subc(_('By ')+title,
                            _('Books sorted by ') + desc, q) for title, desc, q in
                       categories]
        for x in subcatalogs:
            self.root.append(x)
    # }}}


class NavFeed(Feed):

    def __init__(self, id_, updated, version, offsets, page_url, up_url, title=None):
        kwargs = {'up_link': up_url}
        kwargs['first_link'] = page_url
        kwargs['last_link'] = page_url+'?offset=%d'%offsets.last_offset
        if offsets.offset > 0:
            kwargs['previous_link'] = \
                page_url+'?offset=%d'%offsets.previous_offset
        if offsets.next_offset > -1:
            kwargs['next_link'] = \
                page_url+'?offset=%d'%offsets.next_offset
        if title:
            kwargs['title'] = title
        Feed.__init__(self, id_, updated, version, **kwargs)


class AcquisitionFeed(NavFeed):

    def __init__(self, updated, id_, items, offsets, page_url, up_url, version,
                 db, prefix, title=None):
        NavFeed.__init__(self, id_, updated, version, offsets, page_url, up_url, title=title)
        CFM = db.field_metadata
        CKEYS = [key for key in sorted(custom_fields_to_display(db),
                                       key=lambda x: sort_key(CFM[x]['name']))]
        for item in items:
            self.root.append(ACQUISITION_ENTRY(item, version, db, updated,
                                               CFM, CKEYS, prefix))


class CategoryFeed(NavFeed):

    def __init__(self, items, which, id_, updated, version, offsets, page_url, up_url, db, title=None):
        NavFeed.__init__(self, id_, updated, version, offsets, page_url, up_url, title=title)
        base_href = self.base_href + '/category/' + hexlify(which)
        ignore_count = False
        if which == 'search':
            ignore_count = True
        for item in items:
            self.root.append(CATALOG_ENTRY(item, item.category, base_href, version,
                                           updated, ignore_count=ignore_count,
                                           add_kind=which != item.category))


class CategoryGroupFeed(NavFeed):

    def __init__(self, items, which, id_, updated, version, offsets, page_url, up_url, title=None):
        NavFeed.__init__(self, id_, updated, version, offsets, page_url, up_url, title=title)
        base_href = self.base_href + '/categorygroup/' + hexlify(which)
        for item in items:
            self.root.append(CATALOG_GROUP_ENTRY(item, which, base_href, version, updated))


class OPDSServer(object):

    def add_routes(self, connect):
        for version in (0, 1):
            base_href = BASE_HREFS[version]
            ver = str(version)
            connect('opds_'+ver, base_href, self.opds, version=version)
            connect('opdst_'+ver, base_href+'/', self.opds, version=version)
            connect('opdsnavcatalog_'+ver, base_href+'/navcatalog/{which}',
                    self.opds_navcatalog, version=version)
            connect('opdscategory_'+ver, base_href+'/category/{category}/{which}',
                    self.opds_category, version=version)
            connect('opdscategorygroup_'+ver, base_href+'/categorygroup/{category}/{which}',
                    self.opds_category_group, version=version)
            connect('opdssearch_'+ver, base_href+'/search/{query}',
                    self.opds_search, version=version)

    def get_opds_allowed_ids_for_version(self, version):
        search = '' if version > 0 else ' or '.join(['format:='+x for x in
                                                     STANZA_FORMATS])
        ids = self.search_cache(search)
        return ids

    def get_opds_acquisition_feed(self, ids, offset, page_url, up_url, id_,
                                  sort_by='title', ascending=True, version=0, feed_title=None):
        idx = self.db.FIELD_MAP['id']
        ids &= self.get_opds_allowed_ids_for_version(version)
        if not ids:
            raise cherrypy.HTTPError(404, 'No books found')
        items = [x for x in self.db.data.iterall() if x[idx] in ids]
        self.sort(items, sort_by, ascending)
        max_items = self.opts.max_opds_items
        offsets = Offsets(offset, max_items, len(items))
        items = items[offsets.offset:offsets.offset+max_items]
        updated = self.db.last_modified()
        cherrypy.response.headers['Last-Modified'] = self.last_modified(updated)
        cherrypy.response.headers['Content-Type'] = 'application/atom+xml;profile=opds-catalog'
        return str(AcquisitionFeed(updated, id_, items, offsets,
                                   page_url, up_url, version, self.db,
                                   self.opts.url_prefix, title=feed_title))

    def opds_search(self, query=None, version=0, offset=0):
        try:
            offset = int(offset)
            version = int(version)
        except:
            raise cherrypy.HTTPError(404, 'Not found')
        if query is None or version not in BASE_HREFS:
            raise cherrypy.HTTPError(404, 'Not found')
        try:
            ids = self.search_cache(query)
        except:
            raise cherrypy.HTTPError(404, 'Search: %r not understood'%query)
        page_url = url_for('opdssearch', version, query=query)
        return self.get_opds_acquisition_feed(ids, offset, page_url,
                url_for('opds', version), 'calibre-search:'+query,
                version=version)

    def get_opds_all_books(self, which, page_url, up_url, version=0, offset=0):
        try:
            offset = int(offset)
            version = int(version)
        except:
            raise cherrypy.HTTPError(404, 'Not found')
        if which not in ('title', 'newest') or version not in BASE_HREFS:
            raise cherrypy.HTTPError(404, 'Not found')
        sort = 'timestamp' if which == 'newest' else 'title'
        ascending = which == 'title'
        feed_title = {'newest':_('Newest'), 'title': _('Title')}.get(which, which)
        feed_title = default_feed_title + ' :: ' + _('By %s') % feed_title
        ids = self.get_opds_allowed_ids_for_version(version)
        return self.get_opds_acquisition_feed(ids, offset, page_url, up_url,
                id_='calibre-all:'+sort, sort_by=sort, ascending=ascending,
                version=version, feed_title=feed_title)

    # Categories {{{

    def opds_category_group(self, category=None, which=None, version=0, offset=0):
        try:
            offset = int(offset)
            version = int(version)
        except:
            raise cherrypy.HTTPError(404, 'Not found')

        if not which or not category or version not in BASE_HREFS:
            raise cherrypy.HTTPError(404, 'Not found')

        categories = self.categories_cache(
                self.get_opds_allowed_ids_for_version(version))
        page_url = url_for('opdscategorygroup', version, category=category, which=which)

        category = unhexlify(category)
        if category not in categories:
            raise cherrypy.HTTPError(404, 'Category %r not found'%which)
        category_meta = self.db.field_metadata
        meta = category_meta.get(category, {})
        category_name = meta.get('name', which)
        which = unhexlify(which)
        feed_title = default_feed_title + ' :: ' + (_('By {0} :: {1}').format(category_name, which))
        owhich = hexlify('N'+which)
        up_url = url_for('opdsnavcatalog', version, which=owhich)
        items = categories[category]

        def belongs(x, which):
            return getattr(x, 'sort', x.name).lower().startswith(which.lower())
        items = [x for x in items if belongs(x, which)]
        if not items:
            raise cherrypy.HTTPError(404, 'No items in group %r:%r'%(category,
                which))
        updated = self.db.last_modified()

        id_ = 'calibre-category-group-feed:'+category+':'+which

        max_items = self.opts.max_opds_items
        offsets = Offsets(offset, max_items, len(items))
        items = list(items)[offsets.offset:offsets.offset+max_items]

        cherrypy.response.headers['Last-Modified'] = self.last_modified(updated)
        cherrypy.response.headers['Content-Type'] = 'application/atom+xml'

        return str(CategoryFeed(items, category, id_, updated, version, offsets,
            page_url, up_url, self.db, title=feed_title))

    def opds_navcatalog(self, which=None, version=0, offset=0):
        try:
            offset = int(offset)
            version = int(version)
        except:
            raise cherrypy.HTTPError(404, 'Not found')

        if not which or version not in BASE_HREFS:
            raise cherrypy.HTTPError(404, 'Not found')
        page_url = url_for('opdsnavcatalog', version, which=which)
        up_url = url_for('opds', version)
        which = unhexlify(which)
        type_ = which[0]
        which = which[1:]
        if type_ == 'O':
            return self.get_opds_all_books(which, page_url, up_url,
                    version=version, offset=offset)
        elif type_ == 'N':
            return self.get_opds_navcatalog(which, page_url, up_url,
                    version=version, offset=offset)
        raise cherrypy.HTTPError(404, 'Not found')

    def get_opds_navcatalog(self, which, page_url, up_url, version=0, offset=0):
        categories = self.categories_cache(
                self.get_opds_allowed_ids_for_version(version))
        if which not in categories:
            raise cherrypy.HTTPError(404, 'Category %r not found'%which)

        items = categories[which]
        updated = self.db.last_modified()
        category_meta = self.db.field_metadata
        meta = category_meta.get(which, {})
        category_name = meta.get('name', which)
        feed_title = default_feed_title + ' :: ' + _('By %s') % category_name

        id_ = 'calibre-category-feed:'+which

        MAX_ITEMS = self.opts.max_opds_ungrouped_items

        if len(items) <= MAX_ITEMS:
            max_items = self.opts.max_opds_items
            offsets = Offsets(offset, max_items, len(items))
            items = list(items)[offsets.offset:offsets.offset+max_items]
            ans = CategoryFeed(items, which, id_, updated, version, offsets,
                page_url, up_url, self.db, title=feed_title)
        else:
            class Group:

                def __init__(self, text, count):
                    self.text, self.count = text, count

            starts = set([])
            for x in items:
                val = getattr(x, 'sort', x.name)
                if not val:
                    val = 'A'
                starts.add(val[0].upper())
            category_groups = OrderedDict()
            for x in sorted(starts, key=sort_key):
                category_groups[x] = len([y for y in items if
                    getattr(y, 'sort', y.name).startswith(x)])
            items = [Group(x, y) for x, y in category_groups.items()]
            max_items = self.opts.max_opds_items
            offsets = Offsets(offset, max_items, len(items))
            items = items[offsets.offset:offsets.offset+max_items]
            ans = CategoryGroupFeed(items, which, id_, updated, version, offsets,
                page_url, up_url, title=feed_title)

        cherrypy.response.headers['Last-Modified'] = self.last_modified(updated)
        cherrypy.response.headers['Content-Type'] = 'application/atom+xml'

        return str(ans)

    def opds_category(self, category=None, which=None, version=0, offset=0):
        try:
            offset = int(offset)
            version = int(version)
        except:
            raise cherrypy.HTTPError(404, 'Not found')

        if not which or not category or version not in BASE_HREFS:
            raise cherrypy.HTTPError(404, 'Not found')
        page_url = url_for('opdscategory', version, which=which,
                category=category)
        up_url = url_for('opdsnavcatalog', version, which=category)

        which, category = unhexlify(which), unhexlify(category)
        type_ = which[0]
        which = which[1:]
        if type_ == 'I':
            try:
                p = which.rindex(':')
                category = which[p+1:]
                which = which[:p]
                # This line will toss an exception for composite columns
                which = int(which[:p])
            except:
                # Might be a composite column, where we have the lookup key
                if not (category in self.db.field_metadata and
                        self.db.field_metadata[category]['datatype'] == 'composite'):
                    raise cherrypy.HTTPError(404, 'Tag %r not found'%which)

        categories = self.categories_cache(
                self.get_opds_allowed_ids_for_version(version))
        if category not in categories:
            raise cherrypy.HTTPError(404, 'Category %r not found'%which)

        if category == 'search':
            try:
                ids = self.search_cache('search:"%s"'%which)
            except:
                raise cherrypy.HTTPError(404, 'Search: %r not understood'%which)
            return self.get_opds_acquisition_feed(ids, offset, page_url,
                    up_url, 'calibre-search:'+which,
                    version=version)

        if type_ != 'I':
            raise cherrypy.HTTPError(404, 'Non id categories not supported')

        q = category
        if q == 'news':
            q = 'tags'
        ids = self.db.get_books_for_category(q, which)
        sort_by = 'series' if category == 'series' else 'title'

        return self.get_opds_acquisition_feed(ids, offset, page_url,
                up_url, 'calibre-category:'+category+':'+str(which),
                version=version, sort_by=sort_by)

    # }}}

    def opds(self, version=0):
        version = int(version)
        if version not in BASE_HREFS:
            raise cherrypy.HTTPError(404, 'Not found')
        categories = self.categories_cache(
                self.get_opds_allowed_ids_for_version(version))
        category_meta = self.db.field_metadata
        cats = [
            (_('Newest'), _('Date'), 'Onewest'),
            (_('Title'), _('Title'), 'Otitle'),
        ]

        def getter(x):
            try:
                return category_meta[x]['name'].lower()
            except KeyError:
                return x
        for category in sorted(categories, key=lambda x: sort_key(getter(x))):
            if len(categories[category]) == 0:
                continue
            if category in ('formats', 'identifiers'):
                continue
            meta = category_meta.get(category, None)
            if meta is None:
                continue
            if category_meta.is_ignorable_field(category) and \
                    category not in custom_fields_to_display(self.db):
                continue
            cats.append((meta['name'], meta['name'], 'N'+category))
        updated = self.db.last_modified()

        cherrypy.response.headers['Last-Modified'] = self.last_modified(updated)
        cherrypy.response.headers['Content-Type'] = 'application/atom+xml'

        feed = TopLevel(updated, cats, version)

        return str(feed)
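Note: a small sketch of the category addressing used by the OPDS routes above; the category name is an example, while the encoding itself (a single-letter prefix, hex-encoded) is what NAVCATALOG_ENTRY and opds_navcatalog implement.

    # hypothetical illustration of the hexlified navcatalog links built above (Python 2)
    import binascii
    category = 'authors'  # example value; any category lookup name works
    href = '/opds/navcatalog/' + binascii.hexlify('N' + category)
    # opds_navcatalog() unhexlifies the path component and dispatches on the
    # leading 'N' (category feed) or 'O' (all books, by title/newest) marker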
@ -1,194 +0,0 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import time, sys, hashlib, binascii, random, os
from urllib import quote as quote_, unquote as unquote_
from functools import wraps

import cherrypy
from cherrypy.lib.auth_digest import digest_auth, get_ha1_dict_plain

from calibre import strftime as _strftime, prints, isbytestring
from calibre.utils.date import now as nowf
from calibre.utils.config import tweaks
from calibre.utils.icu import sort_key


class Offsets(object):
    'Calculate offsets for a paginated view'

    def __init__(self, offset, delta, total):
        if offset < 0:
            offset = 0
        if offset >= total:
            raise cherrypy.HTTPError(404, 'Invalid offset: %r'%offset)
        last_allowed_index = total - 1
        last_current_index = offset + delta - 1
        self.slice_upper_bound = offset+delta
        self.offset = offset
        self.next_offset = last_current_index + 1
        if self.next_offset > last_allowed_index:
            self.next_offset = -1
        self.previous_offset = self.offset - delta
        if self.previous_offset < 0:
            self.previous_offset = 0
        self.last_offset = last_allowed_index - delta
        if self.last_offset < 0:
            self.last_offset = 0


def expose(func):

    @wraps(func)
    def do(*args, **kwargs):
        self = func.im_self
        if self.opts.develop:
            start = time.time()

        dict.update(cherrypy.response.headers, {'Server':self.server_name})
        if not self.embedded:
            self.db.check_if_modified()
        ans = func(*args, **kwargs)
        if self.opts.develop:
            prints('Function', func.__name__, 'called with args:', args, kwargs)
            prints('\tTime:', func.__name__, time.time()-start)
        return ans

    return do


class AuthController(object):

    '''
    Implement Digest authentication for the content server. Android browsers
    cannot handle HTTP AUTH when downloading files, as the download is handed
    off to a separate process. So we use a cookie based authentication scheme
    for some endpoints (/get) to allow downloads to work on android. Apparently,
    cookies are passed to the download process. The cookie expires after
    MAX_AGE seconds.

    The android browser appears to send a GET request to the server and only if
    that request succeeds is the download handed off to the download process.
    Therefore, even if the user clicks Get after MAX_AGE, it should still work.
    In fact, we could reduce MAX_AGE, but we leave it high as the download
    process might have downloads queued and therefore not start the download
    immediately.

    Note that this makes the server vulnerable to session-hijacking (i.e. some
    one can sniff the traffic and create their own requests to /get with the
    appropriate cookie, for an hour). The fix is to use https, but since this
    is usually run as a private server, that cannot be done. If you care about
    this vulnerability, run the server behind a reverse proxy that uses HTTPS.
    '''

    MAX_AGE = 3600  # Number of seconds after a successful digest auth for which
                    # the cookie auth will be allowed

    def __init__(self, realm, users_dict):
        self.realm = realm
        self.users_dict = users_dict
        self.secret = bytes(binascii.hexlify(os.urandom(random.randint(20,
            30))))
        self.cookie_name = 'android_workaround'
        self.key_order = random.choice(('%(t)s:%(s)s', '%(s)s:%(t)s'))

    def hashit(self, raw):
        return hashlib.sha256(raw).hexdigest()

    def __call__(self, func, allow_cookie_auth):

        @wraps(func)
        def authenticate(*args, **kwargs):
            cookie = cherrypy.request.cookie.get(self.cookie_name, None)

            if not (allow_cookie_auth and self.is_valid(cookie)):
                digest_auth(self.realm, get_ha1_dict_plain(self.users_dict),
                            self.secret)

            cookie = cherrypy.response.cookie
            cookie[self.cookie_name] = self.generate_cookie()
            cookie[self.cookie_name]['path'] = '/'
            cookie[self.cookie_name]['version'] = '1'

            return func(*args, **kwargs)

        authenticate.im_self = func.im_self
        return authenticate

    def generate_cookie(self, timestamp=None):
        '''
        Generate a cookie. The cookie contains a plain text timestamp and a
        hash of the timestamp and the server secret.
        '''
        timestamp = int(time.time()) if timestamp is None else timestamp
        key = self.hashit(self.key_order%dict(t=timestamp, s=self.secret))
        return '%d:%s'%(timestamp, key)

    def is_valid(self, cookie):
        '''
        Check that cookie has not been spoofed (i.e. verify the declared
        timestamp against the hashed timestamp). If the timestamps match, check
        that the cookie has not expired. Return True iff the cookie has not
        been spoofed and has not expired.
        '''
        try:
            timestamp, hashpart = cookie.value.split(':', 1)
            timestamp = int(timestamp)
        except:
            return False
        s_timestamp, s_hashpart = self.generate_cookie(timestamp).split(':', 1)
        is_valid = s_hashpart == hashpart
        return (is_valid and (time.time() - timestamp) < self.MAX_AGE)


def strftime(fmt='%Y/%m/%d %H:%M:%S', dt=None):
    if not hasattr(dt, 'timetuple'):
        dt = nowf()
    dt = dt.timetuple()
    try:
        return _strftime(fmt, dt)
    except:
        return _strftime(fmt, nowf().timetuple())


def format_tag_string(tags, sep, ignore_max=False, no_tag_count=False, joinval=', '):
    MAX = sys.maxint if ignore_max else tweaks['max_content_server_tags_shown']
    if tags:
        tlist = [t.strip() for t in tags.split(sep)]
    else:
        tlist = []
    tlist.sort(key=sort_key)
    if len(tlist) > MAX:
        tlist = tlist[:MAX]+['...']
    if no_tag_count:
        return joinval.join(tlist) if tlist else ''
    else:
        return u'%s:&:%s'%(tweaks['max_content_server_tags_shown'],
                           joinval.join(tlist)) if tlist else ''


def quote(s):
    if isinstance(s, unicode):
        s = s.encode('utf-8')
    return quote_(s)


def unquote(s):
    ans = unquote_(s)
    if isbytestring(ans):
        ans = ans.decode('utf-8')
    return ans


def cookie_time_fmt(time_t):
    return time.strftime('%a, %d-%b-%Y %H:%M:%S GMT', time_t)


def cookie_max_age_to_expires(max_age):
    gmt_expiration_time = time.gmtime(time.time() + max_age)
    return cookie_time_fmt(gmt_expiration_time)
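Note: a quick illustration of the pagination arithmetic in the Offsets class above. The numbers are arbitrary examples, and the snippet assumes the class definition above is importable (cherrypy is only needed for its invalid-offset error path).

    # hypothetical usage of the Offsets helper defined above (Python 2)
    o = Offsets(offset=50, delta=25, total=120)
    # o.offset == 50, o.slice_upper_bound == 75, o.next_offset == 75,
    # o.previous_offset == 25, o.last_offset == 94
    # next_offset is set to -1 once offset + delta moves past the last index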
@ -1,149 +0,0 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import __builtin__

import cherrypy
from lxml.builder import ElementMaker
from lxml import etree

from calibre.library.server import custom_fields_to_display
from calibre.library.server.utils import strftime, format_tag_string
from calibre.ebooks.metadata import fmt_sidx
from calibre.constants import preferred_encoding
from calibre import isbytestring
from calibre.utils.filenames import ascii_filename
from calibre.utils.icu import sort_key

E = ElementMaker()


class XMLServer(object):
    'Serves XML and the Ajax based HTML frontend'

    def add_routes(self, connect):
        connect('xml', '/xml', self.xml)

    def xml(self, start='0', num='50', sort=None, search=None,
            _=None, order='ascending'):
        '''
        Serves metadata from the calibre database as XML.

        :param sort: Sort results by ``sort``. Can be one of `title,author,rating`.
        :param search: Filter results by ``search`` query. See :class:`SearchQueryParser` for query syntax
        :param start,num: Return the slice `[start:start+num]` of the sorted and filtered results
        :param _: Firefox seems to sometimes send this when using XMLHttpRequest with no caching
        '''
        try:
            start = int(start)
        except ValueError:
            raise cherrypy.HTTPError(400, 'start: %s is not an integer'%start)
        try:
            num = int(num)
        except ValueError:
            raise cherrypy.HTTPError(400, 'num: %s is not an integer'%num)

        order = order.lower().strip() == 'ascending'

        if not search:
            search = ''
        if isbytestring(search):
            search = search.decode('UTF-8')

        ids = self.search_for_books(search)

        FM = self.db.FIELD_MAP

        items = [r for r in iter(self.db) if r[FM['id']] in ids]
        if sort is not None:
            self.sort(items, sort, order)

        books = []

        def serialize(x):
            if isinstance(x, unicode):
                return x
            if isbytestring(x):
                return x.decode(preferred_encoding, 'replace')
            return unicode(x)

        # This method uses its own book dict, not the Metadata dict. The loop
        # below could be changed to use db.get_metadata instead of reading
        # info directly from the record made by the view, but it doesn't seem
        # worth it at the moment.
        for record in items[start:start+num]:
            kwargs = {}
            aus = record[FM['authors']] if record[FM['authors']] else __builtin__._('Unknown')
            authors = '|'.join([i.replace('|', ',') for i in aus.split(',')])
            kwargs['authors'] = authors

            kwargs['series_index'] = \
                fmt_sidx(float(record[FM['series_index']]))

            for x in ('timestamp', 'pubdate'):
                kwargs[x] = strftime('%Y/%m/%d %H:%M:%S', record[FM[x]])

            for x in ('id', 'title', 'sort', 'author_sort', 'rating', 'size'):
                kwargs[x] = serialize(record[FM[x]])

            for x in ('formats', 'series', 'tags', 'publisher',
                      'comments', 'identifiers'):
                y = record[FM[x]]
                if x == 'tags':
                    y = format_tag_string(y, ',', ignore_max=True)
                kwargs[x] = serialize(y) if y else ''

            isbn = self.db.isbn(record[FM['id']], index_is_id=True)
            kwargs['isbn'] = serialize(isbn if isbn else '')

            kwargs['safe_title'] = ascii_filename(kwargs['title'])

            c = kwargs.pop('comments')

            CFM = self.db.field_metadata
            CKEYS = [key for key in sorted(custom_fields_to_display(self.db),
                                           key=lambda x: sort_key(CFM[x]['name']))]
            custcols = []
            for key in CKEYS:
                def concat(name, val):
                    return '%s:#:%s'%(name, unicode(val))
                mi = self.db.get_metadata(record[CFM['id']['rec_index']], index_is_id=True)
                name, val = mi.format_field(key)
                if not val:
                    continue
                datatype = CFM[key]['datatype']
                if datatype in ['comments']:
                    continue
                k = str('CF_'+key[1:])
                name = CFM[key]['name']
                custcols.append(k)
                if datatype == 'text' and CFM[key]['is_multiple']:
                    kwargs[k] = \
                        concat('#T#'+name,
                               format_tag_string(val,
                                   CFM[key]['is_multiple']['ui_to_list'],
                                   ignore_max=True,
                                   joinval=CFM[key]['is_multiple']['list_to_ui']))
                else:
                    kwargs[k] = concat(name, val)
            kwargs['custcols'] = ','.join(custcols)
            books.append(E.book(c, **kwargs))

        updated = self.db.last_modified()
        kwargs = dict(
            start=str(start),
            updated=updated.strftime('%Y-%m-%dT%H:%M:%S+00:00'),
            total=str(len(ids)),
            num=str(len(books)))
        ans = E.library(*books, **kwargs)

        cherrypy.response.headers['Content-Type'] = 'text/xml'
        cherrypy.response.headers['Last-Modified'] = self.last_modified(updated)

        return etree.tostring(ans, encoding='utf-8', pretty_print=True,
                              xml_declaration=True)
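Note: a minimal sketch of consuming the /xml feed produced above. The attribute names come from the handler; the host and port are assumptions.

    # hypothetical consumer of the <library> document built by XMLServer.xml() (Python 2)
    import urllib2
    from lxml import etree
    raw = urllib2.urlopen('http://localhost:8080/xml?start=0&num=10&sort=title').read()
    root = etree.fromstring(raw)  # <library total=... num=...> wrapping <book> elements
    for book in root:
        print book.get('title'), book.get('authors'), book.get('size')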
@ -1,3 +1,3 @@
Rewrite server integration with nginx/apache section

Grep for from calibre.library.server and port all code that uses it
Grep for from library.server and port all code that uses it