Merge from custcol trunk
Binary image changed: 17 KiB -> 158 KiB
Binary image changed: 25 KiB -> 19 KiB
Binary image changed: 32 KiB -> 57 KiB
@@ -11,7 +11,7 @@
 </head>
 <body>
 <div id="banner">
-<a style="border: 0pt" href="http://calibre-ebook.com" alt="calibre" title="calibre"><img style="border:0pt" src="/static/calibre.png" alt="calibre" /></a>
+<a style="border: 0pt" href="http://calibre-ebook.com" alt="calibre" title="calibre"><img style="border:0pt" src="/static/calibre_banner.png" alt="calibre" /></a>
 </div>
 
 <div id="search_box">
Binary image changed: 124 KiB -> 224 KiB
Binary image changed: 3.8 KiB -> 4.1 KiB
resources/recipes/infomotori.recipe (new file, 57 lines)
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+__license__   = 'GPL v3'
+__author__    = 'Gabriele Marini, based on Darko Miletic'
+__copyright__ = '2009-2010, Darko Miletic <darko.miletic at gmail.com>'
+description   = 'On Line Motor News - 01-05-2010'
+
+'''
+http://www.infomotori.it/
+'''
+from calibre.ebooks.BeautifulSoup import BeautifulSoup
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class infomotori(BasicNewsRecipe):
+    author        = 'Gabriele Marini'
+    title         = u'Infomotori'
+    cover         = 'http://www.infomotori.com/content/files/anniversario_01.gif'
+    oldest_article = 31
+    max_articles_per_feed = 100
+    recursion = 100
+    use_embedded_content = False
+
+    language = 'it'
+    use_embedded_content = False
+    remove_javascript = True
+    no_stylesheets = True
+    language = 'it'
+    timefmt = '[%a, %d %b, %Y]'
+
+
+    def print_version(self, url):
+        raw = self.browser.open(url).read()
+        soup = BeautifulSoup(raw.decode('utf8', 'replace'))
+        print_link = soup.find('a', {'class':'printarticle'})
+
+        '''if print_link is None:
+
+        keep_only_tags = [ dict(name='div', attrs={'class':['article main-column-article photogallery-column','category-header','article-body']})
+                         ]
+        remove_tags = [ dict(name='div', attrs={'class':['thumbnails-article','infoflash-footer','imushortarticle']}),
+                        dict(name='div', attrs={'id':['linkinviastampa','linkspazioblu','altriarticoli','articoliconcorrenti','articolicorrelati','boxbrand']}),
+                        dict(name='table', attrs={'class':'article-page'})
+                      ]
+
+        remove_tags_after = [ dict(name='div', attrs={'id':'articlebody'})
+                            ]
+            return url
+        '''
+        return print_link['href']
+
+    feeds = [(u'Ultime Novit\xe0', u'http://feeds.infomotori.com/ultimenovita'),
+             (u'Auto: Ultime Novit\xe0 ', u'http://feeds.infomotori.com/autonovita'),
+             (u'Moto: Ultime Novit\xe0 Moto', u'http://feeds.infomotori.com/motonovita'),
+             (u'Notizie Flash', u'http://feeds.infomotori.com/infoflashmotori'),
+             (u'Veicoli Ecologici e Mobilit\xe0 Sostenibile', u'http://feeds.infomotori.com/ecomotori'),
+             (u'4x4 Fuoristrada, Crossover e Suv', u'http://feeds.infomotori.com/fuoristrada'),
+             (u'Shopping Motori', u'http://feeds.infomotori.com/shoppingmotori')
+            ]
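The recipe's print_version hook above fetches each article page and follows the anchor with class printarticle to its printer-friendly version, returning print_link['href'] without guarding against a missing link. A small standalone sketch of that lookup, assuming a calibre install for its bundled BeautifulSoup; the sample markup and the fallback to the original URL are illustrative additions, not part of the recipe:

from calibre.ebooks.BeautifulSoup import BeautifulSoup

def resolve_print_url(raw_html, original_url):
    # Same lookup print_version performs; falling back to original_url when the
    # print link is absent is an assumption added here for illustration.
    soup = BeautifulSoup(raw_html.decode('utf8', 'replace'))
    print_link = soup.find('a', {'class': 'printarticle'})
    if print_link is None:
        return original_url
    return print_link['href']

sample = '<p><a class="printarticle" href="/stampa/12345">Versione stampabile</a></p>'
print(resolve_print_url(sample, 'http://www.infomotori.it/news/12345'))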
@@ -88,24 +88,6 @@ CALIBRE_METADATA_FIELDS = frozenset([
     ]
 )
 
-CALIBRE_RESERVED_LABELS = frozenset([
-    ]
-)
-
-RESERVED_METADATA_FIELDS = SOCIAL_METADATA_FIELDS.union(
-        PUBLICATION_METADATA_FIELDS).union(
-        BOOK_STRUCTURE_FIELDS).union(
-        USER_METADATA_FIELDS).union(
-        DEVICE_METADATA_FIELDS).union(
-        CALIBRE_METADATA_FIELDS).union(
-        CALIBRE_RESERVED_LABELS)
-
-assert len(RESERVED_METADATA_FIELDS) == sum(map(len, (
-        SOCIAL_METADATA_FIELDS, PUBLICATION_METADATA_FIELDS,
-        BOOK_STRUCTURE_FIELDS, USER_METADATA_FIELDS,
-        DEVICE_METADATA_FIELDS, CALIBRE_METADATA_FIELDS,
-        CALIBRE_RESERVED_LABELS
-    )))
-
 SERIALIZABLE_FIELDS = SOCIAL_METADATA_FIELDS.union(
         USER_METADATA_FIELDS).union(
@@ -68,13 +68,6 @@ class Metadata(object):
             # Don't abuse this privilege
             self.__dict__[field] = val
 
-    @property
-    def reserved_names(self):
-        'The set of names you cannot use for your own purposes on this object'
-        _data = object.__getattribute__(self, '_data')
-        return frozenset(RESERVED_FIELD_NAMES).union(frozenset(
-            _data['user_metadata'].iterkeys()))
-
     @property
     def user_metadata_names(self):
         'The set of user metadata names this object knows about'
@@ -120,10 +113,8 @@ class Metadata(object):
 
     # }}}
 
-_m = Metadata()
-RESERVED_FIELD_NAMES = \
-    frozenset(_m.__dict__.iterkeys()).union(       # _data
-    RESERVED_METADATA_FIELDS).union(
-    frozenset(Metadata.__dict__.iterkeys()))       # methods defined in Metadata
-del _m
+# We don't need reserved field names for this object any more. Lets just use a
+# protocol like the last char of a user field label should be _ when using this
+# object
+# So mi.tags returns the builtin tags and mi.tags_ returns the user tags
 
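The new comment describes a naming protocol rather than code: built-in fields keep their plain names, while user (custom column) fields are addressed with a trailing underscore. A toy sketch of that convention, with made-up field names and storage that does not mirror calibre's real Metadata class:

class SimpleBook(object):
    # Illustrative only: built-in fields are plain attributes, user fields live
    # in a dict and are reached through the trailing-underscore names.
    def __init__(self):
        self.tags = ['fiction']            # built-in field
        self._user = {'tags_': ['mine']}   # user/custom field, note the final '_'

    def __getattr__(self, name):
        # Called only for names not found by normal attribute lookup, e.g. 'tags_'
        try:
            return self._user[name]
        except KeyError:
            raise AttributeError(name)

mi = SimpleBook()
print(mi.tags)    # -> ['fiction']  (built-in tags)
print(mi.tags_)   # -> ['mine']     (user tags)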
@@ -10,7 +10,6 @@ from PyQt4.Qt import QDialog, Qt, QListWidgetItem, QVariant
 
 from calibre.gui2.dialogs.config.create_custom_column_ui import Ui_QCreateCustomColumn
 from calibre.gui2 import error_dialog
-from calibre.ebooks.metadata.book.base import RESERVED_FIELD_NAMES
 
 class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
 
@@ -103,14 +102,10 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
             return self.simple_error('', _('No lookup name was provided'))
         if not col_heading:
             return self.simple_error('', _('No column heading was provided'))
-        if col in RESERVED_FIELD_NAMES:
-            return self.simple_error('', _('The lookup name %s is reserved and cannot be used')%col)
         bad_col = False
         if col in self.parent.custcols:
             if not self.editing_col or self.parent.custcols[col]['num'] != self.orig_column_number:
                 bad_col = True
-        if col in self.standard_colnames:
-            bad_col = True
         if bad_col:
             return self.simple_error('', _('The lookup name %s is already used')%col)
         bad_head = False
@@ -778,11 +778,11 @@ class OnDeviceSearch(SearchQueryParser): # {{{
        q = {
             'title' : lambda x : getattr(x, 'title').lower(),
             'author': lambda x: ' & '.join(getattr(x, 'authors')).lower(),
-            'authors': lambda x: ' & '.join(getattr(x, 'authors')).lower(),
             'collections':lambda x: ','.join(getattr(x, 'device_collections')).lower(),
             'format':lambda x: os.path.splitext(x.path)[1].lower(),
-            'formats':lambda x: os.path.splitext(x.path)[1].lower()
             }
+        for x in ('author', 'format'):
+            q[x+'s'] = q[x]
        for index, row in enumerate(self.model.db):
            for locvalue in locations:
                accessor = q[locvalue]
@@ -81,7 +81,7 @@ class KindleDX(Kindle):
 class Sony505(Device):
 
     output_profile = 'sony'
-    name = 'SONY Reader 6" and Touch Editions'
+    name = 'All other SONY devices'
     output_format = 'EPUB'
     manufacturer = 'SONY'
     id = 'prs505'
@@ -195,6 +195,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
             lines.append(line)
 
         custom_map = self.custom_columns_in_meta()
+        # custom col labels are numbers (the id in the custom_columns table)
         custom_cols = list(sorted(custom_map.keys()))
         lines.extend([custom_map[x] for x in custom_cols])
 
@@ -133,9 +133,10 @@ class FieldMetadata(dict, DictMixin):
         self._tb_cats = OrderedDict()
         for k,v in self.category_items_:
             self._tb_cats[k] = v
-        self._custom_fields = []
         self.custom_field_prefix = '#'
 
+        self.get = self._tb_cats.get
+
     def __getitem__(self, key):
         return self._tb_cats[key]
 
@@ -161,7 +162,7 @@ class FieldMetadata(dict, DictMixin):
             yield (key, self._tb_cats[key])
 
     def is_custom_field(self, key):
-        return key.startswith(self.custom_field_prefix) or key in self._custom_fields
+        return key.startswith(self.custom_field_prefix)
 
     def get_field_label(self, key):
         if 'label' not in self._tb_cats[key]:
@@ -183,7 +184,6 @@ class FieldMetadata(dict, DictMixin):
         fn = self.custom_field_prefix + label
         if fn in self._tb_cats:
             raise ValueError('Duplicate custom field [%s]'%(label))
-        self._custom_fields.append(label)
         if searchable:
             sl = [fn]
             kind = 'standard'
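With the _custom_fields list gone, FieldMetadata recognises a custom column purely by the lookup-name prefix. A minimal standalone sketch of that test; the example keys are made up:

CUSTOM_FIELD_PREFIX = '#'   # mirrors self.custom_field_prefix above

def is_custom_field(key):
    # Prefix check only -- no membership list to keep in sync any more.
    return key.startswith(CUSTOM_FIELD_PREFIX)

print(is_custom_field('#genre'))   # True  -> custom column lookup name
print(is_custom_field('tags'))     # False -> built-in field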
@@ -40,3 +40,6 @@ def server_config(defaults=None):
               'This affects Stanza, WordPlayer, etc. integration.'))
     return c
 
+def main():
+    from calibre.library.server.main import main
+    return main()
@@ -12,6 +12,7 @@ from itertools import repeat
 from lxml import etree, html
 from lxml.builder import ElementMaker
 import cherrypy
+import routes
 
 from calibre.constants import __appname__
 from calibre.ebooks.metadata import fmt_sidx
@@ -25,6 +26,11 @@ BASE_HREFS = {
 
 STANZA_FORMATS = frozenset(['epub', 'pdb'])
 
+def url_for(name, version, **kwargs):
+    if not name.endswith('_'):
+        name += '_'
+    return routes.url_for(name+str(version), **kwargs)
+
 # Vocabulary for building OPDS feeds {{{
 E = ElementMaker(namespace='http://www.w3.org/2005/Atom',
         nsmap={
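A toy sketch of the naming scheme the new url_for helper relies on: the routes registered in OPDSServer.add_routes further down carry a version suffix in their names, and url_for appends that suffix before asking the mapper for the URL. The dict below stands in for the real routes mapper, and the '/stanza' and '/opds' paths are assumed values of BASE_HREFS:

ROUTES = {'opds_0': '/stanza', 'opds_1': '/opds'}   # stand-in for routes.url_for

def url_for(name, version, **kwargs):
    if not name.endswith('_'):
        name += '_'
    return ROUTES[name + str(version)]

print(url_for('opds', 0))   # -> '/stanza'  (version 0, Stanza feed)
print(url_for('opds', 1))   # -> '/opds'    (version 1, OPDS feed)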
@@ -42,7 +48,7 @@ def UPDATED(dt, *args, **kwargs):
     return E.updated(dt.strftime('%Y-%m-%dT%H:%M:%S+00:00'), *args, **kwargs)
 
 LINK = partial(E.link, type='application/atom+xml')
-NAVLINK = partial(E.link, rel='subsection',
+NAVLINK = partial(E.link,
         type='application/atom+xml;type=feed;profile=opds-catalog')
 
 def SEARCH_LINK(base_href, *args, **kwargs):
@@ -59,7 +65,7 @@ def AUTHOR(name, uri=None):
 
 SUBTITLE = E.subtitle
 
-def NAVCATALOG_ENTRY(base_href, updated, title, description, query):
+def NAVCATALOG_ENTRY(base_href, updated, title, description, query, version=0):
     href = base_href+'/navcatalog/'+binascii.hexlify(query)
     id_ = 'calibre-navcatalog:'+str(hashlib.sha1(href).hexdigest())
     return E.entry(
@@ -74,7 +80,7 @@ START_LINK = partial(NAVLINK, rel='start')
 UP_LINK = partial(NAVLINK, rel='up')
 FIRST_LINK = partial(NAVLINK, rel='first')
 LAST_LINK = partial(NAVLINK, rel='last')
-NEXT_LINK = partial(NAVLINK, rel='next')
+NEXT_LINK = partial(NAVLINK, rel='next', title='Next')
 PREVIOUS_LINK = partial(NAVLINK, rel='previous')
 
 def html_to_lxml(raw):
@@ -117,7 +123,7 @@ def ACQUISITION_ENTRY(item, version, FM, updated):
     id_ = 'urn:%s:%s'%(idm, item[FM['uuid']])
     ans = E.entry(TITLE(title), E.author(E.name(authors)), ID(id_),
             UPDATED(updated))
-    if extra:
+    if len(extra):
         ans.append(E.content(extra, type='xhtml'))
     formats = item[FM['formats']]
     if formats:
@@ -148,7 +154,7 @@ class Feed(object): # {{{
             title=__appname__ + ' ' + _('Library'),
             up_link=None, first_link=None, last_link=None,
             next_link=None, previous_link=None):
-        self.base_href = BASE_HREFS[version]
+        self.base_href = url_for('opds', version)
 
         self.root = \
             FEED(
@@ -157,18 +163,18 @@ class Feed(object): # {{{
                 ID(id_),
                 UPDATED(updated),
                 SEARCH_LINK(self.base_href),
-                START_LINK(self.base_href)
+                START_LINK(href=self.base_href)
             )
         if up_link:
-            self.root.append(UP_LINK(up_link))
+            self.root.append(UP_LINK(href=up_link))
         if first_link:
-            self.root.append(FIRST_LINK(first_link))
+            self.root.append(FIRST_LINK(href=first_link))
         if last_link:
-            self.root.append(LAST_LINK(last_link))
+            self.root.append(LAST_LINK(href=last_link))
         if next_link:
-            self.root.append(NEXT_LINK(next_link))
+            self.root.append(NEXT_LINK(href=next_link))
         if previous_link:
-            self.root.append(PREVIOUS_LINK(previous_link))
+            self.root.append(PREVIOUS_LINK(href=previous_link))
         if subtitle:
             self.root.insert(1, SUBTITLE(subtitle))
 
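The switch to href=... keyword calls matters because of how lxml's ElementMaker treats its arguments: positional values become element content, keyword values become attributes. A small standalone illustration (not calibre code, just the same partial pattern):

from functools import partial
from lxml import etree
from lxml.builder import ElementMaker

E = ElementMaker()
NAVLINK = partial(E.link, type='application/atom+xml;type=feed;profile=opds-catalog')
START_LINK = partial(NAVLINK, rel='start')

# Positional: the URL ends up as text content of <link>.
print(etree.tostring(START_LINK('/opds')))
# Keyword: the URL becomes the href attribute, which is what an OPDS client needs.
print(etree.tostring(START_LINK(href='/opds')))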
@@ -188,7 +194,8 @@ class TopLevel(Feed): # {{{
             ):
         Feed.__init__(self, id_, updated, version, subtitle=subtitle)
 
-        subc = partial(NAVCATALOG_ENTRY, self.base_href, updated)
+        subc = partial(NAVCATALOG_ENTRY, self.base_href, updated,
+                version=version)
         subcatalogs = [subc(_('By ')+title,
             _('Books sorted by ') + desc, q) for title, desc, q in
             categories]
@@ -206,7 +213,7 @@ class NavFeed(Feed):
             kwargs['previous_link'] = \
                 page_url+'?offset=%d'%offsets.previous_offset
         if offsets.next_offset > -1:
-            kwargs['next_offset'] = \
+            kwargs['next_link'] = \
                 page_url+'?offset=%d'%offsets.next_offset
         Feed.__init__(self, id_, updated, version, **kwargs)
 
@@ -226,16 +233,16 @@ class OPDSOffsets(object):
             offset = 0
         if offset >= total:
             raise cherrypy.HTTPError(404, 'Invalid offset: %r'%offset)
+        last_allowed_index = total - 1
+        last_current_index = offset + delta - 1
         self.offset = offset
-        self.next_offset = offset + delta
-        if self.next_offset >= total:
-            self.next_offset = -1
-        if self.next_offset >= total:
+        self.next_offset = last_current_index + 1
+        if self.next_offset > last_allowed_index:
             self.next_offset = -1
         self.previous_offset = self.offset - delta
         if self.previous_offset < 0:
             self.previous_offset = 0
-        self.last_offset = total - delta
+        self.last_offset = last_allowed_index - delta
         if self.last_offset < 0:
             self.last_offset = 0
 
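The reworked offset arithmetic is easy to sanity-check in isolation. A standalone sketch of the same calculation with the cherrypy error handling left out; the argument names mirror the constructor:

def compute_offsets(offset, delta, total):
    # Mirrors OPDSOffsets above: -1 for next_offset means there is no next page.
    last_allowed_index = total - 1
    last_current_index = offset + delta - 1
    next_offset = last_current_index + 1
    if next_offset > last_allowed_index:
        next_offset = -1
    previous_offset = max(offset - delta, 0)
    last_offset = max(last_allowed_index - delta, 0)
    return next_offset, previous_offset, last_offset

# 100 items, 25 per page, currently at offset 50:
print(compute_offsets(50, 25, 100))   # -> (75, 25, 74)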
@@ -243,13 +250,13 @@
 class OPDSServer(object):
 
     def add_routes(self, connect):
-        for base in ('stanza', 'opds'):
-            version = 0 if base == 'stanza' else 1
+        for version in (0, 1):
             base_href = BASE_HREFS[version]
-            connect(base, base_href, self.opds, version=version)
-            connect('opdsnavcatalog_'+base, base_href+'/navcatalog/{which}',
+            ver = str(version)
+            connect('opds_'+ver, base_href, self.opds, version=version)
+            connect('opdsnavcatalog_'+ver, base_href+'/navcatalog/{which}',
                     self.opds_navcatalog, version=version)
-            connect('opdssearch_'+base, base_href+'/search/{query}',
+            connect('opdssearch_'+ver, base_href+'/search/{query}',
                     self.opds_search, version=version)
 
     def get_opds_allowed_ids_for_version(self, version):
@@ -266,7 +273,7 @@ class OPDSServer(object):
         self.sort(items, sort_by, ascending)
         max_items = self.opts.max_opds_items
         offsets = OPDSOffsets(offset, max_items, len(items))
-        items = items[offsets.offset:offsets.next_offset]
+        items = items[offsets.offset:offsets.offset+max_items]
         return str(AcquisitionFeed(self.db.last_modified(), id_, items, offsets,
             page_url, up_url, version, self.db.FIELD_MAP))
 
@@ -282,19 +289,38 @@ class OPDSServer(object):
             ids = self.search_cache(query)
         except:
             raise cherrypy.HTTPError(404, 'Search: %r not understood'%query)
-        return self.get_opds_acquisition_feed(ids, offset, '/search/'+query,
-                BASE_HREFS[version], 'calibre-search:'+query,
+        page_url = url_for('opdssearch', version, query=query)
+        return self.get_opds_acquisition_feed(ids, offset, page_url,
+                url_for('opds', version), 'calibre-search:'+query,
                 version=version)
 
-    def opds_navcatalog(self, which=None, version=0):
+    def get_opds_all_books(self, which, page_url, up_url, version=0, offset=0):
+        try:
+            offset = int(offset)
+            version = int(version)
+        except:
+            raise cherrypy.HTTPError(404, 'Not found')
+        if which not in ('title', 'newest') or version not in BASE_HREFS:
+            raise cherrypy.HTTPError(404, 'Not found')
+        sort = 'timestamp' if which == 'newest' else 'title'
+        ascending = which == 'title'
+        ids = self.get_opds_allowed_ids_for_version(version)
+        return self.get_opds_acquisition_feed(ids, offset, page_url, up_url,
+                id_='calibre-all:'+sort, sort_by=sort, ascending=ascending,
+                version=version)
+
+    def opds_navcatalog(self, which=None, version=0, offset=0):
         version = int(version)
         if not which or version not in BASE_HREFS:
             raise cherrypy.HTTPError(404, 'Not found')
+        page_url = url_for('opdsnavcatalog', version, which=which)
+        up_url = url_for('opds', version)
         which = binascii.unhexlify(which)
         type_ = which[0]
         which = which[1:]
         if type_ == 'O':
-            return self.get_opds_all_books(which)
+            return self.get_opds_all_books(which, page_url, up_url,
+                    version=version, offset=offset)
         elif type_ == 'N':
             return self.get_opds_navcatalog(which)
         raise cherrypy.HTTPError(404, 'Not found')
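For reference, the navcatalog token decoded at the end of opds_navcatalog is the hex-encoded query built by NAVCATALOG_ENTRY: a one-letter type code ('O' for the sorted all-books catalogs, 'N' for the other navigation catalogs) followed by its payload. A standalone sketch of that round trip (Python 2, matching the code above; 'newest' is one of the payloads get_opds_all_books accepts):

import binascii

token = binascii.hexlify('Onewest')   # what NAVCATALOG_ENTRY embeds in .../navcatalog/<token>
which = binascii.unhexlify(token)     # -> 'Onewest'
type_, which = which[0], which[1:]
print(token)                          # '4f6e6577657374'
print((type_, which))                 # ('O', 'newest') -> all books sorted by newest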