mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-07 10:14:46 -04:00)

0.9.14+; KG added uuid to OPF

This commit is contained in: commit c288c5afce
@@ -5,7 +5,7 @@
# Also, each release can have new and improved recipes.

# - version: ?.?.?
#   date: 2012-??-??
#   date: 2013-??-??
#
#   new features:
#   - title:

@@ -20,13 +20,13 @@
#   - title:

- version: 0.9.14
  date: 2012-01-11
  date: 2013-01-11

  new features:
    - title: "When adding multiple books and duplicates are found, allow the user to select which of the duplicate books will be added anyway."
      tickets: [1095256]

    - title: "Device drivers for Kobo Arc on linux, Polaroid Abdroid tablet"
    - title: "Device drivers for Kobo Arc on linux, Polaroid Android tablet"
      tickets: [1098049]

    - title: "When sorting by series, use the language of the book to decide what leading articles to remove, just as is done for sorting by title"
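Note: the last changelog entry above describes language-aware removal of leading articles when building the series sort key. As a rough illustration of the idea only (the article table and function below are hypothetical, not calibre's actual code):

    # Hypothetical sketch: pick the articles to strip by book language,
    # then drop a matching leading article before sorting.
    LEADING_ARTICLES = {
        'eng': ('a ', 'an ', 'the '),
        'deu': ('der ', 'die ', 'das ', 'ein ', 'eine '),
        'fra': ('le ', 'la ', 'les ', 'un ', 'une '),
    }

    def series_sort_key(series, lang='eng'):
        s = series.strip().lower()
        for article in LEADING_ARTICLES.get(lang, ()):
            if s.startswith(article):
                return s[len(article):]
        return s

    # e.g. series_sort_key('Der Schwarm', 'deu') == 'schwarm'
    #      series_sort_key('The Expanse', 'eng') == 'expanse'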
@@ -437,10 +437,10 @@ that allows you to create collections on your Kindle from the |app| metadata. It

.. note:: Amazon have removed the ability to manipulate collections completely in their newer models, like the Kindle Touch and Kindle Fire, making even the above plugin useless. If you really want the ability to manage collections on your Kindle via a USB connection, we encourage you to complain to Amazon about it, or get a reader where this is supported, like the SONY or Kobo Readers.

I am getting an error when I try to use |app| with my Kobo Touch?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
I am getting an error when I try to use |app| with my Kobo Touch/Glo/etc.?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The Kobo Touch has very buggy firmware. Connecting to it has been known to fail at random. Certain combinations of motherboard, USB ports/cables/hubs can exacerbate this tendency to fail. If you are getting an error when connecting to your touch with |app| try the following, each of which has solved the problem for *some* |app| users.
The Kobo has very buggy firmware. Connecting to it has been known to fail at random. Certain combinations of motherboard, USB ports/cables/hubs can exacerbate this tendency to fail. If you are getting an error when connecting to your touch with |app| try the following, each of which has solved the problem for *some* |app| users.

* Connect the Kobo directly to your computer, not via USB Hub
* Try a different USB cable and a different USB port on your computer
@@ -1,224 +0,0 @@
#!/usr/bin/env python
##
## Title: Microwave and RF
##
## License: GNU General Public License v3 - http://www.gnu.org/copyleft/gpl.html

# Feb 2012: Initial release

__license__ = 'GNU General Public License v3 - http://www.gnu.org/copyleft/gpl.html'
'''
mwrf.com
'''

import re
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.utils.magick import Image

class Microwaves_and_RF(BasicNewsRecipe):

    Convert_Grayscale = False  # Convert images to gray scale or not

    # Add sections that want to be excluded from the magazine
    exclude_sections = []

    # Add sections that want to be included from the magazine
    include_sections = []

    title = u'Microwaves and RF'
    __author__ = u'kiavash'
    description = u'Microwaves and RF Montly Magazine'
    publisher = 'Penton Media, Inc.'
    publication_type = 'magazine'
    site = 'http://mwrf.com'

    language = 'en'
    asciiize = True
    timeout = 120
    simultaneous_downloads = 1  # very peaky site!

    # Main article is inside this tag
    keep_only_tags = [dict(name='table', attrs={'id':'prtContent'})]

    no_stylesheets = True
    remove_javascript = True

    # Flattens all the tables to make it compatible with Nook
    conversion_options = {'linearize_tables' : True}

    remove_tags = [
        dict(name='span', attrs={'class':'body12'}),
    ]

    remove_attributes = [ 'border', 'cellspacing', 'align', 'cellpadding', 'colspan',
                          'valign', 'vspace', 'hspace', 'alt', 'width', 'height' ]

    # Specify extra CSS - overrides ALL other CSS (IE. Added last).
    extra_css = 'body { font-family: verdana, helvetica, sans-serif; } \
                 .introduction, .first { font-weight: bold; } \
                 .cross-head { font-weight: bold; font-size: 125%; } \
                 .cap, .caption { display: block; font-size: 80%; font-style: italic; } \
                 .cap, .caption, .caption img, .caption span { display: block; margin: 5px auto; } \
                 .byl, .byd, .byline img, .byline-name, .byline-title, .author-name, .author-position, \
                 .correspondent-portrait img, .byline-lead-in, .name, .bbc-role { display: block; \
                 font-size: 80%; font-style: italic; margin: 1px auto; } \
                 .story-date, .published { font-size: 80%; } \
                 table { width: 100%; } \
                 td img { display: block; margin: 5px auto; } \
                 ul { padding-top: 10px; } \
                 ol { padding-top: 10px; } \
                 li { padding-top: 5px; padding-bottom: 5px; } \
                 h1 { font-size: 175%; font-weight: bold; } \
                 h2 { font-size: 150%; font-weight: bold; } \
                 h3 { font-size: 125%; font-weight: bold; } \
                 h4, h5, h6 { font-size: 100%; font-weight: bold; }'

    # Remove the line breaks and float left/right and picture width/height.
    preprocess_regexps = [(re.compile(r'<br[ ]*/>', re.IGNORECASE), lambda m: ''),
                          (re.compile(r'<br[ ]*clear.*/>', re.IGNORECASE), lambda m: ''),
                          (re.compile(r'float:.*?'), lambda m: ''),
                          (re.compile(r'width:.*?px'), lambda m: ''),
                          (re.compile(r'height:.*?px'), lambda m: '')
                          ]


    def print_version(self, url):
        url = re.sub(r'.html', '', url)
        url = re.sub('/ArticleID/.*?/', '/Print.cfm?ArticleID=', url)
        return url

    # Need to change the user agent to avoid potential download errors
    def get_browser(self, *args, **kwargs):
        from calibre import browser
        kwargs['user_agent'] = 'Mozilla/5.0 (Windows NT 5.1; rv:10.0) Gecko/20100101 Firefox/10.0'
        return browser(*args, **kwargs)


    def parse_index(self):

        # Fetches the main page of Microwaves and RF
        soup = self.index_to_soup(self.site)

        # First page has the ad, Let's find the redirect address.
        url = soup.find('span', attrs={'class':'commonCopy'}).find('a').get('href')
        if url.startswith('/'):
            url = self.site + url

        soup = self.index_to_soup(url)

        # Searches the site for Issue ID link then returns the href address
        # pointing to the latest issue
        latest_issue = soup.find('a', attrs={'href':lambda x: x and 'IssueID' in x}).get('href')

        # Fetches the index page for of the latest issue
        soup = self.index_to_soup(latest_issue)

        # Finds the main section of the page containing cover, issue date and
        # TOC
        ts = soup.find('div', attrs={'id':'columnContainer'})

        # Finds the issue date
        ds = ' '.join(self.tag_to_string(ts.find('span', attrs={'class':'CurrentIssueSectionHead'})).strip().split()[-2:]).capitalize()
        self.log('Found Current Issue:', ds)
        self.timefmt = ' [%s]'%ds

        # Finds the cover image
        cover = ts.find('img', src = lambda x: x and 'Cover' in x)
        if cover is not None:
            self.cover_url = self.site + cover['src']
            self.log('Found Cover image:', self.cover_url)

        feeds = []
        article_info = []

        # Finds all the articles (tiles and links)
        articles = ts.findAll('a', attrs={'class':'commonArticleTitle'})

        # Finds all the descriptions
        descriptions = ts.findAll('span', attrs={'class':'commonCopy'})

        # Find all the sections
        sections = ts.findAll('span', attrs={'class':'kicker'})

        title_number = 0

        # Goes thru all the articles one by one and sort them out
        for section in sections:
            title_number = title_number + 1

            # Removes the unwanted sections
            if self.tag_to_string(section) in self.exclude_sections:
                continue

            # Only includes the wanted sections
            if self.include_sections:
                if self.tag_to_string(section) not in self.include_sections:
                    continue


            title = self.tag_to_string(articles[title_number])
            url = articles[title_number].get('href')
            if url.startswith('/'):
                url = self.site + url

            self.log('\tFound article:', title, 'at', url)
            desc = self.tag_to_string(descriptions[title_number])
            self.log('\t\t', desc)

            article_info.append({'title':title, 'url':url, 'description':desc,
                                 'date':self.timefmt})

        if article_info:
            feeds.append((self.title, article_info))

        #self.log(feeds)
        return feeds

    def postprocess_html(self, soup, first):
        if self.Convert_Grayscale:
            #process all the images
            for tag in soup.findAll(lambda tag: tag.name.lower()=='img' and tag.has_key('src')):
                iurl = tag['src']
                img = Image()
                img.open(iurl)
                if img < 0:
                    raise RuntimeError('Out of memory')
                img.type = "GrayscaleType"
                img.save(iurl)
        return soup

    def preprocess_html(self, soup):

        # Includes all the figures inside the final ebook
        # Finds all the jpg links
        for figure in soup.findAll('a', attrs = {'href' : lambda x: x and 'jpg' in x}):

            # makes sure that the link points to the absolute web address
            if figure['href'].startswith('/'):
                figure['href'] = self.site + figure['href']

            figure.name = 'img'  # converts the links to img
            figure['src'] = figure['href']  # with the same address as href
            figure['style'] = 'display:block'  # adds /n before and after the image
            del figure['href']
            del figure['target']

        # Makes the title standing out
        for title in soup.findAll('a', attrs = {'class': 'commonSectionTitle'}):
            title.name = 'h1'
            del title['href']
            del title['target']

        # Makes the section name more visible
        for section_name in soup.findAll('a', attrs = {'class': 'kicker2'}):
            section_name.name = 'h5'
            del section_name['href']
            del section_name['target']

        # Removes all unrelated links
        for link in soup.findAll('a', attrs = {'href': True}):
            link.name = 'font'
            del link['href']
            del link['target']

        return soup
recipes/schattenblick.recipe (new file, 13 lines)
@@ -0,0 +1,13 @@
from calibre.web.feeds.news import BasicNewsRecipe

class AdvancedUserRecipe1345802300(BasicNewsRecipe):
    title = u'Online-Zeitung Schattenblick'
    language = 'de'
    __author__ = 'ThB'
    publisher = u'MA-Verlag'
    category = u'Nachrichten'
    oldest_article = 7
    max_articles_per_feed = 100
    cover_url = 'http://www.schattenblick.de/mobi/rss/cover.jpg'
    feeds = [(u'Schattenblick Tagesausgabe', u'http://www.schattenblick.de/mobi/rss/rss.xml')]
@@ -385,6 +385,14 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
        fname = sanitize(fname)
        ext = os.path.splitext(fname)[1]

        try:
            # If the device asked for it, try to use the UUID as the file name.
            # Fall back to the template if the UUID doesn't exist.
            if self.client_wants_uuid_file_names and mdata.uuid:
                return (mdata.uuid + ext)
        except:
            pass

        maxlen = (self.MAX_PATH_LEN - (self.PATH_FUDGE_FACTOR +
                  self.exts_path_lengths.get(ext, self.PATH_FUDGE_FACTOR)))

@@ -845,6 +853,10 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
            self._close_device_socket()
            return False

        self.client_wants_uuid_file_names = result.get('useUuidFileNames', False)
        self._debug('Device wants UUID file names', self.client_wants_uuid_file_names)

        config = self._configProxy()
        config['format_map'] = exts
        self._debug('selected formats', config['format_map'])

@@ -1253,6 +1265,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
        self.connection_attempts = {}
        self.client_can_stream_books = False
        self.client_can_stream_metadata = False
        self.client_wants_uuid_file_names = False

        self._debug("All IP addresses", get_all_ips())
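Note: the hunks above make the wireless device driver honor a device-supplied useUuidFileNames flag; when it is set and the book has a uuid, the file is named from the uuid instead of the template. A standalone sketch of that decision (the helper name and arguments below are hypothetical, not the driver's API):

    import os

    def choose_device_filename(template_name, book_uuid, device_wants_uuid):
        # Hypothetical helper mirroring the logic above: prefer "<uuid><ext>"
        # when the device asked for UUID file names and the book has a uuid,
        # otherwise keep the template-generated name.
        ext = os.path.splitext(template_name)[1]
        if device_wants_uuid and book_uuid:
            return str(book_uuid) + ext
        return template_name

    # e.g. choose_device_filename('Author - Title.epub', 'some-uuid', True)
    # returns 'some-uuid.epub'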
@@ -291,6 +291,8 @@ def set_metadata(stream, mi, apply_null=False, update_timestamp=False):

    reader.opf.smart_update(mi)
    if getattr(mi, 'uuid', None):
        reader.opf.application_id = mi.uuid
    if apply_null:
        if not getattr(mi, 'series', None):
            reader.opf.series = None
@@ -941,12 +941,11 @@ class OPF(object): # {{{
            return self.get_text(match) or None

        def fset(self, val):
            matches = self.application_id_path(self.metadata)
            if not matches:
                attrib = {'{%s}scheme'%self.NAMESPACES['opf']: 'calibre'}
                matches = [self.create_metadata_element('identifier',
                    attrib=attrib)]
            self.set_text(matches[0], unicode(val))
            for x in tuple(self.application_id_path(self.metadata)):
                x.getparent().remove(x)
            attrib = {'{%s}scheme'%self.NAMESPACES['opf']: 'calibre'}
            self.set_text(self.create_metadata_element(
                'identifier', attrib=attrib), unicode(val))

        return property(fget=fget, fset=fset)
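Note: the rewritten fset above first removes any existing dc:identifier carrying opf:scheme="calibre" and then writes a fresh one with the new value. A minimal standalone sketch of the same idea using plain lxml (the function below is illustrative only, not calibre's OPF class):

    from lxml import etree

    OPF_NS = 'http://www.idpf.org/2007/opf'
    DC_NS = 'http://purl.org/dc/elements/1.1/'

    def set_calibre_identifier(metadata, value):
        # Illustrative only: drop identifiers already marked with the calibre
        # scheme, then add one holding the new application id / uuid.
        scheme = '{%s}scheme' % OPF_NS
        for ident in metadata.findall('{%s}identifier' % DC_NS):
            if ident.get(scheme) == 'calibre':
                metadata.remove(ident)
        ident = etree.SubElement(metadata, '{%s}identifier' % DC_NS)
        ident.set(scheme, 'calibre')
        ident.text = str(value)
        return ident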
@@ -115,8 +115,11 @@ class MergeMetadata(object):
        if mi.uuid is not None:
            m.filter('identifier', lambda x:x.id=='uuid_id')
            self.oeb.metadata.add('identifier', mi.uuid, id='uuid_id',
                scheme='uuid')
                    scheme='uuid')
            self.oeb.uid = self.oeb.metadata.identifier[-1]
        if mi.application_id is not None:
            m.filter('identifier', lambda x:x.scheme=='calibre')
            self.oeb.metadata.add('identifier', mi.application_id, scheme='calibre')

    def set_cover(self, mi, prefer_metadata_cover):
        cdata, ext = '', 'jpg'
@@ -7,7 +7,7 @@ __license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import sys
import sys, copy
from future_builtins import map
from collections import namedtuple

@@ -83,8 +83,7 @@ class LinearGradientPattern(Dictionary):
        stop = gradient.finalStop()
        stops = list(map(lambda x: [x[0], x[1].getRgbF()], gradient.stops()))
        spread = gradient.spread()
        if False and spread != gradient.PadSpread:
            # TODO: Finish this implementation
        if spread != gradient.PadSpread:
            inv = matrix.inverted()[0]
            page_rect = tuple(map(inv.map, (
                QPointF(0, 0), QPointF(pixel_page_width, 0), QPointF(0, pixel_page_height),

@@ -103,23 +102,52 @@ class LinearGradientPattern(Dictionary):
            llimit, rlimit = start, stop

            reflect = False
            base_stops = list(stops)
            base_stops = copy.deepcopy(stops)
            reversed_stops = list(reversed(stops))
            do_reflect = spread == gradient.ReflectSpread
            # totl = abs(stops[-1][0] - stops[0][0])
            # intervals = [abs(stops[i+1] - stops[i])/totl for i in xrange(len(stops)-1)]
            totl = abs(stops[-1][0] - stops[0][0])
            intervals = [abs(stops[i+1][0] - stops[i][0])/totl
                         for i in xrange(len(stops)-1)]

            while in_page(llimit):
                reflect ^= True
                llimit -= offset
                estops = reversed_stops if (reflect and do_reflect) else base_stops
                stops = estops + stops
                stops = copy.deepcopy(estops) + stops

            first_is_reflected = reflect
            reflect = False

            while in_page(rlimit):
                reflect ^= True
                rlimit += offset
                estops = reversed_stops if (reflect and do_reflect) else base_stops
                stops = stops + estops
                stops = stops + copy.deepcopy(estops)

            start, stop = llimit, rlimit

            num = len(stops) // len(base_stops)
            if num > 1:
                # Adjust the stop parameter values
                t = base_stops[0][0]
                rlen = totl/num
                reflect = first_is_reflected ^ True
                intervals = [i*rlen for i in intervals]
                rintervals = list(reversed(intervals))

                for i in xrange(num):
                    reflect ^= True
                    pos = i * len(base_stops)
                    tvals = [t]
                    for ival in (rintervals if reflect and do_reflect else
                                 intervals):
                        tvals.append(tvals[-1] + ival)
                    for j in xrange(len(base_stops)):
                        stops[pos+j][0] = tvals[j]
                    t = tvals[-1]

                # In case there were rounding errors
                stops[-1][0] = base_stops[-1][0]

            return start, stop, tuple(Stop(s[0], s[1]) for s in stops)
@@ -86,10 +86,11 @@ def brush(p, xmax, ymax):
    x = 0
    y = 0
    w = xmax/2
    g = QLinearGradient(QPointF(x, y), QPointF(x, y+w))
    g = QLinearGradient(QPointF(x, y+w/3), QPointF(x, y+(2*w/3)))
    g.setColorAt(0, QColor('#f00'))
    g.setColorAt(0.5, QColor('#fff'))
    g.setColorAt(1, QColor('#00f'))
    g.setSpread(g.ReflectSpread)
    p.fillRect(x, y, w, w, QBrush(g))
    p.drawRect(x, y, w, w)

@@ -112,7 +113,7 @@ def main():
    app
    tdir = os.path.abspath('.')
    pdf = os.path.join(tdir, 'painter.pdf')
    func = full
    func = brush
    dpi = 100
    with open(pdf, 'wb') as f:
        dev = PdfDevice(f, xdpi=dpi, ydpi=dpi, compress=False)
@@ -411,7 +411,7 @@
   <item row="6" column="3" colspan="2">
    <widget class="QCheckBox" name="opt_subset_embedded_fonts">
     <property name="text">
      <string>&Subset all embedded fonts (Experimental)</string>
      <string>&Subset all embedded fonts</string>
     </property>
    </widget>
   </item>
@@ -26,6 +26,7 @@ def create_opf_file(db, book_id):
    mi.application_id = uuid.uuid4()
    old_cover = mi.cover
    mi.cover = None
    mi.application_id = mi.uuid
    raw = metadata_to_opf(mi)
    mi.cover = old_cover
    opf_file = PersistentTemporaryFile('.opf')
@@ -88,13 +88,16 @@ class DateDelegate(QStyledItemDelegate): # {{{

class PubDateDelegate(QStyledItemDelegate): # {{{

    def __init__(self, *args, **kwargs):
        QStyledItemDelegate.__init__(self, *args, **kwargs)
        self.format = tweaks['gui_pubdate_display_format']
        if self.format is None:
            self.format = 'MMM yyyy'

    def displayText(self, val, locale):
        d = val.toDateTime()
        if d <= UNDEFINED_QDATETIME:
            return ''
        self.format = tweaks['gui_pubdate_display_format']
        if self.format is None:
            self.format = 'MMM yyyy'
        return format_date(qt_to_dt(d, as_utc=False), self.format)

    def createEditor(self, parent, option, index):
@@ -197,7 +197,7 @@ class NookColor(Nook):

class NookTablet(NookColor):
    id = 'nook_tablet'
    name = 'Nook Tablet'
    name = 'Nook Tablet/HD'

class CybookG3(Device):
@@ -239,6 +239,8 @@ class BrowseServer(object):
                self.browse_details)
        connect('browse_book', base_href+'/book/{id}',
                self.browse_book)
        connect('browse_random', base_href+'/random',
                self.browse_random)
        connect('browse_category_icon', base_href+'/icon/{name}',
                self.browse_icon)

@@ -351,6 +353,7 @@ class BrowseServer(object):
        cats = [
                (_('Newest'), 'newest', 'forward.png'),
                (_('All books'), 'allbooks', 'book.png'),
                (_('Random book'), 'randombook', 'random.png'),
        ]

        def getter(x):

@@ -599,6 +602,9 @@ class BrowseServer(object):
        elif category == 'allbooks':
            raise cherrypy.InternalRedirect(prefix +
                    '/browse/matches/allbooks/dummy')
        elif category == 'randombook':
            raise cherrypy.InternalRedirect(prefix +
                    '/browse/random')
        else:
            ans = self.browse_category(category, category_sort)

@@ -885,6 +891,13 @@ class BrowseServer(object):

        return json.dumps(ans, ensure_ascii=False)

    @Endpoint()
    def browse_random(self, *args, **kwargs):
        import random
        book_id = random.choice(tuple(self.db.all_ids()))
        ans = self.browse_render_details(book_id)
        return self.browse_template('').format(
            title='', script='book();', main=ans)

    @Endpoint()
    def browse_book(self, id=None, category_sort=None):