GRiker 2012-06-22 04:07:20 -06:00
commit 7e2122d93b
144 changed files with 26440 additions and 22382 deletions

View File

@ -20,6 +20,80 @@
# - title:
- version: 0.8.57
date: 2012-06-22
new features:
- title: "PDF Output: Full pagination support. No more cutoff bottom line."
type: major
description: "Fixes a long standing bug in calibre's PDF Output that caused the bottom line of some pages to be partially cut off and prevented top and bottom margins from working."
- title: "calibredb add now prints out the ids of added books"
tickets: [1014303]
- title: "Kobo Vox driver: Add support for new Google Play firmware"
tickets: [1014129]
- title: "Driver for Prestigio PMP5097PRO"
tickets: [1013864]
- title: "Add option to disable tooltips in the book list under Preferences->Look & Feel"
- title: "When customizing builtin recipes download the latest version of the recipe to customize instead of using the possibly out of date bundled version"
bug fixes:
- title: "PDF Output: Use the cover from the input document when no cover is specified during a conversion"
- title: "E-book Viewer: Printing now has proper pagination with top and bottom margins no lines partially cut-off at the bottom and full style retention"
- title: "KF8 Input: Handle files with incorrectly encoded guide type entries."
tickets: [1015020]
- title: "E-book viewer: Disable hyphenation on windows xp as Qt WebKit barfs on soft hyphens on windows XP"
- title: "Handle OS X systems with invalid palette colors."
tickets: [1014900]
- title: "Tag Browser: Fix regression that broke partitioning of hierarchical categories."
tickets: [1014065]
- title: "LRF Output: Handle negative page margins"
tickets: [1014103]
- title: "Template language: Fix arithmetic functions to tolerate the value 'None' as returned by raw_field()"
- title: "Fix custom title sort set in the edit metadata dialog getting reset by the conversion dialog"
improved recipes:
- The Economist
- Akter
- 24 Sata sr
- Novi List
- Metro Montreal
- Mode Durable
- CanardPC
- The Economic Collapse
- Our Daily Bread
new recipes:
- title: Akter Daily
author: Darko Miletic
- title: BBC Brasil
author: Claviola
- title: Homopedia.pl
author: rainbowwarrior
- title: National Geographic Magazine
author: Terminal Veracity
- title: Something Awful
author: atordo
- title: Huffington Post UK
author: Krittika Goyal
- version: 0.8.56
date: 2012-06-15

View File

@ -1,6 +1,7 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2009-2010, Darko Miletic <darko.miletic at gmail.com>'
__copyright__ = '2009-2012, Darko Miletic <darko.miletic at gmail.com>'
'''
24sata.rs
@ -21,26 +22,29 @@ class Ser24Sata(BasicNewsRecipe):
encoding = 'utf-8'
use_embedded_content = False
language = 'sr'
publication_type = 'newspaper'
extra_css = '@font-face {font-family: "serif1";src:url(res:///opt/sony/ebook/FONT/tt0011m_.ttf)} body{font-family: serif1, serif} .article_description{font-family: serif1, serif}'
publication_type = 'newsportal'
extra_css = """
@font-face {font-family: "serif1";src:url(res:///opt/sony/ebook/FONT/tt0011m_.ttf)}
body{font-family: serif1, serif}
"""
conversion_options = {
'comment' : description
, 'tags' : category
, 'publisher': publisher
, 'language' : language
, 'linearize_tables' : True
}
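# Replace Đ (U+0110) with the visually similar Ð (U+00D0), which the
# embedded reader fonts are more likely to provide.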
preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')]
feeds = [(u'Vesti Dana', u'http://www.24sata.rs/rss.php')]
def preprocess_html(self, soup):
return self.adeify_images(soup)
feeds = [
(u'Vesti' , u'http://www.24sata.rs/rss/vesti.xml' ),
(u'Sport' , u'http://www.24sata.rs/rss/sport.xml' ),
(u'Šou' , u'http://www.24sata.rs/rss/sou.xml' ),
(u'Specijal', u'http://www.24sata.rs/rss/specijal.xml'),
(u'Novi Sad', u'http://www.24sata.rs/rss/ns.xml' )
]
def print_version(self, url):
article = url.partition('#')[0]
article_id = article.partition('id=')[2]
return 'http://www.24sata.rs/_print.php?id=' + article_id
dpart, spart, apart = url.rpartition('/')
return dpart + '/print/' + apart
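# Illustration with a made-up article URL:
# 'http://www.24sata.rs/vesti/neki-clanak-12345' becomes
# 'http://www.24sata.rs/vesti/print/neki-clanak-12345'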

View File

@ -1,5 +1,5 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
__copyright__ = '2010-2012, Darko Miletic <darko.miletic at gmail.com>'
'''
akter.co.rs
'''
@ -8,7 +8,7 @@ import re
from calibre.web.feeds.news import BasicNewsRecipe
class Akter(BasicNewsRecipe):
title = 'AKTER'
title = 'AKTER - Nedeljnik'
__author__ = 'Darko Miletic'
description = 'AKTER - nedeljni politicki magazin savremene Srbije'
publisher = 'Akter Media Group d.o.o.'
@ -18,61 +18,37 @@ class Akter(BasicNewsRecipe):
no_stylesheets = True
use_embedded_content = False
encoding = 'utf-8'
masthead_url = 'http://www.akter.co.rs/templates/gk_thenews2/images/style2/logo.png'
masthead_url = 'http://www.akter.co.rs/gfx/logoneover.png'
language = 'sr'
publication_type = 'magazine'
remove_empty_feeds = True
PREFIX = 'http://www.akter.co.rs'
extra_css = """
@font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
.article_description,body{font-family: Arial,Helvetica,sans1,sans-serif}
.color-2{display:block; margin-bottom: 10px; padding: 5px, 10px;
border-left: 1px solid #D00000; color: #D00000}
img{margin-bottom: 0.8em} """
body{font-family: Tahoma,Geneva,sans1,sans-serif}
img{margin-bottom: 0.8em; display: block;}
"""
conversion_options = {
'comment' : description
, 'tags' : category
, 'publisher': publisher
, 'language' : language
, 'linearize_tables' : True
}
preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')]
feeds = [
(u'Politika' , u'http://www.akter.co.rs/index.php/politikaprint.html' )
,(u'Ekonomija' , u'http://www.akter.co.rs/index.php/ekonomijaprint.html')
,(u'Life&Style' , u'http://www.akter.co.rs/index.php/lsprint.html' )
,(u'Sport' , u'http://www.akter.co.rs/index.php/sportprint.html' )
]
def preprocess_html(self, soup):
for item in soup.findAll(style=True):
del item['style']
return self.adeify_images(soup)
keep_only_tags = [dict(name='div', attrs={'id':'section_to_print'})]
feeds = [(u'Nedeljnik', u'http://akter.co.rs/rss/nedeljnik')]
def print_version(self, url):
return url + '?tmpl=component&print=1&page='
dpart, spart, apart = url.rpartition('/')
return dpart + '/print-' + apart
def parse_index(self):
totalfeeds = []
lfeeds = self.get_feeds()
for feedobj in lfeeds:
feedtitle, feedurl = feedobj
self.report_progress(0, _('Fetching feed')+' %s...'%(feedtitle if feedtitle else feedurl))
articles = []
soup = self.index_to_soup(feedurl)
for item in soup.findAll(attrs={'class':['sectiontableentry1','sectiontableentry2']}):
link = item.find('a')
url = self.PREFIX + link['href']
title = self.tag_to_string(link)
articles.append({
'title' :title
,'date' :''
,'url' :url
,'description':''
})
totalfeeds.append((feedtitle, articles))
return totalfeeds
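# Scrape the cover image of the current issue from the weekly
# overview page.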
def get_cover_url(self):
soup = self.index_to_soup('http://www.akter.co.rs/weekly.html')
divt = soup.find('div', attrs={'class':'lastissue'})
if divt:
imgt = divt.find('img')
if imgt:
return 'http://www.akter.co.rs' + imgt['src']
return None

View File

@ -0,0 +1,44 @@
__license__ = 'GPL v3'
__copyright__ = '2012, Darko Miletic <darko.miletic at gmail.com>'
'''
akter.co.rs
'''
import re
from calibre.web.feeds.news import BasicNewsRecipe
class Akter(BasicNewsRecipe):
title = 'AKTER - Dnevnik'
__author__ = 'Darko Miletic'
description = 'AKTER - Najnovije vesti iz Srbije'
publisher = 'Akter Media Group d.o.o.'
category = 'vesti, online vesti, najnovije vesti, politika, sport, ekonomija, biznis, finansije, berza, kultura, zivot, putovanja, auto, automobili, tehnologija, politicki magazin, dogadjaji, desavanja, lifestyle, zdravlje, zdravstvo, vest, novine, nedeljnik, srbija, novi sad, vojvodina, svet, drustvo, zabava, republika srpska, beograd, intervju, komentar, reportaza, arhiva vesti, news, serbia, politics'
oldest_article = 8
max_articles_per_feed = 100
no_stylesheets = True
use_embedded_content = False
encoding = 'utf-8'
masthead_url = 'http://www.akter.co.rs/gfx/logodnover.png'
language = 'sr'
publication_type = 'magazine'
remove_empty_feeds = True
extra_css = """
@font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
body{font-family: Tahoma,Geneva,sans1,sans-serif}
img{margin-bottom: 0.8em; display: block;}
"""
conversion_options = {
'comment' : description
, 'tags' : category
, 'publisher': publisher
, 'language' : language
}
preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')]
keep_only_tags = [dict(name='div', attrs={'id':'section_to_print'})]
feeds = [(u'Vesti', u'http://akter.co.rs/rss/dnevni')]
def print_version(self, url):
dpart, spart, apart = url.rpartition('/')
return dpart + '/print-' + apart

recipes/bbc_brasil.recipe Normal file
View File

@ -0,0 +1,595 @@
##
## Title: BBC News, Sport, and Blog Calibre Recipe
## Contact: mattst - jmstanfield@gmail.com
##
## License: GNU General Public License v3 - http://www.gnu.org/copyleft/gpl.html
## Copyright: mattst - jmstanfield@gmail.com
##
## Written: November 2011
## Last Edited: 2011-11-19
##
__license__ = 'GNU General Public License v3 - http://www.gnu.org/copyleft/gpl.html'
__copyright__ = 'mattst - jmstanfield@gmail.com'
'''
BBC News, Sport, and Blog Calibre Recipe
'''
# Import the regular expressions module.
import re
# Import the BasicNewsRecipe class which this class extends.
from calibre.web.feeds.recipes import BasicNewsRecipe
class BBCBrasilRecipe(BasicNewsRecipe):
#
# **** IMPORTANT USERS READ ME ****
#
# First select the feeds you want then scroll down below the feeds list
# and select the values you want for the other user preferences, like
# oldest_article and such like.
#
#
# Select the BBC rss feeds which you want in your ebook.
# Selected feeds have NO '#' at their start; de-selected feeds begin with a '#'.
#
# Eg. ("News Home", "http://feeds.bbci.co.uk/... - include feed.
# Eg. #("News Home", "http://feeds.bbci.co.uk/... - do not include feed.
#
# There are 68 feeds below which constitute the bulk of the available rss
# feeds on the BBC web site. These include 5 blogs by editors and
# correspondents, 16 sports feeds, 15 'sub' regional feeds (Eg. North West
# Wales, Scotland Business), and 7 Welsh language feeds.
#
# Some of the feeds are low volume (Eg. blogs), or very low volume (Eg. Click)
# so if "oldest_article = 1.5" (only articles published in the last 36 hours)
# you may get some 'empty feeds' which will not then be included in the ebook.
#
# The 15 feeds currently selected below are simply my default ones.
#
# Note: With all 68 feeds selected, oldest_article set to 2,
# max_articles_per_feed set to 100, and simultaneous_downloads set to 10,
# the ebook creation took 29 minutes on my speedy 100 Mbps net connection
# and fairly high-end desktop PC running Linux (Ubuntu Lucid-Lynx).
# More realistically with 15 feeds selected, oldest_article set to 1.5,
# max_articles_per_feed set to 100, and simultaneous_downloads set to 20,
# it took 6 minutes. If that's too slow increase 'simultaneous_downloads'.
#
# Select / de-select the feeds you want in your ebook.
#
feeds = [
(u'Primeira P\xe1gina', u'http://www.bbc.co.uk/portuguese/index.xml'),
(u'\xdaltimas Not\xedcias', u'http://www.bbc.co.uk/portuguese/ultimas_noticias/index.xml'),
(u'Internacional', u'http://www.bbc.co.uk/portuguese/topicos/internacional/index.xml'),
(u'Brasil', u'http://www.bbc.co.uk/portuguese/topicos/brasil/index.xml'),
(u'Am\xe9rica Latina', u'http://www.bbc.co.uk/portuguese/topicos/america_latina/index.xml'),
(u'Economia', u'http://www.bbc.co.uk/portuguese/topicos/economia/index.xml'),
(u'Sa\xfade', u'http://www.bbc.co.uk/portuguese/topicos/saude/index.xml'),
(u'Ci\xeancia e Tecnologia', u'http://www.bbc.co.uk/portuguese/topicos/ciencia_e_tecnologia/index.xml'),
(u'Cultura', u'http://www.bbc.co.uk/portuguese/topicos/cultura/index.xml'),
(u'V\xeddeos e Fotos', u'http://www.bbc.co.uk/portuguese/videos_e_fotos/index.xml'),
(u'Especiais', u'http://www.bbc.co.uk/portuguese/especiais/index.xml')
]
# **** SELECT YOUR USER PREFERENCES ****
# Title to use for the ebook.
#
title = 'BBC Brasil'
# A brief description for the ebook.
#
description = u'Not\xedcias do Brasil e do mundo pela British Broadcasting Corporation'
# The max number of articles which may be downloaded from each feed.
# I've never seen more than about 70 articles in a single feed in the
# BBC feeds.
#
max_articles_per_feed = 100
# The max age of articles which may be downloaded from each feed. This is
# specified in days - note fractions of days are allowed, Eg. 2.5 (2 and a
# half days). My default of 1.5 days is the last 36 hours, the point at
# which I've decided 'news' becomes 'old news', but be warned this is not
# so good for the blogs, technology, magazine, etc., and sports feeds.
# You may wish to extend this to 2-5 but watch out ebook creation time will
# increase as well. Setting this to 30 will get everything (AFAICT) as long
# as max_articles_per_feed remains set high (except for 'Click', which is
# very low volume and whose oldest article is currently from 4 Feb 2011).
#
oldest_article = 1.5
# Number of simultaneous downloads. 20 is consistently working fine on the
# BBC News feeds with no problems. Speeds things up from the default of 5.
# If you have a lot of feeds and/or have increased oldest_article above 2
# then you may wish to try increasing simultaneous_downloads to 25-30,
# or, of course, if you are in a hurry. [I've not tried beyond 20.]
#
simultaneous_downloads = 20
# Timeout for fetching files from the server in seconds. The default of
# 120 seconds seems somewhat excessive.
#
timeout = 30
# The format string for the date shown on the ebook's first page.
# List of all values: http://docs.python.org/library/time.html
# Default in news.py has a leading space so that's mirrored here.
# As with 'feeds' select/de-select by adding/removing the initial '#',
# only one timefmt should be selected; here are a few to choose from.
#
timefmt = ' [%a, %d %b %Y]' # [Fri, 14 Nov 2011] (Calibre default)
#timefmt = ' [%a, %d %b %Y %H:%M]' # [Fri, 14 Nov 2011 18:30]
#timefmt = ' [%a, %d %b %Y %I:%M %p]' # [Fri, 14 Nov 2011 06:30 PM]
#timefmt = ' [%d %b %Y]' # [14 Nov 2011]
#timefmt = ' [%d %b %Y %H:%M]' # [14 Nov 2011 18:30]
#timefmt = ' [%Y-%m-%d]' # [2011-11-14]
#timefmt = ' [%Y-%m-%d-%H-%M]' # [2011-11-14-18-30]
#
# **** IMPORTANT ****
#
# DO NOT EDIT BELOW HERE UNLESS YOU KNOW WHAT YOU ARE DOING.
#
# DO NOT EDIT BELOW HERE UNLESS YOU KNOW WHAT YOU ARE DOING.
#
# I MEAN IT, YES I DO, ABSOLUTELY, AT YOUR OWN RISK. :)
#
# **** IMPORTANT ****
#
# Author of this recipe.
__author__ = 'claviola'
# Specify English as the language of the RSS feeds (ISO-639 code).
language = 'en_GB'
# Set tags.
tags = 'news, sport, blog'
# Set publisher and publication type.
publisher = 'BBC'
publication_type = 'newspaper'
# Disable stylesheets from site.
no_stylesheets = True
# Specifies an override encoding for sites that have an incorrect charset
# specified. Default of 'None' says to auto-detect. Some other BBC recipes
# use 'utf8', which works fine (so use that if necessary) but auto-detecting
# with None is working fine, so stick with that for robustness.
encoding = None
# Sets whether a feed has full articles embedded in it. The BBC feeds do not.
use_embedded_content = False
# Removes empty feeds - why keep them!?
remove_empty_feeds = True
# Create a custom title which fits nicely in the Kindle title list.
# Requires "import time" above class declaration, and replacing
# title with custom_title in conversion_options (right column only).
# Example of string below: "BBC News - 14 Nov 2011"
#
# custom_title = "BBC News - " + time.strftime('%d %b %Y')
'''
# Conversion options for advanced users, but don't forget to comment out the
# current conversion_options below. Avoid setting 'linearize_tables' as that
# plays havoc with the 'old style' table based pages.
#
conversion_options = { 'title' : title,
'comments' : description,
'tags' : tags,
'language' : language,
'publisher' : publisher,
'authors' : publisher,
'smarten_punctuation' : True
}
'''
conversion_options = { 'smarten_punctuation' : True }
# Specify extra CSS - overrides ALL other CSS (IE. Added last).
extra_css = 'body { font-family: verdana, helvetica, sans-serif; } \
.introduction, .first { font-weight: bold; } \
.cross-head { font-weight: bold; font-size: 125%; } \
.cap, .caption { display: block; font-size: 80%; font-style: italic; } \
.cap, .caption, .caption img, .caption span { display: block; text-align: center; margin: 5px auto; } \
.byl, .byd, .byline img, .byline-name, .byline-title, .author-name, .author-position, \
.correspondent-portrait img, .byline-lead-in, .name, .role, .bbc-role { display: block; \
text-align: center; font-size: 80%; font-style: italic; margin: 1px auto; } \
.story-date, .published, .datestamp { font-size: 80%; } \
table { width: 100%; } \
td img { display: block; margin: 5px auto; } \
ul { padding-top: 10px; } \
ol { padding-top: 10px; } \
li { padding-top: 5px; padding-bottom: 5px; } \
h1 { text-align: center; font-size: 175%; font-weight: bold; } \
h2 { text-align: center; font-size: 150%; font-weight: bold; } \
h3 { text-align: center; font-size: 125%; font-weight: bold; } \
h4, h5, h6 { text-align: center; font-size: 100%; font-weight: bold; }'
# Remove various tag attributes to improve the look of the ebook pages.
remove_attributes = [ 'border', 'cellspacing', 'align', 'cellpadding', 'colspan',
'valign', 'vspace', 'hspace', 'alt', 'width', 'height' ]
# Remove the (admittedly rarely used) line breaks, "<br />", which sometimes
# cause a section of the ebook to start in an unsightly fashion or, more
# frequently, a "<br />" will muck up the formatting of a correspondant's byline.
# "<br />" and "<br clear/>" are far more frequently used on the table formatted
# style of pages, and really spoil the look of the ebook pages.
preprocess_regexps = [(re.compile(r'<br[ ]*/>', re.IGNORECASE), lambda m: ''),
(re.compile(r'<br[ ]*clear.*/>', re.IGNORECASE), lambda m: '')]
# Create regular expressions for tag keeping and removal to make the matches more
# robust against minor changes and errors in the HTML, Eg. double spaces, leading
# and trailing spaces, missing hyphens, and such like.
# Python regular expression ('re' class) page: http://docs.python.org/library/re.html
# ***************************************
# Regular expressions for keep_only_tags:
# ***************************************
# The BBC News HTML pages use variants of 'storybody' to denote the section of a HTML
# page which contains the main text of the article. Match storybody variants: 'storybody',
# 'story-body', 'story body','storybody ', etc.
storybody_reg_exp = '^.*story[_ -]*body.*$'
# The BBC sport and 'newsbeat' (features) HTML pages use 'blq_content' to hold the title
# and published date. This is one level above the usual news pages which have the title
# and date within 'story-body'. This is annoying since 'blq_content' must also be kept,
# resulting in a lot of extra things to be removed by remove_tags.
blq_content_reg_exp = '^.*blq[_ -]*content.*$'
# The BBC has an alternative page design structure, which I suspect is an out-of-date
# design but which is still used in some articles, Eg. 'Click' (technology), 'FastTrack'
# (travel), and in some sport pages. These alternative pages are table based (which is
# why I think they are an out-of-date design) and account for -I'm guesstimating- less
# than 1% of all articles. They use a table class 'storycontent' to hold the article
# and like blq_content (above) have required lots of extra removal by remove_tags.
story_content_reg_exp = '^.*story[_ -]*content.*$'
# Keep the sections of the HTML which match the list below. The HTML page created by
# Calibre will fill <body> with those sections which are matched. Note that the
# blq_content_reg_exp must be listed before storybody_reg_exp in keep_only_tags due to
# it being the parent of storybody_reg_exp, that is to say the div class/id 'story-body'
# will be inside div class/id 'blq_content' in the HTML (if 'blq_content' is there at
# all). If they are the other way around in keep_only_tags then blq_content_reg_exp
# will end up being discarded.
keep_only_tags = [ dict(name='table', attrs={'class':re.compile(story_content_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(blq_content_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'id':re.compile(blq_content_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(storybody_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'id':re.compile(storybody_reg_exp, re.IGNORECASE)}) ]
# ************************************
# Regular expressions for remove_tags:
# ************************************
# Regular expression to remove share-help and variant tags. The share-help class
# is used by the site for a variety of 'sharing' type links, Eg. Facebook, delicious,
# twitter, email. Removed to avoid page clutter.
share_help_reg_exp = '^.*share[_ -]*help.*$'
# Regular expression to remove embedded-hyper and variant tags. This class is used to
# display links to other BBC News articles on the same/similar subject.
embedded_hyper_reg_exp = '^.*embed*ed[_ -]*hyper.*$'
# Regular expression to remove hypertabs and variant tags. This class is used to
# display a tab bar at the top of an article which allows the user to switch to
# an article (viewed on the same page) providing further info., 'in depth' analysis,
# an editorial, a correspondent's blog entry, and such like. The ability to handle
# a tab bar of this nature is currently beyond the scope of this recipe and
# possibly of Calibre itself (not sure about that - TO DO - check!).
hypertabs_reg_exp = '^.*hyper[_ -]*tabs.*$'
# Regular expression to remove story-feature and variant tags. Eg. 'story-feature',
# 'story-feature related narrow', 'story-feature wide', 'story-feature narrow'.
# This class is used to add additional info. boxes, or small lists, outside of
# the main story. TO DO: Work out a way to incorporate these neatly.
story_feature_reg_exp = '^.*story[_ -]*feature.*$'
# Regular expression to remove video and variant tags, Eg. 'videoInStoryB',
# 'videoInStoryC'. This class is used to embed video.
video_reg_exp = '^.*video.*$'
# Regular expression to remove audio and variant tags, Eg. 'audioInStoryD'.
# This class is used to embed audio.
audio_reg_exp = '^.*audio.*$'
# Regular expression to remove pictureGallery and variant tags, Eg. 'pictureGallery'.
# This class is used to embed a photo slideshow. See also 'slideshow' below.
picture_gallery_reg_exp = '^.*picture.*$'
# Regular expression to remove slideshow and variant tags, Eg. 'dslideshow-enclosure'.
# This class is used to embed a slideshow (not necessarily photo) but both
# 'slideshow' and 'pictureGallery' are used for slideshows.
slideshow_reg_exp = '^.*slide[_ -]*show.*$'
# Regular expression to remove social-links and variant tags. This class is used to
# display links to a BBC bloggers main page, used in various columnist's blogs
# (Eg. Nick Robinson, Robert Peston).
social_links_reg_exp = '^.*social[_ -]*links.*$'
# Regular expression to remove quote and (multi) variant tags, Eg. 'quote',
# 'endquote', 'quote-credit', 'quote-credit-title', etc. These are usually
# removed by 'story-feature' removal (as they are usually within them), but
# not always. The quotation removed is always (AFAICT) in the article text
# as well but a 2nd copy is placed in a quote tag to draw attention to it.
# The quote class tags may or may not appear in div's.
quote_reg_exp = '^.*quote.*$'
# Regular expression to remove hidden and variant tags, Eg. 'hidden'.
# The purpose of these is unclear, they seem to be an internal link to a
# section within the article, but the text of the link (Eg. 'Continue reading
# the main story') never seems to be displayed anyway. Removed to avoid clutter.
# The hidden class tags may or may not appear in div's.
hidden_reg_exp = '^.*hidden.*$'
# Regular expression to remove comment and variant tags, Eg. 'comment-introduction'.
# Used on the site to display text about registered users entering comments.
comment_reg_exp = '^.*comment.*$'
# Regular expression to remove form and variant tags, Eg. 'comment-form'.
# Used on the site to allow registered BBC users to fill in forms, typically
# for entering comments about an article.
form_reg_exp = '^.*form.*$'
# Extra things to remove due to the addition of 'blq_content' in keep_only_tags.
#<div class="story-actions"> Used on sports pages for 'email' and 'print'.
story_actions_reg_exp = '^.*story[_ -]*actions.*$'
#<div class="bookmark-list"> Used on sports pages instead of 'share-help' (for
# social networking links).
bookmark_list_reg_exp = '^.*bookmark[_ -]*list.*$'
#<div id="secondary-content" class="content-group">
# NOTE: Don't remove class="content-group" that is needed.
# Used on sports pages to link to 'similar stories'.
secondary_content_reg_exp = '^.*secondary[_ -]*content.*$'
#<div id="featured-content" class="content-group">
# NOTE: Don't remove class="content-group" that is needed.
# Used on sports pages to link to pages like 'tables', 'fixtures', etc.
featured_content_reg_exp = '^.*featured[_ -]*content.*$'
#<div id="navigation">
# Used on sports pages to link to pages like 'tables', 'fixtures', etc.
# Used sometimes instead of "featured-content" above.
navigation_reg_exp = '^.*navigation.*$'
#<a class="skip" href="#blq-container-inner">Skip to top</a>
# Used on sports pages to link to the top of the page.
skip_reg_exp = '^.*skip.*$'
# Extra things to remove due to the addition of 'storycontent' in keep_only_tags,
# which are the alternative table design based pages. The purpose of some of these
# is not entirely clear from the pages (which are a total mess!).
# Remove mapping based tags, Eg. <map id="world_map">
# The dynamic maps don't seem to work during ebook creation. TO DO: Investigate.
map_reg_exp = '^.*map.*$'
# Remove social bookmarking variation, called 'socialBookMarks'.
social_bookmarks_reg_exp = '^.*social[_ -]*bookmarks.*$'
# Remove page navigation tools, like 'search', 'email', 'print', called 'blq-mast'.
blq_mast_reg_exp = '^.*blq[_ -]*mast.*$'
# Remove 'sharesb', I think this is a generic 'sharing' class. It seems to appear
# alongside 'socialBookMarks' whenever that appears. I am removing it as well
# under the assumption that it can appear alone as well.
sharesb_reg_exp = '^.*sharesb.*$'
# Remove class 'o'. The worst named user created css class of all time. The creator
# should immediately be fired. I've seen it used to hold nothing at all but with
# 20 or so empty lines in it. Also to hold a single link to another article.
# Whatever it was designed to do it is not wanted by this recipe. Exact match only.
o_reg_exp = '^o$'
# Remove 'promotopbg' and 'promobottombg', link lists. Have decided to
# use two reg expressions to make removing this (and variants) robust.
promo_top_reg_exp = '^.*promotopbg.*$'
promo_bottom_reg_exp = '^.*promobottombg.*$'
# Remove 'nlp', provides heading for link lists. Requires an exact match due to
# risk of matching those letters in something needed, unless I see a variation
# of 'nlp' used at a later date.
nlp_reg_exp = '^nlp$'
# Remove 'mva', provides embedded floating content of various types. Variant 'mvb'
# has also now been seen. Requires an exact match of 'mva' or 'mvb' due to risk of
# matching those letters in something needed.
mva_or_mvb_reg_exp = '^mv[ab]$'
# Remove 'mvtb', seems to be page navigation tools, like 'blq-mast'.
mvtb_reg_exp = '^mvtb$'
# Remove 'blq-toplink', class to provide a link to the top of the page.
blq_toplink_reg_exp = '^.*blq[_ -]*top[_ -]*link.*$'
# Remove 'products and services' links, Eg. desktop tools, alerts, and so on.
# Eg. Class="servicev4 ukfs_services" - what a mess of a name. Have decided to
# use two reg expressions to make removing this (and variants) robust.
prods_services_01_reg_exp = '^.*servicev4.*$'
prods_services_02_reg_exp = '^.*ukfs[_ -]*services.*$'
# Remove -what I think is- some kind of navigation tools helper class, though I am
# not sure, it's called: 'blq-rst blq-new-nav'. What I do know is it pops up
# frequently and it is not wanted. Have decided to use two reg expressions to make
# removing this (and variants) robust.
blq_misc_01_reg_exp = '^.*blq[_ -]*rst.*$'
blq_misc_02_reg_exp = '^.*blq[_ -]*new[_ -]*nav.*$'
# Remove 'puffbox' - this may only appear inside 'storyextra', so it may not
# need removing - I have no clue what it does other than it contains links.
# Whatever it is - it is not part of the article and is not wanted.
puffbox_reg_exp = '^.*puffbox.*$'
# Remove 'sibtbg' and 'sibtbgf' - some kind of table formatting classes.
sibtbg_reg_exp = '^.*sibtbg.*$'
# Remove 'storyextra' - links to relevant articles and external sites.
storyextra_reg_exp = '^.*story[_ -]*extra.*$'
remove_tags = [ dict(name='div', attrs={'class':re.compile(story_feature_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(share_help_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(embedded_hyper_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(hypertabs_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(video_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(audio_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(picture_gallery_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(slideshow_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(quote_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(hidden_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(comment_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(story_actions_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(bookmark_list_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'id':re.compile(secondary_content_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'id':re.compile(featured_content_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'id':re.compile(navigation_reg_exp, re.IGNORECASE)}),
dict(name='form', attrs={'id':re.compile(form_reg_exp, re.IGNORECASE)}),
dict(attrs={'class':re.compile(quote_reg_exp, re.IGNORECASE)}),
dict(attrs={'class':re.compile(hidden_reg_exp, re.IGNORECASE)}),
dict(attrs={'class':re.compile(social_links_reg_exp, re.IGNORECASE)}),
dict(attrs={'class':re.compile(comment_reg_exp, re.IGNORECASE)}),
dict(attrs={'class':re.compile(skip_reg_exp, re.IGNORECASE)}),
dict(name='map', attrs={'id':re.compile(map_reg_exp, re.IGNORECASE)}),
dict(name='map', attrs={'name':re.compile(map_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'id':re.compile(social_bookmarks_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'id':re.compile(blq_mast_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(sharesb_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(o_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(promo_top_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(promo_bottom_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(nlp_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(mva_or_mvb_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(mvtb_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(blq_toplink_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(prods_services_01_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(prods_services_02_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(blq_misc_01_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(blq_misc_02_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':re.compile(puffbox_reg_exp, re.IGNORECASE)}),
dict(attrs={'class':re.compile(sibtbg_reg_exp, re.IGNORECASE)}),
dict(attrs={'class':re.compile(storyextra_reg_exp, re.IGNORECASE)}),
dict(name='div', attrs={'class':'tools-container'}),
dict(name='div', attrs={'class':'tools-container-end'}),
dict(name='div', attrs={'class':'g-block story-body contextual-links'}),
dict(name='div', attrs={'class':' g-w11 sidebar'})
]
# Uses url to create and return the 'printer friendly' version of the url.
# In other words the 'print this page' address of the page.
#
# There are 3 types of urls used in the BBC site's rss feeds. There is just
# 1 type for the standard news while there are 2 used for sports feed urls.
# Note: Sports urls are linked from regular news feeds (Eg. 'News Home') when
# there is a major story of interest to 'everyone'. So even if no BBC sports
# feeds are added to 'feeds' the logic of this method is still needed to avoid
# blank / missing / empty articles which have an index title and then no body.
def print_version(self, url):
# Handle sports page urls type 01:
if (url.find("go/rss/-/sport1/") != -1):
temp_url = url.replace("go/rss/-/", "")
# Handle sports page urls type 02:
elif (url.find("go/rss/int/news/-/sport1/") != -1):
temp_url = url.replace("go/rss/int/news/-/", "")
# Handle regular news page urls:
else:
temp_url = url.replace("go/rss/int/news/-/", "")
# Always add "?print=true" to the end of the url.
print_url = temp_url + "?print=true"
return print_url
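# Worked example (the story id is made up):
# 'http://www.bbc.co.uk/go/rss/int/news/-/news/world-12345678'
# becomes 'http://www.bbc.co.uk/news/world-12345678?print=true'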
# Remove articles in feeds based on a string in the article title or url.
#
# Code logic written by: Starson17 - posted in: "Recipes - Re-usable code"
# thread, in post with title: "Remove articles from feed", see url:
# http://www.mobileread.com/forums/showpost.php?p=1165462&postcount=6
# Many thanks and all credit to Starson17.
#
# Starson17's code has obviously been altered to suit my requirements.
def parse_feeds(self):
# Call parent's method.
feeds = BasicNewsRecipe.parse_feeds(self)
# Loop through all feeds.
for feed in feeds:
# Loop through all articles in feed.
for article in feed.articles[:]:
# Match key words and remove article if there's a match.
# Most BBC rss feed video-only 'articles' use upper case 'VIDEO'
# as a title prefix. Just match upper case 'VIDEO', so that
# articles like 'Video game banned' won't be matched and removed.
if 'VIDEO' in article.title:
feed.articles.remove(article)
# Most BBC rss feed audio-only 'articles' use upper case 'AUDIO'
# as a title prefix. Just match upper case 'AUDIO', so that
# articles like 'Hi-Def audio...' won't be matched and removed.
elif 'AUDIO' in article.title:
feed.articles.remove(article)
# Most BBC rss feed photo slideshow 'articles' use 'In Pictures',
# 'In pictures', and 'in pictures', somewhere in their title.
# Match any case of that phrase.
elif 'IN PICTURES' in article.title.upper():
feed.articles.remove(article)
# As above, but user contributed pictures. Match any case.
elif 'YOUR PICTURES' in article.title.upper():
feed.articles.remove(article)
# 'Sportsday Live' are articles which contain a constantly and
# dynamically updated 'running commentary' during a live sporting
# event. Match any case.
elif 'SPORTSDAY LIVE' in article.title.upper():
feed.articles.remove(article)
# Sometimes 'Sportsday Live' (above) becomes 'Live - Sport Name'.
# These are being matched below using 'Live - ' because removing all
# articles with 'live' in their titles would remove some articles
# that are in fact not live sports pages. Match any case.
elif 'LIVE - ' in article.title.upper():
feed.articles.remove(article)
# 'Quiz of the week' is a Flash player weekly news quiz. Match only
# the 'Quiz of the' part in anticipation of monthly and yearly
# variants. Match any case.
elif 'QUIZ OF THE' in article.title.upper():
feed.articles.remove(article)
# Remove articles with 'scorecards' in the url. These are BBC sports
# pages which just display a cricket scorecard. The pages have a mass
# of table and css entries to display the scorecards nicely. Probably
# could make them work with this recipe, but might take a whole day
# of work to sort out all the css - basically a formatting nightmare.
elif 'scorecards' in article.url:
feed.articles.remove(article)
return feeds
# End of class and file.

View File

@ -6,10 +6,12 @@ class AdvancedUserRecipe1271446252(BasicNewsRecipe):
max_articles_per_feed = 100
language = 'fr'
__author__ = 'zorgluf'
max_articles_per_feed = 25
#encoding = 'cp1252'
use_embedded_content = False
no_stylesheets = True
auto_cleanup = True
feeds = [(u'CanardPC', u'http://www.canardpc.com/feed.php')]
remove_tags_after = dict(id='auteur_news')
remove_tags_before = dict(id='fil_ariane')
no_stylesheets = True
remove_tags = [dict(name='a', attrs={'class':'news_tags'}),
dict(name='div', attrs={'id':'fil_ariane'})]

View File

@ -20,7 +20,23 @@ class Economist(BasicNewsRecipe):
INDEX = 'http://www.economist.com/printedition'
description = ('Global news and current affairs from a European'
' perspective. Best downloaded on Friday mornings (GMT)')
extra_css = '.headline {font-size: x-large;} \n h2 { font-size: small; } \n h1 { font-size: medium; }'
extra_css = '''
.headline {font-size: x-large;}
h2 { font-size: small; }
h1 { font-size: medium; }
.pullquote {
float: right;
font-size: larger;
font-weight: bold;
font-style: italic;
page-break-inside:avoid;
border-bottom: 3px solid black;
border-top: 3px solid black;
width: 228px;
margin: 0px 0px 10px 15px;
padding: 7px 0px 9px;
}
'''
oldest_article = 7.0
remove_tags = [
dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent']),

View File

@ -20,7 +20,24 @@ class Economist(BasicNewsRecipe):
INDEX = 'http://www.economist.com/printedition'
description = ('Global news and current affairs from a European'
' perspective. Best downloaded on Friday mornings (GMT)')
extra_css = '.headline {font-size: x-large;} \n h2 { font-size: small; } \n h1 { font-size: medium; }'
extra_css = '''
.headline {font-size: x-large;}
h2 { font-size: small; }
h1 { font-size: medium; }
.pullquote {
float: right;
font-size: larger;
font-weight: bold;
font-style: italic;
page-break-inside:avoid;
border-bottom: 3px solid black;
border-top: 3px solid black;
width: 228px;
margin: 0px 0px 10px 15px;
padding: 7px 0px 9px;
}
'''
oldest_article = 7.0
remove_tags = [
dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent']),

View File

@ -0,0 +1,28 @@
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1325420346(BasicNewsRecipe):
title = u'Homopedia'
__author__ = 'rainbowwarrior'
language = 'pl'
oldest_article = 7
max_articles_per_feed = 100
publication_type = 'newspaper'
masthead_url = 'http://a5.sphotos.ak.fbcdn.net/hphotos-ak-snc6/67335_168352243178437_166186720061656_594975_5800720_n.jpg'
encoding = 'utf-8'
def get_cover_url(self):
return 'http://a7.sphotos.ak.fbcdn.net/hphotos-ak-snc4/65568_166186970061631_166186720061656_580324_7584264_n.jpg'
feeds = [(u'Nowe has\u0142a', u'http://www.homopedia.pl/w/index.php?title=Specjalna:Nowe_strony&feed=atom&hideliu=&hidepatrolled=&hidebots=&hideredirs=1&limit=50&namespace=0'), (u'Blog', u'http://blog.homopedia.pl/feeds/posts/default')]
def get_article_url(self, article):
artl = article.get('link', None)
rest, sep, article_id = artl.rpartition('/')
return 'http://www.homopedia.pl/w/index.php?redirect=no&printable=yes&title=' + article_id
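# For a hypothetical entry link 'http://www.homopedia.pl/wiki/Jakies_Haslo'
# this yields
# 'http://www.homopedia.pl/w/index.php?redirect=no&printable=yes&title=Jakies_Haslo'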
remove_tags = [dict(name='div', attrs={'class':'noprint'}),dict(name='ul', attrs={'class':'noprint'}),dict(name='ul', attrs={'id':'footer-places'}),dict(name='li', attrs={'id':'footer-info-viewcount'}),dict(name='span', attrs={'class':'editsection'}),dict(name='div', attrs={'id':'jump-to-nav'})]
remove_tags_before = dict(name='h2', attrs={'class':'post-title'})
remove_tags_after = dict(name='a', attrs={'class':'timestamp-link'})
extra_css = 'p{text-indent:1.5em!important;padding:0!important;margin:0!important}'

View File

@ -0,0 +1,47 @@
from calibre.web.feeds.news import BasicNewsRecipe
class HuffingtonPostUK(BasicNewsRecipe):
title = u'Huffington Post UK'
language = 'en_GB'
__author__ = 'Krittika Goyal'
oldest_article = 2 #days
max_articles_per_feed = 25
#encoding = 'cp1252'
use_embedded_content = False
no_stylesheets = True
auto_cleanup = True
auto_cleanup_keep = '//div[@class="articleBody"]'
feeds = [
('UK Politics',
'http://www.huffingtonpost.com/feeds/verticals/uk-politics/news.xml'),
('UK Entertainment',
'http://www.huffingtonpost.com/feeds/verticals/uk-entertainment/news.xml'),
('UK Style',
'http://www.huffingtonpost.com/feeds/verticals/uk-style/news.xml'),
('UK Fashion',
'http://www.huffingtonpost.com/feeds/verticals/uk-fashion/news.xml'),
('UK Universities',
'http://www.huffingtonpost.com/feeds/verticals/uk-universities-education/news.xml'),
('UK World',
'http://www.huffingtonpost.com/feeds/verticals/uk-world/news.xml'),
('UK Lifestyle',
'http://www.huffingtonpost.com/feeds/verticals/uk-lifestyle/news.xml'),
('UK Comedy',
'http://www.huffingtonpost.com/feeds/verticals/uk-comedy/news.xml'),
('UK Celebrity',
'http://www.huffingtonpost.com/feeds/verticals/uk-celebrity/news.xml'),
('UK Culture',
'http://www.huffingtonpost.com/feeds/verticals/uk-culture/news.xml'),
('UK News',
'http://www.huffingtonpost.com/feeds/verticals/uk/news.xml'),
('UK Tech',
'http://www.huffingtonpost.com/feeds/verticals/uk-tech/news.xml'),
('UK Sport',
'http://www.huffingtonpost.com/feeds/verticals/uk-sport/news.xml'),
]
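# Some feed entries carry an explicit list of links; prefer the first
# of those and fall back to the default resolution otherwise.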
def get_article_url(self, entry):
if entry.links:
return entry.links[0]['href']
return BasicNewsRecipe.get_article_url(self, entry)

View File

@ -12,6 +12,7 @@ class Metro_Montreal(BasicNewsRecipe):
use_embedded_content = False
remove_javascript = True
no_stylesheets = True
auto_cleanup = True
encoding = 'utf-8'
extra_css = '.headline {font-size: x-large;} \n .fact {padding-top: 10pt}'

View File

@ -20,6 +20,7 @@ class AdventureGamers(BasicNewsRecipe):
delay = 2
max_articles_per_feed = 100
no_stylesheets = True
auto_cleanup = True
encoding = 'utf-8'
remove_javascript = True
use_embedded_content = False
@ -32,11 +33,9 @@ class AdventureGamers(BasicNewsRecipe):
html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"'
keep_only_tags = [dict(name='div', attrs={'class':'post'})]
remove_tags = [dict(name=['object','link','embed','form','img'])]
feeds = [(u'Articles', u'http://mondedurable.science-et-vie.com/feed/')]
feeds = [(u'Articles', u'http://mondedurable.science-et-vie.com/comments/feed/')]
def preprocess_html(self, soup):
mtag = '<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>'

View File

@ -0,0 +1,46 @@
from calibre.web.feeds.recipes import BasicNewsRecipe
class NatGeoMag(BasicNewsRecipe):
title = 'National Geographic Mag'
__author__ = 'Terminal Veracity'
description = 'The National Geographic Magazine'
publisher = 'National Geographic'
oldest_article = 31
max_articles_per_feed = 50
category = 'geography, magazine'
language = 'en_US'
publication_type = 'magazine'
cover_url = 'http://www.yourlogoresources.com/wp-content/uploads/2011/09/national-geographic-logo.jpg'
use_embedded_content = False
no_stylesheets = True
remove_javascript = True
recursions = 1
remove_empty_feeds = True
feeds = [('National Geographic Magazine', 'http://feeds.nationalgeographic.com/ng/NGM/NGM_Magazine')]
remove_tags = [dict(name='div', attrs={'class':['nextpage_continue', 'subscribe']})]
keep_only_tags = [dict(attrs={'class':'main_3narrow'})]
extra_css = """
h1 {font-size: large; font-weight: bold; margin: .5em 0; }
h2 {font-size: large; font-weight: bold; margin: .5em 0; }
h3 {font-size: medium; font-weight: bold; margin: 0 0; }
.article_credits_author {font-size: small; font-style: italic; }
.article_credits_photographer {font-size: small; font-style: italic; display: inline }
"""
def parse_feeds(self):
feeds = BasicNewsRecipe.parse_feeds(self)
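# Weed out the recurring photo features and reader-submission items
# by matching on their titles.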
for feed in feeds:
for article in feed.articles[:]:
if 'Flashback' in article.title:
feed.articles.remove(article)
elif 'Desktop Wallpaper' in article.title:
feed.articles.remove(article)
elif 'Visions of Earth' in article.title:
feed.articles.remove(article)
elif 'Your Shot' in article.title:
feed.articles.remove(article)
elif 'MyShot' in article.title:
feed.articles.remove(article)
elif 'Field Test' in article.title:
feed.articles.remove(article)
return feeds

View File

@ -20,10 +20,12 @@ class NoviList_Portal_hr(BasicNewsRecipe):
use_embedded_content = False
language = 'hr'
publication_type = 'newsportal'
masthead_url = 'http://www.novilist.hr/design/novilist/images/logo-print.gif'
masthead_url = 'http://www.novilist.hr/extension/novilist/design/novilist/images/logo-print.gif'
extra_css = """
body{font-family: Geneva,Arial,Helvetica,Swiss,sans-serif }
h1{font-family: Georgia,serif}
@font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
@font-face {font-family: "serif1";src:url(res:///opt/sony/ebook/FONT/tt0011m_.ttf)}
body{font-family: Geneva,Arial,Helvetica,Swiss,sans1,sans-serif }
h1{font-family: Georgia,serif1,serif}
img{display:block; margin-bottom: 0.4em; margin-top: 0.4em}
"""
@ -39,11 +41,22 @@ class NoviList_Portal_hr(BasicNewsRecipe):
keep_only_tags = [dict(name='div', attrs={'id':'content'})]
remove_tags = [dict(name=['meta', 'link', 'iframe', 'embed', 'object'])]
remove_tags = [
dict(name=['meta', 'link', 'iframe', 'embed', 'object']),
dict(name='div', attrs={'class':lambda x: x and 'embed-object' in x.split()})
]
remove_attributes=['border', 'lang']
feeds = [(u'Vijesti', u'http://www.novilist.hr/rss/feed/sve.xml')]
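# Skip gallery ('/Foto/') and listings ('/Informator/') URLs, which
# carry little or no article text.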
def get_article_url(self, article):
url = BasicNewsRecipe.get_article_url(self, article)
filter = ['/Foto/','/Informator/']
for item in filter:
if item in url:
return None
return url
def print_version(self, url):
return url.replace('http://www.novilist.hr/','http://www.novilist.hr/layout/set/print/')

View File

@ -1,3 +1,4 @@
__license__ = 'GPL v3'
__copyright__ = '2009-2010, Darko Miletic <darko.miletic at gmail.com>'
'''
@ -5,16 +6,17 @@ odb.org
'''
from calibre.web.feeds.news import BasicNewsRecipe
import uuid
from lxml import html
class OurDailyBread(BasicNewsRecipe):
title = 'Our Daily Bread'
__author__ = 'Darko Miletic and Sujata Raman'
__author__ = 'Kovid Goyal'
description = "Our Daily Bread is a daily devotional from RBC Ministries which helps readers spend time each day in God's Word."
oldest_article = 15
language = 'en'
max_articles_per_feed = 100
no_stylesheets = True
auto_cleanup = True
use_embedded_content = False
category = 'ODB, Daily Devotional, Bible, Christian Devotional, Devotional, RBC Ministries, Our Daily Bread, Devotionals, Daily Devotionals, Christian Devotionals, Faith, Bible Study, Bible Studies, Scripture, RBC, religion'
encoding = 'utf-8'
@ -26,12 +28,14 @@ class OurDailyBread(BasicNewsRecipe):
,'linearize_tables' : True
}
#keep_only_tags = [dict(attrs={'class':'module-content'})]
#remove_tags = [
#dict(attrs={'id':'article-zoom'})
#,dict(attrs={'class':'listen-now-box'})
#]
#remove_tags_after = dict(attrs={'class':'readable-area'})
keep_only_tags = [dict(attrs={'class':'calibre-inserted-psalm'}),
{'id':'content'}]
remove_tags = [
dict(attrs={'class':['listen-box', 'entry-zoom',
'entry-footer']}),
{'id':'nav-single'},
dict(attrs={'class':lambda x:x and ' sharing ' in x}),
]
extra_css = '''
.text{font-family:Arial,Helvetica,sans-serif;font-size:x-small;}
@ -43,18 +47,33 @@ class OurDailyBread(BasicNewsRecipe):
feeds = [(u'Our Daily Bread', u'http://odb.org/feed/')]
def preprocess_raw_html(self, raw, url):
# Convert links to referenced Psalms to the actual psalms
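# Each biblegateway.com link is fetched, the passage text is copied into
# this page under a freshly generated anchor, and the link is re-pointed
# at that anchor so the passage reads inline in the ebook.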
root = html.fromstring(raw)
for a in root.xpath(
'//a[starts-with(@href, "http://www.biblegateway.com")]'):
uid = type(u'')(uuid.uuid4())
raw = self.index_to_soup(a.get('href'), raw=True)
iroot = html.fromstring(raw)
matches = iroot.xpath('//div[contains(@class, "result-text-style-normal")]')
if matches:
div = matches[0]
div.getparent().remove(div)
root.xpath('//body')[0].append(div)
a.set('href', '#'+uid)
del a.attrib['target']
div.set('id', uid)
div.set('class', 'calibre-inserted-psalm')
hr = div.makeelement('hr')
div.insert(0, hr)
# print html.tostring(div)
raw = html.tostring(root, encoding=unicode)
return raw
def preprocess_html(self, soup):
return self.adeify_images(soup)
d = soup.find(id='content')
d.extract()
soup.find('body').insert(0, d)
return soup
def get_cover_url(self):
href = 'http://www.rbc.org/index.aspx'
soup = self.index_to_soup(href)
a = soup.find('a',attrs={'id':'ctl00_hlTodaysDevotionalImage'})
if a :
cover_url = a.img['src']
return cover_url

View File

@ -0,0 +1,95 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe
class SomethingAwfulRecipe(BasicNewsRecipe):
title = 'Something Awful'
__author__ = 'atordo'
description = 'The Internet Makes You Stupid'
cover_url = 'http://i.somethingawful.com/core/head-logo-bluegren.png'
masthead_url = 'http://i.somethingawful.com/core/head-logo-bluegren.png'
oldest_article = 7
max_articles_per_feed = 50
auto_cleanup = False
no_stylesheets = True
remove_javascript = True
language = 'en'
use_embedded_content = False
remove_empty_feeds = True
publication_type = 'magazine'
recursions = 1
match_regexps = [r'\?page=\d+$']
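# Truncate the raw HTML at the "content end" marker, closing the body
# immediately so footer and ad markup never reach the parser.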
preprocess_regexps = [
(re.compile(r'<!-- content end-->.*</body>', re.DOTALL), lambda match: '</body>')
]
remove_attributes = [ 'align', 'alt', 'valign' ]
keep_only_tags = [
dict(name='div', attrs={'class':'content_area'})
# ,dict(name='p', attrs={'class':'pagebar'})
]
remove_tags = [
dict(name='div', attrs={'class':['column_box','featurenav','social']})
,dict(name='div', attrs={'id':'sidebar'})
,dict(name='a', attrs={'class':'curpage'})
]
extra_css = '''
.byline{font-size:small} .font_big{font-size:large}
.compat5{font-weight:bold} .accentbox{background-color:#E3E3E3; border:solid black}
img{margin-bottom:0.4em; display:block; margin-left: auto; margin-right:auto}
'''
#feeds = [(u'Something Awful', u'http://www.somethingawful.com/rss/index.rss.xml')]
feeds = [
('Photoshop Phriday', 'http://www.somethingawful.com/rss/photoshop-phriday.rss.xml')
,('Comedy Goldmine', 'http://www.somethingawful.com/rss/comedy-goldmine.rss.xml')
#,('The Flash Tub', 'http://www.somethingawful.com/rss/flash-tub.rss.xml')
,('Awful Link of the Day', 'http://www.somethingawful.com/rss/awful-links.rss.xml')
,('Fake Something Awfuls', 'http://www.somethingawful.com/rss/fake-something-awful.rss.xml')
,('The Barbarian\'s Dojo', 'http://www.somethingawful.com/rss/steve-sumner.rss.xml')
,('The Great Goon Database', 'http://www.somethingawful.com/rss/great-goon-database.rss.xml')
,('Livejournal Theater', 'http://www.somethingawful.com/rss/livejournal-theater.rss.xml')
,('Joystick Token Healthpack', 'http://www.somethingawful.com/rss/token-healthpack.rss.xml')
#,('Webcam Ward', 'http://www.somethingawful.com/rss/webcam-ward.rss.xml')
,('Features / Articles', 'http://www.somethingawful.com/rss/feature-articles.rss.xml')
,('Guides', 'http://www.somethingawful.com/rss/guides.rss.xml')
,('Legal Threats', 'http://www.somethingawful.com/rss/legal-threats.rss.xml')
,('Pranks [ICQ]', 'http://www.somethingawful.com/rss/icq-pranks.rss.xml')
,('State Og', 'http://www.somethingawful.com/rss/state-og.rss.xml')
,('Everquest', 'http://www.somethingawful.com/rss/everquest.rss.xml')
,('Pranks [Email]', 'http://www.somethingawful.com/rss/email-pranks.rss.xml')
,('The Weekend Web', 'http://www.somethingawful.com/rss/weekend-web.rss.xml')
,('Daily Dirt', 'http://www.somethingawful.com/rss/daily-dirt.rss.xml')
,('The Art of Warcraft', 'http://www.somethingawful.com/rss/art-of-warcraft.rss.xml')
,('Video Game Article', 'http://www.somethingawful.com/rss/video-game-article.rss.xml')
,('The Awful Movie Database', 'http://www.somethingawful.com/rss/awful-movie-database.rss.xml')
#,('Downloads', 'http://www.somethingawful.com/rss/downloads.rss.xml')
,('Pregame Wrapup', 'http://www.somethingawful.com/rss/pregame-wrapup.rss.xml')
,('Second Life Safari', 'http://www.somethingawful.com/rss/second-life-safari.rss.xml')
,('The Hogosphere', 'http://www.somethingawful.com/rss/hogosphere.rss.xml')
,('Front Page News', 'http://www.somethingawful.com/rss/news.rss.xml')
,('Forum Friday\'s Monday', 'http://www.somethingawful.com/rss/forum-fridays.rss.xml')
,('Cliff Yablonski Hates You', 'http://www.somethingawful.com/rss/cliff-yablonski.rss.xml')
,('Manifestos From the Internet', 'http://www.somethingawful.com/rss/manifestos-from-internet.rss.xml')
,('Johnston Checks In', 'http://www.somethingawful.com/rss/levi-johnston.rss.xml')
,('Twitter Tuesday', 'http://www.somethingawful.com/rss/twitter-tuesday.rss.xml')
,('Music Article', 'http://www.somethingawful.com/rss/music-article.rss.xml')
,('Reviews [Games]', 'http://www.somethingawful.com/rss/game-reviews.rss.xml')
,('Reviews [Movies]', 'http://www.somethingawful.com/rss/movie-reviews.rss.xml')
,('Rom Pit', 'http://www.somethingawful.com/rss/rom-pit.rss.xml')
,('Truth Media [Reviews]', 'http://www.somethingawful.com/rss/truth-media-reviews.rss.xml')
,('Truth Media [Flames]', 'http://www.somethingawful.com/rss/truth-media-flames.rss.xml')
,('Awful Anime', 'http://www.somethingawful.com/rss/hentai-game-reviews.rss.xml')
,('The Horrors of Pornography', 'http://www.somethingawful.com/rss/horrors-of-porn.rss.xml')
,('Your Band Sucks', 'http://www.somethingawful.com/rss/your-band-sucks.rss.xml')
,('Fashion SWAT', 'http://www.somethingawful.com/rss/fashion-swat.rss.xml')
#,('AwfulVision', 'http://www.somethingawful.com/rss/awfulvision.rss.xml')
,('MMO Roulette', 'http://www.somethingawful.com/rss/mmo-roulette.rss.xml')
,('The Most Awful', 'http://www.somethingawful.com/rss/most-awful.rss.xml')
,('Garbage Day', 'http://www.somethingawful.com/rss/garbage-day.rss.xml')
,('WTF, D&D!?', 'http://www.somethingawful.com/rss/dungeons-and-dragons.rss.xml')
,('Current Releases', 'http://www.somethingawful.com/rss/current-movie-reviews.rss.xml')
]

View File

@ -1,5 +1,5 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
__copyright__ = '2010-2012, Darko Miletic <darko.miletic at gmail.com>'
'''
theeconomiccollapseblog.com
'''
@ -11,7 +11,7 @@ class TheEconomicCollapse(BasicNewsRecipe):
description = 'Are You Prepared For The Coming Economic Collapse And The Next Great Depression?'
publisher = 'The Economic Collapse'
category = 'news, politics, USA, economy'
oldest_article = 2
oldest_article = 7
max_articles_per_feed = 200
no_stylesheets = True
encoding = 'utf8'
@ -20,7 +20,7 @@ class TheEconomicCollapse(BasicNewsRecipe):
remove_empty_feeds = True
extra_css = """
body{font-family: Tahoma,Arial,sans-serif }
img{margin-bottom: 0.4em}
img{margin-bottom: 0.4em; display: block;}
"""
conversion_options = {
@ -35,12 +35,9 @@ class TheEconomicCollapse(BasicNewsRecipe):
,dict(name=['iframe','object','embed','meta','link','base'])
]
remove_attributes=['lang','onclick','width','height']
keep_only_tags=[dict(attrs={'class':['post-headline','post-bodycopy clearfix','']})]
keep_only_tags=[
dict(name='div', attrs={'class':'post-headline'}),
dict(name='div', attrs={'class':lambda x: x and 'post-bodycopy' in x.split()})
]
feeds = [(u'Posts', u'http://theeconomiccollapseblog.com/feed')]
def preprocess_html(self, soup):
for item in soup.findAll(style=True):
del item['style']
return self.adeify_images(soup)

Binary file not shown.

View File

@ -0,0 +1,11 @@
<!DOCTYPE html>
<html>
<head>
<title>blank</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
</head>
<body>
<div>&nbsp;</div>
</body>
</html>

View File

@ -24,6 +24,15 @@ def get_rsync_pw():
return open('/home/kovid/work/kde/conf/buildbot').read().partition(
':')[-1].strip()
def is_vm_running(name):
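# A running VMware guest shows up as a vmware-vmx process whose command
# line includes the VM's name as a path component.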
pat = '/%s/'%name
pids= [pid for pid in os.listdir('/proc') if pid.isdigit()]
for pid in pids:
cmdline = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
if 'vmware-vmx' in cmdline and pat in cmdline:
return True
return False
class Rsync(Command):
description = 'Sync source tree from development machine'
@ -46,14 +55,16 @@ class Push(Command):
def run(self, opts):
from threading import Thread
threads = []
for host in (
r'Owner@winxp:/cygdrive/c/Documents\ and\ Settings/Owner/calibre',
'kovid@ox:calibre',
r'kovid@win7:/cygdrive/c/Users/kovid/calibre',
):
for host, vmname in {
r'Owner@winxp:/cygdrive/c/Documents\ and\ Settings/Owner/calibre':'winxp',
'kovid@ox:calibre':None,
r'kovid@win7:/cygdrive/c/Users/kovid/calibre':'Windows 7',
}.iteritems():
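# Hosts with no associated VM (vmname None) are always synced; VMs are
# pushed to only when the guest is actually running.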
if vmname is None or is_vm_running(vmname):
rcmd = BASE_RSYNC + EXCLUDES + ['.', host]
print '\n\nPushing to:', host, '\n'
threads.append(Thread(target=subprocess.check_call, args=(rcmd,)))
print '\n\nPushing to:', vmname or host, '\n'
threads.append(Thread(target=subprocess.check_call, args=(rcmd,),
kwargs={'stdout':open(os.devnull, 'wb')}))
threads[-1].start()
for thread in threads:
thread.join()
@ -118,13 +129,7 @@ class VMInstaller(Command):
def run_vm(self):
pat = '/%s/'%(self.VM_CHECK or self.VM_NAME)
pids= [pid for pid in os.listdir('/proc') if pid.isdigit()]
for pid in pids:
cmdline = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
if 'vmware-vmx' in cmdline and pat in cmdline:
return
if is_vm_running(self.VM_CHECK or self.VM_NAME): return
self.__p = subprocess.Popen([self.vm])
def start_vm(self, sleep=75):

View File

@ -27,6 +27,7 @@ binary_includes = [
'/usr/lib/libwmflite-0.2.so.7',
'/usr/lib/liblcms.so.1',
'/usr/lib/liblzma.so.0',
'/usr/lib/libexpat.so.1',
'/usr/lib/libunrar.so',
'/usr/lib/libsqlite3.so.0',
'/usr/lib/libmng.so.1',

View File

@ -97,7 +97,7 @@ Now, run configure and make::
-no-plugin-manifests is needed so that loading the plugins does not fail looking for the CRT assembly
configure -opensource -release -ltcg -qt-zlib -qt-libmng -qt-libpng -qt-libtiff -qt-libjpeg -release -platform win32-msvc2008 -no-qt3support -webkit -xmlpatterns -no-phonon -no-style-plastique -no-style-cleanlooks -no-style-motif -no-style-cde -no-declarative -no-scripttools -no-audio-backend -no-multimedia -no-dbus -no-openvg -no-opengl -no-qt3support -confirm-license -nomake examples -nomake demos -nomake docs -no-plugin-manifests -openssl -I Q:\openssl\include -L Q:\openssl\lib && nmake
configure -opensource -release -qt-zlib -qt-libmng -qt-libpng -qt-libtiff -qt-libjpeg -release -platform win32-msvc2008 -no-qt3support -webkit -xmlpatterns -no-phonon -no-style-plastique -no-style-cleanlooks -no-style-motif -no-style-cde -no-declarative -no-scripttools -no-audio-backend -no-multimedia -no-dbus -no-openvg -no-opengl -no-qt3support -confirm-license -nomake examples -nomake demos -nomake docs -no-plugin-manifests -openssl -I Q:\openssl\include -L Q:\openssl\lib && nmake
Add the path to the bin folder inside the Qt dir to your system PATH.

File diff suppressed because it is too large

View File

@ -13,31 +13,31 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-11-25 14:01+0000\n"
"PO-Revision-Date: 2011-09-27 18:14+0000\n"
"Last-Translator: Kovid Goyal <Unknown>\n"
"PO-Revision-Date: 2012-06-14 09:06+0000\n"
"Last-Translator: Eugene Marshal <Unknown>\n"
"Language-Team: Russian <debian-l10n-russian@lists.debian.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-11-26 05:35+0000\n"
"X-Generator: Launchpad (build 14381)\n"
"X-Launchpad-Export-Date: 2012-06-15 04:42+0000\n"
"X-Generator: Launchpad (build 15414)\n"
"Language: ru\n"
#. name for aaa
msgid "Ghotuo"
msgstr ""
msgstr "Гхотуо"
#. name for aab
msgid "Alumu-Tesu"
msgstr ""
msgstr "Алуму-тесу"
#. name for aac
msgid "Ari"
msgstr ""
msgstr "Ари"
#. name for aad
msgid "Amal"
msgstr ""
msgstr "Амал"
#. name for aae
msgid "Albanian; Arbëreshë"
@ -45,11 +45,11 @@ msgstr ""
#. name for aaf
msgid "Aranadan"
msgstr ""
msgstr "Аранадан"
#. name for aag
msgid "Ambrak"
msgstr ""
msgstr "Амбрак"
#. name for aah
msgid "Arapesh; Abu'"
@ -57,23 +57,23 @@ msgstr ""
#. name for aai
msgid "Arifama-Miniafia"
msgstr ""
msgstr "Арифама-Миниафиа"
#. name for aak
msgid "Ankave"
msgstr ""
msgstr "Анкаве"
#. name for aal
msgid "Afade"
msgstr ""
msgstr "Афаде"
#. name for aam
msgid "Aramanik"
msgstr ""
msgstr "Араманик"
#. name for aan
msgid "Anambé"
msgstr ""
msgstr "Анамбе"
#. name for aao
msgid "Arabic; Algerian Saharan"
@ -93,7 +93,7 @@ msgstr "Афар"
#. name for aas
msgid "Aasáx"
msgstr ""
msgstr "Асакс"
#. name for aat
msgid "Albanian; Arvanitika"
@ -101,27 +101,27 @@ msgstr ""
#. name for aau
msgid "Abau"
msgstr ""
msgstr "Абау"
#. name for aaw
msgid "Solong"
msgstr ""
msgstr "Солонг"
#. name for aax
msgid "Mandobo Atas"
msgstr ""
msgstr "Мандобо Атас"
#. name for aaz
msgid "Amarasi"
msgstr ""
msgstr "Амараси"
#. name for aba
msgid "Abé"
msgstr ""
msgstr "Абе"
#. name for abb
msgid "Bankon"
msgstr ""
msgstr "Банкон"
#. name for abc
msgid "Ayta; Ambala"
@ -129,7 +129,7 @@ msgstr ""
#. name for abd
msgid "Manide"
msgstr ""
msgstr "Мэнайд"
#. name for abe
msgid "Abnaki; Western"
@ -137,11 +137,11 @@ msgstr ""
#. name for abf
msgid "Abai Sungai"
msgstr ""
msgstr "Абаи Сунгаи"
#. name for abg
msgid "Abaga"
msgstr ""
msgstr "Абага"
#. name for abh
msgid "Arabic; Tajiki"
@ -149,11 +149,11 @@ msgstr ""
#. name for abi
msgid "Abidji"
msgstr ""
msgstr "Абиджи"
#. name for abj
msgid "Aka-Bea"
msgstr ""
msgstr "Ака-Беа"
#. name for abk
msgid "Abkhazian"
@ -161,19 +161,19 @@ msgstr "Абхазский"
#. name for abl
msgid "Lampung Nyo"
msgstr ""
msgstr "Лампунг Ньё"
#. name for abm
msgid "Abanyom"
msgstr ""
msgstr "Абанйом"
#. name for abn
msgid "Abua"
msgstr ""
msgstr "Абуа"
#. name for abo
msgid "Abon"
msgstr ""
msgstr "Абон"
#. name for abp
msgid "Ayta; Abellen"
@ -185,7 +185,7 @@ msgstr ""
#. name for abr
msgid "Abron"
msgstr ""
msgstr "Аброн"
#. name for abs
msgid "Malay; Ambonese"

View File

@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
numeric_version = (0, 8, 56)
numeric_version = (0, 8, 57)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"
@ -34,6 +34,7 @@ isfrozen = hasattr(sys, 'frozen')
isunix = isosx or islinux
isportable = os.environ.get('CALIBRE_PORTABLE_BUILD', None) is not None
ispy3 = sys.version_info.major > 2
isxp = iswindows and sys.getwindowsversion().major < 6
try:
preferred_encoding = locale.getpreferredencoding()

View File

@ -55,6 +55,7 @@ class ANDROID(USBMS):
0x7086 : [0x0226], 0x70a8: [0x9999], 0x42c4 : [0x216],
0x70c6 : [0x226],
0x4316 : [0x216],
0x4317 : [0x216],
0x42d6 : [0x216],
0x42d7 : [0x216],
0x42f7 : [0x216],
@ -169,6 +170,9 @@ class ANDROID(USBMS):
# Pantech
0x10a9 : { 0x6050 : [0x227] },
# Prestigio
0x2207 : { 0 : [0x222] },
}
EBOOK_DIR_MAIN = ['eBooks/import', 'wordplayer/calibretransfer', 'Books',
'sdcard/ebooks']
@ -182,7 +186,8 @@ class ANDROID(USBMS):
'TELECHIP', 'HUAWEI', 'T-MOBILE', 'SEMC', 'LGE', 'NVIDIA',
'GENERIC-', 'ZTE', 'MID', 'QUALCOMM', 'PANDIGIT', 'HYSTON',
'VIZIO', 'GOOGLE', 'FREESCAL', 'KOBO_INC', 'LENOVO', 'ROCKCHIP',
'POCKET', 'ONDA_MID', 'ZENITHIN', 'INGENIC', 'PMID701C', 'PD']
'POCKET', 'ONDA_MID', 'ZENITHIN', 'INGENIC', 'PMID701C', 'PD',
'PMP5097C']
WINDOWS_MAIN_MEM = ['ANDROID_PHONE', 'A855', 'A853', 'INC.NEXUS_ONE',
'__UMS_COMPOSITE', '_MB200', 'MASS_STORAGE', '_-_CARD', 'SGH-I897',
'GT-I9000', 'FILE-STOR_GADGET', 'SGH-T959_CARD', 'SGH-T959', 'SAMSUNG_ANDROID',
@ -198,7 +203,7 @@ class ANDROID(USBMS):
'GT-I9003_CARD', 'XT912', 'FILE-CD_GADGET', 'RK29_SDK', 'MB855',
'XT910', 'BOOK_A10', 'USB_2.0_DRIVER', 'I9100T', 'P999DW',
'KTABLET_PC', 'INGENIC', 'GT-I9001_CARD', 'USB_2.0_DRIVER',
'GT-S5830L_CARD', 'UNIVERSE', 'XT875']
'GT-S5830L_CARD', 'UNIVERSE', 'XT875', 'PRO', '.KOBO_VOX']
WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
'FILE-STOR_GADGET', 'SGH-T959_CARD', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD',
@ -207,7 +212,7 @@ class ANDROID(USBMS):
'A1-07___C0541A4F', 'XT912', 'MB855', 'XT910', 'BOOK_A10_CARD',
'USB_2.0_DRIVER', 'I9100T', 'P999DW_SD_CARD', 'KTABLET_PC',
'FILE-CD_GADGET', 'GT-I9001_CARD', 'USB_2.0_DRIVER', 'XT875',
'UMS_COMPOSITE']
'UMS_COMPOSITE', 'PRO', '.KOBO_VOX']
OSX_MAIN_MEM = 'Android Device Main Memory'

View File

@ -5,7 +5,11 @@ __license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, dbus
# First repeat after me: Linux desktop infrastructure is designed by a
# committee of rabid monkeys on crack. They would not know a decent desktop if
# it was driving the rabid monkey extermination truck that runs them over.
import os, dbus, re
def node_mountpoint(node):
@ -56,6 +60,87 @@ class UDisks(object):
d = self.device(parent)
d.DriveEject([])
class NoUDisks2(Exception):
pass
class UDisks2(object):
BLOCK = 'org.freedesktop.UDisks2.Block'
FILESYSTEM = 'org.freedesktop.UDisks2.Filesystem'
def __init__(self):
self.bus = dbus.SystemBus()
try:
self.bus.get_object('org.freedesktop.UDisks2',
'/org/freedesktop/UDisks2')
except dbus.exceptions.DBusException as e:
if getattr(e, '_dbus_error_name', None) == 'org.freedesktop.DBus.Error.ServiceUnknown':
raise NoUDisks2()
raise
def device(self, device_node_path):
device_node_path = os.path.realpath(device_node_path)
devname = device_node_path.split('/')[-1]
# First we try a direct object path
bd = self.bus.get_object('org.freedesktop.UDisks2',
'/org/freedesktop/UDisks2/block_devices/%s'%devname)
try:
device = bd.Get(self.BLOCK, 'Device',
dbus_interface='org.freedesktop.DBus.Properties')
device = bytearray(device).replace(b'\x00', b'').decode('utf-8')
except:
device = None
if device == device_node_path:
return bd
# Enumerate all devices known to UDisks
devs = self.bus.get_object('org.freedesktop.UDisks2',
'/org/freedesktop/UDisks2/block_devices')
xml = devs.Introspect(dbus_interface='org.freedesktop.DBus.Introspectable')
for dev in re.finditer(r'name=[\'"](.+?)[\'"]', type(u'')(xml)):
bd = self.bus.get_object('org.freedesktop.UDisks2',
'/org/freedesktop/UDisks2/block_devices/%s'%dev.group(1))
try:
device = bd.Get(self.BLOCK, 'Device',
dbus_interface='org.freedesktop.DBus.Properties')
device = bytearray(device).replace(b'\x00', b'').decode('utf-8')
except:
device = None
if device == device_node_path:
return bd
raise ValueError('%r not known to UDisks2'%device_node_path)
def mount(self, device_node_path):
d = self.device(device_node_path)
mount_options = ['rw', 'noexec', 'nosuid',
'sync', 'nodev', 'uid=%d'%os.geteuid(), 'gid=%d'%os.getegid()]
try:
return unicode(d.Mount(
{
'auth.no_user_interaction':True,
'options':','.join(mount_options)
},
dbus_interface=self.FILESYSTEM))
except:
# May be already mounted, check
mp = node_mountpoint(str(device_node_path))
if mp is None:
raise
return mp
def get_udisks(ver=None):
if ver is None:
try:
u = UDisks2()
except NoUDisks2:
u = UDisks()
return u
return UDisks2() if ver == 2 else UDisks()
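# Typical use (device node illustrative): get_udisks() prefers UDisks2 and
# silently falls back to the older UDisks service:
#   u = get_udisks()
#   mountpoint = u.mount('/dev/sdb1')
#   u.unmount('/dev/sdb1')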
def mount(node_path):
u = UDisks()
u.mount(node_path)
@ -68,15 +153,19 @@ def umount(node_path):
u = UDisks()
u.unmount(node_path)
if __name__ == '__main__':
def test_udisks(ver=None):
import sys
dev = sys.argv[1]
print 'Testing with node', dev
u = UDisks()
u = get_udisks(ver=ver)
print 'Using Udisks:', u.__class__.__name__
print 'Mounted at:', u.mount(dev)
print 'Unmounting'
u.unmount(dev)
print 'Ejecting:'
u.eject(dev)
if __name__ == '__main__':
test_udisks()

View File

@ -20,6 +20,11 @@ class LRFOptions(object):
except:
return ''
m = oeb.metadata
for x in ('left', 'top', 'right', 'bottom'):
attr = 'margin_'+x
val = getattr(opts, attr)
if val < 0:
setattr(opts, attr, 0)
self.title = None
self.author = self.publisher = _('Unknown')
self.title_sort = self.author_sort = ''

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
__license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
'''
@ -61,7 +61,7 @@ ORIENTATIONS = ['portrait', 'landscape']
class PDFOutput(OutputFormatPlugin):
name = 'PDF Output'
author = 'John Schember and Kovid Goyal'
author = 'Kovid Goyal'
file_type = 'pdf'
options = set([
@ -97,24 +97,6 @@ class PDFOutput(OutputFormatPlugin):
self.metadata = oeb_book.metadata
self.cover_data = None
# Remove page-break-before on <body> element as it causes
# blank pages in PDF Output
from calibre.ebooks.oeb.base import XPath
stylesheet = self.oeb.manifest.main_stylesheet
if stylesheet is not None:
from cssutils.css import CSSRule
classes = set(['.calibre'])
for x in self.oeb.spine:
root = x.data
body = XPath('//h:body[@class]')(root)
if body:
classes.add('.'+body[0].get('class'))
for rule in stylesheet.data.cssRules.rulesOfType(CSSRule.STYLE_RULE):
if rule.selectorList.selectorText in classes:
rule.style.removeProperty('page-break-before')
rule.style.removeProperty('page-break-after')
if input_plugin.is_image_collection:
log.debug('Converting input as an image collection...')
@ -128,16 +110,12 @@ class PDFOutput(OutputFormatPlugin):
self.write(ImagePDFWriter, images)
def get_cover_data(self):
g, m = self.oeb.guide, self.oeb.manifest
if 'titlepage' not in g:
if 'cover' in g:
href = g['cover'].href
from calibre.ebooks.oeb.base import urlnormalize
for item in m:
if item.href == urlnormalize(href):
oeb = self.oeb
if (oeb.metadata.cover and
unicode(oeb.metadata.cover[0]) in oeb.manifest.ids):
cover_id = unicode(oeb.metadata.cover[0])
item = oeb.manifest.ids[cover_id]
self.cover_data = item.data
if not isinstance(self.cover_data, basestring):
self.cover_data = None
def convert_text(self, oeb_book):
from calibre.ebooks.pdf.writer import PDFWriter

View File

@ -446,7 +446,7 @@ class HTMLPreProcessor(object):
# Remove page links
(re.compile(r'<a name=\d+></a>', re.IGNORECASE), lambda match: ''),
# Remove <hr> tags
(re.compile(r'<hr.*?>', re.IGNORECASE), lambda match: '<br>'),
(re.compile(r'<hr.*?>', re.IGNORECASE), lambda match: ''),
# Remove gray background
(re.compile(r'<BODY[^<>]+>'), lambda match : '<BODY>'),

View File

@ -28,6 +28,7 @@ class EXTHHeader(object): # {{{
self.start_offset = None
left = self.num_items
self.kf8_header = None
self.uuid = self.cdetype = None
while left > 0:
left -= 1

View File

@ -224,7 +224,18 @@ def parse_index_record(table, data, control_byte_count, tags, codec,
for j in xrange(entry_count):
start, end = idx_positions[j:j+2]
rec = data[start:end]
# Sometimes (in the guide table if the type attribute has non ascii
# values) the ident is UTF-16 encoded. Try to handle that.
try:
ident, consumed = decode_string(rec, codec=codec, ordt_map=ordt_map)
except UnicodeDecodeError:
ident, consumed = decode_string(rec, codec='utf-16', ordt_map=ordt_map)
if u'\x00' in ident:
try:
ident, consumed = decode_string(rec, codec='utf-16',
ordt_map=ordt_map)
except UnicodeDecodeError:
ident = ident.replace(u'\x00', u'')
rec = rec[consumed:]
tag_map = get_tag_map(control_byte_count, tags, rec, strict=strict)
table[ident] = tag_map
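The NUL check above catches the case where a UTF-16 encoded ident happens to decode without raising: for ASCII text every byte is a valid single-byte UTF-8 sequence, so the first decode succeeds but interleaves NULs. A standalone illustration (the guide type is just an example):

rec = u'other.ms-firstread'.encode('utf-16-le')
print repr(rec.decode('utf-8'))      # u'o\x00t\x00h\x00e\x00r\x00...'
print repr(rec.decode('utf-16-le'))  # u'other.ms-firstread'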

View File

@ -572,29 +572,22 @@ class CanonicalFragmentIdentifier
null
# }}}
at_current: () -> # {{{
[winx, winy] = window_scroll_pos()
[winw, winh] = [window.innerWidth, window.innerHeight]
max = Math.max
winw = max(winw, 400)
winh = max(winh, 600)
deltay = Math.floor(winh/50)
deltax = Math.floor(winw/25)
miny = max(-winy, -winh)
maxy = winh
minx = max(-winx, -winw)
maxx = winw
at_point: (ox, oy) ->
# The CFI at the specified point. Different to at() in that this method
# returns null if there is an error, and also calculates a point from
# the CFI and returns null if the calculated point is far from the
# original point.
dist = (p1, p2) ->
Math.sqrt(Math.pow(p1[0]-p2[0], 2) + Math.pow(p1[1]-p2[1], 2))
get_cfi = (ox, oy) ->
try
cfi = window.cfi.at(ox, oy)
point = window.cfi.point(cfi)
catch err
cfi = null
if cfi
if point.range != null
r = point.range
@ -617,12 +610,25 @@ class CanonicalFragmentIdentifier
return cfi
x_loop = (cury) ->
at_current: () -> # {{{
[winx, winy] = window_scroll_pos()
[winw, winh] = [window.innerWidth, window.innerHeight]
max = Math.max
winw = max(winw, 400)
winh = max(winh, 600)
deltay = Math.floor(winh/50)
deltax = Math.floor(winw/25)
miny = max(-winy, -winh)
maxy = winh
minx = max(-winx, -winw)
maxx = winw
x_loop = (cury) =>
for direction in [-1, 1]
delta = deltax * direction
curx = 0
until (direction < 0 and curx < minx) or (direction > 0 and curx > maxx)
cfi = get_cfi(curx, cury)
cfi = this.at_point(curx, cury)
if cfi
return cfi
curx += delta

View File

@ -0,0 +1,335 @@
#!/usr/bin/env coffee
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
###
Copyright 2012, Kovid Goyal <kovid@kovidgoyal.net>
Released under the GPLv3 License
###
log = (args...) -> # {{{
if args
msg = args.join(' ')
if window?.console?.log
window.console.log(msg)
else if process?.stdout?.write
process.stdout.write(msg + '\n')
# }}}
window_scroll_pos = (win=window) -> # {{{
if typeof(win.pageXOffset) == 'number'
x = win.pageXOffset
y = win.pageYOffset
else # IE < 9
if document.body and ( document.body.scrollLeft or document.body.scrollTop )
x = document.body.scrollLeft
y = document.body.scrollTop
else if document.documentElement and ( document.documentElement.scrollLeft or document.documentElement.scrollTop)
y = document.documentElement.scrollTop
x = document.documentElement.scrollLeft
return [x, y]
# }}}
viewport_to_document = (x, y, doc=window?.document) -> # {{{
until doc == window.document
# We are in a frame
frame = doc.defaultView.frameElement
rect = frame.getBoundingClientRect()
x += rect.left
y += rect.top
doc = frame.ownerDocument
win = doc.defaultView
[wx, wy] = window_scroll_pos(win)
x += wx
y += wy
return [x, y]
# }}}
absleft = (elem) -> # {{{
r = elem.getBoundingClientRect()
return viewport_to_document(r.left, 0, elem.ownerDocument)[0]
# }}}
class PagedDisplay
###
This class is a namespace to expose functions via the
window.paged_display object. The most important functions are:
set_geometry(): sets the parameters used to layout text in paged mode
layout(): causes the currently loaded document to be laid out in columns.
###
constructor: () ->
if not this instanceof arguments.callee
throw new Error('PagedDisplay constructor called as function')
this.set_geometry()
this.page_width = 0
this.screen_width = 0
this.in_paged_mode = false
this.current_margin_side = 0
this.is_full_screen_layout = false
set_geometry: (cols_per_screen=1, margin_top=20, margin_side=40, margin_bottom=20) ->
this.margin_top = margin_top
this.margin_side = margin_side
this.margin_bottom = margin_bottom
this.cols_per_screen = cols_per_screen
layout: () ->
body_style = window.getComputedStyle(document.body)
# When laying body out in columns, webkit bleeds the top margin of the
# first block element out above the columns, leading to an extra top
# margin for the page. We compensate for that here. Computing the
# boundingrect of body is very expensive with column layout, so we do
# it before the column layout is applied.
first_layout = false
if not this.in_paged_mode
document.body.style.marginTop = '0px'
extra_margin = document.body.getBoundingClientRect().top
margin_top = (this.margin_top - extra_margin) + 'px'
# Check if the current document is a full screen layout like
# cover, if so we treat it specially.
single_screen = (document.body.scrollWidth < window.innerWidth + 25 and document.body.scrollHeight < window.innerHeight + 25)
first_layout = true
else
# resize event
margin_top = body_style.marginTop
ww = window.innerWidth
# Calculate the column width so that cols_per_screen columns fit in the
# window in such a way that the right margin of the last column is <=
# side_margin (it may be less if the window width is not a
# multiple of n*(col_width+2*side_margin)).
n = this.cols_per_screen
adjust = ww - Math.floor(ww/n)*n
# Ensure that the margins are large enough that the adjustment does not
# cause them to become negative semidefinite
sm = Math.max(2*adjust, this.margin_side)
# Minimum column width, for the cases when the window is too
# narrow
col_width = Math.max(100, ((ww - adjust)/n) - 2*sm)
this.page_width = col_width + 2*sm
this.screen_width = this.page_width * this.cols_per_screen
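# Worked example with illustrative numbers: ww = 1000 and n = 2 with
# margin_side = 40 give adjust = 0, sm = 40, col_width = 420 and
# page_width = 500: two 420px columns with an 80px gap and 40px side
# margins exactly fill the 1000px window.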
fgcolor = body_style.getPropertyValue('color')
bs = document.body.style
bs.setProperty('-webkit-column-gap', (2*sm)+'px')
bs.setProperty('-webkit-column-width', col_width+'px')
bs.setProperty('-webkit-column-rule-color', fgcolor)
bs.setProperty('overflow', 'visible')
bs.setProperty('height', (window.innerHeight - this.margin_top - this.margin_bottom) + 'px')
bs.setProperty('width', (window.innerWidth - 2*sm)+'px')
bs.setProperty('margin-top', margin_top)
bs.setProperty('margin-bottom', this.margin_bottom+'px')
bs.setProperty('margin-left', sm+'px')
bs.setProperty('margin-right', sm+'px')
for edge in ['left', 'right', 'top', 'bottom']
bs.setProperty('padding-'+edge, '0px')
bs.setProperty('border-'+edge+'-width', '0px')
bs.setProperty('min-width', '0')
bs.setProperty('max-width', 'none')
bs.setProperty('min-height', '0')
bs.setProperty('max-height', 'none')
# Convert page-breaks to column-breaks
for sheet in document.styleSheets
for rule in sheet.rules
if rule.type == 1 # CSSStyleRule
for prop in ['page-break-before', 'page-break-after', 'page-break-inside']
val = rule.style.getPropertyValue(prop)
if val
cprop = '-webkit-column-' + prop.substr(5)
priority = rule.style.getPropertyPriority(prop)
rule.style.setProperty(cprop, val, priority)
if first_layout
# Because of a bug in webkit column mode, svg elements defined with
# width 100% are wider than body and lead to a blank page after the
# current page (when cols_per_screen == 1). Similarly img elements
# with height=100% overflow the first column
has_svg = document.getElementsByTagName('svg').length > 0
only_img = document.getElementsByTagName('img').length == 1 and document.getElementsByTagName('div').length < 2 and document.getElementsByTagName('p').length < 2
this.is_full_screen_layout = (only_img or has_svg) and single_screen and document.body.scrollWidth > document.body.clientWidth
this.in_paged_mode = true
this.current_margin_side = sm
return sm
scroll_to_pos: (frac) ->
# Scroll to the position represented by frac (number between 0 and 1)
xpos = Math.floor(document.body.scrollWidth * frac)
this.scroll_to_xpos(xpos)
scroll_to_xpos: (xpos) ->
# Scroll so that the column containing xpos is the left most column in
# the viewport
if typeof(xpos) != 'number'
log(xpos, 'is not a number, cannot scroll to it!')
return
if this.is_full_screen_layout
window.scrollTo(0, 0)
return
pos = 0
until (pos <= xpos < pos + this.page_width)
pos += this.page_width
limit = document.body.scrollWidth - this.screen_width
pos = limit if pos > limit
window.scrollTo(pos, 0)
current_pos: (frac) ->
# The current scroll position as a fraction between 0 and 1
limit = document.body.scrollWidth - window.innerWidth
if limit <= 0
return 0.0
return window.pageXOffset / limit
current_column_location: () ->
# The location of the left edge of the left most column currently
# visible in the viewport
if this.is_full_screen_layout
return 0
x = window.pageXOffset + Math.max(10, this.current_margin_side)
edge = Math.floor(x/this.page_width) * this.page_width
while edge < x
edge += this.page_width
return edge - this.page_width
next_screen_location: () ->
# The position to scroll to for the next screen (which could contain
# more than one page). Returns -1 if no further scrolling is possible.
if this.is_full_screen_layout
return -1
cc = this.current_column_location()
ans = cc + this.screen_width
limit = document.body.scrollWidth - window.innerWidth
if ans > limit
ans = if window.pageXOffset < limit then limit else -1
return ans
previous_screen_location: () ->
# The position to scroll to for the previous screen (which could contain
# more than one page). Returns -1 if no further scrolling is possible.
if this.is_full_screen_layout
return -1
cc = this.current_column_location()
ans = cc - this.screen_width
if ans < 0
# We ignore small scrolls (less than 15px) when going to previous
# screen
ans = if window.pageXOffset > 15 then 0 else -1
return ans
next_col_location: () ->
# The position to scroll to for the next column (same as
# next_screen_location() if columns per screen == 1). Returns -1 if no
# further scrolling is possible.
if this.is_full_screen_layout
return -1
cc = this.current_column_location()
ans = cc + this.page_width
limit = document.body.scrollWidth - window.innerWidth
if ans > limit
ans = if window.pageXOffset < limit then limit else -1
return ans
previous_col_location: () ->
# The position to scroll to for the previous column (same as
# previous_screen_location() if columns per screen == 1). Returns -1 if
# no further scrolling is possible.
if this.is_full_screen_layout
return -1
cc = this.current_column_location()
ans = cc - this.page_width
if ans < 0
ans = if window.pageXOffset > 0 then 0 else -1
return ans
jump_to_anchor: (name) ->
# Jump to the element identified by anchor name. Ensures that the left
# most column in the viewport is the column containing the start of the
# element and that the scroll position is at the start of the column.
elem = document.getElementById(name)
if not elem
elems = document.getElementsByName(name)
if elems
elem = elems[0]
if not elem
return
elem.scrollIntoView()
if this.in_paged_mode
# Ensure we are scrolled to the column containing elem
this.scroll_to_xpos(absleft(elem) + 5)
snap_to_selection: () ->
# Ensure that the viewport is positioned at the start of the column
# containing the start of the current selection
if this.in_paged_mode
sel = window.getSelection()
r = sel.getRangeAt(0).getBoundingClientRect()
node = sel.anchorNode
left = viewport_to_document(r.left, r.top, doc=node.ownerDocument)[0]
# Ensure we are scrolled to the column containing the start of the
# selection
this.scroll_to_xpos(left+5)
jump_to_cfi: (cfi) ->
# Jump to the position indicated by the specified Canonical Fragment
# Identifier (requires the cfi.coffee library). When in paged mode, the
# scroll is performed so that the column containing the position
# pointed to by the cfi is the left most column in the viewport
window.cfi.scroll_to(cfi, (x, y) =>
if this.in_paged_mode
this.scroll_to_xpos(x)
else
window.scrollTo(0, y)
)
current_cfi: () ->
# The Canonical Fragment Identifier at the current position, returns
# null if it could not be calculated. Requires the cfi.coffee library.
ans = null
if not window.cfi?
return ans
if this.in_paged_mode
c = this.current_column_location()
for x in [c, c-this.page_width, c+this.page_width]
# Try the current column, the previous column and the next
# column. Each column is tried from top to bottom.
[left, right] = [x, x + this.page_width]
if left < 0 or right > document.body.scrollWidth
continue
deltax = Math.floor(this.page_width/25)
deltay = Math.floor(window.innerHeight/25)
cury = this.margin_top
until cury >= (window.innerHeight - this.margin_bottom)
curx = left + this.current_margin_side
until curx >= (right - this.current_margin_side)
cfi = window.cfi.at_point(curx-window.pageXOffset, cury-window.pageYOffset)
if cfi
log('Viewport cfi:', cfi)
return cfi
curx += deltax
cury += deltay
else
try
ans = window.cfi.at_current()
if not ans
ans = null
catch err
log(err)
if ans
log('Viewport cfi:', ans)
return ans
if window?
window.paged_display = new PagedDisplay()
# TODO:
# Go to reference positions
# Indexing
# Resizing of images
# Full screen mode

View File

@ -31,10 +31,13 @@ def self_closing_sub(match):
return '<%s %s></%s>'%(match.group(1), match.group(2), match.group(1))
def load_html(path, view, codec='utf-8', mime_type=None,
pre_load_callback=lambda x:None):
pre_load_callback=lambda x:None, path_is_html=False):
from PyQt4.Qt import QUrl, QByteArray
if mime_type is None:
mime_type = guess_type(path)[0]
if path_is_html:
html = path
else:
with open(path, 'rb') as f:
html = f.read().decode(codec, 'replace')
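With the new path_is_html flag the first argument may be raw markup rather than a file path; a quick sketch, where view is any QWebView:

load_html(u'<p>Hello</p>', view, mime_type='text/html', path_is_html=True)
load_html('/tmp/book/ch1.html', view)  # unchanged: read and decode from disk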

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
'''
@ -87,11 +87,6 @@ def get_pdf_printer(opts, for_comic=False, output_file_name=None):
return printer
def get_printer_page_size(opts, for_comic=False):
printer = get_pdf_printer(opts, for_comic=for_comic)
size = printer.paperSize(QPrinter.Millimeter)
return size.width() / 10., size.height() / 10.
def draw_image_page(printer, painter, p, preserve_aspect_ratio=True):
page_rect = printer.pageRect()
if preserve_aspect_ratio:
@ -138,13 +133,16 @@ class PDFWriter(QObject): # {{{
self.view.setRenderHints(QPainter.Antialiasing|QPainter.TextAntialiasing|QPainter.SmoothPixmapTransform)
self.view.loadFinished.connect(self._render_html,
type=Qt.QueuedConnection)
for x in (Qt.Horizontal, Qt.Vertical):
self.view.page().mainFrame().setScrollBarPolicy(x,
Qt.ScrollBarAlwaysOff)
self.render_queue = []
self.combine_queue = []
self.tmp_path = PersistentTemporaryDirectory(u'_pdf_output_parts')
self.opts = opts
self.size = get_printer_page_size(opts)
self.cover_data = cover_data
self.paged_js = None
def dump(self, items, out_stream, pdf_metadata):
self.metadata = pdf_metadata
@ -176,19 +174,46 @@ class PDFWriter(QObject): # {{{
if ok:
item_path = os.path.join(self.tmp_path, '%i.pdf' % len(self.combine_queue))
self.logger.debug('\tRendering item %s as %i.pdf' % (os.path.basename(str(self.view.url().toLocalFile())), len(self.combine_queue)))
printer = get_pdf_printer(self.opts, output_file_name=item_path)
self.view.page().mainFrame().evaluateJavaScript('''
document.body.style.backgroundColor = "white";
''')
self.view.print_(printer)
printer.abort()
self.do_paged_render(item_path)
else:
# The document is so corrupt that we can't render the page.
self.loop.exit(0)
raise Exception('Document cannot be rendered.')
self._render_book()
def do_paged_render(self, outpath):
from PyQt4.Qt import QSize, QPainter
if self.paged_js is None:
from calibre.utils.resources import compiled_coffeescript
self.paged_js = compiled_coffeescript('ebooks.oeb.display.paged',
dynamic=False)
printer = get_pdf_printer(self.opts, output_file_name=outpath)
painter = QPainter(printer)
zoomx = printer.logicalDpiX()/self.view.logicalDpiX()
zoomy = printer.logicalDpiY()/self.view.logicalDpiY()
painter.scale(zoomx, zoomy)
pr = printer.pageRect()
evaljs = self.view.page().mainFrame().evaluateJavaScript
evaljs(self.paged_js)
self.view.page().setViewportSize(QSize(pr.width()/zoomx,
pr.height()/zoomy))
evaljs('''
document.body.style.backgroundColor = "white";
paged_display.set_geometry(1, 0, 0, 0);
paged_display.layout();
''')
mf = self.view.page().mainFrame()
while True:
mf.render(painter)
nsl = evaljs('paged_display.next_screen_location()').toInt()
if not nsl[1] or nsl[0] <= 0: break
evaljs('window.scrollTo(%d, 0)'%nsl[0])
printer.newPage()
painter.end()
printer.abort()
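# In PyQt4's QVariant API, toInt() returns an (int, ok) tuple, so nsl[1]
# is the conversion flag and nsl[0] the next column offset; the loop
# ends once the JS side reports no further screens (nsl[0] <= 0).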
def _delete_tmpdir(self):
if os.path.exists(self.tmp_path):
shutil.rmtree(self.tmp_path, True)
@ -237,7 +262,6 @@ class ImagePDFWriter(object):
def __init__(self, opts, log, cover_data=None):
self.opts = opts
self.log = log
self.size = get_printer_page_size(opts, for_comic=True)
def dump(self, items, out_stream, pdf_metadata):
f = PersistentTemporaryFile('_comic2pdf.pdf')

View File

@ -7,7 +7,7 @@ from urllib import unquote
from PyQt4.Qt import (QVariant, QFileInfo, QObject, SIGNAL, QBuffer, Qt,
QByteArray, QTranslator, QCoreApplication, QThread,
QEvent, QTimer, pyqtSignal, QDateTime, QDesktopServices,
QFileDialog, QFileIconProvider, QSettings,
QFileDialog, QFileIconProvider, QSettings, QColor,
QIcon, QApplication, QDialog, QUrl, QFont, QPalette)
ORG_NAME = 'KovidsBrain'
@ -108,6 +108,7 @@ gprefs.defaults['blocked_auto_formats'] = []
gprefs.defaults['auto_add_auto_convert'] = True
gprefs.defaults['ui_style'] = 'calibre' if iswindows or isosx else 'system'
gprefs.defaults['tag_browser_old_look'] = False
gprefs.defaults['book_list_tooltips'] = True
# }}}
NONE = QVariant() #: Null value to return from the data function of item models
@ -737,11 +738,18 @@ class Application(QApplication):
def load_calibre_style(self):
# On OS X QtCurve resets the palette, so we preserve it explicitly
orig_pal = QPalette(self.palette())
from calibre.constants import plugins
pi = plugins['progress_indicator'][0]
path = os.path.join(sys.extensions_location, 'calibre_style.'+(
'pyd' if iswindows else 'so'))
pi.load_style(path, 'Calibre')
# On OSX, on some machines, colors can be invalid. See https://bugs.launchpad.net/bugs/1014900
for role in (orig_pal.Button, orig_pal.Window):
c = orig_pal.brush(role).color()
if not c.isValid() or not c.toRgb().isValid():
orig_pal.setColor(role, QColor(u'lightgray'))
self.setPalette(orig_pal)
style = self.style()
icon_map = {}

View File

@ -329,10 +329,11 @@ class AddAction(InterfaceAction):
x.decode(preferred_encoding, 'replace') for x in
self._adder.merged_books])
info_dialog(self.gui, _('Merged some books'),
_('The following duplicate books were found and incoming '
_('The following %d duplicate books were found and incoming '
'book formats were processed and merged into your '
'Calibre database according to your automerge '
'settings:'), det_msg=books, show=True)
'settings:')%len(self._adder.merged_books),
det_msg=books, show=True)
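# Interpolating with % after _() keeps the %d placeholder inside the
# translatable string, so translators can reposition it.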
if getattr(self._adder, 'number_of_books_added', 0) > 0 or \
getattr(self._adder, 'merged_books', False):

View File

@ -116,6 +116,9 @@ class EditorWidget(QWebView): # {{{
ss = extra_shortcuts.get(wac, None)
if ss:
ac.setShortcut(QKeySequence(getattr(QKeySequence, ss)))
if wac == 'RemoveFormat':
ac.triggered.connect(self.remove_format_cleanup,
type=Qt.QueuedConnection)
self.action_color = QAction(QIcon(I('format-text-color')), _('Foreground color'),
self)
@ -227,6 +230,9 @@ class EditorWidget(QWebView): # {{{
js = 'document.execCommand("%s", false, null);' % cmd
frame.evaluateJavaScript(js)
def remove_format_cleanup(self):
self.html = self.html
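# Assigning html to itself round-trips the document through the html
# property's getter and setter, normalizing whatever markup
# execCommand('RemoveFormat') leaves behind.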
@dynamic_property
def html(self):

View File

@ -12,8 +12,8 @@ from PyQt4.Qt import QPixmap, SIGNAL
from calibre.gui2 import choose_images, error_dialog
from calibre.gui2.convert.metadata_ui import Ui_Form
from calibre.ebooks.metadata import authors_to_string, string_to_authors, \
MetaInformation
from calibre.ebooks.metadata import (authors_to_string, string_to_authors,
MetaInformation, title_sort)
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.ptempfile import PersistentTemporaryFile
from calibre.gui2.convert import Widget
@ -230,9 +230,19 @@ class MetadataWidget(Widget, Ui_Form):
Both may be None. Also returns a recommendation dictionary.
'''
recs = self.commit_options(save_defaults)
self.user_mi = self.get_metadata()
self.user_mi = mi = self.get_metadata()
self.cover_file = self.opf_file = None
if self.db is not None:
if mi.title == self.db.title(self.book_id, index_is_id=True):
mi.title_sort = self.db.title_sort(self.book_id, index_is_id=True)
else:
# Regenerate title sort taking into account book language
languages = self.db.languages(self.book_id, index_is_id=True)
if languages:
lang = languages.split(',')[0]
else:
lang = None
mi.title_sort = title_sort(mi.title, lang=lang)
self.db.set_metadata(self.book_id, self.user_mi)
self.mi, self.opf_file = create_opf_file(self.db, self.book_id)
if self.cover_changed and self.cover_data is not None:
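The language-aware regeneration matters because title_sort() moves the leading article for the given language; roughly (outputs assumed):

from calibre.ebooks.metadata import title_sort

title_sort(u'The Name of the Wind')             # -> u'Name of the Wind, The'
title_sort(u'El nombre del viento', lang='es')  # -> u'nombre del viento, El'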

View File

@ -68,6 +68,7 @@ class DeleteMatchingFromDeviceDialog(QDialog, Ui_DeleteMatchingFromDeviceDialog)
'<b>permanently deleted</b> from your '
'device. Please verify the list.')+'</p>')
self.buttonBox.accepted.connect(self.accepted)
self.buttonBox.rejected.connect(self.rejected)
self.table.cellClicked.connect(self.cell_clicked)
self.table.setSelectionMode(QAbstractItemView.NoSelection)
self.table.setColumnCount(7)

View File

@ -521,7 +521,8 @@ class PluginUpdaterDialog(SizePersistedDialog):
layout.addWidget(self.description)
self.button_box = QDialogButtonBox(QDialogButtonBox.Close)
self.button_box.rejected.connect(self._close_clicked)
self.button_box.rejected.connect(self.reject)
self.finished.connect(self._finished)
self.install_button = self.button_box.addButton(_('&Install'), QDialogButtonBox.AcceptRole)
self.install_button.setToolTip(_('Install the selected plugin'))
self.install_button.clicked.connect(self._install_clicked)
@ -584,12 +585,10 @@ class PluginUpdaterDialog(SizePersistedDialog):
self.configure_action.setEnabled(False)
self.plugin_view.addAction(self.configure_action)
def _close_clicked(self):
# Force our toolbar/action to be updated based on uninstalled updates
def _finished(self, *args):
if self.model:
update_plugins = filter(filter_upgradeable_plugins, self.model.display_plugins)
self.gui.recalc_update_label(len(update_plugins))
self.reject()
def _plugin_current_changed(self, current, previous):
if current.isValid():

View File

@ -100,9 +100,7 @@ def restore_database(db, parent=None):
'the database from the individual book '
'metadata. This is useful if the '
'database has been corrupted and you get a '
'blank list of books. Note that restoring only '
'restores books, not any settings stored in the '
'database, or any custom recipes.'
'blank list of books.'
'<p>Do you want to restore the database?')):
return False
db.conn.close()

View File

@ -312,7 +312,7 @@ class %(classname)s(%(base_class)s):
item = items[-1]
id_ = unicode(item.data(Qt.UserRole).toString())
title = unicode(item.data(Qt.DisplayRole).toString()).rpartition(' [')[0]
profile = get_builtin_recipe_by_id(id_)
profile = get_builtin_recipe_by_id(id_, download_recipe=True)
if profile is None:
raise Exception('Something weird happened')

View File

@ -82,6 +82,11 @@ class BooksView(QTableView): # {{{
files_dropped = pyqtSignal(object)
add_column_signal = pyqtSignal()
def viewportEvent(self, event):
if (event.type() == event.ToolTip and not gprefs['book_list_tooltips']):
return False
return QTableView.viewportEvent(self, event)
def __init__(self, parent, modelcls=BooksModel, use_edit_metadata_dialog=True):
QTableView.__init__(self, parent)

View File

@ -104,6 +104,7 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
r('ui_style', gprefs, restart_required=True, choices=
[(_('System default'), 'system'), (_('Calibre style'),
'calibre')])
r('book_list_tooltips', gprefs)
r('tag_browser_old_look', gprefs, restart_required=True)
r('cover_flow_queue_length', config, restart_required=True)

View File

@ -105,7 +105,7 @@
</property>
</widget>
</item>
<item row="6" column="0" colspan="2">
<item row="7" column="0" colspan="2">
<widget class="QGroupBox" name="groupBox_2">
<property name="title">
<string>&amp;Toolbar</string>
@ -140,7 +140,7 @@
</layout>
</widget>
</item>
<item row="7" column="0">
<item row="8" column="0">
<spacer name="verticalSpacer_3">
<property name="orientation">
<enum>Qt::Vertical</enum>
@ -153,7 +153,7 @@
</property>
</spacer>
</item>
<item row="5" column="0">
<item row="6" column="0">
<layout class="QHBoxLayout" name="horizontalLayout">
<item>
<widget class="QLabel" name="label_2">
@ -174,7 +174,7 @@
</item>
</layout>
</item>
<item row="5" column="1">
<item row="6" column="1">
<widget class="QPushButton" name="change_font_button">
<property name="text">
<string>Change &amp;font (needs restart)</string>
@ -194,6 +194,13 @@
<item row="0" column="1">
<widget class="QComboBox" name="opt_ui_style"/>
</item>
<item row="5" column="0">
<widget class="QCheckBox" name="opt_book_list_tooltips">
<property name="text">
<string>Show &amp;tooltips in the book list</string>
</property>
</widget>
</item>
</layout>
</widget>
<widget class="QWidget" name="tab_4">

View File

@ -378,6 +378,12 @@ class TagsModel(QAbstractItemModel): # {{{
collapse_model = 'partition'
collapse_template = tweaks['categories_collapsed_popularity_template']
def get_name_components(name):
components = [t.strip() for t in name.split('.') if t.strip()]
if len(components) == 0 or '.'.join(components) != name:
components = [name]
return components
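# For example: get_name_components('Fiction.Crime') -> ['Fiction', 'Crime'],
# but get_name_components('J. K. Rowling') -> ['J. K. Rowling'], because the
# stripped parts rejoin to 'J.K.Rowling', which no longer matches the
# original name, so it is treated as a single non-hierarchical component.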
def process_one_node(category, collapse_model, state_map): # {{{
collapse_letter = None
category_node = category
@ -437,24 +443,52 @@ class TagsModel(QAbstractItemModel): # {{{
for i in range(start, t[1]+1):
cl_list[i] = nc
if len(data[key]) > 0:
top_level_component = 'z' + data[key][0].original_name
else:
top_level_component = ''
last_idx = -collapse
category_is_hierarchical = not (
key in ['authors', 'publisher', 'news', 'formats', 'rating'] or
key not in self.db.prefs.get('categories_using_hierarchy', []))
for idx,tag in enumerate(data[key]):
components = None
if clear_rating:
tag.avg_rating = None
tag.state = state_map.get((tag.name, tag.category), 0)
if collapse_model != 'disable' and cat_len > collapse:
if collapse_model == 'partition':
if (idx % collapse) == 0:
d = {'first': tag}
# Only partition at the top level. This means that we must
# not do a break until the outermost component changes.
if idx >= last_idx + collapse and \
not tag.original_name.startswith(top_level_component+'.'):
if cat_len > idx + collapse:
d['last'] = data[key][idx+collapse-1]
last = idx + collapse - 1
else:
d['last'] = data[key][cat_len-1]
last = cat_len - 1
if category_is_hierarchical:
ct = copy.copy(data[key][last])
components = get_name_components(ct.original_name)
ct.sort = ct.name = components[0]
d = {'last': ct}
# Do the first node after the last node so that
# the components array contains the right values
# to be used later
ct2 = copy.copy(tag)
components = get_name_components(ct2.original_name)
ct2.sort = ct2.name = components[0]
d['first'] = ct2
else:
d = {'first': tag}
d['last'] = data[key][last]
name = eval_formatter.safe_format(collapse_template,
d, '##TAG_VIEW##', None)
if name.startswith('##TAG_VIEW##'):
# Formatter threw an exception. Don't create subnode
node_parent = category
node_parent = sub_cat = category
else:
sub_cat = self.create_node(parent=category, data = name,
tooltip = None, temporary=True,
@ -464,6 +498,9 @@ class TagsModel(QAbstractItemModel): # {{{
sub_cat.tag.is_searchable = False
sub_cat.is_gst = is_gst
node_parent = sub_cat
last_idx = idx # remember where we last partitioned
else:
node_parent = sub_cat
else: # by 'first letter'
cl = cl_list[idx]
if cl != collapse_letter:
@ -480,17 +517,16 @@ class TagsModel(QAbstractItemModel): # {{{
node_parent = category
# category display order is important here. The following works
# only of all the non-user categories are displayed before the
# only if all the non-user categories are displayed before the
# user categories
components = [t.strip() for t in tag.original_name.split('.')
if t.strip()]
if len(components) == 0 or '.'.join(components) != tag.original_name:
if category_is_hierarchical:
components = get_name_components(tag.original_name)
else:
components = [tag.original_name]
if (not tag.is_hierarchical) and (in_uc or
(fm['is_custom'] and fm['display'].get('is_names', False)) or
key in ['authors', 'publisher', 'news', 'formats', 'rating'] or
key not in self.db.prefs.get('categories_using_hierarchy', []) or
len(components) == 1):
not category_is_hierarchical or len(components) == 1):
n = self.create_node(parent=node_parent, data=tag, tooltip=tt,
icon_map=self.icon_state_map)
if tag.id_set is not None:
@ -500,6 +536,7 @@ class TagsModel(QAbstractItemModel): # {{{
for i,comp in enumerate(components):
if i == 0:
child_map = category_child_map
top_level_component = comp
else:
child_map = dict([((t.tag.name, t.tag.category), t)
for t in node_parent.children

View File

@ -738,6 +738,7 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
# Goes here, because if cf is valid, db is valid.
db.prefs['field_metadata'] = db.field_metadata.all_metadata()
db.commit_dirty_cache()
db.prefs.write_serialized(prefs['library_path'])
for action in self.iactions.values():
if not action.shutting_down():
return

View File

@ -11,7 +11,7 @@ import zipfile
from PyQt4.Qt import QFont, QVariant, QDialog
from calibre.constants import iswindows
from calibre.constants import iswindows, isxp
from calibre.utils.config import Config, StringConfig
from calibre.gui2.shortcuts import ShortcutConfig
from calibre.gui2.viewer.config_ui import Ui_Dialog
@ -113,7 +113,10 @@ class ConfigDialog(QDialog, Ui_Dialog):
p = self.tabs.widget(1)
p.layout().addWidget(self.shortcut_config)
self.opt_fit_images.setChecked(opts.fit_images)
if isxp:
self.hyphenate.setVisible(False)
self.hyphenate_default_lang.setVisible(False)
self.hyphenate_label.setVisible(False)
def accept(self, *args):
if self.shortcut_config.is_editing:

View File

@ -196,7 +196,7 @@
</widget>
</item>
<item row="6" column="0">
<widget class="QLabel" name="label_8">
<widget class="QLabel" name="hyphenate_label">
<property name="text">
<string>Default &amp;language for hyphenation:</string>
</property>

View File

@ -22,7 +22,8 @@ from calibre.gui2.viewer.javascript import JavaScriptLoader
from calibre.gui2.viewer.position import PagePosition
from calibre.gui2.viewer.config import config, ConfigDialog
from calibre.ebooks.oeb.display.webview import load_html
from calibre.utils.config import tweaks
from calibre.constants import isxp
# }}}
def load_builtin_fonts():
@ -59,10 +60,12 @@ class Document(QWebPage): # {{{
def __init__(self, shortcuts, parent=None, debug_javascript=False):
QWebPage.__init__(self, parent)
self.setObjectName("py_bridge")
self.in_paged_mode = tweaks.get('viewer_test_paged_mode', False)
# Use this to pass arbitrary JSON encodable objects between python and
# javascript. In python get/set the value as: self.bridge_value. In
# javascript, get/set the value as: py_bridge.value
self.bridge_value = None
self.first_load = True
self.debug_javascript = debug_javascript
self.anchor_positions = {}
@ -104,6 +107,13 @@ class Document(QWebPage): # {{{
self.mainFrame().javaScriptWindowObjectCleared.connect(
self.add_window_objects)
self.turn_off_internal_scrollbars()
def turn_off_internal_scrollbars(self):
mf = self.mainFrame()
mf.setScrollBarPolicy(Qt.Vertical, Qt.ScrollBarAlwaysOff)
mf.setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff)
def set_user_stylesheet(self):
raw = config().parse().user_css
raw = '::selection {background:#ffff00; color:#000;}\nbody {background-color: white;}\n'+raw
@ -154,7 +164,8 @@ class Document(QWebPage): # {{{
@pyqtSignature("")
def init_hyphenate(self):
if self.hyphenate and getattr(self, 'loaded_lang', ''):
# Qt fails to render soft hyphens correctly on windows xp
if not isxp and self.hyphenate and getattr(self, 'loaded_lang', ''):
self.javascript('do_hyphenation("%s")'%self.loaded_lang)
def _pass_json_value_getter(self):
@ -175,9 +186,12 @@ class Document(QWebPage): # {{{
'document.body.style.marginLeft').toString())
self.initial_right_margin = unicode(self.javascript(
'document.body.style.marginRight').toString())
if self.in_paged_mode:
self.switch_to_paged_mode()
if self.in_fullscreen_mode:
self.switch_to_fullscreen_mode()
self.read_anchor_positions(use_cache=False)
self.first_load = False
def read_anchor_positions(self, use_cache=True):
self.bridge_value = tuple(self.index_anchors)
@ -190,6 +204,22 @@ class Document(QWebPage): # {{{
self.anchor_positions = {}
return self.anchor_positions
def switch_to_paged_mode(self, onresize=False):
side_margin = self.javascript('window.paged_display.layout()', typ=int)
# Set up the contents size to ensure that there is a rightmost margin.
# Without this webkit renders the final column with no margin, as the
# columns extend beyond the boundaries (and margin) of body
mf = self.mainFrame()
sz = mf.contentsSize()
if sz.width() > self.window_width:
sz.setWidth(sz.width()+side_margin)
self.setPreferredContentsSize(sz)
def after_resize(self):
if self.in_paged_mode:
self.setPreferredContentsSize(QSize())
self.switch_to_paged_mode(onresize=True)
def switch_to_fullscreen_mode(self):
self.in_fullscreen_mode = True
self.javascript('''
@ -233,20 +263,21 @@ class Document(QWebPage): # {{{
def javascript(self, string, typ=None):
ans = self.mainFrame().evaluateJavaScript(string)
if typ == 'int':
if typ in {'int', int}:
ans = ans.toInt()
if ans[1]:
return ans[0]
return 0
if typ in {'float', float}:
ans = ans.toReal()
return ans[0] if ans[1] else 0.0
if typ == 'string':
return unicode(ans.toString())
return ans
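# Illustrative calls, assuming doc is a Document instance:
#   doc.javascript('window.innerWidth', typ='int')        # int, 0 on failure
#   doc.javascript('window.devicePixelRatio', typ=float)  # float, 0.0 on failure
#   doc.javascript('document.title', typ='string')        # unicode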
def javaScriptConsoleMessage(self, msg, lineno, msgid):
if self.debug_javascript:
prints( 'JS:', msgid, lineno)
prints(msg)
prints(' ')
else:
return QWebPage.javaScriptConsoleMessage(self, msg, lineno, msgid)
@ -263,13 +294,7 @@ class Document(QWebPage): # {{{
self.mainFrame().setScrollPosition(QPoint(x, y))
def jump_to_anchor(self, anchor):
self.javascript('document.location.hash = "%s"'%anchor)
def quantize(self):
if self.height > self.window_height:
r = self.height%self.window_height
if r > 0:
self.javascript('document.body.style.paddingBottom = "%dpx"'%r)
self.javascript('window.paged_display.jump_to_anchor("%s")'%anchor)
def element_ypos(self, elem):
ans, ok = elem.evaluateJavaScript('$(this).offset().top').toInt()
@ -314,11 +339,22 @@ class Document(QWebPage): # {{{
@dynamic_property
def scroll_fraction(self):
def fget(self):
if self.in_paged_mode:
return self.javascript('''
ans = 0.0;
if (window.paged_display) {
ans = window.paged_display.current_pos();
}
ans;''', typ='float')
else:
try:
return abs(float(self.ypos)/(self.height-self.window_height))
except ZeroDivisionError:
return 0.
def fset(self, val):
if self.in_paged_mode:
self.javascript('paged_display.scroll_to_pos(%f)'%val)
else:
npos = val * (self.height - self.window_height)
if npos < 0:
npos = 0
@ -363,6 +399,7 @@ class DocumentView(QWebView): # {{{
DISABLED_BRUSH = QBrush(Qt.lightGray, Qt.Dense5Pattern)
def initialize_view(self, debug_javascript=False):
self.setRenderHints(QPainter.Antialiasing|QPainter.TextAntialiasing|QPainter.SmoothPixmapTransform)
self.flipper = SlideFlip(self)
self.is_auto_repeat_event = False
self.debug_javascript = debug_javascript
@ -555,9 +592,11 @@ class DocumentView(QWebView): # {{{
return property(fget=fget, fset=fset)
def search(self, text, backwards=False):
if backwards:
return self.findText(text, self.document.FindBackward)
return self.findText(text)
flags = self.document.FindBackward if backwards else self.document.FindFlags(0)
found = self.findText(text, flags)
if found and self.document.in_paged_mode:
self.document.javascript('paged_display.snap_to_selection()')
return found
def path(self):
return os.path.abspath(unicode(self.url().toLocalFile()))
@ -570,7 +609,7 @@ class DocumentView(QWebView): # {{{
if self.manager is not None:
self.manager.load_started()
load_html(path, self, codec=path.encoding, mime_type=getattr(path,
load_html(path, self, codec=getattr(path, 'encoding', 'utf-8'), mime_type=getattr(path,
'mime_type', None), pre_load_callback=callback)
entries = set()
for ie in getattr(path, 'index_entries', []):
@ -579,10 +618,12 @@ class DocumentView(QWebView): # {{{
if ie.end_anchor:
entries.add(ie.end_anchor)
self.document.index_anchors = entries
self.turn_off_internal_scrollbars()
def initialize_scrollbar(self):
if getattr(self, 'scrollbar', None) is not None:
if self.document.in_paged_mode:
self.scrollbar.setVisible(False)
return
delta = self.document.width - self.size().width()
if delta > 0:
self._ignore_scrollbar_signals = True
@ -623,7 +664,6 @@ class DocumentView(QWebView): # {{{
self.manager.scrolled(self.document.scroll_fraction,
onload=True)
self.turn_off_internal_scrollbars()
if self.flipper.isVisible():
if self.flipper.running:
self.flipper.setVisible(False)
@ -631,12 +671,6 @@ class DocumentView(QWebView): # {{{
self.flipper(self.current_page_image(),
duration=self.document.page_flip_duration)
def turn_off_internal_scrollbars(self):
self.document.mainFrame().setScrollBarPolicy(Qt.Vertical, Qt.ScrollBarAlwaysOff)
self.document.mainFrame().setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff)
@classmethod
def test_line(cls, img, y):
'Test if line contains pixels of exactly the same color'
@ -651,6 +685,7 @@ class DocumentView(QWebView): # {{{
overlap = self.height()
img = QImage(self.width(), overlap, QImage.Format_ARGB32_Premultiplied)
painter = QPainter(img)
painter.setRenderHints(self.renderHints())
self.document.mainFrame().render(painter, QRegion(0, 0, self.width(), overlap))
painter.end()
return img
@ -670,6 +705,28 @@ class DocumentView(QWebView): # {{{
return
epf = self.document.enable_page_flip and not self.is_auto_repeat_event
if self.document.in_paged_mode:
loc = self.document.javascript(
'paged_display.previous_screen_location()', typ='int')
if loc < 0:
if self.manager is not None:
if epf:
self.flipper.initialize(self.current_page_image(),
forwards=False)
self.manager.previous_document()
else:
if epf:
self.flipper.initialize(self.current_page_image(),
forwards=False)
self.document.scroll_to(x=loc, y=0)
if epf:
self.flipper(self.current_page_image(),
duration=self.document.page_flip_duration)
if self.manager is not None:
self.manager.scrolled(self.scroll_fraction)
return
delta_y = self.document.window_height - 25
if self.document.at_top:
if self.manager is not None:
@ -700,6 +757,26 @@ class DocumentView(QWebView): # {{{
return
epf = self.document.enable_page_flip and not self.is_auto_repeat_event
if self.document.in_paged_mode:
loc = self.document.javascript(
'paged_display.next_screen_location()', typ='int')
if loc < 0:
if self.manager is not None:
if epf:
self.flipper.initialize(self.current_page_image())
self.manager.next_document()
else:
if epf:
self.flipper.initialize(self.current_page_image())
self.document.scroll_to(x=loc, y=0)
if epf:
self.flipper(self.current_page_image(),
duration=self.document.page_flip_duration)
if self.manager is not None:
self.manager.scrolled(self.scroll_fraction)
return
window_height = self.document.window_height
document_height = self.document.height
ddelta = document_height - window_height
@ -762,15 +839,25 @@ class DocumentView(QWebView): # {{{
#print 'After all:', self.document.ypos
def scroll_by(self, x=0, y=0, notify=True):
old_pos = self.document.ypos
old_pos = (self.document.xpos if self.document.in_paged_mode else
self.document.ypos)
self.document.scroll_by(x, y)
if notify and self.manager is not None and self.document.ypos != old_pos:
new_pos = (self.document.xpos if self.document.in_paged_mode else
self.document.ypos)
if notify and self.manager is not None and new_pos != old_pos:
self.manager.scrolled(self.scroll_fraction)
def scroll_to(self, pos, notify=True):
if self._ignore_scrollbar_signals:
return
old_pos = self.document.ypos
old_pos = (self.document.xpos if self.document.in_paged_mode else
self.document.ypos)
if self.document.in_paged_mode:
if isinstance(pos, basestring):
self.document.jump_to_anchor(pos)
else:
self.document.scroll_fraction = pos
else:
if isinstance(pos, basestring):
self.document.jump_to_anchor(pos)
else:
@ -780,7 +867,10 @@ class DocumentView(QWebView): # {{{
y = int(math.ceil(
pos*(self.document.height-self.document.window_height)))
self.document.scroll_to(0, y)
if notify and self.manager is not None and self.document.ypos != old_pos:
new_pos = (self.document.xpos if self.document.in_paged_mode else
self.document.ypos)
if notify and self.manager is not None and new_pos != old_pos:
self.manager.scrolled(self.scroll_fraction)
@dynamic_property
@ -813,9 +903,8 @@ class DocumentView(QWebView): # {{{
return QWebView.changeEvent(self, event)
def paintEvent(self, event):
self.turn_off_internal_scrollbars()
painter = QPainter(self)
painter.setRenderHints(self.renderHints())
self.document.mainFrame().render(painter, event.region())
if not self.isEnabled():
painter.fillRect(event.region().boundingRect(), self.DISABLED_BRUSH)
@ -827,6 +916,27 @@ class DocumentView(QWebView): # {{{
if self.manager is not None and event.delta() != 0:
(self.manager.font_size_larger if event.delta() > 0 else
self.manager.font_size_smaller)()
return
if self.document.in_paged_mode:
if abs(event.delta()) < 15: return
typ = 'screen' if self.document.wheel_flips_pages else 'col'
direction = 'next' if event.delta() < 0 else 'previous'
loc = self.document.javascript('paged_display.%s_%s_location()'%(
direction, typ), typ='int')
if loc > -1:
self.document.scroll_to(x=loc, y=0)
if self.manager is not None:
self.manager.scrolled(self.scroll_fraction)
event.accept()
elif self.manager is not None:
if direction == 'next':
self.manager.next_document()
else:
self.manager.previous_document()
event.accept()
return
if event.delta() < -14:
if self.document.wheel_flips_pages:
self.next_page()
@ -866,6 +976,17 @@ class DocumentView(QWebView): # {{{
if not self.handle_key_press(event):
return QWebView.keyPressEvent(self, event)
def paged_col_scroll(self, forward=True):
dir = 'next' if forward else 'previous'
loc = self.document.javascript(
'paged_display.%s_col_location()'%dir, typ='int')
if loc > -1:
self.document.scroll_to(x=loc, y=0)
self.manager.scrolled(self.document.scroll_fraction)
else:
(self.manager.next_document() if forward else
self.manager.previous_document())
def handle_key_press(self, event):
handled = True
key = self.shortcuts.get_match(event)
@ -877,20 +998,32 @@ class DocumentView(QWebView): # {{{
finally:
self.is_auto_repeat_event = False
elif key == 'Down':
if self.document.in_paged_mode:
self.paged_col_scroll()
else:
if (not self.document.line_scrolling_stops_on_pagebreaks and
self.document.at_bottom):
self.manager.next_document()
else:
self.scroll_by(y=15)
elif key == 'Up':
if self.document.in_paged_mode:
self.paged_col_scroll(forward=False)
else:
if (not self.document.line_scrolling_stops_on_pagebreaks and
self.document.at_top):
self.manager.previous_document()
else:
self.scroll_by(y=-15)
elif key == 'Left':
if self.document.in_paged_mode:
self.paged_col_scroll(forward=False)
else:
self.scroll_by(x=-15)
elif key == 'Right':
if self.document.in_paged_mode:
self.paged_col_scroll()
else:
self.scroll_by(x=15)
else:
handled = False

View File

@ -30,10 +30,11 @@ class JavaScriptLoader(object):
CS = {
'cfi':'ebooks.oeb.display.cfi',
'indexing':'ebooks.oeb.display.indexing',
'paged':'ebooks.oeb.display.paged',
}
ORDER = ('jquery', 'jquery_scrollTo', 'bookmarks', 'referencing', 'images',
'hyphenation', 'hyphenator', 'cfi', 'indexing',)
'hyphenation', 'hyphenator', 'cfi', 'indexing', 'paged')
def __init__(self, dynamic_coffeescript=False):

View File

@@ -411,10 +411,12 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
return c.remember_current_page
def print_book(self):
Printing(self.iterator.spine, False)
p = Printing(self.iterator, self)
p.start_print()
def print_preview(self):
Printing(self.iterator.spine, True)
p = Printing(self.iterator, self)
p.start_preview()
def toggle_fullscreen(self, x):
if self.isFullScreen():
@@ -747,6 +749,7 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
# There hasn't been a resize event for some time
# restore the current page position.
self.resize_in_progress = False
self.view.document.after_resize()
if self.window_mode_changed:
# This resize is part of a window mode change, special case it
self.handle_window_mode_toggle()
@@ -1003,6 +1006,12 @@ def main(args=sys.argv):
QApplication.setApplicationName(APP_UID)
main = EbookViewer(args[1] if len(args) > 1 else None,
debug_javascript=opts.debug_javascript, open_at=open_at)
# This is needed for paged mode. Without it, the first document that is
# loaded will have extra blank space at the bottom, as
# turn_off_internal_scrollbars does not take effect for the first
# rendered document
main.view.load_path(P('viewer/blank.html', allow_user_override=False))
sys.excepthook = main.unhandled_exception
main.show()
if opts.raise_window:

View File

@@ -19,13 +19,10 @@ class PagePosition(object):
ans = None
res = self.document.mainFrame().evaluateJavaScript('''
ans = 'undefined';
try {
ans = window.cfi.at_current();
if (window.paged_display) {
ans = window.paged_display.current_cfi();
if (!ans) ans = 'undefined';
} catch (err) {
window.console.log(err);
}
window.console.log("Viewport cfi: " + ans);
ans;
''')
if res.isValid() and not res.isNull() and res.type() == res.String:
@@ -37,17 +34,8 @@ class PagePosition(object):
def scroll_to_cfi(self, cfi):
if cfi:
cfi = json.dumps(cfi)
self.document.mainFrame().evaluateJavaScript('''
function fix_scroll() {
/* cfi.scroll_to() uses scrollIntoView() which can result
in scrolling along the x-axis. So we
explicitly scroll to x=0.
*/
scrollTo(0, window.pageYOffset)
}
window.cfi.scroll_to(%s, fix_scroll);
'''%cfi)
self.document.mainFrame().evaluateJavaScript(
'paged_display.jump_to_cfi(%s)'%cfi)
@property
def current_pos(self):
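The position save/restore round trip is symmetric: one evaluateJavaScript call reads a CFI string out of the paged display, another feeds it back as a JSON-encoded argument. A condensed sketch, assuming frame is the QWebFrame returned by document.mainFrame() (PyQt4 and Python 2 idioms, as in the surrounding code):

import json

def save_position(frame):
    res = frame.evaluateJavaScript(
        'window.paged_display ? window.paged_display.current_cfi() : null')
    # The QVariant checks mirror the ones in the read-back code above.
    if res.isValid() and not res.isNull() and res.type() == res.String:
        return unicode(res.toString()) or None
    return None

def restore_position(frame, cfi):
    if cfi:
        # json.dumps produces a valid JavaScript string literal for the CFI.
        frame.evaluateJavaScript(
            'paged_display.jump_to_cfi(%s)' % json.dumps(cfi))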

View File

@@ -1,127 +1,104 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, sys, urlparse
from BeautifulSoup import BeautifulSoup, Tag
from PyQt4 import QtCore
from PyQt4.Qt import QUrl, QEventLoop, SIGNAL, QObject, Qt, \
QPrinter, QPrintPreviewDialog, QPrintDialog, QDialog, QMetaObject, Q_ARG
from PyQt4.Qt import (QObject, QEventLoop, Qt, QPrintDialog, QPainter, QSize,
QPrintPreviewDialog)
from PyQt4.QtWebKit import QWebView
PRINTCSS = 'body{width:100%;margin:0;padding:0;font-family:Arial;color:#000;background:none;font-size:12pt;text-align:left;}h1,h2,h3,h4,h5,h6{font-family:Helvetica;}h1{font-size:19pt;}h2{font-size:17pt;}h3{font-size:15pt;}h4,h5,h6{font-size:12pt;}pre,code,samp{font:10pt Courier,monospace;white-space:pre-wrap;page-break-inside:avoid;}blockquote{margin:1.3em;padding:1em;font-size:10pt;}hr{background-color:#ccc;}a img{border:none;}a:link,a:visited{background:transparent;font-weight:700;text-decoration:underline;color:#333;}a:link:after,a{color:#000;}table{margin:1px;text-align:left;}th{border-bottom:1px solid #333;font-weight:bold;}td{border-bottom:1px solid #333;}th,td{padding:4px 10px 4px 0;}tfoot{font-style:italic;}caption{background:#fff;margin-bottom:2em;text-align:left;}thead{display:table-header-group;}tr{page-break-inside:avoid;}#header,.header,#footer,.footer,#navbar,.navbar,#navigation,.navigation,#rightSideBar,.rightSideBar,#leftSideBar,.leftSideBar{display:none;}'
from calibre.gui2 import error_dialog
from calibre.ebooks.oeb.display.webview import load_html
from calibre.utils.resources import compiled_coffeescript
class Printing(QObject):
def __init__(self, spine, preview):
from calibre.gui2 import is_ok_to_use_qt
if not is_ok_to_use_qt():
raise Exception('Not OK to use Qt')
QObject.__init__(self)
self.loop = QEventLoop()
self.view = QWebView()
if preview:
self.connect(self.view, SIGNAL('loadFinished(bool)'), self.print_preview)
else:
self.connect(self.view, SIGNAL('loadFinished(bool)'), self.print_book)
def __init__(self, iterator, parent):
QObject.__init__(self, parent)
self.current_index = 0
self.iterator = iterator
self.view = QWebView(self.parent())
self.mf = mf = self.view.page().mainFrame()
for x in (Qt.Horizontal, Qt.Vertical):
mf.setScrollBarPolicy(x, Qt.ScrollBarAlwaysOff)
self.view.loadFinished.connect(self.load_finished)
self.paged_js = compiled_coffeescript('ebooks.oeb.display.paged',
dynamic=False)
self.process_content(spine)
def load_finished(self, ok):
self.loaded_ok = ok
def process_content(self, spine):
content = ''
def start_print(self):
self.pd = QPrintDialog(self.parent())
self.pd.open(self._start_print)
for path in spine:
raw = self.raw_content(path)
content += self.parsed_content(raw, path)
def _start_print(self):
self.do_print(self.pd.printer())
refined_content = self.refine_content(content)
def start_preview(self):
self.pd = QPrintPreviewDialog(self.parent())
self.pd.paintRequested.connect(self.do_print)
self.pd.exec_()
base = os.path.splitdrive(spine[0])[0]
base = base if base != '' else '/'
def do_print(self, printer):
painter = QPainter(printer)
zoomx = printer.logicalDpiX()/self.view.logicalDpiX()
zoomy = printer.logicalDpiY()/self.view.logicalDpiY()
painter.scale(zoomx, zoomy)
pr = printer.pageRect()
self.view.page().setViewportSize(QSize(pr.width()/zoomx,
pr.height()/zoomy))
evaljs = self.mf.evaluateJavaScript
loop = QEventLoop(self)
first = True
QMetaObject.invokeMethod(self, "load_content", Qt.QueuedConnection, Q_ARG('QString', refined_content), Q_ARG('QString', base))
self.loop.exec_()
for path in self.iterator.spine:
self.loaded_ok = None
load_html(path, self.view, codec=getattr(path, 'encoding', 'utf-8'),
mime_type=getattr(path, 'mime_type', None))
while self.loaded_ok is None:
loop.processEvents(loop.ExcludeUserInputEvents)
if not self.loaded_ok:
return error_dialog(self.parent(), _('Failed to render'),
_('Failed to render document %s')%path, show=True)
evaljs(self.paged_js)
evaljs('''
document.body.style.backgroundColor = "white";
paged_display.set_geometry(1, 0, 0, 0);
paged_display.layout();
''')
@QtCore.pyqtSignature('load_content(QString, QString)')
def load_content(self, content, base):
self.view.setHtml(content, QUrl(base))
while True:
if not first:
printer.newPage()
first = False
self.mf.render(painter)
nsl = evaljs('paged_display.next_screen_location()').toInt()
if not nsl[1] or nsl[0] <= 0: break
evaljs('window.scrollTo(%d, 0)'%nsl[0])
def raw_content(self, path):
return open(path, 'rb').read().decode(path.encoding)
def parsed_content(self, raw_content, path):
dom_tree = BeautifulSoup(raw_content).body
# Remove style information that is applied to the entire document.
# This does not remove styles applied within a tag.
styles = dom_tree.findAll('style')
for s in styles:
s.extract()
scripts = dom_tree.findAll('script')
for s in scripts:
s.extract()
# Convert all relative links to absolute paths.
links = dom_tree.findAll(src=True)
for s in links:
if QUrl(s['src']).isRelative():
s['src'] = urlparse.urljoin(path, s['src'])
links = dom_tree.findAll(href=True)
for s in links:
if QUrl(s['href']).isRelative():
s['href'] = urlparse.urljoin(path, s['href'])
return unicode(dom_tree)
# Adds the beginning and ending tags to the document.
# Adds the print css.
def refine_content(self, content):
dom_tree = BeautifulSoup('<html><head></head><body>%s</body></html>' % content)
css = dom_tree.findAll('link')
for c in css:
c.extract()
print_css = Tag(BeautifulSoup(), 'style', [('type', 'text/css'), ('title', 'override_css')])
print_css.insert(0, PRINTCSS)
dom_tree.findAll('head')[0].insert(0, print_css)
return unicode(dom_tree)
def print_preview(self, ok):
printer = QPrinter(QPrinter.HighResolution)
printer.setPageMargins(1, 1, 1, 1, QPrinter.Inch)
previewDialog = QPrintPreviewDialog(printer)
self.connect(previewDialog, SIGNAL('paintRequested(QPrinter *)'), self.view.print_)
previewDialog.exec_()
self.disconnect(previewDialog, SIGNAL('paintRequested(QPrinter *)'), self.view.print_)
self.loop.quit()
def print_book(self, ok):
printer = QPrinter(QPrinter.HighResolution)
printer.setPageMargins(1, 1, 1, 1, QPrinter.Inch)
printDialog = QPrintDialog(printer)
printDialog.setWindowTitle(_("Print eBook"))
printDialog.exec_()
if printDialog.result() == QDialog.Accepted:
self.view.print_(printer)
self.loop.quit()
def main():
return 0
painter.end()
if __name__ == '__main__':
sys.exit(main())
from calibre.gui2 import Application
from calibre.ebooks.oeb.iterator.book import EbookIterator
from PyQt4.Qt import QPrinter, QTimer
import sys
app = Application([])
def doit():
with EbookIterator(sys.argv[-1]) as it:
p = Printing(it, None)
printer = QPrinter()
of = sys.argv[-1]+'.pdf'
printer.setOutputFileName(of)
p.do_print(printer)
print ('Printed to:', of)
app.exit()
QTimer.singleShot(0, doit)
app.exec_()
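The zoom factors in do_print are what keep WebKit's layout and the printer in agreement: the painter is scaled up from view DPI to printer DPI, and the viewport is scaled down by the same factor, so one laid-out screen fills exactly one page. A worked example with assumed values (96 dpi screen, 1200 dpi printer, 8 x 10.5 inch printable area):

printer_dpi, view_dpi = 1200.0, 96.0
zoom = printer_dpi / view_dpi                        # 12.5
page_w, page_h = 9600, 12600                         # printer.pageRect(), in printer px
viewport = (int(page_w / zoom), int(page_h / zoom))  # (768, 1008), in view px
# painter.scale(zoom, zoom) then maps each rendered view pixel back onto
# the printer's finer grid, so the page prints at full resolution.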

View File

@@ -93,7 +93,8 @@ class CheckLibrary(object):
lib = self.src_library_path
for auth_dir in os.listdir(lib):
if self.ignore_name(auth_dir) or auth_dir == 'metadata.db':
if self.ignore_name(auth_dir) or auth_dir in {'metadata.db',
'metadata_db_prefs_backup.json'}:
continue
auth_path = os.path.join(lib, auth_dir)
# First check: author must be a directory

View File

@@ -243,20 +243,22 @@ def do_add(db, paths, one_book_per_directory, recurse, add_duplicates, otitle,
metadata.append(mi)
file_duplicates = []
added_ids = set()
if files:
file_duplicates = db.add_books(files, formats, metadata,
add_duplicates=add_duplicates)
if file_duplicates:
file_duplicates = file_duplicates[0]
file_duplicates, ids = db.add_books(files, formats, metadata,
add_duplicates=add_duplicates,
return_ids=True)
added_ids |= set(ids)
dir_dups = []
for dir in dirs:
if recurse:
dir_dups.extend(db.recursive_import(dir, single_book_per_directory=one_book_per_directory))
dir_dups.extend(db.recursive_import(dir,
single_book_per_directory=one_book_per_directory,
added_ids=added_ids))
else:
func = db.import_book_directory if one_book_per_directory else db.import_book_directory_multiple
dups = func(dir)
dups = func(dir, added_ids=added_ids)
if not dups:
dups = []
dir_dups.extend(dups)
@@ -265,7 +267,8 @@ def do_add(db, paths, one_book_per_directory, recurse, add_duplicates, otitle,
if add_duplicates:
for mi, formats in dir_dups:
db.import_book(mi, formats)
book_id = db.import_book(mi, formats)
added_ids.add(book_id)
else:
if dir_dups or file_duplicates:
print >>sys.stderr, _('The following books were not added as '
@@ -287,6 +290,9 @@ def do_add(db, paths, one_book_per_directory, recurse, add_duplicates, otitle,
print >>sys.stderr, '\t\t ', path
write_dirtied(db)
if added_ids:
prints(_('Added book ids: %s')%(', '.join(map(type(u''),
added_ids))))
send_message()
finally:
sys.stdout = orig
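All three add paths now funnel new book ids into the single added_ids set, which is what lets calibredb report them at the end. The pattern is just a mutable set threaded through the helpers; a sketch with hypothetical stand-ins for the database calls:

def collect_added_ids(add_files, import_dir, files, dirs):
    # add_files and import_dir are hypothetical stand-ins for
    # db.add_books(..., return_ids=True) and db.recursive_import(...).
    added_ids = set()
    duplicates, ids = add_files(files)       # file adds return their new ids
    added_ids |= set(ids)
    for d in dirs:
        import_dir(d, added_ids=added_ids)   # directory imports fill the shared set
    return sorted(added_ids)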

View File

@@ -162,7 +162,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
return path and os.path.exists(os.path.join(path, 'metadata.db'))
def __init__(self, library_path, row_factory=False, default_prefs=None,
read_only=False, is_second_db=False):
read_only=False, is_second_db=False, progress_callback=None,
restore_all_prefs=False):
self.is_second_db = is_second_db
try:
if isbytestring(library_path):
@@ -205,15 +206,21 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
# if we are to copy the prefs and structure from some other DB, then
# we need to do it before we call initialize_dynamic
if apply_default_prefs and default_prefs is not None:
if progress_callback is None:
progress_callback = lambda x, y: True
dbprefs = DBPrefs(self)
for key in default_prefs:
progress_callback(None, len(default_prefs))
for i, key in enumerate(default_prefs):
# be sure that prefs not to be copied are listed below
if key in frozenset(['news_to_be_synced']):
if not restore_all_prefs and key in frozenset(['news_to_be_synced']):
continue
dbprefs[key] = default_prefs[key]
progress_callback(_('restored preference ') + key, i+1)
if 'field_metadata' in default_prefs:
fmvals = [f for f in default_prefs['field_metadata'].values() if f['is_custom']]
for f in fmvals:
progress_callback(None, len(fmvals))
for i, f in enumerate(fmvals):
progress_callback(_('creating custom column ') + f['label'], i)
self.create_custom_column(f['label'], f['name'], f['datatype'],
f['is_multiple'] is not None and len(f['is_multiple']) > 0,
f['is_editable'], f['display'])
@@ -3566,7 +3573,8 @@ books_series_link feeds
for formats in books.values():
yield formats
def import_book_directory_multiple(self, dirpath, callback=None):
def import_book_directory_multiple(self, dirpath, callback=None,
added_ids=None):
from calibre.ebooks.metadata.meta import metadata_from_formats
duplicates = []
@@ -3577,13 +3585,15 @@ books_series_link feeds
if self.has_book(mi):
duplicates.append((mi, formats))
continue
self.import_book(mi, formats)
book_id = self.import_book(mi, formats)
if added_ids is not None:
added_ids.add(book_id)
if callable(callback):
if callback(mi.title):
break
return duplicates
def import_book_directory(self, dirpath, callback=None):
def import_book_directory(self, dirpath, callback=None, added_ids=None):
from calibre.ebooks.metadata.meta import metadata_from_formats
dirpath = os.path.abspath(dirpath)
formats = self.find_books_in_directory(dirpath, True)
@@ -3595,17 +3605,21 @@ books_series_link feeds
return
if self.has_book(mi):
return [(mi, formats)]
self.import_book(mi, formats)
book_id = self.import_book(mi, formats)
if added_ids is not None:
added_ids.add(book_id)
if callable(callback):
callback(mi.title)
def recursive_import(self, root, single_book_per_directory=True, callback=None):
def recursive_import(self, root, single_book_per_directory=True,
callback=None, added_ids=None):
root = os.path.abspath(root)
duplicates = []
for dirpath in os.walk(root):
res = self.import_book_directory(dirpath[0], callback=callback) if \
single_book_per_directory else \
self.import_book_directory_multiple(dirpath[0], callback=callback)
res = (self.import_book_directory(dirpath[0], callback=callback,
added_ids=added_ids) if single_book_per_directory else
self.import_book_directory_multiple(dirpath[0],
callback=callback, added_ids=added_ids))
if res is not None:
duplicates.extend(res)
if callable(callback):
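The progress_callback contract used throughout this restore code is: a first call with (None, total) announces how many steps a phase has, then each unit of work reports (message, index). A callback matching that contract, as a sketch:

def report_progress(msg, num):
    # msg is None on the announcement call, where num is the total;
    # on later calls msg names the step and num is its index.
    if msg is None:
        print('Starting a phase of %d steps' % num)
    else:
        print('[%d] %s' % (num, msg))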

View File

@@ -5,7 +5,7 @@ __license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import json
import json, os
from calibre.constants import preferred_encoding
from calibre.utils.config import to_json, from_json
@@ -57,4 +57,50 @@ class DBPrefs(dict):
def set(self, key, val):
self.__setitem__(key, val)
def get_namespaced(self, namespace, key, default=None):
key = u'namespaced:%s:%s'%(namespace, key)
try:
return dict.__getitem__(self, key)
except KeyError:
return default
def set_namespaced(self, namespace, key, val):
if u':' in key: raise KeyError('Colons are not allowed in keys')
if u':' in namespace: raise KeyError('Colons are not allowed in'
' the namespace')
key = u'namespaced:%s:%s'%(namespace, key)
self[key] = val
def write_serialized(self, library_path):
try:
to_filename = os.path.join(library_path, 'metadata_db_prefs_backup.json')
with open(to_filename, "wb") as f:
f.write(json.dumps(self, indent=2, default=to_json))
except:
import traceback
traceback.print_exc()
@classmethod
def read_serialized(cls, library_path, recreate_prefs=False):
try:
from_filename = os.path.join(library_path,
'metadata_db_prefs_backup.json')
with open(from_filename, "rb") as f:
d = json.load(f, object_hook=from_json)
if not recreate_prefs:
return d
cls.clear()
cls.db.conn.execute('DELETE FROM preferences')
for k,v in d.iteritems():
raw = cls.to_raw(v)
cls.db.conn.execute(
'INSERT INTO preferences (key,val) VALUES (?,?)', (k, raw))
cls.db.conn.commit()
cls.clear()
cls.update(d)
return d
except:
import traceback
traceback.print_exc()
raise
return None
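The namespaced helpers simply mangle the key as namespaced:<namespace>:<key>, with colons forbidden in both parts so the three-part encoding stays unambiguous, while write_serialized dumps the whole prefs dict next to metadata.db. The key scheme in isolation, independent of the database layer:

def namespaced_key(namespace, key):
    # Mirrors set_namespaced above: colons are reserved as separators.
    if u':' in key or u':' in namespace:
        raise KeyError('Colons are not allowed in the namespace or key')
    return u'namespaced:%s:%s' % (namespace, key)

# namespaced_key(u'news', u'last_fetch') -> u'namespaced:news:last_fetch'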

View File

@@ -12,6 +12,7 @@ from operator import itemgetter
from calibre.ptempfile import TemporaryDirectory
from calibre.ebooks.metadata.opf2 import OPF
from calibre.library.database2 import LibraryDatabase2
from calibre.library.prefs import DBPrefs
from calibre.constants import filesystem_encoding
from calibre.utils.date import utcfromtimestamp
from calibre import isbytestring
@@ -101,6 +102,13 @@ class Restore(Thread):
with TemporaryDirectory('_library_restore') as tdir:
self.library_path = tdir
self.scan_library()
if not self.load_preferences():
# Something went wrong with preferences restore. Start over
# with a new database and attempt to rebuild the structure
# from the metadata in the opf
dbpath = os.path.join(self.library_path, 'metadata.db')
if os.path.exists(dbpath):
os.remove(dbpath)
self.create_cc_metadata()
self.restore_books()
if self.successes == 0 and len(self.dirs) > 0:
@@ -109,6 +117,32 @@ class Restore(Thread):
except:
self.tb = traceback.format_exc()
def load_preferences(self):
self.progress_callback(None, 1)
self.progress_callback(_('Starting restoring preferences and column metadata'), 0)
prefs_path = os.path.join(self.src_library_path, 'metadata_db_prefs_backup.json')
if not os.path.exists(prefs_path):
self.progress_callback(_('Cannot restore preferences. Backup file not found.'), 1)
return False
try:
prefs = DBPrefs.read_serialized(self.src_library_path, recreate_prefs=False)
db = RestoreDatabase(self.library_path, default_prefs=prefs,
restore_all_prefs=True,
progress_callback=self.progress_callback)
db.commit()
db.conn.close()
self.progress_callback(None, 1)
if 'field_metadata' in prefs:
self.progress_callback(_('Finished restoring preferences and column metadata'), 1)
return True
self.progress_callback(_('Finished restoring preferences'), 1)
return False
except:
traceback.print_exc()
self.progress_callback(None, 1)
self.progress_callback(_('Restoring preferences and column metadata failed'), 0)
return False
def scan_library(self):
for dirpath, dirnames, filenames in os.walk(self.src_library_path):
leaf = os.path.basename(dirpath)
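The restore flow is therefore two-tiered: first try to rebuild the library from the JSON preferences backup, and only when that fails (backup missing, or no field_metadata recorded) delete the half-built metadata.db and reconstruct structure from the per-book OPF files. In outline, assuming a Restore instance with the methods shown above:

import os

def run_restore(restore):
    restore.scan_library()
    if not restore.load_preferences():
        # Prefs backup unusable: drop the partial database and recreate
        # custom columns from the metadata gathered during the scan.
        dbpath = os.path.join(restore.library_path, 'metadata.db')
        if os.path.exists(dbpath):
            os.remove(dbpath)
        restore.create_cc_metadata()
    restore.restore_books()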

View File

@@ -102,6 +102,7 @@ class AuthController(object):
@wraps(func)
def authenticate(*args, **kwargs):
cookie = cherrypy.request.cookie.get(self.cookie_name, None)
if not (allow_cookie_auth and self.is_valid(cookie)):
digest_auth(self.realm, get_ha1_dict_plain(self.users_dict),
self.secret)

38 file diffs suppressed because they are too large.

Some files were not shown because too many files have changed in this diff.