[merge] trunk

Kolenka 2011-10-15 09:10:52 -07:00
commit 84530a6764
102 changed files with 30972 additions and 26983 deletions

View File

@@ -19,6 +19,94 @@
# new recipes:
# - title:
- version: 0.8.22
date: 2011-10-14
new features:
- title: "Input plugin for OCR-ed DJVU files (i.e. .djvu files that contain text. Only the text is converted)"
type: major
- title: "Driver for the SONY PRS T1"
- title: "Add a 'Back' button to the metadata download dialog while downloading covers, so that you can go back and select a different match if you dont lke the covers, instead of having to re-do the entire download."
tickets: [855055]
- title: "Add an option in Preferences->Saving to disk to not show files in file browser after saving to disk"
- title: "Get Books: Add the amazon.fr store. Remove leading 'by' from author names. Fix encoding issues with non English titles/names"
- title: "Driver for Onyx BOOX A61S/X61S"
tickets: [872741]
- title: "Kobo: Add support for uploading new covers to the device without converting the ePub. You can just resend the book to have the cover updated"
- title: "Make it a little harder to ignore the fact that there are multiple toolbars when customizing toolbars"
tickets: [864589]
bug fixes:
- title: "MOBI Input: Remove invalid tags of the form <xyz: >"
tickets: [872883]
- title: "calibredb add_format does not refresh running calibre instance"
tickets: [872961]
- title: "Conversion pipeline: Translate <font face> to CSS font-family"
tickets: [871388]
- title: "When sending email add a Date: header so that amavis does not consider the emails to be spam"
- title: "Fix for the problem where setting the restriction to an empty current search clears the restriction box but does not clear the restriction."
tickets: [871921]
- title: "Fix generation of column coloring rules for date/time columns"
- title: "Fix plugboard problem where customizations to formats accepted by a device were ignored."
- title: "Enable adding of various actions to the toolbar when device is connected (they had been erroneously marked as being non-addable)"
- title: "Fixable content in library check is not hidden after repair"
tickets: [864096]
- title: "Catalog generation: Handle a corrupted thumbnail cache."
- title: "Do not error out when user clicks stop selected job with no job selected."
tickets: [863766]
improved recipes:
- automatiseringgids
- CNET
- Geek and Poke
- Gosc Niedzielny
- Dilbert
- Economist
- Ming Pao
- Metro UK
- Heise Online
- FAZ.net
- Houston Chronicle
- Slate
- Descopera
new recipes:
- title: WoW Insider
author: Krittika Goyal
- title: Merco Press and Penguin news
author: Russell Phillips
- title: Defense News
author: Darko Miletic
- title: Revista Piaui
author: Eduardo Simoes
- title: Dark Horizons
author: Jaded
- title: Various polish news sources
author: fenuks
- version: 0.8.21
date: 2011-09-30
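
A note on the email fix above: mail sent without a Date: header is penalized by spam filters such as amavis. A minimal sketch of the general technique, using only the standard library (the message fields below are made up for illustration; this is not calibre's actual send path):

from email.mime.text import MIMEText
from email.utils import formatdate

msg = MIMEText('Your book is attached', 'plain')
msg['Subject'] = 'Book from calibre'
# The fix in 0.8.22: always stamp an RFC 2822 Date: header on outgoing mail
msg['Date'] = formatdate(localtime=True)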

View File

@@ -10,27 +10,15 @@ class autogids(BasicNewsRecipe):
publisher = 'AutomatiseringGids'
category = 'Nieuws, IT, Nederlandstalig'
simultaneous_downloads = 5
#delay = 1
timefmt = ' [%A, %d %B, %Y]'
#timefmt = ''
timefmt = ' [%a, %d %B, %Y]'
no_stylesheets = True
remove_javascript = True
remove_empty_feeds = True
publication_type = 'newspaper'
encoding = 'utf-8'
cover_url = 'http://www.automatiseringgids.nl/siteimg/header_logo.gif'
keep_only_tags = [dict(id=['content'])]
extra_css = '.artikelheader {font-size:0.8em; color: #666;} .artikelintro {font-weight:bold} div.imgArticle {float: right; margin: 0 0em 1em 1em; display: block; position: relative; } \
h2 { margin: 0 0 0.5em; min-height: 30px; font-size: 1.5em; letter-spacing: -0.2px; margin: 0 0 0.5em; color: black; font-weight: bold; line-height: 1.2em; padding: 4px 3px 0; }'
cover_url = 'http://www.automatiseringgids.nl/binaries/content/gallery/ag/marketing/ag-avatar-100x50.jpg'
keep_only_tags = [dict(name='div', attrs={'class':['content']})]
remove_tags = [dict(name='div', attrs={'id':['loginbox','reactiecollapsible','reactiebox']}),
dict(name='div', attrs={'class':['column_a','column_c','bannerfullsize','reactieheader','reactiecollapsible','formulier','artikel_headeroptions']}),
dict(name='ul', attrs={'class':['highlightlist']}),
dict(name='input', attrs={'type':['button']}),
dict(name='div', attrs={'style':['display:block; width:428px; height:30px; float:left;']}),
]
preprocess_regexps = [
(re.compile(r'(<h3>Reacties</h3>|<h2>Zie ook:</h2>|<div style=".*</div>|<a[^>]*>|</a>)', re.DOTALL|re.IGNORECASE),
lambda match: ''),

View File

@@ -110,8 +110,10 @@ class BrandEins(BasicNewsRecipe):
selected_issue = issue_map[selected_issue_key]
url = selected_issue.get('href', False)
# Get the title for the magazine - build it out of the title of the cover - take the issue and year;
self.title = "brand eins " + selected_issue_key[4:] + "/" + selected_issue_key[0:4]
# self.title = "brand eins " + selected_issue_key[4:] + "/" + selected_issue_key[0:4]
# Get the alternative title for the magazine - build it out of the title of the cover - without the issue and year;
url = 'http://brandeins.de/'+url
self.timefmt = ' ' + selected_issue_key[4:] + '/' + selected_issue_key[:4]
# url = "http://www.brandeins.de/archiv/magazin/tierisch.html"
titles_and_articles = self.brand_eins_parse_issue(url)
@@ -163,4 +165,3 @@ class BrandEins(BasicNewsRecipe):
current_articles.append({'title': title, 'url': url, 'description': description, 'date':''})
titles_and_articles.append([chapter_title, current_articles])
return titles_and_articles
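
For clarity, the timefmt line above slices an issue key of the form YYYYNN (four-digit year followed by issue number); a minimal illustration, with a hypothetical key:

selected_issue_key = '201110'  # hypothetical key: year 2011, issue 10
timefmt = ' ' + selected_issue_key[4:] + '/' + selected_issue_key[:4]
assert timefmt == ' 10/2011'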

View File

@@ -5,8 +5,8 @@ __copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>'
Changelog:
2011-09-24
Changed cover (drMerry)
'''
'''
2011-10-13
Updated Cover (drMerry)
news.cnet.com
'''
@@ -24,7 +24,7 @@ class CnetNews(BasicNewsRecipe):
encoding = 'cp1252'
use_embedded_content = False
language = 'en'
cover_url = 'http://reviews.cnet.com/i/ff/wp/logo_cnet.gif'
conversion_options = {
'comment' : description
, 'tags' : category

View File

@@ -8,11 +8,7 @@ class DallasNews(BasicNewsRecipe):
no_stylesheets = True
use_embedded_content = False
remove_tags_before = dict(name='h1')
keep_only_tags = {'class':lambda x: x and 'article' in x}
remove_tags = [
{'class':['DMNSocialTools', 'article ', 'article first ', 'article premium']},
]
auto_cleanup = True
feeds = [
('Local News',

View File

@@ -2,6 +2,7 @@ __license__ = 'GPL v3'
__copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>'
'''
http://www.dilbert.com
DrMerry added cover image 2011-11-12
'''
from calibre.web.feeds.recipes import BasicNewsRecipe
@@ -9,7 +10,7 @@ import re
class DilbertBig(BasicNewsRecipe):
title = 'Dilbert'
__author__ = 'Darko Miletic and Starson17'
__author__ = 'Darko Miletic and Starson17, with contribution from DrMerry'
description = 'Dilbert'
reverse_article_order = True
oldest_article = 15
@@ -20,6 +21,7 @@ class DilbertBig(BasicNewsRecipe):
publisher = 'UNITED FEATURE SYNDICATE, INC.'
category = 'comic'
language = 'en'
cover_url = 'http://dilbert.com/mobile/mobile/dilbert.app.icon.png'
conversion_options = {
'comments' : description

View File

@@ -16,7 +16,7 @@ class FTDe(BasicNewsRecipe):
use_embedded_content = False
timefmt = ' [%d %b %Y]'
language = 'de'
max_articles_per_feed = 40
max_articles_per_feed = 30
no_stylesheets = True
remove_tags = [dict(id='navi_top'),
@@ -84,19 +84,19 @@ class FTDe(BasicNewsRecipe):
dict(name='div', attrs={'class':'artikelsplitfaq'})]
#remove_tags_after = [dict(name='a', attrs={'class':'more'})]
feeds = [ ('Finanzen', 'http://www.ftd.de/rss2/finanzen/maerkte'),
('Meinungshungrige', 'http://www.ftd.de/rss2/meinungshungrige'),
('Unternehmen', 'http://www.ftd.de/rss2/unternehmen'),
('Politik', 'http://www.ftd.de/rss2/politik'),
('Karriere_Management', 'http://www.ftd.de/rss2/karriere-management'),
('IT_Medien', 'http://www.ftd.de/rss2/it-medien'),
('Wissen', 'http://www.ftd.de/rss2/wissen'),
('Sport', 'http://www.ftd.de/rss2/sport'),
('Auto', 'http://www.ftd.de/rss2/auto'),
('Lifestyle', 'http://www.ftd.de/rss2/lifestyle')
]
feeds = [
('Unternehmen', 'http://www.ftd.de/rss2/unternehmen'),
('Finanzen', 'http://www.ftd.de/rss2/finanzen/maerkte'),
('Meinungen', 'http://www.ftd.de/rss2/meinungshungrige'),
('Politik', 'http://www.ftd.de/rss2/politik'),
('Management & Karriere', 'http://www.ftd.de/rss2/karriere-management'),
('IT & Medien', 'http://www.ftd.de/rss2/it-medien'),
('Wissen', 'http://www.ftd.de/rss2/wissen'),
('Sport', 'http://www.ftd.de/rss2/sport'),
('Auto', 'http://www.ftd.de/rss2/auto'),
('Lifestyle', 'http://www.ftd.de/rss2/lifestyle')
]
def print_version(self, url):
return url.replace('.html', '.html?mode=print')
return url.replace('.html', '.html?mode=print')

View File

@@ -1,6 +1,6 @@
from calibre.web.feeds.news import BasicNewsRecipe
import re
from calibre.utils.magick import Image
from calibre.utils.magick import Image, create_canvas
class AdvancedUserRecipe1307556816(BasicNewsRecipe):
title = u'Geek and Poke'
@@ -11,7 +11,7 @@ class AdvancedUserRecipe1307556816(BasicNewsRecipe):
oldest_article = 31
max_articles_per_feed = 100
language = u'en'
simultaneous_downloads = 5
simultaneous_downloads = 1
#delay = 1
timefmt = ' [%a, %d %B, %Y]'
summary_length = -1
@@ -22,6 +22,7 @@ class AdvancedUserRecipe1307556816(BasicNewsRecipe):
remove_javascript = True
remove_empty_feeds = True
publication_type = 'blog'
masthead_url = None
conversion_options = {
'comments' : ''
,'tags' : category
@@ -44,28 +45,38 @@ class AdvancedUserRecipe1307556816(BasicNewsRecipe):
(r'yimg\.com'),
(r'scorecardresearch\.com')]
preprocess_regexps = [(re.compile(r'(<p>(&nbsp;|\s)*</p>|<a[^>]*>Tweet</a>|<a[^>]*>|</a>)', re.DOTALL|re.IGNORECASE),lambda match: ''),
preprocess_regexps = [(re.compile(r'(<p>(&nbsp;|\s)*</p>|<a[^>]*>Tweet</a>|<a[^>]*>|</a>|<!--.*?-->|<h2[^>]*>[^<]*</h2>[^<]*)', re.DOTALL|re.IGNORECASE),lambda match: ''),
(re.compile(r'(&nbsp;|\s\s)+\s*', re.DOTALL|re.IGNORECASE),lambda match: ' '),
(re.compile(r'<h2[^>]*>([^<]*)</h2>[^>]*(<div[^>]*>)', re.DOTALL|re.IGNORECASE), lambda match: match.group(2) + '<div id="MERRYdate">' + match.group(1) + '</div>'),
(re.compile(r'(<h3[^>]*>)<a[^>]>((?!</a)*)</a></h3>', re.DOTALL|re.IGNORECASE),lambda match: match.group(1) + match.group(2) + '</h3>'),
(re.compile(r'(<img[^>]*alt="([^"]*)"[^>]*>)', re.DOTALL|re.IGNORECASE),lambda match: match.group(1) + '<br><cite>' + match.group(2) + '</cite>'),
(re.compile(r'(<img[^>]*alt="([^"]*)"[^>]*>)', re.DOTALL|re.IGNORECASE),lambda match: '<div id="merryImage"><cite>' + match.group(2) + '</cite><br>' + match.group(1) + '</div>'),
(re.compile(r'<br( /)?>(<br( /)?>)+', re.DOTALL|re.IGNORECASE),lambda match: '<br>'),
(re.compile(r'<!--.*?-->', re.DOTALL), lambda m: '')
]
extra_css = 'body, h3, p, #MERRYdate, h1, div, span{margin:0px; padding:0px} h3.entry-header{font-size: 0.8em} div.entry-body{font-size: 0.7em} #MERRYdate {font-size: 0.5em}'
extra_css = 'body, h3, p, div, span{margin:0px; padding:0px} h3.entry-header{font-size: 0.8em} div.entry-body{font-size: 0.7em}'
def postprocess_html(self, soup, first):
for tag in soup.findAll(lambda tag: tag.name.lower()=='img' and tag.has_key('src')):
iurl = tag['src']
img = Image()
img.open(iurl)
width, height = img.size
#print 'img is: ', iurl, 'width is: ', width, 'height is: ', height
#width, height = img.size
#print '***img is: ', iurl, '\n****width is: ', width, 'height is: ', height
img.trim(0)
img.save(iurl)
#width, height = img.size
#print '***TRIMMED img width is: ', width, 'height is: ', height
left=0
top=0
border_color='#ffffff'
width, height = img.size
#print 'img is: ', iurl, 'width is: ', width, 'height is: ', height
#print '***retrieved img width is: ', width, 'height is: ', height
height_correction = 1.17
canvas = create_canvas(width, height*height_correction,border_color)
canvas.compose(img, left, top)
#img = canvas
#img.save(iurl)
canvas.save(iurl)
#width, height = canvas.size
#print '***NEW img width is: ', width, 'height is: ', height
return soup
feeds = ['http://feeds.feedburner.com/GeekAndPoke?format=xml']

View File

@@ -19,6 +19,7 @@ class GN(BasicNewsRecipe):
language = 'pl'
remove_javascript = True
temp_files = []
simultaneous_downloads = 1
articles_are_obfuscated = True
@@ -94,16 +95,16 @@ class GN(BasicNewsRecipe):
def find_articles(self, main_block):
for a in main_block.findAll('div', attrs={'class':'prev_doc2'}):
art = a.find('a')
yield {
art = a.find('a')
yield {
'title' : self.tag_to_string(art),
'url' : 'http://www.gosc.pl' + art['href'].replace('/doc/','/doc_pr/'),
'date' : '',
'description' : ''
}
for a in main_block.findAll('div', attrs={'class':'sr-document'}):
art = a.find('a')
yield {
art = a.find('a')
yield {
'title' : self.tag_to_string(art),
'url' : 'http://www.gosc.pl' + art['href'].replace('/doc/','/doc_pr/'),
'date' : '',

View File

@@ -3,7 +3,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1298137661(BasicNewsRecipe):
title = u'Helsingin Sanomat'
__author__ = 'oneillpt'
language = 'fi'
language = 'fi'
oldest_article = 7
max_articles_per_feed = 100
no_stylesheets = True
@@ -11,21 +11,12 @@ class AdvancedUserRecipe1298137661(BasicNewsRecipe):
conversion_options = {
'linearize_tables' : True
}
remove_tags = [
dict(name='a', attrs={'id':'articleCommentUrl'}),
dict(name='p', attrs={'class':'newsSummary'}),
dict(name='div', attrs={'class':'headerTools'})
]
keep_only_tags = [dict(name='div', attrs={'id':'main-content'}),
dict(name='div', attrs={'class':'contentNewsArticle'})]
feeds = [(u'Uutiset - HS.fi', u'http://www.hs.fi/uutiset/rss/'), (u'Politiikka - HS.fi', u'http://www.hs.fi/politiikka/rss/'),
feeds = [(u'Uutiset - HS.fi', u'http://www.hs.fi/uutiset/rss/')
, (u'Politiikka - HS.fi', u'http://www.hs.fi/politiikka/rss/'),
(u'Ulkomaat - HS.fi', u'http://www.hs.fi/ulkomaat/rss/'), (u'Kulttuuri - HS.fi', u'http://www.hs.fi/kulttuuri/rss/'),
(u'Kirjat - HS.fi', u'http://www.hs.fi/kulttuuri/kirjat/rss/'), (u'Elokuvat - HS.fi', u'http://www.hs.fi/kulttuuri/elokuvat/rss/')
]
def print_version(self, url):
j = url.rfind("/")
s = url[j:]
i = s.rfind("?ref=rss")
if i > 0:
s = s[:i]
return "http://www.hs.fi/tulosta" + s

Binary file not shown. (new image added: 868 B)

View File

@@ -1,51 +1,55 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__author__ = 'Lorenzo Vigentini, based on Darko Miletic, Gabriele Marini'
__copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>, Lorenzo Vigentini <l.vigentini at gmail.com>'
__copyright__ = '2009-2011, Darko Miletic <darko.miletic at gmail.com>, Lorenzo Vigentini <l.vigentini at gmail.com>'
description = 'Italian daily newspaper - v1.01 (04, January 2010); 16.05.2010 new version'
'''
http://www.repubblica.it/
'''
import re
from calibre.web.feeds.news import BasicNewsRecipe
class LaRepubblica(BasicNewsRecipe):
__author__ = 'Lorenzo Vigentini, Gabriele Marini'
description = 'Italian daily newspaper'
cover_url = 'http://www.repubblica.it/images/homepage/la_repubblica_logo.gif'
title = u'La Repubblica'
publisher = 'Gruppo editoriale L\'Espresso'
category = 'News, politics, culture, economy, general interest'
language = 'it'
timefmt = '[%a, %d %b, %Y]'
oldest_article = 5
max_articles_per_feed = 100
use_embedded_content = False
recursion = 10
remove_javascript = True
no_stylesheets = True
title = 'La Repubblica'
__author__ = 'Lorenzo Vigentini, Gabriele Marini, Darko Miletic'
description = 'il quotidiano online con tutte le notizie in tempo reale. News e ultime notizie. Tutti i settori: politica, cronaca, economia, sport, esteri, scienza, tecnologia, internet, spettacoli, musica, cultura, arte, mostre, libri, dvd, vhs, concerti, cinema, attori, attrici, recensioni, chat, cucina, mappe. Le citta di Repubblica: Roma, Milano, Bologna, Firenze, Palermo, Napoli, Bari, Torino.'
masthead_url = 'http://www.repubblica.it/static/images/homepage/2010/la-repubblica-logo-home-payoff.png'
publisher = 'Gruppo editoriale L\'Espresso'
category = 'News, politics, culture, economy, general interest'
language = 'it'
timefmt = '[%a, %d %b, %Y]'
oldest_article = 5
encoding = 'utf8'
use_embedded_content = False
#recursion = 10
no_stylesheets = True
extra_css = """
img{display: block}
"""
preprocess_regexps = [
(re.compile(r'.*?<head>', re.DOTALL|re.IGNORECASE), lambda match: '<head>'),
(re.compile(r'<head>.*?<title>', re.DOTALL|re.IGNORECASE), lambda match: '<head><title>'),
(re.compile(r'</title>.*?</head>', re.DOTALL|re.IGNORECASE), lambda match: '</title></head>')
]
def get_article_url(self, article):
link = article.get('id', article.get('guid', None))
if link is None:
return article
return link
keep_only_tags = [dict(name='div', attrs={'class':'articolo'}),
dict(name='div', attrs={'class':'body-text'}),
# dict(name='div', attrs={'class':'page-content'}),
keep_only_tags = [
dict(attrs={'class':'articolo'}),
dict(attrs={'class':'body-text'}),
dict(name='p', attrs={'class':'disclaimer clearfix'}),
dict(name='div', attrs={'id':'contA'})
dict(attrs={'id':'contA'})
]
remove_tags = [
dict(name=['object','link']),
dict(name=['object','link','meta']),
dict(name='span',attrs={'class':'linkindice'}),
dict(name='div', attrs={'class':'bottom-mobile'}),
dict(name='div', attrs={'id':['rssdiv','blocco']}),

View File

@@ -10,7 +10,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class Sueddeutsche(BasicNewsRecipe):
title = u'Süddeutsche'
title = u'sueddeutsche.de'
description = 'News from Germany'
__author__ = 'Oliver Niesner and Armin Geller'
use_embedded_content = False
@@ -62,7 +62,7 @@ class Sueddeutsche(BasicNewsRecipe):
(u'Sport', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5ESport%24?output=rss'),
(u'Leben', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5ELeben%24?output=rss'),
(u'Karriere', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5EKarriere%24?output=rss'),
(u'München&Region', u'http://www.sueddeutsche.de/app/service/rss/ressort/muenchen/rss.xml'),
(u'M&uuml;nchen & Region', u'http://www.sueddeutsche.de/app/service/rss/ressort/muenchen/rss.xml'),
(u'Bayern', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5EBayern%24?output=rss'),
(u'Medien', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5EMedien%24?output=rss'),
(u'Digital', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5EDigital%24?output=rss'),
@@ -75,7 +75,7 @@ class Sueddeutsche(BasicNewsRecipe):
(u'Job', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5EJob%24?output=rss'), # sometimes only
(u'Service', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5EService%24?output=rss'), # sometimes only
(u'Verlag', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5EVerlag%24?output=rss'), # sometimes only
]
]
def print_version(self, url):
main, sep, id = url.rpartition('/')

View File

@@ -3,7 +3,7 @@
from calibre.web.feeds.news import BasicNewsRecipe
class TelepolisNews(BasicNewsRecipe):
title = u'Telepolis (News+Artikel)'
title = u'Telepolis'
__author__ = 'syntaxis'
publisher = 'Heise Zeitschriften Verlag GmbH & Co KG'
description = 'News from Telepolis'
@@ -15,11 +15,8 @@ class TelepolisNews(BasicNewsRecipe):
encoding = "utf-8"
language = 'de'
remove_empty_feeds = True
keep_only_tags = [dict(name = 'div',attrs={'class':'head'}),dict(name = 'div',attrs={'class':'leftbox'}),dict(name='td',attrs={'class':'strict'})]
remove_tags = [ dict(name='td',attrs={'class':'blogbottom'}),
dict(name='div',attrs={'class':'forum'}), dict(name='div',attrs={'class':'social'}),dict(name='div',attrs={'class':'blog-letter p-news'}),
@@ -28,7 +25,6 @@ class TelepolisNews(BasicNewsRecipe):
remove_tags_after = [dict(name='span', attrs={'class':['breadcrumb']})]
feeds = [(u'News', u'http://www.heise.de/tp/news-atom.xml')]
html2lrf_options = [
@@ -39,8 +35,7 @@ class TelepolisNews(BasicNewsRecipe):
html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"'
def preprocess_html(self, soup):
mtag = '<meta http-equiv="Content-Type" content="text/html; charset=' + self.encoding + '">'
soup.head.insert(0,mtag)
return soup
return soup

Binary file not shown. (new image added: 3.0 KiB)

File diff suppressed because it is too large.

View File

@@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
numeric_version = (0, 8, 21)
numeric_version = (0, 8, 22)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

View File

@@ -62,7 +62,8 @@ class ANDROID(USBMS):
0x4e11 : [0x0100, 0x226, 0x227],
0x4e12 : [0x0100, 0x226, 0x227],
0x4e21 : [0x0100, 0x226, 0x227],
0xb058 : [0x0222, 0x226, 0x227]
0xb058 : [0x0222, 0x226, 0x227],
0x0ff9 : [0x0226],
},
# Samsung

View File

@@ -116,6 +116,7 @@ class BOOX(HANLINV3):
supported_platforms = ['windows', 'osx', 'linux']
METADATA_CACHE = '.metadata.calibre'
DRIVEINFO = '.driveinfo.calibre'
icon = I('devices/boox.jpg')
# Ordered list of supported formats
FORMATS = ['epub', 'fb2', 'djvu', 'pdf', 'html', 'txt', 'rtf', 'mobi',
@@ -123,7 +124,7 @@
VENDOR_ID = [0x0525]
PRODUCT_ID = [0xa4a5]
BCD = [0x322]
BCD = [0x322, 0x323]
MAIN_MEMORY_VOLUME_LABEL = 'BOOX Internal Memory'
STORAGE_CARD_VOLUME_LABEL = 'BOOX Storage Card'

View File

@@ -464,6 +464,13 @@ class DevicePlugin(Plugin):
'''
pass
def prepare_addable_books(self, paths):
'''
Given a list of paths, returns another list of paths. These paths
point to addable versions of the books.
'''
return paths
class BookList(list):
'''
A list of books. Each Book object must have the fields
@@ -518,9 +525,3 @@ class BookList(list):
'''
raise NotImplementedError()
def prepare_addable_books(self, paths):
'''
Given a list of paths, returns another list of paths. These paths
point to addable versions of the books.
'''
return paths
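
The hunk above moves prepare_addable_books from BookList up to the DevicePlugin base class, so device drivers can override it directly. A hypothetical override, as a sketch (the copy-to-a-temp-directory behaviour is illustrative, not from this commit):

import os, shutil, tempfile

class MyDevice(DevicePlugin):
    def prepare_addable_books(self, paths):
        # Copy the books off device storage so calibre can add local files
        tdir = tempfile.mkdtemp()
        prepared = []
        for path in paths:
            dest = os.path.join(tdir, os.path.basename(path))
            shutil.copyfile(path, dest)
            prepared.append(dest)
        return prepared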

View File

@@ -84,7 +84,7 @@ class PDNOVEL(USBMS):
FORMATS = ['epub', 'pdf']
VENDOR_ID = [0x18d1]
PRODUCT_ID = [0xb004]
PRODUCT_ID = [0xb004, 0xa004]
BCD = [0x224]
VENDOR_NAME = 'ANDROID'

View File

@@ -11,7 +11,7 @@ __docformat__ = 'restructuredtext en'
Device driver for the SONY T1 devices
'''
import os, time, calendar, re
import os, time, re
import sqlite3 as sqlite
from contextlib import closing
@@ -19,6 +19,7 @@ from calibre.devices.usbms.driver import USBMS, debug_print
from calibre.devices.usbms.device import USBDevice
from calibre.devices.usbms.books import CollectionsBookList
from calibre.devices.usbms.books import BookList
from calibre.ebooks.metadata import authors_to_sort_string
from calibre.constants import islinux
from calibre.ebooks.metadata import authors_to_string, authors_to_sort_string
@@ -488,7 +489,7 @@ class PRST1(USBMS):
cursor.close()
if metadata.bookId is not None:
if getattr(metadata, 'bookId', None) is not None:
debug_print('PRS-T1: refreshing cover for book being sent')
self.upload_book_cover(connection, metadata, source_id)
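
The getattr change above is the substance of the fix: a Metadata object that never acquired a bookId attribute would previously raise AttributeError. A minimal illustration (the bare class is hypothetical):

class Metadata(object):
    pass

m = Metadata()
# m.bookId would raise AttributeError; the guarded lookup yields None instead
print(getattr(m, 'bookId', None) is not None)  # False, no exception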

View File

@@ -325,6 +325,10 @@ class MobiReader(object):
self.processed_html = self.processed_html.replace('</</', '</')
self.processed_html = re.sub(r'</([a-zA-Z]+)<', r'</\1><',
self.processed_html)
# Remove tags of the form <xyz: ...> as they can cause issues further
# along the pipeline
self.processed_html = re.sub(r'</{0,1}[a-zA-Z]+:\s+[^>]*>', '',
self.processed_html)
for pat in ENCODING_PATS:
self.processed_html = pat.sub('', self.processed_html)
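
To see what the new substitution strips, a quick illustration with a made-up input fragment:

import re
sample = '<p>Hello <xyz: foo>world</xyz: >!</p>'
cleaned = re.sub(r'</{0,1}[a-zA-Z]+:\s+[^>]*>', '', sample)
assert cleaned == '<p>Hello world!</p>'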

View File

@@ -204,6 +204,7 @@ def render_data(mi, use_roman_numbers=True, all_fields=False):
class CoverView(QWidget): # {{{
cover_changed = pyqtSignal(object, object)
cover_removed = pyqtSignal(object)
def __init__(self, vertical, parent=None):
QWidget.__init__(self, parent)
@@ -289,10 +290,12 @@ class CoverView(QWidget): # {{{
cm = QMenu(self)
paste = cm.addAction(_('Paste Cover'))
copy = cm.addAction(_('Copy Cover'))
remove = cm.addAction(_('Remove Cover'))
if not QApplication.instance().clipboard().mimeData().hasImage():
paste.setEnabled(False)
copy.triggered.connect(self.copy_to_clipboard)
paste.triggered.connect(self.paste_from_clipboard)
remove.triggered.connect(self.remove_cover)
cm.exec_(ev.globalPos())
def copy_to_clipboard(self):
@@ -315,6 +318,13 @@ class CoverView(QWidget): # {{{
self.cover_changed.emit(id_,
pixmap_to_data(pmap))
def remove_cover(self):
id_ = self.data.get('id', None)
self.pixmap = self.default_pixmap
self.do_layout()
self.update()
if id_ is not None:
self.cover_removed.emit(id_)
# }}}
@@ -457,6 +467,7 @@ class BookDetails(QWidget): # {{{
remote_file_dropped = pyqtSignal(object, object)
files_dropped = pyqtSignal(object, object)
cover_changed = pyqtSignal(object, object)
cover_removed = pyqtSignal(object)
# Drag 'n drop {{{
DROPABBLE_EXTENSIONS = IMAGE_EXTENSIONS+BOOK_EXTENSIONS
@@ -514,6 +525,7 @@ class BookDetails(QWidget): # {{{
self.cover_view = CoverView(vertical, self)
self.cover_view.cover_changed.connect(self.cover_changed.emit)
self.cover_view.cover_removed.connect(self.cover_removed.emit)
self._layout.addWidget(self.cover_view)
self.book_info = BookInfo(vertical, self)
self._layout.addWidget(self.book_info)
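
The wiring above follows a simple signal-relay pattern: CoverView's new signal is re-emitted by BookDetails, so outer code never has to reach into the inner widget. A standalone sketch of the pattern, assuming PyQt4 as calibre used at the time (class names hypothetical):

from PyQt4.Qt import QObject, pyqtSignal

class Child(QObject):
    cover_removed = pyqtSignal(object)

class Relay(QObject):
    cover_removed = pyqtSignal(object)

    def __init__(self):
        QObject.__init__(self)
        self.child = Child()
        # Re-emit the child's signal as our own
        self.child.cover_removed.connect(self.cover_removed.emit)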

View File

@@ -261,6 +261,8 @@ class LayoutMixin(object): # {{{
self.book_details.files_dropped.connect(self.iactions['Add Books'].files_dropped_on_book)
self.book_details.cover_changed.connect(self.bd_cover_changed,
type=Qt.QueuedConnection)
self.book_details.cover_removed.connect(self.bd_cover_removed,
type=Qt.QueuedConnection)
self.book_details.remote_file_dropped.connect(
self.iactions['Add Books'].remote_file_dropped_on_book,
type=Qt.QueuedConnection)
@@ -279,6 +281,12 @@ class LayoutMixin(object): # {{{
if self.cover_flow:
self.cover_flow.dataChanged()
def bd_cover_removed(self, id_):
self.library_view.model().db.remove_cover(id_, commit=True,
notify=False)
if self.cover_flow:
self.cover_flow.dataChanged()
def save_layout_state(self):
for x in ('library', 'memory', 'card_a', 'card_b'):
getattr(self, x+'_view').save_state()

View File

@@ -500,7 +500,8 @@ class JobsDialog(QDialog, Ui_JobsDialog):
def kill_job(self, *args):
rows = [index.row() for index in
self.jobs_view.selectionModel().selectedRows()]
return error_dialog(self, _('No job'),
if not rows:
return error_dialog(self, _('No job'),
_('No job selected'), show=True)
if question_dialog(self, _('Are you sure?'),
ngettext('Do you really want to stop the selected job?',

View File

@@ -127,7 +127,7 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
self.composite_sort_by.setCurrentIndex(sb)
self.composite_make_category.setChecked(
c['display'].get('make_category', False))
self.composite_make_category.setChecked(
self.composite_contains_html.setChecked(
c['display'].get('contains_html', False))
elif ct == 'enumeration':
self.enum_box.setText(','.join(c['display'].get('enum_values', [])))

View File

@@ -368,6 +368,7 @@ def command_remove(args, dbpath):
def do_add_format(db, id, fmt, path):
db.add_format_with_hooks(id, fmt.upper(), path, index_is_id=True)
send_message()
def add_format_option_parser():
return get_parser(_(
@@ -396,6 +397,7 @@ def command_add_format(args, dbpath):
def do_remove_format(db, id, fmt):
db.remove_format(id, fmt, index_is_id=True)
send_message()
def remove_format_option_parser():
return get_parser(_(

View File

@@ -20,7 +20,7 @@ What formats does |app| support conversion to/from?
|app| supports the conversion of many input formats to many output formats.
It can convert every input format in the following list, to every output format.
*Input Formats:* CBZ, CBR, CBC, CHM, EPUB, FB2, HTML, HTMLZ, LIT, LRF, MOBI, ODT, PDF, PRC, PDB, PML, RB, RTF, SNB, TCR, TXT, TXTZ
*Input Formats:* CBZ, CBR, CBC, CHM, DJVU, EPUB, FB2, HTML, HTMLZ, LIT, LRF, MOBI, ODT, PDF, PRC, PDB, PML, RB, RTF, SNB, TCR, TXT, TXTZ
*Output Formats:* EPUB, FB2, OEB, LIT, LRF, MOBI, HTMLZ, PDB, PML, RB, PDF, RTF, SNB, TCR, TXT, TXTZ
@@ -28,6 +28,7 @@ It can convert every input format in the following list, to every output format.
PRC is a generic format, |app| supports PRC files with TextRead and MOBIBook headers.
PDB is also a generic format. |app| supports eReader, Plucker, PML and zTxt PDB files.
DJVU support is only for converting DJVU files that contain embedded text. These are typically generated by OCR software.
.. _best-source-formats:

View File

@@ -268,6 +268,7 @@ The following functions are available in addition to those described in single-f
* ``list_difference(list1, list2, separator)`` -- return a list made by removing from `list1` any item found in `list2`, using a case-insensitive compare. The items in `list1` and `list2` are separated by separator, as are the items in the returned list.
* ``list_equals(list1, sep1, list2, sep2, yes_val, no_val)`` -- return `yes_val` if `list1` and `list2` contain the same items, otherwise return `no_val`. The items are determined by splitting each list using the appropriate separator character (`sep1` or `sep2`). The order of items in the lists is not relevant. The compare is case insensitive.
* ``list_intersection(list1, list2, separator)`` -- return a list made by removing from `list1` any item not found in `list2`, using a case-insensitive compare. The items in `list1` and `list2` are separated by separator, as are the items in the returned list.
* ``list_re(src_list, separator, search_re, opt_replace)`` -- Construct a list by first separating `src_list` into items using the `separator` character. For each item in the list, check if it matches `search_re`. If it does, then add it to the list to be returned. If `opt_replace` is not the empty string, then apply the replacement before adding the item to the returned list.
* ``list_sort(list, direction, separator)`` -- return list sorted using a case-insensitive sort. If `direction` is zero, the list is sorted ascending, otherwise descending. The list items are separated by separator, as are the items in the returned list.
* ``list_union(list1, list2, separator)`` -- return a list made by merging the items in list1 and list2, removing duplicate items using a case-insensitive compare. If items differ in case, the one in list1 is used. The items in list1 and list2 are separated by separator, as are the items in the returned list.
* ``multiply(x, y)`` -- returns x * y. Throws an exception if either x or y is not a number.
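
A worked example for the list functions above: with list1 = 'a, b', list2 = 'B, c' and separator ',', list_union should return 'a, b, c' (the case-insensitive merge drops the duplicate 'B' and keeps list1's 'b'), while list_intersection over the same inputs should return just 'b'.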

70 further file diffs suppressed because they are too large.

Some files were not shown because too many files have changed in this diff.