Merge from trunk

This commit is contained in:
Charles Haley 2013-05-18 14:01:09 +02:00
commit f1b3aca17b
109 changed files with 32105 additions and 26400 deletions

View File

@ -20,6 +20,42 @@
# new recipes: # new recipes:
# - title: # - title:
- version: 0.9.31
date: 2013-05-17
new features:
- title: "Book list: Highlight the current cell in the book list, particularly convenient for usage with the keyboard."
- title: "Allow creation of advanced rules for column icons."
- title: "Driver for the limited edition SONY PRS-T2N"
- title: "MOBI Input: Add support for MOBI/KF8 files generated with the to be released kindlegen 2.9."
tickets: [1179144]
bug fixes:
- title: "ToC Editor: Fix incorrect playOrders in the generated toc.ncx when editing the toc in an epub file. This apparently affects FBReader."
- title: "PDF Input: Fix crashes on some malformed files, by updating the PDF library calibre uses (poppler 0.22.4)"
- title: "PDF Output: Ignore invalid links instead of erroring out on them."
tickets: [1179314]
- title: "MOBI Output: Fix space erroneously being removed when the input document contains a tag with leading space and sub-tags."
tickets: [1179216]
- title: "Search and replace wizard: Fix generated html being slightly different from the actual html in the conversion pipeline for some input formats (mainly HTML, CHM, LIT)."
improved recipes:
- Weblogs SL
- .net magazine
new recipes:
- title: nrc-next
author: Niels Giesen
- version: 0.9.30 - version: 0.9.30
date: 2013-05-10 date: 2013-05-10

View File

@ -9,21 +9,24 @@ class AdvancedUserRecipe1274742400(BasicNewsRecipe):
oldest_article = 7 oldest_article = 7
max_articles_per_feed = 100 max_articles_per_feed = 100
keep_only_tags = [dict(id='content-main')] #keep_only_tags = [dict(id='content-main')]
remove_tags = [dict(id=['right-col-content', 'trending-topics']), #remove_tags = [dict(id=['right-col-content', 'trending-topics']),
{'class':['ppy-outer']} #{'class':['ppy-outer']}
] #]
no_stylesheets = True no_stylesheets = True
use_embedded_content = False
auto_cleanup = True
feeds = [ feeds = [
(u'News', u'http://www.lvrj.com/news.rss'), (u'News', u'http://www.lvrj.com/news.rss'),
(u'Business', u'http://www.lvrj.com/business.rss'), (u'Business', u'http://www.lvrj.com/business.rss'),
(u'Living', u'http://www.lvrj.com/living.rss'), (u'Living', u'http://www.lvrj.com/living.rss'),
(u'Opinion', u'http://www.lvrj.com/opinion.rss'), (u'Opinion', u'http://www.lvrj.com/opinion.rss'),
(u'Neon', u'http://www.lvrj.com/neon.rss'), (u'Neon', u'http://www.lvrj.com/neon.rss'),
(u'Image', u'http://www.lvrj.com/image.rss'), #(u'Image', u'http://www.lvrj.com/image.rss'),
(u'Home & Garden', u'http://www.lvrj.com/home_and_garden.rss'), #(u'Home & Garden', u'http://www.lvrj.com/home_and_garden.rss'),
(u'Furniture & Design', u'http://www.lvrj.com/furniture_and_design.rss'), #(u'Furniture & Design', u'http://www.lvrj.com/furniture_and_design.rss'),
(u'Drive', u'http://www.lvrj.com/drive.rss'), #(u'Drive', u'http://www.lvrj.com/drive.rss'),
(u'Real Estate', u'http://www.lvrj.com/real_estate.rss'), #(u'Real Estate', u'http://www.lvrj.com/real_estate.rss'),
(u'Sports', u'http://www.lvrj.com/sports.rss')] (u'Sports', u'http://www.lvrj.com/sports.rss')]

View File

@ -4,7 +4,7 @@ class AdvancedUserRecipe1306061239(BasicNewsRecipe):
title = u'New Musical Express Magazine' title = u'New Musical Express Magazine'
description = 'Author D.Asbury. UK Rock & Pop Mag. ' description = 'Author D.Asbury. UK Rock & Pop Mag. '
__author__ = 'Dave Asbury' __author__ = 'Dave Asbury'
# last updated 7/10/12 # last updated 17/5/13 News feed url altered
remove_empty_feeds = True remove_empty_feeds = True
remove_javascript = True remove_javascript = True
no_stylesheets = True no_stylesheets = True
@ -13,62 +13,57 @@ class AdvancedUserRecipe1306061239(BasicNewsRecipe):
#auto_cleanup = True #auto_cleanup = True
language = 'en_GB' language = 'en_GB'
compress_news_images = True compress_news_images = True
def get_cover_url(self): def get_cover_url(self):
soup = self.index_to_soup('http://www.nme.com/component/subscribe') soup = self.index_to_soup('http://www.nme.com/component/subscribe')
cov = soup.find(attrs={'id' : 'magazine_cover'}) cov = soup.find(attrs={'id' : 'magazine_cover'})
cov2 = str(cov['src']) cov2 = str(cov['src'])
# print '**** Cov url =*', cover_url,'***' # print '**** Cov url =*', cover_url,'***'
#print '**** Cov url =*','http://www.magazinesdirect.com/article_images/articledir_3138/1569221/1_largelisting.jpg','***' #print '**** Cov url =*','http://www.magazinesdirect.com/article_images/articledir_3138/1569221/1_largelisting.jpg','***'
br = browser() br = browser()
br.set_handle_redirect(False) br.set_handle_redirect(False)
try: try:
br.open_novisit(cov2) br.open_novisit(cov2)
cover_url = str(cov2) cover_url = str(cov2)
except: except:
cover_url = 'http://tawanda3000.files.wordpress.com/2011/02/nme-logo.jpg' cover_url = 'http://tawanda3000.files.wordpress.com/2011/02/nme-logo.jpg'
return cover_url return cover_url
masthead_url = 'http://tawanda3000.files.wordpress.com/2011/02/nme-logo.jpg' masthead_url = 'http://tawanda3000.files.wordpress.com/2011/02/nme-logo.jpg'
remove_tags = [ remove_tags = [
dict( attrs={'class':'clear_icons'}), dict(attrs={'class':'clear_icons'}),
dict( attrs={'class':'share_links'}), dict(attrs={'class':'share_links'}),
dict( attrs={'id':'right_panel'}), dict(attrs={'id':'right_panel'}),
dict( attrs={'class':'today box'}), dict(attrs={'class':'today box'}),
] ]
keep_only_tags = [ keep_only_tags = [
dict(name='h1'), dict(name='h1'),
#dict(name='h3'), #dict(name='h3'),
dict(attrs={'class' : 'BText'}), dict(attrs={'class' : 'BText'}),
dict(attrs={'class' : 'Bmore'}), dict(attrs={'class' : 'Bmore'}),
dict(attrs={'class' : 'bPosts'}), dict(attrs={'class' : 'bPosts'}),
dict(attrs={'class' : 'text'}), dict(attrs={'class' : 'text'}),
dict(attrs={'id' : 'article_gallery'}), dict(attrs={'id' : 'article_gallery'}),
#dict(attrs={'class' : 'image'}), #dict(attrs={'class' : 'image'}),
dict(attrs={'class' : 'article_text'}) dict(attrs={'class' : 'article_text'})
]
feeds = [
(u'NME News', u'http://feeds.feedburner.com/nmecom/rss/newsxml?format=xml'),
#(u'Reviews', u'http://feeds2.feedburner.com/nme/SdML'),
(u'Reviews',u'http://feed43.com/1817687144061333.xml'),
(u'Bloggs',u'http://feed43.com/3326754333186048.xml'),
] ]
feeds = [
(u'NME News', u'http://www.nme.com/news?alt=rss' ), #http://feeds.feedburner.com/nmecom/rss/newsxml?format=xml'),
#(u'Reviews', u'http://feeds2.feedburner.com/nme/SdML'),
(u'Reviews',u'http://feed43.com/1817687144061333.xml'),
(u'Bloggs',u'http://feed43.com/3326754333186048.xml'),
]
extra_css = ''' extra_css = '''
h1{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:large;} h1{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:large;}
h2{font-family:Arial,Helvetica,sans-serif; font-weight:normal;font-size:small;} h2{font-family:Arial,Helvetica,sans-serif; font-weight:normal;font-size:small;}
p{font-family:Arial,Helvetica,sans-serif;font-size:small;} p{font-family:Arial,Helvetica,sans-serif;font-size:small;}
body{font-family:Helvetica,Arial,sans-serif;font-size:small;} body{font-family:Helvetica,Arial,sans-serif;font-size:small;}
''' '''

View File

@ -3,7 +3,7 @@ __license__ = 'GPL v3'
__copyright__ = '4 February 2011, desUBIKado' __copyright__ = '4 February 2011, desUBIKado'
__author__ = 'desUBIKado' __author__ = 'desUBIKado'
__version__ = 'v0.09' __version__ = 'v0.09'
__date__ = '02, December 2012' __date__ = '14, May 2013'
''' '''
http://www.weblogssl.com/ http://www.weblogssl.com/
''' '''
@ -56,15 +56,16 @@ class weblogssl(BasicNewsRecipe):
,(u'Zona FandoM', u'http://feeds.weblogssl.com/zonafandom') ,(u'Zona FandoM', u'http://feeds.weblogssl.com/zonafandom')
,(u'Fandemia', u'http://feeds.weblogssl.com/fandemia') ,(u'Fandemia', u'http://feeds.weblogssl.com/fandemia')
,(u'Tendencias', u'http://feeds.weblogssl.com/trendencias') ,(u'Tendencias', u'http://feeds.weblogssl.com/trendencias')
,(u'Beb\xe9s y m\xe1s', u'http://feeds.weblogssl.com/bebesymas') ,(u'Tendencias Belleza', u'http://feeds.weblogssl.com/trendenciasbelleza')
,(u'Tendencias Hombre', u'http://feeds.weblogssl.com/trendenciashombre')
,(u'Tendencias Shopping', u'http://feeds.weblogssl.com/trendenciasshopping')
,(u'Directo al paladar', u'http://feeds.weblogssl.com/directoalpaladar') ,(u'Directo al paladar', u'http://feeds.weblogssl.com/directoalpaladar')
,(u'Compradicci\xf3n', u'http://feeds.weblogssl.com/compradiccion') ,(u'Compradicci\xf3n', u'http://feeds.weblogssl.com/compradiccion')
,(u'Decoesfera', u'http://feeds.weblogssl.com/decoesfera') ,(u'Decoesfera', u'http://feeds.weblogssl.com/decoesfera')
,(u'Embelezzia', u'http://feeds.weblogssl.com/embelezzia') ,(u'Embelezzia', u'http://feeds.weblogssl.com/embelezzia')
,(u'Vit\xf3nica', u'http://feeds.weblogssl.com/vitonica') ,(u'Vit\xf3nica', u'http://feeds.weblogssl.com/vitonica')
,(u'Ambiente G', u'http://feeds.weblogssl.com/ambienteg') ,(u'Ambiente G', u'http://feeds.weblogssl.com/ambienteg')
,(u'Tendencias Belleza', u'http://feeds.weblogssl.com/trendenciasbelleza') ,(u'Beb\xe9s y m\xe1s', u'http://feeds.weblogssl.com/bebesymas')
,(u'Tendencias Hombre', u'http://feeds.weblogssl.com/trendenciashombre')
,(u'Peques y m\xe1s', u'http://feeds.weblogssl.com/pequesymas') ,(u'Peques y m\xe1s', u'http://feeds.weblogssl.com/pequesymas')
,(u'Motorpasi\xf3n', u'http://feeds.weblogssl.com/motorpasion') ,(u'Motorpasi\xf3n', u'http://feeds.weblogssl.com/motorpasion')
,(u'Motorpasi\xf3n F1', u'http://feeds.weblogssl.com/motorpasionf1') ,(u'Motorpasi\xf3n F1', u'http://feeds.weblogssl.com/motorpasionf1')
@ -90,7 +91,7 @@ class weblogssl(BasicNewsRecipe):
dict(name='section' , attrs={'class':'comments'}), #m.xataka.com dict(name='section' , attrs={'class':'comments'}), #m.xataka.com
dict(name='div' , attrs={'class':'article-comments'}), #m.xataka.com dict(name='div' , attrs={'class':'article-comments'}), #m.xataka.com
dict(name='nav' , attrs={'class':'article-taxonomy'}) #m.xataka.com dict(name='nav' , attrs={'class':'article-taxonomy'}) #m.xataka.com
] ]
remove_tags_after = dict(name='section' , attrs={'class':'comments'}) remove_tags_after = dict(name='section' , attrs={'class':'comments'})
@ -119,23 +120,6 @@ class weblogssl(BasicNewsRecipe):
return soup return soup
# Para obtener la url original del articulo a partir de la de "feedsportal"
# El siguiente código es gracias al usuario "bosplans" de www.mobileread.com
# http://www.mobileread.com/forums/showthread.php?t=130297
def get_article_url(self, article): def get_article_url(self, article):
link = article.get('link', None)
if link is None:
return article
# if link.split('/')[-4]=="xataka2":
# return article.get('feedburner_origlink', article.get('link', article.get('guid')))
if link.split('/')[-4]=="xataka2":
return article.get('guid', None) return article.get('guid', None)
if link.split('/')[-1]=="story01.htm":
link=link.split('/')[-2]
a=['0B','0C','0D','0E','0F','0G','0N' ,'0L0S','0A']
b=['.' ,'/' ,'?' ,'-' ,'=' ,'&' ,'.com','www.','0']
for i in range(0,len(a)):
link=link.replace(a[i],b[i])
link="http://"+link
return link

View File

@ -9,8 +9,9 @@ import copy
# http://online.wsj.com/page/us_in_todays_paper.html # http://online.wsj.com/page/us_in_todays_paper.html
def filter_classes(x): def filter_classes(x):
if not x: return False if not x:
bad_classes = {'sTools', 'printSummary', 'mostPopular', 'relatedCollection'} return False
bad_classes = {'articleInsetPoll', 'trendingNow', 'sTools', 'printSummary', 'mostPopular', 'relatedCollection'}
classes = frozenset(x.split()) classes = frozenset(x.split())
return len(bad_classes.intersection(classes)) > 0 return len(bad_classes.intersection(classes)) > 0
@ -42,14 +43,15 @@ class WallStreetJournal(BasicNewsRecipe):
remove_tags_before = dict(name='h1') remove_tags_before = dict(name='h1')
remove_tags = [ remove_tags = [
dict(id=["articleTabs_tab_article", dict(id=["articleTabs_tab_article",
"articleTabs_tab_comments", "articleTabs_tab_comments", 'msnLinkback', 'yahooLinkback',
'articleTabs_panel_comments', 'footer', 'articleTabs_panel_comments', 'footer', 'emailThisScrim', 'emailConfScrim', 'emailErrorScrim',
"articleTabs_tab_interactive", "articleTabs_tab_video", "articleTabs_tab_interactive", "articleTabs_tab_video",
"articleTabs_tab_map", "articleTabs_tab_slideshow", "articleTabs_tab_map", "articleTabs_tab_slideshow",
"articleTabs_tab_quotes", "articleTabs_tab_document", "articleTabs_tab_quotes", "articleTabs_tab_document",
"printModeAd", "aFbLikeAuth", "videoModule", "printModeAd", "aFbLikeAuth", "videoModule",
"mostRecommendations", "topDiscussions"]), "mostRecommendations", "topDiscussions"]),
{'class':['footer_columns','network','insetCol3wide','interactive','video','slideshow','map','insettip','insetClose','more_in', "insetContent", 'articleTools_bottom', 'aTools', "tooltip", "adSummary", "nav-inline"]}, {'class':['footer_columns','hidden', 'network','insetCol3wide','interactive','video','slideshow','map','insettip',
'insetClose','more_in', "insetContent", 'articleTools_bottom', 'aTools', "tooltip", "adSummary", "nav-inline"]},
dict(rel='shortcut icon'), dict(rel='shortcut icon'),
{'class':filter_classes}, {'class':filter_classes},
] ]
@ -74,7 +76,10 @@ class WallStreetJournal(BasicNewsRecipe):
for tag in soup.findAll(name=['table', 'tr', 'td']): for tag in soup.findAll(name=['table', 'tr', 'td']):
tag.name = 'div' tag.name = 'div'
for tag in soup.findAll('div', dict(id=["articleThumbnail_1", "articleThumbnail_2", "articleThumbnail_3", "articleThumbnail_4", "articleThumbnail_5", "articleThumbnail_6", "articleThumbnail_7"])): for tag in soup.findAll('div', dict(id=[
"articleThumbnail_1", "articleThumbnail_2", "articleThumbnail_3",
"articleThumbnail_4", "articleThumbnail_5", "articleThumbnail_6",
"articleThumbnail_7"])):
tag.extract() tag.extract()
return soup return soup
@ -92,7 +97,7 @@ class WallStreetJournal(BasicNewsRecipe):
except: except:
articles = [] articles = []
if articles: if articles:
feeds.append((title, articles)) feeds.append((title, articles))
return feeds return feeds
def abs_wsj_url(self, href): def abs_wsj_url(self, href):
@ -119,16 +124,16 @@ class WallStreetJournal(BasicNewsRecipe):
for a in div.findAll('a', href=lambda x: x and '/itp/' in x): for a in div.findAll('a', href=lambda x: x and '/itp/' in x):
pageone = a['href'].endswith('pageone') pageone = a['href'].endswith('pageone')
if pageone: if pageone:
title = 'Front Section' title = 'Front Section'
url = self.abs_wsj_url(a['href']) url = self.abs_wsj_url(a['href'])
feeds = self.wsj_add_feed(feeds,title,url) feeds = self.wsj_add_feed(feeds,title,url)
title = "What's News" title = "What's News"
url = url.replace('pageone','whatsnews') url = url.replace('pageone','whatsnews')
feeds = self.wsj_add_feed(feeds,title,url) feeds = self.wsj_add_feed(feeds,title,url)
else: else:
title = self.tag_to_string(a) title = self.tag_to_string(a)
url = self.abs_wsj_url(a['href']) url = self.abs_wsj_url(a['href'])
feeds = self.wsj_add_feed(feeds,title,url) feeds = self.wsj_add_feed(feeds,title,url)
return feeds return feeds
def wsj_find_wn_articles(self, url): def wsj_find_wn_articles(self, url):
@ -137,22 +142,22 @@ class WallStreetJournal(BasicNewsRecipe):
whats_news = soup.find('div', attrs={'class':lambda x: x and 'whatsNews-simple' in x}) whats_news = soup.find('div', attrs={'class':lambda x: x and 'whatsNews-simple' in x})
if whats_news is not None: if whats_news is not None:
for a in whats_news.findAll('a', href=lambda x: x and '/article/' in x): for a in whats_news.findAll('a', href=lambda x: x and '/article/' in x):
container = a.findParent(['p']) container = a.findParent(['p'])
meta = a.find(attrs={'class':'meta_sectionName'}) meta = a.find(attrs={'class':'meta_sectionName'})
if meta is not None: if meta is not None:
meta.extract() meta.extract()
title = self.tag_to_string(a).strip() title = self.tag_to_string(a).strip()
url = a['href'] url = a['href']
desc = '' desc = ''
if container is not None: if container is not None:
desc = self.tag_to_string(container) desc = self.tag_to_string(container)
articles.append({'title':title, 'url':url, articles.append({'title':title, 'url':url,
'description':desc, 'date':''}) 'description':desc, 'date':''})
self.log('\tFound WN article:', title) self.log('\tFound WN article:', title)
self.log('\t\t', desc) self.log('\t\t', desc)
return articles return articles
@ -161,18 +166,18 @@ class WallStreetJournal(BasicNewsRecipe):
whats_news = soup.find('div', attrs={'class':lambda x: x and 'whatsNews-simple' in x}) whats_news = soup.find('div', attrs={'class':lambda x: x and 'whatsNews-simple' in x})
if whats_news is not None: if whats_news is not None:
whats_news.extract() whats_news.extract()
articles = [] articles = []
flavorarea = soup.find('div', attrs={'class':lambda x: x and 'ahed' in x}) flavorarea = soup.find('div', attrs={'class':lambda x: x and 'ahed' in x})
if flavorarea is not None: if flavorarea is not None:
flavorstory = flavorarea.find('a', href=lambda x: x and x.startswith('/article')) flavorstory = flavorarea.find('a', href=lambda x: x and x.startswith('/article'))
if flavorstory is not None: if flavorstory is not None:
flavorstory['class'] = 'mjLinkItem' flavorstory['class'] = 'mjLinkItem'
metapage = soup.find('span', attrs={'class':lambda x: x and 'meta_sectionName' in x}) metapage = soup.find('span', attrs={'class':lambda x: x and 'meta_sectionName' in x})
if metapage is not None: if metapage is not None:
flavorstory.append( copy.copy(metapage) ) #metapage should always be A1 because that should be first on the page flavorstory.append(copy.copy(metapage)) # metapage should always be A1 because that should be first on the page
for a in soup.findAll('a', attrs={'class':'mjLinkItem'}, href=True): for a in soup.findAll('a', attrs={'class':'mjLinkItem'}, href=True):
container = a.findParent(['li', 'div']) container = a.findParent(['li', 'div'])
@ -199,7 +204,6 @@ class WallStreetJournal(BasicNewsRecipe):
return articles return articles
def cleanup(self): def cleanup(self):
self.browser.open('http://online.wsj.com/logout?url=http://online.wsj.com') self.browser.open('http://online.wsj.com/logout?url=http://online.wsj.com')

View File

@ -38,7 +38,7 @@ binary_includes = [
'/lib/libz.so.1', '/lib/libz.so.1',
'/usr/lib/libtiff.so.5', '/usr/lib/libtiff.so.5',
'/lib/libbz2.so.1', '/lib/libbz2.so.1',
'/usr/lib/libpoppler.so.28', '/usr/lib/libpoppler.so.37',
'/usr/lib/libxml2.so.2', '/usr/lib/libxml2.so.2',
'/usr/lib/libopenjpeg.so.2', '/usr/lib/libopenjpeg.so.2',
'/usr/lib/libxslt.so.1', '/usr/lib/libxslt.so.1',

View File

@ -378,7 +378,7 @@ class Py2App(object):
@flush @flush
def add_poppler(self): def add_poppler(self):
info('\nAdding poppler') info('\nAdding poppler')
for x in ('libpoppler.28.dylib',): for x in ('libpoppler.37.dylib',):
self.install_dylib(os.path.join(SW, 'lib', x)) self.install_dylib(os.path.join(SW, 'lib', x))
for x in ('pdftohtml', 'pdftoppm', 'pdfinfo'): for x in ('pdftohtml', 'pdftoppm', 'pdfinfo'):
self.install_dylib(os.path.join(SW, 'bin', x), False) self.install_dylib(os.path.join(SW, 'bin', x), False)

File diff suppressed because it is too large Load Diff

View File

@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net' __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en' __docformat__ = 'restructuredtext en'
__appname__ = u'calibre' __appname__ = u'calibre'
numeric_version = (0, 9, 30) numeric_version = (0, 9, 31)
__version__ = u'.'.join(map(unicode, numeric_version)) __version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>" __author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

View File

@ -1476,6 +1476,7 @@ class StoreKoobeStore(StoreBase):
drm_free_only = True drm_free_only = True
headquarters = 'PL' headquarters = 'PL'
formats = ['EPUB', 'MOBI', 'PDF'] formats = ['EPUB', 'MOBI', 'PDF']
affiliate = True
class StoreLegimiStore(StoreBase): class StoreLegimiStore(StoreBase):
name = 'Legimi' name = 'Legimi'

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,2 @@
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,300 @@
#!/usr/bin/env python
from __future__ import (unicode_literals, division, absolute_import,
print_function)
"""
https://github.com/ishikawa/python-plist-parser/blob/master/plist_parser.py
A `Property Lists`_ is a data representation used in Apple's Mac OS X as
a convenient way to store standard object types, such as string, number,
boolean, and container object.
This file contains a class ``XmlPropertyListParser`` for parse
a property list file and get back a python native data structure.
:copyright: 2008 by Takanori Ishikawa <takanori.ishikawa@gmail.com>
:license: MIT (See LICENSE file for more details)
.. _Property Lists: http://developer.apple.com/documentation/Cocoa/Conceptual/PropertyLists/
"""
class PropertyListParseError(Exception):
    """Signals that a property list document could not be parsed."""
class XmlPropertyListParser(object):
    """
    The ``XmlPropertyListParser`` class provides methods that
    convert `Property Lists`_ objects from xml format.

    Property list objects include ``string``, ``unicode``,
    ``list``, ``dict``, ``datetime``, and ``int`` or ``float``.

    :copyright: 2008 by Takanori Ishikawa <takanori.ishikawa@gmail.com>
    :license: MIT License

    .. _Property List: http://developer.apple.com/documentation/Cocoa/Conceptual/PropertyLists/
    """

    def _assert(self, test, message):
        # Raise the parser's own exception type so callers only ever have
        # to catch PropertyListParseError.
        if not test:
            raise PropertyListParseError(message)

    # ------------------------------------------------
    # SAX2: ContentHandler
    # ------------------------------------------------
    # No-op handlers required by the xml.sax ContentHandler interface.
    def setDocumentLocator(self, locator):
        pass

    def startPrefixMapping(self, prefix, uri):
        pass

    def endPrefixMapping(self, prefix):
        pass

    def startElementNS(self, name, qname, attrs):
        pass

    def endElementNS(self, name, qname):
        pass

    def ignorableWhitespace(self, whitespace):
        pass

    def processingInstruction(self, target, data):
        pass

    def skippedEntity(self, name):
        pass

    def startDocument(self):
        # Reset all parser state; called once per parse.
        self.__stack = []
        self.__plist = self.__key = self.__characters = None
        # For reducing runtime type checking,
        # the parser caches top level object type.
        self.__in_dict = False

    def endDocument(self):
        self._assert(self.__plist is not None, "A top level element must be <plist>.")
        # BUGFIX: was ``len(self.__stack) is 0`` -- an identity comparison
        # with an int literal, which only works by CPython small-int
        # caching accident.  Use a real equality test.
        self._assert(
            len(self.__stack) == 0,
            "multiple objects at top level.")

    def startElement(self, name, attributes):
        if name in XmlPropertyListParser.START_CALLBACKS:
            XmlPropertyListParser.START_CALLBACKS[name](self, name, attributes)
        if name in XmlPropertyListParser.PARSE_CALLBACKS:
            self.__characters = []

    def endElement(self, name):
        if name in XmlPropertyListParser.END_CALLBACKS:
            XmlPropertyListParser.END_CALLBACKS[name](self, name)
        if name in XmlPropertyListParser.PARSE_CALLBACKS:
            # Creates character string from buffered characters.
            content = ''.join(self.__characters)
            # For compatibility with ``xml.etree`` and ``plistlib`` on
            # Python 2, convert text string to ascii, if possible.  On
            # Python 3 ``encode`` would yield ``bytes``, so skip it there.
            if str is bytes:  # Python 2 only
                try:
                    content = content.encode('ascii')
                except (UnicodeError, AttributeError):
                    pass
            XmlPropertyListParser.PARSE_CALLBACKS[name](self, name, content)
            self.__characters = None

    def characters(self, content):
        if self.__characters is not None:
            self.__characters.append(content)

    # ------------------------------------------------
    # XmlPropertyListParser private
    # ------------------------------------------------
    def _push_value(self, value):
        # Attach *value* either as the top-level object, as a dict value
        # for the pending key, or as the next item of the current array.
        if not self.__stack:
            self._assert(self.__plist is None, "Multiple objects at top level")
            self.__plist = value
        else:
            top = self.__stack[-1]
            #assert isinstance(top, (dict, list))
            if self.__in_dict:
                k = self.__key
                if k is None:
                    raise PropertyListParseError("Missing key for dictionary.")
                top[k] = value
                self.__key = None
            else:
                top.append(value)

    def _push_stack(self, value):
        self.__stack.append(value)
        self.__in_dict = isinstance(value, dict)

    def _pop_stack(self):
        self.__stack.pop()
        self.__in_dict = self.__stack and isinstance(self.__stack[-1], dict)

    def _start_plist(self, name, attrs):
        self._assert(not self.__stack and self.__plist is None, "<plist> more than once.")
        self._assert(attrs.get('version', '1.0') == '1.0',
                     "version 1.0 is only supported, but was '%s'." % attrs.get('version'))

    def _start_array(self, name, attrs):
        v = list()
        self._push_value(v)
        self._push_stack(v)

    def _start_dict(self, name, attrs):
        v = dict()
        self._push_value(v)
        self._push_stack(v)

    def _end_array(self, name):
        self._pop_stack()

    def _end_dict(self, name):
        if self.__key is not None:
            raise PropertyListParseError("Missing value for key '%s'" % self.__key)
        self._pop_stack()

    def _start_true(self, name, attrs):
        self._push_value(True)

    def _start_false(self, name, attrs):
        self._push_value(False)

    def _parse_key(self, name, content):
        if not self.__in_dict:
            # Deliberately lenient: warn and ignore instead of failing the
            # whole parse on a stray <key>.
            print("XmlPropertyListParser() WARNING: ignoring <key>%s</key> (<key> elements must be contained in <dict> element)" % content)
            #raise PropertyListParseError("<key> element '%s' must be in <dict> element." % content)
        else:
            self.__key = content

    def _parse_string(self, name, content):
        self._push_value(content)

    def _parse_data(self, name, content):
        import base64
        self._push_value(base64.b64decode(content))

    # http://www.apple.com/DTDs/PropertyList-1.0.dtd says:
    #
    # Contents should conform to a subset of ISO 8601
    # (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'.
    # Smaller units may be omitted with a loss of precision)
    import re
    DATETIME_PATTERN = re.compile(r"(?P<year>\d\d\d\d)(?:-(?P<month>\d\d)(?:-(?P<day>\d\d)(?:T(?P<hour>\d\d)(?::(?P<minute>\d\d)(?::(?P<second>\d\d))?)?)?)?)?Z$")

    def _parse_date(self, name, content):
        import datetime

        units = ('year', 'month', 'day', 'hour', 'minute', 'second', )
        pattern = XmlPropertyListParser.DATETIME_PATTERN
        match = pattern.match(content)
        if not match:
            raise PropertyListParseError("Failed to parse datetime '%s'" % content)

        groups, components = match.groupdict(), []
        for key in units:
            value = groups[key]
            if value is None:
                break
            components.append(int(value))
        # datetime.datetime requires year, month and day; pad omitted
        # units with 1 (per the DTD, smaller units may be omitted).
        while len(components) < 3:
            components.append(1)

        d = datetime.datetime(*components)
        self._push_value(d)

    def _parse_real(self, name, content):
        self._push_value(float(content))

    def _parse_integer(self, name, content):
        self._push_value(int(content))

    # Dispatch tables mapping element names to the unbound handler
    # functions defined above.
    START_CALLBACKS = {
        'plist': _start_plist,
        'array': _start_array,
        'dict': _start_dict,
        'true': _start_true,
        'false': _start_false,
    }

    END_CALLBACKS = {
        'array': _end_array,
        'dict': _end_dict,
    }

    PARSE_CALLBACKS = {
        'key': _parse_key,
        'string': _parse_string,
        'data': _parse_data,
        'date': _parse_date,
        'real': _parse_real,
        'integer': _parse_integer,
    }

    # ------------------------------------------------
    # XmlPropertyListParser
    # ------------------------------------------------
    def _to_stream(self, io_or_string):
        # Accept in-memory bytes/text as well as file-like objects.  Text
        # is encoded to UTF-8 so both the etree and SAX backends can read
        # a byte stream.  BUGFIX: replaces the Python-2-only
        # ``basestring``/``cStringIO`` pair, which also broke on
        # non-ASCII unicode input.
        if isinstance(io_or_string, bytes):
            import io
            return io.BytesIO(io_or_string)
        if isinstance(io_or_string, type(u'')):
            import io
            return io.BytesIO(io_or_string.encode('utf-8'))
        if hasattr(io_or_string, 'read') and callable(getattr(io_or_string, 'read')):
            return io_or_string
        raise TypeError('Can\'t convert %s to file-like-object' % type(io_or_string))

    def _parse_using_etree(self, xml_input):
        # cElementTree was removed in Python 3.9; fall back to the plain
        # ElementTree implementation (same iterparse API).
        try:
            from xml.etree.cElementTree import iterparse
        except ImportError:
            from xml.etree.ElementTree import iterparse

        # BUGFIX: event names must be text strings -- Python 3's
        # ElementTree rejects the old ``(b'start', b'end')`` bytes tuple.
        parser = iterparse(self._to_stream(xml_input), events=('start', 'end'))
        self.startDocument()
        try:
            for action, element in parser:
                name = element.tag
                if action == 'start':
                    if name in XmlPropertyListParser.START_CALLBACKS:
                        XmlPropertyListParser.START_CALLBACKS[name](self, element.tag, element.attrib)
                elif action == 'end':
                    if name in XmlPropertyListParser.END_CALLBACKS:
                        XmlPropertyListParser.END_CALLBACKS[name](self, name)
                    if name in XmlPropertyListParser.PARSE_CALLBACKS:
                        XmlPropertyListParser.PARSE_CALLBACKS[name](self, name, element.text or "")
                    # Free the element's memory as soon as it is consumed.
                    element.clear()
        except SyntaxError as e:
            # etree's ParseError subclasses SyntaxError on both Python 2
            # and 3, so this catches malformed XML from either backend.
            raise PropertyListParseError(e)

        self.endDocument()
        return self.__plist

    def _parse_using_sax_parser(self, xml_input):
        from xml.sax import make_parser, xmlreader, SAXParseException
        source = xmlreader.InputSource()
        source.setByteStream(self._to_stream(xml_input))
        reader = make_parser()
        reader.setContentHandler(self)
        try:
            reader.parse(source)
        except SAXParseException as e:
            raise PropertyListParseError(e)

        return self.__plist

    def parse(self, xml_input):
        """
        Parse the property list (`.plist`, `.xml, for example) ``xml_input``,
        which can be either a string or a file-like object.

        >>> parser = XmlPropertyListParser()
        >>> parser.parse(r'<plist version="1.0">'
        ...              r'<dict><key>Python</key><string>.py</string></dict>'
        ...              r'</plist>')
        {'Python': '.py'}
        """
        try:
            return self._parse_using_etree(xml_input)
        except ImportError:
            # No xml.etree.cElementTree found.
            return self._parse_using_sax_parser(xml_input)

View File

@ -27,7 +27,7 @@ class NOOK(USBMS):
# Ordered list of supported formats # Ordered list of supported formats
FORMATS = ['epub', 'pdb', 'pdf'] FORMATS = ['epub', 'pdb', 'pdf']
VENDOR_ID = [0x2080, 0x18d1] # 0x18d1 is for softrooted nook VENDOR_ID = [0x2080, 0x18d1] # 0x18d1 is for softrooted nook
PRODUCT_ID = [0x001] PRODUCT_ID = [0x001]
BCD = [0x322] BCD = [0x322]
@ -53,7 +53,6 @@ class NOOK(USBMS):
except ImportError: except ImportError:
import Image, ImageDraw import Image, ImageDraw
coverdata = getattr(metadata, 'thumbnail', None) coverdata = getattr(metadata, 'thumbnail', None)
if coverdata and coverdata[2]: if coverdata and coverdata[2]:
cover = Image.open(cStringIO.StringIO(coverdata[2])) cover = Image.open(cStringIO.StringIO(coverdata[2]))
@ -87,12 +86,13 @@ class NOOK_COLOR(NOOK):
PRODUCT_ID = [0x002, 0x003, 0x004] PRODUCT_ID = [0x002, 0x003, 0x004]
if isosx: if isosx:
PRODUCT_ID.append(0x005) # Nook HD+ PRODUCT_ID.append(0x005) # Nook HD+
BCD = [0x216] BCD = [0x216]
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['EBOOK_DISK', 'NOOK_TABLET', WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['EBOOK_DISK', 'NOOK_TABLET',
'NOOK_SIMPLETOUCH'] 'NOOK_SIMPLETOUCH']
EBOOK_DIR_MAIN = 'My Files' EBOOK_DIR_MAIN = 'My Files'
SCAN_FROM_ROOT = True
NEWS_IN_FOLDER = False NEWS_IN_FOLDER = False
def upload_cover(self, path, filename, metadata, filepath): def upload_cover(self, path, filename, metadata, filepath):

View File

@ -214,6 +214,8 @@ class Numbering(object):
p.set('list-template', val) p.set('list-template', val)
self.update_counter(counter, ilvl, d.levels) self.update_counter(counter, ilvl, d.levels)
templates = {}
def commit(current_run): def commit(current_run):
if not current_run: if not current_run:
return return
@ -244,6 +246,9 @@ class Numbering(object):
span.append(gc) span.append(gc)
child.append(span) child.append(span)
span = SPAN(child.get('list-template')) span = SPAN(child.get('list-template'))
last = templates.get(lvlid, '')
if span.text and len(span.text) > len(last):
templates[lvlid] = span.text
child.insert(0, span) child.insert(0, span)
for attr in ('list-lvl', 'list-id', 'list-template'): for attr in ('list-lvl', 'list-id', 'list-template'):
child.attrib.pop(attr, None) child.attrib.pop(attr, None)
@ -272,8 +277,14 @@ class Numbering(object):
commit(current_run) commit(current_run)
for wrap in body.xpath('//ol[@lvlid]'): for wrap in body.xpath('//ol[@lvlid]'):
wrap.attrib.pop('lvlid') lvlid = wrap.attrib.pop('lvlid')
wrap.tag = 'div' wrap.tag = 'div'
text = ''
maxtext = templates.get(lvlid, '').replace('.', '')[:-1]
for li in wrap.iterchildren('li'):
t = li[0].text
if t and len(t) > len(text):
text = t
for i, li in enumerate(wrap.iterchildren('li')): for i, li in enumerate(wrap.iterchildren('li')):
li.tag = 'div' li.tag = 'div'
li.attrib.pop('value', None) li.attrib.pop('value', None)
@ -281,7 +292,8 @@ class Numbering(object):
obj = object_map[li] obj = object_map[li]
bs = styles.para_cache[obj] bs = styles.para_cache[obj]
if i == 0: if i == 0:
wrap.set('style', 'display:table; margin-left: %s' % (bs.css.get('margin-left', 0))) m = len(maxtext) # Move the table left to simulate the behavior of a list (number is to the left of text margin)
wrap.set('style', 'display:table; margin-left: -%dem; padding-left: %s' % (m, bs.css.get('margin-left', 0)))
bs.css.pop('margin-left', None) bs.css.pop('margin-left', None)
for child in li: for child in li:
child.set('style', 'display:table-cell') child.set('style', 'display:table-cell')

View File

@ -183,7 +183,7 @@ class BookHeader(object):
self.codec)) self.codec))
# Some KF8 files have header length == 264 (generated by kindlegen # Some KF8 files have header length == 264 (generated by kindlegen
# 2.9?). See https://bugs.launchpad.net/bugs/1179144 # 2.9?). See https://bugs.launchpad.net/bugs/1179144
max_header_length = 0x108 max_header_length = 500 # We choose 500 for future versions of kindlegen
if (ident == 'TEXTREAD' or self.length < 0xE4 or if (ident == 'TEXTREAD' or self.length < 0xE4 or
self.length > max_header_length or self.length > max_header_length or

View File

@ -373,7 +373,7 @@ def urlquote(href):
result.append(char) result.append(char)
return ''.join(result) return ''.join(result)
def urlunquote(href): def urlunquote(href, error_handling='strict'):
# unquote must run on a bytestring and will return a bytestring # unquote must run on a bytestring and will return a bytestring
# If it runs on a unicode object, it returns a double encoded unicode # If it runs on a unicode object, it returns a double encoded unicode
# string: unquote(u'%C3%A4') != unquote(b'%C3%A4').decode('utf-8') # string: unquote(u'%C3%A4') != unquote(b'%C3%A4').decode('utf-8')
@ -383,7 +383,10 @@ def urlunquote(href):
href = href.encode('utf-8') href = href.encode('utf-8')
href = unquote(href) href = unquote(href)
if want_unicode: if want_unicode:
href = href.decode('utf-8') # The quoted characters could have been in some encoding other than
# UTF-8, this often happens with old/broken web servers. There is no
# way to know what that encoding should be in this context.
href = href.decode('utf-8', error_handling)
return href return href
def urlnormalize(href): def urlnormalize(href):

View File

@ -9,7 +9,7 @@ __docformat__ = 'restructuredtext en'
import re import re
from urlparse import urlparse from urlparse import urlparse
from collections import deque from collections import deque, Counter
from functools import partial from functools import partial
from lxml import etree from lxml import etree
@ -29,7 +29,8 @@ class TOC(object):
def __init__(self, title=None, dest=None, frag=None): def __init__(self, title=None, dest=None, frag=None):
self.title, self.dest, self.frag = title, dest, frag self.title, self.dest, self.frag = title, dest, frag
self.dest_exists = self.dest_error = None self.dest_exists = self.dest_error = None
if self.title: self.title = self.title.strip() if self.title:
self.title = self.title.strip()
self.parent = None self.parent = None
self.children = [] self.children = []
@ -326,11 +327,13 @@ def create_ncx(toc, to_href, btitle, lang, uid):
navmap = etree.SubElement(ncx, NCX('navMap')) navmap = etree.SubElement(ncx, NCX('navMap'))
spat = re.compile(r'\s+') spat = re.compile(r'\s+')
def process_node(xml_parent, toc_parent, play_order=0): play_order = Counter()
def process_node(xml_parent, toc_parent):
for child in toc_parent: for child in toc_parent:
play_order += 1 play_order['c'] += 1
point = etree.SubElement(xml_parent, NCX('navPoint'), id=uuid_id(), point = etree.SubElement(xml_parent, NCX('navPoint'), id=uuid_id(),
playOrder=str(play_order)) playOrder=str(play_order['c']))
label = etree.SubElement(point, NCX('navLabel')) label = etree.SubElement(point, NCX('navLabel'))
title = child.title title = child.title
if title: if title:
@ -341,7 +344,7 @@ def create_ncx(toc, to_href, btitle, lang, uid):
if child.frag: if child.frag:
href += '#'+child.frag href += '#'+child.frag
etree.SubElement(point, NCX('content'), src=href) etree.SubElement(point, NCX('content'), src=href)
process_node(point, child, play_order) process_node(point, child)
process_node(navmap, toc) process_node(navmap, toc)
return ncx return ncx

View File

@ -113,7 +113,7 @@ class Split(object):
for i, elem in enumerate(item.data.iter()): for i, elem in enumerate(item.data.iter()):
try: try:
elem.set('pb_order', str(i)) elem.set('pb_order', str(i))
except TypeError: # Cant set attributes on comment nodes etc. except TypeError: # Cant set attributes on comment nodes etc.
continue continue
page_breaks = list(page_breaks) page_breaks = list(page_breaks)
@ -159,7 +159,11 @@ class Split(object):
except ValueError: except ValueError:
# Unparseable URL # Unparseable URL
return url return url
href = urlnormalize(href) try:
href = urlnormalize(href)
except ValueError:
# href has non utf-8 quoting
return url
if href in self.map: if href in self.map:
anchor_map = self.map[href] anchor_map = self.map[href]
nhref = anchor_map[frag if frag else None] nhref = anchor_map[frag if frag else None]
@ -171,7 +175,6 @@ class Split(object):
return url return url
class FlowSplitter(object): class FlowSplitter(object):
'The actual splitting logic' 'The actual splitting logic'
@ -313,7 +316,6 @@ class FlowSplitter(object):
split_point = root.xpath(path)[0] split_point = root.xpath(path)[0]
split_point2 = root2.xpath(path)[0] split_point2 = root2.xpath(path)[0]
def nix_element(elem, top=True): def nix_element(elem, top=True):
# Remove elem unless top is False in which case replace elem by its # Remove elem unless top is False in which case replace elem by its
# children # children
@ -393,7 +395,6 @@ class FlowSplitter(object):
buf = part buf = part
return ans return ans
def split_to_size(self, tree): def split_to_size(self, tree):
self.log.debug('\t\tSplitting...') self.log.debug('\t\tSplitting...')
root = tree.getroot() root = tree.getroot()
@ -440,7 +441,7 @@ class FlowSplitter(object):
len(self.split_trees), size/1024.)) len(self.split_trees), size/1024.))
else: else:
self.log.debug( self.log.debug(
'\t\t\tSplit tree still too large: %d KB' % \ '\t\t\tSplit tree still too large: %d KB' %
(size/1024.)) (size/1024.))
self.split_to_size(t) self.split_to_size(t)
@ -546,7 +547,6 @@ class FlowSplitter(object):
for x in toc: for x in toc:
fix_toc_entry(x) fix_toc_entry(x)
if self.oeb.toc: if self.oeb.toc:
fix_toc_entry(self.oeb.toc) fix_toc_entry(self.oeb.toc)

View File

@ -9,7 +9,7 @@ import sys
from PyQt4.Qt import (Qt, QApplication, QStyle, QIcon, QDoubleSpinBox, from PyQt4.Qt import (Qt, QApplication, QStyle, QIcon, QDoubleSpinBox,
QVariant, QSpinBox, QStyledItemDelegate, QComboBox, QTextDocument, QVariant, QSpinBox, QStyledItemDelegate, QComboBox, QTextDocument,
QAbstractTextDocumentLayout, QFont, QFontInfo, QDate) QAbstractTextDocumentLayout, QFont, QFontInfo, QDate, QDateTimeEdit, QDateTime)
from calibre.gui2 import UNDEFINED_QDATETIME, error_dialog, rating_font from calibre.gui2 import UNDEFINED_QDATETIME, error_dialog, rating_font
from calibre.constants import iswindows from calibre.constants import iswindows
@ -23,8 +23,28 @@ from calibre.gui2.dialogs.comments_dialog import CommentsDialog
from calibre.gui2.dialogs.template_dialog import TemplateDialog from calibre.gui2.dialogs.template_dialog import TemplateDialog
from calibre.gui2.languages import LanguagesEdit from calibre.gui2.languages import LanguagesEdit
class DateTimeEdit(QDateTimeEdit): # {{{
class RatingDelegate(QStyledItemDelegate): # {{{ def __init__(self, parent, format):
QDateTimeEdit.__init__(self, parent)
self.setFrame(False)
self.setMinimumDateTime(UNDEFINED_QDATETIME)
self.setSpecialValueText(_('Undefined'))
self.setCalendarPopup(True)
self.setDisplayFormat(format)
def keyPressEvent(self, ev):
if ev.key() == Qt.Key_Minus:
ev.accept()
self.setDateTime(self.minimumDateTime())
elif ev.key() == Qt.Key_Equal:
ev.accept()
self.setDateTime(QDateTime.currentDateTime())
else:
return QDateTimeEdit.keyPressEvent(self, ev)
# }}}
class RatingDelegate(QStyledItemDelegate): # {{{
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
QStyledItemDelegate.__init__(self, *args, **kwargs) QStyledItemDelegate.__init__(self, *args, **kwargs)
@ -60,7 +80,7 @@ class RatingDelegate(QStyledItemDelegate): # {{{
# }}} # }}}
class DateDelegate(QStyledItemDelegate): # {{{ class DateDelegate(QStyledItemDelegate): # {{{
def __init__(self, parent, tweak_name='gui_timestamp_display_format', def __init__(self, parent, tweak_name='gui_timestamp_display_format',
default_format='dd MMM yyyy'): default_format='dd MMM yyyy'):
@ -77,16 +97,11 @@ class DateDelegate(QStyledItemDelegate): # {{{
return format_date(qt_to_dt(d, as_utc=False), self.format) return format_date(qt_to_dt(d, as_utc=False), self.format)
def createEditor(self, parent, option, index): def createEditor(self, parent, option, index):
qde = QStyledItemDelegate.createEditor(self, parent, option, index) return DateTimeEdit(parent, self.format)
qde.setDisplayFormat(self.format)
qde.setMinimumDateTime(UNDEFINED_QDATETIME)
qde.setSpecialValueText(_('Undefined'))
qde.setCalendarPopup(True)
return qde
# }}} # }}}
class PubDateDelegate(QStyledItemDelegate): # {{{ class PubDateDelegate(QStyledItemDelegate): # {{{
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
QStyledItemDelegate.__init__(self, *args, **kwargs) QStyledItemDelegate.__init__(self, *args, **kwargs)
@ -101,12 +116,7 @@ class PubDateDelegate(QStyledItemDelegate): # {{{
return format_date(qt_to_dt(d, as_utc=False), self.format) return format_date(qt_to_dt(d, as_utc=False), self.format)
def createEditor(self, parent, option, index): def createEditor(self, parent, option, index):
qde = QStyledItemDelegate.createEditor(self, parent, option, index) return DateTimeEdit(parent, self.format)
qde.setDisplayFormat(self.format)
qde.setMinimumDateTime(UNDEFINED_QDATETIME)
qde.setSpecialValueText(_('Undefined'))
qde.setCalendarPopup(True)
return qde
def setEditorData(self, editor, index): def setEditorData(self, editor, index):
val = index.data(Qt.EditRole).toDate() val = index.data(Qt.EditRole).toDate()
@ -116,7 +126,7 @@ class PubDateDelegate(QStyledItemDelegate): # {{{
# }}} # }}}
class TextDelegate(QStyledItemDelegate): # {{{ class TextDelegate(QStyledItemDelegate): # {{{
def __init__(self, parent): def __init__(self, parent):
''' '''
Delegate for text data. If auto_complete_function needs to return a list Delegate for text data. If auto_complete_function needs to return a list
@ -153,7 +163,7 @@ class TextDelegate(QStyledItemDelegate): # {{{
#}}} #}}}
class CompleteDelegate(QStyledItemDelegate): # {{{ class CompleteDelegate(QStyledItemDelegate): # {{{
def __init__(self, parent, sep, items_func_name, space_before_sep=False): def __init__(self, parent, sep, items_func_name, space_before_sep=False):
QStyledItemDelegate.__init__(self, parent) QStyledItemDelegate.__init__(self, parent)
self.sep = sep self.sep = sep
@ -194,7 +204,7 @@ class CompleteDelegate(QStyledItemDelegate): # {{{
QStyledItemDelegate.setModelData(self, editor, model, index) QStyledItemDelegate.setModelData(self, editor, model, index)
# }}} # }}}
class LanguagesDelegate(QStyledItemDelegate): # {{{ class LanguagesDelegate(QStyledItemDelegate): # {{{
def createEditor(self, parent, option, index): def createEditor(self, parent, option, index):
editor = LanguagesEdit(parent=parent) editor = LanguagesEdit(parent=parent)
@ -210,7 +220,7 @@ class LanguagesDelegate(QStyledItemDelegate): # {{{
model.setData(index, QVariant(val), Qt.EditRole) model.setData(index, QVariant(val), Qt.EditRole)
# }}} # }}}
class CcDateDelegate(QStyledItemDelegate): # {{{ class CcDateDelegate(QStyledItemDelegate): # {{{
''' '''
Delegate for custom columns dates. Because this delegate stores the Delegate for custom columns dates. Because this delegate stores the
format as an instance variable, a new instance must be created for each format as an instance variable, a new instance must be created for each
@ -230,12 +240,7 @@ class CcDateDelegate(QStyledItemDelegate): # {{{
return format_date(qt_to_dt(d, as_utc=False), self.format) return format_date(qt_to_dt(d, as_utc=False), self.format)
def createEditor(self, parent, option, index): def createEditor(self, parent, option, index):
qde = QStyledItemDelegate.createEditor(self, parent, option, index) return DateTimeEdit(parent, self.format)
qde.setDisplayFormat(self.format)
qde.setMinimumDateTime(UNDEFINED_QDATETIME)
qde.setSpecialValueText(_('Undefined'))
qde.setCalendarPopup(True)
return qde
def setEditorData(self, editor, index): def setEditorData(self, editor, index):
m = index.model() m = index.model()
@ -254,7 +259,7 @@ class CcDateDelegate(QStyledItemDelegate): # {{{
# }}} # }}}
class CcTextDelegate(QStyledItemDelegate): # {{{ class CcTextDelegate(QStyledItemDelegate): # {{{
''' '''
Delegate for text data. Delegate for text data.
''' '''
@ -279,7 +284,7 @@ class CcTextDelegate(QStyledItemDelegate): # {{{
model.setData(index, QVariant(val), Qt.EditRole) model.setData(index, QVariant(val), Qt.EditRole)
# }}} # }}}
class CcNumberDelegate(QStyledItemDelegate): # {{{ class CcNumberDelegate(QStyledItemDelegate): # {{{
''' '''
Delegate for text/int/float data. Delegate for text/int/float data.
''' '''
@ -314,7 +319,7 @@ class CcNumberDelegate(QStyledItemDelegate): # {{{
# }}} # }}}
class CcEnumDelegate(QStyledItemDelegate): # {{{ class CcEnumDelegate(QStyledItemDelegate): # {{{
''' '''
Delegate for text/int/float data. Delegate for text/int/float data.
''' '''
@ -346,7 +351,7 @@ class CcEnumDelegate(QStyledItemDelegate): # {{{
editor.setCurrentIndex(idx) editor.setCurrentIndex(idx)
# }}} # }}}
class CcCommentsDelegate(QStyledItemDelegate): # {{{ class CcCommentsDelegate(QStyledItemDelegate): # {{{
''' '''
Delegate for comments data. Delegate for comments data.
''' '''
@ -364,7 +369,7 @@ class CcCommentsDelegate(QStyledItemDelegate): # {{{
if hasattr(QStyle, 'CE_ItemViewItem'): if hasattr(QStyle, 'CE_ItemViewItem'):
style.drawControl(QStyle.CE_ItemViewItem, option, painter) style.drawControl(QStyle.CE_ItemViewItem, option, painter)
ctx = QAbstractTextDocumentLayout.PaintContext() ctx = QAbstractTextDocumentLayout.PaintContext()
ctx.palette = option.palette #.setColor(QPalette.Text, QColor("red")); ctx.palette = option.palette # .setColor(QPalette.Text, QColor("red"));
if hasattr(QStyle, 'SE_ItemViewItemText'): if hasattr(QStyle, 'SE_ItemViewItemText'):
textRect = style.subElementRect(QStyle.SE_ItemViewItemText, option) textRect = style.subElementRect(QStyle.SE_ItemViewItemText, option)
painter.save() painter.save()
@ -387,7 +392,7 @@ class CcCommentsDelegate(QStyledItemDelegate): # {{{
model.setData(index, QVariant(editor.textbox.html), Qt.EditRole) model.setData(index, QVariant(editor.textbox.html), Qt.EditRole)
# }}} # }}}
class DelegateCB(QComboBox): # {{{ class DelegateCB(QComboBox): # {{{
def __init__(self, parent): def __init__(self, parent):
QComboBox.__init__(self, parent) QComboBox.__init__(self, parent)
@ -398,7 +403,7 @@ class DelegateCB(QComboBox): # {{{
return QComboBox.event(self, e) return QComboBox.event(self, e)
# }}} # }}}
class CcBoolDelegate(QStyledItemDelegate): # {{{ class CcBoolDelegate(QStyledItemDelegate): # {{{
def __init__(self, parent): def __init__(self, parent):
''' '''
Delegate for custom_column bool data. Delegate for custom_column bool data.
@ -431,7 +436,7 @@ class CcBoolDelegate(QStyledItemDelegate): # {{{
# }}} # }}}
class CcTemplateDelegate(QStyledItemDelegate): # {{{ class CcTemplateDelegate(QStyledItemDelegate): # {{{
def __init__(self, parent): def __init__(self, parent):
''' '''
Delegate for custom_column bool data. Delegate for custom_column bool data.
@ -457,7 +462,7 @@ class CcTemplateDelegate(QStyledItemDelegate): # {{{
validation_formatter.validate(val) validation_formatter.validate(val)
except Exception as err: except Exception as err:
error_dialog(self.parent(), _('Invalid template'), error_dialog(self.parent(), _('Invalid template'),
'<p>'+_('The template %s is invalid:')%val + \ '<p>'+_('The template %s is invalid:')%val +
'<br>'+str(err), show=True) '<br>'+str(err), show=True)
model.setData(index, QVariant(val), Qt.EditRole) model.setData(index, QVariant(val), Qt.EditRole)
@ -469,3 +474,4 @@ class CcTemplateDelegate(QStyledItemDelegate): # {{{
# }}} # }}}

View File

@ -13,7 +13,7 @@ from PyQt4.Qt import (Qt, QDateTimeEdit, pyqtSignal, QMessageBox, QIcon,
QToolButton, QWidget, QLabel, QGridLayout, QApplication, QToolButton, QWidget, QLabel, QGridLayout, QApplication,
QDoubleSpinBox, QListWidgetItem, QSize, QPixmap, QDialog, QMenu, QDoubleSpinBox, QListWidgetItem, QSize, QPixmap, QDialog, QMenu,
QPushButton, QSpinBox, QLineEdit, QSizePolicy, QDialogButtonBox, QPushButton, QSpinBox, QLineEdit, QSizePolicy, QDialogButtonBox,
QAction, QCalendarWidget, QDate) QAction, QCalendarWidget, QDate, QDateTime)
from calibre.gui2.widgets import EnLineEdit, FormatList as _FormatList, ImageView from calibre.gui2.widgets import EnLineEdit, FormatList as _FormatList, ImageView
from calibre.utils.icu import sort_key from calibre.utils.icu import sort_key
@ -1472,6 +1472,16 @@ class DateEdit(QDateTimeEdit):
o, c = self.original_val, self.current_val o, c = self.original_val, self.current_val
return o != c return o != c
def keyPressEvent(self, ev):
if ev.key() == Qt.Key_Minus:
ev.accept()
self.setDateTime(self.minimumDateTime())
elif ev.key() == Qt.Key_Equal:
ev.accept()
self.setDateTime(QDateTime.currentDateTime())
else:
return QDateTimeEdit.keyPressEvent(self, ev)
class PubdateEdit(DateEdit): class PubdateEdit(DateEdit):
LABEL = _('Publishe&d:') LABEL = _('Publishe&d:')
FMT = 'MMM yyyy' FMT = 'MMM yyyy'

View File

@ -1,13 +1,14 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, print_function) from __future__ import (division, absolute_import, print_function)
store_version = 2 # Needed for dynamic plugin loading store_version = 3 # Needed for dynamic plugin loading
__license__ = 'GPL 3' __license__ = 'GPL 3'
__copyright__ = '2013, Tomasz Długosz <tomek3d@gmail.com>' __copyright__ = '2013, Tomasz Długosz <tomek3d@gmail.com>'
__docformat__ = 'restructuredtext en' __docformat__ = 'restructuredtext en'
import urllib import urllib
from base64 import b64encode
from contextlib import closing from contextlib import closing
from lxml import html from lxml import html
@ -24,21 +25,20 @@ from calibre.gui2.store.web_store_dialog import WebStoreDialog
class KoobeStore(BasicStoreConfig, StorePlugin): class KoobeStore(BasicStoreConfig, StorePlugin):
def open(self, parent=None, detail_item=None, external=False): def open(self, parent=None, detail_item=None, external=False):
#aff_root = 'https://www.a4b-tracking.com/pl/stat-click-text-link/15/58/' aff_root = 'https://www.a4b-tracking.com/pl/stat-click-text-link/15/58/'
url = 'http://www.koobe.pl/' url = 'http://www.koobe.pl/'
#aff_url = aff_root + str(b64encode(url)) aff_url = aff_root + str(b64encode(url))
detail_url = None detail_url = None
if detail_item: if detail_item:
detail_url = detail_item #aff_root + str(b64encode(detail_item)) detail_url = aff_root + str(b64encode(detail_item))
if external or self.config.get('open_external', False): if external or self.config.get('open_external', False):
#open_url(QUrl(url_slash_cleaner(detail_url if detail_url else aff_url))) open_url(QUrl(url_slash_cleaner(detail_url if detail_url else aff_url)))
open_url(QUrl(url_slash_cleaner(detail_url if detail_url else url)))
else: else:
#d = WebStoreDialog(self.gui, url, parent, detail_url if detail_url else aff_url) d = WebStoreDialog(self.gui, url, parent, detail_url if detail_url else aff_url)
d = WebStoreDialog(self.gui, url, parent, detail_url if detail_url else url)
d.setWindowTitle(self.name) d.setWindowTitle(self.name)
d.set_tags(self.config.get('tags', '')) d.set_tags(self.config.get('tags', ''))
d.exec_() d.exec_()
@ -63,7 +63,7 @@ class KoobeStore(BasicStoreConfig, StorePlugin):
cover_url = ''.join(data.xpath('.//div[@class="cover"]/a/img/@src')) cover_url = ''.join(data.xpath('.//div[@class="cover"]/a/img/@src'))
price = ''.join(data.xpath('.//span[@class="current_price"]/text()')) price = ''.join(data.xpath('.//span[@class="current_price"]/text()'))
title = ''.join(data.xpath('.//h2[@class="title"]/a/text()')) title = ''.join(data.xpath('.//h2[@class="title"]/a/text()'))
author = ''.join(data.xpath('.//h3[@class="book_author"]/a/text()')) author = ', '.join(data.xpath('.//h3[@class="book_author"]/a/text()'))
formats = ', '.join(data.xpath('.//div[@class="formats"]/div/div/@title')) formats = ', '.join(data.xpath('.//div[@class="formats"]/div/div/@title'))
counter -= 1 counter -= 1

View File

@ -1,10 +1,10 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function) from __future__ import (unicode_literals, division, absolute_import, print_function)
store_version = 1 # Needed for dynamic plugin loading store_version = 2 # Needed for dynamic plugin loading
__license__ = 'GPL 3' __license__ = 'GPL 3'
__copyright__ = '2012, Tomasz Długosz <tomek3d@gmail.com>' __copyright__ = '2012-2013, Tomasz Długosz <tomek3d@gmail.com>'
__docformat__ = 'restructuredtext en' __docformat__ = 'restructuredtext en'
import urllib import urllib
@ -25,12 +25,12 @@ class PublioStore(BasicStoreConfig, StorePlugin):
def open(self, parent=None, detail_item=None, external=False): def open(self, parent=None, detail_item=None, external=False):
google_analytics = '?utm_source=tdcalibre&utm_medium=calibre' google_analytics = '?utm_source=tdcalibre&utm_medium=calibre'
url = 'http://www.publio.pl/e-booki.html' + google_analytics url = 'http://www.publio.pl/' + google_analytics
if external or self.config.get('open_external', False): if external or self.config.get('open_external', False):
open_url(QUrl(url_slash_cleaner((detail_item + google_analytics) if detail_item else url))) open_url(QUrl(url_slash_cleaner((detail_item + google_analytics) if detail_item else url)))
else: else:
d = WebStoreDialog(self.gui, url, parent, detail_item) d = WebStoreDialog(self.gui, url, parent, detail_item if detail_item else url)
d.setWindowTitle(self.name) d.setWindowTitle(self.name)
d.set_tags(self.config.get('tags', '')) d.set_tags(self.config.get('tags', ''))
d.exec_() d.exec_()
@ -42,7 +42,7 @@ class PublioStore(BasicStoreConfig, StorePlugin):
counter = max_results counter = max_results
page = 1 page = 1
while counter: while counter:
with closing(br.open('http://www.publio.pl/e-booki,strona' + str(page) + '.html?q=' + urllib.quote(query), timeout=timeout)) as f: with closing(br.open('http://www.publio.pl/szukaj,strona' + str(page) + '.html?q=' + urllib.quote(query) + '&sections=EMAGAZINE&sections=MINIBOOK&sections=EBOOK', timeout=timeout)) as f:
doc = html.fromstring(f.read()) doc = html.fromstring(f.read())
for data in doc.xpath('//div[@class="item"]'): for data in doc.xpath('//div[@class="item"]'):
if counter <= 0: if counter <= 0:

View File

@ -431,7 +431,7 @@ def do_add_format(db, id, fmt, path, opts):
done = db.add_format_with_hooks(id, fmt.upper(), path, index_is_id=True, done = db.add_format_with_hooks(id, fmt.upper(), path, index_is_id=True,
replace=opts.replace) replace=opts.replace)
if not done and not opts.replace: if not done and not opts.replace:
prints(_('A %s file already exists for book: %d, not replacing')%(fmt.upper(), id)) prints(_('A %(fmt)s file already exists for book: %(id)d, not replacing')%dict(fmt=fmt.upper(), id=id))
else: else:
send_message() send_message()

View File

@ -47,6 +47,7 @@ FORMAT_ARG_DESCS = dict(
pubdate=_('The published date'), pubdate=_('The published date'),
last_modified=_('The date when the metadata for this book record' last_modified=_('The date when the metadata for this book record'
' was last modified'), ' was last modified'),
languages=_('The language(s) of this book'),
id=_('The calibre internal id') id=_('The calibre internal id')
) )
@ -283,7 +284,6 @@ def save_book_to_disk(id_, db, root, opts, length):
pass pass
def do_save_book_to_disk(id_, mi, cover, plugboards, def do_save_book_to_disk(id_, mi, cover, plugboards,
format_map, root, opts, length): format_map, root, opts, length):
from calibre.ebooks.metadata.meta import set_metadata from calibre.ebooks.metadata.meta import set_metadata

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More