Merge from trunk

This commit is contained in:
Charles Haley 2012-12-20 10:30:53 +01:00
commit 10221f051f
164 changed files with 21688 additions and 19649 deletions

View File

@ -19,6 +19,81 @@
# new recipes: # new recipes:
# - title: # - title:
- version: 0.9.10
date: 2012-12-14
new features:
- title: "Drivers for Nextbook Premium 8 se, HTC Desire X and Emerson EM 543"
tickets: [1088149, 1088112, 1087978]
bug fixes:
- title: "Fix rich text delegate not working with Qt compiled in debug mode."
tickets: [1089011]
- title: "When deleting all books in the library, blank the book details panel"
- title: "Conversion: Fix malformed values in the bgcolor attribute causing conversion to abort"
- title: "Conversion: Fix heuristics applying incorrect style in some circumstances"
tickets: [1066507]
- title: "Possible fix for 64bit calibre not starting up on some Windows systems"
tickets: [1087816]
improved recipes:
- Sivil Dusunce
- Anchorage Daily News
- Le Monde
- Harpers
new recipes:
- title: Titanic
author: Krittika Goyal
- version: 0.9.9
date: 2012-12-07
new features:
- title: "64 bit build for windows"
type: major
description: "calibre now has a 64 bit version for windows, available at: http://calibre-ebook.com/download_windows64 The 64bit build is not limited to using only 3GB of RAM when converting large/complex documents. It may also be slightly faster for some tasks. You can have both the 32 bit and the 64 bit build installed at the same time, they will use the same libraries, plugins and settings."
- title: "Content server: Make the identifiers in each books metadata clickable."
tickets: [1085726]
bug fixes:
- title: "EPUB Input: Fix an infinite loop while trying to recover a damaged EPUB file."
tickets: [1086917]
- title: "KF8 Input: Fix handling of links in files that link to the obsolete <a name> tags instead of tags with an id attribute."
tickets: [1086705]
- title: "Conversion: Fix a bug in removal of invalid entries from the spine, where not all invalid entries were removed, causing conversion to fail."
tickets: [1086054]
- title: "KF8 Input: Ignore invalid flow references in the KF8 document instead of erroring out on them."
tickets: [1085306]
- title: "Fix command line output on linux systems with incorrect LANG/LC_TYPE env vars."
tickets: [1085103]
- title: "KF8 Input: Fix page breaks specified using the data-AmznPageBreak attribute being ignored by calibre."
- title: "PDF Output: Fix custom size field not accepting fractional numbers as sizes"
- title: "Get Books: Update libre.de and publio for website changes"
- title: "Wireless driver: Increase timeout interval, and when allocating a random port try 9090 first"
improved recipes:
- New York Times
- Weblogs SL
- Zaman Gazetesi
- Aksiyon Dergisi
- Endgadget
- Metro UK
- Heise Online
- version: 0.9.8 - version: 0.9.8
date: 2012-11-30 date: 2012-11-30

View File

@ -9,11 +9,12 @@ class Adventure_zone(BasicNewsRecipe):
no_stylesheets = True no_stylesheets = True
oldest_article = 20 oldest_article = 20
max_articles_per_feed = 100 max_articles_per_feed = 100
cover_url = 'http://www.adventure-zone.info/inne/logoaz_2012.png'
index='http://www.adventure-zone.info/fusion/' index='http://www.adventure-zone.info/fusion/'
use_embedded_content=False use_embedded_content=False
preprocess_regexps = [(re.compile(r"<td class='capmain'>Komentarze</td>", re.IGNORECASE), lambda m: ''), preprocess_regexps = [(re.compile(r"<td class='capmain'>Komentarze</td>", re.IGNORECASE), lambda m: ''),
(re.compile(r'\<table .*?\>'), lambda match: ''), (re.compile(r'</?table.*?>'), lambda match: ''),
(re.compile(r'\<tbody\>'), lambda match: '')] (re.compile(r'</?tbody.*?>'), lambda match: '')]
remove_tags_before= dict(name='td', attrs={'class':'main-bg'}) remove_tags_before= dict(name='td', attrs={'class':'main-bg'})
remove_tags= [dict(name='img', attrs={'alt':'Drukuj'})] remove_tags= [dict(name='img', attrs={'alt':'Drukuj'})]
remove_tags_after= dict(id='comments') remove_tags_after= dict(id='comments')
@ -36,11 +37,11 @@ class Adventure_zone(BasicNewsRecipe):
return feeds return feeds
def get_cover_url(self): '''def get_cover_url(self):
soup = self.index_to_soup('http://www.adventure-zone.info/fusion/news.php') soup = self.index_to_soup('http://www.adventure-zone.info/fusion/news.php')
cover=soup.find(id='box_OstatninumerAZ') cover=soup.find(id='box_OstatninumerAZ')
self.cover_url='http://www.adventure-zone.info/fusion/'+ cover.center.a.img['src'] self.cover_url='http://www.adventure-zone.info/fusion/'+ cover.center.a.img['src']
return getattr(self, 'cover_url', self.cover_url) return getattr(self, 'cover_url', self.cover_url)'''
def skip_ad_pages(self, soup): def skip_ad_pages(self, soup):

View File

@ -5,6 +5,8 @@ class AdvancedUserRecipe1278347258(BasicNewsRecipe):
__author__ = 'rty' __author__ = 'rty'
oldest_article = 7 oldest_article = 7
max_articles_per_feed = 100 max_articles_per_feed = 100
auto_cleanup = True
feeds = [(u'Alaska News', u'http://www.adn.com/news/alaska/index.xml'), feeds = [(u'Alaska News', u'http://www.adn.com/news/alaska/index.xml'),
(u'Business', u'http://www.adn.com/money/index.xml'), (u'Business', u'http://www.adn.com/money/index.xml'),
@ -28,13 +30,13 @@ class AdvancedUserRecipe1278347258(BasicNewsRecipe):
conversion_options = {'linearize_tables':True} conversion_options = {'linearize_tables':True}
masthead_url = 'http://media.adn.com/includes/assets/images/adn_logo.2.gif' masthead_url = 'http://media.adn.com/includes/assets/images/adn_logo.2.gif'
keep_only_tags = [ #keep_only_tags = [
dict(name='div', attrs={'class':'left_col story_mainbar'}), #dict(name='div', attrs={'class':'left_col story_mainbar'}),
] #]
remove_tags = [ #remove_tags = [
dict(name='div', attrs={'class':'story_tools'}), #dict(name='div', attrs={'class':'story_tools'}),
dict(name='p', attrs={'class':'ad_label'}), #dict(name='p', attrs={'class':'ad_label'}),
] #]
remove_tags_after = [ #remove_tags_after = [
dict(name='div', attrs={'class':'advertisement'}), #dict(name='div', attrs={'class':'advertisement'}),
] #]

View File

@ -3,11 +3,11 @@ from calibre.web.feeds.news import BasicNewsRecipe
class Android_com_pl(BasicNewsRecipe): class Android_com_pl(BasicNewsRecipe):
title = u'Android.com.pl' title = u'Android.com.pl'
__author__ = 'fenuks' __author__ = 'fenuks'
description = 'Android.com.pl - biggest polish Android site' description = u'Android.com.pl - to największe w Polsce centrum Android OS. Znajdziesz tu: nowości, forum, pomoc, recenzje, gry, aplikacje.'
category = 'Android, mobile' category = 'Android, mobile'
language = 'pl' language = 'pl'
use_embedded_content=True use_embedded_content=True
cover_url =u'http://upload.wikimedia.org/wikipedia/commons/thumb/d/d7/Android_robot.svg/220px-Android_robot.svg.png' cover_url =u'http://android.com.pl/wp-content/themes/android/images/logo.png'
oldest_article = 8 oldest_article = 8
max_articles_per_feed = 100 max_articles_per_feed = 100
feeds = [(u'Android', u'http://android.com.pl/component/content/frontpage/frontpage.feed?type=rss')] feeds = [(u'Android', u'http://android.com.pl/feed/')]

19
recipes/astroflesz.recipe Normal file
View File

@ -0,0 +1,19 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from calibre.web.feeds.news import BasicNewsRecipe
class Astroflesz(BasicNewsRecipe):
title = u'Astroflesz'
oldest_article = 7
__author__ = 'fenuks'
description = u'astroflesz.pl - to portal poświęcony astronomii. Informuje zarówno o aktualnych wydarzeniach i odkryciach naukowych, jak również zapowiada ciekawe zjawiska astronomiczne'
category = 'astronomy'
language = 'pl'
cover_url = 'http://www.astroflesz.pl/templates/astroflesz/images/logo/logo.png'
ignore_duplicate_articles = {'title', 'url'}
max_articles_per_feed = 100
no_stylesheets = True
use_embedded_content = False
keep_only_tags = [dict(id="k2Container")]
remove_tags_after = dict(name='div', attrs={'class':'itemLinks'})
remove_tags = [dict(name='div', attrs={'class':['itemLinks', 'itemToolbar', 'itemRatingBlock']})]
feeds = [(u'Wszystkie', u'http://astroflesz.pl/?format=feed')]

View File

@ -1,9 +1,11 @@
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
import re
import mechanize
class AdvancedUserRecipe1306097511(BasicNewsRecipe): class AdvancedUserRecipe1306097511(BasicNewsRecipe):
title = u'Birmingham post' title = u'Birmingham post'
description = 'Author D.Asbury. News for Birmingham UK' description = 'Author D.Asbury. News for Birmingham UK'
#timefmt = '' #timefmt = ''
# last update 8/9/12
__author__ = 'Dave Asbury' __author__ = 'Dave Asbury'
cover_url = 'http://profile.ak.fbcdn.net/hprofile-ak-snc4/161987_9010212100_2035706408_n.jpg' cover_url = 'http://profile.ak.fbcdn.net/hprofile-ak-snc4/161987_9010212100_2035706408_n.jpg'
oldest_article = 2 oldest_article = 2
@ -15,8 +17,30 @@ class AdvancedUserRecipe1306097511(BasicNewsRecipe):
#auto_cleanup = True #auto_cleanup = True
language = 'en_GB' language = 'en_GB'
cover_url = 'http://profile.ak.fbcdn.net/hprofile-ak-snc4/161987_9010212100_2035706408_n.jpg'
masthead_url = 'http://www.pressgazette.co.uk/Pictures/web/t/c/g/birmingham_post.jpg' masthead_url = 'http://www.trinitymirror.com/images/birminghampost-logo.gif'
def get_cover_url(self):
soup = self.index_to_soup('http://www.birminghampost.net')
# look for the block containing the sun button and url
cov = soup.find(attrs={'height' : re.compile('3'), 'alt' : re.compile('Birmingham Post')})
print
print '%%%%%%%%%%%%%%%',cov
print
cov2 = str(cov['src'])
# cov2=cov2[7:]
print '88888888 ',cov2,' 888888888888'
#cover_url=cov2
#return cover_url
br = mechanize.Browser()
br.set_handle_redirect(False)
try:
br.open_novisit(cov2)
cover_url = cov2
except:
cover_url = 'http://profile.ak.fbcdn.net/hprofile-ak-snc4/161987_9010212100_2035706408_n.jpg'
return cover_url
keep_only_tags = [ keep_only_tags = [

View File

@ -7,24 +7,29 @@ class AdvancedUserRecipe1325006965(BasicNewsRecipe):
#cover_url = 'http://www.countryfile.com/sites/default/files/imagecache/160px_wide/cover/2_1.jpg' #cover_url = 'http://www.countryfile.com/sites/default/files/imagecache/160px_wide/cover/2_1.jpg'
__author__ = 'Dave Asbury' __author__ = 'Dave Asbury'
description = 'The official website of Countryfile Magazine' description = 'The official website of Countryfile Magazine'
# last updated 7/10/12 # last updated 8/12/12
language = 'en_GB' language = 'en_GB'
oldest_article = 30 oldest_article = 30
max_articles_per_feed = 25 max_articles_per_feed = 25
remove_empty_feeds = True remove_empty_feeds = True
no_stylesheets = True no_stylesheets = True
auto_cleanup = True auto_cleanup = True
ignore_duplicate_articles = {'title', 'url'}
#articles_are_obfuscated = True #articles_are_obfuscated = True
ignore_duplicate_articles = {'title'} #article_already_exists = False
#feed_hash = ''
def get_cover_url(self): def get_cover_url(self):
soup = self.index_to_soup('http://www.countryfile.com/') soup = self.index_to_soup('http://www.countryfile.com/magazine')
cov = soup.find(attrs={'class' : re.compile('imagecache imagecache-250px_wide')})#'width' : '160',
print '&&&&&&&& ',cov,' ***'
cov=str(cov)
#cov2 = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', cov)
cov2 = re.findall('/(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', cov)
cov2 = str(cov2)
cov2= "http://www.countryfile.com"+cov2[2:len(cov2)-8]
cov = soup.find(attrs={'width' : '160', 'class' : re.compile('imagecache imagecache-160px_wide')})
print '******** ',cov,' ***'
cov2 = str(cov)
cov2=cov2[10:101]
print '******** ',cov2,' ***' print '******** ',cov2,' ***'
#cov2='http://www.countryfile.com/sites/default/files/imagecache/160px_wide/cover/1b_0.jpg'
# try to get cover - if can't get known cover # try to get cover - if can't get known cover
br = browser() br = browser()
@ -45,5 +50,3 @@ class AdvancedUserRecipe1325006965(BasicNewsRecipe):
(u'Countryside', u'http://www.countryfile.com/rss/countryside'), (u'Countryside', u'http://www.countryfile.com/rss/countryside'),
] ]

View File

@ -0,0 +1,20 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from calibre.web.feeds.news import BasicNewsRecipe
class CzasGentlemanow(BasicNewsRecipe):
title = u'Czas Gentlemanów'
__author__ = 'fenuks'
description = u'Historia mężczyzn z dala od wielkiej polityki'
category = 'blog'
language = 'pl'
cover_url = 'http://czasgentlemanow.pl/wp-content/uploads/2012/10/logo-Czas-Gentlemanow1.jpg'
ignore_duplicate_articles = {'title', 'url'}
oldest_article = 7
max_articles_per_feed = 100
no_stylesheets = True
remove_empty_feeds = True
use_embedded_content = False
keep_only_tags = [dict(name='div', attrs={'class':'content'})]
remove_tags = [dict(attrs={'class':'meta_comments'})]
remove_tags_after = dict(name='div', attrs={'class':'fblikebutton_button'})
feeds = [(u'M\u0119ski \u015awiat', u'http://czasgentlemanow.pl/category/meski-swiat/feed/'), (u'Styl', u'http://czasgentlemanow.pl/category/styl/feed/'), (u'Vademecum Gentlemana', u'http://czasgentlemanow.pl/category/vademecum/feed/'), (u'Dom i rodzina', u'http://czasgentlemanow.pl/category/dom-i-rodzina/feed/'), (u'Honor', u'http://czasgentlemanow.pl/category/honor/feed/'), (u'Gad\u017cety Gentlemana', u'http://czasgentlemanow.pl/category/gadzety-gentlemana/feed/')]

View File

@ -7,18 +7,64 @@ class Dzieje(BasicNewsRecipe):
cover_url = 'http://www.dzieje.pl/sites/default/files/dzieje_logo.png' cover_url = 'http://www.dzieje.pl/sites/default/files/dzieje_logo.png'
category = 'history' category = 'history'
language = 'pl' language = 'pl'
index='http://dzieje.pl' ignore_duplicate_articles = {'title', 'url'}
index = 'http://dzieje.pl'
oldest_article = 8 oldest_article = 8
max_articles_per_feed = 100 max_articles_per_feed = 100
remove_javascript=True remove_javascript=True
no_stylesheets= True no_stylesheets= True
keep_only_tags = [dict(name='h1', attrs={'class':'title'}), dict(id='content-area')] keep_only_tags = [dict(name='h1', attrs={'class':'title'}), dict(id='content-area')]
remove_tags = [dict(attrs={'class':'field field-type-computed field-field-tagi'}), dict(id='dogory')] remove_tags = [dict(attrs={'class':'field field-type-computed field-field-tagi'}), dict(id='dogory')]
feeds = [(u'Dzieje', u'http://dzieje.pl/rss.xml')] #feeds = [(u'Dzieje', u'http://dzieje.pl/rss.xml')]
def append_page(self, soup, appendtag):
tag = appendtag.find('li', attrs={'class':'pager-next'})
if tag:
while tag:
url = tag.a['href']
if not url.startswith('http'):
url = 'http://dzieje.pl'+tag.a['href']
soup2 = self.index_to_soup(url)
pagetext = soup2.find(id='content-area').find(attrs={'class':'content'})
for r in pagetext.findAll(attrs={'class':['fieldgroup group-groupkul', 'fieldgroup group-zdjeciekult', 'fieldgroup group-zdjecieciekaw', 'fieldgroup group-zdjecieksiazka', 'fieldgroup group-zdjeciedu', 'field field-type-filefield field-field-zdjecieglownawyd']}):
r.extract()
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
tag = soup2.find('li', attrs={'class':'pager-next'})
for r in appendtag.findAll(attrs={'class':['item-list', 'field field-type-computed field-field-tagi', ]}):
r.extract()
def find_articles(self, url):
articles = []
soup=self.index_to_soup(url)
tag=soup.find(id='content-area').div.div
for i in tag.findAll('div', recursive=False):
temp = i.find(attrs={'class':'views-field-title'}).span.a
title = temp.string
url = self.index + temp['href']
date = '' #i.find(attrs={'class':'views-field-created'}).span.string
articles.append({'title' : title,
'url' : url,
'date' : date,
'description' : ''
})
return articles
def parse_index(self):
feeds = []
feeds.append((u"Wiadomości", self.find_articles('http://dzieje.pl/wiadomosci')))
feeds.append((u"Kultura i sztuka", self.find_articles('http://dzieje.pl/kulturaisztuka')))
feeds.append((u"Film", self.find_articles('http://dzieje.pl/kino')))
feeds.append((u"Rozmaitości historyczne", self.find_articles('http://dzieje.pl/rozmaitości')))
feeds.append((u"Książka", self.find_articles('http://dzieje.pl/ksiazka')))
feeds.append((u"Wystawa", self.find_articles('http://dzieje.pl/wystawa')))
feeds.append((u"Edukacja", self.find_articles('http://dzieje.pl/edukacja')))
feeds.append((u"Dzieje się", self.find_articles('http://dzieje.pl/wydarzenia')))
return feeds
def preprocess_html(self, soup): def preprocess_html(self, soup):
for a in soup('a'): for a in soup('a'):
if a.has_key('href') and 'http://' not in a['href'] and 'https://' not in a['href']: if a.has_key('href') and 'http://' not in a['href'] and 'https://' not in a['href']:
a['href']=self.index + a['href'] a['href']=self.index + a['href']
self.append_page(soup, soup.body)
return soup return soup

View File

@ -0,0 +1,24 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from calibre.web.feeds.news import BasicNewsRecipe
import re
class EkologiaPl(BasicNewsRecipe):
title = u'Ekologia.pl'
__author__ = 'fenuks'
description = u'Portal ekologiczny - eko, ekologia, ochrona przyrody, ochrona środowiska, przyroda, środowisko online. Ekologia i ochrona środowiska. Ekologia dla dzieci.'
category = 'ecology'
language = 'pl'
cover_url = 'http://www.ekologia.pl/assets/images/logo/ekologia_pl_223x69.png'
ignore_duplicate_articles = {'title', 'url'}
extra_css = '.title {font-size: 200%;}'
oldest_article = 7
max_articles_per_feed = 100
no_stylesheets = True
remove_empty_feeds = True
use_embedded_content = False
remove_tags = [dict(attrs={'class':['ekoLogo', 'powrocArt', 'butonDrukuj']})]
feeds = [(u'Wiadomo\u015bci', u'http://www.ekologia.pl/rss/20,53,0'), (u'\u015arodowisko', u'http://www.ekologia.pl/rss/20,56,0'), (u'Styl \u017cycia', u'http://www.ekologia.pl/rss/20,55,0')]
def print_version(self, url):
id = re.search(r',(?P<id>\d+)\.html', url).group('id')
return 'http://drukuj.ekologia.pl/artykul/' + id

View File

@ -0,0 +1,19 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from calibre.web.feeds.news import BasicNewsRecipe
import re
class FilmOrgPl(BasicNewsRecipe):
title = u'Film.org.pl'
__author__ = 'fenuks'
description = u"Recenzje, analizy, artykuły, rankingi - wszystko o filmie dla miłośników kina. Opisy efektów specjalnych, wersji reżyserskich, remake'ów, sequeli. No i forum filmowe. Jedne z największych w Polsce."
category = 'film'
language = 'pl'
cover_url = 'http://film.org.pl/wp-content/themes/KMF/images/logo_kmf10.png'
ignore_duplicate_articles = {'title', 'url'}
oldest_article = 7
max_articles_per_feed = 100
no_stylesheets = True
remove_empty_feeds = True
use_embedded_content = True
preprocess_regexps = [(re.compile(ur'<h3>Przeczytaj także:</h3>.*', re.IGNORECASE|re.DOTALL), lambda m: '</body>'), (re.compile(ur'<div>Artykuł</div>', re.IGNORECASE), lambda m: ''), (re.compile(ur'<div>Ludzie filmu</div>', re.IGNORECASE), lambda m: '')]
remove_tags = [dict(name='img', attrs={'alt':['Ludzie filmu', u'Artykuł']})]
feeds = [(u'Recenzje', u'http://film.org.pl/r/recenzje/feed/'), (u'Artyku\u0142', u'http://film.org.pl/a/artykul/feed/'), (u'Analiza', u'http://film.org.pl/a/analiza/feed/'), (u'Ranking', u'http://film.org.pl/a/ranking/feed/'), (u'Blog', u'http://film.org.pl/kmf/blog/feed/'), (u'Ludzie', u'http://film.org.pl/a/ludzie/feed/'), (u'Seriale', u'http://film.org.pl/a/seriale/feed/'), (u'Oceanarium', u'http://film.org.pl/a/ocenarium/feed/'), (u'VHS', u'http://film.org.pl/a/vhs-a/feed/')]

View File

@ -17,6 +17,7 @@ class FilmWebPl(BasicNewsRecipe):
preprocess_regexps = [(re.compile(u'\(kliknij\,\ aby powiększyć\)', re.IGNORECASE), lambda m: ''), ]#(re.compile(ur' | ', re.IGNORECASE), lambda m: '')] preprocess_regexps = [(re.compile(u'\(kliknij\,\ aby powiększyć\)', re.IGNORECASE), lambda m: ''), ]#(re.compile(ur' | ', re.IGNORECASE), lambda m: '')]
extra_css = '.hdrBig {font-size:22px;} ul {list-style-type:none; padding: 0; margin: 0;}' extra_css = '.hdrBig {font-size:22px;} ul {list-style-type:none; padding: 0; margin: 0;}'
remove_tags= [dict(name='div', attrs={'class':['recommendOthers']}), dict(name='ul', attrs={'class':'fontSizeSet'}), dict(attrs={'class':'userSurname anno'})] remove_tags= [dict(name='div', attrs={'class':['recommendOthers']}), dict(name='ul', attrs={'class':'fontSizeSet'}), dict(attrs={'class':'userSurname anno'})]
remove_attributes = ['style',]
keep_only_tags= [dict(name='h1', attrs={'class':['hdrBig', 'hdrEntity']}), dict(name='div', attrs={'class':['newsInfo', 'newsInfoSmall', 'reviewContent description']})] keep_only_tags= [dict(name='h1', attrs={'class':['hdrBig', 'hdrEntity']}), dict(name='div', attrs={'class':['newsInfo', 'newsInfoSmall', 'reviewContent description']})]
feeds = [(u'News / Filmy w produkcji', 'http://www.filmweb.pl/feed/news/category/filminproduction'), feeds = [(u'News / Filmy w produkcji', 'http://www.filmweb.pl/feed/news/category/filminproduction'),
(u'News / Festiwale, nagrody i przeglądy', u'http://www.filmweb.pl/feed/news/category/festival'), (u'News / Festiwale, nagrody i przeglądy', u'http://www.filmweb.pl/feed/news/category/festival'),
@ -50,4 +51,9 @@ class FilmWebPl(BasicNewsRecipe):
for i in soup.findAll('sup'): for i in soup.findAll('sup'):
if not i.string or i.string.startswith('(kliknij'): if not i.string or i.string.startswith('(kliknij'):
i.extract() i.extract()
tag = soup.find(name='ul', attrs={'class':'inline sep-line'})
if tag:
tag.name = 'div'
for t in tag.findAll('li'):
t.name = 'div'
return soup return soup

View File

@ -4,9 +4,10 @@ import re
class Gildia(BasicNewsRecipe): class Gildia(BasicNewsRecipe):
title = u'Gildia.pl' title = u'Gildia.pl'
__author__ = 'fenuks' __author__ = 'fenuks'
description = 'Gildia - cultural site' description = u'Fantastyczny Portal Kulturalny - newsy, recenzje, galerie, wywiady. Literatura, film, gry komputerowe i planszowe, komiks, RPG, sklep. Nie lekceważ potęgi wyobraźni!'
cover_url = 'http://www.film.gildia.pl/_n_/portal/redakcja/logo/logo-gildia.pl-500.jpg' cover_url = 'http://www.film.gildia.pl/_n_/portal/redakcja/logo/logo-gildia.pl-500.jpg'
category = 'culture' category = 'culture'
cover_url = 'http://gildia.pl/images/logo-main.png'
language = 'pl' language = 'pl'
oldest_article = 8 oldest_article = 8
max_articles_per_feed = 100 max_articles_per_feed = 100
@ -23,10 +24,13 @@ class Gildia(BasicNewsRecipe):
content = soup.find('div', attrs={'class':'news'}) content = soup.find('div', attrs={'class':'news'})
if 'recenzj' in soup.title.string.lower(): if 'recenzj' in soup.title.string.lower():
for link in content.findAll(name='a'): for link in content.findAll(name='a'):
if 'recenzj' in link['href']: if 'recenzj' in link['href'] or 'muzyka/plyty' in link['href']:
self.log.warn('odnosnik')
self.log.warn(link['href'])
return self.index_to_soup(link['href'], raw=True) return self.index_to_soup(link['href'], raw=True)
if 'fragmen' in soup.title.string.lower():
for link in content.findAll(name='a'):
if 'fragment' in link['href']:
return self.index_to_soup(link['href'], raw=True)
def preprocess_html(self, soup): def preprocess_html(self, soup):
for a in soup('a'): for a in soup('a'):

View File

@ -1,19 +1,20 @@
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup
class Gram_pl(BasicNewsRecipe): class Gram_pl(BasicNewsRecipe):
title = u'Gram.pl' title = u'Gram.pl'
__author__ = 'fenuks' __author__ = 'fenuks'
description = 'Gram.pl - site about computer games' description = u'Serwis społecznościowy o grach: recenzje, newsy, zapowiedzi, encyklopedia gier, forum. Gry PC, PS3, X360, PS Vita, sprzęt dla graczy.'
category = 'games' category = 'games'
language = 'pl' language = 'pl'
oldest_article = 8 oldest_article = 8
index='http://www.gram.pl' index='http://www.gram.pl'
max_articles_per_feed = 100 max_articles_per_feed = 100
ignore_duplicate_articles = {'title', 'url'}
no_stylesheets= True no_stylesheets= True
extra_css = 'h2 {font-style: italic; font-size:20px;} .picbox div {float: left;}' #extra_css = 'h2 {font-style: italic; font-size:20px;} .picbox div {float: left;}'
cover_url=u'http://www.gram.pl/www/01/img/grampl_zima.png' cover_url=u'http://www.gram.pl/www/01/img/grampl_zima.png'
remove_tags= [dict(name='p', attrs={'class':['extraText', 'must-log-in']}), dict(attrs={'class':['el', 'headline', 'post-info', 'entry-footer clearfix']}), dict(name='div', attrs={'class':['twojaOcena', 'comment-body', 'comment-author vcard', 'comment-meta commentmetadata', 'tw_button', 'entry-comment-counter', 'snap_nopreview sharing robots-nocontent', 'sharedaddy sd-sharing-enabled']}), dict(id=['igit_rpwt_css', 'comments', 'reply-title', 'igit_title'])] keep_only_tags= [dict(id='articleModule')]
keep_only_tags= [dict(name='div', attrs={'class':['main', 'arkh-postmetadataheader', 'arkh-postcontent', 'post', 'content', 'news_header', 'news_subheader', 'news_text']}), dict(attrs={'class':['contentheading', 'contentpaneopen']}), dict(name='article')] remove_tags = [dict(attrs={'class':['breadCrump', 'dymek', 'articleFooter']})]
feeds = [(u'Informacje', u'http://www.gram.pl/feed_news.asp'), feeds = [(u'Informacje', u'http://www.gram.pl/feed_news.asp'),
(u'Publikacje', u'http://www.gram.pl/feed_news.asp?type=articles'), (u'Publikacje', u'http://www.gram.pl/feed_news.asp?type=articles'),
(u'Kolektyw- Indie Games', u'http://indie.gram.pl/feed/'), (u'Kolektyw- Indie Games', u'http://indie.gram.pl/feed/'),
@ -28,35 +29,21 @@ class Gram_pl(BasicNewsRecipe):
feed.articles.remove(article) feed.articles.remove(article)
return feeds return feeds
def append_page(self, soup, appendtag):
nexturl = appendtag.find('a', attrs={'class':'cpn'})
while nexturl:
soup2 = self.index_to_soup('http://www.gram.pl'+ nexturl['href'])
r=appendtag.find(id='pgbox')
if r:
r.extract()
pagetext = soup2.find(attrs={'class':'main'})
r=pagetext.find('h1')
if r:
r.extract()
r=pagetext.find('h2')
if r:
r.extract()
for r in pagetext.findAll('script'):
r.extract()
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
nexturl = appendtag.find('a', attrs={'class':'cpn'})
r=appendtag.find(id='pgbox')
if r:
r.extract()
def preprocess_html(self, soup): def preprocess_html(self, soup):
self.append_page(soup, soup.body) tag=soup.find(name='div', attrs={'class':'summary'})
tag=soup.findAll(name='div', attrs={'class':'picbox'}) if tag:
for t in tag: tag.find(attrs={'class':'pros'}).insert(0, BeautifulSoup('<h2>Plusy:</h2>').h2)
t['style']='float: left;' tag.find(attrs={'class':'cons'}).insert(0, BeautifulSoup('<h2>Minusy:</h2>').h2)
tag = soup.find(name='section', attrs={'class':'cenzurka'})
if tag:
rate = tag.p.img['data-ocena']
tag.p.img.extract()
tag.p.insert(len(tag.p.contents)-2, BeautifulSoup('<h2>Ocena: {0}</h2>'.format(rate)).h2)
for a in soup('a'): for a in soup('a'):
if a.has_key('href') and 'http://' not in a['href'] and 'https://' not in a['href']: if a.has_key('href') and 'http://' not in a['href'] and 'https://' not in a['href']:
a['href']=self.index + a['href'] a['href']=self.index + a['href']
tag=soup.find(name='span', attrs={'class':'platforma'})
if tag:
tag.name = 'p'
return soup return soup

View File

@ -1,5 +1,5 @@
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '2008-2010, Darko Miletic <darko.miletic at gmail.com>' __copyright__ = '2008-2012, Darko Miletic <darko.miletic at gmail.com>'
''' '''
harpers.org harpers.org
''' '''
@ -16,6 +16,7 @@ class Harpers(BasicNewsRecipe):
max_articles_per_feed = 100 max_articles_per_feed = 100
no_stylesheets = True no_stylesheets = True
use_embedded_content = False use_embedded_content = False
masthead_url = 'http://harpers.org/wp-content/themes/harpers/images/pheader.gif'
conversion_options = { conversion_options = {
'comment' : description 'comment' : description
@ -31,27 +32,9 @@ class Harpers(BasicNewsRecipe):
.caption{font-family:Verdana,sans-serif;font-size:x-small;color:#666666;} .caption{font-family:Verdana,sans-serif;font-size:x-small;color:#666666;}
''' '''
keep_only_tags = [ dict(name='div', attrs={'id':'cached'}) ] keep_only_tags = [ dict(name='div', attrs={'class':['postdetailFull', 'articlePost']}) ]
remove_tags = [ remove_tags = [dict(name=['link','object','embed','meta','base'])]
dict(name='table', attrs={'class':['rcnt','rcnt topline']})
,dict(name=['link','object','embed','meta','base'])
]
remove_attributes = ['width','height'] remove_attributes = ['width','height']
feeds = [(u"Harper's Magazine", u'http://www.harpers.org/rss/frontpage-rss20.xml')] feeds = [(u"Harper's Magazine", u'http://harpers.org/feed/')]
def get_cover_url(self):
cover_url = None
index = 'http://harpers.org/'
soup = self.index_to_soup(index)
link_item = soup.find(name = 'img',attrs= {'class':"cover"})
if link_item:
cover_url = 'http://harpers.org' + link_item['src']
return cover_url
def preprocess_html(self, soup):
for item in soup.findAll(style=True):
del item['style']
for item in soup.findAll(xmlns=True):
del item['xmlns']
return soup

View File

@ -15,23 +15,12 @@ class AdvancedUserRecipe(BasicNewsRecipe):
timeout = 5 timeout = 5
no_stylesheets = True no_stylesheets = True
keep_only_tags = [dict(name='div', attrs={'id':'mitte_news'}),
dict(name='h1', attrs={'class':'clear'}),
dict(name='div', attrs={'class':'meldung_wrapper'})]
remove_tags_after = dict(name ='p', attrs={'class':'editor'})
remove_tags = [dict(id='navi_top_container'), remove_tags = [dict(id='navi_top_container'),
dict(id='navi_bottom'), dict(name='p', attrs={'class':'size80'})]
dict(id='mitte_rechts'),
dict(id='navigation'),
dict(id='subnavi'),
dict(id='social_bookmarks'),
dict(id='permalink'),
dict(id='content_foren'),
dict(id='seiten_navi'),
dict(id='adbottom'),
dict(id='sitemap'),
dict(name='div', attrs={'id':'sitemap'}),
dict(name='ul', attrs={'class':'erste_zeile'}),
dict(name='ul', attrs={'class':'zweite_zeile'}),
dict(name='div', attrs={'class':'navi_top_container'})]
feeds = [ feeds = [
('Newsticker', 'http://www.heise.de/newsticker/heise.rdf'), ('Newsticker', 'http://www.heise.de/newsticker/heise.rdf'),
@ -54,5 +43,3 @@ class AdvancedUserRecipe(BasicNewsRecipe):
def print_version(self, url): def print_version(self, url):
return url + '?view=print' return url + '?view=print'

View File

@ -3,7 +3,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class Historia_org_pl(BasicNewsRecipe): class Historia_org_pl(BasicNewsRecipe):
title = u'Historia.org.pl' title = u'Historia.org.pl'
__author__ = 'fenuks' __author__ = 'fenuks'
description = u'history site' description = u'Artykuły dotyczące historii w układzie epok i tematów, forum. Najlepsza strona historii. Matura z historii i egzamin gimnazjalny z historii.'
cover_url = 'http://lh3.googleusercontent.com/_QeRQus12wGg/TOvHsZ2GN7I/AAAAAAAAD_o/LY1JZDnq7ro/logo5.jpg' cover_url = 'http://lh3.googleusercontent.com/_QeRQus12wGg/TOvHsZ2GN7I/AAAAAAAAD_o/LY1JZDnq7ro/logo5.jpg'
category = 'history' category = 'history'
language = 'pl' language = 'pl'
@ -12,16 +12,15 @@ class Historia_org_pl(BasicNewsRecipe):
no_stylesheets = True no_stylesheets = True
use_embedded_content = True use_embedded_content = True
max_articles_per_feed = 100 max_articles_per_feed = 100
ignore_duplicate_articles = {'title', 'url'}
feeds = [(u'Wszystkie', u'http://www.historia.org.pl/index.php?format=feed&type=atom'),
(u'Wiadomości', u'http://www.historia.org.pl/index.php/wiadomosci.feed?type=atom'), feeds = [(u'Wszystkie', u'http://historia.org.pl/feed/'),
(u'Publikacje', u'http://www.historia.org.pl/index.php/publikacje.feed?type=atom'), (u'Wiadomości', u'http://historia.org.pl/Kategoria/wiadomosci/feed/'),
(u'Publicystyka', u'http://www.historia.org.pl/index.php/publicystyka.feed?type=atom'), (u'Publikacje', u'http://historia.org.pl/Kategoria/artykuly/feed/'),
(u'Recenzje', u'http://historia.org.pl/index.php/recenzje.feed?type=atom'), (u'Publicystyka', u'http://historia.org.pl/Kategoria/publicystyka/feed/'),
(u'Kultura i sztuka', u'http://www.historia.org.pl/index.php/kultura-i-sztuka.feed?type=atom'), (u'Recenzje', u'http://historia.org.pl/Kategoria/recenzje/feed/'),
(u'Rekonstykcje', u'http://www.historia.org.pl/index.php/rekonstrukcje.feed?type=atom'), (u'Projekty', u'http://historia.org.pl/Kategoria/projekty/feed/'),]
(u'Projekty', u'http://www.historia.org.pl/index.php/projekty.feed?type=atom'),
(u'Konkursy'), (u'http://www.historia.org.pl/index.php/konkursy.feed?type=atom')]
def print_version(self, url): def print_version(self, url):

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 702 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 350 B

BIN
recipes/icons/tvp_info.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 329 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 412 B

View File

@ -47,9 +47,10 @@ class TheIndependentNew(BasicNewsRecipe):
dict(name='img',attrs={'alt' : ['Get Adobe Flash player']}), dict(name='img',attrs={'alt' : ['Get Adobe Flash player']}),
dict(name='img',attrs={'alt' : ['view gallery']}), dict(name='img',attrs={'alt' : ['view gallery']}),
dict(attrs={'style' : re.compile('.*')}), dict(attrs={'style' : re.compile('.*')}),
dict(attrs={'class':lambda x: x and 'voicesRelatedTopics' in x.split()}),
] ]
keep_only_tags =[dict(attrs={'id':'main'})] keep_only_tags =[dict(attrs={'id':['main','top']})]
recursions = 0 recursions = 0
# fixes non compliant html nesting and 'marks' article graphics links # fixes non compliant html nesting and 'marks' article graphics links
@ -69,7 +70,7 @@ class TheIndependentNew(BasicNewsRecipe):
} }
extra_css = """ extra_css = """
h1{font-family: Georgia,serif } h1{font-family: Georgia,serif ; font-size: x-large; }
body{font-family: Verdana,Arial,Helvetica,sans-serif} body{font-family: Verdana,Arial,Helvetica,sans-serif}
img{margin-bottom: 0.4em; display:block} img{margin-bottom: 0.4em; display:block}
.starRating img {float: left} .starRating img {float: left}
@ -77,16 +78,21 @@ class TheIndependentNew(BasicNewsRecipe):
.image {clear:left; font-size: x-small; color:#888888;} .image {clear:left; font-size: x-small; color:#888888;}
.articleByTimeLocation {font-size: x-small; color:#888888; .articleByTimeLocation {font-size: x-small; color:#888888;
margin-bottom:0.2em ; margin-top:0.2em ; display:block} margin-bottom:0.2em ; margin-top:0.2em ; display:block}
.subtitle {clear:left} .subtitle {clear:left ;}
.column-1 h1 { color: #191919} .column-1 h1 { color: #191919}
.column-1 h2 { color: #333333} .column-1 h2 { color: #333333}
.column-1 h3 { color: #444444} .column-1 h3 { color: #444444}
.column-1 p { color: #777777} .subtitle { color: #777777; font-size: medium;}
.column-1 p,a,h1,h2,h3 { margin: 0; } .column-1 a,h1,h2,h3 { margin: 0; }
.column-1 div{color:#888888; margin: 0;} .column-1 div{margin: 0;}
.articleContent {display: block; clear:left;} .articleContent {display: block; clear:left;}
.articleContent {color: #000000; font-size: medium;}
.ivDrip-section {color: #000000; font-size: medium;}
.datetime {color: #888888}
.title {font-weight:bold;}
.storyTop{} .storyTop{}
.pictureContainer img { max-width: 400px; max-height: 400px;} .pictureContainer img { max-width: 400px; max-height: 400px;}
.image img { max-width: 400px; max-height: 400px;}
""" """
oldest_article = 1 oldest_article = 1
@ -325,6 +331,20 @@ class TheIndependentNew(BasicNewsRecipe):
item.contents[0] = '' item.contents[0] = ''
def postprocess_html(self,soup, first_fetch): def postprocess_html(self,soup, first_fetch):
#mark subtitle parent as non-compliant nesting causes
# p's to be 'popped out' of the h3 tag they are nested in.
subtitle = soup.find('h3', attrs={'class' : 'subtitle'})
subtitle_div = None
if subtitle:
subtitle_div = subtitle.parent
if subtitle_div:
clazz = ''
if 'class' in subtitle_div:
clazz = subtitle_div['class'] + ' '
clazz = clazz + 'subtitle'
subtitle_div['class'] = clazz
#find broken images and remove captions #find broken images and remove captions
items_to_extract = [] items_to_extract = []
for item in soup.findAll('div', attrs={'class' : 'image'}): for item in soup.findAll('div', attrs={'class' : 'image'}):
@ -501,6 +521,9 @@ class TheIndependentNew(BasicNewsRecipe):
), ),
(u'Opinion', (u'Opinion',
u'http://www.independent.co.uk/opinion/?service=rss'), u'http://www.independent.co.uk/opinion/?service=rss'),
(u'Voices',
u'http://www.independent.co.uk/voices/?service=rss'
),
(u'Environment', (u'Environment',
u'http://www.independent.co.uk/environment/?service=rss'), u'http://www.independent.co.uk/environment/?service=rss'),
(u'Sport - Athletics', (u'Sport - Athletics',

View File

@ -9,6 +9,21 @@ class Kosmonauta(BasicNewsRecipe):
language = 'pl' language = 'pl'
cover_url='http://bi.gazeta.pl/im/4/10393/z10393414X,Kosmonauta-net.jpg' cover_url='http://bi.gazeta.pl/im/4/10393/z10393414X,Kosmonauta-net.jpg'
no_stylesheets = True no_stylesheets = True
INDEX = 'http://www.kosmonauta.net'
oldest_article = 7 oldest_article = 7
no_stylesheets = True
max_articles_per_feed = 100 max_articles_per_feed = 100
feeds = [(u'Kosmonauta.net', u'http://www.kosmonauta.net/index.php/feed/rss.html')] keep_only_tags = [dict(name='div', attrs={'class':'item-page'})]
remove_tags = [dict(attrs={'class':['article-tools clearfix', 'cedtag', 'nav clearfix', 'jwDisqusForm']})]
remove_tags_after = dict(name='div', attrs={'class':'cedtag'})
feeds = [(u'Kosmonauta.net', u'http://www.kosmonauta.net/?format=feed&type=atom')]
def preprocess_html(self, soup):
for a in soup.findAll(name='a'):
if a.has_key('href'):
href = a['href']
if not href.startswith('http'):
a['href'] = self.INDEX + href
print '%%%%%%%%%%%%%%%%%%%%%%%%%', a['href']
return soup

View File

@ -1,15 +1,16 @@
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
import re import re
class Ksiazka_net_pl(BasicNewsRecipe): class Ksiazka_net_pl(BasicNewsRecipe):
title = u'ksiazka.net.pl' title = u'książka.net.pl'
__author__ = 'fenuks' __author__ = 'fenuks'
description = u'Ksiazka.net.pl - book vortal' description = u'Portal Księgarski - tematyczny serwis o książkach. Wydarzenia z rynku księgarsko-wydawniczego, nowości, zapowiedzi, bestsellery, setki recenzji. Niezbędne informacje dla każdego miłośnika książek, księgarza, bibliotekarza i wydawcy.'
cover_url = 'http://www.ksiazka.net.pl/fileadmin/templates/ksiazka.net.pl/images/1PortalKsiegarski-logo.jpg' cover_url = 'http://www.ksiazka.net.pl/fileadmin/templates/ksiazka.net.pl/images/1PortalKsiegarski-logo.jpg'
category = 'books' category = 'books'
language = 'pl' language = 'pl'
oldest_article = 8 oldest_article = 8
max_articles_per_feed = 100 max_articles_per_feed = 100
no_stylesheets= True no_stylesheets= True
remove_empty_feeds = True
#extra_css = 'img {float: right;}' #extra_css = 'img {float: right;}'
preprocess_regexps = [(re.compile(ur'Podoba mi się, kupuję:'), lambda match: '<br />')] preprocess_regexps = [(re.compile(ur'Podoba mi się, kupuję:'), lambda match: '<br />')]
remove_tags_before= dict(name='div', attrs={'class':'m-body'}) remove_tags_before= dict(name='div', attrs={'class':'m-body'})

View File

@ -22,13 +22,15 @@ class LeMonde(BasicNewsRecipe):
#publication_type = 'newsportal' #publication_type = 'newsportal'
extra_css = ''' extra_css = '''
h1{font-size:130%;} h1{font-size:130%;}
h2{font-size:100%;}
blockquote.aside {background-color: #DDD; padding: 0.5em;}
.ariane{font-size:xx-small;} .ariane{font-size:xx-small;}
.source{font-size:xx-small;} .source{font-size:xx-small;}
#.href{font-size:xx-small;} /*.href{font-size:xx-small;}*/
#.figcaption style{color:#666666; font-size:x-small;} /*.figcaption style{color:#666666; font-size:x-small;}*/
#.main-article-info{font-family:Arial,Helvetica,sans-serif;} /*.main-article-info{font-family:Arial,Helvetica,sans-serif;}*/
#full-contents{font-size:small; font-family:Arial,Helvetica,sans-serif;font-weight:normal;} /*full-contents{font-size:small; font-family:Arial,Helvetica,sans-serif;font-weight:normal;}*/
#match-stats-summary{font-size:small; font-family:Arial,Helvetica,sans-serif;font-weight:normal;} /*match-stats-summary{font-size:small; font-family:Arial,Helvetica,sans-serif;font-weight:normal;}*/
''' '''
#preprocess_regexps = [(re.compile(r'<!--.*?-->', re.DOTALL), lambda m: '')] #preprocess_regexps = [(re.compile(r'<!--.*?-->', re.DOTALL), lambda m: '')]
conversion_options = { conversion_options = {
@ -44,6 +46,9 @@ class LeMonde(BasicNewsRecipe):
filterDuplicates = True filterDuplicates = True
def preprocess_html(self, soup): def preprocess_html(self, soup):
for aside in soup.findAll('aside'):
aside.name='blockquote'
aside['class'] = "aside"
for alink in soup.findAll('a'): for alink in soup.findAll('a'):
if alink.string is not None: if alink.string is not None:
tstr = alink.string tstr = alink.string
@ -107,7 +112,9 @@ class LeMonde(BasicNewsRecipe):
] ]
remove_tags = [ remove_tags = [
dict(name='div', attrs={'class':['bloc_base meme_sujet']}), dict(attrs={'class':['rubriques_liees']}),
dict(attrs={'class':['sociaux']}),
dict(attrs={'class':['bloc_base meme_sujet']}),
dict(name='p', attrs={'class':['lire']}) dict(name='p', attrs={'class':['lire']})
] ]

View File

@ -32,26 +32,28 @@ class ledevoir(BasicNewsRecipe):
recursion = 10 recursion = 10
needs_subscription = 'optional' needs_subscription = 'optional'
filterDuplicates = False
url_list = [] url_list = []
remove_javascript = True remove_javascript = True
no_stylesheets = True no_stylesheets = True
auto_cleanup = True
preprocess_regexps = [(re.compile(r'(title|alt)=".*?>.*?"', re.DOTALL), lambda m: '')] preprocess_regexps = [(re.compile(r'(title|alt)=".*?>.*?"', re.DOTALL), lambda m: '')]
keep_only_tags = [ #keep_only_tags = [
dict(name='div', attrs={'id':'article'}), #dict(name='div', attrs={'id':'article_detail'}),
dict(name='div', attrs={'id':'colonne_principale'}) #dict(name='div', attrs={'id':'colonne_principale'})
] #]
remove_tags = [ #remove_tags = [
dict(name='div', attrs={'id':'dialog'}), #dict(name='div', attrs={'id':'dialog'}),
dict(name='div', attrs={'class':['interesse_actions','reactions']}), #dict(name='div', attrs={'class':['interesse_actions','reactions','taille_du_texte right clearfix','partage_sociaux clearfix']}),
dict(name='ul', attrs={'class':'mots_cles'}), #dict(name='aside', attrs={'class':['article_actions clearfix','reactions','partage_sociaux_wrapper']}),
dict(name='a', attrs={'class':'haut'}), #dict(name='ul', attrs={'class':'mots_cles'}),
dict(name='h5', attrs={'class':'interesse_actions'}) #dict(name='ul', attrs={'id':'commentaires'}),
] #dict(name='a', attrs={'class':'haut'}),
#dict(name='h5', attrs={'class':'interesse_actions'})
#]
feeds = [ feeds = [
(u'A la une', 'http://www.ledevoir.com/rss/manchettes.xml'), (u'A la une', 'http://www.ledevoir.com/rss/manchettes.xml'),
@ -95,10 +97,4 @@ class ledevoir(BasicNewsRecipe):
br.submit() br.submit()
return br return br
def print_version(self, url):
if self.filterDuplicates:
if url in self.url_list:
return
self.url_list.append(url)
return url

View File

@ -1,43 +1,74 @@
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
from calibre import strftime
import re
import datetime
import time
class AdvancedUserRecipe1306097511(BasicNewsRecipe): class AdvancedUserRecipe1306097511(BasicNewsRecipe):
title = u'Metro UK' title = u'Metro UK'
description = 'Author Dave Asbury : News from The Metro - UK' description = 'News as provided by The Metro -UK'
#timefmt = '' #timefmt = ''
__author__ = 'Dave Asbury' __author__ = 'Dave Asbury'
#last update 9/9/12 #last update 9/6/12
cover_url = 'http://profile.ak.fbcdn.net/hprofile-ak-snc4/276636_117118184990145_2132092232_n.jpg' cover_url = 'http://profile.ak.fbcdn.net/hprofile-ak-snc4/276636_117118184990145_2132092232_n.jpg'
no_stylesheets = True
oldest_article = 1 oldest_article = 1
max_articles_per_feed = 12
remove_empty_feeds = True remove_empty_feeds = True
remove_javascript = True remove_javascript = True
#auto_cleanup = True auto_cleanup = True
encoding = 'UTF-8' encoding = 'UTF-8'
cover_url ='http://profile.ak.fbcdn.net/hprofile-ak-snc4/157897_117118184990145_840702264_n.jpg'
language = 'en_GB' language = 'en_GB'
masthead_url = 'http://e-edition.metro.co.uk/images/metro_logo.gif' masthead_url = 'http://e-edition.metro.co.uk/images/metro_logo.gif'
extra_css = '''
h1{font-family:Arial,Helvetica,sans-serif; font-weight:900;font-size:1.6em;}
h2{font-family:Arial,Helvetica,sans-serif; font-weight:normal;font-size:1.2em;}
p{font-family:Arial,Helvetica,sans-serif;font-size:1.0em;}
body{font-family:Helvetica,Arial,sans-serif;font-size:1.0em;}
'''
keep_only_tags = [
#dict(name='h1'),
#dict(name='h2'),
#dict(name='div', attrs={'class' : ['row','article','img-cnt figure','clrd']})
#dict(name='h3'),
#dict(attrs={'class' : 'BText'}),
]
remove_tags = [
dict(name='div',attrs={'class' : 'art-fd fd-gr1-b clrd'}),
dict(name='span',attrs={'class' : 'share'}),
dict(name='li'),
dict(attrs={'class' : ['twitter-share-button','header-forms','hdr-lnks','close','art-rgt','fd-gr1-b clrd google-article','news m12 clrd clr-b p5t shareBtm','item-ds csl-3-img news','c-1of3 c-last','c-1of1','pd','item-ds csl-3-img sport']}),
dict(attrs={'id' : ['','sky-left','sky-right','ftr-nav','and-ftr','notificationList','logo','miniLogo','comments-news','metro_extras']})
]
remove_tags_before = dict(name='h1')
#remove_tags_after = dict(attrs={'id':['topic-buttons']})
feeds = [ def parse_index(self):
(u'News', u'http://www.metro.co.uk/rss/news/'), (u'Money', u'http://www.metro.co.uk/rss/money/'), (u'Sport', u'http://www.metro.co.uk/rss/sport/'), (u'Film', u'http://www.metro.co.uk/rss/metrolife/film/'), (u'Music', u'http://www.metro.co.uk/rss/metrolife/music/'), (u'TV', u'http://www.metro.co.uk/rss/tv/'), (u'Showbiz', u'http://www.metro.co.uk/rss/showbiz/'), (u'Weird News', u'http://www.metro.co.uk/rss/weird/'), (u'Travel', u'http://www.metro.co.uk/rss/travel/'), (u'Lifestyle', u'http://www.metro.co.uk/rss/lifestyle/'), (u'Books', u'http://www.metro.co.uk/rss/lifestyle/books/'), (u'Food', u'http://www.metro.co.uk/rss/lifestyle/restaurants/')] articles = {}
key = None
ans = []
feeds = [ ('UK', 'http://metro.co.uk/news/uk/'),
('World', 'http://metro.co.uk/news/world/'),
('Weird', 'http://metro.co.uk/news/weird/'),
('Money', 'http://metro.co.uk/news/money/'),
('Sport', 'http://metro.co.uk/sport/'),
('Guilty Pleasures', 'http://metro.co.uk/guilty-pleasures/')
]
for key, feed in feeds:
soup = self.index_to_soup(feed)
articles[key] = []
ans.append(key)
today = datetime.date.today()
today = time.mktime(today.timetuple())-60*60*24
for a in soup.findAll('a'):
for name, value in a.attrs:
if name == "class" and value=="post":
url = a['href']
title = a['title']
print title
description = ''
m = re.search('^.*uk/([^/]*)/([^/]*)/([^/]*)/', url)
skip = 1
if len(m.groups()) == 3:
g = m.groups()
dt = datetime.datetime.strptime(''+g[0]+'-'+g[1]+'-'+g[2], '%Y-%m-%d')
pubdate = time.strftime('%a, %d %b', dt.timetuple())
dt = time.mktime(dt.timetuple())
if dt >= today:
print pubdate
skip = 0
else:
pubdate = strftime('%a, %d %b')
summary = a.find(True, attrs={'class':'excerpt'})
if summary:
description = self.tag_to_string(summary, use_alt=False)
if skip == 0:
articles[key].append(
dict(title=title, url=url, date=pubdate,
description=description,
content=''))
#ans = self.sort_index_by(ans, {'The Front Page':-1, 'Dining In, Dining Out':1, 'Obituaries':2})
ans = [(key, articles[key]) for key in ans if articles.has_key(key)]
return ans

View File

@ -2,7 +2,7 @@
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
class Mlody_technik(BasicNewsRecipe): class Mlody_technik(BasicNewsRecipe):
title = u'Mlody technik' title = u'Młody technik'
__author__ = 'fenuks' __author__ = 'fenuks'
description = u'Młody technik' description = u'Młody technik'
category = 'science' category = 'science'

View File

@ -15,7 +15,7 @@ class Nin(BasicNewsRecipe):
publisher = 'NIN d.o.o. - Ringier d.o.o.' publisher = 'NIN d.o.o. - Ringier d.o.o.'
category = 'news, politics, Serbia' category = 'news, politics, Serbia'
no_stylesheets = True no_stylesheets = True
oldest_article = 15 oldest_article = 180
encoding = 'utf-8' encoding = 'utf-8'
needs_subscription = True needs_subscription = True
remove_empty_feeds = True remove_empty_feeds = True
@ -25,7 +25,7 @@ class Nin(BasicNewsRecipe):
use_embedded_content = False use_embedded_content = False
language = 'sr' language = 'sr'
publication_type = 'magazine' publication_type = 'magazine'
masthead_url = 'http://www.nin.co.rs/img/head/logo.jpg' masthead_url = 'http://www.nin.co.rs/img/logo_print.jpg'
extra_css = """ extra_css = """
@font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)} @font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
body{font-family: Verdana, Lucida, sans1, sans-serif} body{font-family: Verdana, Lucida, sans1, sans-serif}
@ -42,11 +42,11 @@ class Nin(BasicNewsRecipe):
, 'tags' : category , 'tags' : category
, 'publisher' : publisher , 'publisher' : publisher
, 'language' : language , 'language' : language
, 'linearize_tables': True
} }
preprocess_regexps = [ preprocess_regexps = [
(re.compile(r'</body>.*?<html>', re.DOTALL|re.IGNORECASE),lambda match: '</body>') (re.compile(r'<div class="standardFont">.*', re.DOTALL|re.IGNORECASE),lambda match: '')
,(re.compile(r'</html>.*?</html>', re.DOTALL|re.IGNORECASE),lambda match: '</html>')
,(re.compile(u'\u0110'), lambda match: u'\u00D0') ,(re.compile(u'\u0110'), lambda match: u'\u00D0')
] ]
@ -60,42 +60,21 @@ class Nin(BasicNewsRecipe):
br.submit() br.submit()
return br return br
keep_only_tags =[dict(name='td', attrs={'width':'520'})] remove_tags_before = dict(name='div', attrs={'class':'titleFont'})
remove_tags_before =dict(name='span', attrs={'class':'izjava'}) remove_tags_after = dict(name='div', attrs={'class':'standardFont'})
remove_tags_after =dict(name='html') remove_tags = [dict(name=['object','link','iframe','meta','base'])]
remove_tags = [ remove_attributes = ['border','background','height','width','align','valign']
dict(name=['object','link','iframe','meta','base'])
,dict(attrs={'class':['fb-like','twitter-share-button']})
,dict(attrs={'rel':'nofollow'})
]
remove_attributes=['border','background','height','width','align','valign']
def get_cover_url(self): def get_cover_url(self):
cover_url = None cover_url = None
soup = self.index_to_soup(self.INDEX) soup = self.index_to_soup(self.INDEX)
for item in soup.findAll('a', href=True): cover = soup.find('img', attrs={'class':'issueImg'})
if item['href'].startswith('/pages/issue.php?id='): if cover:
simg = item.find('img') return self.PREFIX + cover['src']
if simg:
return self.PREFIX + item.img['src']
return cover_url return cover_url
feeds = [(u'NIN Online', u'http://www.nin.co.rs/misc/rss.php?feed=RSS2.0')] feeds = [(u'NIN Online', u'http://www.nin.co.rs/misc/rss.php?feed=RSS2.0')]
def preprocess_html(self, soup): def print_version(self, url):
for item in soup.findAll(style=True): return url + '&pf=1'
del item['style']
for item in soup.findAll('div'):
if len(item.contents) == 0:
item.extract()
for item in soup.findAll(['td','tr']):
item.name='div'
for item in soup.findAll('img'):
if not item.has_key('alt'):
item['alt'] = 'image'
for tbl in soup.findAll('table'):
img = tbl.find('img')
if img:
img.extract()
tbl.replaceWith(img)
return soup

View File

@ -0,0 +1,63 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from calibre.web.feeds.news import BasicNewsRecipe
class PoradniaPWN(BasicNewsRecipe):
title = u'Poradnia Językowa PWN'
__author__ = 'fenuks'
description = u'Internetowa poradnia językowa Wydawnictwa Naukowego PWN. Poradnię prowadzi Redaktor Naczelny Słowników Języka Polskiego, prof. Mirosław Bańko. Pomagają mu eksperci - znani polscy językoznawcy. Współpracuje z nami m.in. prof. Jerzy Bralczyk oraz dr Jan Grzenia.'
category = 'language'
language = 'pl'
#cover_url = ''
oldest_article = 14
max_articles_per_feed = 100000
INDEX = "http://poradnia.pwn.pl/"
no_stylesheets = True
remove_attributes = ['style']
remove_javascript = True
use_embedded_content = False
#preprocess_regexps = [(re.compile('<li|ul', re.IGNORECASE), lambda m: '<div'),(re.compile('</li>', re.IGNORECASE), lambda m: '</div>'), (re.compile('</ul>', re.IGNORECASE), lambda m: '</div>')]
keep_only_tags = [dict(name="div", attrs={"class":"searchhi"})]
feeds = [(u'Poradnia', u'http://rss.pwn.pl/poradnia.rss')]
'''def find_articles(self, url):
articles = []
soup=self.index_to_soup(url)
counter = int(soup.find(name='p', attrs={'class':'count'}).findAll('b')[-1].string)
counter = 500
pos = 0
next = url
while next:
soup=self.index_to_soup(next)
tag=soup.find(id="listapytan")
art=tag.findAll(name='li')
for i in art:
if i.h4:
title=i.h4.a.string
url=self.INDEX+i.h4.a['href']
#date=soup.find(id='footer').ul.li.string[41:-1]
articles.append({'title' : title,
'url' : url,
'date' : '',
'description' : ''
})
pos += 10
if not pos >=counter:
next = 'http://poradnia.pwn.pl/lista.php?kat=18&od=' + str(pos)
print u'Tworzenie listy artykułów dla', next
else:
next = None
print articles
return articles
def parse_index(self):
feeds = []
feeds.append((u"Poradnia", self.find_articles('http://poradnia.pwn.pl/lista.php')))
return feeds'''
def preprocess_html(self, soup):
for i in soup.findAll(name=['ul', 'li']):
i.name="div"
for z in soup.findAll(name='a'):
if not z['href'].startswith('http'):
z['href'] = 'http://poradnia.pwn.pl/' + z['href']
return soup

View File

@ -1,12 +1,13 @@
from calibre.web.feeds.news import BasicNewsRecipe # -*- coding: utf-8 -*-
class BasicUserRecipe1324913680(BasicNewsRecipe): from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1355341662(BasicNewsRecipe):
title = u'Sivil Dusunce' title = u'Sivil Dusunce'
language = 'tr' language = 'tr'
__author__ = 'asalet_r' __author__ = 'asalet_r'
oldest_article = 7 oldest_article = 7
max_articles_per_feed = 20 max_articles_per_feed = 50
auto_cleanup = True auto_cleanup = True
feeds = [(u'Sivil Dusunce', u'http://www.sivildusunce.com/feed/')] feeds = [(u'Sivil Dusunce', u'http://www.sivildusunce.com/?t=rss&xml=1')]

20
recipes/titanic_de.recipe Normal file
View File

@ -0,0 +1,20 @@
from calibre.web.feeds.news import BasicNewsRecipe
class Titanic(BasicNewsRecipe):
title = u'Titanic'
language = 'de'
__author__ = 'Krittika Goyal'
oldest_article = 14 #days
max_articles_per_feed = 25
#encoding = 'cp1252'
use_embedded_content = False
no_stylesheets = True
auto_cleanup = True
feeds = [
('News',
'http://www.titanic-magazin.de/ich.war.bei.der.waffen.rss'),
]

20
recipes/tvp_info.recipe Normal file
View File

@ -0,0 +1,20 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from calibre.web.feeds.news import BasicNewsRecipe
class TVPINFO(BasicNewsRecipe):
title = u'TVP.INFO'
__author__ = 'fenuks'
description = u'Serwis informacyjny TVP.INFO'
category = 'news'
language = 'pl'
cover_url = 'http://s.v3.tvp.pl/files/tvp-info/gfx/logo.png'
oldest_article = 7
max_articles_per_feed = 100
no_stylesheets = True
remove_empty_feeds = True
remove_javascript = True
use_embedded_content = False
ignore_duplicate_articles = {'title', 'url'}
keep_only_tags = [dict(id='contentNews')]
remove_tags = [dict(attrs={'class':['toolbox', 'modulBox read', 'modulBox social', 'videoPlayerBox']}), dict(id='belka')]
feeds = [(u'Wiadomo\u015bci', u'http://tvp.info/informacje?xslt=tvp-info/news/rss.xslt&src_id=191865'),
(u'\u015awiat', u'http://tvp.info/informacje/swiat?xslt=tvp-info/news/rss.xslt&src_id=191867'), (u'Biznes', u'http://tvp.info/informacje/biznes?xslt=tvp-info/news/rss.xslt&src_id=191868'), (u'Nauka', u'http://tvp.info/informacje/nauka?xslt=tvp-info/news/rss.xslt&src_id=191870'), (u'Kultura', u'http://tvp.info/informacje/kultura?xslt=tvp-info/news/rss.xslt&src_id=191869'), (u'Rozmaito\u015bci', u'http://tvp.info/informacje/rozmaitosci?xslt=tvp-info/news/rss.xslt&src_id=191872'), (u'Opinie', u'http://tvp.info/opinie?xslt=tvp-info/news/rss.xslt&src_id=191875'), (u'Komentarze', u'http://tvp.info/opinie/komentarze?xslt=tvp-info/news/rss.xslt&src_id=238200'), (u'Wywiady', u'http://tvp.info/opinie/wywiady?xslt=tvp-info/news/rss.xslt&src_id=236644')]

View File

@ -0,0 +1,16 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from calibre.web.feeds.news import BasicNewsRecipe
class ZTS(BasicNewsRecipe):
title = u'Zaufana Trzecia Strona'
__author__ = 'fenuks'
description = u'Niezależne źródło wiadomości o świecie bezpieczeństwa IT'
category = 'IT, security'
language = 'pl'
cover_url = 'http://www.zaufanatrzeciastrona.pl/wp-content/uploads/2012/08/z3s_h100.png'
oldest_article = 7
max_articles_per_feed = 100
no_stylesheets = True
remove_empty_feeds = True
keep_only_tags = [dict(name='div', attrs={'class':'post postcontent'})]
remove_tags = [dict(name='div', attrs={'class':'dolna-ramka'})]
feeds = [(u'Strona g\u0142\xf3wna', u'http://feeds.feedburner.com/ZaufanaTrzeciaStronaGlowna'), (u'Drobiazgi', u'http://feeds.feedburner.com/ZaufanaTrzeciaStronaDrobiazgi')]

Binary file not shown.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 17 KiB

After

Width:  |  Height:  |  Size: 62 KiB

View File

@ -598,6 +598,10 @@ class Win32Freeze(Command, WixMixIn):
# from files # from files
'unrar.pyd', 'wpd.pyd', 'podofo.pyd', 'unrar.pyd', 'wpd.pyd', 'podofo.pyd',
'progress_indicator.pyd', 'progress_indicator.pyd',
# As per this https://bugs.launchpad.net/bugs/1087816
# on some systems magick.pyd fails to load from memory
# on 64 bit
'magick.pyd',
}: }:
self.add_to_zipfile(zf, pyd, x) self.add_to_zipfile(zf, pyd, x)
os.remove(self.j(x, pyd)) os.remove(self.j(x, pyd))

File diff suppressed because it is too large Load Diff

View File

@ -9,35 +9,35 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-" "Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n" "devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-11-25 14:01+0000\n" "POT-Creation-Date: 2011-11-25 14:01+0000\n"
"PO-Revision-Date: 2011-09-27 16:27+0000\n" "PO-Revision-Date: 2012-12-13 03:44+0000\n"
"Last-Translator: Kovid Goyal <Unknown>\n" "Last-Translator: Fábio Malcher Miranda <mirand863@hotmail.com>\n"
"Language-Team: Brazilian Portuguese\n" "Language-Team: Brazilian Portuguese\n"
"MIME-Version: 1.0\n" "MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n" "Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n" "Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-11-26 05:47+0000\n" "X-Launchpad-Export-Date: 2012-12-13 04:41+0000\n"
"X-Generator: Launchpad (build 14381)\n" "X-Generator: Launchpad (build 16361)\n"
"Language: \n" "Language: \n"
#. name for aaa #. name for aaa
msgid "Ghotuo" msgid "Ghotuo"
msgstr "" msgstr "Ghotuo"
#. name for aab #. name for aab
msgid "Alumu-Tesu" msgid "Alumu-Tesu"
msgstr "" msgstr "Alumu-Tesu"
#. name for aac #. name for aac
msgid "Ari" msgid "Ari"
msgstr "" msgstr "Ari"
#. name for aad #. name for aad
msgid "Amal" msgid "Amal"
msgstr "" msgstr "Amal"
#. name for aae #. name for aae
msgid "Albanian; Arbëreshë" msgid "Albanian; Arbëreshë"
msgstr "" msgstr "Albanês; Arbëreshë"
#. name for aaf #. name for aaf
msgid "Aranadan" msgid "Aranadan"
@ -45,7 +45,7 @@ msgstr ""
#. name for aag #. name for aag
msgid "Ambrak" msgid "Ambrak"
msgstr "" msgstr "Ambrak"
#. name for aah #. name for aah
msgid "Arapesh; Abu'" msgid "Arapesh; Abu'"
@ -53,23 +53,23 @@ msgstr ""
#. name for aai #. name for aai
msgid "Arifama-Miniafia" msgid "Arifama-Miniafia"
msgstr "" msgstr "Arifama-Miniafia"
#. name for aak #. name for aak
msgid "Ankave" msgid "Ankave"
msgstr "" msgstr "Ankave"
#. name for aal #. name for aal
msgid "Afade" msgid "Afade"
msgstr "" msgstr "Afade"
#. name for aam #. name for aam
msgid "Aramanik" msgid "Aramanik"
msgstr "" msgstr "Aramaico"
#. name for aan #. name for aan
msgid "Anambé" msgid "Anambé"
msgstr "" msgstr "Anambé"
#. name for aao #. name for aao
msgid "Arabic; Algerian Saharan" msgid "Arabic; Algerian Saharan"
@ -77,7 +77,7 @@ msgstr ""
#. name for aap #. name for aap
msgid "Arára; Pará" msgid "Arára; Pará"
msgstr "" msgstr "Arara; Pará"
#. name for aaq #. name for aaq
msgid "Abnaki; Eastern" msgid "Abnaki; Eastern"
@ -89,7 +89,7 @@ msgstr ""
#. name for aas #. name for aas
msgid "Aasáx" msgid "Aasáx"
msgstr "" msgstr "Aasáx"
#. name for aat #. name for aat
msgid "Albanian; Arvanitika" msgid "Albanian; Arvanitika"
@ -97,27 +97,27 @@ msgstr ""
#. name for aau #. name for aau
msgid "Abau" msgid "Abau"
msgstr "" msgstr "Abau"
#. name for aaw #. name for aaw
msgid "Solong" msgid "Solong"
msgstr "" msgstr "Solong"
#. name for aax #. name for aax
msgid "Mandobo Atas" msgid "Mandobo Atas"
msgstr "" msgstr "Mandobo Atas"
#. name for aaz #. name for aaz
msgid "Amarasi" msgid "Amarasi"
msgstr "" msgstr "Amarasi"
#. name for aba #. name for aba
msgid "Abé" msgid "Abé"
msgstr "" msgstr "Abé"
#. name for abb #. name for abb
msgid "Bankon" msgid "Bankon"
msgstr "" msgstr "Bankon"
#. name for abc #. name for abc
msgid "Ayta; Ambala" msgid "Ayta; Ambala"
@ -125,7 +125,7 @@ msgstr ""
#. name for abd #. name for abd
msgid "Manide" msgid "Manide"
msgstr "" msgstr "Manide"
#. name for abe #. name for abe
msgid "Abnaki; Western" msgid "Abnaki; Western"
@ -145,11 +145,11 @@ msgstr ""
#. name for abi #. name for abi
msgid "Abidji" msgid "Abidji"
msgstr "" msgstr "Abidji"
#. name for abj #. name for abj
msgid "Aka-Bea" msgid "Aka-Bea"
msgstr "" msgstr "Aka-Bea"
#. name for abk #. name for abk
msgid "Abkhazian" msgid "Abkhazian"
@ -157,19 +157,19 @@ msgstr ""
#. name for abl #. name for abl
msgid "Lampung Nyo" msgid "Lampung Nyo"
msgstr "" msgstr "Lampung Nyo"
#. name for abm #. name for abm
msgid "Abanyom" msgid "Abanyom"
msgstr "" msgstr "Abanyom"
#. name for abn #. name for abn
msgid "Abua" msgid "Abua"
msgstr "" msgstr "Abua"
#. name for abo #. name for abo
msgid "Abon" msgid "Abon"
msgstr "" msgstr "Abon"
#. name for abp #. name for abp
msgid "Ayta; Abellen" msgid "Ayta; Abellen"
@ -177,11 +177,11 @@ msgstr ""
#. name for abq #. name for abq
msgid "Abaza" msgid "Abaza"
msgstr "" msgstr "Abaza"
#. name for abr #. name for abr
msgid "Abron" msgid "Abron"
msgstr "" msgstr "Abron"
#. name for abs #. name for abs
msgid "Malay; Ambonese" msgid "Malay; Ambonese"
@ -189,11 +189,11 @@ msgstr ""
#. name for abt #. name for abt
msgid "Ambulas" msgid "Ambulas"
msgstr "" msgstr "Ambulas"
#. name for abu #. name for abu
msgid "Abure" msgid "Abure"
msgstr "" msgstr "Abure"
#. name for abv #. name for abv
msgid "Arabic; Baharna" msgid "Arabic; Baharna"
@ -201,15 +201,15 @@ msgstr ""
#. name for abw #. name for abw
msgid "Pal" msgid "Pal"
msgstr "" msgstr "Pal"
#. name for abx #. name for abx
msgid "Inabaknon" msgid "Inabaknon"
msgstr "" msgstr "Inabaknon"
#. name for aby #. name for aby
msgid "Aneme Wake" msgid "Aneme Wake"
msgstr "" msgstr "Aneme Wake"
#. name for abz #. name for abz
msgid "Abui" msgid "Abui"
@ -225,7 +225,7 @@ msgstr ""
#. name for acd #. name for acd
msgid "Gikyode" msgid "Gikyode"
msgstr "" msgstr "Gikyode"
#. name for ace #. name for ace
msgid "Achinese" msgid "Achinese"
@ -241,15 +241,15 @@ msgstr ""
#. name for aci #. name for aci
msgid "Aka-Cari" msgid "Aka-Cari"
msgstr "" msgstr "Aka-Cari"
#. name for ack #. name for ack
msgid "Aka-Kora" msgid "Aka-Kora"
msgstr "" msgstr "Aka-Kora"
#. name for acl #. name for acl
msgid "Akar-Bale" msgid "Akar-Bale"
msgstr "" msgstr "Akar-Bale"
#. name for acm #. name for acm
msgid "Arabic; Mesopotamian" msgid "Arabic; Mesopotamian"
@ -257,7 +257,7 @@ msgstr ""
#. name for acn #. name for acn
msgid "Achang" msgid "Achang"
msgstr "" msgstr "Achang"
#. name for acp #. name for acp
msgid "Acipa; Eastern" msgid "Acipa; Eastern"
@ -269,23 +269,23 @@ msgstr ""
#. name for acr #. name for acr
msgid "Achi" msgid "Achi"
msgstr "" msgstr "Achi"
#. name for acs #. name for acs
msgid "Acroá" msgid "Acroá"
msgstr "" msgstr "Acroá"
#. name for act #. name for act
msgid "Achterhoeks" msgid "Achterhoeks"
msgstr "" msgstr "Achterhoeks"
#. name for acu #. name for acu
msgid "Achuar-Shiwiar" msgid "Achuar-Shiwiar"
msgstr "" msgstr "Achuar-Shiwiar"
#. name for acv #. name for acv
msgid "Achumawi" msgid "Achumawi"
msgstr "" msgstr "Achumawi"
#. name for acw #. name for acw
msgid "Arabic; Hijazi" msgid "Arabic; Hijazi"
@ -301,23 +301,23 @@ msgstr ""
#. name for acz #. name for acz
msgid "Acheron" msgid "Acheron"
msgstr "" msgstr "Acheron"
#. name for ada #. name for ada
msgid "Adangme" msgid "Adangme"
msgstr "" msgstr "Adangme"
#. name for adb #. name for adb
msgid "Adabe" msgid "Adabe"
msgstr "" msgstr "Adabe"
#. name for add #. name for add
msgid "Dzodinka" msgid "Dzodinka"
msgstr "" msgstr "Dzodinka"
#. name for ade #. name for ade
msgid "Adele" msgid "Adele"
msgstr "" msgstr "Adele"
#. name for adf #. name for adf
msgid "Arabic; Dhofari" msgid "Arabic; Dhofari"
@ -325,59 +325,59 @@ msgstr ""
#. name for adg #. name for adg
msgid "Andegerebinha" msgid "Andegerebinha"
msgstr "" msgstr "Andegerebinha"
#. name for adh #. name for adh
msgid "Adhola" msgid "Adhola"
msgstr "" msgstr "Adhola"
#. name for adi #. name for adi
msgid "Adi" msgid "Adi"
msgstr "" msgstr "Adi"
#. name for adj #. name for adj
msgid "Adioukrou" msgid "Adioukrou"
msgstr "" msgstr "Adioukrou"
#. name for adl #. name for adl
msgid "Galo" msgid "Galo"
msgstr "" msgstr "Gaulês"
#. name for adn #. name for adn
msgid "Adang" msgid "Adang"
msgstr "" msgstr "Adang"
#. name for ado #. name for ado
msgid "Abu" msgid "Abu"
msgstr "" msgstr "Abu"
#. name for adp #. name for adp
msgid "Adap" msgid "Adap"
msgstr "" msgstr "Adap"
#. name for adq #. name for adq
msgid "Adangbe" msgid "Adangbe"
msgstr "" msgstr "Adangbe"
#. name for adr #. name for adr
msgid "Adonara" msgid "Adonara"
msgstr "" msgstr "Adonara"
#. name for ads #. name for ads
msgid "Adamorobe Sign Language" msgid "Adamorobe Sign Language"
msgstr "" msgstr "Idiomas de Sinais Adamorobe"
#. name for adt #. name for adt
msgid "Adnyamathanha" msgid "Adnyamathanha"
msgstr "" msgstr "Adnyamathanha"
#. name for adu #. name for adu
msgid "Aduge" msgid "Aduge"
msgstr "" msgstr "Aduge"
#. name for adw #. name for adw
msgid "Amundava" msgid "Amundava"
msgstr "" msgstr "Amundava"
#. name for adx #. name for adx
msgid "Tibetan; Amdo" msgid "Tibetan; Amdo"
@ -385,11 +385,11 @@ msgstr ""
#. name for ady #. name for ady
msgid "Adyghe" msgid "Adyghe"
msgstr "" msgstr "Adigue"
#. name for adz #. name for adz
msgid "Adzera" msgid "Adzera"
msgstr "" msgstr "Adzera"
#. name for aea #. name for aea
msgid "Areba" msgid "Areba"
@ -405,7 +405,7 @@ msgstr ""
#. name for aed #. name for aed
msgid "Argentine Sign Language" msgid "Argentine Sign Language"
msgstr "" msgstr "Idiomas de Sinais Argentino"
#. name for aee #. name for aee
msgid "Pashayi; Northeast" msgid "Pashayi; Northeast"
@ -413,23 +413,23 @@ msgstr ""
#. name for aek #. name for aek
msgid "Haeke" msgid "Haeke"
msgstr "" msgstr "Haeke"
#. name for ael #. name for ael
msgid "Ambele" msgid "Ambele"
msgstr "" msgstr "Ambele"
#. name for aem #. name for aem
msgid "Arem" msgid "Arem"
msgstr "" msgstr "Arem"
#. name for aen #. name for aen
msgid "Armenian Sign Language" msgid "Armenian Sign Language"
msgstr "" msgstr "Idiomas de Sinais Americano"
#. name for aeq #. name for aeq
msgid "Aer" msgid "Aer"
msgstr "" msgstr "Aer"
#. name for aer #. name for aer
msgid "Arrernte; Eastern" msgid "Arrernte; Eastern"
@ -437,23 +437,23 @@ msgstr ""
#. name for aes #. name for aes
msgid "Alsea" msgid "Alsea"
msgstr "" msgstr "Alsea"
#. name for aeu #. name for aeu
msgid "Akeu" msgid "Akeu"
msgstr "" msgstr "Akeu"
#. name for aew #. name for aew
msgid "Ambakich" msgid "Ambakich"
msgstr "" msgstr "Ambakich"
#. name for aey #. name for aey
msgid "Amele" msgid "Amele"
msgstr "" msgstr "Amele"
#. name for aez #. name for aez
msgid "Aeka" msgid "Aeka"
msgstr "" msgstr "Aeka"
#. name for afb #. name for afb
msgid "Arabic; Gulf" msgid "Arabic; Gulf"
@ -461,11 +461,11 @@ msgstr ""
#. name for afd #. name for afd
msgid "Andai" msgid "Andai"
msgstr "" msgstr "Andai"
#. name for afe #. name for afe
msgid "Putukwam" msgid "Putukwam"
msgstr "" msgstr "Putukwam"
#. name for afg #. name for afg
msgid "Afghan Sign Language" msgid "Afghan Sign Language"
@ -473,27 +473,27 @@ msgstr ""
#. name for afh #. name for afh
msgid "Afrihili" msgid "Afrihili"
msgstr "" msgstr "Afrihili"
#. name for afi #. name for afi
msgid "Akrukay" msgid "Akrukay"
msgstr "" msgstr "Akrukay"
#. name for afk #. name for afk
msgid "Nanubae" msgid "Nanubae"
msgstr "" msgstr "Nanubae"
#. name for afn #. name for afn
msgid "Defaka" msgid "Defaka"
msgstr "" msgstr "Defaka"
#. name for afo #. name for afo
msgid "Eloyi" msgid "Eloyi"
msgstr "" msgstr "Eloyi"
#. name for afp #. name for afp
msgid "Tapei" msgid "Tapei"
msgstr "" msgstr "Tapei"
#. name for afr #. name for afr
msgid "Afrikaans" msgid "Afrikaans"
@ -505,55 +505,55 @@ msgstr ""
#. name for aft #. name for aft
msgid "Afitti" msgid "Afitti"
msgstr "" msgstr "Afitti"
#. name for afu #. name for afu
msgid "Awutu" msgid "Awutu"
msgstr "" msgstr "Awutu"
#. name for afz #. name for afz
msgid "Obokuitai" msgid "Obokuitai"
msgstr "" msgstr "Obokuitai"
#. name for aga #. name for aga
msgid "Aguano" msgid "Aguano"
msgstr "" msgstr "Aguano"
#. name for agb #. name for agb
msgid "Legbo" msgid "Legbo"
msgstr "" msgstr "Legbo"
#. name for agc #. name for agc
msgid "Agatu" msgid "Agatu"
msgstr "" msgstr "Agatu"
#. name for agd #. name for agd
msgid "Agarabi" msgid "Agarabi"
msgstr "" msgstr "Agarabi"
#. name for age #. name for age
msgid "Angal" msgid "Angal"
msgstr "" msgstr "Angal"
#. name for agf #. name for agf
msgid "Arguni" msgid "Arguni"
msgstr "" msgstr "Arguni"
#. name for agg #. name for agg
msgid "Angor" msgid "Angor"
msgstr "" msgstr "Angor"
#. name for agh #. name for agh
msgid "Ngelima" msgid "Ngelima"
msgstr "" msgstr "Ngelima"
#. name for agi #. name for agi
msgid "Agariya" msgid "Agariya"
msgstr "" msgstr "Agariya"
#. name for agj #. name for agj
msgid "Argobba" msgid "Argobba"
msgstr "" msgstr "Argobba"
#. name for agk #. name for agk
msgid "Agta; Isarog" msgid "Agta; Isarog"
@ -561,31 +561,31 @@ msgstr ""
#. name for agl #. name for agl
msgid "Fembe" msgid "Fembe"
msgstr "" msgstr "Fembe"
#. name for agm #. name for agm
msgid "Angaataha" msgid "Angaataha"
msgstr "" msgstr "Angaataha"
#. name for agn #. name for agn
msgid "Agutaynen" msgid "Agutaynen"
msgstr "" msgstr "Agutaynen"
#. name for ago #. name for ago
msgid "Tainae" msgid "Tainae"
msgstr "" msgstr "Tainae"
#. name for agq #. name for agq
msgid "Aghem" msgid "Aghem"
msgstr "" msgstr "Aghem"
#. name for agr #. name for agr
msgid "Aguaruna" msgid "Aguaruna"
msgstr "" msgstr "Aguaruna"
#. name for ags #. name for ags
msgid "Esimbi" msgid "Esimbi"
msgstr "" msgstr "Esimbi"
#. name for agt #. name for agt
msgid "Agta; Central Cagayan" msgid "Agta; Central Cagayan"
@ -593,7 +593,7 @@ msgstr ""
#. name for agu #. name for agu
msgid "Aguacateco" msgid "Aguacateco"
msgstr "" msgstr "Aguacateco"
#. name for agv #. name for agv
msgid "Dumagat; Remontado" msgid "Dumagat; Remontado"
@ -601,11 +601,11 @@ msgstr ""
#. name for agw #. name for agw
msgid "Kahua" msgid "Kahua"
msgstr "" msgstr "Kahua"
#. name for agx #. name for agx
msgid "Aghul" msgid "Aghul"
msgstr "" msgstr "Aghul"
#. name for agy #. name for agy
msgid "Alta; Southern" msgid "Alta; Southern"
@ -617,19 +617,19 @@ msgstr ""
#. name for aha #. name for aha
msgid "Ahanta" msgid "Ahanta"
msgstr "" msgstr "Ahanta"
#. name for ahb #. name for ahb
msgid "Axamb" msgid "Axamb"
msgstr "" msgstr "Axamb"
#. name for ahg #. name for ahg
msgid "Qimant" msgid "Qimant"
msgstr "" msgstr "Qimant"
#. name for ahh #. name for ahh
msgid "Aghu" msgid "Aghu"
msgstr "" msgstr "Aghu"
#. name for ahi #. name for ahi
msgid "Aizi; Tiagbamrin" msgid "Aizi; Tiagbamrin"
@ -637,11 +637,11 @@ msgstr ""
#. name for ahk #. name for ahk
msgid "Akha" msgid "Akha"
msgstr "" msgstr "Akha"
#. name for ahl #. name for ahl
msgid "Igo" msgid "Igo"
msgstr "" msgstr "Igo"
#. name for ahm #. name for ahm
msgid "Aizi; Mobumrin" msgid "Aizi; Mobumrin"
@ -649,11 +649,11 @@ msgstr ""
#. name for ahn #. name for ahn
msgid "Àhàn" msgid "Àhàn"
msgstr "" msgstr "Àhàn"
#. name for aho #. name for aho
msgid "Ahom" msgid "Ahom"
msgstr "" msgstr "Ahom"
#. name for ahp #. name for ahp
msgid "Aizi; Aproumu" msgid "Aizi; Aproumu"
@ -661,39 +661,39 @@ msgstr ""
#. name for ahr #. name for ahr
msgid "Ahirani" msgid "Ahirani"
msgstr "" msgstr "Ahirani"
#. name for ahs #. name for ahs
msgid "Ashe" msgid "Ashe"
msgstr "" msgstr "Ashe"
#. name for aht #. name for aht
msgid "Ahtena" msgid "Ahtena"
msgstr "" msgstr "Ahtena"
#. name for aia #. name for aia
msgid "Arosi" msgid "Arosi"
msgstr "" msgstr "Arosi"
#. name for aib #. name for aib
msgid "Ainu (China)" msgid "Ainu (China)"
msgstr "" msgstr "Ainu (China)"
#. name for aic #. name for aic
msgid "Ainbai" msgid "Ainbai"
msgstr "" msgstr "Ainbai"
#. name for aid #. name for aid
msgid "Alngith" msgid "Alngith"
msgstr "" msgstr "Alngith"
#. name for aie #. name for aie
msgid "Amara" msgid "Amara"
msgstr "" msgstr "Amara"
#. name for aif #. name for aif
msgid "Agi" msgid "Agi"
msgstr "" msgstr "Agi"
#. name for aig #. name for aig
msgid "Creole English; Antigua and Barbuda" msgid "Creole English; Antigua and Barbuda"
@ -701,7 +701,7 @@ msgstr ""
#. name for aih #. name for aih
msgid "Ai-Cham" msgid "Ai-Cham"
msgstr "" msgstr "Ai-Cham"
#. name for aii #. name for aii
msgid "Neo-Aramaic; Assyrian" msgid "Neo-Aramaic; Assyrian"
@ -709,35 +709,35 @@ msgstr ""
#. name for aij #. name for aij
msgid "Lishanid Noshan" msgid "Lishanid Noshan"
msgstr "" msgstr "Lishanid Noshan"
#. name for aik #. name for aik
msgid "Ake" msgid "Ake"
msgstr "" msgstr "Ake"
#. name for ail #. name for ail
msgid "Aimele" msgid "Aimele"
msgstr "" msgstr "Aimele"
#. name for aim #. name for aim
msgid "Aimol" msgid "Aimol"
msgstr "" msgstr "Aimol"
#. name for ain #. name for ain
msgid "Ainu (Japan)" msgid "Ainu (Japan)"
msgstr "" msgstr "Ainu (Japão)"
#. name for aio #. name for aio
msgid "Aiton" msgid "Aiton"
msgstr "" msgstr "Aiton"
#. name for aip #. name for aip
msgid "Burumakok" msgid "Burumakok"
msgstr "" msgstr "Burumakok"
#. name for aiq #. name for aiq
msgid "Aimaq" msgid "Aimaq"
msgstr "" msgstr "Aimaq"
#. name for air #. name for air
msgid "Airoran" msgid "Airoran"
@ -2021,7 +2021,7 @@ msgstr ""
#. name for aze #. name for aze
msgid "Azerbaijani" msgid "Azerbaijani"
msgstr "Azerbaidjani" msgstr "Azerbaijano"
#. name for azg #. name for azg
msgid "Amuzgo; San Pedro Amuzgos" msgid "Amuzgo; San Pedro Amuzgos"

View File

@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net' __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en' __docformat__ = 'restructuredtext en'
__appname__ = u'calibre' __appname__ = u'calibre'
numeric_version = (0, 9, 8) numeric_version = (0, 9, 10)
__version__ = u'.'.join(map(unicode, numeric_version)) __version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>" __author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

View File

@ -121,6 +121,8 @@ def debug(ioreg_to_tmp=False, buf=None, plugins=None,
out('\nDisabled plugins:', textwrap.fill(' '.join([x.__class__.__name__ for x in out('\nDisabled plugins:', textwrap.fill(' '.join([x.__class__.__name__ for x in
disabled_plugins]))) disabled_plugins])))
out(' ') out(' ')
else:
out('\nNo disabled plugins')
found_dev = False found_dev = False
for dev in devplugins: for dev in devplugins:
if not dev.MANAGES_DEVICE_PRESENCE: continue if not dev.MANAGES_DEVICE_PRESENCE: continue

View File

@ -10,7 +10,7 @@ import cStringIO
from calibre.devices.usbms.driver import USBMS from calibre.devices.usbms.driver import USBMS
HTC_BCDS = [0x100, 0x0222, 0x0226, 0x227, 0x228, 0x229, 0x9999] HTC_BCDS = [0x100, 0x0222, 0x0226, 0x227, 0x228, 0x229, 0x0231, 0x9999]
class ANDROID(USBMS): class ANDROID(USBMS):
@ -92,7 +92,7 @@ class ANDROID(USBMS):
# Google # Google
0x18d1 : { 0x18d1 : {
0x0001 : [0x0223, 0x230, 0x9999], 0x0001 : [0x0223, 0x230, 0x9999],
0x0003 : [0x0230], 0x0003 : [0x0230, 0x9999],
0x4e11 : [0x0100, 0x226, 0x227], 0x4e11 : [0x0100, 0x226, 0x227],
0x4e12 : [0x0100, 0x226, 0x227], 0x4e12 : [0x0100, 0x226, 0x227],
0x4e21 : [0x0100, 0x226, 0x227, 0x231], 0x4e21 : [0x0100, 0x226, 0x227, 0x231],
@ -212,7 +212,7 @@ class ANDROID(USBMS):
'VIZIO', 'GOOGLE', 'FREESCAL', 'KOBO_INC', 'LENOVO', 'ROCKCHIP', 'VIZIO', 'GOOGLE', 'FREESCAL', 'KOBO_INC', 'LENOVO', 'ROCKCHIP',
'POCKET', 'ONDA_MID', 'ZENITHIN', 'INGENIC', 'PMID701C', 'PD', 'POCKET', 'ONDA_MID', 'ZENITHIN', 'INGENIC', 'PMID701C', 'PD',
'PMP5097C', 'MASS', 'NOVO7', 'ZEKI', 'COBY', 'SXZ', 'USB_2.0', 'PMP5097C', 'MASS', 'NOVO7', 'ZEKI', 'COBY', 'SXZ', 'USB_2.0',
'COBY_MID', 'VS', 'AINOL', 'TOPWISE', 'PAD703'] 'COBY_MID', 'VS', 'AINOL', 'TOPWISE', 'PAD703', 'NEXT8D12']
WINDOWS_MAIN_MEM = ['ANDROID_PHONE', 'A855', 'A853', 'INC.NEXUS_ONE', WINDOWS_MAIN_MEM = ['ANDROID_PHONE', 'A855', 'A853', 'INC.NEXUS_ONE',
'__UMS_COMPOSITE', '_MB200', 'MASS_STORAGE', '_-_CARD', 'SGH-I897', '__UMS_COMPOSITE', '_MB200', 'MASS_STORAGE', '_-_CARD', 'SGH-I897',
'GT-I9000', 'FILE-STOR_GADGET', 'SGH-T959_CARD', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-I9000', 'FILE-STOR_GADGET', 'SGH-T959_CARD', 'SGH-T959', 'SAMSUNG_ANDROID',
@ -232,7 +232,7 @@ class ANDROID(USBMS):
'THINKPAD_TABLET', 'SGH-T989', 'YP-G70', 'STORAGE_DEVICE', 'THINKPAD_TABLET', 'SGH-T989', 'YP-G70', 'STORAGE_DEVICE',
'ADVANCED', 'SGH-I727', 'USB_FLASH_DRIVER', 'ANDROID', 'ADVANCED', 'SGH-I727', 'USB_FLASH_DRIVER', 'ANDROID',
'S5830I_CARD', 'MID7042', 'LINK-CREATE', '7035', 'VIEWPAD_7E', 'S5830I_CARD', 'MID7042', 'LINK-CREATE', '7035', 'VIEWPAD_7E',
'NOVO7', 'MB526', '_USB#WYK7MSF8KE', 'TABLET_PC'] 'NOVO7', 'MB526', '_USB#WYK7MSF8KE', 'TABLET_PC', 'F']
WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897', WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
'FILE-STOR_GADGET', 'SGH-T959_CARD', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD', 'FILE-STOR_GADGET', 'SGH-T959_CARD', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD', 'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD',
@ -243,7 +243,7 @@ class ANDROID(USBMS):
'FILE-CD_GADGET', 'GT-I9001_CARD', 'USB_2.0', 'XT875', 'FILE-CD_GADGET', 'GT-I9001_CARD', 'USB_2.0', 'XT875',
'UMS_COMPOSITE', 'PRO', '.KOBO_VOX', 'SGH-T989_CARD', 'SGH-I727', 'UMS_COMPOSITE', 'PRO', '.KOBO_VOX', 'SGH-T989_CARD', 'SGH-I727',
'USB_FLASH_DRIVER', 'ANDROID', 'MID7042', '7035', 'VIEWPAD_7E', 'USB_FLASH_DRIVER', 'ANDROID', 'MID7042', '7035', 'VIEWPAD_7E',
'NOVO7', 'ADVANCED', 'TABLET_PC'] 'NOVO7', 'ADVANCED', 'TABLET_PC', 'F']
OSX_MAIN_MEM = 'Android Device Main Memory' OSX_MAIN_MEM = 'Android Device Main Memory'

View File

@ -214,9 +214,9 @@ class ITUNES(DriverBase):
"Cannot copy books directly from iDevice. " "Cannot copy books directly from iDevice. "
"Drag from iTunes Library to desktop, then add to calibre's Library window.") "Drag from iTunes Library to desktop, then add to calibre's Library window.")
UNSUPPORTED_DIRECT_CONNECT_MODE_MESSAGE = _( UNSUPPORTED_DIRECT_CONNECT_MODE_MESSAGE = _(
"Unsupported direct connect mode. " "*** Unsupported direct connect mode. "
"See http://www.mobileread.com/forums/showthread.php?t=118559 " "See http://www.mobileread.com/forums/showthread.php?t=118559 "
"for instructions on using 'Connect to iTunes'") "for instructions on using 'Connect to iTunes' ***")
ITUNES_SANDBOX_LOCKOUT_MESSAGE = _( ITUNES_SANDBOX_LOCKOUT_MESSAGE = _(
'<p>Unable to communicate with iTunes.</p>' '<p>Unable to communicate with iTunes.</p>'
'<p>Refer to this ' '<p>Refer to this '
@ -818,7 +818,7 @@ class ITUNES(DriverBase):
if DEBUG: if DEBUG:
logger().info("%s.get_device_information()" % self.__class__.__name__) logger().info("%s.get_device_information()" % self.__class__.__name__)
return (self.sources['iPod'], 'hw v1.0', 'sw v1.0', 'mime type normally goes here') return (self.sources['iPod'], 'hw v1.0', 'sw v1.0', 'unknown mime type')
def get_file(self, path, outfile, end_session=True): def get_file(self, path, outfile, end_session=True):
''' '''
@ -871,13 +871,14 @@ class ITUNES(DriverBase):
product_id product_id
)) ))
if False:
# Display a dialog recommending using 'Connect to iTunes' if user hasn't # Display a dialog recommending using 'Connect to iTunes' if user hasn't
# previously disabled the dialog # previously disabled the dialog
if dynamic.get(confirm_config_name(self.DISPLAY_DISABLE_DIALOG), True): if dynamic.get(confirm_config_name(self.DISPLAY_DISABLE_DIALOG), True):
raise AppleOpenFeedback(self) raise AppleOpenFeedback(self)
else: else:
if DEBUG: if DEBUG:
logger().error(" %s" % self.UNSUPPORTED_DIRECT_CONNECT_MODE_MESSAGE) logger().info(" %s" % self.UNSUPPORTED_DIRECT_CONNECT_MODE_MESSAGE)
# Log supported DEVICE_IDs and BCDs # Log supported DEVICE_IDs and BCDs
logger().info(" BCD: %s" % ['0x%x' % x for x in sorted(self.BCD)]) logger().info(" BCD: %s" % ['0x%x' % x for x in sorted(self.BCD)])
@ -1027,6 +1028,10 @@ class ITUNES(DriverBase):
self.plugboards = plugboards self.plugboards = plugboards
self.plugboard_func = pb_func self.plugboard_func = pb_func
def shutdown(self):
if DEBUG:
logger().info("%s.shutdown()\n" % self.__class__.__name__)
def sync_booklists(self, booklists, end_session=True): def sync_booklists(self, booklists, end_session=True):
''' '''
Update metadata on device. Update metadata on device.
@ -1125,6 +1130,7 @@ class ITUNES(DriverBase):
metadata[i].uuid)) metadata[i].uuid))
self.cached_books[this_book.path] = { self.cached_books[this_book.path] = {
'author': authors_to_string(metadata[i].authors), 'author': authors_to_string(metadata[i].authors),
'authors': metadata[i].authors,
'dev_book': db_added, 'dev_book': db_added,
'format': format, 'format': format,
'lib_book': lb_added, 'lib_book': lb_added,
@ -1171,6 +1177,7 @@ class ITUNES(DriverBase):
metadata[i].uuid)) metadata[i].uuid))
self.cached_books[this_book.path] = { self.cached_books[this_book.path] = {
'author': authors_to_string(metadata[i].authors), 'author': authors_to_string(metadata[i].authors),
'authors': metadata[i].authors,
'dev_book': db_added, 'dev_book': db_added,
'format': format, 'format': format,
'lib_book': lb_added, 'lib_book': lb_added,
@ -1388,21 +1395,18 @@ class ITUNES(DriverBase):
db_added = None db_added = None
lb_added = None lb_added = None
if self.manual_sync_mode:
'''
DC mode. Add to iBooks only.
'''
db_added = self._add_device_book(fpath, metadata)
else:
# If using iTunes_local_storage, copy the file, redirect iTunes to use local copy # If using iTunes_local_storage, copy the file, redirect iTunes to use local copy
if not self.settings().extra_customization[self.USE_ITUNES_STORAGE]: if not self.settings().extra_customization[self.USE_ITUNES_STORAGE]:
local_copy = os.path.join(self.iTunes_local_storage, str(metadata.uuid) + os.path.splitext(fpath)[1]) local_copy = os.path.join(self.iTunes_local_storage, str(metadata.uuid) + os.path.splitext(fpath)[1])
shutil.copyfile(fpath, local_copy) shutil.copyfile(fpath, local_copy)
fpath = local_copy fpath = local_copy
if self.manual_sync_mode:
'''
Unsupported direct-connect mode.
'''
db_added = self._add_device_book(fpath, metadata)
lb_added = self._add_library_book(fpath, metadata)
if not lb_added and DEBUG:
logger().warn(" failed to add '%s' to iTunes, iTunes Media folder inaccessible" % metadata.title)
else:
lb_added = self._add_library_book(fpath, metadata) lb_added = self._add_library_book(fpath, metadata)
if not lb_added: if not lb_added:
raise UserFeedback("iTunes Media folder inaccessible", raise UserFeedback("iTunes Media folder inaccessible",
@ -2336,6 +2340,7 @@ class ITUNES(DriverBase):
except: except:
if DEBUG: if DEBUG:
logger().info(" no books in library") logger().info(" no books in library")
self.library_orphans = library_orphans self.library_orphans = library_orphans
return library_books return library_books
@ -2435,13 +2440,13 @@ class ITUNES(DriverBase):
as_binding = "dynamic" as_binding = "dynamic"
try: try:
# Try dynamic binding - works with iTunes <= 10.6.1 # Try dynamic binding - works with iTunes <= 10.6.1
foo = self.iTunes.name() self.iTunes.name()
except: except:
# Try static binding # Try static binding
import itunes import itunes
self.iTunes = appscript.app('iTunes', terms=itunes) self.iTunes = appscript.app('iTunes', terms=itunes)
try: try:
foo = self.iTunes.name() self.iTunes.name()
as_binding = "static" as_binding = "static"
except: except:
self.iTunes = None self.iTunes = None
@ -2494,8 +2499,8 @@ class ITUNES(DriverBase):
self.iTunes = win32com.client.Dispatch("iTunes.Application") self.iTunes = win32com.client.Dispatch("iTunes.Application")
except: except:
self.iTunes = None self.iTunes = None
raise UserFeedback(' %s._launch_iTunes(): unable to find installed iTunes' raise OpenFeedback('Unable to launch iTunes.\n' +
% self.__class__.__name__, details=None, level=UserFeedback.WARN) 'Try launching calibre as Administrator')
if not DEBUG: if not DEBUG:
self.iTunes.Windows[0].Minimized = True self.iTunes.Windows[0].Minimized = True
@ -2503,8 +2508,7 @@ class ITUNES(DriverBase):
try: try:
# Pre-emptive test to confirm functional iTunes automation interface # Pre-emptive test to confirm functional iTunes automation interface
foo = self.iTunes.Version logger().info(" automation interface with iTunes %s established" % self.iTunes.Version)
foo
except: except:
self.iTunes = None self.iTunes = None
raise OpenFeedback('Unable to connect to iTunes.\n' + raise OpenFeedback('Unable to connect to iTunes.\n' +
@ -2547,7 +2551,6 @@ class ITUNES(DriverBase):
''' '''
PURGE_ORPHANS = False PURGE_ORPHANS = False
if PURGE_ORPHANS:
if DEBUG: if DEBUG:
logger().info(" %s._purge_orphans()" % self.__class__.__name__) logger().info(" %s._purge_orphans()" % self.__class__.__name__)
#self._dump_library_books(library_books) #self._dump_library_books(library_books)
@ -2557,6 +2560,7 @@ class ITUNES(DriverBase):
if isosx: if isosx:
if book not in cached_books and \ if book not in cached_books and \
str(library_books[book].description()).startswith(self.description_prefix): str(library_books[book].description()).startswith(self.description_prefix):
if PURGE_ORPHANS:
if DEBUG: if DEBUG:
logger().info(" '%s' not found on iDevice, removing from iTunes" % book) logger().info(" '%s' not found on iDevice, removing from iTunes" % book)
btr = { btr = {
@ -2564,9 +2568,14 @@ class ITUNES(DriverBase):
'author': library_books[book].artist(), 'author': library_books[book].artist(),
'lib_book': library_books[book]} 'lib_book': library_books[book]}
self._remove_from_iTunes(btr) self._remove_from_iTunes(btr)
else:
if DEBUG:
logger().info(" '%s' found in iTunes, but not on iDevice" % (book))
elif iswindows: elif iswindows:
if book not in cached_books and \ if book not in cached_books and \
library_books[book].Description.startswith(self.description_prefix): library_books[book].Description.startswith(self.description_prefix):
if PURGE_ORPHANS:
if DEBUG: if DEBUG:
logger().info(" '%s' not found on iDevice, removing from iTunes" % book) logger().info(" '%s' not found on iDevice, removing from iTunes" % book)
btr = { btr = {
@ -2576,7 +2585,7 @@ class ITUNES(DriverBase):
self._remove_from_iTunes(btr) self._remove_from_iTunes(btr)
else: else:
if DEBUG: if DEBUG:
logger().info(" %s._purge_orphans(disabled)" % self.__class__.__name__) logger().info(" '%s' found in iTunes, but not on iDevice" % (book))
def _remove_existing_copy(self, path, metadata): def _remove_existing_copy(self, path, metadata):
''' '''
@ -3107,11 +3116,10 @@ class ITUNES(DriverBase):
def _wait_for_writable_metadata(self, db_added, delay=2.0): def _wait_for_writable_metadata(self, db_added, delay=2.0):
''' '''
Ensure iDevice metadata is writable. Direct connect mode only Ensure iDevice metadata is writable. DC mode only
''' '''
if DEBUG: if DEBUG:
logger().info(" %s._wait_for_writable_metadata()" % self.__class__.__name__) logger().info(" %s._wait_for_writable_metadata()" % self.__class__.__name__)
logger().warning(" %s" % self.UNSUPPORTED_DIRECT_CONNECT_MODE_MESSAGE)
attempts = 9 attempts = 9
while attempts: while attempts:

View File

@ -199,6 +199,11 @@ class KTCollectionsBookList(CollectionsBookList):
('series' in collection_attributes and ('series' in collection_attributes and
book.get('series', None) == category): book.get('series', None) == category):
is_series = True is_series = True
# The category should not be None, but, it has happened.
if not category:
continue
cat_name = category.strip(' ,') cat_name = category.strip(' ,')
if cat_name not in collections: if cat_name not in collections:

View File

@ -1537,7 +1537,11 @@ class KOBOTOUCH(KOBO):
return bookshelves return bookshelves
cursor = connection.cursor() cursor = connection.cursor()
query = "select ShelfName from ShelfContent where ContentId = ? and _IsDeleted = 'false'" query = "select ShelfName " \
"from ShelfContent " \
"where ContentId = ? " \
"and _IsDeleted = 'false' " \
"and ShelfName is not null" # This should never be nulll, but it is protection against an error cause by a sync to the Kobo server
values = (ContentID, ) values = (ContentID, )
cursor.execute(query, values) cursor.execute(query, values)
for i, row in enumerate(cursor): for i, row in enumerate(cursor):

View File

@ -13,6 +13,7 @@ from itertools import izip
from calibre import prints from calibre import prints
from calibre.constants import iswindows, numeric_version from calibre.constants import iswindows, numeric_version
from calibre.devices.errors import PathError
from calibre.devices.mtp.base import debug from calibre.devices.mtp.base import debug
from calibre.devices.mtp.defaults import DeviceDefaults from calibre.devices.mtp.defaults import DeviceDefaults
from calibre.ptempfile import SpooledTemporaryFile, PersistentTemporaryDirectory from calibre.ptempfile import SpooledTemporaryFile, PersistentTemporaryDirectory
@ -23,6 +24,12 @@ from calibre.utils.filenames import shorten_components_to
BASE = importlib.import_module('calibre.devices.mtp.%s.driver'%( BASE = importlib.import_module('calibre.devices.mtp.%s.driver'%(
'windows' if iswindows else 'unix')).MTP_DEVICE 'windows' if iswindows else 'unix')).MTP_DEVICE
class MTPInvalidSendPathError(PathError):
def __init__(self, folder):
PathError.__init__(self, 'Trying to send to ignored folder: %s'%folder)
self.folder = folder
class MTP_DEVICE(BASE): class MTP_DEVICE(BASE):
METADATA_CACHE = 'metadata.calibre' METADATA_CACHE = 'metadata.calibre'
@ -46,6 +53,7 @@ class MTP_DEVICE(BASE):
self._prefs = None self._prefs = None
self.device_defaults = DeviceDefaults() self.device_defaults = DeviceDefaults()
self.current_device_defaults = {} self.current_device_defaults = {}
self.highlight_ignored_folders = False
@property @property
def prefs(self): def prefs(self):
@ -59,9 +67,25 @@ class MTP_DEVICE(BASE):
p.defaults['blacklist'] = [] p.defaults['blacklist'] = []
p.defaults['history'] = {} p.defaults['history'] = {}
p.defaults['rules'] = [] p.defaults['rules'] = []
p.defaults['ignored_folders'] = {}
return self._prefs return self._prefs
def is_folder_ignored(self, storage_or_storage_id, name,
ignored_folders=None):
storage_id = unicode(getattr(storage_or_storage_id, 'object_id',
storage_or_storage_id))
name = icu_lower(name)
if ignored_folders is None:
ignored_folders = self.get_pref('ignored_folders')
if storage_id in ignored_folders:
return name in {icu_lower(x) for x in ignored_folders[storage_id]}
return name in {
'alarms', 'android', 'dcim', 'movies', 'music', 'notifications',
'pictures', 'ringtones', 'samsung', 'sony', 'htc', 'bluetooth',
'games', 'lost.dir', 'video', 'whatsapp', 'image'}
def configure_for_kindle_app(self): def configure_for_kindle_app(self):
proxy = self.prefs proxy = self.prefs
with proxy: with proxy:
@ -371,6 +395,8 @@ class MTP_DEVICE(BASE):
for infile, fname, mi in izip(files, names, metadata): for infile, fname, mi in izip(files, names, metadata):
path = self.create_upload_path(prefix, mi, fname, routing) path = self.create_upload_path(prefix, mi, fname, routing)
if path and self.is_folder_ignored(storage, path[0]):
raise MTPInvalidSendPathError(path[0])
parent = self.ensure_parent(storage, path) parent = self.ensure_parent(storage, path)
if hasattr(infile, 'read'): if hasattr(infile, 'read'):
pos = infile.tell() pos = infile.tell()
@ -472,7 +498,7 @@ class MTP_DEVICE(BASE):
def config_widget(self): def config_widget(self):
from calibre.gui2.device_drivers.mtp_config import MTPConfig from calibre.gui2.device_drivers.mtp_config import MTPConfig
return MTPConfig(self) return MTPConfig(self, highlight_ignored_folders=self.highlight_ignored_folders)
def save_settings(self, cw): def save_settings(self, cw):
cw.commit() cw.commit()

View File

@ -239,12 +239,12 @@ class TestDeviceInteraction(unittest.TestCase):
# Test get_filesystem # Test get_filesystem
used_by_one = self.measure_memory_usage(1, used_by_one = self.measure_memory_usage(1,
self.dev.dev.get_filesystem, self.storage.object_id, lambda x: self.dev.dev.get_filesystem, self.storage.object_id,
x) lambda x, l:True)
used_by_many = self.measure_memory_usage(5, used_by_many = self.measure_memory_usage(5,
self.dev.dev.get_filesystem, self.storage.object_id, lambda x: self.dev.dev.get_filesystem, self.storage.object_id,
x) lambda x, l: True)
self.check_memory(used_by_one, used_by_many, self.check_memory(used_by_one, used_by_many,
'Memory consumption during get_filesystem') 'Memory consumption during get_filesystem')

View File

@ -13,6 +13,7 @@ const calibre_device_entry_t calibre_mtp_device_table[] = {
// Amazon Kindle Fire HD // Amazon Kindle Fire HD
, { "Amazon", 0x1949, "Fire HD", 0x0007, DEVICE_FLAGS_ANDROID_BUGS} , { "Amazon", 0x1949, "Fire HD", 0x0007, DEVICE_FLAGS_ANDROID_BUGS}
, { "Amazon", 0x1949, "Fire HD", 0x000a, DEVICE_FLAGS_ANDROID_BUGS}
// Nexus 10 // Nexus 10
, { "Google", 0x18d1, "Nexus 10", 0x4ee2, DEVICE_FLAGS_ANDROID_BUGS} , { "Google", 0x18d1, "Nexus 10", 0x4ee2, DEVICE_FLAGS_ANDROID_BUGS}

View File

@ -212,8 +212,13 @@ class MTP_DEVICE(MTPDeviceBase):
ans += pprint.pformat(storage) ans += pprint.pformat(storage)
return ans return ans
def _filesystem_callback(self, entry): def _filesystem_callback(self, entry, level):
self.filesystem_callback(_('Found object: %s')%entry.get('name', '')) name = entry.get('name', '')
self.filesystem_callback(_('Found object: %s')%name)
if (level == 0 and
self.is_folder_ignored(self._currently_getting_sid, name)):
return False
return True
@property @property
def filesystem_cache(self): def filesystem_cache(self):
@ -234,6 +239,7 @@ class MTP_DEVICE(MTPDeviceBase):
storage.append({'id':sid, 'size':capacity, storage.append({'id':sid, 'size':capacity,
'is_folder':True, 'name':name, 'can_delete':False, 'is_folder':True, 'name':name, 'can_delete':False,
'is_system':True}) 'is_system':True})
self._currently_getting_sid = unicode(sid)
items, errs = self.dev.get_filesystem(sid, items, errs = self.dev.get_filesystem(sid,
self._filesystem_callback) self._filesystem_callback)
all_items.extend(items), all_errs.extend(errs) all_items.extend(items), all_errs.extend(errs)

View File

@ -122,7 +122,7 @@ static PyObject* build_file_metadata(LIBMTP_file_t *nf, uint32_t storage_id) {
PyObject *ans = NULL; PyObject *ans = NULL;
ans = Py_BuildValue("{s:s, s:k, s:k, s:k, s:K, s:L, s:O}", ans = Py_BuildValue("{s:s, s:k, s:k, s:k, s:K, s:L, s:O}",
"name", (unsigned long)nf->filename, "name", nf->filename,
"id", (unsigned long)nf->item_id, "id", (unsigned long)nf->item_id,
"parent_id", (unsigned long)nf->parent_id, "parent_id", (unsigned long)nf->parent_id,
"storage_id", (unsigned long)storage_id, "storage_id", (unsigned long)storage_id,
@ -357,10 +357,10 @@ Device_storage_info(Device *self, void *closure) {
// Device.get_filesystem {{{ // Device.get_filesystem {{{
static int recursive_get_files(LIBMTP_mtpdevice_t *dev, uint32_t storage_id, uint32_t parent_id, PyObject *ans, PyObject *errs, PyObject *callback) { static int recursive_get_files(LIBMTP_mtpdevice_t *dev, uint32_t storage_id, uint32_t parent_id, PyObject *ans, PyObject *errs, PyObject *callback, unsigned int level) {
LIBMTP_file_t *f, *files; LIBMTP_file_t *f, *files;
PyObject *entry; PyObject *entry, *r;
int ok = 1; int ok = 1, recurse;
Py_BEGIN_ALLOW_THREADS; Py_BEGIN_ALLOW_THREADS;
files = LIBMTP_Get_Files_And_Folders(dev, storage_id, parent_id); files = LIBMTP_Get_Files_And_Folders(dev, storage_id, parent_id);
@ -372,13 +372,15 @@ static int recursive_get_files(LIBMTP_mtpdevice_t *dev, uint32_t storage_id, uin
entry = build_file_metadata(f, storage_id); entry = build_file_metadata(f, storage_id);
if (entry == NULL) { ok = 0; } if (entry == NULL) { ok = 0; }
else { else {
Py_XDECREF(PyObject_CallFunctionObjArgs(callback, entry, NULL)); r = PyObject_CallFunction(callback, "OI", entry, level);
recurse = (r != NULL && PyObject_IsTrue(r)) ? 1 : 0;
Py_XDECREF(r);
if (PyList_Append(ans, entry) != 0) { ok = 0; } if (PyList_Append(ans, entry) != 0) { ok = 0; }
Py_DECREF(entry); Py_DECREF(entry);
} }
if (ok && f->filetype == LIBMTP_FILETYPE_FOLDER) { if (ok && recurse && f->filetype == LIBMTP_FILETYPE_FOLDER) {
if (!recursive_get_files(dev, storage_id, f->item_id, ans, errs, callback)) { if (!recursive_get_files(dev, storage_id, f->item_id, ans, errs, callback, level+1)) {
ok = 0; ok = 0;
} }
} }
@ -408,7 +410,7 @@ Device_get_filesystem(Device *self, PyObject *args) {
if (errs == NULL || ans == NULL) { PyErr_NoMemory(); return NULL; } if (errs == NULL || ans == NULL) { PyErr_NoMemory(); return NULL; }
LIBMTP_Clear_Errorstack(self->device); LIBMTP_Clear_Errorstack(self->device);
ok = recursive_get_files(self->device, (uint32_t)storage_id, 0, ans, errs, callback); ok = recursive_get_files(self->device, (uint32_t)storage_id, 0xFFFFFFFF, ans, errs, callback, 0);
dump_errorstack(self->device, errs); dump_errorstack(self->device, errs);
if (!ok) { if (!ok) {
Py_DECREF(ans); Py_DECREF(ans);
@ -537,7 +539,7 @@ static PyMethodDef Device_methods[] = {
}, },
{"get_filesystem", (PyCFunction)Device_get_filesystem, METH_VARARGS, {"get_filesystem", (PyCFunction)Device_get_filesystem, METH_VARARGS,
"get_filesystem(storage_id, callback) -> Get the list of files and folders on the device in storage_id. Returns files, errors. callback must be a callable that accepts a single argument. It is called with every found object." "get_filesystem(storage_id, callback) -> Get the list of files and folders on the device in storage_id. Returns files, errors. callback must be a callable that is called as with (entry, level). It is called with every found object. If callback returns False and the object is a folder, it is not recursed into."
}, },
{"get_file", (PyCFunction)Device_get_file, METH_VARARGS, {"get_file", (PyCFunction)Device_get_file, METH_VARARGS,

View File

@ -133,12 +133,14 @@ class GetBulkCallback : public IPortableDevicePropertiesBulkCallback {
public: public:
PyObject *items; PyObject *items;
PyObject *subfolders;
unsigned int level;
HANDLE complete; HANDLE complete;
ULONG self_ref; ULONG self_ref;
PyThreadState *thread_state; PyThreadState *thread_state;
PyObject *callback; PyObject *callback;
GetBulkCallback(PyObject *items_dict, HANDLE ev, PyObject* pycallback) : items(items_dict), complete(ev), self_ref(1), thread_state(NULL), callback(pycallback) {} GetBulkCallback(PyObject *items_dict, PyObject *subfolders, unsigned int level, HANDLE ev, PyObject* pycallback) : items(items_dict), subfolders(subfolders), level(level), complete(ev), self_ref(1), thread_state(NULL), callback(pycallback) {}
~GetBulkCallback() {} ~GetBulkCallback() {}
HRESULT __stdcall OnStart(REFGUID Context) { return S_OK; } HRESULT __stdcall OnStart(REFGUID Context) { return S_OK; }
@ -172,7 +174,7 @@ public:
DWORD num = 0, i; DWORD num = 0, i;
wchar_t *property = NULL; wchar_t *property = NULL;
IPortableDeviceValues *properties = NULL; IPortableDeviceValues *properties = NULL;
PyObject *temp, *obj; PyObject *temp, *obj, *r;
HRESULT hr; HRESULT hr;
if (SUCCEEDED(values->GetCount(&num))) { if (SUCCEEDED(values->GetCount(&num))) {
@ -196,7 +198,11 @@ public:
Py_DECREF(temp); Py_DECREF(temp);
set_properties(obj, properties); set_properties(obj, properties);
Py_XDECREF(PyObject_CallFunctionObjArgs(callback, obj, NULL)); r = PyObject_CallFunction(callback, "OI", obj, this->level);
if (r != NULL && PyObject_IsTrue(r)) {
PyList_Append(this->subfolders, PyDict_GetItemString(obj, "id"));
}
Py_XDECREF(r);
properties->Release(); properties = NULL; properties->Release(); properties = NULL;
} }
@ -209,8 +215,7 @@ public:
}; };
static PyObject* bulk_get_filesystem(IPortableDevice *device, IPortableDevicePropertiesBulk *bulk_properties, const wchar_t *storage_id, IPortableDevicePropVariantCollection *object_ids, PyObject *pycallback) { static bool bulk_get_filesystem(unsigned int level, IPortableDevice *device, IPortableDevicePropertiesBulk *bulk_properties, IPortableDevicePropVariantCollection *object_ids, PyObject *pycallback, PyObject *ans, PyObject *subfolders) {
PyObject *folders = NULL;
GUID guid_context = GUID_NULL; GUID guid_context = GUID_NULL;
HANDLE ev = NULL; HANDLE ev = NULL;
IPortableDeviceKeyCollection *properties; IPortableDeviceKeyCollection *properties;
@ -218,18 +223,15 @@ static PyObject* bulk_get_filesystem(IPortableDevice *device, IPortableDevicePro
HRESULT hr; HRESULT hr;
DWORD wait_result; DWORD wait_result;
int pump_result; int pump_result;
BOOL ok = TRUE; bool ok = true;
ev = CreateEvent(NULL, FALSE, FALSE, NULL); ev = CreateEvent(NULL, FALSE, FALSE, NULL);
if (ev == NULL) return PyErr_NoMemory(); if (ev == NULL) {PyErr_NoMemory(); return false; }
folders = PyDict_New();
if (folders == NULL) {PyErr_NoMemory(); goto end;}
properties = create_filesystem_properties_collection(); properties = create_filesystem_properties_collection();
if (properties == NULL) goto end; if (properties == NULL) goto end;
callback = new (std::nothrow) GetBulkCallback(folders, ev, pycallback); callback = new (std::nothrow) GetBulkCallback(ans, subfolders, level, ev, pycallback);
if (callback == NULL) { PyErr_NoMemory(); goto end; } if (callback == NULL) { PyErr_NoMemory(); goto end; }
hr = bulk_properties->QueueGetValuesByObjectList(object_ids, properties, callback, &guid_context); hr = bulk_properties->QueueGetValuesByObjectList(object_ids, properties, callback, &guid_context);
@ -245,13 +247,13 @@ static PyObject* bulk_get_filesystem(IPortableDevice *device, IPortableDevicePro
break; // Event was signalled, bulk operation complete break; // Event was signalled, bulk operation complete
} else if (wait_result == WAIT_OBJECT_0 + 1) { // Messages need to be dispatched } else if (wait_result == WAIT_OBJECT_0 + 1) { // Messages need to be dispatched
pump_result = pump_waiting_messages(); pump_result = pump_waiting_messages();
if (pump_result == 1) { PyErr_SetString(PyExc_RuntimeError, "Application has been asked to quit."); ok = FALSE; break;} if (pump_result == 1) { PyErr_SetString(PyExc_RuntimeError, "Application has been asked to quit."); ok = false; break;}
} else if (wait_result == WAIT_TIMEOUT) { } else if (wait_result == WAIT_TIMEOUT) {
// 60 seconds with no updates, looks bad // 60 seconds with no updates, looks bad
PyErr_SetString(WPDError, "The device seems to have hung."); ok = FALSE; break; PyErr_SetString(WPDError, "The device seems to have hung."); ok = false; break;
} else if (wait_result == WAIT_ABANDONED_0) { } else if (wait_result == WAIT_ABANDONED_0) {
// This should never happen // This should never happen
PyErr_SetString(WPDError, "An unknown error occurred (mutex abandoned)"); ok = FALSE; break; PyErr_SetString(WPDError, "An unknown error occurred (mutex abandoned)"); ok = false; break;
} else { } else {
// The wait failed for some reason // The wait failed for some reason
PyErr_SetFromWindowsErr(0); ok = FALSE; break; PyErr_SetFromWindowsErr(0); ok = FALSE; break;
@ -261,22 +263,21 @@ static PyObject* bulk_get_filesystem(IPortableDevice *device, IPortableDevicePro
if (!ok) { if (!ok) {
bulk_properties->Cancel(guid_context); bulk_properties->Cancel(guid_context);
pump_waiting_messages(); pump_waiting_messages();
Py_DECREF(folders); folders = NULL;
} }
end: end:
if (ev != NULL) CloseHandle(ev); if (ev != NULL) CloseHandle(ev);
if (properties != NULL) properties->Release(); if (properties != NULL) properties->Release();
if (callback != NULL) callback->Release(); if (callback != NULL) callback->Release();
return folders; return ok;
} }
// }}} // }}}
// find_all_objects_in() {{{ // find_objects_in() {{{
static BOOL find_all_objects_in(IPortableDeviceContent *content, IPortableDevicePropVariantCollection *object_ids, const wchar_t *parent_id, PyObject *callback) { static bool find_objects_in(IPortableDeviceContent *content, IPortableDevicePropVariantCollection *object_ids, const wchar_t *parent_id) {
/* /*
* Find all children of the object identified by parent_id, recursively. * Find all children of the object identified by parent_id.
* The child ids are put into object_ids. Returns False if any errors * The child ids are put into object_ids. Returns False if any errors
* occurred (also sets the python exception). * occurred (also sets the python exception).
*/ */
@ -285,8 +286,7 @@ static BOOL find_all_objects_in(IPortableDeviceContent *content, IPortableDevice
PWSTR child_ids[10]; PWSTR child_ids[10];
DWORD fetched, i; DWORD fetched, i;
PROPVARIANT pv; PROPVARIANT pv;
BOOL ok = 1; bool ok = true;
PyObject *id;
PropVariantInit(&pv); PropVariantInit(&pv);
pv.vt = VT_LPWSTR; pv.vt = VT_LPWSTR;
@ -295,7 +295,7 @@ static BOOL find_all_objects_in(IPortableDeviceContent *content, IPortableDevice
hr = content->EnumObjects(0, parent_id, NULL, &children); hr = content->EnumObjects(0, parent_id, NULL, &children);
Py_END_ALLOW_THREADS; Py_END_ALLOW_THREADS;
if (FAILED(hr)) {hresult_set_exc("Failed to get children from device", hr); ok = 0; goto end;} if (FAILED(hr)) {hresult_set_exc("Failed to get children from device", hr); ok = false; goto end;}
hr = S_OK; hr = S_OK;
@ -306,19 +306,12 @@ static BOOL find_all_objects_in(IPortableDeviceContent *content, IPortableDevice
if (SUCCEEDED(hr)) { if (SUCCEEDED(hr)) {
for(i = 0; i < fetched; i++) { for(i = 0; i < fetched; i++) {
pv.pwszVal = child_ids[i]; pv.pwszVal = child_ids[i];
id = wchar_to_unicode(pv.pwszVal);
if (id != NULL) {
Py_XDECREF(PyObject_CallFunctionObjArgs(callback, id, NULL));
Py_DECREF(id);
}
hr2 = object_ids->Add(&pv); hr2 = object_ids->Add(&pv);
pv.pwszVal = NULL; pv.pwszVal = NULL;
if (FAILED(hr2)) { hresult_set_exc("Failed to add child ids to propvariantcollection", hr2); break; } if (FAILED(hr2)) { hresult_set_exc("Failed to add child ids to propvariantcollection", hr2); break; }
ok = find_all_objects_in(content, object_ids, child_ids[i], callback);
if (!ok) break;
} }
for (i = 0; i < fetched; i++) { CoTaskMemFree(child_ids[i]); child_ids[i] = NULL; } for (i = 0; i < fetched; i++) { CoTaskMemFree(child_ids[i]); child_ids[i] = NULL; }
if (FAILED(hr2) || !ok) { ok = 0; goto end; } if (FAILED(hr2) || !ok) { ok = false; goto end; }
} }
} }
@ -340,13 +333,8 @@ static PyObject* get_object_properties(IPortableDeviceProperties *devprops, IPor
Py_END_ALLOW_THREADS; Py_END_ALLOW_THREADS;
if (FAILED(hr)) { hresult_set_exc("Failed to get properties for object", hr); goto end; } if (FAILED(hr)) { hresult_set_exc("Failed to get properties for object", hr); goto end; }
temp = wchar_to_unicode(object_id); ans = Py_BuildValue("{s:N}", "id", wchar_to_unicode(object_id));
if (temp == NULL) goto end; if (ans == NULL) goto end;
ans = PyDict_New();
if (ans == NULL) { PyErr_NoMemory(); goto end; }
if (PyDict_SetItemString(ans, "id", temp) != 0) { Py_DECREF(ans); ans = NULL; PyErr_NoMemory(); goto end; }
set_properties(ans, values); set_properties(ans, values);
end: end:
@ -355,12 +343,12 @@ end:
return ans; return ans;
} }
static PyObject* single_get_filesystem(IPortableDeviceContent *content, const wchar_t *storage_id, IPortableDevicePropVariantCollection *object_ids, PyObject *callback) { static bool single_get_filesystem(unsigned int level, IPortableDeviceContent *content, IPortableDevicePropVariantCollection *object_ids, PyObject *callback, PyObject *ans, PyObject *subfolders) {
DWORD num, i; DWORD num, i;
PROPVARIANT pv; PROPVARIANT pv;
HRESULT hr; HRESULT hr;
BOOL ok = 1; bool ok = true;
PyObject *ans = NULL, *item = NULL; PyObject *item = NULL, *r = NULL, *recurse = NULL;
IPortableDeviceProperties *devprops = NULL; IPortableDeviceProperties *devprops = NULL;
IPortableDeviceKeyCollection *properties = NULL; IPortableDeviceKeyCollection *properties = NULL;
@ -373,32 +361,36 @@ static PyObject* single_get_filesystem(IPortableDeviceContent *content, const wc
hr = object_ids->GetCount(&num); hr = object_ids->GetCount(&num);
if (FAILED(hr)) { hresult_set_exc("Failed to get object id count", hr); goto end; } if (FAILED(hr)) { hresult_set_exc("Failed to get object id count", hr); goto end; }
ans = PyDict_New();
if (ans == NULL) goto end;
for (i = 0; i < num; i++) { for (i = 0; i < num; i++) {
ok = 0; ok = false;
recurse = NULL;
PropVariantInit(&pv); PropVariantInit(&pv);
hr = object_ids->GetAt(i, &pv); hr = object_ids->GetAt(i, &pv);
if (SUCCEEDED(hr) && pv.pwszVal != NULL) { if (SUCCEEDED(hr) && pv.pwszVal != NULL) {
item = get_object_properties(devprops, properties, pv.pwszVal); item = get_object_properties(devprops, properties, pv.pwszVal);
if (item != NULL) { if (item != NULL) {
Py_XDECREF(PyObject_CallFunctionObjArgs(callback, item, NULL)); r = PyObject_CallFunction(callback, "OI", item, level);
if (r != NULL && PyObject_IsTrue(r)) recurse = item;
Py_XDECREF(r);
PyDict_SetItem(ans, PyDict_GetItemString(item, "id"), item); PyDict_SetItem(ans, PyDict_GetItemString(item, "id"), item);
Py_DECREF(item); item = NULL; Py_DECREF(item); item = NULL;
ok = 1; ok = true;
} }
} else hresult_set_exc("Failed to get item from IPortableDevicePropVariantCollection", hr); } else hresult_set_exc("Failed to get item from IPortableDevicePropVariantCollection", hr);
PropVariantClear(&pv); PropVariantClear(&pv);
if (!ok) { Py_DECREF(ans); ans = NULL; break; } if (!ok) break;
if (recurse != NULL) {
if (PyList_Append(subfolders, PyDict_GetItemString(recurse, "id")) == -1) ok = false;
}
if (!ok) break;
} }
end: end:
if (devprops != NULL) devprops->Release(); if (devprops != NULL) devprops->Release();
if (properties != NULL) properties->Release(); if (properties != NULL) properties->Release();
return ans; return ok;
} }
// }}} // }}}
@ -438,35 +430,60 @@ end:
return values; return values;
} // }}} } // }}}
PyObject* wpd::get_filesystem(IPortableDevice *device, const wchar_t *storage_id, IPortableDevicePropertiesBulk *bulk_properties, PyObject *callback) { // {{{ static bool get_files_and_folders(unsigned int level, IPortableDevice *device, IPortableDeviceContent *content, IPortableDevicePropertiesBulk *bulk_properties, const wchar_t *parent_id, PyObject *callback, PyObject *ans) { // {{{
PyObject *folders = NULL; bool ok = true;
IPortableDevicePropVariantCollection *object_ids = NULL; IPortableDevicePropVariantCollection *object_ids = NULL;
PyObject *subfolders = NULL;
HRESULT hr;
subfolders = PyList_New(0);
if (subfolders == NULL) { ok = false; goto end; }
Py_BEGIN_ALLOW_THREADS;
hr = CoCreateInstance(CLSID_PortableDevicePropVariantCollection, NULL,
CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&object_ids));
Py_END_ALLOW_THREADS;
if (FAILED(hr)) { hresult_set_exc("Failed to create propvariantcollection", hr); ok = false; goto end; }
ok = find_objects_in(content, object_ids, parent_id);
if (!ok) goto end;
if (bulk_properties != NULL) ok = bulk_get_filesystem(level, device, bulk_properties, object_ids, callback, ans, subfolders);
else ok = single_get_filesystem(level, content, object_ids, callback, ans, subfolders);
if (!ok) goto end;
for (Py_ssize_t i = 0; i < PyList_GET_SIZE(subfolders); i++) {
const wchar_t *child_id = unicode_to_wchar(PyList_GET_ITEM(subfolders, i));
if (child_id == NULL) { ok = false; break; }
ok = get_files_and_folders(level+1, device, content, bulk_properties, child_id, callback, ans);
if (!ok) break;
}
end:
if (object_ids != NULL) object_ids->Release();
Py_XDECREF(subfolders);
return ok;
} // }}}
PyObject* wpd::get_filesystem(IPortableDevice *device, const wchar_t *storage_id, IPortableDevicePropertiesBulk *bulk_properties, PyObject *callback) { // {{{
PyObject *ans = NULL;
IPortableDeviceContent *content = NULL; IPortableDeviceContent *content = NULL;
HRESULT hr; HRESULT hr;
BOOL ok;
ans = PyDict_New();
if (ans == NULL) return PyErr_NoMemory();
Py_BEGIN_ALLOW_THREADS; Py_BEGIN_ALLOW_THREADS;
hr = device->Content(&content); hr = device->Content(&content);
Py_END_ALLOW_THREADS; Py_END_ALLOW_THREADS;
if (FAILED(hr)) { hresult_set_exc("Failed to create content interface", hr); goto end; } if (FAILED(hr)) { hresult_set_exc("Failed to create content interface", hr); goto end; }
Py_BEGIN_ALLOW_THREADS; if (!get_files_and_folders(0, device, content, bulk_properties, storage_id, callback, ans)) {
hr = CoCreateInstance(CLSID_PortableDevicePropVariantCollection, NULL, Py_DECREF(ans); ans = NULL;
CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&object_ids)); }
Py_END_ALLOW_THREADS;
if (FAILED(hr)) { hresult_set_exc("Failed to create propvariantcollection", hr); goto end; }
ok = find_all_objects_in(content, object_ids, storage_id, callback);
if (!ok) goto end;
if (bulk_properties != NULL) folders = bulk_get_filesystem(device, bulk_properties, storage_id, object_ids, callback);
else folders = single_get_filesystem(content, storage_id, object_ids, callback);
end: end:
if (content != NULL) content->Release(); if (content != NULL) content->Release();
if (object_ids != NULL) object_ids->Release(); return ans;
return folders;
} // }}} } // }}}
PyObject* wpd::get_file(IPortableDevice *device, const wchar_t *object_id, PyObject *dest, PyObject *callback) { // {{{ PyObject* wpd::get_file(IPortableDevice *device, const wchar_t *object_id, PyObject *dest, PyObject *callback) { // {{{

View File

@ -164,7 +164,7 @@ static PyMethodDef Device_methods[] = {
}, },
{"get_filesystem", (PyCFunction)py_get_filesystem, METH_VARARGS, {"get_filesystem", (PyCFunction)py_get_filesystem, METH_VARARGS,
"get_filesystem(storage_id, callback) -> Get all files/folders on the storage identified by storage_id. Tries to use bulk operations when possible. callback must be a callable that accepts a single argument. It is called with every found id and then with the metadata for every id." "get_filesystem(storage_id, callback) -> Get all files/folders on the storage identified by storage_id. Tries to use bulk operations when possible. callback must be a callable that is called as (object, level). It is called with every found object. If the callback returns False and the object is a folder, it is not recursed into."
}, },
{"get_file", (PyCFunction)py_get_file, METH_VARARGS, {"get_file", (PyCFunction)py_get_file, METH_VARARGS,

View File

@ -214,13 +214,14 @@ class MTP_DEVICE(MTPDeviceBase):
return True return True
def _filesystem_callback(self, obj): def _filesystem_callback(self, obj, level):
if isinstance(obj, dict):
n = obj.get('name', '') n = obj.get('name', '')
msg = _('Found object: %s')%n msg = _('Found object: %s')%n
else: if (level == 0 and
msg = _('Found id: %s')%obj self.is_folder_ignored(self._currently_getting_sid, n)):
return False
self.filesystem_callback(msg) self.filesystem_callback(msg)
return obj.get('is_folder', False)
@property @property
def filesystem_cache(self): def filesystem_cache(self):
@ -241,6 +242,7 @@ class MTP_DEVICE(MTPDeviceBase):
break break
storage = {'id':storage_id, 'size':capacity, 'name':name, storage = {'id':storage_id, 'size':capacity, 'name':name,
'is_folder':True, 'can_delete':False, 'is_system':True} 'is_folder':True, 'can_delete':False, 'is_system':True}
self._currently_getting_sid = unicode(storage_id)
id_map = self.dev.get_filesystem(storage_id, id_map = self.dev.get_filesystem(storage_id,
self._filesystem_callback) self._filesystem_callback)
for x in id_map.itervalues(): x['storage_id'] = storage_id for x in id_map.itervalues(): x['storage_id'] = storage_id

View File

@ -12,24 +12,24 @@ pprint, io
def build(mod='wpd'): def build(mod='wpd'):
master = subprocess.Popen('ssh -MN getafix'.split()) master = subprocess.Popen('ssh -MN getafix'.split())
master2 = subprocess.Popen('ssh -MN xp_build'.split()) master2 = subprocess.Popen('ssh -MN win64'.split())
try: try:
while not glob.glob(os.path.expanduser('~/.ssh/*kovid@xp_build*')): while not glob.glob(os.path.expanduser('~/.ssh/*kovid@win64*')):
time.sleep(0.05) time.sleep(0.05)
builder = subprocess.Popen('ssh xp_build ~/build-wpd'.split()) builder = subprocess.Popen('ssh win64 ~/build-wpd'.split())
if builder.wait() != 0: if builder.wait() != 0:
raise Exception('Failed to build plugin') raise Exception('Failed to build plugin')
while not glob.glob(os.path.expanduser('~/.ssh/*kovid@getafix*')): while not glob.glob(os.path.expanduser('~/.ssh/*kovid@getafix*')):
time.sleep(0.05) time.sleep(0.05)
syncer = subprocess.Popen('ssh getafix ~/test-wpd'.split()) syncer = subprocess.Popen('ssh getafix ~/update-calibre'.split())
if syncer.wait() != 0: if syncer.wait() != 0:
raise Exception('Failed to rsync to getafix') raise Exception('Failed to rsync to getafix')
subprocess.check_call( subprocess.check_call(
('scp xp_build:build/calibre/src/calibre/plugins/%s.pyd /tmp'%mod).split()) ('scp win64:build/calibre/src/calibre/plugins/%s.pyd /tmp'%mod).split())
subprocess.check_call( subprocess.check_call(
('scp /tmp/%s.pyd getafix:calibre/src/calibre/devices/mtp/windows'%mod).split()) ('scp /tmp/%s.pyd getafix:calibre-src/src/calibre/devices/mtp/windows'%mod).split())
p = subprocess.Popen( p = subprocess.Popen(
'ssh getafix calibre-debug -e calibre/src/calibre/devices/mtp/windows/remote.py'.split()) 'ssh getafix calibre-debug -e calibre-src/src/calibre/devices/mtp/windows/remote.py'.split())
p.wait() p.wait()
print() print()
finally: finally:
@ -59,7 +59,7 @@ def main():
# return # return
from calibre.devices.scanner import win_scanner from calibre.devices.scanner import win_scanner
from calibre.devices.mtp.windows.driver import MTP_DEVICE from calibre.devices.mtp.driver import MTP_DEVICE
dev = MTP_DEVICE(None) dev = MTP_DEVICE(None)
dev.startup() dev.startup()
print (dev.wpd, dev.wpd_error) print (dev.wpd, dev.wpd_error)

View File

@ -335,32 +335,50 @@ class HeuristicProcessor(object):
This function intentionally leaves hyphenated content alone as that is handled by the This function intentionally leaves hyphenated content alone as that is handled by the
dehyphenate routine in a separate step dehyphenate routine in a separate step
''' '''
def style_unwrap(match):
style_close = match.group('style_close')
style_open = match.group('style_open')
if style_open and style_close:
return style_close+' '+style_open
elif style_open and not style_close:
return ' '+style_open
elif not style_open and style_close:
return style_close+' '
else:
return ' '
# define the pieces of the regex # define the pieces of the regex
lookahead = "(?<=.{"+str(length)+u"}([a-zäëïöüàèìòùáćéíĺóŕńśúýâêîôûçąężıãõñæøþðßěľščťžňďřů,:)\IA\u00DF]|(?<!\&\w{4});))" # (?<!\&\w{4});) is a semicolon not part of an entity lookahead = "(?<=.{"+str(length)+u"}([a-zäëïöüàèìòùáćéíĺóŕńśúýâêîôûçąężıãõñæøþðßěľščťžňďřů,:)\IA\u00DF]|(?<!\&\w{4});))" # (?<!\&\w{4});) is a semicolon not part of an entity
em_en_lookahead = "(?<=.{"+str(length)+u"}[\u2013\u2014])" em_en_lookahead = "(?<=.{"+str(length)+u"}[\u2013\u2014])"
soft_hyphen = u"\xad" soft_hyphen = u"\xad"
line_ending = "\s*</(span|[iubp]|div)>\s*(</(span|[iubp]|div)>)?" line_ending = "\s*(?P<style_close></(span|[iub])>)?\s*(</(p|div)>)?"
blanklines = "\s*(?P<up2threeblanks><(p|span|div)[^>]*>\s*(<(p|span|div)[^>]*>\s*</(span|p|div)>\s*)</(span|p|div)>\s*){0,3}\s*" blanklines = "\s*(?P<up2threeblanks><(p|span|div)[^>]*>\s*(<(p|span|div)[^>]*>\s*</(span|p|div)>\s*)</(span|p|div)>\s*){0,3}\s*"
line_opening = "<(span|[iubp]|div)[^>]*>\s*(<(span|[iubp]|div)[^>]*>)?\s*" line_opening = "<(p|div)[^>]*>\s*(?P<style_open><(span|[iub])[^>]*>)?\s*"
txt_line_wrap = u"((\u0020|\u0009)*\n){1,4}" txt_line_wrap = u"((\u0020|\u0009)*\n){1,4}"
unwrap_regex = lookahead+line_ending+blanklines+line_opening
em_en_unwrap_regex = em_en_lookahead+line_ending+blanklines+line_opening
shy_unwrap_regex = soft_hyphen+line_ending+blanklines+line_opening
if format == 'txt': if format == 'txt':
unwrap_regex = lookahead+txt_line_wrap unwrap_regex = lookahead+txt_line_wrap
em_en_unwrap_regex = em_en_lookahead+txt_line_wrap em_en_unwrap_regex = em_en_lookahead+txt_line_wrap
shy_unwrap_regex = soft_hyphen+txt_line_wrap shy_unwrap_regex = soft_hyphen+txt_line_wrap
else:
unwrap_regex = lookahead+line_ending+blanklines+line_opening
em_en_unwrap_regex = em_en_lookahead+line_ending+blanklines+line_opening
shy_unwrap_regex = soft_hyphen+line_ending+blanklines+line_opening
unwrap = re.compile(u"%s" % unwrap_regex, re.UNICODE) unwrap = re.compile(u"%s" % unwrap_regex, re.UNICODE)
em_en_unwrap = re.compile(u"%s" % em_en_unwrap_regex, re.UNICODE) em_en_unwrap = re.compile(u"%s" % em_en_unwrap_regex, re.UNICODE)
shy_unwrap = re.compile(u"%s" % shy_unwrap_regex, re.UNICODE) shy_unwrap = re.compile(u"%s" % shy_unwrap_regex, re.UNICODE)
if format == 'txt':
content = unwrap.sub(' ', content) content = unwrap.sub(' ', content)
content = em_en_unwrap.sub('', content) content = em_en_unwrap.sub('', content)
content = shy_unwrap.sub('', content) content = shy_unwrap.sub('', content)
else:
content = unwrap.sub(style_unwrap, content)
content = em_en_unwrap.sub(style_unwrap, content)
content = shy_unwrap.sub(style_unwrap, content)
return content return content
def txt_process(self, match): def txt_process(self, match):

View File

@ -435,7 +435,7 @@ class Worker(Thread): # Get details {{{
def parse_cover(self, root): def parse_cover(self, root):
imgs = root.xpath('//img[@id="prodImage" and @src]') imgs = root.xpath('//img[(@id="prodImage" or @id="original-main-image") and @src]')
if imgs: if imgs:
src = imgs[0].get('src') src = imgs[0].get('src')
if '/no-image-avail' not in src: if '/no-image-avail' not in src:

View File

@ -483,8 +483,8 @@ def identify(log, abort, # {{{
log('The identify phase took %.2f seconds'%(time.time() - start_time)) log('The identify phase took %.2f seconds'%(time.time() - start_time))
log('The longest time (%f) was taken by:'%longest, lp) log('The longest time (%f) was taken by:'%longest, lp)
log('Merging results from different sources and finding earliest', log('Merging results from different sources and finding earliest ',
'publication dates from the xisbn service') 'publication dates from the worldcat.org service')
start_time = time.time() start_time = time.time()
results = merge_identify_results(results, log) results = merge_identify_results(results, log)

View File

@ -126,6 +126,7 @@ class EXTHHeader(object): # {{{
elif idx == 113: # ASIN or other id elif idx == 113: # ASIN or other id
try: try:
self.uuid = content.decode('ascii') self.uuid = content.decode('ascii')
self.mi.set_identifier('mobi-asin', self.uuid)
except: except:
self.uuid = None self.uuid = None
elif idx == 116: elif idx == 116:

View File

@ -356,7 +356,7 @@ class CSSFlattener(object):
if 'bgcolor' in node.attrib: if 'bgcolor' in node.attrib:
try: try:
cssdict['background-color'] = Property('background-color', node.attrib['bgcolor']).value cssdict['background-color'] = Property('background-color', node.attrib['bgcolor']).value
except ValueError: except (ValueError, SyntaxErr):
pass pass
del node.attrib['bgcolor'] del node.attrib['bgcolor']
if cssdict.get('font-weight', '').lower() == 'medium': if cssdict.get('font-weight', '').lower() == 'medium':

View File

@ -0,0 +1,11 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

View File

@ -0,0 +1,152 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import codecs, zlib
from io import BytesIO
EOL = b'\n'
# Sizes {{{
# Unit conversions: PDF user space units are points (1/72 inch)
inch = 72.0
cm = inch / 2.54
mm = cm * 0.1
pica = 12.0

# ISO A series, based on A4 = 21 x 29.7 cm. Each size is a
# (width, height) tuple in points, portrait orientation.
_W, _H = (21*cm, 29.7*cm)

A6 = (_W*.5, _H*.5)
A5 = (_H*.5, _W)
A4 = (_W, _H)
A3 = (_H, _W*2)
A2 = (_W*2, _H*2)
A1 = (_H*2, _W*4)
A0 = (_W*4, _H*4)

# North American sizes
LETTER = (8.5*inch, 11*inch)
LEGAL = (8.5*inch, 14*inch)
ELEVENSEVENTEEN = (11*inch, 17*inch)

# ISO B series, based on B4 = 25 x 35.3 cm
_BW, _BH = (25*cm, 35.3*cm)
B6 = (_BW*.5, _BH*.5)
B5 = (_BH*.5, _BW)
B4 = (_BW, _BH)
# Fixed: B3 was (_BH*2, _BW) and B1 was (_BH*4, _BW*2), which do not match
# the ISO 216 B series (B3 = 35.3 x 50 cm, B1 = 70.6 x 100 cm); the
# multipliers follow the same halving/doubling pattern as the A series.
B3 = (_BH, _BW*2)
B2 = (_BW*2, _BH*2)
B1 = (_BH*2, _BW*4)
B0 = (_BW*4, _BH*4)
# }}}
# Basic PDF datatypes {{{
def serialize(o, stream):
    '''Write the PDF representation of the object o to stream (a
    binary stream). Objects that know how to serialize themselves do so
    via their pdf_serialize() method; booleans, numbers and None are
    handled inline.'''
    pdf = getattr(o, 'pdf_serialize', None)
    if pdf is not None:
        pdf(stream)
        return
    # bool must be tested before int/float, since bool is an int subclass
    if isinstance(o, bool):
        stream.write(b'true' if o else b'false')
    elif isinstance(o, (int, float)):
        stream.write(type(u'')(o).encode('ascii'))
    elif o is None:
        stream.write(b'null')
    else:
        raise ValueError('Unknown object: %r'%o)
class Name(unicode):

    '''A PDF name object, written as /Name. Names must be ASCII; characters
    outside the printable range and the # character itself are written as
    #-prefixed two-digit hex escapes.'''

    def pdf_serialize(self, stream):
        raw = self.encode('ascii')
        if len(raw) > 126:
            raise ValueError('Name too long: %r'%self)
        # Escape as # followed by exactly two hex digits (PDF 32000-1:2008,
        # 7.3.5). The previous code used hex(ord(x)), which produced
        # '#0x23' style escapes that are invalid in a PDF name.
        buf = [x if 33 < ord(x) < 126 and x != b'#' else b'#%02x'%ord(x) for x
                in raw]
        stream.write(b'/'+b''.join(buf))
class String(unicode):

    '''A PDF literal string, written as (text). Encoded as PDFDocEncoding
    compatible latin1 when possible, otherwise as UTF-16-BE with a BOM.'''

    def pdf_serialize(self, stream):
        # Unbalanced parentheses and backslashes must be escaped inside a
        # PDF literal string.
        s = self.replace('\\', '\\\\').replace('(', r'\(').replace(')', r'\)')
        try:
            raw = s.encode('latin1')
            if raw.startswith(codecs.BOM_UTF16_BE):
                # A latin1 string that happens to begin with the UTF-16-BE
                # BOM would be misinterpreted as UTF-16 by PDF consumers,
                # so force the UTF-16 representation instead. (The previous
                # code did this via "raise UnicodeEncodeError('')", which
                # actually raised TypeError -- UnicodeEncodeError requires
                # five constructor arguments -- and escaped the except
                # clause entirely.)
                raw = codecs.BOM_UTF16_BE + s.encode('utf-16-be')
        except UnicodeEncodeError:
            raw = codecs.BOM_UTF16_BE + s.encode('utf-16-be')
        stream.write(b'('+raw+b')')
class Dictionary(dict):

    '''A PDF dictionary, written as << /Key value ... >> with one entry
    per line. Keys are serialized as Name objects, values through
    serialize().'''

    def pdf_serialize(self, stream):
        write = stream.write
        write(b'<<' + EOL)
        for key, value in self.iteritems():
            serialize(Name(key), stream)
            write(b' ')
            serialize(value, stream)
            write(EOL)
        write(b'>>' + EOL)
class InlineDictionary(Dictionary):

    '''Like Dictionary, but serialized on a single line, for use in
    compact contexts such as stream dictionaries.'''

    def pdf_serialize(self, stream):
        write = stream.write
        write(b'<< ')
        for key, value in self.iteritems():
            serialize(Name(key), stream)
            write(b' ')
            serialize(value, stream)
            write(b' ')
        write(b'>>')
class Array(list):

    '''A PDF array, written as [e1 e2 ...] with single spaces between
    elements.'''

    def pdf_serialize(self, stream):
        stream.write(b'[')
        first = True
        for element in self:
            if not first:
                stream.write(b' ')
            first = False
            serialize(element, stream)
        stream.write(b']')
class Stream(BytesIO):

    '''A PDF stream object. Data is accumulated in memory; on
    serialization it is written as a stream dictionary (Length, DL and
    optionally Filter) followed by the raw data between the stream and
    endstream keywords. When compress is True, the data is Flate (zlib)
    compressed at serialization time.'''

    def __init__(self, compress=False):
        BytesIO.__init__(self)
        self.compress = compress

    def pdf_serialize(self, stream):
        data = self.getvalue()
        decoded_len = len(data)  # DL key: length of the *decoded* data
        filters = Array()
        if self.compress:
            filters.append(Name('FlateDecode'))
            data = zlib.compress(data)
        meta = InlineDictionary({'Length': len(data), 'DL': decoded_len})
        if filters:
            meta['Filter'] = filters
        serialize(meta, stream)
        stream.write(EOL + b'stream' + EOL)
        stream.write(data)
        stream.write(EOL + b'endstream' + EOL)

    def write_line(self, raw=b''):
        # write() below handles encoding of text input
        self.write(raw)
        self.write(EOL)

    def write(self, raw):
        # Accept either bytes or ASCII-encodable text
        if not isinstance(raw, bytes):
            raw = raw.encode('ascii')
        super(Stream, self).write(raw)
class Reference(object):

    '''An indirect reference to a numbered PDF object, written in the
    form "N 0 R". Keeps a handle to the referenced object itself in
    self.obj.'''

    def __init__(self, num, obj):
        self.num, self.obj = num, obj

    def pdf_serialize(self, stream):
        stream.write(('%d 0 R'%self.num).encode('ascii'))
# }}}

View File

@ -0,0 +1,502 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, traceback
from math import sqrt
from collections import namedtuple
from functools import wraps
from PyQt4.Qt import (QPaintEngine, QPaintDevice, Qt, QApplication, QPainter,
QTransform, QPainterPath, QRawFont)
from calibre.constants import DEBUG
from calibre.ebooks.pdf.render.serialize import (Color, PDFStream, Path, Text)
from calibre.ebooks.pdf.render.common import inch, A4
from calibre.utils.fonts.sfnt.container import Sfnt
from calibre.utils.fonts.sfnt.metrics import FontMetrics
XDPI = 1200
YDPI = 1200
Point = namedtuple('Point', 'x y')
ColorState = namedtuple('ColorState', 'color opacity do')
def store_error(func):
    """Decorator for paint-engine callbacks: traps any exception raised by
    *func* and appends its formatted traceback to self.errors, instead of
    letting it propagate into Qt's C++ paint machinery."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        # Deliberately a bare except: every failure during painting must be
        # recorded for later reporting, never raised across the Qt boundary.
        try:
            func(self, *args, **kwargs)
        except:
            self.errors.append(traceback.format_exc())
    return wrapper
class GraphicsState(object): # {{{
    """Tracks the Qt painter state and, when called, mirrors the parts of it
    that changed into the engine's PDF content stream."""

    def __init__(self):
        # Pending operations read from the last QPaintEngineState
        self.ops = {}
        # current_state is replaced wholesale on reset(); keys mirror the
        # apply_* handlers below plus 'clip' which is handled specially
        self.current_state = self.initial_state = {
            'fill': ColorState(Color(0., 0., 0., 1.), 1.0, False),
            'transform': QTransform(),
            'dash': [],
            'line_width': 0,
            'stroke': ColorState(Color(0., 0., 0., 1.), 1.0, True),
            'line_cap': 'flat',
            'line_join': 'miter',
            'clip': (Qt.NoClip, QPainterPath()),
        }

    def reset(self):
        # Return to the pristine state (used at the start of every page)
        self.current_state = self.initial_state

    def update_color_state(self, which, color=None, opacity=None,
                           brush_style=None, pen_style=None):
        # Merge new color/opacity/style information into the pending
        # fill or stroke state; `which` is 'fill' or 'stroke'.
        current = self.ops.get(which, self.current_state[which])
        n = ColorState(*current)
        if color is not None:
            n = n._replace(color=Color(*color.getRgbF()))
        if opacity is not None:
            n = n._replace(opacity=opacity)
        if opacity is not None:
            # NOTE(review): this product only feeds the do-draw decision
            # below; it is never stored back into n.opacity -- confirm this
            # is intended.
            opacity *= n.color.opacity
        if brush_style is not None:
            # Decide whether this paint component should be drawn at all
            if which == 'fill':
                do = (False if opacity == 0.0 or brush_style == Qt.NoBrush else
                      True)
            else:
                do = (False if opacity == 0.0 or brush_style == Qt.NoBrush or
                      pen_style == Qt.NoPen else True)
            n = n._replace(do=do)
        self.ops[which] = n

    def read(self, state):
        """Read the dirty parts of a QPaintEngineState into self.ops."""
        self.ops = {}
        flags = state.state()
        if flags & QPaintEngine.DirtyTransform:
            self.ops['transform'] = state.transform()
        # TODO: Add support for brush patterns
        if flags & QPaintEngine.DirtyBrush:
            brush = state.brush()
            color = brush.color()
            self.update_color_state('fill', color=color,
                                    brush_style=brush.style())
        if flags & QPaintEngine.DirtyPen:
            pen = state.pen()
            brush = pen.brush()
            color = pen.color()
            self.update_color_state('stroke', color, brush_style=brush.style(),
                                    pen_style=pen.style())
            # Approximate Qt's stock pen styles with simple PDF dash arrays
            ps = {Qt.DashLine:[3], Qt.DotLine:[1,2], Qt.DashDotLine:[3,2,1,2],
                  Qt.DashDotDotLine:[3, 2, 1, 2, 1, 2]}.get(pen.style(), [])
            self.ops['dash'] = ps
            self.ops['line_width'] = pen.widthF()
            self.ops['line_cap'] = {Qt.FlatCap:'flat', Qt.RoundCap:'round',
                                    Qt.SquareCap:'square'}.get(pen.capStyle(), 'flat')
            self.ops['line_join'] = {Qt.MiterJoin:'miter', Qt.RoundJoin:'round',
                                     Qt.BevelJoin:'bevel'}.get(pen.joinStyle(), 'miter')
        if flags & QPaintEngine.DirtyOpacity:
            self.update_color_state('fill', opacity=state.opacity())
            self.update_color_state('stroke', opacity=state.opacity())
        if flags & QPaintEngine.DirtyClipPath:
            self.ops['clip'] = (state.clipOperation(), state.clipPath())
        elif flags & QPaintEngine.DirtyClipRegion:
            # Convert a clip region into an equivalent path of rectangles
            path = QPainterPath()
            for rect in state.clipRegion().rects():
                path.addRect(rect)
            self.ops['clip'] = (state.clipOperation(), path)

    def __call__(self, engine):
        """Write the pending ops (collected by read()) into engine.pdf."""
        pdf = engine.pdf
        ops = self.ops
        current_transform = self.current_state['transform']
        transform_changed = 'transform' in ops and ops['transform'] != current_transform
        # A transform or clip change cannot be amended in place in PDF; the
        # graphics stack must be popped and rebuilt from scratch
        reset_stack = transform_changed or 'clip' in ops

        if reset_stack:
            pdf.restore_stack()
            pdf.save_stack()

        # We apply clip before transform as the clip may have to be merged with
        # the previous clip path so it is easiest to work with clips that are
        # pre-transformed
        prev_op, prev_clip_path = self.current_state['clip']
        if 'clip' in ops:
            op, path = ops['clip']
            self.current_state['clip'] = (op, path)
            transform = ops.get('transform', QTransform())
            if not transform.isIdentity() and path is not None:
                # Pre transform the clip path
                path = current_transform.map(path)
                self.current_state['clip'] = (op, path)
            if op == Qt.ReplaceClip:
                pass
            elif op == Qt.IntersectClip:
                if prev_op != Qt.NoClip:
                    self.current_state['clip'] = (op, path.intersected(prev_clip_path))
            elif op == Qt.UniteClip:
                if prev_clip_path is not None:
                    path.addPath(prev_clip_path)
            else:
                # Qt.NoClip: clear clipping entirely
                self.current_state['clip'] = (Qt.NoClip, QPainterPath())

        op, path = self.current_state['clip']
        if op != Qt.NoClip:
            engine.add_clip(path)
        elif reset_stack and prev_op != Qt.NoClip:
            # Re-apply the previous clip path since no clipping operation was
            # specified
            engine.add_clip(prev_clip_path)

        if reset_stack:
            # Since we have reset the stack we need to re-apply all previous
            # operations, that are different from the default value (clip is
            # handled separately).
            for op in set(self.current_state) - (set(ops)|{'clip'}):
                if self.current_state[op] != self.initial_state[op]:
                    self.apply(op, self.current_state[op], engine, pdf)

        # Now apply the new operations
        for op, val in ops.iteritems():
            if op != 'clip':
                self.apply(op, val, engine, pdf)
                self.current_state[op] = val

    def apply(self, op, val, engine, pdf):
        # Dispatch to the matching apply_<op> handler below
        getattr(self, 'apply_'+op)(val, engine, pdf)

    def apply_transform(self, val, engine, pdf):
        engine.qt_system = val
        pdf.transform(val)

    def apply_stroke(self, val, engine, pdf):
        self.apply_color_state('stroke', val, engine, pdf)

    def apply_fill(self, val, engine, pdf):
        self.apply_color_state('fill', val, engine, pdf)

    def apply_color_state(self, which, val, engine, pdf):
        # Fold the painter opacity into the color's own alpha
        color = val.color._replace(opacity=val.opacity*val.color.opacity)
        getattr(pdf, 'set_%s_color'%which)(color)
        setattr(engine, 'do_%s'%which, val.do)

    def apply_dash(self, val, engine, pdf):
        pdf.set_dash(val)

    def apply_line_width(self, val, engine, pdf):
        pdf.set_line_width(val)

    def apply_line_cap(self, val, engine, pdf):
        pdf.set_line_cap(val)

    def apply_line_join(self, val, engine, pdf):
        pdf.set_line_join(val)
# }}}
class PdfEngine(QPaintEngine):
    """A Qt paint engine that renders QPainter commands into a PDFStream.

    All page/margin dimensions are in pts; width/height are the pixel
    dimensions of the Qt viewport. Errors raised inside paint callbacks are
    collected in self.errors (see the store_error decorator)."""

    def __init__(self, file_object, page_width, page_height, left_margin,
                 top_margin, right_margin, bottom_margin, width, height):
        QPaintEngine.__init__(self, self.features)
        self.file_object = file_object
        self.page_height, self.page_width = page_height, page_width
        self.left_margin, self.top_margin = left_margin, top_margin
        self.right_margin, self.bottom_margin = right_margin, bottom_margin
        self.pixel_width, self.pixel_height = width, height
        # Setup a co-ordinate transform that allows us to use co-ords
        # from Qt's pixel based co-ordinate system with its origin at the top
        # left corner. PDF's co-ordinate system is based on pts and has its
        # origin in the bottom left corner. We also have to implement the page
        # margins. Therefore, we need to translate, scale and reflect about the
        # x-axis.
        dy = self.page_height - self.top_margin
        dx = self.left_margin
        sx = (self.page_width - self.left_margin -
              self.right_margin) / self.pixel_width
        sy = (self.page_height - self.top_margin -
              self.bottom_margin) / self.pixel_height

        self.pdf_system = QTransform(sx, 0, 0, -sy, dx, dy)
        self.qt_system = QTransform()
        self.do_stroke = True
        self.do_fill = False
        self.scale = sqrt(sy**2 + sx**2)
        self.xscale, self.yscale = sx, sy
        self.graphics_state = GraphicsState()
        self.errors = []

    def init_page(self):
        # Start a fresh page: install the Qt->PDF transform and reset all
        # drawing state to its defaults
        self.pdf.transform(self.pdf_system)
        self.pdf.set_rgb_colorspace()
        # NOTE(review): QPaintEngine exposes painter() as a method; confirm
        # self.painter.pen() (no call) is correct for this PyQt4 version.
        width = self.painter.pen().widthF() if self.isActive() else 0
        self.pdf.set_line_width(width)
        self.do_stroke = True
        self.do_fill = False
        self.graphics_state.reset()
        self.pdf.save_stack()

    @property
    def features(self):
        # Capabilities advertised to Qt; anything not listed is emulated by
        # Qt before reaching this engine
        return (QPaintEngine.Antialiasing | QPaintEngine.AlphaBlend |
                QPaintEngine.ConstantOpacity | QPaintEngine.PainterPaths |
                QPaintEngine.PaintOutsidePaintEvent |
                QPaintEngine.PrimitiveTransform)

    def begin(self, device):
        # Qt calls this when painting starts; returns False on failure
        try:
            self.pdf = PDFStream(self.file_object, (self.page_width,
                                                    self.page_height),
                                 compress=not DEBUG)
            self.init_page()
        except:
            self.errors.append(traceback.format_exc())
            return False
        return True

    def end_page(self, start_new=True):
        self.pdf.restore_stack()
        self.pdf.end_page()
        if start_new:
            self.init_page()

    def end(self):
        # Qt calls this when painting finishes; flush and close the PDF
        try:
            self.end_page(start_new=False)
            self.pdf.end()
        except:
            self.errors.append(traceback.format_exc())
            return False
        finally:
            # Drop references so the file object can be closed by the caller
            self.pdf = self.file_object = None
        return True

    def type(self):
        return QPaintEngine.Pdf

    @store_error
    def drawPixmap(self, rect, pixmap, source_rect):
        print ('TODO: drawPixmap() currently unimplemented')

    @store_error
    def drawImage(self, rect, image, source_rect, flags=Qt.AutoColor):
        print ('TODO: drawImage() currently unimplemented')

    @store_error
    def updateState(self, state):
        # Called by Qt whenever painter state changes; translate it to PDF
        self.graphics_state.read(state)
        self.graphics_state(self)

    def convert_path(self, path):
        # Convert a QPainterPath into our serialize.Path representation.
        # Curves occupy three consecutive elements: the CurveTo element plus
        # two CurveToData elements carrying the remaining control points.
        p = Path()
        i = 0
        while i < path.elementCount():
            elem = path.elementAt(i)
            em = (elem.x, elem.y)
            i += 1
            if elem.isMoveTo():
                p.move_to(*em)
            elif elem.isLineTo():
                p.line_to(*em)
            elif elem.isCurveTo():
                added = False
                if path.elementCount() > i+1:
                    c1, c2 = path.elementAt(i), path.elementAt(i+1)
                    if (c1.type == path.CurveToDataElement and c2.type ==
                        path.CurveToDataElement):
                        i += 2
                        p.curve_to(em[0], em[1], c1.x, c1.y, c2.x, c2.y)
                        added = True
                if not added:
                    raise ValueError('Invalid curve to operation')
        return p

    @store_error
    def drawPath(self, path):
        p = self.convert_path(path)
        fill_rule = {Qt.OddEvenFill:'evenodd',
                     Qt.WindingFill:'winding'}[path.fillRule()]
        self.pdf.draw_path(p, stroke=self.do_stroke,
                           fill=self.do_fill, fill_rule=fill_rule)

    def add_clip(self, path):
        p = self.convert_path(path)
        fill_rule = {Qt.OddEvenFill:'evenodd',
                     Qt.WindingFill:'winding'}[path.fillRule()]
        self.pdf.add_clip(p, fill_rule=fill_rule)

    @store_error
    def drawPoints(self, points):
        # PDF has no point primitive; draw each point as a tiny line
        p = Path()
        for point in points:
            p.move_to(point.x(), point.y())
            p.line_to(point.x(), point.y() + 0.001)
        self.pdf.draw_path(p, stroke=self.do_stroke, fill=False)

    @store_error
    def drawRects(self, rects):
        for rect in rects:
            bl = rect.topLeft()
            self.pdf.draw_rect(bl.x(), bl.y(), rect.width(), rect.height(),
                               stroke=self.do_stroke, fill=self.do_fill)

    @store_error
    def drawTextItem(self, point, text_item):
        # super(PdfEngine, self).drawTextItem(point+QPoint(0, 300), text_item)
        f = text_item.font()
        px, pt = f.pixelSize(), f.pointSizeF()
        if px == -1:
            # Font specified in points: convert to the pixel space the page
            # transform expects
            sz = pt/self.yscale
        else:
            sz = px
        r = QRawFont.fromFont(f)
        metrics = FontMetrics(Sfnt(r))
        to = Text()
        to.size = sz
        # Counter the page's y-flip so text is not drawn mirrored
        to.set_transform(1, 0, 0, -1, point.x(), point.y())
        stretch = f.stretch()
        if stretch != 100:
            to.horizontal_scale = stretch
        ws = f.wordSpacing()
        if ws != 0:
            to.word_spacing = ws
        spacing = f.letterSpacing()
        st = f.letterSpacingType()
        text = type(u'')(text_item.text())
        if st == f.AbsoluteSpacing and spacing != 0:
            to.char_space = spacing/self.scale
        if st == f.PercentageSpacing and spacing not in {100, 0}:
            # TODO: Figure out why the results from uncommenting the super
            # class call above differ. The advance widths are the same as those
            # reported by QRawfont, so presumably, Qt use some other
            # algorithm, I can't be bothered to track it down. This behavior is
            # correct as per the Qt docs' description of PercentageSpacing
            widths = [w*-1 for w in metrics.advance_widths(text,
                sz, f.stretch()/100.)]
            to.glyph_adjust = ((spacing-100)/100., widths)
        to.text = text
        with self:
            # Text is filled with the current stroke (pen) color
            self.graphics_state.apply_fill(self.graphics_state.current_state['stroke'],
                                           self, self.pdf)
            self.pdf.draw_text(to)

    @store_error
    def drawPolygon(self, points, mode):
        if not points: return
        p = Path()
        p.move_to(points[0].x(), points[0].y())
        for point in points[1:]:
            p.line_to(point.x(), point.y())
        p.close()
        fill_rule = {self.OddEvenMode:'evenodd',
                     self.WindingMode:'winding'}.get(mode, 'evenodd')
        # PolylineMode is the only mode that is stroked but not filled
        self.pdf.draw_path(p, stroke=True, fill_rule=fill_rule,
            fill=(mode in (self.OddEvenMode, self.WindingMode, self.ConvexMode)))

    def __enter__(self):
        # Temporarily save the PDF graphics stack and stroke/fill flags
        self.pdf.save_stack()
        self.saved_ps = (self.do_stroke, self.do_fill)

    def __exit__(self, *args):
        self.do_stroke, self.do_fill = self.saved_ps
        self.pdf.restore_stack()
class PdfDevice(QPaintDevice): # {{{
    """A QPaintDevice whose output is a PDF file written to file_object.

    page_size and margins are in pts; the printable body area is the page
    minus the margins."""

    def __init__(self, file_object, page_size=A4, left_margin=inch,
                 top_margin=inch, right_margin=inch, bottom_margin=inch):
        QPaintDevice.__init__(self)
        self.page_width, self.page_height = page_size
        self.body_width = self.page_width - left_margin - right_margin
        self.body_height = self.page_height - top_margin - bottom_margin
        self.engine = PdfEngine(file_object, self.page_width, self.page_height,
                                left_margin, top_margin, right_margin,
                                bottom_margin, self.width(), self.height())

    def paintEngine(self):
        return self.engine

    def metric(self, m):
        # Qt queries device geometry through this single dispatch method
        if m in (self.PdmDpiX, self.PdmPhysicalDpiX):
            return XDPI
        if m in (self.PdmDpiY, self.PdmPhysicalDpiY):
            return YDPI
        if m == self.PdmDepth:
            return 32
        if m == self.PdmNumColors:
            # NOTE(review): sys.maxint is Python 2 only
            return sys.maxint
        if m == self.PdmWidthMM:
            # 0.3527... = 25.4/72, i.e. pts to millimeters
            return int(round(self.body_width * 0.35277777777778))
        if m == self.PdmHeightMM:
            return int(round(self.body_height * 0.35277777777778))
        if m == self.PdmWidth:
            # Body size in device pixels at XDPI/YDPI
            return int(round(self.body_width * XDPI / 72.0))
        if m == self.PdmHeight:
            return int(round(self.body_height * YDPI / 72.0))
        return 0
# }}}
if __name__ == '__main__':
    # Ad-hoc visual smoke test: paints assorted primitives (rects, polylines,
    # paths, transformed/translucent rects, rotated lines, text) into
    # /tmp/painter.pdf for manual inspection.
    from PyQt4.Qt import (QBrush, QColor, QPoint)
    QBrush, QColor, QPoint  # reference the names to silence unused warnings
    app = QApplication([])
    p = QPainter()
    with open('/tmp/painter.pdf', 'wb') as f:
        dev = PdfDevice(f)
        p.begin(dev)
        xmax, ymax = p.viewport().width(), p.viewport().height()
        try:
            p.drawRect(0, 0, xmax, ymax)
            p.drawPolyline(QPoint(0, 0), QPoint(xmax, 0), QPoint(xmax, ymax),
                           QPoint(0, ymax), QPoint(0, 0))
            pp = QPainterPath()
            pp.addRect(0, 0, xmax, ymax)
            p.drawPath(pp)
            p.save()
            # Three overlapping translucent squares (R, G, B), each drawn in
            # a progressively translated and scaled co-ordinate system
            for i in xrange(3):
                col = [0, 0, 0, 200]
                col[i] = 255
                p.setOpacity(0.3)
                p.setBrush(QBrush(QColor(*col)))
                p.drawRect(0, 0, xmax/10, xmax/10)
                p.translate(xmax/10, xmax/10)
                p.scale(1, 1.5)
            p.restore()

            p.save()
            p.drawLine(0, 0, 5000, 0)
            p.rotate(45)
            p.drawLine(0, 0, 5000, 0)
            p.restore()

            # NOTE(review): f is rebound here, shadowing the open file handle
            # (harmless since the handle is managed by the with block)
            f = p.font()
            f.setPointSize(48)
            f.setLetterSpacing(f.PercentageSpacing, 200)
            # f.setUnderline(True)
            # f.setOverline(True)
            # f.setStrikeOut(True)
            f.setFamily('Times New Roman')
            p.setFont(f)
            # p.scale(2, 2)
            # p.rotate(45)
            p.setPen(QColor(0, 0, 255))
            p.drawText(QPoint(100, 300), 'Some text')
        finally:
            p.end()
        if dev.engine.errors:
            for err in dev.engine.errors: print (err)
            raise SystemExit(1)

View File

@ -0,0 +1,35 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.ebooks.pdf.render.common import (
Dictionary, Name)
# The fourteen "base 14" fonts that every PDF consumer must provide built in,
# so they never need to be embedded.
STANDARD_FONTS = {
    'Times-Roman', 'Helvetica', 'Courier', 'Symbol', 'Times-Bold',
    'Helvetica-Bold', 'Courier-Bold', 'ZapfDingbats', 'Times-Italic',
    'Helvetica-Oblique', 'Courier-Oblique', 'Times-BoldItalic',
    'Helvetica-BoldOblique', 'Courier-BoldOblique', }


class FontManager(object):
    """Tracks font objects added to a PDF document, de-duplicating the
    standard (base 14) fonts by name."""

    def __init__(self, objects):
        self.objects = objects
        # name -> indirect reference of the already-added font dictionary
        self.std_map = {}

    def add_standard_font(self, name):
        """Return an indirect reference to the Type1 font dict for *name*,
        creating and registering it on first use.

        Raises ValueError if *name* is not one of the base 14 fonts."""
        if name not in STANDARD_FONTS:
            raise ValueError('%s is not a standard font'%name)
        try:
            return self.std_map[name]
        except KeyError:
            ref = self.objects.add(Dictionary({
                'Type':Name('Font'),
                'Subtype':Name('Type1'),
                'BaseFont':Name(name)
            }))
            self.std_map[name] = ref
            return ref

View File

@ -0,0 +1,375 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import hashlib
from future_builtins import map
from itertools import izip
from collections import namedtuple
from calibre.constants import (__appname__, __version__)
from calibre.ebooks.pdf.render.common import (
Reference, EOL, serialize, Stream, Dictionary, String, Name, Array)
from calibre.ebooks.pdf.render.fonts import FontManager
PDFVER = b'%PDF-1.6'
Color = namedtuple('Color', 'red green blue opacity')
class IndirectObjects(object):
    """Manages the indirect objects of a PDF file: assigns object numbers,
    records the byte offset each object is written at and emits the xref
    table."""

    def __init__(self):
        self._list = []     # objects in numbering order (object num = index+1)
        self._map = {}      # id(obj) -> Reference
        self._offsets = []  # byte offset per object; None until written out

    def __len__(self):
        return len(self._list)

    def add(self, o):
        # Register o as the next indirect object and return its Reference
        self._list.append(o)
        ref = Reference(len(self._list), o)
        self._map[id(o)] = ref
        self._offsets.append(None)
        return ref

    def commit(self, ref, stream):
        # Write the referenced object to the stream immediately
        self.write_obj(stream, ref.num, ref.obj)

    def write_obj(self, stream, num, obj):
        # Serialize one "num 0 obj ... endobj" record, recording its offset
        # for the xref table
        stream.write(EOL)
        self._offsets[num-1] = stream.tell()
        stream.write('%d 0 obj'%num)
        stream.write(EOL)
        serialize(obj, stream)
        if stream.last_char != EOL:
            stream.write(EOL)
        stream.write('endobj')
        stream.write(EOL)

    def __getitem__(self, o):
        # Look up a Reference either by integer index or by the object itself
        try:
            return self._map[id(self._list[o] if isinstance(o, int) else o)]
        except (KeyError, IndexError):
            raise KeyError('The object %r was not found'%o)

    def pdf_serialize(self, stream):
        # Write out every object that has not already been committed
        for i, obj in enumerate(self._list):
            offset = self._offsets[i]
            if offset is None:
                self.write_obj(stream, i+1, obj)

    def write_xref(self, stream):
        """Write the cross-reference table and return its byte offset."""
        self.xref_offset = stream.tell()
        stream.write(b'xref'+EOL)
        stream.write('0 %d'%(1+len(self._offsets)))
        stream.write(EOL)
        # Object 0 is the mandatory head of the free-object list
        stream.write('%010d 65535 f '%0)
        stream.write(EOL)
        for offset in self._offsets:
            # Fixed-width in-use (n) entries, one per object
            line = '%010d 00000 n '%offset
            stream.write(line.encode('ascii') + EOL)
        return self.xref_offset
class Page(Stream):
    """The content stream of a single PDF page, together with its /Page
    dictionary and the per-page resources (opacity ExtGStates and fonts)
    referenced from the content."""

    def __init__(self, parentref, *args, **kwargs):
        super(Page, self).__init__(*args, **kwargs)
        self.page_dict = Dictionary({
            'Type': Name('Page'),
            'Parent': parentref,
        })
        self.opacities = {}  # ExtGState reference -> resource name (OpaN)
        self.fonts = {}      # font reference -> resource name (FN)

    def set_opacity(self, opref):
        # Emit a gs operator selecting the ExtGState for opref, registering
        # it as a page resource on first use
        if opref not in self.opacities:
            self.opacities[opref] = 'Opa%d'%len(self.opacities)
        name = self.opacities[opref]
        serialize(Name(name), self)
        self.write(b' gs ')

    def add_font(self, fontref):
        # Register fontref as a page resource and return its resource name
        if fontref not in self.fonts:
            self.fonts[fontref] = 'F%d'%len(self.fonts)
        return self.fonts[fontref]

    def add_resources(self):
        # Build the /Resources dictionary from the registered resources
        r = Dictionary()
        if self.opacities:
            extgs = Dictionary()
            for opref, name in self.opacities.iteritems():
                extgs[name] = opref
            r['ExtGState'] = extgs
        if self.fonts:
            fonts = Dictionary()
            for ref, name in self.fonts.iteritems():
                fonts[name] = ref
            r['Font'] = fonts
        if r:
            self.page_dict['Resources'] = r

    def end(self, objects, stream):
        """Write the content stream and page dictionary as indirect objects
        and return the reference to the page dictionary."""
        contents = objects.add(self)
        objects.commit(contents, stream)
        self.page_dict['Contents'] = contents
        self.add_resources()
        ret = objects.add(self.page_dict)
        objects.commit(ret, stream)
        return ret
class Path(object):
    """Records a sequence of PDF path-construction operators.

    Each entry in self.ops is a tuple of the operands followed by the
    single-letter PDF operator name."""

    def __init__(self):
        self.ops = []

    def _record(self, *op):
        # op is (operand, ..., operator-name)
        self.ops.append(op)

    def move_to(self, x, y):
        self._record(x, y, 'm')

    def line_to(self, x, y):
        self._record(x, y, 'l')

    def curve_to(self, x1, y1, x2, y2, x, y):
        self._record(x1, y1, x2, y2, x, y, 'c')

    def close(self):
        self._record('h')
class Text(object):
    """Holds the parameters of a single PDF text object and serializes it as
    a BT ... ET block."""

    def __init__(self):
        self.transform = self.default_transform = [1, 0, 0, 1, 0, 0]
        self.font_name = 'Times-Roman'
        self.font_path = None
        self.horizontal_scale = self.default_horizontal_scale = 100
        self.word_spacing = self.default_word_spacing = 0
        self.char_space = self.default_char_space = 0
        self.glyph_adjust = self.default_glyph_adjust = None
        self.size = 12
        self.text = ''

    def set_transform(self, *args):
        # Accepts either a QTransform-like object or the six matrix numbers
        if len(args) == 1:
            m = args[0]
            vals = [m.m11(), m.m12(), m.m21(), m.m22(), m.dx(), m.dy()]
        else:
            vals = args
        self.transform = vals

    def pdf_serialize(self, stream, font_name):
        # Emit nothing for empty text
        if not self.text: return
        stream.write_line('BT ')
        serialize(Name(font_name), stream)
        stream.write(' %g Tf '%self.size)
        stream.write(' '.join(map(type(u''), self.transform)) + ' Tm ')
        # Only emit operators whose value differs from the PDF default
        if self.horizontal_scale != self.default_horizontal_scale:
            stream.write('%g Tz '%self.horizontal_scale)
        if self.word_spacing != self.default_word_spacing:
            stream.write('%g Tw '%self.word_spacing)
        if self.char_space != self.default_char_space:
            stream.write('%g Tc '%self.char_space)
        stream.write_line()
        if self.glyph_adjust is self.default_glyph_adjust:
            # Simple case: show the whole string with Tj
            serialize(String(self.text), stream)
            stream.write(' Tj ')
        else:
            # Per-glyph positioning: alternate strings and adjustments in a
            # TJ array
            chars = Array()
            frac, widths = self.glyph_adjust
            for c, width in izip(self.text, widths):
                chars.append(String(c))
                chars.append(int(width * frac))
            serialize(chars, stream)
            stream.write(' TJ ')
        stream.write_line('ET')
class Catalog(Dictionary):
    """The document catalog: the root object of the PDF object hierarchy,
    pointing at the page tree."""

    def __init__(self, pagetree):
        entries = {'Type': Name('Catalog'), 'Pages': pagetree}
        super(Catalog, self).__init__(entries)
class PageTree(Dictionary):
    """The /Pages node that collects all page objects of the document."""

    def __init__(self, page_size):
        width, height = page_size[0], page_size[1]
        super(PageTree, self).__init__({
            'Type': Name('Pages'),
            'MediaBox': Array([0, 0, width, height]),
            'Kids': Array(),
            'Count': 0,
        })

    def add_page(self, pageref):
        # Append the page reference and keep the mandatory /Count in sync
        self['Kids'].append(pageref)
        self['Count'] += 1
class HashingStream(object):
    """Wraps a writable binary file object, feeding everything written into
    a SHA-256 hash (used later for the PDF file ID).

    Unicode input is encoded as ASCII before writing. The last byte written
    is remembered in self.last_char so callers can check whether the output
    currently ends with a newline."""

    def __init__(self, f):
        self.f = f
        self.tell = f.tell
        self.hashobj = hashlib.sha256()
        self.last_char = b''

    def write(self, raw):
        data = raw.encode('ascii') if not isinstance(raw, bytes) else raw
        self.f.write(data)
        self.hashobj.update(data)
        if data:
            self.last_char = data[-1]
class PDFStream(object):
    """Incrementally writes a PDF file to the given output stream. Drawing
    operators accumulate in the current page's content stream; end_page()
    flushes a page and end() writes the xref table and trailer."""

    # Maps (stroke, fill, fill-rule) to the PDF path-painting operator
    PATH_OPS = {
        # stroke fill   fill-rule
        ( False, False, 'winding')  : 'n',
        ( False, False, 'evenodd')  : 'n',
        ( False, True,  'winding')  : 'f',
        ( False, True,  'evenodd')  : 'f*',
        ( True,  False, 'winding')  : 'S',
        ( True,  False, 'evenodd')  : 'S',
        ( True,  True,  'winding')  : 'B',
        ( True,  True,  'evenodd')  : 'B*',
    }

    def __init__(self, stream, page_size, compress=False):
        self.stream = HashingStream(stream)
        self.compress = compress
        self.write_line(PDFVER)
        # High-bit comment line marking the file as binary for transports
        self.write_line(b'%íì¦"')
        creator = ('%s %s [http://calibre-ebook.com]'%(__appname__,
                                                       __version__))
        self.write_line('%% Created by %s'%creator)
        self.objects = IndirectObjects()
        # Object 1 is the page tree, object 2 the catalog (see properties)
        self.objects.add(PageTree(page_size))
        self.objects.add(Catalog(self.page_tree))
        self.current_page = Page(self.page_tree, compress=self.compress)
        self.info = Dictionary({'Creator':String(creator),
                                'Producer':String(creator)})
        # Shared ExtGState objects, one per distinct opacity value
        self.stroke_opacities, self.fill_opacities = {}, {}
        self.font_manager = FontManager(self.objects)

    @property
    def page_tree(self):
        return self.objects[0]

    @property
    def catalog(self):
        return self.objects[1]

    def write_line(self, byts=b''):
        byts = byts if isinstance(byts, bytes) else byts.encode('ascii')
        self.stream.write(byts + EOL)

    def transform(self, *args):
        # Concatenate a transformation matrix (cm operator). Accepts either a
        # QTransform-like object or the six matrix numbers.
        if len(args) == 1:
            m = args[0]
            vals = [m.m11(), m.m12(), m.m21(), m.m22(), m.dx(), m.dy()]
        else:
            vals = args
        cm = ' '.join(map(type(u''), vals))
        self.current_page.write_line(cm + ' cm')

    def set_rgb_colorspace(self):
        self.current_page.write_line('/DeviceRGB CS /DeviceRGB cs')

    def save_stack(self):
        self.current_page.write_line('q')

    def restore_stack(self):
        self.current_page.write_line('Q')

    def reset_stack(self):
        self.current_page.write_line('Q q')

    def draw_rect(self, x, y, width, height, stroke=True, fill=False):
        self.current_page.write('%g %g %g %g re '%(x, y, width, height))
        self.current_page.write_line(self.PATH_OPS[(stroke, fill, 'winding')])

    def write_path(self, path):
        # Emit the recorded path construction operators; each op tuple ends
        # with the operator name
        for i, op in enumerate(path.ops):
            if i != 0:
                self.current_page.write_line()
            for x in op:
                self.current_page.write(type(u'')(x) + ' ')

    def draw_path(self, path, stroke=True, fill=False, fill_rule='winding'):
        if not path.ops: return
        self.write_path(path)
        self.current_page.write_line(self.PATH_OPS[(stroke, fill, fill_rule)])

    def add_clip(self, path, fill_rule='winding'):
        # Intersect the current clip with path; n ends the path unpainted
        if not path.ops: return
        self.write_path(path)
        op = 'W' if fill_rule == 'winding' else 'W*'
        self.current_page.write_line(op + ' ' + 'n')

    def set_dash(self, array, phase=0):
        array = Array(array)
        serialize(array, self.current_page)
        self.current_page.write(b' ')
        serialize(phase, self.current_page)
        self.current_page.write_line(' d')

    def set_line_width(self, width):
        serialize(width, self.current_page)
        self.current_page.write_line(' w')

    def set_line_cap(self, style):
        serialize({'flat':0, 'round':1, 'square':2}.get(style),
                  self.current_page)
        self.current_page.write_line(' J')

    def set_line_join(self, style):
        serialize({'miter':0, 'round':1, 'bevel':2}[style], self.current_page)
        self.current_page.write_line(' j')

    def set_stroke_color(self, color):
        # Opacity goes through a shared ExtGState object (CA = stroke alpha)
        opacity = color.opacity
        if opacity not in self.stroke_opacities:
            op = Dictionary({'Type':Name('ExtGState'), 'CA': opacity})
            self.stroke_opacities[opacity] = self.objects.add(op)
        self.current_page.set_opacity(self.stroke_opacities[opacity])
        self.current_page.write_line(' '.join(map(type(u''), color[:3])) + ' SC')

    def set_fill_color(self, color):
        # Same as set_stroke_color, but ca = fill alpha and sc sets the color
        opacity = color.opacity
        if opacity not in self.fill_opacities:
            op = Dictionary({'Type':Name('ExtGState'), 'ca': opacity})
            self.fill_opacities[opacity] = self.objects.add(op)
        self.current_page.set_opacity(self.fill_opacities[opacity])
        self.current_page.write_line(' '.join(map(type(u''), color[:3])) + ' sc')

    def end_page(self):
        # Flush the current page's objects and start a fresh page
        pageref = self.current_page.end(self.objects, self.stream)
        self.page_tree.obj.add_page(pageref)
        self.current_page = Page(self.page_tree, compress=self.compress)

    def draw_text(self, text_object):
        if text_object.font_path is None:
            fontref = self.font_manager.add_standard_font(text_object.font_name)
        else:
            # Embedded fonts are not implemented yet
            raise NotImplementedError()
        name = self.current_page.add_font(fontref)
        text_object.pdf_serialize(self.current_page, name)

    def end(self):
        """Finish the document: write remaining objects, xref and trailer."""
        if self.current_page.getvalue():
            self.end_page()
        inforef = self.objects.add(self.info)
        self.objects.pdf_serialize(self.stream)
        self.write_line()
        startxref = self.objects.write_xref(self.stream)
        # The file ID is derived from a hash of everything written so far.
        # NOTE(review): hexdigest() returns str; .decode('ascii') is valid
        # under Python 2 only -- confirm before any py3 port.
        file_id = String(self.stream.hashobj.hexdigest().decode('ascii'))
        self.write_line('trailer')
        trailer = Dictionary({'Root':self.catalog, 'Size':len(self.objects)+1,
                              'ID':Array([file_id, file_id]), 'Info':inforef})
        serialize(trailer, self.stream)
        self.write_line('startxref')
        self.write_line('%d'%startxref)
        self.stream.write('%%EOF')

View File

@ -1034,7 +1034,9 @@ def build_forms(srcdir, info=None):
dat = dat.replace('from widgets import', 'from calibre.gui2.widgets import') dat = dat.replace('from widgets import', 'from calibre.gui2.widgets import')
dat = dat.replace('from convert.xpath_wizard import', dat = dat.replace('from convert.xpath_wizard import',
'from calibre.gui2.convert.xpath_wizard import') 'from calibre.gui2.convert.xpath_wizard import')
dat = re.compile(r'QtGui.QApplication.translate\(.+?,\s+"(.+?)(?<!\\)",.+?\)', re.DOTALL).sub(r'_("\1")', dat) dat = re.sub(r'^ {4}def _translate\(context, text, disambig\):\s+return.*$', ' pass', dat,
flags=re.M)
dat = re.compile(r'(?:QtGui.QApplication.translate|(?<!def )_translate)\(.+?,\s+"(.+?)(?<!\\)",.+?\)', re.DOTALL).sub(r'_("\1")', dat)
dat = dat.replace('_("MMM yyyy")', '"MMM yyyy"') dat = dat.replace('_("MMM yyyy")', '"MMM yyyy"')
dat = pat.sub(sub, dat) dat = pat.sub(sub, dat)
dat = dat.replace('from QtWebKit.QWebView import QWebView', dat = dat.replace('from QtWebKit.QWebView import QWebView',

View File

@ -8,7 +8,7 @@ __docformat__ = 'restructuredtext en'
from functools import partial from functools import partial
from collections import Counter from collections import Counter
from PyQt4.Qt import QObject, QTimer from PyQt4.Qt import QObject, QTimer, QModelIndex
from calibre.gui2 import error_dialog, question_dialog from calibre.gui2 import error_dialog, question_dialog
from calibre.gui2.dialogs.delete_matching_from_device import DeleteMatchingFromDeviceDialog from calibre.gui2.dialogs.delete_matching_from_device import DeleteMatchingFromDeviceDialog
@ -285,6 +285,8 @@ class DeleteAction(InterfaceAction):
# Current row is after the last row, set it to the last row # Current row is after the last row, set it to the last row
current_row = view.row_count() - 1 current_row = view.row_count() - 1
view.set_current_row(current_row) view.set_current_row(current_row)
if view.model().rowCount(QModelIndex()) < 1:
self.gui.book_details.reset_info()
def library_ids_deleted2(self, ids_deleted, next_id=None): def library_ids_deleted2(self, ids_deleted, next_id=None):
view = self.gui.library_view view = self.gui.library_view

View File

@ -985,6 +985,12 @@ class DeviceMixin(object): # {{{
return return
except: except:
pass pass
if getattr(job, 'exception', None).__class__.__name__ == 'MTPInvalidSendPathError':
try:
from calibre.gui2.device_drivers.mtp_config import SendError
return SendError(self, job.exception).exec_()
except:
traceback.print_exc()
try: try:
prints(job.details, file=sys.stderr) prints(job.details, file=sys.stderr)
except: except:

View File

@ -13,13 +13,13 @@ from PyQt4.Qt import (QWidget, QListWidgetItem, Qt, QToolButton, QLabel,
QTabWidget, QGridLayout, QListWidget, QIcon, QLineEdit, QVBoxLayout, QTabWidget, QGridLayout, QListWidget, QIcon, QLineEdit, QVBoxLayout,
QPushButton, QGroupBox, QScrollArea, QHBoxLayout, QComboBox, QPushButton, QGroupBox, QScrollArea, QHBoxLayout, QComboBox,
pyqtSignal, QSizePolicy, QDialog, QDialogButtonBox, QPlainTextEdit, pyqtSignal, QSizePolicy, QDialog, QDialogButtonBox, QPlainTextEdit,
QApplication) QApplication, QSize)
from calibre.ebooks import BOOK_EXTENSIONS from calibre.ebooks import BOOK_EXTENSIONS
from calibre.gui2 import error_dialog from calibre.gui2 import error_dialog
from calibre.gui2.dialogs.template_dialog import TemplateDialog from calibre.gui2.dialogs.template_dialog import TemplateDialog
from calibre.utils.date import parse_date from calibre.utils.date import parse_date
from calibre.gui2.device_drivers.mtp_folder_browser import Browser from calibre.gui2.device_drivers.mtp_folder_browser import Browser, TopLevel
class FormatsConfig(QWidget): # {{{ class FormatsConfig(QWidget): # {{{
@ -328,7 +328,7 @@ class FormatRules(QGroupBox):
class MTPConfig(QTabWidget): class MTPConfig(QTabWidget):
def __init__(self, device, parent=None): def __init__(self, device, parent=None, highlight_ignored_folders=False):
QTabWidget.__init__(self, parent) QTabWidget.__init__(self, parent)
self._device = weakref.ref(device) self._device = weakref.ref(device)
@ -373,23 +373,33 @@ class MTPConfig(QTabWidget):
_('&Ignore the %s in calibre')%device.current_friendly_name, _('&Ignore the %s in calibre')%device.current_friendly_name,
self.base) self.base)
b.clicked.connect(self.ignore_device) b.clicked.connect(self.ignore_device)
self.config_ign_folders_button = cif = QPushButton(
QIcon(I('tb_folder.png')), _('Change scanned &folders'))
cif.setStyleSheet(
'QPushButton { font-weight: bold; }')
if highlight_ignored_folders:
cif.setIconSize(QSize(64, 64))
self.show_debug_button = bd = QPushButton(QIcon(I('debug.png')), self.show_debug_button = bd = QPushButton(QIcon(I('debug.png')),
_('Show device information')) _('Show device information'))
bd.clicked.connect(self.show_debug_info) bd.clicked.connect(self.show_debug_info)
cif.clicked.connect(self.change_ignored_folders)
l.addWidget(b, 0, 0, 1, 2) l.addWidget(b, 0, 0, 1, 2)
l.addWidget(la, 1, 0, 1, 1) l.addWidget(la, 1, 0, 1, 1)
l.addWidget(self.formats, 2, 0, 4, 1) l.addWidget(self.formats, 2, 0, 5, 1)
l.addWidget(self.send_to, 2, 1, 1, 1) l.addWidget(cif, 2, 1, 1, 1)
l.addWidget(self.template, 3, 1, 1, 1) l.addWidget(self.template, 3, 1, 1, 1)
l.addWidget(self.show_debug_button, 4, 1, 1, 1) l.addWidget(self.send_to, 4, 1, 1, 1)
l.setRowStretch(5, 10) l.addWidget(self.show_debug_button, 5, 1, 1, 1)
l.addWidget(r, 6, 0, 1, 2) l.setRowStretch(6, 10)
l.setRowStretch(6, 100) l.addWidget(r, 7, 0, 1, 2)
l.setRowStretch(7, 100)
self.igntab = IgnoredDevices(self.device.prefs['history'], self.igntab = IgnoredDevices(self.device.prefs['history'],
self.device.prefs['blacklist']) self.device.prefs['blacklist'])
self.addTab(self.igntab, _('Ignored devices')) self.addTab(self.igntab, _('Ignored devices'))
self.current_ignored_folders = self.get_pref('ignored_folders')
self.initial_ignored_folders = self.current_ignored_folders
self.setCurrentIndex(1 if msg else 0) self.setCurrentIndex(1 if msg else 0)
@ -413,6 +423,12 @@ class MTPConfig(QTabWidget):
QApplication.clipboard().setText(v.toPlainText())) QApplication.clipboard().setText(v.toPlainText()))
d.exec_() d.exec_()
def change_ignored_folders(self):
d = TopLevel(self.device,
self.current_ignored_folders, parent=self)
if d.exec_() == d.Accepted:
self.current_ignored_folders = d.ignored_folders
def ignore_device(self): def ignore_device(self):
self.igntab.ignore_device(self.device.current_serial_num) self.igntab.ignore_device(self.device.current_serial_num)
self.base.b.setEnabled(False) self.base.b.setEnabled(False)
@ -464,8 +480,42 @@ class MTPConfig(QTabWidget):
if r and r != self.device.prefs['rules']: if r and r != self.device.prefs['rules']:
p['rules'] = r p['rules'] = r
if self.current_ignored_folders != self.initial_ignored_folders:
p['ignored_folders'] = self.current_ignored_folders
self.device.prefs[self.current_device_key] = p self.device.prefs[self.current_device_key] = p
class SendError(QDialog):
def __init__(self, gui, error):
QDialog.__init__(self, gui)
self.l = l = QVBoxLayout()
self.setLayout(l)
self.la = la = QLabel('<p>'+
_('You are trying to send books into the <b>%s</b> folder. This '
'folder is currently ignored by calibre when scanning the '
'device. You have tell calibre you want this folder scanned '
'in order to be able to send books to it. Click the '
'<b>configure</b> button below to send books to it.')%error.folder)
la.setWordWrap(True)
la.setMinimumWidth(500)
l.addWidget(la)
self.bb = bb = QDialogButtonBox(QDialogButtonBox.Close)
self.b = bb.addButton(_('Configure'), bb.AcceptRole)
bb.accepted.connect(self.accept)
bb.rejected.connect(self.reject)
l.addWidget(bb)
self.setWindowTitle(_('Cannot send to %s')%error.folder)
self.setWindowIcon(QIcon(I('dialog_error.png')))
self.resize(self.sizeHint())
def accept(self):
QDialog.accept(self)
dev = self.parent().device_manager.connected_device
dev.highlight_ignored_folders = True
self.parent().configure_connected_device()
dev.highlight_ignored_folders = False
if __name__ == '__main__': if __name__ == '__main__':
from calibre.gui2 import Application from calibre.gui2 import Application

View File

@ -10,7 +10,8 @@ __docformat__ = 'restructuredtext en'
from operator import attrgetter from operator import attrgetter
from PyQt4.Qt import (QTabWidget, QTreeWidget, QTreeWidgetItem, Qt, QDialog, from PyQt4.Qt import (QTabWidget, QTreeWidget, QTreeWidgetItem, Qt, QDialog,
QDialogButtonBox, QVBoxLayout, QSize, pyqtSignal, QIcon) QDialogButtonBox, QVBoxLayout, QSize, pyqtSignal, QIcon, QLabel,
QListWidget, QListWidgetItem)
from calibre.gui2 import file_icon_provider from calibre.gui2 import file_icon_provider
@ -95,25 +96,105 @@ class Browser(QDialog):
def current_item(self): def current_item(self):
return self.folders.current_item return self.folders.current_item
def browse(): class TopLevel(QDialog):
from calibre.gui2 import Application
def __init__(self, dev, ignored_folders=None, parent=None):
QDialog.__init__(self, parent)
self.l = l = QVBoxLayout()
self.setLayout(l)
self.la = la = QLabel('<p>'+ _('<b>Scanned folders:</b>') + ' ' +
_('You can select which top level folders calibre will '
'scan when searching this device for books.'))
la.setWordWrap(True)
l.addWidget(la)
self.tabs = QTabWidget(self)
l.addWidget(self.tabs)
self.widgets = []
for storage in dev.filesystem_cache.entries:
w = QListWidget(self)
w.storage = storage
self.tabs.addTab(w, storage.name)
self.widgets.append(w)
for child in sorted(storage.folders, key=attrgetter('name')):
i = QListWidgetItem(child.name)
i.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)
i.setCheckState(Qt.Unchecked if
dev.is_folder_ignored(storage, child.name,
ignored_folders=ignored_folders) else Qt.Checked)
w.addItem(i)
self.bb = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
self.bb.accepted.connect(self.accept)
self.bb.rejected.connect(self.reject)
self.sab = self.bb.addButton(_('Select &All'), self.bb.ActionRole)
self.sab.clicked.connect(self.select_all)
self.snb = self.bb.addButton(_('Select &None'), self.bb.ActionRole)
self.snb.clicked.connect(self.select_none)
l.addWidget(self.bb)
self.setWindowTitle(_('Choose folders to scan'))
self.setWindowIcon(QIcon(I('devices/tablet.png')))
self.resize(500, 500)
def select_all(self):
w = self.tabs.currentWidget()
for i in xrange(w.count()):
x = w.item(i)
x.setCheckState(Qt.Checked)
def select_none(self):
w = self.tabs.currentWidget()
for i in xrange(w.count()):
x = w.item(i)
x.setCheckState(Qt.Unchecked)
@property
def ignored_folders(self):
ans = {}
for w in self.widgets:
ans[unicode(w.storage.object_id)] = folders = []
for i in xrange(w.count()):
x = w.item(i)
if x.checkState() != Qt.Checked:
folders.append(unicode(x.text()))
return ans
def setup_device():
from calibre.devices.mtp.driver import MTP_DEVICE from calibre.devices.mtp.driver import MTP_DEVICE
from calibre.devices.scanner import DeviceScanner from calibre.devices.scanner import DeviceScanner
s = DeviceScanner() s = DeviceScanner()
s.scan() s.scan()
app = Application([])
app
dev = MTP_DEVICE(None) dev = MTP_DEVICE(None)
dev.startup() dev.startup()
cd = dev.detect_managed_devices(s.devices) cd = dev.detect_managed_devices(s.devices)
if cd is None: if cd is None:
raise ValueError('No MTP device found') raise ValueError('No MTP device found')
dev.open(cd, 'test') dev.open(cd, 'test')
return dev
def browse():
from calibre.gui2 import Application
app = Application([])
app
dev = setup_device()
d = Browser(dev.filesystem_cache) d = Browser(dev.filesystem_cache)
d.exec_() d.exec_()
dev.shutdown() dev.shutdown()
return d.current_item return d.current_item
if __name__ == '__main__': def top_level():
print (browse()) from calibre.gui2 import Application
app = Application([])
app
dev = setup_device()
d = TopLevel(dev, None)
d.exec_()
dev.shutdown()
return d.ignored_folders
if __name__ == '__main__':
# print (browse())
print ('Ignored:', top_level())

View File

@ -273,7 +273,10 @@ class EmailMixin(object): # {{{
5000) 5000)
if remove: if remove:
try: try:
next_id = self.library_view.next_id
self.library_view.model().delete_books_by_id(remove) self.library_view.model().delete_books_by_id(remove)
self.iactions['Remove Books'].library_ids_deleted2(remove,
next_id=next_id)
except: except:
import traceback import traceback
# Probably the user deleted the files, in any case, failing # Probably the user deleted the files, in any case, failing

View File

@ -17,10 +17,12 @@ from Queue import Queue, Empty
from io import BytesIO from io import BytesIO
from PyQt4.Qt import (QStyledItemDelegate, QTextDocument, QRectF, QIcon, Qt, from PyQt4.Qt import (QStyledItemDelegate, QTextDocument, QRectF, QIcon, Qt,
QApplication, QDialog, QVBoxLayout, QLabel, QDialogButtonBox, QStyle, QApplication, QDialog, QVBoxLayout, QLabel,
QStackedWidget, QWidget, QTableView, QGridLayout, QFontInfo, QPalette, QDialogButtonBox, QStyle, QStackedWidget, QWidget,
QTimer, pyqtSignal, QAbstractTableModel, QVariant, QSize, QListView, QTableView, QGridLayout, QFontInfo, QPalette, QTimer,
QPixmap, QAbstractListModel, QColor, QRect, QTextBrowser, QModelIndex) pyqtSignal, QAbstractTableModel, QVariant, QSize,
QListView, QPixmap, QAbstractListModel, QColor, QRect,
QTextBrowser, QStringListModel)
from PyQt4.QtWebKit import QWebView from PyQt4.QtWebKit import QWebView
from calibre.customize.ui import metadata_plugins from calibre.customize.ui import metadata_plugins
@ -44,6 +46,8 @@ class RichTextDelegate(QStyledItemDelegate): # {{{
def __init__(self, parent=None, max_width=160): def __init__(self, parent=None, max_width=160):
QStyledItemDelegate.__init__(self, parent) QStyledItemDelegate.__init__(self, parent)
self.max_width = max_width self.max_width = max_width
self.dummy_model = QStringListModel([' '], self)
self.dummy_index = self.dummy_model.index(0)
def to_doc(self, index, option=None): def to_doc(self, index, option=None):
doc = QTextDocument() doc = QTextDocument()
@ -66,7 +70,7 @@ class RichTextDelegate(QStyledItemDelegate): # {{{
return ans return ans
def paint(self, painter, option, index): def paint(self, painter, option, index):
QStyledItemDelegate.paint(self, painter, option, QModelIndex()) QStyledItemDelegate.paint(self, painter, option, self.dummy_index)
painter.save() painter.save()
painter.setClipRect(QRectF(option.rect)) painter.setClipRect(QRectF(option.rect))
painter.translate(option.rect.topLeft()) painter.translate(option.rect.topLeft())

View File

@ -158,36 +158,6 @@ Author matching is exact.</string>
<string>&amp;Automatic Adding</string> <string>&amp;Automatic Adding</string>
</attribute> </attribute>
<layout class="QGridLayout" name="gridLayout_3"> <layout class="QGridLayout" name="gridLayout_3">
<item row="3" column="0" colspan="2">
<widget class="QCheckBox" name="opt_auto_add_check_for_duplicates">
<property name="toolTip">
<string>If set, this option will causes calibre to check if a file
being auto-added is already in the calibre library.
If it is, a message will pop up asking you whether
you want to add it anyway.</string>
</property>
<property name="text">
<string>Check for &amp;duplicates when auto-adding files</string>
</property>
</widget>
</item>
<item row="0" column="0" colspan="2">
<widget class="QLabel" name="label">
<property name="text">
<string>Specify a folder. Any files you put into this folder will be automatically added to calibre (restart required).</string>
</property>
<property name="wordWrap">
<bool>true</bool>
</property>
</widget>
</item>
<item row="2" column="0" colspan="2">
<widget class="QLabel" name="label_2">
<property name="text">
<string>&lt;b&gt;WARNING:&lt;/b&gt; Files in the above folder will be deleted after being added to calibre.</string>
</property>
</widget>
</item>
<item row="5" column="0"> <item row="5" column="0">
<widget class="QGroupBox" name="groupBox"> <widget class="QGroupBox" name="groupBox">
<layout class="QVBoxLayout" name="verticalLayout_2"> <layout class="QVBoxLayout" name="verticalLayout_2">
@ -214,6 +184,36 @@ Author matching is exact.</string>
</layout> </layout>
</widget> </widget>
</item> </item>
<item row="0" column="0" colspan="2">
<widget class="QLabel" name="label">
<property name="text">
<string>Specify a folder. Any files you put into this folder will be automatically added to calibre (restart required).</string>
</property>
<property name="wordWrap">
<bool>true</bool>
</property>
</widget>
</item>
<item row="2" column="0" colspan="2">
<widget class="QLabel" name="label_2">
<property name="text">
<string>&lt;b&gt;WARNING:&lt;/b&gt; Files in the above folder will be deleted after being added to calibre.</string>
</property>
</widget>
</item>
<item row="3" column="0" colspan="2">
<widget class="QCheckBox" name="opt_auto_add_check_for_duplicates">
<property name="toolTip">
<string>If set, this option will causes calibre to check if a file
being auto-added is already in the calibre library.
If it is, a message will pop up asking you whether
you want to add it anyway.</string>
</property>
<property name="text">
<string>Check for &amp;duplicates when auto-adding files</string>
</property>
</widget>
</item>
<item row="5" column="1"> <item row="5" column="1">
<spacer name="horizontalSpacer_2"> <spacer name="horizontalSpacer_2">
<property name="orientation"> <property name="orientation">
@ -259,6 +259,19 @@ Author matching is exact.</string>
</property> </property>
</widget> </widget>
</item> </item>
<item row="6" column="0">
<spacer name="verticalSpacer_2">
<property name="orientation">
<enum>Qt::Vertical</enum>
</property>
<property name="sizeHint" stdset="0">
<size>
<width>20</width>
<height>40</height>
</size>
</property>
</spacer>
</item>
</layout> </layout>
</widget> </widget>
</widget> </widget>

View File

@ -60,9 +60,9 @@ class PublioStore(BasicStoreConfig, StorePlugin):
series = ''.join(data.xpath('./div[@class="desc"]/div[@class="detailShortList"]/div[last()]/a/@title')) series = ''.join(data.xpath('./div[@class="desc"]/div[@class="detailShortList"]/div[last()]/a/@title'))
title = title + ' (seria ' + series + ')' title = title + ' (seria ' + series + ')'
author = ', '.join(data.xpath('./div[@class="desc"]/div[@class="detailShortList"]/div[@class="row"][1]/a/@title')) author = ', '.join(data.xpath('./div[@class="desc"]/div[@class="detailShortList"]/div[@class="row"][1]/a/@title'))
price = ''.join(data.xpath('.//div[@class="priceBoxContener "]/div/ins/text()')) price = ''.join(data.xpath('.//div[@class="priceBox tk-museo-slab"]/ins/text()'))
if not price: if not price:
price = ''.join(data.xpath('.//div[@class="priceBoxContener "]/div/text()')) price = ''.join(data.xpath('.//div[@class="priceBox tk-museo-slab"]/text()')).strip()
formats = ', '.join(data.xpath('.//div[@class="formats"]/a/img/@alt')) formats = ', '.join(data.xpath('.//div[@class="formats"]/a/img/@alt'))
counter -= 1 counter -= 1

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More