commit a608dd5833
GRiker 2011-12-11 13:45:08 -07:00
133 changed files with 23428 additions and 22225 deletions

View File

@ -19,6 +19,66 @@
# new recipes:
# - title:
- version: 0.8.30
date: 2011-12-09
new features:
- title: "Get Books: Add amazon.es and amazon.it"
- title: "Bulk convert dialog: Disable the Use saved conversion settings checkbox when none of the books being converted has saved conversion settings"
- title: "ebook-viewer: And a command line switch to specify the position at which the file should be opened."
tickets: [899325]
- title: "Distribute calibre source code compressed with xz instead of gzip for a 40% reduction in size"
bug fixes:
- title: "Get Books: Fix ebooks.com and amazon.fr. Fix cover display in Diesel ebooks store."
- title: "HTML Input: Fix regression that broke processing of a small fraction of HTML files encoded in a multi-byte character encoding."
tickets: [899691]
- title: "Greatly reduce the delay at the end of a bulk metadata edit operation that operates on a very large number (thousands) of books"
- title: "Template language: Fix the subitems formatter function to split only when the period is surrounded by non-white space and not another period"
- title: "Fix ampersands in titles not displaying in the Cover Browser"
- title: "MOBI Output: Do not ignore an empty anchor at the end of a block element."
- title: "MOBI Output: Handle links to inline anchors placed inside large blocks of text correctly, i.e. the link should not point to the start of the block."
tickets: [899831]
- title: "E-book viewer: Fix searching for text that is represented as entities in the underlying HTML."
tickets: [899573]
- title: "Have the Esc shortcut perform exactly the same set of actions as clicking the clear button."
tickets: [900048]
- title: "Prevent the adding books dialog from becoming too wide"
- title: "Fix custom column editing not behaving correctly with the Previous button in the edit metadata dialog."
tickets: [899836]
- title: "T1 driver. More fixes to datetime handling to try to convince the T1's buggy firmware to not rescan metadata."
tickets: [899514]
- title: "Only allow searching via non accented author names if the user interface language in calibre is set to English."
tickets: [899227]
improved recipes:
- Die Zeit subscription
- Metro UK
- sueddeutsche.de
new recipes:
- title: Blues News
author: Oskar Kunicki
- title: "TVXS"
author: Hargikas
- version: 0.8.29
date: 2011-12-02
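The subitems fix in the 0.8.30 entry above describes a precise splitting rule. As a rough, hypothetical illustration of that rule only (not calibre's actual implementation of the subitems template function), a period surrounded by non-whitespace, non-period characters can be matched with look-around assertions:

import re

# Hypothetical sketch of the rule described in the changelog entry above;
# calibre's real subitems() implementation may differ in detail.
SPLIT_PAT = re.compile(r'(?<=[^.\s])\.(?=[^.\s])')

print(SPLIT_PAT.split('History.Military.WWII'))  # ['History', 'Military', 'WWII']
print(SPLIT_PAT.split('Mr. Smith'))              # no split: the period is followed by whitespace
print(SPLIT_PAT.split('A..B'))                   # no split: the period is adjacent to another period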

recipes/blues.recipe (new file, 26 lines)
View File

@ -0,0 +1,26 @@
__license__ = 'GPL v3'
__copyright__ = '2011, Oskar Kunicki <rakso at interia.pl>'
'''
Changelog:
2011-11-27
News from BluesRSS.info
'''
from calibre.web.feeds.news import BasicNewsRecipe
class BluesRSS(BasicNewsRecipe):
title = 'Blues News'
__author__ = 'Oskar Kunicki'
description ='Blues news from around the world'
publisher = 'BluesRSS.info'
category = 'news, blues, USA,UK'
oldest_article = 5
max_articles_per_feed = 100
language = 'en'
cover_url = 'http://bluesrss.info/cover.jpg'
masthead_url = 'http://bluesrss.info/cover.jpg'
no_stylesheets = True
remove_tags = [dict(name='div', attrs={'class':'wp-pagenavi'})]
feeds = [(u'News', u'http://bluesrss.info/feed/')]

View File

@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
'''
descopera.org
'''
from calibre.web.feeds.news import BasicNewsRecipe
class Descopera(BasicNewsRecipe):
title = u'Descoperă.org'
__author__ = 'Marius Ignătescu'
description = 'Descoperă. Placerea de a cunoaște'
publisher = 'descopera.org'
category = 'science, technology, culture, history, earth'
language = 'ro'
oldest_article = 14
max_articles_per_feed = 100
encoding = 'utf8'
no_stylesheets = True
extra_css = ' body{ font-family: Verdana,Helvetica,Arial,sans-serif } .introduction{font-weight: bold} .story-feature{display: block; padding: 0; border: 1px solid; width: 40%; font-size: small} .story-feature h2{text-align: center; text-transform: uppercase} '
keep_only_tags = [dict(name='div', attrs={'class':['post']})]
remove_tags = [dict(name='div', attrs={'class':['topnav', 'box_a', 'shr-bookmarks shr-bookmarks-expand shr-bookmarks-center shr-bookmarks-bg-knowledge']})]
remove_attributes = ['width','height']
cover_url = 'http://www.descopera.org/wp-content/themes/dorg/styles/default/img/b_top.png?width=400'
feeds = [(u'Articles', u'http://www.descopera.org/feed/')]
def preprocess_html(self, soup):
return self.adeify_images(soup)

recipes/icons/blues.png (new binary file, 910 B)
(unnamed new binary icon, 9.3 KiB)
recipes/icons/zaman.png (new binary file, 999 B)

View File

@ -15,13 +15,13 @@ try:
SHOWDEBUG1 = mlog.showdebuglevel(1)
SHOWDEBUG2 = mlog.showdebuglevel(2)
except:
-print 'drMerry debuglogger not found, skipping debug options'
+#print 'drMerry debuglogger not found, skipping debug options'
SHOWDEBUG0 = False
SHOWDEBUG1 = False
SHOWDEBUG2 = False
KEEPSTATS = False
-print ('level0: %s\nlevel1: %s\nlevel2: %s' % (SHOWDEBUG0,SHOWDEBUG1,SHOWDEBUG2))
+#print ('level0: %s\nlevel1: %s\nlevel2: %s' % (SHOWDEBUG0,SHOWDEBUG1,SHOWDEBUG2))
''' Version 1.2, updated cover image to match the changed website.
added info date on title added info date on title

View File

@ -5,8 +5,8 @@ class AdvancedUserRecipe1306097511(BasicNewsRecipe):
description = 'News as provide by The Metro -UK'
__author__ = 'Dave Asbury'
#last update 3/12/11
cover_url = 'http://profile.ak.fbcdn.net/hprofile-ak-snc4/276636_117118184990145_2132092232_n.jpg'
no_stylesheets = True
oldest_article = 1
max_articles_per_feed = 20
@ -32,9 +32,11 @@ class AdvancedUserRecipe1306097511(BasicNewsRecipe):
dict(name='div', attrs={'class':'art-lft'}),
dict(name='p')
]
-remove_tags = [dict(name='div', attrs={'class':[ 'news m12 clrd clr-b p5t shareBtm', 'commentForm', 'metroCommentInnerWrap',
-'art-rgt','pluck-app pluck-comm','news m12 clrd clr-l p5t', 'flt-r' ]}),
-dict(attrs={'class':[ 'metroCommentFormWrap','commentText','commentsNav','avatar','submDateAndTime']})
+remove_tags = [
+dict(name = 'div',attrs={'id' : ['comments-news','formSubmission']}),
+dict(name='div', attrs={'class':[ 'news m12 clrd clr-b p5t shareBtm', 'commentForm', 'metroCommentInnerWrap',
+'art-rgt','pluck-app pluck-comm','news m12 clrd clr-l p5t', 'flt-r','username','clrd' ]}),
+dict(attrs={'class':['username', 'metroCommentFormWrap','commentText','commentsNav','avatar','submDateAndTime','addYourComment','displayName']})
,dict(name='div', attrs={'class' : 'clrd art-fd fd-gr1-b'})
]
feeds = [

View File

@ -0,0 +1,21 @@
from calibre.web.feeds.news import BasicNewsRecipe
class rynekzdrowia(BasicNewsRecipe):
title = u'Rynek Zdrowia'
__author__ = u'spi630'
language = 'pl'
masthead_url = 'http://k.rynekzdrowia.pl/images/headerLogo.png'
cover_url = 'http://k.rynekzdrowia.pl/images/headerLogo.png'
oldest_article = 3
max_articles_per_feed = 25
no_stylesheets = True
auto_cleanup = True
remove_empty_feeds=True
remove_tags_before = dict(name='h3')
feeds = [(u'Finanse i Zarz\u0105dzanie', u'http://www.rynekzdrowia.pl/Kanal/finanse.html'), (u'Inwestycje', u'http://www.rynekzdrowia.pl/Kanal/inwestycje.html'), (u'Aparatura i wyposa\u017cenie', u'http://www.rynekzdrowia.pl/Kanal/aparatura.html'), (u'Informatyka', u'http://www.rynekzdrowia.pl/Kanal/informatyka.html'), (u'Prawo', u'http://www.rynekzdrowia.pl/Kanal/prawo.html'), (u'Polityka zdrowotna', u'http://www.rynekzdrowia.pl/Kanal/polityka_zdrowotna.html'), (u'Ubezpieczenia Zdrowotne', u'http://www.rynekzdrowia.pl/Kanal/ubezpieczenia.html'), (u'Farmacja', u'http://www.rynekzdrowia.pl/Kanal/farmacja.html'), (u'Badania i rozw\xf3j', u'http://www.rynekzdrowia.pl/Kanal/badania.html'), (u'Nauka', u'http://www.rynekzdrowia.pl/Kanal/nauka.html'), (u'Po godzinach', u'http://www.rynekzdrowia.pl/Kanal/godziny.html'), (u'Us\u0142ugi medyczne', u'http://www.rynekzdrowia.pl/Kanal/uslugi.html')]
def print_version(self, url):
url = url.replace('.html', ',drukuj.html')
return url

View File

@ -12,7 +12,7 @@ class Sueddeutsche(BasicNewsRecipe):
title = u'sueddeutsche.de'
description = 'News from Germany'
-__author__ = 'Oliver Niesner and Armin Geller'
+__author__ = 'Oliver Niesner and Armin Geller' #AGe 2011-11-25
use_embedded_content = False
timefmt = ' [%d %b %Y]'
oldest_article = 7
@ -22,7 +22,7 @@ class Sueddeutsche(BasicNewsRecipe):
encoding = 'utf-8'
remove_javascript = True
cover_url = 'http://polpix.sueddeutsche.com/polopoly_fs/1.1219199.1322239289!/image/image.jpg_gen/derivatives/860x860/image.jpg' # 2011-11-25 AGe
remove_tags = [ dict(name='link'), dict(name='iframe'),
dict(name='div', attrs={'id':["bookmarking","themenbox","artikelfoot","CAD_AD",
@ -47,7 +47,7 @@ class Sueddeutsche(BasicNewsRecipe):
extra_css = '''
h2{font-family:Arial,Helvetica,sans-serif; font-size: x-small; color: #003399;}
-a{font-family:Arial,Helvetica,sans-serif; font-size: x-small; font-style:italic;}
+a{font-family:Arial,Helvetica,sans-serif; font-style:italic;}
.dachzeile p{font-family:Arial,Helvetica,sans-serif; font-size: x-small; }
h1{ font-family:Arial,Helvetica,sans-serif; font-size:x-large; font-weight:bold;}
.artikelTeaser{font-family:Arial,Helvetica,sans-serif; font-size: x-small; font-weight:bold; }

recipes/tvxs.recipe (new file, 61 lines)
View File

@ -0,0 +1,61 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from calibre.web.feeds.recipes import BasicNewsRecipe
class TVXS(BasicNewsRecipe):
title = 'TVXS'
__author__ = 'hargikas'
description = 'News from Greece'
max_articles_per_feed = 100
oldest_article = 3
simultaneous_downloads = 1
publisher = 'TVXS'
category = 'news, GR'
language = 'el'
encoding = None
use_embedded_content = False
remove_empty_feeds = True
#conversion_options = { 'linearize_tables': True}
no_stylesheets = True
remove_tags_before = dict(name='h1',attrs={'class':'print-title'})
remove_tags_after = dict(name='div',attrs={'class':'field field-type-relevant-content field-field-relevant-articles'})
remove_attributes = ['width', 'src', 'header', 'footer']
feeds = [(u'Ελλάδα', 'http://tvxs.gr/feeds/2/feed.xml'),
(u'Κόσμος', 'http://tvxs.gr/feeds/5/feed.xml'),
(u'Τοπικά Νέα', 'http://tvxs.gr/feeds/5363/feed.xml'),
(u'Sci Tech', 'http://tvxs.gr/feeds/26/feed.xml'),
(u'Αθλητικά', 'http://tvxs.gr/feeds/243/feed.xml'),
(u'Internet & ΜΜΕ', 'http://tvxs.gr/feeds/32/feed.xml'),
(u'Καλά Νέα', 'http://tvxs.gr/feeds/914/feed.xml'),
(u'Απόψεις', 'http://tvxs.gr/feeds/1109/feed.xml'),
(u'Πολιτισμός', 'http://tvxs.gr/feeds/1317/feed.xml'),
(u'Greenlife', 'http://tvxs.gr/feeds/3/feed.xml'),
(u'Ιστορία', 'http://tvxs.gr/feeds/1573/feed.xml'),
(u'Χιούμορ', 'http://tvxs.gr/feeds/692/feed.xml')]
def print_version(self, url):
import urllib2, urlparse, StringIO, gzip
fp = urllib2.urlopen(url)
data = fp.read()
if fp.info()['content-encoding'] == 'gzip':
gzip_data = StringIO.StringIO(data)
gzipper = gzip.GzipFile(fileobj=gzip_data)
data = gzipper.read()
fp.close()
pos_1 = data.find('<a href="/print/')
if pos_1 == -1:
return url
pos_2 = data.find('">', pos_1)
if pos_2 == -1:
return url
pos_1 += len('<a href="')
new_url = data[pos_1:pos_2]
print_url = urlparse.urljoin(url, new_url)
return print_url
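The print_version() above scrapes whatever '/print/...' href the article page advertises; urljoin() is what turns that root-relative href into an absolute URL. A tiny illustration with a made-up article URL and print path (Python 2, matching the recipe above):

import urlparse  # Python 2 module, as used in the recipe above

article = 'http://tvxs.gr/news/some-article-slug'   # hypothetical article URL
print(urlparse.urljoin(article, '/print/12345'))    # -> http://tvxs.gr/print/12345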

View File

@ -5,9 +5,10 @@ from calibre.web.feeds.news import BasicNewsRecipe
class Zaman (BasicNewsRecipe):
title = u'ZAMAN Gazetesi'
description = ' Zaman Gazetesi''nin internet sitesinden günlük haberler'
__author__ = u'thomass'
oldest_article = 2
-max_articles_per_feed =100
+max_articles_per_feed =50
# no_stylesheets = True
#delay = 1
#use_embedded_content = False
@ -16,19 +17,19 @@ class Zaman (BasicNewsRecipe):
category = 'news, haberler,TR,gazete'
language = 'tr'
publication_type = 'newspaper '
-extra_css = ' body{ font-family: Verdana,Helvetica,Arial,sans-serif } .introduction{font-weight: bold} .story-feature{display: block; padding: 0; border: 1px solid; width: 40%; font-size: small} .story-feature h2{text-align: center; text-transform: uppercase} '
+extra_css = '.buyukbaslik{font-weight: bold; font-size: 18px;color:#0000FF}'#body{ font-family: Verdana,Helvetica,Arial,sans-serif } .introduction{font-weight: bold} .story-feature{display: block; padding: 0; border: 1px solid; width: 40%; font-size: small} .story-feature h2{text-align: center; text-transform: uppercase} '
conversion_options = {
'tags' : category
,'language' : language
,'publisher' : publisher
-,'linearize_tables': False
+,'linearize_tables': True
}
cover_img_url = 'https://fbcdn-profile-a.akamaihd.net/hprofile-ak-snc4/188140_81722291869_2111820_n.jpg'
masthead_url = 'http://medya.zaman.com.tr/extentions/zaman.com.tr/img/section/logo-section.png'
-keep_only_tags = [dict(name='div', attrs={'id':[ 'news-detail-content']}), dict(name='td', attrs={'class':['columnist-detail','columnist_head']}) ]
+#keep_only_tags = [dict(name='div', attrs={'id':[ 'news-detail-content']}), dict(name='td', attrs={'class':['columnist-detail','columnist_head']}) ]
-remove_tags = [ dict(name='div', attrs={'id':['news-detail-news-text-font-size','news-detail-gallery','news-detail-news-bottom-social']}),dict(name='div', attrs={'class':['radioEmbedBg','radyoProgramAdi']}),dict(name='a', attrs={'class':['webkit-html-attribute-value webkit-html-external-link']}),dict(name='table', attrs={'id':['yaziYorumTablosu']}),dict(name='img', attrs={'src':['http://medya.zaman.com.tr/pics/paylas.gif','http://medya.zaman.com.tr/extentions/zaman.com.tr/img/columnist/ma-16.png']})]
+remove_tags = [ dict(name='img', attrs={'src':['http://medya.zaman.com.tr/zamantryeni/pics/zamanonline.gif']})]#,dict(name='div', attrs={'class':['radioEmbedBg','radyoProgramAdi']}),dict(name='a', attrs={'class':['webkit-html-attribute-value webkit-html-external-link']}),dict(name='table', attrs={'id':['yaziYorumTablosu']}),dict(name='img', attrs={'src':['http://medya.zaman.com.tr/pics/paylas.gif','http://medya.zaman.com.tr/extentions/zaman.com.tr/img/columnist/ma-16.png']})
#remove_attributes = ['width','height']
@ -37,7 +38,8 @@ class Zaman (BasicNewsRecipe):
feeds = [
( u'Anasayfa', u'http://www.zaman.com.tr/anasayfa.rss'),
( u'Son Dakika', u'http://www.zaman.com.tr/sondakika.rss'),
-( u'En çok Okunanlar', u'http://www.zaman.com.tr/max_all.rss'),
+#( u'En çok Okunanlar', u'http://www.zaman.com.tr/max_all.rss'),
#( u'Manşet', u'http://www.zaman.com.tr/manset.rss'),
( u'Gündem', u'http://www.zaman.com.tr/gundem.rss'),
( u'Yazarlar', u'http://www.zaman.com.tr/yazarlar.rss'),
( u'Politika', u'http://www.zaman.com.tr/politika.rss'),
@ -45,11 +47,20 @@ class Zaman (BasicNewsRecipe):
( u'Dış Haberler', u'http://www.zaman.com.tr/dishaberler.rss'),
( u'Yorumlar', u'http://www.zaman.com.tr/yorumlar.rss'),
( u'Röportaj', u'http://www.zaman.com.tr/roportaj.rss'),
( u'Dizi Yazı', u'http://www.zaman.com.tr/dizi.rss'),
( u'Bilişim', u'http://www.zaman.com.tr/bilisim.rss'),
( u'Otomotiv', u'http://www.zaman.com.tr/otomobil.rss'),
( u'Spor', u'http://www.zaman.com.tr/spor.rss'),
( u'Kürsü', u'http://www.zaman.com.tr/kursu.rss'),
( u'Eğitim', u'http://www.zaman.com.tr/egitim.rss'),
( u'Kültür Sanat', u'http://www.zaman.com.tr/kultursanat.rss'),
( u'Televizyon', u'http://www.zaman.com.tr/televizyon.rss'),
-( u'Manşet', u'http://www.zaman.com.tr/manset.rss'),
+( u'Aile', u'http://www.zaman.com.tr/aile.rss'),
( u'Cuma Eki', u'http://www.zaman.com.tr/cuma.rss'),
( u'Cumaertesi Eki', u'http://www.zaman.com.tr/cumaertesi.rss'),
( u'Pazar Eki', u'http://www.zaman.com.tr/pazar.rss'),
]
def print_version(self, url):
return url.replace('http://www.zaman.com.tr/haber.do?haberno=', 'http://www.zaman.com.tr/yazdir.do?haberno=')

View File

@ -131,7 +131,7 @@ class ZeitEPUBAbo(BasicNewsRecipe):
browser.form['pass']=self.password
browser.submit()
# now find the correct file, we will still use the ePub file
-epublink = browser.find_link(text_regex=re.compile('.*Ausgabe als Datei im ePub-Format.*'))
+epublink = browser.find_link(text_regex=re.compile('.*Download als Datei im ePub-Format für eReader.*'))
response = browser.follow_link(epublink)
self.report_progress(1,_('next step'))

View File

@ -12,14 +12,14 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-" "Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n" "devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-11-25 14:01+0000\n" "POT-Creation-Date: 2011-11-25 14:01+0000\n"
"PO-Revision-Date: 2011-09-27 15:33+0000\n" "PO-Revision-Date: 2011-12-03 15:11+0000\n"
"Last-Translator: Kovid Goyal <Unknown>\n" "Last-Translator: Yuri Chornoivan <yurchor@gmail.com>\n"
"Language-Team: Ukrainian <translation-team-uk@lists.sourceforge.net>\n" "Language-Team: Ukrainian <translation-team-uk@lists.sourceforge.net>\n"
"MIME-Version: 1.0\n" "MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n" "Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n" "Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-11-26 05:43+0000\n" "X-Launchpad-Export-Date: 2011-12-04 04:43+0000\n"
"X-Generator: Launchpad (build 14381)\n" "X-Generator: Launchpad (build 14418)\n"
"Language: uk\n" "Language: uk\n"
#. name for aaa #. name for aaa
@ -17956,7 +17956,7 @@ msgstr "ндоола"
#. name for nds
msgid "German; Low"
-msgstr ""
+msgstr "нижньонімецька"
#. name for ndt
msgid "Ndunga"

View File

@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
-numeric_version = (0, 8, 29)
+numeric_version = (0, 8, 30)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

View File

@ -451,6 +451,10 @@ class CatalogPlugin(Plugin): # {{{
'series_index','series','size','tags','timestamp',
'title_sort','title','uuid','languages'])
all_custom_fields = set(db.custom_field_keys())
for field in list(all_custom_fields):
fm = db.field_metadata[field]
if fm['datatype'] == 'series':
all_custom_fields.add(field+'_index')
all_fields = all_std_fields.union(all_custom_fields)
if opts.fields != 'all':

View File

@ -255,7 +255,7 @@ class LRXMetadataReader(MetadataReaderPlugin):
class MOBIMetadataReader(MetadataReaderPlugin):
name = 'Read MOBI metadata'
-file_types = set(['mobi', 'prc', 'azw', 'azw4'])
+file_types = set(['mobi', 'prc', 'azw', 'azw4', 'pobi'])
description = _('Read metadata from %s files')%'MOBI'
def get_metadata(self, stream, ftype):
@ -1155,6 +1155,26 @@ class StoreAmazonFRKindleStore(StoreBase):
formats = ['KINDLE']
affiliate = True
class StoreAmazonITKindleStore(StoreBase):
name = 'Amazon IT Kindle'
author = 'Charles Haley'
description = u'eBook Kindle a prezzi incredibili'
actual_plugin = 'calibre.gui2.store.stores.amazon_it_plugin:AmazonITKindleStore'
headquarters = 'IT'
formats = ['KINDLE']
affiliate = True
class StoreAmazonESKindleStore(StoreBase):
name = 'Amazon ES Kindle'
author = 'Charles Haley'
description = u'eBook Kindle en España'
actual_plugin = 'calibre.gui2.store.stores.amazon_es_plugin:AmazonESKindleStore'
headquarters = 'ES'
formats = ['KINDLE']
affiliate = True
class StoreAmazonUKKindleStore(StoreBase):
name = 'Amazon UK Kindle'
author = 'Charles Haley'
@ -1554,7 +1574,9 @@ plugins += [
StoreArchiveOrgStore,
StoreAmazonKindleStore,
StoreAmazonDEKindleStore,
StoreAmazonESKindleStore,
StoreAmazonFRKindleStore,
StoreAmazonITKindleStore,
StoreAmazonUKKindleStore,
StoreBaenWebScriptionStore,
StoreBNStore,

View File

@ -143,6 +143,9 @@ class ANDROID(USBMS):
# Kobo
0x2237: { 0x2208 : [0x0226] },
# Lenovo
0x17ef : { 0x7421 : [0x0216] },
}
EBOOK_DIR_MAIN = ['eBooks/import', 'wordplayer/calibretransfer', 'Books',
'sdcard/ebooks']
@ -155,7 +158,7 @@ class ANDROID(USBMS):
'GT-I5700', 'SAMSUNG', 'DELL', 'LINUX', 'GOOGLE', 'ARCHOS',
'TELECHIP', 'HUAWEI', 'T-MOBILE', 'SEMC', 'LGE', 'NVIDIA',
'GENERIC-', 'ZTE', 'MID', 'QUALCOMM', 'PANDIGIT', 'HYSTON',
-'VIZIO', 'GOOGLE', 'FREESCAL', 'KOBO_INC']
+'VIZIO', 'GOOGLE', 'FREESCAL', 'KOBO_INC', 'LENOVO']
WINDOWS_MAIN_MEM = ['ANDROID_PHONE', 'A855', 'A853', 'INC.NEXUS_ONE',
'__UMS_COMPOSITE', '_MB200', 'MASS_STORAGE', '_-_CARD', 'SGH-I897',
'GT-I9000', 'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID',
@ -167,12 +170,13 @@ class ANDROID(USBMS):
'MB525', 'ANDROID2.3', 'SGH-I997', 'GT-I5800_CARD', 'MB612',
'GT-S5830_CARD', 'GT-S5570_CARD', 'MB870', 'MID7015A',
'ALPANDIGITAL', 'ANDROID_MID', 'VTAB1008', 'EMX51_BBG_ANDROI',
-'UMS', '.K080', 'P990', 'LTE', 'MB853', 'GT-S5660_CARD']
+'UMS', '.K080', 'P990', 'LTE', 'MB853', 'GT-S5660_CARD', 'A107']
WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD',
'__UMS_COMPOSITE', 'SGH-I997_CARD', 'MB870', 'ALPANDIGITAL',
-'ANDROID_MID', 'P990_SD_CARD', '.K080', 'LTE_CARD', 'MB853']
+'ANDROID_MID', 'P990_SD_CARD', '.K080', 'LTE_CARD', 'MB853',
+'A1-07___C0541A4F']
OSX_MAIN_MEM = 'Android Device Main Memory'

View File

@ -287,7 +287,7 @@ class KINDLE2(KINDLE):
name = 'Kindle 2/3 Device Interface'
description = _('Communicate with the Kindle 2/3 eBook reader.')
-FORMATS = KINDLE.FORMATS + ['pdf', 'azw4']
+FORMATS = KINDLE.FORMATS + ['pdf', 'azw4', 'pobi']
DELETE_EXTS = KINDLE.DELETE_EXTS
PRODUCT_ID = [0x0002, 0x0004]

View File

@ -224,7 +224,7 @@ class TREKSTOR(USBMS):
FORMATS = ['epub', 'txt', 'pdf']
VENDOR_ID = [0x1e68]
-PRODUCT_ID = [0x0041, 0x0042, 0x0052,
+PRODUCT_ID = [0x0041, 0x0042, 0x0052, 0x004e,
0x003e # This is for the EBOOK_PLAYER_5M https://bugs.launchpad.net/bugs/792091
]
BCD = [0x0002]

View File

@ -296,6 +296,13 @@ class PRST1(USBMS):
lpath = row[0].replace('\\', '/')
db_books[lpath] = row[1]
# Work-around for Sony Bug (SD Card DB not using right SQLite sequence)
if source_id == 1:
sdcard_sequence_start = '4294967296'
query = 'UPDATE sqlite_sequence SET seq = ? WHERE seq < ?'
t = (sdcard_sequence_start, sdcard_sequence_start,)
cursor.execute(query, t)
for book in booklist:
# Run through plugboard if needed
if plugboard is not None:
@ -322,12 +329,10 @@ class PRST1(USBMS):
title = newmi.title or _('Unknown')
# Get modified date
# If there was a detected offset, use that. Otherwise use UTC (same as Sony software)
modified_date = os.path.getmtime(book.path) * 1000
if self.device_offset is not None:
modified_date = modified_date + self.device_offset
else:
time_offset = -time.altzone if time.daylight else -time.timezone
modified_date = modified_date + (time_offset * 1000)
if lpath not in db_books:
query = '''
@ -578,17 +583,17 @@ class PRST1(USBMS):
# Setting this to the SONY periodical schema apparently causes errors
# with some periodicals, therefore set it to null, since the special
# periodical navigation doesn't work anyway.
-periodical_schema = 'null'
+periodical_schema = None
query = '''
UPDATE books
-SET conforms_to = %s,
+SET conforms_to = ?,
periodical_name = ?,
description = ?,
publication_date = ?
WHERE _id = ?
-'''%periodical_schema
+'''
-t = (name, None, pubdate, book.bookId,)
+t = (periodical_schema, name, None, pubdate, book.bookId,)
cursor.execute(query, t)
connection.commit()

View File

@ -30,7 +30,7 @@ BOOK_EXTENSIONS = ['lrf', 'rar', 'zip', 'rtf', 'lit', 'txt', 'txtz', 'text', 'ht
'html', 'htmlz', 'xhtml', 'pdf', 'pdb', 'pdr', 'prc', 'mobi', 'azw', 'doc',
'epub', 'fb2', 'djv', 'djvu', 'lrx', 'cbr', 'cbz', 'cbc', 'oebzip',
'rb', 'imp', 'odt', 'chm', 'tpz', 'azw1', 'pml', 'pmlz', 'mbp', 'tan', 'snb',
-'xps', 'oxps', 'azw4', 'book', 'zbf']
+'xps', 'oxps', 'azw4', 'book', 'zbf', 'pobi']
class HTMLRenderer(object):

View File

@ -53,7 +53,6 @@ def substitute_entites(raw):
_CHARSET_ALIASES = { "macintosh" : "mac-roman",
"x-sjis" : "shift-jis" }
def force_encoding(raw, verbose, assume_utf8=False):
from calibre.constants import preferred_encoding
try:
@ -74,6 +73,36 @@ def force_encoding(raw, verbose, assume_utf8=False):
encoding = 'utf-8'
return encoding
def detect_xml_encoding(raw, verbose=False, assume_utf8=False):
if not raw or isinstance(raw, unicode):
return raw, None
for x in ('utf8', 'utf-16-le', 'utf-16-be'):
bom = getattr(codecs, 'BOM_'+x.upper().replace('-16', '16').replace(
'-', '_'))
if raw.startswith(bom):
return raw[len(bom):], x
encoding = None
for pat in ENCODING_PATS:
match = pat.search(raw)
if match:
encoding = match.group(1)
break
if encoding is None:
encoding = force_encoding(raw, verbose, assume_utf8=assume_utf8)
if encoding.lower().strip() == 'macintosh':
encoding = 'mac-roman'
if encoding.lower().replace('_', '-').strip() in (
'gb2312', 'chinese', 'csiso58gb231280', 'euc-cn', 'euccn',
'eucgb2312-cn', 'gb2312-1980', 'gb2312-80', 'iso-ir-58'):
# Microsoft Word exports to HTML with encoding incorrectly set to
# gb2312 instead of gbk. gbk is a superset of gb2312, anyway.
encoding = 'gbk'
try:
codecs.lookup(encoding)
except LookupError:
encoding = 'utf-8'
return raw, encoding
def xml_to_unicode(raw, verbose=False, strip_encoding_pats=False,
resolve_entities=False, assume_utf8=False):
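The BOM loop at the top of the new detect_xml_encoding() builds codec attribute names dynamically ('utf8' becomes codecs.BOM_UTF8, 'utf-16-le' becomes codecs.BOM_UTF16_LE, and so on). A small standalone sketch of just that detection step, with made-up sample bytes and a hypothetical helper name:

import codecs

def sniff_bom(raw):
    # Same name-mangling trick as above: 'utf-16-le' -> codecs.BOM_UTF16_LE, etc.
    for x in ('utf8', 'utf-16-le', 'utf-16-be'):
        bom = getattr(codecs, 'BOM_' + x.upper().replace('-16', '16').replace('-', '_'))
        if raw.startswith(bom):
            return raw[len(bom):], x
    return raw, None

print(sniff_bom(codecs.BOM_UTF8 + b'<html/>'))   # (b'<html/>', 'utf8')
print(sniff_bom(b'<?xml version="1.0"?>'))       # no BOM: (b'<?xml version="1.0"?>', None)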
@ -83,36 +112,11 @@ def xml_to_unicode(raw, verbose=False, strip_encoding_pats=False,
prints a warning if detection confidence is < 100%
@return: (unicode, encoding used)
'''
encoding = None
if not raw:
-return u'', encoding
+return u'', None
raw, encoding = detect_xml_encoding(raw, verbose=verbose,
assume_utf8=assume_utf8)
if not isinstance(raw, unicode):
if raw.startswith(codecs.BOM_UTF8):
raw, encoding = raw.decode('utf-8')[1:], 'utf-8'
elif raw.startswith(codecs.BOM_UTF16_LE):
raw, encoding = raw.decode('utf-16-le')[1:], 'utf-16-le'
elif raw.startswith(codecs.BOM_UTF16_BE):
raw, encoding = raw.decode('utf-16-be')[1:], 'utf-16-be'
if not isinstance(raw, unicode):
for pat in ENCODING_PATS:
match = pat.search(raw)
if match:
encoding = match.group(1)
break
if encoding is None:
encoding = force_encoding(raw, verbose, assume_utf8=assume_utf8)
try:
if encoding.lower().strip() == 'macintosh':
encoding = 'mac-roman'
if encoding.lower().replace('_', '-').strip() in (
'gb2312', 'chinese', 'csiso58gb231280', 'euc-cn', 'euccn',
'eucgb2312-cn', 'gb2312-1980', 'gb2312-80', 'iso-ir-58'):
# Microsoft Word exports to HTML with encoding incorrectly set to
# gb2312 instead of gbk. gbk is a superset of gb2312, anyway.
encoding = 'gbk'
raw = raw.decode(encoding, 'replace')
except LookupError:
encoding = 'utf-8'
raw = raw.decode(encoding, 'replace')
if strip_encoding_pats:
@ -120,6 +124,4 @@ def xml_to_unicode(raw, verbose=False, strip_encoding_pats=False,
if resolve_entities:
raw = substitute_entites(raw)
return raw, encoding

View File

@ -17,6 +17,10 @@ from calibre.ptempfile import PersistentTemporaryDirectory
from calibre.utils.ipc.server import Server
from calibre.utils.ipc.job import ParallelJob
# If the specified screen has either dimension larger than this value, no image
# rescaling is done (we assume that it is a tablet output profile)
MAX_SCREEN_SIZE = 3000
def extract_comic(path_to_comic_file):
'''
Un-archive the comic file.
@ -141,7 +145,7 @@ class PageProcessor(list): # {{{
newsizey = int(newsizex / aspect)
deltax = 0
deltay = (SCRHEIGHT - newsizey) / 2
-if newsizex < 20000 and newsizey < 20000:
+if newsizex < MAX_SCREEN_SIZE and newsizey < MAX_SCREEN_SIZE:
# Too large and resizing fails, so better
# to leave it as original size
wand.size = (newsizex, newsizey)
@ -165,14 +169,14 @@ class PageProcessor(list): # {{{
newsizey = int(newsizex / aspect)
deltax = 0
deltay = (wscreeny - newsizey) / 2
-if newsizex < 20000 and newsizey < 20000:
+if newsizex < MAX_SCREEN_SIZE and newsizey < MAX_SCREEN_SIZE:
# Too large and resizing fails, so better
# to leave it as original size
wand.size = (newsizex, newsizey)
wand.set_border_color(pw)
wand.add_border(pw, deltax, deltay)
else:
-if SCRWIDTH < 20000 and SCRHEIGHT < 20000:
+if SCRWIDTH < MAX_SCREEN_SIZE and SCRHEIGHT < MAX_SCREEN_SIZE:
wand.size = (SCRWIDTH, SCRHEIGHT)
if not self.opts.dont_sharpen:

View File

@ -18,7 +18,7 @@ from functools import partial
from itertools import izip
from calibre.customize.conversion import InputFormatPlugin
-from calibre.ebooks.chardet import xml_to_unicode
+from calibre.ebooks.chardet import detect_xml_encoding
from calibre.customize.conversion import OptionRecommendation
from calibre.constants import islinux, isbsd, iswindows
from calibre import unicode_path, as_unicode
@ -121,7 +121,7 @@ class HTMLFile(object):
if not self.is_binary:
if not encoding:
-encoding = xml_to_unicode(src[:4096], verbose=verbose)[-1]
+encoding = detect_xml_encoding(src[:4096], verbose=verbose)[1]
self.encoding = encoding
else:
self.encoding = encoding
@ -148,7 +148,11 @@ class HTMLFile(object):
url = match.group(i)
if url:
break
try:
link = self.resolve(url)
except ValueError:
# Unparseable URL, ignore
continue
if link not in self.links:
self.links.append(link)

View File

@ -30,6 +30,8 @@ CONTENT_TAGS = set(['img', 'hr', 'br'])
NOT_VTAGS = HEADER_TAGS | NESTABLE_TAGS | TABLE_TAGS | SPECIAL_TAGS | \
CONTENT_TAGS
LEAF_TAGS = set(['base', 'basefont', 'frame', 'link', 'meta', 'area', 'br',
'col', 'hr', 'img', 'input', 'param'])
PAGE_BREAKS = set(['always', 'left', 'right'])
COLLAPSE = re.compile(r'[ \t\r\n\v]+')
@ -246,7 +248,17 @@ class MobiMLizer(object):
last.text = None
else:
last = bstate.body[-1]
# We use append instead of addprevious so that inline
# anchors in large blocks point to the correct place. See
# https://bugs.launchpad.net/calibre/+bug/899831
# This could potentially break if inserting an anchor at
# this point in the markup is illegal, but I cannot think
# of such a case offhand.
if barename(last.tag) in LEAF_TAGS:
last.addprevious(anchor)
else:
last.append(anchor)
istate.ids.clear()
if not text:
return
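To make the append/addprevious distinction in the anchor-placement hunk above concrete, here is a small standalone lxml sketch with toy markup (unrelated to calibre's internal tree), showing where the anchor ends up in each case:

from lxml import etree

body = etree.fromstring('<body><p>a long block of text</p></body>')
last = body[-1]                      # the <p> block
anchor = etree.Element('a', id='dest')

last.append(anchor)                  # anchor lands inside the block, after its text
print(etree.tostring(body))          # <body><p>a long block of text<a id="dest"/></p></body>

# addprevious() would instead insert the anchor as a sibling *before* the element,
# which is why it is reserved for leaf tags above:
body2 = etree.fromstring('<body><img src="x.png"/></body>')
leaf = body2[-1]
leaf.addprevious(etree.Element('a', id='dest2'))
print(etree.tostring(body2))         # <body><a id="dest2"/><img src="x.png"/></body>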
@ -528,7 +540,11 @@ class MobiMLizer(object):
old_mim = self.opts.mobi_ignore_margins
self.opts.mobi_ignore_margins = False
-if text or tag in CONTENT_TAGS or tag in NESTABLE_TAGS:
+if (text or tag in CONTENT_TAGS or tag in NESTABLE_TAGS or (
+# We have an id but no text and no children, the id should still
+# be added.
+istate.ids and tag in ('a', 'span', 'i', 'b', 'u') and
+len(elem)==0)):
self.mobimlize_content(tag, text, bstate, istates)
for child in elem:
self.mobimlize_elem(child, stylizer, bstate, istates)

View File

@ -178,7 +178,11 @@ class Serializer(object):
at the end.
'''
hrefs = self.oeb.manifest.hrefs
try:
path, frag = urldefrag(urlnormalize(href))
except ValueError:
# Unparseable URL
return False
if path and base:
path = base.abshref(path)
if path and path not in hrefs:

View File

@ -18,7 +18,8 @@ from calibre.ebooks.chardet import xml_to_unicode
from calibre.utils.zipfile import safe_replace
from calibre.utils.config import DynamicConfig
from calibre.utils.logging import Log
-from calibre import guess_type, prints, prepare_string_for_xml
+from calibre import (guess_type, prints, prepare_string_for_xml,
+xml_replace_entities)
from calibre.ebooks.oeb.transforms.cover import CoverManager
from calibre.constants import filesystem_encoding
@ -96,13 +97,19 @@ class EbookIterator(object):
self.ebook_ext = ext.replace('original_', '')
def search(self, text, index, backwards=False):
-text = text.lower()
+text = prepare_string_for_xml(text.lower())
pmap = [(i, path) for i, path in enumerate(self.spine)]
if backwards:
pmap.reverse()
for i, path in pmap:
if (backwards and i < index) or (not backwards and i > index):
-if text in open(path, 'rb').read().decode(path.encoding).lower():
+with open(path, 'rb') as f:
raw = f.read().decode(path.encoding)
try:
raw = xml_replace_entities(raw)
except:
pass
if text in raw.lower():
return i
def find_missing_css_files(self):

View File

@ -154,7 +154,11 @@ class Split(object):
def rewrite_links(self, url):
href, frag = urldefrag(url)
try:
href = self.current_item.abshref(href)
except ValueError:
# Unparseable URL
return url
if href in self.map:
anchor_map = self.map[href]
nhref = anchor_map[frag if frag else None]

View File

@ -9,7 +9,8 @@ import os
from functools import partial
from PyQt4.Qt import (QMenu, Qt, QInputDialog, QToolButton, QDialog,
-QDialogButtonBox, QGridLayout, QLabel, QLineEdit, QIcon, QSize)
+QDialogButtonBox, QGridLayout, QLabel, QLineEdit, QIcon, QSize,
+QCoreApplication)
from calibre import isbytestring
from calibre.constants import filesystem_encoding, iswindows
@ -384,11 +385,18 @@ class ChooseLibraryAction(InterfaceAction):
_('Database integrity check failed, click Show details'
' for details.'), show=True, det_msg=d.error[1])
self.gui.status_bar.show_message(
_('Starting library scan, this may take a while'))
try:
QCoreApplication.processEvents()
d = CheckLibraryDialog(self.gui, m.db)
if not d.do_exec():
info_dialog(self.gui, _('No problems found'),
_('The files in your library match the information '
'in the database.'), show=True)
finally:
self.gui.status_bar.clear_message()
def switch_requested(self, location):
if not self.change_library_allowed():

View File

@ -9,7 +9,7 @@ from PyQt4.Qt import QThread, QObject, Qt, QProgressDialog, pyqtSignal, QTimer
from calibre.gui2.dialogs.progress import ProgressDialog
from calibre.gui2 import (question_dialog, error_dialog, info_dialog, gprefs,
-warning_dialog)
+warning_dialog, available_width)
from calibre.ebooks.metadata.opf2 import OPF
from calibre.ebooks.metadata import MetaInformation
from calibre.constants import preferred_encoding, filesystem_encoding, DEBUG
@ -244,6 +244,7 @@ class Adder(QObject): # {{{
def __init__(self, parent, db, callback, spare_server=None):
QObject.__init__(self, parent)
self.pd = ProgressDialog(_('Adding...'), parent=parent)
self.pd.setMaximumWidth(min(600, int(available_width()*0.75)))
self.spare_server = spare_server
self.db = db
self.pd.setModal(True)

View File

@ -35,7 +35,10 @@ class PluginWidget(QWidget, Ui_Form):
self.all_fields = [x for x in FIELDS if x != 'all']
#add custom columns
-self.all_fields.extend([x for x in sorted(db.custom_field_keys())])
+for x in sorted(db.custom_field_keys()):
self.all_fields.append(x)
if db.field_metadata[x]['datatype'] == 'series':
self.all_fields.append(x+'_index')
#populate
for x in self.all_fields:
QListWidgetItem(x, self.db_fields)

View File

@ -33,6 +33,9 @@ class PluginWidget(QWidget, Ui_Form):
self.all_fields.append(x)
QListWidgetItem(x, self.db_fields)
fm = db.field_metadata[x]
if fm['datatype'] == 'series':
QListWidgetItem(x+'_index', self.db_fields)
def initialize(self, name, db):
self.name = name

View File

@ -25,7 +25,8 @@ from calibre.utils.logging import Log
class BulkConfig(Config):
-def __init__(self, parent, db, preferred_output_format=None):
+def __init__(self, parent, db, preferred_output_format=None,
+has_saved_settings=True):
ResizableDialog.__init__(self, parent)
self.setup_output_formats(db, preferred_output_format)
@ -54,6 +55,12 @@ class BulkConfig(Config):
rb = self.buttonBox.button(self.buttonBox.RestoreDefaults)
rb.setVisible(False)
self.groups.setMouseTracking(True)
if not has_saved_settings:
o = self.opt_individual_saved_settings
o.setEnabled(False)
o.setToolTip(_('None of the selected books have saved conversion '
'settings.'))
o.setChecked(False)
def setup_pipeline(self, *args):

View File

@ -70,7 +70,7 @@ if pictureflow is not None:
ans = ''
except:
ans = ''
-return ans
+return ans.replace('&', '&&')
def subtitle(self, index):
try:

View File

@ -230,8 +230,6 @@ class Text(Base):
def setup_ui(self, parent):
self.sep = self.col_metadata['multiple_seps']
values = self.all_values = list(self.db.all_custom(num=self.col_id))
values.sort(key=sort_key)
if self.col_metadata['is_multiple']:
w = MultiCompleteLineEdit(parent)
@ -239,7 +237,6 @@ class Text(Base):
if self.sep['ui_to_list'] == '&':
w.set_space_before_sep(True)
w.set_add_separator(tweaks['authors_completer_append_separator'])
w.update_items_cache(values)
w.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Preferred)
else:
w = MultiCompleteComboBox(parent)
@ -249,16 +246,19 @@ class Text(Base):
self.widgets = [QLabel('&'+self.col_metadata['name']+':', parent), w]
def initialize(self, book_id):
values = list(self.db.all_custom(num=self.col_id))
values.sort(key=sort_key)
self.widgets[1].clear()
self.widgets[1].update_items_cache(values)
val = self.db.get_custom(book_id, num=self.col_id, index_is_id=True)
self.initial_val = val
val = self.normalize_db_val(val)
self.widgets[1].update_items_cache(self.all_values)
if self.col_metadata['is_multiple']:
self.setter(val)
else:
idx = None
-for i, c in enumerate(self.all_values):
+for i, c in enumerate(values):
if c == val:
idx = i
self.widgets[1].addItem(c)
@ -287,8 +287,6 @@ class Text(Base):
class Series(Base):
def setup_ui(self, parent):
values = self.all_values = list(self.db.all_custom(num=self.col_id))
values.sort(key=sort_key)
w = MultiCompleteComboBox(parent)
w.set_separator(None)
w.setSizeAdjustPolicy(w.AdjustToMinimumContentsLengthWithIcon)
@ -305,6 +303,8 @@ class Series(Base):
self.widgets.append(w)
def initialize(self, book_id):
values = list(self.db.all_custom(num=self.col_id))
values.sort(key=sort_key)
val = self.db.get_custom(book_id, num=self.col_id, index_is_id=True)
s_index = self.db.get_custom_extra(book_id, num=self.col_id, index_is_id=True)
if s_index is None:
@ -314,11 +314,12 @@ class Series(Base):
self.initial_val = val
val = self.normalize_db_val(val)
idx = None
-for i, c in enumerate(self.all_values):
+self.name_widget.clear()
for i, c in enumerate(values):
if c == val:
idx = i
self.name_widget.addItem(c)
-self.name_widget.update_items_cache(self.all_values)
+self.name_widget.update_items_cache(values)
self.name_widget.setEditText('')
if idx is not None:
self.widgets[1].setCurrentIndex(idx)

View File

@ -419,6 +419,13 @@ class Scheduler(QObject):
QObject.__init__(self, parent)
self.internet_connection_failed = False
self._parent = parent
self.no_internet_msg = _('Cannot download news as no internet connection '
'is active')
self.no_internet_dialog = d = error_dialog(self._parent,
self.no_internet_msg, _('No internet connection'),
show_copy_button=False)
d.setModal(False)
self.recipe_model = RecipeModel()
self.db = db
self.lock = QMutex(QMutex.Recursive)
@ -523,7 +530,6 @@ class Scheduler(QObject):
finally:
self.lock.unlock()
def download_clicked(self, urn):
if urn is not None:
return self.download(urn)
@ -534,18 +540,25 @@ class Scheduler(QObject):
def download_all_scheduled(self):
self.download_clicked(None)
-def download(self, urn):
+def has_internet_connection(self):
self.lock.lock()
if not internet_connected():
if not self.internet_connection_failed:
self.internet_connection_failed = True
-d = error_dialog(self._parent, _('No internet connection'),
-_('Cannot download news as no internet connection '
-'is active'))
-d.setModal(False)
-d.show()
+if self._parent.is_minimized_to_tray:
+self._parent.status_bar.show_message(self.no_internet_msg,
+5000)
+elif not self.no_internet_dialog.isVisible():
+self.no_internet_dialog.show()
return False
self.internet_connection_failed = False
if self.no_internet_dialog.isVisible():
self.no_internet_dialog.hide()
return True
def download(self, urn):
self.lock.lock()
if not self.has_internet_connection():
return False
doit = urn not in self.download_queue
self.lock.unlock()
if doit:
@ -555,7 +568,9 @@ class Scheduler(QObject):
def check(self):
recipes = self.recipe_model.get_to_be_downloaded_recipes()
for urn in recipes:
-self.download(urn)
+if not self.download(urn):
# No internet connection, we will try again in a minute
break
if __name__ == '__main__':
from calibre.gui2 import is_ok_to_use_qt

View File

@ -412,7 +412,8 @@ class DetailView(QDialog, Ui_Dialog): # {{{
self.timer = QTimer(self)
self.timer.timeout.connect(self.update)
self.timer.start(1000)
v = self.log.verticalScrollBar()
v.setValue(v.maximum())
def update(self):
if self.html_view:

View File

@ -5,8 +5,9 @@ __license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
-import os
+import os, itertools, operator
from functools import partial
from future_builtins import map
from PyQt4.Qt import (QTableView, Qt, QAbstractItemView, QMenu, pyqtSignal,
QModelIndex, QIcon, QItemSelection, QMimeData, QDrag, QApplication,
@ -793,8 +794,13 @@ class BooksView(QTableView): # {{{
sel = QItemSelection()
m = self.model()
max_col = m.columnCount(QModelIndex()) - 1
-for row in rows:
-sel.select(m.index(row, 0), m.index(row, max_col))
+# Create a range based selector for each set of contiguous rows
+# as supplying selectors for each individual row causes very poor
# performance if a large number of rows has to be selected.
for k, g in itertools.groupby(enumerate(rows), lambda (i,x):i-x):
group = list(map(operator.itemgetter(1), g))
sel.merge(QItemSelection(m.index(min(group), 0),
m.index(max(group), max_col)), sm.Select)
sm.select(sel, sm.ClearAndSelect)
def get_selected_ids(self):
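The grouping idiom above relies on the fact that, in a run of consecutive integers, index minus value is constant. A standalone sketch of the same trick with made-up row numbers, written without the Python 2 tuple-parameter lambda used in the code above:

import itertools, operator

rows = [2, 3, 4, 9, 10, 15]
# enumerate yields (index, row); for consecutive rows the difference index - row is
# constant, so groupby splits the list into contiguous runs.
for _, g in itertools.groupby(enumerate(rows), lambda p: p[0] - p[1]):
    group = list(map(operator.itemgetter(1), g))
    print('rows %d-%d' % (min(group), max(group)))  # rows 2-4, rows 9-10, rows 15-15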

View File

@ -28,11 +28,11 @@ class BaseModel(QAbstractListModel):
def name_to_action(self, name, gui):
if name == 'Donate':
-return FakeAction(name, 'donate.png',
+return FakeAction(_('Donate'), 'donate.png',
dont_add_to=frozenset(['context-menu',
'context-menu-device']))
if name == 'Location Manager':
-return FakeAction(name, None,
+return FakeAction(_('Location Manager'), None,
_('Switch between library and device views'),
dont_add_to=frozenset(['menubar', 'toolbar',
'toolbar-child', 'context-menu',

View File

@ -62,6 +62,7 @@ class Matches(QAbstractItemModel):
# Only the showing matches.
self.matches = []
self.query = ''
self.filterable_query = False
self.search_filter = SearchFilter()
self.cover_pool = CoverThreadPool(cover_thread_count)
self.details_pool = DetailsThreadPool(detail_thread_count)
@ -82,6 +83,7 @@ class Matches(QAbstractItemModel):
self.all_matches = []
self.search_filter.clear_search_results()
self.query = ''
self.filterable_query = False
self.cover_pool.abort()
self.details_pool.abort()
self.total_changed.emit(self.rowCount())
@ -113,7 +115,10 @@ class Matches(QAbstractItemModel):
def filter_results(self):
self.layoutAboutToBeChanged.emit()
-if self.query:
+# Only use the search filter's filtered results when there is a query
# and it is a filterable query. This allows for the stores best guess
# matches to come though.
if self.query and self.filterable_query:
self.matches = list(self.search_filter.parse(self.query))
else:
self.matches = list(self.search_filter.universal_set())
@ -134,6 +139,35 @@ class Matches(QAbstractItemModel):
def set_query(self, query):
self.query = query
self.filterable_query = self.is_filterable_query(query)
def is_filterable_query(self, query):
# Remove control modifiers.
query = query.replace('\\', '')
query = query.replace('!', '')
query = query.replace('=', '')
query = query.replace('~', '')
query = query.replace('>', '')
query = query.replace('<', '')
# Store the query at this point for comparision later
mod_query = query
# Remove filter identifiers
# Remove the prefix.
for loc in ('all', 'author', 'authors', 'title'):
query = re.sub(r'%s:"(?P<a>[^\s"]+)"' % loc, '\g<a>', query)
query = query.replace('%s:' % loc, '')
# Remove the prefix and search text.
for loc in ('cover', 'download', 'downloads', 'drm', 'format', 'formats', 'price', 'store'):
query = re.sub(r'%s:"[^"]"' % loc, '', query)
query = re.sub(r'%s:[^\s]*' % loc, '', query)
# Remove whitespace
query = re.sub('\s', '', query)
mod_query = re.sub('\s', '', mod_query)
# If mod_query and query are the same then there were no filter modifiers
# so this isn't a filterable query.
if mod_query == query:
return False
return True
def index(self, row, column, parent=QModelIndex()): def index(self, row, column, parent=QModelIndex()):
return self.createIndex(row, column) return self.createIndex(row, column)
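For illustration, the intent of is_filterable_query() above is simple: strip the recognised search prefixes out of the query, then compare against the original. If nothing was removed, the query contained no filter syntax, so the store's own best-guess results are shown unfiltered. A reduced sketch of the same idea (the prefix list here is abbreviated, not the full set handled above):

    import re

    def uses_filter_syntax(query, prefixes=('author', 'title', 'format')):
        stripped = query
        for loc in prefixes:
            # drop prefix:"quoted value" and bare prefix: constructs
            stripped = re.sub(r'%s:"[^"]*"' % loc, '', stripped)
            stripped = stripped.replace('%s:' % loc, '')
        # if stripping changed nothing, there were no filter modifiers
        return stripped != query

    # uses_filter_syntax('dune')                -> False
    # uses_filter_syntax('author:herbert dune') -> True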


@@ -252,7 +252,7 @@ class SearchDialog(QDialog, Ui_Dialog):
         # Milliseconds
         self.hang_time = self.config.get('hang_time', 75) * 1000

-        self.max_results = self.config.get('max_results', 10)
+        self.max_results = self.config.get('max_results', 15)
         self.should_open_external = self.config.get('open_external', True)

         # Number of threads to run for each type of operation


@@ -14,7 +14,7 @@
    <string>Get Books</string>
   </property>
   <property name="windowIcon">
-   <iconset resource="../../../../../resources/images.qrc">
+   <iconset>
     <normaloff>:/images/store.png</normaloff>:/images/store.png</iconset>
   </property>
   <property name="sizeGripEnabled">
@@ -82,8 +82,8 @@
      <rect>
       <x>0</x>
       <y>0</y>
-      <width>173</width>
-      <height>106</height>
+      <width>193</width>
+      <height>127</height>
      </rect>
     </property>
    </widget>
@@ -254,6 +254,19 @@
     <header>widgets.h</header>
    </customwidget>
   </customwidgets>
+  <tabstops>
+   <tabstop>search_edit</tabstop>
+   <tabstop>search</tabstop>
+   <tabstop>results_view</tabstop>
+   <tabstop>store_list</tabstop>
+   <tabstop>select_all_stores</tabstop>
+   <tabstop>select_invert_stores</tabstop>
+   <tabstop>select_none_stores</tabstop>
+   <tabstop>configure</tabstop>
+   <tabstop>open_external</tabstop>
+   <tabstop>close</tabstop>
+   <tabstop>adv_search_button</tabstop>
+  </tabstops>
  <resources>
   <include location="../../../../../resources/images.qrc"/>
  </resources>


@@ -0,0 +1,81 @@
# -*- coding: utf-8 -*-

from __future__ import (unicode_literals, division, absolute_import, print_function)

__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

from contextlib import closing

from lxml import html

from PyQt4.Qt import QUrl

from calibre import browser
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.search_result import SearchResult

class AmazonESKindleStore(StorePlugin):
    '''
    For comments on the implementation, please see amazon_plugin.py
    '''

    def open(self, parent=None, detail_item=None, external=False):
        aff_id = {'tag': 'charhale09-21'}
        store_link = 'http://www.amazon.es/ebooks-kindle/b?_encoding=UTF8&node=827231031&tag=%(tag)s&ie=UTF8&linkCode=ur2&camp=3626&creative=24790' % aff_id
        if detail_item:
            aff_id['asin'] = detail_item
            store_link = 'http://www.amazon.es/gp/redirect.html?ie=UTF8&location=http://www.amazon.es/dp/%(asin)s&tag=%(tag)s&linkCode=ur2&camp=3626&creative=24790' % aff_id
        open_url(QUrl(store_link))

    def search(self, query, max_results=10, timeout=60):
        search_url = 'http://www.amazon.es/s/?url=search-alias%3Ddigital-text&field-keywords='
        url = search_url + query.encode('ascii', 'backslashreplace').replace('%', '%25').replace('\\x', '%').replace(' ', '+')
        br = browser()

        counter = max_results
        with closing(br.open(url, timeout=timeout)) as f:
            doc = html.fromstring(f.read().decode('latin-1', 'replace'))

            data_xpath = '//div[contains(@class, "result") and contains(@class, "product")]'
            format_xpath = './/span[@class="format"]/text()'
            cover_xpath = './/img[@class="productImage"]/@src'

            for data in doc.xpath(data_xpath):
                if counter <= 0:
                    break

                # Even though we are searching digital-text only Amazon will still
                # put in results for non Kindle books (author pages). So we need
                # to explicitly check if the item is a Kindle book and ignore it
                # if it isn't.
                format = ''.join(data.xpath(format_xpath))
                if 'kindle' not in format.lower():
                    continue

                # We must have an asin otherwise we can't easily reference the
                # book later.
                asin = ''.join(data.xpath("@name"))

                cover_url = ''.join(data.xpath(cover_xpath))

                title = ''.join(data.xpath('.//div[@class="title"]/a/text()'))
                price = ''.join(data.xpath('.//div[@class="newPrice"]/span/text()'))
                author = unicode(''.join(data.xpath('.//div[@class="title"]/span[@class="ptBrand"]/text()')))
                if author.startswith('de '):
                    author = author[3:]

                counter -= 1

                s = SearchResult()
                s.cover_url = cover_url.strip()
                s.title = title.strip()
                s.author = author.strip()
                s.price = price.strip()
                s.detail_item = asin.strip()
                s.formats = 'Kindle'
                s.drm = SearchResult.DRM_UNKNOWN

                yield s
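A note on the query encoding used in search() above: non-ASCII characters are turned into percent escapes by way of backslashreplace. The chained replaces behave like this under Python 2 string semantics, as in the plugin (the sample query is only an illustration):

    query = u'corazón salvaje'
    q = query.encode('ascii', 'backslashreplace')   # literal escape: coraz\xf3n salvaje
    q = q.replace('%', '%25').replace('\\x', '%').replace(' ', '+')
    # q is now 'coraz%f3n+salvaje', ready to be appended to search_url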


@@ -38,7 +38,9 @@ class AmazonFRKindleStore(StorePlugin):
         counter = max_results
         with closing(br.open(url, timeout=timeout)) as f:
-            doc = html.fromstring(f.read().decode('latin-1', 'replace'))
+            # doc = html.fromstring(f.read().decode('latin-1', 'replace'))
+            # Apparently amazon.fr is responding in UTF-8 now
+            doc = html.fromstring(f.read())

             data_xpath = '//div[contains(@class, "result") and contains(@class, "product")]'
             format_xpath = './/span[@class="format"]/text()'
View File

@@ -0,0 +1,81 @@
# -*- coding: utf-8 -*-

from __future__ import (unicode_literals, division, absolute_import, print_function)

__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

from contextlib import closing

from lxml import html

from PyQt4.Qt import QUrl

from calibre import browser
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.search_result import SearchResult

class AmazonITKindleStore(StorePlugin):
    '''
    For comments on the implementation, please see amazon_plugin.py
    '''

    def open(self, parent=None, detail_item=None, external=False):
        aff_id = {'tag': 'httpcharles07-21'}
        store_link = 'http://www.amazon.it/ebooks-kindle/b?_encoding=UTF8&node=827182031&tag=%(tag)s&ie=UTF8&linkCode=ur2&camp=3370&creative=23322' % aff_id
        if detail_item:
            aff_id['asin'] = detail_item
            store_link = 'http://www.amazon.it/gp/redirect.html?ie=UTF8&location=http://www.amazon.it/dp/%(asin)s&tag=%(tag)s&linkCode=ur2&camp=3370&creative=23322' % aff_id
        open_url(QUrl(store_link))

    def search(self, query, max_results=10, timeout=60):
        search_url = 'http://www.amazon.it/s/?url=search-alias%3Ddigital-text&field-keywords='
        url = search_url + query.encode('ascii', 'backslashreplace').replace('%', '%25').replace('\\x', '%').replace(' ', '+')
        br = browser()

        counter = max_results
        with closing(br.open(url, timeout=timeout)) as f:
            doc = html.fromstring(f.read().decode('latin-1', 'replace'))

            data_xpath = '//div[contains(@class, "result") and contains(@class, "product")]'
            format_xpath = './/span[@class="format"]/text()'
            cover_xpath = './/img[@class="productImage"]/@src'

            for data in doc.xpath(data_xpath):
                if counter <= 0:
                    break

                # Even though we are searching digital-text only Amazon will still
                # put in results for non Kindle books (author pages). So we need
                # to explicitly check if the item is a Kindle book and ignore it
                # if it isn't.
                format = ''.join(data.xpath(format_xpath))
                if 'kindle' not in format.lower():
                    continue

                # We must have an asin otherwise we can't easily reference the
                # book later.
                asin = ''.join(data.xpath("@name"))

                cover_url = ''.join(data.xpath(cover_xpath))

                title = ''.join(data.xpath('.//div[@class="title"]/a/text()'))
                price = ''.join(data.xpath('.//div[@class="newPrice"]/span/text()'))
                author = unicode(''.join(data.xpath('.//div[@class="title"]/span[@class="ptBrand"]/text()')))
                if author.startswith('di '):
                    author = author[3:]

                counter -= 1

                s = SearchResult()
                s.cover_url = cover_url.strip()
                s.title = title.strip()
                s.author = author.strip()
                s.price = price.strip()
                s.detail_item = asin.strip()
                s.formats = 'Kindle'
                s.drm = SearchResult.DRM_UNKNOWN

                yield s


@@ -40,9 +40,9 @@ class ChitankaStore(BasicStoreConfig, StorePlugin):
         d.exec_()

     def search(self, query, max_results=10, timeout=60):
-        # check for cyrilic symbols before performing search
+        # check for cyrillic symbols before performing search
         uquery = unicode(query.strip(), 'utf-8')
-        reObj = re.search(u'^[а-яА-Я\\d]{4,}[а-яА-Я\\d\\s]*$', uquery)
+        reObj = re.search(u'^[а-яА-Я\\d\\s]{3,}$', uquery)
         if not reObj:
             return
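The relaxed pattern allows whitespace anywhere in the query and only requires three characters in total, whereas the old one demanded four leading non-space characters. For example (unicode literals as in the plugin; the results follow from the pattern itself):

    import re

    pattern = u'^[а-яА-Я\\d\\s]{3,}$'
    bool(re.search(pattern, u'под игото'))   # True: spaces are now accepted
    bool(re.search(pattern, u'aliens'))      # False: Latin letters are still rejected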


@@ -63,9 +63,6 @@ class DieselEbooksStore(BasicStoreConfig, StorePlugin):
                 a, b, id = id.partition('/item/')

                 cover_url = ''.join(data.xpath('div[@class="cover"]//img/@src'))
-                if cover_url.startswith('/'):
-                    cover_url = cover_url[1:]
-                cover_url = 'http://www.diesel-ebooks.com/' + cover_url

                 title = ''.join(data.xpath('.//div[@class="content"]//h2/text()'))
                 author = ''.join(data.xpath('//div[@class="content"]//div[@class="author"]/a/text()'))


@@ -54,7 +54,7 @@ class EbookscomStore(BasicStoreConfig, StorePlugin):
         counter = max_results
         with closing(br.open(url, timeout=timeout)) as f:
             doc = html.fromstring(f.read())
-            for data in doc.xpath('//div[@class="book_a" or @class="book_b"]'):
+            for data in doc.xpath('//div[@id="results"]//li'):
                 if counter <= 0:
                     break
@@ -64,15 +64,21 @@ class EbookscomStore(BasicStoreConfig, StorePlugin):
                     continue
                 id = mo.group()

-                cover_url = ''.join(data.xpath('.//img[1]/@src'))
+                cover_url = ''
+                cover_load = ''.join(data.xpath('.//div[@class="img"]//img/@onload'))
+                mo = re.search('(?<=\').+?(?=\')', cover_load)
+                if mo:
+                    cover_url = mo.group();

                 title = ''
                 author = ''
-                heading_a = data.xpath('.//a[1]/text()')
-                if heading_a:
-                    title = heading_a[0]
-                if len(heading_a) >= 2:
-                    author = heading_a[1]
+                header_parts = data.xpath('.//div[@class="descr"]/h4//a//text()')
+                if header_parts:
+                    title = header_parts[0]
+                    header_parts = header_parts[1:]
+                    if header_parts:
+                        author = ', '.join(header_parts)

                 counter -= 1
@@ -98,22 +104,18 @@ class EbookscomStore(BasicStoreConfig, StorePlugin):
         with closing(br.open(url + id, timeout=timeout)) as nf:
             pdoc = html.fromstring(nf.read())

-            pdata = pdoc.xpath('//table[@class="price"]/tr/td/text()')
-            if len(pdata) >= 2:
-                price = pdata[1]
+            price_l = pdoc.xpath('//span[@class="price"]/text()')
+            if price_l:
+                price = price_l[0]
+            search_result.price = price.strip()

             search_result.drm = SearchResult.DRM_UNLOCKED
-            for sec in ('Printing', 'Copying', 'Lending'):
-                if pdoc.xpath('boolean(//div[@class="formatTableInner"]//table//tr[contains(th, "%s") and contains(td, "Off")])' % sec):
-                    search_result.drm = SearchResult.DRM_LOCKED
-                    break
+            permissions = ' '.join(pdoc.xpath('//div[@class="permissions-items"]//text()'))
+            if 'off' in permissions:
+                search_result.drm = SearchResult.DRM_LOCKED

-            fdata = ', '.join(pdoc.xpath('//table[@class="price"]//tr//td[1]/text()'))
-            fdata = fdata.replace(':', '')
-            fdata = re.sub(r'\s{2,}', ' ', fdata)
-            fdata = fdata.replace(' ,', ',')
-            fdata = fdata.strip()
-            search_result.formats = fdata
-
-            search_result.price = price.strip()
+            fdata = pdoc.xpath('//div[contains(@class, "more-links") and contains(@class, "more-links-info")]/div//span/text()')
+            if len(fdata) > 1:
+                search_result.formats = ', '.join(fdata[1:])

         return True
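The cover URL is now recovered from the image's onload attribute with a lookbehind/lookahead pair that takes whatever sits between the first pair of single quotes. Against a hypothetical attribute value (the actual markup served by ebooks.com is not reproduced here):

    import re

    cover_load = "loadImage('http://example.com/covers/12345.jpg', this)"  # hypothetical
    mo = re.search(r"(?<=').+?(?=')", cover_load)
    cover_url = mo.group() if mo else ''
    # cover_url == 'http://example.com/covers/12345.jpg'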


@@ -46,9 +46,9 @@ class eKnigiStore(BasicStoreConfig, StorePlugin):
         d.exec_()

     def search(self, query, max_results=10, timeout=60):
-        # check for cyrilic symbols before performing search
+        # check for cyrillic symbols before performing search
         uquery = unicode(query.strip(), 'utf-8')
-        reObj = re.search(u'^[а-яА-Я\\d]{2,}[а-яА-Я\\d\\s]*$', uquery)
+        reObj = re.search(u'^[а-яА-Я\\d\\s]{2,}$', uquery)
         if not reObj:
             return


@@ -112,7 +112,10 @@ def convert_bulk_ebook(parent, queue, db, book_ids, out_format=None, args=[]):
     if total == 0:
         return None, None, None

-    d = BulkConfig(parent, db, out_format)
+    has_saved_settings = db.has_conversion_options(book_ids)
+
+    d = BulkConfig(parent, db, out_format,
+            has_saved_settings=has_saved_settings)
     if d.exec_() != QDialog.Accepted:
         return None


@@ -359,7 +359,7 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
                 'log will be displayed automatically.')%self.gui_debug, show=True)

     def esc(self, *args):
-        self.search.clear()
+        self.clear_button.click()

     def start_content_server(self, check_started=True):
         from calibre.library.server.main import start_threaded_server
@@ -726,7 +726,7 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
                 info_dialog(self, 'calibre', 'calibre '+ \
                         _('will keep running in the system tray. To close it, '
                         'choose <b>Quit</b> in the context menu of the '
-                        'system tray.')).exec_()
+                        'system tray.'), show_copy_button=False).exec_()
                 dynamic['systray_msg'] = True
                 self.hide_windows()
                 e.ignore()


@@ -172,7 +172,7 @@ class EbookViewer(MainWindow, Ui_EbookViewer):

     STATE_VERSION = 1

-    def __init__(self, pathtoebook=None, debug_javascript=False):
+    def __init__(self, pathtoebook=None, debug_javascript=False, open_at=None):
         MainWindow.__init__(self, None)
         self.setupUi(self)
         self.view.magnification_changed.connect(self.magnification_changed)
@@ -280,7 +280,7 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
         if pathtoebook is not None:
-            f = functools.partial(self.load_ebook, pathtoebook)
+            f = functools.partial(self.load_ebook, pathtoebook, open_at=open_at)
             QTimer.singleShot(50, f)
         self.view.setMinimumSize(100, 100)
         self.toc.setCursor(Qt.PointingHandCursor)
@@ -457,8 +457,8 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
     def goto_end(self):
         self.goto_page(self.pos.maximum())

-    def goto_page(self, new_page):
-        if self.current_page is not None:
+    def goto_page(self, new_page, loaded_check=True):
+        if self.current_page is not None or not loaded_check:
             for page in self.iterator.spine:
                 if new_page >= page.start_page and new_page <= page.max_page:
                     try:
@@ -672,7 +672,7 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
         except:
             traceback.print_exc()

-    def load_ebook(self, pathtoebook):
+    def load_ebook(self, pathtoebook, open_at=None):
         if self.iterator is not None:
             self.save_current_position()
             self.iterator.__exit__()
@@ -731,10 +731,17 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
         self.current_index = -1
         QApplication.instance().alert(self, 5000)
         previous = self.set_bookmarks(self.iterator.bookmarks)
-        if previous is not None:
+        if open_at is None and previous is not None:
             self.goto_bookmark(previous)
         else:
-            self.next_document()
+            if open_at is None:
+                self.next_document()
+            else:
+                if open_at > self.pos.maximum():
+                    open_at = self.pos.maximum()
+                if open_at < self.pos.minimum():
+                    open_at = self.pos.minimum()
+                self.goto_page(open_at, loaded_check=False)

     def set_vscrollbar_value(self, pagenum):
         self.vertical_scrollbar.blockSignals(True)
@@ -804,6 +811,9 @@ def config(defaults=None):
         help=_('Remember last used window size'))
     c.add_opt('debug_javascript', ['--debug-javascript'], default=False,
         help=_('Print javascript alert and console messages to the console'))
+    c.add_opt('open_at', ['--open-at'], default=None,
+        help=_('The position at which to open the specified book. The position is '
+            'a location as displayed in the top left corner of the viewer.'))

     return c
@@ -823,13 +833,17 @@ def main(args=sys.argv):
     parser = option_parser()
     opts, args = parser.parse_args(args)

     pid = os.fork() if False and (islinux or isbsd) else -1
+    try:
+        open_at = float(opts.open_at)
+    except:
+        open_at = None
     if pid <= 0:
         app = Application(args)
         app.setWindowIcon(QIcon(I('viewer.png')))
         QApplication.setOrganizationName(ORG_NAME)
         QApplication.setApplicationName(APP_UID)
         main = EbookViewer(args[1] if len(args) > 1 else None,
-                debug_javascript=opts.debug_javascript)
+                debug_javascript=opts.debug_javascript, open_at=open_at)
         sys.excepthook = main.unhandled_exception
         main.show()
         if opts.raise_window:
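With the option wired through, the viewer can be told where to open, for example: ebook-viewer --open-at=123.5 book.epub (the file name and position here are placeholders). Before the jump, the requested position is clamped to the range of the position slider; the logic added above is equivalent to this small restatement:

    def clamp(open_at, minimum, maximum):
        # keep the requested position inside the document's valid range
        return max(minimum, min(open_at, maximum))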


@@ -15,7 +15,8 @@ from calibre.utils.config import tweaks, prefs
 from calibre.utils.date import parse_date, now, UNDEFINED_DATE, clean_date_for_sort
 from calibre.utils.search_query_parser import SearchQueryParser
 from calibre.utils.pyparsing import ParseException
-from calibre.utils.localization import canonicalize_lang, lang_map, get_udc
+from calibre.utils.localization import (canonicalize_lang, lang_map, get_udc,
+        get_lang)
 from calibre.ebooks.metadata import title_sort, author_to_author_sort
 from calibre.ebooks.metadata.opf2 import metadata_to_opf
 from calibre import prints
@@ -215,6 +216,10 @@ class ResultCache(SearchQueryParser): # {{{
     '''
     def __init__(self, FIELD_MAP, field_metadata, db_prefs=None):
         self.FIELD_MAP = FIELD_MAP
+        l = get_lang()
+        asciize_author_names = l and l.lower() in ('en', 'eng')
+        if not asciize_author_names:
+            self.ascii_name = lambda x: False
         self.db_prefs = db_prefs
         self.composites = {}
         self.udc = get_udc()


@@ -348,6 +348,8 @@ class BIBTEX(CatalogPlugin): # {{{
                     for field in fields:
                         if field.startswith('#'):
                             item = db.get_field(entry['id'],field,index_is_id=True)
+                            if isinstance(item, (bool, float, int)):
+                                item = repr(item)
                         elif field == 'title_sort':
                             item = entry['sort']
                         else:
@@ -391,7 +393,7 @@ class BIBTEX(CatalogPlugin): # {{{

                         elif field == 'isbn' :
                             # Could be 9, 10 or 13 digits
-                            bibtex_entry.append(u'isbn = "%s"' % re.sub(u'[\D]', u'', item))
+                            bibtex_entry.append(u'isbn = "%s"' % re.sub(u'[^0-9xX]', u'', item))

                         elif field == 'formats' :
                             #Add file path if format is selected
@@ -413,7 +415,8 @@ class BIBTEX(CatalogPlugin): # {{{
                             bibtex_entry.append(u'month = "%s"' % bibtexdict.utf8ToBibtex(strftime("%b", item)))

                         elif field.startswith('#') :
-                            bibtex_entry.append(u'%s = "%s"' % (field[1:], bibtexdict.utf8ToBibtex(item)))
+                            bibtex_entry.append(u'custom_%s = "%s"' % (field[1:],
+                                bibtexdict.utf8ToBibtex(item)))
                         else:
                             # elif field in ['title', 'publisher', 'cover', 'uuid', 'ondevice',


@@ -64,8 +64,17 @@ def do_list(db, fields, afields, sort_by, ascending, search_text, line_width, se
     data = db.get_data_as_dict(prefix, authors_as_string=True)
     fields = ['id'] + fields
     title_fields = fields
-    fields = [db.custom_column_label_map[x[1:]]['num'] if x[0]=='*'
-            else x for x in fields]
+    def field_name(f):
+        ans = f
+        if f[0] == '*':
+            if f.endswith('_index'):
+                fkey = f[1:-len('_index')]
+                num = db.custom_column_label_map[fkey]['num']
+                ans = '%d_index'%num
+            else:
+                ans = db.custom_column_label_map[f[1:]]['num']
+        return ans
+    fields = list(map(field_name, fields))

     for f in data:
         fmts = [x for x in f['formats'] if x is not None]
@@ -121,8 +130,10 @@ def do_list(db, fields, afields, sort_by, ascending, search_text, line_width, se
 def list_option_parser(db=None):
     fields = set(FIELDS)
     if db is not None:
-        for f in db.custom_column_label_map:
+        for f, data in db.custom_column_label_map.iteritems():
             fields.add('*'+f)
+            if data['datatype'] == 'series':
+                fields.add('*'+f+'_index')

     parser = get_parser(_(
 '''\
@@ -161,8 +172,10 @@ def command_list(args, dbpath):
     opts, args = parser.parse_args(sys.argv[:1] + args)
     afields = set(FIELDS)
     if db is not None:
-        for f in db.custom_column_label_map:
+        for f, data in db.custom_column_label_map.iteritems():
             afields.add('*'+f)
+            if data['datatype'] == 'series':
+                afields.add('*'+f+'_index')
     fields = [str(f.strip().lower()) for f in opts.fields.split(',')]
     if 'all' in fields:
         fields = sorted(list(afields))
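As an example of the mapping performed by field_name() above, assume a custom series column labelled 'myseries' that the database assigned column number 3 (both the label and the number are hypothetical):

    # '*myseries'       -> 3           (the custom column's numeric id)
    # '*myseries_index' -> '3_index'   (the matching series index field)
    # 'title'           -> 'title'     (built-in fields pass through unchanged)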


@@ -1085,6 +1085,18 @@ ALTER TABLE books ADD COLUMN isbn TEXT DEFAULT "" COLLATE NOCASE;
             return cPickle.loads(str(data))
         return None

+    def has_conversion_options(self, ids, format='PIPE'):
+        ids = tuple(ids)
+        if len(ids) > 50000:
+            return True
+        if len(ids) == 1:
+            ids = '(%d)'%ids[0]
+        else:
+            ids = repr(ids)
+        return self.conn.get('''
+            SELECT data FROM conversion_options WHERE book IN %s AND
+            format=? LIMIT 1'''%(ids,), (format,), all=False) is not None
+
     def delete_conversion_options(self, id, format, commit=True):
         self.conn.execute('DELETE FROM conversion_options WHERE book=? AND format=?',
                 (id, format.upper()))
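The id list is spliced into the IN clause as the repr() of a tuple, with a special case for a single id because the repr of a one-element tuple carries a trailing comma that SQLite would reject. Illustrative values:

    ids = (1, 2, 3)
    repr(ids)         # '(1, 2, 3)'  -> usable as  ... WHERE book IN (1, 2, 3)
    ids = (5,)
    '(%d)' % ids[0]   # '(5)'        -> avoids the invalid  ... IN (5,)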


@@ -3376,11 +3376,15 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
         '''
         if prefix is None:
             prefix = self.library_path
-        FIELDS = set(['title', 'sort', 'authors', 'author_sort', 'publisher', 'rating',
-            'timestamp', 'size', 'tags', 'comments', 'series', 'series_index',
-            'uuid', 'pubdate', 'last_modified', 'identifiers', 'languages'])
-        for x in self.custom_column_num_map:
-            FIELDS.add(x)
+        fdata = self.custom_column_num_map
+
+        FIELDS = set(['title', 'sort', 'authors', 'author_sort', 'publisher',
+            'rating', 'timestamp', 'size', 'tags', 'comments', 'series',
+            'series_index', 'uuid', 'pubdate', 'last_modified', 'identifiers',
+            'languages']).union(set(fdata))
+        for x, data in fdata.iteritems():
+            if data['datatype'] == 'series':
+                FIELDS.add('%d_index'%x)
         data = []
         for record in self.data:
             if record is None: continue

Several additional file diffs were suppressed because they are too large, and some files were not shown because too many files changed in this commit.