Merge from trunk

Charles Haley 2013-03-05 12:23:03 +01:00
commit 79835a2922
206 changed files with 34376 additions and 27180 deletions

View File

@ -37,7 +37,7 @@ nbproject/
calibre_plugins/
recipes/.git
recipes/.gitignore
recipes/README
recipes/README.md
recipes/katalog_egazeciarz.recipe
recipes/tv_axnscifi.recipe
recipes/tv_comedycentral.recipe

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -672,6 +672,7 @@ Some limitations of PDF input are:
* Links and Tables of Contents are not supported
* PDFs that use embedded non-Unicode fonts to represent non-English characters will result in garbled output for those characters
* Some PDFs are made up of photographs of the page with OCRed text behind them. In such cases |app| uses the OCRed text, which can be very different from what you see when you view the PDF file
* PDFs that are used to display complex text, like right-to-left languages and math typesetting, will not convert correctly
To reiterate: **PDF is a really, really bad** format to use as input. If you absolutely must use PDF, then be prepared for
output ranging anywhere from decent to unusable, depending on the input PDF.

View File

@ -616,7 +616,10 @@ or a Remote Desktop solution.
If you must share the actual library, use a file syncing tool like
DropBox or rsync or Microsoft SkyDrive instead of a networked drive. Even with
these tools there is danger of data corruption/loss, so only do this if you are
willing to live with that risk.
willing to live with that risk. In particular, be aware that **Google Drive**
is incompatible with |app|; if you put your |app| library in Google Drive, you
*will* suffer data loss. See
`this thread <http://www.mobileread.com/forums/showthread.php?t=205581>`_ for details.
Content From The Web
---------------------

View File

@ -24,4 +24,3 @@ class app_funds(BasicNewsRecipe):
auto_cleanup = True
feeds = [(u'blog', u'http://feeds.feedburner.com/blogspot/etVI')]

View File

@ -47,4 +47,3 @@ class bankier(BasicNewsRecipe):
segments = urlPart.split('-')
urlPart2 = segments[-1]
return 'http://www.bankier.pl/wiadomosci/print.html?article_id=' + urlPart2

View File

@ -0,0 +1,27 @@
from calibre.web.feeds.news import BasicNewsRecipe
import re
class AdvancedUserRecipe1361743898(BasicNewsRecipe):
title = u'Democracy Journal'
description = '''A journal of ideas. Published quarterly.'''
__author__ = u'David Nye'
language = 'en'
oldest_article = 90
max_articles_per_feed = 30
no_stylesheets = True
auto_cleanup = True
def parse_index(self):
articles = []
feeds = []
soup = self.index_to_soup("http://www.democracyjournal.org")
for x in soup.findAll(href=re.compile("http://www\.democracyjournal\.org/\d*/.*php$")):
url = x.get('href')
title = self.tag_to_string(x)
articles.append({'title':title, 'url':url, 'description':'', 'date':''})
feeds.append(('Articles', articles))
return feeds
def print_version(self, url):
return url + '?page=all'

View File

@ -0,0 +1,182 @@
__license__ = 'GPL v3'
__copyright__ = '2013, Darko Miletic <darko.miletic at gmail.com>'
'''
http://www.ft.com/intl/us-edition
'''
import datetime
from calibre.ptempfile import PersistentTemporaryFile
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe
class FinancialTimes(BasicNewsRecipe):
title = 'Financial Times (US) printed edition'
__author__ = 'Darko Miletic'
description = "The Financial Times (FT) is one of the world's leading business news and information organisations, recognised internationally for its authority, integrity and accuracy."
publisher = 'The Financial Times Ltd.'
category = 'news, finances, politics, UK, World'
oldest_article = 2
language = 'en'
max_articles_per_feed = 250
no_stylesheets = True
use_embedded_content = False
needs_subscription = True
encoding = 'utf8'
publication_type = 'newspaper'
articles_are_obfuscated = True
temp_files = []
masthead_url = 'http://im.media.ft.com/m/img/masthead_main.jpg'
LOGIN = 'https://registration.ft.com/registration/barrier/login'
LOGIN2 = 'http://media.ft.com/h/subs3.html'
INDEX = 'http://www.ft.com/intl/us-edition'
PREFIX = 'http://www.ft.com'
conversion_options = {
'comment' : description
, 'tags' : category
, 'publisher' : publisher
, 'language' : language
, 'linearize_tables' : True
}
def get_browser(self):
br = BasicNewsRecipe.get_browser(self)
br.open(self.INDEX)
if self.username is not None and self.password is not None:
br.open(self.LOGIN2)
br.select_form(name='loginForm')
br['username'] = self.username
br['password'] = self.password
br.submit()
return br
keep_only_tags = [
dict(name='div' , attrs={'class':['fullstory fullstoryHeader', 'ft-story-header']})
,dict(name='div' , attrs={'class':'standfirst'})
,dict(name='div' , attrs={'id' :'storyContent'})
,dict(name='div' , attrs={'class':['ft-story-body','index-detail']})
,dict(name='h2' , attrs={'class':'entry-title'} )
,dict(name='span', attrs={'class':lambda x: x and 'posted-on' in x.split()} )
,dict(name='span', attrs={'class':'author_byline'} )
,dict(name='div' , attrs={'class':'entry-content'} )
]
remove_tags = [
dict(name='div', attrs={'id':'floating-con'})
,dict(name=['meta','iframe','base','object','embed','link'])
,dict(attrs={'class':['storyTools','story-package','screen-copy','story-package separator','expandable-image']})
]
remove_attributes = ['width','height','lang']
extra_css = """
body{font-family: Georgia,Times,"Times New Roman",serif}
h2{font-size:large}
.ft-story-header{font-size: x-small}
.container{font-size:x-small;}
h3{font-size:x-small;color:#003399;}
.copyright{font-size: x-small}
img{margin-top: 0.8em; display: block}
.lastUpdated{font-family: Arial,Helvetica,sans-serif; font-size: x-small}
.byline,.ft-story-body,.ft-story-header{font-family: Arial,Helvetica,sans-serif}
"""
def get_artlinks(self, elem):
articles = []
count = 0
for item in elem.findAll('a',href=True):
count = count + 1
if self.test and count > 2:
return articles
rawlink = item['href']
url = rawlink
if not rawlink.startswith('http://'):
url = self.PREFIX + rawlink
try:
urlverified = self.browser.open_novisit(url).geturl() # resolve redirect.
except:
continue
title = self.tag_to_string(item)
date = strftime(self.timefmt)
articles.append({
'title' :title
,'date' :date
,'url' :urlverified
,'description':''
})
return articles
def parse_index(self):
feeds = []
soup = self.index_to_soup(self.INDEX)
dates= self.tag_to_string(soup.find('div', attrs={'class':'btm-links'}).find('div'))
self.timefmt = ' [%s]'%dates
wide = soup.find('div',attrs={'class':'wide'})
if not wide:
return feeds
allsections = wide.findAll(attrs={'class':lambda x: x and 'footwell' in x.split()})
if not allsections:
return feeds
count = 0
for item in allsections:
count = count + 1
if self.test and count > 2:
return feeds
fitem = item.h3
if not fitem:
fitem = item.h4
ftitle = self.tag_to_string(fitem)
self.report_progress(0, _('Fetching feed')+' %s...'%(ftitle))
feedarts = self.get_artlinks(item.ul)
feeds.append((ftitle,feedarts))
return feeds
def preprocess_html(self, soup):
items = ['promo-box','promo-title',
'promo-headline','promo-image',
'promo-intro','promo-link','subhead']
for item in items:
for it in soup.findAll(item):
it.name = 'div'
it.attrs = []
for item in soup.findAll(style=True):
del item['style']
for item in soup.findAll('a'):
limg = item.find('img')
if item.string is not None:
str = item.string
item.replaceWith(str)
else:
if limg:
item.name = 'div'
item.attrs = []
else:
str = self.tag_to_string(item)
item.replaceWith(str)
for item in soup.findAll('img'):
if not item.has_key('alt'):
item['alt'] = 'image'
return soup
def get_cover_url(self):
cdate = datetime.date.today()
if cdate.isoweekday() == 7:
cdate -= datetime.timedelta(days=1)
return cdate.strftime('http://specials.ft.com/vtf_pdf/%d%m%y_FRONT1_USA.pdf')
def get_obfuscated_article(self, url):
count = 0
while (count < 10):
try:
response = self.browser.open(url)
html = response.read()
count = 10
except:
print "Retrying download..."
count += 1
tfile = PersistentTemporaryFile('_fa.html')
tfile.write(html)
tfile.close()
self.temp_files.append(tfile)
return tfile.name
def cleanup(self):
self.browser.open('https://registration.ft.com/registration/login/logout?location=')

View File

@ -1,7 +1,7 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = u'2010-2012, Tomasz Dlugosz <tomek3d@gmail.com>'
__copyright__ = u'2010-2013, Tomasz Dlugosz <tomek3d@gmail.com>'
'''
fronda.pl
'''
@ -68,6 +68,7 @@ class Fronda(BasicNewsRecipe):
article_url = 'http://www.fronda.pl' + article_a['href']
article_title = self.tag_to_string(article_a)
articles[genName].append( { 'title' : article_title, 'url' : article_url, 'date' : article_date })
if articles[genName]:
feeds.append((genName, articles[genName]))
return feeds
@ -82,8 +83,10 @@ class Fronda(BasicNewsRecipe):
dict(name='h3', attrs={'class':'block-header article comments'}),
dict(name='ul', attrs={'class':'comment-list'}),
dict(name='ul', attrs={'class':'category'}),
dict(name='ul', attrs={'class':'tag-list'}),
dict(name='p', attrs={'id':'comments-disclaimer'}),
dict(name='div', attrs={'style':'text-align: left; margin-bottom: 15px;'}),
dict(name='div', attrs={'style':'text-align: left; margin-top: 15px;'}),
dict(name='div', attrs={'style':'text-align: left; margin-top: 15px; margin-bottom: 30px;'}),
dict(name='div', attrs={'class':'related-articles content'}),
dict(name='div', attrs={'id':'comment-form'})
]

View File

@ -99,4 +99,3 @@ class gw_krakow(BasicNewsRecipe):
if soup.find(id='container_gal'):
self.gallery_article(soup.body)
return soup

View File

@ -96,4 +96,3 @@ class gw_wawa(BasicNewsRecipe):
if soup.find(id='container_gal'):
self.gallery_article(soup.body)
return soup

View File

@ -100,5 +100,3 @@ class GazetaPomorska(BasicNewsRecipe):
extra_css = '''h1 { font-size: 1.4em; }
h2 { font-size: 1.0em; }'''

View File

@ -0,0 +1,12 @@
from calibre.web.feeds.news import BasicNewsRecipe
class BasicUserRecipe1361379046(BasicNewsRecipe):
title = u'Geopolityka.org'
language = 'pl'
__author__ = 'chemik111'
oldest_article = 15
max_articles_per_feed = 100
auto_cleanup = True
feeds = [(u'Rss', u'http://geopolityka.org/index.php?format=feed&type=rss')]

View File

@ -2,7 +2,8 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2011, Piotr Kontek, piotr.kontek@gmail.com'
__copyright__ = '2011, Piotr Kontek, piotr.kontek@gmail.com \
2013, Tomasz Długosz, tomek3d@gmail.com'
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ptempfile import PersistentTemporaryFile
@ -12,9 +13,9 @@ import re
class GN(BasicNewsRecipe):
EDITION = 0
__author__ = 'Piotr Kontek'
title = u'Gość niedzielny'
description = 'Weekly magazine'
__author__ = 'Piotr Kontek, Tomasz Długosz'
title = u'Gość Niedzielny'
description = 'Ogólnopolski tygodnik katolicki'
encoding = 'utf-8'
no_stylesheets = True
language = 'pl'
@ -38,17 +39,25 @@ class GN(BasicNewsRecipe):
first = True
for p in main_section.findAll('p', attrs={'class':None}, recursive=False):
if first and p.find('img') != None:
article = article + '<p>'
article = article + str(p.find('img')).replace('src="/files/','src="http://www.gosc.pl/files/')
article = article + '<font size="-2">'
article += '<p>'
article += str(p.find('img')).replace('src="/files/','src="http://www.gosc.pl/files/')
article += '<font size="-2">'
for s in p.findAll('span'):
article = article + self.tag_to_string(s)
article = article + '</font></p>'
article += self.tag_to_string(s)
article += '</font></p>'
else:
article = article + str(p).replace('src="/files/','src="http://www.gosc.pl/files/')
article += str(p).replace('src="/files/','src="http://www.gosc.pl/files/')
first = False
limiter = main_section.find('p', attrs={'class' : 'limiter'})
if limiter:
article += str(limiter)
html = unicode(title) + unicode(authors) + unicode(article)
html = unicode(title)
#sometimes authors are not filled in:
if authors:
html += unicode(authors) + unicode(article)
else:
html += unicode(article)
self.temp_files.append(PersistentTemporaryFile('_temparse.html'))
self.temp_files[-1].write(html)
@ -65,7 +74,8 @@ class GN(BasicNewsRecipe):
if img != None:
a = img.parent
self.EDITION = a['href']
self.title = img['alt']
#this was preventing kindles from moving old issues to 'Back Issues' category:
#self.title = img['alt']
self.cover_url = 'http://www.gosc.pl' + img['src']
if year != date.today().year or not first:
break

recipes/hatalska.recipe Normal file
View File

@ -0,0 +1,28 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = 'teepel 2012'
'''
hatalska.com
'''
from calibre.web.feeds.news import BasicNewsRecipe
import re
class hatalska(BasicNewsRecipe):
title = u'Hatalska'
__author__ = 'teepel <teepel44@gmail.com>'
language = 'pl'
description = u'Blog specjalistki z branży mediowo-reklamowej - Natalii Hatalskiej'
oldest_article = 7
masthead_url='http://hatalska.com/wp-content/themes/jamel/images/logo.png'
max_articles_per_feed = 100
simultaneous_downloads = 5
remove_javascript=True
no_stylesheets=True
remove_tags =[]
remove_tags.append(dict(name = 'div', attrs = {'class' : 'feedflare'}))
feeds = [(u'Blog', u'http://feeds.feedburner.com/hatalskacom')]

View File

@ -1,5 +1,4 @@
from calibre.web.feeds.news import BasicNewsRecipe
import re
class HNonlineRecipe(BasicNewsRecipe):
__license__ = 'GPL v3'

[Binary recipe icon files changed (not shown): new icons added include recipes/icons/hatalska.png and recipes/icons/money_pl.png; several existing recipe icons were updated.]

View File

@ -0,0 +1,25 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = 'MrStefan'
'''
www.lifehacking.pl
'''
from calibre.web.feeds.news import BasicNewsRecipe
import re
class lifehacking(BasicNewsRecipe):
title = u'Lifehacker Polska'
__author__ = 'MrStefan <mrstefaan@gmail.com>'
language = 'pl'
description =u'Lifehacking - sposoby na zwiększanie własnej wydajności. Ułatwiaj sobie życie, wykorzystując wiedzę, metody, technologie, przydatne strony ...'
masthead_url='http://lifehacking.pl/wp-content/themes/lifehacking/images/lifehackerlogo.png'
remove_empty_feeds= True
oldest_article = 7
max_articles_per_feed = 100
remove_javascript=True
no_stylesheets=True
feeds = [(u'Lifehacker polska', u'http://feeds.feedburner.com/pl_lh')]

View File

@ -8,8 +8,6 @@ michalkiewicz.pl
from calibre.web.feeds.news import BasicNewsRecipe
#
class michalkiewicz(BasicNewsRecipe):
title = u'Stanis\u0142aw Michalkiewicz'
description = u'Strona autorska * felietony * artyku\u0142y * komentarze'
@ -23,4 +21,3 @@ class michalkiewicz(BasicNewsRecipe):
remove_tags = [dict(name='ul', attrs={'class':'menu'})]
feeds = [(u'Teksty', u'http://www.michalkiewicz.pl/rss.xml')]

View File

@ -0,0 +1,59 @@
__license__ = 'GPL v3'
__copyright__ = '2013, Darko Miletic <darko.miletic at gmail.com>'
'''
www.nezavisne.com
'''
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe
class NezavisneNovine(BasicNewsRecipe):
title = 'Nezavisne novine'
__author__ = 'Darko Miletic'
description = 'Nezavisne novine - Najnovije vijesti iz BiH, Srbije, Hrvatske, Crne Gore i svijeta'
publisher = 'NIGP "DNN"'
category = 'news, politics, Bosnia, Balkans'
oldest_article = 2
max_articles_per_feed = 200
no_stylesheets = True
encoding = 'utf8'
use_embedded_content = False
language = 'sr'
remove_empty_feeds = True
publication_type = 'newspaper'
cover_url = strftime('http://pdf.nezavisne.com/slika/novina/nezavisne_novine.jpg?v=%Y%m%d')
masthead_url = 'http://www.nezavisne.com/slika/osnova/nezavisne-novine-logo.gif'
extra_css = """
body{font-family: Arial,Helvetica,sans-serif }
img{margin-bottom: 0.4em; display:block}
"""
conversion_options = {
'comment' : description
, 'tags' : category
, 'publisher' : publisher
, 'language' : language
}
keep_only_tags = [dict(name='div', attrs={'class':'vijest'})]
remove_tags_after = dict(name='div', attrs={'id':'wrap'})
remove_tags = [
dict(name=['meta','link','iframe','object'])
,dict(name='div', attrs={'id':'wrap'})
]
remove_attributes=['lang','xmlns:fb','xmlns:og']
feeds = [
(u'Novosti' , u'http://feeds.feedburner.com/Novosti-NezavisneNovine' )
,(u'Posao' , u'http://feeds.feedburner.com/Posao-NezavisneNovine' )
,(u'Sport' , u'http://feeds.feedburner.com/Sport-NezavisneNovine' )
,(u'Komentar' , u'http://feeds.feedburner.com/Komentari-NezavisneNovine' )
,(u'Umjetnost i zabava' , u'http://feeds.feedburner.com/UmjetnostIZabava-NezavisneNovine' )
,(u'Život i stil' , u'http://feeds.feedburner.com/ZivotIStil-NezavisneNovine' )
,(u'Auto' , u'http://feeds.feedburner.com/Auto-NezavisneNovine' )
,(u'Nauka i tehnologija', u'http://feeds.feedburner.com/NaukaITehnologija-NezavisneNovine')
]
def preprocess_html(self, soup):
for item in soup.findAll(style=True):
del item['style']
return soup

View File

@ -37,4 +37,3 @@ class rynek_kolejowy(BasicNewsRecipe):
segment = url.split('/')
urlPart = segment[3]
return 'http://www.rynek-kolejowy.pl/drukuj.php?id=' + urlPart

View File

@ -70,5 +70,3 @@ class RzeczpospolitaRecipe(BasicNewsRecipe):
forget, sep, index = rest.rpartition(',')
return start + '/' + index + '?print=tak'

View File

@ -8,10 +8,7 @@ class SATKurier(BasicNewsRecipe):
title = u'SATKurier.pl'
__author__ = 'Artur Stachecki <artur.stachecki@gmail.com>'
language = 'pl'
description = u'Największy i najstarszy serwis poświęcony\
telewizji cyfrowej, przygotowywany przez wydawcę\
miesięcznika SAT Kurier. Bieżące wydarzenia\
z rynku mediów i nowych technologii.'
description = u'Serwis poświęcony telewizji cyfrowej'
oldest_article = 7
masthead_url = 'http://satkurier.pl/img/header_sk_logo.gif'
max_articles_per_feed = 100

View File

@ -1,24 +1,38 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Darko Miletic <darko.miletic at gmail.com>'
'''
sciencenews.org
'''
from calibre.web.feeds.news import BasicNewsRecipe
class Sciencenews(BasicNewsRecipe):
title = u'ScienceNews'
__author__ = u'Darko Miletic and Sujata Raman'
description = u"Science News is an award-winning weekly newsmagazine covering the most important research in all fields of science. Its 16 pages each week are packed with short, accurate articles that appeal to both general readers and scientists. Published since 1922, the magazine now reaches about 150,000 subscribers and more than 1 million readers. These are the latest News Items from Science News."
class ScienceNewsIssue(BasicNewsRecipe):
title = u'Science News Recent Issues'
__author__ = u'Darko Miletic, Sujata Raman and Starson17'
description = u'''Science News is an award-winning weekly
newsmagazine covering the most important research in all fields of science.
Its 16 pages each week are packed with short, accurate articles that appeal
to both general readers and scientists. Published since 1922, the magazine
now reaches about 150,000 subscribers and more than 1 million readers.
These are the latest News Items from Science News. This recipe downloads
the last 30 days worth of articles.'''
category = u'Science, Technology, News'
publisher = u'Society for Science & the Public'
oldest_article = 30
language = 'en'
max_articles_per_feed = 100
no_stylesheets = True
use_embedded_content = False
auto_cleanup = True
timefmt = ' [%A, %d %B, %Y]'
recursions = 1
remove_attributes = ['style']
conversion_options = {'linearize_tables' : True
, 'comment' : description
, 'tags' : category
, 'publisher' : publisher
, 'language' : language
}
extra_css = '''
.content_description{font-family:georgia ;font-size:x-large; color:#646464 ; font-weight:bold;}
@ -27,36 +41,33 @@ class Sciencenews(BasicNewsRecipe):
.content_edition{font-family:helvetica,arial ;font-size: xx-small ;}
.exclusive{color:#FF0000 ;}
.anonymous{color:#14487E ;}
.content_content{font-family:helvetica,arial ;font-size: x-small ; color:#000000;}
.description{color:#585858;font-family:helvetica,arial ;font-size: xx-small ;}
.content_content{font-family:helvetica,arial ;font-size: medium ; color:#000000;}
.description{color:#585858;font-family:helvetica,arial ;font-size: large ;}
.credit{color:#A6A6A6;font-family:helvetica,arial ;font-size: xx-small ;}
'''
#keep_only_tags = [ dict(name='div', attrs={'id':'column_action'}) ]
#remove_tags_after = dict(name='ul', attrs={'id':'content_functions_bottom'})
#remove_tags = [
#dict(name='ul', attrs={'id':'content_functions_bottom'})
#,dict(name='div', attrs={'id':['content_functions_top','breadcrumb_content']})
#,dict(name='img', attrs={'class':'icon'})
#,dict(name='div', attrs={'class': 'embiggen'})
#]
keep_only_tags = [ dict(name='div', attrs={'class':'content_content'}),
dict(name='ul', attrs={'id':'toc'})
]
feeds = [(u"Science News / News Items", u'http://sciencenews.org/index.php/feed/type/news/name/news.rss/view/feed/name/all.rss')]
feeds = [(u"Science News Current Issues", u'http://www.sciencenews.org/view/feed/type/edition/name/issues.rss')]
match_regexps = [
r'www.sciencenews.org/view/feature/id/',
r'www.sciencenews.org/view/generic/id'
]
def get_cover_url(self):
cover_url = None
index = 'http://www.sciencenews.org/view/home'
soup = self.index_to_soup(index)
link_item = soup.find(name = 'img',alt = "issue")
print link_item
if link_item:
cover_url = 'http://www.sciencenews.org' + link_item['src'] + '.jpg'
return cover_url
#def preprocess_html(self, soup):
#for tag in soup.findAll(name=['span']):
#tag.name = 'div'
#return soup
def preprocess_html(self, soup):
for tag in soup.findAll(name=['span']):
tag.name = 'div'
return soup

View File

@ -22,4 +22,3 @@ class swiatczytnikow(BasicNewsRecipe):
remove_tags = [dict(name = 'ul', attrs = {'class' : 'similar-posts'})]
preprocess_regexps = [(re.compile(u'<h3>Czytaj dalej:</h3>'), lambda match: '')]

View File

@ -8,60 +8,20 @@ import re
class telepolis(BasicNewsRecipe):
title = u'Telepolis.pl'
__author__ = 'Artur Stachecki <artur.stachecki@gmail.com>'
__author__ = 'Artur Stachecki <artur.stachecki@gmail.com>, Tomasz Długosz <tomek3d@gmail.com>'
language = 'pl'
description = u'Twój telekomunikacyjny serwis informacyjny.\
Codzienne informacje, testy i artykuły,\
promocje, baza telefonów oraz centrum rozrywki'
oldest_article = 7
description = u'Twój telekomunikacyjny serwis informacyjny.'
masthead_url = 'http://telepolis.pl/i/telepolis-logo2.gif'
max_articles_per_feed = 100
simultaneous_downloads = 5
remove_javascript = True
no_stylesheets = True
use_embedded_content = False
remove_tags = []
remove_tags.append(dict(attrs={'alt': 'TELEPOLIS.pl'}))
preprocess_regexps = [(re.compile(r'<: .*? :>'),
lambda match: ''),
(re.compile(r'<b>Zobacz:</b>.*?</a>', re.DOTALL),
lambda match: ''),
(re.compile(r'<-ankieta.*?>'),
lambda match: ''),
(re.compile(r'\(Q\!\)'),
lambda match: ''),
(re.compile(r'\(plik.*?\)'),
lambda match: ''),
(re.compile(r'<br.*?><br.*?>', re.DOTALL),
lambda match: '')
]
extra_css = '''.tb { font-weight: bold; font-size: 20px;}'''
feeds = [
(u'Wiadomości', u'http://www.telepolis.pl/rss/news.php'),
(u'Artykuły', u'http://www.telepolis.pl/rss/artykuly.php')
(u'Wiadomości', u'http://www.telepolis.pl/rss/news.php')#,
#(u'Artykuły', u'http://www.telepolis.pl/rss/artykuly.php')
]
def print_version(self, url):
if 'news.php' in url:
print_url = url.replace('news.php', 'news_print.php')
else:
print_url = url.replace('artykuly.php', 'art_print.php')
return print_url
def preprocess_html(self, soup):
for image in soup.findAll('img'):
if 'm.jpg' in image['src']:
image_big = image['src']
image_big = image_big.replace('m.jpg', '.jpg')
image['src'] = image_big
logo = soup.find('tr')
logo.extract()
for tag in soup.findAll('tr'):
for strings in ['Wiadomość wydrukowana', 'copyright']:
if strings in self.tag_to_string(tag):
tag.extract()
return self.adeify_images(soup)
keep_only_tags = [
dict(name='div', attrs={'class':'flol w510'}),
dict(name='div', attrs={'class':'main_tresc_news'})
]

View File

@ -5,7 +5,6 @@
http://www.unperiodico.unal.edu.co/
'''
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe
class UNPeriodico(BasicNewsRecipe):

Binary file not shown.

View File

@ -482,5 +482,10 @@ h2.library_name {
border: none
}
.details #random_button {
display:block
}
/* }}} */

View File

@ -324,9 +324,15 @@ function show_details(a_dom) {
function book() {
hidesort();
$('.details .left img').load(function() {
var rb = $('#random_button');
rb.button();
var img = $('.details .left img');
var height = $('#main').height();
height = Math.max(height, img.height() + 100);
var bh = 0;
if (rb.length > 0) {
bh = rb.height();
}
height = Math.max(height, img.height() + bh + 100);
$('#main').height(height);
});
}

View File

@ -1,6 +1,7 @@
<div id="details_{id}" class="details">
<div class="left">
<a href="{get_url}" title="Click to read {title} in the {fmt} format" class="details_thumb"><img alt="Cover of {title}" src="{prefix}/get/cover/{id}" /></a>
{random}
</div>
<div class="right">
<div class="field formats">{formats}</div>

View File

@ -517,3 +517,17 @@ default_tweak_format = None
# your library and your personal editing style.
preselect_first_completion = False
#: Recognize numbers inside text when sorting
# This means that when sorting on text fields like title the text "Book 2"
# will sort before the text "Book 100". If you want this behavior, set
# numeric_collation = True note that doing so will cause problems with text
# that starts with numbers and is a little slower.
numeric_collation = False
#: Sort the list of libraries alphabetically
# The list of libraries in the Copy to Library and Quick Switch menus is
# normally sorted by most used. However, if there are more than a certain
# number of such libraries, the sorting becomes alphabetic. You can set that
# number here. The default is ten libraries.
many_libraries = 10
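A minimal illustrative sketch of the behaviour the new numeric_collation tweak describes. This is not calibre's implementation; the hypothetical natural_key helper below merely shows why "Book 2" can sort before "Book 100" when numbers inside text are recognized:

import re

def natural_key(text):
    # Split into digit and non-digit runs so numeric parts compare as numbers.
    return [int(part) if part.isdigit() else part.lower()
            for part in re.split(r'(\d+)', text)]

titles = ['Book 100', 'Book 2', 'Book 11']
print(sorted(titles))                    # plain sort: ['Book 100', 'Book 11', 'Book 2']
print(sorted(titles, key=natural_key))   # numeric-aware: ['Book 2', 'Book 11', 'Book 100']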

View File

@ -38,7 +38,7 @@ binary_includes = [
'/lib/libz.so.1',
'/usr/lib/libtiff.so.5',
'/lib/libbz2.so.1',
'/usr/lib/libpoppler.so.27',
'/usr/lib/libpoppler.so.28',
'/usr/lib/libxml2.so.2',
'/usr/lib/libopenjpeg.so.2',
'/usr/lib/libxslt.so.1',

View File

@ -9,14 +9,14 @@ msgstr ""
"Project-Id-Version: calibre\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-11-25 14:01+0000\n"
"PO-Revision-Date: 2013-01-12 08:34+0000\n"
"Last-Translator: Jellby <Unknown>\n"
"PO-Revision-Date: 2013-02-26 12:21+0000\n"
"Last-Translator: Miguel Angel del Olmo <silinio45@gmail.com>\n"
"Language-Team: Español; Castellano <>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2013-01-13 04:37+0000\n"
"X-Generator: Launchpad (build 16420)\n"
"X-Launchpad-Export-Date: 2013-02-27 04:37+0000\n"
"X-Generator: Launchpad (build 16506)\n"
#. name for aaa
msgid "Ghotuo"
@ -9708,7 +9708,7 @@ msgstr ""
#. name for hto
msgid "Huitoto; Minica"
msgstr ""
msgstr "Huitoto; Meneca"
#. name for hts
msgid "Hadza"
@ -9736,7 +9736,7 @@ msgstr ""
#. name for hue
msgid "Huave; San Francisco Del Mar"
msgstr ""
msgstr "Huave; San Francisco Del Mar"
#. name for huf
msgid "Humene"
@ -9792,7 +9792,7 @@ msgstr ""
#. name for hus
msgid "Huastec"
msgstr ""
msgstr "Huasteco"
#. name for hut
msgid "Humla"
@ -9800,11 +9800,11 @@ msgstr ""
#. name for huu
msgid "Huitoto; Murui"
msgstr ""
msgstr "Huitoto; Murui"
#. name for huv
msgid "Huave; San Mateo Del Mar"
msgstr ""
msgstr "Huave; San Mateo Del Mar"
#. name for huw
msgid "Hukumina"
@ -9812,7 +9812,7 @@ msgstr ""
#. name for hux
msgid "Huitoto; Nüpode"
msgstr ""
msgstr "Huitoto; Nipode"
#. name for huy
msgid "Hulaulá"
@ -9828,7 +9828,7 @@ msgstr ""
#. name for hve
msgid "Huave; San Dionisio Del Mar"
msgstr ""
msgstr "Huave; San Dionisio Del Mar"
#. name for hvk
msgid "Haveke"
@ -9840,7 +9840,7 @@ msgstr ""
#. name for hvv
msgid "Huave; Santa María Del Mar"
msgstr ""
msgstr "Huave; Santa María Del Mar"
#. name for hwa
msgid "Wané"
@ -9884,7 +9884,7 @@ msgstr "Iban"
#. name for ibb
msgid "Ibibio"
msgstr ""
msgstr "Ibibio"
#. name for ibd
msgid "Iwaidja"
@ -9964,7 +9964,7 @@ msgstr ""
#. name for ide
msgid "Idere"
msgstr ""
msgstr "Idere"
#. name for idi
msgid "Idi"
@ -9976,7 +9976,7 @@ msgstr "Ido"
#. name for idr
msgid "Indri"
msgstr ""
msgstr "Indri"
#. name for ids
msgid "Idesa"
@ -9988,7 +9988,7 @@ msgstr ""
#. name for idu
msgid "Idoma"
msgstr ""
msgstr "Idoma"
#. name for ifa
msgid "Ifugao; Amganad"
@ -9996,7 +9996,7 @@ msgstr ""
#. name for ifb
msgid "Ifugao; Batad"
msgstr ""
msgstr "Ifugao; Batad"
#. name for ife
msgid "Ifè"
@ -10004,7 +10004,7 @@ msgstr ""
#. name for iff
msgid "Ifo"
msgstr ""
msgstr "Ifo"
#. name for ifk
msgid "Ifugao; Tuwali"
@ -10064,7 +10064,7 @@ msgstr ""
#. name for ihi
msgid "Ihievbe"
msgstr ""
msgstr "Ihievbe"
#. name for ihp
msgid "Iha"
@ -10288,15 +10288,15 @@ msgstr ""
#. name for iou
msgid "Tuma-Irumu"
msgstr ""
msgstr "Tuma-Irumu"
#. name for iow
msgid "Iowa-Oto"
msgstr ""
msgstr "Iowa-Oto"
#. name for ipi
msgid "Ipili"
msgstr ""
msgstr "Ipili"
#. name for ipk
msgid "Inupiaq"
@ -10304,7 +10304,7 @@ msgstr "Iñupiaq"
#. name for ipo
msgid "Ipiko"
msgstr ""
msgstr "Ipiko"
#. name for iqu
msgid "Iquito"
@ -30768,7 +30768,7 @@ msgstr ""
#. name for zts
msgid "Zapotec; Tilquiapan"
msgstr ""
msgstr "Zapoteco; Tilquiapan"
#. name for ztt
msgid "Zapotec; Tejalapan"

View File

@ -13,14 +13,14 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-11-25 14:01+0000\n"
"PO-Revision-Date: 2012-04-04 19:53+0000\n"
"Last-Translator: Antoni Kudelski <antekk@linux.pl>\n"
"PO-Revision-Date: 2013-02-23 12:04+0000\n"
"Last-Translator: Marcin Ostajewski (panszpik) <Unknown>\n"
"Language-Team: Polish <translation-team-pl@lists.sourceforge.net>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2012-04-05 04:43+0000\n"
"X-Generator: Launchpad (build 15060)\n"
"X-Launchpad-Export-Date: 2013-02-24 04:41+0000\n"
"X-Generator: Launchpad (build 16506)\n"
"Language: pl\n"
#. name for aaa
@ -857,11 +857,11 @@ msgstr "Akurio"
#. name for akp
msgid "Siwu"
msgstr ""
msgstr "Siwu"
#. name for akq
msgid "Ak"
msgstr ""
msgstr "Ak"
#. name for akr
msgid "Araki"
@ -973,7 +973,7 @@ msgstr "ałtajski południowy"
#. name for alu
msgid "'Are'are"
msgstr ""
msgstr "'Are'are"
#. name for alw
msgid "Alaba-Kabeena"
@ -1037,7 +1037,7 @@ msgstr "War-Jaintia"
#. name for amm
msgid "Ama (Papua New Guinea)"
msgstr ""
msgstr "Ama (Papua New Guinea)"
#. name for amn
msgid "Amanab"
@ -1061,7 +1061,7 @@ msgstr "Amarakaeri"
#. name for ams
msgid "Amami-Oshima; Southern"
msgstr ""
msgstr "Południowy amami-oshima"
#. name for amt
msgid "Amto"
@ -1069,7 +1069,7 @@ msgstr "Amto"
#. name for amu
msgid "Amuzgo; Guerrero"
msgstr ""
msgstr "Amuzgo; Guerrero"
#. name for amv
msgid "Ambelau"
@ -1249,7 +1249,7 @@ msgstr "Ömie"
#. name for aon
msgid "Arapesh; Bumbita"
msgstr ""
msgstr "Arapesh; Bumbita"
#. name for aor
msgid "Aore"
@ -1289,7 +1289,7 @@ msgstr "Bukiyip"
#. name for apf
msgid "Agta; Pahanan"
msgstr ""
msgstr "Agta; Pahanan"
#. name for apg
msgid "Ampanang"
@ -1305,19 +1305,19 @@ msgstr "Apiaká"
#. name for apj
msgid "Apache; Jicarilla"
msgstr ""
msgstr "Apache; Jicarilla"
#. name for apk
msgid "Apache; Kiowa"
msgstr ""
msgstr "Apache; Kiowa"
#. name for apl
msgid "Apache; Lipan"
msgstr ""
msgstr "Apache; Lipan"
#. name for apm
msgid "Apache; Mescalero-Chiricahua"
msgstr ""
msgstr "Apache; Mescalero-Chiricahua"
#. name for apn
msgid "Apinayé"
@ -1337,11 +1337,11 @@ msgstr "a-pucikwar"
#. name for apr
msgid "Arop-Lokep"
msgstr ""
msgstr "Arop-Lokep"
#. name for aps
msgid "Arop-Sissano"
msgstr ""
msgstr "Arop-Sissano"
#. name for apt
msgid "Apatani"
@ -1357,7 +1357,7 @@ msgstr "Alapmunte"
#. name for apw
msgid "Apache; Western"
msgstr ""
msgstr "Zachodni apache"
#. name for apx
msgid "Aputai"
@ -1389,7 +1389,7 @@ msgstr "Atohwaim"
#. name for aqn
msgid "Alta; Northern"
msgstr ""
msgstr "Północny alta"
#. name for aqp
msgid "Atakapa"
@ -1409,7 +1409,7 @@ msgstr "arabski"
#. name for arb
msgid "Arabic; Standard"
msgstr ""
msgstr "Standardowy arabski"
#. name for arc
msgid "Aramaic; Official (700-300 BCE)"
@ -1465,15 +1465,15 @@ msgstr "arabski algierski"
#. name for arr
msgid "Karo (Brazil)"
msgstr ""
msgstr "Karo (Brazylia)"
#. name for ars
msgid "Arabic; Najdi"
msgstr ""
msgstr "Arabski Najdi"
#. name for aru
msgid "Aruá (Amazonas State)"
msgstr ""
msgstr "Aruá (stan Amazonas)"
#. name for arv
msgid "Arbore"
@ -1485,7 +1485,7 @@ msgstr "arawak"
#. name for arx
msgid "Aruá (Rodonia State)"
msgstr ""
msgstr "Aruá (stan Rodonia)"
#. name for ary
msgid "Arabic; Moroccan"
@ -1529,11 +1529,11 @@ msgstr "Abishira"
#. name for asi
msgid "Buruwai"
msgstr ""
msgstr "Buruwai"
#. name for asj
msgid "Nsari"
msgstr ""
msgstr "Nsari"
#. name for ask
msgid "Ashkun"
@ -1541,7 +1541,7 @@ msgstr "aszkun"
#. name for asl
msgid "Asilulu"
msgstr ""
msgstr "Asilulu"
#. name for asm
msgid "Assamese"
@ -1549,11 +1549,11 @@ msgstr "asamski"
#. name for asn
msgid "Asuriní; Xingú"
msgstr ""
msgstr "Asuriní; Xingú"
#. name for aso
msgid "Dano"
msgstr ""
msgstr "Dano"
#. name for asp
msgid "Algerian Sign Language"
@ -1565,11 +1565,11 @@ msgstr "austriacki język migowy"
#. name for asr
msgid "Asuri"
msgstr ""
msgstr "Asuri"
#. name for ass
msgid "Ipulo"
msgstr ""
msgstr "Ipulo"
#. name for ast
msgid "Asturian"
@ -1577,11 +1577,11 @@ msgstr "asturyjski"
#. name for asu
msgid "Asurini; Tocantins"
msgstr ""
msgstr "Asurini; Tocantins"
#. name for asv
msgid "Asoa"
msgstr ""
msgstr "Asoa"
#. name for asw
msgid "Australian Aborigines Sign Language"
@ -1589,43 +1589,43 @@ msgstr "język migowy Aborygenów australijskich"
#. name for asx
msgid "Muratayak"
msgstr ""
msgstr "Muratayak"
#. name for asy
msgid "Asmat; Yaosakor"
msgstr ""
msgstr "Asmat; Yaosakor"
#. name for asz
msgid "As"
msgstr ""
msgstr "As"
#. name for ata
msgid "Pele-Ata"
msgstr ""
msgstr "Pele-Ata"
#. name for atb
msgid "Zaiwa"
msgstr ""
msgstr "Zaiwa"
#. name for atc
msgid "Atsahuaca"
msgstr ""
msgstr "Atsahuaca"
#. name for atd
msgid "Manobo; Ata"
msgstr ""
msgstr "Manobo; Ata"
#. name for ate
msgid "Atemble"
msgstr ""
msgstr "Atemble"
#. name for atg
msgid "Ivbie North-Okpela-Arhe"
msgstr ""
msgstr "Ivbie North-Okpela-Arhe"
#. name for ati
msgid "Attié"
msgstr ""
msgstr "Attié"
#. name for atj
msgid "Atikamekw"
@ -1633,111 +1633,111 @@ msgstr "atikamekw"
#. name for atk
msgid "Ati"
msgstr ""
msgstr "Ati"
#. name for atl
msgid "Agta; Mt. Iraya"
msgstr ""
msgstr "Agta; Mt. Iraya"
#. name for atm
msgid "Ata"
msgstr ""
msgstr "Ata"
#. name for atn
msgid "Ashtiani"
msgstr ""
msgstr "Ashtiani"
#. name for ato
msgid "Atong"
msgstr ""
msgstr "Atong"
#. name for atp
msgid "Atta; Pudtol"
msgstr ""
msgstr "Atta; Pudtol"
#. name for atq
msgid "Aralle-Tabulahan"
msgstr ""
msgstr "Aralle-Tabulahan"
#. name for atr
msgid "Waimiri-Atroari"
msgstr ""
msgstr "Waimiri-Atroari"
#. name for ats
msgid "Gros Ventre"
msgstr ""
msgstr "Gros Ventre"
#. name for att
msgid "Atta; Pamplona"
msgstr ""
msgstr "Atta; Pamplona"
#. name for atu
msgid "Reel"
msgstr ""
msgstr "Reel"
#. name for atv
msgid "Altai; Northern"
msgstr ""
msgstr "Altai; Northern"
#. name for atw
msgid "Atsugewi"
msgstr ""
msgstr "Atsugewi"
#. name for atx
msgid "Arutani"
msgstr ""
msgstr "Arutani"
#. name for aty
msgid "Aneityum"
msgstr ""
msgstr "Aneityum"
#. name for atz
msgid "Arta"
msgstr ""
msgstr "Arta"
#. name for aua
msgid "Asumboa"
msgstr ""
msgstr "Asumboa"
#. name for aub
msgid "Alugu"
msgstr ""
msgstr "Alugu"
#. name for auc
msgid "Waorani"
msgstr ""
msgstr "Waorani"
#. name for aud
msgid "Anuta"
msgstr ""
msgstr "Anuta"
#. name for aue
msgid "=/Kx'au//'ein"
msgstr ""
msgstr "=/Kx'au//'ein"
#. name for aug
msgid "Aguna"
msgstr ""
msgstr "Aguna"
#. name for auh
msgid "Aushi"
msgstr ""
msgstr "Aushi"
#. name for aui
msgid "Anuki"
msgstr ""
msgstr "Anuki"
#. name for auj
msgid "Awjilah"
msgstr ""
msgstr "Awjilah"
#. name for auk
msgid "Heyo"
msgstr ""
msgstr "Heyo"
#. name for aul
msgid "Aulua"
msgstr ""
msgstr "Aulua"
#. name for aum
msgid "Asu (Nigeria)"
@ -1745,11 +1745,11 @@ msgstr "asu (Nigeria)"
#. name for aun
msgid "One; Molmo"
msgstr ""
msgstr "One; Molmo"
#. name for auo
msgid "Auyokawa"
msgstr ""
msgstr "Auyokawa"
#. name for aup
msgid "Makayam"
@ -1757,19 +1757,19 @@ msgstr ""
#. name for auq
msgid "Anus"
msgstr ""
msgstr "Anus"
#. name for aur
msgid "Aruek"
msgstr ""
msgstr "Aruek"
#. name for aut
msgid "Austral"
msgstr ""
msgstr "Austral"
#. name for auu
msgid "Auye"
msgstr ""
msgstr "Auye"
#. name for auw
msgid "Awyi"
@ -1781,7 +1781,7 @@ msgstr ""
#. name for auy
msgid "Awiyaana"
msgstr ""
msgstr "Awiyaana"
#. name for auz
msgid "Arabic; Uzbeki"
@ -1793,11 +1793,11 @@ msgstr "awarski"
#. name for avb
msgid "Avau"
msgstr ""
msgstr "Avau"
#. name for avd
msgid "Alviri-Vidari"
msgstr ""
msgstr "Alviri-Vidari"
#. name for ave
msgid "Avestan"
@ -1805,11 +1805,11 @@ msgstr "awestyjski"
#. name for avi
msgid "Avikam"
msgstr ""
msgstr "Avikam"
#. name for avk
msgid "Kotava"
msgstr ""
msgstr "Kotava"
#. name for avl
msgid "Arabic; Eastern Egyptian Bedawi"
@ -1817,23 +1817,23 @@ msgstr ""
#. name for avn
msgid "Avatime"
msgstr ""
msgstr "Avatime"
#. name for avo
msgid "Agavotaguerra"
msgstr ""
msgstr "Agavotaguerra"
#. name for avs
msgid "Aushiri"
msgstr ""
msgstr "Aushiri"
#. name for avt
msgid "Au"
msgstr ""
msgstr "Au"
#. name for avu
msgid "Avokaya"
msgstr ""
msgstr "Avokaya"
#. name for avv
msgid "Avá-Canoeiro"
@ -1849,7 +1849,7 @@ msgstr "awa (Papua Nowa Gwinea)"
#. name for awc
msgid "Cicipu"
msgstr ""
msgstr "Cicipu"
#. name for awe
msgid "Awetí"
@ -1857,15 +1857,15 @@ msgstr ""
#. name for awh
msgid "Awbono"
msgstr ""
msgstr "Awbono"
#. name for awi
msgid "Aekyom"
msgstr ""
msgstr "Aekyom"
#. name for awk
msgid "Awabakal"
msgstr ""
msgstr "Awabakal"
#. name for awm
msgid "Arawum"
@ -1873,31 +1873,31 @@ msgstr "arawum"
#. name for awn
msgid "Awngi"
msgstr ""
msgstr "Awngi"
#. name for awo
msgid "Awak"
msgstr ""
msgstr "Awak"
#. name for awr
msgid "Awera"
msgstr ""
msgstr "Awera"
#. name for aws
msgid "Awyu; South"
msgstr ""
msgstr "Południowy aywu"
#. name for awt
msgid "Araweté"
msgstr ""
msgstr "Araweté"
#. name for awu
msgid "Awyu; Central"
msgstr ""
msgstr "Środkowy aywu"
#. name for awv
msgid "Awyu; Jair"
msgstr ""
msgstr "Awyu; Jair"
#. name for aww
msgid "Awun"
@ -1905,7 +1905,7 @@ msgstr "awun"
#. name for awx
msgid "Awara"
msgstr ""
msgstr "Awara"
#. name for awy
msgid "Awyu; Edera"
@ -1913,15 +1913,15 @@ msgstr "ederah"
#. name for axb
msgid "Abipon"
msgstr ""
msgstr "Abipon"
#. name for axg
msgid "Arára; Mato Grosso"
msgstr ""
msgstr "Arára; Mato Grosso"
#. name for axk
msgid "Yaka (Central African Republic)"
msgstr ""
msgstr "Yaka (Central African Republic)"
#. name for axm
msgid "Armenian; Middle"
@ -1929,7 +1929,7 @@ msgstr "średnioormiański"
#. name for axx
msgid "Xaragure"
msgstr ""
msgstr "Xaragure"
#. name for aya
msgid "Awar"
@ -1937,7 +1937,7 @@ msgstr "awar"
#. name for ayb
msgid "Gbe; Ayizo"
msgstr ""
msgstr "Gbe; Ayizo"
#. name for ayc
msgid "Aymara; Southern"
@ -1945,27 +1945,27 @@ msgstr "ajmara południowy"
#. name for ayd
msgid "Ayabadhu"
msgstr ""
msgstr "Ayabadhu"
#. name for aye
msgid "Ayere"
msgstr ""
msgstr "Ayere"
#. name for ayg
msgid "Ginyanga"
msgstr ""
msgstr "Ginyanga"
#. name for ayh
msgid "Arabic; Hadrami"
msgstr ""
msgstr "Arabski Hadrami"
#. name for ayi
msgid "Leyigha"
msgstr ""
msgstr "Leyigha"
#. name for ayk
msgid "Akuku"
msgstr ""
msgstr "Akuku"
#. name for ayl
msgid "Arabic; Libyan"
@ -1977,19 +1977,19 @@ msgstr "ajmara"
#. name for ayn
msgid "Arabic; Sanaani"
msgstr ""
msgstr "Arabski Sanaani"
#. name for ayo
msgid "Ayoreo"
msgstr ""
msgstr "Ayoreo"
#. name for ayp
msgid "Arabic; North Mesopotamian"
msgstr ""
msgstr "Arabski; Mezopotamia Północna"
#. name for ayq
msgid "Ayi (Papua New Guinea)"
msgstr ""
msgstr "Ayi (Papua Nowa Gwinea)"
#. name for ayr
msgid "Aymara; Central"
@ -1997,27 +1997,27 @@ msgstr "ajmara centralny"
#. name for ays
msgid "Ayta; Sorsogon"
msgstr ""
msgstr "Ayta; Sorsogon"
#. name for ayt
msgid "Ayta; Magbukun"
msgstr ""
msgstr "Ayta; Magbukun"
#. name for ayu
msgid "Ayu"
msgstr ""
msgstr "Ayu"
#. name for ayy
msgid "Ayta; Tayabas"
msgstr ""
msgstr "Ayta; Tayabas"
#. name for ayz
msgid "Mai Brat"
msgstr ""
msgstr "Mai Brat"
#. name for aza
msgid "Azha"
msgstr ""
msgstr "Azha"
#. name for azb
msgid "Azerbaijani; South"
@ -2029,7 +2029,7 @@ msgstr "azerski"
#. name for azg
msgid "Amuzgo; San Pedro Amuzgos"
msgstr ""
msgstr "Amuzgo; San Pedro Amuzgos"
#. name for azj
msgid "Azerbaijani; North"
@ -2037,35 +2037,35 @@ msgstr "północnoazerski"
#. name for azm
msgid "Amuzgo; Ipalapa"
msgstr ""
msgstr "Amuzgo; Ipalapa"
#. name for azo
msgid "Awing"
msgstr ""
msgstr "Awing"
#. name for azt
msgid "Atta; Faire"
msgstr ""
msgstr "Atta; Faire"
#. name for azz
msgid "Nahuatl; Highland Puebla"
msgstr ""
msgstr "Nahuatl; Wyżyna Puebla"
#. name for baa
msgid "Babatana"
msgstr ""
msgstr "Babatana"
#. name for bab
msgid "Bainouk-Gunyuño"
msgstr ""
msgstr "Bainouk-Gunyuño"
#. name for bac
msgid "Badui"
msgstr ""
msgstr "Badui"
#. name for bae
msgid "Baré"
msgstr ""
msgstr "Baré"
#. name for baf
msgid "Nubaca"

File diff suppressed because it is too large

View File

@ -13,14 +13,14 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-11-25 14:01+0000\n"
"PO-Revision-Date: 2013-01-21 14:06+0000\n"
"Last-Translator: Don Miguel <bmv@mail.ru>\n"
"PO-Revision-Date: 2013-02-21 23:51+0000\n"
"Last-Translator: Глория Хрусталёва <gloriya@hushmail.com>\n"
"Language-Team: Russian <debian-l10n-russian@lists.debian.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2013-01-22 04:46+0000\n"
"X-Generator: Launchpad (build 16430)\n"
"X-Launchpad-Export-Date: 2013-02-23 05:19+0000\n"
"X-Generator: Launchpad (build 16506)\n"
"Language: ru\n"
#. name for aaa
@ -237,7 +237,7 @@ msgstr "Ачехский"
#. name for acf
msgid "Creole French; Saint Lucian"
msgstr ""
msgstr "Креольский французский; Сент-люсийский"
#. name for ach
msgid "Acoli"
@ -257,7 +257,7 @@ msgstr ""
#. name for acm
msgid "Arabic; Mesopotamian"
msgstr ""
msgstr "Арабский; Месопатамский"
#. name for acn
msgid "Achang"
@ -273,7 +273,7 @@ msgstr ""
#. name for acr
msgid "Achi"
msgstr ""
msgstr "Ачи"
#. name for acs
msgid "Acroá"
@ -297,7 +297,7 @@ msgstr ""
#. name for acx
msgid "Arabic; Omani"
msgstr ""
msgstr "Арабский; Оманский"
#. name for acy
msgid "Arabic; Cypriot"
@ -369,7 +369,7 @@ msgstr ""
#. name for ads
msgid "Adamorobe Sign Language"
msgstr ""
msgstr "Знаковый язык Адаморобе"
#. name for adt
msgid "Adnyamathanha"
@ -389,7 +389,7 @@ msgstr ""
#. name for ady
msgid "Adyghe"
msgstr ""
msgstr "Адыгейский"
#. name for adz
msgid "Adzera"
@ -401,7 +401,7 @@ msgstr ""
#. name for aeb
msgid "Arabic; Tunisian"
msgstr ""
msgstr "Арабский; Тунисский"
#. name for aec
msgid "Arabic; Saidi"
@ -409,7 +409,7 @@ msgstr ""
#. name for aed
msgid "Argentine Sign Language"
msgstr ""
msgstr "Аргентинский язык жестов"
#. name for aee
msgid "Pashayi; Northeast"
@ -429,7 +429,7 @@ msgstr ""
#. name for aen
msgid "Armenian Sign Language"
msgstr ""
msgstr "Армянский язык жестов"
#. name for aeq
msgid "Aer"
@ -609,7 +609,7 @@ msgstr ""
#. name for agx
msgid "Aghul"
msgstr ""
msgstr "Агульский"
#. name for agy
msgid "Alta; Southern"
@ -665,7 +665,7 @@ msgstr ""
#. name for ahr
msgid "Ahirani"
msgstr ""
msgstr "Ахирани"
#. name for ahs
msgid "Ashe"
@ -701,7 +701,7 @@ msgstr ""
#. name for aig
msgid "Creole English; Antigua and Barbuda"
msgstr ""
msgstr "Креольский английский; Антигуа и Барбуда"
#. name for aih
msgid "Ai-Cham"
@ -709,7 +709,7 @@ msgstr ""
#. name for aii
msgid "Neo-Aramaic; Assyrian"
msgstr ""
msgstr "Новоарамейский; Ассирийский"
#. name for aij
msgid "Lishanid Noshan"
@ -825,7 +825,7 @@ msgstr ""
#. name for akg
msgid "Anakalangu"
msgstr ""
msgstr "Анакалангу"
#. name for akh
msgid "Angal Heneng"
@ -881,7 +881,7 @@ msgstr ""
#. name for akv
msgid "Akhvakh"
msgstr ""
msgstr "Ахвахский"
#. name for akw
msgid "Akwa"
@ -897,7 +897,7 @@ msgstr ""
#. name for akz
msgid "Alabama"
msgstr ""
msgstr "Язык племени алабама"
#. name for ala
msgid "Alago"
@ -945,7 +945,7 @@ msgstr ""
#. name for aln
msgid "Albanian; Gheg"
msgstr ""
msgstr "Албанский; Гегский"
#. name for alo
msgid "Larike-Wakasihu"
@ -953,11 +953,11 @@ msgstr ""
#. name for alp
msgid "Alune"
msgstr ""
msgstr "Алуне"
#. name for alq
msgid "Algonquin"
msgstr ""
msgstr "Алгонкинский"
#. name for alr
msgid "Alutor"
@ -965,7 +965,7 @@ msgstr ""
#. name for als
msgid "Albanian; Tosk"
msgstr ""
msgstr "Албанский; Тоскский"
#. name for alt
msgid "Altai; Southern"
@ -1037,7 +1037,7 @@ msgstr ""
#. name for amm
msgid "Ama (Papua New Guinea)"
msgstr ""
msgstr "Ама (Папуа-Новая Гвинея)"
#. name for amn
msgid "Amanab"
@ -1077,7 +1077,7 @@ msgstr ""
#. name for amw
msgid "Neo-Aramaic; Western"
msgstr ""
msgstr "Новоарамейский; Западный"
#. name for amx
msgid "Anmatyerre"
@ -1085,7 +1085,7 @@ msgstr ""
#. name for amy
msgid "Ami"
msgstr ""
msgstr "Ами"
#. name for amz
msgid "Atampaya"
@ -1281,7 +1281,7 @@ msgstr ""
#. name for apd
msgid "Arabic; Sudanese"
msgstr ""
msgstr "Арабский; Суданский"
#. name for ape
msgid "Bukiyip"
@ -1373,7 +1373,7 @@ msgstr ""
#. name for aqc
msgid "Archi"
msgstr ""
msgstr "Арчинский"
#. name for aqd
msgid "Dogon; Ampari"
@ -1409,11 +1409,11 @@ msgstr "Арабский"
#. name for arb
msgid "Arabic; Standard"
msgstr ""
msgstr "Арабский; Стандартный"
#. name for arc
msgid "Aramaic; Official (700-300 BCE)"
msgstr ""
msgstr "Арамейский; Официальный"
#. name for ard
msgid "Arabana"
@ -1461,7 +1461,7 @@ msgstr "Арапахо"
#. name for arq
msgid "Arabic; Algerian"
msgstr ""
msgstr "Арабский; Алжирский"
#. name for arr
msgid "Karo (Brazil)"
@ -1489,11 +1489,11 @@ msgstr ""
#. name for ary
msgid "Arabic; Moroccan"
msgstr ""
msgstr "Арабский; Марокканский"
#. name for arz
msgid "Arabic; Egyptian"
msgstr ""
msgstr "Арабский; Египетский"
#. name for asa
msgid "Asu (Tanzania)"
@ -1537,7 +1537,7 @@ msgstr ""
#. name for ask
msgid "Ashkun"
msgstr ""
msgstr "Ашкун"
#. name for asl
msgid "Asilulu"
@ -1573,7 +1573,7 @@ msgstr ""
#. name for ast
msgid "Asturian"
msgstr ""
msgstr "Астурийский"
#. name for asu
msgid "Asurini; Tocantins"
@ -1693,7 +1693,7 @@ msgstr ""
#. name for atz
msgid "Arta"
msgstr ""
msgstr "Арта"
#. name for aua
msgid "Asumboa"
@ -1969,7 +1969,7 @@ msgstr ""
#. name for ayl
msgid "Arabic; Libyan"
msgstr ""
msgstr "Арабский; Ливийский"
#. name for aym
msgid "Aymara"
@ -1985,7 +1985,7 @@ msgstr ""
#. name for ayp
msgid "Arabic; North Mesopotamian"
msgstr ""
msgstr "Арабский; Северомесопатамский"
#. name for ayq
msgid "Ayi (Papua New Guinea)"
@ -2021,7 +2021,7 @@ msgstr ""
#. name for azb
msgid "Azerbaijani; South"
msgstr ""
msgstr "Азербайджанский; Южный"
#. name for aze
msgid "Azerbaijani"
@ -2033,7 +2033,7 @@ msgstr ""
#. name for azj
msgid "Azerbaijani; North"
msgstr ""
msgstr "Азербайджанский; Северный"
#. name for azm
msgid "Amuzgo; Ipalapa"
@ -2077,7 +2077,7 @@ msgstr ""
#. name for bah
msgid "Creole English; Bahamas"
msgstr ""
msgstr "Креольский английский; Багамский"
#. name for baj
msgid "Barakai"
@ -2113,7 +2113,7 @@ msgstr ""
#. name for bas
msgid "Basa (Cameroon)"
msgstr ""
msgstr "Баса (Камерун)"
#. name for bau
msgid "Bada (Nigeria)"
@ -2381,7 +2381,7 @@ msgstr ""
#. name for bdj
msgid "Bai"
msgstr ""
msgstr "Бай"
#. name for bdk
msgid "Budukh"
@ -2473,7 +2473,7 @@ msgstr ""
#. name for beg
msgid "Belait"
msgstr ""
msgstr "Белайт"
#. name for beh
msgid "Biali"
@ -2497,7 +2497,7 @@ msgstr "Белорусский"
#. name for bem
msgid "Bemba (Zambia)"
msgstr ""
msgstr "Бемба (Замбия)"
#. name for ben
msgid "Bengali"
@ -2641,7 +2641,7 @@ msgstr ""
#. name for bfy
msgid "Bagheli"
msgstr ""
msgstr "Багхели"
#. name for bfz
msgid "Pahari; Mahasu"
@ -2737,7 +2737,7 @@ msgstr ""
#. name for bgx
msgid "Turkish; Balkan Gagauz"
msgstr ""
msgstr "Турецкий; Гагаузский"
#. name for bgy
msgid "Benggoi"
@ -2753,7 +2753,7 @@ msgstr ""
#. name for bhb
msgid "Bhili"
msgstr ""
msgstr "Бхили"
#. name for bhc
msgid "Biga"
@ -3113,7 +3113,7 @@ msgstr ""
#. name for bku
msgid "Buhid"
msgstr ""
msgstr "Бухид"
#. name for bkv
msgid "Bekwarra"
@ -3333,7 +3333,7 @@ msgstr ""
#. name for bmy
msgid "Bemba (Democratic Republic of Congo)"
msgstr ""
msgstr "Бемба (Демократическая Республика Конго)"
#. name for bmz
msgid "Baramu"
@ -3409,7 +3409,7 @@ msgstr ""
#. name for bns
msgid "Bundeli"
msgstr ""
msgstr "Бундели"
#. name for bnu
msgid "Bentong"
@ -3553,7 +3553,7 @@ msgstr ""
#. name for bph
msgid "Botlikh"
msgstr ""
msgstr "Ботлихский"
#. name for bpi
msgid "Bagupi"
@ -3613,7 +3613,7 @@ msgstr ""
#. name for bpw
msgid "Bo (Papua New Guinea)"
msgstr ""
msgstr "Бо (Папуа-Новая Гвинея)"
#. name for bpx
msgid "Bareli; Palya"
@ -3621,7 +3621,7 @@ msgstr ""
#. name for bpy
msgid "Bishnupriya"
msgstr ""
msgstr "Бишнуприя"
#. name for bpz
msgid "Bilba"
@ -3821,7 +3821,7 @@ msgstr ""
#. name for brx
msgid "Bodo (India)"
msgstr ""
msgstr "Бодо (Индия)"
#. name for bry
msgid "Burui"
@ -3849,7 +3849,7 @@ msgstr ""
#. name for bsf
msgid "Bauchi"
msgstr ""
msgstr "Баучи"
#. name for bsg
msgid "Bashkardi"
@ -3857,7 +3857,7 @@ msgstr ""
#. name for bsh
msgid "Kati"
msgstr ""
msgstr "Кати"
#. name for bsi
msgid "Bassossi"
@ -3869,7 +3869,7 @@ msgstr ""
#. name for bsk
msgid "Burushaski"
msgstr ""
msgstr "Бурушаски"
#. name for bsl
msgid "Basa-Gumna"
@ -4389,7 +4389,7 @@ msgstr ""
#. name for bxr
msgid "Buriat; Russia"
msgstr ""
msgstr "Бурятский; Россия"
#. name for bxs
msgid "Busam"
@ -4553,11 +4553,11 @@ msgstr ""
#. name for bzj
msgid "Kriol English; Belize"
msgstr ""
msgstr "Креольский английский; Белиз"
#. name for bzk
msgid "Creole English; Nicaragua"
msgstr ""
msgstr "Креольский английский; Никарагуа"
#. name for bzl
msgid "Boano (Sulawesi)"
@ -5001,7 +5001,7 @@ msgstr ""
#. name for chm
msgid "Mari (Russia)"
msgstr ""
msgstr "Марийский (Россия)"
#. name for chn
msgid "Chinook jargon"
@ -5285,7 +5285,7 @@ msgstr ""
#. name for cmn
msgid "Chinese; Mandarin"
msgstr ""
msgstr "Китайский; Мандарин"
#. name for cmo
msgid "Mnong; Central"
@ -7581,7 +7581,7 @@ msgstr ""
#. name for fij
msgid "Fijian"
msgstr "Фиджи"
msgstr "Фиджийский"
#. name for fil
msgid "Filipino"
@ -8037,11 +8037,11 @@ msgstr ""
#. name for gcf
msgid "Creole French; Guadeloupean"
msgstr ""
msgstr "Креольский французский; Гваделупский"
#. name for gcl
msgid "Creole English; Grenadian"
msgstr ""
msgstr "Креольский английский; Гренадский"
#. name for gcn
msgid "Gaina"
@ -8049,7 +8049,7 @@ msgstr ""
#. name for gcr
msgid "Creole French; Guianese"
msgstr ""
msgstr "Креольский французский; Гвианский"
#. name for gct
msgid "German; Colonia Tovar"
@ -9089,7 +9089,7 @@ msgstr ""
#. name for gyn
msgid "Creole English; Guyanese"
msgstr ""
msgstr "Креольский английский; Гайянский"
#. name for gyr
msgid "Guarayu"
@ -9853,7 +9853,7 @@ msgstr ""
#. name for hwc
msgid "Creole English; Hawai'i"
msgstr ""
msgstr "Креольский английский; Гавайский"
#. name for hwo
msgid "Hwana"
@ -10577,7 +10577,7 @@ msgstr ""
#. name for jam
msgid "Creole English; Jamaican"
msgstr ""
msgstr "Креольский английский; Ямайский"
#. name for jao
msgid "Yanyuwa"
@ -14245,7 +14245,7 @@ msgstr ""
#. name for lir
msgid "English; Liberian"
msgstr ""
msgstr "Креольский английский; Либерийский"
#. name for lis
msgid "Lisu"
@ -14661,7 +14661,7 @@ msgstr ""
#. name for lou
msgid "Creole French; Louisiana"
msgstr ""
msgstr "Креольский французский; Луизиана"
#. name for lov
msgid "Lopi"
@ -15021,7 +15021,7 @@ msgstr ""
#. name for lzz
msgid "Laz"
msgstr ""
msgstr "Лазский"
#. name for maa
msgid "Mazatec; San Jerónimo Tecóatl"
@ -15337,7 +15337,7 @@ msgstr ""
#. name for mdf
msgid "Moksha"
msgstr "Мокша"
msgstr "Мокшанский"
#. name for mdg
msgid "Massalat"
@ -19993,7 +19993,7 @@ msgstr ""
#. name for orv
msgid "Russian; Old"
msgstr ""
msgstr "Древнерусский"
#. name for orw
msgid "Oro Win"
@ -20109,7 +20109,7 @@ msgstr ""
#. name for oty
msgid "Tamil; Old"
msgstr ""
msgstr "Древнетамильский"
#. name for otz
msgid "Otomi; Ixtenco"
@ -21897,7 +21897,7 @@ msgstr ""
#. name for rcf
msgid "Creole French; Réunion"
msgstr ""
msgstr "Креольский французский; Реюньон"
#. name for rdb
msgid "Rudbari"
@ -23081,7 +23081,7 @@ msgstr ""
#. name for sin
msgid "Sinhala"
msgstr ""
msgstr "Сингальский"
#. name for sip
msgid "Sikkimese"
@ -24661,7 +24661,7 @@ msgstr ""
#. name for tch
msgid "Creole English; Turks And Caicos"
msgstr ""
msgstr "Креольский английский; Тёркс и Кайкос"
#. name for tci
msgid "Wára"
@ -24957,7 +24957,7 @@ msgstr ""
#. name for tgh
msgid "Creole English; Tobagonian"
msgstr ""
msgstr "Креольский английский; Тобагский"
#. name for tgi
msgid "Lawunuia"
@ -25401,7 +25401,7 @@ msgstr ""
#. name for tly
msgid "Talysh"
msgstr ""
msgstr "Талышский"
#. name for tma
msgid "Tama (Chad)"
@ -25845,7 +25845,7 @@ msgstr ""
#. name for trf
msgid "Creole English; Trinidadian"
msgstr ""
msgstr "Креольский английский; Тринидадский"
#. name for trg
msgid "Lishán Didán"
@ -27121,7 +27121,7 @@ msgstr ""
#. name for vic
msgid "Creole English; Virgin Islands"
msgstr ""
msgstr "Креольский английский; Виргинские острова"
#. name for vid
msgid "Vidunda"
@ -28209,7 +28209,7 @@ msgstr ""
#. name for wyy
msgid "Fijian; Western"
msgstr ""
msgstr "Западнофиджийский"
#. name for xaa
msgid "Arabic; Andalusian"

View File

@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
numeric_version = (0, 9, 20)
numeric_version = (0, 9, 21)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

View File

@ -20,7 +20,7 @@ from calibre.ptempfile import PersistentTemporaryFile
from calibre.db.schema_upgrades import SchemaUpgrade
from calibre.library.field_metadata import FieldMetadata
from calibre.ebooks.metadata import title_sort, author_to_author_sort
from calibre.utils.icu import strcmp
from calibre.utils.icu import sort_key
from calibre.utils.config import to_json, from_json, prefs, tweaks
from calibre.utils.date import utcfromtimestamp, parse_date
from calibre.utils.filenames import (is_case_sensitive, samefile, hardlink_file)
@ -172,7 +172,9 @@ def _author_to_author_sort(x):
return author_to_author_sort(x.replace('|', ','))
def icu_collator(s1, s2):
return strcmp(force_unicode(s1, 'utf-8'), force_unicode(s2, 'utf-8'))
return cmp(sort_key(force_unicode(s1, 'utf-8')),
sort_key(force_unicode(s2, 'utf-8')))
# }}}
# Unused aggregators {{{
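The rewritten icu_collator above still follows the negative/zero/positive compare contract that SQLite expects from a custom text collation. As a hedged sketch (not calibre's actual database wiring), this is how such a comparator could be registered with the standard-library sqlite3 module; icu_like_collator is a simplified stand-in for a sort_key based comparison:

import sqlite3

def icu_like_collator(s1, s2):
    # Stand-in for a sort_key based comparison; real code would compare
    # calibre.utils.icu.sort_key(s1) with sort_key(s2) instead.
    k1, k2 = s1.lower(), s2.lower()
    return (k1 > k2) - (k1 < k2)   # -1, 0 or 1, like cmp()

conn = sqlite3.connect(':memory:')
conn.create_collation('icu_like', icu_like_collator)
conn.execute('CREATE TABLE books (title TEXT)')
conn.executemany('INSERT INTO books VALUES (?)', [('beta',), ('Alpha',), ('gamma',)])
for (title,) in conn.execute('SELECT title FROM books ORDER BY title COLLATE icu_like'):
    print(title)   # Alpha, beta, gamma: case-insensitive order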

View File

@ -19,6 +19,7 @@ from calibre.db.errors import NoSuchFormat
from calibre.db.fields import create_field
from calibre.db.search import Search
from calibre.db.tables import VirtualTable
from calibre.db.write import get_series_values
from calibre.db.lazy import FormatMetadata, FormatsList
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ptempfile import (base_dir, PersistentTemporaryFile,
@ -216,6 +217,8 @@ class Cache(object):
field.series_field = self.fields[name[:-len('_index')]]
elif name == 'series_index':
field.series_field = self.fields['series']
elif name == 'authors':
field.author_sort_field = self.fields['author_sort']
@read_api
def field_for(self, name, book_id, default_value=None):
@ -615,11 +618,34 @@ class Cache(object):
icon_map=icon_map)
@write_api
def set_field(self, name, book_id_to_val_map):
def set_field(self, name, book_id_to_val_map, allow_case_change=True):
# TODO: Specialize title/authors to also update path
# TODO: Handle updating caches used by composite fields
dirtied = self.fields[name].writer.set_books(
book_id_to_val_map, self.backend)
# TODO: Ensure the sort fields are updated for title/author/series?
f = self.fields[name]
is_series = f.metadata['datatype'] == 'series'
if is_series:
bimap, simap = {}, {}
for k, v in book_id_to_val_map.iteritems():
if isinstance(v, basestring):
v, sid = get_series_values(v)
else:
v = sid = None
if name.startswith('#') and sid is None:
sid = 1.0 # The value will be set to 1.0 in the db table
bimap[k] = v
if sid is not None:
simap[k] = sid
book_id_to_val_map = bimap
dirtied = f.writer.set_books(
book_id_to_val_map, self.backend, allow_case_change=allow_case_change)
if is_series and simap:
sf = self.fields[f.name+'_index']
dirtied |= sf.writer.set_books(simap, self.backend, allow_case_change=False)
return dirtied
# }}}

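A hedged usage sketch of the series handling added to set_field above (the cache object and book id are hypothetical; the behaviour shown follows the parsing in this hunk and the writing tests later in this commit):

# Sketch only, not part of the commit. 'cache' is an already initialized
# calibre.db.cache.Cache instance and book id 1 exists in the library.
cache.set_field('series', {1: 'A Series One [3]'})
cache.field_for('series', 1)          # -> 'A Series One'
cache.field_for('series_index', 1)    # -> 3.0 (parsed from the "[3]" suffix)

# For custom series columns a value without an index defaults the index to 1.0
cache.set_field('#series', {1: 'Series'})
cache.field_for('#series_index', 1)   # -> 1.0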
View File

@ -22,6 +22,7 @@ from calibre.utils.localization import calibre_langcode_to_name
class Field(object):
is_many = False
is_many_many = False
def __init__(self, name, table):
self.name, self.table = name, table
@ -299,6 +300,7 @@ class ManyToOneField(Field):
class ManyToManyField(Field):
is_many = True
is_many_many = True
def __init__(self, *args, **kwargs):
Field.__init__(self, *args, **kwargs)
@ -400,6 +402,13 @@ class AuthorsField(ManyToManyField):
def category_sort_value(self, item_id, book_ids, lang_map):
return self.table.asort_map[item_id]
def db_author_sort_for_book(self, book_id):
return self.author_sort_field.for_book(book_id)
def author_sort_for_book(self, book_id):
return ' & '.join(self.table.asort_map[k] for k in
self.table.book_col_map[book_id])
class FormatsField(ManyToManyField):
def for_book(self, book_id, default_value=None):

View File

@ -124,7 +124,6 @@ class ManyToOneTable(Table):
def read_id_maps(self, db):
for row in db.conn.execute('SELECT id, {0} FROM {1}'.format(
self.metadata['column'], self.metadata['table'])):
if row[1]:
self.id_map[row[0]] = self.unserialize(row[1])
def read_maps(self, db):
@ -169,7 +168,7 @@ class AuthorsTable(ManyToManyTable):
self.asort_map = {}
for row in db.conn.execute(
'SELECT id, name, sort, link FROM authors'):
self.id_map[row[0]] = row[1]
self.id_map[row[0]] = self.unserialize(row[1])
self.asort_map[row[0]] = (row[2] if row[2] else
author_to_author_sort(row[1]))
self.alink_map[row[0]] = row[3]
@ -218,3 +217,4 @@ class LanguagesTable(ManyToManyTable):
ManyToManyTable.read_id_maps(self, db)
lm = lang_map()
self.lang_name_map = {x:lm.get(x, x) for x in self.id_map.itervalues()}

View File

@ -75,7 +75,7 @@ class WritingTest(BaseTest):
test.name, old_sqlite_res, sqlite_res))
del db
def test_one_one(self):
def test_one_one(self): # {{{
'Test setting of values in one-one fields'
tests = [self.create_test('#yesno', (True, False, 'true', 'false', None))]
for name, getter, setter in (
@ -114,9 +114,152 @@ class WritingTest(BaseTest):
tests.append(self.create_test(name, tuple(vals), getter, setter))
self.run_tests(tests)
# }}}
def test_many_one_basic(self): # {{{
'Test the different code paths for writing to a many-one field'
cl = self.cloned_library
cache = self.init_cache(cl)
f = cache.fields['publisher']
item_ids = {f.ids_for_book(1)[0], f.ids_for_book(2)[0]}
val = 'Changed'
self.assertEqual(cache.set_field('publisher', {1:val, 2:val}), {1, 2})
cache2 = self.init_cache(cl)
for book_id in (1, 2):
for c in (cache, cache2):
self.assertEqual(c.field_for('publisher', book_id), val)
self.assertFalse(item_ids.intersection(set(c.fields['publisher'].table.id_map)))
del cache2
self.assertFalse(cache.set_field('publisher', {1:val, 2:val}))
val = val.lower()
self.assertFalse(cache.set_field('publisher', {1:val, 2:val},
allow_case_change=False))
self.assertEqual(cache.set_field('publisher', {1:val, 2:val}), {1, 2})
cache2 = self.init_cache(cl)
for book_id in (1, 2):
for c in (cache, cache2):
self.assertEqual(c.field_for('publisher', book_id), val)
del cache2
self.assertEqual(cache.set_field('publisher', {1:'new', 2:'New'}), {1, 2})
self.assertEqual(cache.field_for('publisher', 1).lower(), 'new')
self.assertEqual(cache.field_for('publisher', 2).lower(), 'new')
self.assertEqual(cache.set_field('publisher', {1:None, 2:'NEW'}), {1, 2})
self.assertEqual(len(f.table.id_map), 1)
self.assertEqual(cache.set_field('publisher', {2:None}), {2})
self.assertEqual(len(f.table.id_map), 0)
cache2 = self.init_cache(cl)
self.assertEqual(len(cache2.fields['publisher'].table.id_map), 0)
del cache2
self.assertEqual(cache.set_field('publisher', {1:'one', 2:'two',
3:'three'}), {1, 2, 3})
self.assertEqual(cache.set_field('publisher', {1:''}), set([1]))
self.assertEqual(cache.set_field('publisher', {1:'two'}), set([1]))
self.assertEqual(tuple(map(f.for_book, (1,2,3))), ('two', 'two', 'three'))
self.assertEqual(cache.set_field('publisher', {1:'Two'}), {1, 2})
cache2 = self.init_cache(cl)
self.assertEqual(tuple(map(f.for_book, (1,2,3))), ('Two', 'Two', 'three'))
del cache2
# Enum
self.assertFalse(cache.set_field('#enum', {1:'Not allowed'}))
self.assertEqual(cache.set_field('#enum', {1:'One', 2:'One', 3:'Three'}), {1, 3})
self.assertEqual(cache.set_field('#enum', {1:None}), set([1]))
cache2 = self.init_cache(cl)
for c in (cache, cache2):
for i, val in {1:None, 2:'One', 3:'Three'}.iteritems():
self.assertEqual(c.field_for('#enum', i), val)
del cache2
# Rating
self.assertFalse(cache.set_field('rating', {1:6, 2:4}))
self.assertEqual(cache.set_field('rating', {1:0, 3:2}), {1, 3})
self.assertEqual(cache.set_field('#rating', {1:None, 2:4, 3:8}), {1, 2, 3})
cache2 = self.init_cache(cl)
for c in (cache, cache2):
for i, val in {1:None, 2:4, 3:2}.iteritems():
self.assertEqual(c.field_for('rating', i), val)
for i, val in {1:None, 2:4, 3:8}.iteritems():
self.assertEqual(c.field_for('#rating', i), val)
del cache2
# Series
self.assertFalse(cache.set_field('series',
{1:'a series one', 2:'a series one'}, allow_case_change=False))
self.assertEqual(cache.set_field('series', {3:'Series [3]'}), set([3]))
self.assertEqual(cache.set_field('#series', {1:'Series', 3:'Series'}),
{1, 3})
self.assertEqual(cache.set_field('#series', {2:'Series [0]'}), set([2]))
cache2 = self.init_cache(cl)
for c in (cache, cache2):
for i, val in {1:'A Series One', 2:'A Series One', 3:'Series'}.iteritems():
self.assertEqual(c.field_for('series', i), val)
for i in (1, 2, 3):
self.assertEqual(c.field_for('#series', i), 'Series')
for i, val in {1:2, 2:1, 3:3}.iteritems():
self.assertEqual(c.field_for('series_index', i), val)
for i, val in {1:1, 2:0, 3:1}.iteritems():
self.assertEqual(c.field_for('#series_index', i), val)
del cache2
# }}}
def test_many_many_basic(self): # {{{
'Test the different code paths for writing to a many-many field'
cl = self.cloned_library
cache = self.init_cache(cl)
ae, af, sf = self.assertEqual, self.assertFalse, cache.set_field
# Tags
ae(sf('#tags', {1:cache.field_for('tags', 1), 2:cache.field_for('tags', 2)}),
{1, 2})
for name in ('tags', '#tags'):
f = cache.fields[name]
af(sf(name, {1:('tag one', 'News')}, allow_case_change=False))
ae(sf(name, {1:'tag one, News'}), {1, 2})
ae(sf(name, {3:('tag two', 'sep,sep2')}), {2, 3})
ae(len(f.table.id_map), 4)
ae(sf(name, {1:None}), set([1]))
cache2 = self.init_cache(cl)
for c in (cache, cache2):
ae(c.field_for(name, 3), ('tag two', 'sep;sep2'))
ae(len(c.fields[name].table.id_map), 3)
ae(len(c.fields[name].table.id_map), 3)
ae(c.field_for(name, 1), ())
ae(c.field_for(name, 2), ('tag one', 'tag two'))
del cache2
# Authors
ae(sf('#authors', {k:cache.field_for('authors', k) for k in (1,2,3)}),
{1,2,3})
for name in ('authors', '#authors'):
f = cache.fields[name]
ae(len(f.table.id_map), 3)
af(cache.set_field(name, {3:None if name == 'authors' else 'Unknown'}))
ae(cache.set_field(name, {3:'Kovid Goyal & Divok Layog'}), set([3]))
ae(cache.set_field(name, {1:'', 2:'An, Author'}), {1,2})
cache2 = self.init_cache(cl)
for c in (cache, cache2):
ae(len(c.fields[name].table.id_map), 4 if name =='authors' else 3)
ae(c.field_for(name, 3), ('Kovid Goyal', 'Divok Layog'))
ae(c.field_for(name, 2), ('An, Author',))
ae(c.field_for(name, 1), ('Unknown',) if name=='authors' else ())
ae(c.field_for('author_sort', 1), 'Unknown')
ae(c.field_for('author_sort', 2), 'An, Author')
ae(c.field_for('author_sort', 3), 'Goyal, Kovid & Layog, Divok')
del cache2
ae(cache.set_field('authors', {1:'KoviD GoyaL'}), {1, 3})
ae(cache.field_for('author_sort', 1), 'GoyaL, KoviD')
ae(cache.field_for('author_sort', 3), 'GoyaL, KoviD & Layog, Divok')
# TODO: identifiers, languages
# }}}
def tests():
return unittest.TestLoader().loadTestsFromTestCase(WritingTest)
tl = unittest.TestLoader()
# return tl.loadTestsFromName('writing.WritingTest.test_many_many_basic')
return tl.loadTestsFromTestCase(WritingTest)
def run():
unittest.TextTestRunner(verbosity=2).run(tests())

View File

@ -7,12 +7,16 @@ __license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re
from functools import partial
from datetime import datetime
from calibre.constants import preferred_encoding, ispy3
from calibre.ebooks.metadata import author_to_author_sort
from calibre.utils.date import (parse_only_date, parse_date, UNDEFINED_DATE,
isoformat)
from calibre.utils.icu import strcmp
if ispy3:
unicode = str
@ -29,15 +33,35 @@ def single_text(x):
x = x.strip()
return x if x else None
def multiple_text(sep, x):
if x is None:
series_index_pat = re.compile(r'(.*)\s+\[([.0-9]+)\]$')
def get_series_values(val):
if not val:
return (val, None)
match = series_index_pat.match(val.strip())
if match is not None:
idx = match.group(2)
try:
idx = float(idx)
return (match.group(1).strip(), idx)
except:
pass
return (val, None)
def multiple_text(sep, ui_sep, x):
if not x:
return ()
if isinstance(x, bytes):
x = x.decode(preferred_encoding, 'replace')
if isinstance(x, unicode):
x = x.split(sep)
x = (y.strip() for y in x if y.strip())
return (' '.join(y.split()) for y in x if y)
else:
x = (y.decode(preferred_encoding, 'replace') if isinstance(y, bytes)
else y for y in x)
ui_sep = ui_sep.strip()
repsep = ',' if ui_sep == ';' else ';'
x = (y.strip().replace(ui_sep, repsep) for y in x if y.strip())
return tuple(' '.join(y.split()) for y in x if y)
def adapt_datetime(x):
if isinstance(x, (unicode, bytes)):
@ -76,7 +100,8 @@ def get_adapter(name, metadata):
dt = metadata['datatype']
if dt == 'text':
if metadata['is_multiple']:
ans = partial(multiple_text, metadata['is_multiple']['ui_to_list'])
m = metadata['is_multiple']
ans = partial(multiple_text, m['ui_to_list'], m['list_to_ui'])
else:
ans = single_text
elif dt == 'series':
@ -92,7 +117,7 @@ def get_adapter(name, metadata):
elif dt == 'comments':
ans = single_text
elif dt == 'rating':
ans = lambda x: x if x is None else min(10., max(0., adapt_number(float, x))),
ans = lambda x: None if x in {None, 0} else min(10., max(0., adapt_number(float, x)))
elif dt == 'enumeration':
ans = single_text
elif dt == 'composite':
@ -116,7 +141,7 @@ def get_adapter(name, metadata):
def one_one_in_books(book_id_val_map, db, field, *args):
'Set a one-one field in the books table'
if book_id_val_map:
sequence = tuple((sqlite_datetime(v), k) for k, v in book_id_val_map.iteritems())
sequence = ((sqlite_datetime(v), k) for k, v in book_id_val_map.iteritems())
db.conn.executemany(
'UPDATE books SET %s=? WHERE id=?'%field.metadata['column'], sequence)
field.table.book_col_map.update(book_id_val_map)
@ -128,13 +153,13 @@ def one_one_in_other(book_id_val_map, db, field, *args):
if deleted:
db.conn.executemany('DELETE FROM %s WHERE book=?'%field.metadata['table'],
deleted)
for book_id in book_id_val_map:
field.table.book_col_map.pop(book_id, None)
for book_id in deleted:
field.table.book_col_map.pop(book_id[0], None)
updated = {k:v for k, v in book_id_val_map.iteritems() if v is not None}
if updated:
db.conn.executemany('INSERT OR REPLACE INTO %s(book,%s) VALUES (?,?)'%(
field.metadata['table'], field.metadata['column']),
tuple((k, sqlite_datetime(v)) for k, v in updated.iteritems()))
((k, sqlite_datetime(v)) for k, v in updated.iteritems()))
field.table.book_col_map.update(updated)
return set(book_id_val_map)
@ -151,7 +176,212 @@ def custom_series_index(book_id_val_map, db, field, *args):
if sequence:
db.conn.executemany('UPDATE %s SET %s=? WHERE book=? AND value=?'%(
field.metadata['table'], field.metadata['column']), sequence)
return {s[0] for s in sequence}
return {s[1] for s in sequence}
# }}}
# Many-One fields {{{
def safe_lower(x):
try:
return icu_lower(x)
except (TypeError, ValueError, KeyError, AttributeError):
return x
def get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
case_changes, val_map, is_authors=False):
''' Get the db id for the value val. If val does not exist in the db it is
inserted into the db. '''
kval = kmap(val)
item_id = rid_map.get(kval, None)
if item_id is None:
if is_authors:
aus = author_to_author_sort(val)
db.conn.execute('INSERT INTO authors(name,sort) VALUES (?,?)',
(val.replace(',', '|'), aus))
else:
db.conn.execute('INSERT INTO %s(%s) VALUES (?)'%(
m['table'], m['column']), (val,))
item_id = rid_map[kval] = db.conn.last_insert_rowid()
table.id_map[item_id] = val
table.col_book_map[item_id] = set()
if is_authors:
table.asort_map[item_id] = aus
table.alink_map[item_id] = ''
elif allow_case_change and val != table.id_map[item_id]:
case_changes[item_id] = val
val_map[val] = item_id
def change_case(case_changes, dirtied, db, table, m, is_authors=False):
if is_authors:
vals = ((val.replace(',', '|'), item_id) for item_id, val in
case_changes.iteritems())
else:
vals = ((val, item_id) for item_id, val in case_changes.iteritems())
db.conn.executemany(
'UPDATE %s SET %s=? WHERE id=?'%(m['table'], m['column']), vals)
for item_id, val in case_changes.iteritems():
table.id_map[item_id] = val
dirtied.update(table.col_book_map[item_id])
if is_authors:
table.asort_map[item_id] = author_to_author_sort(val)
def many_one(book_id_val_map, db, field, allow_case_change, *args):
dirtied = set()
m = field.metadata
table = field.table
dt = m['datatype']
is_custom_series = dt == 'series' and table.name.startswith('#')
# Map values to db ids, including any new values
kmap = safe_lower if dt in {'text', 'series'} else lambda x:x
rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
val_map = {None:None}
case_changes = {}
for val in book_id_val_map.itervalues():
if val is not None:
get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
case_changes, val_map)
if case_changes:
change_case(case_changes, dirtied, db, table, m)
book_id_item_id_map = {k:val_map[v] for k, v in book_id_val_map.iteritems()}
# Ignore those items whose value is the same as the current value
book_id_item_id_map = {k:v for k, v in book_id_item_id_map.iteritems()
if v != table.book_col_map.get(k, None)}
dirtied |= set(book_id_item_id_map)
# Update the book->col and col->book maps
deleted = set()
updated = {}
for book_id, item_id in book_id_item_id_map.iteritems():
old_item_id = table.book_col_map.get(book_id, None)
if old_item_id is not None:
table.col_book_map[old_item_id].discard(book_id)
if item_id is None:
table.book_col_map.pop(book_id, None)
deleted.add(book_id)
else:
table.book_col_map[book_id] = item_id
table.col_book_map[item_id].add(book_id)
updated[book_id] = item_id
# Update the db link table
if deleted:
db.conn.executemany('DELETE FROM %s WHERE book=?'%table.link_table,
((k,) for k in deleted))
if updated:
sql = (
'DELETE FROM {0} WHERE book=?; INSERT INTO {0}(book,{1},extra) VALUES(?, ?, 1.0)'
if is_custom_series else
'DELETE FROM {0} WHERE book=?; INSERT INTO {0}(book,{1}) VALUES(?, ?)'
)
db.conn.executemany(sql.format(table.link_table, m['link_column']),
((book_id, book_id, item_id) for book_id, item_id in
updated.iteritems()))
# Remove no longer used items
remove = {item_id for item_id in table.id_map if not
table.col_book_map.get(item_id, False)}
if remove:
db.conn.executemany('DELETE FROM %s WHERE id=?'%m['table'],
((item_id,) for item_id in remove))
for item_id in remove:
del table.id_map[item_id]
table.col_book_map.pop(item_id, None)
return dirtied
# }}}
# Many-Many fields {{{
def many_many(book_id_val_map, db, field, allow_case_change, *args):
dirtied = set()
m = field.metadata
table = field.table
dt = m['datatype']
is_authors = field.name == 'authors'
# Map values to db ids, including any new values
kmap = safe_lower if dt == 'text' else lambda x:x
rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
val_map = {}
case_changes = {}
for vals in book_id_val_map.itervalues():
for val in vals:
get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
case_changes, val_map, is_authors=is_authors)
if case_changes:
change_case(case_changes, dirtied, db, table, m, is_authors=is_authors)
if is_authors:
for item_id, val in case_changes.iteritems():
for book_id in table.col_book_map[item_id]:
current_sort = field.db_author_sort_for_book(book_id)
new_sort = field.author_sort_for_book(book_id)
if strcmp(current_sort, new_sort) == 0:
# The sort strings differ only by case, update the db
# sort
field.author_sort_field.writer.set_books({book_id:new_sort}, db)
book_id_item_id_map = {k:tuple(val_map[v] for v in vals)
for k, vals in book_id_val_map.iteritems()}
# Ignore those items whose value is the same as the current value
book_id_item_id_map = {k:v for k, v in book_id_item_id_map.iteritems()
if v != table.book_col_map.get(k, None)}
dirtied |= set(book_id_item_id_map)
# Update the book->col and col->book maps
deleted = set()
updated = {}
for book_id, item_ids in book_id_item_id_map.iteritems():
old_item_ids = table.book_col_map.get(book_id, None)
if old_item_ids:
for old_item_id in old_item_ids:
table.col_book_map[old_item_id].discard(book_id)
if item_ids:
table.book_col_map[book_id] = item_ids
for item_id in item_ids:
table.col_book_map[item_id].add(book_id)
updated[book_id] = item_ids
else:
table.book_col_map.pop(book_id, None)
deleted.add(book_id)
# Update the db link table
if deleted:
db.conn.executemany('DELETE FROM %s WHERE book=?'%table.link_table,
((k,) for k in deleted))
if updated:
vals = (
(book_id, val) for book_id, vals in updated.iteritems()
for val in vals
)
db.conn.executemany('DELETE FROM %s WHERE book=?'%table.link_table,
((k,) for k in updated))
db.conn.executemany('INSERT INTO {0}(book,{1}) VALUES(?, ?)'.format(
table.link_table, m['link_column']), vals)
if is_authors:
aus_map = {book_id:field.author_sort_for_book(book_id) for book_id
in updated}
field.author_sort_field.writer.set_books(aus_map, db)
# Remove no longer used items
remove = {item_id for item_id in table.id_map if not
table.col_book_map.get(item_id, False)}
if remove:
db.conn.executemany('DELETE FROM %s WHERE id=?'%m['table'],
((item_id,) for item_id in remove))
for item_id in remove:
del table.id_map[item_id]
table.col_book_map.pop(item_id, None)
if is_authors:
table.asort_map.pop(item_id, None)
table.alink_map.pop(item_id, None)
return dirtied
# }}}
def dummy(book_id_val_map, *args):
@ -170,21 +400,33 @@ class Writer(object):
self.set_books_func = dummy
elif self.name[0] == '#' and self.name.endswith('_index'):
self.set_books_func = custom_series_index
elif field.is_many_many:
self.set_books_func = many_many
elif field.is_many:
# TODO: Implement this
pass
# TODO: Remember to change commas to | when writing authors to sqlite
self.set_books_func = (self.set_books_for_enum if dt ==
'enumeration' else many_one)
else:
self.set_books_func = (one_one_in_books if field.metadata['table']
== 'books' else one_one_in_other)
if self.name in {'timestamp', 'uuid', 'sort'}:
self.accept_vals = bool
def set_books(self, book_id_val_map, db):
def set_books(self, book_id_val_map, db, allow_case_change=True):
book_id_val_map = {k:self.adapter(v) for k, v in
book_id_val_map.iteritems() if self.accept_vals(v)}
if not book_id_val_map:
return set()
dirtied = self.set_books_func(book_id_val_map, db, self.field)
dirtied = self.set_books_func(book_id_val_map, db, self.field,
allow_case_change)
return dirtied
def set_books_for_enum(self, book_id_val_map, db, field,
allow_case_change):
allowed = set(field.metadata['display']['enum_values'])
book_id_val_map = {k:v for k, v in book_id_val_map.iteritems() if v is
None or v in allowed}
if not book_id_val_map:
return set()
return many_one(book_id_val_map, db, field, False)

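A hedged sketch of what the adapters defined above return (the expected values are taken from the writing tests earlier in this commit; the ',' / ', ' separators are assumed to be the ones calibre uses for tag-like columns):

from calibre.db.write import get_series_values, multiple_text

get_series_values('A Series One [3]')   # -> ('A Series One', 3.0)
get_series_values('No Index Here')      # -> ('No Index Here', None)

# A plain string is split on the db separator; the string branch returns a
# generator, so wrap it in tuple() to inspect the result.
tuple(multiple_text(',', ', ', 'tag one, News'))    # -> ('tag one', 'News')

# Items given as a list/tuple keep their boundaries; an embedded UI separator
# inside an item is swapped for ';' so it cannot be confused with an item
# boundary.
multiple_text(',', ', ', ('tag two', 'sep,sep2'))   # -> ('tag two', 'sep;sep2')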
View File

@ -23,12 +23,11 @@ It also contains interfaces to various bits of calibre that do not have
dedicated command line tools, such as font subsetting, tweaking ebooks and so
on.
''')
parser.add_option('-c', '--command', help='Run python code.', default=None)
parser.add_option('-e', '--exec-file', default=None, help='Run the python code in file.')
parser.add_option('-f', '--subset-font', default=False,
action='store_true', help='Subset the specified font')
parser.add_option('-c', '--command', help='Run python code.')
parser.add_option('-e', '--exec-file', help='Run the python code in file.')
parser.add_option('-f', '--subset-font', help='Subset the specified font')
parser.add_option('-d', '--debug-device-driver', default=False, action='store_true',
help='Debug the specified device driver.')
help='Debug device detection')
parser.add_option('-g', '--gui', default=False, action='store_true',
help='Run the GUI with debugging enabled. Debug output is '
'printed to stdout and stderr.')
@ -59,7 +58,7 @@ on.
parser.add_option('-m', '--inspect-mobi', action='store_true',
default=False,
help='Inspect the MOBI file(s) at the specified path(s)')
parser.add_option('--tweak-book', default=None,
parser.add_option('-t', '--tweak-book', default=None,
help='Tweak the book (exports the book as a collection of HTML '
'files and metadata, which you can edit using standard HTML '
'editing tools, and then rebuilds the file from the edited HTML. '
@ -174,30 +173,24 @@ def run_debug_gui(logpath):
from calibre.gui2.main import main
main(['__CALIBRE_GUI_DEBUG__', logpath])
def main(args=sys.argv):
from calibre.constants import debug
debug()
if len(args) > 2 and args[1] in ('-e', '--exec-file'):
# Load all plugins user defined plugins so the script can import from the
def run_script(path, args):
# Load all user defined plugins so the script can import from the
# calibre_plugins namespace
import calibre.customize.ui as dummy
dummy
sys.argv = [args[2]] + args[3:]
ef = os.path.abspath(args[2])
sys.argv = [path] + args
ef = os.path.abspath(path)
base = os.path.dirname(ef)
sys.path.insert(0, base)
g = globals()
g['__name__'] = '__main__'
g['__file__'] = ef
execfile(ef, g)
return
if len(args) > 1 and args[1] in ('-f', '--subset-font'):
from calibre.utils.fonts.sfnt.subset import main
main(['subset-font']+args[2:])
return
def main(args=sys.argv):
from calibre.constants import debug
debug()
opts, args = option_parser().parse_args(args)
if opts.gui:
@ -258,6 +251,13 @@ def main(args=sys.argv):
elif opts.shutdown_running_calibre:
from calibre.gui2.main import shutdown_other
shutdown_other()
elif opts.subset_font:
from calibre.utils.fonts.sfnt.subset import main
main(['subset-font']+[opts.subset_font]+args[1:])
elif opts.exec_file:
run_script(opts.exec_file, args[1:])
elif len(args) >= 2 and args[1].rpartition('.')[-1] in {'py', 'recipe'}:
run_script(args[1], args[2:])
else:
from calibre import ipython
ipython()

View File

@ -7,9 +7,10 @@ __docformat__ = 'restructuredtext en'
import cStringIO, ctypes, datetime, os, platform, re, shutil, sys, tempfile, time
from calibre.constants import __appname__, __version__, DEBUG, cache_dir
from calibre import fit_image, confirm_config_name, strftime as _strftime
from calibre.constants import isosx, iswindows
from calibre.constants import (
__appname__, __version__, DEBUG as CALIBRE_DEBUG, isosx, iswindows,
cache_dir as _cache_dir)
from calibre.devices.errors import OpenFeedback, UserFeedback
from calibre.devices.usbms.deviceconfig import DeviceConfig
from calibre.devices.interface import DevicePlugin
@ -20,6 +21,7 @@ from calibre.utils.config import config_dir, dynamic, prefs
from calibre.utils.date import now, parse_date
from calibre.utils.zipfile import ZipFile
DEBUG = CALIBRE_DEBUG
def strftime(fmt='%Y/%m/%d %H:%M:%S', dt=None):
@ -309,7 +311,7 @@ class ITUNES(DriverBase):
@property
def cache_dir(self):
return os.path.join(cache_dir(), 'itunes')
return os.path.join(_cache_dir(), 'itunes')
@property
def archive_path(self):
@ -858,7 +860,6 @@ class ITUNES(DriverBase):
Note that most of the initialization is necessarily performed in can_handle(), as
we need to talk to iTunes to discover if there's a connected iPod
'''
if self.iTunes is None:
raise OpenFeedback(self.ITUNES_SANDBOX_LOCKOUT_MESSAGE)
@ -887,6 +888,7 @@ class ITUNES(DriverBase):
logger().info(" %s" % self.UNSUPPORTED_DIRECT_CONNECT_MODE_MESSAGE)
# Log supported DEVICE_IDs and BCDs
if DEBUG:
logger().info(" BCD: %s" % ['0x%x' % x for x in sorted(self.BCD)])
logger().info(" PRODUCT_ID: %s" % ['0x%x' % x for x in sorted(self.PRODUCT_ID)])
@ -1035,7 +1037,7 @@ class ITUNES(DriverBase):
self.plugboard_func = pb_func
def shutdown(self):
if DEBUG:
if False and DEBUG:
logger().info("%s.shutdown()\n" % self.__class__.__name__)
def sync_booklists(self, booklists, end_session=True):
@ -1673,6 +1675,7 @@ class ITUNES(DriverBase):
except:
self.manual_sync_mode = False
if DEBUG:
logger().info(" iTunes.manual_sync_mode: %s" % self.manual_sync_mode)
def _dump_booklist(self, booklist, header=None, indent=0):
@ -2151,6 +2154,7 @@ class ITUNES(DriverBase):
if 'iPod' in self.sources:
connected_device = self.sources['iPod']
device = self.iTunes.sources[connected_device]
if device.playlists() is not None:
dev_books = None
for pl in device.playlists():
if pl.special_kind() == appscript.k.Books:
@ -2181,7 +2185,7 @@ class ITUNES(DriverBase):
pythoncom.CoInitialize()
connected_device = self.sources['iPod']
device = self.iTunes.sources.ItemByName(connected_device)
if device.Playlists is not None:
dev_books = None
for pl in device.Playlists:
if pl.Kind == self.PlaylistKind.index('User') and \

View File

@ -22,13 +22,14 @@ class IRIVER_STORY(USBMS):
FORMATS = ['epub', 'fb2', 'pdf', 'djvu', 'txt']
VENDOR_ID = [0x1006]
PRODUCT_ID = [0x4023, 0x4024, 0x4025, 0x4034]
BCD = [0x0323, 0x0326]
PRODUCT_ID = [0x4023, 0x4024, 0x4025, 0x4034, 0x4037]
BCD = [0x0323, 0x0326, 0x226]
VENDOR_NAME = 'IRIVER'
WINDOWS_MAIN_MEM = ['STORY', 'STORY_EB05', 'STORY_WI-FI', 'STORY_EB07']
WINDOWS_MAIN_MEM = ['STORY', 'STORY_EB05', 'STORY_WI-FI', 'STORY_EB07',
'STORY_EB12']
WINDOWS_MAIN_MEM = re.compile(r'(%s)&'%('|'.join(WINDOWS_MAIN_MEM)))
WINDOWS_CARD_A_MEM = ['STORY', 'STORY_SD']
WINDOWS_CARD_A_MEM = ['STORY', 'STORY_SD', 'STORY_EB12_SD']
WINDOWS_CARD_A_MEM = re.compile(r'(%s)&'%('|'.join(WINDOWS_CARD_A_MEM)))
#OSX_MAIN_MEM = 'Kindle Internal Storage Media'

View File

@ -6,7 +6,7 @@ import os, time, sys
from calibre.constants import preferred_encoding, DEBUG
from calibre import isbytestring, force_unicode
from calibre.utils.icu import strcmp
from calibre.utils.icu import sort_key
from calibre.devices.usbms.books import Book as Book_
from calibre.devices.usbms.books import CollectionsBookList
@ -239,8 +239,7 @@ class KTCollectionsBookList(CollectionsBookList):
if y is None:
return -1
if isinstance(x, basestring) and isinstance(y, basestring):
c = strcmp(force_unicode(x), force_unicode(y))
else:
x, y = sort_key(force_unicode(x)), sort_key(force_unicode(y))
c = cmp(x, y)
if c != 0:
return c

View File

@ -1,5 +1,6 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import division
__license__ = 'GPL v3'
__copyright__ = '2010-2012, Timothy Legge <timlegge@gmail.com>, Kovid Goyal <kovid@kovidgoyal.net> and David Forrester <davidfor@internode.on.net>'
@ -13,6 +14,7 @@ Extended to support Touch firmware 2.0.0 and later and newer devices by David Fo
'''
import os, time
from contextlib import closing
from calibre.devices.usbms.books import BookList
from calibre.devices.usbms.books import CollectionsBookList
@ -33,7 +35,7 @@ class KOBO(USBMS):
gui_name = 'Kobo Reader'
description = _('Communicate with the Kobo Reader')
author = 'Timothy Legge and David Forrester'
version = (2, 0, 5)
version = (2, 0, 6)
dbversion = 0
fwversion = 0
@ -1200,6 +1202,7 @@ class KOBOTOUCH(KOBO):
gui_name = 'Kobo Touch'
author = 'David Forrester'
description = 'Communicate with the Kobo Touch, Glo and Mini firmware. Based on the existing Kobo driver by %s.' % (KOBO.author)
# icon = I('devices/kobotouch.jpg')
supported_dbversion = 75
min_supported_dbversion = 53
@ -1219,14 +1222,11 @@ class KOBOTOUCH(KOBO):
_('Delete Empty Bookshelves') +
':::'+_('Delete any empty bookshelves from the Kobo Touch when syncing is finished. This is only for firmware V2.0.0 or later.'),
_('Upload covers for books') +
':::'+_('Normally, the KOBO readers get the cover image from the'
' ebook file itself. With this option, calibre will send a '
'separate cover image to the reader, useful if you '
'have modified the cover.'),
':::'+_('Upload cover images from the calibre library when sending books to the device.'),
_('Upload Black and White Covers'),
_('Always upload covers') +
':::'+_('If the Upload covers option is selected, the driver will only replace covers already on the device.'
' Select this option if you want covers uploaded the first time you send the book to the device.'),
_('Keep cover aspect ratio') +
':::'+_('When uploading covers, do not change the aspect ratio when resizing for the device.'
' This is for firmware versions 2.3.1 and later.'),
_('Show expired books') +
':::'+_('A bug in an earlier version left non-kepub book records'
' in the database. With this option Calibre will show the '
@ -1278,7 +1278,7 @@ class KOBOTOUCH(KOBO):
OPT_DELETE_BOOKSHELVES = 2
OPT_UPLOAD_COVERS = 3
OPT_UPLOAD_GRAYSCALE_COVERS = 4
OPT_ALWAYS_UPLOAD_COVERS = 5
OPT_KEEP_COVER_ASPECT_RATIO = 5
OPT_SHOW_EXPIRED_BOOK_RECORDS = 6
OPT_SHOW_PREVIEWS = 7
OPT_SHOW_RECOMMENDATIONS = 8
@ -1290,16 +1290,27 @@ class KOBOTOUCH(KOBO):
TIMESTAMP_STRING = "%Y-%m-%dT%H:%M:%SZ"
PRODUCT_ID = [0x4163, 0x4173, 0x4183]
GLO_PRODUCT_ID = [0x4173]
MINI_PRODUCT_ID = [0x4183]
TOUCH_PRODUCT_ID = [0x4163]
PRODUCT_ID = GLO_PRODUCT_ID + MINI_PRODUCT_ID + TOUCH_PRODUCT_ID
BCD = [0x0110, 0x0326]
# Image file name endings. Made up of: image size, min_dbversion, max_dbversion, isFullsize
COVER_FILE_ENDINGS = {
' - N3_LIBRARY_FULL.parsed':[(355,473),0, 99,], # Used for Details screen
' - N3_LIBRARY_GRID.parsed':[(149,198),0, 99,], # Used for library lists
' - N3_LIBRARY_LIST.parsed':[(60,90),0, 53,],
' - N3_FULL.parsed':[(600,800),0, 99,True,], # Used for screensaver, home screen
' - N3_LIBRARY_FULL.parsed':[(355,473),0, 99,False,], # Used for Details screen
' - N3_LIBRARY_GRID.parsed':[(149,198),0, 99,False,], # Used for library lists
' - N3_LIBRARY_LIST.parsed':[(60,90),0, 53,False,],
# ' - N3_LIBRARY_SHELF.parsed': [(40,60),0, 52,],
}
GLO_COVER_FILE_ENDINGS = {
' - N3_FULL.parsed':[(758,1024),0, 99,True,], # Used for screensaver, home screen
' - N3_LIBRARY_FULL.parsed':[(355,479),0, 99,False,], # Used for Details screen
' - N3_LIBRARY_GRID.parsed':[(149,201),0, 99,False,], # Used for library lists
# ' - N3_LIBRARY_LIST.parsed':[(60,90),0, 53,],
# ' - N3_LIBRARY_SHELF.parsed': [(40,60),0, 52,],
' - N3_FULL.parsed':[(600,800),0, 99,], # Used for screensaver, home screen
}
#Following are the sizes used with pre2.1.4 firmware
# COVER_FILE_ENDINGS = {
@ -1311,6 +1322,7 @@ class KOBOTOUCH(KOBO):
# ' - N3_FULL.parsed':[(600,800),0, 99,], # Used for screensaver if "Full screen" is checked.
# }
def initialize(self):
super(KOBOTOUCH, self).initialize()
self.bookshelvelist = []
@ -1691,7 +1703,7 @@ class KOBOTOUCH(KOBO):
def imagefilename_from_imageID(self, ImageID):
show_debug = self.is_debugging_title(ImageID)
for ending, cover_options in self.COVER_FILE_ENDINGS.items():
for ending, cover_options in self.cover_file_endings().items():
fpath = self._main_prefix + '.kobo/images/' + ImageID + ending
fpath = self.normalize_path(fpath.replace('/', os.sep))
if os.path.exists(fpath):
@ -1733,12 +1745,16 @@ class KOBOTOUCH(KOBO):
self.set_filesize_in_device_database(connection, contentID, fname)
if not self.copying_covers():
imageID = self.imageid_from_contentid(contentID)
self.delete_images(imageID)
connection.commit()
cursor.close()
except Exception as e:
debug_print('KoboTouch:upload_books - Exception: %s'%str(e))
return result
@ -1794,7 +1810,7 @@ class KOBOTOUCH(KOBO):
path_prefix = '.kobo/images/'
path = self._main_prefix + path_prefix + ImageID
for ending in self.COVER_FILE_ENDINGS.keys():
for ending in self.cover_file_endings().keys():
fpath = path + ending
fpath = self.normalize_path(fpath)
@ -2049,23 +2065,23 @@ class KOBOTOUCH(KOBO):
# debug_print("KoboTouch:upload_cover - path='%s' filename='%s'"%(path, filename))
opts = self.settings()
if not opts.extra_customization[self.OPT_UPLOAD_COVERS]:
if not self.copying_covers():
# Building thumbnails disabled
# debug_print('KoboTouch: not uploading cover')
return
# Don't upload covers if book is on the SD card
if self._card_a_prefix and path.startswith(self._card_a_prefix):
return
if not opts.extra_customization[self.OPT_UPLOAD_GRAYSCALE_COVERS]:
uploadgrayscale = False
else:
uploadgrayscale = True
if not opts.extra_customization[self.OPT_ALWAYS_UPLOAD_COVERS]:
always_upload_covers = False
else:
always_upload_covers = True
# debug_print('KoboTouch: uploading cover')
try:
self._upload_cover(path, filename, metadata, filepath, uploadgrayscale, always_upload_covers)
self._upload_cover(path, filename, metadata, filepath, uploadgrayscale, self.keep_cover_aspect())
except Exception as e:
debug_print('KoboTouch: FAILED to upload cover=%s Exception=%s'%(filepath, str(e)))
@ -2077,9 +2093,9 @@ class KOBOTOUCH(KOBO):
ImageID = ImageID.replace('.', '_')
return ImageID
def _upload_cover(self, path, filename, metadata, filepath, uploadgrayscale, always_upload_covers=False):
from calibre.utils.magick.draw import save_cover_data_to
debug_print("KoboTouch:_upload_cover - filename='%s' uploadgrayscale='%s' always_upload_covers='%s'"%(filename, uploadgrayscale, always_upload_covers))
def _upload_cover(self, path, filename, metadata, filepath, uploadgrayscale, keep_cover_aspect=False):
from calibre.utils.magick.draw import save_cover_data_to, identify_data
debug_print("KoboTouch:_upload_cover - filename='%s' uploadgrayscale='%s' "%(filename, uploadgrayscale))
if metadata.cover:
show_debug = self.is_debugging_title(filename)
@ -2122,8 +2138,8 @@ class KOBOTOUCH(KOBO):
if show_debug:
debug_print("KoboTouch:_upload_cover - About to loop over cover endings")
for ending, cover_options in self.COVER_FILE_ENDINGS.items():
resize, min_dbversion, max_dbversion = cover_options
for ending, cover_options in self.cover_file_endings().items():
resize, min_dbversion, max_dbversion, isFullsize = cover_options
if show_debug:
debug_print("KoboTouch:_upload_cover - resize=%s min_dbversion=%d max_dbversion=%d" % (resize, min_dbversion, max_dbversion))
if self.dbversion >= min_dbversion and self.dbversion <= max_dbversion:
@ -2132,11 +2148,20 @@ class KOBOTOUCH(KOBO):
fpath = path + ending
fpath = self.normalize_path(fpath.replace('/', os.sep))
if os.path.exists(fpath) or always_upload_covers:
debug_print("KoboTouch:_upload_cover - path exists or always_upload_covers%s"% always_upload_covers)
with open(cover, 'rb') as f:
data = f.read()
if keep_cover_aspect:
if isFullsize:
resize = None
else:
width, height, fmt = identify_data(data)
cover_aspect = width / height
if cover_aspect > 1:
resize = (resize[0], int(resize[0] / cover_aspect ))
elif cover_aspect < 1:
resize = (int(cover_aspect * resize[1]), resize[1] )
# Return the data resized and in Grayscale if
# required
data = save_cover_data_to(data, 'dummy.jpg',
@ -2453,21 +2478,30 @@ class KOBOTOUCH(KOBO):
return opts
def isGlo(self):
return self.detected_device.idProduct in self.GLO_PRODUCT_ID
def isMini(self):
return self.detected_device.idProduct in self.MINI_PRODUCT_ID
def isTouch(self):
return self.detected_device.idProduct in self.TOUCH_PRODUCT_ID
def cover_file_endings(self):
return self.GLO_COVER_FILE_ENDINGS if self.isGlo() else self.COVER_FILE_ENDINGS
def copying_covers(self):
opts = self.settings()
return opts.extra_customization[self.OPT_UPLOAD_COVERS] or opts.extra_customization[self.OPT_KEEP_COVER_ASPECT_RATIO]
def keep_cover_aspect(self):
opts = self.settings()
return opts.extra_customization[self.OPT_KEEP_COVER_ASPECT_RATIO]
def supports_bookshelves(self):
return self.dbversion >= self.min_supported_dbversion
def supports_series(self):
return self.dbversion >= self.min_dbversion_series
# def is_debugging_title(self, title):
## debug_print("KoboTouch:is_debugging - title=", title)
# is_debugging = False
# opts = self.settings()
# if opts.extra_customization:
# debugging_title = opts.extra_customization[self.OPT_DEBUGGING_TITLE]
# is_debugging = len(debugging_title) > 0 and title.find(debugging_title) >= 0 or len(title) == 0
#
# return is_debugging
@classmethod
def is_debugging_title(cls, title):

View File

@ -13,7 +13,7 @@ from calibre.devices.interface import BookList as _BookList
from calibre.constants import preferred_encoding
from calibre import isbytestring, force_unicode
from calibre.utils.config import device_prefs, tweaks
from calibre.utils.icu import strcmp
from calibre.utils.icu import sort_key
from calibre.utils.formatter import EvalFormatter
class Book(Metadata):
@ -281,8 +281,7 @@ class CollectionsBookList(BookList):
if y is None:
return -1
if isinstance(x, basestring) and isinstance(y, basestring):
c = strcmp(force_unicode(x), force_unicode(y))
else:
x, y = sort_key(force_unicode(x)), sort_key(force_unicode(y))
c = cmp(x, y)
if c != 0:
return c

View File

@ -100,6 +100,9 @@ def option_recommendation_to_cli_option(add_option, rec):
switches = ['--disable-'+opt.long_switch]
add_option(Option(*switches, **attrs))
def group_titles():
return _('INPUT OPTIONS'), _('OUTPUT OPTIONS')
def add_input_output_options(parser, plumber):
input_options, output_options = \
plumber.input_options, plumber.output_options
@ -109,14 +112,14 @@ def add_input_output_options(parser, plumber):
option_recommendation_to_cli_option(group, opt)
if input_options:
title = _('INPUT OPTIONS')
title = group_titles()[0]
io = OptionGroup(parser, title, _('Options to control the processing'
' of the input %s file')%plumber.input_fmt)
add_options(io.add_option, input_options)
parser.add_option_group(io)
if output_options:
title = _('OUTPUT OPTIONS')
title = group_titles()[1]
oo = OptionGroup(parser, title, _('Options to control the processing'
' of the output %s')%plumber.output_fmt)
add_options(oo.add_option, output_options)

View File

@ -941,9 +941,19 @@ class OPF(object): # {{{
return self.get_text(match) or None
def fset(self, val):
removed_ids = set()
for x in tuple(self.application_id_path(self.metadata)):
removed_ids.add(x.get('id', None))
x.getparent().remove(x)
uuid_id = None
for attr in self.root.attrib:
if attr.endswith('unique-identifier'):
uuid_id = self.root.attrib[attr]
break
attrib = {'{%s}scheme'%self.NAMESPACES['opf']: 'calibre'}
if uuid_id and uuid_id in removed_ids:
attrib['id'] = uuid_id
self.set_text(self.create_metadata_element(
'identifier', attrib=attrib), unicode(val))

View File

@ -157,8 +157,9 @@ class TOC(list):
toc = m[0]
self.read_ncx_toc(toc)
def read_ncx_toc(self, toc):
def read_ncx_toc(self, toc, root=None):
self.base_path = os.path.dirname(toc)
if root is None:
raw = xml_to_unicode(open(toc, 'rb').read(), assume_utf8=True,
strip_encoding_pats=True)[0]
root = etree.fromstring(raw, parser=etree.XMLParser(recover=True,

View File

@ -81,6 +81,11 @@ class BookIndexing
if elem == null
pos = [body.scrollWidth+1000, body.scrollHeight+1000]
else
# Because of a bug in WebKit's getBoundingClientRect() in
# column mode, this position can be inaccurate,
# see https://bugs.launchpad.net/calibre/+bug/1132641 for a
# test case. The usual symptom of the inaccuracy is br.top is
# highly negative.
br = elem.getBoundingClientRect()
pos = viewport_to_document(br.left, br.top, elem.ownerDocument)

View File

@ -75,6 +75,13 @@ class PagedDisplay
this.margin_side = margin_side
this.margin_bottom = margin_bottom
handle_rtl_body: (body_style) ->
if body_style.direction == "rtl"
for node in document.body.childNodes
if node.nodeType == node.ELEMENT_NODE and window.getComputedStyle(node).direction == "rtl"
node.style.setProperty("direction", "rtl")
document.body.style.direction = "ltr"
layout: (is_single_page=false) ->
# start_time = new Date().getTime()
body_style = window.getComputedStyle(document.body)
@ -84,6 +91,7 @@ class PagedDisplay
# Check if the current document is a full screen layout like
# cover, if so we treat it specially.
single_screen = (document.body.scrollHeight < window.innerHeight + 75)
this.handle_rtl_body(body_style)
first_layout = true
ww = window.innerWidth
@ -402,7 +410,22 @@ class PagedDisplay
elem.scrollIntoView()
if this.in_paged_mode
# Ensure we are scrolled to the column containing elem
this.scroll_to_xpos(calibre_utils.absleft(elem) + 5)
# Because of a bug in WebKit's getBoundingClientRect() in column
# mode, this position can be inaccurate, see
# https://bugs.launchpad.net/calibre/+bug/1132641 for a test case.
# The usual symptom of the inaccuracy is br.top is highly negative.
br = elem.getBoundingClientRect()
if br.top < -1000
# This only works because of the preceding call to
# elem.scrollIntoView(). However, in some cases it gives
# inaccurate results, so we prefer the bounding client rect,
# when possible.
left = elem.scrollLeft
else
left = br.left
this.scroll_to_xpos(calibre_utils.viewport_to_document(
left+this.margin_side, elem.scrollTop, elem.ownerDocument)[0])
snap_to_selection: () ->
# Ensure that the viewport is positioned at the start of the column

View File

@ -86,7 +86,9 @@ class CalibreUtils
absleft: (elem) -> # {{{
# The left edge of elem in document co-ords. Works in all
# circumstances, including column layout. Note that this will cause
# a relayout if the render tree is dirty.
# a relayout if the render tree is dirty. Also, because of a bug in the
# version of WebKit bundled with Qt 4.8, this does not always work, see
# https://bugs.launchpad.net/bugs/1132641 for a test case.
r = elem.getBoundingClientRect()
return this.viewport_to_document(r.left, 0, elem.ownerDocument)[0]
# }}}

View File

@ -8,6 +8,7 @@ __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, logging, sys, hashlib, uuid, re
from collections import defaultdict
from io import BytesIO
from urllib import unquote as urlunquote, quote as urlquote
from urlparse import urlparse
@ -88,7 +89,7 @@ class Container(object):
self.mime_map[name] = guess_type('a.opf')
if not hasattr(self, 'opf_name'):
raise InvalidBook('Book has no OPF file')
raise InvalidBook('Could not locate opf file: %r'%opfpath)
# Update mime map with data from the OPF
for item in self.opf_xpath('//opf:manifest/opf:item[@href and @media-type]'):
@ -230,6 +231,14 @@ class Container(object):
return {item.get('id'):self.href_to_name(item.get('href'), self.opf_name)
for item in self.opf_xpath('//opf:manifest/opf:item[@href and @id]')}
@property
def manifest_type_map(self):
ans = defaultdict(list)
for item in self.opf_xpath('//opf:manifest/opf:item[@href and @media-type]'):
ans[item.get('media-type').lower()].append(self.href_to_name(
item.get('href'), self.opf_name))
return {mt:tuple(v) for mt, v in ans.iteritems()}
@property
def guide_type_map(self):
return {item.get('type', ''):self.href_to_name(item.get('href'), self.opf_name)

View File

@ -192,7 +192,7 @@ def remove_cover_image_in_page(container, page, cover_images):
href = img.get('src')
name = container.href_to_name(href, page)
if name in cover_images:
img.getparent.remove(img)
img.getparent().remove(img)
break
def set_epub_cover(container, cover_path, report):

View File

@ -174,6 +174,7 @@ def gui_polish(data):
files = data.pop('files')
if not data.pop('metadata'):
data.pop('opf')
if not data.pop('do_cover'):
data.pop('cover')
file_map = {x:x for x in files}
opts = ALL_OPTS.copy()

View File

@ -9,10 +9,11 @@ __docformat__ = 'restructuredtext en'
import os, sys
from calibre import prints
from calibre import prints, as_unicode
from calibre.ebooks.oeb.base import OEB_STYLES, OEB_DOCS, XPath
from calibre.ebooks.oeb.polish.container import OEB_FONTS
from calibre.utils.fonts.sfnt.subset import subset
from calibre.utils.fonts.sfnt.errors import UnsupportedFont
from calibre.utils.fonts.utils import get_font_names
def remove_font_face_rules(container, sheet, remove_names, base):
@ -46,9 +47,16 @@ def subset_all_fonts(container, font_stats, report):
raw = f.read()
font_name = get_font_names(raw)[-1]
warnings = []
container.log('Subsetting font: %s'%font_name)
container.log('Subsetting font: %s'%(font_name or name))
try:
nraw, old_sizes, new_sizes = subset(raw, chars,
warnings=warnings)
except UnsupportedFont as e:
container.log.warning(
'Unsupported font: %s, ignoring. Error: %s'%(
name, as_unicode(e)))
continue
for w in warnings:
container.log.warn(w)
olen = sum(old_sizes.itervalues())

View File

@ -0,0 +1,84 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from urlparse import urlparse
from lxml import etree
from calibre.ebooks.oeb.polish.container import guess_type
ns = etree.FunctionNamespace('calibre_xpath_extensions')
ns.prefix = 'calibre'
ns['lower-case'] = lambda c, x: x.lower() if hasattr(x, 'lower') else x
class TOC(object):
def __init__(self, title=None, dest=None, frag=None):
self.title, self.dest, self.frag = title, dest, frag
if self.title: self.title = self.title.strip()
self.parent = None
self.children = []
def add(self, title, dest, frag=None):
c = TOC(title, dest, frag)
self.children.append(c)
c.parent = self
return c
def __iter__(self):
for c in self.children:
yield c
def child_xpath(tag, name):
return tag.xpath('./*[calibre:lower-case(local-name()) = "%s"]'%name)
def add_from_navpoint(container, navpoint, parent, ncx_name):
dest = frag = text = None
nl = child_xpath(navpoint, 'navlabel')
if nl:
nl = nl[0]
text = ''
for txt in child_xpath(nl, 'text'):
text += etree.tostring(txt, method='text',
encoding=unicode, with_tail=False)
content = child_xpath(navpoint, 'content')
if content:
content = content[0]
href = content.get('src', None)
if href:
dest = container.href_to_name(href, base=ncx_name)
frag = urlparse(href).fragment or None
return parent.add(text or None, dest or None, frag or None)
def process_ncx_node(container, node, toc_parent, ncx_name):
for navpoint in node.xpath('./*[calibre:lower-case(local-name()) = "navpoint"]'):
child = add_from_navpoint(container, navpoint, toc_parent, ncx_name)
if child is not None:
process_ncx_node(container, navpoint, child, ncx_name)
def parse_ncx(container, ncx_name):
root = container.parsed(ncx_name)
toc_root = TOC()
navmaps = root.xpath('//*[calibre:lower-case(local-name()) = "navmap"]')
if navmaps:
process_ncx_node(container, navmaps[0], toc_root, ncx_name)
return toc_root
def get_toc(container):
toc = container.opf_xpath('//opf:spine/@toc')
if toc:
toc = container.manifest_id_map.get(toc[0], None)
if not toc:
ncx = guess_type('a.ncx')
toc = container.manifest_type_map.get(ncx, [None])[0]
if not toc:
return None
return parse_ncx(container, toc)

View File

@ -10,6 +10,7 @@ assumes a prior call to the flatcss transform.
'''
import os, math, functools, collections, re, copy
from collections import OrderedDict
from lxml.etree import XPath as _XPath
from lxml import etree
@ -106,8 +107,7 @@ class Split(object):
continue
for elem in selector(body[0]):
if elem not in body:
if before:
elem.set('pb_before', '1')
elem.set('pb_before', '1' if before else '0')
page_breaks.add(elem)
for i, elem in enumerate(item.data.iter()):
@ -134,14 +134,12 @@ class Split(object):
id = 'calibre_pb_%d'%i
x.set('id', id)
xp = XPath('//*[@id=%r]'%id)
page_breaks_.append((xp,
x.get('pb_before', False)))
page_breaks_.append((xp, x.get('pb_before', '0') == '1'))
page_break_ids.append(id)
for elem in item.data.iter():
elem.attrib.pop('pb_order', False)
if elem.get('pb_before', False):
elem.attrib.pop('pb_before')
elem.attrib.pop('pb_before', False)
return page_breaks_, page_break_ids
@ -223,22 +221,27 @@ class FlowSplitter(object):
self.commit()
def split_on_page_breaks(self, orig_tree):
ordered_ids = []
for elem in orig_tree.xpath('//*[@id]'):
id = elem.get('id')
if id in self.page_break_ids:
ordered_ids.append(self.page_breaks[self.page_break_ids.index(id)])
ordered_ids = OrderedDict()
all_page_break_ids = frozenset(self.page_break_ids)
for elem_id in orig_tree.xpath('//*/@id'):
if elem_id in all_page_break_ids:
ordered_ids[elem_id] = self.page_breaks[
self.page_break_ids.index(elem_id)]
self.trees = []
tree = orig_tree
for pattern, before in ordered_ids:
self.trees = [orig_tree]
while ordered_ids:
pb_id, (pattern, before) = ordered_ids.iteritems().next()
del ordered_ids[pb_id]
for i in xrange(len(self.trees)-1, -1, -1):
tree = self.trees[i]
elem = pattern(tree)
if elem:
self.log.debug('\t\tSplitting on page-break at %s'%
self.log.debug('\t\tSplitting on page-break at id=%s'%
elem[0].get('id'))
before, after = self.do_split(tree, elem[0], before)
self.trees.append(before)
tree = after
before_tree, after_tree = self.do_split(tree, elem[0], before)
self.trees[i:i+1] = [before_tree, after_tree]
break
self.trees.append(tree)
trees, ids = [], set([])
for tree in self.trees:
@ -289,7 +292,6 @@ class FlowSplitter(object):
if self.opts.verbose > 3 and npath != path:
self.log.debug('\t\t\tMoved split point %s to %s'%(path, npath))
return npath
def do_split(self, tree, split_point, before):
@ -304,6 +306,10 @@ class FlowSplitter(object):
root = tree.getroot()
root2 = tree2.getroot()
body, body2 = map(self.get_body, (root, root2))
if before:
# We cannot adjust for after since moving an after split point to a
# parent will cause breakage if the parent contains any content
# after the original split point
path = self.adjust_split_point(root, path)
split_point = root.xpath(path)[0]
split_point2 = root2.xpath(path)[0]

View File

@ -13,9 +13,10 @@ from operator import itemgetter
from collections import Counter, OrderedDict
from future_builtins import map
from calibre import as_unicode
from calibre.ebooks.pdf.render.common import (Array, String, Stream,
Dictionary, Name)
from calibre.utils.fonts.sfnt.subset import pdf_subset
from calibre.utils.fonts.sfnt.subset import pdf_subset, UnsupportedFont
STANDARD_FONTS = {
'Times-Roman', 'Helvetica', 'Courier', 'Symbol', 'Times-Bold',
@ -150,12 +151,16 @@ class Font(object):
self.used_glyphs = set()
def embed(self, objects):
def embed(self, objects, debug):
self.font_descriptor['FontFile'+('3' if self.is_otf else '2')
] = objects.add(self.font_stream)
self.write_widths(objects)
self.write_to_unicode(objects)
try:
pdf_subset(self.metrics.sfnt, self.used_glyphs)
except UnsupportedFont as e:
debug('Subsetting of %s not supported, embedding full font. Error: %s'%(
self.metrics.names.get('full_name', 'Unknown'), as_unicode(e)))
if self.is_otf:
self.font_stream.write(self.metrics.sfnt['CFF '].raw)
else:
@ -221,7 +226,7 @@ class FontManager(object):
}))
return self.std_map[name]
def embed_fonts(self):
def embed_fonts(self, debug):
for font in self.fonts:
font.embed(self.objects)
font.embed(self.objects, debug)

View File

@ -488,7 +488,7 @@ class PDFStream(object):
def end(self):
if self.current_page.getvalue():
self.end_page()
self.font_manager.embed_fonts()
self.font_manager.embed_fonts(self.debug)
inforef = self.objects.add(self.info)
self.links.add_links()
self.objects.pdf_serialize(self.stream)

View File

@ -101,6 +101,11 @@ class InterfaceAction(QObject):
#: on calibre as a whole
action_type = 'global'
#: If True, then this InterfaceAction will have the opportunity to interact
#: with drag and drop events. See the methods, :meth:`accept_enter_event`,
#: :meth:`accept_drag_move_event`, :meth:`drop_event` for details.
accepts_drops = False
def __init__(self, parent, site_customization):
QObject.__init__(self, parent)
self.setObjectName(self.name)
@ -108,6 +113,27 @@ class InterfaceAction(QObject):
self.site_customization = site_customization
self.interface_action_base_plugin = None
def accept_enter_event(self, event, mime_data):
''' This method should return True iff this interface action is capable
of handling the drag event. Do not call accept/ignore on the event,
that will be taken care of by the calibre UI.'''
return False
def accept_drag_move_event(self, event, mime_data):
''' This method should return True iff this interface action is capable
of handling the drag event. Do not call accept/ignore on the event,
that will be taken care of by the calibre UI.'''
return False
def drop_event(self, event, mime_data):
''' This method should perform some useful action and return True
iff this interface action is capable of handling the drop event. Do not
call accept/ignore on the event, that will be taken care of by the
calibre UI. You should not perform blocking/long operations in this
function. Instead emit a signal or use QTimer.singleShot and return
quickly. See the builtin actions for examples.'''
return False
def do_genesis(self):
self.Dispatcher = partial(Dispatcher, parent=self)
self.create_action()

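A hedged sketch of a plugin action opting in to these new drag and drop hooks (the class name, action_spec and do_drop body are hypothetical; the mime type and the QTimer.singleShot deferral mirror the builtin actions changed later in this commit):

from PyQt4.Qt import QTimer
from calibre.gui2.actions import InterfaceAction

class ExampleDropAction(InterfaceAction):
    name = 'Example Drop Action'
    action_spec = (_('Example'), None, _('Hypothetical drop-aware action'), None)
    accepts_drops = True

    def accept_enter_event(self, event, mime_data):
        # Only accept books dragged out of the calibre library view
        return mime_data.hasFormat('application/calibre+from_library')

    def accept_drag_move_event(self, event, mime_data):
        return mime_data.hasFormat('application/calibre+from_library')

    def drop_event(self, event, mime_data):
        mime = 'application/calibre+from_library'
        if mime_data.hasFormat(mime):
            # Decode the dropped book ids, then defer the real work so the
            # drop handler itself returns quickly
            self.dropped_ids = tuple(map(int, str(mime_data.data(mime)).split()))
            QTimer.singleShot(1, self.do_drop)
            return True
        return False

    def do_drop(self):
        book_ids = self.dropped_ids
        del self.dropped_ids
        # act on book_ids here (hypothetical)

    def genesis(self):
        pass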
View File

@ -18,7 +18,8 @@ from calibre import sanitize_file_name_unicode
class GenerateCatalogAction(InterfaceAction):
name = 'Generate Catalog'
action_spec = (_('Create catalog'), 'catalog.png', 'Catalog builder', ())
action_spec = (_('Create catalog'), 'catalog.png',
_('Create a catalog of the books in your calibre library in different formats'), ())
dont_add_to = frozenset(['context-menu-device'])
def genesis(self):

View File

@ -15,7 +15,8 @@ from PyQt4.Qt import (QMenu, Qt, QInputDialog, QToolButton, QDialog,
from calibre import isbytestring, sanitize_file_name_unicode
from calibre.constants import (filesystem_encoding, iswindows,
get_portable_base)
from calibre.utils.config import prefs
from calibre.utils.config import prefs, tweaks
from calibre.utils.icu import sort_key
from calibre.gui2 import (gprefs, warning_dialog, Dispatcher, error_dialog,
question_dialog, info_dialog, open_local_file, choose_dir)
from calibre.library.database2 import LibraryDatabase2
@ -46,7 +47,7 @@ class LibraryUsageStats(object): # {{{
locs = list(self.stats.keys())
locs.sort(cmp=lambda x, y: cmp(self.stats[x], self.stats[y]),
reverse=True)
for key in locs[25:]:
for key in locs[500:]:
self.stats.pop(key)
gprefs.set('library_usage_stats', self.stats)
@ -72,8 +73,9 @@ class LibraryUsageStats(object): # {{{
locs = list(self.stats.keys())
if lpath in locs:
locs.remove(lpath)
locs.sort(cmp=lambda x, y: cmp(self.stats[x], self.stats[y]),
reverse=True)
limit = tweaks['many_libraries']
key = sort_key if len(locs) > limit else lambda x:self.stats[x]
locs.sort(key=key, reverse=len(locs)<=limit)
for loc in locs:
yield self.pretty(loc), loc

View File

@ -8,7 +8,7 @@ __docformat__ = 'restructuredtext en'
import os
from functools import partial
from PyQt4.Qt import QModelIndex
from PyQt4.Qt import QModelIndex, QTimer
from calibre.gui2 import error_dialog, Dispatcher
from calibre.gui2.tools import convert_single_ebook, convert_bulk_ebook
@ -19,11 +19,36 @@ from calibre.customize.ui import plugin_for_input_format
class ConvertAction(InterfaceAction):
name = 'Convert Books'
action_spec = (_('Convert books'), 'convert.png', None, _('C'))
action_spec = (_('Convert books'), 'convert.png', _('Convert books between different ebook formats'), _('C'))
dont_add_to = frozenset(['context-menu-device'])
action_type = 'current'
action_add_menu = True
accepts_drops = True
def accept_enter_event(self, event, mime_data):
if mime_data.hasFormat("application/calibre+from_library"):
return True
return False
def accept_drag_move_event(self, event, mime_data):
if mime_data.hasFormat("application/calibre+from_library"):
return True
return False
def drop_event(self, event, mime_data):
mime = 'application/calibre+from_library'
if mime_data.hasFormat(mime):
self.dropped_ids = tuple(map(int, str(mime_data.data(mime)).split()))
QTimer.singleShot(1, self.do_drop)
return True
return False
def do_drop(self):
book_ids = self.dropped_ids
del self.dropped_ids
self.do_convert(book_ids)
def genesis(self):
m = self.convert_menu = self.qaction.menu()
cm = partial(self.create_menu_action, self.convert_menu)
@ -112,6 +137,9 @@ class ConvertAction(InterfaceAction):
def convert_ebook(self, checked, bulk=None):
book_ids = self.get_books_for_conversion()
if book_ids is None: return
self.do_convert(book_ids, bulk=bulk)
def do_convert(self, book_ids, bulk=None):
previous = self.gui.library_view.currentIndex()
rows = [x.row() for x in \
self.gui.library_view.selectionModel().selectedRows()]

View File

@ -83,11 +83,37 @@ class MultiDeleter(QObject): # {{{
class DeleteAction(InterfaceAction):
name = 'Remove Books'
action_spec = (_('Remove books'), 'trash.png', None, 'Del')
action_spec = (_('Remove books'), 'trash.png', _('Delete books'), 'Del')
action_type = 'current'
action_add_menu = True
action_menu_clone_qaction = _('Remove selected books')
accepts_drops = True
def accept_enter_event(self, event, mime_data):
if mime_data.hasFormat("application/calibre+from_library"):
return True
return False
def accept_drag_move_event(self, event, mime_data):
if mime_data.hasFormat("application/calibre+from_library"):
return True
return False
def drop_event(self, event, mime_data):
mime = 'application/calibre+from_library'
if mime_data.hasFormat(mime):
self.dropped_ids = tuple(map(int, str(mime_data.data(mime)).split()))
QTimer.singleShot(1, self.do_drop)
return True
return False
def do_drop(self):
book_ids = self.dropped_ids
del self.dropped_ids
if book_ids:
self.do_library_delete(book_ids)
def genesis(self):
self.qaction.triggered.connect(self.delete_books)
self.delete_menu = self.qaction.menu()
@ -296,17 +322,8 @@ class DeleteAction(InterfaceAction):
current_row = rmap.get(next_id, None)
self.library_ids_deleted(ids_deleted, current_row=current_row)
def delete_books(self, *args):
'''
Delete selected books from device or library.
'''
def do_library_delete(self, to_delete_ids):
view = self.gui.current_view()
rows = view.selectionModel().selectedRows()
if not rows or len(rows) == 0:
return
# Library view is visible.
if self.gui.stack.currentIndex() == 0:
to_delete_ids = [view.model().id(r) for r in rows]
# Ask the user if they want to delete the book from the library or device if it is in both.
if self.gui.device_manager.is_device_connected:
on_device = False
@ -336,12 +353,25 @@ class DeleteAction(InterfaceAction):
+'</p>', 'library_delete_books', self.gui):
return
next_id = view.next_id
if len(rows) < 5:
if len(to_delete_ids) < 5:
view.model().delete_books_by_id(to_delete_ids)
self.library_ids_deleted2(to_delete_ids, next_id=next_id)
else:
self.__md = MultiDeleter(self.gui, to_delete_ids,
partial(self.library_ids_deleted2, next_id=next_id))
def delete_books(self, *args):
'''
Delete selected books from device or library.
'''
view = self.gui.current_view()
rows = view.selectionModel().selectedRows()
if not rows or len(rows) == 0:
return
# Library view is visible.
if self.gui.stack.currentIndex() == 0:
to_delete_ids = [view.model().id(r) for r in rows]
self.do_library_delete(to_delete_ids)
# Device view is visible.
else:
if self.gui.stack.currentIndex() == 1:

View File

@ -177,7 +177,8 @@ class SendToDeviceAction(InterfaceAction):
class ConnectShareAction(InterfaceAction):
name = 'Connect Share'
action_spec = (_('Connect/share'), 'connect_share.png', None, None)
action_spec = (_('Connect/share'), 'connect_share.png',
_('Share books using a web server or email. Connect to special devices, etc.'), None)
popup_type = QToolButton.InstantPopup
def genesis(self):

View File

@ -23,10 +23,38 @@ from calibre.db.errors import NoSuchFormat
class EditMetadataAction(InterfaceAction):
name = 'Edit Metadata'
action_spec = (_('Edit metadata'), 'edit_input.png', None, _('E'))
action_spec = (_('Edit metadata'), 'edit_input.png', _('Change the title/author/cover etc. of books'), _('E'))
action_type = 'current'
action_add_menu = True
accepts_drops = True
def accept_enter_event(self, event, mime_data):
if mime_data.hasFormat("application/calibre+from_library"):
return True
return False
def accept_drag_move_event(self, event, mime_data):
if mime_data.hasFormat("application/calibre+from_library"):
return True
return False
def drop_event(self, event, mime_data):
mime = 'application/calibre+from_library'
if mime_data.hasFormat(mime):
self.dropped_ids = tuple(map(int, str(mime_data.data(mime)).split()))
QTimer.singleShot(1, self.do_drop)
return True
return False
def do_drop(self):
book_ids = self.dropped_ids
del self.dropped_ids
if book_ids:
db = self.gui.library_view.model().db
rows = [db.row(i) for i in book_ids]
self.edit_metadata_for(rows, book_ids)
def genesis(self):
md = self.qaction.menu()
cm = partial(self.create_menu_action, md)
@ -186,18 +214,23 @@ class EditMetadataAction(InterfaceAction):
Edit metadata of selected books in library.
'''
rows = self.gui.library_view.selectionModel().selectedRows()
previous = self.gui.library_view.currentIndex()
if not rows or len(rows) == 0:
d = error_dialog(self.gui, _('Cannot edit metadata'),
_('No books selected'))
d.exec_()
return
if bulk or (bulk is None and len(rows) > 1):
return self.edit_bulk_metadata(checked)
row_list = [r.row() for r in rows]
m = self.gui.library_view.model()
ids = [m.id(r) for r in rows]
self.edit_metadata_for(row_list, ids, bulk=bulk)
def edit_metadata_for(self, rows, book_ids, bulk=None):
previous = self.gui.library_view.currentIndex()
if bulk or (bulk is None and len(rows) > 1):
return self.do_edit_bulk_metadata(rows, book_ids)
current_row = 0
row_list = rows
if len(row_list) == 1:
cr = row_list[0]
@ -242,7 +275,6 @@ class EditMetadataAction(InterfaceAction):
db = self.gui.library_view.model().db
view.view_format(db.row(id_), fmt)
def edit_bulk_metadata(self, checked):
'''
Edit metadata of selected books in library in bulk.
@ -256,6 +288,9 @@ class EditMetadataAction(InterfaceAction):
_('No books selected'))
d.exec_()
return
self.do_edit_bulk_metadata(rows, ids)
def do_edit_bulk_metadata(self, rows, book_ids):
# Prevent the TagView from updating due to signals from the database
self.gui.tags_view.blockSignals(True)
changed = False
@ -278,7 +313,7 @@ class EditMetadataAction(InterfaceAction):
self.gui.tags_view.recount()
if self.gui.cover_flow:
self.gui.cover_flow.dataChanged()
self.gui.library_view.select_rows(ids)
self.gui.library_view.select_rows(book_ids)
# Merge books {{{
def merge_books(self, safe_merge=False, merge_only_formats=False):

View File

@ -16,7 +16,7 @@ from calibre.gui2.actions import InterfaceAction
class FetchNewsAction(InterfaceAction):
name = 'Fetch News'
action_spec = (_('Fetch news'), 'news.png', None, _('F'))
action_spec = (_('Fetch news'), 'news.png', _('Download news in ebook form from various websites all over the world'), _('F'))
def location_selected(self, loc):
enabled = loc == 'library'

View File

@ -11,8 +11,8 @@ from calibre.gui2.actions import InterfaceAction
class OpenFolderAction(InterfaceAction):
name = 'Open Folder'
action_spec = (_('Open containing folder'), 'document_open.png', None,
_('O'))
action_spec = (_('Open containing folder'), 'document_open.png',
_('Open the folder containing the current book\'s files'), _('O'))
dont_add_to = frozenset(['context-menu-device'])
action_type = 'current'

View File

@ -15,7 +15,7 @@ from calibre.gui2.dialogs.plugin_updater import (PluginUpdaterDialog,
class PluginUpdaterAction(InterfaceAction):
name = 'Plugin Updater'
action_spec = (_('Plugin Updater'), None, None, ())
action_spec = (_('Plugin Updater'), None, _('Update any plugins you have installed in calibre'), ())
action_type = 'current'
def genesis(self):

View File

@ -10,6 +10,7 @@ __docformat__ = 'restructuredtext en'
import os, weakref, shutil, textwrap
from collections import OrderedDict
from functools import partial
from future_builtins import map
from PyQt4.Qt import (QDialog, QGridLayout, QIcon, QCheckBox, QLabel, QFrame,
QApplication, QDialogButtonBox, Qt, QSize, QSpacerItem,
@ -44,13 +45,18 @@ class Polish(QDialog): # {{{
_('<h3>Smarten punctuation</h3>%s')%HELP['smarten_punctuation'],
'metadata':_('<h3>Updating metadata</h3>'
'<p>This will update all metadata and covers in the'
'<p>This will update all metadata <i>except</i> the cover in the'
' ebook files to match the current metadata in the'
' calibre library.</p><p>If the ebook file does not have'
' an identifiable cover, a new cover is inserted.</p>'
' calibre library.</p>'
' <p>Note that most ebook'
' formats are not capable of supporting all the'
' metadata in calibre.</p>'),
' metadata in calibre.</p><p>There is a separate option to'
' update the cover.</p>'),
'do_cover': _('<p>Update the covers in the ebook files to match the'
' current cover in the calibre library.</p>'
'<p>If the ebook file does not have'
' an identifiable cover, a new cover is inserted.</p>'
),
'jacket':_('<h3>Book Jacket</h3>%s')%HELP['jacket'],
'remove_jacket':_('<h3>Remove Book Jacket</h3>%s')%HELP['remove_jacket'],
}
@ -63,11 +69,12 @@ class Polish(QDialog): # {{{
count = 0
self.all_actions = OrderedDict([
('subset', _('Subset all embedded fonts')),
('smarten_punctuation', _('Smarten punctuation')),
('metadata', _('Update metadata in book files')),
('jacket', _('Add metadata as a "book jacket" page')),
('remove_jacket', _('Remove a previously inserted book jacket')),
('subset', _('&Subset all embedded fonts')),
('smarten_punctuation', _('Smarten &punctuation')),
('metadata', _('Update &metadata in the book files')),
('do_cover', _('Update the &cover in the book files')),
('jacket', _('Add metadata as a "book &jacket" page')),
('remove_jacket', _('&Remove a previously inserted book jacket')),
])
prefs = gprefs.get('polishing_settings', {})
for name, text in self.all_actions.iteritems():
@ -243,8 +250,10 @@ class Polish(QDialog): # {{{
cover = os.path.join(base, 'cover.jpg')
if db.copy_cover_to(book_id, cover, index_is_id=True):
data['cover'] = cover
is_orig = {}
for fmt in formats:
ext = fmt.replace('ORIGINAL_', '').lower()
is_orig[ext.upper()] = 'ORIGINAL_' in fmt
with open(os.path.join(base, '%s.%s'%(book_id, ext)), 'wb') as f:
db.copy_format_to(book_id, fmt, f, index_is_id=True)
data['files'].append(f.name)
@ -257,7 +266,7 @@ class Polish(QDialog): # {{{
self.pd.set_msg(_('Queueing book %(nums)s of %(tot)s (%(title)s)')%dict(
nums=num, tot=len(self.book_id_map), title=mi.title))
self.jobs.append((desc, data, book_id, base))
self.jobs.append((desc, data, book_id, base, is_orig))
# }}}
class Report(QDialog): # {{{
@ -356,9 +365,35 @@ class Report(QDialog): # {{{
class PolishAction(InterfaceAction):
name = 'Polish Books'
action_spec = (_('Polish books'), 'polish.png', None, _('P'))
action_spec = (_('Polish books'), 'polish.png',
_('Apply the shine of perfection to your books'), _('P'))
dont_add_to = frozenset(['context-menu-device'])
action_type = 'current'
accepts_drops = True
def accept_enter_event(self, event, mime_data):
if mime_data.hasFormat("application/calibre+from_library"):
return True
return False
def accept_drag_move_event(self, event, mime_data):
if mime_data.hasFormat("application/calibre+from_library"):
return True
return False
def drop_event(self, event, mime_data):
mime = 'application/calibre+from_library'
if mime_data.hasFormat(mime):
self.dropped_ids = tuple(map(int, str(mime_data.data(mime)).split()))
QTimer.singleShot(1, self.do_drop)
return True
return False
def do_drop(self):
book_id_map = self.get_supported_books(self.dropped_ids)
del self.dropped_ids
if book_id_map:
self.do_polish(book_id_map)
def genesis(self):
self.qaction.triggered.connect(self.polish_books)
@ -369,7 +404,6 @@ class PolishAction(InterfaceAction):
self.qaction.setEnabled(enabled)
def get_books_for_polishing(self):
from calibre.ebooks.oeb.polish.main import SUPPORTED
rows = [r.row() for r in
self.gui.library_view.selectionModel().selectedRows()]
if not rows or len(rows) == 0:
@ -379,11 +413,16 @@ class PolishAction(InterfaceAction):
return None
db = self.gui.library_view.model().db
ans = (db.id(r) for r in rows)
return self.get_supported_books(ans)
def get_supported_books(self, book_ids):
from calibre.ebooks.oeb.polish.main import SUPPORTED
db = self.gui.library_view.model().db
supported = set(SUPPORTED)
for x in SUPPORTED:
supported.add('ORIGINAL_'+x)
ans = [(x, set( (db.formats(x, index_is_id=True) or '').split(',') )
.intersection(supported)) for x in ans]
.intersection(supported)) for x in book_ids]
ans = [x for x in ans if x[1]]
if not ans:
error_dialog(self.gui, _('Cannot polish'),
@ -401,14 +440,17 @@ class PolishAction(InterfaceAction):
book_id_map = self.get_books_for_polishing()
if not book_id_map:
return
self.do_polish(book_id_map)
def do_polish(self, book_id_map):
d = Polish(self.gui.library_view.model().db, book_id_map, parent=self.gui)
if d.exec_() == d.Accepted and d.jobs:
show_reports = bool(d.show_reports.isChecked())
for desc, data, book_id, base in reversed(d.jobs):
for desc, data, book_id, base, is_orig in reversed(d.jobs):
job = self.gui.job_manager.run_job(
Dispatcher(self.book_polished), 'gui_polish', args=(data,),
description=desc)
job.polish_args = (book_id, base, data['files'], show_reports)
job.polish_args = (book_id, base, data['files'], show_reports, is_orig)
if d.jobs:
self.gui.jobs_pointer.start()
self.gui.status_bar.show_message(
@ -419,11 +461,11 @@ class PolishAction(InterfaceAction):
self.gui.job_exception(job)
return
db = self.gui.current_db
book_id, base, files, show_reports = job.polish_args
book_id, base, files, show_reports, is_orig = job.polish_args
fmts = set()
for path in files:
fmt = path.rpartition('.')[-1].upper()
if tweaks['save_original_format_when_polishing']:
if tweaks['save_original_format_when_polishing'] and not is_orig[fmt]:
fmts.add(fmt)
db.save_original_format(book_id, fmt, notify=False)
with open(path, 'rb') as f:
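
The is_orig map threaded through the polish job above exists so that when an ORIGINAL_EPUB (or ORIGINAL_AZW3, etc.) copy is polished, calibre does not save yet another backup over the original that already exists. A rough sketch of that guard, assuming a plain dict from upper-case format name to an "is this an ORIGINAL_ copy" flag:

# Sketch only: mirrors the condition around db.save_original_format() above;
# the real flag comes from tweaks['save_original_format_when_polishing'].
def should_save_original(fmt, is_orig, save_original_when_polishing=True):
    return save_original_when_polishing and not is_orig.get(fmt, False)

is_orig = {'EPUB': False, 'AZW3': True}   # AZW3 here was exported from ORIGINAL_AZW3
print(should_save_original('EPUB', is_orig))  # True  -> keep a backup first
print(should_save_original('AZW3', is_orig))  # False -> the original already exists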

View File

@ -17,7 +17,7 @@ from calibre.constants import DEBUG, isosx
class PreferencesAction(InterfaceAction):
name = 'Preferences'
action_spec = (_('Preferences'), 'config.png', None, _('Ctrl+P'))
action_spec = (_('Preferences'), 'config.png', _('Configure calibre'), _('Ctrl+P'))
action_add_menu = True
action_menu_clone_qaction = _('Change calibre behavior')

View File

@ -11,7 +11,7 @@ from calibre.gui2.actions import InterfaceAction
class RestartAction(InterfaceAction):
name = 'Restart'
action_spec = (_('Restart'), None, None, _('Ctrl+R'))
action_spec = (_('Restart'), None, _('Restart calibre'), _('Ctrl+R'))
def genesis(self):
self.qaction.triggered.connect(self.restart)

View File

@ -17,7 +17,8 @@ from calibre.gui2.actions import InterfaceAction
class SaveToDiskAction(InterfaceAction):
name = "Save To Disk"
action_spec = (_('Save to disk'), 'save.png', None, _('S'))
action_spec = (_('Save to disk'), 'save.png',
_('Export ebook files from the calibre library'), _('S'))
action_type = 'current'
action_add_menu = True
action_menu_clone_qaction = True

View File

@ -13,8 +13,8 @@ from calibre.gui2 import error_dialog
class ShowBookDetailsAction(InterfaceAction):
name = 'Show Book Details'
action_spec = (_('Show book details'), 'dialog_information.png', None,
_('I'))
action_spec = (_('Show book details'), 'dialog_information.png',
_('Show the detailed metadata for the current book in a separate window'), _('I'))
dont_add_to = frozenset(['context-menu-device'])
action_type = 'current'

View File

@ -14,7 +14,7 @@ from calibre.gui2.actions import InterfaceAction
class SimilarBooksAction(InterfaceAction):
name = 'Similar Books'
action_spec = (_('Similar books...'), None, None, None)
action_spec = (_('Similar books...'), None, _('Show books similar to the current book'), None)
popup_type = QToolButton.InstantPopup
action_type = 'current'
action_add_menu = True

View File

@ -17,7 +17,7 @@ from calibre.gui2.dialogs.confirm_delete import confirm
class StoreAction(InterfaceAction):
name = 'Store'
action_spec = (_('Get books'), 'store.png', None, _('G'))
action_spec = (_('Get books'), 'store.png', _('Search dozens of online ebook retailers for the cheapest books'), _('G'))
action_add_menu = True
action_menu_clone_qaction = _('Search for ebooks')

View File

@ -64,7 +64,7 @@ class TweakBook(QDialog):
self.fmt_choice_box = QGroupBox(_('Choose the format to tweak:'), self)
self._fl = fl = QHBoxLayout()
self.fmt_choice_box.setLayout(self._fl)
self.fmt_choice_buttons = [QRadioButton(x, self) for x in fmts]
self.fmt_choice_buttons = [QRadioButton(y, self) for y in fmts]
for x in self.fmt_choice_buttons:
fl.addWidget(x, stretch=10 if x is self.fmt_choice_buttons[-1] else
0)
@ -291,6 +291,32 @@ class TweakEpubAction(InterfaceAction):
dont_add_to = frozenset(['context-menu-device'])
action_type = 'current'
accepts_drops = True
def accept_enter_event(self, event, mime_data):
if mime_data.hasFormat("application/calibre+from_library"):
return True
return False
def accept_drag_move_event(self, event, mime_data):
if mime_data.hasFormat("application/calibre+from_library"):
return True
return False
def drop_event(self, event, mime_data):
mime = 'application/calibre+from_library'
if mime_data.hasFormat(mime):
self.dropped_ids = tuple(map(int, str(mime_data.data(mime)).split()))
QTimer.singleShot(1, self.do_drop)
return True
return False
def do_drop(self):
book_ids = self.dropped_ids
del self.dropped_ids
if book_ids:
self.do_tweak(book_ids[0])
def genesis(self):
self.qaction.triggered.connect(self.tweak_book)
@ -301,6 +327,9 @@ class TweakEpubAction(InterfaceAction):
_('No book selected'), show=True)
book_id = self.gui.library_view.model().id(row)
self.do_tweak(book_id)
def do_tweak(self, book_id):
db = self.gui.library_view.model().db
fmts = db.formats(book_id, index_is_id=True) or ''
fmts = [x.lower().strip() for x in fmts.split(',')]

View File

@ -34,7 +34,7 @@ class HistoryAction(QAction):
class ViewAction(InterfaceAction):
name = 'View'
action_spec = (_('View'), 'view.png', None, _('V'))
action_spec = (_('View'), 'view.png', _('Read books'), _('V'))
action_type = 'current'
action_add_menu = True
action_menu_clone_qaction = True

View File

@ -8,8 +8,8 @@ __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from PyQt4.Qt import (QObject, QToolBar, Qt, QSize, QToolButton, QVBoxLayout,
QLabel, QWidget, QAction, QMenuBar, QMenu)
from PyQt4.Qt import (Qt, QAction, QLabel, QMenu, QMenuBar, QObject,
QToolBar, QToolButton, QSize, QVBoxLayout, QWidget)
from calibre.constants import isosx
from calibre.gui2 import gprefs
@ -116,20 +116,38 @@ class ToolBar(QToolBar): # {{{
ch.setPopupMode(menu_mode)
return ch
#support drag&drop from/to library from/to reader/card
# support drag&drop from/to library, from/to reader/card, enabled plugins
def check_iactions_for_drag(self, event, md, func):
if self.added_actions:
pos = event.pos()
for iac in self.gui.iactions.itervalues():
if iac.accepts_drops:
aa = iac.qaction
w = self.widgetForAction(aa)
m = aa.menu()
if (( (w is not None and w.geometry().contains(pos)) or
(m is not None and m.isVisible() and m.geometry().contains(pos)) ) and
getattr(iac, func)(event, md)):
return True
return False
def dragEnterEvent(self, event):
md = event.mimeData()
if md.hasFormat("application/calibre+from_library") or \
md.hasFormat("application/calibre+from_device"):
event.setDropAction(Qt.CopyAction)
event.accept()
return
if self.check_iactions_for_drag(event, md, 'accept_enter_event'):
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
allowed = False
md = event.mimeData()
#Drop is only allowed in the location manager widgets different from the selected one
# Drop is only allowed in the location manager widgets different from the selected one
for ac in self.location_manager.available_actions:
w = self.widgetForAction(ac)
if w is not None:
@ -141,12 +159,15 @@ class ToolBar(QToolBar): # {{{
break
if allowed:
event.acceptProposedAction()
return
if self.check_iactions_for_drag(event, md, 'accept_drag_move_event'):
event.acceptProposedAction()
else:
event.ignore()
def dropEvent(self, event):
data = event.mimeData()
mime = 'application/calibre+from_library'
if data.hasFormat(mime):
ids = list(map(int, str(data.data(mime)).split()))
@ -160,6 +181,7 @@ class ToolBar(QToolBar): # {{{
tgt = None
self.gui.sync_to_device(tgt, False, send_ids=ids)
event.accept()
return
mime = 'application/calibre+from_device'
if data.hasFormat(mime):
@ -168,6 +190,13 @@ class ToolBar(QToolBar): # {{{
self.gui.iactions['Add Books'].add_books_from_device(
self.gui.current_view(), paths=paths)
event.accept()
return
# Give added_actions an opportunity to process the drag&drop event
if self.check_iactions_for_drag(event, data, 'drop_event'):
event.accept()
else:
event.ignore()
# }}}
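
check_iactions_for_drag() above is the routing step that lets any InterfaceAction declaring accepts_drops = True receive drags that land on its toolbar button or its open menu: the toolbar first handles the built-in library/device mime types itself, then asks each such action whether the widget under the cursor is its own and whether it accepts the payload. A stripped-down sketch of that dispatch, with plain objects standing in for the Qt widgets and events:

# Sketch only: 'actions' stands in for gui.iactions.itervalues() and hit_test
# for the widgetForAction()/menu() geometry checks in the real method.
def route_drag(actions, pos, event, mime_data, func_name):
    for action in actions:
        if not action.accepts_drops:
            continue
        if action.hit_test(pos) and getattr(action, func_name)(event, mime_data):
            return True
    return False

class DummyConvertAction(object):
    accepts_drops = True
    def hit_test(self, pos):
        return pos == 'over-convert-button'
    def accept_enter_event(self, event, mime_data):
        return 'application/calibre+from_library' in mime_data

print(route_drag([DummyConvertAction()], 'over-convert-button', None,
                 {'application/calibre+from_library': '1 2 3'},
                 'accept_enter_event'))  # True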

View File

@ -327,6 +327,13 @@ class EditorWidget(QWebView): # {{{
else:
return QWebView.keyReleaseEvent(self, ev)
def contextMenuEvent(self, ev):
menu = self.page().createStandardContextMenu()
paste = self.pageAction(QWebPage.Paste)
for action in menu.actions():
if action == paste:
menu.insertAction(action, self.pageAction(QWebPage.PasteAndMatchStyle))
menu.exec_(ev.globalPos())
# }}}

View File

@ -28,9 +28,10 @@ class BaseModel(QAbstractListModel):
def name_to_action(self, name, gui):
if name == 'Donate':
return FakeAction('Donate', _('Donate'), 'donate.png',
dont_add_to=frozenset(['context-menu',
'context-menu-device']))
return FakeAction(
'Donate', _('Donate'), 'donate.png', tooltip=
_('Donate to support the development of calibre'),
dont_add_to=frozenset(['context-menu', 'context-menu-device']))
if name == 'Location Manager':
return FakeAction('Location Manager', _('Location Manager'), 'reader.png',
_('Switch between library and device views'),
@ -247,6 +248,18 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
self.remove_action_button.clicked.connect(self.remove_action)
self.action_up_button.clicked.connect(partial(self.move, -1))
self.action_down_button.clicked.connect(partial(self.move, 1))
self.all_actions.setMouseTracking(True)
self.current_actions.setMouseTracking(True)
self.all_actions.entered.connect(self.all_entered)
self.current_actions.entered.connect(self.current_entered)
def all_entered(self, index):
tt = self.all_actions.model().data(index, Qt.ToolTipRole).toString()
self.help_text.setText(tt)
def current_entered(self, index):
tt = self.current_actions.model().data(index, Qt.ToolTipRole).toString()
self.help_text.setText(tt)
def what_changed(self, idx):
key = unicode(self.what.itemData(idx).toString())
@ -264,7 +277,7 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
names = self.all_actions.model().names(x)
if names:
not_added = self.current_actions.model().add(names)
ns = set([x.name for x in not_added])
ns = set([y.name for y in not_added])
added = set(names) - ns
self.all_actions.model().remove(x, added)
if not_added:
@ -283,7 +296,7 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
names = self.current_actions.model().names(x)
if names:
not_removed = self.current_actions.model().remove(x)
ns = set([x.name for x in not_removed])
ns = set([y.name for y in not_removed])
removed = set(names) - ns
self.all_actions.model().add(removed)
if not_removed:

View File

@ -234,6 +234,13 @@
</layout>
</widget>
</item>
<item>
<widget class="QLabel" name="help_text">
<property name="text">
<string/>
</property>
</widget>
</item>
<item>
<widget class="QWidget" name="spacer_widget" native="true">
<layout class="QVBoxLayout" name="verticalLayout_5">

View File

@ -7,6 +7,7 @@ __license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import re
from contextlib import closing
from lxml import html
@ -49,7 +50,7 @@ class AmazonEUBase(StorePlugin):
asin_xpath = '@name'
cover_xpath = './/img[@class="productImage"]/@src'
title_xpath = './/h3[@class="newaps"]/a//text()'
author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]/text()'
author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]//text()'
price_xpath = './/ul[contains(@class, "rsltL")]//span[contains(@class, "lrg") and contains(@class, "bld")]/text()'
for data in doc.xpath(data_xpath):
@ -57,7 +58,7 @@ class AmazonEUBase(StorePlugin):
break
# Even though we are searching digital-text only Amazon will still
# put in results for non Kindle books (author pages). So we need
# put in results for non Kindle books (authors pages). So we need
# to explicitly check if the item is a Kindle book and ignore it
# if it isn't.
format_ = ''.join(data.xpath(format_xpath))
@ -75,12 +76,13 @@ class AmazonEUBase(StorePlugin):
cover_url = ''.join(data.xpath(cover_xpath))
title = ''.join(data.xpath(title_xpath))
author = ''.join(data.xpath(author_xpath))
try:
if self.author_article:
author = author.split(self.author_article, 1)[1].split(" (")[0]
except:
pass
authors = ''.join(data.xpath(author_xpath))
authors = re.sub('^' + self.author_article, '', authors)
authors = re.sub(self.and_word, ' & ', authors)
mo = re.match(r'(.*)(\(\d.*)$', authors)
if mo:
authors = mo.group(1).strip()
price = ''.join(data.xpath(price_xpath))
@ -89,7 +91,7 @@ class AmazonEUBase(StorePlugin):
s = SearchResult()
s.cover_url = cover_url.strip()
s.title = title.strip()
s.author = author.strip()
s.author = authors.strip()
s.price = price.strip()
s.detail_item = asin.strip()
s.drm = SearchResult.DRM_UNKNOWN
@ -115,3 +117,5 @@ class AmazonDEKindleStore(AmazonEUBase):
search_url = 'http://www.amazon.de/s/?url=search-alias%3Ddigital-text&field-keywords='
author_article = 'von '
and_word = ' und '
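
The author handling rewritten in this and the other localized Amazon store plugins below normalizes the scraped byline in three steps: strip the leading article (author_article, e.g. 'von ' on amazon.de), replace the localized "and" word with '&', and drop a trailing parenthesised result count. A small sketch of that normalization on its own, with a made-up byline:

import re

# Sketch only: author_article and and_word correspond to the per-store class
# attributes, e.g. 'von ' / ' und ' for amazon.de.
def normalize_authors(raw, author_article, and_word):
    authors = re.sub('^' + author_article, '', raw)
    authors = re.sub(and_word, ' & ', authors)
    mo = re.match(r'(.*)(\(\d.*)$', authors)
    if mo:
        authors = mo.group(1).strip()
    return authors.strip()

print(normalize_authors('von Erich Kaestner und Walter Trier (12)', 'von ', ' und '))
# Erich Kaestner & Walter Trier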

View File

@ -7,6 +7,7 @@ __license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import re
from contextlib import closing
from lxml import html
@ -48,7 +49,7 @@ class AmazonEUBase(StorePlugin):
asin_xpath = '@name'
cover_xpath = './/img[@class="productImage"]/@src'
title_xpath = './/h3[@class="newaps"]/a//text()'
author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]/text()'
author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]//text()'
price_xpath = './/ul[contains(@class, "rsltL")]//span[contains(@class, "lrg") and contains(@class, "bld")]/text()'
for data in doc.xpath(data_xpath):
@ -56,7 +57,7 @@ class AmazonEUBase(StorePlugin):
break
# Even though we are searching digital-text only Amazon will still
# put in results for non Kindle books (author pages). So we need
# put in results for non Kindle books (authors pages). So we need
# to explicitly check if the item is a Kindle book and ignore it
# if it isn't.
format_ = ''.join(data.xpath(format_xpath))
@ -74,12 +75,13 @@ class AmazonEUBase(StorePlugin):
cover_url = ''.join(data.xpath(cover_xpath))
title = ''.join(data.xpath(title_xpath))
author = ''.join(data.xpath(author_xpath))
try:
if self.author_article:
author = author.split(self.author_article, 1)[1].split(" (")[0]
except:
pass
authors = ''.join(data.xpath(author_xpath))
authors = re.sub('^' + self.author_article, '', authors)
authors = re.sub(self.and_word, ' & ', authors)
mo = re.match(r'(.*)(\(\d.*)$', authors)
if mo:
authors = mo.group(1).strip()
price = ''.join(data.xpath(price_xpath))
@ -88,7 +90,7 @@ class AmazonEUBase(StorePlugin):
s = SearchResult()
s.cover_url = cover_url.strip()
s.title = title.strip()
s.author = author.strip()
s.author = authors.strip()
s.price = price.strip()
s.detail_item = asin.strip()
s.drm = SearchResult.DRM_UNKNOWN
@ -113,3 +115,5 @@ class AmazonESKindleStore(AmazonEUBase):
search_url = 'http://www.amazon.es/s/?url=search-alias%3Ddigital-text&field-keywords='
author_article = 'de '
and_word = ' y '

View File

@ -7,7 +7,7 @@ __license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import re
from contextlib import closing
from lxml import html
@ -50,7 +50,7 @@ class AmazonEUBase(StorePlugin):
asin_xpath = '@name'
cover_xpath = './/img[@class="productImage"]/@src'
title_xpath = './/h3[@class="newaps"]/a//text()'
author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]/text()'
author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]//text()'
price_xpath = './/ul[contains(@class, "rsltL")]//span[contains(@class, "lrg") and contains(@class, "bld")]/text()'
for data in doc.xpath(data_xpath):
@ -58,7 +58,7 @@ class AmazonEUBase(StorePlugin):
break
# Even though we are searching digital-text only Amazon will still
# put in results for non Kindle books (author pages). So we need
# put in results for non Kindle books (authors pages). So we need
# to explicitly check if the item is a Kindle book and ignore it
# if it isn't.
format_ = ''.join(data.xpath(format_xpath))
@ -76,12 +76,13 @@ class AmazonEUBase(StorePlugin):
cover_url = ''.join(data.xpath(cover_xpath))
title = ''.join(data.xpath(title_xpath))
author = ''.join(data.xpath(author_xpath))
try:
if self.author_article:
author = author.split(self.author_article, 1)[1].split(" (")[0]
except:
pass
authors = ''.join(data.xpath(author_xpath))
authors = re.sub('^' + self.author_article, '', authors)
authors = re.sub(self.and_word, ' & ', authors)
mo = re.match(r'(.*)(\(\d.*)$', authors)
if mo:
authors = mo.group(1).strip()
price = ''.join(data.xpath(price_xpath))
@ -90,7 +91,7 @@ class AmazonEUBase(StorePlugin):
s = SearchResult()
s.cover_url = cover_url.strip()
s.title = title.strip()
s.author = author.strip()
s.author = authors.strip()
s.price = price.strip()
s.detail_item = asin.strip()
s.drm = SearchResult.DRM_UNKNOWN
@ -112,3 +113,5 @@ class AmazonFRKindleStore(AmazonEUBase):
search_url = 'http://www.amazon.fr/s/?url=search-alias%3Ddigital-text&field-keywords='
author_article = 'de '
and_word = ' et '

View File

@ -7,6 +7,7 @@ __license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import re
from contextlib import closing
from lxml import html
@ -48,7 +49,7 @@ class AmazonEUBase(StorePlugin):
asin_xpath = '@name'
cover_xpath = './/img[@class="productImage"]/@src'
title_xpath = './/h3[@class="newaps"]/a//text()'
author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]/text()'
author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]//text()'
price_xpath = './/ul[contains(@class, "rsltL")]//span[contains(@class, "lrg") and contains(@class, "bld")]/text()'
for data in doc.xpath(data_xpath):
@ -56,7 +57,7 @@ class AmazonEUBase(StorePlugin):
break
# Even though we are searching digital-text only Amazon will still
# put in results for non Kindle books (author pages). So we need
# put in results for non Kindle books (authors pages). So we need
# to explicitly check if the item is a Kindle book and ignore it
# if it isn't.
format_ = ''.join(data.xpath(format_xpath))
@ -74,12 +75,13 @@ class AmazonEUBase(StorePlugin):
cover_url = ''.join(data.xpath(cover_xpath))
title = ''.join(data.xpath(title_xpath))
author = ''.join(data.xpath(author_xpath))
try:
if self.author_article:
author = author.split(self.author_article, 1)[1].split(" (")[0]
except:
pass
authors = ''.join(data.xpath(author_xpath))
authors = re.sub('^' + self.author_article, '', authors)
authors = re.sub(self.and_word, ' & ', authors)
mo = re.match(r'(.*)(\(\d.*)$', authors)
if mo:
authors = mo.group(1).strip()
price = ''.join(data.xpath(price_xpath))
@ -88,7 +90,7 @@ class AmazonEUBase(StorePlugin):
s = SearchResult()
s.cover_url = cover_url.strip()
s.title = title.strip()
s.author = author.strip()
s.author = authors.strip()
s.price = price.strip()
s.detail_item = asin.strip()
s.drm = SearchResult.DRM_UNKNOWN
@ -99,7 +101,6 @@ class AmazonEUBase(StorePlugin):
def get_details(self, search_result, timeout):
pass
class AmazonITKindleStore(AmazonEUBase):
'''
For comments on the implementation, please see amazon_plugin.py
@ -114,3 +115,5 @@ class AmazonITKindleStore(AmazonEUBase):
search_url = 'http://www.amazon.it/s/?url=search-alias%3Ddigital-text&field-keywords='
author_article = 'di '
and_word = ' e '

Some files were not shown because too many files have changed in this diff.