Merge from trunk

This commit is contained in:
Charles Haley 2012-07-01 09:23:12 +02:00
commit 8099ab05db
118 changed files with 39225 additions and 35437 deletions

View File

@ -19,6 +19,56 @@
# new recipes:
# - title:
- version: 0.8.58
date: 2012-06-29
new features:
- title: "Add some texture to calibre generated covers"
- title: "Drivers for Sogo SS-4370, HTC G2 and Lenovo ThinkPad Tablet"
tickets: [1019050, 1017010]
- title: "Add search to the Manage tags/series/etc. dialogs"
- title: "News download: Add support for images embedded in the HTML"
- title: "calibre -s now waits for calibre to shutdown"
bug fixes:
- title: "Workaround for iTunes breaking scripting with version 10.6.3 on OS X."
tickets: [1012243]
- title: "EPUB Input: When there are multiple elements of the same type in the OPF guide, use the first rather than the last element."
- title: "Windows: Disable the new UI style if the color depth of the desktop is less than 32 bits per pixel"
- title: "ISBNDB metadata plugin: Return results even though they have no comments"
- title: "More robust handling of EINTR during IPC"
- title: "Metadata download: Support for amazon's new results page markup"
- title: "EPUB Output: Fix a bug that could cause corrupted output when doing an EPUB/OEB to EPUB conversion if the input EPUB had multiple files with the same name"
- title: "KF8 Output: Fix a couple of bugs that could lead to generation of invalid KF8 files."
tickets: [1016672]
improved recipes:
- ABC Digital
- O Globo
new recipes:
- title: Sign of the Times and New Statesman
author: TerminalVeracity
- title: CT24
author: zoidozoido
- title: SmileZilla
author: Will
- title: Marketing Sensoriale
author: NotTaken
- version: 0.8.57
date: 2012-06-22

View File

@ -195,7 +195,7 @@ It can get tiresome to keep re-adding a plugin to calibre to test small changes.
Once you've located the zip file of your plugin you can then directly update it with your changes instead of re-adding it each time. To do so from the command line, in the directory that contains your plugin source code, use::
calibre -s; zip -R /path/to/plugin/zip/file.zip *; calibre
calibre -s; zip -r /path/to/plugin/zip/file.zip *; calibre
This will shutdown a running calibre. Wait for the shutdown to complete, then update your plugin files and relaunch calibre.
It relies on the freely available zip command line tool.

12
recipes/ct24.recipe Normal file
View File

@ -0,0 +1,12 @@
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1339974788(BasicNewsRecipe):
    """Fetch the main news feeds of CT24, the Czech Television news channel.

    Articles are pulled from the ceskatelevize.cz RSS feeds and cleaned up
    automatically; inline images are stripped from the article bodies.
    """
    title = u'\u010cT24'
    __author__ = 'zoidozoido'
    language = 'cs'

    # Download behaviour: only yesterday's news, generous per-feed cap,
    # and let calibre's heuristic cleanup extract the article body.
    oldest_article = 1
    max_articles_per_feed = 100
    auto_cleanup = True

    # Images are dropped entirely from the article markup.
    remove_tags = [dict(name='img')]

    feeds = [
        (u'Hlavn\xed zpr\xe1vy', u'http://www.ceskatelevize.cz/ct24/rss/hlavni-zpravy/'),
        (u'Dom\xe1c\xed', u'http://www.ceskatelevize.cz/ct24/rss/domaci/'),
        (u'Sv\u011bt', u'http://www.ceskatelevize.cz/ct24/rss/svet/'),
        (u'Regiony', u'http://www.ceskatelevize.cz/ct24/rss/regiony/'),
        (u'Kultura', u'http://www.ceskatelevize.cz/ct24/rss/kultura/'),
        (u'Ekonomika', u'http://www.ceskatelevize.cz/ct24/rss/ekonomika/'),
        (u'Sport - hlavn\xed zpr\xe1vy', u'http://www.ceskatelevize.cz/ct4/rss/hlavni-zpravy/'),
        (u'OH 2012', u'http://www.ceskatelevize.cz/ct4/rss/oh-2012/'),
    ]

View File

@ -3,8 +3,8 @@ __license__ = 'GPL v3'
__copyright__ = '04 December 2010, desUBIKado'
__author__ = 'desUBIKado'
__description__ = 'Daily newspaper from Aragon'
__version__ = 'v0.04'
__date__ = '6, Januery 2011'
__version__ = 'v0.05'
__date__ = '5, Februery 2012'
'''
[url]http://www.heraldo.es/[/url]
'''
@ -38,7 +38,7 @@ class heraldo(BasicNewsRecipe):
keep_only_tags = [dict(name='div', attrs={'id':['dts','com']})]
remove_tags = [dict(name='a', attrs={'class':['com flo-r','enl-if','enl-df']}),
dict(name='div', attrs={'class':['brb-b-s con marg-btt','cnt-rel con']}),
dict(name='div', attrs={'class':['brb-b-s con marg-btt','cnt-rel con','col5-f1']}),
dict(name='form', attrs={'class':'form'}),
dict(name='ul', attrs={'id':['cont-tags','pag-1']})]
@ -72,6 +72,9 @@ class heraldo(BasicNewsRecipe):
preprocess_regexps = [
# To separate the comments with a blank line
# Para separar los comentarios con una linea en blanco
(re.compile(r'<div id="com"', re.DOTALL|re.IGNORECASE), lambda match: '<br><div id="com"')
]

View File

@ -0,0 +1,90 @@
__license__ = 'GPL v3'
'''
newstatesman.com
'''
from calibre.web.feeds.news import BasicNewsRecipe
class NewStatesman(BasicNewsRecipe):
    """Download the New Statesman (newstatesman.com) via its section RSS feeds.

    Keeps only the article node, trims social-bookmark chrome, deduplicates
    articles that appear in more than one feed, and uses the first image of
    each article as its TOC thumbnail.
    """
    title = 'New Statesman'
    language = 'en_GB'
    __author__ = "NotTaken"
    description = "Britain's Current Affairs & Politics Magazine (bi-weekly)"
    oldest_article = 4.0
    no_stylesheets = True
    use_embedded_content = False
    remove_empty_feeds = True

    keep_only_tags = [dict(attrs={'class': 'node'})]
    remove_tags_after = [
        dict(attrs={'class': lambda x: x and 'content123' in x})
    ]
    remove_tags = [
        dict(attrs={'class': lambda x: x and 'links_bookmark' in x})
    ]

    extra_css = '''
        .title-main {font-size: x-large;}
        h2 { font-size: small;  }
        h1 { font-size: medium;  }
        .field-field-nodeimage-title {
            font-size: small;
            color: #3C3C3C;
        }
        .link_col {
            font-size: x-small;
        }
    '''

    # NOTE: kept for backward compatibility with code that reads the attribute;
    # a mutable class attribute is shared by ALL instances, so get_article_url()
    # shadows it with a per-instance list on first use instead of appending here.
    processed_urls = []

    def populate_article_metadata(self, article, soup, first):
        # Use the first image of the article as the TOC thumbnail (only on
        # calibre versions that support add_toc_thumbnail).
        if first and hasattr(self, 'add_toc_thumbnail'):
            pic = soup.find('img')
            if pic is not None:
                self.add_toc_thumbnail(article, pic['src'])

    def get_article_url(self, article):
        """Return the article URL, or None for URLs already seen (dedup)."""
        url = BasicNewsRecipe.get_article_url(self, article)
        # Per-instance seen-list: setdefault on the instance __dict__ shadows
        # the shared class attribute the first time this method runs.
        seen = self.__dict__.setdefault('processed_urls', [])
        if url in seen:
            self.log('skipping duplicate article: %s' % article.title)
            return None
        seen.append(url)
        return url

    feeds = [
        (u'Politics',
         u'http://www.newstatesman.com/politics.rss'),
        (u'Business',
         u'http://www.newstatesman.com/business.rss'),
        (u'Economics',
         u'http://www.newstatesman.com/economics.rss'),
        (u'Culture',
         u'http://www.newstatesman.com/culture.rss'),
        (u'Media',
         u'http://www.newstatesman.com/media.rss'),
        (u'Books',
         u'http://www.newstatesman.com/taxonomy/term/feed/27'),
        (u'Life & Society',
         u'http://www.newstatesman.com/taxonomyfeed/11'),
        (u'World Affairs',
         u'http://www.newstatesman.com/world-affairs.rss'),
        (u'Sci-Tech',
         u'http://www.newstatesman.com/feeds/topics/sci-tech.rss'),
        (u'Others',
         u'http://www.newstatesman.com/feeds_allsite/site_feed.php'),
    ]

View File

@ -0,0 +1,33 @@
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup
class SignOfTheTimes(BasicNewsRecipe):
    """Download articles from sott.net's 'Signs' RSS feed.

    Each article page is reduced to just its <div class="article"> content,
    re-hosted in a minimal HTML skeleton, with <br> tags stripped out of
    image captions.
    """
    title = u'Sign of the Times'
    language = 'en'
    __author__ = 'TerminalVeracity'
    oldest_article = 31  # days
    max_articles_per_feed = 50
    use_embedded_content = False

    extra_css = """
        h2{font-size: large; margin: .2em 0; text-decoration: none;}
        .image-caption{font-size: medium; font-style:italic; margin: 0 0 1em 0;}
        .article-info{font-size: small; font-style:italic; margin: 0 0 .5em 0;}
        """

    # BUG FIX: BasicNewsRecipe's real option is 'no_stylesheets';
    # 'remove_stylesheets' is not part of the API and was silently ignored.
    # The misspelled attribute is kept so nothing that reads it breaks.
    no_stylesheets = True
    remove_stylesheets = True

    remove_tags = [
        dict(name='div', attrs={'class': ['article-icon', 'article-print', 'article-footer']}),
        dict(name='span', attrs={'class': ['tiny']}),
    ]

    feeds = [('Signs', 'http://www.sott.net/xml_engine/signs_rss'), ]

    def preprocess_html(self, soup):
        """Extract the article div into a fresh, minimal HTML document."""
        story = soup.find(name='div', attrs={'class': 'article'})
        if story is None:
            # Layout changed or not an article page: keep the page untouched
            # instead of crashing on body.insert(0, None).
            return soup
        soup = BeautifulSoup('<html><head><title>t</title></head><body></body></html>')
        body = soup.find(name='body')
        body.insert(0, story)
        # Drop forced line breaks inside image captions.
        for div in soup.findAll(attrs={'class': 'image-caption'}):
            for br in div.findAll('br'):
                br.extract()
        return soup

114
recipes/smilezilla.recipe Normal file
View File

@ -0,0 +1,114 @@
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.ptempfile import PersistentTemporaryFile
class SmileZilla(BasicNewsRecipe):
    """Daily jokes and funny stories from smilezilla.com.

    Both index pages (jokes and stories) contain all of the day's items,
    separated by <hr> tags.  Each item is presented to calibre as a separate
    'article' pointing at the same URL; the pages are fetched once, cached in
    temp files (articles_are_obfuscated), and preprocess_raw_html() slices out
    the Nth item on the Nth call via the per-page counter.
    """
    title = 'SmileZilla'
    language = 'en'
    __author__ = "Will"
    JOKES_INDEX = 'http://www.smilezilla.com/joke.do'
    STORIES_INDEX = 'http://www.smilezilla.com/story.do'
    description = 'Daily Jokes and funny stories'  # typo fix: was 'stoires'
    oldest_article = 1
    keep_only_tags = []
    no_stylesheets = True
    simultaneous_downloads = 1
    articles_are_obfuscated = True
    encoding = 'utf-8'
    # Single definition (the original assigned remove_tags twice; the first,
    # empty assignment was dead code).
    remove_tags = [dict(name='table')]

    # NOTE: class-level mutable state; fine for calibre's one-instance-per-run
    # model, but shared if the class were ever instantiated twice.
    counter = {JOKES_INDEX: 0, STORIES_INDEX: 0}
    cache = {}

    def cached_fetch(self, url):
        """Fetch *url* once; later calls re-read the cached temp file.

        Also rewrites site-relative <img src> links to absolute URLs so the
        images download correctly from the cached copy.
        """
        cache = self.cache
        if url in cache:
            with open(cache[url]) as f:
                html = f.read()
            return BeautifulSoup(html, fromEncoding=self.encoding)
        # BUG FIX: get_browser() is an instance method; the original called it
        # unbound on the class (BasicNewsRecipe.get_browser()), which raises a
        # TypeError. self.get_browser() works regardless of binding.
        br = self.get_browser()
        response = br.open(url)
        html = response.read()
        soup = BeautifulSoup(html, fromEncoding=self.encoding)
        for img in soup.findAll('img', src=True):
            if img['src'].startswith('/'):
                img['src'] = 'http://www.smilezilla.com' + img['src']
        pt = PersistentTemporaryFile('.html')
        pt.write(str(soup.html).encode(self.encoding))
        pt.close()
        cache[url] = pt.name
        return soup

    def _get_entry(self, soup):
        # The day's content lives inside the 'contentForm' form.
        return soup.find('form', attrs={'name': 'contentForm'})

    def _get_section_title(self, soup):
        # Page heading, used as the feed/section title.
        title_div = soup.find('div', attrs={'class': 'title'})
        return self.tag_to_string(title_div).strip()

    def parse_index(self):
        """Build the two sections; one pseudo-article per <hr>-separated item."""
        articles = []

        soup = self.cached_fetch(self.JOKES_INDEX)
        jokes_entry = self._get_entry(soup)
        section_title = self._get_section_title(soup)
        todays_jokes = []
        for i, hr in enumerate(jokes_entry.findAll('hr')):
            title = 'Joke ' + str(i + 1)
            todays_jokes.append({'title': title, 'url': self.JOKES_INDEX,
                                 'description': '', 'date': ''})
        articles.append((section_title, todays_jokes))

        soup = self.cached_fetch(self.STORIES_INDEX)
        entry = self._get_entry(soup)
        section_title = self._get_section_title(soup)
        todays_stories = []
        for i, hr in enumerate(entry.findAll('hr')):
            title = 'Story ' + str(i + 1)
            # Walk backwards from the <hr> looking for the story's <b> heading,
            # stopping if we reach the previous separator first.
            current = hr
            while True:
                current = current.findPrevious()
                if current is None:
                    break
                elif current.name == 'hr':
                    break
                elif current.name == 'b':
                    title = title + ': ' + self.tag_to_string(current)
                    break
            todays_stories.append({'title': title, 'url': self.STORIES_INDEX,
                                   'description': '', 'date': ''})
        articles.append((section_title, todays_stories))

        return articles

    def get_obfuscated_article(self, url):
        # Hand calibre the temp file already produced by cached_fetch().
        return self.cache[url]

    def preprocess_raw_html(self, raw_html, url):
        """Return only the Nth <hr>-separated item of the cached page.

        *url* here is the temp-file path; it is mapped back to the index page
        it was cached from, and that page's counter selects which item this
        call should emit.
        """
        url = self.JOKES_INDEX if (self.cache[self.JOKES_INDEX] in url) else self.STORIES_INDEX
        count = self.counter[url] + 1
        self.counter[url] = count
        soup = self.index_to_soup(raw_html)
        entry = self._get_entry(soup)
        soup2 = BeautifulSoup('<html><head></head><body></body></html>')
        body = soup2.find('body')
        entries = str(entry).split('<hr />')
        body.insert(0, entries[count - 1])
        return str(soup2)

View File

@ -18,7 +18,7 @@ class TheAge(BasicNewsRecipe):
publication_type = 'newspaper'
__author__ = 'Matthew Briggs'
language = 'en_AU'
max_articles_per_feed = 1000
recursions = 0
remove_tags = [dict(name=['table', 'script', 'noscript', 'style']), dict(name='a', attrs={'href':'/'}), dict(name='a', attrs={'href':'/text/'})]
@ -47,18 +47,19 @@ class TheAge(BasicNewsRecipe):
if url.startswith('/'):
url = 'http://www.theage.com.au' + url
title = self.tag_to_string(tag)
sections[section].append({
'title': title,
'url' : url,
'date' : strftime('%a, %d %b'),
'description' : '',
'content' : '',
})
if url != 'http://www.theage.com.au':
sections[section].append({
'title': title,
'url' : url,
'date' : strftime('%a, %d %b'),
'description' : '',
'content' : '',
})
feeds = []
# Insert feeds in specified order, if available
feedSort = [ 'National', 'World', 'Opinion', 'Columns', 'Business', 'Sport', 'Entertainment' ]
for i in feedSort:
if i in sections:
@ -68,12 +69,12 @@ class TheAge(BasicNewsRecipe):
for i in feedSort:
del sections[i]
# Append what is left over...
for i in sections:
feeds.append((i,sections[i]))
return feeds
def get_cover_url(self):
@ -88,9 +89,9 @@ class TheAge(BasicNewsRecipe):
return None
def preprocess_html(self,soup):
for p in soup.findAll('p'):
# Collapse the paragraph by joining the non-tag contents
contents = [i for i in p.contents if isinstance(i,unicode)]
@ -103,10 +104,10 @@ class TheAge(BasicNewsRecipe):
p.extract()
continue
# Shrink the fine print font
# Shrink the fine print font
if contents=='This material is subject to copyright and any unauthorised use, copying or mirroring is prohibited.':
p['style'] = 'font-size:small'
continue
continue
return soup

View File

@ -2,8 +2,8 @@
__license__ = 'GPL v3'
__copyright__ = '4 February 2011, desUBIKado'
__author__ = 'desUBIKado'
__version__ = 'v0.07'
__date__ = '13, November 2011'
__version__ = 'v0.08'
__date__ = '30, June 2012'
'''
http://www.weblogssl.com/
'''
@ -33,6 +33,7 @@ class weblogssl(BasicNewsRecipe):
feeds = [
(u'Xataka', u'http://feeds.weblogssl.com/xataka2')
,(u'Xataka Smart Home', u'http://feeds.weblogssl.com/Xatakahome')
,(u'Xataka Mexico', u'http://feeds.weblogssl.com/xatakamx')
,(u'Xataka M\xf3vil', u'http://feeds.weblogssl.com/xatakamovil')
,(u'Xataka Android', u'http://feeds.weblogssl.com/xatakandroid')
@ -107,12 +108,14 @@ class weblogssl(BasicNewsRecipe):
# Para obtener la url original del articulo a partir de la de "feedsportal"
# El siguiente código es gracias al usuario "bosplans" de www.mobileread.com
# http://www.mobileread.com/forums/showthread.php?t=130297
# http://www.mobileread.com/forums/sho...d.php?t=130297
def get_article_url(self, article):
link = article.get('link', None)
if link is None:
return article
if link.split('/')[-4]=="xataka2":
return article.get('feedburner_origlink', article.get('link', article.get('guid')))
if link.split('/')[-1]=="story01.htm":
link=link.split('/')[-2]
a=['0B','0C','0D','0E','0F','0G','0N' ,'0L0S','0A']
@ -121,6 +124,3 @@ class weblogssl(BasicNewsRecipe):
link=link.replace(a[i],b[i])
link="http://"+link
return link

View File

@ -35,12 +35,17 @@ class WallStreetJournal(BasicNewsRecipe):
remove_tags_before = dict(name='h1')
remove_tags = [
dict(id=["articleTabs_tab_article", "articleTabs_tab_comments", "articleTabs_tab_interactive","articleTabs_tab_video","articleTabs_tab_map","articleTabs_tab_slideshow"]),
dict(id=["articleTabs_tab_article",
"articleTabs_tab_comments",
"articleTabs_tab_interactive","articleTabs_tab_video","articleTabs_tab_map","articleTabs_tab_slideshow",
"articleTabs_tab_quotes"]),
{'class':['footer_columns','network','insetCol3wide','interactive','video','slideshow','map','insettip','insetClose','more_in', "insetContent", 'articleTools_bottom', 'aTools', "tooltip", "adSummary", "nav-inline"]},
dict(name='div', attrs={'data-flash-settings':True}),
{'class':['insetContent embedType-interactive insetCol3wide','insetCol6wide','insettipUnit']},
dict(rel='shortcut icon'),
{'class':lambda x: x and 'sTools' in x},
{'class':lambda x: x and 'printSummary' in x},
{'class':lambda x: x and 'mostPopular' in x},
]
remove_tags_after = [dict(id="article_story_body"), {'class':"article story"},]

Binary file not shown.

Binary file not shown.

After

Width:  |  Height:  |  Size: 11 KiB

View File

@ -8,6 +8,7 @@ let g:syntastic_cpp_include_dirs = [
\'/usr/include/qt4/QtGui',
\'/usr/include/qt4',
\'src/qtcurve/common', 'src/qtcurve',
\'/usr/include/ImageMagick',
\]
let g:syntastic_c_include_dirs = g:syntastic_cpp_include_dirs

View File

@ -9,14 +9,14 @@ msgstr ""
"Project-Id-Version: calibre\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-11-25 14:01+0000\n"
"PO-Revision-Date: 2012-06-16 13:09+0000\n"
"Last-Translator: Paco Molinero <paco@byasl.com>\n"
"PO-Revision-Date: 2012-06-22 17:32+0000\n"
"Last-Translator: Jellby <Unknown>\n"
"Language-Team: Español; Castellano <>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2012-06-17 04:34+0000\n"
"X-Generator: Launchpad (build 15419)\n"
"X-Launchpad-Export-Date: 2012-06-23 05:01+0000\n"
"X-Generator: Launchpad (build 15461)\n"
#. name for aaa
msgid "Ghotuo"
@ -8016,7 +8016,7 @@ msgstr "Gbari"
#. name for gbz
msgid "Dari; Zoroastrian"
msgstr "Dari; Zoroastrian"
msgstr "Darí zoroástrico"
#. name for gcc
msgid "Mali"
@ -8048,7 +8048,7 @@ msgstr "Francés criollo guyanés"
#. name for gct
msgid "German; Colonia Tovar"
msgstr "Alemán; Colonia Tovar"
msgstr "Alemán coloniero"
#. name for gda
msgid "Lohar; Gade"
@ -8372,7 +8372,7 @@ msgstr "Geji"
#. name for gjk
msgid "Koli; Kachi"
msgstr "Koli; Kachi"
msgstr "Koli kachi"
#. name for gjn
msgid "Gonja"
@ -8396,7 +8396,7 @@ msgstr "Gokana"
#. name for gkp
msgid "Kpelle; Guinea"
msgstr "Kpelle; Guinea"
msgstr "Kpelle de Guinea"
#. name for gla
msgid "Gaelic; Scottish"
@ -8480,7 +8480,7 @@ msgstr "Bajo alemán medio"
#. name for gmm
msgid "Gbaya-Mbodomo"
msgstr "Gbaya-Mbodomo"
msgstr "Gbaya-mbodomo"
#. name for gmn
msgid "Gimnime"
@ -8560,7 +8560,7 @@ msgstr "Gana"
#. name for gnr
msgid "Gureng Gureng"
msgstr "Gureng Gureng"
msgstr "Gureng gureng"
#. name for gnt
msgid "Guntai"
@ -8616,7 +8616,7 @@ msgstr "Gobasi"
#. name for goj
msgid "Gowlan"
msgstr "Gowlan"
msgstr "Gowlano"
#. name for gok
msgid "Gowli"
@ -8628,7 +8628,7 @@ msgstr "Gola"
#. name for gom
msgid "Konkani; Goan"
msgstr "Konkani; Goan"
msgstr "konkaní de Goa"
#. name for gon
msgid "Gondi"
@ -8636,7 +8636,7 @@ msgstr "Gondi"
#. name for goo
msgid "Gone Dau"
msgstr "Gone Dau"
msgstr "Gone dau"
#. name for gop
msgid "Yeretuar"
@ -8652,7 +8652,7 @@ msgstr "Gorontalo"
#. name for gos
msgid "Gronings"
msgstr "Gronings"
msgstr "Gronings"
#. name for got
msgid "Gothic"
@ -8676,11 +8676,11 @@ msgstr "Goundo"
#. name for goz
msgid "Gozarkhani"
msgstr "Gozarkhani"
msgstr "Gozarjaní"
#. name for gpa
msgid "Gupa-Abawa"
msgstr "Gupa-Abawa"
msgstr "Gupa-abawa"
#. name for gpn
msgid "Taiap"
@ -8704,7 +8704,7 @@ msgstr "Gor"
#. name for gra
msgid "Garasia; Rajput"
msgstr "Garasia; Rajput"
msgstr "Garasia rajput"
#. name for grb
msgid "Grebo"
@ -8716,7 +8716,7 @@ msgstr "Griego antiguo (hasta 1453)"
#. name for grd
msgid "Guruntum-Mbaaru"
msgstr "Guruntum-Mbaaru"
msgstr "Guruntum-mbaaru"
#. name for grg
msgid "Madi"
@ -8736,7 +8736,7 @@ msgstr "Grebo meridional"
#. name for grm
msgid "Kota Marudu Talantang"
msgstr "Kota Marudu Talantang"
msgstr "Talantang de Kota Marudu"
#. name for grn
msgid "Guarani"
@ -8780,7 +8780,7 @@ msgstr "Guriaso"
#. name for gry
msgid "Grebo; Barclayville"
msgstr "Grebo; Barclayville"
msgstr "Grebo de Barclayville"
#. name for grz
msgid "Guramalum"
@ -8808,7 +8808,7 @@ msgstr "Gusan"
#. name for gso
msgid "Gbaya; Southwest"
msgstr "Gbaya; suodeste"
msgstr "Gbaya suroccidental"
#. name for gsp
msgid "Wasembo"
@ -8844,7 +8844,7 @@ msgstr "Wayuu"
#. name for gud
msgid "Dida; Yocoboué"
msgstr "Dida; Yocoboué"
msgstr "Dida de Yocoboué"
#. name for gue
msgid "Gurinji"
@ -8908,7 +8908,7 @@ msgstr "Lengua de signos guineana"
#. name for gut
msgid "Maléku Jaíka"
msgstr "Maléku Jaíka"
msgstr "Maléku jaíka"
#. name for guu
msgid "Yanomamö"
@ -8960,15 +8960,15 @@ msgstr "Gurmana"
#. name for gvn
msgid "Kuku-Yalanji"
msgstr "Kuku-Yalanji"
msgstr "Kuku-yalanji"
#. name for gvo
msgid "Gavião Do Jiparaná"
msgstr "Gavião Do Jiparaná"
msgstr "Gavião de Jiparaná"
#. name for gvp
msgid "Gavião; Pará"
msgstr "Gavião; Pará"
msgstr "Gavião de Pará"
#. name for gvr
msgid "Gurung; Western"
@ -9028,7 +9028,7 @@ msgstr "Gwere"
#. name for gwt
msgid "Gawar-Bati"
msgstr "Gawar-Bati"
msgstr "Gawar-bati"
#. name for gwu
msgid "Guwamu"
@ -9048,7 +9048,7 @@ msgstr "Wè meridional"
#. name for gya
msgid "Gbaya; Northwest"
msgstr "Gbaya; moroeste"
msgstr "Gbaya noroccidental"
#. name for gyb
msgid "Garus"
@ -9296,7 +9296,7 @@ msgstr "Haigwai"
#. name for hhi
msgid "Hoia Hoia"
msgstr "Hoia Hoia"
msgstr "Hoia hoia"
#. name for hhr
msgid "Kerak"
@ -9320,7 +9320,7 @@ msgstr "Hidatsa"
#. name for hif
msgid "Hindi; Fiji"
msgstr "Hindi; Fiji"
msgstr "Hindi de Fiyi"
#. name for hig
msgid "Kamwe"
@ -9340,7 +9340,7 @@ msgstr "Hijuk"
#. name for hik
msgid "Seit-Kaitetu"
msgstr "Seit-Kaitetu"
msgstr "Seit-kaitetu"
#. name for hil
msgid "Hiligaynon"
@ -9384,7 +9384,7 @@ msgstr "Hunde"
#. name for hkk
msgid "Hunjara-Kaina Ke"
msgstr "Hunjara-Kaina Ke"
msgstr "Hunjara-kaina ke"
#. name for hks
msgid "Hong Kong Sign Language"
@ -9400,7 +9400,7 @@ msgstr "Halbi"
#. name for hld
msgid "Halang Doan"
msgstr "Halang Doan"
msgstr "Halang doan"
#. name for hle
msgid "Hlersu"
@ -9408,7 +9408,7 @@ msgstr "Hlersu"
#. name for hlt
msgid "Nga La"
msgstr "Nga La"
msgstr "Nga la"
#. name for hlu
msgid "Luwian; Hieroglyphic"
@ -9424,7 +9424,7 @@ msgstr ""
#. name for hmc
msgid "Miao; Central Huishui"
msgstr "Miao; Central Huishui"
msgstr "Miao de Huishui central"
#. name for hmd
msgid "Miao; Large Flowery"
@ -9460,7 +9460,7 @@ msgstr "Maek"
#. name for hml
msgid "Miao; Luopohe"
msgstr "Miao; Luopohe"
msgstr "Miao del río Luobo"
#. name for hmm
msgid "Miao; Central Mashan"
@ -9472,7 +9472,7 @@ msgstr "Hmong"
#. name for hmo
msgid "Hiri Motu"
msgstr "Hiri Motu"
msgstr "Hiri motu"
#. name for hmp
msgid "Miao; Northern Mashan"
@ -9500,7 +9500,7 @@ msgstr "Hamap"
#. name for hmv
msgid "Hmong Dô"
msgstr "Hmong Dô"
msgstr "Hmong dô"
#. name for hmw
msgid "Miao; Western Mashan"
@ -9536,7 +9536,7 @@ msgstr "Hani"
#. name for hnj
msgid "Hmong Njua"
msgstr "Hmong Njua"
msgstr "Hmong njua"
#. name for hnn
msgid "Hanunoo"
@ -12396,7 +12396,7 @@ msgstr ""
#. name for knn
msgid "Konkani (individual language)"
msgstr "Konkani (idioma individual)"
msgstr "Konkaní (idioma individual)"
#. name for kno
msgid "Kono (Sierra Leone)"
@ -12484,7 +12484,7 @@ msgstr ""
#. name for kok
msgid "Konkani (macrolanguage)"
msgstr "Konkani (macrolengua)"
msgstr "Konkaní (macrolengua)"
#. name for kol
msgid "Kol (Papua New Guinea)"
@ -13020,7 +13020,7 @@ msgstr ""
#. name for ktr
msgid "Kota Marudu Tinagas"
msgstr ""
msgstr "Tinagas de Kota Marudu"
#. name for kts
msgid "Muyu; South"
@ -13252,7 +13252,7 @@ msgstr ""
#. name for kvx
msgid "Koli; Parkari"
msgstr ""
msgstr "Koli parkari"
#. name for kvy
msgid "Karen; Yintale"
@ -13424,7 +13424,7 @@ msgstr ""
#. name for kxp
msgid "Koli; Wadiyara"
msgstr ""
msgstr "Koli wadiyara"
#. name for kxq
msgid "Kanum; Smärky"
@ -21184,7 +21184,7 @@ msgstr ""
#. name for prd
msgid "Parsi-Dari"
msgstr ""
msgstr "Persa dari"
#. name for pre
msgid "Principense"
@ -23716,7 +23716,7 @@ msgstr "Spokane"
#. name for spp
msgid "Senoufo; Supyire"
msgstr "Senoufo; Supyire"
msgstr "Senoufo supyire"
#. name for spq
msgid "Spanish; Loreto-Ucayali"
@ -23828,7 +23828,7 @@ msgstr "Siriano"
#. name for srk
msgid "Serudung Murut"
msgstr "Serudung Murut"
msgstr "Serudung murut"
#. name for srl
msgid "Isirawa"
@ -23836,11 +23836,11 @@ msgstr "Isirawa"
#. name for srm
msgid "Saramaccan"
msgstr "Saramaccan"
msgstr "Saramacano"
#. name for srn
msgid "Sranan Tongo"
msgstr "Sranan Tongo"
msgstr "Sranan tongo"
#. name for sro
msgid "Sardinian; Campidanese"
@ -23904,7 +23904,7 @@ msgstr "Siroi"
#. name for sse
msgid "Sama; Bangingih"
msgstr "Sama; Bangingih"
msgstr "Sama banguingui"
#. name for ssf
msgid "Thao"
@ -28868,7 +28868,7 @@ msgstr ""
#. name for xpe
msgid "Kpelle; Liberia"
msgstr ""
msgstr "Kpelle de Liberia"
#. name for xpg
msgid "Phrygian"
@ -29364,7 +29364,7 @@ msgstr ""
#. name for ybe
msgid "Yugur; West"
msgstr ""
msgstr "Yugur occidental"
#. name for ybh
msgid "Yakha"
@ -29524,7 +29524,7 @@ msgstr "Árabe judeo-iraquí"
#. name for yhl
msgid "Phowa; Hlepho"
msgstr ""
msgstr "Phowa hlepho"
#. name for yia
msgid "Yinggarda"
@ -29556,7 +29556,7 @@ msgstr ""
#. name for yik
msgid "Lalo; Dongshanba"
msgstr ""
msgstr "Lalo dongshanba"
#. name for yil
msgid "Yindjilandji"
@ -29748,7 +29748,7 @@ msgstr "Yamna"
#. name for ymo
msgid "Yangum Mon"
msgstr "Yangum Mon"
msgstr "Yangum mon"
#. name for ymp
msgid "Yamap"
@ -29756,7 +29756,7 @@ msgstr "Yamap"
#. name for ymq
msgid "Muji; Qila"
msgstr "Muji; Qila"
msgstr "Muji qila"
#. name for ymr
msgid "Malasar"
@ -29768,7 +29768,7 @@ msgstr "Misiano"
#. name for ymt
msgid "Mator-Taygi-Karagas"
msgstr "Mator-Taygi-Karagas"
msgstr "Mator-taygi-karagas"
#. name for ymx
msgid "Muji; Northern"
@ -29800,7 +29800,7 @@ msgstr ""
#. name for ynk
msgid "Yupik; Naukan"
msgstr ""
msgstr "Yupik naukan"
#. name for ynl
msgid "Yangulam"
@ -29872,7 +29872,7 @@ msgstr "Phala"
#. name for ypb
msgid "Phowa; Labo"
msgstr "Phowa; Labo"
msgstr "Phowa labo"
#. name for ypg
msgid "Phola"
@ -29888,7 +29888,7 @@ msgstr ""
#. name for ypn
msgid "Phowa; Ani"
msgstr ""
msgstr "Phowa ani"
#. name for ypo
msgid "Phola; Alo"
@ -29940,7 +29940,7 @@ msgstr "Yarawata"
#. name for ysc
msgid "Yassic"
msgstr "Yassic"
msgstr "Jásico"
#. name for ysd
msgid "Samatao"
@ -29964,11 +29964,11 @@ msgstr "Lolopo meridional"
#. name for ysr
msgid "Yupik; Sirenik"
msgstr "Yupik; Sirenik"
msgstr "Yupik sirenik"
#. name for yss
msgid "Yessan-Mayo"
msgstr "Yessan-Mayo"
msgstr "Yessan-mayo"
#. name for ysy
msgid "Sanie"
@ -29988,11 +29988,11 @@ msgstr "Thopho"
#. name for ytw
msgid "Yout Wam"
msgstr "Yout Wam"
msgstr "Yout wam"
#. name for yua
msgid "Maya; Yucatec"
msgstr "Maya; Yucateco"
msgstr "Maya yucateco"
#. name for yub
msgid "Yugambal"
@ -30012,7 +30012,7 @@ msgstr "Chino yuè"
#. name for yuf
msgid "Havasupai-Walapai-Yavapai"
msgstr "Havasupai-Walapai-Yavapai"
msgstr "Havasupai-walapai-yavapai"
#. name for yug
msgid "Yug"
@ -30024,7 +30024,7 @@ msgstr "Yurutí"
#. name for yuj
msgid "Karkar-Yuri"
msgstr "Karkar-Yuri"
msgstr "Karkar-yuri"
#. name for yuk
msgid "Yuki"
@ -30072,7 +30072,7 @@ msgstr "Yukaghir meridional"
#. name for yuy
msgid "Yugur; East"
msgstr "Yugur; este"
msgstr "Yugur oriental"
#. name for yuz
msgid "Yuracare"
@ -30100,7 +30100,7 @@ msgstr "Yawanawa"
#. name for ywq
msgid "Yi; Wuding-Luquan"
msgstr "Yi; Wuding-Luquan"
msgstr "Yi wuding-luquan"
#. name for ywr
msgid "Yawuru"
@ -30108,11 +30108,11 @@ msgstr "Yawuru"
#. name for ywt
msgid "Lalo; Xishanba"
msgstr "Lalo; Xishanba"
msgstr "Lalo xishanba"
#. name for ywu
msgid "Nasu; Wumeng"
msgstr "Nasu; Wumeng"
msgstr "Nasu wumeng"
#. name for yww
msgid "Yawarawarga"
@ -30128,7 +30128,7 @@ msgstr "Ayizi"
#. name for yzg
msgid "Buyang; E'ma"
msgstr "Buyang; E'ma"
msgstr "Buyang e'ma"
#. name for yzk
msgid "Zokhuo"
@ -30136,11 +30136,11 @@ msgstr "Zokhuo"
#. name for zaa
msgid "Zapotec; Sierra de Juárez"
msgstr "Zapoteco; Sierra de Juárez"
msgstr "Zapoteco de Sierra de Juárez"
#. name for zab
msgid "Zapotec; San Juan Guelavía"
msgstr "Zapoteco; San Juan Guelavía"
msgstr "Zapoteco de San Juan Guelavía"
#. name for zac
msgid "Zapotec; Ocotlán"
@ -30260,7 +30260,7 @@ msgstr ""
#. name for zch
msgid "Zhuang; Central Hongshuihe"
msgstr "Zhuang; Central Hongshuihe"
msgstr "Zhuang de Hongshuihe central"
#. name for zdj
msgid "Comorian; Ngazidja"
@ -30772,7 +30772,7 @@ msgstr ""
#. name for ztt
msgid "Zapotec; Tejalapan"
msgstr "Zapoteco; Tejalapan"
msgstr "Zapoteco de Tejalapan"
#. name for ztu
msgid "Zapotec; Güilá"

View File

@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
numeric_version = (0, 8, 57)
numeric_version = (0, 8, 58)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

View File

@ -101,6 +101,7 @@ class ANDROID(USBMS):
0x685b : [0x0400, 0x0226],
0x685e : [0x0400],
0x6860 : [0x0400],
0x6863 : [0x226],
0x6877 : [0x0400],
0x689e : [0x0400],
0xdeed : [0x0222],

View File

@ -212,11 +212,8 @@ class ITUNES(DriverBase):
"for instructions on using 'Connect to iTunes'")
ITUNES_SANDBOX_LOCKOUT_MESSAGE = _(
'<p>Unable to communicate with iTunes.</p>'
"<p>As of iTunes version 10.6.3, application 'sandboxing' "
'was implemented by Apple, disabling inter-application communications '
'between iTunes and third-party applications.</p>'
'<p>Refer to the forum post '
'<a href="http://www.mobileread.com/forums/showpost.php?p=2113958&postcount=3">Apple implements sandboxing for iTunes 10.6.3</a> '
'<p>Refer to this '
'<a href="http://www.mobileread.com/forums/showpost.php?p=2113958&postcount=3">forum post</a> '
'for more information.</p>'
'<p></p>')
@ -232,8 +229,9 @@ class ITUNES(DriverBase):
# 0x12a0 iPhone 4S
# 0x12a2 iPad2 (GSM)
# 0x12a3 iPad2 (CDMA)
# 0x12a6 iPad3 (GSM)
VENDOR_ID = [0x05ac]
PRODUCT_ID = [0x1292,0x1293,0x1294,0x1297,0x1299,0x129a,0x129f,0x12a2,0x12a3]
PRODUCT_ID = [0x1292,0x1293,0x1294,0x1297,0x1299,0x129a,0x129f,0x12a2,0x12a3,0x12a6]
BCD = [0x01]
# Plugboard ID
@ -2362,6 +2360,8 @@ class ITUNES(DriverBase):
if isosx:
import appscript
as_name = appscript.__name__
as_version = appscript.__version__
'''
Launch iTunes if not already running
'''
@ -2371,7 +2371,7 @@ class ITUNES(DriverBase):
if DEBUG:
logger().info( "ITUNES:_launch_iTunes(): Launching iTunes" )
try:
self.iTunes = iTunes= appscript.app('iTunes', hide=True)
self.iTunes = iTunes = appscript.app('iTunes', hide=True)
except:
self.iTunes = None
raise UserFeedback(' ITUNES._launch_iTunes(): unable to find installed iTunes', details=None, level=UserFeedback.WARN)
@ -2383,19 +2383,26 @@ class ITUNES(DriverBase):
self.initial_status = 'already running'
'''
Test OSA. If we can't get the app name, we can't talk to iTunes.
As of iTunes 10.6.3 (June 2012), sandboxing was implemented disabling OSA
interapp communications.
Test OSA communication with iTunes.
If unable to communicate with iTunes, set self.iTunes to None, then
report to user in open()
'''
as_binding = "dynamic"
try:
# Try dynamic binding - works with iTunes <= 10.6.1
foo = self.iTunes.name()
except:
self.iTunes = None
if DEBUG:
logger().info(" unable to communicate with iTunes, raising dialog to user")
return
# Try static binding
import itunes
self.iTunes = appscript.app('iTunes',terms=itunes)
try:
foo = self.iTunes.name()
as_binding = "static"
except:
self.iTunes = None
if DEBUG:
logger().info(" unable to communicate with iTunes via %s %s using any binding" % (as_name, as_version))
return
'''
# Read the current storage path for iTunes media
@ -2415,6 +2422,7 @@ class ITUNES(DriverBase):
logger().info(" [OSX %s - %s (%s), driver version %d.%d.%d]" %
(self.iTunes.name(), self.iTunes.version(), self.initial_status,
self.version[0],self.version[1],self.version[2]))
logger().info(" communicating with iTunes via %s %s using %s binding" % (as_name, as_version, as_binding))
logger().info(" calibre_library_path: %s" % self.calibre_library_path)
if iswindows:

View File

@ -0,0 +1,280 @@
version = 1.1
path = '/Applications/iTunes.app'
classes = \
[('print_settings', 'pset'),
('application', 'capp'),
('artwork', 'cArt'),
('audio_CD_playlist', 'cCDP'),
('audio_CD_track', 'cCDT'),
('browser_window', 'cBrW'),
('device_playlist', 'cDvP'),
('device_track', 'cDvT'),
('encoder', 'cEnc'),
('EQ_preset', 'cEQP'),
('EQ_window', 'cEQW'),
('file_track', 'cFlT'),
('folder_playlist', 'cFoP'),
('item', 'cobj'),
('library_playlist', 'cLiP'),
('playlist', 'cPly'),
('playlist_window', 'cPlW'),
('radio_tuner_playlist', 'cRTP'),
('shared_track', 'cShT'),
('source', 'cSrc'),
('track', 'cTrk'),
('URL_track', 'cURT'),
('user_playlist', 'cUsP'),
('visual', 'cVis'),
('window', 'cwin')]
enums = \
[('track_listing', 'kTrk'),
('album_listing', 'kAlb'),
('cd_insert', 'kCDi'),
('standard', 'lwst'),
('detailed', 'lwdt'),
('stopped', 'kPSS'),
('playing', 'kPSP'),
('paused', 'kPSp'),
('fast_forwarding', 'kPSF'),
('rewinding', 'kPSR'),
('off', 'kRpO'),
('one', 'kRp1'),
('all', 'kAll'),
('small', 'kVSS'),
('medium', 'kVSM'),
('large', 'kVSL'),
('library', 'kLib'),
('iPod', 'kPod'),
('audio_CD', 'kACD'),
('MP3_CD', 'kMCD'),
('device', 'kDev'),
('radio_tuner', 'kTun'),
('shared_library', 'kShd'),
('unknown', 'kUnk'),
('albums', 'kSrL'),
('artists', 'kSrR'),
('composers', 'kSrC'),
('displayed', 'kSrV'),
('songs', 'kSrS'),
('none', 'kNon'),
('Books', 'kSpA'),
('folder', 'kSpF'),
('Genius', 'kSpG'),
('iTunes_U', 'kSpU'),
('Library', 'kSpL'),
('Movies', 'kSpI'),
('Music', 'kSpZ'),
('Party_Shuffle', 'kSpS'),
('Podcasts', 'kSpP'),
('Purchased_Music', 'kSpM'),
('TV_Shows', 'kSpT'),
('movie', 'kVdM'),
('music_video', 'kVdV'),
('TV_show', 'kVdT'),
('user', 'kRtU'),
('computed', 'kRtC')]
properties = \
[('copies', 'lwcp'),
('collating', 'lwcl'),
('starting_page', 'lwfp'),
('ending_page', 'lwlp'),
('pages_across', 'lwla'),
('pages_down', 'lwld'),
('error_handling', 'lweh'),
('requested_print_time', 'lwqt'),
('printer_features', 'lwpf'),
('fax_number', 'faxn'),
('target_printer', 'trpr'),
('current_encoder', 'pEnc'),
('current_EQ_preset', 'pEQP'),
('current_playlist', 'pPla'),
('current_stream_title', 'pStT'),
('current_stream_URL', 'pStU'),
('current_track', 'pTrk'),
('current_visual', 'pVis'),
('EQ_enabled', 'pEQ '),
('fixed_indexing', 'pFix'),
('frontmost', 'pisf'),
('full_screen', 'pFSc'),
('name', 'pnam'),
('mute', 'pMut'),
('player_position', 'pPos'),
('player_state', 'pPlS'),
('selection', 'sele'),
('sound_volume', 'pVol'),
('version', 'vers'),
('visuals_enabled', 'pVsE'),
('visual_size', 'pVSz'),
('data', 'pPCT'),
('description', 'pDes'),
('downloaded', 'pDlA'),
('format', 'pFmt'),
('kind', 'pKnd'),
('raw_data', 'pRaw'),
('artist', 'pArt'),
('compilation', 'pAnt'),
('composer', 'pCmp'),
('disc_count', 'pDsC'),
('disc_number', 'pDsN'),
('genre', 'pGen'),
('year', 'pYr '),
('location', 'pLoc'),
('minimized', 'pMin'),
('view', 'pPly'),
('band_1', 'pEQ1'),
('band_2', 'pEQ2'),
('band_3', 'pEQ3'),
('band_4', 'pEQ4'),
('band_5', 'pEQ5'),
('band_6', 'pEQ6'),
('band_7', 'pEQ7'),
('band_8', 'pEQ8'),
('band_9', 'pEQ9'),
('band_10', 'pEQ0'),
('modifiable', 'pMod'),
('preamp', 'pEQA'),
('update_tracks', 'pUTC'),
('container', 'ctnr'),
('id', 'ID '),
('index', 'pidx'),
('persistent_ID', 'pPIS'),
('duration', 'pDur'),
('parent', 'pPlP'),
('shuffle', 'pShf'),
('size', 'pSiz'),
('song_repeat', 'pRpt'),
('special_kind', 'pSpK'),
('time', 'pTim'),
('visible', 'pvis'),
('capacity', 'capa'),
('free_space', 'frsp'),
('album', 'pAlb'),
('album_artist', 'pAlA'),
('album_rating', 'pAlR'),
('album_rating_kind', 'pARk'),
('bit_rate', 'pBRt'),
('bookmark', 'pBkt'),
('bookmarkable', 'pBkm'),
('bpm', 'pBPM'),
('category', 'pCat'),
('comment', 'pCmt'),
('database_ID', 'pDID'),
('date_added', 'pAdd'),
('enabled', 'enbl'),
('episode_ID', 'pEpD'),
('episode_number', 'pEpN'),
('EQ', 'pEQp'),
('finish', 'pStp'),
('gapless', 'pGpl'),
('grouping', 'pGrp'),
('long_description', 'pLds'),
('lyrics', 'pLyr'),
('modification_date', 'asmo'),
('played_count', 'pPlC'),
('played_date', 'pPlD'),
('podcast', 'pTPc'),
('rating', 'pRte'),
('rating_kind', 'pRtk'),
('release_date', 'pRlD'),
('sample_rate', 'pSRt'),
('season_number', 'pSeN'),
('shufflable', 'pSfa'),
('skipped_count', 'pSkC'),
('skipped_date', 'pSkD'),
('show', 'pShw'),
('sort_album', 'pSAl'),
('sort_artist', 'pSAr'),
('sort_album_artist', 'pSAA'),
('sort_name', 'pSNm'),
('sort_composer', 'pSCm'),
('sort_show', 'pSSN'),
('start', 'pStr'),
('track_count', 'pTrC'),
('track_number', 'pTrN'),
('unplayed', 'pUnp'),
('video_kind', 'pVdK'),
('volume_adjustment', 'pAdj'),
('address', 'pURL'),
('shared', 'pShr'),
('smart', 'pSmt'),
('bounds', 'pbnd'),
('closeable', 'hclb'),
('collapseable', 'pWSh'),
('collapsed', 'wshd'),
('position', 'ppos'),
('resizable', 'prsz'),
('zoomable', 'iszm'),
('zoomed', 'pzum')]
elements = \
[('artworks', 'cArt'),
('audio_CD_playlists', 'cCDP'),
('audio_CD_tracks', 'cCDT'),
('browser_windows', 'cBrW'),
('device_playlists', 'cDvP'),
('device_tracks', 'cDvT'),
('encoders', 'cEnc'),
('EQ_presets', 'cEQP'),
('EQ_windows', 'cEQW'),
('file_tracks', 'cFlT'),
('folder_playlists', 'cFoP'),
('items', 'cobj'),
('library_playlists', 'cLiP'),
('playlists', 'cPly'),
('playlist_windows', 'cPlW'),
('radio_tuner_playlists', 'cRTP'),
('shared_tracks', 'cShT'),
('sources', 'cSrc'),
('tracks', 'cTrk'),
('URL_tracks', 'cURT'),
('user_playlists', 'cUsP'),
('visuals', 'cVis'),
('windows', 'cwin'),
('application', 'capp'),
('print_settings', 'pset')]
commands = \
[('set', 'coresetd', [('to', 'data')]),
('exists', 'coredoex', []),
('move', 'coremove', [('to', 'insh')]),
('subscribe', 'hookpSub', []),
('playpause', 'hookPlPs', []),
('download', 'hookDwnl', []),
('close', 'coreclos', []),
('open', 'aevtodoc', []),
('open_location', 'GURLGURL', []),
('quit', 'aevtquit', []),
('pause', 'hookPaus', []),
('make',
'corecrel',
[('new', 'kocl'), ('at', 'insh'), ('with_properties', 'prdt')]),
('duplicate', 'coreclon', [('to', 'insh')]),
('print_',
'aevtpdoc',
[('print_dialog', 'pdlg'),
('with_properties', 'prdt'),
('kind', 'pKnd'),
('theme', 'pThm')]),
('add', 'hookAdd ', [('to', 'insh')]),
('rewind', 'hookRwnd', []),
('play', 'hookPlay', [('once', 'POne')]),
('run', 'aevtoapp', []),
('resume', 'hookResu', []),
('updatePodcast', 'hookUpd1', []),
('next_track', 'hookNext', []),
('stop', 'hookStop', []),
('search', 'hookSrch', [('for_', 'pTrm'), ('only', 'pAre')]),
('updateAllPodcasts', 'hookUpdp', []),
('update', 'hookUpdt', []),
('previous_track', 'hookPrev', []),
('fast_forward', 'hookFast', []),
('count', 'corecnte', [('each', 'kocl')]),
('reveal', 'hookRevl', []),
('convert', 'hookConv', []),
('eject', 'hookEjct', []),
('back_track', 'hookBack', []),
('refresh', 'hookRfrs', []),
('delete', 'coredelo', [])]

View File

@ -71,7 +71,7 @@ class IPAPYRUS(TECLAST_K3):
FORMATS = ['epub', 'pdf', 'txt']
VENDOR_NAME = 'E_READER'
VENDOR_NAME = ['E_READER', 'EBOOKREA']
WINDOWS_MAIN_MEM = ''
class SOVOS(TECLAST_K3):

View File

@ -187,7 +187,9 @@ def calibre_cover(title, author_string, series_string=None,
lines.append(TextLine(series_string, author_size))
if logo_path is None:
logo_path = I('library.png')
return create_cover_page(lines, logo_path, output_format='jpg')
return create_cover_page(lines, logo_path, output_format='jpg',
texture_opacity=0.3, texture_data=I('cover_texture.png',
data=True))
UNIT_RE = re.compile(r'^(-*[0-9]*[.]?[0-9]*)\s*(%|em|ex|en|px|mm|cm|in|pt|pc)$')

View File

@ -152,27 +152,31 @@ class CHMInput(InputFormatPlugin):
#print "============================="
log.debug('Found %d section nodes' % len(chapters))
htmlpath = os.path.splitext(hhcpath)[0] + ".html"
f = open(htmlpath, 'wb')
if chapters:
f.write('<html><head><meta http-equiv="Content-type"'
' content="text/html;charset=UTF-8" /></head><body>\n')
path0 = chapters[0][1]
subpath = os.path.dirname(path0)
with open(htmlpath, 'wb') as f:
if chapters:
f.write('<html><head><meta http-equiv="Content-type"'
' content="text/html;charset=UTF-8" /></head><body>\n')
path0 = chapters[0][1]
subpath = os.path.dirname(path0)
base = os.path.dirname(f.name)
for chapter in chapters:
title = chapter[0]
rsrcname = os.path.basename(chapter[1])
rsrcpath = os.path.join(subpath, rsrcname)
# title should already be url encoded
url = "<br /><a href=" + rsrcpath + ">" + title + " </a>\n"
if isinstance(url, unicode):
url = url.encode('utf-8')
f.write(url)
for chapter in chapters:
title = chapter[0]
rsrcname = os.path.basename(chapter[1])
rsrcpath = os.path.join(subpath, rsrcname)
if (not os.path.exists(os.path.join(base, rsrcpath)) and
os.path.exists(os.path.join(base, chapter[1]))):
rsrcpath = chapter[1]
f.write("</body></html>")
else:
f.write(hhcdata)
f.close()
# title should already be url encoded
url = "<br /><a href=" + rsrcpath + ">" + title + " </a>\n"
if isinstance(url, unicode):
url = url.encode('utf-8')
f.write(url)
f.write("</body></html>")
else:
f.write(hhcdata)
return htmlpath

View File

@ -154,10 +154,11 @@ class ISBNDB(Source):
total_results = int(bl.get('total_results'))
shown_results = int(bl.get('shown_results'))
for bd in bl.xpath('.//BookData'):
isbn = check_isbn(bd.get('isbn13', bd.get('isbn', None)))
if not isbn:
isbn = check_isbn(bd.get('isbn', None))
isbn13 = check_isbn(bd.get('isbn13', None))
if not isbn and not isbn13:
continue
if orig_isbn and isbn != orig_isbn:
if orig_isbn and orig_isbn not in {isbn, isbn13}:
continue
title = tostring(bd.find('Title'))
if not title:
@ -173,10 +174,6 @@ class ISBNDB(Source):
if not authors:
continue
comments = tostring(bd.find('Summary'))
if not comments:
# Require comments, since without them the result is useless
# anyway
continue
id_ = (title, tuple(authors))
if id_ in seen:
continue

View File

@ -469,6 +469,8 @@ class DirContainer(object):
return f.write(data)
def exists(self, path):
if not path:
return False
try:
path = os.path.join(self.rootdir, self._unquote(path))
except ValueError: #Happens if path contains quoted special chars

View File

@ -6,20 +6,34 @@
Released under the GPLv3 License
###
body_height = () ->
db = document.body
dde = document.documentElement
if db? and dde?
return Math.max(db.scrollHeight, dde.scrollHeight, db.offsetHeight,
dde.offsetHeight, db.clientHeight, dde.clientHeight)
return 0
window_scroll_pos = (win=window) -> # {{{
if typeof(win.pageXOffset) == 'number'
x = win.pageXOffset
y = win.pageYOffset
else # IE < 9
if document.body and ( document.body.scrollLeft or document.body.scrollTop )
x = document.body.scrollLeft
y = document.body.scrollTop
else if document.documentElement and ( document.documentElement.scrollLeft or document.documentElement.scrollTop)
y = document.documentElement.scrollTop
x = document.documentElement.scrollLeft
return [x, y]
# }}}
abstop = (elem) ->
ans = elem.offsetTop
while elem.offsetParent
elem = elem.offsetParent
ans += elem.offsetTop
return ans
viewport_to_document = (x, y, doc=window?.document) -> # {{{
until doc == window.document
# We are in a frame
frame = doc.defaultView.frameElement
rect = frame.getBoundingClientRect()
x += rect.left
y += rect.top
doc = frame.ownerDocument
win = doc.defaultView
[wx, wy] = window_scroll_pos(win)
x += wx
y += wy
return [x, y]
# }}}
class BookIndexing
###
@ -33,7 +47,7 @@ class BookIndexing
constructor: () ->
this.cache = {}
this.body_height_at_last_check = null
this.last_check = [null, null]
cache_valid: (anchors) ->
for a in anchors
@ -45,7 +59,9 @@ class BookIndexing
return true
anchor_positions: (anchors, use_cache=false) ->
if use_cache and body_height() == this.body_height_at_last_check and this.cache_valid(anchors)
body = document.body
doc_constant = body.scrollHeight == this.last_check[1] and body.scrollWidth == this.last_check[0]
if use_cache and doc_constant and this.cache_valid(anchors)
return this.cache
ans = {}
@ -56,19 +72,24 @@ class BookIndexing
try
result = document.evaluate(
".//*[local-name() = 'a' and @name='#{ anchor }']",
document.body, null,
body, null,
XPathResult.FIRST_ORDERED_NODE_TYPE, null)
elem = result.singleNodeValue
catch error
# The anchor had a ' or other invalid char
elem = null
if elem == null
pos = body_height() + 10000
pos = [body.scrollWidth+1000, body.scrollHeight+1000]
else
pos = abstop(elem)
br = elem.getBoundingClientRect()
pos = viewport_to_document(br.left, br.top, elem.ownerDocument)
if window.paged_display?.in_paged_mode
pos[0] = window.paged_display.column_at(pos[0])
ans[anchor] = pos
this.cache = ans
this.body_height_at_last_check = body_height()
this.last_check = [body.scrollWidth, body.scrollHeight]
return ans
if window?

View File

@ -66,6 +66,7 @@ class PagedDisplay
this.in_paged_mode = false
this.current_margin_side = 0
this.is_full_screen_layout = false
this.max_col_width = -1
set_geometry: (cols_per_screen=1, margin_top=20, margin_side=40, margin_bottom=20) ->
this.margin_top = margin_top
@ -108,6 +109,11 @@ class PagedDisplay
# Minimum column width, for the cases when the window is too
# narrow
col_width = Math.max(100, ((ww - adjust)/n) - 2*sm)
if this.max_col_width > 0 and col_width > this.max_col_width
# Increase the side margin to ensure that col_width is no larger
# than max_col_width
sm += Math.ceil( (col_width - this.max_col_width) / 2*n )
col_width = Math.max(100, ((ww - adjust)/n) - 2*sm)
this.page_width = col_width + 2*sm
this.screen_width = this.page_width * this.cols_per_screen
@ -170,9 +176,7 @@ class PagedDisplay
if this.is_full_screen_layout
window.scrollTo(0, 0)
return
pos = 0
until (pos <= xpos < pos + this.page_width)
pos += this.page_width
pos = Math.floor(xpos/this.page_width) * this.page_width
limit = document.body.scrollWidth - this.screen_width
pos = limit if pos > limit
if animated
@ -180,6 +184,16 @@ class PagedDisplay
else
window.scrollTo(pos, 0)
column_at: (xpos) ->
# Return the number of the column that contains xpos
return Math.floor(xpos/this.page_width)
column_boundaries: () ->
# Return the column numbers at the left edge and after the right edge
# of the viewport
l = this.column_at(window.pageXOffset + 10)
return [l, l + this.cols_per_screen]
animated_scroll: (pos, duration=1000, notify=true) ->
# Scroll the window to X-position pos in an animated fashion over
# duration milliseconds. If notify is true, py_bridge.animated_scroll_done is
@ -217,10 +231,7 @@ class PagedDisplay
if this.is_full_screen_layout
return 0
x = window.pageXOffset + Math.max(10, this.current_margin_side)
edge = Math.floor(x/this.page_width) * this.page_width
while edge < x
edge += this.page_width
return edge - this.page_width
return Math.floor(x/this.page_width) * this.page_width
next_screen_location: () ->
# The position to scroll to for the next screen (which could contain
@ -354,7 +365,5 @@ if window?
window.paged_display = new PagedDisplay()
# TODO:
# Indexing
# Resizing of images
# Full screen mode
# Highlight on jump_to_anchor

View File

@ -347,7 +347,9 @@ class OEBReader(object):
self.logger.warn(u'Guide reference %r not found' % href)
continue
href = corrected_href
guide.add(elem.get('type'), elem.get('title'), href)
typ = elem.get('type')
if typ not in guide:
guide.add(typ, elem.get('title'), href)
def _find_ncx(self, opf):
result = xpath(opf, '/o2:package/o2:spine/@toc')

View File

@ -13,7 +13,7 @@ from lxml import etree
from calibre import guess_type, strftime
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.ebooks.oeb.base import XPath, XHTML_NS, XHTML
from calibre.ebooks.oeb.base import XPath, XHTML_NS, XHTML, xml2text, urldefrag
from calibre.library.comments import comments_to_html
from calibre.utils.date import is_date_undefined
from calibre.ebooks.chardet import strip_encoding_declarations
@ -41,11 +41,25 @@ class Jacket(object):
return removed
def remove_first_image(self):
deleted_item = None
for item in self.oeb.spine:
removed = self.remove_images(item)
if removed > 0:
self.log('Removed first image')
body = XPath('//h:body')(item.data)
if body:
raw = xml2text(body[0]).strip()
imgs = XPath('//h:img|//svg:svg')(item.data)
if not raw and not imgs:
self.log('Removing %s as it has no content'%item.href)
self.oeb.manifest.remove(item)
deleted_item = item
break
if deleted_item is not None:
for item in list(self.oeb.toc):
href = urldefrag(item.href)[0]
if href == deleted_item.href:
self.oeb.toc.remove(item)
def insert_metadata(self, mi):
self.log('Inserting metadata into book...')

View File

@ -248,6 +248,18 @@ def available_width():
desktop = QCoreApplication.instance().desktop()
return desktop.availableGeometry().width()
def get_windows_color_depth():
import win32gui, win32con, win32print
hwin = win32gui.GetDesktopWindow()
hwindc = win32gui.GetWindowDC(hwin)
ans = win32print.GetDeviceCaps(hwindc, win32con.BITSPIXEL)
win32gui.ReleaseDC(hwin, hwindc)
return ans
def get_screen_dpi():
d = QApplication.desktop()
return (d.logicalDpiX(), d.logicalDpiY())
_is_widescreen = None
def is_widescreen():
@ -791,7 +803,18 @@ class Application(QApplication):
font.setStretch(s)
QApplication.setFont(font)
if force_calibre_style or gprefs['ui_style'] != 'system':
depth_ok = True
if iswindows:
# There are some people that still run 16 bit winxp installs. The
# new style does not render well on 16bit machines.
try:
depth_ok = get_windows_color_depth() >= 32
except:
import traceback
traceback.print_exc()
if force_calibre_style or (depth_ok and gprefs['ui_style'] !=
'system'):
self.load_calibre_style()
else:
st = self.style()

View File

@ -15,7 +15,7 @@
</property>
<property name="windowIcon">
<iconset resource="../../../../resources/images.qrc">
<normaloff>:/images/library.png</normaloff>:/images/library.png</iconset>
<normaloff>:/images/lt.png</normaloff>:/images/lt.png</iconset>
</property>
<layout class="QGridLayout" name="gridLayout">
<item row="0" column="0">

View File

@ -54,7 +54,7 @@
</property>
<property name="icon">
<iconset resource="../../../../resources/images.qrc">
<normaloff>:/images/library.png</normaloff>:/images/library.png</iconset>
<normaloff>:/images/lt.png</normaloff>:/images/lt.png</iconset>
</property>
</widget>
</item>

View File

@ -14,7 +14,7 @@
</property>
<property name="windowIcon" >
<iconset resource="../../../../resources/images.qrc" >
<normaloff>:/images/library.png</normaloff>:/images/library.png</iconset>
<normaloff>:/images/lt.png</normaloff>:/images/lt.png</iconset>
</property>
<layout class="QGridLayout" >
<item row="0" column="0" >

View File

@ -255,7 +255,7 @@ class MainWindowMixin(object): # {{{
def __init__(self, db):
self.setObjectName('MainWindow')
self.setWindowIcon(QIcon(I('library.png')))
self.setWindowIcon(QIcon(I('lt.png')))
self.setWindowTitle(__appname__)
self.setContextMenuPolicy(Qt.NoContextMenu)

View File

@ -60,7 +60,7 @@ def init_qt(args):
QCoreApplication.setApplicationName(APP_UID)
app = Application(args)
actions = tuple(Main.create_application_menubar())
app.setWindowIcon(QIcon(I('library.png')))
app.setWindowIcon(QIcon(I('lt.png')))
return app, opts, args, actions

View File

@ -228,7 +228,7 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
self.default_thumbnail = None
self.tb_wrapper = textwrap.TextWrapper(width=40)
self.viewers = collections.deque()
self.system_tray_icon = SystemTrayIcon(QIcon(I('library.png')), self)
self.system_tray_icon = SystemTrayIcon(QIcon(I('lt.png')), self)
self.system_tray_icon.setToolTip('calibre')
self.system_tray_icon.tooltip_requested.connect(
self.job_manager.show_tooltip)

View File

@ -202,7 +202,7 @@ class Document(QWebPage): # {{{
if not isinstance(self.anchor_positions, dict):
# Some weird javascript error happened
self.anchor_positions = {}
return self.anchor_positions
return {k:tuple(v) for k, v in self.anchor_positions.iteritems()}
def switch_to_paged_mode(self, onresize=False):
if onresize and not self.loaded_javascript:
@ -213,10 +213,20 @@ class Document(QWebPage): # {{{
# columns extend beyond the boundaries (and margin) of body
mf = self.mainFrame()
sz = mf.contentsSize()
if sz.width() > self.window_width:
sz.setWidth(sz.width()+side_margin)
scroll_width = self.javascript('document.body.scrollWidth', int)
# At this point sz.width() is not reliable, presumably because Qt
# has not yet been updated
if scroll_width > self.window_width:
sz.setWidth(scroll_width+side_margin)
self.setPreferredContentsSize(sz)
@property
def column_boundaries(self):
if not self.loaded_javascript:
return (0, 1)
self.javascript(u'py_bridge.value = paged_display.column_boundaries()')
return tuple(self.bridge_value)
def after_resize(self):
if self.in_paged_mode:
self.setPreferredContentsSize(QSize())
@ -224,21 +234,27 @@ class Document(QWebPage): # {{{
def switch_to_fullscreen_mode(self):
self.in_fullscreen_mode = True
self.javascript('''
var s = document.body.style;
s.maxWidth = "%dpx";
s.marginLeft = "auto";
s.marginRight = "auto";
'''%self.max_fs_width)
if self.in_paged_mode:
self.javascript('paged_display.max_col_width = %d'%self.max_fs_width)
else:
self.javascript('''
var s = document.body.style;
s.maxWidth = "%dpx";
s.marginLeft = "auto";
s.marginRight = "auto";
'''%self.max_fs_width)
def switch_to_window_mode(self):
self.in_fullscreen_mode = False
self.javascript('''
var s = document.body.style;
s.maxWidth = "none";
s.marginLeft = "%s";
s.marginRight = "%s";
'''%(self.initial_left_margin, self.initial_right_margin))
if self.in_paged_mode:
self.javascript('paged_display.max_col_width = %d'%-1)
else:
self.javascript('''
var s = document.body.style;
s.maxWidth = "none";
s.marginLeft = "%s";
s.marginRight = "%s";
'''%(self.initial_left_margin, self.initial_right_margin))
@pyqtSignature("QString")
def debug(self, msg):
@ -558,6 +574,21 @@ class DocumentView(QWebView): # {{{
return (self.document.ypos, self.document.ypos +
self.document.window_height)
@property
def viewport_rect(self):
# (left, top, right, bottom) of the viewport in document co-ordinates
# When in paged mode, left and right are the numbers of the columns
# at the left edge and *after* the right edge of the viewport
d = self.document
if d.in_paged_mode:
try:
l, r = d.column_boundaries
except ValueError:
l, r = (0, 1)
else:
l, r = d.xpos, d.xpos + d.window_width
return (l, d.ypos, r, d.ypos + d.window_height)
def link_hovered(self, link, text, context):
link, text = unicode(link), unicode(text)
if link:

View File

@ -477,6 +477,7 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
else:
self.view.document.switch_to_window_mode()
self.view.document.page_position.restore()
self.scrolled(self.view.scroll_fraction)
def goto(self, ref):
if ref:
@ -683,7 +684,7 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
if hasattr(self, 'current_index'):
entry = self.toc_model.next_entry(self.current_index,
self.view.document.read_anchor_positions(),
self.view.scroll_pos)
self.view.viewport_rect, self.view.document.in_paged_mode)
if entry is not None:
self.pending_goto_next_section = (
self.toc_model.currently_viewed_entry, entry, False)
@ -693,7 +694,8 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
if hasattr(self, 'current_index'):
entry = self.toc_model.next_entry(self.current_index,
self.view.document.read_anchor_positions(),
self.view.scroll_pos, backwards=True)
self.view.viewport_rect, self.view.document.in_paged_mode,
backwards=True)
if entry is not None:
self.pending_goto_next_section = (
self.toc_model.currently_viewed_entry, entry, True)
@ -705,7 +707,8 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
if anchor_positions is None:
anchor_positions = self.view.document.read_anchor_positions()
items = self.toc_model.update_indexing_state(self.current_index,
self.view.scroll_pos, anchor_positions)
self.view.viewport_rect, anchor_positions,
self.view.document.in_paged_mode)
if items:
self.toc.scrollTo(items[-1].index())
if pgns is not None:
@ -714,7 +717,8 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
if pgns[0] is self.toc_model.currently_viewed_entry:
entry = self.toc_model.next_entry(self.current_index,
self.view.document.read_anchor_positions(),
self.view.scroll_pos,
self.view.viewport_rect,
self.view.document.in_paged_mode,
backwards=pgns[2], current_entry=pgns[1])
if entry is not None:
self.pending_goto_next_section = (
@ -751,12 +755,12 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
# There hasn't been a resize event for some time
# restore the current page position.
self.resize_in_progress = False
self.view.document.after_resize()
if self.window_mode_changed:
# This resize is part of a window mode change, special case it
self.handle_window_mode_toggle()
else:
self.view.document.page_position.restore()
self.view.document.after_resize()
def close_progress_indicator(self):
self.pi.stop()

View File

@ -93,9 +93,19 @@ class TOCItem(QStandardItem):
def type(cls):
return QStandardItem.UserType+10
def update_indexing_state(self, spine_index, scroll_pos, anchor_map):
def update_indexing_state(self, spine_index, viewport_rect, anchor_map,
in_paged_mode):
if in_paged_mode:
self.update_indexing_state_paged(spine_index, viewport_rect,
anchor_map)
else:
self.update_indexing_state_unpaged(spine_index, viewport_rect,
anchor_map)
def update_indexing_state_unpaged(self, spine_index, viewport_rect,
anchor_map):
is_being_viewed = False
top, bottom = scroll_pos
top, bottom = viewport_rect[1], viewport_rect[3]
# We use bottom-25 in the checks below to account for the case where
# the next entry has some invisible margin that just overlaps with the
# bottom of the screen. In this case it will appear to the user that
@ -103,6 +113,9 @@ class TOCItem(QStandardItem):
# be larger than 25, but that's a decent compromise. Also we dont want
# to count a partial line as being visible.
# We only care about y position
anchor_map = {k:v[1] for k, v in anchor_map.iteritems()}
if spine_index >= self.starts_at and spine_index <= self.ends_at:
# The position at which this anchor is present in the document
start_pos = anchor_map.get(self.start_anchor, 0)
@ -115,7 +128,7 @@ class TOCItem(QStandardItem):
# ancestors of this entry.
psp = [anchor_map.get(x, 0) for x in self.possible_end_anchors]
psp = [x for x in psp if x >= start_pos]
# The end position. The first anchor whose pos is >= self.start_pos
# The end position. The first anchor whose pos is >= start_pos
# or if the end is not in this spine item, we set it to the bottom
# of the window +1
end_pos = min(psp) if psp else (bottom+1 if self.ends_at >=
@ -141,6 +154,51 @@ class TOCItem(QStandardItem):
if changed:
self.setFont(self.bold_font if is_being_viewed else self.normal_font)
def update_indexing_state_paged(self, spine_index, viewport_rect,
anchor_map):
is_being_viewed = False
left, right = viewport_rect[0], viewport_rect[2]
left, right = (left, 0), (right, -1)
if spine_index >= self.starts_at and spine_index <= self.ends_at:
# The position at which this anchor is present in the document
start_pos = anchor_map.get(self.start_anchor, (0, 0))
psp = []
if self.ends_at == spine_index:
# Anchors that could possibly indicate the start of the next
# section and therefore the end of this section.
# self.possible_end_anchors is a set of anchors belonging to
# toc entries with depth <= self.depth that are also not
# ancestors of this entry.
psp = [anchor_map.get(x, (0, 0)) for x in self.possible_end_anchors]
psp = [x for x in psp if x >= start_pos]
# The end position. The first anchor whose pos is >= start_pos
# or if the end is not in this spine item, we set it to the column
# after the right edge of the viewport
end_pos = min(psp) if psp else (right if self.ends_at >=
spine_index else (0, 0))
if spine_index > self.starts_at and spine_index < self.ends_at:
# The entire spine item is contained in this entry
is_being_viewed = True
elif (spine_index == self.starts_at and right > start_pos and
# This spine item contains the start
# The start position is before the end of the viewport
(spine_index != self.ends_at or left < end_pos)):
# The end position is after the start of the viewport
is_being_viewed = True
elif (spine_index == self.ends_at and left < end_pos and
# This spine item contains the end
# The end position is after the start of the viewport
(spine_index != self.starts_at or right > start_pos)):
# The start position is before the end of the viewport
is_being_viewed = True
changed = is_being_viewed != self.is_being_viewed
self.is_being_viewed = is_being_viewed
if changed:
self.setFont(self.bold_font if is_being_viewed else self.normal_font)
def __repr__(self):
return 'TOC Item: %s %s#%s'%(self.title, self.abspath, self.fragment)
@ -183,20 +241,26 @@ class TOC(QStandardItemModel):
self.currently_viewed_entry = t
return items_being_viewed
def next_entry(self, spine_pos, anchor_map, scroll_pos, backwards=False,
current_entry=None):
def next_entry(self, spine_pos, anchor_map, viewport_rect, in_paged_mode,
backwards=False, current_entry=None):
current_entry = (self.currently_viewed_entry if current_entry is None
else current_entry)
if current_entry is None: return
items = reversed(self.all_items) if backwards else self.all_items
found = False
top = scroll_pos[0]
if in_paged_mode:
start = viewport_rect[0]
anchor_map = {k:v[0] for k, v in anchor_map.iteritems()}
else:
start = viewport_rect[1]
anchor_map = {k:v[1] for k, v in anchor_map.iteritems()}
for item in items:
if found:
start_pos = anchor_map.get(item.start_anchor, 0)
if backwards and item.is_being_viewed and start_pos >= top:
# Going to this item will either not move the scroll
# position or cause to to *increase* instead of descresing
if backwards and item.is_being_viewed and start_pos >= start:
# This item will not cause any scrolling
continue
if item.starts_at != spine_pos or item.start_anchor:
return item

View File

@ -1203,7 +1203,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
if m:
return m['mtime']
def format_metadata(self, id_, fmt, allow_cache=True):
def format_metadata(self, id_, fmt, allow_cache=True, update_db=False,
commit=False):
if not fmt:
return {}
fmt = fmt.upper()
@ -1218,6 +1219,12 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
ans['size'] = stat.st_size
ans['mtime'] = utcfromtimestamp(stat.st_mtime)
self.format_metadata_cache[id_][fmt] = ans
if update_db:
self.conn.execute(
'UPDATE data SET uncompressed_size=? WHERE format=? AND'
' book=?', (stat.st_size, fmt, id_))
if commit:
self.conn.commit()
return ans
def format_hash(self, id_, fmt):
@ -2564,7 +2571,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
return []
return result
def rename_series(self, old_id, new_name):
def rename_series(self, old_id, new_name, change_index=True):
new_name = new_name.strip()
new_id = self.conn.get(
'''SELECT id from series
@ -2577,22 +2584,24 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
# New series exists. Must update the link, then assign a
# new series index to each of the books.
# Get the list of books where we must update the series index
books = self.conn.get('''SELECT books.id
FROM books, books_series_link as lt
WHERE books.id = lt.book AND lt.series=?
ORDER BY books.series_index''', (old_id,))
if change_index:
# Get the list of books where we must update the series index
books = self.conn.get('''SELECT books.id
FROM books, books_series_link as lt
WHERE books.id = lt.book AND lt.series=?
ORDER BY books.series_index''', (old_id,))
# Now update the link table
self.conn.execute('''UPDATE books_series_link
SET series=?
WHERE series=?''',(new_id, old_id,))
# Now set the indices
for (book_id,) in books:
# Get the next series index
index = self.get_next_series_num_for(new_name)
self.conn.execute('''UPDATE books
SET series_index=?
WHERE id=?''',(index, book_id,))
if change_index:
# Now set the indices
for (book_id,) in books:
# Get the next series index
index = self.get_next_series_num_for(new_name)
self.conn.execute('''UPDATE books
SET series_index=?
WHERE id=?''',(index, book_id,))
self.dirty_books_referencing('series', new_id, commit=False)
self.conn.commit()
@ -3684,4 +3693,12 @@ books_series_link feeds
s = self.conn.get('''SELECT book FROM books_plugin_data WHERE name=?''', (name,))
return [x[0] for x in s]
def get_usage_count_by_id(self, field):
    '''
    Return the usage counts for the many-(one|many) field ``field``.

    :param field: lookup name of a field that is stored via a link table
                  (i.e. its field metadata has a ``link_column`` entry).
    :return: list of ``(item_id, count)`` rows, one per item of the field,
             where ``count`` is the number of books linked to that item.
    :raises ValueError: if ``field`` has no link table (not an is_multiple
             style field).
    '''
    fm = self.field_metadata[field]
    if not fm.get('link_column', None):
        # BUG FIX: the original raised ValueError('%s is not an is_multiple
        # field') without applying the % operator, so the message literally
        # contained "%s". Interpolate the offending field name.
        raise ValueError('%s is not an is_multiple field' % field)
    # Count link-table rows grouped by the item column, e.g.
    # SELECT tag, count(*) FROM books_tags_link GROUP BY tag
    return self.conn.get(
        'SELECT {0}, count(*) FROM books_{1}_link GROUP BY {0}'.format(
            fm['link_column'], fm['table']))

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More