mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-09 03:04:10 -04:00)

commit ce5cebc613: merge with John's branch
@@ -19,6 +19,66 @@
# new recipes:
#   - title:

- version: 0.8.9
  date: 2011-07-08

  new features:
    - title: "Kobo Touch: Display Preview Tag for book previews on the device"

    - title: "Improved display of grouped search terms in the Tag Browser"

    - title: "When adding HTML files to calibre, add an option to process links in breadth-first rather than depth-first order. Access it via Preferences->Plugins and customize the HTML to ZIP plugin"

    - title: "Conversion pipeline: Add an option to control whether duplicate entries are allowed when generating the Table of Contents from links."
      tickets: [806095]

    - title: "Metadata download: When merging results, if the query to the xisbn service hangs, wait no more than 10 seconds. Also try harder to preserve the month when downloading the published date. Do not throw away ISBN-less results if some sources return ISBNs and some do not."
      tickets: [798309]

    - title: "Get Books: Remove OpenLibrary since it has the same files as archive.org. Allow direct downloading from Project Gutenberg."

    - title: "Add functions to the template language that return the last modified time and size of the individual format files for a book. Also add a has_cover() function."

  bug fixes:
    - title: "Fix true/false searches not working in device views"
      tickets: [807262]

    - title: "Fix renaming of collections in device views"
      tickets: [807256]

    - title: "Fix regression that broke the use of the device_db plugboard"
      tickets: [806483]

    - title: "Kobo driver: Hide Expired Book Status for deleted books. Also fix regression that broke connecting to Kobo devices running very old firmware."
      tickets: [802083]

    - title: "Fix bug in 0.8.8 that could cause metadata.db to be left in an unusable state if calibre is interrupted at just the wrong moment or if the db is stored in Dropbox"

    - title: "Fix sorting of composite custom columns that display numbers."

  improved recipes:
    - "Computer Act!ve"
    - Metro News NL
    - Spiegel Online International
    - cracked.com
    - Engadget
    - Independent
    - Telegraph UK

  new recipes:
    - title: "Blog da Cidadania and Noticias UnB"
      author: Diniz Bortolotto

    - title: "Galicia Confidential"
      author: Susana Sotelo Docio

    - title: "South China Morning Post"
      author: llam

    - title: "Szinti Derigisi"
      author: thomass


- version: 0.8.8
  date: 2011-07-01
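The has_cover() function named in the changelog entry above can be called from the template language; a minimal sketch in general program mode (test() is a pre-existing template function used here only for illustration, and the exact syntax should be checked against the template-language docs of this period):

    program: test(has_cover(), 'Yes', 'No')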
recipes/automatiseringgids.recipe (new file, 39 lines)
@@ -0,0 +1,39 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe

class autogids(BasicNewsRecipe):
    title = u'Automatiseringgids IT'
    oldest_article = 7
    __author__ = 'DrMerry'
    description = 'IT-nieuws van Automatiseringgids'
    language = 'nl'
    publisher = 'AutomatiseringGids'
    category = 'Nieuws, IT, Nederlandstalig'
    simultaneous_downloads = 5
    #delay = 1
    timefmt = ' [%A, %d %B, %Y]'
    #timefmt = ''
    no_stylesheets = True
    remove_javascript = True
    remove_empty_feeds = True
    publication_type = 'newspaper'
    encoding = 'utf-8'
    cover_url = 'http://www.automatiseringgids.nl/siteimg/header_logo.gif'
    keep_only_tags = [dict(id=['content'])]
    extra_css = '.artikelheader {font-size:0.8em; color: #666;} .artikelintro {font-weight:bold} div.imgArticle {float: right; margin: 0 0em 1em 1em; display: block; position: relative; } \
                 h2 { margin: 0 0 0.5em; min-height: 30px; font-size: 1.5em; letter-spacing: -0.2px; margin: 0 0 0.5em; color: black; font-weight: bold; line-height: 1.2em; padding: 4px 3px 0; }'

    remove_tags = [dict(name='div', attrs={'id':['loginbox','reactiecollapsible','reactiebox']}),
                   dict(name='div', attrs={'class':['column_a','column_c','bannerfullsize','reactieheader','reactiecollapsible','formulier','artikel_headeroptions']}),
                   dict(name='ul', attrs={'class':['highlightlist']}),
                   dict(name='input', attrs={'type':['button']}),
                   dict(name='div', attrs={'style':['display:block; width:428px; height:30px; float:left;']}),
                   ]

    # Strip the comments header ("Reacties"), "Zie ook:" (see also) blocks and all anchor tags
    preprocess_regexps = [
        (re.compile(r'(<h3>Reacties</h3>|<h2>Zie ook:</h2>|<div style=".*</div>|<a[^>]*>|</a>)', re.DOTALL|re.IGNORECASE),
         lambda match: ''),
        ]

    feeds = [(u'Actueel', u'http://www.automatiseringgids.nl/rss.aspx')]
recipes/blog_da_cidadania.recipe (new file, 20 lines)
@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-

from calibre.web.feeds.news import BasicNewsRecipe

class BlogdaCidadania(BasicNewsRecipe):
    title = 'Blog da Cidadania'
    __author__ = 'Diniz Bortolotto'
    description = 'Posts do Blog da Cidadania'
    oldest_article = 7
    max_articles_per_feed = 50
    encoding = 'utf8'
    publisher = 'Eduardo Guimaraes'
    category = 'politics, Brazil'
    language = 'pt_BR'
    publication_type = 'politics portal'

    feeds = [(u'Blog da Cidadania', u'http://www.blogcidadania.com.br/feed/')]

    reverse_article_order = True
@@ -1,19 +1,20 @@
 #!/usr/bin/env python
 __license__ = 'GPL v3'
-__author__ = 'Lorenzo Vigentini'
-__copyright__ = '2009, Lorenzo Vigentini <l.vigentini at gmail.com>'
-__version__ = 'v1.01'
-__date__ = '14, January 2010'
-__description__ = 'Computeractive publishes new downloads, reviews, news stories, step-by-step guides and answers to PC problems every day.'
+__author__ = 'DrMerry Based on v1.01 by Lorenzo Vigentini'
+__copyright__ = 'For version 1.02, 1.03: DrMerry'
+__version__ = 'v1.03'
+__date__ = '11, July 2011'
+__description__ = 'Computeractive publishes new downloads, reviews, news stories, step-by-step guides and answers to PC problems every day. Original version (c): 2009, Lorenzo Vigentini <l.vigentini at gmail.com>'

 '''
 http://www.computeractive.co.uk/
 '''

 from calibre.web.feeds.news import BasicNewsRecipe
 import re

 class computeractive(BasicNewsRecipe):
-    __author__ = 'Lorenzo Vigentini'
+    __author__ = 'DrMerry'
     description = 'Computeractive publishes new downloads, reviews, news stories, step-by-step guides and answers to PC problems every day.'
     cover_url = 'http://images.pcworld.com/images/common/header/header-logo.gif'

@@ -31,24 +32,27 @@ class computeractive(BasicNewsRecipe):

     remove_javascript = True
     no_stylesheets = True
     remove_empty_feeds = True
     remove_tags_after = dict(name='div', attrs={'class':'article_tags_block'})

     keep_only_tags = [
-        dict(name='div', attrs={'id':'main'})
+        dict(name='div', attrs={'id':'container_left'})
         ]

     remove_tags = [
-        dict(name='div', attrs={'id':['seeAlsoTags','commentsModule','relatedArticles','mainLeft','mainRight']}),
-        dict(name='div', attrs={'class':['buyIt','detailMpu']}),
+        dict(name='div', attrs={'id':['seeAlsoTags','commentsModule','relatedArticles','mainLeft','mainRight','recent_comment_block_parent','reviewDetails']}),
+        dict(name='div', attrs={'class':['buyIt','detailMpu','small_section','recent_comment_block_parent','title_right_button_fix','section_title.title_right_button_fix','common_button']}),
         dict(name='a', attrs={'class':'largerImage'})
         ]

+    preprocess_regexps = [
+        (re.compile(r'(<a [^>]*>|</a>)', re.DOTALL|re.IGNORECASE),
+         lambda match: ''),
+        ]
+
     feeds = [
         (u'General content', u'http://feeds.computeractive.co.uk/rss/latest/computeractive/all'),
         (u'News', u'http://feeds.computeractive.co.uk/rss/latest/computeractive/news'),
         (u'Downloads', u'http://feeds.computeractive.co.uk/rss/latest/computeractive/downloads'),
         (u'Hardware', u'http://feeds.computeractive.co.uk/rss/latest/computeractive/hardware'),
         (u'Software', u'http://feeds.computeractive.co.uk/rss/latest/computeractive/software'),
         (u'Competitions', u'http://www.v3.co.uk/feeds/rss20/personal-technology/competitions')
         ]
@@ -1,83 +1,63 @@
 from calibre.web.feeds.news import BasicNewsRecipe
 import re

 class Cracked(BasicNewsRecipe):
     title = u'Cracked.com'
-    __author__ = u'Nudgenudge'
+    __author__ = 'UnWeave'
     language = 'en'
-    description = 'America''s Only Humor and Video Site, since 1958'
+    description = "America's Only HumorSite since 1958"
     publisher = 'Cracked'
     category = 'comedy, lists'
-    oldest_article = 2
-    delay = 10
-    max_articles_per_feed = 2
+    oldest_article = 3 #days
+    max_articles_per_feed = 100
     no_stylesheets = True
-    encoding = 'cp1252'
+    encoding = 'ascii'
     remove_javascript = True
     use_embedded_content = False
-    INDEX = u'http://www.cracked.com'
-    extra_css = """
-        .pageheader_type{font-size: x-large; font-weight: bold; color: #828D74}
-        .pageheader_title{font-size: xx-large; color: #394128}
-        .pageheader_byline{font-size: small; font-weight: bold; color: #394128}
-        .score_bg {display: inline; width: 100%; margin-bottom: 2em}
-        .score_column_1{ padding-left: 10px; font-size: small; width: 50%}
-        .score_column_2{ padding-left: 10px; font-size: small; width: 50%}
-        .score_column_3{ padding-left: 10px; font-size: small; width: 50%}
-        .score_header{font-size: large; color: #50544A}
-        .bodytext{display: block}
-        body{font-family: Helvetica,Arial,sans-serif}
-        """
-
-    feeds = [ (u'Articles', u'http://feeds.feedburner.com/CrackedRSS/') ]
-
-    conversion_options = {
-        'comment'            : description
-        , 'tags'             : category
-        , 'publisher'        : publisher
-        , 'language'         : language
-        , 'linearize_tables' : True
-        }
-
-    keep_only_tags = [
-        dict(name='div', attrs={'class':['Column1']})
-        ]
+    remove_tags_before = dict(id='PrimaryContent')
+
+    feeds = [(u'Articles', u'http://feeds.feedburner.com/CrackedRSS')]
+
+    remove_tags_after = dict(name='div', attrs={'class':'shareBar'})

-    def get_article_url(self, article):
-        return article.get('guid', None)
-
-    def cleanup_page(self, soup):
-        for item in soup.findAll(style=True):
-            del item['style']
-        for alink in soup.findAll('a'):
-            if alink.string is not None:
-                tstr = alink.string
-                alink.replaceWith(tstr)
-        for div_to_remove in soup.findAll('div', attrs={'id':['googlead_1','fb-like-article','comments_section']}):
-            div_to_remove.extract()
-        for div_to_remove in soup.findAll('div', attrs={'class':['share_buttons_col_1','GenericModule1']}):
-            div_to_remove.extract()
-        for div_to_remove in soup.findAll('div', attrs={'class':re.compile("prev_next")}):
-            div_to_remove.extract()
-        for ul_to_remove in soup.findAll('ul', attrs={'class':['Nav6']}):
-            ul_to_remove.extract()
-        for image in soup.findAll('img', attrs={'alt': 'article image'}):
-            image.extract()
-
-    def append_page(self, soup, appendtag, position):
-        pager = soup.find('a',attrs={'class':'next_arrow_active'})
-        if pager:
-            nexturl = self.INDEX + pager['href']
-            soup2 = self.index_to_soup(nexturl)
-            texttag = soup2.find('div', attrs={'class':re.compile("userStyled")})
-            newpos = len(texttag.contents)
-            self.append_page(soup2,texttag,newpos)
-            texttag.extract()
-            self.cleanup_page(appendtag)
-            appendtag.insert(position,texttag)
-        else:
-            self.cleanup_page(appendtag)
+    remove_tags = [ dict(name='div', attrs={'class':['social',
+                                                     'FacebookLike',
+                                                     'shareBar'
+                                                     ]}),
+
+                    dict(name='div', attrs={'id':['inline-share-buttons',
+                                                  ]}),
+
+                    dict(name='span', attrs={'class':['views',
+                                                      'KonaFilter'
+                                                      ]}),
+                    #dict(name='img'),
+                    ]
+
+    def appendPage(self, soup, appendTag, position):
+        # Check if article has multiple pages
+        pageNav = soup.find('nav', attrs={'class':'PaginationContent'})
+        if pageNav:
+            # Check not at last page
+            nextPage = pageNav.find('a', attrs={'class':'next'})
+            if nextPage:
+                nextPageURL = nextPage['href']
+                nextPageSoup = self.index_to_soup(nextPageURL)
+                # 8th <section> tag contains article content
+                nextPageContent = nextPageSoup.findAll('section')[7]
+                newPosition = len(nextPageContent.contents)
+                self.appendPage(nextPageSoup,nextPageContent,newPosition)
+                nextPageContent.extract()
+                pageNav.extract()
+                appendTag.insert(position,nextPageContent)

     def preprocess_html(self, soup):
-        self.append_page(soup, soup.body, 3)
-        return self.adeify_images(soup)
+        self.appendPage(soup, soup.body, 3)
+        return soup
@@ -9,7 +9,7 @@ engadget.com
 from calibre.web.feeds.news import BasicNewsRecipe

 class Engadget(BasicNewsRecipe):
-    title = u'Engadget_Full'
+    title = u'Engadget'
     __author__ = 'Starson17'
     __version__ = 'v1.00'
     __date__ = '02, July 2011'
recipes/galicia_confidential.recipe (new file, 49 lines)
@@ -0,0 +1,49 @@
# -*- coding: utf-8 -*-

from calibre.web.feeds.news import BasicNewsRecipe
from calibre.web.feeds import Feed

class GC_gl(BasicNewsRecipe):
    title = u'Galicia Confidencial (RSS)'
    __author__ = u'Susana Sotelo Docío'
    description = u'Unha fiestra de información aberta a todos'
    publisher = u'Galicia Confidencial'
    category = u'news, society, politics, Galicia'
    encoding = 'utf-8'
    language = 'gl'
    direction = 'ltr'
    cover_url = 'http://galiciaconfidencial.com/imagenes/header/logo_gc.gif'
    oldest_article = 5
    max_articles_per_feed = 100
    center_navbar = False

    feeds = [(u'Novas no RSS', u'http://galiciaconfidencial.com/rss2/xeral.rss')]

    extra_css = u' p{text-align:left} '

    def print_version(self, url):
        return url.replace('http://galiciaconfidencial.com/nova/', 'http://galiciaconfidencial.com/imprimir/')

    def parse_index(self):
        feeds = []
        self.gc_parse_feeds(feeds)
        return feeds

    def gc_parse_feeds(self, feeds):
        rssFeeds = Feed()
        rssFeeds = BasicNewsRecipe.parse_feeds(self)
        self.feed_to_index_append(rssFeeds[:], feeds)

    def feed_to_index_append(self, feedObject, masterFeed):
        for feed in feedObject:
            newArticles = []
            for article in feed.articles:
                newArt = {
                    'title' : article.title,
                    'url'   : article.url,
                    'date'  : article.date
                }
                newArticles.append(newArt)
            masterFeed.append((feed.title, newArticles))
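feed_to_index_append above flattens the parsed Feed objects into the structure parse_index is expected to return: a list of (feed title, article list) pairs, where each article is a plain dict. Roughly, as a sketch with placeholder values:

    [(u'Novas no RSS', [
        {'title': u'...', 'url': u'http://galiciaconfidencial.com/nova/...', 'date': u'...'},
    ])]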
recipes/geek_poke.recipe (new file, 35 lines)
@@ -0,0 +1,35 @@
#!/usr/bin/python

from calibre.web.feeds.news import BasicNewsRecipe
import re

class AdvancedUserRecipe1307556816(BasicNewsRecipe):
    title = u'Geek and Poke'
    __author__ = u'DrMerry'
    description = u'Geek and Poke Cartoons'
    oldest_article = 31
    max_articles_per_feed = 100
    language = u'en'
    simultaneous_downloads = 5
    #delay = 1
    timefmt = ' [%A, %d %B, %Y]'
    summary_length = -1
    no_stylesheets = True
    cover_url = 'http://geekandpoke.typepad.com/aboutcoders.jpeg'
    remove_javascript = True
    remove_empty_feeds = True
    publication_type = 'blog'

    # Strip empty paragraphs, iframes, Tweet buttons and anchor tags;
    # collapse non-breaking spaces and repeated <br> tags
    preprocess_regexps = [ (re.compile(r'(<p>&nbsp;</p>|<iframe.*</iframe>|<a[^>]*>Tweet</a>|<a[^>]*>|</a>)', re.DOTALL|re.IGNORECASE), lambda match: ''),
                           (re.compile(r'(&nbsp;|\xa0)', re.DOTALL|re.IGNORECASE), lambda match: ' '),
                           (re.compile(r'<br( /)?>(<br( /)?>)+', re.DOTALL|re.IGNORECASE), lambda match: '<br>')
                           ]

    extra_css = 'body, h3, p, h2, h1, div, span{margin:0px} h2.date-header {font-size: 0.7em; color:#eee;} h3.entry-header{font-size: 1.0em} div.entry-body{font-size: 0.9em}'

    remove_tags_before = dict(name='h2', attrs={'class':'date-header'})
    remove_tags_after = dict(name='div', attrs={'class':'entry-body'})

    feeds = [(u'Geek and Poke', u'http://feeds.feedburner.com/GeekAndPoke?format=xml')]
recipes/icons/pecat.png (new binary file, 383 B; contents not shown)
recipes/idg_now.recipe (new file, 43 lines)
@@ -0,0 +1,43 @@
from calibre.web.feeds.news import BasicNewsRecipe

class IDGNow(BasicNewsRecipe):
    title = 'IDG Now!'
    __author__ = 'Diniz Bortolotto'
    description = 'Posts do IDG Now!'
    oldest_article = 7
    max_articles_per_feed = 20
    encoding = 'utf8'
    publisher = 'Now!Digital Business Ltda.'
    category = 'technology, telecom, IT, Brazil'
    language = 'pt_BR'
    publication_type = 'technology portal'
    use_embedded_content = False
    extra_css = '.headline {font-size: x-large;} \n .fact { padding-top: 10pt }'

    def get_article_url(self, article):
        link = article.get('link', None)
        if link is None:
            return article
        if link.split('/')[-1] == "story01.htm":
            link = link.split('/')[-2]
            # decode the feed proxy's escaped version of the real article URL
            a = ['0B','0C','0D','0E','0F','0G','0I','0N' ,'0L0S','0A','0J3A']
            b = ['.' ,'/' ,'?' ,'-' ,'=' ,'&' ,'_','.com','www.','0',':']
            for i in range(0, len(a)):
                link = link.replace(a[i], b[i])
            link = link.split('&')[-3]
            link = link.split('=')[1]
            link = link + "/IDGNoticiaPrint_view"
        return link

    feeds = [
        (u'Ultimas noticias', u'http://rss.idgnow.com.br/c/32184/f/499640/index.rss'),
        (u'Computa\xe7\xe3o Corporativa', u'http://rss.idgnow.com.br/c/32184/f/499643/index.rss'),
        (u'Carreira', u'http://rss.idgnow.com.br/c/32184/f/499644/index.rss'),
        (u'Computa\xe7\xe3o Pessoal', u'http://rss.idgnow.com.br/c/32184/f/499645/index.rss'),
        (u'Internet', u'http://rss.idgnow.com.br/c/32184/f/499646/index.rss'),
        (u'Mercado', u'http://rss.idgnow.com.br/c/32184/f/419982/index.rss'),
        (u'Seguran\xe7a', u'http://rss.idgnow.com.br/c/32184/f/499647/index.rss'),
        (u'Telecom e Redes', u'http://rss.idgnow.com.br/c/32184/f/499648/index.rss')
    ]

    reverse_article_order = True
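The a/b tables in get_article_url above appear to reverse FeedBurner-style escaping of the real article URL embedded in feed-proxy 'story01.htm' links, where each two-character code stands for a reserved URL character ('0B' for '.', '0C' for '/', and so on). A standalone sketch of the decoding (the sample string is hypothetical):

    # hypothetical mangled host segment taken from a feed proxy link
    s = '0L0Sidgnow0N0Bbr'
    for enc, dec in zip(['0B','0C','0D','0E','0F','0G','0I','0N','0L0S','0A','0J3A'],
                        ['.' ,'/' ,'?' ,'-' ,'=' ,'&' ,'_','.com','www.','0',':']):
        s = s.replace(enc, dec)
    print(s)  # -> www.idgnow.com.br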
@@ -16,16 +16,14 @@ class i09(BasicNewsRecipe):
     max_articles_per_feed = 100
     no_stylesheets = True
     encoding = 'utf-8'
-    use_embedded_content = False
+    use_embedded_content = True
     language = 'en'
     masthead_url = 'http://cache.gawkerassets.com/assets/io9.com/img/logo.png'
     extra_css = '''
        body{font-family: "Lucida Grande",Helvetica,Arial,sans-serif}
        img{margin-bottom: 1em}
        h1{font-family :Arial,Helvetica,sans-serif; font-size:large}
        h2{font-family :Arial,Helvetica,sans-serif; font-size:x-small}
        '''

     conversion_options = {
         'comment' : description
        , 'tags' : category
@@ -33,13 +31,11 @@ class i09(BasicNewsRecipe):
        , 'language' : language
        }

-    remove_attributes = ['width','height']
-    keep_only_tags = [dict(attrs={'class':'content permalink'})]
-    remove_tags_before = dict(name='h1')
-    remove_tags = [dict(attrs={'class':'contactinfo'})]
-    remove_tags_after = dict(attrs={'class':'contactinfo'})
-    feeds = [(u'Articles', u'http://feeds.gawker.com/io9/vip?format=xml')]
+    feeds = [(u'Articles', u'http://feeds.gawker.com/io9/full')]
+    remove_tags = [
+        {'class': 'feedflare'},
+        ]

     def preprocess_html(self, soup):
         return self.adeify_images(soup)
recipes/menorca.recipe (new file, 138 lines)
@@ -0,0 +1,138 @@
# -*- coding: utf-8 -*-

import re
from calibre.web.feeds.recipes import BasicNewsRecipe
from calibre.web.feeds import Feed

class Menorca(BasicNewsRecipe):

    title = 'Menorca'
    publisher = 'Editorial Menorca S.A. '
    __author__ = 'M. Sintes'
    description = u'Peri\xf3dico con informaci\xf3n de Menorca, Espa\xf1a'
    category = 'news, politics, economy, culture, Menorca, Spain '
    language = 'es'
    encoding = 'cp1252'

    no_stylesheets = True
    oldest_article = 5
    max_articles_per_feed = 25

    feeds = [ (u'Principal',u'http://www.menorca.info/rss'),
              (u'Opini\xf3n',u'http://www.menorca.info/rss?seccion=opinion'),
              (u'Menorca',u'http://www.menorca.info/rss?seccion=menorca'),
              (u'Alaior',u'http://www.menorca.info/rss?seccion=pueblos/alaior'),
              (u'Ciutadella', u'http://www.menorca.info/rss?seccion=pueblos/ciutadella'),
              (u'Es Castell', u'http://www.menorca.info/rss?seccion=pueblos/escastell'),
              (u'Es Mercadal', u'http://www.menorca.info/rss?seccion=pueblos/esmercadal'),
              (u'Es Migjorn', u'http://www.menorca.info/rss?seccion=pueblos/esmigjorn'),
              (u'Ferreries', u'http://www.menorca.info/rss?seccion=pueblos/ferreries'),
              (u'Fornells', u'http://www.menorca.info/rss?seccion=pueblos/fornells'),
              (u'Llucma\xe7anes', u'http://www.menorca.info/rss?seccion=pueblos/llucmaanes'),
              (u'Ma\xf3', u'http://www.menorca.info/rss?seccion=pueblos/mao'),
              (u'Sant Climent', u'http://www.menorca.info/rss?seccion=pueblos/santcliment'),
              (u'Sant Llu\xeds', u'http://www.menorca.info/rss?seccion=pueblos/santlluis'),
              (u'Deportes',u'http://www.menorca.info/rss?seccion=deportes'),
              (u'Balears', u'http://www.menorca.info/rss?seccion=balears')]

    # Sections whose RSS links are broken; these are scraped directly from the web page
    seccions_web = [(u'Mundo',u'http://www.menorca.info/actualidad/mundo'),
                    (u'Econom\xeda',u'http://www.menorca.info/actualidad/economia'),
                    (u'Espa\xf1a',u'http://www.menorca.info/actualidad/espana')]

    remove_tags_before = dict(name='div', attrs={'class':'bloqueTitulosNoticia'})
    remove_tags_after = dict(name='div', attrs={'class':'compartir'})
    remove_tags = [dict(id = 'utilidades'),
                   dict(name='div', attrs={'class': 'totalComentarios'}),
                   dict(name='div', attrs={'class': 'compartir'}),
                   dict(name='div', attrs={'class': re.compile("img_noticia*")})
                   ]

    def print_version(self, url):
        url_imprimir = url + '?d=print'
        return url.replace(url, url_imprimir)

    def feed_to_index_append(self, feedObject, masterFeed):
        # Loop through the feed object and build the correct type of article list
        for feed in feedObject:
            newArticles = []
            for article in feed.articles:
                newArt = {
                    'title' : article.title,
                    'url'   : article.url,
                    'date'  : article.date,
                    'description' : article.text_summary
                }
                newArticles.append(newArt)

            # append the newly-built list object to the index object
            # passed in as masterFeed.
            masterFeed.append((feed.title, newArticles))

    def parse_index(self):
        rssFeeds = Feed()
        rssFeeds = BasicNewsRecipe.parse_feeds(self)

        articles = []
        feeds = []

        self.feed_to_index_append(rssFeeds, feeds)

        for (nom_seccio, url_seccio) in self.seccions_web:
            articles = []

            soup = self.index_to_soup(url_seccio)
            for article in soup.findAll('div', attrs={'class':re.compile("articulo noticia|cajaNoticiaPortada")}):
                h = article.find(['h2','h3'])
                titol = self.tag_to_string(h)
                a = article.find('a', href=True)
                url = 'http://www.menorca.info' + a['href']

                desc = None
                autor = ''
                dt = ''

                # Author and date are in the article page's 'autor' div, separated by , or ;
                soup_art = self.index_to_soup(url)
                aut = soup_art.find('div', attrs={'class':'autor'})
                tx = self.tag_to_string(aut)
                ls = re.split('[,;]', tx)

                t = len(ls)
                if t >= 1:
                    autor = ls[0]

                    if t > 1:
                        d = ls[t-1]

                        if len(d) >= 10:
                            lt = len(d) - 10
                            dt = d[lt:]

                self.log('\tTrobat article: ', titol, 'a', url, 'Seccio: ', nom_seccio, 'Autor: ', autor, 'Data: ', dt)

                articles.append({'title': titol, 'url': url, 'description': desc, 'date': dt, 'author': autor})

            if articles:
                feeds.append((nom_seccio, articles))

        return feeds
@@ -20,7 +20,7 @@ class AdvancedUserRecipe1306097511(BasicNewsRecipe):
     remove_tags_before = dict(name='div', attrs={'id':'date'})
     remove_tags_after = dict(name='div', attrs={'id':'column-1-3'})
     encoding = 'utf-8'
-    extra_css = '#date {font-size: 10px} .article-image-caption {font-size: 8px}'
+    extra_css = 'body{font-size:12px} #date, .article-image-caption {font-size: 0.583em} h2 {font-size: 0.917em} p.small, span, li, li span span, p, b, i, u, p.small.article-paragraph, p.small.article-paragraph p, p.small.article-paragraph span, p span, span {font-size: 0.833em} h1 {font-size: 1em}'

     remove_tags = [dict(name='div', attrs={'class':[ 'metroCommentFormWrap',
                    'commentForm', 'metroCommentInnerWrap', 'article-slideshow-counter-container', 'article-slideshow-control', 'ad', 'header-links',
recipes/noticias_unb.recipe (new file, 24 lines)
@@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-

from calibre.web.feeds.news import BasicNewsRecipe

class NoticiasUnB(BasicNewsRecipe):
    title = 'Noticias UnB'
    __author__ = 'Diniz Bortolotto'
    description = 'Noticias da UnB'
    oldest_article = 5
    max_articles_per_feed = 20
    category = 'news, educational, Brazil'
    language = 'pt_BR'
    publication_type = 'newsportal'
    use_embedded_content = False
    no_stylesheets = True
    remove_javascript = True

    feeds = [(u'UnB Agência', u'http://www.unb.br/noticias/rss/noticias.rss')]

    reverse_article_order = True

    def print_version(self, url):
        return url.replace('http://', 'http://www.unb.br/noticias/print_email/imprimir.php?u=http://')
recipes/pecat.recipe (new file, 72 lines)
@@ -0,0 +1,72 @@
__license__ = 'GPL v3'
__copyright__ = '2011, Darko Miletic <darko.miletic at gmail.com>'
'''
www.pecat.co.rs
'''

import re
from calibre.web.feeds.news import BasicNewsRecipe

class Pecat_rs(BasicNewsRecipe):
    title = 'Pecat'
    __author__ = 'Darko Miletic'
    description = 'Internet portal slobodne Srbije'
    oldest_article = 15
    max_articles_per_feed = 100
    language = 'sr'
    encoding = 'utf-8'
    no_stylesheets = True
    use_embedded_content = True
    masthead_url = 'http://www.pecat.co.rs/wp-content/themes/zenko-v1/images/logo.jpg'
    publication_type = 'magazine'
    extra_css = """
        @font-face {font-family: "serif1";src:url(res:///opt/sony/ebook/FONT/tt0011m_.ttf)}
        @font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
        body{font-family: Arial,Helvetica,sans1,sans-serif}
        img{display: block; margin-bottom: 1em; margin-top: 1em}
        p{display: block; margin-bottom: 1em; margin-top: 1em}
        """

    conversion_options = {
        'comment'    : description
        , 'tags'     : 'politika, Srbija'
        , 'publisher': 'Pecat'
        , 'language' : language
        }

    # replace U+0110 (Dj with stroke) with U+00D0 (Eth), which more fonts render
    preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')]

    feeds = [(u'Clanci', u'http://www.pecat.co.rs/feed/')]

    def preprocess_html(self, soup):
        for item in soup.findAll(style=True):
            del item['style']
        for item in soup.findAll('a'):
            limg = item.find('img')
            if item.string is not None:
                str = item.string
                item.replaceWith(str)
            else:
                if limg:
                    limg.extract()
                    item.replaceWith(limg)
                else:
                    str = self.tag_to_string(item)
                    item.replaceWith(str)
        for item in soup.findAll('img'):
            dad = item.findParent('p')
            if dad:
                mydad = dad.parent
                myIndex = mydad.contents.index(dad)
                item.extract()
                mydad.insert(myIndex,item)
        for item in soup.findAll('strong'):
            dad = item.findParent('p')
            if dad:
                mydad = dad.parent
                myIndex = mydad.contents.index(dad)
                item.extract()
                item.name='h4'
                mydad.insert(myIndex,item)
        return soup
@@ -1,94 +1,67 @@
 #!/usr/bin/env python

 __license__ = 'GPL v3'
-__copyright__ = '2008-2009, Darko Miletic <darko.miletic at gmail.com>'
+__copyright__ = '2008-2011, Darko Miletic <darko.miletic at gmail.com>'
 '''
 spiegel.de
 '''

 from calibre.web.feeds.news import BasicNewsRecipe

 class Spiegel_int(BasicNewsRecipe):
     title = 'Spiegel Online International'
     __author__ = 'Darko Miletic and Sujata Raman'
-    description = "News and POV from Europe's largest newsmagazine"
+    description = "Daily news, analysis and opinion from Europe's leading newsmagazine and Germany's top news Web site"
     oldest_article = 7
     max_articles_per_feed = 100
-    language = 'en'
+    language = 'en_DE'
     no_stylesheets = True
     use_embedded_content = False
     encoding = 'cp1252'
     publisher = 'SPIEGEL ONLINE GmbH'
     category = 'news, politics, Germany'
-    lang = 'en'
-    recursions = 1
-    match_regexps = [r'http://www.spiegel.de/.*-[1-9],00.html']
+    masthead_url = 'http://www.spiegel.de/static/sys/v9/spiegelonline_logo.png'
+    publication_type = 'magazine'

     conversion_options = {
-        'comments'     : description
-        ,'tags'        : category
-        ,'language'    : lang
-        ,'publisher'   : publisher
-        ,'pretty_print': True
+        'comments'  : description
+        ,'tags'     : category
+        ,'language' : language
+        ,'publisher': publisher
        }

     extra_css = '''
-       #spArticleColumn{font-family:verdana,arial,helvetica,geneva,sans-serif ; }
+       #spArticleContent{font-family: Verdana,Arial,Helvetica,Geneva,sans-serif}
        h1{color:#666666; font-weight:bold;}
        h2{color:#990000;}
        h3{color:#990000;}
        h4 {color:#990000;}
        a{color:#990000;}
        .spAuthor{font-style:italic;}
-       #spIntroTeaser{font-weight:bold;}
+       #spIntroTeaser{font-weight:bold}
        .spCredit{color:#666666; font-size:x-small;}
        .spShortDate{font-size:x-small;}
        .spArticleImageBox {font-size:x-small;}
        .spPhotoGallery{font-size:x-small; color:#990000 ;}
        '''

-    keep_only_tags = [
-        dict(name ='div', attrs={'id': ['spArticleImageBox spAssetAlignleft','spArticleColumn']}),
-        ]
+    keep_only_tags = [dict(attrs={'id':'spArticleContent'})]
+    remove_tags_after = dict(attrs={'id':'spArticleBody'})
+    remove_tags = [dict(name=['meta','base','iframe','embed','object'])]
+    remove_attributes = ['clear']
+    feeds = [(u'Spiegel Online', u'http://www.spiegel.de/international/index.rss')]

-    remove_tags = [
-        dict(name='div', attrs={'id':['spSocialBookmark','spArticleFunctions','spMultiPagerHeadlines',]}),
-        dict(name='div', attrs={'class':['spCommercial spM520','spArticleCredit','spPicZoom']}),
-        ]
-
-    feeds = [(u'Spiegel Online', u'http://www.spiegel.de/schlagzeilen/rss/0,5291,676,00.xml')]
-
-    def postprocess_html(self, soup,first):
-
-        for tag in soup.findAll(name='div',attrs={'id':"spMultiPagerControl"}):
-            tag.extract()
-
-        p = soup.find(name = 'p', attrs={'id':'spIntroTeaser'})
-
-        if p.string is not None:
-            t = p.string.rpartition(':')[0]
-
-            if 'Part'in t:
-                if soup.h1 is not None:
-                    soup.h1.extract()
-                if soup.h2 is not None:
-                    soup.h2.extract()
-                functag = soup.find(name= 'div', attrs={'id':"spArticleFunctions"})
-                if functag is not None:
-                    functag.extract()
-                auttag = soup.find(name= 'p', attrs={'class':"spAuthor"})
-                if auttag is not None:
-                    auttag.extract()
-
-            pictag = soup.find(name= 'div', attrs={'id':"spArticleTopAsset"})
-            if pictag is not None:
-                pictag.extract()
-
-        return soup
-
-    # def print_version(self, url):
-    #     main, sep, rest = url.rpartition(',')
-    #     rmain, rsep, rrest = main.rpartition(',')
-    #     return rmain + ',druck-' + rrest + ',' + rest
+    def print_version(self, url):
+        main, sep, rest = url.rpartition(',')
+        rmain, rsep, rrest = main.rpartition(',')
+        return rmain + ',druck-' + rrest + ',' + rest

+    def preprocess_html(self, soup):
+        for item in soup.findAll(style=True):
+            del item['style']
+        for item in soup.findAll('a'):
+            if item.string is not None:
+                str = item.string
+                item.replaceWith(str)
+            else:
+                str = self.tag_to_string(item)
+                item.replaceWith(str)
+        return soup
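The print_version above splices 'druck-' (German for print) into the final comma-separated id segment of a spiegel.de article URL. Worked through with a hypothetical URL of the matched pattern:

    # hypothetical input following the spiegel.de article URL pattern
    url = 'http://www.spiegel.de/international/europe/0,1518,772399,00.html'
    main, sep, rest = url.rpartition(',')       # rest  -> '00.html'
    rmain, rsep, rrest = main.rpartition(',')   # rrest -> '772399'
    print(rmain + ',druck-' + rrest + ',' + rest)
    # -> http://www.spiegel.de/international/europe/0,1518,druck-772399,00.html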
@@ -1,5 +1,5 @@
 " Project wide builtins
-let g:pyflakes_builtins += ["dynamic_property", "__", "P", "I", "lopen", "icu_lower", "icu_upper", "icu_title"]
+let g:pyflakes_builtins += ["dynamic_property", "__", "P", "I", "lopen", "icu_lower", "icu_upper", "icu_title", "ngettext"]

 python << EOFPY
 import os
@@ -64,7 +64,7 @@ class Check(Command):

     description = 'Check for errors in the calibre source code'

     BUILTINS = ['_', '__', 'dynamic_property', 'I', 'P', 'lopen', 'icu_lower',
-            'icu_upper', 'icu_title']
+            'icu_upper', 'icu_title', 'ngettext']
     CACHE = '.check-cache.pickle'

     def get_files(self, cache):
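Both builtins lists above are extended with ngettext, the standard gettext API for plural forms, so the style checkers stop flagging it as undefined. Its use looks roughly like this sketch (the message strings are hypothetical):

    n = 3
    # ngettext(singular, plural, n) returns the form appropriate for n
    print(ngettext('Added one book', 'Added %d books', n) % n)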
setup/pygettext.py (deleted file, 646 lines)
@@ -1,646 +0,0 @@
#! /usr/bin/env python
# Originally written by Barry Warsaw <barry@zope.com>
#
# Minimally patched to make it even more xgettext compatible
# by Peter Funk <pf@artcom-gmbh.de>
#
# 2002-11-22 Jürgen Hermann <jh@web.de>
# Added checks that _() only contains string literals, and
# command line args are resolved to module lists, i.e. you
# can now pass a filename, a module or package name, or a
# directory (including globbing chars, important for Win32).
# Made docstring fit in 80 chars wide displays using pydoc.
#

__doc__ = """pygettext -- Python equivalent of xgettext(1)

Many systems (Solaris, Linux, Gnu) provide extensive tools that ease the
internationalization of C programs. Most of these tools are independent of
the programming language and can be used from within Python programs.
Martin von Loewis' work[1] helps considerably in this regard.

There's one problem though; xgettext is the program that scans source code
looking for message strings, but it groks only C (or C++). Python
introduces a few wrinkles, such as dual quoting characters, triple quoted
strings, and raw strings. xgettext understands none of this.

Enter pygettext, which uses Python's standard tokenize module to scan
Python source code, generating .pot files identical to what GNU xgettext[2]
generates for C and C++ code. From there, the standard GNU tools can be
used.

A word about marking Python strings as candidates for translation. GNU
xgettext recognizes the following keywords: gettext, dgettext, dcgettext,
and gettext_noop. But those can be a lot of text to include all over your
code. C and C++ have a trick: they use the C preprocessor. Most
internationalized C source includes a #define for gettext() to _() so that
what has to be written in the source is much less. Thus these are both
translatable strings:

    gettext("Translatable String")
    _("Translatable String")

Python of course has no preprocessor so this doesn't work so well. Thus,
pygettext searches only for _() by default, but see the -k/--keyword flag
below for how to augment this.

[1] http://www.python.org/workshops/1997-10/proceedings/loewis.html
[2] http://www.gnu.org/software/gettext/gettext.html

NOTE: pygettext attempts to be option and feature compatible with GNU
xgettext where ever possible. However some options are still missing or are
not fully implemented. Also, xgettext's use of command line switches with
option arguments is broken, and in these cases, pygettext just defines
additional switches.

Usage: pygettext [options] inputfile ...

Options:

    -a
    --extract-all
        Extract all strings.

    -d name
    --default-domain=name
        Rename the default output file from messages.pot to name.pot.

    -E
    --escape
        Replace non-ASCII characters with octal escape sequences.

    -D
    --docstrings
        Extract module, class, method, and function docstrings. These do
        not need to be wrapped in _() markers, and in fact cannot be for
        Python to consider them docstrings. (See also the -X option).

    -h
    --help
        Print this help message and exit.

    -k word
    --keyword=word
        Keywords to look for in addition to the default set, which are:
        %(DEFAULTKEYWORDS)s

        You can have multiple -k flags on the command line.

    -K
    --no-default-keywords
        Disable the default set of keywords (see above). Any keywords
        explicitly added with the -k/--keyword option are still recognized.

    --no-location
        Do not write filename/lineno location comments.

    -n
    --add-location
        Write filename/lineno location comments indicating where each
        extracted string is found in the source. These lines appear before
        each msgid. The style of comments is controlled by the -S/--style
        option. This is the default.

    -o filename
    --output=filename
        Rename the default output file from messages.pot to filename. If
        filename is `-' then the output is sent to standard out.

    -p dir
    --output-dir=dir
        Output files will be placed in directory dir.

    -S stylename
    --style stylename
        Specify which style to use for location comments. Two styles are
        supported:

        Solaris  # File: filename, line: line-number
        GNU      #: filename:line

        The style name is case insensitive. GNU style is the default.

    -v
    --verbose
        Print the names of the files being processed.

    -V
    --version
        Print the version of pygettext and exit.

    -w columns
    --width=columns
        Set width of output to columns.

    -x filename
    --exclude-file=filename
        Specify a file that contains a list of strings that are not be
        extracted from the input files. Each string to be excluded must
        appear on a line by itself in the file.

    -X filename
    --no-docstrings=filename
        Specify a file that contains a list of files (one per line) that
        should not have their docstrings extracted. This is only useful in
        conjunction with the -D option above.

If `inputfile' is -, standard input is read.
"""

import os
import imp
import sys
import glob
import time
import getopt
import token
import tokenize
import operator

__version__ = '1.5'

default_keywords = ['_']
DEFAULTKEYWORDS = ', '.join(default_keywords)

EMPTYSTRING = ''

from setup import __appname__, __version__ as version

# The normal pot-file header. msgmerge and Emacs's po-mode work better if it's
# there.
pot_header = '''\
# Translation template file..
# Copyright (C) %(year)s Kovid Goyal
# Kovid Goyal <kovid@kovidgoyal.net>, %(year)s.
#
msgid ""
msgstr ""
"Project-Id-Version: %(appname)s %(version)s\\n"
"POT-Creation-Date: %%(time)s\\n"
"PO-Revision-Date: %%(time)s\\n"
"Last-Translator: Automatically generated\\n"
"Language-Team: LANGUAGE\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=UTF-8\\n"
"Content-Transfer-Encoding: 8bit\\n"
"Generated-By: pygettext.py %%(version)s\\n"

'''%dict(appname=__appname__, version=version, year=time.strftime('%Y'))

def usage(code, msg=''):
    print >> sys.stderr, __doc__ % globals()
    if msg:
        print >> sys.stderr, msg
    sys.exit(code)


escapes = []

def make_escapes(pass_iso8859):
    global escapes
    if pass_iso8859:
        # Allow iso-8859 characters to pass through so that e.g. 'msgid
        # "Höhe"' would not result in 'msgid "H\366he"'. Otherwise we
        # escape any character outside the 32..126 range.
        mod = 128
    else:
        mod = 256
    for i in range(256):
        if 32 <= (i % mod) <= 126:
            escapes.append(chr(i))
        else:
            escapes.append("\\%03o" % i)
    escapes[ord('\\')] = '\\\\'
    escapes[ord('\t')] = '\\t'
    escapes[ord('\r')] = '\\r'
    escapes[ord('\n')] = '\\n'
    escapes[ord('\"')] = '\\"'


def escape(s):
    global escapes
    s = list(s)
    for i in range(len(s)):
        s[i] = escapes[ord(s[i])]
    return EMPTYSTRING.join(s)


def safe_eval(s):
    # unwrap quotes, safely
    return eval(s, {'__builtins__':{}}, {})


def normalize(s):
    # This converts the various Python string types into a format that is
    # appropriate for .po files, namely much closer to C style.
    lines = s.split('\n')
    if len(lines) == 1:
        s = '"' + escape(s) + '"'
    else:
        if not lines[-1]:
            del lines[-1]
            lines[-1] = lines[-1] + '\n'
        for i in range(len(lines)):
            lines[i] = escape(lines[i])
        lineterm = '\\n"\n"'
        s = '""\n"' + lineterm.join(lines) + '"'
    return s


def containsAny(str, set):
    """Check whether 'str' contains ANY of the chars in 'set'"""
    return 1 in [c in str for c in set]


def _visit_pyfiles(list, dirname, names):
    """Helper for getFilesForName()."""
    # get extension for python source files
    if not globals().has_key('_py_ext'):
        global _py_ext
        _py_ext = [triple[0] for triple in imp.get_suffixes()
                   if triple[2] == imp.PY_SOURCE][0]

    # don't recurse into CVS directories
    if 'CVS' in names:
        names.remove('CVS')

    # add all *.py files to list
    list.extend(
        [os.path.join(dirname, file) for file in names
         if os.path.splitext(file)[1] == _py_ext]
        )


def _get_modpkg_path(dotted_name, pathlist=None):
    """Get the filesystem path for a module or a package.

    Return the file system path to a file for a module, and to a directory for
    a package. Return None if the name is not found, or is a builtin or
    extension module.
    """
    # split off top-most name
    parts = dotted_name.split('.', 1)

    if len(parts) > 1:
        # we have a dotted path, import top-level package
        try:
            file, pathname, description = imp.find_module(parts[0], pathlist)
            if file: file.close()
        except ImportError:
            return None

        # check if it's indeed a package
        if description[2] == imp.PKG_DIRECTORY:
            # recursively handle the remaining name parts
            pathname = _get_modpkg_path(parts[1], [pathname])
        else:
            pathname = None
    else:
        # plain name
        try:
            file, pathname, description = imp.find_module(
                dotted_name, pathlist)
            if file:
                file.close()
            if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]:
                pathname = None
        except ImportError:
            pathname = None

    return pathname


def getFilesForName(name):
    """Get a list of module files for a filename, a module or package name,
    or a directory.
    """
    if not os.path.exists(name):
        # check for glob chars
        if containsAny(name, "*?[]"):
            files = glob.glob(name)
            list = []
            for file in files:
                list.extend(getFilesForName(file))
            return list

        # try to find module or package
        name = _get_modpkg_path(name)
        if not name:
            return []

    if os.path.isdir(name):
        # find all python files in directory
        list = []
        os.path.walk(name, _visit_pyfiles, list)
        return list
    elif os.path.exists(name):
        # a single file
        return [name]

    return []


class TokenEater:
    def __init__(self, options):
        self.__options = options
        self.__messages = {}
        self.__state = self.__waiting
        self.__data = []
        self.__lineno = -1
        self.__freshmodule = 1
        self.__curfile = None

    def __call__(self, ttype, tstring, stup, etup, line):
        # dispatch
##        import token
##        print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
##              'tstring:', tstring
        self.__state(ttype, tstring, stup[0])

    def __waiting(self, ttype, tstring, lineno):
        opts = self.__options
        # Do docstring extractions, if enabled
        if opts.docstrings and not opts.nodocstrings.get(self.__curfile):
            # module docstring?
            if self.__freshmodule:
                if ttype == tokenize.STRING:
                    self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
                    self.__freshmodule = 0
                elif ttype not in (tokenize.COMMENT, tokenize.NL):
                    self.__freshmodule = 0
                return
            # class docstring?
            if ttype == tokenize.NAME and tstring in ('class', 'def'):
                self.__state = self.__suiteseen
                return
        if ttype == tokenize.NAME and tstring in opts.keywords:
            self.__state = self.__keywordseen

    def __suiteseen(self, ttype, tstring, lineno):
        # ignore anything until we see the colon
        if ttype == tokenize.OP and tstring == ':':
            self.__state = self.__suitedocstring

    def __suitedocstring(self, ttype, tstring, lineno):
        # ignore any intervening noise
        if ttype == tokenize.STRING:
            self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
            self.__state = self.__waiting
        elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
                           tokenize.COMMENT):
            # there was no class docstring
            self.__state = self.__waiting

    def __keywordseen(self, ttype, tstring, lineno):
        if ttype == tokenize.OP and tstring == '(':
            self.__data = []
            self.__lineno = lineno
            self.__state = self.__openseen
        else:
            self.__state = self.__waiting

    def __openseen(self, ttype, tstring, lineno):
        if ttype == tokenize.OP and tstring == ')':
            # We've seen the last of the translatable strings. Record the
            # line number of the first line of the strings and update the list
            # of messages seen. Reset state for the next batch. If there
            # were no strings inside _(), then just ignore this entry.
            if self.__data:
                self.__addentry(EMPTYSTRING.join(self.__data))
            self.__state = self.__waiting
        elif ttype == tokenize.STRING:
            self.__data.append(safe_eval(tstring))
        elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                           token.NEWLINE, tokenize.NL]:
            # warn if we see anything else than STRING or whitespace
            print >> sys.stderr, \
                  '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'\
                  % {
                  'token': tstring,
                  'file': self.__curfile,
                  'lineno': self.__lineno
                  }
            self.__state = self.__waiting

    def __addentry(self, msg, lineno=None, isdocstring=0):
        if lineno is None:
            lineno = self.__lineno
        if not msg in self.__options.toexclude:
            entry = (self.__curfile, lineno)
            self.__messages.setdefault(msg, {})[entry] = isdocstring

    def set_filename(self, filename):
        self.__curfile = filename
        self.__freshmodule = 1

    def write(self, fp):
        options = self.__options
        timestamp = time.strftime('%Y-%m-%d %H:%M+%Z')
        # The time stamp in the header doesn't have the same format as that
        # generated by xgettext...
        print >> fp, pot_header % {'time': timestamp, 'version': __version__}
        # Sort the entries. First sort each particular entry's keys, then
        # sort all the entries by their first item.
        reverse = {}
        for k, v in self.__messages.items():
            keys = v.keys()
            keys.sort()
            reverse.setdefault(tuple(keys), []).append((k, v))
        rkeys = reverse.keys()
        rkeys.sort()
        for rkey in rkeys:
            rentries = reverse[rkey]
            rentries.sort()
            for k, v in rentries:
                isdocstring = 0
                # If the entry was gleaned out of a docstring, then add a
                # comment stating so. This is to aid translators who may wish
                # to skip translating some unimportant docstrings.
                if reduce(operator.__add__, v.values()):
                    isdocstring = 1
                # k is the message string, v is a dictionary-set of (filename,
                # lineno) tuples. We want to sort the entries in v first by
                # file name and then by line number.
                v = v.keys()
                v.sort()
                if not options.writelocations:
                    pass
                # location comments are different b/w Solaris and GNU:
                elif options.locationstyle == options.SOLARIS:
                    for filename, lineno in v:
                        d = {'filename': filename, 'lineno': lineno}
                        print >>fp, \
                              '# File: %(filename)s, line: %(lineno)d' % d
                elif options.locationstyle == options.GNU:
                    # fit as many locations on one line, as long as the
                    # resulting line length doesn't exceeds 'options.width'
                    locline = '#:'
                    for filename, lineno in v:
                        d = {'filename': filename, 'lineno': lineno}
                        s = ' %(filename)s:%(lineno)d' % d
                        if len(locline) + len(s) <= options.width:
                            locline = locline + s
                        else:
                            print >> fp, locline
                            locline = "#:" + s
                    if len(locline) > 2:
                        print >> fp, locline
                if isdocstring:
                    print >> fp, '#, docstring'
                print >> fp, 'msgid', normalize(k)
                print >> fp, 'msgstr ""\n'



def main(outfile, args=sys.argv[1:]):
    global default_keywords
    try:
        opts, args = getopt.getopt(
            args,
            'ad:DEhk:Kno:p:S:Vvw:x:X:',
            ['extract-all', 'default-domain=', 'escape', 'help',
             'keyword=', 'no-default-keywords',
             'add-location', 'no-location', 'output=', 'output-dir=',
             'style=', 'verbose', 'version', 'width=', 'exclude-file=',
             'docstrings', 'no-docstrings',
             ])
    except getopt.error, msg:
        usage(1, msg)

    # for holding option values
    class Options:
        # constants
        GNU = 1
        SOLARIS = 2
        # defaults
        extractall = 0 # FIXME: currently this option has no effect at all.
        escape = 0
        keywords = []
        outpath = ''
        outfile = 'messages.pot'
        writelocations = 1
        locationstyle = GNU
        verbose = 0
        width = 78
        excludefilename = ''
        docstrings = 0
        nodocstrings = {}

    options = Options()
    locations = {'gnu' : options.GNU,
                 'solaris' : options.SOLARIS,
                 }

    # parse options
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-a', '--extract-all'):
            options.extractall = 1
        elif opt in ('-d', '--default-domain'):
            options.outfile = arg + '.pot'
        elif opt in ('-E', '--escape'):
            options.escape = 1
        elif opt in ('-D', '--docstrings'):
            options.docstrings = 1
        elif opt in ('-k', '--keyword'):
            options.keywords.append(arg)
        elif opt in ('-K', '--no-default-keywords'):
            default_keywords = []
        elif opt in ('-n', '--add-location'):
            options.writelocations = 1
        elif opt in ('--no-location',):
            options.writelocations = 0
        elif opt in ('-S', '--style'):
            options.locationstyle = locations.get(arg.lower())
            if options.locationstyle is None:
                usage(1, ('Invalid value for --style: %s') % arg)
        elif opt in ('-o', '--output'):
            options.outfile = arg
        elif opt in ('-p', '--output-dir'):
            options.outpath = arg
        elif opt in ('-v', '--verbose'):
            options.verbose = 1
        elif opt in ('-V', '--version'):
            print ('pygettext.py (xgettext for Python) %s') % __version__
            sys.exit(0)
        elif opt in ('-w', '--width'):
            try:
                options.width = int(arg)
            except ValueError:
                usage(1, ('--width argument must be an integer: %s') % arg)
        elif opt in ('-x', '--exclude-file'):
            options.excludefilename = arg
        elif opt in ('-X', '--no-docstrings'):
            fp = open(arg)
            try:
                while 1:
                    line = fp.readline()
                    if not line:
                        break
                    options.nodocstrings[line[:-1]] = 1
            finally:
                fp.close()

    # calculate escapes
    make_escapes(options.escape)

    # calculate all keywords
    options.keywords.extend(default_keywords)

    # initialize list of strings to exclude
    if options.excludefilename:
        try:
            fp = open(options.excludefilename)
            options.toexclude = fp.readlines()
            fp.close()
        except IOError:
            print >> sys.stderr, (
                "Can't read --exclude-file: %s") % options.excludefilename
            sys.exit(1)
    else:
        options.toexclude = []

    # resolve args to module lists
    expanded = []
    for arg in args:
        if arg == '-':
            expanded.append(arg)
        else:
            expanded.extend(getFilesForName(arg))
    args = expanded

    # slurp through all the files
    eater = TokenEater(options)
    for filename in args:
        if filename == '-':
            if options.verbose:
                print ('Reading standard input')
            fp = sys.stdin
            closep = 0
        else:
            if options.verbose:
                print ('Working on %s') % filename
            fp = open(filename)
            closep = 1
        try:
            eater.set_filename(filename)
            try:
                tokenize.tokenize(fp.readline, eater)
            except tokenize.TokenError, e:
                print >> sys.stderr, '%s: %s, line %d, column %d' % (
                    e[0], filename, e[1][0], e[1][1])
            except IndentationError, e:
                print >> sys.stderr, '%s: %s, line %s, column %s' % (
                    e[0], filename, e.lineno, e[1][1])
        finally:
            if closep:
                fp.close()

    # write the output
    eater.write(outfile)

if __name__ == '__main__':
    main(sys.stdout)
@@ -6,11 +6,10 @@ __license__ = 'GPL v3'
 __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
 __docformat__ = 'restructuredtext en'

-import os, cStringIO, tempfile, shutil, atexit, subprocess, glob, re
+import os, tempfile, shutil, subprocess, glob, re, time, textwrap
 from distutils import sysconfig

-from setup import Command, __appname__
-from setup.pygettext import main as pygettext
+from setup import Command, __appname__, __version__
 from setup.build_environment import pyqt

 class POT(Command):
@@ -60,19 +59,50 @@ class POT(Command):


    def run(self, opts):
        pot_header = textwrap.dedent('''\
        # Translation template file..
        # Copyright (C) %(year)s Kovid Goyal
        # Kovid Goyal <kovid@kovidgoyal.net>, %(year)s.
        #
        msgid ""
        msgstr ""
        "Project-Id-Version: %(appname)s %(version)s\\n"
        "POT-Creation-Date: %(time)s\\n"
        "PO-Revision-Date: %(time)s\\n"
        "Last-Translator: Automatically generated\\n"
        "Language-Team: LANGUAGE\\n"
        "MIME-Version: 1.0\\n"
        "Report-Msgid-Bugs-To: https://bugs.launchpad.net/calibre\\n"
        "Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\\n"
        "Content-Type: text/plain; charset=UTF-8\\n"
        "Content-Transfer-Encoding: 8bit\\n"

        ''')%dict(appname=__appname__, version=__version__,
                year=time.strftime('%Y'),
                time=time.strftime('%Y-%m-%d %H:%M+%Z'))

        files = self.source_files()
        buf = cStringIO.StringIO()
        self.info('Creating translations template...')
        tempdir = tempfile.mkdtemp()
        atexit.register(shutil.rmtree, tempdir)
        pygettext(buf, ['-k', '__', '-p', tempdir]+files)
        src = buf.getvalue()
        src += '\n\n' + self.get_tweaks_docs()
        pot = os.path.join(self.PATH, __appname__+'.pot')
        with open(pot, 'wb') as f:
            f.write(src)
        self.info('Translations template:', os.path.abspath(pot))
        return pot
        with tempfile.NamedTemporaryFile() as fl:
            fl.write('\n'.join(files))
            fl.flush()
            out = tempfile.NamedTemporaryFile(suffix='.pot', delete=False)
            out.close()
            self.info('Creating translations template...')
            subprocess.check_call(['xgettext', '-f', fl.name,
                '--default-domain=calibre', '-o', out.name, '-L', 'Python',
                '--from-code=UTF-8', '--sort-by-file', '--omit-header',
                '--no-wrap', '-k__',
                ])
            with open(out.name, 'rb') as f:
                src = f.read()
            os.remove(out.name)
            src = pot_header + '\n' + src
            src += '\n\n' + self.get_tweaks_docs()
            pot = os.path.join(self.PATH, __appname__+'.pot')
            with open(pot, 'wb') as f:
                f.write(src)
            self.info('Translations template:', os.path.abspath(pot))
            return pot


class Translations(POT):

@@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__   = u'calibre'
numeric_version = (0, 8, 8)
numeric_version = (0, 8, 9)
__version__   = u'.'.join(map(unicode, numeric_version))
__author__    = u"Kovid Goyal <kovid@kovidgoyal.net>"

@@ -3,57 +3,16 @@
__license__   = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'

import textwrap, os, glob, functools, re
import os, glob, functools, re
from calibre import guess_type
from calibre.customize import FileTypePlugin, MetadataReaderPlugin, \
        MetadataWriterPlugin, PreferencesPlugin, InterfaceActionBase, StoreBase
from calibre.constants import numeric_version
from calibre.ebooks.metadata.archive import ArchiveExtract, get_cbz_metadata
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.ebooks.html.to_zip import HTML2ZIP

# To archive plugins {{{
class HTML2ZIP(FileTypePlugin):
    name = 'HTML to ZIP'
    author = 'Kovid Goyal'
    description = textwrap.dedent(_('''\
            Follow all local links in an HTML file and create a ZIP \
            file containing all linked files. This plugin is run \
            every time you add an HTML file to the library.\
            '''))
    version = numeric_version
    file_types = set(['html', 'htm', 'xhtml', 'xhtm', 'shtm', 'shtml'])
    supported_platforms = ['windows', 'osx', 'linux']
    on_import = True

    def run(self, htmlfile):
        from calibre.ptempfile import TemporaryDirectory
        from calibre.gui2.convert.gui_conversion import gui_convert
        from calibre.customize.conversion import OptionRecommendation
        from calibre.ebooks.epub import initialize_container

        with TemporaryDirectory('_plugin_html2zip') as tdir:
            recs =[('debug_pipeline', tdir, OptionRecommendation.HIGH)]
            recs.append(['keep_ligatures', True, OptionRecommendation.HIGH])
            if self.site_customization and self.site_customization.strip():
                recs.append(['input_encoding', self.site_customization.strip(),
                    OptionRecommendation.HIGH])
            gui_convert(htmlfile, tdir, recs, abort_after_input_dump=True)
            of = self.temporary_file('_plugin_html2zip.zip')
            tdir = os.path.join(tdir, 'input')
            opf = glob.glob(os.path.join(tdir, '*.opf'))[0]
            ncx = glob.glob(os.path.join(tdir, '*.ncx'))
            if ncx:
                os.remove(ncx[0])
            epub = initialize_container(of.name, os.path.basename(opf))
            epub.add_dir(tdir)
            epub.close()

        return of.name

    def customization_help(self, gui=False):
        return _('Character encoding for the input HTML files. Common choices '
        'include: cp1252, latin1, iso-8859-1 and utf-8.')


class PML2PMLZ(FileTypePlugin):
    name = 'PML to PMLZ'
@@ -1231,6 +1190,15 @@ class StoreDieselEbooksStore(StoreBase):
    formats = ['EPUB', 'PDF']
    affiliate = True

class StoreEbookNLStore(StoreBase):
    name = 'eBook.nl'
    description = u'De eBookwinkel van Nederland'
    actual_plugin = 'calibre.gui2.store.stores.ebook_nl_plugin:EBookNLStore'

    headquarters = 'NL'
    formats = ['EPUB', 'PDF']
    affiliate = True

class StoreEbookscomStore(StoreBase):
    name = 'eBooks.com'
    description = u'Sells books in multiple electronic formats in all categories. Technical infrastructure is cutting edge, robust and scalable, with servers in the US and Europe.'
@@ -1488,6 +1456,7 @@ plugins += [
    StoreBeamEBooksDEStore,
    StoreBeWriteStore,
    StoreDieselEbooksStore,
    StoreEbookNLStore,
    StoreEbookscomStore,
    StoreEBookShoppeUKStore,
    StoreEPubBuyDEStore,

@@ -63,5 +63,4 @@ Various things that require other things before they can be migrated:
            columns/categories/searches info into
            self.field_metadata. Finally, implement metadata dirtied
            functionality.
    2. Test Schema upgrades
'''

@@ -17,12 +17,13 @@ from calibre import isbytestring, force_unicode, prints
from calibre.constants import (iswindows, filesystem_encoding,
        preferred_encoding)
from calibre.ptempfile import PersistentTemporaryFile
from calibre.library.schema_upgrades import SchemaUpgrade
from calibre.db.schema_upgrades import SchemaUpgrade
from calibre.library.field_metadata import FieldMetadata
from calibre.ebooks.metadata import title_sort, author_to_author_sort
from calibre.utils.icu import strcmp
from calibre.utils.config import to_json, from_json, prefs, tweaks
from calibre.utils.date import utcfromtimestamp, parse_date
from calibre.utils.filenames import is_case_sensitive
from calibre.db.tables import (OneToOneTable, ManyToOneTable, ManyToManyTable,
        SizeTable, FormatsTable, AuthorsTable, IdentifiersTable)
# }}}
@@ -30,7 +31,9 @@ from calibre.db.tables import (OneToOneTable, ManyToOneTable, ManyToManyTable,
'''
Differences in semantics from pysqlite:

    1. execute/executemany/executescript operate in autocommit mode
    1. execute/executemany operate in autocommit mode
    2. There is no fetchone() method on cursor objects, instead use next()
    3. There is no executescript

'''

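A minimal sketch of point 2 in practice (illustrative only, assuming an apsw Connection named conn): apsw cursors are iterators, so the first row of a result is taken with next() rather than fetchone(), and an empty result raises StopIteration instead of returning None.

    # Illustrative sketch, not part of this commit:
    try:
        row = conn.cursor().execute('SELECT uuid FROM library_id').next()
    except StopIteration:
        row = None  # apsw signals "no rows" via StopIteration
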
@@ -119,6 +122,66 @@ def icu_collator(s1, s2):
    return strcmp(force_unicode(s1, 'utf-8'), force_unicode(s2, 'utf-8'))
# }}}

# Unused aggregators {{{
def Concatenate(sep=','):
    '''String concatenation aggregator for sqlite'''

    def step(ctxt, value):
        if value is not None:
            ctxt.append(value)

    def finalize(ctxt):
        if not ctxt:
            return None
        return sep.join(ctxt)

    return ([], step, finalize)

def SortedConcatenate(sep=','):
    '''String concatenation aggregator for sqlite, sorted by supplied index'''

    def step(ctxt, ndx, value):
        if value is not None:
            ctxt[ndx] = value

    def finalize(ctxt):
        if len(ctxt) == 0:
            return None
        return sep.join(map(ctxt.get, sorted(ctxt.iterkeys())))

    return ({}, step, finalize)

def IdentifiersConcat():
    '''String concatenation aggregator for the identifiers map'''

    def step(ctxt, key, val):
        ctxt.append(u'%s:%s'%(key, val))

    def finalize(ctxt):
        return ','.join(ctxt)

    return ([], step, finalize)

def AumSortedConcatenate():
    '''String concatenation aggregator for the author sort map'''

    def step(ctxt, ndx, author, sort, link):
        if author is not None:
            ctxt[ndx] = ':::'.join((author, sort, link))

    def finalize(ctxt):
        keys = list(ctxt.iterkeys())
        l = len(keys)
        if l == 0:
            return None
        if l == 1:
            return ctxt[keys[0]]
        return ':#:'.join([ctxt[v] for v in sorted(keys)])

    return ({}, step, finalize)

# }}}

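How these factories are wired up (illustrative sketch; the Connection subclass below performs this exact registration): each factory returns the (context, step, finalize) triple that apsw's createaggregatefunction() expects, after which the aggregate is callable from SQL.

    # Illustrative sketch, not part of this commit:
    import apsw
    conn = apsw.Connection(':memory:')
    conn.createaggregatefunction('sortconcat', SortedConcatenate, 2)
    # SELECT sortconcat(idx, name) now joins the name values in the
    # order given by idx, using the default ',' separator.
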
class Connection(apsw.Connection): # {{{

    BUSY_TIMEOUT = 2000 # milliseconds
@@ -128,32 +191,46 @@ class Connection(apsw.Connection): # {{{

        self.setbusytimeout(self.BUSY_TIMEOUT)
        self.execute('pragma cache_size=5000')
        self.conn.execute('pragma temp_store=2')
        self.execute('pragma temp_store=2')

        encoding = self.execute('pragma encoding').fetchone()[0]
        self.conn.create_collation('PYNOCASE', partial(pynocase,
        encoding = self.execute('pragma encoding').next()[0]
        self.createcollation('PYNOCASE', partial(pynocase,
            encoding=encoding))

        self.conn.create_function('title_sort', 1, title_sort)
        self.conn.create_function('author_to_author_sort', 1,
                _author_to_author_sort)

        self.conn.create_function('uuid4', 0, lambda : str(uuid.uuid4()))
        self.createscalarfunction('title_sort', title_sort, 1)
        self.createscalarfunction('author_to_author_sort',
                _author_to_author_sort, 1)
        self.createscalarfunction('uuid4', lambda : str(uuid.uuid4()),
                0)

        # Dummy functions for dynamically created filters
        self.conn.create_function('books_list_filter', 1, lambda x: 1)
        self.conn.create_collation('icucollate', icu_collator)
        self.createscalarfunction('books_list_filter', lambda x: 1, 1)
        self.createcollation('icucollate', icu_collator)

        # Legacy aggregators (never used) but present for backwards compat
        self.createaggregatefunction('sortconcat', SortedConcatenate, 2)
        self.createaggregatefunction('sortconcat_bar',
                partial(SortedConcatenate, sep='|'), 2)
        self.createaggregatefunction('sortconcat_amper',
                partial(SortedConcatenate, sep='&'), 2)
        self.createaggregatefunction('identifiers_concat',
                IdentifiersConcat, 2)
        self.createaggregatefunction('concat', Concatenate, 1)
        self.createaggregatefunction('aum_sortconcat',
                AumSortedConcatenate, 4)

    def create_dynamic_filter(self, name):
        f = DynamicFilter(name)
        self.conn.create_function(name, 1, f)
        self.createscalarfunction(name, f, 1)

    def get(self, *args, **kw):
        ans = self.cursor().execute(*args)
        if kw.get('all', True):
            return ans.fetchall()
        for row in ans:
            return ans[0]
        try:
            return ans.next()[0]
        except (StopIteration, IndexError):
            return None

    def execute(self, sql, bindings=None):
        cursor = self.cursor()
@@ -162,14 +239,9 @@ class Connection(apsw.Connection): # {{{
    def executemany(self, sql, sequence_of_bindings):
        return self.cursor().executemany(sql, sequence_of_bindings)

    def executescript(self, sql):
        with self:
            # Use an explicit savepoint so that even if this is called
            # while a transaction is active, it is atomic
            return self.cursor().execute(sql)
# }}}

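The savepoint trick referred to in the comment above, sketched out (illustrative only, assuming an apsw Connection named conn): apsw connections are context managers, and nested with-blocks are implemented with savepoints, so the inner block commits or rolls back as a unit even when an outer transaction is already active.

    # Illustrative sketch, not part of this commit:
    with conn:          # outer transaction
        with conn:      # inner savepoint: atomic on its own
            conn.cursor().execute(sql)
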
class DB(object, SchemaUpgrade):
class DB(object):

    PATH_LIMIT = 40 if iswindows else 100
    WINDOWS_LIBRARY_PATH_LIMIT = 75
@@ -213,25 +285,24 @@ class DB(object, SchemaUpgrade):
            shutil.copyfile(self.dbpath, pt.name)
            self.dbpath = pt.name

        self.is_case_sensitive = (not iswindows and
                not os.path.exists(self.dbpath.replace('metadata.db',
                    'MeTAdAtA.dB')))
        if not os.path.exists(os.path.dirname(self.dbpath)):
            os.makedirs(os.path.dirname(self.dbpath))

        self._conn = None

        if self.user_version == 0:
            self.initialize_database()

        with self.conn:
            SchemaUpgrade.__init__(self)
        if not os.path.exists(self.library_path):
            os.makedirs(self.library_path)
        self.is_case_sensitive = is_case_sensitive(self.library_path)

        SchemaUpgrade(self.conn, self.library_path, self.field_metadata)

        # Guarantee that the library_id is set
        self.library_id

        self.initialize_prefs(default_prefs)

        # Fix legacy triggers and columns
        self.conn.executescript('''
        self.conn.execute('''
        DROP TRIGGER IF EXISTS author_insert_trg;
        CREATE TEMP TRIGGER author_insert_trg
            AFTER INSERT ON authors
@@ -248,6 +319,7 @@ class DB(object, SchemaUpgrade):
        UPDATE authors SET sort=author_to_author_sort(name) WHERE sort IS NULL;
        ''')

        self.initialize_prefs(default_prefs)
        self.initialize_custom_columns()
        self.initialize_tables()

@@ -516,12 +588,16 @@ class DB(object, SchemaUpgrade):
    def initialize_tables(self): # {{{
        tables = self.tables = {}
        for col in ('title', 'sort', 'author_sort', 'series_index', 'comments',
                'timestamp', 'published', 'uuid', 'path', 'cover',
                'timestamp', 'pubdate', 'uuid', 'path', 'cover',
                'last_modified'):
            metadata = self.field_metadata[col].copy()
            if metadata['table'] is None:
                metadata['table'], metadata['column'] == 'books', ('has_cover'
            if col == 'comments':
                metadata['table'], metadata['column'] = 'comments', 'text'
            if not metadata['table']:
                metadata['table'], metadata['column'] = 'books', ('has_cover'
                    if col == 'cover' else col)
            if not metadata['column']:
                metadata['column'] = col
            tables[col] = OneToOneTable(col, metadata)

        for col in ('series', 'publisher', 'rating'):
@@ -538,6 +614,7 @@ class DB(object, SchemaUpgrade):
        tables['size'] = SizeTable('size', self.field_metadata['size'].copy())

        for label, data in self.custom_column_label_map.iteritems():
            label = '#' + label
            metadata = self.field_metadata[label].copy()
            link_table = self.custom_table_names(data['num'])[1]

@@ -562,11 +639,11 @@ class DB(object, SchemaUpgrade):
    @property
    def conn(self):
        if self._conn is None:
            self._conn = apsw.Connection(self.dbpath)
            self._conn = Connection(self.dbpath)
            if self._exists and self.user_version == 0:
                self._conn.close()
                os.remove(self.dbpath)
                self._conn = apsw.Connection(self.dbpath)
                self._conn = Connection(self.dbpath)
        return self._conn

    @dynamic_property
@@ -584,7 +661,14 @@ class DB(object, SchemaUpgrade):
    def initialize_database(self):
        metadata_sqlite = P('metadata_sqlite.sql', data=True,
                allow_user_override=False).decode('utf-8')
        self.conn.executescript(metadata_sqlite)
        cur = self.conn.cursor()
        cur.execute('BEGIN EXCLUSIVE TRANSACTION')
        try:
            cur.execute(metadata_sqlite)
        except:
            cur.execute('ROLLBACK')
        else:
            cur.execute('COMMIT')
        if self.user_version == 0:
            self.user_version = 1
    # }}}
@@ -624,7 +708,7 @@ class DB(object, SchemaUpgrade):
            self.conn.execute('''
                    DELETE FROM library_id;
                    INSERT INTO library_id (uuid) VALUES (?);
                    ''', self._library_id_)
                    ''', (self._library_id_,))

        return property(doc=doc, fget=fget, fset=fset)

@@ -641,9 +725,11 @@ class DB(object, SchemaUpgrade):
        # the db while we are reading
        for table in self.tables.itervalues():
            try:
                table.read()
                table.read(self)
            except:
                prints('Failed to read table:', table.name)
                import pprint
                pprint.pprint(table.metadata)
                raise

    # }}}
11
src/calibre/db/cache.py
Normal file
@@ -0,0 +1,11 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

353
src/calibre/db/locking.py
Normal file
@@ -0,0 +1,353 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

from threading import Lock, Condition, current_thread

class LockingError(RuntimeError):
    pass

def create_locks():
    '''
    Return a pair of locks: (read_lock, write_lock)

    The read_lock can be acquired by multiple threads simultaneously, it can
    also be acquired multiple times by the same thread.

    Only one thread can hold write_lock at a time, and only if there are no
    current read_locks. While the write_lock is held no
    other threads can acquire read locks. The write_lock can also be acquired
    multiple times by the same thread.

    Both read_lock and write_lock are meant to be used in with statements (they
    operate on a single underlying lock).

    WARNING: Be very careful to not try to acquire a read lock while the same
    thread holds a write lock and vice versa. That is, a given thread should
    always release *all* locks of type A before trying to acquire a lock of type
    B. Bad things will happen if you violate this rule, the most benign of
    which is the raising of a LockingError (I haven't been able to eliminate
    the possibility of deadlocking in this scenario).
    '''
    l = SHLock()
    return RWLockWrapper(l), RWLockWrapper(l, is_shared=False)

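Typical usage of the returned pair (illustrative sketch, assuming this module lands as calibre.db.locking as the diff indicates):

    # Illustrative sketch, not part of this commit:
    from calibre.db.locking import create_locks

    read_lock, write_lock = create_locks()

    with read_lock:
        pass  # any number of threads may hold this simultaneously

    with write_lock:
        pass  # exclusive: no readers and no other writer
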
class SHLock(object):
    '''
    Shareable lock class. Used to implement the Multiple readers-single writer
    paradigm. As best as I can tell, neither writer nor reader starvation
    should be possible.

    Based on code from: https://github.com/rfk/threading2
    '''

    def __init__(self):
        self._lock = Lock()
        # When a shared lock is held, is_shared will give the cumulative
        # number of locks and _shared_owners maps each owning thread to
        # the number of locks it holds.
        self.is_shared = 0
        self._shared_owners = {}
        # When an exclusive lock is held, is_exclusive will give the number
        # of locks held and _exclusive_owner will give the owning thread
        self.is_exclusive = 0
        self._exclusive_owner = None
        # When someone is forced to wait for a lock, they add themselves
        # to one of these queues along with a "waiter" condition that
        # is used to wake them up.
        self._shared_queue = []
        self._exclusive_queue = []
        # This is for recycling waiter objects.
        self._free_waiters = []

    def acquire(self, blocking=True, shared=False):
        '''
        Acquire the lock in shared or exclusive mode.

        If blocking is False this method will return False if acquiring the
        lock failed.
        '''
        with self._lock:
            if shared:
                return self._acquire_shared(blocking)
            else:
                return self._acquire_exclusive(blocking)
            assert not (self.is_shared and self.is_exclusive)

    def release(self):
        ''' Release the lock. '''
        # This decrements the appropriate lock counters, and if the lock
        # becomes free, it looks for a queued thread to hand it off to.
        # By doing the handoff here we ensure fairness.
        me = current_thread()
        with self._lock:
            if self.is_exclusive:
                if self._exclusive_owner is not me:
                    raise LockingError("release() called on unheld lock")
                self.is_exclusive -= 1
                if not self.is_exclusive:
                    self._exclusive_owner = None
                    # If there are waiting shared locks, issue them
                    # all and then wake everyone up.
                    if self._shared_queue:
                        for (thread, waiter) in self._shared_queue:
                            self.is_shared += 1
                            self._shared_owners[thread] = 1
                            waiter.notify()
                        del self._shared_queue[:]
                    # Otherwise, if there are waiting exclusive locks,
                    # they get first dibs on the lock.
                    elif self._exclusive_queue:
                        (thread, waiter) = self._exclusive_queue.pop(0)
                        self._exclusive_owner = thread
                        self.is_exclusive += 1
                        waiter.notify()
            elif self.is_shared:
                try:
                    self._shared_owners[me] -= 1
                    if self._shared_owners[me] == 0:
                        del self._shared_owners[me]
                except KeyError:
                    raise LockingError("release() called on unheld lock")
                self.is_shared -= 1
                if not self.is_shared:
                    # If there are waiting exclusive locks,
                    # they get first dibs on the lock.
                    if self._exclusive_queue:
                        (thread, waiter) = self._exclusive_queue.pop(0)
                        self._exclusive_owner = thread
                        self.is_exclusive += 1
                        waiter.notify()
                    else:
                        assert not self._shared_queue
            else:
                raise LockingError("release() called on unheld lock")

    def _acquire_shared(self, blocking=True):
        me = current_thread()
        # Each case: acquiring a lock we already hold.
        if self.is_shared and me in self._shared_owners:
            self.is_shared += 1
            self._shared_owners[me] += 1
            return True
        # If the lock is already spoken for by an exclusive, add us
        # to the shared queue and it will give us the lock eventually.
        if self.is_exclusive or self._exclusive_queue:
            if self._exclusive_owner is me:
                raise LockingError("can't downgrade SHLock object")
            if not blocking:
                return False
            waiter = self._take_waiter()
            try:
                self._shared_queue.append((me, waiter))
                waiter.wait()
                assert not self.is_exclusive
            finally:
                self._return_waiter(waiter)
        else:
            self.is_shared += 1
            self._shared_owners[me] = 1
        return True

    def _acquire_exclusive(self, blocking=True):
        me = current_thread()
        # Each case: acquiring a lock we already hold.
        if self._exclusive_owner is me:
            assert self.is_exclusive
            self.is_exclusive += 1
            return True
        # Do not allow upgrade of lock
        if self.is_shared and me in self._shared_owners:
            raise LockingError("can't upgrade SHLock object")
        # If the lock is already spoken for, add us to the exclusive queue.
        # This will eventually give us the lock when it's our turn.
        if self.is_shared or self.is_exclusive:
            if not blocking:
                return False
            waiter = self._take_waiter()
            try:
                self._exclusive_queue.append((me, waiter))
                waiter.wait()
            finally:
                self._return_waiter(waiter)
        else:
            self._exclusive_owner = me
            self.is_exclusive += 1
        return True

    def _take_waiter(self):
        try:
            return self._free_waiters.pop()
        except IndexError:
            return Condition(self._lock)#, verbose=True)

    def _return_waiter(self, waiter):
        self._free_waiters.append(waiter)

class RWLockWrapper(object):

    def __init__(self, shlock, is_shared=True):
        self._shlock = shlock
        self._is_shared = is_shared

    def __enter__(self):
        self._shlock.acquire(shared=self._is_shared)
        return self

    def __exit__(self, *args):
        self._shlock.release()


# Tests {{{
if __name__ == '__main__':
    import time, random, unittest
    from threading import Thread

    class TestSHLock(unittest.TestCase):
        """Testcases for SHLock class."""

        def test_multithread_deadlock(self):
            lock = SHLock()
            def two_shared():
                r = RWLockWrapper(lock)
                with r:
                    time.sleep(0.2)
                    with r:
                        pass
            def one_exclusive():
                time.sleep(0.1)
                w = RWLockWrapper(lock, is_shared=False)
                with w:
                    pass
            threads = [Thread(target=two_shared), Thread(target=one_exclusive)]
            for t in threads:
                t.daemon = True
                t.start()
            for t in threads:
                t.join(5)
            live = [t for t in threads if t.is_alive()]
            self.assertListEqual(live, [], 'ShLock hung')

        def test_upgrade(self):
            lock = SHLock()
            lock.acquire(shared=True)
            self.assertRaises(LockingError, lock.acquire, shared=False)
            lock.release()

        def test_downgrade(self):
            lock = SHLock()
            lock.acquire(shared=False)
            self.assertRaises(LockingError, lock.acquire, shared=True)
            lock.release()

        def test_recursive(self):
            lock = SHLock()
            lock.acquire(shared=True)
            lock.acquire(shared=True)
            self.assertEqual(lock.is_shared, 2)
            lock.release()
            lock.release()
            self.assertFalse(lock.is_shared)
            lock.acquire(shared=False)
            lock.acquire(shared=False)
            self.assertEqual(lock.is_exclusive, 2)
            lock.release()
            lock.release()
            self.assertFalse(lock.is_exclusive)

        def test_release(self):
            lock = SHLock()
            self.assertRaises(LockingError, lock.release)

            def get_lock(shared):
                lock.acquire(shared=shared)
                time.sleep(1)
                lock.release()

            threads = [Thread(target=get_lock, args=(x,)) for x in (True,
                False)]
            for t in threads:
                t.daemon = True
                t.start()
                self.assertRaises(LockingError, lock.release)
                t.join(2)
                self.assertFalse(t.is_alive())
            self.assertFalse(lock.is_shared)
            self.assertFalse(lock.is_exclusive)

        def test_acquire(self):
            lock = SHLock()

            def get_lock(shared):
                lock.acquire(shared=shared)
                time.sleep(1)
                lock.release()

            shared = Thread(target=get_lock, args=(True,))
            shared.daemon = True
            shared.start()
            time.sleep(0.1)
            self.assertTrue(lock.acquire(shared=True, blocking=False))
            lock.release()
            self.assertFalse(lock.acquire(shared=False, blocking=False))
            lock.acquire(shared=False)
            self.assertFalse(shared.is_alive())
            lock.release()
            self.assertTrue(lock.acquire(shared=False, blocking=False))
            lock.release()

            exclusive = Thread(target=get_lock, args=(False,))
            exclusive.daemon = True
            exclusive.start()
            time.sleep(0.1)
            self.assertFalse(lock.acquire(shared=False, blocking=False))
            self.assertFalse(lock.acquire(shared=True, blocking=False))
            lock.acquire(shared=True)
            self.assertFalse(exclusive.is_alive())
            lock.release()
            lock.acquire(shared=False)
            lock.release()
            lock.acquire(shared=True)
            lock.release()
            self.assertFalse(lock.is_shared)
            self.assertFalse(lock.is_exclusive)

        def test_contention(self):
            lock = SHLock()
            done = []
            def lots_of_acquires():
                for _ in xrange(1000):
                    shared = random.choice([True,False])
                    lock.acquire(shared=shared)
                    lock.acquire(shared=shared)
                    time.sleep(random.random() * 0.0001)
                    lock.release()
                    time.sleep(random.random() * 0.0001)
                    lock.acquire(shared=shared)
                    time.sleep(random.random() * 0.0001)
                    lock.release()
                    lock.release()
                done.append(True)
            threads = [Thread(target=lots_of_acquires) for _ in xrange(10)]
            for t in threads:
                t.daemon = True
                t.start()
            for t in threads:
                t.join(20)
            live = [t for t in threads if t.is_alive()]
            self.assertListEqual(live, [], 'ShLock hung')
            self.assertEqual(len(done), len(threads), 'SHLock locking failed')
            self.assertFalse(lock.is_shared)
            self.assertFalse(lock.is_exclusive)

    suite = unittest.TestLoader().loadTestsFromTestCase(TestSHLock)
    unittest.TextTestRunner(verbosity=2).run(suite)

# }}}

618
src/calibre/db/schema_upgrades.py
Normal file
@@ -0,0 +1,618 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import os

from calibre import prints
from calibre.utils.date import isoformat, DEFAULT_DATE

class SchemaUpgrade(object):

    def __init__(self, conn, library_path, field_metadata):
        conn.execute('BEGIN EXCLUSIVE TRANSACTION')
        self.conn = conn
        self.library_path = library_path
        self.field_metadata = field_metadata
        # Upgrade database
        try:
            while True:
                uv = self.conn.execute('pragma user_version').next()[0]
                meth = getattr(self, 'upgrade_version_%d'%uv, None)
                if meth is None:
                    break
                else:
                    prints('Upgrading database to version %d...'%(uv+1))
                    meth()
                    self.conn.execute('pragma user_version=%d'%(uv+1))
        except:
            self.conn.execute('ROLLBACK')
            raise
        else:
            self.conn.execute('COMMIT')
        finally:
            self.conn = self.field_metadata = None

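A hypothetical illustration (not part of this commit) of how the loop above dispatches: any future method following the naming convention is picked up automatically via getattr(), and pragma user_version records how far the schema has advanced.

    # Hypothetical example only, to show the convention; column name is
    # invented for illustration:
    #
    #   def upgrade_version_21(self):
    #       'Hypothetical future upgrade'
    #       self.conn.execute('ALTER TABLE books ADD COLUMN example_col TEXT')
    #
    # After it runs, user_version is bumped to 22 and the lookup continues
    # with upgrade_version_22 until no matching method exists.
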
    def upgrade_version_1(self):
        '''
        Normalize indices.
        '''
        self.conn.execute('''\
        DROP INDEX IF EXISTS authors_idx;
        CREATE INDEX authors_idx ON books (author_sort COLLATE NOCASE, sort COLLATE NOCASE);
        DROP INDEX IF EXISTS series_idx;
        CREATE INDEX series_idx ON series (name COLLATE NOCASE);
        DROP INDEX IF EXISTS series_sort_idx;
        CREATE INDEX series_sort_idx ON books (series_index, id);
        ''')

    def upgrade_version_2(self):
        ''' Fix Foreign key constraints for deleting from link tables. '''
        script = '''\
        DROP TRIGGER IF EXISTS fkc_delete_books_%(ltable)s_link;
        CREATE TRIGGER fkc_delete_on_%(table)s
        BEFORE DELETE ON %(table)s
        BEGIN
            SELECT CASE
                WHEN (SELECT COUNT(id) FROM books_%(ltable)s_link WHERE %(ltable_col)s=OLD.id) > 0
                THEN RAISE(ABORT, 'Foreign key violation: %(table)s is still referenced')
            END;
        END;
        DELETE FROM %(table)s WHERE (SELECT COUNT(id) FROM books_%(ltable)s_link WHERE %(ltable_col)s=%(table)s.id) < 1;
        '''
        self.conn.execute(script%dict(ltable='authors', table='authors', ltable_col='author'))
        self.conn.execute(script%dict(ltable='publishers', table='publishers', ltable_col='publisher'))
        self.conn.execute(script%dict(ltable='tags', table='tags', ltable_col='tag'))
        self.conn.execute(script%dict(ltable='series', table='series', ltable_col='series'))

    def upgrade_version_3(self):
        ' Add path to result cache '
        self.conn.execute('''
        DROP VIEW IF EXISTS meta;
        CREATE VIEW meta AS
        SELECT id, title,
            (SELECT concat(name) FROM authors WHERE authors.id IN (SELECT author from books_authors_link WHERE book=books.id)) authors,
            (SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
            (SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
            timestamp,
            (SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
            (SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
            (SELECT text FROM comments WHERE book=books.id) comments,
            (SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
            series_index,
            sort,
            author_sort,
            (SELECT concat(format) FROM data WHERE data.book=books.id) formats,
            isbn,
            path
        FROM books;
        ''')

    def upgrade_version_4(self):
        'Rationalize books table'
        self.conn.execute('''
        CREATE TEMPORARY TABLE
        books_backup(id,title,sort,timestamp,series_index,author_sort,isbn,path);
        INSERT INTO books_backup SELECT id,title,sort,timestamp,series_index,author_sort,isbn,path FROM books;
        DROP TABLE books;
        CREATE TABLE books ( id INTEGER PRIMARY KEY AUTOINCREMENT,
                             title TEXT NOT NULL DEFAULT 'Unknown' COLLATE NOCASE,
                             sort TEXT COLLATE NOCASE,
                             timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                             pubdate TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                             series_index REAL NOT NULL DEFAULT 1.0,
                             author_sort TEXT COLLATE NOCASE,
                             isbn TEXT DEFAULT "" COLLATE NOCASE,
                             lccn TEXT DEFAULT "" COLLATE NOCASE,
                             path TEXT NOT NULL DEFAULT "",
                             flags INTEGER NOT NULL DEFAULT 1
                        );
        INSERT INTO
            books (id,title,sort,timestamp,pubdate,series_index,author_sort,isbn,path)
            SELECT id,title,sort,timestamp,timestamp,series_index,author_sort,isbn,path FROM books_backup;
        DROP TABLE books_backup;

        DROP VIEW IF EXISTS meta;
        CREATE VIEW meta AS
        SELECT id, title,
            (SELECT concat(name) FROM authors WHERE authors.id IN (SELECT author from books_authors_link WHERE book=books.id)) authors,
            (SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
            (SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
            timestamp,
            (SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
            (SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
            (SELECT text FROM comments WHERE book=books.id) comments,
            (SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
            series_index,
            sort,
            author_sort,
            (SELECT concat(format) FROM data WHERE data.book=books.id) formats,
            isbn,
            path,
            lccn,
            pubdate,
            flags
        FROM books;
        ''')

    def upgrade_version_5(self):
        'Update indexes/triggers for new books table'
        self.conn.execute('''
        CREATE INDEX authors_idx ON books (author_sort COLLATE NOCASE);
        CREATE INDEX books_idx ON books (sort COLLATE NOCASE);
        CREATE TRIGGER books_delete_trg
            AFTER DELETE ON books
            BEGIN
                DELETE FROM books_authors_link WHERE book=OLD.id;
                DELETE FROM books_publishers_link WHERE book=OLD.id;
                DELETE FROM books_ratings_link WHERE book=OLD.id;
                DELETE FROM books_series_link WHERE book=OLD.id;
                DELETE FROM books_tags_link WHERE book=OLD.id;
                DELETE FROM data WHERE book=OLD.id;
                DELETE FROM comments WHERE book=OLD.id;
                DELETE FROM conversion_options WHERE book=OLD.id;
            END;
        CREATE TRIGGER books_insert_trg
            AFTER INSERT ON books
            BEGIN
                UPDATE books SET sort=title_sort(NEW.title) WHERE id=NEW.id;
            END;
        CREATE TRIGGER books_update_trg
            AFTER UPDATE ON books
            BEGIN
                UPDATE books SET sort=title_sort(NEW.title) WHERE id=NEW.id;
            END;

        UPDATE books SET sort=title_sort(title) WHERE sort IS NULL;
        '''
        )


    def upgrade_version_6(self):
        'Show authors in order'
        self.conn.execute('''
        DROP VIEW IF EXISTS meta;
        CREATE VIEW meta AS
        SELECT id, title,
            (SELECT sortconcat(bal.id, name) FROM books_authors_link AS bal JOIN authors ON(author = authors.id) WHERE book = books.id) authors,
            (SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
            (SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
            timestamp,
            (SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
            (SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
            (SELECT text FROM comments WHERE book=books.id) comments,
            (SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
            series_index,
            sort,
            author_sort,
            (SELECT concat(format) FROM data WHERE data.book=books.id) formats,
            isbn,
            path,
            lccn,
            pubdate,
            flags
        FROM books;
        ''')

    def upgrade_version_7(self):
        'Add uuid column'
        self.conn.execute('''
        ALTER TABLE books ADD COLUMN uuid TEXT;
        DROP TRIGGER IF EXISTS books_insert_trg;
        DROP TRIGGER IF EXISTS books_update_trg;
        UPDATE books SET uuid=uuid4();

        CREATE TRIGGER books_insert_trg AFTER INSERT ON books
        BEGIN
            UPDATE books SET sort=title_sort(NEW.title),uuid=uuid4() WHERE id=NEW.id;
        END;

        CREATE TRIGGER books_update_trg AFTER UPDATE ON books
        BEGIN
            UPDATE books SET sort=title_sort(NEW.title) WHERE id=NEW.id;
        END;

        DROP VIEW IF EXISTS meta;
        CREATE VIEW meta AS
        SELECT id, title,
            (SELECT sortconcat(bal.id, name) FROM books_authors_link AS bal JOIN authors ON(author = authors.id) WHERE book = books.id) authors,
            (SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
            (SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
            timestamp,
            (SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
            (SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
            (SELECT text FROM comments WHERE book=books.id) comments,
            (SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
            series_index,
            sort,
            author_sort,
            (SELECT concat(format) FROM data WHERE data.book=books.id) formats,
            isbn,
            path,
            lccn,
            pubdate,
            flags,
            uuid
        FROM books;
        ''')

    def upgrade_version_8(self):
        'Add Tag Browser views'
        def create_tag_browser_view(table_name, column_name):
            self.conn.execute('''
                DROP VIEW IF EXISTS tag_browser_{tn};
                CREATE VIEW tag_browser_{tn} AS SELECT
                    id,
                    name,
                    (SELECT COUNT(id) FROM books_{tn}_link WHERE {cn}={tn}.id) count
                FROM {tn};
                '''.format(tn=table_name, cn=column_name))

        for tn in ('authors', 'tags', 'publishers', 'series'):
            cn = tn[:-1]
            if tn == 'series':
                cn = tn
            create_tag_browser_view(tn, cn)

    def upgrade_version_9(self):
        'Add custom columns'
        self.conn.execute('''
                CREATE TABLE custom_columns (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    label TEXT NOT NULL,
                    name TEXT NOT NULL,
                    datatype TEXT NOT NULL,
                    mark_for_delete BOOL DEFAULT 0 NOT NULL,
                    editable BOOL DEFAULT 1 NOT NULL,
                    display TEXT DEFAULT "{}" NOT NULL,
                    is_multiple BOOL DEFAULT 0 NOT NULL,
                    normalized BOOL NOT NULL,
                    UNIQUE(label)
                );
                CREATE INDEX IF NOT EXISTS custom_columns_idx ON custom_columns (label);
                CREATE INDEX IF NOT EXISTS formats_idx ON data (format);
        ''')

    def upgrade_version_10(self):
        'Add restricted Tag Browser views'
        def create_tag_browser_view(table_name, column_name, view_column_name):
            script = ('''
                DROP VIEW IF EXISTS tag_browser_{tn};
                CREATE VIEW tag_browser_{tn} AS SELECT
                    id,
                    {vcn},
                    (SELECT COUNT(id) FROM books_{tn}_link WHERE {cn}={tn}.id) count
                FROM {tn};
                DROP VIEW IF EXISTS tag_browser_filtered_{tn};
                CREATE VIEW tag_browser_filtered_{tn} AS SELECT
                    id,
                    {vcn},
                    (SELECT COUNT(books_{tn}_link.id) FROM books_{tn}_link WHERE
                        {cn}={tn}.id AND books_list_filter(book)) count
                FROM {tn};
                '''.format(tn=table_name, cn=column_name, vcn=view_column_name))
            self.conn.execute(script)

        for field in self.field_metadata.itervalues():
            if field['is_category'] and not field['is_custom'] and 'link_column' in field:
                table = self.conn.get(
                    'SELECT name FROM sqlite_master WHERE type="table" AND name=?',
                    ('books_%s_link'%field['table'],), all=False)
                if table is not None:
                    create_tag_browser_view(field['table'], field['link_column'], field['column'])

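The books_list_filter(book) call in the filtered views above refers to the dummy scalar function registered on the Connection earlier in this commit; an illustrative sketch of how a real filter would behave (the allowed ids here are hypothetical):

    # Illustrative sketch, not part of this commit:
    allowed = set([1, 3, 5])  # hypothetical subset of book ids
    conn.createscalarfunction('books_list_filter',
            lambda book_id: 1 if book_id in allowed else 0, 1)
    # The counts in tag_browser_filtered_tags now consider only links
    # to books 1, 3 and 5.
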
    def upgrade_version_11(self):
        'Add average rating to tag browser views'
        def create_std_tag_browser_view(table_name, column_name,
                view_column_name, sort_column_name):
            script = ('''
                DROP VIEW IF EXISTS tag_browser_{tn};
                CREATE VIEW tag_browser_{tn} AS SELECT
                    id,
                    {vcn},
                    (SELECT COUNT(id) FROM books_{tn}_link WHERE {cn}={tn}.id) count,
                    (SELECT AVG(ratings.rating)
                     FROM books_{tn}_link AS tl, books_ratings_link AS bl, ratings
                     WHERE tl.{cn}={tn}.id AND bl.book=tl.book AND
                     ratings.id = bl.rating AND ratings.rating <> 0) avg_rating,
                    {scn} AS sort
                FROM {tn};
                DROP VIEW IF EXISTS tag_browser_filtered_{tn};
                CREATE VIEW tag_browser_filtered_{tn} AS SELECT
                    id,
                    {vcn},
                    (SELECT COUNT(books_{tn}_link.id) FROM books_{tn}_link WHERE
                        {cn}={tn}.id AND books_list_filter(book)) count,
                    (SELECT AVG(ratings.rating)
                     FROM books_{tn}_link AS tl, books_ratings_link AS bl, ratings
                     WHERE tl.{cn}={tn}.id AND bl.book=tl.book AND
                     ratings.id = bl.rating AND ratings.rating <> 0 AND
                     books_list_filter(bl.book)) avg_rating,
                    {scn} AS sort
                FROM {tn};

                '''.format(tn=table_name, cn=column_name,
                    vcn=view_column_name, scn= sort_column_name))
            self.conn.execute(script)

        def create_cust_tag_browser_view(table_name, link_table_name):
            script = '''
                DROP VIEW IF EXISTS tag_browser_{table};
                CREATE VIEW tag_browser_{table} AS SELECT
                    id,
                    value,
                    (SELECT COUNT(id) FROM {lt} WHERE value={table}.id) count,
                    (SELECT AVG(r.rating)
                     FROM {lt},
                          books_ratings_link AS bl,
                          ratings AS r
                     WHERE {lt}.value={table}.id AND bl.book={lt}.book AND
                           r.id = bl.rating AND r.rating <> 0) avg_rating,
                    value AS sort
                FROM {table};

                DROP VIEW IF EXISTS tag_browser_filtered_{table};
                CREATE VIEW tag_browser_filtered_{table} AS SELECT
                    id,
                    value,
                    (SELECT COUNT({lt}.id) FROM {lt} WHERE value={table}.id AND
                    books_list_filter(book)) count,
                    (SELECT AVG(r.rating)
                     FROM {lt},
                          books_ratings_link AS bl,
                          ratings AS r
                     WHERE {lt}.value={table}.id AND bl.book={lt}.book AND
                           r.id = bl.rating AND r.rating <> 0 AND
                           books_list_filter(bl.book)) avg_rating,
                    value AS sort
                FROM {table};
                '''.format(lt=link_table_name, table=table_name)
            self.conn.execute(script)

        for field in self.field_metadata.itervalues():
            if field['is_category'] and not field['is_custom'] and 'link_column' in field:
                table = self.conn.get(
                    'SELECT name FROM sqlite_master WHERE type="table" AND name=?',
                    ('books_%s_link'%field['table'],), all=False)
                if table is not None:
                    create_std_tag_browser_view(field['table'], field['link_column'],
                            field['column'], field['category_sort'])

        db_tables = self.conn.get('''SELECT name FROM sqlite_master
                                     WHERE type='table'
                                     ORDER BY name''')
        tables = []
        for (table,) in db_tables:
            tables.append(table)
        for table in tables:
            link_table = 'books_%s_link'%table
            if table.startswith('custom_column_') and link_table in tables:
                create_cust_tag_browser_view(table, link_table)

        self.conn.execute('UPDATE authors SET sort=author_to_author_sort(name)')

    def upgrade_version_12(self):
        'DB based preference store'
        script = '''
        DROP TABLE IF EXISTS preferences;
        CREATE TABLE preferences(id INTEGER PRIMARY KEY,
                                 key TEXT NON NULL,
                                 val TEXT NON NULL,
                                 UNIQUE(key));
        '''
        self.conn.execute(script)

    def upgrade_version_13(self):
        'Dirtied table for OPF metadata backups'
        script = '''
        DROP TABLE IF EXISTS metadata_dirtied;
        CREATE TABLE metadata_dirtied(id INTEGER PRIMARY KEY,
                             book INTEGER NOT NULL,
                             UNIQUE(book));
        INSERT INTO metadata_dirtied (book) SELECT id FROM books;
        '''
        self.conn.execute(script)

    def upgrade_version_14(self):
        'Cache has_cover'
        self.conn.execute('ALTER TABLE books ADD COLUMN has_cover BOOL DEFAULT 0')
        data = self.conn.get('SELECT id,path FROM books', all=True)
        def has_cover(path):
            if path:
                path = os.path.join(self.library_path, path.replace('/', os.sep),
                        'cover.jpg')
                return os.path.exists(path)
            return False

        ids = [(x[0],) for x in data if has_cover(x[1])]
        self.conn.executemany('UPDATE books SET has_cover=1 WHERE id=?', ids)

    def upgrade_version_15(self):
        'Remove commas from tags'
        self.conn.execute("UPDATE OR IGNORE tags SET name=REPLACE(name, ',', ';')")
        self.conn.execute("UPDATE OR IGNORE tags SET name=REPLACE(name, ',', ';;')")
        self.conn.execute("UPDATE OR IGNORE tags SET name=REPLACE(name, ',', '')")

    def upgrade_version_16(self):
        self.conn.execute('''
        DROP TRIGGER IF EXISTS books_update_trg;
        CREATE TRIGGER books_update_trg
            AFTER UPDATE ON books
            BEGIN
            UPDATE books SET sort=title_sort(NEW.title)
                         WHERE id=NEW.id AND OLD.title <> NEW.title;
            END;
        ''')

    def upgrade_version_17(self):
        'custom book data table (for plugins)'
        script = '''
        DROP TABLE IF EXISTS books_plugin_data;
        CREATE TABLE books_plugin_data(id INTEGER PRIMARY KEY,
                                     book INTEGER NON NULL,
                                     name TEXT NON NULL,
                                     val TEXT NON NULL,
                                     UNIQUE(book,name));
        DROP TRIGGER IF EXISTS books_delete_trg;
        CREATE TRIGGER books_delete_trg
            AFTER DELETE ON books
            BEGIN
                DELETE FROM books_authors_link WHERE book=OLD.id;
                DELETE FROM books_publishers_link WHERE book=OLD.id;
                DELETE FROM books_ratings_link WHERE book=OLD.id;
                DELETE FROM books_series_link WHERE book=OLD.id;
                DELETE FROM books_tags_link WHERE book=OLD.id;
                DELETE FROM data WHERE book=OLD.id;
                DELETE FROM comments WHERE book=OLD.id;
                DELETE FROM conversion_options WHERE book=OLD.id;
                DELETE FROM books_plugin_data WHERE book=OLD.id;
            END;
        '''
        self.conn.execute(script)

    def upgrade_version_18(self):
        '''
        Add a library UUID.
        Add an identifiers table.
        Add a languages table.
        Add a last_modified column.
        NOTE: You cannot downgrade after this update; if you do,
        any changes you make to book isbns will be lost.
        '''
        script = '''
        DROP TABLE IF EXISTS library_id;
        CREATE TABLE library_id ( id INTEGER PRIMARY KEY,
                                  uuid TEXT NOT NULL,
                                  UNIQUE(uuid)
        );

        DROP TABLE IF EXISTS identifiers;
        CREATE TABLE identifiers  ( id INTEGER PRIMARY KEY,
                                    book INTEGER NON NULL,
                                    type TEXT NON NULL DEFAULT "isbn" COLLATE NOCASE,
                                    val TEXT NON NULL COLLATE NOCASE,
                                    UNIQUE(book, type)
        );

        DROP TABLE IF EXISTS languages;
        CREATE TABLE languages    ( id INTEGER PRIMARY KEY,
                                    lang_code TEXT NON NULL COLLATE NOCASE,
                                    UNIQUE(lang_code)
        );

        DROP TABLE IF EXISTS books_languages_link;
        CREATE TABLE books_languages_link ( id INTEGER PRIMARY KEY,
                                            book INTEGER NOT NULL,
                                            lang_code INTEGER NOT NULL,
                                            item_order INTEGER NOT NULL DEFAULT 0,
                                            UNIQUE(book, lang_code)
        );

        DROP TRIGGER IF EXISTS fkc_delete_on_languages;
        CREATE TRIGGER fkc_delete_on_languages
                BEFORE DELETE ON languages
                BEGIN
                    SELECT CASE
                        WHEN (SELECT COUNT(id) FROM books_languages_link WHERE lang_code=OLD.id) > 0
                        THEN RAISE(ABORT, 'Foreign key violation: language is still referenced')
                    END;
                END;

        DROP TRIGGER IF EXISTS fkc_delete_on_languages_link;
        CREATE TRIGGER fkc_delete_on_languages_link
                BEFORE INSERT ON books_languages_link
                BEGIN
                    SELECT CASE
                        WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
                        THEN RAISE(ABORT, 'Foreign key violation: book not in books')
                        WHEN (SELECT id from languages WHERE id=NEW.lang_code) IS NULL
                        THEN RAISE(ABORT, 'Foreign key violation: lang_code not in languages')
                    END;
                END;

        DROP TRIGGER IF EXISTS fkc_update_books_languages_link_a;
        CREATE TRIGGER fkc_update_books_languages_link_a
                BEFORE UPDATE OF book ON books_languages_link
                BEGIN
                    SELECT CASE
                        WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
                        THEN RAISE(ABORT, 'Foreign key violation: book not in books')
                    END;
                END;
        DROP TRIGGER IF EXISTS fkc_update_books_languages_link_b;
        CREATE TRIGGER fkc_update_books_languages_link_b
                BEFORE UPDATE OF lang_code ON books_languages_link
                BEGIN
                    SELECT CASE
                        WHEN (SELECT id from languages WHERE id=NEW.lang_code) IS NULL
                        THEN RAISE(ABORT, 'Foreign key violation: lang_code not in languages')
                    END;
                END;

        DROP INDEX IF EXISTS books_languages_link_aidx;
        CREATE INDEX books_languages_link_aidx ON books_languages_link (lang_code);
        DROP INDEX IF EXISTS books_languages_link_bidx;
        CREATE INDEX books_languages_link_bidx ON books_languages_link (book);
        DROP INDEX IF EXISTS languages_idx;
        CREATE INDEX languages_idx ON languages (lang_code COLLATE NOCASE);

        DROP TRIGGER IF EXISTS books_delete_trg;
        CREATE TRIGGER books_delete_trg
            AFTER DELETE ON books
            BEGIN
                DELETE FROM books_authors_link WHERE book=OLD.id;
                DELETE FROM books_publishers_link WHERE book=OLD.id;
                DELETE FROM books_ratings_link WHERE book=OLD.id;
                DELETE FROM books_series_link WHERE book=OLD.id;
                DELETE FROM books_tags_link WHERE book=OLD.id;
                DELETE FROM books_languages_link WHERE book=OLD.id;
                DELETE FROM data WHERE book=OLD.id;
                DELETE FROM comments WHERE book=OLD.id;
                DELETE FROM conversion_options WHERE book=OLD.id;
                DELETE FROM books_plugin_data WHERE book=OLD.id;
                DELETE FROM identifiers WHERE book=OLD.id;
            END;

        INSERT INTO identifiers (book, val) SELECT id,isbn FROM books WHERE isbn;

        ALTER TABLE books ADD COLUMN last_modified TIMESTAMP NOT NULL DEFAULT "%s";

        '''%isoformat(DEFAULT_DATE, sep=' ')
        # Sqlite does not support non constant default values in alter
        # statements
        self.conn.execute(script)

    def upgrade_version_19(self):
        recipes = self.conn.get('SELECT id,title,script FROM feeds')
        if recipes:
            from calibre.web.feeds.recipes import (custom_recipes,
                    custom_recipe_filename)
            bdir = os.path.dirname(custom_recipes.file_path)
            for id_, title, script in recipes:
                existing = frozenset(map(int, custom_recipes.iterkeys()))
                if id_ in existing:
                    id_ = max(existing) + 1000
                id_ = str(id_)
                fname = custom_recipe_filename(id_, title)
                custom_recipes[id_] = (title, fname)
                if isinstance(script, unicode):
                    script = script.encode('utf-8')
                with open(os.path.join(bdir, fname), 'wb') as f:
                    f.write(script)

    def upgrade_version_20(self):
        '''
        Add a link column to the authors table.
        '''

        script = '''
        ALTER TABLE authors ADD COLUMN link TEXT NOT NULL DEFAULT "";
        '''
        self.conn.execute(script)

@@ -35,8 +35,8 @@ class Table(object):
    def __init__(self, name, metadata, link_table=None):
        self.name, self.metadata = name, metadata

        # self.adapt() maps values from the db to python objects
        self.adapt = \
        # self.unserialize() maps values from the db to python objects
        self.unserialize = \
            {
                'datetime': _c_convert_timestamp,
                'bool': bool
@@ -44,7 +44,7 @@ class Table(object):
            metadata['datatype'], lambda x: x)
        if name == 'authors':
            # Legacy
            self.adapt = lambda x: x.replace('|', ',') if x else None
            self.unserialize = lambda x: x.replace('|', ',') if x else None

        self.link_table = (link_table if link_table else
                'books_%s_link'%self.metadata['table'])
@@ -62,7 +62,7 @@ class OneToOneTable(Table):
        idcol = 'id' if self.metadata['table'] == 'books' else 'book'
        for row in db.conn.execute('SELECT {0}, {1} FROM {2}'.format(idcol,
            self.metadata['column'], self.metadata['table'])):
            self.book_col_map[row[0]] = self.adapt(row[1])
            self.book_col_map[row[0]] = self.unserialize(row[1])

class SizeTable(OneToOneTable):

@@ -71,7 +71,7 @@ class SizeTable(OneToOneTable):
        for row in db.conn.execute(
                'SELECT books.id, (SELECT MAX(uncompressed_size) FROM data '
                'WHERE data.book=books.id) FROM books'):
            self.book_col_map[row[0]] = self.adapt(row[1])
            self.book_col_map[row[0]] = self.unserialize(row[1])

class ManyToOneTable(Table):

@@ -92,9 +92,9 @@ class ManyToOneTable(Table):

    def read_id_maps(self, db):
        for row in db.conn.execute('SELECT id, {0} FROM {1}'.format(
                self.metadata['name'], self.metadata['table'])):
                self.metadata['column'], self.metadata['table'])):
            if row[1]:
                self.id_map[row[0]] = self.adapt(row[1])
                self.id_map[row[0]] = self.unserialize(row[1])

    def read_maps(self, db):
        for row in db.conn.execute(
@@ -102,7 +102,7 @@ class ManyToOneTable(Table):
                self.metadata['link_column'], self.link_table)):
            if row[1] not in self.col_book_map:
                self.col_book_map[row[1]] = []
            self.col_book_map.append(row[0])
            self.col_book_map[row[1]].append(row[0])
            self.book_col_map[row[0]] = row[1]

class ManyToManyTable(ManyToOneTable):
@@ -119,7 +119,7 @@ class ManyToManyTable(ManyToOneTable):
                self.metadata['link_column'], self.link_table)):
            if row[1] not in self.col_book_map:
                self.col_book_map[row[1]] = []
            self.col_book_map.append(row[0])
            self.col_book_map[row[1]].append(row[0])
            if row[0] not in self.book_col_map:
                self.book_col_map[row[0]] = []
            self.book_col_map[row[0]].append(row[1])
@@ -145,7 +145,7 @@ class FormatsTable(ManyToManyTable):
            if row[1] is not None:
                if row[1] not in self.col_book_map:
                    self.col_book_map[row[1]] = []
                self.col_book_map.append(row[0])
                self.col_book_map[row[1]].append(row[0])
                if row[0] not in self.book_col_map:
                    self.book_col_map[row[0]] = []
                self.book_col_map[row[0]].append((row[1], row[2]))
@@ -160,7 +160,7 @@ class IdentifiersTable(ManyToManyTable):
            if row[1] is not None and row[2] is not None:
                if row[1] not in self.col_book_map:
                    self.col_book_map[row[1]] = []
                self.col_book_map.append(row[0])
                self.col_book_map[row[1]].append(row[0])
                if row[0] not in self.book_col_map:
                    self.book_col_map[row[0]] = []
                self.book_col_map[row[0]].append((row[1], row[2]))
@@ -24,12 +24,12 @@ class ANDROID(USBMS):
            0xff9 : [0x0100, 0x0227, 0x0226],
            0xc87 : [0x0100, 0x0227, 0x0226],
            0xc91 : [0x0100, 0x0227, 0x0226],
-            0xc92 : [0x100],
-            0xc97 : [0x226],
-            0xc99 : [0x0100],
-            0xca2 : [0x226],
-            0xca3 : [0x100],
-            0xca4 : [0x226],
+            0xc92 : [0x100, 0x0227, 0x0226, 0x222],
+            0xc97 : [0x100, 0x0227, 0x0226, 0x222],
+            0xc99 : [0x100, 0x0227, 0x0226, 0x222],
+            0xca2 : [0x100, 0x0227, 0x0226, 0x222],
+            0xca3 : [0x100, 0x0227, 0x0226, 0x222],
+            0xca4 : [0x100, 0x0227, 0x0226, 0x222],
        },

        # Eken
@@ -72,7 +72,8 @@ class ANDROID(USBMS):
        0x413c : { 0xb007 : [0x0100, 0x0224, 0x0226]},

        # LG
-        0x1004 : { 0x61cc : [0x100], 0x61ce : [0x100], 0x618e : [0x226] },
+        0x1004 : { 0x61cc : [0x100], 0x61ce : [0x100], 0x618e : [0x226,
+            0x9999] },

        # Archos
        0x0e79 : {
@@ -123,11 +124,11 @@ class ANDROID(USBMS):
            'IDEOS_TABLET', 'MYTOUCH_4G', 'UMS_COMPOSITE', 'SCH-I800_CARD',
            '7', 'A956', 'A955', 'A43', 'ANDROID_PLATFORM', 'TEGRA_2',
            'MB860', 'MULTI-CARD', 'MID7015A', 'INCREDIBLE', 'A7EB', 'STREAK',
-            'MB525', 'ANDROID2.3']
+            'MB525', 'ANDROID2.3', 'SGH-I997']
    WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
            'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
            'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD',
-            '__UMS_COMPOSITE']
+            '__UMS_COMPOSITE', 'SGH-I997_CARD']

    OSX_MAIN_MEM = 'Android Device Main Memory'

@@ -107,6 +107,7 @@ class DriverBase(DeviceConfig, DevicePlugin):
    FORMATS = ['epub', 'pdf']
    USER_CAN_ADD_NEW_FORMATS = False
    KEEP_TEMP_FILES_AFTER_UPLOAD = True
+    CAN_DO_DEVICE_DB_PLUGBOARD = True

    # Hide the standard customization widgets
    SUPPORTS_SUB_DIRS = False
@@ -445,7 +446,8 @@ class ITUNES(DriverBase):
                    }

                if self.report_progress is not None:
-                    self.report_progress((i+1)/book_count, _('%d of %d') % (i+1, book_count))
+                    self.report_progress((i+1)/book_count,
+                            _('%(num)d of %(tot)d') % dict(num=i+1, tot=book_count))
            self._purge_orphans(library_books, cached_books)

        elif iswindows:
@@ -484,7 +486,8 @@ class ITUNES(DriverBase):

                    if self.report_progress is not None:
                        self.report_progress((i+1)/book_count,
-                                _('%d of %d') % (i+1, book_count))
+                                _('%(num)d of %(tot)d') % dict(num=i+1,
+                                    tot=book_count))
                self._purge_orphans(library_books, cached_books)

            finally:
@@ -1074,7 +1077,8 @@ class ITUNES(DriverBase):

                # Report progress
                if self.report_progress is not None:
-                    self.report_progress((i+1)/file_count, _('%d of %d') % (i+1, file_count))
+                    self.report_progress((i+1)/file_count,
+                            _('%(num)d of %(tot)d') % dict(num=i+1, tot=file_count))

        elif iswindows:
            try:
@@ -1117,7 +1121,8 @@ class ITUNES(DriverBase):

                    # Report progress
                    if self.report_progress is not None:
-                        self.report_progress((i+1)/file_count, _('%d of %d') % (i+1, file_count))
+                        self.report_progress((i+1)/file_count,
+                                _('%(num)d of %(tot)d') % dict(num=i+1, tot=file_count))
            finally:
                pythoncom.CoUninitialize()

@@ -3106,7 +3111,8 @@ class ITUNES_ASYNC(ITUNES):
                    }

                if self.report_progress is not None:
-                    self.report_progress((i+1)/book_count, _('%d of %d') % (i+1, book_count))
+                    self.report_progress((i+1)/book_count,
+                            _('%(num)d of %(tot)d') % dict(num=i+1, tot=book_count))

        elif iswindows:
            try:
@@ -3146,7 +3152,8 @@ class ITUNES_ASYNC(ITUNES):

                    if self.report_progress is not None:
                        self.report_progress((i+1)/book_count,
-                                _('%d of %d') % (i+1, book_count))
+                                _('%(num)d of %(tot)d') % dict(num=i+1,
+                                    tot=book_count))

            finally:
                pythoncom.CoUninitialize()
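A recurring change in this commit is rewriting translatable strings from positional %s/%d placeholders to named %(name)s placeholders. A small illustrative sketch of why, not calibre code: named fields let a translator reorder the sentence, which positional arguments forbid.

from gettext import gettext as _

num, tot = 3, 10
# A translation can move %(num)d and %(tot)d around freely; with
# '%d of %d' the argument order is fixed by the source language.
print(_('%(num)d of %(tot)d') % dict(num=num, tot=tot))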
@@ -49,6 +49,9 @@ class DevicePlugin(Plugin):
    #: Whether the metadata on books can be set via the GUI.
    CAN_SET_METADATA = ['title', 'authors', 'collections']

+    #: Whether the device can handle device_db metadata plugboards
+    CAN_DO_DEVICE_DB_PLUGBOARD = False
+
    # Set this to None if the books on the device are files that the GUI can
    # access in order to add the books from the device to the library
    BACKLOADING_ERROR_MESSAGE = _('Cannot get files from this device')
@@ -57,6 +57,7 @@ class KOBO(USBMS):
    def initialize(self):
        USBMS.initialize(self)
        self.book_class = Book
+        self.dbversion = 7

    def books(self, oncard=None, end_session=True):
        from calibre.ebooks.metadata.meta import path_to_ext
@@ -100,7 +101,7 @@ class KOBO(USBMS):
        for idx,b in enumerate(bl):
            bl_cache[b.lpath] = idx

-        def update_booklist(prefix, path, title, authors, mime, date, ContentType, ImageID, readstatus, MimeType, expired, favouritesindex):
+        def update_booklist(prefix, path, title, authors, mime, date, ContentType, ImageID, readstatus, MimeType, expired, favouritesindex, accessibility):
            changed = False
            try:
                lpath = path.partition(self.normalize_path(prefix))[2]
@@ -129,6 +130,10 @@ class KOBO(USBMS):
                    if favouritesindex == 1:
                        playlist_map[lpath].append('Shortlist')

+                    # Label Previews
+                    if accessibility == 6:
+                        playlist_map[lpath].append('Preview')
+
                    path = self.normalize_path(path)
                    # print "Normalized FileName: " + path

@@ -137,7 +142,7 @@ class KOBO(USBMS):
                    bl_cache[lpath] = None
                    if ImageID is not None:
                        imagename = self.normalize_path(self._main_prefix + '.kobo/images/' + ImageID + ' - NickelBookCover.parsed')
                        if not os.path.exists(imagename):
                            # Try the Touch version if the image does not exist
                            imagename = self.normalize_path(self._main_prefix + '.kobo/images/' + ImageID + ' - N3_LIBRARY_FULL.parsed')

@@ -203,14 +208,35 @@ class KOBO(USBMS):
            result = cursor.fetchone()
            self.dbversion = result[0]

-            if self.dbversion >= 14:
-                query= 'select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' \
-                    'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex from content where BookID is Null'
+            debug_print("Database Version: ", self.dbversion)
+            if self.dbversion >= 16:
+                query= 'select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' \
+                    'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, Accessibility from content where ' \
+                    'BookID is Null and ( ___ExpirationStatus <> "3" or ___ExpirationStatus is Null)'
+            elif self.dbversion < 16 and self.dbversion >= 14:
+                query= 'select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' \
+                    'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, "-1" as Accessibility from content where ' \
+                    'BookID is Null and ( ___ExpirationStatus <> "3" or ___ExpirationStatus is Null)'
+            elif self.dbversion < 14 and self.dbversion >= 8:
+                query= 'select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' \
+                    'ImageID, ReadStatus, ___ExpirationStatus, "-1" as FavouritesIndex, "-1" as Accessibility from content where ' \
+                    'BookID is Null and ( ___ExpirationStatus <> "3" or ___ExpirationStatus is Null)'
            else:
                query= 'select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' \
-                    'ImageID, ReadStatus, ___ExpirationStatus, "-1" as FavouritesIndex from content where BookID is Null'
+                    'ImageID, ReadStatus, "-1" as ___ExpirationStatus, "-1" as FavouritesIndex, "-1" as Accessibility from content where BookID is Null'

-            cursor.execute (query)
+            try:
+                cursor.execute (query)
+            except Exception as e:
+                err = str(e)
+                if not ('___ExpirationStatus' in err or 'FavouritesIndex' in err or
+                        'Accessibility' in err):
+                    raise
+                query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
+                    'ImageID, ReadStatus, "-1" as ___ExpirationStatus, "-1" as '
+                    'FavouritesIndex, "-1" as Accessibility from content where '
+                    'BookID is Null')
+                cursor.execute(query)

            changed = False
            for i, row in enumerate(cursor):
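The try/except around cursor.execute above probes for columns that only newer Kobo firmware databases have, and falls back to a query that fakes them with constants. A self-contained sketch of the same pattern, using a toy schema rather than the real Kobo database:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE content (Title TEXT)')  # old schema, no Accessibility
conn.execute("INSERT INTO content VALUES ('A Book')")
try:
    rows = conn.execute('select Title, Accessibility from content')
except sqlite3.OperationalError as e:
    if 'Accessibility' not in str(e):
        raise
    # Missing column: substitute a constant, as the driver does
    rows = conn.execute('select Title, "-1" as Accessibility from content')
print(list(rows))  # [('A Book', '-1')]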
@@ -223,10 +249,10 @@ class KOBO(USBMS):
                # debug_print("mime:", mime)

                if oncard != 'carda' and oncard != 'cardb' and not row[3].startswith("file:///mnt/sd/"):
-                    changed = update_booklist(self._main_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7], row[4], row[8], row[9])
+                    changed = update_booklist(self._main_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7], row[4], row[8], row[9], row[10])
                    # print "shortbook: " + path
                elif oncard == 'carda' and row[3].startswith("file:///mnt/sd/"):
-                    changed = update_booklist(self._card_a_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7], row[4], row[8], row[9])
+                    changed = update_booklist(self._card_a_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7], row[4], row[8], row[9], row[10])

                if changed:
                    need_sync = True
@@ -294,8 +320,15 @@ class KOBO(USBMS):
        # Kobo does not delete the Book row (ie the row where the BookID is Null)
        # The next server sync should remove the row
        cursor.execute('delete from content where BookID = ?', t)
-        cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\', ___PercentRead=0, ___ExpirationStatus=3 ' \
-            'where BookID is Null and ContentID =?',t)
+        try:
+            cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\', ___PercentRead=0, ___ExpirationStatus=3 ' \
+                'where BookID is Null and ContentID =?',t)
+        except Exception as e:
+            if 'no such column' not in str(e):
+                raise
+            cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\', ___PercentRead=0 ' \
+                'where BookID is Null and ContentID =?',t)

        connection.commit()
@@ -532,7 +565,92 @@ class KOBO(USBMS):
            paths[source_id] = os.path.join(prefix, *(path.split('/')))
        return paths

+    def reset_readstatus(self, connection, oncard):
+        cursor = connection.cursor()
+
+        # Reset Im_Reading list in the database
+        if oncard == 'carda':
+            query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID like \'file:///mnt/sd/%\''
+        elif oncard != 'carda' and oncard != 'cardb':
+            query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID not like \'file:///mnt/sd/%\''
+
+        try:
+            cursor.execute (query)
+        except:
+            debug_print(' Database Exception: Unable to reset ReadStatus list')
+            raise
+        else:
+            connection.commit()
+            debug_print(' Commit: Reset ReadStatus list')
+
+        cursor.close()
+
+    def set_readstatus(self, connection, ContentID, ReadStatus):
+        cursor = connection.cursor()
+        t = (ContentID,)
+        cursor.execute('select DateLastRead from Content where BookID is Null and ContentID = ?', t)
+        result = cursor.fetchone()
+        if result is None:
+            datelastread = '1970-01-01T00:00:00'
+        else:
+            datelastread = result[0] if result[0] is not None else '1970-01-01T00:00:00'
+
+        t = (ReadStatus,datelastread,ContentID,)
+
+        try:
+            cursor.execute('update content set ReadStatus=?,FirstTimeReading=\'false\',DateLastRead=? where BookID is Null and ContentID = ?', t)
+        except:
+            debug_print(' Database Exception: Unable update ReadStatus')
+            raise
+        else:
+            connection.commit()
+            debug_print(' Commit: Setting ReadStatus List')
+        cursor.close()
+
+    def reset_favouritesindex(self, connection, oncard):
+        # Reset FavouritesIndex list in the database
+        if oncard == 'carda':
+            query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID like \'file:///mnt/sd/%\''
+        elif oncard != 'carda' and oncard != 'cardb':
+            query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID not like \'file:///mnt/sd/%\''
+
+        cursor = connection.cursor()
+        try:
+            cursor.execute (query)
+        except Exception as e:
+            debug_print(' Database Exception: Unable to reset Shortlist list')
+            if 'no such column' not in str(e):
+                raise
+        else:
+            connection.commit()
+            debug_print(' Commit: Reset FavouritesIndex list')
+
+    def set_favouritesindex(self, connection, ContentID):
+        cursor = connection.cursor()
+
+        t = (ContentID,)
+
+        try:
+            cursor.execute('update content set FavouritesIndex=1 where BookID is Null and ContentID = ?', t)
+        except Exception as e:
+            debug_print(' Database Exception: Unable set book as Shortlist')
+            if 'no such column' not in str(e):
+                raise
+        else:
+            connection.commit()
+            debug_print(' Commit: Set FavouritesIndex')
+
    def update_device_database_collections(self, booklists, collections_attributes, oncard):
+        # Define lists for the ReadStatus
+        readstatuslist = {
+            "Im_Reading":1,
+            "Read":2,
+            "Closed":3,
+        }
+
+        accessibilitylist = {
+            "Preview":6,
+        }
        # debug_print('Starting update_device_database_collections', collections_attributes)

        # Force collections_attributes to be 'tags' as no other is currently supported
@@ -551,188 +669,44 @@ class KOBO(USBMS):
            # return bytestrings if the content cannot the decoded as unicode
            connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")

-            cursor = connection.cursor()
-
            if collections:

+                # Need to reset the collections outside the particular loops
+                # otherwise the last item will not be removed
+                self.reset_readstatus(connection, oncard)
+                if self.dbversion >= 14:
+                    self.reset_favouritesindex(connection, oncard)
+
                # Process any collections that exist
                for category, books in collections.items():
-                    # debug_print (category)
-                    if category == 'Im_Reading':
-                        # Reset Im_Reading list in the database
-                        if oncard == 'carda':
-                            query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ReadStatus = 1 and ContentID like \'file:///mnt/sd/%\''
-                        elif oncard != 'carda' and oncard != 'cardb':
-                            query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ReadStatus = 1 and ContentID not like \'file:///mnt/sd/%\''
-
-                        try:
-                            cursor.execute (query)
-                        except:
-                            debug_print('Database Exception: Unable to reset Im_Reading list')
-                            raise
-                        else:
-                            # debug_print('Commit: Reset Im_Reading list')
-                            connection.commit()
-
+                    debug_print("Category: ", category, " id = ", readstatuslist.get(category))
                    for book in books:
                        # debug_print('Title:', book.title, 'lpath:', book.path)
-                        if 'Im_Reading' not in book.device_collections:
-                            book.device_collections.append('Im_Reading')
+                        debug_print(' Title:', book.title, 'category: ', category)
+                        if category not in book.device_collections:
+                            book.device_collections.append(category)

                        extension = os.path.splitext(book.path)[1]
                        ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(book.path)

                        ContentID = self.contentid_from_path(book.path, ContentType)

-                        t = (ContentID,)
-                        cursor.execute('select DateLastRead from Content where BookID is Null and ContentID = ?', t)
-                        result = cursor.fetchone()
-                        if result is None:
-                            datelastread = '1970-01-01T00:00:00'
-                        else:
-                            datelastread = result[0] if result[0] is not None else '1970-01-01T00:00:00'
-
-                        t = (datelastread,ContentID,)
-
-                        try:
-                            cursor.execute('update content set ReadStatus=1,FirstTimeReading=\'false\',DateLastRead=? where BookID is Null and ContentID = ?', t)
-                        except:
-                            debug_print('Database Exception: Unable create Im_Reading list')
-                            raise
-                        else:
-                            connection.commit()
-                            # debug_print('Database: Commit create Im_Reading list')
-                    if category == 'Read':
-                        # Reset Im_Reading list in the database
-                        if oncard == 'carda':
-                            query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ReadStatus = 2 and ContentID like \'file:///mnt/sd/%\''
-                        elif oncard != 'carda' and oncard != 'cardb':
-                            query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ReadStatus = 2 and ContentID not like \'file:///mnt/sd/%\''
-
-                        try:
-                            cursor.execute (query)
-                        except:
-                            debug_print('Database Exception: Unable to reset Im_Reading list')
-                            raise
-                        else:
-                            # debug_print('Commit: Reset Im_Reading list')
-                            connection.commit()
-
-                        for book in books:
-                            # debug_print('Title:', book.title, 'lpath:', book.path)
-                            if 'Read' not in book.device_collections:
-                                book.device_collections.append('Read')
-
-                            extension = os.path.splitext(book.path)[1]
-                            ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(book.path)
-
-                            ContentID = self.contentid_from_path(book.path, ContentType)
-                            # datelastread = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime())
-
-                            t = (ContentID,)
-
-                            try:
-                                cursor.execute('update content set ReadStatus=2,FirstTimeReading=\'true\' where BookID is Null and ContentID = ?', t)
-                            except:
-                                debug_print('Database Exception: Unable set book as Finished')
-                                raise
-                            else:
-                                connection.commit()
-                                # debug_print('Database: Commit set ReadStatus as Finished')
-                    if category == 'Closed':
-                        # Reset Im_Reading list in the database
-                        if oncard == 'carda':
-                            query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ReadStatus = 3 and ContentID like \'file:///mnt/sd/%\''
-                        elif oncard != 'carda' and oncard != 'cardb':
-                            query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ReadStatus = 3 and ContentID not like \'file:///mnt/sd/%\''
-
-                        try:
-                            cursor.execute (query)
-                        except:
-                            debug_print('Database Exception: Unable to reset Closed list')
-                            raise
-                        else:
-                            # debug_print('Commit: Reset Closed list')
-                            connection.commit()
-
-                        for book in books:
-                            # debug_print('Title:', book.title, 'lpath:', book.path)
-                            if 'Closed' not in book.device_collections:
-                                book.device_collections.append('Closed')
-
-                            extension = os.path.splitext(book.path)[1]
-                            ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(book.path)
-
-                            ContentID = self.contentid_from_path(book.path, ContentType)
-                            # datelastread = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime())
-
-                            t = (ContentID,)
-
-                            try:
-                                cursor.execute('update content set ReadStatus=3,FirstTimeReading=\'true\' where BookID is Null and ContentID = ?', t)
-                            except:
-                                debug_print('Database Exception: Unable set book as Closed')
-                                raise
-                            else:
-                                connection.commit()
-                                # debug_print('Database: Commit set ReadStatus as Closed')
-                    if category == 'Shortlist':
-                        # Reset FavouritesIndex list in the database
-                        if oncard == 'carda':
-                            query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID like \'file:///mnt/sd/%\''
-                        elif oncard != 'carda' and oncard != 'cardb':
-                            query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID not like \'file:///mnt/sd/%\''
-
-                        try:
-                            cursor.execute (query)
-                        except:
-                            debug_print('Database Exception: Unable to reset Shortlist list')
-                            raise
-                        else:
-                            # debug_print('Commit: Reset Shortlist list')
-                            connection.commit()
-
-                        for book in books:
-                            # debug_print('Title:', book.title, 'lpath:', book.path)
-                            if 'Shortlist' not in book.device_collections:
-                                book.device_collections.append('Shortlist')
-                                # debug_print ("Shortlist found for: ", book.title)
-                            extension = os.path.splitext(book.path)[1]
-                            ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(book.path)
-
-                            ContentID = self.contentid_from_path(book.path, ContentType)
-                            # datelastread = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime())
-
-                            t = (ContentID,)
-
-                            try:
-                                cursor.execute('update content set FavouritesIndex=1 where BookID is Null and ContentID = ?', t)
-                            except:
-                                debug_print('Database Exception: Unable set book as Shortlist')
-                                raise
-                            else:
-                                connection.commit()
-                                # debug_print('Database: Commit set Shortlist as Shortlist')
+                        if category in readstatuslist.keys():
+                            # Manage ReadStatus
+                            self.set_readstatus(connection, ContentID, readstatuslist.get(category))
+                        if category == 'Shortlist' and self.dbversion >= 14:
+                            # Manage FavouritesIndex/Shortlist
+                            self.set_favouritesindex(connection, ContentID)
+                        if category in accessibilitylist.keys():
+                            # Do not manage the Accessibility List
+                            pass
            else: # No collections
                # Since no collections exist the ReadStatus needs to be reset to 0 (Unread)
-                print "Reseting ReadStatus to 0"
-                # Reset Im_Reading list in the database
-                if oncard == 'carda':
-                    query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID like \'file:///mnt/sd/%\''
-                elif oncard != 'carda' and oncard != 'cardb':
-                    query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID not like \'file:///mnt/sd/%\''
+                debug_print("No Collections - reseting ReadStatus")
+                self.reset_readstatus(connection, oncard)
+                if self.dbversion >= 14:
+                    debug_print("No Collections - reseting FavouritesIndex")
+                    self.reset_favouritesindex(connection, oncard)

-                try:
-                    cursor.execute (query)
-                except:
-                    debug_print('Database Exception: Unable to reset Im_Reading list')
-                    raise
-                else:
-                    # debug_print('Commit: Reset Im_Reading list')
-                    connection.commit()
-
-            cursor.close()
            connection.close()

            # debug_print('Finished update_device_database_collections', collections_attributes)
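The net effect of the two Kobo hunks above is a dispatch-table refactor: four near-identical if-blocks collapse into lookup tables (readstatuslist, accessibilitylist) plus small helpers, so adding a new status becomes a one-line table change. A minimal sketch of the shape, where the handler is a stand-in rather than the driver's real method:

readstatuslist = {'Im_Reading': 1, 'Read': 2, 'Closed': 3}

def set_readstatus(content_id, status):
    # stand-in for the SQL update the driver performs
    print('%s -> ReadStatus %d' % (content_id, status))

for category in ('Read', 'Shortlist', 'Im_Reading'):
    if category in readstatuslist:
        set_readstatus('book-1', readstatuslist[category])
    # categories not in the table (e.g. 'Shortlist') fall through to
    # their own handlers, as in the driver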
@@ -28,6 +28,7 @@ class PRS505(USBMS):

    FORMATS = ['epub', 'lrf', 'lrx', 'rtf', 'pdf', 'txt']
    CAN_SET_METADATA = ['title', 'authors', 'collections']
+    CAN_DO_DEVICE_DB_PLUGBOARD = True

    VENDOR_ID = [0x054c] #: SONY Vendor Id
    PRODUCT_ID = [0x031e]
@@ -66,10 +67,10 @@ class PRS505(USBMS):
            _('Comma separated list of metadata fields '
            'to turn into collections on the device. Possibilities include: ')+\
                    'series, tags, authors' +\
-            _('. Two special collections are available: %s:%s and %s:%s. Add '
+            _('. Two special collections are available: %(abt)s:%(abtv)s and %(aba)s:%(abav)s. Add '
            'these values to the list to enable them. The collections will be '
-            'given the name provided after the ":" character.')%(
-                'abt', ALL_BY_TITLE, 'aba', ALL_BY_AUTHOR),
+            'given the name provided after the ":" character.')%dict(
+                abt='abt', abtv=ALL_BY_TITLE, aba='aba', abav=ALL_BY_AUTHOR),
            _('Upload separate cover thumbnails for books (newer readers)') +
            ':::'+_('Normally, the SONY readers get the cover image from the'
                ' ebook file itself. With this option, calibre will send a '
@@ -144,9 +144,9 @@ def add_pipeline_options(parser, plumber):

              'HEURISTIC PROCESSING' : (
                  _('Modify the document text and structure using common'
-                     ' patterns. Disabled by default. Use %s to enable. '
-                     ' Individual actions can be disabled with the %s options.')
-                   % ('--enable-heuristics', '--disable-*'),
+                     ' patterns. Disabled by default. Use %(en)s to enable. '
+                     ' Individual actions can be disabled with the %(dis)s options.')
+                   % dict(en='--enable-heuristics', dis='--disable-*'),
                  ['enable_heuristics'] + HEURISTIC_OPTIONS
                  ),

@@ -176,7 +176,7 @@ def add_pipeline_options(parser, plumber):
                [
                  'level1_toc', 'level2_toc', 'level3_toc',
                  'toc_threshold', 'max_toc_links', 'no_chapters_in_toc',
-                  'use_auto_toc', 'toc_filter',
+                  'use_auto_toc', 'toc_filter', 'duplicate_links_in_toc',
                ]
                ),

@@ -265,6 +265,14 @@ OptionRecommendation(name='toc_filter',
        )
       ),

+OptionRecommendation(name='duplicate_links_in_toc',
+        recommended_value=False, level=OptionRecommendation.LOW,
+        help=_('When creating a TOC from links in the input document, '
+            'allow duplicate entries, i.e. allow more than one entry '
+            'with the same text, provided that they point to a '
+            'different location.')
+        ),
+

OptionRecommendation(name='chapter',
    recommended_value="//*[((name()='h1' or name()='h2') and "
@@ -17,7 +17,8 @@ class ParseError(ValueError):
        self.name = name
        self.desc = desc
        ValueError.__init__(self,
-            _('Failed to parse: %s with error: %s')%(name, desc))
+            _('Failed to parse: %(name)s with error: %(err)s')%dict(
+                name=name, err=desc))

class ePubFixer(Plugin):

117 src/calibre/ebooks/html/to_zip.py Normal file
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
+from __future__ import (unicode_literals, division, absolute_import,
+                        print_function)
+
+__license__ = 'GPL v3'
+__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
+__docformat__ = 'restructuredtext en'
+
+import textwrap, os, glob
+
+from calibre.customize import FileTypePlugin
+from calibre.constants import numeric_version
+
+class HTML2ZIP(FileTypePlugin):
+    name = 'HTML to ZIP'
+    author = 'Kovid Goyal'
+    description = textwrap.dedent(_('''\
+Follow all local links in an HTML file and create a ZIP \
+file containing all linked files. This plugin is run \
+every time you add an HTML file to the library.\
+'''))
+    version = numeric_version
+    file_types = set(['html', 'htm', 'xhtml', 'xhtm', 'shtm', 'shtml'])
+    supported_platforms = ['windows', 'osx', 'linux']
+    on_import = True
+
+    def run(self, htmlfile):
+        from calibre.ptempfile import TemporaryDirectory
+        from calibre.gui2.convert.gui_conversion import gui_convert
+        from calibre.customize.conversion import OptionRecommendation
+        from calibre.ebooks.epub import initialize_container
+
+        with TemporaryDirectory('_plugin_html2zip') as tdir:
+            recs =[('debug_pipeline', tdir, OptionRecommendation.HIGH)]
+            recs.append(['keep_ligatures', True, OptionRecommendation.HIGH])
+            if self.site_customization and self.site_customization.strip():
+                sc = self.site_customization.strip()
+                enc, _, bf = sc.partition('|')
+                if enc:
+                    recs.append(['input_encoding', enc,
+                        OptionRecommendation.HIGH])
+                if bf == 'bf':
+                    recs.append(['breadth_first', True,
+                        OptionRecommendation.HIGH])
+            gui_convert(htmlfile, tdir, recs, abort_after_input_dump=True)
+            of = self.temporary_file('_plugin_html2zip.zip')
+            tdir = os.path.join(tdir, 'input')
+            opf = glob.glob(os.path.join(tdir, '*.opf'))[0]
+            ncx = glob.glob(os.path.join(tdir, '*.ncx'))
+            if ncx:
+                os.remove(ncx[0])
+            epub = initialize_container(of.name, os.path.basename(opf))
+            epub.add_dir(tdir)
+            epub.close()
+
+        return of.name
+
+    def customization_help(self, gui=False):
+        return _('Character encoding for the input HTML files. Common choices '
+        'include: cp1252, cp1251, latin1 and utf-8.')
+
+    def do_user_config(self, parent=None):
+        '''
+        This method shows a configuration dialog for this plugin. It returns
+        True if the user clicks OK, False otherwise. The changes are
+        automatically applied.
+        '''
+        from PyQt4.Qt import (QDialog, QDialogButtonBox, QVBoxLayout,
+                QLabel, Qt, QLineEdit, QCheckBox)
+
+        config_dialog = QDialog(parent)
+        button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
+        v = QVBoxLayout(config_dialog)
+
+        def size_dialog():
+            config_dialog.resize(config_dialog.sizeHint())
+
+        button_box.accepted.connect(config_dialog.accept)
+        button_box.rejected.connect(config_dialog.reject)
+        config_dialog.setWindowTitle(_('Customize') + ' ' + self.name)
+        from calibre.customize.ui import (plugin_customization,
+                customize_plugin)
+        help_text = self.customization_help(gui=True)
+        help_text = QLabel(help_text, config_dialog)
+        help_text.setWordWrap(True)
+        help_text.setTextInteractionFlags(Qt.LinksAccessibleByMouse
+                | Qt.LinksAccessibleByKeyboard)
+        help_text.setOpenExternalLinks(True)
+        v.addWidget(help_text)
+        bf = QCheckBox(_('Add linked files in breadth first order'))
+        bf.setToolTip(_('Normally, when following links in HTML files'
+            ' calibre does it depth first, i.e. if file A links to B and '
+            ' C, but B links to D, the files are added in the order A, B, D, C. '
+            ' With this option, they will instead be added as A, B, C, D'))
+        sc = plugin_customization(self)
+        if not sc:
+            sc = ''
+        sc = sc.strip()
+        enc = sc.partition('|')[0]
+        bfs = sc.partition('|')[-1]
+        bf.setChecked(bfs == 'bf')
+        sc = QLineEdit(enc, config_dialog)
+        v.addWidget(sc)
+        v.addWidget(bf)
+        v.addWidget(button_box)
+        size_dialog()
+        config_dialog.exec_()
+
+        if config_dialog.result() == QDialog.Accepted:
+            sc = unicode(sc.text()).strip()
+            if bf.isChecked():
+                sc += '|bf'
+            customize_plugin(self, sc)
+
+        return config_dialog.result()
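The plugin above packs both of its settings into the single site customization string, 'encoding|bf'. A short sketch of that round trip, with illustrative values:

sc = 'cp1251|bf'                  # as stored by do_user_config()
enc, _sep, bf = sc.partition('|')
breadth_first = (bf == 'bf')
print(enc, breadth_first)         # cp1251 True

# rebuilding the string after the dialog is accepted:
sc = enc
if breadth_first:
    sc += '|bf'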
@@ -20,7 +20,7 @@ class HTMLZInput(InputFormatPlugin):
    author              = 'John Schember'
    description         = 'Convert HTML files to HTML'
    file_types          = set(['htmlz'])

    def convert(self, stream, options, file_ext, log,
                accelerators):
        self.log = log
@@ -35,14 +35,14 @@ class HTMLZInput(InputFormatPlugin):
            with open(x, 'rb') as tf:
                html = tf.read()
                break

        # Encoding
        if options.input_encoding:
            ienc = options.input_encoding
        else:
            ienc = xml_to_unicode(html[:4096])[-1]
        html = html.decode(ienc, 'replace')

        # Run the HTML through the html processing plugin.
        from calibre.customize.ui import plugin_for_input_format
        html_input = plugin_for_input_format('html')
@@ -71,7 +71,7 @@ class HTMLZInput(InputFormatPlugin):
        from calibre.ebooks.oeb.transforms.metadata import meta_info_to_oeb_metadata
        mi = get_file_type_metadata(stream, file_ext)
        meta_info_to_oeb_metadata(mi, oeb.metadata, log)

        # Get the cover path from the OPF.
        cover_path = None
        opf = None
@@ -561,7 +561,9 @@ class HTMLConverter(object):
                para = children[i]
                break
        if para is None:
-            raise ConversionError(_('Failed to parse link %s %s')%(tag, children))
+            raise ConversionError(
+                _('Failed to parse link %(tag)s %(children)s')%dict(
+                    tag=tag, children=children))
        text = self.get_text(tag, 1000)
        if not text:
            text = 'Link'
@@ -954,7 +956,9 @@ class HTMLConverter(object):
                    self.scaled_images[path] = pt
                    return pt.name
        except (IOError, SystemError) as err: # PIL chokes on interlaced PNG images as well a some GIF images
-            self.log.warning(_('Unable to process image %s. Error: %s')%(path, err))
+            self.log.warning(
+                _('Unable to process image %(path)s. Error: %(err)s')%dict(
+                    path=path, err=err))

        if width == None or height == None:
            width, height = im.size
@@ -1014,7 +1018,7 @@ class HTMLConverter(object):
            try:
                self.images[path] = ImageStream(path, encoding=encoding)
            except LrsError as err:
-                self.log.warning(_('Could not process image: %s\n%s')%(
+                self.log.warning(('Could not process image: %s\n%s')%(
                    original_path, err))
                return

@@ -4,8 +4,9 @@ __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'

import sys, array, os, re, codecs, logging

-from calibre import setup_cli_handlers, sanitize_file_name
+from calibre import setup_cli_handlers
from calibre.utils.config import OptionParser
+from calibre.utils.filenames import ascii_filename
from calibre.ebooks.lrf.meta import LRFMetaFile
from calibre.ebooks.lrf.objects import get_object, PageTree, StyleObject, \
        Font, Text, TOCObject, BookAttr, ruby_tags
@@ -89,7 +90,7 @@ class LRFDocument(LRFMetaFile):
        bookinfo += u'<FreeText reading="">%s</FreeText>\n</BookInfo>\n<DocInfo>\n'%(self.metadata.free_text,)
        th = self.doc_info.thumbnail
        if th:
-            prefix = sanitize_file_name(self.metadata.title, as_unicode=True)
+            prefix = ascii_filename(self.metadata.title)
            bookinfo += u'<CThumbnail file="%s" />\n'%(prefix+'_thumbnail.'+self.doc_info.thumbnail_extension,)
            if write_files:
                open(prefix+'_thumbnail.'+self.doc_info.thumbnail_extension, 'wb').write(th)
@@ -529,8 +529,8 @@ class Metadata(object):
                for t in st.intersection(ot):
                    sidx = lstags.index(t)
                    oidx = lotags.index(t)
-                    self_tags[sidx] = other.tags[oidx]
-                self_tags += [t for t in other.tags if t.lower() in ot-st]
+                    self_tags[sidx] = other_tags[oidx]
+                self_tags += [t for t in other_tags if t.lower() in ot-st]
                setattr(self, x, self_tags)

            my_comments = getattr(self, 'comments', '')
@@ -742,7 +742,7 @@ class Metadata(object):
            ans += [('ISBN', unicode(self.isbn))]
        ans += [(_('Tags'), u', '.join([unicode(t) for t in self.tags]))]
        if self.series:
-            ans += [(_('Series'), unicode(self.series)+ ' #%s'%self.format_series_index())]
+            ans += [_('Series'), unicode(self.series) + ' #%s'%self.format_series_index()]
        ans += [(_('Language'), unicode(self.language))]
        if self.timestamp is not None:
            ans += [(_('Timestamp'), unicode(self.timestamp.isoformat(' ')))]
@@ -21,9 +21,9 @@ USAGE='%%prog ebook_file [' + _('options') + ']\n' + \
_('''
Read/Write metadata from/to ebook files.

-Supported formats for reading metadata: %s
+Supported formats for reading metadata: %(read)s

-Supported formats for writing metadata: %s
+Supported formats for writing metadata: %(write)s

Different file types support different kinds of metadata. If you try to set
some metadata on a file type that does not support it, the metadata will be
@@ -99,7 +99,7 @@ def option_parser():
    for w in metadata_writers():
        writers = writers.union(set(w.file_types))
    ft, w = ', '.join(sorted(filetypes())), ', '.join(sorted(writers))
-    return config().option_parser(USAGE%(ft, w))
+    return config().option_parser(USAGE%dict(read=ft, write=w))

def do_set_metadata(opts, mi, stream, stream_type):
    mi = MetaInformation(mi)
@@ -153,7 +153,8 @@ class Douban(Source):
    author = 'Li Fanxi'
    version = (2, 0, 0)

-    description = _('Downloads metadata and covers from Douban.com')
+    description = _('Downloads metadata and covers from Douban.com. '
+            'Useful only for chinese language books.')

    capabilities = frozenset(['identify', 'cover'])
    touched_fields = frozenset(['title', 'authors', 'tags',
@@ -19,7 +19,7 @@
from calibre.customize.ui import metadata_plugins, all_metadata_plugins
from calibre.ebooks.metadata.sources.base import create_log, msprefs
from calibre.ebooks.metadata.xisbn import xisbn
from calibre.ebooks.metadata.book.base import Metadata
-from calibre.utils.date import utc_tz
+from calibre.utils.date import utc_tz, as_utc
from calibre.utils.html2text import html2text
from calibre.utils.icu import lower

@@ -57,11 +57,34 @@ def is_worker_alive(workers):

# Merge results from different sources {{{

+class xISBN(Thread):
+
+    def __init__(self, isbn):
+        Thread.__init__(self)
+        self.isbn = isbn
+        self.isbns = frozenset()
+        self.min_year = None
+        self.daemon = True
+        self.exception = self.tb = None
+
+    def run(self):
+        try:
+            self.isbns, self.min_year = xisbn.get_isbn_pool(self.isbn)
+        except Exception as e:
+            import traceback
+            self.exception = e
+            self.tb = traceback.format_exception()
+
+
+
class ISBNMerge(object):

-    def __init__(self):
+    def __init__(self, log):
        self.pools = {}
        self.isbnless_results = []
        self.results = []
+        self.log = log
+        self.use_xisbn = True

    def isbn_in_pool(self, isbn):
        if isbn:
@@ -82,7 +105,20 @@ class ISBNMerge(object):
        if isbn:
            pool = self.isbn_in_pool(isbn)
            if pool is None:
-                isbns, min_year = xisbn.get_isbn_pool(isbn)
+                isbns = min_year = None
+                if self.use_xisbn:
+                    xw = xISBN(isbn)
+                    xw.start()
+                    xw.join(10)
+                    if xw.is_alive():
+                        self.log.error('Query to xISBN timed out')
+                        self.use_xisbn = False
+                    else:
+                        if xw.exception:
+                            self.log.error('Query to xISBN failed:')
+                            self.log.debug(xw.tb)
+                        else:
+                            isbns, min_year = xw.isbns, xw.min_year
+                if not isbns:
+                    isbns = frozenset([isbn])
                if isbns in self.pools:
@@ -102,15 +138,19 @@ class ISBNMerge(object):
            if results:
                has_isbn_result = True
                break
        self.has_isbn_result = has_isbn_result

+        isbn_sources = frozenset()
        if has_isbn_result:
-            self.merge_isbn_results()
-        else:
-            results = sorted(self.isbnless_results,
-                    key=attrgetter('relevance_in_source'))
+            isbn_sources = self.merge_isbn_results()
+
+        # Now handle results that have no ISBNs
+        results = sorted(self.isbnless_results,
+                key=attrgetter('relevance_in_source'))
+        # Only use results that are from sources that have not also returned a
+        # result with an ISBN
+        results = [r for r in results if r.identify_plugin not in isbn_sources]
+        if results:
            # Pick only the most relevant result from each source
            self.results = []
            seen = set()
            for result in results:
                if result.identify_plugin not in seen:
@@ -190,11 +230,15 @@ class ISBNMerge(object):

    def merge_isbn_results(self):
        self.results = []
+        sources = set()
        for min_year, results in self.pools.itervalues():
            if results:
+                for r in results:
+                    sources.add(r.identify_plugin)
                self.results.append(self.merge(results, min_year))

        self.results.sort(key=attrgetter('average_source_relevance'))
+        return sources

    def length_merge(self, attr, results, null_value=None, shortest=True):
        values = [getattr(x, attr) for x in results if not x.is_null(attr)]
@@ -254,13 +298,23 @@ class ISBNMerge(object):

        # Published date
        if min_year:
-            min_date = datetime(min_year, 1, 2, tzinfo=utc_tz)
            for r in results:
                year = getattr(r.pubdate, 'year', None)
                if year == min_year:
                    ans.pubdate = r.pubdate
                    break
+            if getattr(ans.pubdate, 'year', None) == min_year:
+                min_date = datetime(min_year, ans.pubdate.month, ans.pubdate.day)
+            else:
+                min_date = datetime(min_year, 1, 2, tzinfo=utc_tz)
+            ans.pubdate = min_date
        else:
            min_date = datetime(3001, 1, 1, tzinfo=utc_tz)
            for r in results:
-                if r.pubdate is not None and r.pubdate < min_date:
-                    min_date = r.pubdate
+                if r.pubdate is not None:
+                    candidate = as_utc(r.pubdate)
+                    if candidate < min_date:
+                        min_date = candidate
            if min_date.year < 3000:
                ans.pubdate = min_date

@@ -293,7 +347,7 @@ class ISBNMerge(object):


def merge_identify_results(result_map, log):
-    isbn_merge = ISBNMerge()
+    isbn_merge = ISBNMerge(log)
    for plugin, results in result_map.iteritems():
        for result in results:
            isbn_merge.add_result(result)
@@ -505,7 +559,7 @@ if __name__ == '__main__': # tests {{{
            # unknown to Amazon
            {'identifiers':{'isbn': '9780307459671'},
                'title':'Invisible Gorilla', 'authors':['Christopher Chabris']},
-            [title_test('The Invisible Gorilla', exact=True)]
+            [title_test('The Invisible Gorilla: And Other Ways Our Intuitions Deceive Us', exact=True)]

        ),

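The xISBN helper above is the standard recipe for bounding a slow network call with a thread: run it on a daemon thread, join with a timeout, and treat a still-alive thread as a timeout. A self-contained sketch of the pattern, where a sleep stands in for the web query:

import time
from threading import Thread

class Worker(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.result = None
        self.daemon = True      # do not keep the process alive on exit

    def run(self):
        time.sleep(0.1)         # stand-in for the network call
        self.result = frozenset(['9780307459671'])

w = Worker()
w.start()
w.join(10)                      # wait at most 10 seconds
if w.is_alive():
    print('query timed out; disabling this source')
else:
    print(w.result)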
@@ -95,9 +95,9 @@ class CoverManager(object):
        authors = [unicode(x) for x in m.creator if x.role == 'aut']
        series_string = None
        if m.series and m.series_index:
-            series_string = _('Book %s of %s')%(
-                    fmt_sidx(m.series_index[0], use_roman=True),
-                    unicode(m.series[0]))
+            series_string = _('Book %(sidx)s of %(series)s')%dict(
+                    sidx=fmt_sidx(m.series_index[0], use_roman=True),
+                    series=unicode(m.series[0]))

        try:
            from calibre.ebooks import calibre_cover
@@ -32,8 +32,8 @@ class SplitError(ValueError):
        size = len(tostring(root))/1024.
        ValueError.__init__(self,
            _('Could not find reasonable point at which to split: '
-                '%s Sub-tree size: %d KB')%
-                (path, size))
+                '%(path)s Sub-tree size: %(size)d KB')%dict(
+                    path=path, size=size))

class Split(object):

@@ -121,14 +121,16 @@ class DetectStructure(object):
                if not self.oeb.toc.has_href(href):
                    text = xml2text(a)
                    text = text[:100].strip()
-                    if not self.oeb.toc.has_text(text):
-                        num += 1
-                        self.oeb.toc.add(text, href,
-                            play_order=self.oeb.toc.next_play_order())
-                        if self.opts.max_toc_links > 0 and \
-                                num >= self.opts.max_toc_links:
-                            self.log('Maximum TOC links reached, stopping.')
-                            return
+                    if (not self.opts.duplicate_links_in_toc and
+                            self.oeb.toc.has_text(text)):
+                        continue
+                    num += 1
+                    self.oeb.toc.add(text, href,
+                        play_order=self.oeb.toc.next_play_order())
+                    if self.opts.max_toc_links > 0 and \
+                            num >= self.opts.max_toc_links:
+                        self.log('Maximum TOC links reached, stopping.')
+                        return

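The restructured loop above changes the duplicate rule: previously an entry whose text was already in the TOC was always skipped; now it is skipped only when duplicate_links_in_toc is off. A compact sketch of the new behaviour, with an illustrative function and data rather than the real OEB objects:

def build_toc(links, duplicate_links_in_toc):
    toc, seen = [], set()
    for text, href in links:
        if not duplicate_links_in_toc and text in seen:
            continue            # same text already present: drop it
        seen.add(text)
        toc.append((text, href))
    return toc

links = [('Notes', '#ch1-notes'), ('Notes', '#ch2-notes')]
print(build_toc(links, False))  # [('Notes', '#ch1-notes')]
print(build_toc(links, True))   # keeps both; they point at different places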
|
@ -7,6 +7,7 @@ __docformat__ = 'restructuredtext en'
|
||||
import sys, struct, zlib, bz2, os
|
||||
|
||||
from calibre import guess_type
|
||||
from calibre.utils.filenames import ascii_filename
|
||||
|
||||
class FileStream:
|
||||
def IsBinary(self):
|
||||
@ -156,6 +157,8 @@ class SNBFile:
|
||||
f.fileSize = os.path.getsize(os.path.join(tdir,fileName))
|
||||
f.fileBody = open(os.path.join(tdir,fileName), 'rb').read()
|
||||
f.fileName = fileName.replace(os.sep, '/')
|
||||
if isinstance(f.fileName, unicode):
|
||||
f.fileName = ascii_filename(f.fileName).encode('ascii')
|
||||
self.files.append(f)
|
||||
|
||||
def AppendBinary(self, fileName, tdir):
|
||||
@ -164,6 +167,8 @@ class SNBFile:
|
||||
f.fileSize = os.path.getsize(os.path.join(tdir,fileName))
|
||||
f.fileBody = open(os.path.join(tdir,fileName), 'rb').read()
|
||||
f.fileName = fileName.replace(os.sep, '/')
|
||||
if isinstance(f.fileName, unicode):
|
||||
f.fileName = ascii_filename(f.fileName).encode('ascii')
|
||||
self.files.append(f)
|
||||
|
||||
def GetFileStream(self, fileName):
|
||||
|
@@ -1,4 +1,4 @@
-# coding:utf8
+# coding:utf-8
__license__ = 'GPL 3'
__copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
__docformat__ = 'restructuredtext en'
@@ -74,6 +74,13 @@ gprefs.defaults['action-layout-context-menu-device'] = (
    'Add To Library', 'Edit Collections',
    )

+gprefs.defaults['action-layout-context-menu-cover-browser'] = (
+    'Edit Metadata', 'Send To Device', 'Save To Disk',
+    'Connect Share', 'Copy To Library', None,
+    'Convert Books', 'View', 'Open Folder', 'Show Book Details',
+    'Similar Books', 'Tweak ePub', None, 'Remove Books',
+    )
+
gprefs.defaults['show_splash_screen'] = True
gprefs.defaults['toolbar_icon_size'] = 'medium'
gprefs.defaults['automerge'] = 'ignore'
@@ -120,16 +120,16 @@ class FetchAnnotationsAction(InterfaceAction):
                    spanTag['style'] = 'font-weight:bold'
                    if bookmark.book_format == 'pdf':
                        spanTag.insert(0,NavigableString(
-                            _("%s<br />Last Page Read: %d (%d%%)") % \
-                                        (strftime(u'%x', timestamp.timetuple()),
-                                        last_read_location,
-                                        percent_read)))
+                            _("%(time)s<br />Last Page Read: %(loc)d (%(pr)d%%)") % \
+                                    dict(time=strftime(u'%x', timestamp.timetuple()),
+                                        loc=last_read_location,
+                                        pr=percent_read)))
                    else:
                        spanTag.insert(0,NavigableString(
-                            _("%s<br />Last Page Read: Location %d (%d%%)") % \
-                                        (strftime(u'%x', timestamp.timetuple()),
-                                        last_read_location,
-                                        percent_read)))
+                            _("%(time)s<br />Last Page Read: Location %(loc)d (%(pr)d%%)") % \
+                                    dict(time=strftime(u'%x', timestamp.timetuple()),
+                                        loc=last_read_location,
+                                        pr=percent_read)))

                    divTag.insert(dtc, spanTag)
                    dtc += 1
@@ -145,23 +145,23 @@ class FetchAnnotationsAction(InterfaceAction):
                for location in sorted(user_notes):
                    if user_notes[location]['text']:
                        annotations.append(
-                                _('<b>Location %d • %s</b><br />%s<br />') % \
-                                            (user_notes[location]['displayed_location'],
-                                             user_notes[location]['type'],
-                                             user_notes[location]['text'] if \
+                                _('<b>Location %(dl)d • %(typ)s</b><br />%(text)s<br />') % \
+                                        dict(dl=user_notes[location]['displayed_location'],
+                                            typ=user_notes[location]['type'],
+                                            text=(user_notes[location]['text'] if \
                                            user_notes[location]['type'] == 'Note' else \
-                                            '<i>%s</i>' % user_notes[location]['text']))
+                                            '<i>%s</i>' % user_notes[location]['text'])))
                    else:
                        if bookmark.book_format == 'pdf':
                            annotations.append(
-                                    _('<b>Page %d • %s</b><br />') % \
-                                                (user_notes[location]['displayed_location'],
-                                                 user_notes[location]['type']))
+                                    _('<b>Page %(dl)d • %(typ)s</b><br />') % \
+                                            dict(dl=user_notes[location]['displayed_location'],
+                                                typ=user_notes[location]['type']))
                        else:
                            annotations.append(
-                                    _('<b>Location %d • %s</b><br />') % \
-                                                (user_notes[location]['displayed_location'],
-                                                 user_notes[location]['type']))
+                                    _('<b>Location %(dl)d • %(typ)s</b><br />') % \
+                                            dict(dl=user_notes[location]['displayed_location'],
+                                                typ=user_notes[location]['type']))

                for annotation in annotations:
                    divTag.insert(dtc, annotation)
@@ -82,7 +82,8 @@ class GenerateCatalogAction(InterfaceAction):
        self.gui.sync_catalogs()
        if job.fmt not in ['EPUB','MOBI']:
            export_dir = choose_dir(self.gui, _('Export Catalog Directory'),
-                    _('Select destination for %s.%s') % (job.catalog_title, job.fmt.lower()))
+                    _('Select destination for %(title)s.%(fmt)s') % dict(
+                        title=job.catalog_title, fmt=job.fmt.lower()))
            if export_dir:
                destination = os.path.join(export_dir, '%s.%s' % (job.catalog_title, job.fmt.lower()))
                shutil.copyfile(job.catalog_file_path, destination)
@@ -160,8 +160,9 @@ class CopyToLibraryAction(InterfaceAction):
            error_dialog(self.gui, _('Failed'), _('Could not copy books: ') + e,
                    det_msg=tb, show=True)
        else:
-            self.gui.status_bar.show_message(_('Copied %d books to %s') %
-                    (len(ids), loc), 2000)
+            self.gui.status_bar.show_message(
+                    _('Copied %(num)d books to %(loc)s') %
+                    dict(num=len(ids), loc=loc), 2000)
            if delete_after and self.worker.processed:
                v = self.gui.library_view
                ci = v.currentIndex()
@@ -284,7 +284,7 @@ class EditMetadataAction(InterfaceAction):
        if not confirm('<p>'+_(
            'Book formats from the selected books will be merged '
            'into the <b>first selected book</b> (%s). '
-            'Metadata in the first selected book will not be changed.'
+            'Metadata in the first selected book will not be changed. '
            'Author, Title, ISBN and all other metadata will <i>not</i> be merged.<br><br>'
            'After merger the second and subsequently '
            'selected books, with any metadata they have will be <b>deleted</b>. <br><br>'
@@ -446,9 +446,8 @@ class EditMetadataAction(InterfaceAction):
        if d.result() == d.Accepted:
            to_rename = d.to_rename # dict of new text to old ids
            to_delete = d.to_delete # list of ids
-            for text in to_rename:
-                for old_id in to_rename[text]:
-                    model.rename_collection(old_id, new_name=unicode(text))
+            for old_id, new_name in to_rename.iteritems():
+                model.rename_collection(old_id, new_name=unicode(new_name))
            for item in to_delete:
                model.delete_collection_using_id(item)
            self.gui.upload_collections(model.db, view=view, oncard=oncard)
@@ -159,9 +159,9 @@ def render_data(mi, use_roman_numbers=True, all_fields=False):
            sidx = mi.get(field+'_index')
            if sidx is None:
                sidx = 1.0
-            val = _('Book %s of <span class="series_name">%s</span>')%(fmt_sidx(sidx,
-                use_roman=use_roman_numbers),
-                prepare_string_for_xml(getattr(mi, field)))
+            val = _('Book %(sidx)s of <span class="series_name">%(series)s</span>')%dict(
+                    sidx=fmt_sidx(sidx, use_roman=use_roman_numbers),
+                    series=prepare_string_for_xml(getattr(mi, field)))

            ans.append((field, u'<td class="title">%s</td><td>%s</td>'%(name, val)))

@@ -541,7 +541,8 @@ class BookDetails(QWidget): # {{{
        self.setToolTip(
            '<p>'+_('Double-click to open Book Details window') +
            '<br><br>' + _('Path') + ': ' + self.current_path +
-            '<br><br>' + _('Cover size: %dx%d')%(sz.width(), sz.height())
+            '<br><br>' + _('Cover size: %(width)d x %(height)d')%dict(
+                width=sz.width(), height=sz.height())
            )

    def reset_info(self):
@@ -22,7 +22,7 @@ class TOCWidget(Widget, Ui_Form):
        Widget.__init__(self, parent,
                ['level1_toc', 'level2_toc', 'level3_toc',
                 'toc_threshold', 'max_toc_links', 'no_chapters_in_toc',
-                 'use_auto_toc', 'toc_filter',
+                 'use_auto_toc', 'toc_filter', 'duplicate_links_in_toc',
                ]
                )
        self.db, self.book_id = db, book_id
@@ -21,7 +21,7 @@
       </property>
      </widget>
     </item>
-     <item row="2" column="0">
+     <item row="3" column="0">
      <widget class="QLabel" name="label_10">
       <property name="text">
        <string>Number of &amp;links to add to Table of Contents</string>
@@ -31,14 +31,14 @@
       </property>
      </widget>
     </item>
-     <item row="2" column="1">
+     <item row="3" column="1">
      <widget class="QSpinBox" name="opt_max_toc_links">
       <property name="maximum">
        <number>10000</number>
       </property>
      </widget>
     </item>
-     <item row="3" column="0">
+     <item row="4" column="0">
      <widget class="QLabel" name="label_16">
       <property name="text">
        <string>Chapter &amp;threshold</string>
@@ -48,7 +48,7 @@
       </property>
      </widget>
     </item>
-     <item row="3" column="1">
+     <item row="4" column="1">
      <widget class="QSpinBox" name="opt_toc_threshold"/>
     </item>
     <item row="0" column="0" colspan="2">
@@ -58,7 +58,7 @@
       </property>
      </widget>
     </item>
-     <item row="4" column="0">
+     <item row="5" column="0">
      <widget class="QLabel" name="label">
       <property name="text">
        <string>TOC &amp;Filter:</string>
@@ -68,19 +68,19 @@
       </property>
      </widget>
     </item>
-     <item row="4" column="1">
+     <item row="5" column="1">
      <widget class="QLineEdit" name="opt_toc_filter"/>
     </item>
-     <item row="5" column="0" colspan="2">
+     <item row="6" column="0" colspan="2">
      <widget class="XPathEdit" name="opt_level1_toc" native="true"/>
     </item>
-     <item row="6" column="0" colspan="2">
+     <item row="7" column="0" colspan="2">
      <widget class="XPathEdit" name="opt_level2_toc" native="true"/>
     </item>
-     <item row="7" column="0" colspan="2">
+     <item row="8" column="0" colspan="2">
      <widget class="XPathEdit" name="opt_level3_toc" native="true"/>
     </item>
-     <item row="8" column="0">
+     <item row="9" column="0">
      <spacer name="verticalSpacer">
       <property name="orientation">
        <enum>Qt::Vertical</enum>
@@ -93,6 +93,13 @@
       </property>
      </spacer>
    </item>
+    <item row="2" column="0" colspan="2">
+     <widget class="QCheckBox" name="opt_duplicate_links_in_toc">
+      <property name="text">
+       <string>Allow &amp;duplicate links when creating the Table of Contents</string>
+      </property>
+     </widget>
+    </item>
   </layout>
  </widget>
 <customwidgets>
@@ -9,8 +9,8 @@ Module to implement the Cover Flow feature

import sys, os, time

-from PyQt4.Qt import QImage, QSizePolicy, QTimer, QDialog, Qt, QSize, \
-        QStackedLayout, QLabel, QByteArray, pyqtSignal
+from PyQt4.Qt import (QImage, QSizePolicy, QTimer, QDialog, Qt, QSize,
+        QStackedLayout, QLabel, QByteArray, pyqtSignal)

from calibre import plugins
from calibre.gui2 import config, available_height, available_width, gprefs
@@ -84,6 +84,7 @@ if pictureflow is not None:
    class CoverFlow(pictureflow.PictureFlow):

        dc_signal = pyqtSignal()
+        context_menu_requested = pyqtSignal()

        def __init__(self, parent=None):
            pictureflow.PictureFlow.__init__(self, parent,
@@ -94,6 +95,17 @@ if pictureflow is not None:
                QSizePolicy.Expanding))
            self.dc_signal.connect(self._data_changed,
                    type=Qt.QueuedConnection)
+            self.context_menu = None
+            self.setContextMenuPolicy(Qt.DefaultContextMenu)
+
+        def set_context_menu(self, cm):
+            self.context_menu = cm
+
+        def contextMenuEvent(self, event):
+            if self.context_menu is not None:
+                self.context_menu_requested.emit()
+                self.context_menu.popup(event.globalPos())
+                event.accept()

        def sizeHint(self):
            return self.minimumSize()
@@ -149,6 +161,7 @@ class CoverFlowMixin(object):
        self.cover_flow_sync_flag = True
        self.cover_flow = CoverFlow(parent=self)
        self.cover_flow.currentChanged.connect(self.sync_listview_to_cf)
+        self.cover_flow.context_menu_requested.connect(self.cf_context_menu_requested)
        self.library_view.selectionModel().currentRowChanged.connect(
                self.sync_cf_to_listview)
        self.db_images = DatabaseImages(self.library_view.model())
@@ -234,6 +247,14 @@ class CoverFlowMixin(object):
            self.cover_flow.setCurrentSlide(current.row())
            self.cover_flow_sync_flag = True

+    def cf_context_menu_requested(self):
+        row = self.cover_flow.currentSlide()
+        m = self.library_view.model()
+        index = m.index(row, 0)
+        sm = self.library_view.selectionModel()
+        sm.select(index, sm.ClearAndSelect|sm.Rows)
+        self.library_view.setCurrentIndex(index)
+
    def cover_flow_do_sync(self):
        self.cover_flow_sync_flag = True
        try:
@@ -912,8 +912,9 @@ class DeviceMixin(object): # {{{
format_count[f] = 1
for f in self.device_manager.device.settings().format_map:
if f in format_count.keys():
formats.append((f, _('%i of %i Books') % (format_count[f],
len(rows)), True if f in aval_out_formats else False))
formats.append((f, _('%(num)i of %(total)i Books') % dict(
num=format_count[f], total=len(rows)),
True if f in aval_out_formats else False))
elif f in aval_out_formats:
formats.append((f, _('0 of %i Books') % len(rows), True))
d = ChooseFormatDeviceDialog(self, _('Choose format to send to device'), formats)
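A note on the pattern recurring through this commit: positional '%s'/'%i' pairs in translatable strings are replaced by named '%(name)s' placeholders, because a positional template forces every translation to keep the English argument order. A minimal sketch of the difference (the '_' stub stands in for the real gettext lookup, and the values are made up):

    _ = lambda s: s  # stand-in for calibre's gettext lookup

    # Positional: a translator cannot reorder the arguments without breaking it
    old = _('%i of %i Books') % (3, 10)

    # Named: the translated template may reorder or repeat arguments freely
    new = _('%(num)i of %(total)i Books') % dict(num=3, total=10)

    print(old, '|', new)  # 3 of 10 Books | 3 of 10 Books

The same mechanical rewrite is applied file by file throughout the rest of this diff.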
@@ -106,7 +106,8 @@ class BookInfo(QDialog, Ui_BookInfo):
Qt.KeepAspectRatio, Qt.SmoothTransformation)
self.cover.set_pixmap(pixmap)
sz = pixmap.size()
self.cover.setToolTip(_('Cover size: %dx%d')%(sz.width(), sz.height()))
self.cover.setToolTip(_('Cover size: %(width)d x %(height)d')%dict(
width=sz.width(), height=sz.height()))

def refresh(self, row):
if isinstance(row, QModelIndex):
@@ -173,10 +173,10 @@ class MyBlockingBusy(QDialog): # {{{
mi = self.db.get_metadata(id, index_is_id=True)
series_string = None
if mi.series:
series_string = _('Book %s of %s')%(
fmt_sidx(mi.series_index,
series_string = _('Book %(sidx)s of %(series)s')%dict(
sidx=fmt_sidx(mi.series_index,
use_roman=config['use_roman_numerals_for_series_number']),
mi.series)
series=mi.series)

cdata = calibre_cover(mi.title, mi.format_field('authors')[-1],
series_string=series_string)
@@ -749,15 +749,9 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
val = self.s_r_do_regexp(mi)
val = self.s_r_do_destination(mi, val)
if dfm['is_multiple']:
if dest == 'authors' and len(val) == 0:
error_dialog(self, _('Search/replace invalid'),
_('Authors cannot be set to the empty string. '
'Book title %s not processed')%mi.title,
show=True)
return
# convert the colon-separated pair strings back into a dict, which
# is what set_identifiers wants
if dfm['is_csp']:
# convert the colon-separated pair strings back into a dict,
# which is what set_identifiers wants
dst_id_type = unicode(self.s_r_dst_ident.text())
if dst_id_type:
v = ''.join(val)

@@ -769,11 +763,7 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
else:
val = self.s_r_replace_mode_separator().join(val)
if dest == 'title' and len(val) == 0:
error_dialog(self, _('Search/replace invalid'),
_('Title cannot be set to the empty string. '
'Book title %s not processed')%mi.title,
show=True)
return
val = _('Unknown')

if dfm['is_custom']:
extra = self.db.get_custom_extra(id, label=dfm['label'], index_is_id=True)
@@ -701,7 +701,9 @@ class PluginUpdaterDialog(SizePersistedDialog):

if DEBUG:
prints('Locating zip file for %s: %s'% (display_plugin.name, display_plugin.forum_link))
self.gui.status_bar.showMessage(_('Locating zip file for %s: %s') % (display_plugin.name, display_plugin.forum_link))
self.gui.status_bar.showMessage(
_('Locating zip file for %(name)s: %(link)s') % dict(
name=display_plugin.name, link=display_plugin.forum_link))
plugin_zip_url = self._read_zip_attachment_url(display_plugin.forum_link)
if not plugin_zip_url:
return error_dialog(self.gui, _('Install Plugin Failed'),
@@ -336,7 +336,12 @@ class SchedulerDialog(QDialog, Ui_Dialog):
self.download_button.setVisible(True)
self.detail_box.setCurrentIndex(0)
recipe = self.recipe_model.recipe_from_urn(urn)
schedule_info = self.recipe_model.schedule_info_from_urn(urn)
try:
    schedule_info = self.recipe_model.schedule_info_from_urn(urn)
except:
    # Happens if the user does something like unchecking all the
    # days of the week
    schedule_info = None
account_info = self.recipe_model.account_info_from_urn(urn)
customize_info = self.recipe_model.get_customize_info(urn)

@@ -376,7 +381,9 @@ class SchedulerDialog(QDialog, Ui_Dialog):
d = utcnow() - last_downloaded
def hm(x): return (x-x%3600)//3600, (x%3600 - (x%3600)%60)//60
hours, minutes = hm(d.seconds)
tm = _('%d days, %d hours and %d minutes ago')%(d.days, hours, minutes)
tm = _('%(days)d days, %(hours)d hours'
' and %(mins)d minutes ago')%dict(
days=d.days, hours=hours, mins=minutes)
if d < timedelta(days=366):
ld_text = tm
else:
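The hm() helper in the hunk above is pure floor arithmetic: it splits a seconds count into whole hours and minutes, discarding leftover seconds. A quick check of the formula with made-up inputs:

    def hm(x): return (x-x%3600)//3600, (x%3600 - (x%3600)%60)//60

    print(hm(7384))  # (2, 3): 7384 s = 2 h 3 min, the trailing 4 s are dropped
    print(hm(59))    # (0, 0): under a minute

Note hm() is only applied to d.seconds; the day component is carried separately by the timedelta.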
@@ -57,7 +57,7 @@ class TagCategories(QDialog, Ui_TagCategories):
lambda: [n for (id, n) in self.db.all_publishers()],
lambda: self.db.all_tags()
]
category_names = ['', _('Authors'), _('Series'), _('Publishers'), _('Tags')]
category_names = ['', _('Authors'), ngettext('Series', 'Series', 2), _('Publishers'), _('Tags')]

cvals = {}
for key,cc in self.db.custom_field_metadata().iteritems():

@@ -260,6 +260,7 @@ class TagCategories(QDialog, Ui_TagCategories):
self.applied_items = [cat[2] for cat in self.categories.get(self.current_cat_name, [])]
else:
self.applied_items = []
self.applied_items.sort(key=lambda x:sort_key(self.all_items[x].name))
self.display_filtered_categories(None)

def accept(self):

@@ -284,4 +285,4 @@ class TagCategories(QDialog, Ui_TagCategories):
self.category_box.blockSignals(True)
self.category_box.clear()
self.category_box.addItems(sorted(self.categories.keys(), key=sort_key))
self.category_box.blockSignals(False)
self.category_box.blockSignals(False)
@@ -18,7 +18,8 @@ class ListWidgetItem(QListWidgetItem):
def data(self, role):
if role == Qt.DisplayRole:
if self.initial_value != self.current_value:
return _('%s (was %s)')%(self.current_value, self.initial_value)
return _('%(curr)s (was %(initial)s)')%dict(
curr=self.current_value, initial=self.initial_value)
else:
return self.current_value
elif role == Qt.EditRole:
@@ -143,7 +143,9 @@ class UserProfiles(ResizableDialog, Ui_Dialog):
pt = PersistentTemporaryFile(suffix='.recipe')
pt.write(src.encode('utf-8'))
pt.close()
body = _('The attached file: %s is a recipe to download %s.')%(os.path.basename(pt.name), title)
body = _('The attached file: %(fname)s is a '
'recipe to download %(title)s.')%dict(
fname=os.path.basename(pt.name), title=title)
subject = _('Recipe for ')+title
url = QUrl('mailto:')
url.addQueryItem('subject', subject)
@@ -51,8 +51,8 @@ class DownloadDialog(QDialog): # {{{
self.setWindowTitle(_('Download %s')%fname)
self.l = QVBoxLayout(self)
self.purl = urlparse(url)
self.msg = QLabel(_('Downloading <b>%s</b> from %s')%(fname,
self.purl.netloc))
self.msg = QLabel(_('Downloading <b>%(fname)s</b> from %(url)s')%dict(
fname=fname, url=self.purl.netloc))
self.msg.setWordWrap(True)
self.l.addWidget(self.msg)
self.pb = QProgressBar(self)

@@ -82,9 +82,9 @@ class DownloadDialog(QDialog): # {{{
self.exec_()
if self.worker.err is not None:
error_dialog(self.parent(), _('Download failed'),
_('Failed to download from %r with error: %s')%(
self.worker.url, self.worker.err),
det_msg=self.worker.tb, show=True)
_('Failed to download from %(url)r with error: %(err)s')%dict(
url=self.worker.url, err=self.worker.err),
det_msg=self.worker.tb, show=True)

def update(self):
if self.rejected:
@@ -120,7 +120,7 @@ def send_mails(jobnames, callback, attachments, to_s, subjects,
texts, attachment_names, job_manager):
for name, attachment, to, subject, text, aname in zip(jobnames,
attachments, to_s, subjects, texts, attachment_names):
description = _('Email %s to %s') % (name, to)
description = _('Email %(name)s to %(to)s') % dict(name=name, to=to)
job = ThreadedJob('email', description, gui_sendmail, (attachment, aname, to,
subject, text), {}, callback)
job_manager.run_threaded_job(job)
@@ -62,7 +62,6 @@ class LibraryViewMixin(object): # {{{
view = getattr(self, view+'_view')
view.verticalHeader().sectionDoubleClicked.connect(self.iactions['View'].view_specific_book)

self.build_context_menus()
self.library_view.model().set_highlight_only(config['highlight_search_matches'])

def build_context_menus(self):

@@ -81,6 +80,11 @@ class LibraryViewMixin(object): # {{{
for v in (self.memory_view, self.card_a_view, self.card_b_view):
v.set_context_menu(dm, ec)

if self.cover_flow is not None:
cm = QMenu(self.cover_flow)
populate_menu(cm,
gprefs['action-layout-context-menu-cover-browser'])
self.cover_flow.set_context_menu(cm)

def search_done(self, view, ok):
if view is self.current_view():
@@ -950,11 +950,11 @@ class OnDeviceSearch(SearchQueryParser): # {{{
for locvalue in locations:
accessor = q[locvalue]
if query == 'true':
if accessor(row) is not None:
if accessor(row):
matches.add(index)
continue
if query == 'false':
if accessor(row) is None:
if not accessor(row):
matches.add(index)
continue
if locvalue == 'inlibrary':
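The OnDeviceSearch hunk above is the fix for true/false searches in device views: accessors for on-device metadata can return an empty string rather than None, so testing 'is not None' made empty values count as true. A small illustration with a stand-in accessor (not the real device model):

    row = {'tags': ''}                 # hypothetical on-device book, no tags
    accessor = lambda r: r['tags']

    print(accessor(row) is not None)   # True  -> old code wrongly matched tags:true
    print(bool(accessor(row)))         # False -> new code matches tags:false instead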
@@ -878,9 +878,10 @@ class Cover(ImageView): # {{{
series = self.dialog.series.current_val
series_string = None
if series:
series_string = _('Book %s of %s')%(
fmt_sidx(self.dialog.series_index.current_val,
use_roman=config['use_roman_numerals_for_series_number']), series)
series_string = _('Book %(sidx)s of %(series)s')%dict(
sidx=fmt_sidx(self.dialog.series_index.current_val,
use_roman=config['use_roman_numerals_for_series_number']),
series=series)
self.current_val = calibre_cover(title, author,
series_string=series_string)

@@ -921,8 +922,8 @@ class Cover(ImageView): # {{{
self.setPixmap(pm)
tt = _('This book has no cover')
if self._cdata:
tt = _('Cover size: %dx%d pixels') % \
(pm.width(), pm.height())
tt = _('Cover size: %(width)d x %(height)d pixels') % \
dict(width=pm.width(), height=pm.height())
self.setToolTip(tt)

return property(fget=fget, fset=fset)
@@ -20,6 +20,7 @@ from calibre.ebooks.metadata.sources.covers import download_cover
from calibre.ebooks.metadata.book.base import Metadata
from calibre.customize.ui import metadata_plugins
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.date import as_utc

# Start download {{{
def show_config(gui, parent):

@@ -124,10 +125,18 @@ def merge_result(oldmi, newmi):
for plugin in metadata_plugins(['identify']):
fields |= plugin.touched_fields

def is_equal(x, y):
    if hasattr(x, 'tzinfo'):
        x = as_utc(x)
    if hasattr(y, 'tzinfo'):
        y = as_utc(y)
    return x == y

for f in fields:
# Optimize so that set_metadata does not have to do extra work later
if not f.startswith('identifier:'):
if (not newmi.is_null(f) and getattr(newmi, f) == getattr(oldmi, f)):
if (not newmi.is_null(f) and is_equal(getattr(newmi, f),
getattr(oldmi, f))):
setattr(newmi, f, getattr(dummy, f))

newmi.last_modified = oldmi.last_modified

@@ -187,7 +196,7 @@ def download(ids, db, do_identify, covers,
ans[i] = mi
count += 1
notifications.put((count/len(ids),
_('Downloaded %d of %d')%(count, len(ids))))
_('Downloaded %(num)d of %(tot)d')%dict(num=count, tot=len(ids))))
log('Download complete, with %d failures'%len(failed_ids))
return (ans, failed_ids, failed_covers, title_map, all_failed)
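The is_equal() helper added above guards the merge against datetime fields: downloaded metadata may carry timezone-aware datetimes in different zones, so both sides are normalized to UTC before comparing (as_utc comes from calibre.utils.date). A rough equivalent of the idea in modern Python, assuming Python 3's datetime rather than the Python 2 environment this code targets:

    from datetime import datetime, timedelta, timezone

    def is_equal(x, y):
        # normalize timezone-aware values to UTC, leave everything else alone
        if hasattr(x, 'tzinfo') and x.tzinfo is not None:
            x = x.astimezone(timezone.utc)
        if hasattr(y, 'tzinfo') and y.tzinfo is not None:
            y = y.astimezone(timezone.utc)
        return x == y

    utc = datetime(2011, 7, 8, 12, 0, tzinfo=timezone.utc)
    est = utc.astimezone(timezone(timedelta(hours=-4)))
    print(is_equal(utc, est))   # True: same instant, different zones
    print(is_equal('x', 'x'))   # True: non-datetimes compare as before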
@@ -254,6 +254,10 @@ class ResultsView(QTableView): # {{{
'<h2>%s</h2>'%book.title,
'<div><i>%s</i></div>'%authors_to_string(book.authors),
]
if not book.is_null('series'):
series = book.format_field('series')
if series[1]:
parts.append('<div>%s: %s</div>'%series)
if not book.is_null('rating'):
parts.append('<div>%s</div>'%('\u2605'*int(book.rating)))
parts.append('</center>')

@@ -722,8 +726,8 @@ class CoversWidget(QWidget): # {{{
if num < 2:
txt = _('Could not find any covers for <b>%s</b>')%self.book.title
else:
txt = _('Found <b>%d</b> covers of %s. Pick the one you like'
' best.')%(num-1, self.title)
txt = _('Found <b>%(num)d</b> covers of %(title)s. Pick the one you like'
' best.')%dict(num=num-1, title=self.title)
self.msg.setText(txt)

self.finished.emit()
@@ -1332,6 +1332,7 @@ void PictureFlow::mousePressEvent(QMouseEvent* event)

void PictureFlow::mouseReleaseEvent(QMouseEvent* event)
{
bool accepted = false;
int sideWidth = (d->buffer.width() - slideSize().width()) /2;

if (d->singlePress)

@@ -1339,13 +1340,20 @@ void PictureFlow::mouseReleaseEvent(QMouseEvent* event)
if (event->x() < sideWidth )
{
showPrevious();
accepted = true;
} else if ( event->x() > sideWidth + slideSize().width() ) {
showNext();
accepted = true;
} else {
emit itemActivated(d->getTarget());
if (event->button() == Qt::LeftButton) {
    emit itemActivated(d->getTarget());
    accepted = true;
}
}

event->accept();
if (accepted) {
    event->accept();
}
}

emit inputReceived();
@@ -445,15 +445,15 @@ class RulesModel(QAbstractListModel): # {{{
def rule_to_html(self, col, rule):
if not isinstance(rule, Rule):
return _('''
<p>Advanced Rule for column <b>%s</b>:
<pre>%s</pre>
''')%(col, prepare_string_for_xml(rule))
<p>Advanced Rule for column <b>%(col)s</b>:
<pre>%(rule)s</pre>
''')%dict(col=col, rule=prepare_string_for_xml(rule))
conditions = [self.condition_to_html(c) for c in rule.conditions]
return _('''\
<p>Set the color of <b>%s</b> to <b>%s</b> if the following
<p>Set the color of <b>%(col)s</b> to <b>%(color)s</b> if the following
conditions are met:</p>
<ul>%s</ul>
''') % (col, rule.color, ''.join(conditions))
<ul>%(rule)s</ul>
''') % dict(col=col, color=rule.color, rule=''.join(conditions))

def condition_to_html(self, condition):
c, a, v = condition

@@ -464,8 +464,8 @@ class RulesModel(QAbstractListModel): # {{{
action_name = trans

return (
_('<li>If the <b>%s</b> column <b>%s</b> value: <b>%s</b>') %
(c, action_name, prepare_string_for_xml(v)))
_('<li>If the <b>%(col)s</b> column <b>%(action)s</b> value: <b>%(val)s</b>') %
dict(col=c, action=action_name, val=prepare_string_for_xml(v)))

# }}}
@@ -17,12 +17,13 @@ from calibre.gui2.preferences.metadata_sources_ui import Ui_Form
from calibre.ebooks.metadata.sources.base import msprefs
from calibre.customize.ui import (all_metadata_plugins, is_disabled,
enable_plugin, disable_plugin, default_disabled_plugins)
from calibre.gui2 import NONE, error_dialog
from calibre.gui2 import NONE, error_dialog, question_dialog

class SourcesModel(QAbstractTableModel): # {{{

def __init__(self, parent=None):
QAbstractTableModel.__init__(self, parent)
self.gui_parent = parent

self.plugins = []
self.enabled_overrides = {}

@@ -87,6 +88,15 @@ class SourcesModel(QAbstractTableModel): # {{{
if col == 0 and role == Qt.CheckStateRole:
val, ok = val.toInt()
if ok:
if val == Qt.Checked and 'Douban' in plugin.name:
    if not question_dialog(self.gui_parent,
        _('Are you sure?'), '<p>'+
        _('This plugin is useful only for <b>Chinese</b>'
        ' language books. It can return incorrect'
        ' results for books in English. Are you'
        ' sure you want to enable it?'),
        show_copy_button=False):
        return ret
self.enabled_overrides[plugin] = val
ret = True
if col == 1 and role == Qt.EditRole:

@@ -252,8 +262,8 @@ class PluginConfig(QWidget): # {{{

self.l = l = QVBoxLayout()
self.setLayout(l)
self.c = c = QLabel(_('<b>Configure %s</b><br>%s') % (plugin.name,
plugin.description))
self.c = c = QLabel(_('<b>Configure %(name)s</b><br>%(desc)s') % dict(
name=plugin.name, desc=plugin.description))
c.setAlignment(Qt.AlignHCenter)
l.addWidget(c)
@@ -58,7 +58,9 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
self.device_to_formats_map = {}
for device in device_plugins():
n = device_name_for_plugboards(device)
self.device_to_formats_map[n] = device.FORMATS
self.device_to_formats_map[n] = set(device.FORMATS)
if getattr(device, 'CAN_DO_DEVICE_DB_PLUGBOARD', False):
    self.device_to_formats_map[n].add('device_db')
if n not in self.devices:
self.devices.append(n)
self.devices.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))

@@ -358,5 +360,5 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
if __name__ == '__main__':
from PyQt4.Qt import QApplication
app = QApplication([])
test_widget('Import/Export', 'plugboards')
test_widget('Import/Export', 'Plugboard')
@@ -155,7 +155,7 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
return
if self.argument_count.value() == 0:
box = warning_dialog(self.gui, _('Template functions'),
_('Argument count should be -1 or greater than zero.'
_('Argument count should be -1 or greater than zero. '
'Setting it to zero means that this function cannot '
'be used in single function mode.'), det_msg = '',
show=False)
@@ -225,6 +225,8 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
'calibre library')),
('context-menu-device', _('The context menu for the books on '
'the device')),
('context-menu-cover-browser', _('The context menu for the cover '
'browser')),
]

def genesis(self, gui):
src/calibre/gui2/store/stores/ebook_nl_plugin.py (new file, 96 lines)
@@ -0,0 +1,96 @@
# -*- coding: utf-8 -*-

from __future__ import (unicode_literals, division, absolute_import, print_function)

__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

import urllib2
from contextlib import closing

from lxml import html

from PyQt4.Qt import QUrl

from calibre import browser
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog

class EBookNLStore(BasicStoreConfig, StorePlugin):

    def open(self, parent=None, detail_item=None, external=False):
        url = 'http://ad.zanox.com/ppc/?19015168C29310186T'
        url_details = ('http://ad.zanox.com/ppc/?19016028C1098154549T&ULP=[['
                       'http://www.ebook.nl/store/{0}]]')

        if external or self.config.get('open_external', False):
            if detail_item:
                url = url_details.format(detail_item)
            open_url(QUrl(url))
        else:
            detail_url = None
            if detail_item:
                detail_url = url_details.format(detail_item)
            d = WebStoreDialog(self.gui, url, parent, detail_url)
            d.setWindowTitle(self.name)
            d.set_tags(self.config.get('tags', ''))
            d.exec_()

    def search(self, query, max_results=10, timeout=60):
        url = ('http://www.ebook.nl/store/advanced_search_result.php?keywords='
               + urllib2.quote(query))
        br = browser()

        counter = max_results
        with closing(br.open(url, timeout=timeout)) as f:
            doc = html.fromstring(f.read())
            for data in doc.xpath('//table[contains(@class, "productListing")]/tr'):
                if counter <= 0:
                    break

                details = data.xpath('./td/div[@class="prodImage"]/a')
                if not details:
                    continue
                details = details[0]
                id = ''.join(details.xpath('./@href')).strip()
                id = id[id.rfind('/')+1:]
                i = id.rfind('?')
                if i > 0:
                    id = id[:i]
                if not id:
                    continue
                cover_url = 'http://www.ebook.nl/store/' + ''.join(details.xpath('./img/@src'))
                title = ''.join(details.xpath('./img/@title')).strip()
                author = ''.join(data.xpath('./td/div[@class="prodTitle"]/h3/a/text()')).strip()
                price = ''.join(data.xpath('./td/div[@class="prodTitle"]/b/text()'))
                pdf = data.xpath('boolean(./td/div[@class="prodTitle"]/'
                                 'p[contains(text(), "Bestandsformaat: Pdf")])')
                epub = data.xpath('boolean(./td/div[@class="prodTitle"]/'
                                  'p[contains(text(), "Bestandsformaat: ePub")])')
                nodrm = data.xpath('boolean(./td/div[@class="prodTitle"]/'
                                   'p[contains(text(), "zonder DRM") or'
                                   ' contains(text(), "watermerk")])')
                counter -= 1

                s = SearchResult()
                s.cover_url = cover_url
                s.title = title.strip()
                s.author = author.strip()
                s.price = price
                if nodrm:
                    s.drm = SearchResult.DRM_UNLOCKED
                else:
                    s.drm = SearchResult.DRM_LOCKED
                s.detail_item = id
                formats = []
                if epub:
                    formats.append('ePub')
                if pdf:
                    formats.append('PDF')
                s.formats = ','.join(formats)

                yield s
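For context, a store plugin's search() is a generator that calibre's Get Books layer drives; the file above never runs standalone. A hypothetical driver, only to show the shape of the protocol (the constructor arguments and the query are assumptions, not part of this diff):

    # Hypothetical usage sketch, assuming StorePlugin(gui, name) construction
    store = EBookNLStore(None, 'eBook.nl')
    for result in store.search('oorlog en vrede', max_results=3, timeout=30):
        drm = 'locked' if result.drm == SearchResult.DRM_LOCKED else 'unlocked'
        print(result.title, result.author, result.price, result.formats, drm)

Each SearchResult is yielded as soon as one table row has been parsed, so the search dialog can show partial results while the rest of the page is still being processed.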
@@ -18,11 +18,11 @@ from calibre import browser
from calibre.gui2.store.search_result import SearchResult

class CacheUpdateThread(Thread, QObject):

total_changed = pyqtSignal(int)
update_progress = pyqtSignal(int)
update_details = pyqtSignal(unicode)

def __init__(self, config, seralize_books_function, timeout):
Thread.__init__(self)
QObject.__init__(self)

@@ -32,19 +32,19 @@ class CacheUpdateThread(Thread, QObject):
self.seralize_books = seralize_books_function
self.timeout = timeout
self._run = True

def abort(self):
self._run = False

def run(self):
url = 'http://www.mobileread.com/forums/ebooks.php?do=getlist&type=html'

self.update_details.emit(_('Checking last download date.'))
last_download = self.config.get('last_download', None)
# Don't update the book list if our cache is less than one week old.
if last_download and (time.time() - last_download) < 604800:
return

self.update_details.emit(_('Downloading book list from MobileRead.'))
# Download the book list HTML file from MobileRead.
br = browser()

@@ -54,10 +54,10 @@ class CacheUpdateThread(Thread, QObject):
raw_data = f.read()
except:
return

if not raw_data or not self._run:
return

self.update_details.emit(_('Processing books.'))
# Turn books listed in the HTML file into SearchResults's.
books = []

@@ -65,21 +65,23 @@ class CacheUpdateThread(Thread, QObject):
data = html.fromstring(raw_data)
raw_books = data.xpath('//ul/li')
self.total_changed.emit(len(raw_books))

for i, book_data in enumerate(raw_books):
self.update_details.emit(_('%s of %s books processed.') % (i, len(raw_books)))
self.update_details.emit(
_('%(num)s of %(tot)s books processed.') % dict(
num=i, tot=len(raw_books)))
book = SearchResult()
book.detail_item = ''.join(book_data.xpath('.//a/@href'))
book.formats = ''.join(book_data.xpath('.//i/text()'))
book.formats = book.formats.strip()

text = ''.join(book_data.xpath('.//a/text()'))
if ':' in text:
book.author, q, text = text.partition(':')
book.author = book.author.strip()
book.title = text.strip()
books.append(book)

if not self._run:
books = []
break
@@ -12,7 +12,7 @@ import traceback, cPickle, copy
from itertools import repeat

from PyQt4.Qt import (QAbstractItemModel, QIcon, QVariant, QFont, Qt,
QMimeData, QModelIndex, pyqtSignal)
QMimeData, QModelIndex, pyqtSignal, QObject)

from calibre.gui2 import NONE, gprefs, config, error_dialog
from calibre.library.database2 import Tag

@@ -227,6 +227,10 @@ class TagsModel(QAbstractItemModel): # {{{
self._build_in_progress = False
self.reread_collapse_model({}, rebuild=False)

@property
def gui_parent(self):
    return QObject.parent(self)

def reread_collapse_model(self, state_map, rebuild=True):
if gprefs['tags_browser_collapse_at'] == 0:
self.collapse_model = 'disable'

@@ -315,9 +319,11 @@ class TagsModel(QAbstractItemModel): # {{{
for i,p in enumerate(path_parts):
path += p
if path not in category_node_map:
icon = self.category_icon_map['gst'] if is_gst else \
    self.category_icon_map[key]
node = self.create_node(parent=last_category_node,
data=p[1:] if i == 0 else p,
category_icon=self.category_icon_map[key],
category_icon=icon,
tooltip=tt if path == key else path,
category_key=path,
icon_map=self.icon_state_map)

@@ -375,6 +381,7 @@ class TagsModel(QAbstractItemModel): # {{{
collapse_letter = None
category_node = category
key = category_node.category_key
is_gst = category_node.is_gst
if key not in data:
return
cat_len = len(data[key])

@@ -387,7 +394,7 @@ class TagsModel(QAbstractItemModel): # {{{
not fm['is_custom'] and \
not fm['kind'] == 'user' \
else False
in_uc = fm['kind'] == 'user'
in_uc = fm['kind'] == 'user' and not is_gst
tt = key if in_uc else None

if collapse_model == 'first letter':

@@ -455,6 +462,7 @@ class TagsModel(QAbstractItemModel): # {{{
tooltip = None, temporary=True,
category_key=category_node.category_key,
icon_map=self.icon_state_map)
sub_cat.is_gst = is_gst
node_parent = sub_cat
else:
node_parent = category

@@ -677,44 +685,37 @@ class TagsModel(QAbstractItemModel): # {{{

def handle_user_category_drop(self, on_node, ids, column):
categories = self.db.prefs.get('user_categories', {})
category = categories.get(on_node.category_key[1:], None)
if category is None:
cat_contents = categories.get(on_node.category_key[1:], None)
if cat_contents is None:
return
cat_contents = set([(v, c) for v,c,ign in cat_contents])

fm_src = self.db.metadata_for_field(column)
label = fm_src['label']

for id in ids:
label = fm_src['label']
if not fm_src['is_custom']:
if label == 'authors':
items = self.db.get_authors_with_ids()
items = [(i[0], i[1].replace('|', ',')) for i in items]
value = self.db.authors(id, index_is_id=True)
value = [v.replace('|', ',') for v in value.split(',')]
elif label == 'publisher':
items = self.db.get_publishers_with_ids()
value = self.db.publisher(id, index_is_id=True)
elif label == 'series':
items = self.db.get_series_with_ids()
value = self.db.series(id, index_is_id=True)
else:
items = self.db.get_custom_items_with_ids(label=label)
if fm_src['datatype'] != 'composite':
value = self.db.get_custom(id, label=label, index_is_id=True)
else:
value = self.db.get_property(id, loc=fm_src['rec_index'],
index_is_id=True)
if value is None:
return
if not isinstance(value, list):
value = [value]
for val in value:
for (v, c, id) in category:
if v == val and c == column:
break
else:
category.append([val, column, 0])
categories[on_node.category_key[1:]] = category
self.db.prefs.set('user_categories', categories)
self.refresh_required.emit()
if value:
if not isinstance(value, list):
value = [value]
cat_contents |= set([(v, column) for v in value])

categories[on_node.category_key[1:]] = [[v, c, 0] for v,c in cat_contents]
self.db.prefs.set('user_categories', categories)
self.refresh_required.emit()

def handle_drop(self, on_node, ids):
#print 'Dropped ids:', ids, on_node.tag

@@ -722,12 +723,12 @@ class TagsModel(QAbstractItemModel): # {{{
if (key == 'authors' and len(ids) >= 5):
if not confirm('<p>'+_('Changing the authors for several books can '
'take a while. Are you sure?')
+'</p>', 'tag_browser_drop_authors', self.parent()):
+'</p>', 'tag_browser_drop_authors', self.gui_parent):
return
elif len(ids) > 15:
if not confirm('<p>'+_('Changing the metadata for that many books '
'can take a while. Are you sure?')
+'</p>', 'tag_browser_many_changes', self.parent()):
+'</p>', 'tag_browser_many_changes', self.gui_parent):
return

fm = self.db.metadata_for_field(key)

@@ -871,13 +872,13 @@ class TagsModel(QAbstractItemModel): # {{{
# we position at the parent label
val = unicode(value.toString()).strip()
if not val:
error_dialog(self.parent(), _('Item is blank'),
error_dialog(self.gui_parent, _('Item is blank'),
_('An item cannot be set to nothing. Delete it instead.')).exec_()
return False
item = self.get_node(index)
if item.type == TagTreeItem.CATEGORY and item.category_key.startswith('@'):
if val.find('.') >= 0:
error_dialog(self.parent(), _('Rename user category'),
error_dialog(self.gui_parent, _('Rename user category'),
_('You cannot use periods in the name when '
'renaming user categories'), show=True)
return False

@@ -897,7 +898,7 @@ class TagsModel(QAbstractItemModel): # {{{
if len(c) == len(ckey):
if strcmp(ckey, nkey) != 0 and \
nkey_lower in user_cat_keys_lower:
error_dialog(self.parent(), _('Rename user category'),
error_dialog(self.gui_parent, _('Rename user category'),
_('The name %s is already used')%nkey, show=True)
return False
user_cats[nkey] = user_cats[ckey]

@@ -906,7 +907,7 @@ class TagsModel(QAbstractItemModel): # {{{
rest = c[len(ckey):]
if strcmp(ckey, nkey) != 0 and \
icu_lower(nkey + rest) in user_cat_keys_lower:
error_dialog(self.parent(), _('Rename user category'),
error_dialog(self.gui_parent, _('Rename user category'),
_('The name %s is already used')%(nkey+rest), show=True)
return False
user_cats[nkey + rest] = user_cats[ckey + rest]

@@ -921,12 +922,12 @@ class TagsModel(QAbstractItemModel): # {{{
return False
if key == 'authors':
if val.find('&') >= 0:
error_dialog(self.parent(), _('Invalid author name'),
error_dialog(self.gui_parent, _('Invalid author name'),
_('Author names cannot contain & characters.')).exec_()
return False
if key == 'search':
if val in saved_searches().names():
error_dialog(self.parent(), _('Duplicate search name'),
error_dialog(self.gui_parent, _('Duplicate search name'),
_('The saved search name %s is already used.')%val).exec_()
return False
saved_searches().rename(unicode(item.data(role).toString()), val)

@@ -1161,7 +1162,10 @@ class TagsModel(QAbstractItemModel): # {{{
prefix = ' not '
else:
prefix = ''
category = tag.category if key != 'news' else 'tag'
if node.is_gst:
    category = key
else:
    category = tag.category if key != 'news' else 'tag'
add_colon = False
if self.db.field_metadata[tag.category]['is_csp']:
add_colon = True
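The handle_user_category_drop() rewrite above swaps the nested duplicate scan for set arithmetic over (value, column) pairs, so dropping an item that is already in the category is a no-op. The core idea in isolation, with illustrative data only:

    # Illustrative only: user category contents as (value, column) pairs
    cat_contents = set([('Tolstoy', 'authors'), ('war', 'tags')])
    dropped = ['Tolstoy', 'Chekhov']            # values dragged onto the node
    cat_contents |= set([(v, 'authors') for v in dropped])

    # back to the stored [value, column, 0] triples; the duplicate is gone
    print(sorted([v, c, 0] for v, c in cat_contents))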
@@ -218,7 +218,7 @@ class TagBrowserMixin(object): # {{{
d = TagListEditor(self, tag_to_match=tag, data=result, key=key)
d.exec_()
if d.result() == d.Accepted:
to_rename = d.to_rename # dict of new text to old id
to_rename = d.to_rename # dict of old id to new name
to_delete = d.to_delete # list of ids
orig_name = d.original_names # dict of id: name
@@ -384,8 +384,8 @@ class TagsView(QTreeView): # {{{
action='delete_search', key=tag.name))
if key.startswith('@') and not item.is_gst:
self.context_menu.addAction(self.user_category_icon,
_('Remove %s from category %s')%
(display_name(tag), item.py_name),
_('Remove %(item)s from category %(cat)s')%
dict(item=display_name(tag), cat=item.py_name),
partial(self.context_menu_handler,
action='delete_item_from_user_category',
key = key, index = tag_item))
@@ -94,8 +94,8 @@ def convert_single_ebook(parent, db, book_ids, auto_conversion=False, # {{{

msg = '%s' % '\n'.join(res)
warning_dialog(parent, _('Could not convert some books'),
_('Could not convert %d of %d books, because no suitable source'
' format was found.') % (len(res), total),
_('Could not convert %(num)d of %(tot)d books, because no suitable source'
' format was found.') % dict(num=len(res), tot=total),
msg).exec_()

return jobs, changed, bad

@@ -187,7 +187,8 @@ class QueueBulk(QProgressDialog):
except:
dtitle = repr(mi.title)
self.setLabelText(_('Queueing ')+dtitle)
desc = _('Convert book %d of %d (%s)') % (self.i, len(self.book_ids), dtitle)
desc = _('Convert book %(num)d of %(tot)d (%(title)s)') % dict(
num=self.i, tot=len(self.book_ids), title=dtitle)

args = [in_file.name, out_file.name, lrecs]
temp_files.append(out_file)

@@ -209,8 +210,8 @@ class QueueBulk(QProgressDialog):

msg = '%s' % '\n'.join(res)
warning_dialog(self.parent, _('Could not convert some books'),
_('Could not convert %d of %d books, because no suitable '
'source format was found.') % (len(res), len(self.book_ids)),
_('Could not convert %(num)d of %(tot)d books, because no suitable '
'source format was found.') % dict(num=len(res), tot=len(self.book_ids)),
msg).exec_()
self.parent = None
self.jobs.reverse()
@@ -308,6 +308,8 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
self.height())
self.resize(self.width(), self._calculated_available_height)

self.build_context_menus()

for ac in self.iactions.values():
try:
ac.gui_layout_complete()
@@ -70,10 +70,10 @@ class UpdateNotification(QDialog):
self.logo.setPixmap(QPixmap(I('lt.png')).scaled(100, 100,
Qt.IgnoreAspectRatio, Qt.SmoothTransformation))
self.label = QLabel(('<p>'+
_('%s has been updated to version <b>%s</b>. '
_('%(app)s has been updated to version <b>%(ver)s</b>. '
'See the <a href="http://calibre-ebook.com/whats-new'
'">new features</a>.'))%(
__appname__, calibre_version))
'">new features</a>.'))%dict(
app=__appname__, ver=calibre_version))
self.label.setOpenExternalLinks(True)
self.label.setWordWrap(True)
self.setWindowTitle(_('Update available!'))
@@ -492,11 +492,11 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
self.set_page_number(frac)

def magnification_changed(self, val):
tt = _('Make font size %s\nCurrent magnification: %.1f')
tt = _('Make font size %(which)s\nCurrent magnification: %(mag).1f')
self.action_font_size_larger.setToolTip(
tt %(_('larger'), val))
tt %dict(which=_('larger'), mag=val))
self.action_font_size_smaller.setToolTip(
tt %(_('smaller'), val))
tt %dict(which=_('smaller'), mag=val))

def find(self, text, repeat=False, backwards=False):
if not text:
@@ -569,9 +569,9 @@ def move_library(oldloc, newloc, parent, callback_on_complete):
det = traceback.format_exc()
error_dialog(parent, _('Invalid database'),
_('<p>An invalid library already exists at '
'%s, delete it before trying to move the '
'existing library.<br>Error: %s')%(newloc,
str(err)), det, show=True)
'%(loc)s, delete it before trying to move the '
'existing library.<br>Error: %(err)s')%dict(loc=newloc,
err=str(err)), det, show=True)
callback(None)
return
else:
@@ -31,9 +31,9 @@ class TestEmail(QDialog, TE_Dialog):
if pa:
self.to.setText(pa)
if opts.relay_host:
self.label.setText(_('Using: %s:%s@%s:%s and %s encryption')%
(opts.relay_username, unhexlify(opts.relay_password),
opts.relay_host, opts.relay_port, opts.encryption))
self.label.setText(_('Using: %(un)s:%(pw)s@%(host)s:%(port)s and %(enc)s encryption')%
dict(un=opts.relay_username, pw=unhexlify(opts.relay_password),
host=opts.relay_host, port=opts.relay_port, enc=opts.encryption))

def test(self, *args):
self.log.setPlainText(_('Sending...'))
@@ -54,12 +54,12 @@ class CSV_XML(CatalogPlugin): # {{{
action = None,
help = _('The fields to output when cataloging books in the '
'database. Should be a comma-separated list of fields.\n'
'Available fields: %s,\n'
'Available fields: %(fields)s,\n'
'plus user-created custom fields.\n'
'Example: %s=title,authors,tags\n'
'Example: %(opt)s=title,authors,tags\n'
"Default: '%%default'\n"
"Applies to: CSV, XML output formats")%(', '.join(FIELDS),
'--fields')),
"Applies to: CSV, XML output formats")%dict(
fields=', '.join(FIELDS), opt='--fields')),

Option('--sort-by',
default = 'id',

@@ -250,12 +250,12 @@ class BIBTEX(CatalogPlugin): # {{{
action = None,
help = _('The fields to output when cataloging books in the '
'database. Should be a comma-separated list of fields.\n'
'Available fields: %s.\n'
'Available fields: %(fields)s.\n'
'plus user-created custom fields.\n'
'Example: %s=title,authors,tags\n'
'Example: %(opt)s=title,authors,tags\n'
"Default: '%%default'\n"
"Applies to: BIBTEX output format")%(', '.join(FIELDS),
'--fields')),
"Applies to: BIBTEX output format")%dict(
fields=', '.join(FIELDS), opt='--fields')),

Option('--sort-by',
default = 'id',
@@ -7,7 +7,7 @@ __docformat__ = 'restructuredtext en'
The database used to store ebook metadata
'''
import os, sys, shutil, cStringIO, glob, time, functools, traceback, re, \
json, uuid, tempfile, hashlib
json, uuid, tempfile, hashlib, copy
from collections import defaultdict
import threading, random
from itertools import repeat

@@ -62,7 +62,8 @@ class Tag(object):
if self.avg_rating > 0:
if tooltip:
tooltip = tooltip + ': '
tooltip = _('%sAverage rating is %3.1f')%(tooltip, self.avg_rating)
tooltip = _('%(tt)sAverage rating is %(rating)3.1f')%dict(
tt=tooltip, rating=self.avg_rating)
self.tooltip = tooltip
self.icon = icon
self.category = category

@@ -1794,10 +1795,24 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):

for user_cat in sorted(user_categories.keys(), key=sort_key):
items = []
names_seen = {}
for (name,label,ign) in user_categories[user_cat]:
n = icu_lower(name)
if label in taglist and n in taglist[label]:
items.append(taglist[label][n])
if user_cat in gst:
    # for gst items, make copy and consolidate the tags by name.
    if n in names_seen:
        t = names_seen[n]
        t.id_set |= taglist[label][n].id_set
        t.count += taglist[label][n].count
        t.tooltip = t.tooltip.replace(')', ', ' + label + ')')
    else:
        t = copy.copy(taglist[label][n])
        t.icon = icon_map['gst']
        names_seen[t.name] = t
        items.append(t)
else:
    items.append(taglist[label][n])
# else: do nothing, to not include nodes w zero counts
cat_name = '@' + user_cat # add the '@' to avoid name collision
# Not a problem if we accumulate entries in the icon map
@@ -17,7 +17,7 @@ class TagsIcons(dict):

category_icons = ['authors', 'series', 'formats', 'publisher', 'rating',
'news', 'tags', 'custom:', 'user:', 'search',
'identifiers']
'identifiers', 'gst']
def __init__(self, icon_dict):
for a in self.category_icons:
if a not in icon_dict:

@@ -35,7 +35,8 @@ category_icon_map = {
'custom:' : 'column.png',
'user:' : 'tb_folder.png',
'search' : 'search.png',
'identifiers': 'identifiers.png'
'identifiers': 'identifiers.png',
'gst' : 'catalog.png',
}

@@ -120,7 +121,7 @@ class FieldMetadata(dict):
'datatype':'series',
'is_multiple':{},
'kind':'field',
'name':_('Series'),
'name':ngettext('Series', 'Series', 2),
'search_terms':['series'],
'is_custom':False,
'is_category':True,
@@ -92,16 +92,17 @@ def config(defaults=None):
' By default all available formats are saved.'))
x('template', default=DEFAULT_TEMPLATE,
help=_('The template to control the filename and directory structure of the saved files. '
'Default is "%s" which will save books into a per-author '
'Default is "%(templ)s" which will save books into a per-author '
'subdirectory with filenames containing title and author. '
'Available controls are: {%s}')%(DEFAULT_TEMPLATE, ', '.join(FORMAT_ARGS)))
'Available controls are: {%(controls)s}')%dict(
templ=DEFAULT_TEMPLATE, controls=', '.join(FORMAT_ARGS)))
x('send_template', default=DEFAULT_SEND_TEMPLATE,
help=_('The template to control the filename and directory structure of files '
'sent to the device. '
'Default is "%s" which will save books into a per-author '
'Default is "%(templ)s" which will save books into a per-author '
'directory with filenames containing title and author. '
'Available controls are: {%s}')%(DEFAULT_SEND_TEMPLATE, ', '.join(FORMAT_ARGS)))
'Available controls are: {%(controls)s}')%dict(
templ=DEFAULT_SEND_TEMPLATE, controls=', '.join(FORMAT_ARGS)))
x('asciiize', default=True,
help=_('Normally, calibre will convert all non English characters into English equivalents '
'for the file names. '
@@ -606,6 +606,7 @@ class SchemaUpgrade(object):
'''

script = '''
BEGIN TRANSACTION;
ALTER TABLE authors ADD COLUMN link TEXT NOT NULL DEFAULT "";
'''
self.conn.executescript(script)
@@ -124,7 +124,8 @@ def render_rating(rating, url_prefix, container='span', prefix=None): # {{{
added = 0
if prefix is None:
prefix = _('Average rating')
rstring = xml(_('%s: %.1f stars')% (prefix, rating if rating else 0.0),
rstring = xml(_('%(prefix)s: %(rating).1f stars')%dict(
prefix=prefix, rating=rating if rating else 0.0),
True)
ans = ['<%s class="rating">' % (container)]
for i in range(5):
@@ -171,9 +171,9 @@ def ACQUISITION_ENTRY(item, version, db, updated, CFM, CKEYS, prefix):
no_tag_count=True)))
series = item[FM['series']]
if series:
extra.append(_('SERIES: %s [%s]<br />')%\
(xml(series),
fmt_sidx(float(item[FM['series_index']]))))
extra.append(_('SERIES: %(series)s [%(sidx)s]<br />')%\
dict(series=xml(series),
sidx=fmt_sidx(float(item[FM['series_index']]))))
for key in CKEYS:
mi = db.get_metadata(item[CFM['id']['rec_index']], index_is_id=True)
name, val = mi.format_field(key)
@@ -96,8 +96,8 @@ html_sidebars = {
html_favicon = 'favicon.ico'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# relative to this directory. They are copied after the built-in static files,
# so a file named "default.css" will overwrite the built-in "default.css".
html_static_path = ['resources', '../../../icons/favicon.ico']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
@@ -2,16 +2,16 @@

.. _conversion:

E-book Conversion
Ebook Conversion
===================

|app| has a conversion system that is designed to be very easy to use. Normally, you just
add a book to |app|, click convert and |app| will try hard to generate output that is as
close as possible to the input. However, |app| accepts a very large number of input formats,
not all of which are as suitable as others for conversion to e-books. In the case of
not all of which are as suitable as others for conversion to ebooks. In the case of
such input formats, or if you just want greater control over the conversion system,
|app| has a lot of options to fine tune the conversion process. Note however that |app|'s
conversion system is not a substitute for a full blown e-book editor. To edit e-books, I
conversion system is not a substitute for a full blown ebook editor. To edit ebooks, I
would recommend first converting them to EPUB using |app| and then using a dedicated EPUB editor,
like `Sigil <http://code.google.com/p/sigil/>`_ to get the book into perfect shape. You can then
use the edited EPUB as input for conversion into other formats in |app|.

@@ -23,7 +23,7 @@ mouse over it, a tooltip will appear describing the setting.

.. image:: images/conv_dialog.png
:align: center
:alt: E-book conversion dialog
:alt: Ebook conversion dialog
:scale: 50

.. contents:: Contents

@@ -75,7 +75,7 @@ The four sub-directories are:
input This contains the HTML output by the Input Plugin. Use this to debug the Input Plugin.
parsed The result of pre-processing and converting to XHTML the output from the Input Plugin. Use to debug structure detection.
structure Post structure detection, but before CSS flattening and font size conversion. Use to debug font size conversion and CSS transforms.
processed Just before the e-book is passed to the output plugin. Use to debug the Output Plugin.
processed Just before the ebook is passed to the output plugin. Use to debug the Output Plugin.
========== =============

If you want to edit the input document a little before having |app| convert it, the best thing to

@@ -94,7 +94,7 @@ Look & Feel
:depth: 1
:local:

This group of options controls various aspects of the look and feel of the converted e-book.
This group of options controls various aspects of the look and feel of the converted ebook.

.. _font-size-rescaling:

@@ -209,7 +209,7 @@ Miscellaneous
There are a few more options in this section.

:guilabel:`No text justification`
Normally, if the output format supports it, |app| will force the output e-book
Normally, if the output format supports it, |app| will force the output ebook
to have *justified* text (i.e., a smooth right margin). This option will turn
off this behavior, in which case whatever justification is specified in the input document
will be used instead.

@@ -227,7 +227,7 @@ There are a few more options in this section.
with "Mikhail Gorbachiov". Also, note that in cases where there are multiple representations
of a character (characters shared by Chinese and Japanese for instance) the representation used
by the largest number of people will be used (Chinese in the previous example).
This option is mainly useful if you are going to view the e-book on a device that does not
This option is mainly useful if you are going to view the ebook on a device that does not
have support for unicode.

:guilabel:`Input character encoding`

@@ -416,7 +416,7 @@ There are a few more options in this section.
:guilabel:`Insert metadata as page at start of book`
One of the great things about |app| is that it allows you to maintain very complete metadata
about all of your books, for example, a rating, tags, comments, etc. This option will create
a single page with all this metadata and insert it into the converted e-book, typically just
a single page with all this metadata and insert it into the converted ebook, typically just
after the cover. Think of it as a way to create your own customised book jacket.

:guilabel:`Remove first image`

@@ -432,7 +432,7 @@ Table of Contents
When the input document has a Table of Contents in its metadata, |app| will just use that. However,
a number of older formats either do not support a metadata based Table of Contents, or individual
documents do not have one. In these cases, the options in this section can help you automatically
generate a Table of Contents in the converted e-book, based on the actual content in the input document.
generate a Table of Contents in the converted ebook, based on the actual content in the input document.

The first option is :guilabel:`Force use of auto-generated Table of Contents`. By checking this option
you can have |app| override any Table of Contents found in the metadata of the input document with the

@@ -681,7 +681,7 @@ The .cbc file will then contain::
two.cbz
three.cbz

|app| will automatically convert this .cbc file into a e-book with a Table of Contents pointing to each entry in comics.txt.
|app| will automatically convert this .cbc file into a ebook with a Table of Contents pointing to each entry in comics.txt.

EPUB advanced formatting demo

@@ -695,5 +695,5 @@ EPUB from the ZIP file are::

ebook-convert demo.zip .epub -vv --authors "Kovid Goyal" --language en --level1-toc '//*[@class="title"]' --disable-font-rescaling --page-breaks-before / --no-default-epub-cover

Note that because this file explores the potential of EPUB, most of the advanced formatting is not going to work on readers less capable than |app|'s builtin EPUB viewer.
Note that because this file explores the potential of EPUB, most of the advanced formatting is not going to work on readers less capable than |app|'s built-in EPUB viewer.
@@ -6,7 +6,7 @@
Writing your own plugins to extend |app|'s functionality
====================================================================

|app| has a very modular design. Almost all functionality in |app| comes in the form of plugins. Plugins are used for conversion, for downloading news (though these are called recipes), for various components of the user interface, to connect to different devices, to process files when adding them to |app| and so on. You can get a complete list of all the builtin plugins in |app| by going to :guilabel:`Preferences->Plugins`.
|app| has a very modular design. Almost all functionality in |app| comes in the form of plugins. Plugins are used for conversion, for downloading news (though these are called recipes), for various components of the user interface, to connect to different devices, to process files when adding them to |app| and so on. You can get a complete list of all the built-in plugins in |app| by going to :guilabel:`Preferences->Plugins`.

Here, we will teach you how to create your own plugins to add new features to |app|.

@@ -127,7 +127,7 @@ The actual logic to implement the Interface Plugin Demo dialog.
Getting resources from the plugin zip file
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

|app|'s plugin loading system defines a couple of builtin functions that allow you to conveniently get files from the plugin zip file.
|app|'s plugin loading system defines a couple of built-in functions that allow you to conveniently get files from the plugin zip file.

**get_resources(name_or_list_of_names)**
This function should be called with a list of paths to files inside the zip file. For example to access the file icon.png in
Some files were not shown because too many files have changed in this diff.