Mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-09 03:04:10 -04:00)

commit 6ce6f0e695: merge from trunk
@ -19,6 +19,66 @@
# new recipes:
#   - title:

- version: 0.8.9
  date: 2011-07-08

  new features:
    - title: "Kobo Touch: Display Preview Tag for book previews on the device"

    - title: "Improved display of grouped search terms in Tag Browser"

    - title: "When adding HTML files to calibre, add an option to process links in breadth first rather than depth first order. Access it via Preferences->Plugins and customize the HTML to ZIP plugin"

    - title: "Conversion pipeline: Add option to control if duplicate entries are allowed when generating the Table of Contents from links."
      tickets: [806095]

    - title: "Metadata download: When merging results, if the query to the xisbn service hangs, wait no more than 10 seconds. Also try harder to preserve the month when downloading the published date. Do not throw away ISBN-less results if some sources return ISBNs and some do not."
      tickets: [798309]

    - title: "Get Books: Remove OpenLibrary since it has the same files as archive.org. Allow direct downloading from Project Gutenberg."

    - title: "Add functions to the template language that allow getting the last modified time and size of the individual format files for a book. Also add a has_cover() function."

  bug fixes:
    - title: "Fix true/false searches not working on device views"
      tickets: [807262]

    - title: "Fix renaming of collections in device views"
      tickets: [807256]

    - title: "Fix regression that broke the use of the device_db plugboard"
      tickets: [806483]

    - title: "Kobo driver: Hide Expired Book Status for deleted books. Also fix regression that broke connecting to Kobo devices running very old firmware."
      tickets: [802083]

    - title: "Fix bug in 0.8.8 that could cause the metadata.db to be left in an unusable state if calibre is interrupted at just the wrong moment or if the db is stored in Dropbox"

    - title: "Fix sorting of composite custom columns that display numbers."

  improved recipes:
    - "Computer Act!ve"
    - Metro News NL
    - Spiegel Online International
    - cracked.com
    - Engadget
    - Independent
    - Telegraph UK

  new recipes:
    - title: "Blog da Cidadania and Noticias UnB"
      author: Diniz Bortolotto

    - title: "Galicia Confidential"
      author: Susana Sotelo Docio

    - title: "South China Morning Post"
      author: llam

    - title: "Szinti Derigisi"
      author: thomass


- version: 0.8.8
  date: 2011-07-01
39  recipes/automatiseringgids.recipe  Normal file
@ -0,0 +1,39 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe

class autogids(BasicNewsRecipe):
    title = u'Automatiseringgids IT'
    oldest_article = 7
    __author__ = 'DrMerry'
    description = 'IT-nieuws van Automatiseringgids'
    language = 'nl'
    publisher = 'AutomatiseringGids'
    category = 'Nieuws, IT, Nederlandstalig'
    simultaneous_downloads = 5
    #delay = 1
    timefmt = ' [%A, %d %B, %Y]'
    #timefmt = ''
    no_stylesheets = True
    remove_javascript = True
    remove_empty_feeds = True
    publication_type = 'newspaper'
    encoding = 'utf-8'
    cover_url = 'http://www.automatiseringgids.nl/siteimg/header_logo.gif'
    keep_only_tags = [dict(id=['content'])]
    extra_css = '.artikelheader {font-size:0.8em; color: #666;} .artikelintro {font-weight:bold} div.imgArticle {float: right; margin: 0 0em 1em 1em; display: block; position: relative; } \
        h2 { margin: 0 0 0.5em; min-height: 30px; font-size: 1.5em; letter-spacing: -0.2px; margin: 0 0 0.5em; color: black; font-weight: bold; line-height: 1.2em; padding: 4px 3px 0; }'

    remove_tags = [dict(name='div', attrs={'id':['loginbox','reactiecollapsible','reactiebox']}),
                   dict(name='div', attrs={'class':['column_a','column_c','bannerfullsize','reactieheader','reactiecollapsible','formulier','artikel_headeroptions']}),
                   dict(name='ul', attrs={'class':['highlightlist']}),
                   dict(name='input', attrs={'type':['button']}),
                   dict(name='div', attrs={'style':['display:block; width:428px; height:30px; float:left;']}),
                   ]

    preprocess_regexps = [
        (re.compile(r'(<h3>Reacties</h3>|<h2>Zie ook:</h2>|<div style=".*</div>|<a[^>]*>|</a>)', re.DOTALL|re.IGNORECASE),
         lambda match: ''),
    ]

    feeds = [(u'Actueel', u'http://www.automatiseringgids.nl/rss.aspx')]
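A note on the preprocess_regexps attribute used above: calibre runs each (compiled pattern, replacement callable) pair over the downloaded HTML with a regex substitution before the page is parsed. A minimal standalone sketch of that behavior, with a made-up HTML snippet:

    import re

    # Same rule shape as in the recipe: strip anchors and the 'Reacties' header.
    rules = [
        (re.compile(r'(<h3>Reacties</h3>|<a[^>]*>|</a>)', re.DOTALL | re.IGNORECASE),
         lambda match: ''),
    ]

    def apply_preprocess(raw_html, rules):
        # Each pattern's matches are replaced by whatever the callable returns.
        for pattern, func in rules:
            raw_html = pattern.sub(func, raw_html)
        return raw_html

    print(apply_preprocess('<a href="#">link</a><h3>Reacties</h3>', rules))
    # -> 'link'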
20  recipes/blog_da_cidadania.recipe  Normal file
@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-

from calibre.web.feeds.news import BasicNewsRecipe

class BlogdaCidadania(BasicNewsRecipe):
    title = 'Blog da Cidadania'
    __author__ = 'Diniz Bortolotto'
    description = 'Posts do Blog da Cidadania'
    oldest_article = 7
    max_articles_per_feed = 50
    encoding = 'utf8'
    publisher = 'Eduardo Guimaraes'
    category = 'politics, Brazil'
    language = 'pt_BR'
    publication_type = 'politics portal'

    feeds = [(u'Blog da Cidadania', u'http://www.blogcidadania.com.br/feed/')]

    reverse_article_order = True
@ -1,19 +1,20 @@
 #!/usr/bin/env python
 __license__ = 'GPL v3'
-__author__ = 'Lorenzo Vigentini'
-__copyright__ = '2009, Lorenzo Vigentini <l.vigentini at gmail.com>'
-__version__ = 'v1.01'
-__date__ = '14, January 2010'
-__description__ = 'Computeractive publishes new downloads, reviews, news stories, step-by-step guides and answers to PC problems every day.'
+__author__ = 'DrMerry Based on v1.01 by Lorenzo Vigentini'
+__copyright__ = 'For version 1.02, 1.03: DrMerry'
+__version__ = 'v1.03'
+__date__ = '11, July 2011'
+__description__ = 'Computeractive publishes new downloads, reviews, news stories, step-by-step guides and answers to PC problems every day. Original version (c): 2009, Lorenzo Vigentini <l.vigentini at gmail.com>'

 '''
 http://www.computeractive.co.uk/
 '''

 from calibre.web.feeds.news import BasicNewsRecipe
 import re

 class computeractive(BasicNewsRecipe):
-    __author__ = 'Lorenzo Vigentini'
+    __author__ = 'DrMerry'
     description = 'Computeractive publishes new downloads, reviews, news stories, step-by-step guides and answers to PC problems every day.'
     cover_url = 'http://images.pcworld.com/images/common/header/header-logo.gif'

@ -31,24 +32,27 @@ class computeractive(BasicNewsRecipe):
     remove_javascript = True
     no_stylesheets = True
     remove_empty_feeds = True
+    remove_tags_after = dict(name='div', attrs={'class':'article_tags_block'})

     keep_only_tags = [
-        dict(name='div', attrs={'id':'main'})
+        dict(name='div', attrs={'id':'container_left'})
         ]

     remove_tags = [
-        dict(name='div', attrs={'id':['seeAlsoTags','commentsModule','relatedArticles','mainLeft','mainRight']}),
-        dict(name='div', attrs={'class':['buyIt','detailMpu']}),
+        dict(name='div', attrs={'id':['seeAlsoTags','commentsModule','relatedArticles','mainLeft','mainRight','recent_comment_block_parent','reviewDetails']}),
+        dict(name='div', attrs={'class':['buyIt','detailMpu','small_section','recent_comment_block_parent','title_right_button_fix','section_title.title_right_button_fix','common_button']}),
+        dict(name='a', attrs={'class':'largerImage'})
         ]

     preprocess_regexps = [
         (re.compile(r'(<a [^>]*>|</a>)', re.DOTALL|re.IGNORECASE),
          lambda match: ''),
         ]

     feeds = [
         (u'General content', u'http://feeds.computeractive.co.uk/rss/latest/computeractive/all'),
         (u'News', u'http://feeds.computeractive.co.uk/rss/latest/computeractive/news'),
         (u'Downloads', u'http://feeds.computeractive.co.uk/rss/latest/computeractive/downloads'),
         (u'Hardware', u'http://feeds.computeractive.co.uk/rss/latest/computeractive/hardware'),
         (u'Software', u'http://feeds.computeractive.co.uk/rss/latest/computeractive/software'),
         (u'Competitions', u'http://www.v3.co.uk/feeds/rss20/personal-technology/competitions')
         ]
@ -9,7 +9,7 @@ engadget.com
 from calibre.web.feeds.news import BasicNewsRecipe

 class Engadget(BasicNewsRecipe):
-    title = u'Engadget_Full'
+    title = u'Engadget'
     __author__ = 'Starson17'
     __version__ = 'v1.00'
     __date__ = '02, July 2011'
@ -1,25 +1,29 @@
 #!/usr/bin/env python
 __license__ = 'GPL v3'
-__copyright__ = '2009, Kovid Goyal kovid@kovidgoyal.net'
+__copyright__ = '2011, Starson17'
 __docformat__ = 'restructuredtext en'

 from calibre.web.feeds.news import BasicNewsRecipe

 class Freakonomics(BasicNewsRecipe):

     title = 'Freakonomics Blog'
     description = 'The Hidden side of everything'
     __author__ = 'Starson17'
+    __version__ = '1.02'
+    __date__ = '11 July 2011'
     language = 'en'
     cover_url = 'http://ilkerugur.files.wordpress.com/2009/04/freakonomics.jpg'
     use_embedded_content = False
     no_stylesheets = True
     oldest_article = 30
     remove_javascript = True
     remove_empty_feeds = True
     max_articles_per_feed = 50

-    feeds = [('Blog', 'http://feeds.feedburner.com/freakonomicsblog')]
-
-    keep_only_tags = [dict(name='div', attrs={'id':'header'}),
-                      dict(name='h1'),
-                      dict(name='h2'),
-                      dict(name='div', attrs={'class':'entry-content'}),
-                      ]
+    feeds = [(u'Freakonomics Blog', u'http://www.freakonomics.com/feed/')]
+    keep_only_tags = [dict(name='div', attrs={'id':['content']})]
+    remove_tags_after = [dict(name='div', attrs={'class':['simple_socialmedia']})]
+    remove_tags = [dict(name='div', attrs={'class':['simple_socialmedia','single-fb-share','wp-polls']})]
     extra_css = '''
         h1{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:large;}
         h2{font-family:Arial,Helvetica,sans-serif; font-weight:normal;font-size:small;}
49  recipes/galicia_confidential.recipe  Normal file
@ -0,0 +1,49 @@
# -*- coding: utf-8 -*-

from calibre.web.feeds.news import BasicNewsRecipe
from calibre.web.feeds import Feed

class GC_gl(BasicNewsRecipe):
    title = u'Galicia Confidencial (RSS)'
    __author__ = u'Susana Sotelo Docío'
    description = u'Unha fiestra de información aberta a todos'
    publisher = u'Galicia Confidencial'
    category = u'news, society, politics, Galicia'
    encoding = 'utf-8'
    language = 'gl'
    direction = 'ltr'
    cover_url = 'http://galiciaconfidencial.com/imagenes/header/logo_gc.gif'
    oldest_article = 5
    max_articles_per_feed = 100
    center_navbar = False

    feeds = [(u'Novas no RSS', u'http://galiciaconfidencial.com/rss2/xeral.rss')]

    extra_css = u' p{text-align:left} '

    def print_version(self, url):
        return url.replace('http://galiciaconfidencial.com/nova/', 'http://galiciaconfidencial.com/imprimir/')

    def parse_index(self):
        feeds = []
        self.gc_parse_feeds(feeds)
        return feeds

    def gc_parse_feeds(self, feeds):
        rssFeeds = Feed()
        rssFeeds = BasicNewsRecipe.parse_feeds(self)
        self.feed_to_index_append(rssFeeds[:], feeds)

    def feed_to_index_append(self, feedObject, masterFeed):
        for feed in feedObject:
            newArticles = []
            for article in feed.articles:
                newArt = {
                    'title' : article.title,
                    'url'   : article.url,
                    'date'  : article.date
                }
                newArticles.append(newArt)
            masterFeed.append((feed.title, newArticles))
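The feed_to_index_append helper above converts parsed Feed objects back into the plain structure that calibre's parse_index contract expects: a list of (feed title, list of article dicts). A minimal sketch of that target structure, with placeholder values (the article itself is hypothetical):

    # What parse_index must return, per calibre's BasicNewsRecipe API:
    index = [
        (u'Novas no RSS', [
            {'title': u'Example headline',  # hypothetical article
             'url': u'http://galiciaconfidencial.com/nova/0000.html',
             'date': u'2011-07-08',
             'description': u'',
             'content': u''},
        ]),
    ]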
35  recipes/geek_poke.recipe  Normal file
@ -0,0 +1,35 @@
#!/usr/bin/python

from calibre.web.feeds.news import BasicNewsRecipe
import re

class AdvancedUserRecipe1307556816(BasicNewsRecipe):
    title = u'Geek and Poke'
    __author__ = u'DrMerry'
    description = u'Geek and Poke Cartoons'
    oldest_article = 31
    max_articles_per_feed = 100
    language = u'en'
    simultaneous_downloads = 5
    #delay = 1
    timefmt = ' [%A, %d %B, %Y]'
    summary_length = -1
    no_stylesheets = True
    cover_url = 'http://geekandpoke.typepad.com/aboutcoders.jpeg'
    remove_javascript = True
    remove_empty_feeds = True
    publication_type = 'blog'

    preprocess_regexps = [(re.compile(r'(<p>&nbsp;</p>|<iframe.*</iframe>|<a[^>]*>Tweet</a>|<a[^>]*>|</a>)', re.DOTALL|re.IGNORECASE), lambda match: ''),
                          (re.compile(r'(&nbsp;|&#160;)', re.DOTALL|re.IGNORECASE), lambda match: ' '),
                          (re.compile(r'<br( /)?>(<br( /)?>)+', re.DOTALL|re.IGNORECASE), lambda match: '<br>')
                          ]

    extra_css = 'body, h3, p, h2, h1, div, span{margin:0px} h2.date-header {font-size: 0.7em; color:#eee;} h3.entry-header{font-size: 1.0em} div.entry-body{font-size: 0.9em}'

    remove_tags_before = dict(name='h2', attrs={'class':'date-header'})
    remove_tags_after = dict(name='div', attrs={'class':'entry-body'})

    feeds = [(u'Geek and Poke', u'http://feeds.feedburner.com/GeekAndPoke?format=xml')]
BIN  recipes/icons/pecat.png  Normal file
Binary file not shown. (new file, 383 B)
43  recipes/idg_now.recipe  Normal file
@ -0,0 +1,43 @@
from calibre.web.feeds.news import BasicNewsRecipe

class IDGNow(BasicNewsRecipe):
    title = 'IDG Now!'
    __author__ = 'Diniz Bortolotto'
    description = 'Posts do IDG Now!'
    oldest_article = 7
    max_articles_per_feed = 20
    encoding = 'utf8'
    publisher = 'Now!Digital Business Ltda.'
    category = 'technology, telecom, IT, Brazil'
    language = 'pt_BR'
    publication_type = 'technology portal'
    use_embedded_content = False
    extra_css = '.headline {font-size: x-large;} \n .fact { padding-top: 10pt }'

    def get_article_url(self, article):
        link = article.get('link', None)
        if link is None:
            return article
        if link.split('/')[-1] == "story01.htm":
            link = link.split('/')[-2]
            a = ['0B','0C','0D','0E','0F','0G','0I','0N','0L0S','0A','0J3A']
            b = ['.','/','?','-','=','&','_','.com','www.','0',':']
            for i in range(0, len(a)):
                link = link.replace(a[i], b[i])
            link = link.split('&')[-3]
            link = link.split('=')[1]
            link = link + "/IDGNoticiaPrint_view"
        return link

    feeds = [
        (u'Ultimas noticias', u'http://rss.idgnow.com.br/c/32184/f/499640/index.rss'),
        (u'Computa\xe7\xe3o Corporativa', u'http://rss.idgnow.com.br/c/32184/f/499643/index.rss'),
        (u'Carreira', u'http://rss.idgnow.com.br/c/32184/f/499644/index.rss'),
        (u'Computa\xe7\xe3o Pessoal', u'http://rss.idgnow.com.br/c/32184/f/499645/index.rss'),
        (u'Internet', u'http://rss.idgnow.com.br/c/32184/f/499646/index.rss'),
        (u'Mercado', u'http://rss.idgnow.com.br/c/32184/f/419982/index.rss'),
        (u'Seguran\xe7a', u'http://rss.idgnow.com.br/c/32184/f/499647/index.rss'),
        (u'Telecom e Redes', u'http://rss.idgnow.com.br/c/32184/f/499648/index.rss')
    ]

    reverse_article_order = True
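The get_article_url method above undoes FeedBurner/feedproxy URL obfuscation: "story01.htm" links encode URL characters as two-character '0X' codes. A standalone sketch of just the decoding step, using the same substitution table; the encoded sample is hypothetical, built only to exercise the table:

    codes = ['0B', '0C', '0D', '0E', '0F', '0G', '0I', '0N', '0L0S', '0A', '0J3A']
    chars = ['.',  '/',  '?',  '-',  '=',  '&',  '_',  '.com', 'www.', '0', ':']

    def decode_feedproxy(segment):
        # Apply the substitutions in the same order as the recipe does.
        for code, char in zip(codes, chars):
            segment = segment.replace(code, char)
        return segment

    print(decode_feedproxy('http0J3A0C0Cidgnow0N0Bbr'))
    # -> 'http://idgnow.com.br'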
@ -16,16 +16,14 @@ class i09(BasicNewsRecipe):
     max_articles_per_feed = 100
     no_stylesheets = True
     encoding = 'utf-8'
-    use_embedded_content = False
+    use_embedded_content = True
     language = 'en'
     masthead_url = 'http://cache.gawkerassets.com/assets/io9.com/img/logo.png'
     extra_css = '''
         body{font-family: "Lucida Grande",Helvetica,Arial,sans-serif}
         img{margin-bottom: 1em}
         h1{font-family :Arial,Helvetica,sans-serif; font-size:large}
         h2{font-family :Arial,Helvetica,sans-serif; font-size:x-small}
         '''

     conversion_options = {
         'comment' : description
         , 'tags' : category
@ -33,13 +31,11 @@ class i09(BasicNewsRecipe):
         , 'language' : language
         }

-    remove_attributes = ['width','height']
-    keep_only_tags = [dict(attrs={'class':'content permalink'})]
-    remove_tags_before = dict(name='h1')
-    remove_tags = [dict(attrs={'class':'contactinfo'})]
-    remove_tags_after = dict(attrs={'class':'contactinfo'})
-    feeds = [(u'Articles', u'http://feeds.gawker.com/io9/vip?format=xml')]
+    feeds = [(u'Articles', u'http://feeds.gawker.com/io9/full')]
+    remove_tags = [
+        {'class': 'feedflare'},
+    ]

     def preprocess_html(self, soup):
         return self.adeify_images(soup)
138  recipes/menorca.recipe  Normal file
@ -0,0 +1,138 @@
# -*- coding: utf-8 -*-

import re
from calibre.web.feeds.recipes import BasicNewsRecipe
from calibre.web.feeds import Feed

class Menorca(BasicNewsRecipe):

    title = 'Menorca'
    publisher = 'Editorial Menorca S.A.'
    __author__ = 'M. Sintes'
    description = u'Peri\xf3dico con informaci\xf3n de Menorca, Espa\xf1a'
    category = 'news, politics, economy, culture, Menorca, Spain'
    language = 'es'
    encoding = 'cp1252'

    no_stylesheets = True
    oldest_article = 5
    max_articles_per_feed = 25

    feeds = [(u'Principal', u'http://www.menorca.info/rss'),
             (u'Opini\xf3n', u'http://www.menorca.info/rss?seccion=opinion'),
             (u'Menorca', u'http://www.menorca.info/rss?seccion=menorca'),
             (u'Alaior', u'http://www.menorca.info/rss?seccion=pueblos/alaior'),
             (u'Ciutadella', u'http://www.menorca.info/rss?seccion=pueblos/ciutadella'),
             (u'Es Castell', u'http://www.menorca.info/rss?seccion=pueblos/escastell'),
             (u'Es Mercadal', u'http://www.menorca.info/rss?seccion=pueblos/esmercadal'),
             (u'Es Migjorn', u'http://www.menorca.info/rss?seccion=pueblos/esmigjorn'),
             (u'Ferreries', u'http://www.menorca.info/rss?seccion=pueblos/ferreries'),
             (u'Fornells', u'http://www.menorca.info/rss?seccion=pueblos/fornells'),
             (u'Llucma\xe7anes', u'http://www.menorca.info/rss?seccion=pueblos/llucmaanes'),
             (u'Ma\xf3', u'http://www.menorca.info/rss?seccion=pueblos/mao'),
             (u'Sant Climent', u'http://www.menorca.info/rss?seccion=pueblos/santcliment'),
             (u'Sant Llu\xeds', u'http://www.menorca.info/rss?seccion=pueblos/santlluis'),
             (u'Deportes', u'http://www.menorca.info/rss?seccion=deportes'),
             (u'Balears', u'http://www.menorca.info/rss?seccion=balears')]

    # Sections whose RSS link is broken; fetched directly from the web page instead
    seccions_web = [(u'Mundo', u'http://www.menorca.info/actualidad/mundo'),
                    (u'Econom\xeda', u'http://www.menorca.info/actualidad/economia'),
                    (u'Espa\xf1a', u'http://www.menorca.info/actualidad/espana')]

    remove_tags_before = dict(name='div', attrs={'class':'bloqueTitulosNoticia'})
    remove_tags_after = dict(name='div', attrs={'class':'compartir'})
    remove_tags = [dict(id='utilidades'),
                   dict(name='div', attrs={'class': 'totalComentarios'}),
                   dict(name='div', attrs={'class': 'compartir'}),
                   dict(name='div', attrs={'class': re.compile("img_noticia*")})
                   ]

    def print_version(self, url):
        url_imprimir = url + '?d=print'
        return url.replace(url, url_imprimir)

    def feed_to_index_append(self, feedObject, masterFeed):
        # Loop through the feed object and build the correct type of article list
        for feed in feedObject:
            newArticles = []
            for article in feed.articles:
                newArt = {
                    'title' : article.title,
                    'url'   : article.url,
                    'date'  : article.date,
                    'description' : article.text_summary
                }
                newArticles.append(newArt)

            # Append the newly-built list object to the index object passed in as masterFeed.
            masterFeed.append((feed.title, newArticles))

    def parse_index(self):
        rssFeeds = Feed()
        rssFeeds = BasicNewsRecipe.parse_feeds(self)

        articles = []
        feeds = []

        self.feed_to_index_append(rssFeeds, feeds)

        for (nom_seccio, url_seccio) in self.seccions_web:
            articles = []
            soup = self.index_to_soup(url_seccio)
            for article in soup.findAll('div', attrs={'class':re.compile("articulo noticia|cajaNoticiaPortada")}):
                h = article.find(['h2','h3'])
                titol = self.tag_to_string(h)
                a = article.find('a', href=True)
                url = 'http://www.menorca.info' + a['href']

                desc = None
                autor = ''
                dt = ''

                soup_art = self.index_to_soup(url)
                aut = soup_art.find('div', attrs={'class':'autor'})
                tx = self.tag_to_string(aut)
                ls = re.split('[,;]', tx)

                t = len(ls)
                if t >= 1:
                    autor = ls[0]
                if t > 1:
                    d = ls[t-1]
                    if len(d) >= 10:
                        lt = len(d) - 10
                        dt = d[lt:]

                self.log('\tTrobat article: ', titol, 'a', url, 'Seccio: ', nom_seccio, 'Autor: ', autor, 'Data: ', dt)

                articles.append({'title': titol, 'url': url, 'description': desc, 'date': dt, 'author': autor})

            if articles:
                feeds.append((nom_seccio, articles))

        return feeds
@ -20,7 +20,7 @@ class AdvancedUserRecipe1306097511(BasicNewsRecipe):
     remove_tags_before = dict(name='div', attrs={'id':'date'})
     remove_tags_after = dict(name='div', attrs={'id':'column-1-3'})
     encoding = 'utf-8'
-    extra_css = '#date {font-size: 10px} .article-image-caption {font-size: 8px}'
+    extra_css = 'body{font-size:12px} #date, .article-image-caption {font-size: 0.583em} h2 {font-size: 0.917em} p.small, span, li, li span span, p, b, i, u, p.small.article-paragraph, p.small.article-paragraph p, p.small.article-paragraph span, p span, span {font-size: 0.833em} h1 {font-size: 1em}'

     remove_tags = [dict(name='div', attrs={'class':[ 'metroCommentFormWrap',
         'commentForm', 'metroCommentInnerWrap', 'article-slideshow-counter-container', 'article-slideshow-control', 'ad', 'header-links',
24  recipes/noticias_unb.recipe  Normal file
@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-

from calibre.web.feeds.news import BasicNewsRecipe

class NoticiasUnB(BasicNewsRecipe):
    title = 'Noticias UnB'
    __author__ = 'Diniz Bortolotto'
    description = 'Noticias da UnB'
    oldest_article = 5
    max_articles_per_feed = 20
    category = 'news, educational, Brazil'
    language = 'pt_BR'
    publication_type = 'newsportal'
    use_embedded_content = False
    no_stylesheets = True
    remove_javascript = True

    feeds = [(u'UnB Agência', u'http://www.unb.br/noticias/rss/noticias.rss')]

    reverse_article_order = True

    def print_version(self, url):
        return url.replace('http://', 'http://www.unb.br/noticias/print_email/imprimir.php?u=http://')
72  recipes/pecat.recipe  Normal file
@ -0,0 +1,72 @@

__license__ = 'GPL v3'
__copyright__ = '2011, Darko Miletic <darko.miletic at gmail.com>'
'''
www.pecat.co.rs
'''

import re
from calibre.web.feeds.news import BasicNewsRecipe

class Pecat_rs(BasicNewsRecipe):
    title = 'Pecat'
    __author__ = 'Darko Miletic'
    description = 'Internet portal slobodne Srbije'
    oldest_article = 15
    max_articles_per_feed = 100
    language = 'sr'
    encoding = 'utf-8'
    no_stylesheets = True
    use_embedded_content = True
    masthead_url = 'http://www.pecat.co.rs/wp-content/themes/zenko-v1/images/logo.jpg'
    publication_type = 'magazine'
    extra_css = """
        @font-face {font-family: "serif1";src:url(res:///opt/sony/ebook/FONT/tt0011m_.ttf)}
        @font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
        body{font-family: Arial,Helvetica,sans1,sans-serif}
        img{display: block; margin-bottom: 1em; margin-top: 1em}
        p{display: block; margin-bottom: 1em; margin-top: 1em}
        """

    conversion_options = {
        'comment'  : description
        , 'tags'     : 'politika, Srbija'
        , 'publisher': 'Pecat'
        , 'language' : language
    }

    preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')]

    feeds = [(u'Clanci', u'http://www.pecat.co.rs/feed/')]

    def preprocess_html(self, soup):
        for item in soup.findAll(style=True):
            del item['style']
        for item in soup.findAll('a'):
            limg = item.find('img')
            if item.string is not None:
                str = item.string
                item.replaceWith(str)
            else:
                if limg:
                    limg.extract()
                    item.replaceWith(limg)
                else:
                    str = self.tag_to_string(item)
                    item.replaceWith(str)
        for item in soup.findAll('img'):
            dad = item.findParent('p')
            if dad:
                mydad = dad.parent
                myIndex = mydad.contents.index(dad)
                item.extract()
                mydad.insert(myIndex, item)
        for item in soup.findAll('strong'):
            dad = item.findParent('p')
            if dad:
                mydad = dad.parent
                myIndex = mydad.contents.index(dad)
                item.extract()
                item.name = 'h4'
                mydad.insert(myIndex, item)
        return soup
@ -1,94 +1,67 @@
 #!/usr/bin/env python

 __license__ = 'GPL v3'
-__copyright__ = '2008-2009, Darko Miletic <darko.miletic at gmail.com>'
+__copyright__ = '2008-2011, Darko Miletic <darko.miletic at gmail.com>'
 '''
 spiegel.de
 '''

 from calibre.web.feeds.news import BasicNewsRecipe

 class Spiegel_int(BasicNewsRecipe):
     title = 'Spiegel Online International'
     __author__ = 'Darko Miletic and Sujata Raman'
-    description = "News and POV from Europe's largest newsmagazine"
+    description = "Daily news, analysis and opinion from Europe's leading newsmagazine and Germany's top news Web site"
     oldest_article = 7
     max_articles_per_feed = 100
-    language = 'en'
+    language = 'en_DE'
     no_stylesheets = True
     use_embedded_content = False
     encoding = 'cp1252'
     publisher = 'SPIEGEL ONLINE GmbH'
     category = 'news, politics, Germany'
-    lang = 'en'
-    recursions = 1
-    match_regexps = [r'http://www.spiegel.de/.*-[1-9],00.html']
+    masthead_url = 'http://www.spiegel.de/static/sys/v9/spiegelonline_logo.png'
+    publication_type = 'magazine'

     conversion_options = {
-        'comments'     : description
-        ,'tags'        : category
-        ,'language'    : lang
-        ,'publisher'   : publisher
-        ,'pretty_print': True
+        'comments' : description
+        ,'tags' : category
+        ,'language' : language
+        ,'publisher': publisher
     }

     extra_css = '''
-        #spArticleColumn{font-family:verdana,arial,helvetica,geneva,sans-serif ; }
+        #spArticleContent{font-family: Verdana,Arial,Helvetica,Geneva,sans-serif}
         h1{color:#666666; font-weight:bold;}
         h2{color:#990000;}
         h3{color:#990000;}
         h4 {color:#990000;}
         a{color:#990000;}
         .spAuthor{font-style:italic;}
-        #spIntroTeaser{font-weight:bold;}
+        #spIntroTeaser{font-weight:bold}
         .spCredit{color:#666666; font-size:x-small;}
         .spShortDate{font-size:x-small;}
         .spArticleImageBox {font-size:x-small;}
         .spPhotoGallery{font-size:x-small; color:#990000 ;}
     '''

-    keep_only_tags = [
-        dict(name ='div', attrs={'id': ['spArticleImageBox spAssetAlignleft','spArticleColumn']}),
-    ]
+    keep_only_tags = [dict(attrs={'id':'spArticleContent'})]
+    remove_tags_after = dict(attrs={'id':'spArticleBody'})
+    remove_tags = [dict(name=['meta','base','iframe','embed','object'])]
+    remove_attributes = ['clear']
+    feeds = [(u'Spiegel Online', u'http://www.spiegel.de/international/index.rss')]

-    remove_tags = [
-        dict(name='div', attrs={'id':['spSocialBookmark','spArticleFunctions','spMultiPagerHeadlines',]}),
-        dict(name='div', attrs={'class':['spCommercial spM520','spArticleCredit','spPicZoom']}),
-    ]
-
-    feeds = [(u'Spiegel Online', u'http://www.spiegel.de/schlagzeilen/rss/0,5291,676,00.xml')]
-
-    def postprocess_html(self, soup, first):
-        for tag in soup.findAll(name='div', attrs={'id':"spMultiPagerControl"}):
-            tag.extract()
-        p = soup.find(name='p', attrs={'id':'spIntroTeaser'})
-        if p.string is not None:
-            t = p.string.rpartition(':')[0]
-            if 'Part' in t:
-                if soup.h1 is not None:
-                    soup.h1.extract()
-                if soup.h2 is not None:
-                    soup.h2.extract()
-        functag = soup.find(name='div', attrs={'id':"spArticleFunctions"})
-        if functag is not None:
-            functag.extract()
-        auttag = soup.find(name='p', attrs={'class':"spAuthor"})
-        if auttag is not None:
-            auttag.extract()
-        pictag = soup.find(name='div', attrs={'id':"spArticleTopAsset"})
-        if pictag is not None:
-            pictag.extract()
-        return soup
-
-    # def print_version(self, url):
-    #     main, sep, rest = url.rpartition(',')
-    #     rmain, rsep, rrest = main.rpartition(',')
-    #     return rmain + ',druck-' + rrest + ',' + rest
+    def print_version(self, url):
+        main, sep, rest = url.rpartition(',')
+        rmain, rsep, rrest = main.rpartition(',')
+        return rmain + ',druck-' + rrest + ',' + rest

+    def preprocess_html(self, soup):
+        for item in soup.findAll(style=True):
+            del item['style']
+        for item in soup.findAll('a'):
+            if item.string is not None:
+                str = item.string
+                item.replaceWith(str)
+            else:
+                str = self.tag_to_string(item)
+                item.replaceWith(str)
+        return soup
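The print_version rewrite above is easier to follow on a concrete URL. A worked example of the two rpartition calls; the sample URL is hypothetical but follows spiegel.de's id,id,id,00.html pattern:

    url = 'http://www.spiegel.de/international/europe/0,1518,771205,00.html'
    main, sep, rest = url.rpartition(',')      # rest  = '00.html'
    rmain, rsep, rrest = main.rpartition(',')  # rrest = '771205'
    print(rmain + ',druck-' + rrest + ',' + rest)
    # http://www.spiegel.de/international/europe/0,1518,druck-771205,00.html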
24  recipes/tijolaco.recipe  Normal file
@ -0,0 +1,24 @@
from calibre.web.feeds.recipes import BasicNewsRecipe

class Tijolaco(BasicNewsRecipe):
    title = u'Tijolaco.com'
    __author__ = u'Diniz Bortolotto'
    description = u'Posts do Blog Tijola\xe7o.com'
    oldest_article = 7
    max_articles_per_feed = 50
    encoding = 'utf8'
    publisher = u'Brizola Neto'
    category = 'politics, Brazil'
    language = 'pt_BR'
    publication_type = 'politics portal'
    use_embedded_content = False
    no_stylesheets = True
    remove_javascript = True

    feeds = [(u'Blog Tijola\xe7o.com', u'http://feeds.feedburner.com/Tijolacoblog')]

    reverse_article_order = True

    keep_only_tags = [dict(name='div', attrs={'class':'post'})]

    remove_tags = [dict(name='span', attrs={'class':'com'})]
@ -8,47 +8,33 @@ time.com

 import re
 from calibre.web.feeds.news import BasicNewsRecipe
+from lxml import html

 class Time(BasicNewsRecipe):
     #recipe_disabled = ('This recipe has been disabled as TIME no longer'
     #    ' publish complete articles on the web.')
     title = u'Time'
-    __author__ = 'Kovid Goyal and Sujata Raman'
+    __author__ = 'Kovid Goyal'
     description = 'Weekly magazine'
     encoding = 'utf-8'
     no_stylesheets = True
     language = 'en'
     remove_javascript = True

-    extra_css = ''' h1 {font-family:georgia,serif;color:#000000;}
-        .mainHd{font-family:georgia,serif;color:#000000;}
-        h2 {font-family:Arial,Sans-serif;}
-        .name{font-family:Arial,Sans-serif; font-size:x-small;font-weight:bold; }
-        .date{font-family:Arial,Sans-serif; font-size:x-small ;color:#999999;}
-        .byline{font-family:Arial,Sans-serif; font-size:x-small ;}
-        .photoBkt{ font-size:x-small ;}
-        .vertPhoto{font-size:x-small ;}
-        .credits{font-family:Arial,Sans-serif; font-size:x-small ;color:gray;}
-        .credit{font-family:Arial,Sans-serif; font-size:x-small ;color:gray;}
-        .artTxt{font-family:georgia,serif;}
-        #content{font-family:georgia,serif;}
-        .caption{font-family:georgia,serif; font-size:x-small;color:#333333;}
-        .credit{font-family:georgia,serif; font-size:x-small;color:#999999;}
-        a:link{color:#CC0000;}
-        .breadcrumb{font-family:Arial,Sans-serif;font-size:x-small;}
-        '''
+    keep_only_tags = [
+        {
+            'class':['artHd', 'articleContent',
+                'entry-title','entry-meta', 'entry-content', 'thumbnail']
+        },
+    ]
+    remove_tags = [
+        {'class':['content-tools', 'quigo', 'see',
+            'first-tier-social-tools', 'navigation', 'enlarge lightbox']},
+        {'id':['share-tools']},
+        {'rel':'lightbox'},
+    ]

-    keep_only_tags = [ dict(name="div", attrs={"id":["content"]}),
-        dict(name="div", attrs={"class":["artHd","artTxt","photoBkt","vertPhoto","image","copy"]}), ]
-    remove_tags = [ dict(name="div", attrs={'class':['articleFooterNav','listsByTopic','articleTools2','relatedContent','sideContent','topBannerWrap','articlePagination','nextUp',"rtCol","pagination","enlarge","contentTools2",]}),
-        dict(name="span", attrs={'class':['see']}),
-        dict(name="div", attrs={'id':['header','articleSideBar',"articleTools","articleFooter","cmBotLt","quigoPackage"]}),
-        dict(name="a", attrs={'class':['listLink']}),
-        dict(name="ul", attrs={'id':['shareSocial','tabs']}),
-        dict(name="li", attrs={'class':['back']}),
-        dict(name="ul", attrs={'class':['navCount']}),
-    ]
     recursions = 10
     match_regexps = [r'/[0-9,]+-(2|3|4|5|6|7|8|9)(,\d+){0,1}.html',r'http://www.time.com/time/specials/packages/article/.*']

@ -56,10 +42,11 @@ class Time(BasicNewsRecipe):
         r'<meta .+/>'), lambda m:'')]

     def parse_index(self):
-        soup = self.index_to_soup('http://www.time.com/time/magazine')
-        img = soup.find('a', title="View Large Cover", href=True)
-        if img is not None:
-            cover_url = 'http://www.time.com'+img['href']
+        raw = self.index_to_soup('http://www.time.com/time/magazine', raw=True)
+        root = html.fromstring(raw)
+        img = root.xpath('//a[.="View Large Cover" and @href]')
+        if img:
+            cover_url = 'http://www.time.com' + img[0].get('href')
         try:
             nsoup = self.index_to_soup(cover_url)
             img = nsoup.find('img', src=re.compile('archive/covers'))

@ -70,46 +57,48 @@ class Time(BasicNewsRecipe):

         feeds = []
-        parent = soup.find(id='tocGuts')
-        for seched in parent.findAll(attrs={'class':'toc_seched'}):
-            section = self.tag_to_string(seched).capitalize()
-            articles = list(self.find_articles(seched))
-            feeds.append((section, articles))
+        parent = root.xpath('//div[@class="content-main-aside"]')[0]
+        for sec in parent.xpath(
+                'descendant::section[contains(@class, "sec-mag-section")]'):
+            h3 = sec.xpath('./h3')
+            if h3:
+                section = html.tostring(h3[0], encoding=unicode,
+                    method='text').strip().capitalize()
+                self.log('Found section', section)
+                articles = list(self.find_articles(sec))
+                if articles:
+                    feeds.append((section, articles))

         return feeds

-    def find_articles(self, seched):
-        for a in seched.findNextSiblings(attrs={'class':['toc_hed','rule2']}):
-            if a.name in "div":
-                break
-            else:
-                yield {
-                    'title' : self.tag_to_string(a),
-                    'url'   : 'http://www.time.com'+a['href'],
-                    'date'  : '',
-                    'description' : self.article_description(a)
-                }
-
-    def article_description(self, a):
-        ans = []
-        while True:
-            t = a.nextSibling
-            if t is None:
-                break
-            a = t
-            if getattr(t, 'name', False):
-                if t.get('class', '') == 'toc_parens' or t.name == 'br':
-                    continue
-                if t.name in ('div', 'a'):
-                    break
-                ans.append(self.tag_to_string(t))
-            else:
-                ans.append(unicode(t))
-        return u' '.join(ans).replace(u'\xa0', u'').strip()
+    def find_articles(self, sec):
+        for article in sec.xpath('./article'):
+            h2 = article.xpath('./*[@class="entry-title"]')
+            if not h2: continue
+            a = h2[0].xpath('./a[@href]')
+            if not a: continue
+            title = html.tostring(a[0], encoding=unicode,
+                    method='text').strip()
+            if not title: continue
+            url = a[0].get('href')
+            if url.startswith('/'):
+                url = 'http://www.time.com'+url
+            desc = ''
+            p = article.xpath('./*[@class="entry-content"]')
+            if p:
+                desc = html.tostring(p[0], encoding=unicode,
+                        method='text')
+            self.log('\t', title, ':\n\t\t', desc)
+            yield {
+                'title' : title,
+                'url'   : url,
+                'date'  : '',
+                'description' : desc
+            }

     def postprocess_html(self, soup, first):
         for tag in soup.findAll(attrs={'class':['artPag','pagination']}):
             tag.extract()
         return soup
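The Time rewrite above switches from BeautifulSoup to lxml for index parsing. A minimal standalone sketch of that pattern (parse raw HTML, walk sections by XPath, extract text with tostring); the lxml calls are real APIs, the HTML snippet is made up:

    from lxml import html

    raw = ('<div class="content-main-aside"><section class="sec-mag-section">'
           '<h3>Briefing</h3><article><h2 class="entry-title">'
           '<a href="/time/magazine/article/0,9171,0000000,00.html">A headline</a>'
           '</h2></article></section></div>')
    root = html.fromstring(raw)
    for sec in root.xpath('//section[contains(@class, "sec-mag-section")]'):
        # method='text' serializes only the text content of the element
        section = html.tostring(sec.xpath('./h3')[0], method='text',
                                encoding='unicode').strip()
        for a in sec.xpath('.//h2[@class="entry-title"]/a[@href]'):
            print(section + ' -> ' + a.get('href'))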
30  recipes/vio_mundo.recipe  Normal file
@ -0,0 +1,30 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe

class VioMundo(BasicNewsRecipe):
    title = 'Blog VioMundo'
    __author__ = 'Diniz Bortolotto'
    description = 'Posts do Blog VioMundo'
    publisher = 'Luiz Carlos Azenha'
    oldest_article = 5
    max_articles_per_feed = 20
    category = 'news, politics, Brazil'
    language = 'pt_BR'
    publication_type = 'news and politics portal'
    use_embedded_content = False
    no_stylesheets = True
    remove_javascript = True

    feeds = [(u'Blog VioMundo', u'http://www.viomundo.com.br/feed')]

    reverse_article_order = True

    def print_version(self, url):
        return url + '/print/'

    remove_tags_after = dict(id='BlogContent')

    preprocess_regexps = [
        (re.compile(r'\|\ <u>.*</p>'),
         lambda match: '</p>')
    ]
@ -15,15 +15,16 @@ class ZeitDe(BasicNewsRecipe):
     encoding = 'UTF-8'

     __author__ = 'Martin Pitt, Sujata Raman, Ingo Paschke and Marc Toensing'
     no_stylesheets = True

     max_articles_per_feed = 40

     remove_tags = [
         dict(name='iframe'),
         dict(name='div', attrs={'class':["response","pagination block","pagenav","inline link", "copyright"] }),
         dict(name='p', attrs={'class':["ressortbacklink", "copyright"] }),
         dict(name='div', attrs={'id':["place_5","place_4","comments"]})
     ]

+    keep_only_tags = [dict(id=['main'])]
@ -1,5 +1,5 @@
 " Project wide builtins
-let g:pyflakes_builtins += ["dynamic_property", "__", "P", "I", "lopen", "icu_lower", "icu_upper", "icu_title"]
+let g:pyflakes_builtins += ["dynamic_property", "__", "P", "I", "lopen", "icu_lower", "icu_upper", "icu_title", "ngettext"]

 python << EOFPY
 import os
@ -64,7 +64,7 @@ class Check(Command):
     description = 'Check for errors in the calibre source code'

     BUILTINS = ['_', '__', 'dynamic_property', 'I', 'P', 'lopen', 'icu_lower',
-            'icu_upper', 'icu_title']
+            'icu_upper', 'icu_title', 'ngettext']
     CACHE = '.check-cache.pickle'

     def get_files(self, cache):
@ -1,646 +0,0 @@
#! /usr/bin/env python
# Originally written by Barry Warsaw <barry@zope.com>
#
# Minimally patched to make it even more xgettext compatible
# by Peter Funk <pf@artcom-gmbh.de>
#
# 2002-11-22 Jürgen Hermann <jh@web.de>
# Added checks that _() only contains string literals, and
# command line args are resolved to module lists, i.e. you
# can now pass a filename, a module or package name, or a
# directory (including globbing chars, important for Win32).
# Made docstring fit in 80 chars wide displays using pydoc.
#

__doc__ = """pygettext -- Python equivalent of xgettext(1)

Many systems (Solaris, Linux, Gnu) provide extensive tools that ease the
internationalization of C programs. Most of these tools are independent of
the programming language and can be used from within Python programs.
Martin von Loewis' work[1] helps considerably in this regard.

There's one problem though; xgettext is the program that scans source code
looking for message strings, but it groks only C (or C++). Python
introduces a few wrinkles, such as dual quoting characters, triple quoted
strings, and raw strings. xgettext understands none of this.

Enter pygettext, which uses Python's standard tokenize module to scan
Python source code, generating .pot files identical to what GNU xgettext[2]
generates for C and C++ code. From there, the standard GNU tools can be
used.

A word about marking Python strings as candidates for translation. GNU
xgettext recognizes the following keywords: gettext, dgettext, dcgettext,
and gettext_noop. But those can be a lot of text to include all over your
code. C and C++ have a trick: they use the C preprocessor. Most
internationalized C source includes a #define for gettext() to _() so that
what has to be written in the source is much less. Thus these are both
translatable strings:

    gettext("Translatable String")
    _("Translatable String")

Python of course has no preprocessor so this doesn't work so well. Thus,
pygettext searches only for _() by default, but see the -k/--keyword flag
below for how to augment this.

[1] http://www.python.org/workshops/1997-10/proceedings/loewis.html
[2] http://www.gnu.org/software/gettext/gettext.html

NOTE: pygettext attempts to be option and feature compatible with GNU
xgettext where ever possible. However some options are still missing or are
not fully implemented. Also, xgettext's use of command line switches with
option arguments is broken, and in these cases, pygettext just defines
additional switches.

Usage: pygettext [options] inputfile ...

Options:

    -a
    --extract-all
        Extract all strings.

    -d name
    --default-domain=name
        Rename the default output file from messages.pot to name.pot.

    -E
    --escape
        Replace non-ASCII characters with octal escape sequences.

    -D
    --docstrings
        Extract module, class, method, and function docstrings. These do
        not need to be wrapped in _() markers, and in fact cannot be for
        Python to consider them docstrings. (See also the -X option).

    -h
    --help
        Print this help message and exit.

    -k word
    --keyword=word
        Keywords to look for in addition to the default set, which are:
        %(DEFAULTKEYWORDS)s

        You can have multiple -k flags on the command line.

    -K
    --no-default-keywords
        Disable the default set of keywords (see above). Any keywords
        explicitly added with the -k/--keyword option are still recognized.

    --no-location
        Do not write filename/lineno location comments.

    -n
    --add-location
        Write filename/lineno location comments indicating where each
        extracted string is found in the source. These lines appear before
        each msgid. The style of comments is controlled by the -S/--style
        option. This is the default.

    -o filename
    --output=filename
        Rename the default output file from messages.pot to filename. If
        filename is `-' then the output is sent to standard out.

    -p dir
    --output-dir=dir
        Output files will be placed in directory dir.

    -S stylename
    --style stylename
        Specify which style to use for location comments. Two styles are
        supported:

        Solaris  # File: filename, line: line-number
        GNU      #: filename:line

        The style name is case insensitive. GNU style is the default.

    -v
    --verbose
        Print the names of the files being processed.

    -V
    --version
        Print the version of pygettext and exit.

    -w columns
    --width=columns
        Set width of output to columns.

    -x filename
    --exclude-file=filename
        Specify a file that contains a list of strings that are not be
        extracted from the input files. Each string to be excluded must
        appear on a line by itself in the file.

    -X filename
    --no-docstrings=filename
        Specify a file that contains a list of files (one per line) that
        should not have their docstrings extracted. This is only useful in
        conjunction with the -D option above.

If `inputfile' is -, standard input is read.
"""

import os
import imp
import sys
import glob
import time
import getopt
import token
import tokenize
import operator

__version__ = '1.5'

default_keywords = ['_']
DEFAULTKEYWORDS = ', '.join(default_keywords)

EMPTYSTRING = ''

from setup import __appname__, __version__ as version

# The normal pot-file header. msgmerge and Emacs's po-mode work better if it's
# there.
pot_header = '''\
# Translation template file..
# Copyright (C) %(year)s Kovid Goyal
# Kovid Goyal <kovid@kovidgoyal.net>, %(year)s.
#
msgid ""
msgstr ""
"Project-Id-Version: %(appname)s %(version)s\\n"
"POT-Creation-Date: %%(time)s\\n"
"PO-Revision-Date: %%(time)s\\n"
"Last-Translator: Automatically generated\\n"
"Language-Team: LANGUAGE\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=UTF-8\\n"
"Content-Transfer-Encoding: 8bit\\n"
"Generated-By: pygettext.py %%(version)s\\n"

'''%dict(appname=__appname__, version=version, year=time.strftime('%Y'))

def usage(code, msg=''):
    print >> sys.stderr, __doc__ % globals()
    if msg:
        print >> sys.stderr, msg
    sys.exit(code)


escapes = []

def make_escapes(pass_iso8859):
    global escapes
    if pass_iso8859:
        # Allow iso-8859 characters to pass through so that e.g. 'msgid
        # "Höhe"' would not result in 'msgid "H\366he"'. Otherwise we
        # escape any character outside the 32..126 range.
        mod = 128
    else:
        mod = 256
    for i in range(256):
        if 32 <= (i % mod) <= 126:
            escapes.append(chr(i))
        else:
            escapes.append("\\%03o" % i)
    escapes[ord('\\')] = '\\\\'
    escapes[ord('\t')] = '\\t'
    escapes[ord('\r')] = '\\r'
    escapes[ord('\n')] = '\\n'
    escapes[ord('\"')] = '\\"'


def escape(s):
    global escapes
    s = list(s)
    for i in range(len(s)):
        s[i] = escapes[ord(s[i])]
    return EMPTYSTRING.join(s)


def safe_eval(s):
    # unwrap quotes, safely
    return eval(s, {'__builtins__':{}}, {})


def normalize(s):
    # This converts the various Python string types into a format that is
    # appropriate for .po files, namely much closer to C style.
    lines = s.split('\n')
    if len(lines) == 1:
        s = '"' + escape(s) + '"'
    else:
        if not lines[-1]:
            del lines[-1]
            lines[-1] = lines[-1] + '\n'
        for i in range(len(lines)):
            lines[i] = escape(lines[i])
        lineterm = '\\n"\n"'
        s = '""\n"' + lineterm.join(lines) + '"'
    return s


def containsAny(str, set):
    """Check whether 'str' contains ANY of the chars in 'set'"""
    return 1 in [c in str for c in set]


def _visit_pyfiles(list, dirname, names):
    """Helper for getFilesForName()."""
    # get extension for python source files
    if not globals().has_key('_py_ext'):
        global _py_ext
        _py_ext = [triple[0] for triple in imp.get_suffixes()
                   if triple[2] == imp.PY_SOURCE][0]

    # don't recurse into CVS directories
    if 'CVS' in names:
        names.remove('CVS')

    # add all *.py files to list
    list.extend(
        [os.path.join(dirname, file) for file in names
         if os.path.splitext(file)[1] == _py_ext]
        )


def _get_modpkg_path(dotted_name, pathlist=None):
    """Get the filesystem path for a module or a package.

    Return the file system path to a file for a module, and to a directory for
    a package. Return None if the name is not found, or is a builtin or
    extension module.
    """
    # split off top-most name
    parts = dotted_name.split('.', 1)

    if len(parts) > 1:
        # we have a dotted path, import top-level package
        try:
            file, pathname, description = imp.find_module(parts[0], pathlist)
            if file: file.close()
        except ImportError:
            return None

        # check if it's indeed a package
        if description[2] == imp.PKG_DIRECTORY:
            # recursively handle the remaining name parts
            pathname = _get_modpkg_path(parts[1], [pathname])
        else:
            pathname = None
    else:
        # plain name
        try:
            file, pathname, description = imp.find_module(
                dotted_name, pathlist)
            if file:
                file.close()
            if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]:
                pathname = None
        except ImportError:
            pathname = None

    return pathname


def getFilesForName(name):
    """Get a list of module files for a filename, a module or package name,
    or a directory.
    """
    if not os.path.exists(name):
        # check for glob chars
        if containsAny(name, "*?[]"):
            files = glob.glob(name)
            list = []
            for file in files:
                list.extend(getFilesForName(file))
            return list

        # try to find module or package
        name = _get_modpkg_path(name)
        if not name:
            return []

    if os.path.isdir(name):
        # find all python files in directory
        list = []
        os.path.walk(name, _visit_pyfiles, list)
        return list
    elif os.path.exists(name):
        # a single file
        return [name]

    return []


class TokenEater:
    def __init__(self, options):
        self.__options = options
        self.__messages = {}
        self.__state = self.__waiting
        self.__data = []
        self.__lineno = -1
        self.__freshmodule = 1
        self.__curfile = None

    def __call__(self, ttype, tstring, stup, etup, line):
        # dispatch
##        import token
##        print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
##              'tstring:', tstring
        self.__state(ttype, tstring, stup[0])

    def __waiting(self, ttype, tstring, lineno):
        opts = self.__options
        # Do docstring extractions, if enabled
        if opts.docstrings and not opts.nodocstrings.get(self.__curfile):
            # module docstring?
            if self.__freshmodule:
                if ttype == tokenize.STRING:
                    self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
                    self.__freshmodule = 0
                elif ttype not in (tokenize.COMMENT, tokenize.NL):
                    self.__freshmodule = 0
                return
            # class docstring?
            if ttype == tokenize.NAME and tstring in ('class', 'def'):
                self.__state = self.__suiteseen
                return
        if ttype == tokenize.NAME and tstring in opts.keywords:
            self.__state = self.__keywordseen

    def __suiteseen(self, ttype, tstring, lineno):
        # ignore anything until we see the colon
        if ttype == tokenize.OP and tstring == ':':
            self.__state = self.__suitedocstring

    def __suitedocstring(self, ttype, tstring, lineno):
        # ignore any intervening noise
        if ttype == tokenize.STRING:
            self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
            self.__state = self.__waiting
        elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
                           tokenize.COMMENT):
            # there was no class docstring
            self.__state = self.__waiting

    def __keywordseen(self, ttype, tstring, lineno):
        if ttype == tokenize.OP and tstring == '(':
            self.__data = []
            self.__lineno = lineno
            self.__state = self.__openseen
        else:
            self.__state = self.__waiting

    def __openseen(self, ttype, tstring, lineno):
        if ttype == tokenize.OP and tstring == ')':
            # We've seen the last of the translatable strings. Record the
            # line number of the first line of the strings and update the list
            # of messages seen. Reset state for the next batch. If there
            # were no strings inside _(), then just ignore this entry.
            if self.__data:
                self.__addentry(EMPTYSTRING.join(self.__data))
            self.__state = self.__waiting
        elif ttype == tokenize.STRING:
            self.__data.append(safe_eval(tstring))
        elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                           token.NEWLINE, tokenize.NL]:
            # warn if we see anything else than STRING or whitespace
            print >> sys.stderr, \
                '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'\
                % {
                'token': tstring,
                'file': self.__curfile,
                'lineno': self.__lineno
                }
            self.__state = self.__waiting

    def __addentry(self, msg, lineno=None, isdocstring=0):
        if lineno is None:
            lineno = self.__lineno
        if not msg in self.__options.toexclude:
            entry = (self.__curfile, lineno)
            self.__messages.setdefault(msg, {})[entry] = isdocstring

    def set_filename(self, filename):
        self.__curfile = filename
        self.__freshmodule = 1

    def write(self, fp):
        options = self.__options
        timestamp = time.strftime('%Y-%m-%d %H:%M+%Z')
        # The time stamp in the header doesn't have the same format as that
        # generated by xgettext...
        print >> fp, pot_header % {'time': timestamp, 'version': __version__}
        # Sort the entries. First sort each particular entry's keys, then
        # sort all the entries by their first item.
        reverse = {}
        for k, v in self.__messages.items():
            keys = v.keys()
            keys.sort()
            reverse.setdefault(tuple(keys), []).append((k, v))
        rkeys = reverse.keys()
        rkeys.sort()
        for rkey in rkeys:
            rentries = reverse[rkey]
            rentries.sort()
            for k, v in rentries:
                isdocstring = 0
                # If the entry was gleaned out of a docstring, then add a
                # comment stating so. This is to aid translators who may wish
                # to skip translating some unimportant docstrings.
                if reduce(operator.__add__, v.values()):
                    isdocstring = 1
                # k is the message string, v is a dictionary-set of (filename,
                # lineno) tuples. We want to sort the entries in v first by
                # file name and then by line number.
                v = v.keys()
                v.sort()
                if not options.writelocations:
                    pass
                # location comments are different b/w Solaris and GNU:
                elif options.locationstyle == options.SOLARIS:
                    for filename, lineno in v:
                        d = {'filename': filename, 'lineno': lineno}
                        print >>fp, \
                            '# File: %(filename)s, line: %(lineno)d' % d
                elif options.locationstyle == options.GNU:
                    # fit as many locations on one line, as long as the
                    # resulting line length doesn't exceed 'options.width'
                    locline = '#:'
                    for filename, lineno in v:
                        d = {'filename': filename, 'lineno': lineno}
                        s = ' %(filename)s:%(lineno)d' % d
                        if len(locline) + len(s) <= options.width:
                            locline = locline + s
                        else:
                            print >> fp, locline
                            locline = "#:" + s
                    if len(locline) > 2:
                        print >> fp, locline
                if isdocstring:
                    print >> fp, '#, docstring'
                print >> fp, 'msgid', normalize(k)
                print >> fp, 'msgstr ""\n'


def main(outfile, args=sys.argv[1:]):
    global default_keywords
    try:
        opts, args = getopt.getopt(
            args,
            'ad:DEhk:Kno:p:S:Vvw:x:X:',
            ['extract-all', 'default-domain=', 'escape', 'help',
             'keyword=', 'no-default-keywords',
             'add-location', 'no-location', 'output=', 'output-dir=',
             'style=', 'verbose', 'version', 'width=', 'exclude-file=',
             'docstrings', 'no-docstrings',
             ])
    except getopt.error, msg:
        usage(1, msg)

    # for holding option values
    class Options:
        # constants
        GNU = 1
        SOLARIS = 2
        # defaults
        extractall = 0 # FIXME: currently this option has no effect at all.
        escape = 0
        keywords = []
        outpath = ''
        outfile = 'messages.pot'
        writelocations = 1
        locationstyle = GNU
        verbose = 0
        width = 78
        excludefilename = ''
        docstrings = 0
        nodocstrings = {}

    options = Options()
    locations = {'gnu' : options.GNU,
                 'solaris' : options.SOLARIS,
                 }

    # parse options
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-a', '--extract-all'):
            options.extractall = 1
        elif opt in ('-d', '--default-domain'):
            options.outfile = arg + '.pot'
        elif opt in ('-E', '--escape'):
            options.escape = 1
        elif opt in ('-D', '--docstrings'):
            options.docstrings = 1
        elif opt in ('-k', '--keyword'):
            options.keywords.append(arg)
        elif opt in ('-K', '--no-default-keywords'):
            default_keywords = []
        elif opt in ('-n', '--add-location'):
            options.writelocations = 1
        elif opt in ('--no-location',):
            options.writelocations = 0
        elif opt in ('-S', '--style'):
            options.locationstyle = locations.get(arg.lower())
            if options.locationstyle is None:
                usage(1, ('Invalid value for --style: %s') % arg)
        elif opt in ('-o', '--output'):
            options.outfile = arg
        elif opt in ('-p', '--output-dir'):
            options.outpath = arg
|
||||
elif opt in ('-v', '--verbose'):
|
||||
options.verbose = 1
|
||||
elif opt in ('-V', '--version'):
|
||||
print ('pygettext.py (xgettext for Python) %s') % __version__
|
||||
sys.exit(0)
|
||||
elif opt in ('-w', '--width'):
|
||||
try:
|
||||
options.width = int(arg)
|
||||
except ValueError:
|
||||
usage(1, ('--width argument must be an integer: %s') % arg)
|
||||
elif opt in ('-x', '--exclude-file'):
|
||||
options.excludefilename = arg
|
||||
elif opt in ('-X', '--no-docstrings'):
|
||||
fp = open(arg)
|
||||
try:
|
||||
while 1:
|
||||
line = fp.readline()
|
||||
if not line:
|
||||
break
|
||||
options.nodocstrings[line[:-1]] = 1
|
||||
finally:
|
||||
fp.close()
|
||||
|
||||
# calculate escapes
|
||||
make_escapes(options.escape)
|
||||
|
||||
# calculate all keywords
|
||||
options.keywords.extend(default_keywords)
|
||||
|
||||
# initialize list of strings to exclude
|
||||
if options.excludefilename:
|
||||
try:
|
||||
fp = open(options.excludefilename)
|
||||
options.toexclude = fp.readlines()
|
||||
fp.close()
|
||||
except IOError:
|
||||
print >> sys.stderr, (
|
||||
"Can't read --exclude-file: %s") % options.excludefilename
|
||||
sys.exit(1)
|
||||
else:
|
||||
options.toexclude = []
|
||||
|
||||
# resolve args to module lists
|
||||
expanded = []
|
||||
for arg in args:
|
||||
if arg == '-':
|
||||
expanded.append(arg)
|
||||
else:
|
||||
expanded.extend(getFilesForName(arg))
|
||||
args = expanded
|
||||
|
||||
# slurp through all the files
|
||||
eater = TokenEater(options)
|
||||
for filename in args:
|
||||
if filename == '-':
|
||||
if options.verbose:
|
||||
print ('Reading standard input')
|
||||
fp = sys.stdin
|
||||
closep = 0
|
||||
else:
|
||||
if options.verbose:
|
||||
print ('Working on %s') % filename
|
||||
fp = open(filename)
|
||||
closep = 1
|
||||
try:
|
||||
eater.set_filename(filename)
|
||||
try:
|
||||
tokenize.tokenize(fp.readline, eater)
|
||||
except tokenize.TokenError, e:
|
||||
print >> sys.stderr, '%s: %s, line %d, column %d' % (
|
||||
e[0], filename, e[1][0], e[1][1])
|
||||
except IndentationError, e:
|
||||
print >> sys.stderr, '%s: %s, line %s, column %s' % (
|
||||
e[0], filename, e.lineno, e[1][1])
|
||||
|
||||
finally:
|
||||
if closep:
|
||||
fp.close()
|
||||
|
||||
# write the output
|
||||
eater.write(outfile)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main(sys.stdout)
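
# NOTE: Illustrative sketch, not part of this commit. It shows how main()
# above drives the TokenEater state machine: tokenize calls the eater once
# per token. The sample source line is invented.
#
#     import tokenize, cStringIO
#
#     def eater(ttype, tstring, start, end, line):
#         # A real TokenEater moves through __waiting/__keywordseen/__openseen;
#         # this stub only reports STRING tokens and their line numbers.
#         if ttype == tokenize.STRING:
#             print start[0], tstring
#
#     src = "title = _('Hello world')\n"
#     tokenize.tokenize(cStringIO.StringIO(src).readline, eater)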
@ -6,11 +6,10 @@ __license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import os, cStringIO, tempfile, shutil, atexit, subprocess, glob, re
import os, tempfile, shutil, subprocess, glob, re, time, textwrap
from distutils import sysconfig

from setup import Command, __appname__
from setup.pygettext import main as pygettext
from setup import Command, __appname__, __version__
from setup.build_environment import pyqt

class POT(Command):
@ -60,19 +59,50 @@ class POT(Command):

    def run(self, opts):
        pot_header = textwrap.dedent('''\
        # Translation template file..
        # Copyright (C) %(year)s Kovid Goyal
        # Kovid Goyal <kovid@kovidgoyal.net>, %(year)s.
        #
        msgid ""
        msgstr ""
        "Project-Id-Version: %(appname)s %(version)s\\n"
        "POT-Creation-Date: %(time)s\\n"
        "PO-Revision-Date: %(time)s\\n"
        "Last-Translator: Automatically generated\\n"
        "Language-Team: LANGUAGE\\n"
        "MIME-Version: 1.0\\n"
        "Report-Msgid-Bugs-To: https://bugs.launchpad.net/calibre\\n"
        "Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\\n"
        "Content-Type: text/plain; charset=UTF-8\\n"
        "Content-Transfer-Encoding: 8bit\\n"

        ''')%dict(appname=__appname__, version=__version__,
                year=time.strftime('%Y'),
                time=time.strftime('%Y-%m-%d %H:%M+%Z'))

        files = self.source_files()
        buf = cStringIO.StringIO()
        self.info('Creating translations template...')
        tempdir = tempfile.mkdtemp()
        atexit.register(shutil.rmtree, tempdir)
        pygettext(buf, ['-k', '__', '-p', tempdir]+files)
        src = buf.getvalue()
        src += '\n\n' + self.get_tweaks_docs()
        pot = os.path.join(self.PATH, __appname__+'.pot')
        with open(pot, 'wb') as f:
            f.write(src)
        self.info('Translations template:', os.path.abspath(pot))
        return pot
        with tempfile.NamedTemporaryFile() as fl:
            fl.write('\n'.join(files))
            fl.flush()
            out = tempfile.NamedTemporaryFile(suffix='.pot', delete=False)
            out.close()
            self.info('Creating translations template...')
            subprocess.check_call(['xgettext', '-f', fl.name,
                '--default-domain=calibre', '-o', out.name, '-L', 'Python',
                '--from-code=UTF-8', '--sort-by-file', '--omit-header',
                '--no-wrap', '-k__',
                ])
            with open(out.name, 'rb') as f:
                src = f.read()
            os.remove(out.name)
            src = pot_header + '\n' + src
            src += '\n\n' + self.get_tweaks_docs()
            pot = os.path.join(self.PATH, __appname__+'.pot')
            with open(pot, 'wb') as f:
                f.write(src)
            self.info('Translations template:', os.path.abspath(pot))
            return pot
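
# NOTE: Illustrative sketch, not part of this commit. The new run() above
# shells out to GNU xgettext over a file list; -k__ registers calibre's __
# wrapper as an extraction keyword in addition to xgettext's defaults. A
# standalone equivalent (the file list is invented):
#
#     import subprocess, tempfile
#
#     files = ['src/calibre/foo.py']  # invented
#     with tempfile.NamedTemporaryFile() as fl:
#         fl.write('\n'.join(files))
#         fl.flush()
#         subprocess.check_call(['xgettext', '-f', fl.name,
#             '--default-domain=calibre', '-o', 'calibre.pot', '-L', 'Python',
#             '--from-code=UTF-8', '--sort-by-file', '--omit-header',
#             '--no-wrap', '-k__'])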


class Translations(POT):

@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
numeric_version = (0, 8, 8)
numeric_version = (0, 8, 9)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

@ -3,57 +3,16 @@
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'

import textwrap, os, glob, functools, re
import os, glob, functools, re
from calibre import guess_type
from calibre.customize import FileTypePlugin, MetadataReaderPlugin, \
        MetadataWriterPlugin, PreferencesPlugin, InterfaceActionBase, StoreBase
from calibre.constants import numeric_version
from calibre.ebooks.metadata.archive import ArchiveExtract, get_cbz_metadata
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.ebooks.html.to_zip import HTML2ZIP

# To archive plugins {{{
class HTML2ZIP(FileTypePlugin):
    name = 'HTML to ZIP'
    author = 'Kovid Goyal'
    description = textwrap.dedent(_('''\
Follow all local links in an HTML file and create a ZIP \
file containing all linked files. This plugin is run \
every time you add an HTML file to the library.\
'''))
    version = numeric_version
    file_types = set(['html', 'htm', 'xhtml', 'xhtm', 'shtm', 'shtml'])
    supported_platforms = ['windows', 'osx', 'linux']
    on_import = True

    def run(self, htmlfile):
        from calibre.ptempfile import TemporaryDirectory
        from calibre.gui2.convert.gui_conversion import gui_convert
        from calibre.customize.conversion import OptionRecommendation
        from calibre.ebooks.epub import initialize_container

        with TemporaryDirectory('_plugin_html2zip') as tdir:
            recs =[('debug_pipeline', tdir, OptionRecommendation.HIGH)]
            recs.append(['keep_ligatures', True, OptionRecommendation.HIGH])
            if self.site_customization and self.site_customization.strip():
                recs.append(['input_encoding', self.site_customization.strip(),
                    OptionRecommendation.HIGH])
            gui_convert(htmlfile, tdir, recs, abort_after_input_dump=True)
            of = self.temporary_file('_plugin_html2zip.zip')
            tdir = os.path.join(tdir, 'input')
            opf = glob.glob(os.path.join(tdir, '*.opf'))[0]
            ncx = glob.glob(os.path.join(tdir, '*.ncx'))
            if ncx:
                os.remove(ncx[0])
            epub = initialize_container(of.name, os.path.basename(opf))
            epub.add_dir(tdir)
            epub.close()

        return of.name

    def customization_help(self, gui=False):
        return _('Character encoding for the input HTML files. Common choices '
        'include: cp1252, latin1, iso-8859-1 and utf-8.')


class PML2PMLZ(FileTypePlugin):
    name = 'PML to PMLZ'
@ -1231,6 +1190,15 @@ class StoreDieselEbooksStore(StoreBase):
    formats = ['EPUB', 'PDF']
    affiliate = True

class StoreEbookNLStore(StoreBase):
    name = 'eBook.nl'
    description = u'De eBookwinkel van Nederland'
    actual_plugin = 'calibre.gui2.store.stores.ebook_nl_plugin:EBookNLStore'

    headquarters = 'NL'
    formats = ['EPUB', 'PDF']
    affiliate = True

class StoreEbookscomStore(StoreBase):
    name = 'eBooks.com'
    description = u'Sells books in multiple electronic formats in all categories. Technical infrastructure is cutting edge, robust and scalable, with servers in the US and Europe.'
@ -1488,6 +1456,7 @@ plugins += [
                StoreBeamEBooksDEStore,
                StoreBeWriteStore,
                StoreDieselEbooksStore,
                StoreEbookNLStore,
                StoreEbookscomStore,
                StoreEBookShoppeUKStore,
                StoreEPubBuyDEStore,

@ -8,7 +8,7 @@ __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

# Imports {{{
import os, shutil, uuid, json
import os, shutil, uuid, json, glob
from functools import partial

import apsw
@ -25,7 +25,7 @@ from calibre.utils.config import to_json, from_json, prefs, tweaks
from calibre.utils.date import utcfromtimestamp, parse_date
from calibre.utils.filenames import is_case_sensitive
from calibre.db.tables import (OneToOneTable, ManyToOneTable, ManyToManyTable,
        SizeTable, FormatsTable, AuthorsTable, IdentifiersTable)
        SizeTable, FormatsTable, AuthorsTable, IdentifiersTable, CompositeTable)
# }}}

'''
@ -478,7 +478,6 @@ class DB(object):
                remove.append(data)
                continue

            self.custom_column_label_map[data['label']] = data['num']
            self.custom_column_num_map[data['num']] = \
                self.custom_column_label_map[data['label']] = data

@ -613,10 +612,31 @@ class DB(object):

        tables['size'] = SizeTable('size', self.field_metadata['size'].copy())

        for label, data in self.custom_column_label_map.iteritems():
            label = '#' + label
        self.FIELD_MAP = {'id':0, 'title':1, 'authors':2, 'timestamp':3,
                'size':4, 'rating':5, 'tags':6, 'comments':7, 'series':8,
                'publisher':9, 'series_index':10, 'sort':11, 'author_sort':12,
                'formats':13, 'path':14, 'pubdate':15, 'uuid':16, 'cover':17,
                'au_map':18, 'last_modified':19, 'identifiers':20}

        for k,v in self.FIELD_MAP.iteritems():
            self.field_metadata.set_field_record_index(k, v, prefer_custom=False)

        base = max(self.FIELD_MAP.itervalues())

        for label_, data in self.custom_column_label_map.iteritems():
            label = self.field_metadata.custom_field_prefix + label_
            metadata = self.field_metadata[label].copy()
            link_table = self.custom_table_names(data['num'])[1]
            self.FIELD_MAP[data['num']] = base = base+1
            self.field_metadata.set_field_record_index(label_, base,
                    prefer_custom=True)
            if data['datatype'] == 'series':
                # account for the series index column. Field_metadata knows that
                # the series index is one larger than the series. If you change
                # it here, be sure to change it there as well.
                self.FIELD_MAP[str(data['num'])+'_index'] = base = base+1
                self.field_metadata.set_field_record_index(label_+'_index', base,
                        prefer_custom=True)

            if data['normalized']:
                if metadata['is_multiple']:
@ -633,7 +653,16 @@ class DB(object):
                    metadata['table'] = link_table
                    tables[label] = OneToOneTable(label, metadata)
            else:
                tables[label] = OneToOneTable(label, metadata)
                if data['datatype'] == 'composite':
                    tables[label] = CompositeTable(label, metadata)
                else:
                    tables[label] = OneToOneTable(label, metadata)

        self.FIELD_MAP['ondevice'] = base = base+1
        self.field_metadata.set_field_record_index('ondevice', base, prefer_custom=False)
        self.FIELD_MAP['marked'] = base = base+1
        self.field_metadata.set_field_record_index('marked', base, prefer_custom=False)

    # }}}

    @property
@ -732,5 +761,28 @@ class DB(object):
            pprint.pprint(table.metadata)
            raise

    def format_abspath(self, book_id, fmt, fname, path):
        path = os.path.join(self.library_path, path)
        fmt = ('.' + fmt.lower()) if fmt else ''
        fmt_path = os.path.join(path, fname+fmt)
        if os.path.exists(fmt_path):
            return fmt_path
        try:
            candidates = glob.glob(os.path.join(path, '*'+fmt))
        except: # If path contains strange characters this throws an exc
            candidates = []
        if fmt and candidates and os.path.exists(candidates[0]):
            shutil.copyfile(candidates[0], fmt_path)
            return fmt_path

    def format_metadata(self, book_id, fmt, fname, path):
        path = self.format_abspath(book_id, fmt, fname, path)
        ans = {}
        if path is not None:
            stat = os.stat(path)
            ans['size'] = stat.st_size
            ans['mtime'] = utcfromtimestamp(stat.st_mtime)
        return ans

    # }}}
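
# NOTE: Illustrative sketch, not part of this commit. format_abspath above
# implements a self-healing lookup: trust the filename recorded in the
# database first, otherwise adopt any file with the right extension and copy
# it back under the expected name. In isolation (directory names invented):
#
#     import glob, os, shutil
#
#     def find_format(book_dir, fname, fmt):
#         ext = '.' + fmt.lower()
#         expected = os.path.join(book_dir, fname + ext)
#         if os.path.exists(expected):
#             return expected
#         # Recorded filename is stale; adopt any file with this extension
#         candidates = glob.glob(os.path.join(book_dir, '*' + ext))
#         if candidates:
#             shutil.copyfile(candidates[0], expected)
#             return expected
#         return None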

@ -7,5 +7,300 @@ __license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import os
from collections import defaultdict
from functools import wraps

from calibre.db.locking import create_locks
from calibre.db.fields import create_field
from calibre.ebooks.book.base import Metadata
from calibre.utils.date import now

def api(f):
    f.is_cache_api = True
    return f

def read_api(f):
    f = api(f)
    f.is_read_api = True
    return f

def write_api(f):
    f = api(f)
    f.is_read_api = False
    return f

def wrap_simple(lock, func):
    @wraps(func)
    def ans(*args, **kwargs):
        with lock:
            return func(*args, **kwargs)
    return ans


class Cache(object):

    def __init__(self, backend):
        self.backend = backend
        self.fields = {}
        self.read_lock, self.write_lock = create_locks()
        self.format_metadata_cache = defaultdict(dict)

        # Implement locking for all simple read/write API methods
        # An unlocked version of the method is stored with the name starting
        # with a leading underscore. Use the unlocked versions when the lock
        # has already been acquired.
        for name in dir(self):
            func = getattr(self, name)
            ira = getattr(func, 'is_read_api', None)
            if ira is not None:
                # Save original function
                setattr(self, '_'+name, func)
                # Wrap it in a lock
                lock = self.read_lock if ira else self.write_lock
                setattr(self, name, wrap_simple(lock, func))

    def _format_abspath(self, book_id, fmt):
        '''
        Return absolute path to the ebook file of format `format`

        WARNING: This method will return a dummy path for a network backend DB,
        so do not rely on it, use format(..., as_path=True) instead.

        Currently used only in calibredb list, the viewer and the catalogs (via
        get_data_as_dict()).

        Apart from the viewer, I don't believe any of the others do any file
        I/O with the results of this call.
        '''
        try:
            name = self.fields['formats'].format_fname(book_id, fmt)
            path = self._field_for('path', book_id).replace('/', os.sep)
        except:
            return None
        if name and path:
            return self.backend.format_abspath(book_id, fmt, name, path)

    # Cache Layer API {{{

    @api
    def init(self):
        '''
        Initialize this cache with data from the backend.
        '''
        with self.write_lock:
            self.backend.read_tables()

            for field, table in self.backend.tables.iteritems():
                self.fields[field] = create_field(field, table)

            self.fields['ondevice'] = create_field('ondevice', None)

    @read_api
    def field_for(self, name, book_id, default_value=None):
        '''
        Return the value of the field ``name`` for the book identified by
        ``book_id``. If no such book exists or it has no defined value for the
        field ``name`` or no such field exists, then ``default_value`` is returned.

        The returned value for is_multiple fields are always tuples.
        '''
        try:
            return self.fields[name].for_book(book_id, default_value=default_value)
        except (KeyError, IndexError):
            return default_value

    @read_api
    def composite_for(self, name, book_id, mi, default_value=''):
        try:
            f = self.fields[name]
        except KeyError:
            return default_value

        return f.render_composite(book_id, mi)

    @read_api
    def field_ids_for(self, name, book_id):
        '''
        Return the ids (as a tuple) for the values that the field ``name`` has on the book
        identified by ``book_id``. If there are no values, or no such book, or
        no such field, an empty tuple is returned.
        '''
        try:
            return self.fields[name].ids_for_book(book_id)
        except (KeyError, IndexError):
            return ()

    @read_api
    def books_for_field(self, name, item_id):
        '''
        Return all the books associated with the item identified by
        ``item_id``, where the item belongs to the field ``name``.

        Returned value is a tuple of book ids, or the empty tuple if the item
        or the field does not exist.
        '''
        try:
            return self.fields[name].books_for(item_id)
        except (KeyError, IndexError):
            return ()

    @read_api
    def all_book_ids(self):
        '''
        Frozen set of all known book ids.
        '''
        return frozenset(self.fields['uuid'].iter_book_ids())

    @read_api
    def all_field_ids(self, name):
        '''
        Frozen set of ids for all values in the field ``name``.
        '''
        return frozenset(iter(self.fields[name]))

    @read_api
    def author_data(self, author_id):
        '''
        Return author data as a dictionary with keys: name, sort, link

        If no author with the specified id is found an empty dictionary is
        returned.
        '''
        try:
            return self.fields['authors'].author_data(author_id)
        except (KeyError, IndexError):
            return {}

    @read_api
    def format_metadata(self, book_id, fmt, allow_cache=True):
        if not fmt:
            return {}
        fmt = fmt.upper()
        if allow_cache:
            x = self.format_metadata_cache[book_id].get(fmt, None)
            if x is not None:
                return x
        try:
            name = self.fields['formats'].format_fname(book_id, fmt)
            path = self._field_for('path', book_id).replace('/', os.sep)
        except:
            return {}

        ans = {}
        if path and name:
            ans = self.backend.format_metadata(book_id, fmt, name, path)
            self.format_metadata_cache[book_id][fmt] = ans
        return ans

    @read_api
    def get_metadata(self, book_id, get_cover=False,
            get_user_categories=True, cover_as_data=False):
        '''
        Convenience method to return metadata as a :class:`Metadata` object.
        Note that the list of formats is not verified.
        '''
        mi = Metadata(None)

        author_ids = self._field_ids_for('authors', book_id)
        aut_list = [self._author_data(i) for i in author_ids]
        aum = []
        aus = {}
        aul = {}
        for rec in aut_list:
            aut = rec['name']
            aum.append(aut)
            aus[aut] = rec['sort']
            aul[aut] = rec['link']
        mi.title = self._field_for('title', book_id,
                default_value=_('Unknown'))
        mi.authors = aum
        mi.author_sort = self._field_for('author_sort', book_id,
                default_value=_('Unknown'))
        mi.author_sort_map = aus
        mi.author_link_map = aul
        mi.comments = self._field_for('comments', book_id)
        mi.publisher = self._field_for('publisher', book_id)
        n = now()
        mi.timestamp = self._field_for('timestamp', book_id, default_value=n)
        mi.pubdate = self._field_for('pubdate', book_id, default_value=n)
        mi.uuid = self._field_for('uuid', book_id,
                default_value='dummy')
        mi.title_sort = self._field_for('sort', book_id,
                default_value=_('Unknown'))
        mi.book_size = self._field_for('size', book_id, default_value=0)
        mi.ondevice_col = self._field_for('ondevice', book_id, default_value='')
        mi.last_modified = self._field_for('last_modified', book_id,
                default_value=n)
        formats = self._field_for('formats', book_id)
        mi.format_metadata = {}
        if not formats:
            formats = None
        else:
            for f in formats:
                mi.format_metadata[f] = self._format_metadata(book_id, f)
            formats = ','.join(formats)
        mi.formats = formats
        mi.has_cover = _('Yes') if self._field_for('cover', book_id,
                default_value=False) else ''
        mi.tags = list(self._field_for('tags', book_id, default_value=()))
        mi.series = self._field_for('series', book_id)
        if mi.series:
            mi.series_index = self._field_for('series_index', book_id,
                    default_value=1.0)
        mi.rating = self._field_for('rating', book_id)
        mi.set_identifiers(self._field_for('identifiers', book_id,
            default_value={}))
        mi.application_id = book_id
        mi.id = book_id
        composites = []
        for key, meta in self.field_metadata.custom_iteritems():
            mi.set_user_metadata(key, meta)
            if meta['datatype'] == 'composite':
                composites.append(key)
            else:
                mi.set(key, val=self._field_for(meta['label'], book_id),
                        extra=self._field_for(meta['label']+'_index', book_id))
        for c in composites:
            mi.set(c, val=self._composite_for(c, book_id, mi))

        user_cat_vals = {}
        if get_user_categories:
            user_cats = self.prefs['user_categories']
            for ucat in user_cats:
                res = []
                for name,cat,ign in user_cats[ucat]:
                    v = mi.get(cat, None)
                    if isinstance(v, list):
                        if name in v:
                            res.append([name,cat])
                    elif name == v:
                        res.append([name,cat])
                user_cat_vals[ucat] = res
        mi.user_categories = user_cat_vals

        if get_cover:
            if cover_as_data:
                cdata = self.cover(book_id, index_is_id=True)
                if cdata:
                    mi.cover_data = ('jpeg', cdata)
            else:
                mi.cover = self.cover(book_id, index_is_id=True, as_path=True)
        return mi

    # }}}

# Testing {{{

def test(library_path):
    from calibre.db.backend import DB
    backend = DB(library_path)
    cache = Cache(backend)
    cache.init()
    print ('All book ids:', cache.all_book_ids())

if __name__ == '__main__':
    from calibre.utils.config import prefs
    test(prefs['library_path'])

# }}}
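
# NOTE: Illustrative sketch, not part of this commit. The locking scheme in
# Cache.__init__ above is a decorate-then-rebind pattern; condensed here with
# a plain RLock standing in for create_locks():
#
#     from functools import wraps
#     from threading import RLock
#
#     def read_api(f):
#         f.is_read_api = True
#         return f
#
#     def wrap_simple(lock, func):
#         @wraps(func)
#         def ans(*args, **kwargs):
#             with lock:
#                 return func(*args, **kwargs)
#         return ans
#
#     class Demo(object):
#
#         def __init__(self):
#             lock = RLock()
#             for name in dir(self):
#                 func = getattr(self, name)
#                 if getattr(func, 'is_read_api', False):
#                     setattr(self, '_' + name, func)  # unlocked variant
#                     setattr(self, name, wrap_simple(lock, func))
#
#         @read_api
#         def value(self):
#             return 42
#
#     print Demo().value()  # the rebound method takes the lock transparently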

src/calibre/db/fields.py (new file, 193 lines)
@ -0,0 +1,193 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
        print_function)

__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

from calibre.db.tables import ONE_ONE, MANY_ONE, MANY_MANY

class Field(object):

    def __init__(self, name, table):
        self.name, self.table = name, table
        self.has_text_data = self.metadata['datatype'] in ('text', 'comments',
                'series', 'enumeration')
        self.table_type = self.table.table_type

    @property
    def metadata(self):
        return self.table.metadata

    def for_book(self, book_id, default_value=None):
        '''
        Return the value of this field for the book identified by book_id.
        When no value is found, returns ``default_value``.
        '''
        raise NotImplementedError()

    def ids_for_book(self, book_id):
        '''
        Return a tuple of item ids for items associated with the book
        identified by book_id. Returns an empty tuple if no such items are
        found.
        '''
        raise NotImplementedError()

    def books_for(self, item_id):
        '''
        Return the ids of all books associated with the item identified by
        item_id as a tuple. An empty tuple is returned if no books are found.
        '''
        raise NotImplementedError()

    def __iter__(self):
        '''
        Iterate over the ids for all values in this field
        '''
        raise NotImplementedError()

class OneToOneField(Field):

    def for_book(self, book_id, default_value=None):
        return self.table.book_col_map.get(book_id, default_value)

    def ids_for_book(self, book_id):
        return (book_id,)

    def books_for(self, item_id):
        return (item_id,)

    def __iter__(self):
        return self.table.book_col_map.iterkeys()

    def iter_book_ids(self):
        return self.table.book_col_map.iterkeys()

class CompositeField(OneToOneField):

    def __init__(self, *args, **kwargs):
        OneToOneField.__init__(self, *args, **kwargs)

        self._render_cache = {}

    def render_composite(self, book_id, mi):
        ans = self._render_cache.get(book_id, None)
        if ans is None:
            ans = mi.get(self.metadata['label'])
            self._render_cache[book_id] = ans
        return ans

    def clear_cache(self):
        self._render_cache = {}

    def pop_cache(self, book_id):
        self._render_cache.pop(book_id, None)

class OnDeviceField(OneToOneField):

    def __init__(self, name, table):
        self.name = name
        self.book_on_device_func = None

    def book_on_device(self, book_id):
        if callable(self.book_on_device_func):
            return self.book_on_device_func(book_id)
        return None

    def set_book_on_device_func(self, func):
        self.book_on_device_func = func

    def for_book(self, book_id, default_value=None):
        loc = []
        count = 0
        on = self.book_on_device(book_id)
        if on is not None:
            m, a, b, count = on[:4]
            if m is not None:
                loc.append(_('Main'))
            if a is not None:
                loc.append(_('Card A'))
            if b is not None:
                loc.append(_('Card B'))
        return ', '.join(loc) + ((' (%s books)'%count) if count > 1 else '')

    def __iter__(self):
        return iter(())

    def iter_book_ids(self):
        return iter(())

class ManyToOneField(Field):

    def for_book(self, book_id, default_value=None):
        ids = self.table.book_col_map.get(book_id, None)
        if ids is not None:
            ans = self.table.id_map[ids]
        else:
            ans = default_value
        return ans

    def ids_for_book(self, book_id):
        ids = self.table.book_col_map.get(book_id, None)
        if ids is None:
            return ()
        return (ids,)

    def books_for(self, item_id):
        return self.table.col_book_map.get(item_id, ())

    def __iter__(self):
        return self.table.id_map.iterkeys()

class ManyToManyField(Field):

    def for_book(self, book_id, default_value=None):
        ids = self.table.book_col_map.get(book_id, ())
        if ids:
            ans = tuple(self.table.id_map[i] for i in ids)
        else:
            ans = default_value
        return ans

    def ids_for_book(self, book_id):
        return self.table.book_col_map.get(book_id, ())

    def books_for(self, item_id):
        return self.table.col_book_map.get(item_id, ())

    def __iter__(self):
        return self.table.id_map.iterkeys()

class AuthorsField(ManyToManyField):

    def author_data(self, author_id):
        return {
                'name' : self.table.id_map[author_id],
                'sort' : self.table.asort_map[author_id],
                'link' : self.table.alink_map[author_id],
                }

class FormatsField(ManyToManyField):

    def format_fname(self, book_id, fmt):
        return self.table.fname_map[book_id][fmt.upper()]

def create_field(name, table):
    cls = {
            ONE_ONE : OneToOneField,
            MANY_ONE : ManyToOneField,
            MANY_MANY : ManyToManyField,
        }[table.table_type]
    if name == 'authors':
        cls = AuthorsField
    elif name == 'ondevice':
        cls = OnDeviceField
    elif name == 'formats':
        cls = FormatsField
    elif table.metadata['datatype'] == 'composite':
        cls = CompositeField
    return cls(name, table)
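
# NOTE: Illustrative sketch, not part of this commit. Assuming a calibre
# checkout on sys.path, driving create_field with a hand-rolled stub table
# (the stub and its values are invented):
#
#     from calibre.db.fields import create_field
#     from calibre.db.tables import ONE_ONE
#
#     class StubTable(object):
#         table_type = ONE_ONE
#         metadata = {'datatype': 'text'}
#         book_col_map = {1: u'A Book Title'}
#
#     f = create_field('title', StubTable())
#     print f.for_book(1)      # -> u'A Book Title'
#     print f.ids_for_book(1)  # -> (1,)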

@ -211,6 +211,28 @@ if __name__ == '__main__':
    class TestSHLock(unittest.TestCase):
        """Testcases for SHLock class."""

        def test_multithread_deadlock(self):
            lock = SHLock()
            def two_shared():
                r = RWLockWrapper(lock)
                with r:
                    time.sleep(0.2)
                    with r:
                        pass
            def one_exclusive():
                time.sleep(0.1)
                w = RWLockWrapper(lock, is_shared=False)
                with w:
                    pass
            threads = [Thread(target=two_shared), Thread(target=one_exclusive)]
            for t in threads:
                t.daemon = True
                t.start()
            for t in threads:
                t.join(5)
            live = [t for t in threads if t.is_alive()]
            self.assertListEqual(live, [], 'ShLock hung')

        def test_upgrade(self):
            lock = SHLock()
            lock.acquire(shared=True)
@ -17,6 +17,8 @@ from calibre.ebooks.metadata import author_to_author_sort

_c_speedup = plugins['speedup'][0]

ONE_ONE, MANY_ONE, MANY_MANY = xrange(3)

def _c_convert_timestamp(val):
    if not val:
        return None
@ -57,6 +59,8 @@ class OneToOneTable(Table):
    timestamp, size, etc.
    '''

    table_type = ONE_ONE

    def read(self, db):
        self.book_col_map = {}
        idcol = 'id' if self.metadata['table'] == 'books' else 'book'
@ -73,6 +77,17 @@ class SizeTable(OneToOneTable):
                'WHERE data.book=books.id) FROM books'):
            self.book_col_map[row[0]] = self.unserialize(row[1])

class CompositeTable(OneToOneTable):

    def read(self, db):
        self.book_col_map = {}
        d = self.metadata['display']
        self.composite_template = d['composite_template']
        self.contains_html = d['contains_html']
        self.make_category = d['make_category']
        self.composite_sort = d['composite_sort']
        self.use_decorations = d['use_decorations']

class ManyToOneTable(Table):

    '''
@ -82,9 +97,10 @@ class ManyToOneTable(Table):
    Each book however has only one value for data of this type.
    '''

    table_type = MANY_ONE

    def read(self, db):
        self.id_map = {}
        self.extra_map = {}
        self.col_book_map = {}
        self.book_col_map = {}
        self.read_id_maps(db)
@ -105,6 +121,9 @@ class ManyToOneTable(Table):
                self.col_book_map[row[1]].append(row[0])
            self.book_col_map[row[0]] = row[1]

        for key in tuple(self.col_book_map.iterkeys()):
            self.col_book_map[key] = tuple(self.col_book_map[key])

class ManyToManyTable(ManyToOneTable):

    '''
@ -113,6 +132,8 @@ class ManyToManyTable(ManyToOneTable):
    book. For example: tags or authors.
    '''

    table_type = MANY_MANY

    def read_maps(self, db):
        for row in db.conn.execute(
                'SELECT book, {0} FROM {1}'.format(
@ -124,14 +145,21 @@ class ManyToManyTable(ManyToOneTable):
                self.book_col_map[row[0]] = []
            self.book_col_map[row[0]].append(row[1])

        for key in tuple(self.col_book_map.iterkeys()):
            self.col_book_map[key] = tuple(self.col_book_map[key])

        for key in tuple(self.book_col_map.iterkeys()):
            self.book_col_map[key] = tuple(self.book_col_map[key])

class AuthorsTable(ManyToManyTable):

    def read_id_maps(self, db):
        self.alink_map = {}
        self.asort_map = {}
        for row in db.conn.execute(
                'SELECT id, name, sort, link FROM authors'):
            self.id_map[row[0]] = row[1]
            self.extra_map[row[0]] = (row[2] if row[2] else
            self.asort_map[row[0]] = (row[2] if row[2] else
                    author_to_author_sort(row[1]))
            self.alink_map[row[0]] = row[3]

@ -141,14 +169,25 @@ class FormatsTable(ManyToManyTable):
    pass

    def read_maps(self, db):
        self.fname_map = {}
        for row in db.conn.execute('SELECT book, format, name FROM data'):
            if row[1] is not None:
                if row[1] not in self.col_book_map:
                    self.col_book_map[row[1]] = []
                self.col_book_map[row[1]].append(row[0])
                fmt = row[1].upper()
                if fmt not in self.col_book_map:
                    self.col_book_map[fmt] = []
                self.col_book_map[fmt].append(row[0])
                if row[0] not in self.book_col_map:
                    self.book_col_map[row[0]] = []
                self.book_col_map[row[0]].append((row[1], row[2]))
                self.book_col_map[row[0]].append(fmt)
                if row[0] not in self.fname_map:
                    self.fname_map[row[0]] = {}
                self.fname_map[row[0]][fmt] = row[2]

        for key in tuple(self.col_book_map.iterkeys()):
            self.col_book_map[key] = tuple(self.col_book_map[key])

        for key in tuple(self.book_col_map.iterkeys()):
            self.book_col_map[key] = tuple(self.book_col_map[key])

class IdentifiersTable(ManyToManyTable):

@ -162,6 +201,9 @@ class IdentifiersTable(ManyToManyTable):
                self.col_book_map[row[1]] = []
            self.col_book_map[row[1]].append(row[0])
            if row[0] not in self.book_col_map:
                self.book_col_map[row[0]] = []
            self.book_col_map[row[0]].append((row[1], row[2]))
                self.book_col_map[row[0]] = {}
            self.book_col_map[row[0]][row[1]] = row[2]

        for key in tuple(self.col_book_map.iterkeys()):
            self.col_book_map[key] = tuple(self.col_book_map[key])

src/calibre/db/view.py (new file, 83 lines)
@ -0,0 +1,83 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
        print_function)

__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

from functools import partial

class View(object):

    def __init__(self, cache):
        self.cache = cache
        self._field_getters = {}
        for col, idx in cache.backend.FIELD_MAP.iteritems():
            if isinstance(col, int):
                label = self.cache.backend.custom_column_num_map[col]['label']
                label = (self.cache.backend.field_metadata.custom_field_prefix
                        + label)
                self._field_getters[idx] = partial(self.get, label)
            else:
                try:
                    self._field_getters[idx] = {
                        'id'      : self._get_id,
                        'au_map'  : self.get_author_data,
                        'ondevice': self.get_ondevice,
                        'marked'  : self.get_is_marked,
                    }[col]
                except KeyError:
                    self._field_getters[idx] = partial(self.get, col)

        self._map = list(self.cache.all_book_ids())
        self._map_filtered = list(self._map)

    def _get_id(self, idx, index_is_id=True):
        ans = idx if index_is_id else self.index_to_id(idx)
        return ans

    def get_field_map_field(self, row, col, index_is_id=True):
        '''
        Supports the legacy FIELD_MAP interface for getting metadata. Do not use
        in new code.
        '''
        getter = self._field_getters[col]
        return getter(row, index_is_id=index_is_id)

    def index_to_id(self, idx):
        pass

    def get(self, field, idx, index_is_id=True, default_value=None):
        id_ = idx if index_is_id else self.index_to_id(idx)
        return self.cache.field_for(field, id_)

    def get_ondevice(self, idx, index_is_id=True, default_value=False):
        pass

    def get_is_marked(self, idx, index_is_id=True, default_value=False):
        pass

    def get_author_data(self, idx, index_is_id=True, default_value=()):
        '''
        Return author data for all authors of the book identified by idx as a
        tuple of dictionaries. The dictionaries should never be empty, unless
        there is a bug somewhere. The tuple could be empty if idx points to a
        non-existent book, or a book with no authors (though again a book with
        no authors should never happen).

        Each dictionary has the keys: name, sort, link. Link can be an empty
        string.

        default_value is ignored, this method always returns a tuple
        '''
        id_ = idx if index_is_id else self.index_to_id(idx)
        with self.cache.read_lock:
            ids = self.cache._field_ids_for('authors', id_)
            ans = []
            for id_ in ids:
                ans.append(self.cache._author_data(id_))
        return tuple(ans)

@ -39,7 +39,7 @@ class ANDROID(USBMS):
            0x22b8 : { 0x41d9 : [0x216], 0x2d61 : [0x100], 0x2d67 : [0x100],
                    0x41db : [0x216], 0x4285 : [0x216], 0x42a3 : [0x216],
                    0x4286 : [0x216], 0x42b3 : [0x216], 0x42b4 : [0x216],
                    0x7086 : [0x0226], 0x70a8: [0x9999],
                    0x7086 : [0x0226], 0x70a8: [0x9999], 0x42c4 : [0x216],
                    },

            # Sony Ericsson
@ -72,7 +72,8 @@ class ANDROID(USBMS):
            0x413c : { 0xb007 : [0x0100, 0x0224, 0x0226]},

            # LG
            0x1004 : { 0x61cc : [0x100], 0x61ce : [0x100], 0x618e : [0x226] },
            0x1004 : { 0x61cc : [0x100], 0x61ce : [0x100], 0x618e : [0x226,
                0x9999] },

            # Archos
            0x0e79 : {
@ -123,11 +124,11 @@ class ANDROID(USBMS):
            'IDEOS_TABLET', 'MYTOUCH_4G', 'UMS_COMPOSITE', 'SCH-I800_CARD',
            '7', 'A956', 'A955', 'A43', 'ANDROID_PLATFORM', 'TEGRA_2',
            'MB860', 'MULTI-CARD', 'MID7015A', 'INCREDIBLE', 'A7EB', 'STREAK',
            'MB525', 'ANDROID2.3']
            'MB525', 'ANDROID2.3', 'SGH-I997', 'GT-I5800_CARD', 'MB612']
    WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
            'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
            'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD',
            '__UMS_COMPOSITE']
            '__UMS_COMPOSITE', 'SGH-I997_CARD']

    OSX_MAIN_MEM = 'Android Device Main Memory'

@ -107,6 +107,7 @@ class DriverBase(DeviceConfig, DevicePlugin):
    FORMATS = ['epub', 'pdf']
    USER_CAN_ADD_NEW_FORMATS = False
    KEEP_TEMP_FILES_AFTER_UPLOAD = True
    CAN_DO_DEVICE_DB_PLUGBOARD = True

    # Hide the standard customization widgets
    SUPPORTS_SUB_DIRS = False
@ -445,7 +446,8 @@ class ITUNES(DriverBase):
                    }

                if self.report_progress is not None:
                    self.report_progress((i+1)/book_count, _('%d of %d') % (i+1, book_count))
                    self.report_progress((i+1)/book_count,
                            _('%(num)d of %(tot)d') % dict(num=i+1, tot=book_count))
            self._purge_orphans(library_books, cached_books)

        elif iswindows:
@ -484,7 +486,8 @@ class ITUNES(DriverBase):

                    if self.report_progress is not None:
                        self.report_progress((i+1)/book_count,
                            _('%d of %d') % (i+1, book_count))
                            _('%(num)d of %(tot)d') % dict(num=i+1,
                                tot=book_count))
                self._purge_orphans(library_books, cached_books)

            finally:
@ -1074,7 +1077,8 @@ class ITUNES(DriverBase):

                # Report progress
                if self.report_progress is not None:
                    self.report_progress((i+1)/file_count, _('%d of %d') % (i+1, file_count))
                    self.report_progress((i+1)/file_count,
                            _('%(num)d of %(tot)d') % dict(num=i+1, tot=file_count))

        elif iswindows:
            try:
@ -1117,7 +1121,8 @@ class ITUNES(DriverBase):

                # Report progress
                if self.report_progress is not None:
                    self.report_progress((i+1)/file_count, _('%d of %d') % (i+1, file_count))
                    self.report_progress((i+1)/file_count,
                            _('%(num)d of %(tot)d') % dict(num=i+1, tot=file_count))
        finally:
            pythoncom.CoUninitialize()

@ -3106,7 +3111,8 @@ class ITUNES_ASYNC(ITUNES):
                }

                if self.report_progress is not None:
                    self.report_progress((i+1)/book_count, _('%d of %d') % (i+1, book_count))
                    self.report_progress((i+1)/book_count,
                            _('%(num)d of %(tot)d') % dict(num=i+1, tot=book_count))

        elif iswindows:
            try:
@ -3146,7 +3152,8 @@ class ITUNES_ASYNC(ITUNES):

                if self.report_progress is not None:
                    self.report_progress((i+1)/book_count,
                        _('%d of %d') % (i+1, book_count))
                        _('%(num)d of %(tot)d') % dict(num=i+1,
                            tot=book_count))

            finally:
                pythoncom.CoUninitialize()
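
# NOTE: Illustrative aside, not part of this commit. The recurring
# report_progress edits above all make the same localization fix: translators
# can reorder named placeholders but not positional ones. For example (the
# lambda stands in for the real translation hook):
#
#     _ = lambda s: s  # stand-in for calibre's gettext hook
#     num, tot = 3, 10
#     print _('%(num)d of %(tot)d') % dict(num=num, tot=tot)
#     # A translation may legitimately become 'von %(tot)d: Nr. %(num)d';
#     # with the old positional '%d of %d' the arguments could not be swapped.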

@ -49,6 +49,9 @@ class DevicePlugin(Plugin):
    #: Whether the metadata on books can be set via the GUI.
    CAN_SET_METADATA = ['title', 'authors', 'collections']

    #: Whether the device can handle device_db metadata plugboards
    CAN_DO_DEVICE_DB_PLUGBOARD = False

    # Set this to None if the books on the device are files that the GUI can
    # access in order to add the books from the device to the library
    BACKLOADING_ERROR_MESSAGE = _('Cannot get files from this device')

@ -57,6 +57,7 @@ class KOBO(USBMS):
    def initialize(self):
        USBMS.initialize(self)
        self.book_class = Book
        self.dbversion = 7

    def books(self, oncard=None, end_session=True):
        from calibre.ebooks.metadata.meta import path_to_ext
@ -100,7 +101,7 @@ class KOBO(USBMS):
        for idx,b in enumerate(bl):
            bl_cache[b.lpath] = idx

        def update_booklist(prefix, path, title, authors, mime, date, ContentType, ImageID, readstatus, MimeType, expired, favouritesindex):
        def update_booklist(prefix, path, title, authors, mime, date, ContentType, ImageID, readstatus, MimeType, expired, favouritesindex, accessibility):
            changed = False
            try:
                lpath = path.partition(self.normalize_path(prefix))[2]
@ -129,6 +130,10 @@ class KOBO(USBMS):
                if favouritesindex == 1:
                    playlist_map[lpath].append('Shortlist')

                # Label Previews
                if accessibility == 6:
                    playlist_map[lpath].append('Preview')

                path = self.normalize_path(path)
                # print "Normalized FileName: " + path

@ -204,23 +209,33 @@ class KOBO(USBMS):
            self.dbversion = result[0]

        debug_print("Database Version: ", self.dbversion)
        if self.dbversion >= 14:
        if self.dbversion >= 16:
            query= 'select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' \
                'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex from content where BookID is Null and ( ___ExpirationStatus <> "3" or ___ExpirationStatus is Null)'
                'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, Accessibility from content where ' \
                'BookID is Null and ( ___ExpirationStatus <> "3" or ___ExpirationStatus is Null)'
        elif self.dbversion < 16 and self.dbversion >= 14:
            query= 'select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' \
                'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, "-1" as Accessibility from content where ' \
                'BookID is Null and ( ___ExpirationStatus <> "3" or ___ExpirationStatus is Null)'
        elif self.dbversion < 14 and self.dbversion >= 8:
            query= 'select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' \
                'ImageID, ReadStatus, ___ExpirationStatus, "-1" as FavouritesIndex from content where BookID is Null and ( ___ExpirationStatus <> "3" or ___ExpirationStatus is Null)'
                'ImageID, ReadStatus, ___ExpirationStatus, "-1" as FavouritesIndex, "-1" as Accessibility from content where ' \
                'BookID is Null and ( ___ExpirationStatus <> "3" or ___ExpirationStatus is Null)'
        else:
            query= 'select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' \
                'ImageID, ReadStatus, "-1" as ___ExpirationStatus, "-1" as FavouritesIndex from content where BookID is Null'
                'ImageID, ReadStatus, "-1" as ___ExpirationStatus, "-1" as FavouritesIndex, "-1" as Accessibility from content where BookID is Null'

        try:
            cursor.execute (query)
        except Exception as e:
            if '___ExpirationStatus' not in str(e):
            err = str(e)
            if not ('___ExpirationStatus' in err or 'FavouritesIndex' in err or
                    'Accessibility' in err):
                raise
            query= 'select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' \
                'ImageID, ReadStatus, "-1" as ___ExpirationStatus, "-1" as FavouritesIndex from content where BookID is Null'
            query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
                'ImageID, ReadStatus, "-1" as ___ExpirationStatus, "-1" as '
                'FavouritesIndex, "-1" as Accessibility from content where '
                'BookID is Null')
            cursor.execute(query)
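
# NOTE: Illustrative aside, not part of this commit. The try/except above is
# a degrade-gracefully pattern for firmware whose schema predates a column;
# in isolation (table and column names invented):
#
#     import sqlite3
#
#     def fetch_rows(connection):
#         cursor = connection.cursor()
#         try:
#             cursor.execute('select Title, Accessibility from content')
#         except sqlite3.OperationalError as e:
#             if 'Accessibility' not in str(e):
#                 raise
#             # Older schema: select a constant so row shapes stay uniform
#             cursor.execute('select Title, "-1" as Accessibility from content')
#         return cursor.fetchall()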
|
||||
|
||||
changed = False
|
||||
@ -234,10 +249,10 @@ class KOBO(USBMS):
|
||||
# debug_print("mime:", mime)
|
||||
|
||||
if oncard != 'carda' and oncard != 'cardb' and not row[3].startswith("file:///mnt/sd/"):
|
||||
changed = update_booklist(self._main_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7], row[4], row[8], row[9])
|
||||
changed = update_booklist(self._main_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7], row[4], row[8], row[9], row[10])
|
||||
# print "shortbook: " + path
|
||||
elif oncard == 'carda' and row[3].startswith("file:///mnt/sd/"):
|
||||
changed = update_booklist(self._card_a_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7], row[4], row[8], row[9])
|
||||
changed = update_booklist(self._card_a_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7], row[4], row[8], row[9], row[10])
|
||||
|
||||
if changed:
|
||||
need_sync = True
|
||||
@ -305,8 +320,15 @@ class KOBO(USBMS):
|
||||
# Kobo does not delete the Book row (ie the row where the BookID is Null)
|
||||
# The next server sync should remove the row
|
||||
cursor.execute('delete from content where BookID = ?', t)
|
||||
cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\', ___PercentRead=0, ___ExpirationStatus=3 ' \
|
||||
try:
|
||||
cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\', ___PercentRead=0, ___ExpirationStatus=3 ' \
|
||||
'where BookID is Null and ContentID =?',t)
|
||||
except Exception as e:
|
||||
if 'no such column' not in str(e):
|
||||
raise
|
||||
cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\', ___PercentRead=0 ' \
|
||||
'where BookID is Null and ContentID =?',t)
|
||||
|
||||
|
||||
connection.commit()
|
||||
|
||||
@ -543,7 +565,92 @@ class KOBO(USBMS):
|
||||
paths[source_id] = os.path.join(prefix, *(path.split('/')))
|
||||
return paths
|
||||
|
||||
def reset_readstatus(self, connection, oncard):
|
||||
cursor = connection.cursor()
|
||||
|
||||
# Reset Im_Reading list in the database
|
||||
if oncard == 'carda':
|
||||
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID like \'file:///mnt/sd/%\''
|
||||
elif oncard != 'carda' and oncard != 'cardb':
|
||||
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID not like \'file:///mnt/sd/%\''
|
||||
|
||||
try:
|
||||
cursor.execute (query)
|
||||
except:
|
||||
debug_print(' Database Exception: Unable to reset ReadStatus list')
|
||||
raise
|
||||
else:
|
||||
connection.commit()
|
||||
debug_print(' Commit: Reset ReadStatus list')
|
||||
|
||||
cursor.close()
|
||||
|
||||
def set_readstatus(self, connection, ContentID, ReadStatus):
|
||||
cursor = connection.cursor()
|
||||
t = (ContentID,)
|
||||
cursor.execute('select DateLastRead from Content where BookID is Null and ContentID = ?', t)
|
||||
result = cursor.fetchone()
|
||||
if result is None:
|
||||
datelastread = '1970-01-01T00:00:00'
|
||||
else:
|
||||
datelastread = result[0] if result[0] is not None else '1970-01-01T00:00:00'
|
||||
|
||||
t = (ReadStatus,datelastread,ContentID,)
|
||||
|
||||
try:
|
||||
cursor.execute('update content set ReadStatus=?,FirstTimeReading=\'false\',DateLastRead=? where BookID is Null and ContentID = ?', t)
|
||||
except:
|
||||
debug_print(' Database Exception: Unable update ReadStatus')
|
||||
raise
|
||||
else:
|
||||
connection.commit()
|
||||
debug_print(' Commit: Setting ReadStatus List')
|
||||
cursor.close()
|
||||
|
||||
def reset_favouritesindex(self, connection, oncard):
|
||||
# Reset FavouritesIndex list in the database
|
||||
if oncard == 'carda':
|
||||
query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID like \'file:///mnt/sd/%\''
|
||||
elif oncard != 'carda' and oncard != 'cardb':
|
||||
query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID not like \'file:///mnt/sd/%\''
|
||||
|
||||
cursor = connection.cursor()
|
||||
try:
|
||||
cursor.execute (query)
|
||||
except Exception as e:
|
||||
debug_print(' Database Exception: Unable to reset Shortlist list')
|
||||
if 'no such column' not in str(e):
|
||||
raise
|
||||
else:
|
||||
connection.commit()
|
||||
debug_print(' Commit: Reset FavouritesIndex list')
|
||||
|
||||
def set_favouritesindex(self, connection, ContentID):
|
||||
cursor = connection.cursor()
|
||||
|
||||
t = (ContentID,)
|
||||
|
||||
try:
|
||||
cursor.execute('update content set FavouritesIndex=1 where BookID is Null and ContentID = ?', t)
|
||||
except Exception as e:
|
||||
debug_print(' Database Exception: Unable set book as Shortlist')
|
||||
if 'no such column' not in str(e):
|
||||
raise
|
||||
else:
|
||||
connection.commit()
|
||||
debug_print(' Commit: Set FavouritesIndex')
|
||||
|
||||
def update_device_database_collections(self, booklists, collections_attributes, oncard):
|
||||
# Define lists for the ReadStatus
|
||||
readstatuslist = {
|
||||
"Im_Reading":1,
|
||||
"Read":2,
|
||||
"Closed":3,
|
||||
}
|
||||
|
||||
accessibilitylist = {
|
||||
"Preview":6,
|
||||
}
|
||||
# debug_print('Starting update_device_database_collections', collections_attributes)
|
||||
|
||||
# Force collections_attributes to be 'tags' as no other is currently supported
|
||||
@ -562,188 +669,44 @@ class KOBO(USBMS):
|
||||
# return bytestrings if the content cannot the decoded as unicode
|
||||
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
|
||||
|
||||
cursor = connection.cursor()
|
||||
|
||||
|
||||
if collections:
|
||||
|
||||
# Need to reset the collections outside the particular loops
|
||||
# otherwise the last item will not be removed
|
||||
self.reset_readstatus(connection, oncard)
|
||||
if self.dbversion >= 14:
|
||||
self.reset_favouritesindex(connection, oncard)
|
||||
|
||||
# Process any collections that exist
|
||||
for category, books in collections.items():
|
||||
# debug_print (category)
|
||||
if category == 'Im_Reading':
|
||||
# Reset Im_Reading list in the database
|
||||
if oncard == 'carda':
|
||||
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ReadStatus = 1 and ContentID like \'file:///mnt/sd/%\''
|
||||
elif oncard != 'carda' and oncard != 'cardb':
|
||||
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ReadStatus = 1 and ContentID not like \'file:///mnt/sd/%\''
|
||||
|
||||
try:
|
||||
cursor.execute (query)
|
||||
except:
|
||||
debug_print('Database Exception: Unable to reset Im_Reading list')
|
||||
raise
|
||||
else:
|
||||
# debug_print('Commit: Reset Im_Reading list')
|
||||
connection.commit()
|
||||
|
||||
debug_print("Category: ", category, " id = ", readstatuslist.get(category))
|
||||
for book in books:
|
||||
# debug_print('Title:', book.title, 'lpath:', book.path)
|
||||
if 'Im_Reading' not in book.device_collections:
|
||||
book.device_collections.append('Im_Reading')
|
||||
debug_print(' Title:', book.title, 'category: ', category)
|
||||
if category not in book.device_collections:
|
||||
book.device_collections.append(category)
|
||||
|
||||
extension = os.path.splitext(book.path)[1]
|
||||
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(book.path)
|
||||
|
||||
ContentID = self.contentid_from_path(book.path, ContentType)
|
||||
|
||||
t = (ContentID,)
|
||||
cursor.execute('select DateLastRead from Content where BookID is Null and ContentID = ?', t)
|
||||
result = cursor.fetchone()
|
||||
if result is None:
|
||||
datelastread = '1970-01-01T00:00:00'
|
||||
else:
|
||||
datelastread = result[0] if result[0] is not None else '1970-01-01T00:00:00'
|
||||
|
||||
t = (datelastread,ContentID,)
|
||||
|
||||
try:
|
||||
cursor.execute('update content set ReadStatus=1,FirstTimeReading=\'false\',DateLastRead=? where BookID is Null and ContentID = ?', t)
|
||||
except:
|
||||
debug_print('Database Exception: Unable to create Im_Reading list')
|
||||
raise
|
||||
else:
|
||||
connection.commit()
|
||||
# debug_print('Database: Commit create Im_Reading list')
|
||||
if category == 'Read':
|
||||
# Reset Im_Reading list in the database
|
||||
if oncard == 'carda':
|
||||
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ReadStatus = 2 and ContentID like \'file:///mnt/sd/%\''
|
||||
elif oncard != 'carda' and oncard != 'cardb':
|
||||
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ReadStatus = 2 and ContentID not like \'file:///mnt/sd/%\''
|
||||
|
||||
try:
|
||||
cursor.execute (query)
|
||||
except:
|
||||
debug_print('Database Exception: Unable to reset Im_Reading list')
|
||||
raise
|
||||
else:
|
||||
# debug_print('Commit: Reset Im_Reading list')
|
||||
connection.commit()
|
||||
|
||||
for book in books:
|
||||
# debug_print('Title:', book.title, 'lpath:', book.path)
|
||||
if 'Read' not in book.device_collections:
|
||||
book.device_collections.append('Read')
|
||||
|
||||
extension = os.path.splitext(book.path)[1]
|
||||
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(book.path)
|
||||
|
||||
ContentID = self.contentid_from_path(book.path, ContentType)
|
||||
# datelastread = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime())
|
||||
|
||||
t = (ContentID,)
|
||||
|
||||
try:
|
||||
cursor.execute('update content set ReadStatus=2,FirstTimeReading=\'true\' where BookID is Null and ContentID = ?', t)
|
||||
except:
|
||||
debug_print('Database Exception: Unable to set book as Finished')
|
||||
raise
|
||||
else:
|
||||
connection.commit()
|
||||
# debug_print('Database: Commit set ReadStatus as Finished')
|
||||
if category == 'Closed':
|
||||
# Reset Im_Reading list in the database
|
||||
if oncard == 'carda':
|
||||
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ReadStatus = 3 and ContentID like \'file:///mnt/sd/%\''
|
||||
elif oncard != 'carda' and oncard != 'cardb':
|
||||
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ReadStatus = 3 and ContentID not like \'file:///mnt/sd/%\''
|
||||
|
||||
try:
|
||||
cursor.execute (query)
|
||||
except:
|
||||
debug_print('Database Exception: Unable to reset Closed list')
|
||||
raise
|
||||
else:
|
||||
# debug_print('Commit: Reset Closed list')
|
||||
connection.commit()
|
||||
|
||||
for book in books:
|
||||
# debug_print('Title:', book.title, 'lpath:', book.path)
|
||||
if 'Closed' not in book.device_collections:
|
||||
book.device_collections.append('Closed')
|
||||
|
||||
extension = os.path.splitext(book.path)[1]
|
||||
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(book.path)
|
||||
|
||||
ContentID = self.contentid_from_path(book.path, ContentType)
|
||||
# datelastread = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime())
|
||||
|
||||
t = (ContentID,)
|
||||
|
||||
try:
|
||||
cursor.execute('update content set ReadStatus=3,FirstTimeReading=\'true\' where BookID is Null and ContentID = ?', t)
|
||||
except:
|
||||
debug_print('Database Exception: Unable to set book as Closed')
|
||||
raise
|
||||
else:
|
||||
connection.commit()
|
||||
# debug_print('Database: Commit set ReadStatus as Closed')
|
||||
if category == 'Shortlist':
|
||||
# Reset FavouritesIndex list in the database
|
||||
if oncard == 'carda':
|
||||
query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID like \'file:///mnt/sd/%\''
|
||||
elif oncard != 'carda' and oncard != 'cardb':
|
||||
query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID not like \'file:///mnt/sd/%\''
|
||||
|
||||
try:
|
||||
cursor.execute (query)
|
||||
except:
|
||||
debug_print('Database Exception: Unable to reset Shortlist list')
|
||||
raise
|
||||
else:
|
||||
# debug_print('Commit: Reset Shortlist list')
|
||||
connection.commit()
|
||||
|
||||
for book in books:
|
||||
# debug_print('Title:', book.title, 'lpath:', book.path)
|
||||
if 'Shortlist' not in book.device_collections:
|
||||
book.device_collections.append('Shortlist')
|
||||
# debug_print ("Shortlist found for: ", book.title)
|
||||
extension = os.path.splitext(book.path)[1]
|
||||
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(book.path)
|
||||
|
||||
ContentID = self.contentid_from_path(book.path, ContentType)
|
||||
# datelastread = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime())
|
||||
|
||||
t = (ContentID,)
|
||||
|
||||
try:
|
||||
cursor.execute('update content set FavouritesIndex=1 where BookID is Null and ContentID = ?', t)
|
||||
except:
|
||||
debug_print('Database Exception: Unable to set book as Shortlist')
|
||||
raise
|
||||
else:
|
||||
connection.commit()
|
||||
# debug_print('Database: Commit set Shortlist as Shortlist')
|
||||
|
||||
if category in readstatuslist.keys():
|
||||
# Manage ReadStatus
|
||||
self.set_readstatus(connection, ContentID, readstatuslist.get(category))
|
||||
if category == 'Shortlist' and self.dbversion >= 14:
|
||||
# Manage FavouritesIndex/Shortlist
|
||||
self.set_favouritesindex(connection, ContentID)
|
||||
if category in accessibilitylist.keys():
|
||||
# Do not manage the Accessibility List
|
||||
pass
|
||||
else: # No collections
|
||||
# Since no collections exist the ReadStatus needs to be reset to 0 (Unread)
|
||||
print "Reseting ReadStatus to 0"
|
||||
# Reset Im_Reading list in the database
|
||||
if oncard == 'carda':
|
||||
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID like \'file:///mnt/sd/%\''
|
||||
elif oncard != 'carda' and oncard != 'cardb':
|
||||
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID not like \'file:///mnt/sd/%\''
|
||||
debug_print("No Collections - reseting ReadStatus")
|
||||
self.reset_readstatus(connection, oncard)
|
||||
if self.dbversion >= 14:
|
||||
debug_print("No Collections - reseting FavouritesIndex")
|
||||
self.reset_favouritesindex(connection, oncard)
|
||||
|
||||
try:
|
||||
cursor.execute (query)
|
||||
except:
|
||||
debug_print('Database Exception: Unable to reset Im_Reading list')
|
||||
raise
|
||||
else:
|
||||
# debug_print('Commit: Reset Im_Reading list')
|
||||
connection.commit()
|
||||
|
||||
cursor.close()
|
||||
connection.close()
|
||||
|
||||
# debug_print('Finished update_device_database_collections', collections_attributes)
|
||||
|
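The refactored loop above funnels every category through the readstatuslist lookup instead of four near-identical per-category blocks, with Shortlist handled separately via FavouritesIndex. A condensed sketch of that dispatch, assuming set_readstatus and set_favouritesindex behave as referenced earlier in this diff:

readstatuslist = {'Im_Reading': 1, 'Read': 2, 'Closed': 3}

def apply_category(driver, connection, ContentID, category, dbversion):
    # Map the calibre collection name to a Kobo ReadStatus value.
    if category in readstatuslist:
        driver.set_readstatus(connection, ContentID, readstatuslist[category])
    # Shortlist is stored in FavouritesIndex, only on newer databases.
    elif category == 'Shortlist' and dbversion >= 14:
        driver.set_favouritesindex(connection, ContentID)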
@ -28,6 +28,7 @@ class PRS505(USBMS):
|
||||
|
||||
FORMATS = ['epub', 'lrf', 'lrx', 'rtf', 'pdf', 'txt']
|
||||
CAN_SET_METADATA = ['title', 'authors', 'collections']
|
||||
CAN_DO_DEVICE_DB_PLUGBOARD = True
|
||||
|
||||
VENDOR_ID = [0x054c] #: SONY Vendor Id
|
||||
PRODUCT_ID = [0x031e]
|
||||
@ -66,10 +67,10 @@ class PRS505(USBMS):
|
||||
_('Comma separated list of metadata fields '
|
||||
'to turn into collections on the device. Possibilities include: ')+\
|
||||
'series, tags, authors' +\
|
||||
_('. Two special collections are available: %s:%s and %s:%s. Add '
|
||||
_('. Two special collections are available: %(abt)s:%(abtv)s and %(aba)s:%(abav)s. Add '
|
||||
'these values to the list to enable them. The collections will be '
|
||||
'given the name provided after the ":" character.')%(
|
||||
'abt', ALL_BY_TITLE, 'aba', ALL_BY_AUTHOR),
|
||||
'given the name provided after the ":" character.')%dict(
|
||||
abt='abt', abtv=ALL_BY_TITLE, aba='aba', abav=ALL_BY_AUTHOR),
|
||||
_('Upload separate cover thumbnails for books (newer readers)') +
|
||||
':::'+_('Normally, the SONY readers get the cover image from the'
|
||||
' ebook file itself. With this option, calibre will send a '
|
||||
|
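This hunk, like many below, rewrites positional %s interpolation into named %(key)s mappings. The point is translation: named placeholders bind by key rather than by order, so a translated string may rearrange them. A toy illustration (the sample strings are made up, not calibre's):

# Named placeholders survive reordering in a translation.
msg = '%(abt)s:%(abtv)s and %(aba)s:%(abav)s'
print msg % dict(abt='abt', abtv='All by title', aba='aba', abav='All by author')
# A translation such as '%(aba)s:%(abav)s y %(abt)s:%(abtv)s' still
# formats correctly; positional '%s:%s and %s:%s' could not be reordered.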
@ -1077,8 +1077,13 @@ class Device(DeviceConfig, DevicePlugin):
|
||||
settings = self.settings()
|
||||
template = self.save_template()
|
||||
if mdata.tags and _('News') in mdata.tags:
|
||||
today = time.localtime()
|
||||
template = "{title}_%d-%d-%d" % (today[0], today[1], today[2])
|
||||
try:
|
||||
p = mdata.pubdate
|
||||
date = (p.year, p.month, p.day)
|
||||
except:
|
||||
today = time.localtime()
|
||||
date = (today[0], today[1], today[2])
|
||||
template = "{title}_%d-%d-%d" % date
|
||||
use_subdirs = self.SUPPORTS_SUB_DIRS and settings.use_subdirs
|
||||
|
||||
fname = sanitize(fname)
|
||||
|
@ -159,7 +159,7 @@ def normalize(x):
|
||||
return x
|
||||
|
||||
def calibre_cover(title, author_string, series_string=None,
|
||||
output_format='jpg', title_size=46, author_size=36):
|
||||
output_format='jpg', title_size=46, author_size=36, logo_path=None):
|
||||
title = normalize(title)
|
||||
author_string = normalize(author_string)
|
||||
series_string = normalize(series_string)
|
||||
@ -167,7 +167,9 @@ def calibre_cover(title, author_string, series_string=None,
|
||||
lines = [TextLine(title, title_size), TextLine(author_string, author_size)]
|
||||
if series_string:
|
||||
lines.append(TextLine(series_string, author_size))
|
||||
return create_cover_page(lines, I('library.png'), output_format='jpg')
|
||||
if logo_path is None:
|
||||
logo_path = I('library.png')
|
||||
return create_cover_page(lines, logo_path, output_format='jpg')
|
||||
|
||||
UNIT_RE = re.compile(r'^(-*[0-9]*[.]?[0-9]*)\s*(%|em|ex|en|px|mm|cm|in|pt|pc)$')
|
||||
|
||||
|
@ -38,8 +38,12 @@ ENCODING_PATS = [
|
||||
ENTITY_PATTERN = re.compile(r'&(\S+?);')
|
||||
|
||||
def strip_encoding_declarations(raw):
|
||||
limit = 50*1024
|
||||
for pat in ENCODING_PATS:
|
||||
raw = pat.sub('', raw)
|
||||
prefix = raw[:limit]
|
||||
suffix = raw[limit:]
|
||||
prefix = pat.sub('', prefix)
|
||||
raw = prefix + suffix
|
||||
return raw
|
||||
|
||||
def substitute_entites(raw):
|
||||
|
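The change limits encoding-declaration stripping to the first 50KB, so very large files are not rescanned end to end and anything past that prefix is left untouched. A self-contained sketch of the prefix/suffix split, using a toy pattern rather than calibre's ENCODING_PATS:

import re

pat = re.compile(r'encoding="[^"]*"')
limit = 50 * 1024
raw = '<?xml encoding="utf-8"?>' + ('a' * limit) + 'encoding="latin1"'
# Substitute only within the first `limit` characters, keep the rest as-is.
cleaned = pat.sub('', raw[:limit]) + raw[limit:]
assert 'utf-8' not in cleaned and 'latin1' in cleaned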
@ -144,9 +144,9 @@ def add_pipeline_options(parser, plumber):
|
||||
|
||||
'HEURISTIC PROCESSING' : (
|
||||
_('Modify the document text and structure using common'
|
||||
' patterns. Disabled by default. Use %s to enable. '
|
||||
' Individual actions can be disabled with the %s options.')
|
||||
% ('--enable-heuristics', '--disable-*'),
|
||||
' patterns. Disabled by default. Use %(en)s to enable. '
|
||||
' Individual actions can be disabled with the %(dis)s options.')
|
||||
% dict(en='--enable-heuristics', dis='--disable-*'),
|
||||
['enable_heuristics'] + HEURISTIC_OPTIONS
|
||||
),
|
||||
|
||||
@ -176,7 +176,7 @@ def add_pipeline_options(parser, plumber):
|
||||
[
|
||||
'level1_toc', 'level2_toc', 'level3_toc',
|
||||
'toc_threshold', 'max_toc_links', 'no_chapters_in_toc',
|
||||
'use_auto_toc', 'toc_filter',
|
||||
'use_auto_toc', 'toc_filter', 'duplicate_links_in_toc',
|
||||
]
|
||||
),
|
||||
|
||||
|
@ -265,6 +265,14 @@ OptionRecommendation(name='toc_filter',
|
||||
)
|
||||
),
|
||||
|
||||
OptionRecommendation(name='duplicate_links_in_toc',
|
||||
recommended_value=False, level=OptionRecommendation.LOW,
|
||||
help=_('When creating a TOC from links in the input document, '
|
||||
'allow duplicate entries, i.e. allow more than one entry '
|
||||
'with the same text, provided that they point to a '
|
||||
'different location.')
|
||||
),
|
||||
|
||||
|
||||
OptionRecommendation(name='chapter',
|
||||
recommended_value="//*[((name()='h1' or name()='h2') and "
|
||||
|
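The new option's effect on TOC generation reduces to one rule: an href may only be added once, while repeated link text is allowed only when the option is set. A hedged distillation of that logic (see the DetectStructure hunk later in this diff for the real implementation):

def keep_link(seen_texts, seen_hrefs, text, href, duplicate_links_in_toc):
    # The destination must always be new.
    if href in seen_hrefs:
        return False
    # Repeated text is only acceptable when the option is enabled.
    return duplicate_links_in_toc or text not in seen_texts

print keep_link({'Chapter'}, set(), 'Chapter', '#c2', True)   # True
print keep_link({'Chapter'}, set(), 'Chapter', '#c2', False)  # False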
@ -17,7 +17,8 @@ class ParseError(ValueError):
|
||||
self.name = name
|
||||
self.desc = desc
|
||||
ValueError.__init__(self,
|
||||
_('Failed to parse: %s with error: %s')%(name, desc))
|
||||
_('Failed to parse: %(name)s with error: %(err)s')%dict(
|
||||
name=name, err=desc))
|
||||
|
||||
class ePubFixer(Plugin):
|
||||
|
||||
|
117
src/calibre/ebooks/html/to_zip.py
Normal file
@ -0,0 +1,117 @@
|
||||
#!/usr/bin/env python
|
||||
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
|
||||
from __future__ import (unicode_literals, division, absolute_import,
|
||||
print_function)
|
||||
|
||||
__license__ = 'GPL v3'
|
||||
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
||||
import textwrap, os, glob
|
||||
|
||||
from calibre.customize import FileTypePlugin
|
||||
from calibre.constants import numeric_version
|
||||
|
||||
class HTML2ZIP(FileTypePlugin):
|
||||
name = 'HTML to ZIP'
|
||||
author = 'Kovid Goyal'
|
||||
description = textwrap.dedent(_('''\
|
||||
Follow all local links in an HTML file and create a ZIP \
|
||||
file containing all linked files. This plugin is run \
|
||||
every time you add an HTML file to the library.\
|
||||
'''))
|
||||
version = numeric_version
|
||||
file_types = set(['html', 'htm', 'xhtml', 'xhtm', 'shtm', 'shtml'])
|
||||
supported_platforms = ['windows', 'osx', 'linux']
|
||||
on_import = True
|
||||
|
||||
def run(self, htmlfile):
|
||||
from calibre.ptempfile import TemporaryDirectory
|
||||
from calibre.gui2.convert.gui_conversion import gui_convert
|
||||
from calibre.customize.conversion import OptionRecommendation
|
||||
from calibre.ebooks.epub import initialize_container
|
||||
|
||||
with TemporaryDirectory('_plugin_html2zip') as tdir:
|
||||
recs =[('debug_pipeline', tdir, OptionRecommendation.HIGH)]
|
||||
recs.append(['keep_ligatures', True, OptionRecommendation.HIGH])
|
||||
if self.site_customization and self.site_customization.strip():
|
||||
sc = self.site_customization.strip()
|
||||
enc, _, bf = sc.partition('|')
|
||||
if enc:
|
||||
recs.append(['input_encoding', enc,
|
||||
OptionRecommendation.HIGH])
|
||||
if bf == 'bf':
|
||||
recs.append(['breadth_first', True,
|
||||
OptionRecommendation.HIGH])
|
||||
gui_convert(htmlfile, tdir, recs, abort_after_input_dump=True)
|
||||
of = self.temporary_file('_plugin_html2zip.zip')
|
||||
tdir = os.path.join(tdir, 'input')
|
||||
opf = glob.glob(os.path.join(tdir, '*.opf'))[0]
|
||||
ncx = glob.glob(os.path.join(tdir, '*.ncx'))
|
||||
if ncx:
|
||||
os.remove(ncx[0])
|
||||
epub = initialize_container(of.name, os.path.basename(opf))
|
||||
epub.add_dir(tdir)
|
||||
epub.close()
|
||||
|
||||
return of.name
|
||||
|
||||
def customization_help(self, gui=False):
|
||||
return _('Character encoding for the input HTML files. Common choices '
|
||||
'include: cp1252, cp1251, latin1 and utf-8.')
|
||||
|
||||
def do_user_config(self, parent=None):
|
||||
'''
|
||||
This method shows a configuration dialog for this plugin. It returns
|
||||
True if the user clicks OK, False otherwise. The changes are
|
||||
automatically applied.
|
||||
'''
|
||||
from PyQt4.Qt import (QDialog, QDialogButtonBox, QVBoxLayout,
|
||||
QLabel, Qt, QLineEdit, QCheckBox)
|
||||
|
||||
config_dialog = QDialog(parent)
|
||||
button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
|
||||
v = QVBoxLayout(config_dialog)
|
||||
|
||||
def size_dialog():
|
||||
config_dialog.resize(config_dialog.sizeHint())
|
||||
|
||||
button_box.accepted.connect(config_dialog.accept)
|
||||
button_box.rejected.connect(config_dialog.reject)
|
||||
config_dialog.setWindowTitle(_('Customize') + ' ' + self.name)
|
||||
from calibre.customize.ui import (plugin_customization,
|
||||
customize_plugin)
|
||||
help_text = self.customization_help(gui=True)
|
||||
help_text = QLabel(help_text, config_dialog)
|
||||
help_text.setWordWrap(True)
|
||||
help_text.setTextInteractionFlags(Qt.LinksAccessibleByMouse
|
||||
| Qt.LinksAccessibleByKeyboard)
|
||||
help_text.setOpenExternalLinks(True)
|
||||
v.addWidget(help_text)
|
||||
bf = QCheckBox(_('Add linked files in breadth first order'))
|
||||
bf.setToolTip(_('Normally, when following links in HTML files'
|
||||
' calibre does it depth first, i.e. if file A links to B and '
|
||||
' C, but B links to D, the files are added in the order A, B, D, C. '
|
||||
' With this option, they will instead be added as A, B, C, D'))
|
||||
sc = plugin_customization(self)
|
||||
if not sc:
|
||||
sc = ''
|
||||
sc = sc.strip()
|
||||
enc = sc.partition('|')[0]
|
||||
bfs = sc.partition('|')[-1]
|
||||
bf.setChecked(bfs == 'bf')
|
||||
sc = QLineEdit(enc, config_dialog)
|
||||
v.addWidget(sc)
|
||||
v.addWidget(bf)
|
||||
v.addWidget(button_box)
|
||||
size_dialog()
|
||||
config_dialog.exec_()
|
||||
|
||||
if config_dialog.result() == QDialog.Accepted:
|
||||
sc = unicode(sc.text()).strip()
|
||||
if bf.isChecked():
|
||||
sc += '|bf'
|
||||
customize_plugin(self, sc)
|
||||
|
||||
return config_dialog.result()
|
||||
|
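The plugin packs its two customization knobs into the single site_customization string: an encoding name, optionally followed by '|bf' to request breadth-first link following, as run() and do_user_config() above both assume. A small parsing sketch with a made-up value:

# 'cp1251|bf' means: decode input as cp1251, follow links breadth first.
sc = 'cp1251|bf'
enc, sep, bf = sc.partition('|')
assert enc == 'cp1251' and bf == 'bf'
# A bare encoding with no flag also parses cleanly.
assert 'utf-8'.partition('|')[0] == 'utf-8'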
@ -8,7 +8,7 @@ __docformat__ = 'restructuredtext en'
|
||||
|
||||
import os
|
||||
|
||||
from calibre import guess_type, walk
|
||||
from calibre import guess_type
|
||||
from calibre.customize.conversion import InputFormatPlugin
|
||||
from calibre.ebooks.chardet import xml_to_unicode
|
||||
from calibre.ebooks.metadata.opf2 import OPF
|
||||
@ -20,29 +20,63 @@ class HTMLZInput(InputFormatPlugin):
|
||||
author = 'John Schember'
|
||||
description = 'Convert HTML files to HTML'
|
||||
file_types = set(['htmlz'])
|
||||
|
||||
|
||||
def convert(self, stream, options, file_ext, log,
|
||||
accelerators):
|
||||
self.log = log
|
||||
html = u''
|
||||
top_levels = []
|
||||
|
||||
# Extract content from zip archive.
|
||||
zf = ZipFile(stream)
|
||||
zf.extractall()
|
||||
|
||||
for x in walk('.'):
|
||||
# Find the HTML file in the archive. It needs to be
|
||||
# top level.
|
||||
index = u''
|
||||
multiple_html = False
|
||||
# Get a list of all top level files in the archive.
|
||||
for x in os.listdir('.'):
|
||||
if os.path.isfile(x):
|
||||
top_levels.append(x)
|
||||
# Try to find an index file.
|
||||
for x in top_levels:
|
||||
if x.lower() in ('index.html', 'index.xhtml', 'index.htm'):
|
||||
index = x
|
||||
break
|
||||
# Look for multiple HTML files in the archive. We look at the
|
||||
# top level files only, as only they matter in HTMLZ.
|
||||
for x in top_levels:
|
||||
if os.path.splitext(x)[1].lower() in ('.html', '.xhtml', '.htm'):
|
||||
with open(x, 'rb') as tf:
|
||||
html = tf.read()
|
||||
break
|
||||
|
||||
# Set index to the first HTML file found if it's not
|
||||
# called index.
|
||||
if not index:
|
||||
index = x
|
||||
else:
|
||||
multiple_html = True
|
||||
# Warn the user if there are multiple HTML files in the archive. HTMLZ
|
||||
# supports a single HTML file. A conversion of an HTMLZ archive with
|
||||
# multiple HTML files probably won't turn out as the user expects. With
|
||||
# multiple HTML files, ZIP input should be used in place of HTMLZ.
|
||||
if multiple_html:
|
||||
log.warn(_('Multiple HTML files found in the archive. Only %s will be used.') % index)
|
||||
|
||||
if index:
|
||||
with open(index, 'rb') as tf:
|
||||
html = tf.read()
|
||||
else:
|
||||
raise Exception(_('No top level HTML file found.'))
|
||||
|
||||
if not html:
|
||||
raise Exception(_('Top level HTML file %s is empty') % index)
|
||||
|
||||
# Encoding
|
||||
if options.input_encoding:
|
||||
ienc = options.input_encoding
|
||||
else:
|
||||
ienc = xml_to_unicode(html[:4096])[-1]
|
||||
html = html.decode(ienc, 'replace')
|
||||
|
||||
|
||||
# Run the HTML through the html processing plugin.
|
||||
from calibre.customize.ui import plugin_for_input_format
|
||||
html_input = plugin_for_input_format('html')
|
||||
@ -71,11 +105,11 @@ class HTMLZInput(InputFormatPlugin):
|
||||
from calibre.ebooks.oeb.transforms.metadata import meta_info_to_oeb_metadata
|
||||
mi = get_file_type_metadata(stream, file_ext)
|
||||
meta_info_to_oeb_metadata(mi, oeb.metadata, log)
|
||||
|
||||
|
||||
# Get the cover path from the OPF.
|
||||
cover_path = None
|
||||
opf = None
|
||||
for x in walk('.'):
|
||||
for x in top_levels:
|
||||
if os.path.splitext(x)[1].lower() == '.opf':
|
||||
opf = x
|
||||
break
|
||||
|
@ -561,7 +561,9 @@ class HTMLConverter(object):
|
||||
para = children[i]
|
||||
break
|
||||
if para is None:
|
||||
raise ConversionError(_('Failed to parse link %s %s')%(tag, children))
|
||||
raise ConversionError(
|
||||
_('Failed to parse link %(tag)s %(children)s')%dict(
|
||||
tag=tag, children=children))
|
||||
text = self.get_text(tag, 1000)
|
||||
if not text:
|
||||
text = 'Link'
|
||||
@ -954,7 +956,9 @@ class HTMLConverter(object):
|
||||
self.scaled_images[path] = pt
|
||||
return pt.name
|
||||
except (IOError, SystemError) as err: # PIL chokes on interlaced PNG images as well as some GIF images
|
||||
self.log.warning(_('Unable to process image %s. Error: %s')%(path, err))
|
||||
self.log.warning(
|
||||
_('Unable to process image %(path)s. Error: %(err)s')%dict(
|
||||
path=path, err=err))
|
||||
|
||||
if width is None or height is None:
|
||||
width, height = im.size
|
||||
@ -1014,7 +1018,7 @@ class HTMLConverter(object):
|
||||
try:
|
||||
self.images[path] = ImageStream(path, encoding=encoding)
|
||||
except LrsError as err:
|
||||
self.log.warning(_('Could not process image: %s\n%s')%(
|
||||
self.log.warning(('Could not process image: %s\n%s')%(
|
||||
original_path, err))
|
||||
return
|
||||
|
||||
|
@ -4,8 +4,9 @@ __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
|
||||
|
||||
import sys, array, os, re, codecs, logging
|
||||
|
||||
from calibre import setup_cli_handlers, sanitize_file_name
|
||||
from calibre import setup_cli_handlers
|
||||
from calibre.utils.config import OptionParser
|
||||
from calibre.utils.filenames import ascii_filename
|
||||
from calibre.ebooks.lrf.meta import LRFMetaFile
|
||||
from calibre.ebooks.lrf.objects import get_object, PageTree, StyleObject, \
|
||||
Font, Text, TOCObject, BookAttr, ruby_tags
|
||||
@ -89,7 +90,7 @@ class LRFDocument(LRFMetaFile):
|
||||
bookinfo += u'<FreeText reading="">%s</FreeText>\n</BookInfo>\n<DocInfo>\n'%(self.metadata.free_text,)
|
||||
th = self.doc_info.thumbnail
|
||||
if th:
|
||||
prefix = sanitize_file_name(self.metadata.title, as_unicode=True)
|
||||
prefix = ascii_filename(self.metadata.title)
|
||||
bookinfo += u'<CThumbnail file="%s" />\n'%(prefix+'_thumbnail.'+self.doc_info.thumbnail_extension,)
|
||||
if write_files:
|
||||
open(prefix+'_thumbnail.'+self.doc_info.thumbnail_extension, 'wb').write(th)
|
||||
|
@ -529,8 +529,8 @@ class Metadata(object):
|
||||
for t in st.intersection(ot):
|
||||
sidx = lstags.index(t)
|
||||
oidx = lotags.index(t)
|
||||
self_tags[sidx] = other.tags[oidx]
|
||||
self_tags += [t for t in other.tags if t.lower() in ot-st]
|
||||
self_tags[sidx] = other_tags[oidx]
|
||||
self_tags += [t for t in other_tags if t.lower() in ot-st]
|
||||
setattr(self, x, self_tags)
|
||||
|
||||
my_comments = getattr(self, 'comments', '')
|
||||
@ -742,7 +742,7 @@ class Metadata(object):
|
||||
ans += [('ISBN', unicode(self.isbn))]
|
||||
ans += [(_('Tags'), u', '.join([unicode(t) for t in self.tags]))]
|
||||
if self.series:
|
||||
ans += [(_('Series'), unicode(self.series)+ ' #%s'%self.format_series_index())]
|
||||
ans += [(_('Series'), unicode(self.series) + ' #%s'%self.format_series_index())]
|
||||
ans += [(_('Language'), unicode(self.language))]
|
||||
if self.timestamp is not None:
|
||||
ans += [(_('Timestamp'), unicode(self.timestamp.isoformat(' ')))]
|
||||
|
@ -21,9 +21,9 @@ USAGE='%%prog ebook_file [' + _('options') + ']\n' + \
|
||||
_('''
|
||||
Read/Write metadata from/to ebook files.
|
||||
|
||||
Supported formats for reading metadata: %s
|
||||
Supported formats for reading metadata: %(read)s
|
||||
|
||||
Supported formats for writing metadata: %s
|
||||
Supported formats for writing metadata: %(write)s
|
||||
|
||||
Different file types support different kinds of metadata. If you try to set
|
||||
some metadata on a file type that does not support it, the metadata will be
|
||||
@ -99,7 +99,7 @@ def option_parser():
|
||||
for w in metadata_writers():
|
||||
writers = writers.union(set(w.file_types))
|
||||
ft, w = ', '.join(sorted(filetypes())), ', '.join(sorted(writers))
|
||||
return config().option_parser(USAGE%(ft, w))
|
||||
return config().option_parser(USAGE%dict(read=ft, write=w))
|
||||
|
||||
def do_set_metadata(opts, mi, stream, stream_type):
|
||||
mi = MetaInformation(mi)
|
||||
|
@ -153,7 +153,8 @@ class Douban(Source):
|
||||
author = 'Li Fanxi'
|
||||
version = (2, 0, 0)
|
||||
|
||||
description = _('Downloads metadata and covers from Douban.com')
|
||||
description = _('Downloads metadata and covers from Douban.com. '
|
||||
'Useful only for Chinese language books.')
|
||||
|
||||
capabilities = frozenset(['identify', 'cover'])
|
||||
touched_fields = frozenset(['title', 'authors', 'tags',
|
||||
|
@ -19,7 +19,7 @@ from calibre.customize.ui import metadata_plugins, all_metadata_plugins
|
||||
from calibre.ebooks.metadata.sources.base import create_log, msprefs
|
||||
from calibre.ebooks.metadata.xisbn import xisbn
|
||||
from calibre.ebooks.metadata.book.base import Metadata
|
||||
from calibre.utils.date import utc_tz
|
||||
from calibre.utils.date import utc_tz, as_utc
|
||||
from calibre.utils.html2text import html2text
|
||||
from calibre.utils.icu import lower
|
||||
|
||||
@ -57,11 +57,34 @@ def is_worker_alive(workers):
|
||||
|
||||
# Merge results from different sources {{{
|
||||
|
||||
class xISBN(Thread):
|
||||
|
||||
def __init__(self, isbn):
|
||||
Thread.__init__(self)
|
||||
self.isbn = isbn
|
||||
self.isbns = frozenset()
|
||||
self.min_year = None
|
||||
self.daemon = True
|
||||
self.exception = self.tb = None
|
||||
|
||||
def run(self):
|
||||
try:
|
||||
self.isbns, self.min_year = xisbn.get_isbn_pool(self.isbn)
|
||||
except Exception as e:
|
||||
import traceback
|
||||
self.exception = e
|
||||
self.tb = traceback.format_exc()
|
||||
|
||||
|
||||
|
||||
class ISBNMerge(object):
|
||||
|
||||
def __init__(self):
|
||||
def __init__(self, log):
|
||||
self.pools = {}
|
||||
self.isbnless_results = []
|
||||
self.results = []
|
||||
self.log = log
|
||||
self.use_xisbn = True
|
||||
|
||||
def isbn_in_pool(self, isbn):
|
||||
if isbn:
|
||||
@ -82,7 +105,20 @@ class ISBNMerge(object):
|
||||
if isbn:
|
||||
pool = self.isbn_in_pool(isbn)
|
||||
if pool is None:
|
||||
isbns, min_year = xisbn.get_isbn_pool(isbn)
|
||||
isbns = min_year = None
|
||||
if self.use_xisbn:
|
||||
xw = xISBN(isbn)
|
||||
xw.start()
|
||||
xw.join(10)
|
||||
if xw.is_alive():
|
||||
self.log.error('Query to xISBN timed out')
|
||||
self.use_xisbn = False
|
||||
else:
|
||||
if xw.exception:
|
||||
self.log.error('Query to xISBN failed:')
|
||||
self.log.debug(xw.tb)
|
||||
else:
|
||||
isbns, min_year = xw.isbns, xw.min_year
|
||||
if not isbns:
|
||||
isbns = frozenset([isbn])
|
||||
if isbns in self.pools:
|
||||
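The pattern above bounds a potentially hanging network call by moving it onto a daemon thread and joining with a 10 second timeout; if the thread is still alive afterwards, the service is abandoned for the rest of the merge. A generic, self-contained sketch of the same pattern (slow_call stands in for xisbn.get_isbn_pool):

import time
from threading import Thread

def slow_call():  # stand-in for the real web query
    time.sleep(1)
    return frozenset(['9780307459671'])

class Bounded(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.daemon = True  # will not keep the interpreter alive
        self.result = None

    def run(self):
        self.result = slow_call()

w = Bounded()
w.start()
w.join(10)  # wait at most 10 seconds, as the merge code does
if w.is_alive():
    print 'query timed out, disabling the service'
else:
    print w.result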
@ -102,15 +138,19 @@ class ISBNMerge(object):
|
||||
if results:
|
||||
has_isbn_result = True
|
||||
break
|
||||
self.has_isbn_result = has_isbn_result
|
||||
|
||||
isbn_sources = frozenset()
|
||||
if has_isbn_result:
|
||||
self.merge_isbn_results()
|
||||
else:
|
||||
results = sorted(self.isbnless_results,
|
||||
key=attrgetter('relevance_in_source'))
|
||||
isbn_sources = self.merge_isbn_results()
|
||||
|
||||
# Now handle results that have no ISBNs
|
||||
results = sorted(self.isbnless_results,
|
||||
key=attrgetter('relevance_in_source'))
|
||||
# Only use results that are from sources that have not also returned a
|
||||
# result with an ISBN
|
||||
results = [r for r in results if r.identify_plugin not in isbn_sources]
|
||||
if results:
|
||||
# Pick only the most relevant result from each source
|
||||
self.results = []
|
||||
seen = set()
|
||||
for result in results:
|
||||
if result.identify_plugin not in seen:
|
||||
@ -190,11 +230,15 @@ class ISBNMerge(object):
|
||||
|
||||
def merge_isbn_results(self):
|
||||
self.results = []
|
||||
sources = set()
|
||||
for min_year, results in self.pools.itervalues():
|
||||
if results:
|
||||
for r in results:
|
||||
sources.add(r.identify_plugin)
|
||||
self.results.append(self.merge(results, min_year))
|
||||
|
||||
self.results.sort(key=attrgetter('average_source_relevance'))
|
||||
return sources
|
||||
|
||||
def length_merge(self, attr, results, null_value=None, shortest=True):
|
||||
values = [getattr(x, attr) for x in results if not x.is_null(attr)]
|
||||
@ -254,13 +298,23 @@ class ISBNMerge(object):
|
||||
|
||||
# Published date
|
||||
if min_year:
|
||||
min_date = datetime(min_year, 1, 2, tzinfo=utc_tz)
|
||||
for r in results:
|
||||
year = getattr(r.pubdate, 'year', None)
|
||||
if year == min_year:
|
||||
ans.pubdate = r.pubdate
|
||||
break
|
||||
if getattr(ans.pubdate, 'year', None) == min_year:
|
||||
min_date = datetime(min_year, ans.pubdate.month, ans.pubdate.day, tzinfo=utc_tz)
|
||||
else:
|
||||
min_date = datetime(min_year, 1, 2, tzinfo=utc_tz)
|
||||
ans.pubdate = min_date
|
||||
else:
|
||||
min_date = datetime(3001, 1, 1, tzinfo=utc_tz)
|
||||
for r in results:
|
||||
if r.pubdate is not None and r.pubdate < min_date:
|
||||
min_date = r.pubdate
|
||||
if r.pubdate is not None:
|
||||
candidate = as_utc(r.pubdate)
|
||||
if candidate < min_date:
|
||||
min_date = candidate
|
||||
if min_date.year < 3000:
|
||||
ans.pubdate = min_date
|
||||
|
||||
@ -293,7 +347,7 @@ class ISBNMerge(object):
|
||||
|
||||
|
||||
def merge_identify_results(result_map, log):
|
||||
isbn_merge = ISBNMerge()
|
||||
isbn_merge = ISBNMerge(log)
|
||||
for plugin, results in result_map.iteritems():
|
||||
for result in results:
|
||||
isbn_merge.add_result(result)
|
||||
@ -505,7 +559,7 @@ if __name__ == '__main__': # tests {{{
|
||||
# unknown to Amazon
|
||||
{'identifiers':{'isbn': '9780307459671'},
|
||||
'title':'Invisible Gorilla', 'authors':['Christopher Chabris']},
|
||||
[title_test('The Invisible Gorilla', exact=True)]
|
||||
[title_test('The Invisible Gorilla: And Other Ways Our Intuitions Deceive Us', exact=True)]
|
||||
|
||||
),
|
||||
|
||||
|
@ -95,9 +95,9 @@ class CoverManager(object):
|
||||
authors = [unicode(x) for x in m.creator if x.role == 'aut']
|
||||
series_string = None
|
||||
if m.series and m.series_index:
|
||||
series_string = _('Book %s of %s')%(
|
||||
fmt_sidx(m.series_index[0], use_roman=True),
|
||||
unicode(m.series[0]))
|
||||
series_string = _('Book %(sidx)s of %(series)s')%dict(
|
||||
sidx=fmt_sidx(m.series_index[0], use_roman=True),
|
||||
series=unicode(m.series[0]))
|
||||
|
||||
try:
|
||||
from calibre.ebooks import calibre_cover
|
||||
|
@ -32,8 +32,8 @@ class SplitError(ValueError):
|
||||
size = len(tostring(root))/1024.
|
||||
ValueError.__init__(self,
|
||||
_('Could not find reasonable point at which to split: '
|
||||
'%s Sub-tree size: %d KB')%
|
||||
(path, size))
|
||||
'%(path)s Sub-tree size: %(size)d KB')%dict(
|
||||
path=path, size=size))
|
||||
|
||||
class Split(object):
|
||||
|
||||
|
@ -121,14 +121,16 @@ class DetectStructure(object):
|
||||
if not self.oeb.toc.has_href(href):
|
||||
text = xml2text(a)
|
||||
text = text[:100].strip()
|
||||
if not self.oeb.toc.has_text(text):
|
||||
num += 1
|
||||
self.oeb.toc.add(text, href,
|
||||
play_order=self.oeb.toc.next_play_order())
|
||||
if self.opts.max_toc_links > 0 and \
|
||||
num >= self.opts.max_toc_links:
|
||||
self.log('Maximum TOC links reached, stopping.')
|
||||
return
|
||||
if (not self.opts.duplicate_links_in_toc and
|
||||
self.oeb.toc.has_text(text)):
|
||||
continue
|
||||
num += 1
|
||||
self.oeb.toc.add(text, href,
|
||||
play_order=self.oeb.toc.next_play_order())
|
||||
if self.opts.max_toc_links > 0 and \
|
||||
num >= self.opts.max_toc_links:
|
||||
self.log('Maximum TOC links reached, stopping.')
|
||||
return
|
||||
|
||||
|
||||
|
||||
|
@ -7,6 +7,7 @@ __docformat__ = 'restructuredtext en'
|
||||
import sys, struct, zlib, bz2, os
|
||||
|
||||
from calibre import guess_type
|
||||
from calibre.utils.filenames import ascii_filename
|
||||
|
||||
class FileStream:
|
||||
def IsBinary(self):
|
||||
@ -156,6 +157,8 @@ class SNBFile:
|
||||
f.fileSize = os.path.getsize(os.path.join(tdir,fileName))
|
||||
f.fileBody = open(os.path.join(tdir,fileName), 'rb').read()
|
||||
f.fileName = fileName.replace(os.sep, '/')
|
||||
if isinstance(f.fileName, unicode):
|
||||
f.fileName = ascii_filename(f.fileName).encode('ascii')
|
||||
self.files.append(f)
|
||||
|
||||
def AppendBinary(self, fileName, tdir):
|
||||
@ -164,6 +167,8 @@ class SNBFile:
|
||||
f.fileSize = os.path.getsize(os.path.join(tdir,fileName))
|
||||
f.fileBody = open(os.path.join(tdir,fileName), 'rb').read()
|
||||
f.fileName = fileName.replace(os.sep, '/')
|
||||
if isinstance(f.fileName, unicode):
|
||||
f.fileName = ascii_filename(f.fileName).encode('ascii')
|
||||
self.files.append(f)
|
||||
|
||||
def GetFileStream(self, fileName):
|
||||
|
@ -1,4 +1,4 @@
|
||||
# coding:utf8
|
||||
# coding:utf-8
|
||||
__license__ = 'GPL 3'
|
||||
__copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
@ -74,6 +74,13 @@ gprefs.defaults['action-layout-context-menu-device'] = (
|
||||
'Add To Library', 'Edit Collections',
|
||||
)
|
||||
|
||||
gprefs.defaults['action-layout-context-menu-cover-browser'] = (
|
||||
'Edit Metadata', 'Send To Device', 'Save To Disk',
|
||||
'Connect Share', 'Copy To Library', None,
|
||||
'Convert Books', 'View', 'Open Folder', 'Show Book Details',
|
||||
'Similar Books', 'Tweak ePub', None, 'Remove Books',
|
||||
)
|
||||
|
||||
gprefs.defaults['show_splash_screen'] = True
|
||||
gprefs.defaults['toolbar_icon_size'] = 'medium'
|
||||
gprefs.defaults['automerge'] = 'ignore'
|
||||
|
@ -120,16 +120,16 @@ class FetchAnnotationsAction(InterfaceAction):
|
||||
spanTag['style'] = 'font-weight:bold'
|
||||
if bookmark.book_format == 'pdf':
|
||||
spanTag.insert(0,NavigableString(
|
||||
_("%s<br />Last Page Read: %d (%d%%)") % \
|
||||
(strftime(u'%x', timestamp.timetuple()),
|
||||
last_read_location,
|
||||
percent_read)))
|
||||
_("%(time)s<br />Last Page Read: %(loc)d (%(pr)d%%)") % \
|
||||
dict(time=strftime(u'%x', timestamp.timetuple()),
|
||||
loc=last_read_location,
|
||||
pr=percent_read)))
|
||||
else:
|
||||
spanTag.insert(0,NavigableString(
|
||||
_("%s<br />Last Page Read: Location %d (%d%%)") % \
|
||||
(strftime(u'%x', timestamp.timetuple()),
|
||||
last_read_location,
|
||||
percent_read)))
|
||||
_("%(time)s<br />Last Page Read: Location %(loc)d (%(pr)d%%)") % \
|
||||
dict(time=strftime(u'%x', timestamp.timetuple()),
|
||||
loc=last_read_location,
|
||||
pr=percent_read)))
|
||||
|
||||
divTag.insert(dtc, spanTag)
|
||||
dtc += 1
|
||||
@ -145,23 +145,23 @@ class FetchAnnotationsAction(InterfaceAction):
|
||||
for location in sorted(user_notes):
|
||||
if user_notes[location]['text']:
|
||||
annotations.append(
|
||||
_('<b>Location %d • %s</b><br />%s<br />') % \
|
||||
(user_notes[location]['displayed_location'],
|
||||
user_notes[location]['type'],
|
||||
user_notes[location]['text'] if \
|
||||
_('<b>Location %(dl)d • %(typ)s</b><br />%(text)s<br />') % \
|
||||
dict(dl=user_notes[location]['displayed_location'],
|
||||
typ=user_notes[location]['type'],
|
||||
text=(user_notes[location]['text'] if \
|
||||
user_notes[location]['type'] == 'Note' else \
|
||||
'<i>%s</i>' % user_notes[location]['text']))
|
||||
'<i>%s</i>' % user_notes[location]['text'])))
|
||||
else:
|
||||
if bookmark.book_format == 'pdf':
|
||||
annotations.append(
|
||||
_('<b>Page %d • %s</b><br />') % \
|
||||
(user_notes[location]['displayed_location'],
|
||||
user_notes[location]['type']))
|
||||
_('<b>Page %(dl)d • %(typ)s</b><br />') % \
|
||||
dict(dl=user_notes[location]['displayed_location'],
|
||||
typ=user_notes[location]['type']))
|
||||
else:
|
||||
annotations.append(
|
||||
_('<b>Location %d • %s</b><br />') % \
|
||||
(user_notes[location]['displayed_location'],
|
||||
user_notes[location]['type']))
|
||||
_('<b>Location %(dl)d • %(typ)s</b><br />') % \
|
||||
dict(dl=user_notes[location]['displayed_location'],
|
||||
typ=user_notes[location]['type']))
|
||||
|
||||
for annotation in annotations:
|
||||
divTag.insert(dtc, annotation)
|
||||
|
@ -82,7 +82,8 @@ class GenerateCatalogAction(InterfaceAction):
|
||||
self.gui.sync_catalogs()
|
||||
if job.fmt not in ['EPUB','MOBI']:
|
||||
export_dir = choose_dir(self.gui, _('Export Catalog Directory'),
|
||||
_('Select destination for %s.%s') % (job.catalog_title, job.fmt.lower()))
|
||||
_('Select destination for %(title)s.%(fmt)s') % dict(
|
||||
title=job.catalog_title, fmt=job.fmt.lower()))
|
||||
if export_dir:
|
||||
destination = os.path.join(export_dir, '%s.%s' % (job.catalog_title, job.fmt.lower()))
|
||||
shutil.copyfile(job.catalog_file_path, destination)
|
||||
|
@ -160,8 +160,9 @@ class CopyToLibraryAction(InterfaceAction):
|
||||
error_dialog(self.gui, _('Failed'), _('Could not copy books: ') + e,
|
||||
det_msg=tb, show=True)
|
||||
else:
|
||||
self.gui.status_bar.show_message(_('Copied %d books to %s') %
|
||||
(len(ids), loc), 2000)
|
||||
self.gui.status_bar.show_message(
|
||||
_('Copied %(num)d books to %(loc)s') %
|
||||
dict(num=len(ids), loc=loc), 2000)
|
||||
if delete_after and self.worker.processed:
|
||||
v = self.gui.library_view
|
||||
ci = v.currentIndex()
|
||||
|
@ -284,7 +284,7 @@ class EditMetadataAction(InterfaceAction):
|
||||
if not confirm('<p>'+_(
|
||||
'Book formats from the selected books will be merged '
|
||||
'into the <b>first selected book</b> (%s). '
|
||||
'Metadata in the first selected book will not be changed.'
|
||||
'Metadata in the first selected book will not be changed. '
|
||||
'Author, Title, ISBN and all other metadata will <i>not</i> be merged.<br><br>'
|
||||
'After the merge, the second and subsequently '
|
||||
'selected books, along with any metadata they have, will be <b>deleted</b>. <br><br>'
|
||||
@ -446,9 +446,8 @@ class EditMetadataAction(InterfaceAction):
|
||||
if d.result() == d.Accepted:
|
||||
to_rename = d.to_rename # dict of new text to old ids
|
||||
to_delete = d.to_delete # list of ids
|
||||
for text in to_rename:
|
||||
for old_id in to_rename[text]:
|
||||
model.rename_collection(old_id, new_name=unicode(text))
|
||||
for old_id, new_name in to_rename.iteritems():
|
||||
model.rename_collection(old_id, new_name=unicode(new_name))
|
||||
for item in to_delete:
|
||||
model.delete_collection_using_id(item)
|
||||
self.gui.upload_collections(model.db, view=view, oncard=oncard)
|
||||
|
@ -159,9 +159,9 @@ def render_data(mi, use_roman_numbers=True, all_fields=False):
|
||||
sidx = mi.get(field+'_index')
|
||||
if sidx is None:
|
||||
sidx = 1.0
|
||||
val = _('Book %s of <span class="series_name">%s</span>')%(fmt_sidx(sidx,
|
||||
use_roman=use_roman_numbers),
|
||||
prepare_string_for_xml(getattr(mi, field)))
|
||||
val = _('Book %(sidx)s of <span class="series_name">%(series)s</span>')%dict(
|
||||
sidx=fmt_sidx(sidx, use_roman=use_roman_numbers),
|
||||
series=prepare_string_for_xml(getattr(mi, field)))
|
||||
|
||||
ans.append((field, u'<td class="title">%s</td><td>%s</td>'%(name, val)))
|
||||
|
||||
@ -541,7 +541,8 @@ class BookDetails(QWidget): # {{{
|
||||
self.setToolTip(
|
||||
'<p>'+_('Double-click to open Book Details window') +
|
||||
'<br><br>' + _('Path') + ': ' + self.current_path +
|
||||
'<br><br>' + _('Cover size: %dx%d')%(sz.width(), sz.height())
|
||||
'<br><br>' + _('Cover size: %(width)d x %(height)d')%dict(
|
||||
width=sz.width(), height=sz.height())
|
||||
)
|
||||
|
||||
def reset_info(self):
|
||||
|
@ -22,7 +22,7 @@ class TOCWidget(Widget, Ui_Form):
|
||||
Widget.__init__(self, parent,
|
||||
['level1_toc', 'level2_toc', 'level3_toc',
|
||||
'toc_threshold', 'max_toc_links', 'no_chapters_in_toc',
|
||||
'use_auto_toc', 'toc_filter',
|
||||
'use_auto_toc', 'toc_filter', 'duplicate_links_in_toc',
|
||||
]
|
||||
)
|
||||
self.db, self.book_id = db, book_id
|
||||
|
@ -21,7 +21,7 @@
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item row="2" column="0">
|
||||
<item row="3" column="0">
|
||||
<widget class="QLabel" name="label_10">
|
||||
<property name="text">
|
||||
<string>Number of &links to add to Table of Contents</string>
|
||||
@ -31,14 +31,14 @@
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item row="2" column="1">
|
||||
<item row="3" column="1">
|
||||
<widget class="QSpinBox" name="opt_max_toc_links">
|
||||
<property name="maximum">
|
||||
<number>10000</number>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item row="3" column="0">
|
||||
<item row="4" column="0">
|
||||
<widget class="QLabel" name="label_16">
|
||||
<property name="text">
|
||||
<string>Chapter &threshold</string>
|
||||
@ -48,7 +48,7 @@
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item row="3" column="1">
|
||||
<item row="4" column="1">
|
||||
<widget class="QSpinBox" name="opt_toc_threshold"/>
|
||||
</item>
|
||||
<item row="0" column="0" colspan="2">
|
||||
@ -58,7 +58,7 @@
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item row="4" column="0">
|
||||
<item row="5" column="0">
|
||||
<widget class="QLabel" name="label">
|
||||
<property name="text">
|
||||
<string>TOC &Filter:</string>
|
||||
@ -68,19 +68,19 @@
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item row="4" column="1">
|
||||
<item row="5" column="1">
|
||||
<widget class="QLineEdit" name="opt_toc_filter"/>
|
||||
</item>
|
||||
<item row="5" column="0" colspan="2">
|
||||
<item row="6" column="0" colspan="2">
|
||||
<widget class="XPathEdit" name="opt_level1_toc" native="true"/>
|
||||
</item>
|
||||
<item row="6" column="0" colspan="2">
|
||||
<item row="7" column="0" colspan="2">
|
||||
<widget class="XPathEdit" name="opt_level2_toc" native="true"/>
|
||||
</item>
|
||||
<item row="7" column="0" colspan="2">
|
||||
<item row="8" column="0" colspan="2">
|
||||
<widget class="XPathEdit" name="opt_level3_toc" native="true"/>
|
||||
</item>
|
||||
<item row="8" column="0">
|
||||
<item row="9" column="0">
|
||||
<spacer name="verticalSpacer">
|
||||
<property name="orientation">
|
||||
<enum>Qt::Vertical</enum>
|
||||
@ -93,6 +93,13 @@
|
||||
</property>
|
||||
</spacer>
|
||||
</item>
|
||||
<item row="2" column="0" colspan="2">
|
||||
<widget class="QCheckBox" name="opt_duplicate_links_in_toc">
|
||||
<property name="text">
|
||||
<string>Allow &duplicate links when creating the Table of Contents</string>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
</layout>
|
||||
</widget>
|
||||
<customwidgets>
|
||||
|
@ -9,8 +9,8 @@ Module to implement the Cover Flow feature
|
||||
|
||||
import sys, os, time
|
||||
|
||||
from PyQt4.Qt import QImage, QSizePolicy, QTimer, QDialog, Qt, QSize, \
|
||||
QStackedLayout, QLabel, QByteArray, pyqtSignal
|
||||
from PyQt4.Qt import (QImage, QSizePolicy, QTimer, QDialog, Qt, QSize,
|
||||
QStackedLayout, QLabel, QByteArray, pyqtSignal)
|
||||
|
||||
from calibre import plugins
|
||||
from calibre.gui2 import config, available_height, available_width, gprefs
|
||||
@ -84,6 +84,7 @@ if pictureflow is not None:
|
||||
class CoverFlow(pictureflow.PictureFlow):
|
||||
|
||||
dc_signal = pyqtSignal()
|
||||
context_menu_requested = pyqtSignal()
|
||||
|
||||
def __init__(self, parent=None):
|
||||
pictureflow.PictureFlow.__init__(self, parent,
|
||||
@ -94,6 +95,17 @@ if pictureflow is not None:
|
||||
QSizePolicy.Expanding))
|
||||
self.dc_signal.connect(self._data_changed,
|
||||
type=Qt.QueuedConnection)
|
||||
self.context_menu = None
|
||||
self.setContextMenuPolicy(Qt.DefaultContextMenu)
|
||||
|
||||
def set_context_menu(self, cm):
|
||||
self.context_menu = cm
|
||||
|
||||
def contextMenuEvent(self, event):
|
||||
if self.context_menu is not None:
|
||||
self.context_menu_requested.emit()
|
||||
self.context_menu.popup(event.globalPos())
|
||||
event.accept()
|
||||
|
||||
def sizeHint(self):
|
||||
return self.minimumSize()
|
||||
@ -149,6 +161,7 @@ class CoverFlowMixin(object):
|
||||
self.cover_flow_sync_flag = True
|
||||
self.cover_flow = CoverFlow(parent=self)
|
||||
self.cover_flow.currentChanged.connect(self.sync_listview_to_cf)
|
||||
self.cover_flow.context_menu_requested.connect(self.cf_context_menu_requested)
|
||||
self.library_view.selectionModel().currentRowChanged.connect(
|
||||
self.sync_cf_to_listview)
|
||||
self.db_images = DatabaseImages(self.library_view.model())
|
||||
@ -234,6 +247,14 @@ class CoverFlowMixin(object):
|
||||
self.cover_flow.setCurrentSlide(current.row())
|
||||
self.cover_flow_sync_flag = True
|
||||
|
||||
def cf_context_menu_requested(self):
|
||||
row = self.cover_flow.currentSlide()
|
||||
m = self.library_view.model()
|
||||
index = m.index(row, 0)
|
||||
sm = self.library_view.selectionModel()
|
||||
sm.select(index, sm.ClearAndSelect|sm.Rows)
|
||||
self.library_view.setCurrentIndex(index)
|
||||
|
||||
def cover_flow_do_sync(self):
|
||||
self.cover_flow_sync_flag = True
|
||||
try:
|
||||
|
@ -723,6 +723,7 @@ class BulkSeries(BulkBase):
|
||||
layout.addWidget(self.force_number)
|
||||
self.series_start_number = QSpinBox(parent)
|
||||
self.series_start_number.setMinimum(1)
|
||||
self.series_start_number.setMaximum(9999999)
|
||||
self.series_start_number.setProperty("value", 1)
|
||||
layout.addWidget(self.series_start_number)
|
||||
layout.addItem(QSpacerItem(20, 10, QSizePolicy.Expanding, QSizePolicy.Minimum))
|
||||
|
@ -912,8 +912,9 @@ class DeviceMixin(object): # {{{
|
||||
format_count[f] = 1
|
||||
for f in self.device_manager.device.settings().format_map:
|
||||
if f in format_count.keys():
|
||||
formats.append((f, _('%i of %i Books') % (format_count[f],
|
||||
len(rows)), True if f in aval_out_formats else False))
|
||||
formats.append((f, _('%(num)i of %(total)i Books') % dict(
|
||||
num=format_count[f], total=len(rows)),
|
||||
True if f in aval_out_formats else False))
|
||||
elif f in aval_out_formats:
|
||||
formats.append((f, _('0 of %i Books') % len(rows), True))
|
||||
d = ChooseFormatDeviceDialog(self, _('Choose format to send to device'), formats)
|
||||
|
@ -106,7 +106,8 @@ class BookInfo(QDialog, Ui_BookInfo):
|
||||
Qt.KeepAspectRatio, Qt.SmoothTransformation)
|
||||
self.cover.set_pixmap(pixmap)
|
||||
sz = pixmap.size()
|
||||
self.cover.setToolTip(_('Cover size: %dx%d')%(sz.width(), sz.height()))
|
||||
self.cover.setToolTip(_('Cover size: %(width)d x %(height)d')%dict(
|
||||
width=sz.width(), height=sz.height()))
|
||||
|
||||
def refresh(self, row):
|
||||
if isinstance(row, QModelIndex):
|
||||
|
@ -173,10 +173,10 @@ class MyBlockingBusy(QDialog): # {{{
|
||||
mi = self.db.get_metadata(id, index_is_id=True)
|
||||
series_string = None
|
||||
if mi.series:
|
||||
series_string = _('Book %s of %s')%(
|
||||
fmt_sidx(mi.series_index,
|
||||
series_string = _('Book %(sidx)s of %(series)s')%dict(
|
||||
sidx=fmt_sidx(mi.series_index,
|
||||
use_roman=config['use_roman_numerals_for_series_number']),
|
||||
mi.series)
|
||||
series=mi.series)
|
||||
|
||||
cdata = calibre_cover(mi.title, mi.format_field('authors')[-1],
|
||||
series_string=series_string)
|
||||
@ -749,15 +749,9 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
|
||||
val = self.s_r_do_regexp(mi)
|
||||
val = self.s_r_do_destination(mi, val)
|
||||
if dfm['is_multiple']:
|
||||
if dest == 'authors' and len(val) == 0:
|
||||
error_dialog(self, _('Search/replace invalid'),
|
||||
_('Authors cannot be set to the empty string. '
|
||||
'Book title %s not processed')%mi.title,
|
||||
show=True)
|
||||
return
|
||||
# convert the colon-separated pair strings back into a dict, which
|
||||
# is what set_identifiers wants
|
||||
if dfm['is_csp']:
|
||||
# convert the colon-separated pair strings back into a dict,
|
||||
# which is what set_identifiers wants
|
||||
dst_id_type = unicode(self.s_r_dst_ident.text())
|
||||
if dst_id_type:
|
||||
v = ''.join(val)
|
||||
@ -769,11 +763,7 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
|
||||
else:
|
||||
val = self.s_r_replace_mode_separator().join(val)
|
||||
if dest == 'title' and len(val) == 0:
|
||||
error_dialog(self, _('Search/replace invalid'),
|
||||
_('Title cannot be set to the empty string. '
|
||||
'Book title %s not processed')%mi.title,
|
||||
show=True)
|
||||
return
|
||||
val = _('Unknown')
|
||||
|
||||
if dfm['is_custom']:
|
||||
extra = self.db.get_custom_extra(id, label=dfm['label'], index_is_id=True)
|
||||
|
@ -701,7 +701,9 @@ class PluginUpdaterDialog(SizePersistedDialog):
|
||||
|
||||
if DEBUG:
|
||||
prints('Locating zip file for %s: %s'% (display_plugin.name, display_plugin.forum_link))
|
||||
self.gui.status_bar.showMessage(_('Locating zip file for %s: %s') % (display_plugin.name, display_plugin.forum_link))
|
||||
self.gui.status_bar.showMessage(
|
||||
_('Locating zip file for %(name)s: %(link)s') % dict(
|
||||
name=display_plugin.name, link=display_plugin.forum_link))
|
||||
plugin_zip_url = self._read_zip_attachment_url(display_plugin.forum_link)
|
||||
if not plugin_zip_url:
|
||||
return error_dialog(self.gui, _('Install Plugin Failed'),
|
||||
|
@ -336,7 +336,12 @@ class SchedulerDialog(QDialog, Ui_Dialog):
|
||||
self.download_button.setVisible(True)
|
||||
self.detail_box.setCurrentIndex(0)
|
||||
recipe = self.recipe_model.recipe_from_urn(urn)
|
||||
schedule_info = self.recipe_model.schedule_info_from_urn(urn)
|
||||
try:
|
||||
schedule_info = self.recipe_model.schedule_info_from_urn(urn)
|
||||
except:
|
||||
# Happens if user does something stupid like unchecking all the
|
||||
# days of the week
|
||||
schedule_info = None
|
||||
account_info = self.recipe_model.account_info_from_urn(urn)
|
||||
customize_info = self.recipe_model.get_customize_info(urn)
|
||||
|
||||
@ -376,7 +381,9 @@ class SchedulerDialog(QDialog, Ui_Dialog):
|
||||
d = utcnow() - last_downloaded
|
||||
def hm(x): return (x-x%3600)//3600, (x%3600 - (x%3600)%60)//60
|
||||
hours, minutes = hm(d.seconds)
|
||||
tm = _('%d days, %d hours and %d minutes ago')%(d.days, hours, minutes)
|
||||
tm = _('%(days)d days, %(hours)d hours'
|
||||
' and %(mins)d minutes ago')%dict(
|
||||
days=d.days, hours=hours, mins=minutes)
|
||||
if d < timedelta(days=366):
|
||||
ld_text = tm
|
||||
else:
|
||||
|
@ -57,7 +57,7 @@ class TagCategories(QDialog, Ui_TagCategories):
|
||||
lambda: [n for (id, n) in self.db.all_publishers()],
|
||||
lambda: self.db.all_tags()
|
||||
]
|
||||
category_names = ['', _('Authors'), _('Series'), _('Publishers'), _('Tags')]
|
||||
category_names = ['', _('Authors'), ngettext('Series', 'Series', 2), _('Publishers'), _('Tags')]
|
||||
|
||||
cvals = {}
|
||||
for key,cc in self.db.custom_field_metadata().iteritems():
|
||||
@ -260,6 +260,7 @@ class TagCategories(QDialog, Ui_TagCategories):
|
||||
self.applied_items = [cat[2] for cat in self.categories.get(self.current_cat_name, [])]
|
||||
else:
|
||||
self.applied_items = []
|
||||
self.applied_items.sort(key=lambda x:sort_key(self.all_items[x].name))
|
||||
self.display_filtered_categories(None)
|
||||
|
||||
def accept(self):
|
||||
@ -284,4 +285,4 @@ class TagCategories(QDialog, Ui_TagCategories):
|
||||
self.category_box.blockSignals(True)
|
||||
self.category_box.clear()
|
||||
self.category_box.addItems(sorted(self.categories.keys(), key=sort_key))
|
||||
self.category_box.blockSignals(False)
|
||||
self.category_box.blockSignals(False)
|
||||
|
@ -18,7 +18,8 @@ class ListWidgetItem(QListWidgetItem):
|
||||
def data(self, role):
|
||||
if role == Qt.DisplayRole:
|
||||
if self.initial_value != self.current_value:
|
||||
return _('%s (was %s)')%(self.current_value, self.initial_value)
|
||||
return _('%(curr)s (was %(initial)s)')%dict(
|
||||
curr=self.current_value, initial=self.initial_value)
|
||||
else:
|
||||
return self.current_value
|
||||
elif role == Qt.EditRole:
|
||||
|
@ -143,7 +143,9 @@ class UserProfiles(ResizableDialog, Ui_Dialog):
|
||||
pt = PersistentTemporaryFile(suffix='.recipe')
|
||||
pt.write(src.encode('utf-8'))
|
||||
pt.close()
|
||||
body = _('The attached file: %s is a recipe to download %s.')%(os.path.basename(pt.name), title)
|
||||
body = _('The attached file: %(fname)s is a '
|
||||
'recipe to download %(title)s.')%dict(
|
||||
fname=os.path.basename(pt.name), title=title)
|
||||
subject = _('Recipe for ')+title
|
||||
url = QUrl('mailto:')
|
||||
url.addQueryItem('subject', subject)
|
||||
|
@ -51,8 +51,8 @@ class DownloadDialog(QDialog): # {{{
|
||||
self.setWindowTitle(_('Download %s')%fname)
|
||||
self.l = QVBoxLayout(self)
|
||||
self.purl = urlparse(url)
|
||||
self.msg = QLabel(_('Downloading <b>%s</b> from %s')%(fname,
|
||||
self.purl.netloc))
|
||||
self.msg = QLabel(_('Downloading <b>%(fname)s</b> from %(url)s')%dict(
|
||||
fname=fname, url=self.purl.netloc))
|
||||
self.msg.setWordWrap(True)
|
||||
self.l.addWidget(self.msg)
|
||||
self.pb = QProgressBar(self)
|
||||
@ -82,9 +82,9 @@ class DownloadDialog(QDialog): # {{{
|
||||
self.exec_()
|
||||
if self.worker.err is not None:
|
||||
error_dialog(self.parent(), _('Download failed'),
|
||||
_('Failed to download from %r with error: %s')%(
|
||||
self.worker.url, self.worker.err),
|
||||
det_msg=self.worker.tb, show=True)
|
||||
_('Failed to download from %(url)r with error: %(err)s')%dict(
|
||||
url=self.worker.url, err=self.worker.err),
|
||||
det_msg=self.worker.tb, show=True)
|
||||
|
||||
def update(self):
|
||||
if self.rejected:
|
||||
|
@ -120,7 +120,7 @@ def send_mails(jobnames, callback, attachments, to_s, subjects,
|
||||
texts, attachment_names, job_manager):
|
||||
for name, attachment, to, subject, text, aname in zip(jobnames,
|
||||
attachments, to_s, subjects, texts, attachment_names):
|
||||
description = _('Email %s to %s') % (name, to)
|
||||
description = _('Email %(name)s to %(to)s') % dict(name=name, to=to)
|
||||
job = ThreadedJob('email', description, gui_sendmail, (attachment, aname, to,
|
||||
subject, text), {}, callback)
|
||||
job_manager.run_threaded_job(job)
|
||||
|
@ -62,7 +62,6 @@ class LibraryViewMixin(object): # {{{
|
||||
view = getattr(self, view+'_view')
|
||||
view.verticalHeader().sectionDoubleClicked.connect(self.iactions['View'].view_specific_book)
|
||||
|
||||
self.build_context_menus()
|
||||
self.library_view.model().set_highlight_only(config['highlight_search_matches'])
|
||||
|
||||
def build_context_menus(self):
|
||||
@ -81,6 +80,11 @@ class LibraryViewMixin(object): # {{{
|
||||
for v in (self.memory_view, self.card_a_view, self.card_b_view):
|
||||
v.set_context_menu(dm, ec)
|
||||
|
||||
if self.cover_flow is not None:
|
||||
cm = QMenu(self.cover_flow)
|
||||
populate_menu(cm,
|
||||
gprefs['action-layout-context-menu-cover-browser'])
|
||||
self.cover_flow.set_context_menu(cm)
|
||||
|
||||
def search_done(self, view, ok):
|
||||
if view is self.current_view():
|
||||
|
@ -172,8 +172,9 @@ class JobManager(QAbstractTableModel): # {{{
|
||||
if job.is_finished:
|
||||
self.job_done.emit(len(self.unfinished_jobs()))
|
||||
if needs_reset:
|
||||
self.layoutAboutToBeChanged.emit()
|
||||
self.jobs.sort()
|
||||
self.reset()
|
||||
self.layoutChanged.emit()
|
||||
else:
|
||||
for job in jobs:
|
||||
idx = self.jobs.index(job)
|
||||
|
@ -950,11 +950,11 @@ class OnDeviceSearch(SearchQueryParser): # {{{
|
||||
for locvalue in locations:
|
||||
accessor = q[locvalue]
|
||||
if query == 'true':
|
||||
if accessor(row) is not None:
|
||||
if accessor(row):
|
||||
matches.add(index)
|
||||
continue
|
||||
if query == 'false':
|
||||
if accessor(row) is None:
|
||||
if not accessor(row):
|
||||
matches.add(index)
|
||||
continue
|
||||
if locvalue == 'inlibrary':
|
||||
|
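The true/false device-search fix replaces an 'is not None' test with plain truthiness, so empty strings and empty lists, which device metadata commonly uses for unset fields, now count as false. A tiny illustration of the difference:

for value in (None, '', [], 'tag'):
    # Old test: matched 'true' whenever value was not None, so '' and []
    # wrongly counted as true. New test: only truthy values match 'true'.
    print repr(value), '->', ('true' if value else 'false')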
@@ -878,9 +878,10 @@ class Cover(ImageView): # {{{
             series = self.dialog.series.current_val
             series_string = None
             if series:
-                series_string = _('Book %s of %s')%(
-                        fmt_sidx(self.dialog.series_index.current_val,
-                        use_roman=config['use_roman_numerals_for_series_number']), series)
+                series_string = _('Book %(sidx)s of %(series)s')%dict(
+                        sidx=fmt_sidx(self.dialog.series_index.current_val,
+                        use_roman=config['use_roman_numerals_for_series_number']),
+                        series=series)
             self.current_val = calibre_cover(title, author,
                     series_string=series_string)

@@ -921,8 +922,8 @@ class Cover(ImageView): # {{{
                 self.setPixmap(pm)
                 tt = _('This book has no cover')
                 if self._cdata:
-                    tt = _('Cover size: %dx%d pixels') % \
-                            (pm.width(), pm.height())
+                    tt = _('Cover size: %(width)d x %(height)d pixels') % \
+                            dict(width=pm.width(), height=pm.height())
                 self.setToolTip(tt)

         return property(fget=fget, fset=fset)
@@ -20,6 +20,7 @@ from calibre.ebooks.metadata.sources.covers import download_cover
 from calibre.ebooks.metadata.book.base import Metadata
 from calibre.customize.ui import metadata_plugins
 from calibre.ptempfile import PersistentTemporaryFile
+from calibre.utils.date import as_utc

 # Start download {{{
 def show_config(gui, parent):
@@ -124,10 +125,18 @@ def merge_result(oldmi, newmi):
     for plugin in metadata_plugins(['identify']):
         fields |= plugin.touched_fields

+    def is_equal(x, y):
+        if hasattr(x, 'tzinfo'):
+            x = as_utc(x)
+        if hasattr(y, 'tzinfo'):
+            y = as_utc(y)
+        return x == y
+
     for f in fields:
         # Optimize so that set_metadata does not have to do extra work later
         if not f.startswith('identifier:'):
-            if (not newmi.is_null(f) and getattr(newmi, f) == getattr(oldmi, f)):
+            if (not newmi.is_null(f) and is_equal(getattr(newmi, f),
+                    getattr(oldmi, f))):
                 setattr(newmi, f, getattr(dummy, f))

     newmi.last_modified = oldmi.last_modified
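The new is_equal() helper normalises datetimes before comparing, because values denoting the same instant can differ in timezone representation, and a naive/aware pair never compares equal. A rough modern-Python sketch of the idea, where as_utc_sketch is an illustrative stand-in for calibre.utils.date.as_utc:

    from datetime import datetime, timezone

    def as_utc_sketch(dt):
        # Assume local time for naive values, then convert to UTC.
        if dt.tzinfo is None:
            dt = dt.astimezone()
        return dt.astimezone(timezone.utc)

    naive = datetime(2011, 7, 8, 12, 0)
    aware = naive.astimezone()   # same wall-clock instant, now tz-aware

    print(naive == aware)                                # False: mixed naive/aware
    print(as_utc_sketch(naive) == as_utc_sketch(aware))  # True after normalising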
@@ -187,7 +196,7 @@ def download(ids, db, do_identify, covers,
             ans[i] = mi
         count += 1
         notifications.put((count/len(ids),
-            _('Downloaded %d of %d')%(count, len(ids))))
+            _('Downloaded %(num)d of %(tot)d')%dict(num=count, tot=len(ids))))
     log('Download complete, with %d failures'%len(failed_ids))
     return (ans, failed_ids, failed_covers, title_map, all_failed)

@@ -254,6 +254,10 @@ class ResultsView(QTableView): # {{{
             '<h2>%s</h2>'%book.title,
             '<div><i>%s</i></div>'%authors_to_string(book.authors),
         ]
+        if not book.is_null('series'):
+            series = book.format_field('series')
+            if series[1]:
+                parts.append('<div>%s: %s</div>'%series)
         if not book.is_null('rating'):
             parts.append('<div>%s</div>'%('\u2605'*int(book.rating)))
         parts.append('</center>')
@@ -722,8 +726,8 @@ class CoversWidget(QWidget): # {{{
         if num < 2:
             txt = _('Could not find any covers for <b>%s</b>')%self.book.title
         else:
-            txt = _('Found <b>%d</b> covers of %s. Pick the one you like'
-                    ' best.')%(num-1, self.title)
+            txt = _('Found <b>%(num)d</b> covers of %(title)s. Pick the one you like'
+                    ' best.')%dict(num=num-1, title=self.title)
         self.msg.setText(txt)

         self.finished.emit()
@@ -1332,6 +1332,7 @@ void PictureFlow::mousePressEvent(QMouseEvent* event)

 void PictureFlow::mouseReleaseEvent(QMouseEvent* event)
 {
+  bool accepted = false;
   int sideWidth = (d->buffer.width() - slideSize().width()) /2;

   if (d->singlePress)
@@ -1339,13 +1340,20 @@ void PictureFlow::mouseReleaseEvent(QMouseEvent* event)
     if (event->x() < sideWidth )
     {
       showPrevious();
+      accepted = true;
     } else if ( event->x() > sideWidth + slideSize().width() ) {
       showNext();
+      accepted = true;
     } else {
-      emit itemActivated(d->getTarget());
+      if (event->button() == Qt::LeftButton) {
+        emit itemActivated(d->getTarget());
+        accepted = true;
+      }
     }

-    event->accept();
+    if (accepted) {
+      event->accept();
+    }
   }

   emit inputReceived();
@@ -445,15 +445,15 @@ class RulesModel(QAbstractListModel): # {{{
     def rule_to_html(self, col, rule):
         if not isinstance(rule, Rule):
             return _('''
-            <p>Advanced Rule for column <b>%s</b>:
-            <pre>%s</pre>
-            ''')%(col, prepare_string_for_xml(rule))
+            <p>Advanced Rule for column <b>%(col)s</b>:
+            <pre>%(rule)s</pre>
+            ''')%dict(col=col, rule=prepare_string_for_xml(rule))
         conditions = [self.condition_to_html(c) for c in rule.conditions]
         return _('''\
-            <p>Set the color of <b>%s</b> to <b>%s</b> if the following
+            <p>Set the color of <b>%(col)s</b> to <b>%(color)s</b> if the following
             conditions are met:</p>
-            <ul>%s</ul>
-            ''') % (col, rule.color, ''.join(conditions))
+            <ul>%(rule)s</ul>
+            ''') % dict(col=col, color=rule.color, rule=''.join(conditions))

     def condition_to_html(self, condition):
         c, a, v = condition
@@ -464,8 +464,8 @@ class RulesModel(QAbstractListModel): # {{{
             action_name = trans

         return (
-            _('<li>If the <b>%s</b> column <b>%s</b> value: <b>%s</b>') %
-            (c, action_name, prepare_string_for_xml(v)))
+            _('<li>If the <b>%(col)s</b> column <b>%(action)s</b> value: <b>%(val)s</b>') %
+            dict(col=c, action=action_name, val=prepare_string_for_xml(v)))

 # }}}

@@ -105,13 +105,18 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):

         r('cover_flow_queue_length', config, restart_required=True)

+        def get_esc_lang(l):
+            if l == 'en':
+                return 'English'
+            return get_language(l)
+
         lang = get_lang()
         if lang is None or lang not in available_translations():
             lang = 'en'
-        items = [(l, get_language(l)) for l in available_translations() \
+        items = [(l, get_esc_lang(l)) for l in available_translations() \
                  if l != lang]
         if lang != 'en':
-            items.append(('en', get_language('en')))
+            items.append(('en', get_esc_lang('en')))
         items.sort(cmp=lambda x, y: cmp(x[1].lower(), y[1].lower()))
         choices = [(y, x) for x, y in items]
         # Default language is the autodetected one
@@ -17,12 +17,13 @@ from calibre.gui2.preferences.metadata_sources_ui import Ui_Form
 from calibre.ebooks.metadata.sources.base import msprefs
 from calibre.customize.ui import (all_metadata_plugins, is_disabled,
         enable_plugin, disable_plugin, default_disabled_plugins)
-from calibre.gui2 import NONE, error_dialog
+from calibre.gui2 import NONE, error_dialog, question_dialog

 class SourcesModel(QAbstractTableModel): # {{{

     def __init__(self, parent=None):
         QAbstractTableModel.__init__(self, parent)
+        self.gui_parent = parent

         self.plugins = []
         self.enabled_overrides = {}
@@ -87,6 +88,15 @@ class SourcesModel(QAbstractTableModel): # {{{
         if col == 0 and role == Qt.CheckStateRole:
             val, ok = val.toInt()
             if ok:
+                if val == Qt.Checked and 'Douban' in plugin.name:
+                    if not question_dialog(self.gui_parent,
+                        _('Are you sure?'), '<p>'+
+                        _('This plugin is useful only for <b>Chinese</b>'
+                            ' language books. It can return incorrect'
+                            ' results for books in English. Are you'
+                            ' sure you want to enable it?'),
+                        show_copy_button=False):
+                        return ret
                 self.enabled_overrides[plugin] = val
                 ret = True
         if col == 1 and role == Qt.EditRole:
@@ -252,8 +262,8 @@ class PluginConfig(QWidget): # {{{

         self.l = l = QVBoxLayout()
         self.setLayout(l)
-        self.c = c = QLabel(_('<b>Configure %s</b><br>%s') % (plugin.name,
-            plugin.description))
+        self.c = c = QLabel(_('<b>Configure %(name)s</b><br>%(desc)s') % dict(
+            name=plugin.name, desc=plugin.description))
         c.setAlignment(Qt.AlignHCenter)
         l.addWidget(c)

@@ -58,7 +58,9 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
         self.device_to_formats_map = {}
         for device in device_plugins():
             n = device_name_for_plugboards(device)
-            self.device_to_formats_map[n] = device.FORMATS
+            self.device_to_formats_map[n] = set(device.FORMATS)
+            if getattr(device, 'CAN_DO_DEVICE_DB_PLUGBOARD', False):
+                self.device_to_formats_map[n].add('device_db')
             if n not in self.devices:
                 self.devices.append(n)
         self.devices.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
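Wrapping device.FORMATS in set() is what allows the next line to add the synthetic 'device_db' entry for plugboard-capable devices, since the original FORMATS sequence is not meant to be mutated. The same step in isolation (the format names are illustrative):

    formats = set(('epub', 'pdf'))   # stand-in for set(device.FORMATS)
    formats.add('device_db')
    print(sorted(formats))           # ['device_db', 'epub', 'pdf']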
@@ -358,5 +360,5 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
 if __name__ == '__main__':
     from PyQt4.Qt import QApplication
     app = QApplication([])
-    test_widget('Import/Export', 'plugboards')
+    test_widget('Import/Export', 'Plugboard')

@@ -155,7 +155,7 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
             return
         if self.argument_count.value() == 0:
             box = warning_dialog(self.gui, _('Template functions'),
-                    _('Argument count should be -1 or greater than zero.'
+                    _('Argument count should be -1 or greater than zero. '
                       'Setting it to zero means that this function cannot '
                       'be used in single function mode.'), det_msg = '',
                       show=False)
@@ -225,6 +225,8 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
                 'calibre library')),
             ('context-menu-device', _('The context menu for the books on '
                 'the device')),
+            ('context-menu-cover-browser', _('The context menu for the cover '
+                'browser')),
             ]

     def genesis(self, gui):
src/calibre/gui2/store/stores/ebook_nl_plugin.py (new file, 96 lines)
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import (unicode_literals, division, absolute_import, print_function)
+
+__license__ = 'GPL 3'
+__copyright__ = '2011, John Schember <john@nachtimwald.com>'
+__docformat__ = 'restructuredtext en'
+
+import urllib2
+from contextlib import closing
+
+from lxml import html
+
+from PyQt4.Qt import QUrl
+
+from calibre import browser
+from calibre.gui2 import open_url
+from calibre.gui2.store import StorePlugin
+from calibre.gui2.store.basic_config import BasicStoreConfig
+from calibre.gui2.store.search_result import SearchResult
+from calibre.gui2.store.web_store_dialog import WebStoreDialog
+
+class EBookNLStore(BasicStoreConfig, StorePlugin):
+
+    def open(self, parent=None, detail_item=None, external=False):
+        url = 'http://ad.zanox.com/ppc/?19015168C29310186T'
+        url_details = ('http://ad.zanox.com/ppc/?19016028C1098154549T&ULP=[['
+                       'http://www.ebook.nl/store/{0}]]')
+
+        if external or self.config.get('open_external', False):
+            if detail_item:
+                url = url_details.format(detail_item)
+            open_url(QUrl(url))
+        else:
+            detail_url = None
+            if detail_item:
+                detail_url = url_details.format(detail_item)
+            d = WebStoreDialog(self.gui, url, parent, detail_url)
+            d.setWindowTitle(self.name)
+            d.set_tags(self.config.get('tags', ''))
+            d.exec_()
+
+    def search(self, query, max_results=10, timeout=60):
+        url = ('http://www.ebook.nl/store/advanced_search_result.php?keywords='
+               + urllib2.quote(query))
+        br = browser()
+
+        counter = max_results
+        with closing(br.open(url, timeout=timeout)) as f:
+            doc = html.fromstring(f.read())
+            for data in doc.xpath('//table[contains(@class, "productListing")]/tr'):
+                if counter <= 0:
+                    break
+
+                details = data.xpath('./td/div[@class="prodImage"]/a')
+                if not details:
+                    continue
+                details = details[0]
+                id = ''.join(details.xpath('./@href')).strip()
+                id = id[id.rfind('/')+1:]
+                i = id.rfind('?')
+                if i > 0:
+                    id = id[:i]
+                if not id:
+                    continue
+                cover_url = 'http://www.ebook.nl/store/' + ''.join(details.xpath('./img/@src'))
+                title = ''.join(details.xpath('./img/@title')).strip()
+                author = ''.join(data.xpath('./td/div[@class="prodTitle"]/h3/a/text()')).strip()
+                price = ''.join(data.xpath('./td/div[@class="prodTitle"]/b/text()'))
+                pdf = data.xpath('boolean(./td/div[@class="prodTitle"]/'
+                                 'p[contains(text(), "Bestandsformaat: Pdf")])')
+                epub = data.xpath('boolean(./td/div[@class="prodTitle"]/'
+                                  'p[contains(text(), "Bestandsformaat: ePub")])')
+                nodrm = data.xpath('boolean(./td/div[@class="prodTitle"]/'
+                                   'p[contains(text(), "zonder DRM") or'
+                                   ' contains(text(), "watermerk")])')
+                counter -= 1
+
+                s = SearchResult()
+                s.cover_url = cover_url
+                s.title = title.strip()
+                s.author = author.strip()
+                s.price = price
+                if nodrm:
+                    s.drm = SearchResult.DRM_UNLOCKED
+                else:
+                    s.drm = SearchResult.DRM_LOCKED
+                s.detail_item = id
+                formats = []
+                if epub:
+                    formats.append('ePub')
+                if pdf:
+                    formats.append('PDF')
+                s.formats = ','.join(formats)
+
+                yield s
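The detail_item extraction in search() above is plain string slicing; rerunning that logic outside calibre shows what ends up in s.detail_item (the href is a made-up example):

    href = 'http://www.ebook.nl/store/a-book-title.html?osCsid=abcdef'
    book_id = href[href.rfind('/')+1:]   # keep only the last path component
    i = book_id.rfind('?')
    if i > 0:
        book_id = book_id[:i]            # strip any query string
    print(book_id)                       # a-book-title.html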
@@ -60,8 +60,7 @@ class LegimiStore(BasicStoreConfig, StorePlugin):
                 author = ''.join(data.xpath('.//div[@class="item_entries"]/span[1]/a/text()'))
                 author = re.sub(',','',author)
                 author = re.sub(';',',',author)
-                price = ''.join(data.xpath('.//div[@class="item_entries"]/span[3]/text()'))
-                price = re.sub(r'[^0-9,]*','',price) + ' zł'
+                price = ''.join(data.xpath('.//span[@class="ebook_price"]/text()'))

                 counter -= 1

@@ -18,11 +18,11 @@ from calibre import browser
 from calibre.gui2.store.search_result import SearchResult

 class CacheUpdateThread(Thread, QObject):
-
+
     total_changed = pyqtSignal(int)
     update_progress = pyqtSignal(int)
     update_details = pyqtSignal(unicode)
-
+
     def __init__(self, config, seralize_books_function, timeout):
         Thread.__init__(self)
         QObject.__init__(self)
@@ -32,19 +32,19 @@ class CacheUpdateThread(Thread, QObject):
         self.seralize_books = seralize_books_function
         self.timeout = timeout
         self._run = True
-
+
     def abort(self):
         self._run = False
-
+
     def run(self):
         url = 'http://www.mobileread.com/forums/ebooks.php?do=getlist&type=html'
-
+
         self.update_details.emit(_('Checking last download date.'))
         last_download = self.config.get('last_download', None)
         # Don't update the book list if our cache is less than one week old.
         if last_download and (time.time() - last_download) < 604800:
             return
-
+
         self.update_details.emit(_('Downloading book list from MobileRead.'))
         # Download the book list HTML file from MobileRead.
         br = browser()
@@ -54,10 +54,10 @@ class CacheUpdateThread(Thread, QObject):
                 raw_data = f.read()
         except:
             return
-
+
         if not raw_data or not self._run:
             return
-
+
         self.update_details.emit(_('Processing books.'))
         # Turn books listed in the HTML file into SearchResults's.
         books = []
@@ -65,21 +65,23 @@ class CacheUpdateThread(Thread, QObject):
             data = html.fromstring(raw_data)
             raw_books = data.xpath('//ul/li')
             self.total_changed.emit(len(raw_books))
-
+
             for i, book_data in enumerate(raw_books):
-                self.update_details.emit(_('%s of %s books processed.') % (i, len(raw_books)))
+                self.update_details.emit(
+                    _('%(num)s of %(tot)s books processed.') % dict(
+                        num=i, tot=len(raw_books)))
                 book = SearchResult()
                 book.detail_item = ''.join(book_data.xpath('.//a/@href'))
                 book.formats = ''.join(book_data.xpath('.//i/text()'))
                 book.formats = book.formats.strip()
-
+
                 text = ''.join(book_data.xpath('.//a/text()'))
                 if ':' in text:
                     book.author, q, text = text.partition(':')
                 book.author = book.author.strip()
                 book.title = text.strip()
                 books.append(book)
-
+
                 if not self._run:
                     books = []
                     break
@@ -12,7 +12,7 @@ import traceback, cPickle, copy
 from itertools import repeat

 from PyQt4.Qt import (QAbstractItemModel, QIcon, QVariant, QFont, Qt,
-        QMimeData, QModelIndex, pyqtSignal)
+        QMimeData, QModelIndex, pyqtSignal, QObject)

 from calibre.gui2 import NONE, gprefs, config, error_dialog
 from calibre.library.database2 import Tag
@@ -227,6 +227,10 @@ class TagsModel(QAbstractItemModel): # {{{
         self._build_in_progress = False
         self.reread_collapse_model({}, rebuild=False)

+    @property
+    def gui_parent(self):
+        return QObject.parent(self)
+
     def reread_collapse_model(self, state_map, rebuild=True):
         if gprefs['tags_browser_collapse_at'] == 0:
             self.collapse_model = 'disable'
@@ -315,9 +319,11 @@ class TagsModel(QAbstractItemModel): # {{{
         for i,p in enumerate(path_parts):
             path += p
             if path not in category_node_map:
+                icon = self.category_icon_map['gst'] if is_gst else \
+                            self.category_icon_map[key]
                 node = self.create_node(parent=last_category_node,
                         data=p[1:] if i == 0 else p,
-                        category_icon=self.category_icon_map[key],
+                        category_icon=icon,
                         tooltip=tt if path == key else path,
                         category_key=path,
                         icon_map=self.icon_state_map)
@@ -375,6 +381,7 @@ class TagsModel(QAbstractItemModel): # {{{
         collapse_letter = None
         category_node = category
         key = category_node.category_key
+        is_gst = category_node.is_gst
         if key not in data:
             return
         cat_len = len(data[key])
@@ -387,7 +394,7 @@ class TagsModel(QAbstractItemModel): # {{{
                    not fm['is_custom'] and \
                    not fm['kind'] == 'user' \
                else False
-        in_uc = fm['kind'] == 'user'
+        in_uc = fm['kind'] == 'user' and not is_gst
         tt = key if in_uc else None

         if collapse_model == 'first letter':
@@ -455,6 +462,7 @@ class TagsModel(QAbstractItemModel): # {{{
                             tooltip = None, temporary=True,
                             category_key=category_node.category_key,
                             icon_map=self.icon_state_map)
+                    sub_cat.is_gst = is_gst
                     node_parent = sub_cat
                 else:
                     node_parent = category
@@ -677,44 +685,37 @@ class TagsModel(QAbstractItemModel): # {{{

     def handle_user_category_drop(self, on_node, ids, column):
         categories = self.db.prefs.get('user_categories', {})
-        category = categories.get(on_node.category_key[1:], None)
-        if category is None:
+        cat_contents = categories.get(on_node.category_key[1:], None)
+        if cat_contents is None:
             return
+        cat_contents = set([(v, c) for v,c,ign in cat_contents])
+
         fm_src = self.db.metadata_for_field(column)
+        label = fm_src['label']
+
         for id in ids:
-            label = fm_src['label']
             if not fm_src['is_custom']:
                 if label == 'authors':
-                    items = self.db.get_authors_with_ids()
-                    items = [(i[0], i[1].replace('|', ',')) for i in items]
                     value = self.db.authors(id, index_is_id=True)
                     value = [v.replace('|', ',') for v in value.split(',')]
                 elif label == 'publisher':
-                    items = self.db.get_publishers_with_ids()
                     value = self.db.publisher(id, index_is_id=True)
                 elif label == 'series':
-                    items = self.db.get_series_with_ids()
                     value = self.db.series(id, index_is_id=True)
             else:
-                items = self.db.get_custom_items_with_ids(label=label)
                 if fm_src['datatype'] != 'composite':
                     value = self.db.get_custom(id, label=label, index_is_id=True)
                 else:
                     value = self.db.get_property(id, loc=fm_src['rec_index'],
                                                  index_is_id=True)
-            if value is None:
-                return
-            if not isinstance(value, list):
-                value = [value]
-            for val in value:
-                for (v, c, id) in category:
-                    if v == val and c == column:
-                        break
-                else:
-                    category.append([val, column, 0])
-            categories[on_node.category_key[1:]] = category
-            self.db.prefs.set('user_categories', categories)
-            self.refresh_required.emit()
+            if value:
+                if not isinstance(value, list):
+                    value = [value]
+                cat_contents |= set([(v, column) for v in value])
+
+        categories[on_node.category_key[1:]] = [[v, c, 0] for v,c in cat_contents]
+        self.db.prefs.set('user_categories', categories)
+        self.refresh_required.emit()

     def handle_drop(self, on_node, ids):
         #print 'Dropped ids:', ids, on_node.tag
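The rewritten drop handler deduplicates by building a set of (value, column) pairs up front and converting back to the stored [value, column, 0] list form once at the end, instead of scanning the category list for every dropped value. The set round-trip in isolation (the sample data is made up):

    existing = [['Austen', 'authors', 0]]
    cat_contents = set([(v, c) for v, c, ign in existing])
    cat_contents |= set([(v, 'authors') for v in ['Austen', 'Tolstoy']])
    print(sorted([[v, c, 0] for v, c in cat_contents]))
    # [['Austen', 'authors', 0], ['Tolstoy', 'authors', 0]]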
@@ -722,12 +723,12 @@ class TagsModel(QAbstractItemModel): # {{{
         if (key == 'authors' and len(ids) >= 5):
             if not confirm('<p>'+_('Changing the authors for several books can '
                            'take a while. Are you sure?')
-                        +'</p>', 'tag_browser_drop_authors', self.parent()):
+                        +'</p>', 'tag_browser_drop_authors', self.gui_parent):
                 return
         elif len(ids) > 15:
             if not confirm('<p>'+_('Changing the metadata for that many books '
                            'can take a while. Are you sure?')
-                        +'</p>', 'tag_browser_many_changes', self.parent()):
+                        +'</p>', 'tag_browser_many_changes', self.gui_parent):
                 return

         fm = self.db.metadata_for_field(key)
@@ -871,13 +872,13 @@ class TagsModel(QAbstractItemModel): # {{{
         # we position at the parent label
         val = unicode(value.toString()).strip()
         if not val:
-            error_dialog(self.parent(), _('Item is blank'),
+            error_dialog(self.gui_parent, _('Item is blank'),
                         _('An item cannot be set to nothing. Delete it instead.')).exec_()
             return False
         item = self.get_node(index)
         if item.type == TagTreeItem.CATEGORY and item.category_key.startswith('@'):
             if val.find('.') >= 0:
-                error_dialog(self.parent(), _('Rename user category'),
+                error_dialog(self.gui_parent, _('Rename user category'),
                     _('You cannot use periods in the name when '
                       'renaming user categories'), show=True)
                 return False
@@ -897,7 +898,7 @@ class TagsModel(QAbstractItemModel): # {{{
                 if len(c) == len(ckey):
                     if strcmp(ckey, nkey) != 0 and \
                             nkey_lower in user_cat_keys_lower:
-                        error_dialog(self.parent(), _('Rename user category'),
+                        error_dialog(self.gui_parent, _('Rename user category'),
                             _('The name %s is already used')%nkey, show=True)
                         return False
                     user_cats[nkey] = user_cats[ckey]
@@ -906,7 +907,7 @@ class TagsModel(QAbstractItemModel): # {{{
                     rest = c[len(ckey):]
                     if strcmp(ckey, nkey) != 0 and \
                             icu_lower(nkey + rest) in user_cat_keys_lower:
-                        error_dialog(self.parent(), _('Rename user category'),
+                        error_dialog(self.gui_parent, _('Rename user category'),
                             _('The name %s is already used')%(nkey+rest), show=True)
                         return False
                     user_cats[nkey + rest] = user_cats[ckey + rest]
@@ -921,12 +922,12 @@ class TagsModel(QAbstractItemModel): # {{{
                 return False
         if key == 'authors':
             if val.find('&') >= 0:
-                error_dialog(self.parent(), _('Invalid author name'),
+                error_dialog(self.gui_parent, _('Invalid author name'),
                         _('Author names cannot contain & characters.')).exec_()
                 return False
         if key == 'search':
             if val in saved_searches().names():
-                error_dialog(self.parent(), _('Duplicate search name'),
+                error_dialog(self.gui_parent, _('Duplicate search name'),
                     _('The saved search name %s is already used.')%val).exec_()
                 return False
             saved_searches().rename(unicode(item.data(role).toString()), val)
@@ -1161,7 +1162,10 @@ class TagsModel(QAbstractItemModel): # {{{
             prefix = ' not '
         else:
             prefix = ''
-        category = tag.category if key != 'news' else 'tag'
+        if node.is_gst:
+            category = key
+        else:
+            category = tag.category if key != 'news' else 'tag'
         add_colon = False
         if self.db.field_metadata[tag.category]['is_csp']:
             add_colon = True
@@ -218,7 +218,7 @@ class TagBrowserMixin(object): # {{{
         d = TagListEditor(self, tag_to_match=tag, data=result, key=key)
         d.exec_()
         if d.result() == d.Accepted:
-            to_rename = d.to_rename # dict of new text to old id
+            to_rename = d.to_rename # dict of old id to new name
             to_delete = d.to_delete # list of ids
             orig_name = d.original_names # dict of id: name

@@ -384,8 +384,8 @@ class TagsView(QTreeView): # {{{
                             action='delete_search', key=tag.name))
                     if key.startswith('@') and not item.is_gst:
                         self.context_menu.addAction(self.user_category_icon,
-                                _('Remove %s from category %s')%
-                                (display_name(tag), item.py_name),
+                                _('Remove %(item)s from category %(cat)s')%
+                                dict(item=display_name(tag), cat=item.py_name),
                                 partial(self.context_menu_handler,
                                     action='delete_item_from_user_category',
                                     key = key, index = tag_item))
@@ -94,8 +94,8 @@ def convert_single_ebook(parent, db, book_ids, auto_conversion=False, # {{{

         msg = '%s' % '\n'.join(res)
         warning_dialog(parent, _('Could not convert some books'),
-            _('Could not convert %d of %d books, because no suitable source'
-            ' format was found.') % (len(res), total),
+            _('Could not convert %(num)d of %(tot)d books, because no suitable source'
+            ' format was found.') % dict(num=len(res), tot=total),
             msg).exec_()

     return jobs, changed, bad
@@ -187,7 +187,8 @@ class QueueBulk(QProgressDialog):
         except:
             dtitle = repr(mi.title)
         self.setLabelText(_('Queueing ')+dtitle)
-        desc = _('Convert book %d of %d (%s)') % (self.i, len(self.book_ids), dtitle)
+        desc = _('Convert book %(num)d of %(tot)d (%(title)s)') % dict(
+                num=self.i, tot=len(self.book_ids), title=dtitle)

         args = [in_file.name, out_file.name, lrecs]
         temp_files.append(out_file)
@@ -209,8 +210,8 @@ class QueueBulk(QProgressDialog):

             msg = '%s' % '\n'.join(res)
             warning_dialog(self.parent, _('Could not convert some books'),
-                _('Could not convert %d of %d books, because no suitable '
-                'source format was found.') % (len(res), len(self.book_ids)),
+                _('Could not convert %(num)d of %(tot)d books, because no suitable '
+                'source format was found.') % dict(num=len(res), tot=len(self.book_ids)),
                 msg).exec_()
         self.parent = None
         self.jobs.reverse()
@@ -308,6 +308,8 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
                     self.height())
             self.resize(self.width(), self._calculated_available_height)

+        self.build_context_menus()
+
         for ac in self.iactions.values():
             try:
                 ac.gui_layout_complete()
@@ -70,10 +70,10 @@ class UpdateNotification(QDialog):
         self.logo.setPixmap(QPixmap(I('lt.png')).scaled(100, 100,
             Qt.IgnoreAspectRatio, Qt.SmoothTransformation))
         self.label = QLabel(('<p>'+
-            _('%s has been updated to version <b>%s</b>. '
+            _('%(app)s has been updated to version <b>%(ver)s</b>. '
                 'See the <a href="http://calibre-ebook.com/whats-new'
-                '">new features</a>.'))%(
-                    __appname__, calibre_version))
+                '">new features</a>.'))%dict(
+                    app=__appname__, ver=calibre_version))
         self.label.setOpenExternalLinks(True)
         self.label.setWordWrap(True)
         self.setWindowTitle(_('Update available!'))
@@ -492,11 +492,11 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
         self.set_page_number(frac)

     def magnification_changed(self, val):
-        tt = _('Make font size %s\nCurrent magnification: %.1f')
+        tt = _('Make font size %(which)s\nCurrent magnification: %(mag).1f')
         self.action_font_size_larger.setToolTip(
-                tt %(_('larger'), val))
+                tt %dict(which=_('larger'), mag=val))
         self.action_font_size_smaller.setToolTip(
-                tt %(_('smaller'), val))
+                tt %dict(which=_('smaller'), mag=val))

     def find(self, text, repeat=False, backwards=False):
         if not text:
@@ -569,9 +569,9 @@ def move_library(oldloc, newloc, parent, callback_on_complete):
             det = traceback.format_exc()
             error_dialog(parent, _('Invalid database'),
                     _('<p>An invalid library already exists at '
-                      '%s, delete it before trying to move the '
-                      'existing library.<br>Error: %s')%(newloc,
-                          str(err)), det, show=True)
+                      '%(loc)s, delete it before trying to move the '
+                      'existing library.<br>Error: %(err)s')%dict(loc=newloc,
+                          err=str(err)), det, show=True)
             callback(None)
             return
         else:
Some files were not shown because too many files have changed in this diff.