Merge from trunk

This commit is contained in:
Charles Haley 2011-11-18 13:06:25 +01:00
commit 71b59c4a1b
111 changed files with 42531 additions and 34952 deletions

View File

@ -19,6 +19,60 @@
# new recipes: # new recipes:
# - title: # - title:
- version: 0.8.27
date: 2011-11-18
new features:
- title: "Drivers for the Kindle Fire and the Nook Tablet"
tickets: [890918]
- title: "Conversion: Add an option under Look & Feel to remove specified style information (CSS) from the document during conversion."
tickets: [871384]
- title: "Add an option in the bulk metadata edit dialog to restore the pre-conversion files for many books with a single click."
tickets: [886116]
- title: "Jobs list: Add the ability to search for and to hide jobs, useful if you have run a lot of jobs and the list is getting crowded."
tickets: [883734]
- title: "Book jacket generation: Add ability to customize the book jacket template and add custom columns into the jacket."
tickets: [889912]
- title: "MOBI Input: Performance improvement when viewing/converting a file with a lot of links"
bug fixes:
- title: "Fix regression in 0.8.26 that broke disabling the update of particular fields during a bulk metadata download."
tickets: [889696]
- title: "Get Books: Fix DRM status for legimi"
- title: "When parsing for lxml via BeautifulSoup, use the calibre modified copy of BeautifulSoup (more robust)."
tickets: [889890]
- title: "HTML Input: Handle double encoded URLs in img tags"
tickets: [889323]
improved recipes:
- Various Polish recipes
- Academia Catavencu
- El Periodico de Aragon
- Weblogs SL
- Folha de Sao Paolo (subscription)
new recipes:
- title: News on Japan
author: Krittika Goyal
- title: Formula AS
author: Silviu Cotoara
- title: Various Turkish news sources
author: Osman Kaysan
- title: Infra.pl and Spider's Web
author: fenuks
- version: 0.8.26 - version: 0.8.26
date: 2011-11-12 date: 2011-11-12

38
recipes/biamag.recipe Normal file
View File

@ -0,0 +1,38 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
bianet.com.tr
'''
from calibre.web.feeds.news import BasicNewsRecipe
class Radikal_tr(BasicNewsRecipe):
    """Fetch the BiaMag magazine feed from bianet.org (Turkish)."""

    title = 'BiaMag'
    __author__ = 'Osman Kaysan'
    description = 'Independent News from Turkey'
    publisher = 'BiaMag'
    category = 'news, politics, Turkey'
    language = 'tr'
    oldest_article = 15          # look back up to 15 days
    max_articles_per_feed = 120
    masthead_url = 'http://bianet.org/images/biamag_logo.gif'
    no_stylesheets = True

    # Options forwarded to the e-book conversion stage.
    conversion_options = {
        'comments': description,
        'tags': category,
        'language': language,
        'publisher': publisher,
        'linearize_tables': True,
        'remove_paragraph_spacing': True,
    }

    # Article content starts at the 'manset' block and ends at 'habermenu';
    # strip navigation and sharing boilerplate in between.
    remove_tags_before = dict(name='div', attrs={'class': 'manset'})
    remove_tags_after = dict(name='div', attrs={'id': 'habermenu'})
    remove_tags = [
        dict(name='ul', attrs={'class': ['altul']}),
        dict(name='div', attrs={'id': ['habermenu']}),
        dict(name='div', attrs={'class': ['mail']}),
        dict(name='div', attrs={'class': ['from']}),
    ]

    feeds = [(u'BiaMag', u'http://www.bianet.org/biamag.rss')]

    def preprocess_html(self, soup):
        # Normalise <img> tags for Adobe Digital Editions compatibility.
        return self.adeify_images(soup)

38
recipes/biamag_en.recipe Normal file
View File

@ -0,0 +1,38 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
bianet.com.tr
'''
from calibre.web.feeds.news import BasicNewsRecipe
class Radikal_tr(BasicNewsRecipe):
    """Fetch the English-language Bianet news feed from bianet.org."""

    title = 'Bianet-English'
    __author__ = 'Osman Kaysan'
    description = 'Independent News Network from Turkey(English)'
    publisher = 'Bianet'
    category = 'news, politics, Turkey'
    language = 'en_TR'
    oldest_article = 7           # look back up to a week
    max_articles_per_feed = 150
    masthead_url = 'http://bianet.org/images/english_logo.gif'
    no_stylesheets = True

    # Options forwarded to the e-book conversion stage.
    conversion_options = {
        'comments': description,
        'tags': category,
        'language': language,
        'publisher': publisher,
        'linearize_tables': True,
        'remove_paragraph_spacing': True,
    }

    # Article content starts at the 'manset' block and ends at 'habermenu';
    # strip navigation and sharing boilerplate in between.
    remove_tags_before = dict(name='div', attrs={'class': 'manset'})
    remove_tags_after = dict(name='div', attrs={'id': 'habermenu'})
    remove_tags = [
        dict(name='ul', attrs={'class': ['altul']}),
        dict(name='div', attrs={'id': ['habermenu']}),
        dict(name='div', attrs={'class': ['mail']}),
        dict(name='div', attrs={'class': ['from']}),
    ]

    feeds = [(u'Bianet-English', u'http://www.bianet.org/english.rss')]

    def preprocess_html(self, soup):
        # Normalise <img> tags for Adobe Digital Editions compatibility.
        return self.adeify_images(soup)

38
recipes/bianet.recipe Normal file
View File

@ -0,0 +1,38 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
bianet.com.tr
'''
from calibre.web.feeds.news import BasicNewsRecipe
class Radikal_tr(BasicNewsRecipe):
    """Fetch the main (Turkish) Bianet news feed from bianet.org."""

    title = 'Bianet'
    __author__ = 'Osman Kaysan'
    description = 'Independent News from Turkey'
    publisher = 'Bianet'
    category = 'news, politics, Turkey'
    language = 'tr'
    oldest_article = 7           # look back up to a week
    max_articles_per_feed = 120
    masthead_url = 'http://bianet.org/images/bianet_logo.gif'
    no_stylesheets = True

    # Options forwarded to the e-book conversion stage.
    conversion_options = {
        'comments': description,
        'tags': category,
        'language': language,
        'publisher': publisher,
        'linearize_tables': True,
        'remove_paragraph_spacing': True,
    }

    # Article content starts at the 'manset' block and ends at 'habermenu';
    # strip navigation and sharing boilerplate in between.
    remove_tags_before = dict(name='div', attrs={'class': 'manset'})
    remove_tags_after = dict(name='div', attrs={'id': 'habermenu'})
    remove_tags = [
        dict(name='ul', attrs={'class': ['altul']}),
        dict(name='div', attrs={'id': ['habermenu']}),
        dict(name='div', attrs={'class': ['mail']}),
        dict(name='div', attrs={'class': ['from']}),
    ]

    feeds = [(u'Bianet', u'http://bianet.org/bianet.rss')]

    def preprocess_html(self, soup):
        # Normalise <img> tags for Adobe Digital Editions compatibility.
        return self.adeify_images(soup)

View File

@ -0,0 +1,50 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from calibre.web.feeds.news import BasicNewsRecipe
class Birgun(BasicNewsRecipe):
    """Turkish daily newspaper Birgün: news sections and columnists."""

    title = u'Birgün Gazetesi'
    __author__ = u'Osman Kaysan'
    description = 'Birgun gazatesi haberleri, kose yazarlari'
    publisher = 'Birgün'
    category = 'news,haberler,turkce,gazete,birgun'
    language = 'tr'
    publication_type = 'newspaper'
    oldest_article = 7           # look back up to a week
    max_articles_per_feed = 150
    use_embedded_content = False
    no_stylesheets = True
    remove_empty_feeds = True

    cover_img_url = 'http://www.birgun.net/i/birgun.png'
    masthead_url = 'http://www.birgun.net/i/birgun.png'

    # Options forwarded to the e-book conversion stage.
    conversion_options = {
        'comments': description,
        'tags': category,
        'language': language,
        'publisher': publisher,
        'linearize_tables': True,
        'remove_paragraph_spacing': True,
    }

    remove_attributes = ['width', 'height']
    # Article body starts at the story headline; strip byline, sharing
    # widgets and other boilerplate.
    remove_tags_before = dict(name='h2', attrs={'class': 'storyHeadline'})
    #remove_tags_after = dict(name='div', attrs={'class':'toollinks'})
    remove_tags_after = dict(name='tr', attrs={'valign': 'top'})
    remove_tags = [
        dict(name='div', attrs={'id': 'byLine'}),
        dict(name='div', attrs={'class': 'toollinks'}),
        dict(name='div', attrs={'class': 'main-lead'}),
        dict(name='div', attrs={'class': 'addthis_toolbox addthis_default_style'}),
        dict(name='a', attrs={'class': 'addthis_button'}),
    ]

    feeds = [
        (u'Güncel', u'http://www.birgun.net/actuels.xml'),
        (u'Köşe Yazarları', u'http://www.birgun.net/writer.xml'),
        (u'Politika', u'http://www.birgun.net/politics.xml'),
        (u'Ekonomi', u'http://www.birgun.net/economic.xml'),
        (u'Çalışma Yaşamı', u'http://www.birgun.net/workers.xml'),
        (u'Dünya', u'http://www.birgun.net/worlds.xml'),
        (u'Yaşam', u'http://www.birgun.net/lifes.xml'),
    ]

View File

@ -4,16 +4,16 @@
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = u'2011, Silviu Cotoar\u0103' __copyright__ = u'2011, Silviu Cotoar\u0103'
''' '''
catavencu.ro academiacatavencu.info
''' '''
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
class Catavencu(BasicNewsRecipe): class AcademiaCatavencu(BasicNewsRecipe):
title = u'Academia Ca\u0163avencu' title = u'Academia Ca\u0163avencu'
__author__ = u'Silviu Cotoar\u0103' __author__ = u'Silviu Cotoar\u0103'
description = 'Tagma cum laude' description = 'Tagma cum laude'
publisher = 'Catavencu' publisher = u'Ca\u0163avencu'
oldest_article = 5 oldest_article = 5
language = 'ro' language = 'ro'
max_articles_per_feed = 100 max_articles_per_feed = 100
@ -21,32 +21,31 @@ class Catavencu(BasicNewsRecipe):
use_embedded_content = False use_embedded_content = False
category = 'Ziare' category = 'Ziare'
encoding = 'utf-8' encoding = 'utf-8'
cover_url = 'http://upload.wikimedia.org/wikipedia/en/1/1e/Academia_Catavencu.jpg' cover_url = 'http://www.academiacatavencu.info/images/logo.png'
conversion_options = { conversion_options = {
'comments' : description 'comments' : description
,'tags' : category ,'tags' : category
,'language' : language ,'language' : language
,'publisher' : publisher ,'publisher' : publisher
} }
keep_only_tags = [ keep_only_tags = [
dict(name='ul', attrs={'class':'articles'}) dict(name='h1', attrs={'class':'art_title'}),
dict(name='div', attrs={'class':'art_text'})
] ]
remove_tags = [ remove_tags = [
dict(name='div', attrs={'class':['tools']}) dict(name='div', attrs={'class':['desp_m']})
, dict(name='div', attrs={'class':['share']}) , dict(name='div', attrs={'id':['tags']})
, dict(name='div', attrs={'class':['category']})
, dict(name='div', attrs={'id':['comments']})
] ]
remove_tags_after = [ remove_tags_after = [
dict(name='div', attrs={'id':'comments'}) dict(name='div', attrs={'class':['desp_m']})
] ]
feeds = [ feeds = [
(u'Feeds', u'http://catavencu.ro/feed/rss') (u'Feeds', u'http://www.academiacatavencu.info/rss.xml')
] ]
def preprocess_html(self, soup): def preprocess_html(self, soup):

View File

@ -46,7 +46,8 @@ class DziennikInternautowRecipe(BasicNewsRecipe):
dict(name = 'div', attrs = {'class' : 'poradniki_context'}), dict(name = 'div', attrs = {'class' : 'poradniki_context'}),
dict(name = 'div', attrs = {'class' : 'uniBox'}), dict(name = 'div', attrs = {'class' : 'uniBox'}),
dict(name = 'object', attrs = {}), dict(name = 'object', attrs = {}),
dict(name = 'h3', attrs = {}) dict(name = 'h3', attrs = {}),
dict(attrs={'class':'twitter-share-button'})
] ]
preprocess_regexps = [ preprocess_regexps = [
@ -58,3 +59,8 @@ class DziennikInternautowRecipe(BasicNewsRecipe):
(r'\s*</', lambda match: '</'), (r'\s*</', lambda match: '</'),
] ]
] ]
def skip_ad_pages(self, soup):
if 'Advertisement' in soup.title:
nexturl=soup.find('a')['href']
return self.index_to_soup(nexturl, raw=True)

View File

@ -5,12 +5,11 @@ __license__ = 'GPL v3'
__copyright__ = '04 December 2010, desUBIKado' __copyright__ = '04 December 2010, desUBIKado'
__author__ = 'desUBIKado' __author__ = 'desUBIKado'
__description__ = 'Daily newspaper from Aragon' __description__ = 'Daily newspaper from Aragon'
__version__ = 'v0.07' __version__ = 'v0.08'
__date__ = '06, February 2011' __date__ = '13, November 2011'
''' '''
elperiodicodearagon.com elperiodicodearagon.com
''' '''
import re
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
@ -20,13 +19,13 @@ class elperiodicodearagon(BasicNewsRecipe):
description = u'Noticias desde Aragon' description = u'Noticias desde Aragon'
publisher = u'elperiodicodearagon.com' publisher = u'elperiodicodearagon.com'
category = u'news, politics, Spain, Aragon' category = u'news, politics, Spain, Aragon'
oldest_article = 2 oldest_article = 1
delay = 0 delay = 0
max_articles_per_feed = 100 max_articles_per_feed = 100
no_stylesheets = True no_stylesheets = True
use_embedded_content = False use_embedded_content = False
language = 'es' language = 'es'
encoding = 'utf8' encoding = 'iso-8859-1'
remove_empty_feeds = True remove_empty_feeds = True
remove_javascript = True remove_javascript = True
@ -39,61 +38,30 @@ class elperiodicodearagon(BasicNewsRecipe):
} }
feeds = [ feeds = [
(u'Arag\xf3n', u'http://elperiodicodearagon.com/RSS/2.xml'), (u'Portada', u'http://zetaestaticos.com/aragon/rss/portada_es.xml'),
(u'Internacional', u'http://elperiodicodearagon.com/RSS/4.xml'), (u'Arag\xf3n', u'http://zetaestaticos.com/aragon/rss/2_es.xml'),
(u'Espa\xf1a', u'http://elperiodicodearagon.com/RSS/3.xml'), (u'Internacional', u'http://zetaestaticos.com/aragon/rss/4_es.xml'),
(u'Econom\xeda', u'http://elperiodicodearagon.com/RSS/5.xml'), (u'Espa\xf1a', u'http://zetaestaticos.com/aragon/rss/3_es.xml'),
(u'Deportes', u'http://elperiodicodearagon.com/RSS/7.xml'), (u'Econom\xeda', u'http://zetaestaticos.com/aragon/rss/5_es.xml'),
(u'Real Zaragoza', u'http://elperiodicodearagon.com/RSS/10.xml'), (u'Deportes', u'http://zetaestaticos.com/aragon/rss/7_es.xml'),
(u'Opini\xf3n', u'http://elperiodicodearagon.com/RSS/103.xml'), (u'Real Zaragoza', u'http://zetaestaticos.com/aragon/rss/10_es.xml'),
(u'Escenarios', u'http://elperiodicodearagon.com/RSS/105.xml'), (u'CAI Zaragoza', u'http://zetaestaticos.com/aragon/rss/91_es.xml'),
(u'Sociedad', u'http://elperiodicodearagon.com/RSS/104.xml'), (u'Monta\xf1ismo', u'http://zetaestaticos.com/aragon/rss/354_es.xml'),
(u'Gente', u'http://elperiodicodearagon.com/RSS/330.xml') (u'Opini\xf3n', u'http://zetaestaticos.com/aragon/rss/103_es.xml'),
(u'Tema del d\xeda', u'http://zetaestaticos.com/aragon/rss/102_es.xml'),
(u'Escenarios', u'http://zetaestaticos.com/aragon/rss/105_es.xml'),
(u'Sociedad', u'http://zetaestaticos.com/aragon/rss/104_es.xml'),
(u'Gente', u'http://zetaestaticos.com/aragon/rss/330_es.xml'),
(u'Espacio 3', u'http://zetaestaticos.com/aragon/rss/328_es.xml'),
(u'Fiestas del Pilar', u'http://zetaestaticos.com/aragon/rss/107_es.xml')
] ]
extra_css = '''
h3 {font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:30px;}
h2 {font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:18px;}
h4 {font-family:Arial,Helvetica,sans-serif; font-style:italic; font-weight:normal;font-size:20px;}
.columnaDeRecursosRelacionados {font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:14px;}
img{margin-bottom: 0.4em}
'''
remove_attributes = ['height','width'] remove_attributes = ['height','width']
keep_only_tags = [dict(name='div', attrs={'id':'contenidos'})] keep_only_tags = [dict(name='div', attrs={'id':'Noticia'})]
# Quitar toda la morralla
remove_tags = [dict(name='ul', attrs={'class':'herramientasDeNoticia'}),
dict(name='span', attrs={'class':'MasInformacion '}),
dict(name='span', attrs={'class':'MasInformacion'}),
dict(name='div', attrs={'class':'Middle'}),
dict(name='div', attrs={'class':'MenuCabeceraRZaragoza'}),
dict(name='div', attrs={'id':'MenuCabeceraRZaragoza'}),
dict(name='div', attrs={'class':'MenuEquipo'}),
dict(name='div', attrs={'class':'TemasRelacionados'}),
dict(name='div', attrs={'class':'GaleriaEnNoticia'}),
dict(name='div', attrs={'class':'Recorte'}),
dict(name='div', attrs={'id':'NoticiasenRecursos'}),
dict(name='div', attrs={'id':'NoticiaEnPapel'}),
dict(name='p', attrs={'class':'RecorteEnNoticias'}),
dict(name='div', attrs={'id':'Comparte'}),
dict(name='div', attrs={'id':'CajaComparte'}),
dict(name='a', attrs={'class':'EscribirComentario'}),
dict(name='a', attrs={'class':'AvisoComentario'}),
dict(name='div', attrs={'class':'CajaAvisoComentario'}),
dict(name='div', attrs={'class':'navegaNoticias'}),
dict(name='div', attrs={'class':'Mensaje'}),
dict(name='div', attrs={'id':'PaginadorDiCom'}),
dict(name='div', attrs={'id':'CajaAccesoCuentaUsuario'}),
dict(name='div', attrs={'id':'CintilloComentario'}),
dict(name='div', attrs={'id':'EscribeComentario'}),
dict(name='div', attrs={'id':'FormularioComentario'}),
dict(name='div', attrs={'id':'FormularioNormas'})]
# Recuperamos la portada de papel (la imagen format=1 tiene mayor resolucion) # Recuperamos la portada de papel (la imagen format=1 tiene mayor resolucion)
def get_cover_url(self): def get_cover_url(self):
@ -104,23 +72,7 @@ class elperiodicodearagon(BasicNewsRecipe):
return image['src'].rstrip('format=2') + 'format=1' return image['src'].rstrip('format=2') + 'format=1'
return None return None
# Para quitar espacios entre la noticia y los comentarios (lineas 1 y 2) # Usamos la versión para móviles
# El indice no apuntaba correctamente al empiece de la noticia (linea 3)
preprocess_regexps = [ def print_version(self, url):
(re.compile(r'<p>&nbsp;</p>', re.DOTALL|re.IGNORECASE), lambda match: ''), return url.replace('http://www.elperiodicodearagon.com/', 'http://www.elperiodicodearagon.com/m/')
(re.compile(r'<p> </p>', re.DOTALL|re.IGNORECASE), lambda match: ''),
(re.compile(r'<p id="">', re.DOTALL|re.IGNORECASE), lambda match: '<p>')
]
# Para sustituir el video incrustado de YouTube por una imagen
def preprocess_html(self, soup):
for video_yt in soup.findAll('iframe',{'title':'YouTube video player'}):
if video_yt:
video_yt.name = 'img'
fuente = video_yt['src']
fuente2 = fuente.replace('http://www.youtube.com/embed/','http://img.youtube.com/vi/')
video_yt['src'] = fuente2 + '/0.jpg'
return soup

50
recipes/formulaas.recipe Normal file
View File

@ -0,0 +1,50 @@
# -*- coding: utf-8 -*-
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = u'2011, Silviu Cotoar\u0103'
'''
formula-as.ro
'''
from calibre.web.feeds.news import BasicNewsRecipe
class FormulaAS(BasicNewsRecipe):
    """Romanian weekly magazine Formula AS."""

    title = u'Formula AS'
    __author__ = u'Silviu Cotoar\u0103'
    publisher = u'Formula AS'
    description = u'Formula AS'
    category = 'Ziare,Romania'
    language = 'ro'
    encoding = 'utf-8'
    oldest_article = 5           # look back up to 5 days
    max_articles_per_feed = 100
    no_stylesheets = True
    use_embedded_content = False
    cover_url = 'http://www.formula-as.ro/_client/img/header_logo.png'

    # Options forwarded to the e-book conversion stage.
    conversion_options = {
        'comments': description,
        'tags': category,
        'language': language,
        'publisher': publisher,
    }

    # Keep only the article container; drop the subtitle list and the
    # options strip that follow the article body.
    keep_only_tags = [
        dict(name='div', attrs={'class': 'item padded'}),
    ]
    remove_tags = [
        dict(name='ul', attrs={'class': 'subtitle lower'}),
    ]
    remove_tags_after = [
        dict(name='ul', attrs={'class': 'subtitle lower'}),
        dict(name='div', attrs={'class': 'item-brief-options'}),
    ]

    feeds = [
        (u'\u0218tiri', u'http://www.formula-as.ro/rss/articole.xml'),
    ]

    def preprocess_html(self, soup):
        # Normalise <img> tags for Adobe Digital Editions compatibility.
        return self.adeify_images(soup)

View File

@ -18,7 +18,7 @@ class FrazPC(BasicNewsRecipe):
max_articles_per_feed = 100 max_articles_per_feed = 100
use_embedded_content = False use_embedded_content = False
no_stylesheets = True no_stylesheets = True
cover_url='http://www.frazpc.pl/images/logo.png'
feeds = [ feeds = [
(u'Aktualno\u015bci', u'http://www.frazpc.pl/feed/aktualnosci'), (u'Aktualno\u015bci', u'http://www.frazpc.pl/feed/aktualnosci'),
(u'Artyku\u0142y', u'http://www.frazpc.pl/feed/artykuly') (u'Artyku\u0142y', u'http://www.frazpc.pl/feed/artykuly')
@ -33,6 +33,7 @@ class FrazPC(BasicNewsRecipe):
dict(name='div', attrs={'class':'comments_box'}) dict(name='div', attrs={'class':'comments_box'})
] ]
remove_tags_after=dict(name='div', attrs={'class':'content'})
preprocess_regexps = [(re.compile(r'\| <a href="#comments">Komentarze \([0-9]*\)</a>'), lambda match: '')] preprocess_regexps = [(re.compile(r'\| <a href="#comments">Komentarze \([0-9]*\)</a>'), lambda match: '')]
remove_attributes = [ 'width', 'height' ] remove_attributes = [ 'width', 'height' ]

View File

@ -12,7 +12,6 @@ class GN(BasicNewsRecipe):
EDITION = 0 EDITION = 0
__author__ = 'Piotr Kontek' __author__ = 'Piotr Kontek'
title = u'Gość niedzielny'
description = 'Weekly magazine' description = 'Weekly magazine'
encoding = 'utf-8' encoding = 'utf-8'
no_stylesheets = True no_stylesheets = True
@ -20,6 +19,8 @@ class GN(BasicNewsRecipe):
remove_javascript = True remove_javascript = True
temp_files = [] temp_files = []
simultaneous_downloads = 1 simultaneous_downloads = 1
masthead_url = 'http://gosc.pl/files/11/03/12/949089_top.gif'
title = u'Gość niedzielny'
articles_are_obfuscated = True articles_are_obfuscated = True
@ -64,7 +65,6 @@ class GN(BasicNewsRecipe):
if img != None: if img != None:
a = img.parent a = img.parent
self.EDITION = a['href'] self.EDITION = a['href']
self.title = img['alt']
self.cover_url = 'http://www.gosc.pl' + img['src'] self.cover_url = 'http://www.gosc.pl' + img['src']
if not first: if not first:
break break

View File

@ -4,56 +4,20 @@ __license__ = 'GPL v3'
__copyright__ = '2010, matek09, matek09@gmail.com' __copyright__ = '2010, matek09, matek09@gmail.com'
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
import re
class Histmag(BasicNewsRecipe): class Histmag(BasicNewsRecipe):
title = u'Histmag'
oldest_article = 7
max_articles_per_feed = 100
cover_url='http://histmag.org/grafika/loga/histmag-logo-2-300px.png'
__author__ = 'matek09'
description = u"Artykuly historyczne i publicystyczne"
encoding = 'utf-8'
#preprocess_regexps = [(re.compile(r'</span>'), lambda match: '</span><br><br>'),(re.compile(r'<span>'), lambda match: '<br><br><span>')]
no_stylesheets = True
language = 'pl'
remove_javascript = True
keep_only_tags=[dict(id='article')]
remove_tags=[dict(name = 'p', attrs = {'class' : 'article-tags'})]
title = u'Histmag' feeds = [(u'Wszystkie', u'http://histmag.org/rss/wszystkie.xml'), (u'Wydarzenia', u'http://histmag.org/rss/wydarzenia.xml'), (u'Recenzje', u'http://histmag.org/rss/recenzje.xml'), (u'Artykuły historyczne', u'http://histmag.org/rss/historia.xml'), (u'Publicystyka', u'http://histmag.org/rss/publicystyka.xml')]
__author__ = 'matek09'
description = u"Artykuly historyczne i publicystyczne"
encoding = 'utf-8'
no_stylesheets = True
language = 'pl'
remove_javascript = True
#max_articles_per_feed = 1
remove_tags_before = dict(dict(name = 'div', attrs = {'id' : 'article'}))
remove_tags_after = dict(dict(name = 'h2', attrs = {'class' : 'komentarze'}))
#keep_only_tags =[]
#keep_only_tags.append(dict(name = 'h2'))
#keep_only_tags.append(dict(name = 'p'))
remove_tags =[]
remove_tags.append(dict(name = 'p', attrs = {'class' : 'podpis'}))
remove_tags.append(dict(name = 'h2', attrs = {'class' : 'komentarze'}))
remove_tags.append(dict(name = 'img', attrs = {'src' : 'style/buttons/wesprzyjnas-1.jpg'}))
preprocess_regexps = [(re.compile(r'</span>'), lambda match: '</span><br><br>'),
(re.compile(r'<span>'), lambda match: '<br><br><span>')]
extra_css = '''
.left {font-size: x-small}
.right {font-size: x-small}
'''
def find_articles(self, soup):
articles = []
for div in soup.findAll('div', attrs={'class' : 'text'}):
articles.append({
'title' : self.tag_to_string(div.h3.a),
'url' : 'http://www.histmag.org/' + div.h3.a['href'],
'date' : self.tag_to_string(div.next('p')).split('|')[0],
'description' : self.tag_to_string(div.next('p', podpis=False)),
})
return articles
def parse_index(self):
soup = self.index_to_soup('http://histmag.org/?arc=4&dx=0')
feeds = []
feeds.append((u"Artykuly historyczne", self.find_articles(soup)))
soup = self.index_to_soup('http://histmag.org/?arc=5&dx=0')
feeds.append((u"Artykuly publicystyczne", self.find_articles(soup)))
soup = self.index_to_soup('http://histmag.org/?arc=1&dx=0')
feeds.append((u"Wydarzenia", self.find_articles(soup)))
return feeds

BIN
recipes/icons/formulaas.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 687 B

View File

@ -1,27 +1,26 @@
# adapted from old recipe by Darko Miletic <darko.miletic at gmail.com> # adapted from old recipe by Darko Miletic <darko.miletic at gmail.com>
import string, re import re
from calibre import strftime
from calibre.web.feeds.recipes import BasicNewsRecipe from calibre.web.feeds.recipes import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag, NavigableString from calibre.ebooks.BeautifulSoup import Tag, NavigableString
class TheIndependentNew(BasicNewsRecipe): class TheIndependentNew(BasicNewsRecipe):
# flag to enable/disable article graphics on business pages/some others # flag to enable/disable article graphics on business pages/some others
# eg http://www.independent.co.uk/news/world/europe/berlusconi-departure-fails-to-calm-the-markets-6259682.html # eg http://www.independent.co.uk/news/world/europe/berlusconi-departure-fails-to-calm-the-markets-6259682.html
# -max dimensions can be altered using the .pictureContainer img selector in the css # -max dimensions can be altered using the .pictureContainer img selector in the css
_FETCH_ARTICLE_GRAPHICS = True _FETCH_ARTICLE_GRAPHICS = True
#Flag to enable/disable image fetching (not business) #Flag to enable/disable image fetching (not business)
_FETCH_IMAGES = True _FETCH_IMAGES = True
#used for converting rating to stars #used for converting rating to stars
_STAR_URL = 'http://www.independent.co.uk/skins/ind/images/rating_star.png' _STAR_URL = 'http://www.independent.co.uk/skins/ind/images/rating_star.png'
_NO_STAR_URL = 'http://www.independent.co.uk/skins/ind/images/rating_star_grey.png' _NO_STAR_URL = 'http://www.independent.co.uk/skins/ind/images/rating_star_grey.png'
title = u'The Independent' title = u'The Independent'
__author__ = 'Will' __author__ = 'Will'
description = 'The latest in UK News and World News from The \ description = 'The latest in UK News and World News from The \
@ -42,26 +41,26 @@ class TheIndependentNew(BasicNewsRecipe):
dict(attrs={'id' : ['RelatedArtTag','renderBiography']}), dict(attrs={'id' : ['RelatedArtTag','renderBiography']}),
dict(attrs={'class' : ['autoplay','openBiogPopup']}) dict(attrs={'class' : ['autoplay','openBiogPopup']})
] ]
keep_only_tags =[dict(attrs={'id':'main'})] keep_only_tags =[dict(attrs={'id':'main'})]
recursions = 0 recursions = 0
# fixes non compliant html nesting and 'marks' article graphics links # fixes non compliant html nesting and 'marks' article graphics links
preprocess_regexps = [ preprocess_regexps = [
(re.compile('<span class="storyTop ">(?P<nested>.*?)</span>', re.DOTALL), (re.compile('<span class="storyTop ">(?P<nested>.*?)</span>', re.DOTALL),
lambda match: '<div class="storyTop">' + match.group('nested') + '</div>'), lambda match: '<div class="storyTop">' + match.group('nested') + '</div>'),
(re.compile('(<strong>.*?[Cc]lick.*?<a.*?((HERE)|([Hh]ere)).*?</strong>)', re.DOTALL), (re.compile('(<strong>.*?[Cc]lick.*?<a.*?((HERE)|([Hh]ere)).*?</strong>)', re.DOTALL),
lambda match: '<div class="article-graphic">' + match.group(0) + '</div>'), lambda match: '<div class="article-graphic">' + match.group(0) + '</div>'),
] ]
conversion_options = { conversion_options = {
'comment' : description 'comment' : description
, 'tags' : category , 'tags' : category
, 'publisher' : publisher , 'publisher' : publisher
, 'language' : language , 'language' : language
} }
extra_css = """ extra_css = """
h1{font-family: Georgia,serif } h1{font-family: Georgia,serif }
body{font-family: Verdana,Arial,Helvetica,sans-serif} body{font-family: Verdana,Arial,Helvetica,sans-serif}
@ -81,22 +80,22 @@ class TheIndependentNew(BasicNewsRecipe):
.articleContent {display: block; clear:left;} .articleContent {display: block; clear:left;}
.storyTop{} .storyTop{}
.pictureContainer img { max-width: 400px; max-height: 400px;} .pictureContainer img { max-width: 400px; max-height: 400px;}
""" """
oldest_article = 1 oldest_article = 1
max_articles_per_feed = 100 max_articles_per_feed = 100
_processed_urls = [] _processed_urls = []
def get_article_url(self, article): def get_article_url(self, article):
url = super(self.__class__,self).get_article_url(article) url = super(self.__class__,self).get_article_url(article)
title = article.get('title', None) title = article.get('title', None)
if title and re.search("^Video:",title): if title and re.search("^Video:",title):
return None return None
#remove duplicates #remove duplicates
if not (url in self._processed_urls): if not (url in self._processed_urls):
self._processed_urls.append(url) self._processed_urls.append(url)
else: else:
@ -104,101 +103,101 @@ class TheIndependentNew(BasicNewsRecipe):
return url return url
def preprocess_html(self, soup): def preprocess_html(self, soup):
#remove 'advertorial articles' #remove 'advertorial articles'
strapline = soup.find('div',attrs={'class' : re.compile('.*strapLine.*')}) strapline = soup.find('div',attrs={'class' : re.compile('.*strapLine.*')})
if strapline: if strapline:
for para in strapline.findAll('p'): for para in strapline.findAll('p'):
if len(para.contents) and isinstance(para.contents[0],NavigableString) \ if len(para.contents) and isinstance(para.contents[0],NavigableString) \
and para.contents[0] == 'ADVERTORIAL FEATURE': and para.contents[0] == 'ADVERTORIAL FEATURE':
return None return None
items_to_extract = [] items_to_extract = []
for item in soup.findAll(attrs={'class' : re.compile("widget.*")}): for item in soup.findAll(attrs={'class' : re.compile("widget.*")}):
remove = True remove = True
pattern = re.compile('((articleContent)|(title))$') pattern = re.compile('((articleContent)|(title))$')
if (pattern.search(item['class'])) is not None: if (pattern.search(item['class'])) is not None:
remove = False remove = False
# corrections # corrections
# story content always good # story content always good
pattern = re.compile('storyContent') pattern = re.compile('storyContent')
if (pattern.search(item['class'])) is not None: if (pattern.search(item['class'])) is not None:
remove = False remove = False
#images #images
pattern = re.compile('slideshow') pattern = re.compile('slideshow')
if (pattern.search(item['class'])) is not None: if (pattern.search(item['class'])) is not None:
if self._FETCH_IMAGES: if self._FETCH_IMAGES:
remove = False remove = False
else: else:
remove = True remove = True
#social widgets always bad #social widgets always bad
pattern = re.compile('socialwidget') pattern = re.compile('socialwidget')
if (pattern.search(item['class'])) is not None: if (pattern.search(item['class'])) is not None:
remove = True remove = True
if remove: if remove:
items_to_extract.append(item) items_to_extract.append(item)
for item in items_to_extract: for item in items_to_extract:
item.extract() item.extract()
items_to_extract = [] items_to_extract = []
if self._FETCH_IMAGES: if self._FETCH_IMAGES:
for item in soup.findAll('a',attrs={'href' : re.compile('.*')}): for item in soup.findAll('a',attrs={'href' : re.compile('.*')}):
if item.img is not None: if item.img is not None:
#use full size image #use full size image
img = item.findNext('img') img = item.findNext('img')
img['src'] = item['href'] img['src'] = item['href']
#insert caption if available #insert caption if available
if img['title'] is not None and (len(img['title']) > 1): if img['title'] is not None and (len(img['title']) > 1):
tag = Tag(soup,'h3') tag = Tag(soup,'h3')
text = NavigableString(img['title']) text = NavigableString(img['title'])
tag.insert(0,text) tag.insert(0,text)
#picture before text #picture before text
img.extract() img.extract()
item.insert(0,img) item.insert(0,img)
item.insert(1,tag) item.insert(1,tag)
# remove link # remove link
item.name = "div" item.name = "div"
item["class"]='image' item["class"]='image'
del item["href"] del item["href"]
#remove empty subtitles #remove empty subtitles
""" """
currently the subtitle is located in first paragraph after currently the subtitle is located in first paragraph after
sibling <h3 class="subtitle"> tag. This may be 'fixed' at sibling <h3 class="subtitle"> tag. This may be 'fixed' at
some point. some point.
""" """
subtitle = soup.find('h3',attrs={'class' : 'subtitle'}) subtitle = soup.find('h3',attrs={'class' : 'subtitle'})
if subtitle is not None: if subtitle is not None:
subtitleText = subtitle.findNext('p') subtitleText = subtitle.findNext('p')
if subtitleText is not None: if subtitleText is not None:
if len(subtitleText.contents[0]) <= 1 : if len(subtitleText.contents[0]) <= 1 :
subtitleText.extract() subtitleText.extract()
subtitle.extract() subtitle.extract()
#replace rating numbers with stars #replace rating numbers with stars
for item in soup.findAll('div',attrs={ 'class' : 'starRating'}): for item in soup.findAll('div',attrs={ 'class' : 'starRating'}):
if item is not None: if item is not None:
soup2 = self._insertRatingStars(soup,item) soup2 = self._insertRatingStars(soup,item)
if soup2 is not None: if soup2 is not None:
soup = soup2 soup = soup2
#remove empty paragraph tags in storyTop which can leave a space #remove empty paragraph tags in storyTop which can leave a space
#between first paragraph and rest of story #between first paragraph and rest of story
nested_content = False nested_content = False
storyTop = soup.find('div',attrs={ 'class' : ['storyTop']}) storyTop = soup.find('div',attrs={ 'class' : ['storyTop']})
for item in storyTop.findAll('p'): for item in storyTop.findAll('p'):
for nested in item: for nested in item:
@ -207,19 +206,19 @@ class TheIndependentNew(BasicNewsRecipe):
break break
if not nested_content and item.contents is not None and len(item.contents[0]) <= 1 : if not nested_content and item.contents is not None and len(item.contents[0]) <= 1 :
items_to_extract.append(item) items_to_extract.append(item)
for item in items_to_extract: for item in items_to_extract:
item.extract() item.extract()
items_to_extract = [] items_to_extract = []
#remove line breaks immediately next to tags with default margins #remove line breaks immediately next to tags with default margins
#to prevent double line spacing and narrow columns of text #to prevent double line spacing and narrow columns of text
storyTop = soup.find('div',attrs={ 'class' : ['storyTop']}) storyTop = soup.find('div',attrs={ 'class' : ['storyTop']})
self._remove_undesired_line_breaks_from_tag(storyTop,soup) self._remove_undesired_line_breaks_from_tag(storyTop,soup)
#replace article graphics link with the graphics themselves #replace article graphics link with the graphics themselves
if self._FETCH_ARTICLE_GRAPHICS: if self._FETCH_ARTICLE_GRAPHICS:
items_to_insert = [] items_to_insert = []
@ -231,20 +230,20 @@ class TheIndependentNew(BasicNewsRecipe):
if isinstance(child,Tag): if isinstance(child,Tag):
if str(child.name) == 'a': if str(child.name) == 'a':
items_to_insert.extend(self._get_article_graphic(strong,child['href'],soup)) items_to_insert.extend(self._get_article_graphic(strong,child['href'],soup))
for item in items_to_insert: for item in items_to_insert:
item[0].replaceWith(item[1]) item[0].replaceWith(item[1])
for item in items_to_extract: for item in items_to_extract:
item.extract() item.extract()
return soup return soup
def _get_article_graphic(self,old_item,url,soup): def _get_article_graphic(self,old_item,url,soup):
items_to_insert = [] items_to_insert = []
if re.search('\.jpg$',str(url)): if re.search('\.jpg$',str(url)):
div = Tag(soup,'div') div = Tag(soup,'div')
div['class'] = 'pictureContainer' div['class'] = 'pictureContainer'
@ -254,20 +253,20 @@ class TheIndependentNew(BasicNewsRecipe):
div.insert(0,img) div.insert(0,img)
items_to_insert.append((old_item,div,)) items_to_insert.append((old_item,div,))
return items_to_insert return items_to_insert
soup2 = self.index_to_soup(url) soup2 = self.index_to_soup(url)
for item in soup2.findAll('div',attrs={'class' : re.compile("widget picture article.*")}): for item in soup2.findAll('div',attrs={'class' : re.compile("widget picture article.*")}):
items_to_insert.append((old_item,item),) items_to_insert.append((old_item,item),)
return items_to_insert return items_to_insert
def _insertRatingStars(self,soup,item): def _insertRatingStars(self,soup,item):
if item.contents is None: if item.contents is None:
return return
rating = item.contents[0] rating = item.contents[0]
if not rating.isdigit(): if not rating.isdigit():
return None return None
rating = int(item.contents[0]) rating = int(item.contents[0])
for i in range(1,6): for i in range(1,6):
star = Tag(soup,'img') star = Tag(soup,'img')
if i <= rating: if i <= rating:
@ -277,26 +276,26 @@ class TheIndependentNew(BasicNewsRecipe):
star['alt'] = 'star number ' + str(i) star['alt'] = 'star number ' + str(i)
item.insert(i,star) item.insert(i,star)
#item.contents[0] = NavigableString('(' + str(rating) + ')') #item.contents[0] = NavigableString('(' + str(rating) + ')')
item.contents[0] = '' item.contents[0] = ''
def postprocess_html(self,soup, first_fetch): def postprocess_html(self,soup, first_fetch):
#find broken images and remove captions #find broken images and remove captions
items_to_extract = [] items_to_extract = []
for item in soup.findAll('div', attrs={'class' : 'image'}): for item in soup.findAll('div', attrs={'class' : 'image'}):
img = item.findNext('img') img = item.findNext('img')
if img is not None and img['src'] is not None: if img is not None and img['src'] is not None:
# broken images still point to remote url # broken images still point to remote url
pattern = re.compile('http://www.independent.co.uk.*') pattern = re.compile('http://www.independent.co.uk.*')
if pattern.match(img["src"]) is not None: if pattern.match(img["src"]) is not None:
caption = img.findNextSibling('h3') caption = img.findNextSibling('h3')
if caption is not None: if caption is not None:
items_to_extract.append(caption) items_to_extract.append(caption)
items_to_extract.append(img) items_to_extract.append(img)
for item in items_to_extract: for item in items_to_extract:
item.extract() item.extract()
return soup return soup
def _recurisvely_linearise_tag_tree( def _recurisvely_linearise_tag_tree(
self, self,
item, item,
@ -311,25 +310,25 @@ class TheIndependentNew(BasicNewsRecipe):
if not (isinstance(item,Tag)): if not (isinstance(item,Tag)):
return linearised return linearised
for nested in item: for nested in item:
linearised.append(nested) linearised.append(nested)
linearised = self._recurisvely_linearise_tag_tree(nested,linearised, count) linearised = self._recurisvely_linearise_tag_tree(nested,linearised, count)
return linearised return linearised
def _get_previous_tag(self,current_index, tag_tree): def _get_previous_tag(self,current_index, tag_tree):
if current_index == 0: if current_index == 0:
return None return None
else: else:
return tag_tree[current_index - 1] return tag_tree[current_index - 1]
def _get_next_tag(self,current_index, tag_tree): def _get_next_tag(self,current_index, tag_tree):
if current_index < len(tag_tree) - 1: if current_index < len(tag_tree) - 1:
return tag_tree[current_index + 1] return tag_tree[current_index + 1]
else: else:
return None return None
def _list_match(self,test_str, list_regex): def _list_match(self,test_str, list_regex):
for regex in list_regex: for regex in list_regex:
match = re.match(regex, test_str) match = re.match(regex, test_str)
@ -338,24 +337,24 @@ class TheIndependentNew(BasicNewsRecipe):
return False return False
def _remove_undesired_line_breaks_from_tag(self,parent,soup): def _remove_undesired_line_breaks_from_tag(self,parent,soup):
if parent is None: if parent is None:
return return
tag_tree = self._recurisvely_linearise_tag_tree(parent) tag_tree = self._recurisvely_linearise_tag_tree(parent)
items_to_remove = [] items_to_remove = []
for item in tag_tree: for item in tag_tree:
if item == u'\n': if item == u'\n':
items_to_remove.append(item) items_to_remove.append(item)
continue; continue;
for item in items_to_remove: for item in items_to_remove:
tag_tree.remove(item) tag_tree.remove(item)
spaced_tags = [r'p', r'h\d', r'blockquote'] spaced_tags = [r'p', r'h\d', r'blockquote']
tags_to_extract = [] tags_to_extract = []
tags_to_replace = [] tags_to_replace = []
@ -363,41 +362,41 @@ class TheIndependentNew(BasicNewsRecipe):
if isinstance(tag, Tag): if isinstance(tag, Tag):
if str(tag) == '<br />': if str(tag) == '<br />':
previous_tag = self._get_previous_tag(i, tag_tree) previous_tag = self._get_previous_tag(i, tag_tree)
if isinstance(previous_tag, Tag): if isinstance(previous_tag, Tag):
previous_tag_is_spaced = previous_tag is not None\ previous_tag_is_spaced = previous_tag is not None\
and self._list_match(str(previous_tag.name), and self._list_match(str(previous_tag.name),
spaced_tags) spaced_tags)
else: else:
previous_tag_is_spaced = False previous_tag_is_spaced = False
next_tag = self._get_next_tag(i, tag_tree) next_tag = self._get_next_tag(i, tag_tree)
if isinstance(next_tag, Tag): if isinstance(next_tag, Tag):
next_tag_is_spaced = next_tag is not None\ next_tag_is_spaced = next_tag is not None\
and self._list_match(str(next_tag.name), spaced_tags) and self._list_match(str(next_tag.name), spaced_tags)
else: else:
next_tag_is_spaced = False next_tag_is_spaced = False
if previous_tag_is_spaced or next_tag_is_spaced or i == 0\ if previous_tag_is_spaced or next_tag_is_spaced or i == 0\
or i == len(tag_tree) - 1: or i == len(tag_tree) - 1:
tags_to_extract.append(tag) tags_to_extract.append(tag)
else: else:
tags_to_replace.append((tag,NavigableString(' '),)) tags_to_replace.append((tag,NavigableString(' '),))
for pair in tags_to_replace: for pair in tags_to_replace:
pair[0].replaceWith(pair[1]) pair[0].replaceWith(pair[1])
for tag in tags_to_extract: for tag in tags_to_extract:
tag.extract() tag.extract()
feeds = [ feeds = [
(u'News - UK', (u'News - UK',
u'http://www.independent.co.uk/news/uk/?service=rss'), u'http://www.independent.co.uk/news/uk/?service=rss'),
(u'News - World', (u'News - World',
u'http://www.independent.co.uk/news/world/?service=rss'), u'http://www.independent.co.uk/news/world/?service=rss'),
(u'News - Business', (u'News - Business',
u'http://www.independent.co.uk/news/business/?service=rss'), u'http://www.independent.co.uk/news/business/?service=rss'),
(u'News - People', (u'News - People',
u'http://www.independent.co.uk/news/people/?service=rss'), u'http://www.independent.co.uk/news/people/?service=rss'),
(u'News - Science', (u'News - Science',
@ -497,4 +496,4 @@ class TheIndependentNew(BasicNewsRecipe):
(u'IndyBest', (u'IndyBest',
u'http://www.independent.co.uk/extras/indybest/?service=rss'), u'http://www.independent.co.uk/extras/indybest/?service=rss'),
] ]

18
recipes/japan_news.recipe Normal file
View File

@ -0,0 +1,18 @@
from calibre.web.feeds.news import BasicNewsRecipe
class NewsOnJapan(BasicNewsRecipe):
title = u'News On Japan'
language = 'en'
__author__ = 'Krittika Goyal'
oldest_article = 1 #days
max_articles_per_feed = 25
use_embedded_content = False
no_stylesheets = True
auto_cleanup = True
feeds = [
('News',
'http://newsonjapan.com/rss/top.xml'),
]

View File

@ -23,7 +23,7 @@ class OSNewsRecipe(BasicNewsRecipe):
oldest_article = 7 oldest_article = 7
max_articles_per_feed = 100 max_articles_per_feed = 100
cover_url='http://osnews.pl/wp-content/themes/osnews/img/logo.png'
extra_css = ''' extra_css = '''
.news-heading {font-size:150%} .news-heading {font-size:150%}
.newsinformations li {display:inline;} .newsinformations li {display:inline;}
@ -44,7 +44,9 @@ class OSNewsRecipe(BasicNewsRecipe):
dict(name = 'div', attrs = {'class' : 'sociable'}), dict(name = 'div', attrs = {'class' : 'sociable'}),
dict(name = 'div', attrs = {'class' : 'post_prev'}), dict(name = 'div', attrs = {'class' : 'post_prev'}),
dict(name = 'div', attrs = {'class' : 'post_next'}), dict(name = 'div', attrs = {'class' : 'post_next'}),
dict(name = 'div', attrs = {'class' : 'clr'}) dict(name = 'div', attrs = {'class' : 'clr'}),
dict(name = 'div', attrs = {'class' : 'tw_button'}),
dict(name = 'div', attrs = {'style' : 'width:56px;height:60px;float:left;margin-right:10px'})
] ]
preprocess_regexps = [(re.compile(u'</span>Komentarze: \(?[0-9]+\)? ?<span'), lambda match: '</span><span')] preprocess_regexps = [(re.compile(u'</span>Komentarze: \(?[0-9]+\)? ?<span'), lambda match: '</span><span')]

View File

@ -8,13 +8,13 @@ radikal.com.tr
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
class Radikal_tr(BasicNewsRecipe): class Radikal_tr(BasicNewsRecipe):
title = 'Radikal - Turkey' title = 'Radikal Ekleri'
__author__ = 'Darko Miletic' __author__ = 'Darko Mileticden uyarlama'
description = 'News from Turkey' description = 'Politic-Cultural Articles from Turkey'
publisher = 'radikal' publisher = 'radikal'
category = 'news, politics, Turkey' category = 'news, politics, Turkey'
oldest_article = 7 oldest_article = 14
max_articles_per_feed = 150 max_articles_per_feed = 120
no_stylesheets = True no_stylesheets = True
encoding = 'cp1254' encoding = 'cp1254'
use_embedded_content = False use_embedded_content = False
@ -37,11 +37,9 @@ class Radikal_tr(BasicNewsRecipe):
feeds = [ feeds = [
(u'Yazarlar' , u'http://www.radikal.com.tr/d/rss/RssYazarlar.xml') (u'Radikal Iki' , u'http://www.radikal.com.tr/d/rss/Rss_42.xml')
,(u'Turkiye' , u'http://www.radikal.com.tr/d/rss/Rss_97.xml' ) ,(u'Radikal Hayat' , u'http://www.radikal.com.tr/d/rss/Rss_41.xml' )
,(u'Politika' , u'http://www.radikal.com.tr/d/rss/Rss_98.xml' ) ,(u'Radikal Kitap' , u'http://www.radikal.com.tr/d/rss/Rss_40.xml' )
,(u'Dis Haberler', u'http://www.radikal.com.tr/d/rss/Rss_100.xml' )
,(u'Ekonomi' , u'http://www.radikal.com.tr/d/rss/Rss_101.xml' )
] ]
def print_version(self, url): def print_version(self, url):

View File

@ -37,11 +37,13 @@ class TagesspiegelRSS(BasicNewsRecipe):
keep_only_tags = dict(name='div', attrs={'class':["hcf-article"]}) keep_only_tags = dict(name='div', attrs={'class':["hcf-article"]})
remove_tags = [ remove_tags = [
dict(name='link'), dict(name='iframe'),dict(name='style'),dict(name='meta'),dict(name='button'), dict(name='link'), dict(name='iframe'),dict(name='style'),dict(name='meta'),dict(name='button'),
dict(name='div', attrs={'class':["hcf-jump-to-comments","hcf-clear","hcf-magnify hcf-media-control"] }), dict(name='div', attrs={'class':["hcf-jump-to-comments","hcf-clear","hcf-magnify hcf-media-control",
"hcf-socials-widgets hcf-socials-top","hcf-socials-widgets hcf-socials-bottom"] }),
dict(name='span', attrs={'class':["hcf-mainsearch",] }), dict(name='span', attrs={'class':["hcf-mainsearch",] }),
dict(name='ul', attrs={'class':["hcf-tools"]}), dict(name='ul', attrs={'class':["hcf-tools"]}),
dict(name='ul', attrs={'class': re.compile('hcf-services')}) dict(name='ul', attrs={'class': re.compile('hcf-services')})
] ]
def parse_index(self): def parse_index(self):
soup = self.index_to_soup('http://www.tagesspiegel.de/zeitung/') soup = self.index_to_soup('http://www.tagesspiegel.de/zeitung/')

View File

@ -2,8 +2,8 @@
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '4 February 2011, desUBIKado' __copyright__ = '4 February 2011, desUBIKado'
__author__ = 'desUBIKado' __author__ = 'desUBIKado'
__version__ = 'v0.05' __version__ = 'v0.07'
__date__ = '13, April 2011' __date__ = '13, November 2011'
''' '''
http://www.weblogssl.com/ http://www.weblogssl.com/
''' '''
@ -33,6 +33,7 @@ class weblogssl(BasicNewsRecipe):
feeds = [ feeds = [
(u'Xataka', u'http://feeds.weblogssl.com/xataka2') (u'Xataka', u'http://feeds.weblogssl.com/xataka2')
,(u'Xataka Mexico', u'http://feeds.weblogssl.com/xatakamx')
,(u'Xataka M\xf3vil', u'http://feeds.weblogssl.com/xatakamovil') ,(u'Xataka M\xf3vil', u'http://feeds.weblogssl.com/xatakamovil')
,(u'Xataka Android', u'http://feeds.weblogssl.com/xatakandroid') ,(u'Xataka Android', u'http://feeds.weblogssl.com/xatakandroid')
,(u'Xataka Foto', u'http://feeds.weblogssl.com/xatakafoto') ,(u'Xataka Foto', u'http://feeds.weblogssl.com/xatakafoto')
@ -40,6 +41,7 @@ class weblogssl(BasicNewsRecipe):
,(u'Xataka Ciencia', u'http://feeds.weblogssl.com/xatakaciencia') ,(u'Xataka Ciencia', u'http://feeds.weblogssl.com/xatakaciencia')
,(u'Genbeta', u'http://feeds.weblogssl.com/genbeta') ,(u'Genbeta', u'http://feeds.weblogssl.com/genbeta')
,(u'Genbeta Dev', u'http://feeds.weblogssl.com/genbetadev') ,(u'Genbeta Dev', u'http://feeds.weblogssl.com/genbetadev')
,(u'Genbeta Social Media', u'http://feeds.weblogssl.com/genbetasocialmedia')
,(u'Applesfera', u'http://feeds.weblogssl.com/applesfera') ,(u'Applesfera', u'http://feeds.weblogssl.com/applesfera')
,(u'Vida Extra', u'http://feeds.weblogssl.com/vidaextra') ,(u'Vida Extra', u'http://feeds.weblogssl.com/vidaextra')
,(u'Naci\xf3n Red', u'http://feeds.weblogssl.com/nacionred') ,(u'Naci\xf3n Red', u'http://feeds.weblogssl.com/nacionred')
@ -51,7 +53,6 @@ class weblogssl(BasicNewsRecipe):
,(u'Pop rosa', u'http://feeds.weblogssl.com/poprosa') ,(u'Pop rosa', u'http://feeds.weblogssl.com/poprosa')
,(u'Zona FandoM', u'http://feeds.weblogssl.com/zonafandom') ,(u'Zona FandoM', u'http://feeds.weblogssl.com/zonafandom')
,(u'Fandemia', u'http://feeds.weblogssl.com/fandemia') ,(u'Fandemia', u'http://feeds.weblogssl.com/fandemia')
,(u'Noctamina', u'http://feeds.weblogssl.com/noctamina')
,(u'Tendencias', u'http://feeds.weblogssl.com/trendencias') ,(u'Tendencias', u'http://feeds.weblogssl.com/trendencias')
,(u'Beb\xe9s y m\xe1s', u'http://feeds.weblogssl.com/bebesymas') ,(u'Beb\xe9s y m\xe1s', u'http://feeds.weblogssl.com/bebesymas')
,(u'Directo al paladar', u'http://feeds.weblogssl.com/directoalpaladar') ,(u'Directo al paladar', u'http://feeds.weblogssl.com/directoalpaladar')
@ -60,8 +61,8 @@ class weblogssl(BasicNewsRecipe):
,(u'Embelezzia', u'http://feeds.weblogssl.com/embelezzia') ,(u'Embelezzia', u'http://feeds.weblogssl.com/embelezzia')
,(u'Vit\xf3nica', u'http://feeds.weblogssl.com/vitonica') ,(u'Vit\xf3nica', u'http://feeds.weblogssl.com/vitonica')
,(u'Ambiente G', u'http://feeds.weblogssl.com/ambienteg') ,(u'Ambiente G', u'http://feeds.weblogssl.com/ambienteg')
,(u'Arrebatadora', u'http://feeds.weblogssl.com/arrebatadora') ,(u'Tendencias Belleza', u'http://feeds.weblogssl.com/trendenciasbelleza')
,(u'Mensencia', u'http://feeds.weblogssl.com/mensencia') ,(u'Tendencias Hombre', u'http://feeds.weblogssl.com/trendenciashombre')
,(u'Peques y m\xe1s', u'http://feeds.weblogssl.com/pequesymas') ,(u'Peques y m\xe1s', u'http://feeds.weblogssl.com/pequesymas')
,(u'Motorpasi\xf3n', u'http://feeds.weblogssl.com/motorpasion') ,(u'Motorpasi\xf3n', u'http://feeds.weblogssl.com/motorpasion')
,(u'Motorpasi\xf3n F1', u'http://feeds.weblogssl.com/motorpasionf1') ,(u'Motorpasi\xf3n F1', u'http://feeds.weblogssl.com/motorpasionf1')
@ -69,7 +70,6 @@ class weblogssl(BasicNewsRecipe):
,(u'Motorpasi\xf3n Futuro', u'http://feeds.weblogssl.com/motorpasionfuturo') ,(u'Motorpasi\xf3n Futuro', u'http://feeds.weblogssl.com/motorpasionfuturo')
,(u'Notas de futbol', u'http://feeds.weblogssl.com/notasdefutbol') ,(u'Notas de futbol', u'http://feeds.weblogssl.com/notasdefutbol')
,(u'Fuera de l\xedmites', u'http://feeds.weblogssl.com/fueradelimites') ,(u'Fuera de l\xedmites', u'http://feeds.weblogssl.com/fueradelimites')
,(u'Salir a ganar', u'http://feeds.weblogssl.com/saliraganar')
,(u'El blog salm\xf3n', u'http://feeds.weblogssl.com/elblogsalmon2') ,(u'El blog salm\xf3n', u'http://feeds.weblogssl.com/elblogsalmon2')
,(u'Pymes y aut\xf3nomos', u'http://feeds.weblogssl.com/pymesyautonomos') ,(u'Pymes y aut\xf3nomos', u'http://feeds.weblogssl.com/pymesyautonomos')
,(u'Tecnolog\xeda Pyme', u'http://feeds.weblogssl.com/tecnologiapyme') ,(u'Tecnolog\xeda Pyme', u'http://feeds.weblogssl.com/tecnologiapyme')
@ -105,3 +105,22 @@ class weblogssl(BasicNewsRecipe):
return soup return soup
# Para obtener la url original del articulo a partir de la de "feedsportal"
# El siguiente código es gracias al usuario "bosplans" de www.mobileread.com
# http://www.mobileread.com/forums/showthread.php?t=130297
def get_article_url(self, article):
link = article.get('link', None)
if link is None:
return article
if link.split('/')[-1]=="story01.htm":
link=link.split('/')[-2]
a=['0B','0C','0D','0E','0F','0G','0N' ,'0L0S','0A']
b=['.' ,'/' ,'?' ,'-' ,'=' ,'&' ,'.com','www.','0']
for i in range(0,len(a)):
link=link.replace(a[i],b[i])
link="http://"+link
return link

View File

@ -16,6 +16,7 @@ class ZAOBAO(BasicNewsRecipe):
recursions = 1 recursions = 1
language = 'zh' language = 'zh'
encoding = 'gbk' encoding = 'gbk'
masthead_url = 'http://www.zaobao.com/ssi/images1/zblogo_original.gif'
# multithreaded_fetch = True # multithreaded_fetch = True
keep_only_tags = [ keep_only_tags = [

View File

@ -36,6 +36,14 @@
<div class="cbj_footer">{footer}</div> <div class="cbj_footer">{footer}</div>
</div> </div>
<hr class="cbj_kindle_banner_hr" /> <hr class="cbj_kindle_banner_hr" />
<!--
In addition you can add code to show the values of custom columns here.
The value is available as _column_name and the title as _column_name_label.
For example, if you have a custom column with label #genre, you can add it to
this template with:
<div>{_genre_label}: {_genre}</div>
-->
<div class="cbj_comments">{comments}</div> <div class="cbj_comments">{comments}</div>
</body> </body>
</html> </html>

View File

@ -12,14 +12,14 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-" "Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n" "devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-09-27 14:31+0000\n" "POT-Creation-Date: 2011-09-27 14:31+0000\n"
"PO-Revision-Date: 2011-11-04 23:01+0000\n" "PO-Revision-Date: 2011-11-13 15:24+0000\n"
"Last-Translator: Ferran Rius <frius64@hotmail.com>\n" "Last-Translator: Ferran Rius <frius64@hotmail.com>\n"
"Language-Team: Catalan <linux@softcatala.org>\n" "Language-Team: Catalan <linux@softcatala.org>\n"
"MIME-Version: 1.0\n" "MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n" "Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n" "Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-11-06 05:23+0000\n" "X-Launchpad-Export-Date: 2011-11-14 05:15+0000\n"
"X-Generator: Launchpad (build 14231)\n" "X-Generator: Launchpad (build 14277)\n"
"Language: ca\n" "Language: ca\n"
#. name for aaa #. name for aaa
@ -8572,43 +8572,43 @@ msgstr "Guntai"
#. name for gnu #. name for gnu
msgid "Gnau" msgid "Gnau"
msgstr "" msgstr "Gnau"
#. name for gnw #. name for gnw
msgid "Guaraní; Western Bolivian" msgid "Guaraní; Western Bolivian"
msgstr "" msgstr "Guaraní; bolivià occidental"
#. name for gnz #. name for gnz
msgid "Ganzi" msgid "Ganzi"
msgstr "" msgstr "Ganzi"
#. name for goa #. name for goa
msgid "Guro" msgid "Guro"
msgstr "" msgstr "Guro"
#. name for gob #. name for gob
msgid "Playero" msgid "Playero"
msgstr "" msgstr "Playero"
#. name for goc #. name for goc
msgid "Gorakor" msgid "Gorakor"
msgstr "" msgstr "Gorakor"
#. name for god #. name for god
msgid "Godié" msgid "Godié"
msgstr "" msgstr "Godié"
#. name for goe #. name for goe
msgid "Gongduk" msgid "Gongduk"
msgstr "" msgstr "Gongduk"
#. name for gof #. name for gof
msgid "Gofa" msgid "Gofa"
msgstr "" msgstr "Gofa"
#. name for gog #. name for gog
msgid "Gogo" msgid "Gogo"
msgstr "" msgstr "Gogo"
#. name for goh #. name for goh
msgid "German; Old High (ca. 750-1050)" msgid "German; Old High (ca. 750-1050)"
@ -8616,23 +8616,23 @@ msgstr "Alt alemany; antic (ca. 750-1050)"
#. name for goi #. name for goi
msgid "Gobasi" msgid "Gobasi"
msgstr "" msgstr "Gobasi"
#. name for goj #. name for goj
msgid "Gowlan" msgid "Gowlan"
msgstr "" msgstr "Gowlan"
#. name for gok #. name for gok
msgid "Gowli" msgid "Gowli"
msgstr "" msgstr "Gowli"
#. name for gol #. name for gol
msgid "Gola" msgid "Gola"
msgstr "" msgstr "Gola"
#. name for gom #. name for gom
msgid "Konkani; Goan" msgid "Konkani; Goan"
msgstr "" msgstr "Konkani; goanès"
#. name for gon #. name for gon
msgid "Gondi" msgid "Gondi"
@ -8640,71 +8640,71 @@ msgstr "Gondi"
#. name for goo #. name for goo
msgid "Gone Dau" msgid "Gone Dau"
msgstr "" msgstr "Gone Dau"
#. name for gop #. name for gop
msgid "Yeretuar" msgid "Yeretuar"
msgstr "" msgstr "Yeretuar"
#. name for goq #. name for goq
msgid "Gorap" msgid "Gorap"
msgstr "" msgstr "Gorap"
#. name for gor #. name for gor
msgid "Gorontalo" msgid "Gorontalo"
msgstr "" msgstr "Gorontalo"
#. name for gos #. name for gos
msgid "Gronings" msgid "Gronings"
msgstr "" msgstr "Gronings"
#. name for got #. name for got
msgid "Gothic" msgid "Gothic"
msgstr "" msgstr "Gòtic"
#. name for gou #. name for gou
msgid "Gavar" msgid "Gavar"
msgstr "" msgstr "Gavar"
#. name for gow #. name for gow
msgid "Gorowa" msgid "Gorowa"
msgstr "" msgstr "Gorowa"
#. name for gox #. name for gox
msgid "Gobu" msgid "Gobu"
msgstr "" msgstr "Gobu"
#. name for goy #. name for goy
msgid "Goundo" msgid "Goundo"
msgstr "" msgstr "Goundo"
#. name for goz #. name for goz
msgid "Gozarkhani" msgid "Gozarkhani"
msgstr "" msgstr "Gozarkhani"
#. name for gpa #. name for gpa
msgid "Gupa-Abawa" msgid "Gupa-Abawa"
msgstr "" msgstr "Gupa-Abawa"
#. name for gpn #. name for gpn
msgid "Taiap" msgid "Taiap"
msgstr "" msgstr "Taiap"
#. name for gqa #. name for gqa
msgid "Ga'anda" msgid "Ga'anda"
msgstr "" msgstr "Gaanda"
#. name for gqi #. name for gqi
msgid "Guiqiong" msgid "Guiqiong"
msgstr "" msgstr "Guiqiong"
#. name for gqn #. name for gqn
msgid "Guana (Brazil)" msgid "Guana (Brazil)"
msgstr "" msgstr "Guana (Brasil)"
#. name for gqr #. name for gqr
msgid "Gor" msgid "Gor"
msgstr "" msgstr "Gor"
#. name for gra #. name for gra
msgid "Garasia; Rajput" msgid "Garasia; Rajput"
@ -8720,19 +8720,19 @@ msgstr "Grec antic (fins el 1453)"
#. name for grd #. name for grd
msgid "Guruntum-Mbaaru" msgid "Guruntum-Mbaaru"
msgstr "" msgstr "Guruntum"
#. name for grg #. name for grg
msgid "Madi" msgid "Madi"
msgstr "" msgstr "Madi"
#. name for grh #. name for grh
msgid "Gbiri-Niragu" msgid "Gbiri-Niragu"
msgstr "" msgstr "Gbiri-Niragu"
#. name for gri #. name for gri
msgid "Ghari" msgid "Ghari"
msgstr "" msgstr "Ghari"
#. name for grj #. name for grj
msgid "Grebo; Southern" msgid "Grebo; Southern"
@ -8740,35 +8740,35 @@ msgstr "Grebo; meridional"
#. name for grm #. name for grm
msgid "Kota Marudu Talantang" msgid "Kota Marudu Talantang"
msgstr "" msgstr "Kota Marudu; Talantang"
#. name for grn #. name for grn
msgid "Guarani" msgid "Guarani"
msgstr "guaraní" msgstr "Guaraní"
#. name for gro #. name for gro
msgid "Groma" msgid "Groma"
msgstr "" msgstr "Groma"
#. name for grq #. name for grq
msgid "Gorovu" msgid "Gorovu"
msgstr "" msgstr "Gorovu"
#. name for grr #. name for grr
msgid "Taznatit" msgid "Taznatit"
msgstr "" msgstr "Taznatit"
#. name for grs #. name for grs
msgid "Gresi" msgid "Gresi"
msgstr "" msgstr "Gresi"
#. name for grt #. name for grt
msgid "Garo" msgid "Garo"
msgstr "" msgstr "Garo"
#. name for gru #. name for gru
msgid "Kistane" msgid "Kistane"
msgstr "" msgstr "Gurage; septentrional"
#. name for grv #. name for grv
msgid "Grebo; Central" msgid "Grebo; Central"
@ -8776,11 +8776,11 @@ msgstr "Grebo; central"
#. name for grw #. name for grw
msgid "Gweda" msgid "Gweda"
msgstr "" msgstr "Gweda"
#. name for grx #. name for grx
msgid "Guriaso" msgid "Guriaso"
msgstr "" msgstr "Guriaso"
#. name for gry #. name for gry
msgid "Grebo; Barclayville" msgid "Grebo; Barclayville"
@ -8788,7 +8788,7 @@ msgstr "Grebo; Barclayville"
#. name for grz #. name for grz
msgid "Guramalum" msgid "Guramalum"
msgstr "" msgstr "Guramalum"
#. name for gse #. name for gse
msgid "Ghanaian Sign Language" msgid "Ghanaian Sign Language"
@ -8800,7 +8800,7 @@ msgstr "Llenguatge de signes alemany"
#. name for gsl #. name for gsl
msgid "Gusilay" msgid "Gusilay"
msgstr "" msgstr "Gusilay"
#. name for gsm #. name for gsm
msgid "Guatemalan Sign Language" msgid "Guatemalan Sign Language"
@ -8808,7 +8808,7 @@ msgstr "Llenguatge de signes guatemaltec"
#. name for gsn #. name for gsn
msgid "Gusan" msgid "Gusan"
msgstr "" msgstr "Gusan"
#. name for gso #. name for gso
msgid "Gbaya; Southwest" msgid "Gbaya; Southwest"
@ -8816,7 +8816,7 @@ msgstr "Gbaya; Sudoccidental"
#. name for gsp #. name for gsp
msgid "Wasembo" msgid "Wasembo"
msgstr "" msgstr "Wasembo"
#. name for gss #. name for gss
msgid "Greek Sign Language" msgid "Greek Sign Language"
@ -8828,23 +8828,23 @@ msgstr "Alemany; suís"
#. name for gta #. name for gta
msgid "Guató" msgid "Guató"
msgstr "" msgstr "Guató"
#. name for gti #. name for gti
msgid "Gbati-ri" msgid "Gbati-ri"
msgstr "" msgstr "Gbati-ri"
#. name for gua #. name for gua
msgid "Shiki" msgid "Shiki"
msgstr "" msgstr "Shiki"
#. name for gub #. name for gub
msgid "Guajajára" msgid "Guajajára"
msgstr "" msgstr "Guajajara"
#. name for guc #. name for guc
msgid "Wayuu" msgid "Wayuu"
msgstr "" msgstr "Guajiro"
#. name for gud #. name for gud
msgid "Dida; Yocoboué" msgid "Dida; Yocoboué"
@ -8852,23 +8852,23 @@ msgstr "Dida; Yocoboué"
#. name for gue #. name for gue
msgid "Gurinji" msgid "Gurinji"
msgstr "" msgstr "Gurindji"
#. name for guf #. name for guf
msgid "Gupapuyngu" msgid "Gupapuyngu"
msgstr "" msgstr "Gupapuyngu"
#. name for gug #. name for gug
msgid "Guaraní; Paraguayan" msgid "Guaraní; Paraguayan"
msgstr "" msgstr "Guaraní; paraguaià"
#. name for guh #. name for guh
msgid "Guahibo" msgid "Guahibo"
msgstr "" msgstr "Guahibo"
#. name for gui #. name for gui
msgid "Guaraní; Eastern Bolivian" msgid "Guaraní; Eastern Bolivian"
msgstr "" msgstr "Guaraní; bolivià oriental"
#. name for guj #. name for guj
msgid "Gujarati" msgid "Gujarati"
@ -8876,7 +8876,7 @@ msgstr "gujarati"
#. name for guk #. name for guk
msgid "Gumuz" msgid "Gumuz"
msgstr "" msgstr "Gumús"
#. name for gul #. name for gul
msgid "Creole English; Sea Island" msgid "Creole English; Sea Island"
@ -8884,27 +8884,27 @@ msgstr "Anglès crioll; Sea Island"
#. name for gum #. name for gum
msgid "Guambiano" msgid "Guambiano"
msgstr "" msgstr "Guambià"
#. name for gun #. name for gun
msgid "Guaraní; Mbyá" msgid "Guaraní; Mbyá"
msgstr "" msgstr "Guaraní; Mbyà"
#. name for guo #. name for guo
msgid "Guayabero" msgid "Guayabero"
msgstr "" msgstr "Guayabero"
#. name for gup #. name for gup
msgid "Gunwinggu" msgid "Gunwinggu"
msgstr "" msgstr "Gunwinggu"
#. name for guq #. name for guq
msgid "Aché" msgid "Aché"
msgstr "" msgstr "Aché"
#. name for gur #. name for gur
msgid "Farefare" msgid "Farefare"
msgstr "" msgstr "Gurenne"
#. name for gus #. name for gus
msgid "Guinean Sign Language" msgid "Guinean Sign Language"
@ -8912,67 +8912,67 @@ msgstr "Llenguatge de signes guineà"
#. name for gut #. name for gut
msgid "Maléku Jaíka" msgid "Maléku Jaíka"
msgstr "" msgstr "Guatuso"
#. name for guu #. name for guu
msgid "Yanomamö" msgid "Yanomamö"
msgstr "" msgstr "Guaharibo"
#. name for guv #. name for guv
msgid "Gey" msgid "Gey"
msgstr "" msgstr "Gey"
#. name for guw #. name for guw
msgid "Gun" msgid "Gun"
msgstr "" msgstr "Gun-Gbe"
#. name for gux #. name for gux
msgid "Gourmanchéma" msgid "Gourmanchéma"
msgstr "" msgstr "Gourmanchéma"
#. name for guz #. name for guz
msgid "Gusii" msgid "Gusii"
msgstr "" msgstr "Gusí"
#. name for gva #. name for gva
msgid "Guana (Paraguay)" msgid "Guana (Paraguay)"
msgstr "" msgstr "Guana (Paraguai)"
#. name for gvc #. name for gvc
msgid "Guanano" msgid "Guanano"
msgstr "" msgstr "Guanano"
#. name for gve #. name for gve
msgid "Duwet" msgid "Duwet"
msgstr "" msgstr "Duwet"
#. name for gvf #. name for gvf
msgid "Golin" msgid "Golin"
msgstr "" msgstr "Golin"
#. name for gvj #. name for gvj
msgid "Guajá" msgid "Guajá"
msgstr "" msgstr "Guajà"
#. name for gvl #. name for gvl
msgid "Gulay" msgid "Gulay"
msgstr "" msgstr "Gulay"
#. name for gvm #. name for gvm
msgid "Gurmana" msgid "Gurmana"
msgstr "" msgstr "Gurmana"
#. name for gvn #. name for gvn
msgid "Kuku-Yalanji" msgid "Kuku-Yalanji"
msgstr "" msgstr "Kuku; Yalanji"
#. name for gvo #. name for gvo
msgid "Gavião Do Jiparaná" msgid "Gavião Do Jiparaná"
msgstr "" msgstr "Gaviao Jiparanà"
#. name for gvp #. name for gvp
msgid "Gavião; Pará" msgid "Gavião; Pará"
msgstr "" msgstr "Gaviao Parà"
#. name for gvr #. name for gvr
msgid "Gurung; Western" msgid "Gurung; Western"
@ -8980,75 +8980,75 @@ msgstr "Gurung; occidental"
#. name for gvs #. name for gvs
msgid "Gumawana" msgid "Gumawana"
msgstr "" msgstr "Gumawana"
#. name for gvy #. name for gvy
msgid "Guyani" msgid "Guyani"
msgstr "" msgstr "Guyani"
#. name for gwa #. name for gwa
msgid "Mbato" msgid "Mbato"
msgstr "" msgstr "Mbato"
#. name for gwb #. name for gwb
msgid "Gwa" msgid "Gwa"
msgstr "" msgstr "Gwa"
#. name for gwc #. name for gwc
msgid "Kalami" msgid "Kalami"
msgstr "" msgstr "Kalami"
#. name for gwd #. name for gwd
msgid "Gawwada" msgid "Gawwada"
msgstr "" msgstr "Gawwada"
#. name for gwe #. name for gwe
msgid "Gweno" msgid "Gweno"
msgstr "" msgstr "Gweno"
#. name for gwf #. name for gwf
msgid "Gowro" msgid "Gowro"
msgstr "" msgstr "Gowro"
#. name for gwg #. name for gwg
msgid "Moo" msgid "Moo"
msgstr "" msgstr "Moo"
#. name for gwi #. name for gwi
msgid "Gwichʼin" msgid "Gwichʼin"
msgstr "" msgstr "Gwichin"
#. name for gwj #. name for gwj
msgid "/Gwi" msgid "/Gwi"
msgstr "" msgstr "Gwi"
#. name for gwn #. name for gwn
msgid "Gwandara" msgid "Gwandara"
msgstr "" msgstr "Gwandara"
#. name for gwr #. name for gwr
msgid "Gwere" msgid "Gwere"
msgstr "" msgstr "Gwere"
#. name for gwt #. name for gwt
msgid "Gawar-Bati" msgid "Gawar-Bati"
msgstr "" msgstr "Gawar-Bati"
#. name for gwu #. name for gwu
msgid "Guwamu" msgid "Guwamu"
msgstr "" msgstr "Guwamu"
#. name for gww #. name for gww
msgid "Kwini" msgid "Kwini"
msgstr "" msgstr "Goonan"
#. name for gwx #. name for gwx
msgid "Gua" msgid "Gua"
msgstr "" msgstr "Gua"
#. name for gxx #. name for gxx
msgid "Wè Southern" msgid "Wè Southern"
msgstr "" msgstr "We; meridional"
#. name for gya #. name for gya
msgid "Gbaya; Northwest" msgid "Gbaya; Northwest"
@ -9056,35 +9056,35 @@ msgstr "Gbaya; Nordoccidental"
#. name for gyb #. name for gyb
msgid "Garus" msgid "Garus"
msgstr "" msgstr "Garus"
#. name for gyd #. name for gyd
msgid "Kayardild" msgid "Kayardild"
msgstr "" msgstr "Gayardilt"
#. name for gye #. name for gye
msgid "Gyem" msgid "Gyem"
msgstr "" msgstr "Gyem"
#. name for gyf #. name for gyf
msgid "Gungabula" msgid "Gungabula"
msgstr "" msgstr "Gungabula"
#. name for gyg #. name for gyg
msgid "Gbayi" msgid "Gbayi"
msgstr "" msgstr "Gbayi"
#. name for gyi #. name for gyi
msgid "Gyele" msgid "Gyele"
msgstr "" msgstr "Gyele"
#. name for gyl #. name for gyl
msgid "Gayil" msgid "Gayil"
msgstr "" msgstr "Galila"
#. name for gym #. name for gym
msgid "Ngäbere" msgid "Ngäbere"
msgstr "" msgstr "Ngabere"
#. name for gyn #. name for gyn
msgid "Creole English; Guyanese" msgid "Creole English; Guyanese"
@ -9092,27 +9092,27 @@ msgstr "Creole English; Guyana"
#. name for gyr #. name for gyr
msgid "Guarayu" msgid "Guarayu"
msgstr "" msgstr "Guaraiú"
#. name for gyy #. name for gyy
msgid "Gunya" msgid "Gunya"
msgstr "" msgstr "Gunya"
#. name for gza #. name for gza
msgid "Ganza" msgid "Ganza"
msgstr "" msgstr "Ganza"
#. name for gzi #. name for gzi
msgid "Gazi" msgid "Gazi"
msgstr "" msgstr "Gazi"
#. name for gzn #. name for gzn
msgid "Gane" msgid "Gane"
msgstr "" msgstr "Gane"
#. name for haa #. name for haa
msgid "Han" msgid "Han"
msgstr "" msgstr "Han"
#. name for hab #. name for hab
msgid "Hanoi Sign Language" msgid "Hanoi Sign Language"
@ -9120,11 +9120,11 @@ msgstr "Llenguatge de signes de Hanoi"
#. name for hac #. name for hac
msgid "Gurani" msgid "Gurani"
msgstr "" msgstr "Hawrami"
#. name for had #. name for had
msgid "Hatam" msgid "Hatam"
msgstr "" msgstr "Hatam"
#. name for hae #. name for hae
msgid "Oromo; Eastern" msgid "Oromo; Eastern"
@ -9136,19 +9136,19 @@ msgstr "Llenguatge de signes Haipong"
#. name for hag #. name for hag
msgid "Hanga" msgid "Hanga"
msgstr "" msgstr "Hanga"
#. name for hah #. name for hah
msgid "Hahon" msgid "Hahon"
msgstr "" msgstr "Hahon"
#. name for hai #. name for hai
msgid "Haida" msgid "Haida"
msgstr "" msgstr "Haida"
#. name for haj #. name for haj
msgid "Hajong" msgid "Hajong"
msgstr "" msgstr "Hajong"
#. name for hak #. name for hak
msgid "Chinese; Hakka" msgid "Chinese; Hakka"
@ -9156,11 +9156,11 @@ msgstr "Xinès; Hakka"
#. name for hal #. name for hal
msgid "Halang" msgid "Halang"
msgstr "" msgstr "Halang"
#. name for ham #. name for ham
msgid "Hewa" msgid "Hewa"
msgstr "" msgstr "Hewa"
#. name for han #. name for han
msgid "Hangaza" msgid "Hangaza"
@ -18216,7 +18216,7 @@ msgstr ""
#. name for nhd #. name for nhd
msgid "Guaraní; Ava" msgid "Guaraní; Ava"
msgstr "" msgstr "Guaraní; Ava"
#. name for nhe #. name for nhe
msgid "Nahuatl; Eastern Huasteca" msgid "Nahuatl; Eastern Huasteca"
@ -22916,7 +22916,7 @@ msgstr ""
#. name for sgw #. name for sgw
msgid "Sebat Bet Gurage" msgid "Sebat Bet Gurage"
msgstr "" msgstr "Gurage; occidental"
#. name for sgx #. name for sgx
msgid "Sierra Leone Sign Language" msgid "Sierra Leone Sign Language"
@ -26588,7 +26588,7 @@ msgstr ""
#. name for ugb #. name for ugb
msgid "Kuku-Ugbanh" msgid "Kuku-Ugbanh"
msgstr "" msgstr "Kuku; Ugbanh"
#. name for uge #. name for uge
msgid "Ughele" msgid "Ughele"
@ -26984,7 +26984,7 @@ msgstr ""
#. name for uwa #. name for uwa
msgid "Kuku-Uwanh" msgid "Kuku-Uwanh"
msgstr "" msgstr "Kuku; Uwanh"
#. name for uya #. name for uya
msgid "Doko-Uyanga" msgid "Doko-Uyanga"
@ -27564,7 +27564,7 @@ msgstr ""
#. name for wec #. name for wec
msgid "Wè Western" msgid "Wè Western"
msgstr "" msgstr "We; occidental"
#. name for wed #. name for wed
msgid "Wedau" msgid "Wedau"
@ -27932,7 +27932,7 @@ msgstr ""
#. name for wob #. name for wob
msgid "Wè Northern" msgid "Wè Northern"
msgstr "" msgstr "We; septentrional"
#. name for woc #. name for woc
msgid "Wogeo" msgid "Wogeo"
@ -28716,7 +28716,7 @@ msgstr ""
#. name for xmh #. name for xmh
msgid "Kuku-Muminh" msgid "Kuku-Muminh"
msgstr "" msgstr "Kuku: Muminh"
#. name for xmj #. name for xmj
msgid "Majera" msgid "Majera"
@ -28744,11 +28744,11 @@ msgstr ""
#. name for xmp #. name for xmp
msgid "Kuku-Mu'inh" msgid "Kuku-Mu'inh"
msgstr "" msgstr "Kuku; Mu'inh"
#. name for xmq #. name for xmq
msgid "Kuku-Mangk" msgid "Kuku-Mangk"
msgstr "" msgstr "Kuku; Mangk"
#. name for xmr #. name for xmr
msgid "Meroitic" msgid "Meroitic"

View File

@ -9,13 +9,13 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-" "Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n" "devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-09-27 14:31+0000\n" "POT-Creation-Date: 2011-09-27 14:31+0000\n"
"PO-Revision-Date: 2011-11-10 07:13+0000\n" "PO-Revision-Date: 2011-11-12 07:52+0000\n"
"Last-Translator: Devilinside <Unknown>\n" "Last-Translator: Devilinside <Unknown>\n"
"Language-Team: Hungarian <debian-l10n-hungarian@lists.d.o>\n" "Language-Team: Hungarian <debian-l10n-hungarian@lists.d.o>\n"
"MIME-Version: 1.0\n" "MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n" "Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n" "Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-11-11 04:52+0000\n" "X-Launchpad-Export-Date: 2011-11-13 05:48+0000\n"
"X-Generator: Launchpad (build 14277)\n" "X-Generator: Launchpad (build 14277)\n"
"X-Poedit-Country: HUNGARY\n" "X-Poedit-Country: HUNGARY\n"
"Language: hu\n" "Language: hu\n"
@ -4969,7 +4969,7 @@ msgstr ""
#. name for cha #. name for cha
msgid "Chamorro" msgid "Chamorro"
msgstr "chamorro" msgstr "csamorro"
#. name for chb #. name for chb
msgid "Chibcha" msgid "Chibcha"
@ -19625,7 +19625,7 @@ msgstr ""
#. name for oco #. name for oco
msgid "Cornish; Old" msgid "Cornish; Old"
msgstr "" msgstr "cornwalli; ócornwalli"
#. name for ocu #. name for ocu
msgid "Matlatzinca; Atzingo" msgid "Matlatzinca; Atzingo"

View File

@ -16,7 +16,7 @@ msgstr ""
"MIME-Version: 1.0\n" "MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n" "Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n" "Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-11-11 04:53+0000\n" "X-Launchpad-Export-Date: 2011-11-12 04:48+0000\n"
"X-Generator: Launchpad (build 14277)\n" "X-Generator: Launchpad (build 14277)\n"
"Language: tr\n" "Language: tr\n"

View File

@ -6,7 +6,7 @@ __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en' __docformat__ = 'restructuredtext en'
import os, re, cStringIO, base64, httplib, subprocess, hashlib, shutil, time, \ import os, re, cStringIO, base64, httplib, subprocess, hashlib, shutil, time, \
glob, stat glob, stat, sys
from subprocess import check_call from subprocess import check_call
from tempfile import NamedTemporaryFile, mkdtemp from tempfile import NamedTemporaryFile, mkdtemp
from zipfile import ZipFile from zipfile import ZipFile
@ -58,6 +58,47 @@ class ReUpload(Command): # {{{
os.remove(x) os.remove(x)
# }}} # }}}
class ReadFileWithProgressReporting(file): # {{{
def __init__(self, path, mode='rb'):
file.__init__(self, path, mode)
self.seek(0, os.SEEK_END)
self._total = self.tell()
self.seek(0)
self.start_time = time.time()
def __len__(self):
return self._total
def read(self, size):
data = file.read(self, size)
if data:
self.report_progress(len(data))
return data
def report_progress(self, size):
sys.stdout.write(b'\x1b[s')
sys.stdout.write(b'\x1b[K')
frac = float(self.tell())/self._total
mb_pos = self.tell()/float(1024**2)
mb_tot = self._total/float(1024**2)
kb_pos = self.tell()/1024.0
kb_rate = kb_pos/(time.time()-self.start_time)
bit_rate = kb_rate * 1024
eta = int((self._total - self.tell())/bit_rate) + 1
eta_m, eta_s = eta / 60, eta % 60
sys.stdout.write(
' %.1f%% %.1f/%.1fMB %.1f KB/sec %d minutes, %d seconds left'%(
frac*100, mb_pos, mb_tot, kb_rate, eta_m, eta_s))
sys.stdout.write(b'\x1b[u')
if self.tell() >= self._total:
sys.stdout.write('\n')
t = int(time.time() - self.start_time) + 1
print ('Upload took %d minutes and %d seconds at %.1f KB/sec' % (
t/60, t%60, kb_rate))
sys.stdout.flush()
# }}}
class UploadToGoogleCode(Command): # {{{ class UploadToGoogleCode(Command): # {{{
USERNAME = 'kovidgoyal' USERNAME = 'kovidgoyal'
@ -92,7 +133,7 @@ class UploadToGoogleCode(Command): # {{{
self.upload_one(src) self.upload_one(src)
def upload_one(self, fname): def upload_one(self, fname):
self.info('Uploading', fname) self.info('\nUploading', fname)
typ = 'Type-' + ('Source' if fname.endswith('.gz') else 'Archive' if typ = 'Type-' + ('Source' if fname.endswith('.gz') else 'Archive' if
fname.endswith('.zip') else 'Installer') fname.endswith('.zip') else 'Installer')
ext = os.path.splitext(fname)[1][1:] ext = os.path.splitext(fname)[1][1:]
@ -102,7 +143,7 @@ class UploadToGoogleCode(Command): # {{{
start = time.time() start = time.time()
path = self.upload(os.path.abspath(fname), desc, path = self.upload(os.path.abspath(fname), desc,
labels=[typ, op, 'Featured']) labels=[typ, op, 'Featured'])
self.info('\tUploaded to:', path, 'in', int(time.time() - start), self.info('Uploaded to:', path, 'in', int(time.time() - start),
'seconds') 'seconds')
return path return path
@ -198,9 +239,8 @@ class UploadToGoogleCode(Command): # {{{
# Now add the file itself # Now add the file itself
file_name = os.path.basename(file_path) file_name = os.path.basename(file_path)
f = open(file_path, 'rb') with open(file_path, 'rb') as f:
file_content = f.read() file_content = f.read()
f.close()
body.extend( body.extend(
['--' + BOUNDARY, ['--' + BOUNDARY,
@ -230,10 +270,17 @@ class UploadToGoogleCode(Command): # {{{
'Content-Type': content_type, 'Content-Type': content_type,
} }
server = httplib.HTTPSConnection(self.UPLOAD_HOST) with NamedTemporaryFile(delete=False) as f:
server.request('POST', upload_uri, body, headers) f.write(body)
resp = server.getresponse()
server.close() try:
body = ReadFileWithProgressReporting(f.name)
server = httplib.HTTPSConnection(self.UPLOAD_HOST)
server.request('POST', upload_uri, body, headers)
resp = server.getresponse()
server.close()
finally:
os.remove(f.name)
if resp.status == 201: if resp.status == 201:
return resp.getheader('Location') return resp.getheader('Location')
@ -265,7 +312,7 @@ class UploadToSourceForge(Command): # {{{
if not os.path.exists(x): continue if not os.path.exists(x): continue
start = time.time() start = time.time()
self.info('Uploading', x) self.info('Uploading', x)
check_call(['rsync', '-v', '-e', 'ssh -x', x, check_call(['rsync', '-z', '--progress', '-e', 'ssh -x', x,
'%s,%s@frs.sourceforge.net:%s'%(self.USERNAME, self.PROJECT, '%s,%s@frs.sourceforge.net:%s'%(self.USERNAME, self.PROJECT,
self.rdir+'/')]) self.rdir+'/')])
print 'Uploaded in', int(time.time() - start), 'seconds' print 'Uploaded in', int(time.time() - start), 'seconds'
@ -376,7 +423,8 @@ class UploadUserManual(Command): # {{{
for x in glob.glob(self.j(path, '*')): for x in glob.glob(self.j(path, '*')):
self.build_plugin_example(x) self.build_plugin_example(x)
check_call(' '.join(['scp', '-r', 'src/calibre/manual/.build/html/*', check_call(' '.join(['rsync', '-z', '-r', '--progress',
'src/calibre/manual/.build/html/',
'bugs:%s'%USER_MANUAL]), shell=True) 'bugs:%s'%USER_MANUAL]), shell=True)
# }}} # }}}

View File

@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net' __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en' __docformat__ = 'restructuredtext en'
__appname__ = u'calibre' __appname__ = u'calibre'
numeric_version = (0, 8, 26) numeric_version = (0, 8, 27)
__version__ = u'.'.join(map(unicode, numeric_version)) __version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>" __author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

View File

@ -554,7 +554,8 @@ from calibre.devices.eb600.driver import (EB600, COOL_ER, SHINEBOOK,
from calibre.devices.iliad.driver import ILIAD from calibre.devices.iliad.driver import ILIAD
from calibre.devices.irexdr.driver import IREXDR1000, IREXDR800 from calibre.devices.irexdr.driver import IREXDR1000, IREXDR800
from calibre.devices.jetbook.driver import JETBOOK, MIBUK, JETBOOK_MINI from calibre.devices.jetbook.driver import JETBOOK, MIBUK, JETBOOK_MINI
from calibre.devices.kindle.driver import KINDLE, KINDLE2, KINDLE_DX from calibre.devices.kindle.driver import (KINDLE, KINDLE2, KINDLE_DX,
KINDLE_FIRE)
from calibre.devices.nook.driver import NOOK, NOOK_COLOR from calibre.devices.nook.driver import NOOK, NOOK_COLOR
from calibre.devices.prs505.driver import PRS505 from calibre.devices.prs505.driver import PRS505
from calibre.devices.prst1.driver import PRST1 from calibre.devices.prst1.driver import PRST1
@ -656,9 +657,7 @@ plugins += [
MIBUK, MIBUK,
SHINEBOOK, SHINEBOOK,
POCKETBOOK360, POCKETBOOK301, POCKETBOOK602, POCKETBOOK701, POCKETBOOK360P, POCKETBOOK360, POCKETBOOK301, POCKETBOOK602, POCKETBOOK701, POCKETBOOK360P,
KINDLE, KINDLE, KINDLE2, KINDLE_DX, KINDLE_FIRE,
KINDLE2,
KINDLE_DX,
NOOK, NOOK_COLOR, NOOK, NOOK_COLOR,
PRS505, PRST1, PRS505, PRST1,
ANDROID, S60, WEBOS, ANDROID, S60, WEBOS,

View File

@ -652,6 +652,15 @@ class KindleDXOutput(OutputProfile):
return u'%s <br/><span style="color: white">%s</span>' % (', '.join(tags), return u'%s <br/><span style="color: white">%s</span>' % (', '.join(tags),
'ttt '.join(tags)+'ttt ') 'ttt '.join(tags)+'ttt ')
class KindleFireOutput(KindleDXOutput):
name = 'Kindle Fire'
short_name = 'kindle_fire'
description = _('This profile is intended for the Amazon Kindle Fire.')
screen_size = (570, 1016)
dpi = 169.0
comic_screen_size = (570, 1016)
class IlliadOutput(OutputProfile): class IlliadOutput(OutputProfile):

View File

@ -377,3 +377,24 @@ class KINDLE_DX(KINDLE2):
PRODUCT_ID = [0x0003] PRODUCT_ID = [0x0003]
BCD = [0x0100] BCD = [0x0100]
class KINDLE_FIRE(KINDLE2):
name = 'Kindle Fire Device Interface'
description = _('Communicate with the Kindle Fire')
gui_name = 'Fire'
PRODUCT_ID = [0x0006]
BCD = [0x216, 0x100]
EBOOK_DIR_MAIN = 'Documents'
SUPPORTS_SUB_DIRS = False
VENDOR_NAME = 'AMAZON'
WINDOWS_MAIN_MEM = 'KINDLE'
def get_main_ebook_dir(self, for_upload=False):
if for_upload:
return self.EBOOK_DIR_MAIN
return ''

View File

@ -83,10 +83,10 @@ class NOOK(USBMS):
class NOOK_COLOR(NOOK): class NOOK_COLOR(NOOK):
description = _('Communicate with the Nook Color and TSR eBook readers.') description = _('Communicate with the Nook Color and TSR eBook readers.')
PRODUCT_ID = [0x002, 0x003] PRODUCT_ID = [0x002, 0x003, 0x004]
BCD = [0x216] BCD = [0x216]
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'EBOOK_DISK' WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['EBOOK_DISK', 'NOOK_TABLET']
EBOOK_DIR_MAIN = 'My Files' EBOOK_DIR_MAIN = 'My Files'
NEWS_IN_FOLDER = False NEWS_IN_FOLDER = False
@ -105,4 +105,3 @@ class NOOK_COLOR(NOOK):
return USBMS.create_upload_path(self, path, mdata, fname, return USBMS.create_upload_path(self, path, mdata, fname,
create_dirs=create_dirs) create_dirs=create_dirs)

View File

@ -13,9 +13,11 @@ Device driver for the SONY T1 devices
import os, time, re import os, time, re
import sqlite3 as sqlite import sqlite3 as sqlite
from sqlite3 import DatabaseError
from contextlib import closing from contextlib import closing
from datetime import date from datetime import date
from calibre.devices.errors import DeviceError
from calibre.devices.usbms.driver import USBMS, debug_print from calibre.devices.usbms.driver import USBMS, debug_print
from calibre.devices.usbms.device import USBDevice from calibre.devices.usbms.device import USBDevice
from calibre.devices.usbms.books import CollectionsBookList from calibre.devices.usbms.books import CollectionsBookList
@ -275,11 +277,19 @@ class PRST1(USBMS):
refresh_covers = opts.extra_customization[self.OPT_REFRESH_COVERS] refresh_covers = opts.extra_customization[self.OPT_REFRESH_COVERS]
use_sony_authors = opts.extra_customization[self.OPT_USE_SONY_AUTHORS] use_sony_authors = opts.extra_customization[self.OPT_USE_SONY_AUTHORS]
cursor = connection.cursor() try:
cursor = connection.cursor()
# Get existing books # Get existing books
query = 'SELECT file_path, _id FROM books' query = 'SELECT file_path, _id FROM books'
cursor.execute(query) cursor.execute(query)
except DatabaseError:
raise DeviceError('The SONY database is corrupted. '
' Delete the file %s on your reader and then disconnect '
' reconnect it. If you are using an SD card, you '
' should delete the file on the card as well. Note that '
' deleting this file may cause your reader to forget '
' any notes/highlights, etc.')
db_books = {} db_books = {}
for i, row in enumerate(cursor): for i, row in enumerate(cursor):

View File

@ -929,7 +929,7 @@ class MobiReader(object):
for match in link_pattern.finditer(self.mobi_html): for match in link_pattern.finditer(self.mobi_html):
positions.add(int(match.group(1))) positions.add(int(match.group(1)))
pos = 0 pos = 0
self.processed_html = '' processed_html = cStringIO.StringIO()
end_tag_re = re.compile(r'<\s*/') end_tag_re = re.compile(r'<\s*/')
for end in sorted(positions): for end in sorted(positions):
if end == 0: if end == 0:
@ -947,12 +947,14 @@ class MobiReader(object):
end = r end = r
else: else:
end = r + 1 end = r + 1
self.processed_html += self.mobi_html[pos:end] + (anchor % oend) processed_html.write(self.mobi_html[pos:end] + (anchor % oend))
pos = end pos = end
self.processed_html += self.mobi_html[pos:] processed_html.write(self.mobi_html[pos:])
processed_html = processed_html.getvalue()
# Remove anchors placed inside entities # Remove anchors placed inside entities
self.processed_html = re.sub(r'&([^;]*?)(<a id="filepos\d+"></a>)([^;]*);', self.processed_html = re.sub(r'&([^;]*?)(<a id="filepos\d+"></a>)([^;]*);',
r'&\1\3;\2', self.processed_html) r'&\1\3;\2', processed_html)
def extract_images(self, processed_records, output_dir): def extract_images(self, processed_records, output_dir):

View File

@ -171,6 +171,14 @@ def render_jacket(mi, output_profile,
comments=comments, comments=comments,
footer='' footer=''
) )
for key in mi.custom_field_keys():
try:
display_name, val = mi.format_field_extended(key)[:2]
key = key.replace('#', '_')
args[key] = val
args[key+'_label'] = display_name
except:
pass
generated_html = P('jacket/template.xhtml', generated_html = P('jacket/template.xhtml',
data=True).decode('utf-8').format(**args) data=True).decode('utf-8').format(**args)

View File

@ -175,6 +175,8 @@ def _config(): # {{{
help='Search history for the plugin preferences') help='Search history for the plugin preferences')
c.add_opt('shortcuts_search_history', default=[], c.add_opt('shortcuts_search_history', default=[],
help='Search history for the keyboard preferences') help='Search history for the keyboard preferences')
c.add_opt('jobs_search_history', default=[],
help='Search history for the keyboard preferences')
c.add_opt('tweaks_search_history', default=[], c.add_opt('tweaks_search_history', default=[],
help='Search history for tweaks') help='Search history for tweaks')
c.add_opt('worker_limit', default=6, c.add_opt('worker_limit', default=6,

View File

@ -17,8 +17,36 @@
<iconset resource="../../../../resources/images.qrc"> <iconset resource="../../../../resources/images.qrc">
<normaloff>:/images/jobs.png</normaloff>:/images/jobs.png</iconset> <normaloff>:/images/jobs.png</normaloff>:/images/jobs.png</iconset>
</property> </property>
<layout class="QVBoxLayout"> <layout class="QGridLayout" name="gridLayout">
<item> <item row="0" column="0" colspan="2">
<layout class="QHBoxLayout" name="horizontalLayout">
<item>
<widget class="SearchBox2" name="search"/>
</item>
<item>
<widget class="QToolButton" name="search_button">
<property name="toolTip">
<string>Find next match</string>
</property>
<property name="text">
<string>&amp;Search</string>
</property>
</widget>
</item>
<item>
<widget class="QToolButton" name="clear_button">
<property name="toolTip">
<string>Find previous match</string>
</property>
<property name="icon">
<iconset resource="../../../../resources/images.qrc">
<normaloff>:/images/clear_left.png</normaloff>:/images/clear_left.png</iconset>
</property>
</widget>
</item>
</layout>
</item>
<item row="1" column="0" colspan="2">
<widget class="QTableView" name="jobs_view"> <widget class="QTableView" name="jobs_view">
<property name="contextMenuPolicy"> <property name="contextMenuPolicy">
<enum>Qt::NoContextMenu</enum> <enum>Qt::NoContextMenu</enum>
@ -40,29 +68,57 @@
</property> </property>
</widget> </widget>
</item> </item>
<item> <item row="2" column="0">
<widget class="QPushButton" name="kill_button"> <widget class="QPushButton" name="kill_button">
<property name="text"> <property name="text">
<string>&amp;Stop selected jobs</string> <string>&amp;Stop selected jobs</string>
</property> </property>
</widget> </widget>
</item> </item>
<item> <item row="2" column="1">
<widget class="QPushButton" name="hide_button">
<property name="text">
<string>&amp;Hide selected jobs</string>
</property>
</widget>
</item>
<item row="3" column="0">
<widget class="QPushButton" name="details_button"> <widget class="QPushButton" name="details_button">
<property name="text"> <property name="text">
<string>Show job &amp;details</string> <string>Show job &amp;details</string>
</property> </property>
</widget> </widget>
</item> </item>
<item> <item row="3" column="1">
<widget class="QPushButton" name="show_button">
<property name="text">
<string>Show &amp;all jobs</string>
</property>
</widget>
</item>
<item row="4" column="0">
<widget class="QPushButton" name="stop_all_jobs_button"> <widget class="QPushButton" name="stop_all_jobs_button">
<property name="text"> <property name="text">
<string>Stop &amp;all non device jobs</string> <string>Stop &amp;all non device jobs</string>
</property> </property>
</widget> </widget>
</item> </item>
<item row="4" column="1">
<widget class="QPushButton" name="hide_all_button">
<property name="text">
<string>&amp;Hide all jobs</string>
</property>
</widget>
</item>
</layout> </layout>
</widget> </widget>
<customwidgets>
<customwidget>
<class>SearchBox2</class>
<extends>QComboBox</extends>
<header>calibre/gui2/search_box.h</header>
</customwidget>
</customwidgets>
<resources> <resources>
<include location="../../../../resources/images.qrc"/> <include location="../../../../resources/images.qrc"/>
</resources> </resources>

View File

@ -23,6 +23,8 @@ from calibre.utils.icu import sort_key, capitalize
from calibre.utils.config import prefs, tweaks from calibre.utils.config import prefs, tweaks
from calibre.utils.magick.draw import identify_data from calibre.utils.magick.draw import identify_data
from calibre.utils.date import qt_to_dt from calibre.utils.date import qt_to_dt
from calibre.ptempfile import SpooledTemporaryFile
from calibre.db import SPOOL_SIZE
def get_cover_data(stream, ext): # {{{ def get_cover_data(stream, ext): # {{{
from calibre.ebooks.metadata.meta import get_metadata from calibre.ebooks.metadata.meta import get_metadata
@ -134,11 +136,12 @@ class MyBlockingBusy(QDialog): # {{{
do_autonumber, do_remove_format, remove_format, do_swap_ta, \ do_autonumber, do_remove_format, remove_format, do_swap_ta, \
do_remove_conv, do_auto_author, series, do_series_restart, \ do_remove_conv, do_auto_author, series, do_series_restart, \
series_start_value, do_title_case, cover_action, clear_series, \ series_start_value, do_title_case, cover_action, clear_series, \
pubdate, adddate, do_title_sort, languages, clear_languages = self.args pubdate, adddate, do_title_sort, languages, clear_languages, \
restore_original = self.args
# first loop: do author and title. These will commit at the end of each # first loop: All changes that modify the filesystem and commit
# operation, because each operation modifies the file system. We want to # immediately. We want to
# try hard to keep the DB and the file system in sync, even in the face # try hard to keep the DB and the file system in sync, even in the face
# of exceptions or forced exits. # of exceptions or forced exits.
if self.current_phase == 1: if self.current_phase == 1:
@ -196,6 +199,27 @@ class MyBlockingBusy(QDialog): # {{{
if covers: if covers:
self.db.set_cover(id, covers[-1][0]) self.db.set_cover(id, covers[-1][0])
covers = [] covers = []
if do_remove_format:
self.db.remove_format(id, remove_format, index_is_id=True,
notify=False, commit=True)
if restore_original:
formats = self.db.formats(id, index_is_id=True)
formats = formats.split(',') if formats else []
originals = [x.upper() for x in formats if
x.upper().startswith('ORIGINAL_')]
for ofmt in originals:
fmt = ofmt.replace('ORIGINAL_', '')
with SpooledTemporaryFile(SPOOL_SIZE) as stream:
self.db.copy_format_to(id, ofmt, stream,
index_is_id=True)
stream.seek(0)
self.db.add_format(id, fmt, stream, index_is_id=True,
notify=False)
self.db.remove_format(id, ofmt, index_is_id=True,
notify=False, commit=True)
elif self.current_phase == 2: elif self.current_phase == 2:
# All of these just affect the DB, so we can tolerate a total rollback # All of these just affect the DB, so we can tolerate a total rollback
if do_auto_author: if do_auto_author:
@ -233,9 +257,6 @@ class MyBlockingBusy(QDialog): # {{{
num = next if do_autonumber and series else 1.0 num = next if do_autonumber and series else 1.0
self.db.set_series_index(id, num, notify=False, commit=False) self.db.set_series_index(id, num, notify=False, commit=False)
if do_remove_format:
self.db.remove_format(id, remove_format, index_is_id=True, notify=False, commit=False)
if do_remove_conv: if do_remove_conv:
self.db.delete_conversion_options(id, 'PIPE', commit=False) self.db.delete_conversion_options(id, 'PIPE', commit=False)
@ -340,6 +361,7 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
self.restoreGeometry(bytes(geom)) self.restoreGeometry(bytes(geom))
self.languages.init_langs(self.db) self.languages.init_langs(self.db)
self.languages.setEditText('') self.languages.setEditText('')
self.authors.setFocus(Qt.OtherFocusReason)
self.exec_() self.exec_()
def save_state(self, *args): def save_state(self, *args):
@ -935,6 +957,7 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
do_title_case = self.change_title_to_title_case.isChecked() do_title_case = self.change_title_to_title_case.isChecked()
do_title_sort = self.update_title_sort.isChecked() do_title_sort = self.update_title_sort.isChecked()
clear_languages = self.clear_languages.isChecked() clear_languages = self.clear_languages.isChecked()
restore_original = self.restore_original.isChecked()
languages = self.languages.lang_codes languages = self.languages.lang_codes
pubdate = adddate = None pubdate = adddate = None
if self.apply_pubdate.isChecked(): if self.apply_pubdate.isChecked():
@ -954,7 +977,8 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
do_autonumber, do_remove_format, remove_format, do_swap_ta, do_autonumber, do_remove_format, remove_format, do_swap_ta,
do_remove_conv, do_auto_author, series, do_series_restart, do_remove_conv, do_auto_author, series, do_series_restart,
series_start_value, do_title_case, cover_action, clear_series, series_start_value, do_title_case, cover_action, clear_series,
pubdate, adddate, do_title_sort, languages, clear_languages) pubdate, adddate, do_title_sort, languages, clear_languages,
restore_original)
bb = MyBlockingBusy(_('Applying changes to %d books.\nPhase {0} {1}%%.') bb = MyBlockingBusy(_('Applying changes to %d books.\nPhase {0} {1}%%.')
%len(self.ids), args, self.db, self.ids, %len(self.ids), args, self.db, self.ids,

View File

@ -44,8 +44,8 @@
<rect> <rect>
<x>0</x> <x>0</x>
<y>0</y> <y>0</y>
<width>954</width> <width>950</width>
<height>584</height> <height>576</height>
</rect> </rect>
</property> </property>
<layout class="QVBoxLayout" name="verticalLayout_2"> <layout class="QVBoxLayout" name="verticalLayout_2">
@ -443,7 +443,30 @@ from the value in the box</string>
</property> </property>
</widget> </widget>
</item> </item>
<item row="13" column="0"> <item row="11" column="0">
<widget class="QLabel" name="label_11">
<property name="text">
<string>&amp;Languages:</string>
</property>
<property name="alignment">
<set>Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter</set>
</property>
<property name="buddy">
<cstring>languages</cstring>
</property>
</widget>
</item>
<item row="11" column="1">
<widget class="LanguagesEdit" name="languages"/>
</item>
<item row="11" column="2">
<widget class="QCheckBox" name="clear_languages">
<property name="text">
<string>Remove &amp;all</string>
</property>
</widget>
</item>
<item row="12" column="0">
<widget class="QLabel" name="label_5"> <widget class="QLabel" name="label_5">
<property name="text"> <property name="text">
<string>Remove &amp;format:</string> <string>Remove &amp;format:</string>
@ -453,17 +476,44 @@ from the value in the box</string>
</property> </property>
</widget> </widget>
</item> </item>
<item row="13" column="1"> <item row="12" column="1">
<widget class="QComboBox" name="remove_format"> <layout class="QHBoxLayout" name="horizontalLayout_7">
<property name="maximumSize"> <item>
<size> <widget class="QComboBox" name="remove_format">
<width>120</width> <property name="maximumSize">
<height>16777215</height> <size>
</size> <width>120</width>
</property> <height>16777215</height>
</widget> </size>
</property>
</widget>
</item>
<item>
<spacer name="horizontalSpacer_4">
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="sizeHint" stdset="0">
<size>
<width>40</width>
<height>20</height>
</size>
</property>
</spacer>
</item>
<item>
<widget class="QCheckBox" name="restore_original">
<property name="toolTip">
<string>When doing a same format to same format conversion, for e.g., EPUB to EPUB, calibre saves the original EPUB as ORIGINAL_EPUB. This option tells calibre to restore the EPUB from ORIGINAL_EPUB. Useful if you did a bulk conversion of a large number of books and something went wrong.</string>
</property>
<property name="text">
<string>Restore pre conversion &amp;originals, if available</string>
</property>
</widget>
</item>
</layout>
</item> </item>
<item row="14" column="0"> <item row="13" column="0">
<spacer name="verticalSpacer"> <spacer name="verticalSpacer">
<property name="orientation"> <property name="orientation">
<enum>Qt::Vertical</enum> <enum>Qt::Vertical</enum>
@ -479,7 +529,7 @@ from the value in the box</string>
</property> </property>
</spacer> </spacer>
</item> </item>
<item row="15" column="0" colspan="3"> <item row="14" column="0" colspan="2">
<layout class="QHBoxLayout" name="horizontalLayout_3"> <layout class="QHBoxLayout" name="horizontalLayout_3">
<item> <item>
<widget class="QCheckBox" name="change_title_to_title_case"> <widget class="QCheckBox" name="change_title_to_title_case">
@ -529,7 +579,7 @@ Future conversion of these books will use the default settings.</string>
</item> </item>
</layout> </layout>
</item> </item>
<item row="16" column="0" colspan="3"> <item row="15" column="0" colspan="2">
<widget class="QGroupBox" name="groupBox"> <widget class="QGroupBox" name="groupBox">
<property name="title"> <property name="title">
<string>Change &amp;cover</string> <string>Change &amp;cover</string>
@ -559,7 +609,7 @@ Future conversion of these books will use the default settings.</string>
</layout> </layout>
</widget> </widget>
</item> </item>
<item row="17" column="0"> <item row="16" column="0">
<spacer name="verticalSpacer_2"> <spacer name="verticalSpacer_2">
<property name="orientation"> <property name="orientation">
<enum>Qt::Vertical</enum> <enum>Qt::Vertical</enum>
@ -572,29 +622,6 @@ Future conversion of these books will use the default settings.</string>
</property> </property>
</spacer> </spacer>
</item> </item>
<item row="11" column="0">
<widget class="QLabel" name="label_11">
<property name="text">
<string>&amp;Languages:</string>
</property>
<property name="alignment">
<set>Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter</set>
</property>
<property name="buddy">
<cstring>languages</cstring>
</property>
</widget>
</item>
<item row="11" column="1">
<widget class="LanguagesEdit" name="languages"/>
</item>
<item row="11" column="2">
<widget class="QCheckBox" name="clear_languages">
<property name="text">
<string>Remove &amp;all</string>
</property>
</widget>
</item>
</layout> </layout>
</widget> </widget>
<widget class="QWidget" name="tab"> <widget class="QWidget" name="tab">
@ -1078,8 +1105,8 @@ not multiple and the destination field is multiple</string>
<rect> <rect>
<x>0</x> <x>0</x>
<y>0</y> <y>0</y>
<width>197</width> <width>205</width>
<height>60</height> <height>66</height>
</rect> </rect>
</property> </property>
<layout class="QGridLayout" name="testgrid"> <layout class="QGridLayout" name="testgrid">
@ -1177,6 +1204,7 @@ not multiple and the destination field is multiple</string>
<tabstops> <tabstops>
<tabstop>authors</tabstop> <tabstop>authors</tabstop>
<tabstop>auto_author_sort</tabstop> <tabstop>auto_author_sort</tabstop>
<tabstop>swap_title_and_author</tabstop>
<tabstop>author_sort</tabstop> <tabstop>author_sort</tabstop>
<tabstop>rating</tabstop> <tabstop>rating</tabstop>
<tabstop>publisher</tabstop> <tabstop>publisher</tabstop>
@ -1185,47 +1213,50 @@ not multiple and the destination field is multiple</string>
<tabstop>remove_tags</tabstop> <tabstop>remove_tags</tabstop>
<tabstop>remove_all_tags</tabstop> <tabstop>remove_all_tags</tabstop>
<tabstop>series</tabstop> <tabstop>series</tabstop>
<tabstop>clear_series</tabstop>
<tabstop>autonumber_series</tabstop> <tabstop>autonumber_series</tabstop>
<tabstop>series_numbering_restarts</tabstop> <tabstop>series_numbering_restarts</tabstop>
<tabstop>series_start_number</tabstop> <tabstop>series_start_number</tabstop>
<tabstop>button_box</tabstop>
<tabstop>query_field</tabstop>
<tabstop>save_button</tabstop>
<tabstop>remove_button</tabstop>
<tabstop>search_field</tabstop>
<tabstop>search_mode</tabstop>
<tabstop>s_r_src_ident</tabstop>
<tabstop>s_r_template</tabstop>
<tabstop>search_for</tabstop>
<tabstop>case_sensitive</tabstop>
<tabstop>replace_with</tabstop>
<tabstop>replace_func</tabstop>
<tabstop>destination_field</tabstop>
<tabstop>replace_mode</tabstop>
<tabstop>comma_separated</tabstop>
<tabstop>s_r_dst_ident</tabstop>
<tabstop>results_count</tabstop>
<tabstop>starting_from</tabstop>
<tabstop>multiple_separator</tabstop>
<tabstop>test_text</tabstop>
<tabstop>test_result</tabstop>
<tabstop>scrollArea</tabstop>
<tabstop>central_widget</tabstop>
<tabstop>swap_title_and_author</tabstop>
<tabstop>clear_series</tabstop>
<tabstop>adddate</tabstop> <tabstop>adddate</tabstop>
<tabstop>clear_adddate_button</tabstop> <tabstop>clear_adddate_button</tabstop>
<tabstop>apply_adddate</tabstop> <tabstop>apply_adddate</tabstop>
<tabstop>pubdate</tabstop> <tabstop>pubdate</tabstop>
<tabstop>clear_pubdate_button</tabstop> <tabstop>clear_pubdate_button</tabstop>
<tabstop>apply_pubdate</tabstop> <tabstop>apply_pubdate</tabstop>
<tabstop>languages</tabstop>
<tabstop>clear_languages</tabstop>
<tabstop>remove_format</tabstop> <tabstop>remove_format</tabstop>
<tabstop>restore_original</tabstop>
<tabstop>change_title_to_title_case</tabstop> <tabstop>change_title_to_title_case</tabstop>
<tabstop>update_title_sort</tabstop>
<tabstop>remove_conversion_settings</tabstop> <tabstop>remove_conversion_settings</tabstop>
<tabstop>cover_generate</tabstop> <tabstop>cover_generate</tabstop>
<tabstop>cover_remove</tabstop> <tabstop>cover_remove</tabstop>
<tabstop>cover_from_fmt</tabstop> <tabstop>cover_from_fmt</tabstop>
<tabstop>multiple_separator</tabstop>
<tabstop>test_text</tabstop>
<tabstop>test_result</tabstop>
<tabstop>scrollArea</tabstop>
<tabstop>central_widget</tabstop>
<tabstop>query_field</tabstop>
<tabstop>button_box</tabstop>
<tabstop>save_button</tabstop>
<tabstop>remove_button</tabstop>
<tabstop>search_field</tabstop>
<tabstop>search_mode</tabstop>
<tabstop>s_r_src_ident</tabstop>
<tabstop>s_r_template</tabstop>
<tabstop>replace_with</tabstop>
<tabstop>replace_func</tabstop>
<tabstop>replace_mode</tabstop>
<tabstop>comma_separated</tabstop>
<tabstop>s_r_dst_ident</tabstop>
<tabstop>results_count</tabstop>
<tabstop>scrollArea11</tabstop> <tabstop>scrollArea11</tabstop>
<tabstop>destination_field</tabstop>
<tabstop>search_for</tabstop>
<tabstop>case_sensitive</tabstop>
<tabstop>starting_from</tabstop>
</tabstops> </tabstops>
<resources> <resources>
<include location="../../../../resources/images.qrc"/> <include location="../../../../resources/images.qrc"/>

View File

@ -14,25 +14,30 @@ from PyQt4.Qt import (QAbstractTableModel, QVariant, QModelIndex, Qt,
QTimer, pyqtSignal, QIcon, QDialog, QAbstractItemDelegate, QApplication, QTimer, pyqtSignal, QIcon, QDialog, QAbstractItemDelegate, QApplication,
QSize, QStyleOptionProgressBarV2, QString, QStyle, QToolTip, QFrame, QSize, QStyleOptionProgressBarV2, QString, QStyle, QToolTip, QFrame,
QHBoxLayout, QVBoxLayout, QSizePolicy, QLabel, QCoreApplication, QAction, QHBoxLayout, QVBoxLayout, QSizePolicy, QLabel, QCoreApplication, QAction,
QByteArray) QByteArray, QSortFilterProxyModel)
from calibre.utils.ipc.server import Server from calibre.utils.ipc.server import Server
from calibre.utils.ipc.job import ParallelJob from calibre.utils.ipc.job import ParallelJob
from calibre.gui2 import Dispatcher, error_dialog, question_dialog, NONE, config, gprefs from calibre.gui2 import (Dispatcher, error_dialog, question_dialog, NONE,
config, gprefs)
from calibre.gui2.device import DeviceJob from calibre.gui2.device import DeviceJob
from calibre.gui2.dialogs.jobs_ui import Ui_JobsDialog from calibre.gui2.dialogs.jobs_ui import Ui_JobsDialog
from calibre import __appname__, as_unicode from calibre import __appname__, as_unicode
from calibre.gui2.dialogs.job_view_ui import Ui_Dialog from calibre.gui2.dialogs.job_view_ui import Ui_Dialog
from calibre.gui2.progress_indicator import ProgressIndicator from calibre.gui2.progress_indicator import ProgressIndicator
from calibre.gui2.threaded_jobs import ThreadedJobServer, ThreadedJob from calibre.gui2.threaded_jobs import ThreadedJobServer, ThreadedJob
from calibre.utils.search_query_parser import SearchQueryParser, ParseException
from calibre.utils.icu import lower
class JobManager(QAbstractTableModel): # {{{ class JobManager(QAbstractTableModel, SearchQueryParser): # {{{
job_added = pyqtSignal(int) job_added = pyqtSignal(int)
job_done = pyqtSignal(int) job_done = pyqtSignal(int)
def __init__(self): def __init__(self):
QAbstractTableModel.__init__(self) QAbstractTableModel.__init__(self)
SearchQueryParser.__init__(self, ['all'])
self.wait_icon = QVariant(QIcon(I('jobs.png'))) self.wait_icon = QVariant(QIcon(I('jobs.png')))
self.running_icon = QVariant(QIcon(I('exec.png'))) self.running_icon = QVariant(QIcon(I('exec.png')))
self.error_icon = QVariant(QIcon(I('dialog_error.png'))) self.error_icon = QVariant(QIcon(I('dialog_error.png')))
@ -251,6 +256,18 @@ class JobManager(QAbstractTableModel): # {{{
else: else:
job.kill_on_start = True job.kill_on_start = True
def hide_jobs(self, rows):
for r in rows:
self.jobs[r].hidden_in_gui = True
for r in rows:
self.dataChanged.emit(self.index(r, 0), self.index(r, 0))
def show_hidden_jobs(self):
for j in self.jobs:
j.hidden_in_gui = False
for r in xrange(len(self.jobs)):
self.dataChanged.emit(self.index(r, 0), self.index(r, 0))
def kill_job(self, row, view): def kill_job(self, row, view):
job = self.jobs[row] job = self.jobs[row]
if isinstance(job, DeviceJob): if isinstance(job, DeviceJob):
@ -299,6 +316,62 @@ class JobManager(QAbstractTableModel): # {{{
continue continue
if not isinstance(job, ParallelJob): if not isinstance(job, ParallelJob):
self._kill_job(job) self._kill_job(job)
def universal_set(self):
return set([i for i, j in enumerate(self.jobs) if not getattr(j,
'hidden_in_gui', False)])
def get_matches(self, location, query, candidates=None):
if candidates is None:
candidates = self.universal_set()
ans = set()
if not query:
return ans
query = lower(query)
for j in candidates:
job = self.jobs[j]
if job.description and query in lower(job.description):
ans.add(j)
return ans
def find(self, query):
query = query.strip()
rows = self.parse(query)
return rows
# }}}
class FilterModel(QSortFilterProxyModel): # {{{
search_done = pyqtSignal(object)
def __init__(self, parent):
QSortFilterProxyModel.__init__(self, parent)
self.search_filter = None
def filterAcceptsRow(self, source_row, source_parent):
if (self.search_filter is not None and source_row not in
self.search_filter):
return False
m = self.sourceModel()
try:
job = m.row_to_job(source_row)
except:
return False
return not getattr(job, 'hidden_in_gui', False)
def find(self, query):
ok = True
val = None
if query:
try:
val = self.sourceModel().parse(query)
except ParseException:
ok = False
self.search_filter = val
self.search_done.emit(ok)
self.reset()
# }}} # }}}
# Jobs UI {{{ # Jobs UI {{{
@ -450,8 +523,11 @@ class JobsDialog(QDialog, Ui_JobsDialog):
QDialog.__init__(self, window) QDialog.__init__(self, window)
Ui_JobsDialog.__init__(self) Ui_JobsDialog.__init__(self)
self.setupUi(self) self.setupUi(self)
self.jobs_view.setModel(model)
self.model = model self.model = model
self.proxy_model = FilterModel(self)
self.proxy_model.setSourceModel(self.model)
self.proxy_model.search_done.connect(self.search.search_done)
self.jobs_view.setModel(self.proxy_model)
self.setWindowModality(Qt.NonModal) self.setWindowModality(Qt.NonModal)
self.setWindowTitle(__appname__ + _(' - Jobs')) self.setWindowTitle(__appname__ + _(' - Jobs'))
self.details_button.clicked.connect(self.show_details) self.details_button.clicked.connect(self.show_details)
@ -461,6 +537,15 @@ class JobsDialog(QDialog, Ui_JobsDialog):
self.jobs_view.setItemDelegateForColumn(2, self.pb_delegate) self.jobs_view.setItemDelegateForColumn(2, self.pb_delegate)
self.jobs_view.doubleClicked.connect(self.show_job_details) self.jobs_view.doubleClicked.connect(self.show_job_details)
self.jobs_view.horizontalHeader().setMovable(True) self.jobs_view.horizontalHeader().setMovable(True)
self.hide_button.clicked.connect(self.hide_selected)
self.hide_all_button.clicked.connect(self.hide_all)
self.show_button.clicked.connect(self.show_hidden)
self.search.initialize('jobs_search_history',
help_text=_('Search for a job by name'))
self.search.search.connect(self.find)
self.search_button.clicked.connect(lambda :
self.find(self.search.current_text))
self.clear_button.clicked.connect(lambda : self.search.clear())
self.restore_state() self.restore_state()
def restore_state(self): def restore_state(self):
@ -486,11 +571,13 @@ class JobsDialog(QDialog, Ui_JobsDialog):
pass pass
def show_job_details(self, index): def show_job_details(self, index):
row = index.row() index = self.proxy_model.mapToSource(index)
job = self.jobs_view.model().row_to_job(row) if index.isValid():
d = DetailView(self, job) row = index.row()
d.exec_() job = self.model.row_to_job(row)
d.timer.stop() d = DetailView(self, job)
d.exec_()
d.timer.stop()
def show_details(self, *args): def show_details(self, *args):
index = self.jobs_view.currentIndex() index = self.jobs_view.currentIndex()
@ -498,8 +585,10 @@ class JobsDialog(QDialog, Ui_JobsDialog):
self.show_job_details(index) self.show_job_details(index)
def kill_job(self, *args): def kill_job(self, *args):
rows = [index.row() for index in indices = [self.proxy_model.mapToSource(index) for index in
self.jobs_view.selectionModel().selectedRows()] self.jobs_view.selectionModel().selectedRows()]
indices = [i for i in indices if i.isValid()]
rows = [index.row() for index in indices]
if not rows: if not rows:
return error_dialog(self, _('No job'), return error_dialog(self, _('No job'),
_('No job selected'), show=True) _('No job selected'), show=True)
@ -517,6 +606,26 @@ class JobsDialog(QDialog, Ui_JobsDialog):
_('Do you really want to stop all non-device jobs?')): _('Do you really want to stop all non-device jobs?')):
self.model.kill_all_jobs() self.model.kill_all_jobs()
def hide_selected(self, *args):
indices = [self.proxy_model.mapToSource(index) for index in
self.jobs_view.selectionModel().selectedRows()]
indices = [i for i in indices if i.isValid()]
rows = [index.row() for index in indices]
if not rows:
return error_dialog(self, _('No job'),
_('No job selected'), show=True)
self.model.hide_jobs(rows)
self.proxy_model.reset()
def hide_all(self, *args):
self.model.hide_jobs(list(xrange(0,
self.model.rowCount(QModelIndex()))))
self.proxy_model.reset()
def show_hidden(self, *args):
self.model.show_hidden_jobs()
self.find(self.search.current_text)
def closeEvent(self, e): def closeEvent(self, e):
self.save_state() self.save_state()
return QDialog.closeEvent(self, e) return QDialog.closeEvent(self, e)
@ -528,5 +637,9 @@ class JobsDialog(QDialog, Ui_JobsDialog):
def hide(self, *args): def hide(self, *args):
self.save_state() self.save_state()
return QDialog.hide(self, *args) return QDialog.hide(self, *args)
def find(self, query):
self.proxy_model.find(query)
# }}} # }}}

View File

@ -43,6 +43,7 @@ class LegimiStore(BasicStoreConfig, StorePlugin):
url = 'http://www.legimi.com/pl/ebooks/?price=any&lang=pl&search=' + urllib.quote_plus(query) + '&sort=relevance' url = 'http://www.legimi.com/pl/ebooks/?price=any&lang=pl&search=' + urllib.quote_plus(query) + '&sort=relevance'
br = browser() br = browser()
drm_pattern = re.compile("(DRM)")
counter = max_results counter = max_results
with closing(br.open(url, timeout=timeout)) as f: with closing(br.open(url, timeout=timeout)) as f:
@ -61,6 +62,10 @@ class LegimiStore(BasicStoreConfig, StorePlugin):
author = re.sub(',','',author) author = re.sub(',','',author)
author = re.sub(';',',',author) author = re.sub(';',',',author)
price = ''.join(data.xpath('.//span[@class="ebook_price"]/text()')) price = ''.join(data.xpath('.//span[@class="ebook_price"]/text()'))
formats = ''.join(data.xpath('.//div[@class="item_entries"]/span[3]/text()'))
formats = re.sub('Format:','',formats)
drm = drm_pattern.search(formats)
formats = re.sub('\(DRM\)','',formats)
counter -= 1 counter -= 1
@ -70,7 +75,7 @@ class LegimiStore(BasicStoreConfig, StorePlugin):
s.author = author.strip() s.author = author.strip()
s.price = price s.price = price
s.detail_item = 'http://www.legimi.com/' + id.strip() s.detail_item = 'http://www.legimi.com/' + id.strip()
s.drm = SearchResult.DRM_LOCKED s.drm = SearchResult.DRM_LOCKED if drm else SearchResult.DRM_UNLOCKED
s.formats = 'EPUB' s.formats = formats.strip()
yield s yield s

View File

@ -107,6 +107,12 @@ class KindleDX(Kindle):
name = 'Kindle DX' name = 'Kindle DX'
id = 'kindledx' id = 'kindledx'
class KindleFire(KindleDX):
name = 'Kindle Fire'
id = 'kindle_fire'
output_profile = 'kindle_fire'
supports_color = True
class Sony505(Device): class Sony505(Device):
output_profile = 'sony' output_profile = 'sony'
@ -179,6 +185,10 @@ class NookColor(Nook):
output_profile = 'nook_color' output_profile = 'nook_color'
supports_color = True supports_color = True
class NookTablet(NookColor):
id = 'nook_tablet'
name = 'Nook Tablet'
class CybookG3(Device): class CybookG3(Device):
name = 'Cybook Gen 3' name = 'Cybook Gen 3'

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More