GRiker 2011-11-18 05:19:10 -07:00
commit ef3ed589c2
138 changed files with 43127 additions and 35101 deletions


@ -19,6 +19,60 @@
# new recipes:
# - title:
- version: 0.8.27
date: 2011-11-18
new features:
- title: "Drivers for the Kindle Fire and the Nook Tablet"
tickets: [890918]
- title: "Conversion: Add an option under Look & Feel to remove specified style information (CSS) from the document during conversion."
tickets: [871384]
- title: "Add an option in the bulk metadata edit dialog to restore the pre-conversion files for many books with a single click."
tickets: [886116]
- title: "Jobs list: Add the ability to search for and to hide jobs, useful if you have run a lot of jobs and the list is getting crowded."
tickets: [883734]
- title: "Book jacket generation: Add ability to customize the book jacket template and add custom columns into the jacket."
tickets: [889912]
- title: "MOBI Input: Performance improvement when viewing/converting a file with a lot of links"
bug fixes:
- title: "Fix regression in 0.8.26 that broke disabling the update of particular fields during a bulk metadata download."
tickets: [889696]
- title: "Get Books: Fix DRM status for legimi"
- title: "When parsing for lxml via BeautifulSoup, use the calibre-modified copy of BeautifulSoup (more robust)."
tickets: [889890]
- title: "HTML Input: Handle double encoded URLs in img tags"
tickets: [889323]
improved recipes:
- Various Polish recipes
- Academia Catavencu
- El Periodico de Aragon
- Weblogs SL
- Folha de Sao Paolo (subscription)
new recipes:
- title: News on Japan
author: Krittika Goyal
- title: Formula AS
author: Silviu Cotoara
- title: Various Turkish news sources
author: Osman Kaysan
- title: Infra.pl and Spider's Web
author: fenuks
- version: 0.8.26
date: 2011-11-12

recipes/biamag.recipe Normal file

@ -0,0 +1,38 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
bianet.org
'''
from calibre.web.feeds.news import BasicNewsRecipe
class BiaMag(BasicNewsRecipe):
title = 'BiaMag'
__author__ = 'Osman Kaysan'
description = 'Independent News from Turkey'
publisher = 'BiaMag'
category = 'news, politics, Turkey'
oldest_article = 15
max_articles_per_feed = 120
masthead_url = 'http://bianet.org/images/biamag_logo.gif'
language = 'tr'
no_stylesheets = True
conversion_options = {
'comments' : description
,'tags' : category
,'language' : language
,'publisher' : publisher
,'linearize_tables': True
,'remove_paragraph_spacing': True,
}
remove_tags_before = dict(name='div', attrs={'class':'manset'})
remove_tags = [ dict(name='ul', attrs={'class':['altul']}), dict(name='div', attrs={'id':['habermenu']}), dict(name='div', attrs={'class':['mail']}), dict(name='div', attrs={'class':['from']})]
remove_tags_after = dict(name='div', attrs={'id':'habermenu'})
feeds = [(u'BiaMag', u'http://www.bianet.org/biamag.rss')]
def preprocess_html(self, soup):
return self.adeify_images(soup)

recipes/biamag_en.recipe Normal file

@ -0,0 +1,38 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
bianet.org
'''
from calibre.web.feeds.news import BasicNewsRecipe
class BianetEnglish(BasicNewsRecipe):
title = 'Bianet-English'
__author__ = 'Osman Kaysan'
description = 'Independent News Network from Turkey(English)'
publisher = 'Bianet'
category = 'news, politics, Turkey'
oldest_article = 7
max_articles_per_feed = 150
masthead_url = 'http://bianet.org/images/english_logo.gif'
language = 'en_TR'
no_stylesheets = True
conversion_options = {
'comments' : description
,'tags' : category
,'language' : language
,'publisher' : publisher
,'linearize_tables': True
,'remove_paragraph_spacing': True,
}
remove_tags_before = dict(name='div', attrs={'class':'manset'})
remove_tags = [ dict(name='ul', attrs={'class':['altul']}), dict(name='div', attrs={'id':['habermenu']}), dict(name='div', attrs={'class':['mail']}), dict(name='div', attrs={'class':['from']})]
remove_tags_after = dict(name='div', attrs={'id':'habermenu'})
feeds = [(u'Bianet-English', u'http://www.bianet.org/english.rss')]
def preprocess_html(self, soup):
return self.adeify_images(soup)

recipes/bianet.recipe Normal file

@ -0,0 +1,38 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
bianet.org
'''
from calibre.web.feeds.news import BasicNewsRecipe
class Bianet(BasicNewsRecipe):
title = 'Bianet'
__author__ = 'Osman Kaysan'
description = 'Independent News from Turkey'
publisher = 'Bianet'
category = 'news, politics, Turkey'
oldest_article = 7
max_articles_per_feed = 120
masthead_url = 'http://bianet.org/images/bianet_logo.gif'
language = 'tr'
no_stylesheets = True
conversion_options = {
'comments' : description
,'tags' : category
,'language' : language
,'publisher' : publisher
,'linearize_tables': True
,'remove_paragraph_spacing': True,
}
remove_tags_before = dict(name='div', attrs={'class':'manset'})
remove_tags = [ dict(name='ul', attrs={'class':['altul']}), dict(name='div', attrs={'id':['habermenu']}), dict(name='div', attrs={'class':['mail']}), dict(name='div', attrs={'class':['from']})]
remove_tags_after = dict(name='div', attrs={'id':'habermenu'})
feeds = [(u'Bianet', u'http://bianet.org/bianet.rss')]
def preprocess_html(self, soup):
return self.adeify_images(soup)


@ -0,0 +1,50 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from calibre.web.feeds.news import BasicNewsRecipe
class Birgun (BasicNewsRecipe):
title = u'Birgün Gazetesi'
__author__ = u'Osman Kaysan'
oldest_article = 7
max_articles_per_feed =150
use_embedded_content = False
description = 'Birgun gazetesi haberleri, kose yazarlari'
publisher = 'Birgün'
category = 'news,haberler,turkce,gazete,birgun'
language = 'tr'
no_stylesheets = True
publication_type = 'newspaper'
conversion_options = {
'comments' : description
,'tags' : category
,'language' : language
,'publisher' : publisher
,'linearize_tables': True
,'remove_paragraph_spacing': True,
}
cover_img_url = 'http://www.birgun.net/i/birgun.png'
masthead_url = 'http://www.birgun.net/i/birgun.png'
remove_attributes = ['width','height']
remove_tags_before = dict(name='h2', attrs={'class':'storyHeadline'})
#remove_tags_after = dict(name='div', attrs={'class':'toollinks'})
remove_tags_after = dict(name='tr', attrs={'valign':'top'})
remove_tags = [ dict(name='div', attrs={'id':'byLine'}), dict(name='div', attrs={'class':'toollinks'})
, dict(name='div', attrs={'class':'main-lead'}), dict(name='div', attrs={'class':'addthis_toolbox addthis_default_style'})
, dict(name='a', attrs={'class':'addthis_button'})]
remove_empty_feeds= True
feeds = [
( u'Güncel', u'http://www.birgun.net/actuels.xml')
,( u'Köşe Yazarları', u'http://www.birgun.net/writer.xml')
,( u'Politika', u'http://www.birgun.net/politics.xml')
,( u'Ekonomi', u'http://www.birgun.net/economic.xml')
,( u'Çalışma Yaşamı', u'http://www.birgun.net/workers.xml')
,( u'Dünya', u'http://www.birgun.net/worlds.xml')
,( u'Yaşam', u'http://www.birgun.net/lifes.xml')
]


@ -4,16 +4,16 @@
__license__ = 'GPL v3'
__copyright__ = u'2011, Silviu Cotoar\u0103'
'''
catavencu.ro
academiacatavencu.info
'''
from calibre.web.feeds.news import BasicNewsRecipe
class Catavencu(BasicNewsRecipe):
class AcademiaCatavencu(BasicNewsRecipe):
title = u'Academia Ca\u0163avencu'
__author__ = u'Silviu Cotoar\u0103'
description = 'Tagma cum laude'
publisher = 'Catavencu'
publisher = u'Ca\u0163avencu'
oldest_article = 5
language = 'ro'
max_articles_per_feed = 100
@ -21,32 +21,31 @@ class Catavencu(BasicNewsRecipe):
use_embedded_content = False
category = 'Ziare'
encoding = 'utf-8'
cover_url = 'http://upload.wikimedia.org/wikipedia/en/1/1e/Academia_Catavencu.jpg'
cover_url = 'http://www.academiacatavencu.info/images/logo.png'
conversion_options = {
'comments' : description
,'tags' : category
,'language' : language
,'publisher' : publisher
,'publisher' : publisher
}
keep_only_tags = [
dict(name='ul', attrs={'class':'articles'})
dict(name='h1', attrs={'class':'art_title'}),
dict(name='div', attrs={'class':'art_text'})
]
remove_tags = [
dict(name='div', attrs={'class':['tools']})
, dict(name='div', attrs={'class':['share']})
, dict(name='div', attrs={'class':['category']})
, dict(name='div', attrs={'id':['comments']})
dict(name='div', attrs={'class':['desp_m']})
, dict(name='div', attrs={'id':['tags']})
]
remove_tags_after = [
dict(name='div', attrs={'id':'comments'})
dict(name='div', attrs={'class':['desp_m']})
]
feeds = [
(u'Feeds', u'http://catavencu.ro/feed/rss')
(u'Feeds', u'http://www.academiacatavencu.info/rss.xml')
]
def preprocess_html(self, soup):


@ -27,7 +27,7 @@ class CGM(BasicNewsRecipe):
del item['style']
ad=soup.findAll('a')
for r in ad:
if 'http://www.hustla.pl' in r['href']:
if 'http://www.hustla.pl' in r['href'] or 'http://www.ebilet.pl' in r['href']:
r.extract()
gallery=soup.find('div', attrs={'class':'galleryFlash'})
if gallery:


@ -46,7 +46,8 @@ class DziennikInternautowRecipe(BasicNewsRecipe):
dict(name = 'div', attrs = {'class' : 'poradniki_context'}),
dict(name = 'div', attrs = {'class' : 'uniBox'}),
dict(name = 'object', attrs = {}),
dict(name = 'h3', attrs = {})
dict(name = 'h3', attrs = {}),
dict(attrs={'class':'twitter-share-button'})
]
preprocess_regexps = [
@ -58,3 +59,8 @@ class DziennikInternautowRecipe(BasicNewsRecipe):
(r'\s*</', lambda match: '</'),
]
]
def skip_ad_pages(self, soup):
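# skip_ad_pages is a BasicNewsRecipe hook: anything returned here is used in place of
# the downloaded page, so 'Advertisement' interstitials are skipped by following their first link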
if 'Advertisement' in soup.title:
nexturl=soup.find('a')['href']
return self.index_to_soup(nexturl, raw=True)


@ -55,12 +55,17 @@ class Economist(BasicNewsRecipe):
'''
def get_cover_url(self):
br = self.browser
br.open(self.INDEX)
issue = br.geturl().split('/')[4]
self.log('Fetching cover for issue: %s'%issue)
cover_url = "http://media.economist.com/sites/default/files/imagecache/print-cover-full/print-covers/%s_CNA400_0.jpg" %(issue.translate(None,'-'))
return cover_url
soup = self.index_to_soup('http://www.economist.com/printedition/covers')
div = soup.find('div', attrs={'class':lambda x: x and
'print-cover-links' in x})
a = div.find('a', href=True)
url = a.get('href')
if url.startswith('/'):
url = 'http://www.economist.com' + url
soup = self.index_to_soup(url)
div = soup.find('div', attrs={'class':'cover-content'})
img = div.find('img', src=True)
return img.get('src')
def parse_index(self):
return self.economist_parse_index()


@ -39,13 +39,17 @@ class Economist(BasicNewsRecipe):
delay = 1
def get_cover_url(self):
br = self.browser
br.open(self.INDEX)
issue = br.geturl().split('/')[4]
self.log('Fetching cover for issue: %s'%issue)
cover_url = "http://media.economist.com/sites/default/files/imagecache/print-cover-full/print-covers/%s_CNA400_0.jpg" %(issue.translate(None,'-'))
return cover_url
soup = self.index_to_soup('http://www.economist.com/printedition/covers')
div = soup.find('div', attrs={'class':lambda x: x and
'print-cover-links' in x})
a = div.find('a', href=True)
url = a.get('href')
if url.startswith('/'):
url = 'http://www.economist.com' + url
soup = self.index_to_soup(url)
div = soup.find('div', attrs={'class':'cover-content'})
img = div.find('img', src=True)
return img.get('src')
def parse_index(self):
try:


@ -5,12 +5,11 @@ __license__ = 'GPL v3'
__copyright__ = '04 December 2010, desUBIKado'
__author__ = 'desUBIKado'
__description__ = 'Daily newspaper from Aragon'
__version__ = 'v0.07'
__date__ = '06, February 2011'
__version__ = 'v0.08'
__date__ = '13, November 2011'
'''
elperiodicodearagon.com
'''
import re
from calibre.web.feeds.news import BasicNewsRecipe
@ -20,13 +19,13 @@ class elperiodicodearagon(BasicNewsRecipe):
description = u'Noticias desde Aragon'
publisher = u'elperiodicodearagon.com'
category = u'news, politics, Spain, Aragon'
oldest_article = 2
oldest_article = 1
delay = 0
max_articles_per_feed = 100
no_stylesheets = True
use_embedded_content = False
language = 'es'
encoding = 'utf8'
encoding = 'iso-8859-1'
remove_empty_feeds = True
remove_javascript = True
@ -39,61 +38,30 @@ class elperiodicodearagon(BasicNewsRecipe):
}
feeds = [
(u'Arag\xf3n', u'http://elperiodicodearagon.com/RSS/2.xml'),
(u'Internacional', u'http://elperiodicodearagon.com/RSS/4.xml'),
(u'Espa\xf1a', u'http://elperiodicodearagon.com/RSS/3.xml'),
(u'Econom\xeda', u'http://elperiodicodearagon.com/RSS/5.xml'),
(u'Deportes', u'http://elperiodicodearagon.com/RSS/7.xml'),
(u'Real Zaragoza', u'http://elperiodicodearagon.com/RSS/10.xml'),
(u'Opini\xf3n', u'http://elperiodicodearagon.com/RSS/103.xml'),
(u'Escenarios', u'http://elperiodicodearagon.com/RSS/105.xml'),
(u'Sociedad', u'http://elperiodicodearagon.com/RSS/104.xml'),
(u'Gente', u'http://elperiodicodearagon.com/RSS/330.xml')
(u'Portada', u'http://zetaestaticos.com/aragon/rss/portada_es.xml'),
(u'Arag\xf3n', u'http://zetaestaticos.com/aragon/rss/2_es.xml'),
(u'Internacional', u'http://zetaestaticos.com/aragon/rss/4_es.xml'),
(u'Espa\xf1a', u'http://zetaestaticos.com/aragon/rss/3_es.xml'),
(u'Econom\xeda', u'http://zetaestaticos.com/aragon/rss/5_es.xml'),
(u'Deportes', u'http://zetaestaticos.com/aragon/rss/7_es.xml'),
(u'Real Zaragoza', u'http://zetaestaticos.com/aragon/rss/10_es.xml'),
(u'CAI Zaragoza', u'http://zetaestaticos.com/aragon/rss/91_es.xml'),
(u'Monta\xf1ismo', u'http://zetaestaticos.com/aragon/rss/354_es.xml'),
(u'Opini\xf3n', u'http://zetaestaticos.com/aragon/rss/103_es.xml'),
(u'Tema del d\xeda', u'http://zetaestaticos.com/aragon/rss/102_es.xml'),
(u'Escenarios', u'http://zetaestaticos.com/aragon/rss/105_es.xml'),
(u'Sociedad', u'http://zetaestaticos.com/aragon/rss/104_es.xml'),
(u'Gente', u'http://zetaestaticos.com/aragon/rss/330_es.xml'),
(u'Espacio 3', u'http://zetaestaticos.com/aragon/rss/328_es.xml'),
(u'Fiestas del Pilar', u'http://zetaestaticos.com/aragon/rss/107_es.xml')
]
extra_css = '''
h3 {font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:30px;}
h2 {font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:18px;}
h4 {font-family:Arial,Helvetica,sans-serif; font-style:italic; font-weight:normal;font-size:20px;}
.columnaDeRecursosRelacionados {font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:14px;}
img{margin-bottom: 0.4em}
'''
remove_attributes = ['height','width']
keep_only_tags = [dict(name='div', attrs={'id':'contenidos'})]
keep_only_tags = [dict(name='div', attrs={'id':'Noticia'})]
# Strip out all the clutter
remove_tags = [dict(name='ul', attrs={'class':'herramientasDeNoticia'}),
dict(name='span', attrs={'class':'MasInformacion '}),
dict(name='span', attrs={'class':'MasInformacion'}),
dict(name='div', attrs={'class':'Middle'}),
dict(name='div', attrs={'class':'MenuCabeceraRZaragoza'}),
dict(name='div', attrs={'id':'MenuCabeceraRZaragoza'}),
dict(name='div', attrs={'class':'MenuEquipo'}),
dict(name='div', attrs={'class':'TemasRelacionados'}),
dict(name='div', attrs={'class':'GaleriaEnNoticia'}),
dict(name='div', attrs={'class':'Recorte'}),
dict(name='div', attrs={'id':'NoticiasenRecursos'}),
dict(name='div', attrs={'id':'NoticiaEnPapel'}),
dict(name='p', attrs={'class':'RecorteEnNoticias'}),
dict(name='div', attrs={'id':'Comparte'}),
dict(name='div', attrs={'id':'CajaComparte'}),
dict(name='a', attrs={'class':'EscribirComentario'}),
dict(name='a', attrs={'class':'AvisoComentario'}),
dict(name='div', attrs={'class':'CajaAvisoComentario'}),
dict(name='div', attrs={'class':'navegaNoticias'}),
dict(name='div', attrs={'class':'Mensaje'}),
dict(name='div', attrs={'id':'PaginadorDiCom'}),
dict(name='div', attrs={'id':'CajaAccesoCuentaUsuario'}),
dict(name='div', attrs={'id':'CintilloComentario'}),
dict(name='div', attrs={'id':'EscribeComentario'}),
dict(name='div', attrs={'id':'FormularioComentario'}),
dict(name='div', attrs={'id':'FormularioNormas'})]
# Retrieve the print front page (the format=1 image has a higher resolution)
def get_cover_url(self):
@ -104,23 +72,7 @@ class elperiodicodearagon(BasicNewsRecipe):
return image['src'].rstrip('format=2') + 'format=1'
return None
# Remove the spaces between the article and the comments (lines 1 and 2)
# The index did not point correctly to the start of the article (line 3)
# Use the mobile version
preprocess_regexps = [
(re.compile(r'<p>&nbsp;</p>', re.DOTALL|re.IGNORECASE), lambda match: ''),
(re.compile(r'<p> </p>', re.DOTALL|re.IGNORECASE), lambda match: ''),
(re.compile(r'<p id="">', re.DOTALL|re.IGNORECASE), lambda match: '<p>')
]
# Replace the embedded YouTube video with an image
def preprocess_html(self, soup):
for video_yt in soup.findAll('iframe',{'title':'YouTube video player'}):
if video_yt:
video_yt.name = 'img'
fuente = video_yt['src']
fuente2 = fuente.replace('http://www.youtube.com/embed/','http://img.youtube.com/vi/')
video_yt['src'] = fuente2 + '/0.jpg'
return soup
def print_version(self, url):
return url.replace('http://www.elperiodicodearagon.com/', 'http://www.elperiodicodearagon.com/m/')


@ -8,31 +8,35 @@ class FSP(BasicNewsRecipe):
__author__ = 'fluzao'
description = u'Printed edition contents. UOL subscription required (Folha subscription currently not supported).' + \
u' [Conte\xfado completo da edi\xe7\xe3o impressa. Somente para assinantes UOL.]'
INDEX = 'http://www1.folha.uol.com.br/fsp/indices/'
#found this to be the easiest place to find the index page (13-Nov-2011).
# searching for the "Indice Geral" link
HOMEPAGE = 'http://www1.folha.uol.com.br/fsp/'
masthead_url = 'http://f.i.uol.com.br/fsp/furniture/images/lgo-fsp-430x50-ffffff.gif'
language = 'pt'
no_stylesheets = True
max_articles_per_feed = 40
remove_javascript = True
needs_subscription = True
remove_tags_before = dict(name='b')
remove_tags_before = dict(name='p')
remove_tags = [dict(name='td', attrs={'align':'center'})]
remove_attributes = ['height','width']
masthead_url = 'http://f.i.uol.com.br/fsp/furniture/images/lgo-fsp-430x50-ffffff.gif'
# fixes the problem with the section names
section_dict = {'cotidian' : 'cotidiano', 'ilustrad': 'ilustrada', \
'quadrin': 'quadrinhos' , 'opiniao' : u'opini\xE3o', \
'ciencia' : u'ci\xeancia' , 'saude' : u'sa\xfade', \
'ribeirao' : u'ribeir\xE3o' , 'equilibrio' : u'equil\xedbrio'}
'ribeirao' : u'ribeir\xE3o' , 'equilibrio' : u'equil\xedbrio', \
'imoveis' : u'im\xf3veis', 'negocios' : u'neg\xf3cios', \
'veiculos' : u've\xedculos', 'corrida' : 'folha corrida'}
# this solves the problem with truncated content in Kindle
conversion_options = {'linearize_tables' : True}
# this bit removes the footer where there are links for Proximo Texto, Texto Anterior,
# Indice e Comunicar Erros
preprocess_regexps = [(re.compile(r'<BR><BR>Texto Anterior:.*<!--/NOTICIA-->',
re.DOTALL|re.IGNORECASE), lambda match: r''),
(re.compile(r'<BR><BR>Pr&oacute;ximo Texto:.*<!--/NOTICIA-->',
preprocess_regexps = [(re.compile(r'<!--/NOTICIA-->.*Comunicar Erros</a>',
re.DOTALL|re.IGNORECASE), lambda match: r'')]
def get_browser(self):
@ -49,7 +53,25 @@ class FSP(BasicNewsRecipe):
def parse_index(self):
soup = self.index_to_soup(self.INDEX)
#Searching for the index page on the HOMEPAGE
hpsoup = self.index_to_soup(self.HOMEPAGE)
indexref = hpsoup.find('a', href=re.compile('^indices.*'))
self.log('--> tag containing the link to today\'s index: ', indexref)
INDEX = indexref['href']
INDEX = 'http://www1.folha.uol.com.br/fsp/'+INDEX
self.log('--> INDEX after extracting href and adding prefix: ', INDEX)
# ... and taking the opportunity to get the cover image link
coverurl = hpsoup.find('a', href=re.compile('^cp.*'))['href']
if coverurl:
self.log('--> tag containing the link to today\'s cover: ', coverurl)
coverurl = coverurl.replace('htm', 'jpg')
coverurl = 'http://www1.folha.uol.com.br/fsp/images/'+coverurl
self.log('--> coverurl after extracting href and adding prefix: ', coverurl)
self.cover_url = coverurl
#soup = self.index_to_soup(self.INDEX)
soup = self.index_to_soup(INDEX)
feeds = []
articles = []
section_title = "Preambulo"
@ -68,8 +90,12 @@ class FSP(BasicNewsRecipe):
self.log('--> new section title: ', section_title)
if strpost.startswith('<a href'):
url = post['href']
#this bit is kept if they ever go back to the old format (pre Nov-2011)
if url.startswith('/fsp'):
url = 'http://www1.folha.uol.com.br'+url
#
if url.startswith('http://www1.folha.uol.com.br/fsp'):
#url = 'http://www1.folha.uol.com.br'+url
title = self.tag_to_string(post)
self.log()
self.log('--> post: ', post)
@ -82,15 +108,11 @@ class FSP(BasicNewsRecipe):
# keeping the front page url
minha_capa = feeds[0][1][1]['url']
# removing the 'Preambulo' section
# removing the first section (now called 'top')
del feeds[0]
# creating the url for the cover image
coverurl = feeds[0][1][0]['url']
coverurl = coverurl.replace('/opiniao/fz', '/images/cp')
coverurl = coverurl.replace('01.htm', '.jpg')
self.cover_url = coverurl
# inserting the cover page as the first article (nicer for kindle users)
feeds.insert(0,(u'primeira p\xe1gina', [{'title':u'Primeira p\xe1gina' , 'url':minha_capa}]))
return feeds

recipes/formulaas.recipe Normal file

@ -0,0 +1,50 @@
# -*- coding: utf-8 -*-
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = u'2011, Silviu Cotoar\u0103'
'''
formula-as.ro
'''
from calibre.web.feeds.news import BasicNewsRecipe
class FormulaAS(BasicNewsRecipe):
title = u'Formula AS'
__author__ = u'Silviu Cotoar\u0103'
publisher = u'Formula AS'
description = u'Formula AS'
oldest_article = 5
language = 'ro'
max_articles_per_feed = 100
no_stylesheets = True
use_embedded_content = False
category = 'Ziare,Romania'
encoding = 'utf-8'
cover_url = 'http://www.formula-as.ro/_client/img/header_logo.png'
conversion_options = {
'comments' : description
,'tags' : category
,'language' : language
,'publisher' : publisher
}
keep_only_tags = [
dict(name='div', attrs={'class':'item padded'})
]
remove_tags = [
dict(name='ul', attrs={'class':'subtitle lower'})
]
remove_tags_after = [
dict(name='ul', attrs={'class':'subtitle lower'}),
dict(name='div', attrs={'class':'item-brief-options'})
]
feeds = [
(u'\u0218tiri', u'http://www.formula-as.ro/rss/articole.xml')
]
def preprocess_html(self, soup):
return self.adeify_images(soup)


@ -18,7 +18,7 @@ class FrazPC(BasicNewsRecipe):
max_articles_per_feed = 100
use_embedded_content = False
no_stylesheets = True
cover_url='http://www.frazpc.pl/images/logo.png'
feeds = [
(u'Aktualno\u015bci', u'http://www.frazpc.pl/feed/aktualnosci'),
(u'Artyku\u0142y', u'http://www.frazpc.pl/feed/artykuly')
@ -33,6 +33,7 @@ class FrazPC(BasicNewsRecipe):
dict(name='div', attrs={'class':'comments_box'})
]
remove_tags_after=dict(name='div', attrs={'class':'content'})
preprocess_regexps = [(re.compile(r'\| <a href="#comments">Komentarze \([0-9]*\)</a>'), lambda match: '')]
remove_attributes = [ 'width', 'height' ]


@ -12,7 +12,6 @@ class GN(BasicNewsRecipe):
EDITION = 0
__author__ = 'Piotr Kontek'
title = u'Gość niedzielny'
description = 'Weekly magazine'
encoding = 'utf-8'
no_stylesheets = True
@ -20,6 +19,8 @@ class GN(BasicNewsRecipe):
remove_javascript = True
temp_files = []
simultaneous_downloads = 1
masthead_url = 'http://gosc.pl/files/11/03/12/949089_top.gif'
title = u'Gość niedzielny'
articles_are_obfuscated = True
@ -64,7 +65,6 @@ class GN(BasicNewsRecipe):
if img != None:
a = img.parent
self.EDITION = a['href']
self.title = img['alt']
self.cover_url = 'http://www.gosc.pl' + img['src']
if not first:
break


@ -4,56 +4,20 @@ __license__ = 'GPL v3'
__copyright__ = '2010, matek09, matek09@gmail.com'
from calibre.web.feeds.news import BasicNewsRecipe
import re
class Histmag(BasicNewsRecipe):
title = u'Histmag'
oldest_article = 7
max_articles_per_feed = 100
cover_url='http://histmag.org/grafika/loga/histmag-logo-2-300px.png'
__author__ = 'matek09'
description = u"Artykuly historyczne i publicystyczne"
encoding = 'utf-8'
#preprocess_regexps = [(re.compile(r'</span>'), lambda match: '</span><br><br>'),(re.compile(r'<span>'), lambda match: '<br><br><span>')]
no_stylesheets = True
language = 'pl'
remove_javascript = True
keep_only_tags=[dict(id='article')]
remove_tags=[dict(name = 'p', attrs = {'class' : 'article-tags'})]
title = u'Histmag'
__author__ = 'matek09'
description = u"Artykuly historyczne i publicystyczne"
encoding = 'utf-8'
no_stylesheets = True
language = 'pl'
remove_javascript = True
#max_articles_per_feed = 1
remove_tags_before = dict(dict(name = 'div', attrs = {'id' : 'article'}))
remove_tags_after = dict(dict(name = 'h2', attrs = {'class' : 'komentarze'}))
#keep_only_tags =[]
#keep_only_tags.append(dict(name = 'h2'))
#keep_only_tags.append(dict(name = 'p'))
remove_tags =[]
remove_tags.append(dict(name = 'p', attrs = {'class' : 'podpis'}))
remove_tags.append(dict(name = 'h2', attrs = {'class' : 'komentarze'}))
remove_tags.append(dict(name = 'img', attrs = {'src' : 'style/buttons/wesprzyjnas-1.jpg'}))
preprocess_regexps = [(re.compile(r'</span>'), lambda match: '</span><br><br>'),
(re.compile(r'<span>'), lambda match: '<br><br><span>')]
extra_css = '''
.left {font-size: x-small}
.right {font-size: x-small}
'''
def find_articles(self, soup):
articles = []
for div in soup.findAll('div', attrs={'class' : 'text'}):
articles.append({
'title' : self.tag_to_string(div.h3.a),
'url' : 'http://www.histmag.org/' + div.h3.a['href'],
'date' : self.tag_to_string(div.next('p')).split('|')[0],
'description' : self.tag_to_string(div.next('p', podpis=False)),
})
return articles
def parse_index(self):
soup = self.index_to_soup('http://histmag.org/?arc=4&dx=0')
feeds = []
feeds.append((u"Artykuly historyczne", self.find_articles(soup)))
soup = self.index_to_soup('http://histmag.org/?arc=5&dx=0')
feeds.append((u"Artykuly publicystyczne", self.find_articles(soup)))
soup = self.index_to_soup('http://histmag.org/?arc=1&dx=0')
feeds.append((u"Wydarzenia", self.find_articles(soup)))
return feeds
feeds = [(u'Wszystkie', u'http://histmag.org/rss/wszystkie.xml'), (u'Wydarzenia', u'http://histmag.org/rss/wydarzenia.xml'), (u'Recenzje', u'http://histmag.org/rss/recenzje.xml'), (u'Artykuły historyczne', u'http://histmag.org/rss/historia.xml'), (u'Publicystyka', u'http://histmag.org/rss/publicystyka.xml')]


@ -8,6 +8,15 @@ class Historia_org_pl(BasicNewsRecipe):
category = 'history'
language = 'pl'
oldest_article = 8
remove_empty_feeds=True
max_articles_per_feed = 100
feeds = [(u'Artykuły', u'http://www.historia.org.pl/index.php?format=feed&type=rss')]
feeds = [(u'Wszystkie', u'http://www.historia.org.pl/index.php?format=feed&type=rss'),
(u'Wiadomości', u'http://www.historia.org.pl/index.php/wiadomosci.feed?type=rss'),
(u'Publikacje', u'http://www.historia.org.pl/index.php/publikacje.feed?type=rss'),
(u'Publicystyka', u'http://www.historia.org.pl/index.php/publicystyka.feed?type=rss'),
(u'Recenzje', u'http://historia.org.pl/index.php/recenzje.feed?type=rss'),
(u'Kultura i sztuka', u'http://www.historia.org.pl/index.php/kultura-i-sztuka.feed?type=rss'),
(u'Rekonstrukcje', u'http://www.historia.org.pl/index.php/rekonstrukcje.feed?type=rss'),
(u'Projekty', u'http://www.historia.org.pl/index.php/projekty.feed?type=rss'),
(u'Konkursy', u'http://www.historia.org.pl/index.php/konkursy.feed?type=rss')]

recipes/icons/formulaas.png (new binary file, 687 B)

recipes/icons/infra_pl.png (new binary file, 1.5 KiB)


@ -49,7 +49,7 @@ class TheIndependentNew(BasicNewsRecipe):
preprocess_regexps = [
(re.compile('<span class="storyTop ">(?P<nested>.*?)</span>', re.DOTALL),
lambda match: '<div class="storyTop">' + match.group('nested') + '</div>'),
(re.compile('<strong>.*?Click.*?to view graphic.*?</strong>', re.DOTALL),
(re.compile('(<strong>.*?[Cc]lick.*?<a.*?((HERE)|([Hh]ere)).*?</strong>)', re.DOTALL),
lambda match: '<div class="article-graphic">' + match.group(0) + '</div>'),
]
@ -104,6 +104,14 @@ class TheIndependentNew(BasicNewsRecipe):
def preprocess_html(self, soup):
#remove 'advertorial articles'
strapline = soup.find('div',attrs={'class' : re.compile('.*strapLine.*')})
if strapline:
for para in strapline.findAll('p'):
if len(para.contents) and isinstance(para.contents[0],NavigableString) \
and para.contents[0] == 'ADVERTORIAL FEATURE':
return None
items_to_extract = []
for item in soup.findAll(attrs={'class' : re.compile("widget.*")}):
@ -189,9 +197,14 @@ class TheIndependentNew(BasicNewsRecipe):
#remove empty paragraph tags in storyTop which can leave a space
#between first paragraph and rest of story
nested_content = False
storyTop = soup.find('div',attrs={ 'class' : ['storyTop']})
for item in storyTop.findAll('p'):
if item.contents is not None and len(item.contents[0]) <= 1 :
for nested in item:
if isinstance(nested, Tag):
nested_content = True
break
if not nested_content and item.contents is not None and len(item.contents[0]) <= 1 :
items_to_extract.append(item)
for item in items_to_extract:
@ -211,6 +224,8 @@ class TheIndependentNew(BasicNewsRecipe):
items_to_insert = []
for item in soup.findAll('div', attrs={'class' : ['article-graphic']}):
strong = item.find('strong')
if not strong:
continue
for child in strong:
if isinstance(child,Tag):
if str(child.name) == 'a':

recipes/infra_pl.recipe Normal file

@ -0,0 +1,17 @@
from calibre.web.feeds.news import BasicNewsRecipe
class INFRA(BasicNewsRecipe):
title = u'INFRA'
oldest_article = 7
max_articles_per_feed = 100
__author__ = 'fenuks'
description = u'Serwis Informacyjny INFRA - UFO, Zjawiska Paranormalne, Duchy, Tajemnice świata.'
cover_url = 'http://npn.nazwa.pl/templates/ja_teline_ii/images/logo.jpg'
category = 'UFO'
language = 'pl'
max_articles_per_feed = 100
no_stylesheets = True
remove_tags_before=dict(name='h2', attrs={'class':'contentheading'})
remove_tags_after=dict(attrs={'class':'pagenav'})
remove_tags=[dict(attrs={'class':'pagenav'})]
feeds = [(u'Najnowsze wiadomo\u015bci', u'http://www.infra.org.pl/index.php?option=com_rd_rss&id=1')]

recipes/japan_news.recipe Normal file

@ -0,0 +1,18 @@
from calibre.web.feeds.news import BasicNewsRecipe
class NewsOnJapan(BasicNewsRecipe):
title = u'News On Japan'
language = 'en'
__author__ = 'Krittika Goyal'
oldest_article = 1 #days
max_articles_per_feed = 25
use_embedded_content = False
no_stylesheets = True
auto_cleanup = True
feeds = [
('News',
'http://newsonjapan.com/rss/top.xml'),
]


@ -23,7 +23,7 @@ class OSNewsRecipe(BasicNewsRecipe):
oldest_article = 7
max_articles_per_feed = 100
cover_url='http://osnews.pl/wp-content/themes/osnews/img/logo.png'
extra_css = '''
.news-heading {font-size:150%}
.newsinformations li {display:inline;}
@ -44,7 +44,9 @@ class OSNewsRecipe(BasicNewsRecipe):
dict(name = 'div', attrs = {'class' : 'sociable'}),
dict(name = 'div', attrs = {'class' : 'post_prev'}),
dict(name = 'div', attrs = {'class' : 'post_next'}),
dict(name = 'div', attrs = {'class' : 'clr'})
dict(name = 'div', attrs = {'class' : 'clr'}),
dict(name = 'div', attrs = {'class' : 'tw_button'}),
dict(name = 'div', attrs = {'style' : 'width:56px;height:60px;float:left;margin-right:10px'})
]
preprocess_regexps = [(re.compile(u'</span>Komentarze: \(?[0-9]+\)? ?<span'), lambda match: '</span><span')]


@ -8,13 +8,13 @@ radikal.com.tr
from calibre.web.feeds.news import BasicNewsRecipe
class Radikal_tr(BasicNewsRecipe):
title = 'Radikal - Turkey'
__author__ = 'Darko Miletic'
description = 'News from Turkey'
title = 'Radikal Ekleri'
__author__ = 'Darko Mileticden uyarlama'
description = 'Politic-Cultural Articles from Turkey'
publisher = 'radikal'
category = 'news, politics, Turkey'
oldest_article = 7
max_articles_per_feed = 150
oldest_article = 14
max_articles_per_feed = 120
no_stylesheets = True
encoding = 'cp1254'
use_embedded_content = False
@ -37,11 +37,9 @@ class Radikal_tr(BasicNewsRecipe):
feeds = [
(u'Yazarlar' , u'http://www.radikal.com.tr/d/rss/RssYazarlar.xml')
,(u'Turkiye' , u'http://www.radikal.com.tr/d/rss/Rss_97.xml' )
,(u'Politika' , u'http://www.radikal.com.tr/d/rss/Rss_98.xml' )
,(u'Dis Haberler', u'http://www.radikal.com.tr/d/rss/Rss_100.xml' )
,(u'Ekonomi' , u'http://www.radikal.com.tr/d/rss/Rss_101.xml' )
(u'Radikal Iki' , u'http://www.radikal.com.tr/d/rss/Rss_42.xml')
,(u'Radikal Hayat' , u'http://www.radikal.com.tr/d/rss/Rss_41.xml' )
,(u'Radikal Kitap' , u'http://www.radikal.com.tr/d/rss/Rss_40.xml' )
]
def print_version(self, url):

recipes/spiders_web_pl.png (new binary file, 605 B)


@ -0,0 +1,15 @@
from calibre.web.feeds.news import BasicNewsRecipe
class SpidersWeb(BasicNewsRecipe):
title = u"Spider's Web"
oldest_article = 7
__author__ = 'fenuks'
description = u''
cover_url = 'http://www.spidersweb.pl/wp-content/themes/spiderweb/img/Logo.jpg'
category = 'IT, WEB'
language = 'pl'
max_articles_per_feed = 100
remove_tags_before=dict(name="h1", attrs={'class':'Title'})
remove_tags_after=dict(name="div", attrs={'class':'Text'})
remove_tags=[dict(name='div', attrs={'class':['Tags', 'CommentCount FloatL', 'Show FloatL']})]
feeds = [(u'Wpisy', u'http://www.spidersweb.pl/feed')]


@ -3,7 +3,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''
Fetch sueddeutsche.
Fetch sueddeutsche.de
'''
from calibre.web.feeds.news import BasicNewsRecipe
@ -62,7 +62,7 @@ class Sueddeutsche(BasicNewsRecipe):
(u'Sport', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5ESport%24?output=rss'),
(u'Leben', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5ELeben%24?output=rss'),
(u'Karriere', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5EKarriere%24?output=rss'),
(u'M&uuml;nchen & Region', u'http://www.sueddeutsche.de/app/service/rss/ressort/muenchen/rss.xml'),
(u'München & Region', u'http://www.sueddeutsche.de/app/service/rss/ressort/muenchen/rss.xml'), # AGe 2011-11-13
(u'Bayern', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5EBayern%24?output=rss'),
(u'Medien', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5EMedien%24?output=rss'),
(u'Digital', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5EDigital%24?output=rss'),


@ -9,6 +9,7 @@ class Tablety_pl(BasicNewsRecipe):
language = 'pl'
oldest_article = 8
max_articles_per_feed = 100
keep_only_tags=[dict(name='header', attrs={'class':'entry-header'}), dict(name='div', attrs={'class':'entry-content clearfix'})]
remove_tags=[dict(name='div', attrs={'class':'snap_nopreview sharing robots-nocontent'}), dict(name='span', attrs={'class':'dsq-postid'})]
remove_tags_before=dict(name="h1", attrs={'class':'entry-title'})
remove_tags_after=dict(name="div", attrs={'class':'snap_nopreview sharing robots-nocontent'})
remove_tags=[dict(name='div', attrs={'class':'snap_nopreview sharing robots-nocontent'})]
feeds = [(u'Najnowsze posty', u'http://www.tablety.pl/feed/')]


@ -37,11 +37,13 @@ class TagesspiegelRSS(BasicNewsRecipe):
keep_only_tags = dict(name='div', attrs={'class':["hcf-article"]})
remove_tags = [
dict(name='link'), dict(name='iframe'),dict(name='style'),dict(name='meta'),dict(name='button'),
dict(name='div', attrs={'class':["hcf-jump-to-comments","hcf-clear","hcf-magnify hcf-media-control"] }),
dict(name='div', attrs={'class':["hcf-jump-to-comments","hcf-clear","hcf-magnify hcf-media-control",
"hcf-socials-widgets hcf-socials-top","hcf-socials-widgets hcf-socials-bottom"] }),
dict(name='span', attrs={'class':["hcf-mainsearch",] }),
dict(name='ul', attrs={'class':["hcf-tools"]}),
dict(name='ul', attrs={'class': re.compile('hcf-services')})
]
def parse_index(self):
soup = self.index_to_soup('http://www.tagesspiegel.de/zeitung/')


@ -2,8 +2,8 @@
__license__ = 'GPL v3'
__copyright__ = '4 February 2011, desUBIKado'
__author__ = 'desUBIKado'
__version__ = 'v0.05'
__date__ = '13, April 2011'
__version__ = 'v0.07'
__date__ = '13, November 2011'
'''
http://www.weblogssl.com/
'''
@ -33,6 +33,7 @@ class weblogssl(BasicNewsRecipe):
feeds = [
(u'Xataka', u'http://feeds.weblogssl.com/xataka2')
,(u'Xataka Mexico', u'http://feeds.weblogssl.com/xatakamx')
,(u'Xataka M\xf3vil', u'http://feeds.weblogssl.com/xatakamovil')
,(u'Xataka Android', u'http://feeds.weblogssl.com/xatakandroid')
,(u'Xataka Foto', u'http://feeds.weblogssl.com/xatakafoto')
@ -40,6 +41,7 @@ class weblogssl(BasicNewsRecipe):
,(u'Xataka Ciencia', u'http://feeds.weblogssl.com/xatakaciencia')
,(u'Genbeta', u'http://feeds.weblogssl.com/genbeta')
,(u'Genbeta Dev', u'http://feeds.weblogssl.com/genbetadev')
,(u'Genbeta Social Media', u'http://feeds.weblogssl.com/genbetasocialmedia')
,(u'Applesfera', u'http://feeds.weblogssl.com/applesfera')
,(u'Vida Extra', u'http://feeds.weblogssl.com/vidaextra')
,(u'Naci\xf3n Red', u'http://feeds.weblogssl.com/nacionred')
@ -51,7 +53,6 @@ class weblogssl(BasicNewsRecipe):
,(u'Pop rosa', u'http://feeds.weblogssl.com/poprosa')
,(u'Zona FandoM', u'http://feeds.weblogssl.com/zonafandom')
,(u'Fandemia', u'http://feeds.weblogssl.com/fandemia')
,(u'Noctamina', u'http://feeds.weblogssl.com/noctamina')
,(u'Tendencias', u'http://feeds.weblogssl.com/trendencias')
,(u'Beb\xe9s y m\xe1s', u'http://feeds.weblogssl.com/bebesymas')
,(u'Directo al paladar', u'http://feeds.weblogssl.com/directoalpaladar')
@ -60,8 +61,8 @@ class weblogssl(BasicNewsRecipe):
,(u'Embelezzia', u'http://feeds.weblogssl.com/embelezzia')
,(u'Vit\xf3nica', u'http://feeds.weblogssl.com/vitonica')
,(u'Ambiente G', u'http://feeds.weblogssl.com/ambienteg')
,(u'Arrebatadora', u'http://feeds.weblogssl.com/arrebatadora')
,(u'Mensencia', u'http://feeds.weblogssl.com/mensencia')
,(u'Tendencias Belleza', u'http://feeds.weblogssl.com/trendenciasbelleza')
,(u'Tendencias Hombre', u'http://feeds.weblogssl.com/trendenciashombre')
,(u'Peques y m\xe1s', u'http://feeds.weblogssl.com/pequesymas')
,(u'Motorpasi\xf3n', u'http://feeds.weblogssl.com/motorpasion')
,(u'Motorpasi\xf3n F1', u'http://feeds.weblogssl.com/motorpasionf1')
@ -69,7 +70,6 @@ class weblogssl(BasicNewsRecipe):
,(u'Motorpasi\xf3n Futuro', u'http://feeds.weblogssl.com/motorpasionfuturo')
,(u'Notas de futbol', u'http://feeds.weblogssl.com/notasdefutbol')
,(u'Fuera de l\xedmites', u'http://feeds.weblogssl.com/fueradelimites')
,(u'Salir a ganar', u'http://feeds.weblogssl.com/saliraganar')
,(u'El blog salm\xf3n', u'http://feeds.weblogssl.com/elblogsalmon2')
,(u'Pymes y aut\xf3nomos', u'http://feeds.weblogssl.com/pymesyautonomos')
,(u'Tecnolog\xeda Pyme', u'http://feeds.weblogssl.com/tecnologiapyme')
@ -105,3 +105,22 @@ class weblogssl(BasicNewsRecipe):
return soup
# To obtain the original article url from the "feedsportal" one
# The following code is courtesy of user "bosplans" of www.mobileread.com
# http://www.mobileread.com/forums/showthread.php?t=130297
def get_article_url(self, article):
link = article.get('link', None)
if link is None:
return article
if link.split('/')[-1]=="story01.htm":
link=link.split('/')[-2]
a=['0B','0C','0D','0E','0F','0G','0N' ,'0L0S','0A']
b=['.' ,'/' ,'?' ,'-' ,'=' ,'&' ,'.com','www.','0']
for i in range(0,len(a)):
link=link.replace(a[i],b[i])
link="http://"+link
return link
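
A quick worked example of the decoding above (a standalone sketch, not part of the commit; the feedsportal-style link is invented purely for illustration):

# Sketch: applying the same substitution table to a made-up feedsportal link
link = 'http://rss.feedsportal.com/c/0/f/0/s/0/0L0Sexample0N0Cnews0C20A11/story01.htm'
if link.split('/')[-1] == 'story01.htm':
    link = link.split('/')[-2]
    a = ['0B','0C','0D','0E','0F','0G','0N','0L0S','0A']
    b = ['.' ,'/' ,'?' ,'-' ,'=' ,'&' ,'.com','www.','0']
    for i in range(len(a)):
        link = link.replace(a[i], b[i])
    link = 'http://' + link
print link   # -> http://www.example.com/news/2011
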


@ -16,6 +16,7 @@ class ZAOBAO(BasicNewsRecipe):
recursions = 1
language = 'zh'
encoding = 'gbk'
masthead_url = 'http://www.zaobao.com/ssi/images1/zblogo_original.gif'
# multithreaded_fetch = True
keep_only_tags = [


@ -141,21 +141,33 @@ sort_columns_at_startup = None
#: Control how dates are displayed
# Format to be used for publication date and the timestamp (date).
# A string controlling how the publication date is displayed in the GUI
# d the day as number without a leading zero (1 to 31)
# d the day as number without a leading zero (1 to 31)
# dd the day as number with a leading zero (01 to 31)
# ddd the abbreviated localized day name (e.g. 'Mon' to 'Sun').
# dddd the long localized day name (e.g. 'Monday' to 'Qt::Sunday').
# M the month as number without a leading zero (1-12)
# ddd the abbreviated localized day name (e.g. 'Mon' to 'Sun').
# dddd the long localized day name (e.g. 'Monday' to 'Qt::Sunday').
# M the month as number without a leading zero (1-12)
# MM the month as number with a leading zero (01-12)
# MMM the abbreviated localized month name (e.g. 'Jan' to 'Dec').
# MMMM the long localized month name (e.g. 'January' to 'December').
# MMM the abbreviated localized month name (e.g. 'Jan' to 'Dec').
# MMMM the long localized month name (e.g. 'January' to 'December').
# yy the year as two digit number (00-99)
# yyyy the year as four digit number
# yyyy the year as four digit number
# h the hours without a leading 0 (0 to 11 or 0 to 23, depending on am/pm) '
# hh the hours with a leading 0 (00 to 11 or 00 to 23, depending on am/pm) '
# m the minutes without a leading 0 (0 to 59) '
# mm the minutes with a leading 0 (00 to 59) '
# s the seconds without a leading 0 (0 to 59) '
# ss the seconds with a leading 0 (00 to 59) '
# ap use a 12-hour clock instead of a 24-hour clock, with "ap"
# replaced by the localized string for am or pm '
# AP use a 12-hour clock instead of a 24-hour clock, with "AP"
# replaced by the localized string for AM or PM '
# iso the date with time and timezone. Must be the only format present
# For example, given the date of 9 Jan 2010, the following formats show
# MMM yyyy ==> Jan 2010 yyyy ==> 2010 dd MMM yyyy ==> 09 Jan 2010
# MM/yyyy ==> 01/2010 d/M/yy ==> 9/1/10 yy ==> 10
# publication default if not set: MMM yyyy
# timestamp default if not set: dd MMM yyyy
# last_modified_display_format if not set: dd MMM yyyy
gui_pubdate_display_format = 'MMM yyyy'
gui_timestamp_display_format = 'dd MMM yyyy'
gui_last_modified_display_format = 'dd MMM yyyy'
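
These are Qt-style date format strings, so the mapping can be checked directly; a minimal sketch, assuming a PyQt4 environment (the toolkit calibre of this era is built on):

# Sketch: rendering 9 Jan 2010 with the formats documented above
from PyQt4.QtCore import QDate
d = QDate(2010, 1, 9)
print d.toString('MMM yyyy')      # Jan 2010
print d.toString('dd MMM yyyy')   # 09 Jan 2010
print d.toString('d/M/yy')        # 9/1/10
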


@ -36,6 +36,14 @@
<div class="cbj_footer">{footer}</div>
</div>
<hr class="cbj_kindle_banner_hr" />
<!--
In addition you can add code to show the values of custom columns here.
The value is available as _column_name and the title as _column_name_label.
For example, if you have a custom column with label #genre, you can add it to
this template with:
<div>{_genre_label}: {_genre}</div>
-->
<div class="cbj_comments">{comments}</div>
</body>
</html>


@ -12,14 +12,14 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-09-27 14:31+0000\n"
"PO-Revision-Date: 2011-11-04 23:01+0000\n"
"PO-Revision-Date: 2011-11-13 15:24+0000\n"
"Last-Translator: Ferran Rius <frius64@hotmail.com>\n"
"Language-Team: Catalan <linux@softcatala.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-11-06 05:23+0000\n"
"X-Generator: Launchpad (build 14231)\n"
"X-Launchpad-Export-Date: 2011-11-14 05:15+0000\n"
"X-Generator: Launchpad (build 14277)\n"
"Language: ca\n"
#. name for aaa
@ -8572,43 +8572,43 @@ msgstr "Guntai"
#. name for gnu
msgid "Gnau"
msgstr ""
msgstr "Gnau"
#. name for gnw
msgid "Guaraní; Western Bolivian"
msgstr ""
msgstr "Guaraní; bolivià occidental"
#. name for gnz
msgid "Ganzi"
msgstr ""
msgstr "Ganzi"
#. name for goa
msgid "Guro"
msgstr ""
msgstr "Guro"
#. name for gob
msgid "Playero"
msgstr ""
msgstr "Playero"
#. name for goc
msgid "Gorakor"
msgstr ""
msgstr "Gorakor"
#. name for god
msgid "Godié"
msgstr ""
msgstr "Godié"
#. name for goe
msgid "Gongduk"
msgstr ""
msgstr "Gongduk"
#. name for gof
msgid "Gofa"
msgstr ""
msgstr "Gofa"
#. name for gog
msgid "Gogo"
msgstr ""
msgstr "Gogo"
#. name for goh
msgid "German; Old High (ca. 750-1050)"
@ -8616,23 +8616,23 @@ msgstr "Alt alemany; antic (ca. 750-1050)"
#. name for goi
msgid "Gobasi"
msgstr ""
msgstr "Gobasi"
#. name for goj
msgid "Gowlan"
msgstr ""
msgstr "Gowlan"
#. name for gok
msgid "Gowli"
msgstr ""
msgstr "Gowli"
#. name for gol
msgid "Gola"
msgstr ""
msgstr "Gola"
#. name for gom
msgid "Konkani; Goan"
msgstr ""
msgstr "Konkani; goanès"
#. name for gon
msgid "Gondi"
@ -8640,71 +8640,71 @@ msgstr "Gondi"
#. name for goo
msgid "Gone Dau"
msgstr ""
msgstr "Gone Dau"
#. name for gop
msgid "Yeretuar"
msgstr ""
msgstr "Yeretuar"
#. name for goq
msgid "Gorap"
msgstr ""
msgstr "Gorap"
#. name for gor
msgid "Gorontalo"
msgstr ""
msgstr "Gorontalo"
#. name for gos
msgid "Gronings"
msgstr ""
msgstr "Gronings"
#. name for got
msgid "Gothic"
msgstr ""
msgstr "Gòtic"
#. name for gou
msgid "Gavar"
msgstr ""
msgstr "Gavar"
#. name for gow
msgid "Gorowa"
msgstr ""
msgstr "Gorowa"
#. name for gox
msgid "Gobu"
msgstr ""
msgstr "Gobu"
#. name for goy
msgid "Goundo"
msgstr ""
msgstr "Goundo"
#. name for goz
msgid "Gozarkhani"
msgstr ""
msgstr "Gozarkhani"
#. name for gpa
msgid "Gupa-Abawa"
msgstr ""
msgstr "Gupa-Abawa"
#. name for gpn
msgid "Taiap"
msgstr ""
msgstr "Taiap"
#. name for gqa
msgid "Ga'anda"
msgstr ""
msgstr "Gaanda"
#. name for gqi
msgid "Guiqiong"
msgstr ""
msgstr "Guiqiong"
#. name for gqn
msgid "Guana (Brazil)"
msgstr ""
msgstr "Guana (Brasil)"
#. name for gqr
msgid "Gor"
msgstr ""
msgstr "Gor"
#. name for gra
msgid "Garasia; Rajput"
@ -8720,19 +8720,19 @@ msgstr "Grec antic (fins el 1453)"
#. name for grd
msgid "Guruntum-Mbaaru"
msgstr ""
msgstr "Guruntum"
#. name for grg
msgid "Madi"
msgstr ""
msgstr "Madi"
#. name for grh
msgid "Gbiri-Niragu"
msgstr ""
msgstr "Gbiri-Niragu"
#. name for gri
msgid "Ghari"
msgstr ""
msgstr "Ghari"
#. name for grj
msgid "Grebo; Southern"
@ -8740,35 +8740,35 @@ msgstr "Grebo; meridional"
#. name for grm
msgid "Kota Marudu Talantang"
msgstr ""
msgstr "Kota Marudu; Talantang"
#. name for grn
msgid "Guarani"
msgstr "guaraní"
msgstr "Guaraní"
#. name for gro
msgid "Groma"
msgstr ""
msgstr "Groma"
#. name for grq
msgid "Gorovu"
msgstr ""
msgstr "Gorovu"
#. name for grr
msgid "Taznatit"
msgstr ""
msgstr "Taznatit"
#. name for grs
msgid "Gresi"
msgstr ""
msgstr "Gresi"
#. name for grt
msgid "Garo"
msgstr ""
msgstr "Garo"
#. name for gru
msgid "Kistane"
msgstr ""
msgstr "Gurage; septentrional"
#. name for grv
msgid "Grebo; Central"
@ -8776,11 +8776,11 @@ msgstr "Grebo; central"
#. name for grw
msgid "Gweda"
msgstr ""
msgstr "Gweda"
#. name for grx
msgid "Guriaso"
msgstr ""
msgstr "Guriaso"
#. name for gry
msgid "Grebo; Barclayville"
@ -8788,7 +8788,7 @@ msgstr "Grebo; Barclayville"
#. name for grz
msgid "Guramalum"
msgstr ""
msgstr "Guramalum"
#. name for gse
msgid "Ghanaian Sign Language"
@ -8800,7 +8800,7 @@ msgstr "Llenguatge de signes alemany"
#. name for gsl
msgid "Gusilay"
msgstr ""
msgstr "Gusilay"
#. name for gsm
msgid "Guatemalan Sign Language"
@ -8808,7 +8808,7 @@ msgstr "Llenguatge de signes guatemaltec"
#. name for gsn
msgid "Gusan"
msgstr ""
msgstr "Gusan"
#. name for gso
msgid "Gbaya; Southwest"
@ -8816,7 +8816,7 @@ msgstr "Gbaya; Sudoccidental"
#. name for gsp
msgid "Wasembo"
msgstr ""
msgstr "Wasembo"
#. name for gss
msgid "Greek Sign Language"
@ -8828,23 +8828,23 @@ msgstr "Alemany; suís"
#. name for gta
msgid "Guató"
msgstr ""
msgstr "Guató"
#. name for gti
msgid "Gbati-ri"
msgstr ""
msgstr "Gbati-ri"
#. name for gua
msgid "Shiki"
msgstr ""
msgstr "Shiki"
#. name for gub
msgid "Guajajára"
msgstr ""
msgstr "Guajajara"
#. name for guc
msgid "Wayuu"
msgstr ""
msgstr "Guajiro"
#. name for gud
msgid "Dida; Yocoboué"
@ -8852,23 +8852,23 @@ msgstr "Dida; Yocoboué"
#. name for gue
msgid "Gurinji"
msgstr ""
msgstr "Gurindji"
#. name for guf
msgid "Gupapuyngu"
msgstr ""
msgstr "Gupapuyngu"
#. name for gug
msgid "Guaraní; Paraguayan"
msgstr ""
msgstr "Guaraní; paraguaià"
#. name for guh
msgid "Guahibo"
msgstr ""
msgstr "Guahibo"
#. name for gui
msgid "Guaraní; Eastern Bolivian"
msgstr ""
msgstr "Guaraní; bolivià oriental"
#. name for guj
msgid "Gujarati"
@ -8876,7 +8876,7 @@ msgstr "gujarati"
#. name for guk
msgid "Gumuz"
msgstr ""
msgstr "Gumús"
#. name for gul
msgid "Creole English; Sea Island"
@ -8884,27 +8884,27 @@ msgstr "Anglès crioll; Sea Island"
#. name for gum
msgid "Guambiano"
msgstr ""
msgstr "Guambià"
#. name for gun
msgid "Guaraní; Mbyá"
msgstr ""
msgstr "Guaraní; Mbyà"
#. name for guo
msgid "Guayabero"
msgstr ""
msgstr "Guayabero"
#. name for gup
msgid "Gunwinggu"
msgstr ""
msgstr "Gunwinggu"
#. name for guq
msgid "Aché"
msgstr ""
msgstr "Aché"
#. name for gur
msgid "Farefare"
msgstr ""
msgstr "Gurenne"
#. name for gus
msgid "Guinean Sign Language"
@ -8912,67 +8912,67 @@ msgstr "Llenguatge de signes guineà"
#. name for gut
msgid "Maléku Jaíka"
msgstr ""
msgstr "Guatuso"
#. name for guu
msgid "Yanomamö"
msgstr ""
msgstr "Guaharibo"
#. name for guv
msgid "Gey"
msgstr ""
msgstr "Gey"
#. name for guw
msgid "Gun"
msgstr ""
msgstr "Gun-Gbe"
#. name for gux
msgid "Gourmanchéma"
msgstr ""
msgstr "Gourmanchéma"
#. name for guz
msgid "Gusii"
msgstr ""
msgstr "Gusí"
#. name for gva
msgid "Guana (Paraguay)"
msgstr ""
msgstr "Guana (Paraguai)"
#. name for gvc
msgid "Guanano"
msgstr ""
msgstr "Guanano"
#. name for gve
msgid "Duwet"
msgstr ""
msgstr "Duwet"
#. name for gvf
msgid "Golin"
msgstr ""
msgstr "Golin"
#. name for gvj
msgid "Guajá"
msgstr ""
msgstr "Guajà"
#. name for gvl
msgid "Gulay"
msgstr ""
msgstr "Gulay"
#. name for gvm
msgid "Gurmana"
msgstr ""
msgstr "Gurmana"
#. name for gvn
msgid "Kuku-Yalanji"
msgstr ""
msgstr "Kuku; Yalanji"
#. name for gvo
msgid "Gavião Do Jiparaná"
msgstr ""
msgstr "Gaviao Jiparanà"
#. name for gvp
msgid "Gavião; Pará"
msgstr ""
msgstr "Gaviao Parà"
#. name for gvr
msgid "Gurung; Western"
@ -8980,75 +8980,75 @@ msgstr "Gurung; occidental"
#. name for gvs
msgid "Gumawana"
msgstr ""
msgstr "Gumawana"
#. name for gvy
msgid "Guyani"
msgstr ""
msgstr "Guyani"
#. name for gwa
msgid "Mbato"
msgstr ""
msgstr "Mbato"
#. name for gwb
msgid "Gwa"
msgstr ""
msgstr "Gwa"
#. name for gwc
msgid "Kalami"
msgstr ""
msgstr "Kalami"
#. name for gwd
msgid "Gawwada"
msgstr ""
msgstr "Gawwada"
#. name for gwe
msgid "Gweno"
msgstr ""
msgstr "Gweno"
#. name for gwf
msgid "Gowro"
msgstr ""
msgstr "Gowro"
#. name for gwg
msgid "Moo"
msgstr ""
msgstr "Moo"
#. name for gwi
msgid "Gwichʼin"
msgstr ""
msgstr "Gwichin"
#. name for gwj
msgid "/Gwi"
msgstr ""
msgstr "Gwi"
#. name for gwn
msgid "Gwandara"
msgstr ""
msgstr "Gwandara"
#. name for gwr
msgid "Gwere"
msgstr ""
msgstr "Gwere"
#. name for gwt
msgid "Gawar-Bati"
msgstr ""
msgstr "Gawar-Bati"
#. name for gwu
msgid "Guwamu"
msgstr ""
msgstr "Guwamu"
#. name for gww
msgid "Kwini"
msgstr ""
msgstr "Goonan"
#. name for gwx
msgid "Gua"
msgstr ""
msgstr "Gua"
#. name for gxx
msgid "Wè Southern"
msgstr ""
msgstr "We; meridional"
#. name for gya
msgid "Gbaya; Northwest"
@ -9056,35 +9056,35 @@ msgstr "Gbaya; Nordoccidental"
#. name for gyb
msgid "Garus"
msgstr ""
msgstr "Garus"
#. name for gyd
msgid "Kayardild"
msgstr ""
msgstr "Gayardilt"
#. name for gye
msgid "Gyem"
msgstr ""
msgstr "Gyem"
#. name for gyf
msgid "Gungabula"
msgstr ""
msgstr "Gungabula"
#. name for gyg
msgid "Gbayi"
msgstr ""
msgstr "Gbayi"
#. name for gyi
msgid "Gyele"
msgstr ""
msgstr "Gyele"
#. name for gyl
msgid "Gayil"
msgstr ""
msgstr "Galila"
#. name for gym
msgid "Ngäbere"
msgstr ""
msgstr "Ngabere"
#. name for gyn
msgid "Creole English; Guyanese"
@ -9092,27 +9092,27 @@ msgstr "Creole English; Guyana"
#. name for gyr
msgid "Guarayu"
msgstr ""
msgstr "Guaraiú"
#. name for gyy
msgid "Gunya"
msgstr ""
msgstr "Gunya"
#. name for gza
msgid "Ganza"
msgstr ""
msgstr "Ganza"
#. name for gzi
msgid "Gazi"
msgstr ""
msgstr "Gazi"
#. name for gzn
msgid "Gane"
msgstr ""
msgstr "Gane"
#. name for haa
msgid "Han"
msgstr ""
msgstr "Han"
#. name for hab
msgid "Hanoi Sign Language"
@ -9120,11 +9120,11 @@ msgstr "Llenguatge de signes de Hanoi"
#. name for hac
msgid "Gurani"
msgstr ""
msgstr "Hawrami"
#. name for had
msgid "Hatam"
msgstr ""
msgstr "Hatam"
#. name for hae
msgid "Oromo; Eastern"
@ -9136,19 +9136,19 @@ msgstr "Llenguatge de signes Haipong"
#. name for hag
msgid "Hanga"
msgstr ""
msgstr "Hanga"
#. name for hah
msgid "Hahon"
msgstr ""
msgstr "Hahon"
#. name for hai
msgid "Haida"
msgstr ""
msgstr "Haida"
#. name for haj
msgid "Hajong"
msgstr ""
msgstr "Hajong"
#. name for hak
msgid "Chinese; Hakka"
@ -9156,11 +9156,11 @@ msgstr "Xinès; Hakka"
#. name for hal
msgid "Halang"
msgstr ""
msgstr "Halang"
#. name for ham
msgid "Hewa"
msgstr ""
msgstr "Hewa"
#. name for han
msgid "Hangaza"
@ -18216,7 +18216,7 @@ msgstr ""
#. name for nhd
msgid "Guaraní; Ava"
msgstr ""
msgstr "Guaraní; Ava"
#. name for nhe
msgid "Nahuatl; Eastern Huasteca"
@ -22916,7 +22916,7 @@ msgstr ""
#. name for sgw
msgid "Sebat Bet Gurage"
msgstr ""
msgstr "Gurage; occidental"
#. name for sgx
msgid "Sierra Leone Sign Language"
@ -26588,7 +26588,7 @@ msgstr ""
#. name for ugb
msgid "Kuku-Ugbanh"
msgstr ""
msgstr "Kuku; Ugbanh"
#. name for uge
msgid "Ughele"
@ -26984,7 +26984,7 @@ msgstr ""
#. name for uwa
msgid "Kuku-Uwanh"
msgstr ""
msgstr "Kuku; Uwanh"
#. name for uya
msgid "Doko-Uyanga"
@ -27564,7 +27564,7 @@ msgstr ""
#. name for wec
msgid "Wè Western"
msgstr ""
msgstr "We; occidental"
#. name for wed
msgid "Wedau"
@ -27932,7 +27932,7 @@ msgstr ""
#. name for wob
msgid "Wè Northern"
msgstr ""
msgstr "We; septentrional"
#. name for woc
msgid "Wogeo"
@ -28716,7 +28716,7 @@ msgstr ""
#. name for xmh
msgid "Kuku-Muminh"
msgstr ""
msgstr "Kuku; Muminh"
#. name for xmj
msgid "Majera"
@ -28744,11 +28744,11 @@ msgstr ""
#. name for xmp
msgid "Kuku-Mu'inh"
msgstr ""
msgstr "Kuku; Mu'inh"
#. name for xmq
msgid "Kuku-Mangk"
msgstr ""
msgstr "Kuku; Mangk"
#. name for xmr
msgid "Meroitic"


@ -9,13 +9,13 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-09-27 14:31+0000\n"
"PO-Revision-Date: 2011-11-10 07:13+0000\n"
"PO-Revision-Date: 2011-11-12 07:52+0000\n"
"Last-Translator: Devilinside <Unknown>\n"
"Language-Team: Hungarian <debian-l10n-hungarian@lists.d.o>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-11-11 04:52+0000\n"
"X-Launchpad-Export-Date: 2011-11-13 05:48+0000\n"
"X-Generator: Launchpad (build 14277)\n"
"X-Poedit-Country: HUNGARY\n"
"Language: hu\n"
@ -4969,7 +4969,7 @@ msgstr ""
#. name for cha
msgid "Chamorro"
msgstr "chamorro"
msgstr "csamorro"
#. name for chb
msgid "Chibcha"
@ -19625,7 +19625,7 @@ msgstr ""
#. name for oco
msgid "Cornish; Old"
msgstr ""
msgstr "cornwalli; ócornwalli"
#. name for ocu
msgid "Matlatzinca; Atzingo"


@ -16,7 +16,7 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-11-11 04:53+0000\n"
"X-Launchpad-Export-Date: 2011-11-12 04:48+0000\n"
"X-Generator: Launchpad (build 14277)\n"
"Language: tr\n"


@ -6,7 +6,7 @@ __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, re, cStringIO, base64, httplib, subprocess, hashlib, shutil, time, \
glob, stat
glob, stat, sys
from subprocess import check_call
from tempfile import NamedTemporaryFile, mkdtemp
from zipfile import ZipFile
@ -58,6 +58,47 @@ class ReUpload(Command): # {{{
os.remove(x)
# }}}
class ReadFileWithProgressReporting(file): # {{{
def __init__(self, path, mode='rb'):
file.__init__(self, path, mode)
self.seek(0, os.SEEK_END)
self._total = self.tell()
self.seek(0)
self.start_time = time.time()
def __len__(self):
return self._total
def read(self, size):
data = file.read(self, size)
if data:
self.report_progress(len(data))
return data
def report_progress(self, size):
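# The ANSI escapes below redraw the progress line in place: \x1b[s saves the cursor
# position, \x1b[K clears to the end of the line and \x1b[u restores the saved position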
sys.stdout.write(b'\x1b[s')
sys.stdout.write(b'\x1b[K')
frac = float(self.tell())/self._total
mb_pos = self.tell()/float(1024**2)
mb_tot = self._total/float(1024**2)
kb_pos = self.tell()/1024.0
kb_rate = kb_pos/(time.time()-self.start_time)
bit_rate = kb_rate * 1024
eta = int((self._total - self.tell())/bit_rate) + 1
eta_m, eta_s = eta / 60, eta % 60
sys.stdout.write(
' %.1f%% %.1f/%.1fMB %.1f KB/sec %d minutes, %d seconds left'%(
frac*100, mb_pos, mb_tot, kb_rate, eta_m, eta_s))
sys.stdout.write(b'\x1b[u')
if self.tell() >= self._total:
sys.stdout.write('\n')
t = int(time.time() - self.start_time) + 1
print ('Upload took %d minutes and %d seconds at %.1f KB/sec' % (
t/60, t%60, kb_rate))
sys.stdout.flush()
# }}}
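
This works because Python 2's httplib accepts a file-like request body: it takes the Content-Length from len(body) when that is available and sends the body by calling read() in blocks, so overriding read() is enough to hook in progress reporting. A minimal standalone sketch (the path and host below are hypothetical):

# Sketch: streaming an arbitrary local file through the progress reporter above
import httplib  # already imported at the top of this file; repeated so the sketch stands alone
body = ReadFileWithProgressReporting('/tmp/example.tar.gz')   # hypothetical path
conn = httplib.HTTPSConnection('upload.example.com')          # hypothetical host
conn.request('POST', '/upload', body, {'Content-Type': 'application/octet-stream'})
resp = conn.getresponse()
print resp.status, resp.reason
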
class UploadToGoogleCode(Command): # {{{
USERNAME = 'kovidgoyal'
@ -92,7 +133,7 @@ class UploadToGoogleCode(Command): # {{{
self.upload_one(src)
def upload_one(self, fname):
self.info('Uploading', fname)
self.info('\nUploading', fname)
typ = 'Type-' + ('Source' if fname.endswith('.gz') else 'Archive' if
fname.endswith('.zip') else 'Installer')
ext = os.path.splitext(fname)[1][1:]
@ -102,7 +143,7 @@ class UploadToGoogleCode(Command): # {{{
start = time.time()
path = self.upload(os.path.abspath(fname), desc,
labels=[typ, op, 'Featured'])
self.info('\tUploaded to:', path, 'in', int(time.time() - start),
self.info('Uploaded to:', path, 'in', int(time.time() - start),
'seconds')
return path
@ -198,9 +239,8 @@ class UploadToGoogleCode(Command): # {{{
# Now add the file itself
file_name = os.path.basename(file_path)
f = open(file_path, 'rb')
file_content = f.read()
f.close()
with open(file_path, 'rb') as f:
file_content = f.read()
body.extend(
['--' + BOUNDARY,
@ -230,10 +270,17 @@ class UploadToGoogleCode(Command): # {{{
'Content-Type': content_type,
}
server = httplib.HTTPSConnection(self.UPLOAD_HOST)
server.request('POST', upload_uri, body, headers)
resp = server.getresponse()
server.close()
with NamedTemporaryFile(delete=False) as f:
f.write(body)
try:
body = ReadFileWithProgressReporting(f.name)
server = httplib.HTTPSConnection(self.UPLOAD_HOST)
server.request('POST', upload_uri, body, headers)
resp = server.getresponse()
server.close()
finally:
os.remove(f.name)
if resp.status == 201:
return resp.getheader('Location')
@ -265,7 +312,7 @@ class UploadToSourceForge(Command): # {{{
if not os.path.exists(x): continue
start = time.time()
self.info('Uploading', x)
check_call(['rsync', '-v', '-e', 'ssh -x', x,
check_call(['rsync', '-z', '--progress', '-e', 'ssh -x', x,
'%s,%s@frs.sourceforge.net:%s'%(self.USERNAME, self.PROJECT,
self.rdir+'/')])
print 'Uploaded in', int(time.time() - start), 'seconds'
@ -376,7 +423,8 @@ class UploadUserManual(Command): # {{{
for x in glob.glob(self.j(path, '*')):
self.build_plugin_example(x)
check_call(' '.join(['scp', '-r', 'src/calibre/manual/.build/html/*',
check_call(' '.join(['rsync', '-z', '-r', '--progress',
'src/calibre/manual/.build/html/',
'bugs:%s'%USER_MANUAL]), shell=True)
# }}}
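The upload path above works because httplib will stream any body object that exposes read() and __len__(); wrapping the payload in a thin file subclass is all that is needed to get progress reporting. A minimal standalone sketch of the same pattern (Python 2, with a hypothetical non-empty payload file and host, not the calibre upload code itself):

import os, sys, httplib

class ProgressFile(file):
    # thin wrapper: report how much of the file has been read so far
    def __init__(self, path, mode='rb'):
        file.__init__(self, path, mode)
        self.seek(0, os.SEEK_END)
        self._total = self.tell()
        self.seek(0)

    def __len__(self):
        # lets httplib derive the Content-Length header from len(body)
        return self._total

    def read(self, size):
        data = file.read(self, size)
        if data:
            sys.stdout.write('\r%.1f%% uploaded' % (100.0*self.tell()/self._total))
            sys.stdout.flush()
        return data

body = ProgressFile('payload.bin')                      # hypothetical file
conn = httplib.HTTPSConnection('upload.example.com')    # hypothetical host
conn.request('POST', '/upload', body,
        {'Content-Type': 'application/octet-stream'})
resp = conn.getresponse()
print '\nHTTP status:', resp.status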

View File

@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
numeric_version = (0, 8, 26)
numeric_version = (0, 8, 27)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

View File

@ -554,7 +554,8 @@ from calibre.devices.eb600.driver import (EB600, COOL_ER, SHINEBOOK,
from calibre.devices.iliad.driver import ILIAD
from calibre.devices.irexdr.driver import IREXDR1000, IREXDR800
from calibre.devices.jetbook.driver import JETBOOK, MIBUK, JETBOOK_MINI
from calibre.devices.kindle.driver import KINDLE, KINDLE2, KINDLE_DX
from calibre.devices.kindle.driver import (KINDLE, KINDLE2, KINDLE_DX,
KINDLE_FIRE)
from calibre.devices.nook.driver import NOOK, NOOK_COLOR
from calibre.devices.prs505.driver import PRS505
from calibre.devices.prst1.driver import PRST1
@ -656,9 +657,7 @@ plugins += [
MIBUK,
SHINEBOOK,
POCKETBOOK360, POCKETBOOK301, POCKETBOOK602, POCKETBOOK701, POCKETBOOK360P,
KINDLE,
KINDLE2,
KINDLE_DX,
KINDLE, KINDLE2, KINDLE_DX, KINDLE_FIRE,
NOOK, NOOK_COLOR,
PRS505, PRST1,
ANDROID, S60, WEBOS,

View File

@ -652,6 +652,15 @@ class KindleDXOutput(OutputProfile):
return u'%s <br/><span style="color: white">%s</span>' % (', '.join(tags),
'ttt '.join(tags)+'ttt ')
class KindleFireOutput(KindleDXOutput):
name = 'Kindle Fire'
short_name = 'kindle_fire'
description = _('This profile is intended for the Amazon Kindle Fire.')
screen_size = (570, 1016)
dpi = 169.0
comic_screen_size = (570, 1016)
class IlliadOutput(OutputProfile):

View File

@ -106,7 +106,7 @@ class ANDROID(USBMS):
0x61c5 : [0x100, 0x226, 0x9999],
0x61cc : [0x100],
0x61ce : [0x100],
0x618e : [0x226, 0x9999, 0x100]
0x618e : [0x226, 0x227, 0x9999, 0x100]
},
# Archos

View File

@ -377,3 +377,24 @@ class KINDLE_DX(KINDLE2):
PRODUCT_ID = [0x0003]
BCD = [0x0100]
class KINDLE_FIRE(KINDLE2):
name = 'Kindle Fire Device Interface'
description = _('Communicate with the Kindle Fire')
gui_name = 'Fire'
PRODUCT_ID = [0x0006]
BCD = [0x216, 0x100]
EBOOK_DIR_MAIN = 'Documents'
SUPPORTS_SUB_DIRS = False
VENDOR_NAME = 'AMAZON'
WINDOWS_MAIN_MEM = 'KINDLE'
def get_main_ebook_dir(self, for_upload=False):
if for_upload:
return self.EBOOK_DIR_MAIN
return ''

View File

@ -83,10 +83,10 @@ class NOOK(USBMS):
class NOOK_COLOR(NOOK):
description = _('Communicate with the Nook Color and TSR eBook readers.')
PRODUCT_ID = [0x002, 0x003]
PRODUCT_ID = [0x002, 0x003, 0x004]
BCD = [0x216]
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'EBOOK_DISK'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['EBOOK_DISK', 'NOOK_TABLET']
EBOOK_DIR_MAIN = 'My Files'
NEWS_IN_FOLDER = False
@ -105,4 +105,3 @@ class NOOK_COLOR(NOOK):
return USBMS.create_upload_path(self, path, mdata, fname,
create_dirs=create_dirs)

View File

@ -13,9 +13,11 @@ Device driver for the SONY T1 devices
import os, time, re
import sqlite3 as sqlite
from sqlite3 import DatabaseError
from contextlib import closing
from datetime import date
from calibre.devices.errors import DeviceError
from calibre.devices.usbms.driver import USBMS, debug_print
from calibre.devices.usbms.device import USBDevice
from calibre.devices.usbms.books import CollectionsBookList
@ -275,11 +277,19 @@ class PRST1(USBMS):
refresh_covers = opts.extra_customization[self.OPT_REFRESH_COVERS]
use_sony_authors = opts.extra_customization[self.OPT_USE_SONY_AUTHORS]
cursor = connection.cursor()
try:
cursor = connection.cursor()
# Get existing books
query = 'SELECT file_path, _id FROM books'
cursor.execute(query)
# Get existing books
query = 'SELECT file_path, _id FROM books'
cursor.execute(query)
except DatabaseError:
raise DeviceError('The SONY database is corrupted. '
' Delete the file %s on your reader and then disconnect '
' and reconnect it. If you are using an SD card, you '
' should delete the file on the card as well. Note that '
' deleting this file may cause your reader to forget '
' any notes/highlights, etc.')
db_books = {}
for i, row in enumerate(cursor):

View File

@ -134,7 +134,7 @@ def add_pipeline_options(parser, plumber):
'font_size_mapping',
'line_height', 'minimum_line_height',
'linearize_tables',
'extra_css',
'extra_css', 'filter_css',
'smarten_punctuation', 'unsmarten_punctuation',
'margin_top', 'margin_left', 'margin_right',
'margin_bottom', 'change_justification',

View File

@ -308,6 +308,16 @@ OptionRecommendation(name='extra_css',
'rules.')
),
OptionRecommendation(name='filter_css',
recommended_value=None, level=OptionRecommendation.LOW,
help=_('A comma separated list of CSS properties that '
'will be removed from all CSS style rules. This is useful '
'if the presence of some style information prevents it '
'from being overridden on your device. '
'For example: '
'font-family,color,margin-left,margin-right')
),
OptionRecommendation(name='page_breaks_before',
recommended_value="//*[name()='h1' or name()='h2']",
level=OptionRecommendation.LOW,
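For a quick sense of how the new option is used, here is a hedged sketch that assumes it surfaces on the ebook-convert command line as --filter-css, following calibre's usual underscore-to-hyphen mapping for conversion options:

import subprocess

# strip publisher fonts and colors so device/user styles can take over
subprocess.check_call([
    'ebook-convert', 'book.epub', 'book.mobi',
    '--filter-css', 'font-family,color,background-color',
])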

View File

@ -13,7 +13,7 @@ Input plugin for HTML or OPF ebooks.
import os, re, sys, uuid, tempfile, errno as gerrno
from urlparse import urlparse, urlunparse
from urllib import unquote
from urllib import unquote, quote
from functools import partial
from itertools import izip
@ -468,7 +468,10 @@ class HTMLInput(InputFormatPlugin):
self.oeb.log, ignore_opf=True)
# Load into memory
item = self.oeb.manifest.add(id, href, media_type)
item.html_input_href = bhref
# bhref refers to an already existing file. The read() method of
# DirContainer will call unquote on it before trying to read the
# file, therefore we quote it here.
item.html_input_href = quote(bhref)
if guessed in self.OEB_STYLES:
item.override_css_fetch = partial(
self.css_import_handler, os.path.dirname(link))
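The point of the new comment is easiest to see with a filename that happens to contain percent-escapes (an illustrative name, not taken from the linked ticket):

from urllib import quote, unquote

bhref = 'cover%20art.html'               # literal "%20" in the on-disk name
print unquote(bhref)                     # 'cover art.html' -- the wrong file
print unquote(quote(bhref)) == bhref     # True: quoting first survives read()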

View File

@ -12,7 +12,7 @@ from urllib import urlencode
from threading import Thread
from Queue import Queue, Empty
from lxml.html import soupparser, tostring
from lxml.html import tostring
from calibre import as_unicode
from calibre.ebooks.metadata import check_isbn
@ -23,6 +23,7 @@ from calibre.ebooks.metadata.book.base import Metadata
from calibre.library.comments import sanitize_comments_html
from calibre.utils.date import parse_date
from calibre.utils.localization import canonicalize_lang
from calibre.utils.soupparser import fromstring
class Worker(Thread): # Get details {{{
@ -199,7 +200,7 @@ class Worker(Thread): # Get details {{{
return
try:
root = soupparser.fromstring(clean_ascii_chars(raw))
root = fromstring(clean_ascii_chars(raw))
except:
msg = 'Failed to parse amazon details page: %r'%self.url
self.log.exception(msg)
@ -623,7 +624,7 @@ class Amazon(Source):
if found:
try:
root = soupparser.fromstring(clean_ascii_chars(raw))
root = fromstring(clean_ascii_chars(raw))
except:
msg = 'Failed to parse amazon page for query: %r'%query
log.exception(msg)

View File

@ -14,13 +14,13 @@ from threading import RLock
from Queue import Queue, Empty
from lxml import html
from lxml.html import soupparser
from calibre.ebooks.metadata import check_isbn
from calibre.ebooks.metadata.sources.base import Source, Option
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.chardet import xml_to_unicode
from calibre.library.comments import sanitize_comments_html
from calibre.utils.soupparser import fromstring
ovrdrv_data_cache = {}
cache_lock = RLock()
@ -403,7 +403,7 @@ class OverDrive(Source):
raw = xml_to_unicode(raw, strip_encoding_pats=True,
resolve_entities=True)[0]
try:
root = soupparser.fromstring(raw)
root = fromstring(raw)
except:
return False

View File

@ -353,14 +353,14 @@ class MobiReader(object):
self.processed_html = self.remove_random_bytes(self.processed_html)
root = html.fromstring(self.processed_html)
if root.xpath('descendant::p/descendant::p'):
from lxml.html import soupparser
from calibre.utils.soupparser import fromstring
self.log.warning('Malformed markup, parsing using BeautifulSoup')
try:
root = soupparser.fromstring(self.processed_html)
root = fromstring(self.processed_html)
except Exception:
self.log.warning('MOBI markup appears to contain random bytes. Stripping.')
self.processed_html = self.remove_random_bytes(self.processed_html)
root = soupparser.fromstring(self.processed_html)
root = fromstring(self.processed_html)
if root.tag != 'html':
self.log.warn('File does not have opening <html> tag')
@ -929,7 +929,7 @@ class MobiReader(object):
for match in link_pattern.finditer(self.mobi_html):
positions.add(int(match.group(1)))
pos = 0
self.processed_html = ''
processed_html = cStringIO.StringIO()
end_tag_re = re.compile(r'<\s*/')
for end in sorted(positions):
if end == 0:
@ -947,12 +947,14 @@ class MobiReader(object):
end = r
else:
end = r + 1
self.processed_html += self.mobi_html[pos:end] + (anchor % oend)
processed_html.write(self.mobi_html[pos:end] + (anchor % oend))
pos = end
self.processed_html += self.mobi_html[pos:]
processed_html.write(self.mobi_html[pos:])
processed_html = processed_html.getvalue()
# Remove anchors placed inside entities
self.processed_html = re.sub(r'&([^;]*?)(<a id="filepos\d+"></a>)([^;]*);',
r'&\1\3;\2', self.processed_html)
r'&\1\3;\2', processed_html)
def extract_images(self, processed_records, output_dir):

View File

@ -894,8 +894,8 @@ class Manifest(object):
except etree.XMLSyntaxError as err:
self.oeb.logger.warn('Parsing file %r as HTML' % self.href)
if err.args and err.args[0].startswith('Excessive depth'):
from lxml.html import soupparser
data = soupparser.fromstring(data)
from calibre.utils.soupparser import fromstring
data = fromstring(data)
else:
data = html.fromstring(data)
data.attrib.pop('xmlns', None)

View File

@ -118,8 +118,20 @@ class CSSFlattener(object):
def __call__(self, oeb, context):
oeb.logger.info('Flattening CSS and remapping font sizes...')
self.context = self.opts = context
self.oeb = oeb
self.context = context
self.filter_css = frozenset()
if self.opts.filter_css:
try:
self.filter_css = frozenset([x.strip().lower() for x in
self.opts.filter_css.split(',')])
except:
self.oeb.log.warning('Failed to parse filter_css, ignoring')
else:
self.oeb.log.debug('Filtering CSS properties: %s'%
', '.join(self.filter_css))
self.stylize_spine()
self.sbase = self.baseline_spine() if self.fbase else None
self.fmap = FontMapper(self.sbase, self.fbase, self.fkey)
@ -279,6 +291,10 @@ class CSSFlattener(object):
except:
self.oeb.logger.exception('Failed to set minimum line-height')
if cssdict:
for x in self.filter_css:
cssdict.pop(x, None)
if cssdict:
if self.lineh and self.fbase and tag != 'body':
self.clean_edges(cssdict, style, psize)
@ -311,7 +327,6 @@ class CSSFlattener(object):
lineh = self.lineh / psize
cssdict['line-height'] = "%0.5fem" % lineh
if (self.context.remove_paragraph_spacing or
self.context.insert_blank_line) and tag in ('p', 'div'):
if item_id != 'calibre_jacket' or self.context.output_profile.name == 'Kindle':
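What the flattener now does with that value can be shown in a few lines (the property names and the cssdict contents are illustrative):

filter_css = 'font-family, Color , margin-left'
props = frozenset(x.strip().lower() for x in filter_css.split(','))

cssdict = {'font-family': 'Verdana', 'color': '#333', 'text-indent': '1em'}
for x in props:
    cssdict.pop(x, None)   # silently drop every filtered property

print cssdict              # {'text-indent': '1em'}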

View File

@ -171,6 +171,14 @@ def render_jacket(mi, output_profile,
comments=comments,
footer=''
)
for key in mi.custom_field_keys():
try:
display_name, val = mi.format_field_extended(key)[:2]
key = key.replace('#', '_')
args[key] = val
args[key+'_label'] = display_name
except:
pass
generated_html = P('jacket/template.xhtml',
data=True).decode('utf-8').format(**args)
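To see how a custom column ends up available to a customized jacket template, a small sketch (the lookup name #genre and its value are hypothetical):

args = {}
key, display_name, val = '#genre', 'Genre', 'Science Fiction'
key = key.replace('#', '_')          # '#genre' -> '_genre'
args[key] = val
args[key + '_label'] = display_name

# a customized jacket/template.xhtml can then reference both values:
print u'<div>{_genre_label}: {_genre}</div>'.format(**args)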

View File

@ -175,6 +175,8 @@ def _config(): # {{{
help='Search history for the plugin preferences')
c.add_opt('shortcuts_search_history', default=[],
help='Search history for the keyboard preferences')
c.add_opt('jobs_search_history', default=[],
help='Search history for the jobs dialog')
c.add_opt('tweaks_search_history', default=[],
help='Search history for tweaks')
c.add_opt('worker_limit', default=6,

View File

@ -8,7 +8,6 @@ __docformat__ = 'restructuredtext en'
import re, os
from lxml import html
from lxml.html import soupparser
from PyQt4.Qt import QApplication, QFontInfo, QSize, QWidget, QPlainTextEdit, \
QToolBar, QVBoxLayout, QAction, QIcon, Qt, QTabWidget, QUrl, \
@ -19,6 +18,7 @@ from PyQt4.QtWebKit import QWebView, QWebPage
from calibre.ebooks.chardet import xml_to_unicode
from calibre import xml_replace_entities
from calibre.gui2 import open_url
from calibre.utils.soupparser import fromstring
class PageAction(QAction): # {{{
@ -227,7 +227,7 @@ class EditorWidget(QWebView): # {{{
try:
root = html.fromstring(raw)
except:
root = soupparser.fromstring(raw)
root = fromstring(raw)
elems = []
for body in root.xpath('//body'):

View File

@ -18,6 +18,16 @@ class LookAndFeelWidget(Widget, Ui_Form):
HELP = _('Control the look and feel of the output')
COMMIT_NAME = 'look_and_feel'
FILTER_CSS = {
'fonts': {'font-family'},
'margins': {'margin', 'margin-left', 'margin-right', 'margin-top',
'margin-bottom'},
'padding': {'padding', 'padding-left', 'padding-right', 'padding-top',
'padding-bottom'},
'floats': {'float'},
'colors': {'color', 'background', 'background-color'},
}
def __init__(self, parent, get_option, get_help, db=None, book_id=None):
Widget.__init__(self, parent,
['change_justification', 'extra_css', 'base_font_size',
@ -27,7 +37,7 @@ class LookAndFeelWidget(Widget, Ui_Form):
'remove_paragraph_spacing',
'remove_paragraph_spacing_indent_size',
'insert_blank_line_size',
'input_encoding',
'input_encoding', 'filter_css',
'asciiize', 'keep_ligatures',
'linearize_tables']
)
@ -56,6 +66,15 @@ class LookAndFeelWidget(Widget, Ui_Form):
if g is self.opt_change_justification:
ans = unicode(g.itemData(g.currentIndex()).toString())
return ans
if g is self.opt_filter_css:
ans = set()
for key, item in self.FILTER_CSS.iteritems():
w = getattr(self, 'filter_css_%s'%key)
if w.isChecked():
ans = ans.union(item)
ans = ans.union(set([x.strip().lower() for x in
unicode(self.filter_css_others.text()).split(',')]))
return ','.join(ans) if ans else None
return Widget.get_value_handler(self, g)
def set_value_handler(self, g, val):
@ -66,6 +85,27 @@ class LookAndFeelWidget(Widget, Ui_Form):
g.setCurrentIndex(i)
break
return True
if g is self.opt_filter_css:
if not val: val = ''
items = frozenset([x.strip().lower() for x in val.split(',')])
for key, vals in self.FILTER_CSS.iteritems():
w = getattr(self, 'filter_css_%s'%key)
if not vals - items:
items = items - vals
w.setChecked(True)
else:
w.setChecked(False)
self.filter_css_others.setText(', '.join(items))
return True
def connect_gui_obj_handler(self, gui_obj, slot):
if gui_obj is self.opt_filter_css:
for key in self.FILTER_CSS:
w = getattr(self, 'filter_css_%s'%key)
w.stateChanged.connect(slot)
self.filter_css_others.textChanged.connect(slot)
return
raise NotImplementedError()
def font_key_wizard(self):
from calibre.gui2.convert.font_key import FontKeyChooser

View File

@ -6,7 +6,7 @@
<rect>
<x>0</x>
<y>0</y>
<width>642</width>
<width>655</width>
<height>522</height>
</rect>
</property>
@ -164,6 +164,41 @@
</property>
</widget>
</item>
<item row="6" column="3">
<widget class="QLabel" name="label_4">
<property name="text">
<string>&amp;Indent size:</string>
</property>
<property name="alignment">
<set>Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter</set>
</property>
<property name="buddy">
<cstring>opt_remove_paragraph_spacing_indent_size</cstring>
</property>
</widget>
</item>
<item row="6" column="4">
<widget class="QDoubleSpinBox" name="opt_remove_paragraph_spacing_indent_size">
<property name="toolTip">
<string>&lt;p&gt;When calibre removes inter paragraph spacing, it automatically sets a paragraph indent, to ensure that paragraphs can be easily distinguished. This option controls the width of that indent.</string>
</property>
<property name="specialValueText">
<string>No change</string>
</property>
<property name="suffix">
<string> em</string>
</property>
<property name="decimals">
<number>1</number>
</property>
<property name="minimum">
<double>-0.100000000000000</double>
</property>
<property name="singleStep">
<double>0.100000000000000</double>
</property>
</widget>
</item>
<item row="7" column="0" colspan="2">
<widget class="QCheckBox" name="opt_insert_blank_line">
<property name="text">
@ -171,6 +206,19 @@
</property>
</widget>
</item>
<item row="7" column="3">
<widget class="QLabel" name="label_7">
<property name="text">
<string>&amp;Line size:</string>
</property>
<property name="alignment">
<set>Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter</set>
</property>
<property name="buddy">
<cstring>opt_insert_blank_line_size</cstring>
</property>
</widget>
</item>
<item row="7" column="4">
<widget class="QDoubleSpinBox" name="opt_insert_blank_line_size">
<property name="suffix">
@ -194,80 +242,6 @@
<item row="8" column="2" colspan="3">
<widget class="QComboBox" name="opt_change_justification"/>
</item>
<item row="9" column="1" colspan="4">
<widget class="QCheckBox" name="opt_asciiize">
<property name="text">
<string>&amp;Transliterate unicode characters to ASCII</string>
</property>
</widget>
</item>
<item row="10" column="1" colspan="2">
<widget class="QCheckBox" name="opt_keep_ligatures">
<property name="text">
<string>Keep &amp;ligatures</string>
</property>
</widget>
</item>
<item row="13" column="0" colspan="5">
<widget class="QGroupBox" name="groupBox">
<property name="title">
<string>Extra &amp;CSS</string>
</property>
<layout class="QGridLayout" name="gridLayout_3">
<item row="0" column="0">
<widget class="QTextEdit" name="opt_extra_css"/>
</item>
</layout>
</widget>
</item>
<item row="6" column="4">
<widget class="QDoubleSpinBox" name="opt_remove_paragraph_spacing_indent_size">
<property name="toolTip">
<string>&lt;p&gt;When calibre removes inter paragraph spacing, it automatically sets a paragraph indent, to ensure that paragraphs can be easily distinguished. This option controls the width of that indent.</string>
</property>
<property name="specialValueText">
<string>No change</string>
</property>
<property name="suffix">
<string> em</string>
</property>
<property name="decimals">
<number>1</number>
</property>
<property name="minimum">
<double>-0.100000000000000</double>
</property>
<property name="singleStep">
<double>0.100000000000000</double>
</property>
</widget>
</item>
<item row="6" column="3">
<widget class="QLabel" name="label_4">
<property name="text">
<string>&amp;Indent size:</string>
</property>
<property name="alignment">
<set>Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter</set>
</property>
<property name="buddy">
<cstring>opt_remove_paragraph_spacing_indent_size</cstring>
</property>
</widget>
</item>
<item row="7" column="3">
<widget class="QLabel" name="label_7">
<property name="text">
<string>&amp;Line size:</string>
</property>
<property name="alignment">
<set>Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter</set>
</property>
<property name="buddy">
<cstring>opt_insert_blank_line_size</cstring>
</property>
</widget>
</item>
<item row="9" column="0">
<widget class="QCheckBox" name="opt_smarten_punctuation">
<property name="text">
@ -275,6 +249,13 @@
</property>
</widget>
</item>
<item row="9" column="1" colspan="4">
<widget class="QCheckBox" name="opt_asciiize">
<property name="text">
<string>&amp;Transliterate unicode characters to ASCII</string>
</property>
</widget>
</item>
<item row="10" column="0">
<widget class="QCheckBox" name="opt_unsmarten_punctuation">
<property name="text">
@ -282,6 +263,13 @@
</property>
</widget>
</item>
<item row="10" column="1" colspan="2">
<widget class="QCheckBox" name="opt_keep_ligatures">
<property name="text">
<string>Keep &amp;ligatures</string>
</property>
</widget>
</item>
<item row="10" column="3">
<widget class="QCheckBox" name="opt_linearize_tables">
<property name="text">
@ -289,6 +277,111 @@
</property>
</widget>
</item>
<item row="11" column="0" colspan="5">
<widget class="QTabWidget" name="tabWidget">
<property name="currentIndex">
<number>0</number>
</property>
<widget class="QWidget" name="extra_css_tab">
<attribute name="title">
<string>&amp;Extra CSS</string>
</attribute>
<layout class="QGridLayout" name="gridLayout_2">
<item row="0" column="0">
<widget class="QTextEdit" name="opt_extra_css"/>
</item>
</layout>
</widget>
<widget class="QWidget" name="opt_filter_css">
<attribute name="title">
<string>&amp;Filter Style Information</string>
</attribute>
<layout class="QGridLayout" name="gridLayout_3">
<item row="0" column="0" colspan="5">
<widget class="QLabel" name="label_8">
<property name="text">
<string>Select what style information you want completely removed:</string>
</property>
<property name="wordWrap">
<bool>true</bool>
</property>
</widget>
</item>
<item row="1" column="0">
<widget class="QCheckBox" name="filter_css_fonts">
<property name="toolTip">
<string>Removes the font-family CSS property</string>
</property>
<property name="text">
<string>&amp;Fonts</string>
</property>
</widget>
</item>
<item row="1" column="1">
<widget class="QCheckBox" name="filter_css_margins">
<property name="toolTip">
<string>Removes the margin CSS properties. Note that page margins are not affected by this setting.</string>
</property>
<property name="text">
<string>&amp;Margins</string>
</property>
</widget>
</item>
<item row="1" column="2">
<widget class="QCheckBox" name="filter_css_padding">
<property name="toolTip">
<string>Removes the padding CSS properties</string>
</property>
<property name="text">
<string>&amp;Padding</string>
</property>
</widget>
</item>
<item row="1" column="3">
<widget class="QCheckBox" name="filter_css_floats">
<property name="toolTip">
<string>Convert floating images/text into static images/text</string>
</property>
<property name="text">
<string>F&amp;loats</string>
</property>
</widget>
</item>
<item row="1" column="4">
<widget class="QCheckBox" name="filter_css_colors">
<property name="toolTip">
<string>Removes foreground and background colors</string>
</property>
<property name="text">
<string>&amp;Colors</string>
</property>
</widget>
</item>
<item row="2" column="0" colspan="5">
<layout class="QHBoxLayout" name="horizontalLayout_2">
<item>
<widget class="QLabel" name="label_9">
<property name="text">
<string>&amp;Other CSS Properties:</string>
</property>
<property name="buddy">
<cstring>filter_css_others</cstring>
</property>
</widget>
</item>
<item>
<widget class="QLineEdit" name="filter_css_others">
<property name="toolTip">
<string>Comma separated list of CSS properties to remove. For example: display, color, font-family</string>
</property>
</widget>
</item>
</layout>
</item>
</layout>
</widget>
</widget>
</item>
</layout>
</widget>
<customwidgets>

View File

@ -17,8 +17,36 @@
<iconset resource="../../../../resources/images.qrc">
<normaloff>:/images/jobs.png</normaloff>:/images/jobs.png</iconset>
</property>
<layout class="QVBoxLayout">
<item>
<layout class="QGridLayout" name="gridLayout">
<item row="0" column="0" colspan="2">
<layout class="QHBoxLayout" name="horizontalLayout">
<item>
<widget class="SearchBox2" name="search"/>
</item>
<item>
<widget class="QToolButton" name="search_button">
<property name="toolTip">
<string>Do the search</string>
</property>
<property name="text">
<string>&amp;Search</string>
</property>
</widget>
</item>
<item>
<widget class="QToolButton" name="clear_button">
<property name="toolTip">
<string>Clear the search</string>
</property>
<property name="icon">
<iconset resource="../../../../resources/images.qrc">
<normaloff>:/images/clear_left.png</normaloff>:/images/clear_left.png</iconset>
</property>
</widget>
</item>
</layout>
</item>
<item row="1" column="0" colspan="2">
<widget class="QTableView" name="jobs_view">
<property name="contextMenuPolicy">
<enum>Qt::NoContextMenu</enum>
@ -40,29 +68,57 @@
</property>
</widget>
</item>
<item>
<item row="2" column="0">
<widget class="QPushButton" name="kill_button">
<property name="text">
<string>&amp;Stop selected jobs</string>
</property>
</widget>
</item>
<item>
<item row="2" column="1">
<widget class="QPushButton" name="hide_button">
<property name="text">
<string>&amp;Hide selected jobs</string>
</property>
</widget>
</item>
<item row="3" column="0">
<widget class="QPushButton" name="details_button">
<property name="text">
<string>Show job &amp;details</string>
</property>
</widget>
</item>
<item>
<item row="3" column="1">
<widget class="QPushButton" name="show_button">
<property name="text">
<string>Show &amp;all jobs</string>
</property>
</widget>
</item>
<item row="4" column="0">
<widget class="QPushButton" name="stop_all_jobs_button">
<property name="text">
<string>Stop &amp;all non device jobs</string>
</property>
</widget>
</item>
<item row="4" column="1">
<widget class="QPushButton" name="hide_all_button">
<property name="text">
<string>&amp;Hide all jobs</string>
</property>
</widget>
</item>
</layout>
</widget>
<customwidgets>
<customwidget>
<class>SearchBox2</class>
<extends>QComboBox</extends>
<header>calibre/gui2/search_box.h</header>
</customwidget>
</customwidgets>
<resources>
<include location="../../../../resources/images.qrc"/>
</resources>

View File

@ -23,6 +23,8 @@ from calibre.utils.icu import sort_key, capitalize
from calibre.utils.config import prefs, tweaks
from calibre.utils.magick.draw import identify_data
from calibre.utils.date import qt_to_dt
from calibre.ptempfile import SpooledTemporaryFile
from calibre.db import SPOOL_SIZE
def get_cover_data(stream, ext): # {{{
from calibre.ebooks.metadata.meta import get_metadata
@ -134,11 +136,12 @@ class MyBlockingBusy(QDialog): # {{{
do_autonumber, do_remove_format, remove_format, do_swap_ta, \
do_remove_conv, do_auto_author, series, do_series_restart, \
series_start_value, do_title_case, cover_action, clear_series, \
pubdate, adddate, do_title_sort, languages, clear_languages = self.args
pubdate, adddate, do_title_sort, languages, clear_languages, \
restore_original = self.args
# first loop: do author and title. These will commit at the end of each
# operation, because each operation modifies the file system. We want to
# first loop: All changes that modify the filesystem and commit
# immediately. We want to
# try hard to keep the DB and the file system in sync, even in the face
# of exceptions or forced exits.
if self.current_phase == 1:
@ -196,6 +199,27 @@ class MyBlockingBusy(QDialog): # {{{
if covers:
self.db.set_cover(id, covers[-1][0])
covers = []
if do_remove_format:
self.db.remove_format(id, remove_format, index_is_id=True,
notify=False, commit=True)
if restore_original:
formats = self.db.formats(id, index_is_id=True)
formats = formats.split(',') if formats else []
originals = [x.upper() for x in formats if
x.upper().startswith('ORIGINAL_')]
for ofmt in originals:
fmt = ofmt.replace('ORIGINAL_', '')
with SpooledTemporaryFile(SPOOL_SIZE) as stream:
self.db.copy_format_to(id, ofmt, stream,
index_is_id=True)
stream.seek(0)
self.db.add_format(id, fmt, stream, index_is_id=True,
notify=False)
self.db.remove_format(id, ofmt, index_is_id=True,
notify=False, commit=True)
elif self.current_phase == 2:
# All of these just affect the DB, so we can tolerate a total rollback
if do_auto_author:
@ -233,9 +257,6 @@ class MyBlockingBusy(QDialog): # {{{
num = next if do_autonumber and series else 1.0
self.db.set_series_index(id, num, notify=False, commit=False)
if do_remove_format:
self.db.remove_format(id, remove_format, index_is_id=True, notify=False, commit=False)
if do_remove_conv:
self.db.delete_conversion_options(id, 'PIPE', commit=False)
@ -340,6 +361,7 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
self.restoreGeometry(bytes(geom))
self.languages.init_langs(self.db)
self.languages.setEditText('')
self.authors.setFocus(Qt.OtherFocusReason)
self.exec_()
def save_state(self, *args):
@ -935,6 +957,7 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
do_title_case = self.change_title_to_title_case.isChecked()
do_title_sort = self.update_title_sort.isChecked()
clear_languages = self.clear_languages.isChecked()
restore_original = self.restore_original.isChecked()
languages = self.languages.lang_codes
pubdate = adddate = None
if self.apply_pubdate.isChecked():
@ -954,7 +977,8 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
do_autonumber, do_remove_format, remove_format, do_swap_ta,
do_remove_conv, do_auto_author, series, do_series_restart,
series_start_value, do_title_case, cover_action, clear_series,
pubdate, adddate, do_title_sort, languages, clear_languages)
pubdate, adddate, do_title_sort, languages, clear_languages,
restore_original)
bb = MyBlockingBusy(_('Applying changes to %d books.\nPhase {0} {1}%%.')
%len(self.ids), args, self.db, self.ids,

View File

@ -44,8 +44,8 @@
<rect>
<x>0</x>
<y>0</y>
<width>954</width>
<height>584</height>
<width>950</width>
<height>576</height>
</rect>
</property>
<layout class="QVBoxLayout" name="verticalLayout_2">
@ -443,7 +443,30 @@ from the value in the box</string>
</property>
</widget>
</item>
<item row="13" column="0">
<item row="11" column="0">
<widget class="QLabel" name="label_11">
<property name="text">
<string>&amp;Languages:</string>
</property>
<property name="alignment">
<set>Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter</set>
</property>
<property name="buddy">
<cstring>languages</cstring>
</property>
</widget>
</item>
<item row="11" column="1">
<widget class="LanguagesEdit" name="languages"/>
</item>
<item row="11" column="2">
<widget class="QCheckBox" name="clear_languages">
<property name="text">
<string>Remove &amp;all</string>
</property>
</widget>
</item>
<item row="12" column="0">
<widget class="QLabel" name="label_5">
<property name="text">
<string>Remove &amp;format:</string>
@ -453,17 +476,44 @@ from the value in the box</string>
</property>
</widget>
</item>
<item row="13" column="1">
<widget class="QComboBox" name="remove_format">
<property name="maximumSize">
<size>
<width>120</width>
<height>16777215</height>
</size>
</property>
</widget>
<item row="12" column="1">
<layout class="QHBoxLayout" name="horizontalLayout_7">
<item>
<widget class="QComboBox" name="remove_format">
<property name="maximumSize">
<size>
<width>120</width>
<height>16777215</height>
</size>
</property>
</widget>
</item>
<item>
<spacer name="horizontalSpacer_4">
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="sizeHint" stdset="0">
<size>
<width>40</width>
<height>20</height>
</size>
</property>
</spacer>
</item>
<item>
<widget class="QCheckBox" name="restore_original">
<property name="toolTip">
<string>When doing a conversion from a format to the same format, e.g. EPUB to EPUB, calibre saves the original EPUB as ORIGINAL_EPUB. This option tells calibre to restore the EPUB from ORIGINAL_EPUB. Useful if you did a bulk conversion of a large number of books and something went wrong.</string>
</property>
<property name="text">
<string>Restore pre conversion &amp;originals, if available</string>
</property>
</widget>
</item>
</layout>
</item>
<item row="14" column="0">
<item row="13" column="0">
<spacer name="verticalSpacer">
<property name="orientation">
<enum>Qt::Vertical</enum>
@ -479,7 +529,7 @@ from the value in the box</string>
</property>
</spacer>
</item>
<item row="15" column="0" colspan="3">
<item row="14" column="0" colspan="2">
<layout class="QHBoxLayout" name="horizontalLayout_3">
<item>
<widget class="QCheckBox" name="change_title_to_title_case">
@ -529,7 +579,7 @@ Future conversion of these books will use the default settings.</string>
</item>
</layout>
</item>
<item row="16" column="0" colspan="3">
<item row="15" column="0" colspan="2">
<widget class="QGroupBox" name="groupBox">
<property name="title">
<string>Change &amp;cover</string>
@ -559,7 +609,7 @@ Future conversion of these books will use the default settings.</string>
</layout>
</widget>
</item>
<item row="17" column="0">
<item row="16" column="0">
<spacer name="verticalSpacer_2">
<property name="orientation">
<enum>Qt::Vertical</enum>
@ -572,29 +622,6 @@ Future conversion of these books will use the default settings.</string>
</property>
</spacer>
</item>
<item row="11" column="0">
<widget class="QLabel" name="label_11">
<property name="text">
<string>&amp;Languages:</string>
</property>
<property name="alignment">
<set>Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter</set>
</property>
<property name="buddy">
<cstring>languages</cstring>
</property>
</widget>
</item>
<item row="11" column="1">
<widget class="LanguagesEdit" name="languages"/>
</item>
<item row="11" column="2">
<widget class="QCheckBox" name="clear_languages">
<property name="text">
<string>Remove &amp;all</string>
</property>
</widget>
</item>
</layout>
</widget>
<widget class="QWidget" name="tab">
@ -1078,8 +1105,8 @@ not multiple and the destination field is multiple</string>
<rect>
<x>0</x>
<y>0</y>
<width>197</width>
<height>60</height>
<width>205</width>
<height>66</height>
</rect>
</property>
<layout class="QGridLayout" name="testgrid">
@ -1177,6 +1204,7 @@ not multiple and the destination field is multiple</string>
<tabstops>
<tabstop>authors</tabstop>
<tabstop>auto_author_sort</tabstop>
<tabstop>swap_title_and_author</tabstop>
<tabstop>author_sort</tabstop>
<tabstop>rating</tabstop>
<tabstop>publisher</tabstop>
@ -1185,47 +1213,50 @@ not multiple and the destination field is multiple</string>
<tabstop>remove_tags</tabstop>
<tabstop>remove_all_tags</tabstop>
<tabstop>series</tabstop>
<tabstop>clear_series</tabstop>
<tabstop>autonumber_series</tabstop>
<tabstop>series_numbering_restarts</tabstop>
<tabstop>series_start_number</tabstop>
<tabstop>button_box</tabstop>
<tabstop>query_field</tabstop>
<tabstop>save_button</tabstop>
<tabstop>remove_button</tabstop>
<tabstop>search_field</tabstop>
<tabstop>search_mode</tabstop>
<tabstop>s_r_src_ident</tabstop>
<tabstop>s_r_template</tabstop>
<tabstop>search_for</tabstop>
<tabstop>case_sensitive</tabstop>
<tabstop>replace_with</tabstop>
<tabstop>replace_func</tabstop>
<tabstop>destination_field</tabstop>
<tabstop>replace_mode</tabstop>
<tabstop>comma_separated</tabstop>
<tabstop>s_r_dst_ident</tabstop>
<tabstop>results_count</tabstop>
<tabstop>starting_from</tabstop>
<tabstop>multiple_separator</tabstop>
<tabstop>test_text</tabstop>
<tabstop>test_result</tabstop>
<tabstop>scrollArea</tabstop>
<tabstop>central_widget</tabstop>
<tabstop>swap_title_and_author</tabstop>
<tabstop>clear_series</tabstop>
<tabstop>adddate</tabstop>
<tabstop>clear_adddate_button</tabstop>
<tabstop>apply_adddate</tabstop>
<tabstop>pubdate</tabstop>
<tabstop>clear_pubdate_button</tabstop>
<tabstop>apply_pubdate</tabstop>
<tabstop>languages</tabstop>
<tabstop>clear_languages</tabstop>
<tabstop>remove_format</tabstop>
<tabstop>restore_original</tabstop>
<tabstop>change_title_to_title_case</tabstop>
<tabstop>update_title_sort</tabstop>
<tabstop>remove_conversion_settings</tabstop>
<tabstop>cover_generate</tabstop>
<tabstop>cover_remove</tabstop>
<tabstop>cover_from_fmt</tabstop>
<tabstop>multiple_separator</tabstop>
<tabstop>test_text</tabstop>
<tabstop>test_result</tabstop>
<tabstop>scrollArea</tabstop>
<tabstop>central_widget</tabstop>
<tabstop>query_field</tabstop>
<tabstop>button_box</tabstop>
<tabstop>save_button</tabstop>
<tabstop>remove_button</tabstop>
<tabstop>search_field</tabstop>
<tabstop>search_mode</tabstop>
<tabstop>s_r_src_ident</tabstop>
<tabstop>s_r_template</tabstop>
<tabstop>replace_with</tabstop>
<tabstop>replace_func</tabstop>
<tabstop>replace_mode</tabstop>
<tabstop>comma_separated</tabstop>
<tabstop>s_r_dst_ident</tabstop>
<tabstop>results_count</tabstop>
<tabstop>scrollArea11</tabstop>
<tabstop>destination_field</tabstop>
<tabstop>search_for</tabstop>
<tabstop>case_sensitive</tabstop>
<tabstop>starting_from</tabstop>
</tabstops>
<resources>
<include location="../../../../resources/images.qrc"/>

View File

@ -14,25 +14,30 @@ from PyQt4.Qt import (QAbstractTableModel, QVariant, QModelIndex, Qt,
QTimer, pyqtSignal, QIcon, QDialog, QAbstractItemDelegate, QApplication,
QSize, QStyleOptionProgressBarV2, QString, QStyle, QToolTip, QFrame,
QHBoxLayout, QVBoxLayout, QSizePolicy, QLabel, QCoreApplication, QAction,
QByteArray)
QByteArray, QSortFilterProxyModel)
from calibre.utils.ipc.server import Server
from calibre.utils.ipc.job import ParallelJob
from calibre.gui2 import Dispatcher, error_dialog, question_dialog, NONE, config, gprefs
from calibre.gui2 import (Dispatcher, error_dialog, question_dialog, NONE,
config, gprefs)
from calibre.gui2.device import DeviceJob
from calibre.gui2.dialogs.jobs_ui import Ui_JobsDialog
from calibre import __appname__, as_unicode
from calibre.gui2.dialogs.job_view_ui import Ui_Dialog
from calibre.gui2.progress_indicator import ProgressIndicator
from calibre.gui2.threaded_jobs import ThreadedJobServer, ThreadedJob
from calibre.utils.search_query_parser import SearchQueryParser, ParseException
from calibre.utils.icu import lower
class JobManager(QAbstractTableModel): # {{{
class JobManager(QAbstractTableModel, SearchQueryParser): # {{{
job_added = pyqtSignal(int)
job_done = pyqtSignal(int)
def __init__(self):
QAbstractTableModel.__init__(self)
SearchQueryParser.__init__(self, ['all'])
self.wait_icon = QVariant(QIcon(I('jobs.png')))
self.running_icon = QVariant(QIcon(I('exec.png')))
self.error_icon = QVariant(QIcon(I('dialog_error.png')))
@ -251,6 +256,18 @@ class JobManager(QAbstractTableModel): # {{{
else:
job.kill_on_start = True
def hide_jobs(self, rows):
for r in rows:
self.jobs[r].hidden_in_gui = True
for r in rows:
self.dataChanged.emit(self.index(r, 0), self.index(r, 0))
def show_hidden_jobs(self):
for j in self.jobs:
j.hidden_in_gui = False
for r in xrange(len(self.jobs)):
self.dataChanged.emit(self.index(r, 0), self.index(r, 0))
def kill_job(self, row, view):
job = self.jobs[row]
if isinstance(job, DeviceJob):
@ -299,6 +316,62 @@ class JobManager(QAbstractTableModel): # {{{
continue
if not isinstance(job, ParallelJob):
self._kill_job(job)
def universal_set(self):
return set([i for i, j in enumerate(self.jobs) if not getattr(j,
'hidden_in_gui', False)])
def get_matches(self, location, query, candidates=None):
if candidates is None:
candidates = self.universal_set()
ans = set()
if not query:
return ans
query = lower(query)
for j in candidates:
job = self.jobs[j]
if job.description and query in lower(job.description):
ans.add(j)
return ans
def find(self, query):
query = query.strip()
rows = self.parse(query)
return rows
# }}}
class FilterModel(QSortFilterProxyModel): # {{{
search_done = pyqtSignal(object)
def __init__(self, parent):
QSortFilterProxyModel.__init__(self, parent)
self.search_filter = None
def filterAcceptsRow(self, source_row, source_parent):
if (self.search_filter is not None and source_row not in
self.search_filter):
return False
m = self.sourceModel()
try:
job = m.row_to_job(source_row)
except:
return False
return not getattr(job, 'hidden_in_gui', False)
def find(self, query):
ok = True
val = None
if query:
try:
val = self.sourceModel().parse(query)
except ParseException:
ok = False
self.search_filter = val
self.search_done.emit(ok)
self.reset()
# }}}
# Jobs UI {{{
@ -450,8 +523,11 @@ class JobsDialog(QDialog, Ui_JobsDialog):
QDialog.__init__(self, window)
Ui_JobsDialog.__init__(self)
self.setupUi(self)
self.jobs_view.setModel(model)
self.model = model
self.proxy_model = FilterModel(self)
self.proxy_model.setSourceModel(self.model)
self.proxy_model.search_done.connect(self.search.search_done)
self.jobs_view.setModel(self.proxy_model)
self.setWindowModality(Qt.NonModal)
self.setWindowTitle(__appname__ + _(' - Jobs'))
self.details_button.clicked.connect(self.show_details)
@ -461,6 +537,15 @@ class JobsDialog(QDialog, Ui_JobsDialog):
self.jobs_view.setItemDelegateForColumn(2, self.pb_delegate)
self.jobs_view.doubleClicked.connect(self.show_job_details)
self.jobs_view.horizontalHeader().setMovable(True)
self.hide_button.clicked.connect(self.hide_selected)
self.hide_all_button.clicked.connect(self.hide_all)
self.show_button.clicked.connect(self.show_hidden)
self.search.initialize('jobs_search_history',
help_text=_('Search for a job by name'))
self.search.search.connect(self.find)
self.search_button.clicked.connect(lambda :
self.find(self.search.current_text))
self.clear_button.clicked.connect(lambda : self.search.clear())
self.restore_state()
def restore_state(self):
@ -486,11 +571,13 @@ class JobsDialog(QDialog, Ui_JobsDialog):
pass
def show_job_details(self, index):
row = index.row()
job = self.jobs_view.model().row_to_job(row)
d = DetailView(self, job)
d.exec_()
d.timer.stop()
index = self.proxy_model.mapToSource(index)
if index.isValid():
row = index.row()
job = self.model.row_to_job(row)
d = DetailView(self, job)
d.exec_()
d.timer.stop()
def show_details(self, *args):
index = self.jobs_view.currentIndex()
@ -498,8 +585,10 @@ class JobsDialog(QDialog, Ui_JobsDialog):
self.show_job_details(index)
def kill_job(self, *args):
rows = [index.row() for index in
indices = [self.proxy_model.mapToSource(index) for index in
self.jobs_view.selectionModel().selectedRows()]
indices = [i for i in indices if i.isValid()]
rows = [index.row() for index in indices]
if not rows:
return error_dialog(self, _('No job'),
_('No job selected'), show=True)
@ -517,6 +606,26 @@ class JobsDialog(QDialog, Ui_JobsDialog):
_('Do you really want to stop all non-device jobs?')):
self.model.kill_all_jobs()
def hide_selected(self, *args):
indices = [self.proxy_model.mapToSource(index) for index in
self.jobs_view.selectionModel().selectedRows()]
indices = [i for i in indices if i.isValid()]
rows = [index.row() for index in indices]
if not rows:
return error_dialog(self, _('No job'),
_('No job selected'), show=True)
self.model.hide_jobs(rows)
self.proxy_model.reset()
def hide_all(self, *args):
self.model.hide_jobs(list(xrange(0,
self.model.rowCount(QModelIndex()))))
self.proxy_model.reset()
def show_hidden(self, *args):
self.model.show_hidden_jobs()
self.find(self.search.current_text)
def closeEvent(self, e):
self.save_state()
return QDialog.closeEvent(self, e)
@ -528,5 +637,9 @@ class JobsDialog(QDialog, Ui_JobsDialog):
def hide(self, *args):
self.save_state()
return QDialog.hide(self, *args)
def find(self, query):
self.proxy_model.find(query)
# }}}
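The dialog-side changes lean on a standard Qt idiom: a QSortFilterProxyModel sits between the jobs model and the view, and filterAcceptsRow() consults the set of rows the search matched. A stripped-down sketch of just that idiom (PyQt4, with an illustrative string-list model rather than the real JobManager):

from PyQt4.Qt import QApplication, QStringListModel, QSortFilterProxyModel

class RowSetProxy(QSortFilterProxyModel):
    # only rows whose source index is in self.allowed are shown
    def __init__(self, parent=None):
        QSortFilterProxyModel.__init__(self, parent)
        self.allowed = None              # None means: show everything

    def filterAcceptsRow(self, source_row, source_parent):
        return self.allowed is None or source_row in self.allowed

app = QApplication([])                   # Qt models/views need an application object
model = QStringListModel(['Convert book 1', 'Fetch news', 'Convert book 2'])
proxy = RowSetProxy()
proxy.setSourceModel(model)
proxy.allowed = {0, 2}                   # e.g. the rows a search matched
proxy.reset()                            # re-evaluate the filter, as the jobs dialog does
print model.rowCount(), '->', proxy.rowCount()   # 3 -> 2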

View File

@ -131,8 +131,9 @@ def get_job_details(job):
def merge_result(oldmi, newmi, ensure_fields=None):
dummy = Metadata(_('Unknown'))
for f in msprefs['ignore_fields']:
if ':' not in f and (ensure_fields and f not in ensure_fields):
setattr(newmi, f, getattr(dummy, f))
if ':' in f or (ensure_fields and f in ensure_fields):
continue
setattr(newmi, f, getattr(dummy, f))
fields = set()
for plugin in metadata_plugins(['identify']):
fields |= plugin.touched_fields
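The corrected skip condition is easier to see with concrete values (illustrative field names, not taken from any particular bug report):

ignore_fields = ['tags', 'title', 'identifier:isbn']
ensure_fields = {'title'}

for f in ignore_fields:
    if ':' in f or (ensure_fields and f in ensure_fields):
        continue   # identifier-style and explicitly ensured fields survive
    print 'blanked on the downloaded metadata:', f   # only 'tags' is printed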

View File

@ -127,7 +127,7 @@
<item row="2" column="0">
<widget class="QLabel" name="label_6">
<property name="text">
<string>Number of conver download threads to use</string>
<string>Number of cover download threads to use</string>
</property>
</widget>
</item>

View File

@ -43,6 +43,7 @@ class LegimiStore(BasicStoreConfig, StorePlugin):
url = 'http://www.legimi.com/pl/ebooks/?price=any&lang=pl&search=' + urllib.quote_plus(query) + '&sort=relevance'
br = browser()
drm_pattern = re.compile("(DRM)")
counter = max_results
with closing(br.open(url, timeout=timeout)) as f:
@ -61,6 +62,10 @@ class LegimiStore(BasicStoreConfig, StorePlugin):
author = re.sub(',','',author)
author = re.sub(';',',',author)
price = ''.join(data.xpath('.//span[@class="ebook_price"]/text()'))
formats = ''.join(data.xpath('.//div[@class="item_entries"]/span[3]/text()'))
formats = re.sub('Format:','',formats)
drm = drm_pattern.search(formats)
formats = re.sub('\(DRM\)','',formats)
counter -= 1
@ -70,7 +75,7 @@ class LegimiStore(BasicStoreConfig, StorePlugin):
s.author = author.strip()
s.price = price
s.detail_item = 'http://www.legimi.com/' + id.strip()
s.drm = SearchResult.DRM_LOCKED
s.formats = 'EPUB'
s.drm = SearchResult.DRM_LOCKED if drm else SearchResult.DRM_UNLOCKED
s.formats = formats.strip()
yield s
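A short trace of the new formats/DRM parsing, with an illustrative raw string standing in for what the result page yields:

import re

drm_pattern = re.compile("(DRM)")
formats = ' Format: EPUB (DRM) '
formats = re.sub('Format:', '', formats)
drm = drm_pattern.search(formats)
formats = re.sub('\(DRM\)', '', formats)
print formats.strip(), bool(drm)    # EPUB True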

View File

@ -107,6 +107,12 @@ class KindleDX(Kindle):
name = 'Kindle DX'
id = 'kindledx'
class KindleFire(KindleDX):
name = 'Kindle Fire'
id = 'kindle_fire'
output_profile = 'kindle_fire'
supports_color = True
class Sony505(Device):
output_profile = 'sony'
@ -179,6 +185,10 @@ class NookColor(Nook):
output_profile = 'nook_color'
supports_color = True
class NookTablet(NookColor):
id = 'nook_tablet'
name = 'Nook Tablet'
class CybookG3(Device):
name = 'Cybook Gen 3'

The diffs for the remaining 35 files are suppressed because they are too large.

Some files were not shown because too many files have changed in this diff.