Mirror of https://github.com/kovidgoyal/calibre.git

Commit c69435c5ce: Sync to trunk.
@@ -19,6 +19,63 @@

# new recipes:
#   - title:

- version: 0.8.28
  date: 2011-11-25

  new features:
    - title: "Get Books: Add litres.ru store"

    - title: "Change the algorithm that generates title sort strings to strip leading articles from both English and the current language set for the calibre user interface. In addition, in the edit metadata dialog, calibre will use the book's language when calculating the sort string. This behavior can be adjusted via Preferences->Tweaks."
      tickets: [886763]
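A minimal sketch of the kind of language-aware article stripping described in that entry (the article lists and helper name below are illustrative, not calibre's actual implementation):

    # Hypothetical sketch: strip a leading article before building a title sort string.
    ARTICLES = {
        'eng': ('a ', 'an ', 'the '),
        'spa': ('el ', 'la ', 'los ', 'las ', 'un ', 'una '),
    }

    def title_sort(title, lang='eng'):
        lowered = title.lower()
        for article in ARTICLES.get(lang, ()):
            if lowered.startswith(article):
                return title[len(article):]
        return title

    print(title_sort('The Count of Monte Cristo'))  # -> 'Count of Monte Cristo'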
    - title: "Driver for Cybook Odyssey."
      tickets: [893457]

    - title: "Irex driver: Put books into the top level directory instead of into /ebooks or /Books."
      tickets: [883616]

  bug fixes:
    - title: "Have downloaded periodicals recognized when transferred via USB to the Kindle Fire"

    - title: "MOBI Output: Fix underline and strikethrough properties declared on parents not being rendered on child tags."
      tickets: [894245]

    - title: "Template language: Fix regression that broke ordering of items when formatting a list"

    - title: "Conversion pipeline: When removing obsolete <font> tags convert them to <div> instead of <span> if they contain block level tags."
      tickets: [892525]
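To illustrate that conversion rule (a sketch, not the pipeline's actual code): a <font> wrapper must become a <div> whenever it contains a block-level child, because a <span> may not legally contain block elements.

    # Illustrative sketch using lxml; BLOCK_TAGS is an assumption, not calibre's list.
    from lxml import html

    BLOCK_TAGS = {'p', 'div', 'h1', 'h2', 'h3', 'table', 'ul', 'ol'}

    def retag_font(root):
        for font in root.iter('font'):
            has_block_child = any(child.tag in BLOCK_TAGS for child in font)
            # A <span> cannot contain block-level content, so fall back to <div>.
            font.tag = 'div' if has_block_child else 'span'

    root = html.fromstring('<body><font><p>text</p></font></body>')
    retag_font(root)  # the <font> is now a <div>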
    - title: "When downloading metadata, fix the case normalization of double-barrelled author names."
      tickets: [893257]

    - title: "Template language: Fix regression that broke using general program mode in save to disk templates"

    - title: "calibredb: Fix use of ranges when specifying ids for the remove command"

    - title: "Apple driver: Add ids for iPhone 4S. More robust against iTunes automation errors when adding artwork."
      tickets: [892468]

    - title: "Fix encoding of comments incorrectly detected when downloading metadata from ozon.ru"

    - title: "Fix calibre not getting list of books on the Kindle Fire"

  improved recipes:
    - El Mundo
    - BBC
    - NIN Online
    - ABC Australia
    - Salon.com
    - Expansion (Spanish)
    - The Week
    - Heise Online

  new recipes:
    - title: Give me something to read and Let's get Critical
      author: Barty

    - title: Worldcrunch
      author: Krittika Goyal

- version: 0.8.27
  date: 2011-11-18
@@ -10,49 +10,39 @@ http://www.buffalonews.com/RSS/

from calibre.web.feeds.news import BasicNewsRecipe

class AdvancedUserRecipe1298680852(BasicNewsRecipe):
class BuffaloNews(BasicNewsRecipe):
    title = u'Buffalo News'
    oldest_article = 2
    language = 'en'
    __author__ = 'ChappyOnIce'
    __author__ = 'ChappyOnIce, Krittika Goyal'
    max_articles_per_feed = 20
    encoding = 'utf-8'
    masthead_url = 'http://www.buffalonews.com/buffalonews/skins/buffalonews/images/masthead/the_buffalo_news_logo.png'
    remove_javascript = True
    extra_css = 'body {text-align: justify;}\n \
        p {text-indent: 20px;}'
    auto_cleanup = True
    remove_empty_feeds = True

    keep_only_tags = [
        dict(name='div', attrs={'class':['main-content-left']})
        ]

    remove_tags = [
        dict(name='div', attrs={'id':['commentCount']}),
        dict(name='div', attrs={'class':['story-list-links']})
        ]

    remove_tags_after = dict(name='div', attrs={'class':['body storyContent']})

    feeds = [(u'City of Buffalo', u'http://www.buffalonews.com/city/communities/buffalo/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Southern Erie County', u'http://www.buffalonews.com/city/communities/southern-erie/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Eastern Erie County', u'http://www.buffalonews.com/city/communities/eastern-erie/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Southern Tier', u'http://www.buffalonews.com/city/communities/southern-tier/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Niagara County', u'http://www.buffalonews.com/city/communities/niagara-county/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Business', u'http://www.buffalonews.com/business/?widget=rssfeed&view=feed&contentId=77944'),
        (u'MoneySmart', u'http://www.buffalonews.com/business/moneysmart/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Bills & NFL', u'http://www.buffalonews.com/sports/bills-nfl/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Sabres & NHL', u'http://www.buffalonews.com/sports/sabres-nhl/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Bob DiCesare', u'http://www.buffalonews.com/sports/columns/bob-dicesare/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Bucky Gleason', u'http://www.buffalonews.com/sports/columns/bucky-gleason/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Mark Gaughan', u'http://www.buffalonews.com/sports/bills-nfl/inside-the-nfl/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Mike Harrington', u'http://www.buffalonews.com/sports/columns/mike-harrington/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Jerry Sullivan', u'http://www.buffalonews.com/sports/columns/jerry-sullivan/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Other Sports Columns', u'http://www.buffalonews.com/sports/columns/other-sports-columns/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Life', u'http://www.buffalonews.com/life/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Bruce Andriatch', u'http://www.buffalonews.com/city/columns/bruce-andriatch/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Donn Esmonde', u'http://www.buffalonews.com/city/columns/donn-esmonde/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Rod Watson', u'http://www.buffalonews.com/city/columns/rod-watson/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Entertainment', u'http://www.buffalonews.com/entertainment/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Off Main Street', u'http://www.buffalonews.com/city/columns/off-main-street/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Editorials', u'http://www.buffalonews.com/editorial-page/buffalo-news-editorials/?widget=rssfeed&view=feed&contentId=77944')
    feeds = [
        (u'City of Buffalo', u'http://www.buffalonews.com/city/communities/buffalo/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Southern Erie County', u'http://www.buffalonews.com/city/communities/southern-erie/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Eastern Erie County', u'http://www.buffalonews.com/city/communities/eastern-erie/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Southern Tier', u'http://www.buffalonews.com/city/communities/southern-tier/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Niagara County', u'http://www.buffalonews.com/city/communities/niagara-county/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Business', u'http://www.buffalonews.com/business/?widget=rssfeed&view=feed&contentId=77944'),
        (u'MoneySmart', u'http://www.buffalonews.com/business/moneysmart/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Bills & NFL', u'http://www.buffalonews.com/sports/bills-nfl/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Sabres & NHL', u'http://www.buffalonews.com/sports/sabres-nhl/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Bob DiCesare', u'http://www.buffalonews.com/sports/columns/bob-dicesare/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Bucky Gleason', u'http://www.buffalonews.com/sports/columns/bucky-gleason/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Mark Gaughan', u'http://www.buffalonews.com/sports/bills-nfl/inside-the-nfl/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Mike Harrington', u'http://www.buffalonews.com/sports/columns/mike-harrington/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Jerry Sullivan', u'http://www.buffalonews.com/sports/columns/jerry-sullivan/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Other Sports Columns', u'http://www.buffalonews.com/sports/columns/other-sports-columns/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Life', u'http://www.buffalonews.com/life/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Bruce Andriatch', u'http://www.buffalonews.com/city/columns/bruce-andriatch/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Donn Esmonde', u'http://www.buffalonews.com/city/columns/donn-esmonde/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Rod Watson', u'http://www.buffalonews.com/city/columns/rod-watson/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Entertainment', u'http://www.buffalonews.com/entertainment/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Off Main Street', u'http://www.buffalonews.com/city/columns/off-main-street/?widget=rssfeed&view=feed&contentId=77944'),
        (u'Editorials', u'http://www.buffalonews.com/editorial-page/buffalo-news-editorials/?widget=rssfeed&view=feed&contentId=77944')
        ]
@@ -4,7 +4,8 @@ __copyright__ = '2009-2011, Darko Miletic <darko.miletic at gmail.com>'
'''
elmundo.es
'''

import re
import time
from calibre.web.feeds.news import BasicNewsRecipe

class ElMundo(BasicNewsRecipe):

@@ -18,12 +19,15 @@ class ElMundo(BasicNewsRecipe):
    no_stylesheets = True
    use_embedded_content = False
    encoding = 'iso8859_15'
    remove_javascript = True
    remove_empty_feeds = True
    language = 'es'
    masthead_url = 'http://estaticos03.elmundo.es/elmundo/iconos/v4.x/v4.01/bg_h1.png'
    publication_type = 'newspaper'
    extra_css = """
        body{font-family: Arial,Helvetica,sans-serif}
        .metadata_noticia{font-size: small}
        .pestana_GDP{font-size: small; font-weight:bold}
        h1,h2,h3,h4,h5,h6,.subtitulo {color: #3F5974}
        .hora{color: red}
        .update{color: gray}

@@ -41,22 +45,43 @@ class ElMundo(BasicNewsRecipe):
    remove_tags_after = dict(name='div' , attrs={'id':['desarrollo_noticia','tamano']})
    remove_attributes = ['lang','border']
    remove_tags = [
        dict(name='div', attrs={'class':['herramientas','publicidad_google']})
        ,dict(name='div', attrs={'id':'modulo_multimedia' })
        dict(name='div', attrs={'class':['herramientas','publicidad_google','comenta','col col-2b','apoyos','no-te-pierdas']})
        ,dict(name='div', attrs={'class':['publicidad publicidad_cuerpo_noticia','comentarios_nav','mensaje_privado','interact']})
        ,dict(name='div', attrs={'class':['num_comentarios estirar']})
        ,dict(name='span', attrs={'class':['links_comentar']})
        ,dict(name='div', attrs={'id':['comentar']})
        ,dict(name='ul', attrs={'class':'herramientas' })
        ,dict(name=['object','link','embed','iframe','base','meta'])
        ]

    feeds = [
        (u'Portada' , u'http://estaticos.elmundo.es/elmundo/rss/portada.xml' )
        (u'Portada' , u'http://estaticos.elmundo.es/elmundo/rss/portada.xml' )
        ,(u'Deportes' , u'http://estaticos.elmundo.es/elmundodeporte/rss/portada.xml')
        ,(u'Economia' , u'http://estaticos.elmundo.es/elmundo/rss/economia.xml' )
        ,(u'Espana' , u'http://estaticos.elmundo.es/elmundo/rss/espana.xml' )
        ,(u'Econom\xeda' , u'http://estaticos.elmundo.es/elmundo/rss/economia.xml' )
        ,(u'Espa\xf1a' , u'http://estaticos.elmundo.es/elmundo/rss/espana.xml' )
        ,(u'Internacional' , u'http://estaticos.elmundo.es/elmundo/rss/internacional.xml' )
        ,(u'Cultura' , u'http://estaticos.elmundo.es/elmundo/rss/cultura.xml' )
        ,(u'Ciencia/Ecologia', u'http://estaticos.elmundo.es/elmundo/rss/ciencia.xml' )
        ,(u'Comunicacion' , u'http://estaticos.elmundo.es/elmundo/rss/comunicacion.xml' )
        ,(u'Television' , u'http://estaticos.elmundo.es/elmundo/rss/television.xml' )
        ,(u'Ciencia/Ecolog\xeda', u'http://estaticos.elmundo.es/elmundo/rss/ciencia.xml' )
        ,(u'Comunicaci\xf3n' , u'http://estaticos.elmundo.es/elmundo/rss/comunicacion.xml' )
        ,(u'Televisi\xf3n' , u'http://estaticos.elmundo.es/elmundo/rss/television.xml' )

        ,(u'Salud' , u'http://estaticos.elmundo.es/elmundosalud/rss/portada.xml' )
        ,(u'Solidaridad' , u'http://estaticos.elmundo.es/elmundo/rss/solidaridad.xml' )
        ,(u'Su vivienda' , u'http://estaticos.elmundo.es/elmundo/rss/suvivienda.xml' )
        ,(u'Motor' , u'http://estaticos.elmundo.es/elmundomotor/rss/portada.xml' )

        ,(u'Madrid' , u'http://estaticos.elmundo.es/elmundo/rss/madrid.xml' )
        ,(u'Barcelona' , u'http://estaticos.elmundo.es/elmundo/rss/barcelona.xml' )
        ,(u'Pa\xeds Vasco' , u'http://estaticos.elmundo.es/elmundo/rss/paisvasco.xml' )
        ,(u'Baleares' , u'http://estaticos.elmundo.es/elmundo/rss/baleares.xml' )
        ,(u'Castilla y Le\xf3n' , u'http://estaticos.elmundo.es/elmundo/rss/castillayleon.xml' )
        ,(u'Valladolid' , u'http://estaticos.elmundo.es/elmundo/rss/valladolid.xml' )
        ,(u'Valencia' , u'http://estaticos.elmundo.es/elmundo/rss/valencia.xml' )
        ,(u'Alicante' , u'http://estaticos.elmundo.es/elmundo/rss/alicante.xml' )
        ,(u'Castell\xf3n' , u'http://estaticos.elmundo.es/elmundo/rss/castellon.xml' )
        ,(u'Andaluc\xeda' , u'http://estaticos.elmundo.es/elmundo/rss/andalucia.xml' )
        ,(u'Sevilla' , u'http://estaticos.elmundo.es/elmundo/rss/andalucia_sevilla.xml' )
        ,(u'M\xe1laga' , u'http://estaticos.elmundo.es/elmundo/rss/andalucia_malaga.xml' )
        ]

    def preprocess_html(self, soup):

@@ -67,3 +92,34 @@ class ElMundo(BasicNewsRecipe):
    def get_article_url(self, article):
        return article.get('guid', None)

    preprocess_regexps = [
        # To show the poster image for embedded videos
        (re.compile(r'var imagen', re.DOTALL|re.IGNORECASE), lambda match: '--></script><img src'),
        (re.compile(r'.jpg";', re.DOTALL|re.IGNORECASE), lambda match: '.jpg">'),
        (re.compile(r'var video=', re.DOTALL|re.IGNORECASE), lambda match: '<script language="Javascript" type="text/javascript"><!--'),

        # Suppress the comment numbering: 1, 2, 3 ...
        (re.compile(r'<ol>\n<li style="z-index:', re.DOTALL|re.IGNORECASE), lambda match: '<ul><li style="z-index:'),
        (re.compile(r'</ol>\n<div class="num_comentarios estirar">', re.DOTALL|re.IGNORECASE), lambda match: '</ul><div class="num_comentarios estirar">'),
    ]

    # Fetch the front page cover image
    def get_cover_url(self):
        cover = None
        st = time.localtime()
        year = str(st.tm_year)
        month = "%.2d" % st.tm_mon
        day = "%.2d" % st.tm_mday
        # http://img.kiosko.net/2011/11/19/es/elmundo.750.jpg
        cover = 'http://img.kiosko.net/' + year + '/' + month + '/' + day + '/es/elmundo.750.jpg'
        br = BasicNewsRecipe.get_browser()
        try:
            br.open(cover)
        except:
            self.log("\nPortada no disponible")  # front page not available
            cover = 'http://estaticos03.elmundo.es/elmundo/iconos/v4.x/v4.01/bg_h1.png'
        return cover
recipes/givemesomethingtoread.recipe (new file, 90 lines)

@@ -0,0 +1,90 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe

class GiveMeSomethingToRead(BasicNewsRecipe):
    title = u'Give Me Something To Read'
    description = 'Curation / aggregation of articles on diverse topics'
    language = 'en'
    __author__ = 'barty on mobileread.com forum'
    max_articles_per_feed = 100
    no_stylesheets = False
    timefmt = ' [%a, %d %b, %Y]'
    oldest_article = 365
    auto_cleanup = True
    INDEX = 'http://givemesomethingtoread.com'
    CATEGORIES = [
        # comment out categories you don't want
        # (user friendly name, system name, max number of articles to load)
        ('The Arts','arts',25),
        ('Science','science',30),
        ('Technology','technology',30),
        ('Politics','politics',20),
        ('Media','media',30),
        ('Crime','crime',15),
        ('Other articles','',10)
    ]

    def parse_index(self):
        self.cover_url = 'http://thegretchenshow.files.wordpress.com/2009/12/well-read-cat-small.jpg'
        feeds = []
        seen_urls = set([])
        regex = re.compile(r'http://(www\.)?([^/:]+)', re.I)

        for category in self.CATEGORIES:
            (cat_name, tag, max_articles) = category

            tagurl = '' if tag=='' else '/tagged/'+tag
            self.log('Reading category:', cat_name)

            articles = []
            pageno = 1

            while len(articles) < max_articles and pageno < 100:
                page = "%s%s/page/%d" % (self.INDEX, tagurl, pageno) if pageno > 1 else self.INDEX + tagurl
                pageno += 1

                self.log('\tReading page:', page)
                try:
                    soup = self.index_to_soup(page)
                except:
                    break

                headers = soup.findAll('h2')
                if len(headers) == 0:
                    break

                for header in headers:
                    atag = header.find('a')
                    url = atag['href']
                    # skip promotionals and duplicates
                    if url.startswith('http://givemesomethingtoread') or url.startswith('/') or url in seen_urls:
                        continue
                    seen_urls.add(url)
                    title = self.tag_to_string(header)
                    self.log('\tFound article:', title)
                    #self.log('\t', url)
                    desc = header.parent.find('blockquote')
                    desc = self.tag_to_string(desc) if desc else ''
                    m = regex.match(url)
                    if m:
                        desc = "[%s] %s" % (m.group(2), desc)
                    #self.log('\t', desc)
                    date = ''
                    p = header.parent.previousSibling
                    # navigate up to find h3, which contains the date
                    while p:
                        if hasattr(p,'name') and p.name == 'h3':
                            date = self.tag_to_string(p)
                            break
                        p = p.previousSibling
                    articles.append({'title':title,'url':url,'description':desc,'date':date})
                    if len(articles) >= max_articles:
                        break

            if articles:
                feeds.append((cat_name, articles))

        return feeds
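For recipes that build their own index like this one, parse_index() must return a list of (feed title, article list) tuples, where each article is a dict with at least a title and a url. A minimal illustration of that return shape (the values are made up):

    feeds = [
        ('Science', [
            {'title': 'An article', 'url': 'http://example.com/a',
             'description': '[example.com] summary text', 'date': ''},
        ]),
    ]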
@@ -9,6 +9,7 @@ www.guardian.co.uk
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe
from datetime import date
from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag

class Guardian(BasicNewsRecipe):

@@ -16,16 +17,19 @@ class Guardian(BasicNewsRecipe):
    if date.today().weekday() == 6:
        base_url = "http://www.guardian.co.uk/theobserver"
        cover_pic = 'Observer digital edition'
        masthead_url = 'http://static.guim.co.uk/sys-images/Guardian/Pix/site_furniture/2010/10/19/1287478087992/The-Observer-001.gif'
    else:
        base_url = "http://www.guardian.co.uk/theguardian"
        cover_pic = 'Guardian digital edition'
        masthead_url = 'http://static.guim.co.uk/static/f76b43f9dcfd761f0ecf7099a127b603b2922118/common/images/logos/the-guardian/titlepiece.gif'

    __author__ = 'Seabound and Sujata Raman'
    language = 'en_GB'

    oldest_article = 7
    max_articles_per_feed = 100
    remove_javascript = True
    oldest_article = 7
    max_articles_per_feed = 100
    remove_javascript = True
    encoding = 'utf-8'

    # List of section titles to ignore
    # For example: ['Sport']

@@ -41,6 +45,16 @@ class Guardian(BasicNewsRecipe):
        dict(name='div', attrs={'class':["guardian-tickets promo-component",]}),
        dict(name='ul', attrs={'class':["pagination"]}),
        dict(name='ul', attrs={'id':["content-actions"]}),
        # article history link
        dict(name='a', attrs={'class':["rollover history-link"]}),
        # "a version of this article ..." spiel
        dict(name='div' , attrs = { 'class' : ['section']}),
        # "about this article" js dialog
        dict(name='div', attrs={'class':["share-top",]}),
        # author picture
        dict(name='img', attrs={'class':["contributor-pic-small"]}),
        # embedded videos/captions
        dict(name='span',attrs={'class' : ['inline embed embed-media']}),
        #dict(name='img'),
        ]
    use_embedded_content = False

@@ -67,6 +81,13 @@ class Guardian(BasicNewsRecipe):

    def preprocess_html(self, soup):

        # multiple html sections in soup, useful stuff in the first
        html = soup.find('html')
        soup2 = BeautifulSoup()
        soup2.insert(0, html)

        soup = soup2

        for item in soup.findAll(style=True):
            del item['style']

@@ -74,7 +95,18 @@ class Guardian(BasicNewsRecipe):
            del item['face']
        for tag in soup.findAll(name=['ul','li']):
            tag.name = 'div'

        # removes number next to rating stars
        items_to_remove = []
        rating_container = soup.find('div', attrs={'class': ['rating-container']})
        if rating_container:
            for item in rating_container:
                if isinstance(item, Tag) and str(item.name) == 'span':
                    items_to_remove.append(item)

        for item in items_to_remove:
            item.extract()

        return soup

    def find_sections(self):
recipes/letsgetcritical.recipe (new file, 94 lines)

@@ -0,0 +1,94 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe

class LetsGetCritical(BasicNewsRecipe):
    title = u"Let's Get Critical"
    description = 'Curation / aggregation of criticisms of the arts and culture'
    language = 'en'
    __author__ = 'barty on mobileread.com forum'
    max_articles_per_feed = 100
    no_stylesheets = False
    timefmt = ' [%a, %d %b, %Y]'
    oldest_article = 365
    auto_cleanup = True
    INDEX = 'http://www.letsgetcritical.org'
    CATEGORIES = [
        # comment out categories you don't want
        # (user friendly name, system name, max number of articles to load)
        ('Architecture','architecture',30),
        ('Art','art',30),
        ('Books','books',30),
        ('Design','design',30),
        ('Digital','digital',30),
        ('Food','food',30),
        ('Movies','movies',30),
        ('Music','music',30),
        ('Television','television',30),
        ('Other articles','',10)
    ]

    def parse_index(self):
        self.cover_url = 'http://www.letsgetcritical.org/wp-content/themes/lets_get_critical/images/lgc.jpg'
        feeds = []
        seen_urls = set([])
        regex = re.compile(r'http://(www\.)?([^/:]+)', re.I)

        for category in self.CATEGORIES:
            (cat_name, tag, max_articles) = category

            tagurl = '' if tag=='' else '/category/'+tag.lower()
            self.log('Reading category:', cat_name)

            articles = []
            pageno = 1

            while len(articles) < max_articles and pageno < 100:
                page = "%s%s/page/%d" % (self.INDEX, tagurl, pageno) if pageno > 1 else self.INDEX + tagurl
                pageno += 1

                self.log('\tReading page:', page)
                try:
                    soup = self.index_to_soup(page)
                except:
                    break

                posts = soup.findAll('div', attrs={'class':'post_multi'})
                if len(posts) == 0:
                    break

                for post in posts:
                    dt = post.find('div', attrs={'class':'title'})
                    atag = dt.find('a')
                    url = atag['href']
                    # skip promotionals and duplicates
                    if url.startswith('http://letsgetcritical') or url.startswith('/') or url in seen_urls:
                        continue
                    seen_urls.add(url)
                    title = self.tag_to_string(atag)
                    self.log('\tFound article:', title)
                    self.log('\t', url)
                    desc = post.find('blockquote')
                    desc = self.tag_to_string(desc) if desc else ''
                    m = regex.match(url)
                    if m:
                        desc = "[%s] %s" % (m.group(2), desc)
                    #self.log('\t', desc)
                    date = ''
                    p = post.previousSibling
                    # navigate up the siblings to find the date
                    while p:
                        if hasattr(p,'class') and p['class'] == 'singledate':
                            date = self.tag_to_string(p)
                            break
                        p = p.previousSibling
                    articles.append({'title':title,'url':url,'description':desc,'date':date})
                    if len(articles) >= max_articles:
                        break

            if articles:
                feeds.append((cat_name, articles))

        return feeds
@@ -6,11 +6,7 @@ www.nin.co.rs
'''

import re
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe
from contextlib import closing
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre import entity_to_unicode

class Nin(BasicNewsRecipe):
    title = 'NIN online'

@@ -81,7 +77,7 @@ class Nin(BasicNewsRecipe):
        return cover_url

    feeds = [(u'NIN Online', u'http://www.nin.co.rs/misc/rss.php?feed=RSS2.0')]

    def get_article_url(self, article):
        url = BasicNewsRecipe.get_article_url(self, article)
        return url.replace('.co.yu', '.co.rs')
@@ -8,13 +8,13 @@ radikal.com.tr
from calibre.web.feeds.news import BasicNewsRecipe

class Radikal_tr(BasicNewsRecipe):
    title = 'Radikal Ekleri'
    __author__ = 'Darko Mileticden uyarlama'
    description = 'Politic-Cultural Articles from Turkey'
    title = 'Radikal - Turkey'
    __author__ = 'Darko Miletic'
    description = 'News from Turkey'
    publisher = 'radikal'
    category = 'news, politics, Turkey'
    oldest_article = 14
    max_articles_per_feed = 120
    oldest_article = 7
    max_articles_per_feed = 150
    no_stylesheets = True
    encoding = 'cp1254'
    use_embedded_content = False

@@ -37,7 +37,12 @@ class Radikal_tr(BasicNewsRecipe):

    feeds = [
        (u'Radikal Iki' , u'http://www.radikal.com.tr/d/rss/Rss_42.xml')
        (u'Yazarlar' , u'http://www.radikal.com.tr/d/rss/RssYazarlar.xml')
        ,(u'Turkiye' , u'http://www.radikal.com.tr/d/rss/Rss_97.xml')
        ,(u'Politika' , u'http://www.radikal.com.tr/d/rss/Rss_98.xml')
        ,(u'Dis Haberler', u'http://www.radikal.com.tr/d/rss/Rss_100.xml')
        ,(u'Ekonomi' , u'http://www.radikal.com.tr/d/rss/Rss_101.xml')
        ,(u'Radikal Iki' , u'http://www.radikal.com.tr/d/rss/Rss_42.xml')
        ,(u'Radikal Hayat' , u'http://www.radikal.com.tr/d/rss/Rss_41.xml')
        ,(u'Radikal Kitap' , u'http://www.radikal.com.tr/d/rss/Rss_40.xml')
    ]
@@ -21,7 +21,7 @@ __all__ = [
        'linux32', 'linux64', 'linux', 'linux_freeze',
        'osx32_freeze', 'osx', 'rsync', 'push',
        'win32_freeze', 'win32', 'win',
        'stage1', 'stage2', 'stage3', 'stage4', 'publish'
        'stage1', 'stage2', 'stage3', 'stage4', 'stage5', 'publish'
        ]

@@ -54,13 +54,14 @@ resources = Resources()
kakasi = Kakasi()

from setup.publish import Manual, TagRelease, Stage1, Stage2, \
        Stage3, Stage4, Publish
        Stage3, Stage4, Stage5, Publish
manual = Manual()
tag_release = TagRelease()
stage1 = Stage1()
stage2 = Stage2()
stage3 = Stage3()
stage4 = Stage4()
stage5 = Stage5()
publish = Publish()

from setup.upload import UploadUserManual, UploadInstallers, UploadDemo, \
@@ -12,14 +12,14 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-09-27 14:31+0000\n"
"PO-Revision-Date: 2011-11-13 15:24+0000\n"
"PO-Revision-Date: 2011-11-22 16:45+0000\n"
"Last-Translator: Ferran Rius <frius64@hotmail.com>\n"
"Language-Team: Catalan <linux@softcatala.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-11-14 05:15+0000\n"
"X-Generator: Launchpad (build 14277)\n"
"X-Launchpad-Export-Date: 2011-11-23 05:19+0000\n"
"X-Generator: Launchpad (build 14336)\n"
"Language: ca\n"
#. name for aaa

@@ -9164,71 +9164,71 @@ msgstr "Hewa"

#. name for han
msgid "Hangaza"
msgstr ""
msgstr "Hangaza"

#. name for hao
msgid "Hakö"
msgstr ""
msgstr "Hako"

#. name for hap
msgid "Hupla"
msgstr ""
msgstr "Hupla"

#. name for haq
msgid "Ha"
msgstr ""
msgstr "Ha"

#. name for har
msgid "Harari"
msgstr ""
msgstr "Harari"

#. name for has
msgid "Haisla"
msgstr ""
msgstr "Haisla"

#. name for hat
msgid "Creole; Haitian"
msgstr ""
msgstr "Crioll haitià"

#. name for hau
msgid "Hausa"
msgstr ""
msgstr "Hausa"

#. name for hav
msgid "Havu"
msgstr ""
msgstr "Havu"

#. name for haw
msgid "Hawaiian"
msgstr ""
msgstr "Hawaià"

#. name for hax
msgid "Haida; Southern"
msgstr ""
msgstr "Haida; meridional"

#. name for hay
msgid "Haya"
msgstr ""
msgstr "Haya"

#. name for haz
msgid "Hazaragi"
msgstr ""
msgstr "Hazaragi"

#. name for hba
msgid "Hamba"
msgstr ""
msgstr "Hamba"

#. name for hbb
msgid "Huba"
msgstr ""
msgstr "Huba"

#. name for hbn
msgid "Heiban"
msgstr ""
msgstr "Heiban"

#. name for hbo
msgid "Hebrew; Ancient"
msgstr ""
msgstr "Hebreu antic"

#. name for hbs
msgid "Serbo-Croatian"

@@ -9236,7 +9236,7 @@ msgstr "Serbocroat"

#. name for hbu
msgid "Habu"
msgstr ""
msgstr "Habu"

#. name for hca
msgid "Creole Hindi; Andaman"

@@ -9244,11 +9244,11 @@ msgstr "Hindi crioll; Andaman"

#. name for hch
msgid "Huichol"
msgstr ""
msgstr "Huichol"

#. name for hdn
msgid "Haida; Northern"
msgstr ""
msgstr "Haida; septentrional"

#. name for hds
msgid "Honduras Sign Language"

@@ -9256,7 +9256,7 @@ msgstr "Llenguatge de signes hondureny"

#. name for hdy
msgid "Hadiyya"
msgstr ""
msgstr "Hadia"

#. name for hea
msgid "Miao; Northern Qiandong"

@@ -9268,59 +9268,59 @@ msgstr "Hebreu"

#. name for hed
msgid "Herdé"
msgstr ""
msgstr "Herdé"

#. name for heg
msgid "Helong"
msgstr ""
msgstr "Helong"

#. name for heh
msgid "Hehe"
msgstr ""
msgstr "Hehe"

#. name for hei
msgid "Heiltsuk"
msgstr ""
msgstr "Heiltsuk"

#. name for hem
msgid "Hemba"
msgstr ""
msgstr "Hemba"

#. name for her
msgid "Herero"
msgstr ""
msgstr "Herero"

#. name for hgm
msgid "Hai//om"
msgstr ""
msgstr "Hai om"

#. name for hgw
msgid "Haigwai"
msgstr ""
msgstr "Haigwai"

#. name for hhi
msgid "Hoia Hoia"
msgstr ""
msgstr "Hoia Hoia"

#. name for hhr
msgid "Kerak"
msgstr ""
msgstr "Kerak"

#. name for hhy
msgid "Hoyahoya"
msgstr ""
msgstr "Hoyahoya"

#. name for hia
msgid "Lamang"
msgstr ""
msgstr "Lamang"

#. name for hib
msgid "Hibito"
msgstr ""
msgstr "Hibito"

#. name for hid
msgid "Hidatsa"
msgstr ""
msgstr "Hidatsa"

#. name for hif
msgid "Hindi; Fiji"

@@ -9328,23 +9328,23 @@ msgstr "Hindi; Fiji"

#. name for hig
msgid "Kamwe"
msgstr ""
msgstr "Kamwe"

#. name for hih
msgid "Pamosu"
msgstr ""
msgstr "Hinihon"

#. name for hii
msgid "Hinduri"
msgstr ""
msgstr "Hinduri"

#. name for hij
msgid "Hijuk"
msgstr ""
msgstr "Hijuk"

#. name for hik
msgid "Seit-Kaitetu"
msgstr ""
msgstr "Seit-Kaitetu"

#. name for hil
msgid "Hiligaynon"

@@ -24696,7 +24696,7 @@ msgstr ""

#. name for tcs
msgid "Creole; Torres Strait"
msgstr ""
msgstr "Crioll; Torres Estret"

#. name for tct
msgid "T'en"
File diff suppressed because it is too large
@@ -17949,7 +17949,7 @@ msgid "Ndoola"
msgstr ""

#. name for nds
msgid "Saxon; Low"
msgid "German; Low"
msgstr ""

#. name for ndt

@@ -22836,7 +22836,7 @@ Source: <http://www.sil.org/iso639-3/>
    part2_code="nds"
    scope="I"
    type="L"
    name="Saxon; Low" />
    name="German; Low" />
<iso_639_3_entry
    id="ndt"
    scope="I"
@@ -37,19 +37,20 @@ class Stage2(Command):
        if os.path.exists(build):
            shutil.rmtree(build)


class Stage3(Command):

    description = 'Stage 3 of the publish process'
    sub_commands = ['upload_user_manual', 'upload_demo', 'sdist',
            'upload_to_sourceforge', 'upload_to_google_code',
            'tag_release', 'upload_to_server',
            ]
    sub_commands = ['upload_user_manual', 'upload_demo', 'sdist']

class Stage4(Command):

    description = 'Stage 4 of the publish process'
    sub_commands = ['upload_to_sourceforge', 'upload_to_google_code']

class Stage5(Command):

    description = 'Stage 5 of the publish process'
    sub_commands = ['tag_release', 'upload_to_server']

    def run(self, opts):
        subprocess.check_call('rm -rf build/* dist/*', shell=True)

@@ -57,7 +58,7 @@ class Stage4(Command):
class Publish(Command):

    description = 'Publish a new calibre release'
    sub_commands = ['stage1', 'stage2', 'stage3', 'stage4']
    sub_commands = ['stage1', 'stage2', 'stage3', 'stage4', 'stage5', ]

class Manual(Command):
@@ -141,8 +141,17 @@ class UploadToGoogleCode(Command): # {{{
            'dmg':'OSX','bz2':'Linux','gz':'All'}[ext]
        desc = installer_description(fname)
        start = time.time()
        path = self.upload(os.path.abspath(fname), desc,
                labels=[typ, op, 'Featured'])
        for i in range(5):
            try:
                path = self.upload(os.path.abspath(fname), desc,
                        labels=[typ, op, 'Featured'])
            except:
                import traceback
                traceback.print_exc()
                print ('\nUpload failed, trying again in 30 secs')
                time.sleep(30)
            else:
                break
        self.info('Uploaded to:', path, 'in', int(time.time() - start),
                'seconds')
        return path

@@ -312,9 +321,16 @@ class UploadToSourceForge(Command): # {{{
            if not os.path.exists(x): continue
            start = time.time()
            self.info('Uploading', x)
            check_call(['rsync', '-z', '--progress', '-e', 'ssh -x', x,
                '%s,%s@frs.sourceforge.net:%s'%(self.USERNAME, self.PROJECT,
                    self.rdir+'/')])
            for i in range(5):
                try:
                    check_call(['rsync', '-z', '--progress', '-e', 'ssh -x', x,
                        '%s,%s@frs.sourceforge.net:%s'%(self.USERNAME, self.PROJECT,
                            self.rdir+'/')])
                except:
                    print ('\nUpload failed, trying again in 30 seconds')
                    time.sleep(30)
                else:
                    break
            print 'Uploaded in', int(time.time() - start), 'seconds'
            print ('\n')
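Both hunks above wrap a previously bare upload call in the same retry idiom: try up to five times, sleep 30 seconds after a failure, and leave the loop via for/else as soon as one attempt succeeds. A self-contained sketch of that idiom (the helper name and callable are illustrative, not part of calibre):

    import time

    def with_retries(action, attempts=5, delay=30):
        result = None
        for i in range(attempts):
            try:
                result = action()
            except Exception:
                # Failed attempt: wait, then let the loop retry.
                print('Upload failed, trying again in %d secs' % delay)
                time.sleep(delay)
            else:
                break  # success, skip the remaining attempts
        return result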
@@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
numeric_version = (0, 8, 27)
numeric_version = (0, 8, 28)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"
@@ -64,6 +64,7 @@ class ANDROID(USBMS):
        0xfce : {
            0xd12e : [0x0100],
            0xe14f : [0x0226],
            0x614f : [0x0226, 0x100],
        },

        # Google
@@ -376,8 +376,8 @@ class MobiMLizer(object):
        istate.preserve = (style['white-space'] in ('pre', 'pre-wrap'))
        istate.bgcolor = style['background-color']
        istate.fgcolor = style['color']
        istate.strikethrough = style['text-decoration'] == 'line-through'
        istate.underline = style['text-decoration'] == 'underline'
        istate.strikethrough = style.effective_text_decoration == 'line-through'
        istate.underline = style.effective_text_decoration == 'underline'
        ff = style['font-family'].lower() if style['font-family'] else ''
        if 'monospace' in ff or 'courier' in ff or ff.endswith(' mono'):
            istate.family = 'monospace'
@@ -714,6 +714,26 @@ class Style(object):
        self._lineHeight = result
        return self._lineHeight

    @property
    def effective_text_decoration(self):
        '''
        Browsers do this creepy thing with text-decoration where even though the
        property is not inherited, it looks like it is because containing
        blocks apply it. The actual algorithm is utterly ridiculous, see
        http://reference.sitepoint.com/css/text-decoration
        This matters for MOBI output, where text-decoration is mapped to <u>
        and <st> tags. Trying to implement the actual algorithm is too much
        work, so we just use a simple fake that should cover most cases.
        '''
        css = self._style.get('text-decoration', None)
        pcss = None
        parent = self._get_parent()
        if parent is not None:
            pcss = parent._style.get('text-decoration', None)
        if css in ('none', None) and pcss not in (None, 'none'):
            return pcss
        return css
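In effect, the property falls back to the containing block's text-decoration whenever the element itself computes to none. A tiny standalone version of just that fallback rule (a hypothetical helper, not the calibre API):

    def effective_text_decoration(own, parent):
        # Fall back to the parent's value only when this element has none.
        if own in ('none', None) and parent not in (None, 'none'):
            return parent
        return own

    print(effective_text_decoration(None, 'underline'))        # 'underline' -> <u> in MOBI
    print(effective_text_decoration('overline', 'underline'))  # element's own value wins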

    @property
    def marginTop(self):
        return self._unit_convert(
@@ -11,7 +11,7 @@ import re
import urllib2

from contextlib import closing
from lxml import etree, html
from lxml import etree
from PyQt4.Qt import QUrl

from calibre import browser, url_slash_cleaner, prints

@@ -25,18 +25,18 @@ from calibre.gui2.store.web_store_dialog import WebStoreDialog
class LitResStore(BasicStoreConfig, StorePlugin):
    shop_url = u'http://www.litres.ru'
    #http://robot.litres.ru/pages/biblio_book/?art=174405

    def open(self, parent=None, detail_item=None, external=False):

        aff_id = u'?' + _get_affiliate_id()

        url = self.shop_url + aff_id
        detail_url = None
        if detail_item:
            # http://www.litres.ru/pages/biblio_book/?art=157074
            detail_url = self.shop_url + u'/pages/biblio_book/' + aff_id +\
                        u'&art=' + urllib2.quote(detail_item)
                u'&art=' + urllib2.quote(detail_item)

        if external or self.config.get('open_external', False):
            open_url(QUrl(url_slash_cleaner(detail_url if detail_url else url)))
        else:

@@ -44,28 +44,28 @@ class LitResStore(BasicStoreConfig, StorePlugin):
            d.setWindowTitle(self.name)
            d.set_tags(self.config.get('tags', ''))
            d.exec_()

    def search(self, query, max_results=10, timeout=60):
        search_url = u'http://robot.litres.ru/pages/catalit_browser/?checkpoint=2000-01-02&'\
            'search=%s&limit=0,%s'
        search_url = search_url % (urllib2.quote(query), max_results)

        counter = max_results
        br = browser()
        br.addheaders.append(['Accept-Encoding','gzip'])

        with closing(br.open(search_url, timeout=timeout)) as r:
            ungzipResponse(r, br)
            raw = xml_to_unicode(r.read(), strip_encoding_pats=True, assume_utf8=True)[0]

            parser = etree.XMLParser(recover=True, no_network=True)
            doc = etree.fromstring(raw, parser=parser)
            for data in doc.xpath('//*[local-name() = "fb2-book"]'):
                if counter <= 0:
                    break
                counter -= 1

                try:
                    sRes = self.create_search_result(data)
                except Exception as e:
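Note how the search feeds the catalogue XML through lxml in recovery mode, so a malformed entry degrades gracefully instead of aborting the whole result list. A standalone illustration of that parser configuration (the sample XML is made up):

    from lxml import etree

    # recover=True lets lxml repair what it can instead of raising;
    # no_network=True blocks external DTD/entity fetches during parsing.
    parser = etree.XMLParser(recover=True, no_network=True)
    raw = '<books><book title="A">&bad;</book><book title="B"/></books>'
    doc = etree.fromstring(raw, parser=parser)
    print([b.get('title') for b in doc.iter('book')])  # ['A', 'B']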
@@ -75,10 +75,10 @@ class LitResStore(BasicStoreConfig, StorePlugin):

    def get_details(self, search_result, timeout=60):
        pass

    def create_search_result(self, data):
        xp_template = 'normalize-space(@{0})'

        sRes = SearchResult()
        sRes.drm = SearchResult.DRM_UNLOCKED
        sRes.detail_item = data.xpath(xp_template.format('hub_id'))

@@ -92,7 +92,7 @@ class LitResStore(BasicStoreConfig, StorePlugin):
        # cover vs cover_preview
        sRes.cover_url = data.xpath(xp_template.format('cover_preview'))
        sRes.price = format_price_in_RUR(sRes.price)

        types = data.xpath('//fb2-book//files/file/@type')
        fmt_set = _parse_ebook_formats(' '.join(types))
        sRes.formats = ', '.join(fmt_set)

@@ -134,8 +134,8 @@ def _get_affiliate_id():
def _parse_ebook_formats(formatsStr):
    '''
    Creates a set with displayable names of the formats

    :param formatsStr: string with comma separated book formats

    :param formatsStr: string with comma separated book formats
        as it is provided by ozon.ru
    :return: a list with displayable book formats
    '''

@@ -166,4 +166,4 @@ def _parse_ebook_formats(formatsStr):
        formats.add('LRF')
    if 'jar' in formatsUnstruct:
        formats.add('JAR')
    return formats
    return formats
@@ -351,7 +351,7 @@ You can build advanced search queries easily using the :guilabel:`Advanced Searc
clicking the button |sbi|.

Available fields for searching are: ``tag, title, author, publisher, series, series_index, rating, cover,
comments, format, identifiers, date, pubdate, search, size`` and custom columns. If a device is plugged in, the ``ondevice`` field becomes available. To find the search name (actually called the `lookup name`) for a custom column, hover your mouse over the column header in the library view.
comments, format, identifiers, date, pubdate, search, size`` and custom columns. If a device is plugged in, the ``ondevice`` field becomes available when searching the calibre library view. To find the search name (actually called the `lookup name`) for a custom column, hover your mouse over the column header in the library view.
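
Several of these fields can be combined with boolean operators in a single query; an illustrative (not exhaustive) example::

    author:asimov and format:epub and not tag:fiction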

The syntax for searching for dates is::
9 file diffs suppressed because they are too large
@@ -4,9 +4,9 @@
#
msgid ""
msgstr ""
"Project-Id-Version: calibre 0.8.27\n"
"POT-Creation-Date: 2011-11-23 13:00+IST\n"
"PO-Revision-Date: 2011-11-23 13:00+IST\n"
"Project-Id-Version: calibre 0.8.28\n"
"POT-Creation-Date: 2011-11-25 09:00+IST\n"
"PO-Revision-Date: 2011-11-25 09:00+IST\n"
"Last-Translator: Automatically generated\n"
"Language-Team: LANGUAGE\n"
"MIME-Version: 1.0\n"

@@ -24,8 +24,8 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/db/cache.py:105
#: /home/kovid/work/calibre/src/calibre/db/cache.py:108
#: /home/kovid/work/calibre/src/calibre/db/cache.py:119
#: /home/kovid/work/calibre/src/calibre/devices/android/driver.py:265
#: /home/kovid/work/calibre/src/calibre/devices/android/driver.py:266
#: /home/kovid/work/calibre/src/calibre/devices/android/driver.py:267
#: /home/kovid/work/calibre/src/calibre/devices/hanvon/driver.py:99
#: /home/kovid/work/calibre/src/calibre/devices/hanvon/driver.py:100
#: /home/kovid/work/calibre/src/calibre/devices/jetbook/driver.py:74

@@ -889,15 +889,15 @@ msgstr ""
msgid "Communicate with Android phones."
msgstr ""

#: /home/kovid/work/calibre/src/calibre/devices/android/driver.py:148
#: /home/kovid/work/calibre/src/calibre/devices/android/driver.py:149
msgid "Comma separated list of directories to send e-books to on the device. The first one that exists will be used"
msgstr ""

#: /home/kovid/work/calibre/src/calibre/devices/android/driver.py:205
#: /home/kovid/work/calibre/src/calibre/devices/android/driver.py:206
msgid "Communicate with S60 phones."
msgstr ""

#: /home/kovid/work/calibre/src/calibre/devices/android/driver.py:224
#: /home/kovid/work/calibre/src/calibre/devices/android/driver.py:225
msgid "Communicate with WebOS tablets."
msgstr ""

@@ -16284,23 +16284,23 @@ msgstr ""
msgid "list_union(list1, list2, separator) -- return a list made by merging the items in list1 and list2, removing duplicate items using a case-insensitive compare. If items differ in case, the one in list1 is used. The items in list1 and list2 are separated by separator, as are the items in the returned list."
msgstr ""

#: /home/kovid/work/calibre/src/calibre/utils/formatter_functions.py:936
#: /home/kovid/work/calibre/src/calibre/utils/formatter_functions.py:935
msgid "list_difference(list1, list2, separator) -- return a list made by removing from list1 any item found in list2, using a case-insensitive compare. The items in list1 and list2 are separated by separator, as are the items in the returned list."
msgstr ""

#: /home/kovid/work/calibre/src/calibre/utils/formatter_functions.py:957
#: /home/kovid/work/calibre/src/calibre/utils/formatter_functions.py:956
msgid "list_intersection(list1, list2, separator) -- return a list made by removing from list1 any item not found in list2, using a case-insensitive compare. The items in list1 and list2 are separated by separator, as are the items in the returned list."
msgstr ""

#: /home/kovid/work/calibre/src/calibre/utils/formatter_functions.py:978
#: /home/kovid/work/calibre/src/calibre/utils/formatter_functions.py:977
msgid "list_sort(list, direction, separator) -- return list sorted using a case-insensitive sort. If direction is zero, the list is sorted ascending, otherwise descending. The list items are separated by separator, as are the items in the returned list."
msgstr ""

#: /home/kovid/work/calibre/src/calibre/utils/formatter_functions.py:993
#: /home/kovid/work/calibre/src/calibre/utils/formatter_functions.py:992
msgid "list_equals(list1, sep1, list2, sep2, yes_val, no_val) -- return yes_val if list1 and list2 contain the same items, otherwise return no_val. The items are determined by splitting each list using the appropriate separator character (sep1 or sep2). The order of items in the lists is not relevant. The compare is case insensitive."
msgstr ""

#: /home/kovid/work/calibre/src/calibre/utils/formatter_functions.py:1011
#: /home/kovid/work/calibre/src/calibre/utils/formatter_functions.py:1010
msgid "list_re(src_list, separator, search_re, opt_replace) -- Construct a list by first separating src_list into items using the separator character. For each item in the list, check if it matches search_re. If it does, then add it to the list to be returned. If opt_replace is not the empty string, then apply the replacement before adding the item to the returned list."
msgstr ""
61 file diffs suppressed because they are too large
@@ -917,14 +917,13 @@ class BuiltinListUnion(BuiltinFormatterFunction):
    aliases = ['merge_lists']

    def evaluate(self, formatter, kwargs, mi, locals, list1, list2, separator):
        l1 = [l.strip() for l in list1.split(separator) if l.strip()]
        res = [l.strip() for l in list1.split(separator) if l.strip()]
        l2 = [l.strip() for l in list2.split(separator) if l.strip()]
        lcl1 = set([icu_lower(l) for l in l1])
        lcl1 = set([icu_lower(l) for l in res])

        res = set(l1)
        for i in l2:
            if icu_lower(i) not in lcl1:
                res.add(i)
            if icu_lower(i) not in lcl1 and i not in res:
                res.append(i)
        if separator == ',':
            return ', '.join(res)
        return separator.join(res)

@@ -944,7 +943,7 @@ class BuiltinListDifference(BuiltinFormatterFunction):

        res = []
        for i in l1:
            if icu_lower(i) not in l2:
            if icu_lower(i) not in l2 and i not in res:
                res.append(i)
        if separator == ',':
            return ', '.join(res)

@@ -963,10 +962,10 @@ class BuiltinListIntersection(BuiltinFormatterFunction):
        l1 = [l.strip() for l in list1.split(separator) if l.strip()]
        l2 = set([icu_lower(l.strip()) for l in list2.split(separator) if l.strip()])

        res = set()
        res = []
        for i in l1:
            if icu_lower(i) in l2:
                res.add(i)
            if icu_lower(i) in l2 and i not in res:
                res.append(i)
        if separator == ',':
            return ', '.join(res)
        return separator.join(res)

@@ -1017,13 +1016,14 @@ class BuiltinListRe(BuiltinFormatterFunction):

    def evaluate(self, formatter, kwargs, mi, locals, src_list, separator, search_re, opt_replace):
        l = [l.strip() for l in src_list.split(separator) if l.strip()]
        res = set()
        res = []
        for item in l:
            if re.search(search_re, item, flags=re.I) is not None:
                if opt_replace:
                    item = re.sub(search_re, opt_replace, item)
                for i in [l.strip() for l in item.split(',') if l.strip()]:
                    res.add(i)
                    if i not in res:
                        res.append(i)
        if separator == ',':
            return ', '.join(res)
        return separator.join(res)
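The pattern in all four hunks is the same: a set accumulator is replaced by a list plus an explicit "i not in res" membership test, so the template functions keep first-seen order while still dropping duplicates (the earlier set-based code returned items in arbitrary order, which is the regression named in the changelog). A standalone sketch of the idiom, with plain lower() standing in for calibre's icu_lower():

    def ordered_union(l1, l2):
        # Keep first-seen order; compare case-insensitively against l1.
        res = list(l1)
        seen = set(x.lower() for x in l1)
        for i in l2:
            if i.lower() not in seen and i not in res:
                res.append(i)
        return res

    print(ordered_union(['B', 'a'], ['A', 'c']))  # ['B', 'a', 'c'], order preserved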