Updated recipes for Critica Digital and Infobae

This commit is contained in:
Kovid Goyal 2009-11-11 10:40:40 -07:00
parent b31c952007
commit 14156737ce
2 changed files with 76 additions and 34 deletions

View File

@ -10,7 +10,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class CriticaDigital(BasicNewsRecipe): class CriticaDigital(BasicNewsRecipe):
title = 'Critica de la Argentina' title = 'Critica de la Argentina'
__author__ = 'Darko Miletic' __author__ = 'Darko Miletic and Sujata Raman'
description = 'Noticias de Argentina' description = 'Noticias de Argentina'
oldest_article = 2 oldest_article = 2
max_articles_per_feed = 100 max_articles_per_feed = 100
@ -20,17 +20,22 @@ class CriticaDigital(BasicNewsRecipe):
use_embedded_content = False use_embedded_content = False
encoding = 'cp1252' encoding = 'cp1252'
html2lrf_options = [ extra_css = '''
'--comment' , description h1{font-family:"Trebuchet MS";}
, '--category' , 'news, Argentina' h3{color:#9A0000; font-family:Tahoma; font-size:x-small;}
, '--publisher' , title h2{color:#504E53; font-family:Arial,Helvetica,sans-serif ;font-size:small;}
] #epigrafe{font-family:Arial,Helvetica,sans-serif ;color:#666666 ; font-size:x-small;}
p {font-family:Arial,Helvetica,sans-serif;}
#fecha{color:#858585; font-family:Tahoma; font-size:x-small;}
#autor{color:#858585; font-family:Tahoma; font-size:x-small;}
#hora{color:#F00000;font-family:Tahoma; font-size:x-small;}
'''
keep_only_tags = [ keep_only_tags = [
dict(name='div', attrs={'class':'bloqueTitulosNoticia'}) dict(name='div', attrs={'class':['bloqueTitulosNoticia','cfotonota']})
,dict(name='div', attrs={'id':'c453-1' }) ,dict(name='div', attrs={'id':'boxautor'})
,dict(name='p', attrs={'id':'textoNota'})
] ]
remove_tags = [ remove_tags = [
dict(name='div', attrs={'class':'box300' }) dict(name='div', attrs={'class':'box300' })
,dict(name='div', style=True ) ,dict(name='div', style=True )
@ -38,7 +43,7 @@ class CriticaDigital(BasicNewsRecipe):
,dict(name='div', attrs={'class':'comentario' }) ,dict(name='div', attrs={'class':'comentario' })
,dict(name='div', attrs={'class':'paginador' }) ,dict(name='div', attrs={'class':'paginador' })
] ]
feeds = [ feeds = [
(u'Politica', u'http://www.criticadigital.com/herramientas/rss.php?ch=politica' ) (u'Politica', u'http://www.criticadigital.com/herramientas/rss.php?ch=politica' )
,(u'Economia', u'http://www.criticadigital.com/herramientas/rss.php?ch=economia' ) ,(u'Economia', u'http://www.criticadigital.com/herramientas/rss.php?ch=economia' )
@ -60,3 +65,5 @@ class CriticaDigital(BasicNewsRecipe):
if link_item: if link_item:
cover_url = index + link_item.img['src'] cover_url = index + link_item.img['src']
return cover_url return cover_url

View File

@ -5,55 +5,90 @@ __copyright__ = '2008-2009, Darko Miletic <darko.miletic at gmail.com>'
''' '''
infobae.com infobae.com
''' '''
import re

from calibre.web.feeds.news import BasicNewsRecipe


class Infobae(BasicNewsRecipe):
    """Fetch news from infobae.com (Argentina).

    Reconstructed post-commit version of the recipe; the article pages are
    cp1252-encoded, tables are linearized for EPUB output, and print-version
    rewriting was dropped in favour of cleaning the regular article page.
    """

    title                 = 'Infobae.com'
    __author__            = 'Darko Miletic and Sujata Raman'
    description           = 'Informacion Libre las 24 horas'
    publisher             = 'Infobae.com'
    category              = 'news, politics, Argentina'
    oldest_article        = 1
    max_articles_per_feed = 100
    no_stylesheets        = True
    use_embedded_content  = False
    language              = 'es'
    lang                  = 'es-AR'
    encoding              = 'cp1252'
    cover_url             = 'http://www.infobae.com/imgs/header/header.gif'
    remove_javascript     = True

    # The site ships a Description meta tag that breaks parsing/display;
    # strip it from the raw HTML before the soup is built.
    preprocess_regexps = [(re.compile(
        r'<meta name="Description" content="[^"]+">'), lambda m: '')]

    html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"\nlinearize_tables=True'

    extra_css = '''
        .col-center{font-family:Arial,Helvetica,sans-serif;}
        h1{font-family:Arial,Helvetica,sans-serif; color:#0D4261;}
        .fuenteIntNota{font-family:Arial,Helvetica,sans-serif; color:#1D1D1D; font-size:x-small;}
    '''

    # Keep only the main article container ...
    keep_only_tags = [dict(name='div', attrs={'class': ['content']})]

    # ... and drop sidebars, ad banners, comment anchors, embedded frames
    # and gallery thumbnails inside it.
    remove_tags = [
        dict(name='div', attrs={'class': ['options', 'col-right', 'controles', 'bannerLibre', 'tiulo-masleidas', 'masleidas-h']}),
        dict(name='a', attrs={'name': 'comentario', }),
        dict(name='iframe'),
        dict(name='img', alt="Ver galerias de imagenes"),
    ]

    feeds = [
        (u'Noticias',   u'http://www.infobae.com/adjuntos/html/RSS/hoy.xml'),
        (u'Salud',      u'http://www.infobae.com/adjuntos/html/RSS/salud.xml'),
        (u'Tecnologia', u'http://www.infobae.com/adjuntos/html/RSS/tecnologia.xml'),
        (u'Deportes',   u'http://www.infobae.com/adjuntos/html/RSS/deportes.xml'),
    ]

    def get_article_url(self, article):
        """Percent-quote the path of the feed link so URLs with accented
        characters fetch correctly."""
        import urllib, urlparse
        parts = list(urlparse.urlparse(article.get('link')))
        parts[2] = urllib.quote(parts[2])  # parts[2] is the URL path component
        return urlparse.urlunparse(parts)

    def preprocess_html(self, soup):
        """Clean the article head and declare the real encoding.

        Removes stray <strong> tags from <head>, drops every existing <meta>
        tag (their content attribute is cleared first), then inserts our own
        charset/language declaration so the iso-8859-1 encoding is
        authoritative. Inline style attributes are stripped site-wide.
        """
        for tag in soup.head.findAll('strong'):
            tag.extract()
        for tag in soup.findAll('meta'):
            del tag['content']
            tag.extract()
        mtag = '<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">\n<meta http-equiv="Content-Language" content="es-AR"/>\n'
        soup.head.insert(0, mtag)
        for item in soup.findAll(style=True):
            del item['style']
        return soup

    def postprocess_html(self, soup, first):
        """Downgrade <strong> to <b> in the final output markup."""
        for tag in soup.findAll(name='strong'):
            tag.name = 'b'
        return soup