Update Ambito Financiero

Fixes #1651190 [Updated recipe for Ambito Financiero](https://bugs.launchpad.net/calibre/+bug/1651190)
This commit is contained in:
Kovid Goyal 2016-12-20 09:58:29 +05:30
parent 39d01773a1
commit 7199d30fa1

View File

@ -1,78 +1,78 @@
#!/usr/bin/env python2
# -*- mode: python -*-
# -*- coding: utf-8 -*-
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '2011, Darko Miletic <darko.miletic at gmail.com>' __copyright__ = '2011 - 2016, Darko Miletic <darko.miletic at gmail.com>'
''' '''
ambito.com/diario ambito.com/diario
''' '''
import time import time
import urllib
import re
from calibre import strftime from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup
class Ambito_Financiero(BasicNewsRecipe):
    """Calibre recipe for the Argentine daily Ambito Financiero (ambito.com/diario).

    The site requires a subscription: get_browser() posts the credentials to the
    paper's AJAX login endpoint and stores the session id it returns, which
    preprocess_raw_html() then uses to fetch the full article body and cleanup()
    uses to log out.  Written for Python 2 (urllib.urlencode, unicode) per the
    file's ``#!/usr/bin/env python2`` shebang.
    """
    title = 'Ambito Financiero'
    __author__ = 'Darko Miletic'
    description = 'Informacion Libre las 24 horas'
    publisher = 'Editorial Nefir S.A.'
    category = 'news, politics, economy, Argentina'
    no_stylesheets = True
    encoding = 'utf8'
    publication_type = 'newspaper'
    needs_subscription = True
    use_embedded_content = False
    language = 'es_AR'
    delay = 1
    # Session token returned by the login endpoint; None until login succeeds.
    session_id = None
    PREFIX = 'http://www.ambito.com'
    PREFIXDIARIO = PREFIX + '/diario'
    INDEX = PREFIX + '/diario/index.asp'
    LOGIN = PREFIX + '/login/login_cabezal.asp'
    extra_css = """
    body{font-family: Roboto,sans-serif}
    """

    conversion_options = {
        'comment': description,
        'tags': category,
        'publisher': publisher,
        'language': language
    }

    # Keep only the article headline, date, title and body containers; the
    # class attribute can hold several space-separated names, hence the
    # split() membership test instead of an exact match.
    keep_only_tags = [
        dict(name='h6', attrs={'class': lambda x: x and 'bajada' in x.split()}),
        dict(name='span', attrs={'class': lambda x: x and 'dia' in x.split()}),
        dict(attrs={'class': lambda x: x and 'titulo-noticia' in x.split()}),
        dict(attrs={'class': lambda x: x and 'despliegue-noticia' in x.split()}),
    ]
    # 'link' was listed twice in the original; deduplicated (no behavior change).
    remove_tags = [dict(name=['object', 'link', 'embed', 'iframe', 'meta'])]

    def get_browser(self):
        """Return a browser, logged in via the site's AJAX endpoint.

        The endpoint answers with a comma-separated line of the form
        ``status,session_id,username,...``; on a match the session id is
        stored on the recipe for later body fetches and the logout call.
        """
        br = BasicNewsRecipe.get_browser(self)
        br.open(self.INDEX)
        if self.username is not None and self.password is not None:
            data = urllib.urlencode({'txtUser': self.username, 'txtPassword': self.password})
            response = br.open('http://www.ambito.com/diario/no-cache/login/x_login_cabezal.asp', data)
            sessiondata = response.read()
            prog = re.compile(r"^(?P<status>\d+?),(?P<session_id>.+?),(?P<username>.+?),.*?")
            m = prog.match(sessiondata)
            if m:
                self.session_id = m.group('session_id')
        return br

    def parse_index(self):
        """Build the single feed from the links in the index page's dropdown menu.

        Returns [(title, articles)] where each article dict has the keys
        calibre expects: title, date, url, description.
        """
        soup = self.index_to_soup(self.INDEX)
        articles = []
        checker = []
        rootitem = soup.find(attrs={'class': 'ei-dropdown'})
        # Guard against a site redesign removing the dropdown container, so we
        # return an empty feed instead of raising AttributeError on None.
        if rootitem is not None:
            for feed_link in rootitem.findAll('a', href=True):
                url = self.PREFIXDIARIO + feed_link['href']
                title = self.tag_to_string(feed_link)
                date = strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
                if url not in checker:  # de-duplicate repeated menu links
                    checker.append(url)
                    articles.append({
                        'title': title, 'date': date, 'url': url, 'description': u''
                    })
        return [(self.title, articles)]

    def preprocess_raw_html(self, raw_html, url):
        """Inject the full, subscription-only article body into the page.

        The article id is the numeric prefix of the URL's last path segment
        (``.../<artid>-slug``).  The body is fetched from the data endpoint
        with the stored session id and appended to the ``cuerpo_noticia``
        placeholder paragraph.  Without a session the page is returned as-is.
        """
        if self.session_id:
            artid = url.rpartition('/')[2].partition('-')[0]
            data = urllib.urlencode({'id': artid, 'id_session': self.session_id})
            response = self.browser.open('http://data.ambito.com/diario/cuerpo_noticia.asp', data)
            soup = BeautifulSoup(raw_html)
            p = soup.find('p', id="cuerpo_noticia")
            if p:
                p.append(response.read())
            return unicode(soup)
        return raw_html

    def cleanup(self):
        """Log out of the site and forget the session id (idempotent)."""
        if self.session_id is not None:
            data = urllib.urlencode({'session_id': self.session_id})
            self.browser.open('http://www.ambito.com/diario/no-cache/login/x_logout.asp', data)
            self.session_id = None