diff --git a/recipes/ambito_financiero.recipe b/recipes/ambito_financiero.recipe
index 8cb8fe6281..46fd8e1d31 100644
--- a/recipes/ambito_financiero.recipe
+++ b/recipes/ambito_financiero.recipe
@@ -1,78 +1,78 @@
+#!/usr/bin/env python2
+# -*- mode: python -*-
+# -*- coding: utf-8 -*-
+
 __license__ = 'GPL v3'
-__copyright__ = '2011, Darko Miletic <darko.miletic at gmail.com>'
+__copyright__ = '2011 - 2016, Darko Miletic <darko.miletic at gmail.com>'
 '''
 ambito.com/diario
 '''
 
 import time
+import urllib
+import re
 from calibre import strftime
 from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import BeautifulSoup
 
 
 class Ambito_Financiero(BasicNewsRecipe):
-    title = 'Ambito Financiero'
-    __author__ = 'Darko Miletic'
-    description = 'Informacion Libre las 24 horas'
-    publisher = 'Editorial Nefir S.A.'
-    category = 'news, politics, economy, Argentina'
-    no_stylesheets = True
-    encoding = 'cp1252'
-    masthead_url = 'http://www.ambito.com/diario/img/logo_af.gif'
-    publication_type = 'newspaper'
-    needs_subscription = 'optional'
+    title = 'Ambito Financiero'
+    __author__ = 'Darko Miletic'
+    description = 'Informacion Libre las 24 horas'
+    publisher = 'Editorial Nefir S.A.'
+    category = 'news, politics, economy, Argentina'
+    no_stylesheets = True
+    encoding = 'utf8'
+    publication_type = 'newspaper'
+    needs_subscription = True
     use_embedded_content = False
-    language = 'es_AR'
-    PREFIX = 'http://www.ambito.com'
-    INDEX = PREFIX + '/diario/index.asp'
-    LOGIN = PREFIX + '/diario/login/entrada.asp'
-    extra_css = """
-    body{font-family: "Trebuchet MS",Verdana,sans-serif}
-    .volanta{font-size: small}
-    .t2_portada{font-size: xx-large; font-family: Georgia,serif; color: #026698}
-    """
+    language = 'es_AR'
+    delay = 1
+    session_id = None
+    PREFIX = 'http://www.ambito.com'
+    PREFIXDIARIO = PREFIX + '/diario'
+    INDEX = PREFIX + '/diario/index.asp'
+    LOGIN = PREFIX + '/login/login_cabezal.asp'
+    extra_css = """
+    body{font-family: Roboto,sans-serif}
+    """
 
     conversion_options = {
-        'comment': description, 'tags': category, 'publisher': publisher, 'language': language
+        'comment'  : description,
+        'tags'     : category,
+        'publisher': publisher,
+        'language' : language
     }
 
-    keep_only_tags = [dict(name='div', attrs={'align': 'justify'})]
-    remove_tags = [dict(name=['object', 'link', 'embed',
-                              'iframe', 'meta', 'link', 'table', 'img'])]
-    remove_attributes = ['align']
+    keep_only_tags = [
+        dict(name='h6', attrs={'class': lambda x: x and 'bajada' in x.split()})
+        ,dict(name='span', attrs={'class': lambda x: x and 'dia' in x.split()})
+        ,dict(attrs={'class': lambda x: x and 'titulo-noticia' in x.split()})
+        ,dict(attrs={'class': lambda x: x and 'despliegue-noticia' in x.split()})
+    ]
+    remove_tags = [dict(name=['object','link','embed','iframe','meta','link'])]
 
     def get_browser(self):
         br = BasicNewsRecipe.get_browser(self)
         br.open(self.INDEX)
         if self.username is not None and self.password is not None:
-            br.open(self.LOGIN)
-            br.select_form(name='frmlogin')
-            br['USER_NAME'] = self.username
-            br['USER_PASS'] = self.password
-            br.submit()
+            data = urllib.urlencode({'txtUser':self.username, 'txtPassword':self.password})
+            response = br.open('http://www.ambito.com/diario/no-cache/login/x_login_cabezal.asp', data)
+            sessiondata = response.read()
+            prog = re.compile(r"^(?P<session_id>\d+?),(?P<username>.+?),(?P<name>.+?),.*?")
+            m = prog.match(sessiondata)
+            if m:
+                self.session_id = m.group('session_id')
         return br
 
-    def print_version(self, url):
-        return url.replace('/diario/noticia.asp?', '/noticias/imprimir.asp?')
-
-    def preprocess_html(self, soup):
-        for item in soup.findAll(style=True):
-            del item['style']
-        for item in soup.findAll('a'):
-            str = item.string
-            if str is None:
-                str = self.tag_to_string(item)
-            item.replaceWith(str)
-        return soup
-
     def parse_index(self):
         soup = self.index_to_soup(self.INDEX)
-        cover_item = soup.find('img', attrs={'class': 'fotodespliegue'})
-        if cover_item:
-            self.cover_url = self.PREFIX + cover_item['src']
         articles = []
         checker = []
-        for feed_link in soup.findAll('a', attrs={'class': ['t0_portada', 't2_portada', 'bajada']}):
-            url = self.PREFIX + feed_link['href']
+        rootitem = soup.find(attrs={'class':'ei-dropdown'})
+        for feed_link in rootitem.findAll('a', href=True):
+            url = self.PREFIXDIARIO + feed_link['href']
             title = self.tag_to_string(feed_link)
             date = strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
             if url not in checker:
@@ -81,3 +81,22 @@ class Ambito_Financiero(BasicNewsRecipe):
                     'title': title, 'date': date, 'url': url, 'description': u''
                 })
         return [(self.title, articles)]
+
+    def preprocess_raw_html(self, raw_html, url):
+        if self.session_id:
+            l, s, r = url.rpartition('/')
+            artid, s1, r1 = r.partition('-')
+            data = urllib.urlencode({'id':artid, 'id_session':self.session_id})
+            response = self.browser.open('http://data.ambito.com/diario/cuerpo_noticia.asp', data)
+            soup = BeautifulSoup(raw_html)
+            p = soup.find('p', id="cuerpo_noticia")
+            if p:
+                p.append(response.read())
+                return unicode(soup)
+        return raw_html
+
+    def cleanup(self):
+        if self.session_id is not None:
+            data = urllib.urlencode({'session_id':self.session_id})
+            self.browser.open('http://www.ambito.com/diario/no-cache/login/x_logout.asp', data)
+            self.session_id = None