#!/usr/bin/env python2
# -*- mode: python -*-
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2011 - 2018, Darko Miletic <darko.miletic at gmail.com>'
'''
http://www.ambito.com/diario/
'''
import time
try:
    from urllib.parse import urlencode
except ImportError:
    from urllib import urlencode
import re
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.ptempfile import PersistentTemporaryFile
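
# This recipe signs in to ambito.com, collects section links from the
# print-edition index, and pulls each article body from the site's
# cuerpo_noticia.asp endpoint using the session id obtained at login.
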
class Ambito_Financiero(BasicNewsRecipe):
    title = 'Ambito Financiero'
    __author__ = 'Darko Miletic'
    description = 'Informacion Libre las 24 horas'
    publisher = 'Editorial Nefir S.A.'
    category = 'news, politics, economy, Argentina'
    no_stylesheets = True
    encoding = 'utf8'
    publication_type = 'newspaper'
    needs_subscription = True
    use_embedded_content = False
    language = 'es_AR'
    fetch_retries = 10
    delay = 1
    session_id = None
    timeout = 8
    ignore_duplicate_articles = {'url'}
    articles_are_obfuscated = True
    temp_files = []
    PREFIX = 'http://www.ambito.com'
    PREFIXDIARIO = PREFIX + '/diario'
    INDEX = PREFIX + '/diario/index.asp'
    LOGIN = PREFIX + '/login/login_cabezal.asp'
    extra_css = """
        body{font-family: Roboto, sans-serif;}
        .titulo-noticia{font-family: "Roboto Condensed", sans-serif;}
        .dia{font-family: "Roboto Condensed", sans-serif; font-size: small;}
    """

    conversion_options = {
        'comment': description,
        'tags': category,
        'publisher': publisher,
        'language': language
    }

    keep_only_tags = [
        dict(name='h6', attrs={'class': lambda x: x and 'bajada' in x.split()}),
        dict(name='span', attrs={'class': lambda x: x and 'dia' in x.split()}),
        dict(attrs={'class': lambda x: x and 'titulo-noticia' in x.split()}),
        dict(attrs={'class': lambda x: x and 'foto-perfil-columnista' in x.split()}),
        dict(attrs={'class': lambda x: x and 'despliegue-noticia' in x.split()}),
    ]
    remove_tags = [dict(name=['object', 'link', 'embed', 'iframe', 'meta'])]
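
    # Log in with the credentials supplied by calibre; the login endpoint
    # replies with a comma-separated "status,session_id,username,..." line
    # from which the session id is extracted for later requests.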
    def get_browser(self):
        br = BasicNewsRecipe.get_browser(self)
        br.open(self.INDEX)
        if self.username is not None and self.password is not None:
            postdata = urlencode({
                'txtUser': self.username,
                'txtPassword': self.password
            })
            response = br.open(
                'http://www.ambito.com/diario/no-cache/login/x_login_cabezal.asp',
                data=postdata,
                timeout=self.timeout
            )
            # Under Python 3 response.read() returns bytes; decode before
            # matching the reply with a str regex.
            sessiondata = response.read().decode(self.encoding, 'replace')
            prog = re.compile(
                r"^(?P<status>\d+?),(?P<session_id>.+?),(?P<username>.+?),.*?"
            )
            m = prog.match(sessiondata)
            if m:
                self.session_id = m.group('session_id')
        # br.set_debug_http(True)
        # br.set_debug_responses(True)
        # br.set_debug_redirects(True)
        return br
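
    # Build the article list from the section links on the print-edition
    # index page, deduplicating URLs as they are collected.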
    def parse_index(self):
        soup = self.index_to_soup(self.INDEX)
        articles = []
        checker = []
        rootitem = soup.find(attrs={'class': 'ei-dropdown'})
        for feed_link in rootitem.findAll('a', href=True):
            url = self.PREFIXDIARIO + feed_link['href']
            title = self.tag_to_string(feed_link)
            date = strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
            if url not in checker:
                checker.append(url)
                articles.append({
                    'title': title,
                    'date': date,
                    'url': url,
                    'description': u''
                })
        return [(self.title, articles)]
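
    # Article bodies are served separately: POST the article id (the part
    # of the URL between the last '_' and '.html') together with the
    # session id to cuerpo_noticia.asp and graft the returned content into
    # the page before conversion.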
    def preprocess_raw_html(self, raw_html, url):
        if self.session_id:
            l, s, r = url.rpartition('.html')
            o, s1, artid = l.rpartition('_')
            postdata = urlencode({'id': artid, 'id_session': self.session_id})
            response = self.browser.open(
                'http://data.ambito.com/diario/cuerpo_noticia.asp',
                data=postdata,
                timeout=self.timeout
            )
            soup = BeautifulSoup(raw_html)
            p = soup.find(id="cuerpo_noticia")
            if p:
                smallsoup = BeautifulSoup(response.read())
                cfind = smallsoup.find('div', id="contenido_data")
                if cfind:
                    p.append(cfind)
                return type(u'')(soup)
        return raw_html
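
    # Log out at the end of the run so the session is not left open on the
    # server.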
    def cleanup(self):
        if self.session_id is not None:
            postdata = urlencode({'session_id': self.session_id})
            self.browser.open(
                'http://www.ambito.com/diario/no-cache/login/x_logout.asp',
                data=postdata,
                timeout=self.timeout
            )
            self.session_id = None
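
    # Download each article with retries and hand calibre a local temporary
    # copy; the file name embeds the article id parsed from the URL.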
    def get_obfuscated_article(self, url):
        result = None
        count = 0
        while count < self.fetch_retries:
            try:
                response = self.browser.open(url, timeout=self.timeout)
                html = response.read()
                count = self.fetch_retries
                l, s, r = url.rpartition('/')
                artid, s1, r1 = r.partition('-')
                tfile = PersistentTemporaryFile('_' + artid + '.html')
                tfile.write(html)
                tfile.close()
                self.temp_files.append(tfile)
                result = tfile.name
            except Exception:
                # Report progress via the recipe's calibre Log object.
                self.log.info("Retrying download...")
                count += 1
        return result
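
    # Site images are often referenced with root-relative paths; make them
    # absolute so they download correctly.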
    def image_url_processor(self, baseurl, url):
        result = url
        if url.startswith('/'):
            result = self.PREFIX + url
        return result