Update Folha de Sao Paulo

Kovid Goyal 2013-06-05 07:35:05 +05:30
parent 85e46ead60
commit f639536b08

@@ -3,16 +3,20 @@ from calibre.web.feeds.news import BasicNewsRecipe
 import re
 import datetime
 
 class FSP(BasicNewsRecipe):
     title = u'Folha de S\xE3o Paulo'
-    __author__ = 'fluzao'
+    __author__ = 'Joao Eduardo Bertacchi'
     description = u'Printed edition contents. UOL subscription required (Folha subscription currently not supported).' + \
         u' [Conte\xfado completo da edi\xe7\xe3o impressa. Somente para assinantes UOL.]'
     # found this to be the easiest place to find the index page (13-Nov-2011).
     # searching for the "Indice Geral" link
     HOMEPAGE = 'http://www1.folha.uol.com.br/fsp/'
+    today = datetime.date.today()
+    FIRSTPAGE = 'cp' + str(today.day).zfill(2) + str(
+        today.month).zfill(2) + str(today.year) + '.shtml'
     masthead_url = 'http://f.i.uol.com.br/fsp/furniture/images/lgo-fsp-430x50-ffffff.gif'
     language = 'pt_BR'
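
Note on the new date attributes: FIRSTPAGE encodes today's date as ddmmyyyy into the cover page filename. A minimal standalone sketch of the same construction, using strftime instead of the recipe's manual zfill() padding (illustration only, not the recipe's code):

    import datetime

    today = datetime.date.today()
    # strftime zero-pads %d and %m, so this matches the zfill(2) version,
    # e.g. 'cp05062013.shtml' for 2013-06-05.
    firstpage = today.strftime('cp%d%m%Y') + '.shtml'
    print(firstpage)
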
@@ -52,24 +56,36 @@ class FSP(BasicNewsRecipe):
         # raise Exception('Your username and password are incorrect')
         return br
 
+    def postprocess_html(self, soup, first_fetch):
+        # Clean-up normal articles
+        tags = soup.findAll('div', id='articleNew')
+        if tags and tags[0]:
+            return tags[0]
+        # Clean-up first page
+        tags = soup.findAll('div', attrs={'class': 'double_column facsimile'})
+        if tags and tags[0]:
+            return tags[0]
+        return soup
+
     def parse_index(self):
         # Searching for the index page on the HOMEPAGE
-        # hpsoup = self.index_to_soup(self.HOMEPAGE)
+        self.index_to_soup(self.HOMEPAGE)
         # indexref = hpsoup.find('a', href=re.compile('^indices.*'))
         # self.log('--> tag containing the today s index: ', indexref)
         # INDEX = indexref['href']
         # INDEX = 'http://www1.folha.uol.com.br/'+INDEX
-        today=datetime.date.today()
-        INDEX = 'http://www1.folha.uol.com.br/' + 'fsp/indices/index-' + str(today).replace('-','') + '.shtml'
+        INDEX = 'http://www1.folha.uol.com.br/' + 'fsp/indices/index-' + \
+            str(self.today).replace('-', '') + '.shtml'
         self.log('--> INDEX after extracting href and adding prefix: ', INDEX)
         # ... and taking the opportunity to get the cover image link
         # coverurl = hpsoup.find('a', href=re.compile('^cp.*'))['href']
-        coverurl = 'cp' + str(today.day).zfill(2) + str(today.month).zfill(2) + str(today.year) + '.shtml'
+        coverurl = self.FIRSTPAGE
         if coverurl:
             self.log('--> tag containing the today s cover: ', coverurl)
             coverurl = coverurl.replace('shtml', 'jpg')
             coverurl = 'http://www1.folha.uol.com.br/fsp/images/' + coverurl
-            self.log('--> coverurl after extracting href and adding prefix: ', coverurl)
+            self.log(
+                '--> coverurl after extracting href and adding prefix: ', coverurl)
             self.cover_url = coverurl
 
         # soup = self.index_to_soup(self.INDEX)
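
The new postprocess_html hook prunes each fetched page down to the article container before conversion. A rough standalone sketch of that pruning idea; bs4 stands in here for calibre's bundled BeautifulSoup, and the HTML snippet is invented for illustration:

    from bs4 import BeautifulSoup

    html = ('<html><body><div id="menu">nav</div>'
            '<div id="articleNew"><p>story text</p></div></body></html>')
    soup = BeautifulSoup(html, 'html.parser')

    # Keep only the article div, as the hook does for 'articleNew';
    # fall back to the whole soup when the div is absent.
    tags = soup.findAll('div', id='articleNew')
    cleaned = tags[0] if tags and tags[0] else soup
    print(cleaned)  # <div id="articleNew"><p>story text</p></div>
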
@@ -77,16 +93,17 @@ class FSP(BasicNewsRecipe):
         feeds = []
         articles = []
-        section_title = "Preambulo"
+        section_title = u'Primeira p\xe1gina'
         for post in soup.findAll('a'):
             # if name=True => new section
             strpost = str(post)
             # if strpost.startswith('<a name'):
-            if re.match('<a href="/fsp/.*/index-' + str(today).replace('-','') + '.shtml"><span class="', strpost):
+            if re.match('<a href="/fsp/.*/index-' + str(self.today).replace('-', '') + '.shtml"><span class="', strpost):
                 if articles:
                     feeds.append((section_title, articles))
                     self.log()
-                    self.log('--> new section found, creating old section feed: ', section_title)
+                    self.log(
+                        '--> new section found, creating old section feed: ', section_title)
                 # section_title = post['name']
                 section_title = self.tag_to_string(post)
                 if section_title in self.section_dict:
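
Section boundaries are detected by stringifying each anchor tag and matching it against a date-stamped section-index URL pattern. A small sketch of that matching logic; the anchor strings here are made up:

    import re
    import datetime

    today = datetime.date.today()
    # str(date) is ISO 'YYYY-MM-DD'; stripping the dashes gives 'YYYYMMDD'.
    stamp = str(today).replace('-', '')

    posts = [
        '<a href="/fsp/poder/index-' + stamp + '.shtml"><span class="t">Poder</span></a>',
        '<a href="/fsp/poder/art123.shtml">Some article</a>',
    ]
    pattern = '<a href="/fsp/.*/index-' + stamp + '.shtml"><span class="'
    for strpost in posts:
        # re.match anchors at the start, so only section-index links match.
        print(bool(re.match(pattern, strpost)))  # True, then False
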
@@ -95,7 +112,8 @@ class FSP(BasicNewsRecipe):
                 self.log('--> new section title: ', section_title)
             elif strpost.startswith('<a href'):
                 url = post['href']
-                # this bit is kept if they ever go back to the old format (pre Nov-2011)
+                # this bit is kept if they ever go back to the old format (pre
+                # Nov-2011)
                 if url.startswith('/fsp'):
                     url = 'http://www1.folha.uol.com.br' + url
                     #
@@ -114,12 +132,12 @@ class FSP(BasicNewsRecipe):
         # minha_capa = feeds[0][1][1]['url']
 
         # removing the first section ('Preambulo')
-        del feeds[0]
+        # del feeds[0]
+        del feeds[0][1][0]
 
         # inserting the cover page as the first article (nicer for kindle users)
-        # feeds.insert(0,(u'primeira p\xe1gina', [{'title':u'Primeira p\xe1gina' , 'url':minha_capa}]))
-        feeds.insert(0,(u'Capa', [{'title':u'Capa' , 'url':self.get_cover_url().replace('jpg', 'shtml')}]))
+        # feeds.insert(0,(u'primeira p\xe1gina', [{'title':u'Primeira
+        # p\xe1gina' , 'url':minha_capa}]))
+        feeds[0][1].insert(0, {
+            'title': u'fac-s\xedmile da capa', 'url': self.HOMEPAGE + self.FIRSTPAGE})
         return feeds
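
For context on the closing hunk: parse_index() returns a list of (section_title, article_list) tuples, so del feeds[0][1][0] drops the first article of the first section, and the insert() puts the cover facsimile in its place. A toy model of that reshuffling; the section and article entries are invented:

    HOMEPAGE = 'http://www1.folha.uol.com.br/fsp/'
    FIRSTPAGE = 'cp05062013.shtml'  # assumed value for 2013-06-05

    # The shape calibre expects from parse_index():
    feeds = [(u'Primeira p\xe1gina', [
        {'title': 'redundant cover link', 'url': 'http://example.invalid/a'},
        {'title': 'editorial', 'url': 'http://example.invalid/b'},
    ])]

    del feeds[0][1][0]  # drop the redundant first article
    feeds[0][1].insert(0, {
        'title': u'fac-s\xedmile da capa', 'url': HOMEPAGE + FIRSTPAGE})
    print(feeds[0][1][0]['title'])  # fac-símile da capa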