Update Folha de Sao Paulo

Kovid Goyal 2013-05-30 06:59:22 +05:30
parent 384e2048f9
commit 43bbfaf7e7
2 changed files with 40 additions and 33 deletions

View File

@@ -109,7 +109,6 @@ class FolhaOnline(BasicNewsRecipe):
         ,(u'Valdo Cruz', u'http://feeds.folha.uol.com.br/colunas/valdocruz/rss091.xml')
     ]
-
     conversion_options = {
         'title'     : title
         ,'comments' : description
@@ -132,7 +131,7 @@ class FolhaOnline(BasicNewsRecipe):
     def postprocess_html(self, soup, first):
         # process all the images. assumes that the new html has the correct path
-        for tag in soup.findAll(lambda tag: tag.name.lower()=='img' and tag.has_key('src')):
+        for tag in soup.findAll(lambda tag: tag.name.lower()=='img' and 'src' in tag):
            iurl = tag['src']
            img = Image()
            img.open(iurl)
@@ -163,3 +162,4 @@ class FolhaOnline(BasicNewsRecipe):
         except URLError:
             cover_url='http://api.thumbalizr.com/?api_key='+self.THUMBALIZR_API+'&url='+self.SCREENSHOT+'&width=600&quality=90'
         return cover_url
+
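The only functional change in the first recipe is the attribute test in postprocess_html: tag.has_key('src') becomes 'src' in tag, since has_key() no longer exists on Python 3 dicts or on BeautifulSoup 4 tags. A minimal standalone sketch of the same image filter, assuming stock bs4, where has_attr() is the portable attribute test (the recipe's 'src' in tag spelling targets calibre's bundled parser):

from bs4 import BeautifulSoup

html = '<div><img src="a.jpg"/><img alt="no source"/></div>'
soup = BeautifulSoup(html, 'html.parser')

# keep only <img> tags that actually carry a src attribute
for tag in soup.find_all(lambda t: t.name == 'img' and t.has_attr('src')):
    print(tag['src'])   # -> a.jpg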

View File

@@ -1,6 +1,7 @@
 from calibre.web.feeds.news import BasicNewsRecipe
 import re
+import datetime
 
 class FSP(BasicNewsRecipe):
@@ -24,11 +25,11 @@ class FSP(BasicNewsRecipe):
     remove_tags = [dict(name='td', attrs={'align':'center'})]
     remove_attributes = ['height','width']
     # fixes the problem with the section names
-    section_dict = {'cotidian' : 'cotidiano', 'ilustrad': 'ilustrada', \
-        'quadrin': 'quadrinhos' , 'opiniao' : u'opini\xE3o', \
-        'ciencia' : u'ci\xeancia' , 'saude' : u'sa\xfade', \
-        'ribeirao' : u'ribeir\xE3o' , 'equilibrio' : u'equil\xedbrio', \
-        'imoveis' : u'im\xf3veis', 'negocios' : u'neg\xf3cios', \
+    section_dict = {'cotidian' : 'cotidiano', 'ilustrad': 'ilustrada',
+        'quadrin': 'quadrinhos' , 'opiniao' : u'opini\xE3o',
+        'ciencia' : u'ci\xeancia' , 'saude' : u'sa\xfade',
+        'ribeirao' : u'ribeir\xE3o' , 'equilibrio' : u'equil\xedbrio',
+        'imoveis' : u'im\xf3veis', 'negocios' : u'neg\xf3cios',
         'veiculos' : u've\xedculos', 'corrida' : 'folha corrida'}
     # this solves the problem with truncated content in Kindle
@@ -47,24 +48,26 @@ class FSP(BasicNewsRecipe):
             br['user'] = self.username
             br['pass'] = self.password
             br.submit().read()
-##        if 'Please try again' in raw:
-##            raise Exception('Your username and password are incorrect')
+#        if 'Please try again' in raw:
+#            raise Exception('Your username and password are incorrect')
         return br
 
     def parse_index(self):
         # Searching for the index page on the HOMEPAGE
-        hpsoup = self.index_to_soup(self.HOMEPAGE)
-        indexref = hpsoup.find('a', href=re.compile('^indices.*'))
-        self.log('--> tag containing the today s index: ', indexref)
-        INDEX = indexref['href']
-        INDEX = 'http://www1.folha.uol.com.br/fsp/'+INDEX
+        # hpsoup = self.index_to_soup(self.HOMEPAGE)
+        # indexref = hpsoup.find('a', href=re.compile('^indices.*'))
+        # self.log('--> tag containing the today s index: ', indexref)
+        # INDEX = indexref['href']
+        # INDEX = 'http://www1.folha.uol.com.br/'+INDEX
+        today=datetime.date.today()
+        INDEX = 'http://www1.folha.uol.com.br/' + 'fsp/indices/index-' + str(today).replace('-','') + '.shtml'
         self.log('--> INDEX after extracting href and adding prefix: ', INDEX)
         # ... and taking the opportunity to get the cover image link
-        coverurl = hpsoup.find('a', href=re.compile('^cp.*'))['href']
+        coverurl = 'cp' + str(today.day).zfill(2) + str(today.month).zfill(2) + str(today.year) + '.shtml'
         if coverurl:
             self.log('--> tag containing the today s cover: ', coverurl)
-            coverurl = coverurl.replace('htm', 'jpg')
+            coverurl = coverurl.replace('shtml', 'jpg')
             coverurl = 'http://www1.folha.uol.com.br/fsp/images/'+coverurl
             self.log('--> coverurl after extracting href and adding prefix: ', coverurl)
             self.cover_url = coverurl
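The heart of the commit: parse_index no longer scrapes the homepage for today's index link and cover thumbnail; both URLs are now derived purely from the date. A minimal sketch of that URL arithmetic, using a fixed date for illustration (the recipe itself calls datetime.date.today()):

import datetime

today = datetime.date(2013, 5, 30)            # illustrative; the recipe uses today()
stamp = str(today).replace('-', '')           # '20130530'
index = 'http://www1.folha.uol.com.br/fsp/indices/index-' + stamp + '.shtml'

cover = 'cp' + str(today.day).zfill(2) + str(today.month).zfill(2) + str(today.year) + '.shtml'
cover = 'http://www1.folha.uol.com.br/fsp/images/' + cover.replace('shtml', 'jpg')

print(index)   # http://www1.folha.uol.com.br/fsp/indices/index-20130530.shtml
print(cover)   # http://www1.folha.uol.com.br/fsp/images/cp30052013.jpg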
@@ -78,17 +81,19 @@ class FSP(BasicNewsRecipe):
         for post in soup.findAll('a'):
             # if name=True => new section
             strpost = str(post)
-            if strpost.startswith('<a name'):
+            # if strpost.startswith('<a name'):
+            if re.match('<a href="/fsp/.*/index-' + str(today).replace('-','') + '.shtml"><span class="', strpost):
                 if articles:
                     feeds.append((section_title, articles))
                     self.log()
                     self.log('--> new section found, creating old section feed: ', section_title)
-                section_title = post['name']
+                # section_title = post['name']
+                section_title = self.tag_to_string(post)
                 if section_title in self.section_dict:
                     section_title = self.section_dict[section_title]
                 articles = []
                 self.log('--> new section title: ', section_title)
-            if strpost.startswith('<a href'):
+            elif strpost.startswith('<a href'):
                 url = post['href']
                 # this bit is kept if they ever go back to the old format (pre Nov-2011)
                 if url.startswith('/fsp'):
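In the new index markup a section heading is an anchor pointing at a per-section index-YYYYMMDD.shtml page with its label in a <span>, so the recipe now recognizes sections by matching the serialized anchor with re.match and reads the title with tag_to_string, instead of relying on the old <a name=...> convention. A self-contained sketch of that walk, with made-up index HTML, stock bs4, and get_text() standing in for calibre's tag_to_string:

import datetime
import re
from bs4 import BeautifulSoup

today = datetime.date(2013, 5, 30)
stamp = str(today).replace('-', '')
html = ('<a href="/fsp/poder/index-%s.shtml"><span class="sec">poder</span></a>'
        '<a href="/fsp/poder/po3005201301.shtml">Some headline</a>') % stamp

section_pat = '<a href="/fsp/.*/index-' + stamp + '.shtml"><span class="'
for post in BeautifulSoup(html, 'html.parser').find_all('a'):
    strpost = str(post)
    if re.match(section_pat, strpost):       # serialized anchor looks like a section heading
        print('section:', post.get_text())   # -> section: poder
    elif strpost.startswith('<a href'):      # any other anchor is an article link
        print('article:', post['href'])      # -> article: /fsp/poder/po3005201301.shtml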
@@ -106,13 +111,15 @@ class FSP(BasicNewsRecipe):
         feeds.append((section_title, articles))
         # keeping the front page url
-        minha_capa = feeds[0][1][1]['url']
-        # removing the first section (now called 'top')
+        # minha_capa = feeds[0][1][1]['url']
+        # removing the first section ('Preambulo')
         del feeds[0]
         # inserting the cover page as the first article (nicer for kindle users)
-        feeds.insert(0,(u'primeira p\xe1gina', [{'title':u'Primeira p\xe1gina' , 'url':minha_capa}]))
+        # feeds.insert(0,(u'primeira p\xe1gina', [{'title':u'Primeira p\xe1gina' , 'url':minha_capa}]))
+        feeds.insert(0,(u'Capa', [{'title':u'Capa' , 'url':self.get_cover_url().replace('jpg', 'shtml')}]))
         return feeds
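One subtlety at the end: the old code saved a front-page article URL (minha_capa) before deleting the first section, while the new code drops the first section ('Preambulo') outright and rebuilds the front-page entry from the cover URL, turning the image link back into its page with replace('jpg', 'shtml'), so a 'Capa' page still leads the book for Kindle users.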