Update Financial Times (UK)

This commit is contained in:
Kovid Goyal 2012-10-27 19:24:57 +05:30
parent f4e7908936
commit 792901d690

View File

@@ -1,5 +1,5 @@
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '2010-2012, Darko Miletic <darko.miletic at gmail.com>' __copyright__ = '2010-2011, Darko Miletic <darko.miletic at gmail.com>'
''' '''
www.ft.com/uk-edition www.ft.com/uk-edition
''' '''
@@ -42,24 +42,18 @@ class FinancialTimes(BasicNewsRecipe):
def get_browser(self): def get_browser(self):
br = BasicNewsRecipe.get_browser() br = BasicNewsRecipe.get_browser()
br.open(self.INDEX) br.open(self.INDEX)
if self.username is not None and self.password is not None: br.open(self.LOGIN)
br.open(self.LOGIN2) br.select_form(name='loginForm')
br.select_form(name='loginForm') br['username'] = self.username
br['username'] = self.username br['password'] = self.password
br['password'] = self.password br.submit()
br.submit()
return br return br
keep_only_tags = [ keep_only_tags = [
dict(name='div' , attrs={'class':['fullstory fullstoryHeader', 'ft-story-header']}) dict(name='div', attrs={'class':['fullstory fullstoryHeader', 'ft-story-header']})
,dict(name='div' , attrs={'class':'standfirst'}) ,dict(name='div', attrs={'class':'standfirst'})
,dict(name='div' , attrs={'id' :'storyContent'}) ,dict(name='div', attrs={'id' :'storyContent'})
,dict(name='div' , attrs={'class':['ft-story-body','index-detail']}) ,dict(name='div', attrs={'class':['ft-story-body','index-detail']})
,dict(name='div' , attrs={'class':['ft-story-body','index-detail']})
,dict(name='h2' , attrs={'class':'entry-title'} )
,dict(name='span', attrs={'class':lambda x: x and 'posted-on' in x.split()} )
,dict(name='span', attrs={'class':'author_byline'} )
,dict(name='div' , attrs={'class':'entry-content'} )
] ]
remove_tags = [ remove_tags = [
dict(name='div', attrs={'id':'floating-con'}) dict(name='div', attrs={'id':'floating-con'})
@@ -88,17 +82,21 @@ class FinancialTimes(BasicNewsRecipe):
if self.test and count > 2: if self.test and count > 2:
return articles return articles
rawlink = item['href'] rawlink = item['href']
url = rawlink if rawlink.startswith('http://'):
if not rawlink.startswith('http://'): url = rawlink
url = self.PREFIX + rawlink else:
urlverified = self.browser.open_novisit(url).geturl() # resolve redirect. url = self.PREFIX + rawlink
try:
urlverified = self.browser.open_novisit(url).geturl() # resolve redirect.
except:
continue
title = self.tag_to_string(item) title = self.tag_to_string(item)
date = strftime(self.timefmt) date = strftime(self.timefmt)
articles.append({ articles.append({
'title' :title 'title' :title
,'date' :date ,'date' :date
,'url' :urlverified ,'url' :urlverified
,'description':'' ,'description':''
}) })
return articles return articles
@@ -110,20 +108,21 @@ class FinancialTimes(BasicNewsRecipe):
wide = soup.find('div',attrs={'class':'wide'}) wide = soup.find('div',attrs={'class':'wide'})
if not wide: if not wide:
return feeds return feeds
allsections = wide.findAll(attrs={'class':lambda x: x and 'footwell' in x.split()}) strest = wide.findAll('h3', attrs={'class':'section'})
if not allsections: if not strest:
return feeds return feeds
st = wide.findAll('h4',attrs={'class':'section-no-arrow'})
if st:
st.extend(strest)
count = 0 count = 0
for item in allsections: for item in st:
count = count + 1 count = count + 1
if self.test and count > 2: if self.test and count > 2:
return feeds return feeds
fitem = item.h3 ftitle = self.tag_to_string(item)
if not fitem:
fitem = item.h4
ftitle = self.tag_to_string(fitem)
self.report_progress(0, _('Fetching feed')+' %s...'%(ftitle)) self.report_progress(0, _('Fetching feed')+' %s...'%(ftitle))
feedarts = self.get_artlinks(item.ul) if item.parent.ul is not None:
feedarts = self.get_artlinks(item.parent.ul)
feeds.append((ftitle,feedarts)) feeds.append((ftitle,feedarts))
return feeds return feeds
@@ -157,7 +156,7 @@ class FinancialTimes(BasicNewsRecipe):
def get_cover_url(self): def get_cover_url(self):
cdate = datetime.date.today() cdate = datetime.date.today()
if cdate.isoweekday() == 7: if cdate.isoweekday() == 7:
cdate -= datetime.timedelta(days=1) cdate -= datetime.timedelta(days=1)
return cdate.strftime('http://specials.ft.com/vtf_pdf/%d%m%y_FRONT1_LON.pdf') return cdate.strftime('http://specials.ft.com/vtf_pdf/%d%m%y_FRONT1_LON.pdf')
def get_obfuscated_article(self, url): def get_obfuscated_article(self, url):
@@ -170,8 +169,10 @@ class FinancialTimes(BasicNewsRecipe):
except: except:
print "Retrying download..." print "Retrying download..."
count += 1 count += 1
tfile = PersistentTemporaryFile('_fa.html') self.temp_files.append(PersistentTemporaryFile('_fa.html'))
tfile.write(html) self.temp_files[-1].write(html)
tfile.close() self.temp_files[-1].close()
self.temp_files.append(tfile) return self.temp_files[-1].name
return tfile.name
def cleanup(self):
self.browser.open('https://registration.ft.com/registration/login/logout?location=')