Fix #5848 (WSJ error downloading)

Kovid Goyal 2010-06-16 10:44:26 -06:00
parent 05aa76c149
commit d596bf55ba


@@ -3,9 +3,8 @@ __license__ = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
 __docformat__ = 'restructuredtext en'
+import string
 from calibre.web.feeds.news import BasicNewsRecipe
-from calibre import strftime
 # http://online.wsj.com/page/us_in_todays_paper.html
@@ -72,56 +71,61 @@ class WallStreetJournal(BasicNewsRecipe):
     def parse_index(self):
         soup = self.wsj_get_index()
-        year = strftime('%Y')
-        for x in soup.findAll('td', height='25', attrs={'class':'b14'}):
-            txt = self.tag_to_string(x).strip()
-            txt = txt.replace(u'\xa0', ' ')
-            txt = txt.encode('ascii', 'ignore')
-            if year in txt:
-                self.timefmt = ' [%s]'%txt
-                break
-        left_column = soup.find(
-            text=lambda t: 'begin ITP Left Column' in str(t))
-        table = left_column.findNext('table')
-        current_section = None
-        current_articles = []
-        feeds = []
-        for x in table.findAllNext(True):
-            if x.name == 'td' and x.get('class', None) == 'b13':
-                if current_articles and current_section:
-                    feeds.append((current_section, current_articles))
-                current_section = self.tag_to_string(x.a).strip()
-                current_articles = []
-                self.log('\tProcessing section:', current_section)
-            if current_section is not None and x.name == 'a' and \
-                x.get('class', None) == 'bold80':
-                title = self.tag_to_string(x)
-                url = x.get('href', False)
-                if not url or not title:
-                    continue
-                url = url.partition('#')[0]
-                desc = ''
-                d = x.findNextSibling(True)
-                if d is not None and d.get('class', None) == 'arialResize':
-                    desc = self.tag_to_string(d)
-                    desc = desc.partition(u'\u2022')[0]
-                self.log('\t\tFound article:', title)
-                self.log('\t\t\t', url)
-                if url.startswith('/'):
-                    url = 'http://online.wsj.com'+url
-                if desc:
-                    self.log('\t\t\t', desc)
-                current_articles.append({'title': title, 'url':url,
-                    'description':desc, 'date':''})
-        if current_articles and current_section:
-            feeds.append((current_section, current_articles))
+        date = soup.find('span', attrs={'class':'date-date'})
+        if date is not None:
+            self.timefmt = ' [%s]'%self.tag_to_string(date)
+        sections = {}
+        sec_order = []
+        for a in soup.findAll('a', attrs={'class':'mjLinkItem'}, href=True):
+            container = a.findParent(['li', 'div'])
+            if container.name == 'div':
+                section = 'Page One'
+            else:
+                section = ''
+                sec = container.find('a', href=lambda x: x and '/search?' in x)
+                if sec is not None:
+                    section = self.tag_to_string(sec).strip()
+                if not section:
+                    h = container.find(['h1','h2','h3','h4','h5','h6'])
+                    section = self.tag_to_string(h)
+                section = string.capitalize(section).replace('U.s.', 'U.S.')
+            if section not in sections:
+                sections[section] = []
+                sec_order.append(section)
+            meta = a.find(attrs={'class':'meta_sectionName'})
+            if meta is not None:
+                meta.extract()
+            title = self.tag_to_string(a).strip() + ' [%s]'%self.tag_to_string(meta)
+            url = 'http://online.wsj.com'+a['href']
+            desc = ''
+            p = container.find('p')
+            if p is not None:
+                desc = self.tag_to_string(p)
+            sections[section].append({'title':title, 'url':url,
+                'description':desc, 'date':''})
+            self.log('Found article:', title)
+            a.extract()
+            for a in container.findAll('a', href=lambda x: x and '/article/'
+                    in x):
+                url = a['href']
+                if not url.startswith('http:'):
+                    url = 'http://online.wsj.com'+url
+                title = self.tag_to_string(a).strip()
+                if not title or title.startswith('['): continue
+                if title:
+                    sections[section].append({'title':self.tag_to_string(a),
+                        'url':url, 'description':'', 'date':''})
+                    self.log('\tFound related:', title)
+        feeds = [(sec, sections[sec]) for sec in sec_order]
         return feeds
     def cleanup(self):
         self.browser.open('http://online.wsj.com/logout?url=http://online.wsj.com')
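
For context, the heart of the reworked parse_index is a group-by-section pass: articles are appended to per-section lists while a separate sec_order list records the order in which each section is first seen, and the final feeds value is rebuilt from that order. Below is a minimal, self-contained sketch of that pattern; the article tuples are hypothetical placeholders, not data from the recipe, which extracts them from the WSJ index page with BeautifulSoup.

    # Sketch of the section-grouping pattern used by the new parse_index.
    articles = [
        ('Page One', 'Lead story',   'http://online.wsj.com/article/a'),
        ('Opinion',  'An editorial', 'http://online.wsj.com/article/b'),
        ('Page One', 'Second story', 'http://online.wsj.com/article/c'),
    ]

    sections = {}   # section name -> list of article dicts
    sec_order = []  # section names in the order they were first seen

    for section, title, url in articles:
        if section not in sections:
            sections[section] = []
            sec_order.append(section)
        sections[section].append({'title': title, 'url': url,
            'description': '', 'date': ''})

    # Same shape as the value parse_index returns to calibre:
    # a list of (section name, article list) tuples.
    feeds = [(sec, sections[sec]) for sec in sec_order]
    print(feeds)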