#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
from calibre.web.feeds.news import BasicNewsRecipe
from calibre import strftime
# http://online.wsj.com/page/us_in_todays_paper.html
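# Calibre news recipe for the US print edition of The Wall Street Journal.
# The "In Today's Paper" index page linked above is scraped to build the
# section/article list; a wsj.com subscription is required.
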
class WallStreetJournal(BasicNewsRecipe):

    title = 'The Wall Street Journal (US)'
    __author__ = 'Kovid Goyal and Sujata Raman'
    description = 'News and current affairs'
    needs_subscription = True
    language = 'en'

    max_articles_per_feed = 1000
    timefmt = ' [%a, %b %d, %Y]'
    no_stylesheets = True

    extra_css = '''h1{color:#093D72 ; font-size:large ; font-family:Georgia,"Century Schoolbook","Times New Roman",Times,serif; }
                   h2{color:#474537; font-family:Georgia,"Century Schoolbook","Times New Roman",Times,serif; font-size:small; font-style:italic;}
                   .subhead{color:gray; font-family:Georgia,"Century Schoolbook","Times New Roman",Times,serif; font-size:small; font-style:italic;}
                   .insettipUnit {color:#666666; font-family:Arial,Sans-serif;font-size:xx-small }
                   .targetCaption{ font-size:x-small; color:#333333; font-family:Arial,Helvetica,sans-serif}
                   .article{font-family :Arial,Helvetica,sans-serif; font-size:x-small}
                   .tagline {color:#333333; font-size:xx-small}
                   .dateStamp {color:#666666; font-family:Arial,Helvetica,sans-serif}
                   h3{color:blue ;font-family:Arial,Helvetica,sans-serif; font-size:xx-small}
                   .byline{color:blue;font-family:Arial,Helvetica,sans-serif; font-size:xx-small}
                   h6{color:#333333; font-family:Georgia,"Century Schoolbook","Times New Roman",Times,serif; font-size:small;font-style:italic; }
                   .paperLocation{color:#666666; font-size:xx-small}'''

    remove_tags_before = dict(name='h1')
    remove_tags = [
        dict(id=["articleTabs_tab_article", "articleTabs_tab_comments", "articleTabs_tab_interactive", "articleTabs_tab_video", "articleTabs_tab_map", "articleTabs_tab_slideshow"]),
        {'class':['footer_columns', 'network', 'insetCol3wide', 'interactive', 'video', 'slideshow', 'map', 'insettip', 'insetClose', 'more_in', "insetContent", 'articleTools_bottom', 'aTools', "tooltip", "adSummary", "nav-inline"]},
        dict(rel='shortcut icon'),
    ]
    remove_tags_after = [dict(id="article_story_body"), {'class':"article story"}, ]
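
    # Log in to wsj.com before any articles are downloaded: the first form on
    # the commerce.wsj.com login page is filled in with the subscriber
    # credentials, and the response is checked for the 'Welcome,' greeting to
    # detect a failed login.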
    def get_browser(self):
        br = BasicNewsRecipe.get_browser()
        if self.username is not None and self.password is not None:
            br.open('http://commerce.wsj.com/auth/login')
            br.select_form(nr=0)
            br['user'] = self.username
            br['password'] = self.password
            res = br.submit()
            raw = res.read()
            if 'Welcome,' not in raw:
                raise ValueError('Failed to log in to wsj.com, check your '
                                 'username and password')
        return br
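
    # Convert the table-based article markup to divs and strip the inline
    # article thumbnail containers from each downloaded page.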
    def postprocess_html(self, soup, first):
        for tag in soup.findAll(name=['table', 'tr', 'td']):
            tag.name = 'div'

        for tag in soup.findAll('div', dict(id=["articleThumbnail_1", "articleThumbnail_2", "articleThumbnail_3", "articleThumbnail_4", "articleThumbnail_5", "articleThumbnail_6", "articleThumbnail_7"])):
            tag.extract()

        return soup
    def wsj_get_index(self):
        return self.index_to_soup('http://online.wsj.com/page/us_in_todays_paper.html')
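
    # Build the feed list from the "In Today's Paper" index page: each 'b13'
    # table cell starts a new section, and each 'bold80' link that follows it
    # becomes an article in that section. If the page carries today's date,
    # it replaces the default timefmt.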
    def parse_index(self):
        soup = self.wsj_get_index()

        year = strftime('%Y')
        for x in soup.findAll('td', height='25', attrs={'class':'b14'}):
            txt = self.tag_to_string(x).strip()
            txt = txt.replace(u'\xa0', ' ')
            txt = txt.encode('ascii', 'ignore')
            if year in txt:
                self.timefmt = ' [%s]' % txt
                break

        left_column = soup.find(
            text=lambda t: 'begin ITP Left Column' in str(t))
        table = left_column.findNext('table')

        current_section = None
        current_articles = []
        feeds = []
        for x in table.findAllNext(True):
            if x.name == 'td' and x.get('class', None) == 'b13':
                if current_articles and current_section:
                    feeds.append((current_section, current_articles))
                current_section = self.tag_to_string(x.a).strip()
                current_articles = []
                self.log('\tProcessing section:', current_section)
            if current_section is not None and x.name == 'a' and \
                    x.get('class', None) == 'bold80':
                title = self.tag_to_string(x)
                url = x.get('href', False)
                if not url or not title:
                    continue
                url = url.partition('#')[0]
                desc = ''
                d = x.findNextSibling(True)
                if d is not None and d.get('class', None) == 'arialResize':
                    desc = self.tag_to_string(d)
                    desc = desc.partition(u'\u2022')[0]
                self.log('\t\tFound article:', title)
                self.log('\t\t\t', url)
                if url.startswith('/'):
                    url = 'http://online.wsj.com' + url
                if desc:
                    self.log('\t\t\t', desc)
                current_articles.append({'title': title, 'url': url,
                                         'description': desc, 'date': ''})

        if current_articles and current_section:
            feeds.append((current_section, current_articles))

        return feeds
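
    # Log out at the end of the run so the WSJ session is not left open.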
    def cleanup(self):
        self.browser.open('http://online.wsj.com/logout?url=http://online.wsj.com')
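
# A minimal sketch of how this recipe might be exercised from the command
# line (assuming it is saved as wsj.recipe and that your calibre version
# exposes the --username/--password options for subscription recipes):
#
#   ebook-convert wsj.recipe wsj.epub --username you@example.com --password secret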