#!/usr/bin/env python

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'

import string

from calibre.web.feeds.news import BasicNewsRecipe


# Index page: http://online.wsj.com/page/us_in_todays_paper.html

class WallStreetJournal(BasicNewsRecipe):

    title = 'The Wall Street Journal (US)'
    __author__ = 'Kovid Goyal and Sujata Raman'
    description = 'News and current affairs'
    needs_subscription = True
    language = 'en'

    max_articles_per_feed = 1000
    timefmt = ' [%a, %b %d, %Y]'
    no_stylesheets = True

    extra_css = '''
        h1 {color:#093D72; font-size:large; font-family:Georgia,"Century Schoolbook","Times New Roman",Times,serif;}
        h2 {color:#474537; font-family:Georgia,"Century Schoolbook","Times New Roman",Times,serif; font-size:small; font-style:italic;}
        .subhead {color:gray; font-family:Georgia,"Century Schoolbook","Times New Roman",Times,serif; font-size:small; font-style:italic;}
        .insettipUnit {color:#666666; font-family:Arial,sans-serif; font-size:xx-small;}
        .targetCaption {font-size:x-small; color:#333333; font-family:Arial,Helvetica,sans-serif;}
        .article {font-family:Arial,Helvetica,sans-serif; font-size:x-small;}
        .tagline {color:#333333; font-size:xx-small;}
        .dateStamp {color:#666666; font-family:Arial,Helvetica,sans-serif;}
        h3 {color:blue; font-family:Arial,Helvetica,sans-serif; font-size:xx-small;}
        .byline {color:blue; font-family:Arial,Helvetica,sans-serif; font-size:xx-small;}
        h6 {color:#333333; font-family:Georgia,"Century Schoolbook","Times New Roman",Times,serif; font-size:small; font-style:italic;}
        .paperLocation {color:#666666; font-size:xx-small;}
    '''

    remove_tags_before = dict(name='h1')
    remove_tags = [
        dict(id=['articleTabs_tab_article', 'articleTabs_tab_comments',
                 'articleTabs_tab_interactive', 'articleTabs_tab_video',
                 'articleTabs_tab_map', 'articleTabs_tab_slideshow']),
        {'class': ['footer_columns', 'network', 'insetCol3wide', 'interactive',
                   'video', 'slideshow', 'map', 'insettip', 'insetClose',
                   'more_in', 'insetContent', 'articleTools_bottom', 'aTools',
                   'tooltip', 'adSummary', 'nav-inline']},
        dict(rel='shortcut icon'),
    ]
    remove_tags_after = [dict(id='article_story_body'), {'class': 'article story'}]

    def get_browser(self):
        br = BasicNewsRecipe.get_browser()
        if self.username is not None and self.password is not None:
            # Log in by submitting the first form on the login page
            br.open('http://commerce.wsj.com/auth/login')
            br.select_form(nr=0)
            br['user'] = self.username
            br['password'] = self.password
            res = br.submit()
            raw = res.read()
            # A successful login greets the subscriber by name
            if 'Welcome,' not in raw:
                raise ValueError('Failed to log in to wsj.com, check your '
                                 'username and password')
        return br

    def postprocess_html(self, soup, first):
        # Flatten table markup into divs so articles reflow on e-readers
        for tag in soup.findAll(name=['table', 'tr', 'td']):
            tag.name = 'div'

        # Drop article thumbnail containers
        for tag in soup.findAll('div', dict(id=[
                'articleThumbnail_1', 'articleThumbnail_2', 'articleThumbnail_3',
                'articleThumbnail_4', 'articleThumbnail_5', 'articleThumbnail_6',
                'articleThumbnail_7'])):
            tag.extract()

        return soup

    def wsj_get_index(self):
        return self.index_to_soup('http://online.wsj.com/page/us_in_todays_paper.html')
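
    # parse_index() scrapes the "US in today's paper" index page. Each
    # article link carries the class mjLinkItem; its nearest <li>/<div>
    # ancestor determines the section (a bare <div> ancestor means the
    # article is from Page One). Related links found in the same container
    # are added as extra articles under that section.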
    def parse_index(self):
        soup = self.wsj_get_index()

        # Use the date printed on the index page, if present
        date = soup.find('span', attrs={'class': 'date-date'})
        if date is not None:
            self.timefmt = ' [%s]' % self.tag_to_string(date)

        sections = {}
        sec_order = []
        for a in soup.findAll('a', attrs={'class': 'mjLinkItem'}, href=True):
            container = a.findParent(['li', 'div'])
            if container.name == 'div':
                section = 'Page One'
            else:
                section = ''
                sec = container.find('a', href=lambda x: x and '/search?' in x)
                if sec is not None:
                    section = self.tag_to_string(sec).strip()
                if not section:
                    h = container.find(['h1', 'h2', 'h3', 'h4', 'h5', 'h6'])
                    section = self.tag_to_string(h)
            section = string.capitalize(section).replace('U.s.', 'U.S.')
            if section not in sections:
                sections[section] = []
                sec_order.append(section)

            title = self.tag_to_string(a).strip()
            meta = a.find(attrs={'class': 'meta_sectionName'})
            if meta is not None:
                # Append the section name from the link's metadata, when present
                meta.extract()
                title += ' [%s]' % self.tag_to_string(meta)
            url = 'http://online.wsj.com' + a['href']
            desc = ''
            p = container.find('p')
            if p is not None:
                desc = self.tag_to_string(p)

            sections[section].append({'title': title, 'url': url,
                                      'description': desc, 'date': ''})
            self.log('Found article:', title)

            # Pick up related article links from the same container
            a.extract()
            for a in container.findAll('a', href=lambda x: x and '/article/' in x):
                url = a['href']
                if not url.startswith('http:'):
                    url = 'http://online.wsj.com' + url
                title = self.tag_to_string(a).strip()
                if not title or title.startswith('['):
                    continue
                sections[section].append({'title': title, 'url': url,
                                          'description': '', 'date': ''})
                self.log('\tFound related:', title)

        feeds = [(sec, sections[sec]) for sec in sec_order]
        return feeds

    def cleanup(self):
        # Log out so the subscription's simultaneous-login limit is not
        # tripped on the next download
        self.browser.open('http://online.wsj.com/logout?url=http://online.wsj.com')
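
# A sketch of how this recipe can be tested from the command line, assuming
# a calibre install whose recipe input exposes the standard --username and
# --password options; the credentials below are placeholders:
#
#   ebook-convert wsj.recipe wsj.epub --username you@example.com --password secret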