#!/usr/bin/env python

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'

import copy
import re

from calibre.web.feeds.news import BasicNewsRecipe

# http://online.wsj.com/page/us_in_todays_paper.html
class WallStreetJournal(BasicNewsRecipe):

    title = 'The Wall Street Journal'
    __author__ = 'Kovid Goyal and Joshua Oster-Morris'
    description = 'News and current affairs'
    needs_subscription = True
    language = 'en'

    compress_news_images = True
    compress_news_images_auto_size = 5
    max_articles_per_feed = 1000
    timefmt = ' [%a, %b %d, %Y]'
    no_stylesheets = True
    ignore_duplicate_articles = {'url'}
    remove_attributes = ['style', 'data-scrim']
    keep_only_tags = [
        dict(name='h1'), dict(name='h2', attrs={'class':['subhead', 'subHed deck']}),
        dict(name='span', itemprop='author', rel='author'),
        dict(name='article', id=['article-contents', 'articleBody']),
        dict(name='div', id='article_story_body'),
        dict(name='div', attrs={'class':'snippet-ad-login'}),
    ]
    remove_tags = [
        dict(attrs={'class':['insetButton', 'insettipBox', 'author-info', 'media-object-video']}),
        dict(attrs={'class':lambda x: x and 'article_tools' in x.split()}),
        dict(name='span', attrs={'data-country-code':True, 'data-ticker-code':True}),
    ]
    preprocess_regexps = [
        # Strip IE conditional-comment blocks, which would otherwise
        # leak duplicate fallback markup into the converted article
        (re.compile(r'<!--\[if lte IE 8\]>.+?<!\[endif\]-->', re.DOTALL), lambda m: ''),
        (re.compile(r'<!\[if ! lte IE 8\]>.+?<!\[endif\]>', re.DOTALL), lambda m: ''),
    ]
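
    # A quick illustration of the patterns above: applied to the raw
    # page HTML, the first one turns
    #   <p>x</p><!--[if lte IE 8]>legacy markup<![endif]--><p>y</p>
    # into <p>x</p><p>y</p>, so IE-only fallback markup never reaches
    # the output.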

    # Log in using calibre's javascript-capable browser, as the WSJ
    # login form is not usable without javascript
    use_javascript_to_login = True

    def javascript_login(self, br, username, password):
        br.visit('https://id.wsj.com/access/pages/wsj/us/login_standalone.html?mg=com-wsj', timeout=120)
        f = br.select_form(nr=0)
        f['username'] = username
        f['password'] = password
        br.submit(timeout=120)
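
    # The username and password are supplied by calibre itself: either
    # from the Fetch News scheduler dialog or from ebook-convert's
    # --username/--password options (see the usage sketch at the end of
    # this file).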
    def populate_article_metadata(self, article, soup, first):
        # Use the first image in the article as its thumbnail in the
        # Table of Contents
        if first and hasattr(self, 'add_toc_thumbnail'):
            picdiv = soup.find('img', src=True)
            if picdiv is not None:
                self.add_toc_thumbnail(article, picdiv['src'])
    def preprocess_html(self, soup):
        # Remove thumbnail for zoomable images
        for div in soup.findAll('div', attrs={'class':lambda x: x and 'insetZoomTargetBox' in x.split()}):
            img = div.find('img')
            if img is not None:
                img.extract()
        # Use large images
        for img in soup.findAll('img', attrs={'data-enlarge':True}):
            img['src'] = img['data-enlarge']

        return soup
    def wsj_get_index(self):
        return self.index_to_soup('http://online.wsj.com/itp')
    def wsj_add_feed(self, feeds, title, url):
        self.log('Found section:', title)
        try:
            if url.endswith('whatsnews'):
                articles = self.wsj_find_wn_articles(url)
            else:
                articles = self.wsj_find_articles(url)
        except Exception:
            # A section that fails to parse is simply skipped
            articles = []
        if articles:
            feeds.append((title, articles))
        return feeds
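
    # wsj_add_feed() accumulates the list-of-tuples structure that
    # parse_index() must return, e.g.
    #   [('Front Section', [{'title': ..., 'url': ..., 'description': ..., 'date': ''}]),
    #    ("What's News", [...]),
    #    ...]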
    def abs_wsj_url(self, href):
        if not href.startswith('http'):
            href = 'http://online.wsj.com' + href
        return href
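
    # e.g. a hypothetical relative link '/articles/foo' becomes
    # 'http://online.wsj.com/articles/foo'; absolute URLs are returned
    # unchanged.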
    def parse_index(self):
        soup = self.wsj_get_index()

        # Use the issue date shown on the index page in the title
        date = soup.find('span', attrs={'class':'date-date'})
        if date is not None:
            self.timefmt = ' [%s]' % self.tag_to_string(date)

        # The link in the section header PDF box serves as the cover
        cov = soup.find('div', attrs={'class':lambda x: x and 'itpSectionHeaderPdf' in x.split()})
        if cov is not None:
            a = cov.find('a', href=True)
            if a is not None:
                self.cover_url = a['href']

        feeds = []
        div = soup.find('div', attrs={'class':'itpHeader'})
        div = div.find('ul', attrs={'class':'tab'})
        for a in div.findAll('a', href=lambda x: x and '/itp/' in x):
            pageone = a['href'].endswith('pageone')
            if pageone:
                title = 'Front Section'
                url = self.abs_wsj_url(a['href'])
                feeds = self.wsj_add_feed(feeds, title, url)
                title = "What's News"
                url = url.replace('pageone', 'whatsnews')
                feeds = self.wsj_add_feed(feeds, title, url)
            else:
                title = self.tag_to_string(a)
                url = self.abs_wsj_url(a['href'])
                feeds = self.wsj_add_feed(feeds, title, url)

        # Append the A-hed story (the front page feature) to the first
        # feed
        for li in soup.findAll('li', attrs={'class':'ahed_listitem'}):
            h2 = li.find('h2')
            if h2 is None:
                continue
            a = h2.find('a', href=True)
            if a is None:
                continue
            url = a['href']
            title = self.tag_to_string(a)
            p = h2.findNextSibling('p')
            if p is not None:
                desc = self.tag_to_string(p)
            else:
                desc = ''
            if feeds:
                feeds[0][1].append({'title':title, 'url':url, 'description':desc, 'date':''})
        return feeds
    def wsj_find_wn_articles(self, url):
        soup = self.index_to_soup(url)
        articles = []

        whats_news = soup.find('div', attrs={'class':lambda x: x and 'whatsNews-simple' in x})
        if whats_news is not None:
            for a in whats_news.findAll('a', href=lambda x: x and '/article/' in x):
                container = a.findParent(['p'])
                meta = a.find(attrs={'class':'meta_sectionName'})
                if meta is not None:
                    meta.extract()
                title = self.tag_to_string(a).strip()
                url = a['href']
                desc = ''
                if container is not None:
                    desc = self.tag_to_string(container)

                articles.append({'title':title, 'url':url,
                    'description':desc, 'date':''})

                self.log('\tFound WN article:', title)
                self.log('\t\t', desc)

        return articles
    def wsj_find_articles(self, url):
        soup = self.index_to_soup(url)

        # What's News summaries are handled separately, in
        # wsj_find_wn_articles()
        whats_news = soup.find('div', attrs={'class':lambda x: x and 'whatsNews-simple' in x})
        if whats_news is not None:
            whats_news.extract()

        articles = []

        flavorarea = soup.find('div', attrs={'class':lambda x: x and 'ahed' in x})
        if flavorarea is not None:
            flavorstory = flavorarea.find('a', href=lambda x: x and x.startswith('/article'))
            if flavorstory is not None:
                flavorstory['class'] = 'mjLinkItem'
                metapage = soup.find('span', attrs={'class':lambda x: x and 'meta_sectionName' in x})
                if metapage is not None:
                    # metapage should always be A1 because that should
                    # be first on the page
                    flavorstory.append(copy.copy(metapage))

        for a in soup.findAll('a', attrs={'class':'mjLinkItem'}, href=True):
            container = a.findParent(['li', 'div'])
            meta = a.find(attrs={'class':'meta_sectionName'})
            if meta is not None:
                meta.extract()
                meta = self.tag_to_string(meta).strip()
            if meta:
                title = self.tag_to_string(a).strip() + ' [%s]' % meta
            else:
                title = self.tag_to_string(a).strip()
            url = self.abs_wsj_url(a['href'])
            # Use the first description paragraph that is not the
            # Subscriber Content notice
            desc = ''
            if container is not None:
                for p in container.findAll('p'):
                    desc = self.tag_to_string(p)
                    if 'Subscriber Content' not in desc:
                        break

            articles.append({'title':title, 'url':url,
                'description':desc, 'date':''})

            self.log('\tFound article:', title)
            self.log('\t\t', desc)

        return articles
    def cleanup(self):
        self.browser.open('http://online.wsj.com/logout?url=http://online.wsj.com')
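
# A minimal sketch of how to test this recipe from the command line,
# assuming it is saved locally as wsj.recipe (substitute your own WSJ
# credentials):
#
#   ebook-convert wsj.recipe wsj.epub --username=USER --password=PASS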