#!/usr/bin/env python2
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

import json
from mechanize import Request
from urllib import quote
import html5lib
from lxml import html
from calibre.web.feeds.news import BasicNewsRecipe
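

# CSSSelect translates the small set of CSS selectors used in this recipe
# into pre-built XPath expressions, avoiding a runtime dependency on the
# cssselect package. For example, CSSSelect('a[href]') returns a compiled
# XPath callable that yields every <a> element carrying an href attribute.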
def CSSSelect(expr):
    expr = {
        'div.whatsNews-simple': '''descendant-or-self::div[@class and contains(concat(' ', normalize-space(@class), ' '), ' whatsNews-simple ')]''',
        'a.mjLinkItem[href]': '''descendant-or-self::a[@class and contains(concat(' ', normalize-space(@class), ' '), ' mjLinkItem ') and (@href)]''',
        '.meta_sectionName': '''descendant-or-self::*[@class and contains(concat(' ', normalize-space(@class), ' '), ' meta_sectionName ')]''',
        'p': 'descendant-or-self::p',
        'div.whatsNews-simple.whatsNews-itp': '''descendant-or-self::div[@class and contains(concat(' ', normalize-space(@class), ' '), ' whatsNews-simple ') and (@class and contains(concat(' ', normalize-space(@class), ' '), ' whatsNews-itp '))]''',
        'a[href]': 'descendant-or-self::a[@href]',
        'span.date-date': "descendant-or-self::span[@class and contains(concat(' ', normalize-space(@class), ' '), ' date-date ')]",
        'div.itpSectionHeaderPdf a[href]': "descendant-or-self::div[@class and contains(concat(' ', normalize-space(@class), ' '), ' itpSectionHeaderPdf ')]/descendant-or-self::*/a[@href]",
        'div.itpHeader ul.tab a[href]': "descendant-or-self::div[@class and contains(concat(' ', normalize-space(@class), ' '), ' itpHeader ')]/descendant-or-self::*/ul[@class and contains(concat(' ', normalize-space(@class), ' '), ' tab ')]/descendant-or-self::*/a[@href]",
    }[expr]
    from lxml.etree import XPath
    return XPath(expr)
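

# classes() builds an attrs matcher for BeautifulSoup's findAll: a tag
# matches when its class attribute shares at least one name with the given
# space-separated list, e.g. classes('a b') matches class="x b" but not
# class="ab".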
def classes(classes):
    q = frozenset(classes.split(' '))
    return dict(attrs={
        'class': lambda x: x and frozenset(x.split()).intersection(q)})


USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0'


class WSJ(BasicNewsRecipe):

    title = 'The Wall Street Journal'
    __author__ = 'Kovid Goyal'
    description = 'News and current affairs'
    language = 'en'

    compress_news_images = True
    compress_news_images_auto_size = 7
    timefmt = ' [%a, %b %d, %Y]'
    no_stylesheets = True
    ignore_duplicate_articles = {'url'}
    remove_attributes = ['style', 'data-scrim']
    needs_subscription = True
    WSJ_ITP = 'http://online.wsj.com/itp/today'

    keep_only_tags = [
        dict(classes('wsj-article-headline-wrap article_header')),
        dict(name='span', itemprop='author', rel='author'),
        dict(name='article', id='article-contents articleBody'.split()),
        dict(name='div', id='article_story_body ncTitleArea snipper-ad-login'.split()),
        dict(classes('nc-exp-artbody errorNotFound')),
        dict(attrs={'data-module-zone': 'article_snippet'}),
    ]

    remove_tags = [
        classes('insetButton insettipBox author-info media-object-video article_tools nc-exp-artmeta category'),
        dict(name='span', attrs={
            'data-country-code': True, 'data-ticker-code': True}),
        dict(name='meta link'.split()),
    ]
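
    # Article pages are often malformed; round-trip them through html5lib
    # and lxml so the rest of the pipeline sees well-formed markup. This
    # replaces the recipe's earlier Qt WebKit based processing.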
    def preprocess_raw_html(self, raw_html, url):
        root = html5lib.parse(raw_html, treebuilder='lxml',
                              namespaceHTMLElements=False)
        raw_html = html.tostring(root)
        # open('/t/art.html', 'w').write(raw_html)
        return raw_html

    def preprocess_soup(self, soup):
        # Slideshow and expandable images need to be processed here to
        # set the src attribute correctly
        found = 0
        for img in soup.findAll('img', attrs={'data-in-base-data-lazy': True}):
            img['src'] = img['data-in-base-data-lazy']
            found += 1
        for img in soup.findAll('img', attrs={'data-enlarge': True}):
            img['src'] = img['data-enlarge']
            found += 1
        if found:
            self.log.debug('Found %d dynamic images' % found)
        return soup
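
    # Logging in is a three step handshake: fetch the signin page to pick up
    # session cookies, POST the credentials as JSON to submitlogin.json, and
    # follow the URL returned in the JSON response to establish the session.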
    def get_browser(self):
        # To understand the signin logic read signin.js from
        # https://id.wsj.com/access/pages/wsj/us/signin.html
        br = BasicNewsRecipe.get_browser(self, user_agent=USER_AGENT)
        # self.wsj_itp_page = open('/t/raw.html').read()
        # return br
        url = 'https://id.wsj.com/access/pages/wsj/us/signin.html?mg=com-wsj&mg=id-wsj'
        # br.set_debug_http(True)
        br.open(url).read()
        rurl = 'https://id.wsj.com/auth/submitlogin.json'
        rq = Request(rurl, headers={
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept-Language': 'en-US,en;q=0.8',
            'Content-Type': 'application/json',
            'Referer': url,
            'X-HTTP-Method-Override': 'POST',
            'X-Requested-With': 'XMLHttpRequest',
        }, data=json.dumps({
            'username': self.username,
            'password': self.password,
            'realm': 'default',
            'savelogin': 'true',
            'template': 'default',
            'url': quote(self.WSJ_ITP),
        }))
        r = br.open(rq)
        if r.code != 200:
            raise ValueError('Failed to login, check username and password')
        data = json.loads(r.read())
        # print(data)
        if data.get('result') != 'success':
            raise ValueError(
                'Failed to login (XHR failed), check username and password')
        br.set_cookie('m', data['username'], '.wsj.com')
        r = br.open(data['url'])
        self.wsj_itp_page = raw = r.read()
        if b'>Sign Out<' not in raw:
            raise ValueError(
                'Failed to login (auth URL failed), check username and password')
        # open('/t/raw.html', 'w').write(raw)
        return br

    def abs_wsj_url(self, href):
        if not href.startswith('http'):
            href = 'http://online.wsj.com' + href
        return href
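
    # Parse a regular section page: drop the What's News summary blocks
    # (they are handled separately in wsj_find_wn_articles), then collect
    # the title, section name, URL and description of every article link.
    # With ahed=True the front page A-hed items are picked up as well.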
    def wsj_find_articles(self, url, ahed=False):
        root = self.index_to_soup(url, as_tree=True)

        for x in CSSSelect('div.whatsNews-simple')(root):
            x.getparent().remove(x)

        articles = []

        for a in CSSSelect('a.mjLinkItem[href]')(root):
            container = a.xpath('ancestor::li')
            meta = CSSSelect('.meta_sectionName')(a)
            if meta:
                meta = meta[0]
                meta.getparent().remove(meta)
                meta = self.tag_to_string(meta)
            title = self.tag_to_string(a)
            if meta:
                title += ' [%s]' % meta
            url = self.abs_wsj_url(a.get('href'))
            desc = ''
            if container:
                for p in CSSSelect('p')(container[0]):
                    desc = self.tag_to_string(p)
                    if 'Subscriber Content' not in desc:
                        break

            articles.append({'title': title, 'url': url,
                             'description': desc, 'date': ''})

            self.log('\tFound article:', title)
            self.log('\t\t', desc)

        if ahed:
            for h2 in root.xpath('//li[@class="ahed_listitem"]/h2'):
                a = h2.xpath('descendant::a')[0]
                title = self.tag_to_string(a)
                url = self.abs_wsj_url(a.get('href'))
                desc = ''
                p = h2.xpath('following-sibling::p')
                if p:
                    desc = self.tag_to_string(p[0])
                articles.append({'title': title, 'url': url,
                                 'description': desc, 'date': ''})
                self.log('Found article:', title)
                self.log('\t\t', desc)

        return articles
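
    # The What's News column holds one-line summaries; scan the last
    # whatsNews-itp block on the page and use each link's enclosing
    # paragraph as the article description.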
    def wsj_find_wn_articles(self, url):
        root = self.index_to_soup(url, as_tree=True)
        articles = []
        whats_news = CSSSelect('div.whatsNews-simple.whatsNews-itp')(root)
        if whats_news:
            for a in CSSSelect('a[href]')(whats_news[-1]):
                if '/articles/' not in a.get('href', ''):
                    continue
                container = a.xpath('ancestor::p')
                for meta in CSSSelect('.meta_sectionName')(a):
                    meta.getparent().remove(meta)
                title = self.tag_to_string(a).strip()
                url = self.abs_wsj_url(a.get('href'))
                desc = ''
                if container:
                    desc = self.tag_to_string(container[0])

                articles.append({'title': title, 'url': url,
                                 'description': desc, 'date': ''})

                self.log('\tFound WN article:', title)
                self.log('\t\t', desc)

        return articles
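
    # Fetch one section and append a (title, articles) tuple to feeds; a
    # section that fails to parse is logged and skipped instead of aborting
    # the whole download.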
    def wsj_add_feed(self, feeds, title, url):
        self.log('Found section:', title)
        try:
            if url.endswith('whatsnews'):
                articles = self.wsj_find_wn_articles(url)
            else:
                articles = self.wsj_find_articles(
                    url, ahed=title == 'Front Section')
        except Exception:
            self.log.exception('Failed to parse section:', title)
            articles = []
        if articles:
            feeds.append((title, articles))
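
    # Build the feed list from the ITP (today's paper) page saved during
    # login: read the issue date, take the front-page PDF link as the cover,
    # then walk the section tabs, inserting a What's News pseudo-section
    # after the Front Section.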
    def parse_index(self):
        # return self.test_wsj_index()
        root = self.index_to_soup(self.wsj_itp_page, as_tree=True)
        for span in CSSSelect('span.date-date')(root):
            if span.text:
                self.timefmt = span.text
                break
        for a in CSSSelect('div.itpSectionHeaderPdf a[href]')(root):
            self.cover_url = a.get('href')
            break

        feeds = []
        for a in CSSSelect('div.itpHeader ul.tab a[href]')(root):
            if '/itp/' not in a.get('href', ''):
                continue
            pageone = a.get('href').endswith('pageone')
            if pageone:
                title = 'Front Section'
                url = self.abs_wsj_url(a.get('href'))
                self.wsj_add_feed(feeds, title, url)
                title = "What's News"
                url = url.replace('pageone', 'whatsnews')
                self.wsj_add_feed(feeds, title, url)
            else:
                title = self.tag_to_string(a)
                url = self.abs_wsj_url(a.get('href'))
                self.wsj_add_feed(feeds, title, url)
        return feeds
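
    # Debug helper: re-enable the first line of parse_index() to build the
    # e-book from these three fixed articles instead of scraping the site.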
    def test_wsj_index(self):
        return [
            ('Testing', [
                {'title': 'Article One',
                 'url': 'http://online.wsj.com/articles/the-end-of-the-impulse-shopper-1416872108'},  # noqa
                {'title': 'Article Two',
                 'url': 'http://online.wsj.com/articles/ferguson-police-officer-not-charged-in-black-teens-shooting-1416882438'},  # noqa
                {'title': 'Article Three',
                 'url': 'http://online.wsj.com/article/SB10634695869867284248804580297251334393676.html'},  # noqa
            ]),
        ]
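

# To try this recipe from the command line (assuming a local calibre
# install; the credentials below are placeholders):
#
#   ebook-convert wsj.recipe wsj.epub --username you@example.com --password secret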