mirror of https://github.com/kovidgoyal/calibre.git
synced 2025-07-09 03:04:10 -04:00

Update WSJ

This commit is contained in:
  parent 3108dfbfa4
  commit 9707a95e88
@@ -22,6 +22,7 @@ class barrons(BasicNewsRecipe):
     ignore_duplicate_articles = {'url'}
     masthead_url = 'https://www.barrons.com/asset/barrons/images/barrons-logo.png'
     delay = 1
+    resolve_internal_links = True
 
     extra_css = '''
         img {display:block; margin:0 auto;}
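
The only functional change to the Barron's recipe is the new resolve_internal_links flag; in calibre's BasicNewsRecipe this asks the downloader to rewrite links that point at other downloaded articles so they resolve inside the generated book instead of going back to the website. A minimal sketch of how the flag sits in a recipe (the class and feed here are illustrative, not part of this commit):

    from calibre.web.feeds.news import BasicNewsRecipe

    class Example(BasicNewsRecipe):  # hypothetical recipe, for illustration only
        title = 'Example'
        ignore_duplicate_articles = {'url'}
        resolve_internal_links = True  # links between downloaded articles stay in-book
        feeds = [('News', 'https://example.com/feed.xml')]
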
@@ -40,7 +40,7 @@ class TheHindu(BasicNewsRecipe):
 
     extra_css = '''
         .caption {font-size:small; text-align:center;}
-        .author, .dateLine {font-size:small; font-weight:bold;}
+        .author, .dateLine {font-size:small;}
         .subhead, .subhead_lead, .bold {font-weight:bold;}
         img {display:block; margin:0 auto;}
         .italic, .sub-title {font-style:italic; color:#202020;}
@@ -117,7 +117,7 @@ class TheHindu(BasicNewsRecipe):
             ans2 = self.hindu_parse_index(soup)
             if ans2:
                 return ans + ans2
-            self.log('\tMagazine not Found')
+            self.log('\nMagazine not Found')
             return ans
         return ans
 
@@ -1,316 +1,143 @@
-#!/usr/bin/env python
-# vim:fileencoding=utf-8
-# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
-
-from __future__ import absolute_import, division, print_function, unicode_literals
-
 import json
-import random
+import re
 import time
-from base64 import standard_b64encode
-from datetime import date, timedelta
+from datetime import datetime, timedelta
+from html5_parser import parse
 
-from calibre.ptempfile import PersistentTemporaryFile
-from calibre.web.feeds.news import BasicNewsRecipe
-from css_selectors import Select
-from mechanize import Request
+from calibre.ebooks.BeautifulSoup import BeautifulSoup
+from calibre.web.feeds.news import BasicNewsRecipe, classes
 
-try:
-    import urllib.parse as urlparse
-except ImportError:
-    import urlparse
-try:
-    from urllib.parse import quote
-except ImportError:
-    from urllib import quote
-
-needs_subscription = 'optional'
 
 
 class WSJ(BasicNewsRecipe):
-    if needs_subscription:
-        title = 'The Wall Street Journal'
-    else:
-        title = 'The Wall Street Journal (free)'
-    __author__ = 'Kovid Goyal'
-    description = 'News and current affairs'
-    language = 'en'
+    title = 'The Wall Street Journal'
+    __author__ = 'unkn0wn'
+    description = (
+        'The Wall Street Journal is your source for breaking news, analysis and insights from the U.S. and '
+        'around the world, the world\'s leading business and finance publication.'
+    )
+    language = 'en_US'
     masthead_url = 'https://s.wsj.net/media/wsj_amp_masthead_lg.png'
-    compress_news_images = True
-    compress_news_images_auto_size = 7
-    timefmt = ' [%a, %b %d, %Y]'
+    encoding = 'utf-8'
+    no_javascript = True
     no_stylesheets = True
-    ignore_duplicate_articles = {'url'}
-    remove_attributes = ['style','height','width']
-    needs_subscription = needs_subscription
-    WSJ_ITP = 'https://www.wsj.com/print-edition/today'
-    delay = 1
+    remove_attributes = ['style', 'height', 'width']
 
-    storage = []
-
     extra_css = '''
-        #big-top-caption { font-size:small; text-align:center; }
-        [data-type:"tagline"] { font-style:italic; color:#202020; }
-        .auth, time { font-size:small; }
-        .sub, em, i { color: #202020; }
+        #subhed, em { font-style:italic; color:#202020; }
+        #byline, #time-to-read, #orig-pubdate-string, .article-byline, time, #flashline { font-size:small; }
+        .figc { font-size:small; text-align:center; }
+        img {display:block; margin:0 auto;}
     '''
 
-    def get_cover_url(self):
-        soup = self.index_to_soup('https://www.frontpages.com/the-wall-street-journal/')
-        return 'https://www.frontpages.com' + soup.find('img', attrs={'id':'giornale-img'})['src']
-
-    keep_only_tags = [
-        dict(name=['h1', 'h2']),
-        dict(attrs={'aria-describedby':'big-top-caption'}),
-        dict(attrs={'id':'big-top-caption'}),
-        dict(name='article', attrs={'style':lambda x: x and 'article-body' in x})
-    ]
-
     remove_tags = [
-        dict(attrs={'data-type':['inset', 'video']}),
-        dict(attrs={'data-testid':'ad-container'}),
-        dict(attrs={'data-spotim-app':'conversation'}),
-        dict(name=['button', 'svg', 'old-script', 'video']),
-        dict(attrs={'aria-label':[
-            'Sponsored Offers', 'Listen To Article', 'What to Read Next', 'Utility Bar',
-            'Conversation', 'List of Comments', 'Comment', 'JR More Articles'
+        dict(name=['nav', 'svg', 'iframe', 'source']),
+        dict(name='panel', attrs={'id':'metadata'}),
+        dict(name='panel', attrs={'layout':'inline'}),
+        dict(name='panel', attrs={'embed':'inner-article-ad'}),
+        classes('lamrelated-articles-inset-panel'),
+        dict(name='p', attrs={'id':[
+            'keywords', 'orig-pubdate-number', 'type', 'is-custom-flashline', 'grouphed', 'author-ids',
+            'body-extract', 'category', 'sub-category', 'socialhed', 'summary', 'deckline', 'article-flashline'
         ]}),
-        dict(attrs={'data-spot-im-class':['message-text', 'messages-list', 'message-view', 'conversation-root']}),
-        dict(attrs={'id':lambda x: x and x.startswith(
-            ('comments_sector', 'wrapper-INLINE', 'audio-tag-inner-audio-', 'article-comments-tool')
-        )}),
-        dict(name='div', attrs={'data-message-depth':True})
+        dict(attrs={'data-inset_type':'dynamic'}),
+        dict(attrs={'data-block':'dynamic-inset'})
     ]
 
-    articles_are_obfuscated = True
-    def get_obfuscated_article(self, url):
-        from calibre.scraper.simple import read_url
-        br = self.get_browser()
-        br.set_handle_redirect(False)
-        try:
-            br.open(url)
-        except Exception as e:
-            hdrs_location = e.hdrs.get('location')
-            if hdrs_location:
-                url = e.hdrs.get('location')
-        raw = read_url(self.storage, 'https://archive.is/latest/' + url)
-        pt = PersistentTemporaryFile('.html')
-        pt.write(raw.encode('utf-8'))
-        pt.close()
-        return pt.name
+    remove_tags_before = [
+        dict(name='p', attrs={'id':'orig-pubdate-string'})
+    ]
+    remove_tags_after = [
+        dict(name='article')
+    ]
 
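The rewritten remove_tags targets the markup served by the content API (panel elements and metadata paragraphs selected by id) rather than website class names, and it uses the classes() helper imported above. A rough standalone equivalent of that helper, assuming it behaves the way calibre's version does (match any tag whose class attribute intersects the given names):

    # approximation of calibre.web.feeds.news.classes, for illustration only
    def classes(cls):
        q = frozenset(cls.split(' '))
        return dict(attrs={
            'class': lambda x: x and frozenset(x.split()).intersection(q)})

    rule = classes('lamrelated-articles-inset-panel')  # usable inside remove_tags
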
     def preprocess_html(self, soup):
-        for img in soup.findAll('img', attrs={'old-src':True}):
-            img['src'] = img['old-src']
-        for p in soup.findAll('div', attrs={'data-type':['paragraph', 'image']}):
-            p.name = 'p'
-        for a in soup.findAll('a', href=True):
-            a['href'] = 'http' + a['href'].split('http')[-1]
-        for figc in soup.findAll('figcaption'):
-            figc['id'] = 'big-top-caption'
-        if name:= soup.find('h2', attrs={'itemprop':'name'}):
-            name.extract()
+        jpml = soup.find('jpml')
+        if jpml:
+            jpml.name = 'article'
+        h1 = soup.find('p', attrs={'id':'headline'})
+        if h1:
+            h1.name = 'h1'
         for h2 in soup.findAll('h2'):
-            if self.tag_to_string(h2).startswith(('What to Read Next', 'Conversation')):
-                h2.extract()
-            h2.name = 'h3'
-            h2['class'] = 'sub'
-        for ph in soup.findAll('a', attrs={'data-type':['phrase', 'link']}):
-            if div := ph.findParent('div'):
-                div.name = 'span'
-        for auth in soup.findAll('a', attrs={'aria-label': lambda x: x and x.startswith('Author page')}):
-            if div := auth.find_previous_sibling('div'):
-                div.name = 'span'
-            if parent := auth.findParent('div'):
-                parent['class'] = 'auth'
-        for x in soup.findAll('ufc-follow-author-widget'):
-            if y := x.findParent('div'):
-                y.extract()
+            h2.name = 'h4'
+        dt = soup.find('p', attrs={'id':'orig-pubdate-string'})
+        read = soup.find('p', attrs={'id':'time-to-read'})
+        byl = soup.find('p', attrs={'id':'byline'})
+        if dt and byl and read:
+            dt.name = read.name = byl.name = 'div'
+            byl.insert(0, dt)
+            byl.insert(0, read)
+        url = soup.find('p', attrs={'id':'share-link'})
+        if url:
+            url['title'] = self.tag_to_string(url).strip()
+            url.string = ''
+        for img in soup.findAll('img', attrs={'location':True}):
+            img['src'] = img['location']
+        for figc in soup.findAll('figcaption'):
+            figc['class'] = 'figc'
+        col = soup.find('div', text = re.compile('What to Read Next'))
+        if col:
+            div = col.findParent('div')
+            if div:
+                div.extract()
         return soup
 
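preprocess_html now massages the JPML-style documents the API serves instead of fighting website markup: the <jpml> wrapper becomes <article>, the headline paragraph becomes <h1>, and the date, reading-time, and byline paragraphs are merged into one block. A self-contained sketch of that byline reassembly, using bs4 directly on synthetic input (inside calibre the soup is supplied by the framework):

    from bs4 import BeautifulSoup

    raw = ('<p id="orig-pubdate-string">Jan. 1, 2025</p>'
           '<p id="time-to-read">4 min read</p>'
           '<p id="byline">By A. Reporter</p>')
    soup = BeautifulSoup(raw, 'html.parser')
    dt = soup.find('p', attrs={'id': 'orig-pubdate-string'})
    read = soup.find('p', attrs={'id': 'time-to-read'})
    byl = soup.find('p', attrs={'id': 'byline'})
    dt.name = read.name = byl.name = 'div'  # renaming keeps the contents, changes the tag
    byl.insert(0, dt)    # inserting an existing node moves it, so the date and
    byl.insert(0, read)  # reading time end up grouped at the head of the byline
    print(soup)
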
-    # login {{{
-    def get_browser_for_wsj(self, *a, **kw):
-        br = BasicNewsRecipe.get_browser(self, *a, **kw)
-        br.set_cookie('wsjregion', 'na,us', '.wsj.com')
-        br.set_cookie('gdprApplies', 'false', '.wsj.com')
-        br.set_cookie('ccpaApplies', 'false', '.wsj.com')
+    def get_browser(self, *args, **kw):
+        kw['user_agent'] = 'okhttp/4.10.0'
+        br = BasicNewsRecipe.get_browser(self, *args, **kw)
+        br.addheaders += [
+            ('Accept-Encoding', 'gzip'),
+            ('cache-control', 'no-cache'),
+            ('x-api-key', 'eb2408cd27f8913d421fa3d5c3d07ccf034cb448')
+        ]
         return br
 
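Instead of setting region and consent cookies for wsj.com, get_browser now impersonates the WSJ mobile app: an okhttp user agent plus an x-api-key header. If you want to poke at the endpoint outside calibre, a smoke test along these lines should print the catalog items that parse_index iterates over (which of the headers the API actually enforces is an assumption on my part):

    import json
    from urllib.request import Request, urlopen

    req = Request(
        'https://bartender.mobile.dowjones.io/catalogs/v1/wsj/us/catalog.json',
        headers={
            'User-Agent': 'okhttp/4.10.0',
            'x-api-key': 'eb2408cd27f8913d421fa3d5c3d07ccf034cb448',
        },
    )
    # Accept-Encoding is left at identity so json can read the body directly
    with urlopen(req) as f:
        catalog = json.load(f)
    print([itm.get('type') for itm in catalog['items']])  # parse_index wants the 'ITP' item
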
-    if False and needs_subscription:  # disabled as we currently use archive.is
-        def get_browser(self, *a, **kw):
-            from pprint import pprint
-            pprint
-            # To understand the login logic read app-min.js from
-            # https://sso.accounts.dowjones.com/login
-            itp = quote(self.WSJ_ITP, safe='')
-            start_url = 'https://accounts.wsj.com/login?target=' + itp
-            self.log('Starting login process at', start_url)
-            br = self.get_browser_for_wsj(*a, **kw)
-            # br.set_debug_http(True)
-            res = br.open(start_url)
-            sso_url = res.geturl()
-            query = urlparse.parse_qs(urlparse.urlparse(sso_url).query)
-            query = {k:v[0] for k, v in query.items()}
-            # pprint(query)
-            request_query = {
-                'username': self.username,
-                'password': self.password,
-                'client_id': query['client'],
-                'tenant': 'sso',
-                '_intstate': 'deprecated',
-                'connection': 'DJldap',
-                'headers': {
-                    'X-REMOTE-USER': self.username,
-                    'x-_dj-_client__id': query['client'],
-                },
-            }
-            for cookie in br.cookiejar:
-                if cookie.name in ('_csrf', 'csrf'):
-                    request_query['_csrf'] = cookie.value
-            for k in 'scope connection nonce state ui_locales ns mars protocol redirect_uri'.split():
-                if k in query:
-                    request_query[k] = query[k]
-            # pprint(request_query)
-            login_url = 'https://sso.accounts.dowjones.com/usernamepassword/login'
-            # you can get the version below from lib-min.js
-            # search for: "\d+\.\d+\.\d+"
-            # This might need to be updated in the future
-            auth0_client = json.dumps({"name": "auth0.js-ulp", "version": "9.11.3"})
-            if not isinstance(auth0_client, bytes):
-                auth0_client = auth0_client.encode('utf-8')
-            auth0_client = standard_b64encode(auth0_client)
-            if isinstance(auth0_client, bytes):
-                auth0_client = auth0_client.decode('ascii')
-            rq = Request(login_url, headers={
-                'Accept': 'text/html',
-                'Accept-Language': 'en-US,en;q=0.8',
-                'Origin': 'https://sso.accounts.dowjones.com',
-                'Auth0-Client': auth0_client.rstrip('='),
-                'X-HTTP-Method-Override': 'POST',
-                'X-Requested-With': 'XMLHttpRequest',
-                'X-Remote-User': self.username,
-                'x-dj-client_id': request_query['client_id'],
-            }, data=request_query)
-            self.log('Sending login request...')
-            try:
-                res = br.open(rq)
-            except Exception as err:
-                if hasattr(err, 'read'):
-                    raise Exception('Login request failed with error: {} and body: {}'.format(err, err.read().decode('utf-8', 'replace')))
-                raise
-            if res.code != 200:
-                raise ValueError('Failed to login, check your username and password')
-            br.select_form(nr=0)
-            self.log('Performing login callback...')
-            res = br.submit()
-            self.log('Print edition resolved url:', res.geturl())
-            self.wsj_itp_page = raw = res.read()
-            if b'/logout' not in raw:
-                raise ValueError(
-                    'Failed to login (callback URL failed), check username and password')
-            return br
-    else:
-        def get_browser(self, *a, **kw):
-            br = self.get_browser_for_wsj(*a, **kw)
-            res = br.open(self.WSJ_ITP)
-            url = res.geturl()
-            if '/20210913/' in url:
-                today = date.today()
-                q = today.isoformat().replace('-', '')
-                try:
-                    res = br.open(url.replace('/20210913/', '/' + q + '/'))
-                except Exception:
-                    today -= timedelta(days=1)
-                    q = today.isoformat().replace('-', '')
-                    res = br.open(url.replace('/20210913/', '/' + q + '/'))
-            self.log('Print edition resolved url:', res.geturl())
-            self.wsj_itp_page = res.read()
-            return br
-    # }}}
-
-    def abs_wsj_url(self, href, modify_query=True):
-        if not href.startswith('http'):
-            href = 'https://www.wsj.com' + href
-        if modify_query:
-            href = href
-        return href.split('?')[0]
-
-    def wsj_find_articles(self, url, ahed=False):
-        root = self.index_to_soup(url, as_tree=True)
-        CSSSelect = Select(root)
-        articles = []
-        for container in root.xpath('descendant::div[contains(@class, "WSJTheme--list-item--")]'):
-            heading = next(CSSSelect('h2, h3', container))
-            a = next(CSSSelect('a', heading))
-            title = self.tag_to_string(a)
-            url = self.abs_wsj_url(a.get('href'))
-            desc = ''
-            for p in container.xpath('descendant::p[contains(@class, "WSJTheme--description--")]'):
-                q = self.tag_to_string(p)
-                if 'Subscriber Content' in q:
-                    continue
-                desc += q
-                break
-
-            articles.append({'title': title, 'url': url,
-                             'description': desc, 'date': ''})
-
-            self.log('\tFound article:', title)
-            self.log('\t\t', desc + " " + url)
-            if self.test and len(articles) >= self.test[1]:
-                break
-
-        return articles
-
-    def wsj_add_feed(self, feeds, title, url):
-        try:
-            for i in range(5):
-                articles = self.wsj_find_articles(url)
-                if articles:
-                    break
-                else:
-                    pause = random.choice((1, 1.5, 2, 2.5))
-                    self.log.warn('No articles found in', url, 'retrying after', pause, 'seconds')
-                    time.sleep(pause)
-        except Exception:
-            self.log.exception('Failed to parse section:', title)
-            articles = []
-        if articles:
-            feeds.append((title, articles))
-        else:
-            self.log.warn('No articles found in', url)
-
     def parse_index(self):
-        # return self.test_wsj_index()
-        root = self.index_to_soup(self.wsj_itp_page, as_tree=True)
-        CSSSelect = Select(root)
-        # from calibre.utils.ipython import ipython
-        # ipython({'root': root, 'CSSSelect': CSSSelect, 'raw': self.wsj_itp_page})
-        for inp in CSSSelect('.DayPickerInput > input'):
-            if inp.get('placeholder'):
-                self.timefmt = inp.get('placeholder')
+        index = 'https://bartender.mobile.dowjones.io'
+        catalog = json.loads(self.index_to_soup(index + '/catalogs/v1/wsj/us/catalog.json', raw=True))
+        for itm in catalog['items']:
+            if itm['type'] == 'ITP':
+                key = itm['key']
+                manifest = itm['manifest']
+                dt = datetime.fromisoformat(itm['date'][:-1]) + timedelta(seconds=time.timezone)
+                dt = dt.strftime('%b %d, %Y')
+                self.log(dt)
+                self.timefmt = ' [' + dt + ']'
                 break
 
         feeds = []
-        for container in root.xpath('descendant::*[contains(@class, "WSJTheme--top-menu-item--")]'):
-            for a in container.xpath('descendant::a[contains(@class, "WSJTheme--section-link--")]'):
-                title = self.tag_to_string(a).capitalize().strip().replace('U.s.', 'U.S.')
-                if not title:
-                    continue
-                url = self.abs_wsj_url(a.get('href'), modify_query=False)
-                self.log('Found section:', title, 'at', url)
-                self.wsj_add_feed(feeds, title, url)
-                if self.test and len(feeds) >= self.test[0]:
-                    break
+        manif = json.loads(self.index_to_soup(index + manifest, raw=True))
+        for itm in manif['items']:
+            for k, v in itm.items():
+                if '-pages_' in k:
+                    section = k.split('-pages_')[0].replace('_', ' ')
+                    self.log(section)
+                    articles = []
+                    sec_parse = json.loads(self.index_to_soup(index + v, raw=True))
+                    data = sec_parse['articles']
+                    for art in data:
+                        title = data[art]['headline']
+                        desc = data[art]['summary']
+                        if 'articleWebViewLink' in data[art]:
+                            url = data[art]['articleWebViewLink']
+                        else:
+                            url = index + '/contents/v1/wsj/us/' + key + '/' + data[art]['filename']
+                        self.log(' ', title, '\n\t', desc)
+                        articles.append({'title': title, 'description':desc, 'url': url})
+                    feeds.append((section, articles))
         return feeds
 
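Two details in the new parse_index are easy to miss. First, the catalog date arrives as an ISO timestamp with a trailing 'Z', which datetime.fromisoformat() only accepts from Python 3.11 on, hence the defensive [:-1] slice and the manual offset via time.timezone; in isolation:

    import time
    from datetime import datetime, timedelta

    stamp = '2025-01-15T10:30:00Z'  # illustrative value shaped like itm['date'] above
    dt = datetime.fromisoformat(stamp[:-1])  # strip the 'Z' that older fromisoformat() rejects
    dt += timedelta(seconds=time.timezone)   # shift by the local UTC offset, as the recipe does
    print(dt.strftime('%b %d, %Y'))

Second, the section walk relies purely on key names: each manifest item maps keys containing '-pages_' to a per-section JSON whose 'articles' dict supplies headline, summary, and either a web-view link or a filename that is turned into a bartender content URL.
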
-    def test_wsj_index(self):
-        return [
-            ('Testing', [
-                {'title': 'Subscriber Article',
-                 'url': self.abs_wsj_url('https://www.wsj.com/articles/remington-gun-call-of-duty-video-game-93059a66')},
-            ]),
-        ]
+    def preprocess_raw_html(self, raw, url):
+        if '/webview/' not in url:
+            root = parse(raw)
+            for x in root.xpath('//image'):
+                x.tag = 'img'
+            return BeautifulSoup(raw).prettify()
+        return raw
 
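preprocess_raw_html converts the API's <image> elements to <img> by reassigning the element tag on the parsed tree. The same move on synthetic input, with plain lxml (html5_parser's parse() also returns lxml elements):

    from lxml import etree

    root = etree.fromstring('<article><image src="x.jpg"/></article>')
    for x in root.xpath('//image'):
        x.tag = 'img'  # lxml allows renaming an element in place
    print(etree.tostring(root).decode())  # <article><img src="x.jpg"/></article>
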
+    def populate_article_metadata(self, article, soup, first):
+        lnk = soup.find('p', attrs={'id':'share-link'})
+        if lnk:
+            article.url = lnk['title']
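
populate_article_metadata closes the loop opened in preprocess_html: the canonical page URL stashed there in the title attribute of p#share-link is read back into article.url, so entries in the finished book point at wsj.com rather than at the bartender content endpoints.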