Update Economist
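Switch the recipe to fetching content from the weekly-edition archive. A new module-level use_archive flag (default True) selects a parse_index that queries the Economist GraphQL gateway for the latest edition, writes each article's JSON to a temporary file, and renders the HTML locally in preprocess_raw_html. Timestamps now come from dateModified/datePublished, the download delay drops to 0 in archive mode, print_version is removed, and a get_login_cookies stub is added. The old page-scraping paths are kept behind the flag.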

Kovid Goyal 2024-04-29 07:31:18 +05:30
parent a067f1d519
commit 9e99606c64
2 changed files with 404 additions and 156 deletions


@@ -2,17 +2,22 @@
 # License: GPLv3 Copyright: 2008, Kovid Goyal <kovid at kovidgoyal.net>
 import json
+import time
 from collections import defaultdict
+from datetime import datetime, timedelta
+from urllib.parse import quote, urlencode
 
 from calibre import replace_entities
 from calibre.ebooks.BeautifulSoup import NavigableString, Tag
+from calibre.ptempfile import PersistentTemporaryFile
 from calibre.utils.date import parse_only_date
 from calibre.web.feeds.news import BasicNewsRecipe
 from html5_parser import parse
 from lxml import etree
 
-# For past editions, set date to, for example, '2020-11-28'
+# For past editions, set date to, for example, '2020-11-28'. Currently not functional.
 edition_date = None
+use_archive = True
 
 
 def E(parent, name, text='', **attrs):
@@ -52,34 +57,63 @@ class JSONHasNoContent(ValueError):
     pass
 
-def load_article_from_json(raw, root):
-    # open('/t/raw.json', 'w').write(raw)
-    try:
-        data = json.loads(raw)['props']['pageProps']['content']
-    except KeyError as e:
-        raise JSONHasNoContent(e)
-    if isinstance(data, list):
-        data = data[0]
-    body = root.xpath('//body')[0]
-    for child in tuple(body):
-        body.remove(child)
-    article = E(body, 'article')
-    E(article, 'div', replace_entities(data['subheadline']) , style='color: red; font-size:small; font-weight:bold;')
-    E(article, 'h1', replace_entities(data['headline']))
-    E(article, 'div', replace_entities(data['description']), style='font-style: italic; color:#202020;')
-    if data['dateline'] is None:
-        E(article, 'p', (data['datePublishedString'] or ''), style='color: gray; font-size:small;')
-    else:
-        E(article, 'p', (data['datePublishedString'] or '') + ' | ' + (data['dateline']), style='color: gray; font-size:small;')
-    main_image_url = safe_dict(data, 'image', 'main', 'url').get('canonical')
-    if main_image_url:
-        div = E(article, 'div')
-        try:
-            E(div, 'img', src=main_image_url)
-        except Exception:
-            pass
-    for node in data.get('text') or ():
-        process_node(node, article)
+if use_archive:
+    def load_article_from_json(raw, root):
+        # open('/t/raw.json', 'w').write(raw)
+        data = json.loads(raw)
+        body = root.xpath('//body')[0]
+        article = E(body, 'article')
+        E(article, 'div', data['flyTitle'] , style='color: red; font-size:small; font-weight:bold;')
+        E(article, 'h1', data['title'], title=safe_dict(data, "url", "canonical") or '')
+        E(article, 'div', data['rubric'], style='font-style: italic; color:#202020;')
+        try:
+            date = data['dateModified']
+        except Exception:
+            date = data['datePublished']
+        dt = datetime.fromisoformat(date[:-1]) + timedelta(seconds=time.timezone)
+        dt = dt.strftime('%b %d, %Y, %I:%M %p')
+        if data['dateline'] is None:
+            E(article, 'p', dt, style='color: gray; font-size:small;')
+        else:
+            E(article, 'p', dt + ' | ' + (data['dateline']), style='color: gray; font-size:small;')
+        main_image_url = safe_dict(data, 'image', 'main', 'url').get('canonical')
+        if main_image_url:
+            div = E(article, 'div')
+            try:
+                E(div, 'img', src=main_image_url)
+            except Exception:
+                pass
+        for node in data.get('text') or ():
+            process_node(node, article)
+else:
+    def load_article_from_json(raw, root):
+        # open('/t/raw.json', 'w').write(raw)
+        try:
+            data = json.loads(raw)['props']['pageProps']['content']
+        except KeyError as e:
+            raise JSONHasNoContent(e)
+        if isinstance(data, list):
+            data = data[0]
+        body = root.xpath('//body')[0]
+        for child in tuple(body):
+            body.remove(child)
+        article = E(body, 'article')
+        E(article, 'div', replace_entities(data['subheadline']) , style='color: red; font-size:small; font-weight:bold;')
+        E(article, 'h1', replace_entities(data['headline']))
+        E(article, 'div', replace_entities(data['description']), style='font-style: italic; color:#202020;')
+        if data['dateline'] is None:
+            E(article, 'p', (data['datePublishedString'] or ''), style='color: gray; font-size:small;')
+        else:
+            E(article, 'p', (data['datePublishedString'] or '') + ' | ' + (data['dateline']), style='color: gray; font-size:small;')
+        main_image_url = safe_dict(data, 'image', 'main', 'url').get('canonical')
+        if main_image_url:
+            div = E(article, 'div')
+            try:
+                E(div, 'img', src=main_image_url)
+            except Exception:
+                pass
+        for node in data.get('text') or ():
+            process_node(node, article)
 
 
 def cleanup_html_article(root):
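A note on the timestamp handling in the archive branch above: datetime.fromisoformat() on the Python versions current at the time rejects the trailing 'Z' of the feed's UTC timestamps, which is why the code slices it off with date[:-1] before parsing, then offsets the naive result by time.timezone seconds before formatting. A minimal standalone sketch of that parse-and-format step, with a made-up timestamp:

    import time
    from datetime import datetime, timedelta

    # Made-up sample; the recipe reads data['dateModified'] or
    # data['datePublished'] from the article JSON.
    iso_utc = '2024-04-25T14:30:00Z'

    # fromisoformat() did not accept a trailing 'Z' before Python 3.11,
    # so strip it, then apply the same time.timezone offset the recipe uses.
    dt = datetime.fromisoformat(iso_utc[:-1]) + timedelta(seconds=time.timezone)
    print(dt.strftime('%b %d, %Y, %I:%M %p'))  # 'Apr 25, 2024, 02:30 PM' when time.timezone == 0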
@@ -163,7 +197,7 @@ class Economist(BasicNewsRecipe):
     remove_attributes = ['data-reactid', 'width', 'height']
     # economist.com has started throttling after about 60% of the total has
     # downloaded with connection reset by peer (104) errors.
-    delay = 1
+    delay = 0 if use_archive else 1
 
     needs_subscription = False
@@ -183,19 +217,141 @@ class Economist(BasicNewsRecipe):
         br.addheaders += [('Accept-Language', 'en-GB,en-US;q=0.9,en;q=0.8')]
         return br
 
+    def publication_date(self):
+        if edition_date:
+            return parse_only_date(edition_date, as_utc=False)
+        url = self.browser.open("https://www.economist.com/printedition").geturl()
+        return parse_only_date(url.split("/")[-1], as_utc=False)
+
+    def economist_test_article(self):
+        return [('Articles', [{'title':'test',
+            'url':'https://www.economist.com/the-americas/2024/04/14/elon-musk-is-feuding-with-brazils-powerful-supreme-court'
+        }])]
+
+    def economist_return_index(self, ans):
+        if not ans:
+            raise NoArticles(
+                'Could not find any articles, either the '
+                'economist.com server is having trouble and you should '
+                'try later or the website format has changed and the '
+                'recipe needs to be updated.'
+            )
+        return ans
+
+    if use_archive:
+        def parse_index(self):
+            # return self.economist_test_article()
+            soup = self.index_to_soup('https://www.economist.com/weeklyedition/archive')
+            script_tag = soup.find("script", id="__NEXT_DATA__")
+            if script_tag is None:
+                raise ValueError('No script tag with JSON data found in the weeklyedition archive')
+            data = json.loads(script_tag.string)
+            content_id = data['props']['pageProps']['content']['id'].split('/')[-1]
+            query = {
+                'query': 'query LatestWeeklyAutoEditionQuery($ref:String!){canonical(ref:$ref){hasPart(from:0 size:1 sort:"datePublished:desc"){parts{...WeeklyEditionFragment __typename}__typename}__typename}}fragment WeeklyEditionFragment on Content{id type datePublished image{...ImageCoverFragment __typename}url{canonical __typename}hasPart(size:100 sort:"publication.context.position"){parts{...ArticleFragment __typename}__typename}__typename}fragment ArticleFragment on Content{ad{grapeshot{channels{name __typename}__typename}__typename}articleSection{internal{id title:headline __typename}__typename}audio{main{id duration(format:"seconds")source:channel{id __typename}url{canonical __typename}__typename}__typename}byline dateline dateModified datePublished dateRevised flyTitle:subheadline id image{...ImageInlineFragment ...ImageMainFragment ...ImagePromoFragment __typename}print{title:headline flyTitle:subheadline rubric:description section{id title:headline __typename}__typename}publication{id tegID title:headline flyTitle:subheadline datePublished regionsAllowed url{canonical __typename}__typename}rubric:description source:channel{id __typename}tegID text(format:"json")title:headline type url{canonical __typename}topic contentIdentity{forceAppWebview mediaType articleType __typename}__typename}fragment ImageInlineFragment on Media{inline{url{canonical __typename}width height __typename}__typename}fragment ImageMainFragment on Media{main{url{canonical __typename}width height __typename}__typename}fragment ImagePromoFragment on Media{promo{url{canonical __typename}id width height __typename}__typename}fragment ImageCoverFragment on Media{cover{headline width height url{canonical __typename}regionsAllowed __typename}__typename}',  # noqa
+                'operationName': 'LatestWeeklyAutoEditionQuery',
+                'variables': '{{"ref":"/content/{}"}}'.format(content_id),
+            }
+            url = 'https://cp2-graphql-gateway.p.aws.economist.com/graphql?' + urlencode(query, safe='()!', quote_via=quote)
+            raw = self.index_to_soup(url, raw=True)
+            ans = self.economist_parse_index(raw)
+            return self.economist_return_index(ans)
+
+        def economist_parse_index(self, raw):
+            data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
+            self.description = data['image']['cover'][0]['headline']
+            dt = datetime.fromisoformat(data['datePublished'][:-1]) + timedelta(seconds=time.timezone)
+            dt = dt.strftime('%b %d, %Y')
+            self.timefmt = ' [' + dt + ']'
+            self.cover_url = data['image']['cover'][0]['url']['canonical']
+            self.log('Got cover:', self.cover_url)
+
+            feeds_dict = defaultdict(list)
+            for part in safe_dict(data, "hasPart", "parts"):
+                try:
+                    section = part['articleSection']['internal'][0]['title']
+                except Exception:
+                    section = safe_dict(part, 'print', 'section', 'title') or 'section'
+                if section not in feeds_dict:
+                    self.log(section)
+                title = safe_dict(part, "title")
+                desc = safe_dict(part, "rubric") or ''
+                sub = safe_dict(part, "flyTitle") or ''
+                if sub and section != sub:
+                    desc = sub + ' :: ' + desc
+                pt = PersistentTemporaryFile('.html')
+                pt.write(json.dumps(part).encode('utf-8'))
+                pt.close()
+                url = 'file:///' + pt.name
+                feeds_dict[section].append({"title": title, "url": url, "description": desc})
+                self.log('\t', title, '\n\t', desc)
+            return [(section, articles) for section, articles in feeds_dict.items()]
+
+        def populate_article_metadata(self, article, soup, first):
+            article.url = soup.find('h1')['title']
+    else:  # Load articles from individual article pages {{{
+        def parse_index(self):
+            # return self.economist_test_article()
+            if edition_date:
+                url = 'https://www.economist.com/weeklyedition/' + edition_date
+                self.timefmt = ' [' + edition_date + ']'
+            else:
+                url = 'https://www.economist.com/weeklyedition'
+            soup = self.index_to_soup(url)
+            ans = self.economist_parse_index(soup)
+            return self.economist_return_index(ans)
+
+        def economist_parse_index(self, soup):
+            script_tag = soup.find("script", id="__NEXT_DATA__")
+            if script_tag is not None:
+                data = json.loads(script_tag.string)
+                # open('/t/raw.json', 'w').write(json.dumps(data, indent=2, sort_keys=True))
+                self.description = safe_dict(data, "props", "pageProps", "content", "image", "main", "headline")
+                self.timefmt = ' [' + safe_dict(data, "props", "pageProps", "content", "datePublishedString") + ']'
+                self.cover_url = safe_dict(data, "props", "pageProps", "content", "image", "main", "url", "canonical")
+                self.log('Got cover:', self.cover_url)
+
+                feeds_dict = defaultdict(list)
+                for part in safe_dict(data, "props", "pageProps", "content", "hasPart", "parts"):
+                    section = safe_dict(part, "print", "section", "headline") or ''
+                    title = safe_dict(part, "headline") or ''
+                    url = safe_dict(part, "url", "canonical") or ''
+                    if not section or not title or not url:
+                        continue
+                    desc = safe_dict(part, "description") or ''
+                    sub = safe_dict(part, "subheadline") or ''
+                    if sub and section != sub:
+                        desc = sub + ' :: ' + desc
+                    feeds_dict[section].append({"title": title, "url": url, "description": desc})
+                    self.log(' ', title, url, '\n ', desc)
+                return [(section, articles) for section, articles in feeds_dict.items()]
+            else:
+                return []
+    # }}}
+
     def preprocess_raw_html(self, raw, url):
         # open('/t/raw.html', 'wb').write(raw.encode('utf-8'))
-        root = parse(raw)
+        if use_archive:
+            body = '<html><body><article></article></body></html>'
+            root = parse(body)
+            load_article_from_json(raw, root)
+        else:
+            root = parse(raw)
+            script = root.xpath('//script[@id="__NEXT_DATA__"]')
+            if script:
+                try:
+                    load_article_from_json(script[0].text, root)
+                except JSONHasNoContent:
+                    cleanup_html_article(root)
+
         if '/interactive/' in url:
             return '<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1><em>' \
                     + 'This article is supposed to be read in a browser' \
                     + '</em></article></body></html>'
-        script = root.xpath('//script[@id="__NEXT_DATA__"]')
-        if script:
-            try:
-                load_article_from_json(script[0].text, root)
-            except JSONHasNoContent:
-                cleanup_html_article(root)
+
         for div in root.xpath('//div[@class="lazy-image"]'):
             noscript = list(div.iter('noscript'))
             if noscript and noscript[0].text:
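Worth spelling out the flow in the archive-mode parse_index above: it never queues real article URLs. Each article's JSON fragment from the GraphQL response is written to a PersistentTemporaryFile and queued as a file:/// URL, so preprocess_raw_html later receives that JSON as its "raw" page and builds the HTML locally via load_article_from_json. A stripped-down sketch of the handoff, using the standard library instead of calibre's helpers (function names here are illustrative, not calibre API):

    import json
    from tempfile import NamedTemporaryFile

    def queue_article(part):
        # Persist the JSON fragment; the downloader will read this file
        # back later as if it were the article page.
        with NamedTemporaryFile('wb', suffix='.html', delete=False) as pt:
            pt.write(json.dumps(part).encode('utf-8'))
        return 'file:///' + pt.name

    def render_article(raw):
        # 'raw' is the JSON written above, not HTML fetched from economist.com.
        data = json.loads(raw)
        return '<article><h1>{}</h1></article>'.format(data.get('title', ''))

    url = queue_article({'title': 'Sample article'})  # hypothetical payload
    with open(url[len('file:///'):], encoding='utf-8') as f:  # simulate the fetch
        print(render_article(f.read()))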
@@ -224,17 +380,8 @@ class Economist(BasicNewsRecipe):
             x.set('style', 'color:#404040;')
         raw = etree.tostring(root, encoding='unicode')
         return raw
 
-    def publication_date(self):
-        if edition_date:
-            return parse_only_date(edition_date, as_utc=False)
-        url = self.browser.open("https://www.economist.com/printedition").geturl()
-        return parse_only_date(url.split("/")[-1], as_utc=False)
-
-    def parse_index(self):
-        # return [('Articles', [{'title':'test',
-        #     'url':'https://www.economist.com/the-americas/2024/04/14/elon-musk-is-feuding-with-brazils-powerful-supreme-court'
-        # }])]
+    def parse_index_from_printedition(self):
+        # return self.economist_test_article()
         if edition_date:
             url = 'https://www.economist.com/weeklyedition/' + edition_date
             self.timefmt = ' [' + edition_date + ']'
@@ -261,38 +408,6 @@ class Economist(BasicNewsRecipe):
             )
         return ans
 
-    def economist_parse_index(self, soup):
-        script_tag = soup.find("script", id="__NEXT_DATA__")
-        if script_tag is not None:
-            data = json.loads(script_tag.string)
-            # open('/t/raw.json', 'w').write(json.dumps(data, indent=2, sort_keys=True))
-            self.description = safe_dict(data, "props", "pageProps", "content", "image", "main", "headline")
-            self.timefmt = ' [' + safe_dict(data, "props", "pageProps", "content", "datePublishedString") + ']'
-            self.cover_url = safe_dict(data, "props", "pageProps", "content", "image", "main", "url", "canonical")
-            self.log('Got cover:', self.cover_url)
-
-            feeds_dict = defaultdict(list)
-            for part in safe_dict(data, "props", "pageProps", "content", "hasPart", "parts"):
-                section = safe_dict(part, "print", "section", "headline") or ''
-                title = safe_dict(part, "headline") or ''
-                url = safe_dict(part, "url", "canonical") or ''
-                if not section or not title or not url:
-                    continue
-                desc = safe_dict(part, "description") or ''
-                sub = safe_dict(part, "subheadline") or ''
-                if sub and section != sub:
-                    desc = sub + ' :: ' + desc
-                feeds_dict[section].append({"title": title, "url": url, "description": desc})
-                self.log(' ', title, url, '\n ', desc)
-            return [(section, articles) for section, articles in feeds_dict.items()]
-        else:
-            return []
-
-    def print_version(self, url):
-        if '/the-americas/' in url or '/china/' in url:
-            return 'https://webcache.googleusercontent.com/search?q=cache:' + url
-        return url
-
     def eco_find_image_tables(self, soup):
         for x in soup.findAll('table', align=['right', 'center']):
             if len(x.findAll('font')) in (1, 2) and len(x.findAll('img')) == 1:
@@ -320,3 +435,12 @@ class Economist(BasicNewsRecipe):
         if url.endswith('/print'):
             url = url.rpartition('/')[0]
         return BasicNewsRecipe.canonicalize_internal_url(self, url, is_link=is_link)
+
+
+def get_login_cookies(username, password):
+    print(33333333333, username, password)
+
+
+if __name__ == '__main__':
+    import sys
+    get_login_cookies(sys.argv[-2], sys.argv[-1])
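The new get_login_cookies stub and the __main__ hook do nothing yet beyond echoing the credentials passed on the command line (running the recipe file directly as python economist.recipe <username> <password> just prints the sentinel number and both arguments); presumably scaffolding for login support to be filled in later.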

The second changed file carries the identical diff.