Update The World Ahead

unkn0w7n 2024-05-06 16:08:06 +05:30
parent 6d9d698ab2
commit 621d7d33c8
2 changed files with 230 additions and 120 deletions


@ -2,13 +2,20 @@
# License: GPLv3 Copyright: 2008, Kovid Goyal <kovid at kovidgoyal.net>
import json
import time
from collections import defaultdict
from datetime import datetime, timedelta
from urllib.parse import quote, urlencode
from calibre import replace_entities
from calibre.ebooks.BeautifulSoup import NavigableString, Tag
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.date import parse_only_date
from calibre.web.feeds.news import BasicNewsRecipe
from html5_parser import parse
from lxml import etree
use_archive = True
def E(parent, name, text='', **attrs):
    ans = parent.makeelement(name, **attrs)
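Note: the hunk ends mid-helper, so E() appears truncated here. In the full recipe it presumably goes on to set the element's text, attach it to its parent and return it; a minimal sketch of that assumed remainder:

def E(parent, name, text='', **attrs):
    # Create a child element, set its text, hook it into the tree, return it.
    ans = parent.makeelement(name, **attrs)
    ans.text = text
    parent.append(ans)
    return ans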
@ -47,31 +54,63 @@ class JSONHasNoContent(ValueError):
    pass


def load_article_from_json(raw, root):
    # open('/t/raw.json', 'w').write(raw)
    try:
        data = json.loads(raw)['props']['pageProps']['content']
    except KeyError as e:
        raise JSONHasNoContent(e)
    if isinstance(data, list):
        data = data[0]
    body = root.xpath('//body')[0]
    for child in tuple(body):
        body.remove(child)
    article = E(body, 'article')
    E(article, 'h4', data['subheadline'], style='color: red; margin: 0')
    E(article, 'h1', data['headline'], style='font-size: x-large')
    E(article, 'div', data['description'], style='font-style: italic; color: #202020;')
    E(article, 'div', (data['datePublishedString'] or '') + ' | ' + (data['dateline'] or ''), style='color: gray; margin: 1em')
    main_image_url = safe_dict(data, 'image', 'main', 'url').get('canonical')
    if main_image_url:
        div = E(article, 'div')
        try:
            E(div, 'img', src=main_image_url)
        except Exception:
            pass
    for node in data.get('text') or ():
        process_node(node, article)


if use_archive:
    def load_article_from_json(raw, root):
        # open('/t/raw.json', 'w').write(raw)
        data = json.loads(raw)
        body = root.xpath('//body')[0]
        article = E(body, 'article')
        E(article, 'div', data['flyTitle'], style='color: red; font-size:small; font-weight:bold;')
        E(article, 'h1', data['title'], title=safe_dict(data, "url", "canonical") or '')
        E(article, 'div', data['rubric'], style='font-style: italic; color:#202020;')
        try:
            date = data['dateModified']
        except Exception:
            date = data['datePublished']
        dt = datetime.fromisoformat(date[:-1]) + timedelta(seconds=time.timezone)
        dt = dt.strftime('%b %d, %Y, %I:%M %p')
        if data['dateline'] is None:
            E(article, 'p', dt, style='color: gray; font-size:small;')
        else:
            E(article, 'p', dt + ' | ' + (data['dateline']), style='color: gray; font-size:small;')
        main_image_url = safe_dict(data, 'image', 'main', 'url').get('canonical')
        if main_image_url:
            div = E(article, 'div')
            try:
                E(div, 'img', src=main_image_url)
            except Exception:
                pass
        for node in data.get('text') or ():
            process_node(node, article)
else:
    def load_article_from_json(raw, root):
        # open('/t/raw.json', 'w').write(raw)
        try:
            data = json.loads(raw)['props']['pageProps']['content']
        except KeyError as e:
            raise JSONHasNoContent(e)
        if isinstance(data, list):
            data = data[0]
        body = root.xpath('//body')[0]
        for child in tuple(body):
            body.remove(child)
        article = E(body, 'article')
        E(article, 'div', replace_entities(data['subheadline']), style='color: red; font-size:small; font-weight:bold;')
        E(article, 'h1', replace_entities(data['headline']))
        E(article, 'div', replace_entities(data['description']), style='font-style: italic; color:#202020;')
        if data['dateline'] is None:
            E(article, 'p', (data['datePublishedString'] or ''), style='color: gray; font-size:small;')
        else:
            E(article, 'p', (data['datePublishedString'] or '') + ' | ' + (data['dateline']), style='color: gray; font-size:small;')
        main_image_url = safe_dict(data, 'image', 'main', 'url').get('canonical')
        if main_image_url:
            div = E(article, 'div')
            try:
                E(div, 'img', src=main_image_url)
            except Exception:
                pass
        for node in data.get('text') or ():
            process_node(node, article)
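Note: safe_dict() is used throughout but its definition falls in a part of the file this diff does not show. A minimal sketch of what it is assumed to do (walk nested dicts, turning any missing key into an empty dict so chained lookups never raise):

def safe_dict(data, *names):
    # safe_dict(data, 'image', 'main', 'url') -> data['image']['main']['url'], or {}
    ans = data
    for x in names:
        ans = ans.get(x) or {}
    return ans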
def cleanup_html_article(root):
@ -116,6 +155,7 @@ class Economist(BasicNewsRecipe):
    title = 'The Economist World Ahead'
    language = 'en'
    encoding = 'utf-8'
    masthead_url = 'https://www.livemint.com/lm-img/dev/economist-logo-oneline.png'

    __author__ = "Kovid Goyal"

    description = (
@ -124,31 +164,9 @@ class Economist(BasicNewsRecipe):
        ' Best downloaded in late November.'
    )

    extra_css = '''
        .headline {font-size: x-large;}
        h2 { font-size: small; }
        h1 { font-size: medium; }
        em.Bold {font-weight:bold;font-style:normal;}
        em.Italic {font-style:italic;}
        p.xhead {font-weight:bold;}
        .pullquote {
            float: right;
            font-size: larger;
            font-weight: bold;
            font-style: italic;
            page-break-inside:avoid;
            border-bottom: 3px solid black;
            border-top: 3px solid black;
            width: 228px;
            margin: 0px 0px 10px 15px;
            padding: 7px 0px 9px;
        }
        .flytitle-and-title__flytitle {
            display: block;
            font-size: smaller;
            color: red;
        }
        em { color:#202020; }
        img {display:block; margin:0 auto;}
    '''
    oldest_article = 7.0
    resolve_internal_links = True
    remove_tags = [
@ -181,16 +199,6 @@ class Economist(BasicNewsRecipe):
    needs_subscription = False
    def get_browser(self, *args, **kwargs):
        # Needed to bypass cloudflare
        kwargs['user_agent'] = 'common_words/based'
@ -198,19 +206,163 @@ class Economist(BasicNewsRecipe):
        br.addheaders += [('Accept-Language', 'en-GB,en-US;q=0.9,en;q=0.8')]
        return br

    def economist_test_article(self):
        return [('Articles', [{'title': 'test',
            'url': 'https://www.economist.com/the-americas/2024/04/14/elon-musk-is-feuding-with-brazils-powerful-supreme-court'
        }])]

    def economist_return_index(self, ans):
        if not ans:
            raise NoArticles(
                'Could not find any articles, either the '
                'economist.com server is having trouble and you should '
                'try later or the website format has changed and the '
                'recipe needs to be updated.'
            )
        return ans

    if use_archive:
        def parse_index(self):
            # return self.economist_test_article()
            soup = self.index_to_soup('https://www.economist.com/the-world-ahead')
            script_tag = soup.find("script", id="__NEXT_DATA__")
            if script_tag is None:
                raise ValueError('No script tag with JSON data found in the weeklyedition archive')
            data = json.loads(script_tag.string)
            content_id = data['props']['pageProps']['content']['tegID'].split('/')[-1]
            query = {
                'query': 'query HubsDataQuery($id:String!$size:Int!){canonical(ref:$id){id headline description url{canonical __typename}image{ident{url{canonical __typename}width height __typename}__typename}text(mode:"hub" format:"json")hasPart(size:$size){parts{id title:headline isPartOf{context{title:headline __typename}__typename}hasPart{parts{...ArticleFragment isPartOf{id context{title:headline flyTitle:subheadline rubric:description dateline image{...ImageMainFragment ...ImagePromoFragment __typename}__typename}__typename}__typename}__typename}__typename}__typename}}fragment ArticleFragment on Content{ad{grapeshot{channels{name __typename}__typename}__typename}articleSection{internal{id title:headline __typename}__typename}audio{main{id duration(format:"seconds")source:channel{id __typename}url{canonical __typename}__typename}__typename}byline dateline dateModified datePublished dateRevised flyTitle:subheadline id image{...ImageInlineFragment ...ImageMainFragment ...ImagePromoFragment __typename}print{title:headline flyTitle:subheadline rubric:description section{id title:headline __typename}__typename}publication{id tegID title:headline flyTitle:subheadline datePublished regionsAllowed url{canonical __typename}__typename}rubric:description source:channel{id __typename}tegID text(format:"json")title:headline type url{canonical __typename}topic contentIdentity{forceAppWebview mediaType articleType __typename}__typename}fragment ImageInlineFragment on Media{inline{url{canonical __typename}width height __typename}__typename}fragment ImageMainFragment on Media{main{url{canonical __typename}width height __typename}__typename}fragment ImagePromoFragment on Media{promo{url{canonical __typename}id width height __typename}__typename}',  # noqa
                'operationName': 'HubsDataQuery',
                'variables': '{{"id":"/content/{}","size":40}}'.format(content_id),
            }
            url = 'https://cp2-graphql-gateway.p.aws.economist.com/graphql?' + urlencode(query, safe='()!', quote_via=quote)
            try:
                raw = self.index_to_soup(url, raw=True)
            except Exception:
                raise ValueError('Server is not reachable, try again some other time.')
            ans = self.economist_parse_index(raw)
            return self.economist_return_index(ans)
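Note on the query assembly above: the doubled braces in the 'variables' template are str.format escapes, and passing quote_via=quote makes urlencode percent-encode spaces as %20 rather than '+', while safe='()!' keeps the parentheses and bangs the GraphQL text relies on. An illustration with a hypothetical content id and a shortened query string:

from urllib.parse import quote, urlencode

# The doubled braces come out as literal braces after str.format:
print('{{"id":"/content/{}","size":40}}'.format('abc123'))
# -> {"id":"/content/abc123","size":40}

# quote_via=quote encodes spaces as %20; safe='()!' leaves ( ) ! as-is:
print(urlencode({'q': 'text(mode:"hub" format:"json")'}, safe='()!', quote_via=quote))
# -> q=text(mode%3A%22hub%22%20format%3A%22json%22)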
        def economist_parse_index(self, raw):
            data = json.loads(raw)['data']['canonical']
            self.description = data['description']

            feeds_dict = defaultdict(list)
            for part in safe_dict(data, "hasPart", "parts"):
                section = part['title']
                self.log(section)
                for art in safe_dict(part, "hasPart", "parts"):
                    title = safe_dict(art, "title")
                    desc = safe_dict(art, "rubric") or ''
                    sub = safe_dict(art, "flyTitle") or ''
                    if sub and section != sub:
                        desc = sub + ' :: ' + desc
                    pt = PersistentTemporaryFile('.html')
                    pt.write(json.dumps(art).encode('utf-8'))
                    pt.close()
                    url = 'file:///' + pt.name
                    feeds_dict[section].append({"title": title, "url": url, "description": desc})
                    self.log('\t', title, '\n\t\t', desc)
            return [(section, articles) for section, articles in feeds_dict.items()]
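Note: the method above parks each article's JSON in a PersistentTemporaryFile and queues it as a file:// URL, so the downloader fetches local JSON and the archive branch of preprocess_raw_html() later rebuilds the article HTML from it. A minimal sketch of that round trip, with a made-up article dict:

import json
from calibre.ptempfile import PersistentTemporaryFile

art = {'title': 'Example', 'rubric': '', 'text': []}  # hypothetical payload
pt = PersistentTemporaryFile('.html')  # persists after close(), unlike a regular temp file
pt.write(json.dumps(art).encode('utf-8'))
pt.close()
url = 'file:///' + pt.name
# calibre downloads `url`; preprocess_raw_html() then feeds the raw JSON
# to load_article_from_json() in its use_archive branch.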
        def populate_article_metadata(self, article, soup, first):
            article.url = soup.find('h1')['title']

        def preprocess_html(self, soup):
            for img in soup.findAll('img', src=True):
                img['src'] = img['src'].replace('economist.com/',
                    'economist.com/cdn-cgi/image/width=600,quality=80,format=auto/')
            return soup
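The src rewrite above routes every image through the Cloudflare image-resizing endpoint on economist.com, capping width at 600px. For a hypothetical image URL:

src = 'https://www.economist.com/media/cover.jpg'  # made-up example
print(src.replace('economist.com/', 'economist.com/cdn-cgi/image/width=600,quality=80,format=auto/'))
# -> https://www.economist.com/cdn-cgi/image/width=600,quality=80,format=auto/media/cover.jpg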
    else:  # Load articles from individual article pages {{{
        def __init__(self, *args, **kwargs):
            BasicNewsRecipe.__init__(self, *args, **kwargs)
            if self.output_profile.short_name.startswith('kindle'):
                # Reduce image sizes to get file size below amazon's email
                # sending threshold
                self.web2disk_options.compress_news_images = True
                self.web2disk_options.compress_news_images_auto_size = 5
                self.log.warn('Kindle Output profile being used, reducing image quality to keep file size below amazon email threshold')
        def parse_index(self):
            # return [('Articles', [{'title':'test',
            #     'url':'https://www.economist.com/interactive/briefing/2022/06/11/huge-foundation-models-are-turbo-charging-ai-progress'
            # }])]
            url = 'https://www.economist.com/the-world-ahead'
            # raw = open('/t/raw.html').read()
            raw = self.index_to_soup(url, raw=True)
            # with open('/t/raw.html', 'wb') as f:
            #     f.write(raw)
            soup = self.index_to_soup(raw)
            # nav = soup.find(attrs={'class':'navigation__wrapper'})
            # if nav is not None:
            #     a = nav.find('a', href=lambda x: x and '/printedition/' in x)
            #     if a is not None:
            #         self.log('Following nav link to current edition', a['href'])
            #         soup = self.index_to_soup(process_url(a['href']))
            ans = self.economist_parse_index(soup)
            if not ans:
                raise NoArticles(
                    'Could not find any articles, either the '
                    'economist.com server is having trouble and you should '
                    'try later or the website format has changed and the '
                    'recipe needs to be updated.'
                )
            return ans

        def economist_parse_index(self, soup):
            script_tag = soup.find("script", id="__NEXT_DATA__")
            if script_tag is not None:
                data = json.loads(script_tag.string)
                # open('/t/raw.json', 'w').write(json.dumps(data, indent=2, sort_keys=True))
                self.title = safe_dict(data, "props", "pageProps", "content", "headline")
                # self.cover_url = 'https://mma.prnewswire.com/media/2275620/The_Economist_The_World_Ahead_2024.jpg?w=600'

            feeds = []

            for coll in safe_dict(data, "props", "pageProps", "content", "collections"):
                section = safe_dict(coll, "headline") or ''
                self.log(section)
                articles = []
                for part in safe_dict(coll, "hasPart", "parts"):
                    title = safe_dict(part, "headline") or ''
                    url = safe_dict(part, "url", "canonical") or ''
                    if not title or not url:
                        continue
                    desc = safe_dict(part, "description") or ''
                    sub = safe_dict(part, "subheadline") or ''
                    if sub:
                        desc = sub + ' :: ' + desc
                    self.log('\t', title, '\n\t', desc, '\n\t\t', url)
                    articles.append({'title': title, 'description': desc, 'url': url})
                if articles:
                    feeds.append((section, articles))
            return feeds
    # }}}
    def preprocess_raw_html(self, raw, url):
        # open('/t/raw.html', 'wb').write(raw.encode('utf-8'))
        if use_archive:
            body = '<html><body><article></article></body></html>'
            root = parse(body)
            load_article_from_json(raw, root)
        else:
            root = parse(raw)
            script = root.xpath('//script[@id="__NEXT_DATA__"]')
            if script:
                try:
                    load_article_from_json(script[0].text, root)
                except JSONHasNoContent:
                    cleanup_html_article(root)
        if '/interactive/' in url:
            return '<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1><em>' \
                + 'This article is supposed to be read in a browser' \
                + '</em></article></body></html>'
        for div in root.xpath('//div[@class="lazy-image"]'):
            noscript = list(div.iter('noscript'))
            if noscript and noscript[0].text:
@ -223,11 +375,15 @@ class Economist(BasicNewsRecipe):
        for x in root.xpath('//*[name()="script" or name()="style" or name()="source" or name()="meta"]'):
            x.getparent().remove(x)
        # the economist uses <small> for small caps with a custom font
        for init in root.xpath('//span[@data-caps="initial"]'):
            init.set('style', 'font-weight:bold;')
        for x in root.xpath('//small'):
            if x.text and len(x) == 0:
                x.text = x.text.upper()
                x.tag = 'span'
                x.set('style', 'font-variant: small-caps')
        for h2 in root.xpath('//h2'):
            h2.tag = 'h4'
        for x in root.xpath('//figcaption'):
            x.set('style', 'text-align:center; font-size:small;')
        for x in root.xpath('//cite'):
@ -236,61 +392,6 @@ class Economist(BasicNewsRecipe):
        raw = etree.tostring(root, encoding='unicode')
        return raw
    def eco_find_image_tables(self, soup):
        for x in soup.findAll('table', align=['right', 'center']):
            if len(x.findAll('font')) in (1, 2) and len(x.findAll('img')) == 1:
@ -318,3 +419,12 @@ class Economist(BasicNewsRecipe):
        if url.endswith('/print'):
            url = url.rpartition('/')[0]
        return BasicNewsRecipe.canonicalize_internal_url(self, url, is_link=is_link)
def get_login_cookies(username, password):
    print(33333333333, username, password)


if __name__ == '__main__':
    import sys
    get_login_cookies(sys.argv[-2], sys.argv[-1])

Binary file not shown (size before: 762 B; after: 2.6 KiB).