From 621d7d33c8c13276e2f7ff91cd944b06c9ac2498 Mon Sep 17 00:00:00 2001
From: unkn0w7n <51942695+unkn0w7n@users.noreply.github.com>
Date: Mon, 6 May 2024 16:08:06 +0530
Subject: [PATCH] Update The World Ahead

---
 recipes/economist_world_ahead.recipe    | 350 ++++++++++++++++--------
 recipes/icons/economist_world_ahead.png | Bin 762 -> 2683 bytes
 2 files changed, 230 insertions(+), 120 deletions(-)

diff --git a/recipes/economist_world_ahead.recipe b/recipes/economist_world_ahead.recipe
index 98f3e80d17..6e70499cd5 100644
--- a/recipes/economist_world_ahead.recipe
+++ b/recipes/economist_world_ahead.recipe
@@ -2,13 +2,20 @@
 # License: GPLv3 Copyright: 2008, Kovid Goyal
 
 import json
+import time
+from collections import defaultdict
+from datetime import datetime, timedelta
+from urllib.parse import quote, urlencode
 
 from calibre import replace_entities
 from calibre.ebooks.BeautifulSoup import NavigableString, Tag
+from calibre.ptempfile import PersistentTemporaryFile
+from calibre.utils.date import parse_only_date
 from calibre.web.feeds.news import BasicNewsRecipe
 from html5_parser import parse
 from lxml import etree
 
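+# Editorial note on the flag below: when True, the issue is built from the
+# GraphQL content archive (index and full article JSON fetched up front);
+# when False, each article page's __NEXT_DATA__ payload is scraped instead.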
+use_archive = True
 
 def E(parent, name, text='', **attrs):
     ans = parent.makeelement(name, **attrs)
@@ -47,31 +54,63 @@ class JSONHasNoContent(ValueError):
     pass
 
 
-def load_article_from_json(raw, root):
-    # open('/t/raw.json', 'w').write(raw)
-    try:
-        data = json.loads(raw)['props']['pageProps']['content']
-    except KeyError as e:
-        raise JSONHasNoContent(e)
-    if isinstance(data, list):
-        data = data[0]
-    body = root.xpath('//body')[0]
-    for child in tuple(body):
-        body.remove(child)
-    article = E(body, 'article')
-    E(article, 'h4', data['subheadline'], style='color: red; margin: 0')
-    E(article, 'h1', data['headline'], style='font-size: x-large')
-    E(article, 'div', data['description'], style='font-style: italic; color: #202020;')
-    E(article, 'div', (data['datePublishedString'] or '') + ' | ' + (data['dateline'] or ''), style='color: gray; margin: 1em')
-    main_image_url = safe_dict(data, 'image', 'main', 'url').get('canonical')
-    if main_image_url:
-        div = E(article, 'div')
+if use_archive:
+    def load_article_from_json(raw, root):
+        # open('/t/raw.json', 'w').write(raw)
+        data = json.loads(raw)
+        body = root.xpath('//body')[0]
+        article = E(body, 'article')
+        E(article, 'div', data['flyTitle'] , style='color: red; font-size:small; font-weight:bold;')
+        E(article, 'h1', data['title'], title=safe_dict(data, "url", "canonical") or '')
+        E(article, 'div', data['rubric'], style='font-style: italic; color:#202020;')
         try:
-            E(div, 'img', src=main_image_url)
+            date = data['dateModified']
         except Exception:
-            pass
-    for node in data.get('text') or ():
-        process_node(node, article)
+            date = data['datePublished']
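+        # The timestamps are UTC ISO-8601 strings with a trailing 'Z'; it is
+        # trimmed so fromisoformat() accepts the value (needed before Python
+        # 3.11), and the time.timezone offset is applied before display.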
+        dt = datetime.fromisoformat(date[:-1]) + timedelta(seconds=time.timezone)
+        dt = dt.strftime('%b %d, %Y, %I:%M %p')
+        if data['dateline'] is None:
+            E(article, 'p', dt, style='color: gray; font-size:small;')
+        else:
+            E(article, 'p', dt + ' | ' + (data['dateline']), style='color: gray; font-size:small;')
+        main_image_url = safe_dict(data, 'image', 'main', 'url').get('canonical')
+        if main_image_url:
+            div = E(article, 'div')
+            try:
+                E(div, 'img', src=main_image_url)
+            except Exception:
+                pass
+        for node in data.get('text') or ():
+            process_node(node, article)
+else:
+    def load_article_from_json(raw, root):
+        # open('/t/raw.json', 'w').write(raw)
+        try:
+            data = json.loads(raw)['props']['pageProps']['content']
+        except KeyError as e:
+            raise JSONHasNoContent(e)
+        if isinstance(data, list):
+            data = data[0]
+        body = root.xpath('//body')[0]
+        for child in tuple(body):
+            body.remove(child)
+        article = E(body, 'article')
+        E(article, 'div', replace_entities(data['subheadline']) , style='color: red; font-size:small; font-weight:bold;')
+        E(article, 'h1', replace_entities(data['headline']))
+        E(article, 'div', replace_entities(data['description']), style='font-style: italic; color:#202020;')
+        if data['dateline'] is None:
+            E(article, 'p', (data['datePublishedString'] or ''), style='color: gray; font-size:small;')
+        else:
+            E(article, 'p', (data['datePublishedString'] or '') + ' | ' + (data['dateline']), style='color: gray; font-size:small;')
+        main_image_url = safe_dict(data, 'image', 'main', 'url').get('canonical')
+        if main_image_url:
+            div = E(article, 'div')
+            try:
+                E(div, 'img', src=main_image_url)
+            except Exception:
+                pass
+        for node in data.get('text') or ():
+            process_node(node, article)
 
 
 def cleanup_html_article(root):
@@ -116,6 +155,7 @@ class Economist(BasicNewsRecipe):
     title = 'The Economist World Ahead'
     language = 'en'
     encoding = 'utf-8'
+    masthead_url = 'https://www.livemint.com/lm-img/dev/economist-logo-oneline.png'
 
     __author__ = "Kovid Goyal"
     description = (
@@ -124,31 +164,9 @@ class Economist(BasicNewsRecipe):
         ' Best downloaded in late November.'
     )
     extra_css = '''
-        .headline {font-size: x-large;}
-        h2 { font-size: small; }
-        h1 { font-size: medium; }
-        em.Bold {font-weight:bold;font-style:normal;}
-        em.Italic {font-style:italic;}
-        p.xhead {font-weight:bold;}
-        .pullquote {
-            float: right;
-            font-size: larger;
-            font-weight: bold;
-            font-style: italic;
-            page-break-inside:avoid;
-            border-bottom: 3px solid black;
-            border-top: 3px solid black;
-            width: 228px;
-            margin: 0px 0px 10px 15px;
-            padding: 7px 0px 9px;
-        }
-        .flytitle-and-title__flytitle {
-            display: block;
-            font-size: smaller;
-            color: red;
-        }
+        em { color:#202020; }
         img {display:block; margin:0 auto;}
-    '''
+    '''
     oldest_article = 7.0
     resolve_internal_links = True
     remove_tags = [
@@ -181,16 +199,6 @@ class Economist(BasicNewsRecipe):
 
     needs_subscription = False
 
-    def __init__(self, *args, **kwargs):
-        BasicNewsRecipe.__init__(self, *args, **kwargs)
-        if self.output_profile.short_name.startswith('kindle'):
-            # Reduce image sizes to get file size below amazon's email
-            # sending threshold
-            self.web2disk_options.compress_news_images = True
-            self.web2disk_options.compress_news_images_auto_size = 5
-            self.log.warn('Kindle Output profile being used, reducing image quality to keep file size below amazon email threshold')
-
     def get_browser(self, *args, **kwargs):
         # Needed to bypass cloudflare
         kwargs['user_agent'] = 'common_words/based'
@@ -198,19 +206,163 @@ class Economist(BasicNewsRecipe):
         br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
         br.addheaders += [('Accept-Language', 'en-GB,en-US;q=0.9,en;q=0.8')]
         return br
 
+    def economist_test_article(self):
+        return [('Articles', [{'title':'test',
+            'url':'https://www.economist.com/the-americas/2024/04/14/elon-musk-is-feuding-with-brazils-powerful-supreme-court'
+        }])]
+
+    def economist_return_index(self, ans):
+        if not ans:
+            raise NoArticles(
+                'Could not find any articles, either the '
+                'economist.com server is having trouble and you should '
+                'try later or the website format has changed and the '
+                'recipe needs to be updated.'
+            )
+        return ans
+
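+    # Index strategy in archive mode: the hub page's __NEXT_DATA__ yields a
+    # tegID for this edition; a single GraphQL request against the content
+    # gateway then returns every section and article, full text included.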
+    if use_archive:
+        def parse_index(self):
+            # return self.economist_test_article()
+            soup = self.index_to_soup('https://www.economist.com/the-world-ahead')
+            script_tag = soup.find("script", id="__NEXT_DATA__")
+            if script_tag is None:
+                raise ValueError('No script tag with JSON data found in the weeklyedition archive')
+            data = json.loads(script_tag.string)
+            content_id = data['props']['pageProps']['content']['tegID'].split('/')[-1]
+            query = {
+                'query': 'query HubsDataQuery($id:String!$size:Int!){canonical(ref:$id){id headline description url{canonical __typename}image{ident{url{canonical __typename}width height __typename}__typename}text(mode:"hub" format:"json")hasPart(size:$size){parts{id title:headline isPartOf{context{title:headline __typename}__typename}hasPart{parts{...ArticleFragment isPartOf{id context{title:headline flyTitle:subheadline rubric:description dateline image{...ImageMainFragment ...ImagePromoFragment __typename}__typename}__typename}__typename}__typename}__typename}__typename}}fragment ArticleFragment on Content{ad{grapeshot{channels{name __typename}__typename}__typename}articleSection{internal{id title:headline __typename}__typename}audio{main{id duration(format:"seconds")source:channel{id __typename}url{canonical __typename}__typename}__typename}byline dateline dateModified datePublished dateRevised flyTitle:subheadline id image{...ImageInlineFragment ...ImageMainFragment ...ImagePromoFragment __typename}print{title:headline flyTitle:subheadline rubric:description section{id title:headline __typename}__typename}publication{id tegID title:headline flyTitle:subheadline datePublished regionsAllowed url{canonical __typename}__typename}rubric:description source:channel{id __typename}tegID text(format:"json")title:headline type url{canonical __typename}topic contentIdentity{forceAppWebview mediaType articleType __typename}__typename}fragment ImageInlineFragment on Media{inline{url{canonical __typename}width height __typename}__typename}fragment ImageMainFragment on Media{main{url{canonical __typename}width height __typename}__typename}fragment ImagePromoFragment on Media{promo{url{canonical __typename}id width height __typename}__typename}',  # noqa
+                'operationName': 'HubsDataQuery',
+                'variables': '{{"id":"/content/{}","size":40}}'.format(content_id),
+            }
+            url = 'https://cp2-graphql-gateway.p.aws.economist.com/graphql?' + urlencode(query, safe='()!', quote_via=quote)
+            try:
+                raw = self.index_to_soup(url, raw=True)
+            except Exception:
+                raise ValueError('Server is not reachable, try again some other time.')
+            ans = self.economist_parse_index(raw)
+            return self.economist_return_index(ans)
+
+        def economist_parse_index(self, raw):
+            data = json.loads(raw)['data']['canonical']
+            self.description = data['description']
+
+            feeds_dict = defaultdict(list)
+            for part in safe_dict(data, "hasPart", "parts"):
+                section = part['title']
+                self.log(section)
+                for art in safe_dict(part, "hasPart", "parts"):
+                    title = safe_dict(art, "title")
+                    desc = safe_dict(art, "rubric") or ''
+                    sub = safe_dict(art, "flyTitle") or ''
+                    if sub and section != sub:
+                        desc = sub + ' :: ' + desc
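+                    # Write each article's JSON to a temporary file and point
+                    # the feed entry at it via a file:// URL, so articles are
+                    # "downloaded" locally from the already-fetched data.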
+                    pt = PersistentTemporaryFile('.html')
+                    pt.write(json.dumps(art).encode('utf-8'))
+                    pt.close()
+                    url = 'file:///' + pt.name
+                    feeds_dict[section].append({"title": title, "url": url, "description": desc})
+                    self.log('\t', title, '\n\t\t', desc)
+            return [(section, articles) for section, articles in feeds_dict.items()]
+
+        def populate_article_metadata(self, article, soup, first):
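+            # load_article_from_json() stashes the canonical URL in the h1's
+            # title attribute; recover it here as the real article URL.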
+            article.url = soup.find('h1')['title']
+
+        def preprocess_html(self, soup):
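+            # Route images through the site's /cdn-cgi/image resizer
+            # (Cloudflare) to cap width at 600px and recompress them.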
+            for img in soup.findAll('img', src=True):
+                img['src'] = img['src'].replace('economist.com/',
+                    'economist.com/cdn-cgi/image/width=600,quality=80,format=auto/')
+            return soup
+
+    else:  # Load articles from individual article pages {{{
+
+        def __init__(self, *args, **kwargs):
+            BasicNewsRecipe.__init__(self, *args, **kwargs)
+            if self.output_profile.short_name.startswith('kindle'):
+                # Reduce image sizes to get file size below amazon's email
+                # sending threshold
+                self.web2disk_options.compress_news_images = True
+                self.web2disk_options.compress_news_images_auto_size = 5
+                self.log.warn('Kindle Output profile being used, reducing image quality to keep file size below amazon email threshold')
+
+        def parse_index(self):
+            # return [('Articles', [{'title':'test',
+            #     'url':'https://www.economist.com/interactive/briefing/2022/06/11/huge-foundation-models-are-turbo-charging-ai-progress'
+            # }])]
+            url = 'https://www.economist.com/the-world-ahead'
+            # raw = open('/t/raw.html').read()
+            raw = self.index_to_soup(url, raw=True)
+            # with open('/t/raw.html', 'wb') as f:
+            #     f.write(raw)
+            soup = self.index_to_soup(raw)
+            # nav = soup.find(attrs={'class':'navigation__wrapper'})
+            # if nav is not None:
+            #     a = nav.find('a', href=lambda x: x and '/printedition/' in x)
+            #     if a is not None:
+            #         self.log('Following nav link to current edition', a['href'])
+            #         soup = self.index_to_soup(process_url(a['href']))
+            ans = self.economist_parse_index(soup)
+            if not ans:
+                raise NoArticles(
+                    'Could not find any articles, either the '
+                    'economist.com server is having trouble and you should '
+                    'try later or the website format has changed and the '
+                    'recipe needs to be updated.'
+                )
+            return ans
+
+        def economist_parse_index(self, soup):
+            script_tag = soup.find("script", id="__NEXT_DATA__")
+            if script_tag is not None:
+                data = json.loads(script_tag.string)
+                # open('/t/raw.json', 'w').write(json.dumps(data, indent=2, sort_keys=True))
+                self.title = safe_dict(data, "props", "pageProps", "content", "headline")
+                # self.cover_url = 'https://mma.prnewswire.com/media/2275620/The_Economist_The_World_Ahead_2024.jpg?w=600'
+
+            feeds = []
+
+            for coll in safe_dict(data, "props", "pageProps", "content", "collections"):
+                section = safe_dict(coll, "headline") or ''
+                self.log(section)
+                articles = []
+                for part in safe_dict(coll, "hasPart", "parts"):
+                    title = safe_dict(part, "headline") or ''
+                    url = safe_dict(part, "url", "canonical") or ''
+                    if not title or not url:
+                        continue
+                    desc = safe_dict(part, "description") or ''
+                    sub = safe_dict(part, "subheadline") or ''
+                    if sub:
+                        desc = sub + ' :: ' + desc
+                    self.log('\t', title, '\n\t', desc, '\n\t\t', url)
+                    articles.append({'title': title, 'description':desc, 'url': url})
+                if articles:
+                    feeds.append((section, articles))
+            return feeds
+
+    # }}}
+
     def preprocess_raw_html(self, raw, url):
         # open('/t/raw.html', 'wb').write(raw.encode('utf-8'))
-        root = parse(raw)
+        if use_archive:
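+            # Archive mode: raw is the article JSON itself, so render it
+            # into this minimal document skeleton instead of parsing HTML.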
+            body = '<html><body><article></article></body></html>'
+            root = parse(body)
+            load_article_from_json(raw, root)
+        else:
+            root = parse(raw)
+            script = root.xpath('//script[@id="__NEXT_DATA__"]')
+            if script:
+                try:
+                    load_article_from_json(script[0].text, root)
+                except JSONHasNoContent:
+                    cleanup_html_article(root)
+
         if '/interactive/' in url:
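+            # Interactive articles have no extractable body; return a stub
+            # page directing the reader to the website instead.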
-            return '<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1>' \
+            return '<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1>' \
                + 'This article is supposed to be read in a browser' \
                + '</article></body></html>'
-        script = root.xpath('//script[@id="__NEXT_DATA__"]')
-        if script:
-            try:
-                load_article_from_json(script[0].text, root)
-            except JSONHasNoContent:
-                cleanup_html_article(root)
+
         for div in root.xpath('//div[@class="lazy-image"]'):
             noscript = list(div.iter('noscript'))
             if noscript and noscript[0].text:
@@ -223,11 +375,15 @@ class Economist(BasicNewsRecipe):
         for x in root.xpath('//*[name()="script" or name()="style" or name()="source" or name()="meta"]'):
             x.getparent().remove(x)
         # the economist uses <small> for small caps with a custom font
+        for init in root.xpath('//span[@data-caps="initial"]'):
+            init.set('style', 'font-weight:bold;')
         for x in root.xpath('//small'):
             if x.text and len(x) == 0:
                 x.text = x.text.upper()
                 x.tag = 'span'
                 x.set('style', 'font-variant: small-caps')
+        for h2 in root.xpath('//h2'):
+            h2.tag = 'h4'
         for x in root.xpath('//figcaption'):
             x.set('style', 'text-align:center; font-size:small;')
         for x in root.xpath('//cite'):
@@ -236,61 +392,6 @@ class Economist(BasicNewsRecipe):
         raw = etree.tostring(root, encoding='unicode')
         return raw
 
-    def parse_index(self):
-        # return [('Articles', [{'title':'test',
-        #     'url':'https://www.economist.com/interactive/briefing/2022/06/11/huge-foundation-models-are-turbo-charging-ai-progress'
-        # }])]
-        url = 'https://www.economist.com/the-world-ahead'
-        # raw = open('/t/raw.html').read()
-        raw = self.index_to_soup(url, raw=True)
-        # with open('/t/raw.html', 'wb') as f:
-        #     f.write(raw)
-        soup = self.index_to_soup(raw)
-        # nav = soup.find(attrs={'class':'navigation__wrapper'})
-        # if nav is not None:
-        #     a = nav.find('a', href=lambda x: x and '/printedition/' in x)
-        #     if a is not None:
-        #         self.log('Following nav link to current edition', a['href'])
-        #         soup = self.index_to_soup(process_url(a['href']))
-        ans = self.economist_parse_index(soup)
-        if not ans:
-            raise NoArticles(
-                'Could not find any articles, either the '
-                'economist.com server is having trouble and you should '
-                'try later or the website format has changed and the '
-                'recipe needs to be updated.'
-            )
-        return ans
-
-    def economist_parse_index(self, soup):
-        script_tag = soup.find("script", id="__NEXT_DATA__")
-        if script_tag is not None:
-            data = json.loads(script_tag.string)
-            # open('/t/raw.json', 'w').write(json.dumps(data, indent=2, sort_keys=True))
-            self.title = safe_dict(data, "props", "pageProps", "content", "headline")
-            # self.cover_url = 'https://mma.prnewswire.com/media/2275620/The_Economist_The_World_Ahead_2024.jpg?w=600'
-
-        feeds = []
-
-        for coll in safe_dict(data, "props", "pageProps", "content", "collections"):
-            section = safe_dict(coll, "headline") or ''
-            self.log(section)
-            articles = []
-            for part in safe_dict(coll, "hasPart", "parts"):
-                title = safe_dict(part, "headline") or ''
-                url = safe_dict(part, "url", "canonical") or ''
-                if not title or not url:
-                    continue
-                desc = safe_dict(part, "description") or ''
-                sub = safe_dict(part, "subheadline") or ''
-                if sub:
-                    desc = sub + ' :: ' + desc
-                self.log('\t', title, '\n\t', desc, '\n\t\t', url)
-                articles.append({'title': title, 'description':desc, 'url': url})
-            if articles:
-                feeds.append((section, articles))
-        return feeds
-
     def eco_find_image_tables(self, soup):
         for x in soup.findAll('table', align=['right', 'center']):
             if len(x.findAll('font')) in (1, 2) and len(x.findAll('img')) == 1:
@@ -318,3 +419,12 @@ class Economist(BasicNewsRecipe):
         if url.endswith('/print'):
             url = url.rpartition('/')[0]
         return BasicNewsRecipe.canonicalize_internal_url(self, url, is_link=is_link)
+
+
+def get_login_cookies(username, password):
+    print(33333333333, username, password)
+
+
+if __name__ == '__main__':
+    import sys
+    get_login_cookies(sys.argv[-2], sys.argv[-1])
diff --git a/recipes/icons/economist_world_ahead.png b/recipes/icons/economist_world_ahead.png
index 7fc33d384248077625bd99a7bf1e554c627b8a48..aa6c876a7769166802469cc0d59b7149d5c59499 100644
GIT binary patch
literal 2683
[base85 PNG data omitted]

literal 762
[base85 PNG data omitted]