#!/usr/bin/env python import json import time from datetime import datetime, timedelta from urllib.parse import quote, urlencode from uuid import uuid4 from html5_parser import parse from lxml import etree from mechanize import Request from calibre import browser from calibre.ptempfile import PersistentTemporaryFile from calibre.web.feeds.news import BasicNewsRecipe def safe_dict(data, *names): ans = data for x in names: ans = ans.get(x) or {} return ans def process_web_list(li_node): li_html = '' for li in li_node['items']: if li.get('textHtml'): li_html += f'
  • {li["textHtml"]}
  • ' elif li.get('textJson'): li_html += f'
  • {parse_textjson(li["textJson"])}
  • ' else: li_html += f'
  • {li.get("text", "")}
  • ' return '' def process_info_box(bx): info = '' for x in safe_dict(bx, 'components'): info += f'
    {process_web_node(x)}
    ' return info def parse_txt(ty): typ = ty.get('type', '') children = ty.get('children', []) href = '#' attributes = ty.get('attributes') or () for a in attributes: if a.get('name') == 'href': href = a.get('value', href) break tag_map = { 'text': lambda: [ty.get('value', '')], 'scaps': lambda: [ f'{"".join(parse_txt(c))}' for c in children ], 'bold': lambda: [f'{"".join(parse_txt(c))}' for c in children], 'drop_caps': lambda: [f'{"".join(parse_txt(c))}' for c in children], 'italic': lambda: [f'{"".join(parse_txt(c))}' for c in children], 'linebreak': lambda: ['
    '], 'external_link': lambda: [ f'{"".join(parse_txt(c))}' for c in children ], 'internal_link': lambda: [ f'{"".join(parse_txt(c))}' for c in children ], 'ufinish': lambda: [text for c in children for text in parse_txt(c)], 'subscript': lambda: [f'{"".join(parse_txt(c))}' for c in children], 'superscript': lambda: [f'{"".join(parse_txt(c))}' for c in children], } if typ in tag_map: yield from tag_map[typ]() else: print('** ', typ) def parse_textjson(nt): return ''.join(''.join(parse_txt(n)) for n in nt) def process_web_node(node): ntype = node.get('type', '') if ntype == 'CROSSHEAD': if node.get('textHtml'): return f'

    {node.get("textHtml")}

    ' return f'

    {node.get("text", "")}

    ' elif ntype in ['PARAGRAPH', 'BOOK_INFO']: if node.get('textHtml'): return f'\n

    {node.get("textHtml")}

    ' if node.get('textJson'): return f'\n

    {parse_textjson(node["textJson"])}

    ' return f'\n

    {node.get("text", "")}

    ' elif (ntype == 'IMAGE') or (node.get('__typename', '') == 'ImageComponent'): alt = '' if node.get('altText') is None else node.get('altText') cap = '' if node.get('caption'): if node['caption'].get('textHtml') is not None: cap = node['caption']['textHtml'] elif node['caption'].get('textJson') is not None: cap = parse_textjson(node['caption']['textJson']) elif node['caption'].get('text') is not None: cap = node['caption']['text'] return f'
    {cap}
    ' elif ntype == 'PULL_QUOTE': if node.get('textHtml'): return f'
    {node.get("textHtml")}
    ' if node.get('textJson'): return f'
    {parse_textjson(node["textJson"])}
    ' return f'
    {node.get("text", "")}
    ' elif ntype == 'BLOCK_QUOTE': if node.get('textHtml'): return f'
    {node.get("textHtml")}
    ' if node.get('textJson'): return f'
    {parse_textjson(node["textJson"])}
    ' return f'
    {node.get("text", "")}
    ' elif ntype == 'DIVIDER': return '
    ' elif ntype == 'INFOGRAPHIC': if node.get('fallback'): return process_web_node(node['fallback']) elif ntype == 'INFOBOX': return process_info_box(node) elif ntype == 'UNORDERED_LIST': if node.get('items'): return process_web_list(node) elif ntype: print('** ', ntype) return '' def load_article_from_web_json(raw): # open('/t/raw.json', 'w').write(raw) body = '' data = json.loads(raw)['data']['findArticleByUrl'] body += f'
    {data.get("flyTitle", "")}
    ' body += f'

    {data["headline"]}

    ' if data.get('rubric') and data.get('rubric') is not None: body += f'
    {data.get("rubric", "")}
    ' try: date = data['dateModified'] except Exception: date = data['datePublished'] dt = datetime.fromisoformat(date[:-1]) + timedelta(seconds=time.timezone) dt = dt.strftime('%b %d, %Y %I:%M %p') if data.get('dateline') is None: body += f'

    {dt}

    ' else: body += f'

    {dt + " | " + (data["dateline"])}

    ' main_image_url = safe_dict(data, 'leadComponent') or '' if main_image_url: body += process_web_node(data['leadComponent']) if data.get('byline'): if data['byline'] is not None: body += f'

    {"By " + data["byline"]}

    ' for node in data.get('body'): body += process_web_node(node) return '
    ' + body + '
    ' class NoArticles(Exception): pass def get_content(url_): headers = { 'User-Agent': 'TheEconomist-Liskov-android', 'accept': 'multipart/mixed; deferSpec=20220824, application/json', 'accept-encoding': 'gzip', 'content-type': 'application/json', 'x-app-trace-id': str(uuid4()), 'x-economist-consumer': 'TheEconomist-Liskov-android', 'x-teg-client-name': 'Economist-Android', 'x-teg-client-os': 'Android', 'x-teg-client-version': '4.40.0', } br = browser() req = Request( url_, headers=headers, ) res = br.open(req) return res.read() def process_url(url): if url.startswith('/'): url = 'https://www.economist.com' + url return url class Econ1843(BasicNewsRecipe): title = 'The Economist 1843' language = 'en_GB' encoding = 'utf-8' masthead_url = 'https://www.livemint.com/lm-img/dev/economist-logo-oneline.png' __author__ = 'unkn0wn' description = ( 'Published since September 1843 to take part in “a severe contest between intelligence, which presses forward, and ' 'an unworthy, timid ignorance obstructing our progress.”' ) cover_url = 'https://pbs.twimg.com/media/FQtV6T0WYAYDxKY?format=jpg&name=orig' extra_css = ''' em { color:#202020; } img {display:block; margin:0 auto;} ''' resolve_internal_links = True # economist.com has started throttling after about 60% of the total has # downloaded with connection reset by peer (104) errors. delay = 1 browser_type = 'webengine' recipe_specific_options = { 'res': { 'short': 'For hi-res images, select a resolution from the\nfollowing options: 834, 960, 1096, 1280, 1424', 'long': 'This is useful for non e-ink devices, and for a lower file size\nthan the default, use from 480, 384, 360, 256.', 'default': '600', }, } def get_browser(self, *args, **kwargs): kwargs['user_agent'] = ( 'Mozilla/5.0 (Linux; Android 14) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.6533.103 Mobile Safari/537.36 Liskov' ) br = BasicNewsRecipe.get_browser(self, *args, **kwargs) return br def economist_test_article(self): return [('Articles', [{'title':'test', 'url':'https://www.economist.com/1843/2025/03/05/the-great-pretender-how-ahmed-al-sharaa-won-syria' }])] def economist_return_index(self, ans): if not ans: raise NoArticles( 'Could not find any articles, either the ' 'economist.com server is having trouble and you should ' 'try later or the website format has changed and the ' 'recipe needs to be updated.' ) return ans def parse_index(self): # return self.economist_test_article() self.title = '1843' raw = self.index_to_soup('https://www.economist.com/1843') ans = self.economist_parse_index(raw) return self.economist_return_index(ans) def economist_parse_index(self, soup): script_tag = soup.find('script', id='__NEXT_DATA__') if script_tag is not None: data = json.loads(script_tag.string) # open('/t/raw.json', 'w').write(json.dumps(data, indent=2, sort_keys=True)) feeds = [] for part in safe_dict(data, 'props', 'pageProps', 'content', 'articles'): title = safe_dict(part, 'headline') or '' url = process_url(safe_dict(part, 'url') or '') desc = safe_dict(part, 'rubric') or '' sub = safe_dict(part, 'flyTitle') or '' if sub: desc = sub + ' :: ' + desc self.log('\t', title, '\n\t', desc, '\n\t\t', url) feeds.append({'title': title, 'description':desc, 'url': url}) return [('Articles', feeds)] def preprocess_html(self, soup): width = '600' w = self.recipe_specific_options.get('res') if w and isinstance(w, str): width = w for img in soup.findAll('img', src=True): qua = ( 'economist.com/cdn-cgi/image/width=' + width + ',quality=80,format=auto/' ) img['src'] = img['src'].replace('economist.com/', qua) return soup def preprocess_raw_html(self, raw, url): # open('/t/raw.html', 'wb').write(raw.encode('utf-8')) html = load_article_from_web_json(raw) root = parse(html) # the economist uses for small caps with a custom font for init in root.xpath('//span[@data-caps="initial"]'): init.set('style', 'font-weight:bold;') for x in root.xpath('//small'): if x.text and len(x) == 0: x.text = x.text.upper() x.tag = 'span' x.set( 'style', 'text-transform: uppercase; font-size: 0.85em; letter-spacing: 0.05em;', ) for h2 in root.xpath('//h2'): h2.tag = 'h4' for x in root.xpath('//figcaption'): x.set('style', 'text-align:center; font-size:small;') for x in root.xpath('//cite'): x.tag = 'blockquote' x.set('style', 'color:#404040;') raw = etree.tostring(root, encoding='unicode') return raw def get_article(self, url): query = { 'operationName': 'ArticleDeeplinkQuery', 'variables': '{{"ref":"{}"}}'.format(url), 'query': 'query ArticleDeeplinkQuery($ref: String!, $includeRelatedArticles: Boolean = true ) { findArticleByUrl(url: $ref) { __typename ...ArticleDataFragment } } fragment ContentIdentityFragment on ContentIdentity { articleType forceAppWebView leadMediaType } fragment NarrationFragment on Narration { album bitrate duration filename id provider url isAiGenerated fileHash } fragment ImageTeaserFragment on ImageComponent { altText height imageType source url width } fragment PodcastAudioFragment on PodcastEpisode { id audio { url durationInSeconds } } fragment ArticleTeaserFragment on Article { id tegId url rubric headline flyTitle brand byline dateFirstPublished dateline dateModified datePublished dateRevised estimatedReadTime wordCount printHeadline contentIdentity { __typename ...ContentIdentityFragment } section { tegId name } teaserImage { __typename type ...ImageTeaserFragment } leadComponent { __typename type ...ImageTeaserFragment } narration(selectionMethod: PREFER_ACTOR_NARRATION) { __typename ...NarrationFragment } podcast { __typename ...PodcastAudioFragment } } fragment AnnotatedTextFragment on AnnotatedText { text textJson annotations { type length index attributes { name value } } } fragment ImageComponentFragment on ImageComponent { altText caption { __typename ...AnnotatedTextFragment } credit height imageType mode source url width } fragment BlockQuoteComponentFragment on BlockQuoteComponent { text textJson annotations { type length index attributes { name value } } } fragment BookInfoComponentFragment on BookInfoComponent { text textJson annotations { type length index attributes { name value } } } fragment ParagraphComponentFragment on ParagraphComponent { text textJson annotations { type length index attributes { name value } } } fragment PullQuoteComponentFragment on PullQuoteComponent { text textJson annotations { type length index attributes { name value } } } fragment CrossheadComponentFragment on CrossheadComponent { text } fragment OrderedListComponentFragment on OrderedListComponent { items { __typename ...AnnotatedTextFragment } } fragment UnorderedListComponentFragment on UnorderedListComponent { items { __typename ...AnnotatedTextFragment } } fragment VideoComponentFragment on VideoComponent { url title thumbnailImage } fragment InfoboxComponentFragment on InfoboxComponent { components { __typename type ...BlockQuoteComponentFragment ...BookInfoComponentFragment ...ParagraphComponentFragment ...PullQuoteComponentFragment ...CrossheadComponentFragment ...OrderedListComponentFragment ...UnorderedListComponentFragment ...VideoComponentFragment } } fragment InfographicComponentFragment on InfographicComponent { url title width fallback { __typename ...ImageComponentFragment } altText height width } fragment ArticleDataFragment on Article { id url brand byline rubric headline layout { headerStyle } contentIdentity { __typename ...ContentIdentityFragment } dateline dateFirstPublished dateModified datePublished dateRevised estimatedReadTime narration(selectionMethod: PREFER_ACTOR_NARRATION) { __typename ...NarrationFragment } printFlyTitle printHeadline printRubric flyTitle wordCount section { tegId name articles(pagingInfo: { pagingType: OFFSET pageSize: 6 pageNumber: 1 } ) @include(if: $includeRelatedArticles) { edges { node { __typename ...ArticleTeaserFragment } } } } teaserImage { __typename type ...ImageComponentFragment } tegId leadComponent { __typename type ...ImageComponentFragment } body { __typename type ...BlockQuoteComponentFragment ...BookInfoComponentFragment ...ParagraphComponentFragment ...PullQuoteComponentFragment ...CrossheadComponentFragment ...OrderedListComponentFragment ...UnorderedListComponentFragment ...InfoboxComponentFragment ...ImageComponentFragment ...VideoComponentFragment ...InfographicComponentFragment } footer { __typename type ...ParagraphComponentFragment } tags { name } ads { adData } podcast { __typename ...PodcastAudioFragment } }', # noqa: E501 } deep_url = 'https://cp2-graphql-gateway.p.aws.economist.com/graphql?' + urlencode( query, safe='()!', quote_via=quote ) raw = get_content(deep_url) return raw def print_version(self, url): art_cont = self.get_article(url) pt = PersistentTemporaryFile('.html') pt.write(art_cont) pt.close() return 'file:///' + pt.name