diff --git a/recipes/economist_search.recipe b/recipes/economist_search.recipe
index 6e61bde70b..f9ff27ec93 100644
--- a/recipes/economist_search.recipe
+++ b/recipes/economist_search.recipe
@@ -2,20 +2,46 @@
 # License: GPLv3 Copyright: 2008, Kovid Goyal

 import json
+import time
+from datetime import datetime, timedelta

 from html5_parser import parse
 from lxml import etree

-from calibre.ebooks.BeautifulSoup import BeautifulSoup, NavigableString, Tag
-from calibre.scraper.simple import read_url
+from calibre.ebooks.BeautifulSoup import NavigableString, Tag
 from calibre.web.feeds.news import BasicNewsRecipe


-def E(parent, name, text='', **attrs):
-    ans = parent.makeelement(name, **attrs)
-    ans.text = text
-    parent.append(ans)
-    return ans
+def process_node(node):
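+    # map a single node of the article's JSON content into an HTML fragment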
+    ntype = node.get('type', '')
+    if ntype == 'CROSSHEAD':
+        if node.get('textHtml'):
+            return f'<h4>{node.get("textHtml")}</h4>'
+        return f'<h4>{node.get("text", "")}</h4>'
+    elif ntype == 'PARAGRAPH':
+        if node.get('textHtml'):
+            return f'<p>{node.get("textHtml")}</p>'
+        return f'<p>{node.get("text", "")}</p>'
+    elif ntype == 'IMAGE':
+        alt = "" if node.get("altText") is None else node.get("altText")
+        cap = ""
+        if node.get('caption'):
+            if node['caption'].get('textHtml') is not None:
+                cap = node['caption']['textHtml']
+        return f'<div><img src="{node["url"]}" title="{alt}"></div><div style="text-align:center; font-size:small;">{cap}</div>'
+    elif ntype == 'PULL_QUOTE':
+        if node.get('textHtml'):
+            return f'<blockquote>{node.get("textHtml")}</blockquote>'
+        return f'<blockquote>{node.get("text", "")}</blockquote>'
+    elif ntype == 'DIVIDER':
+        return '<hr>'
+    elif ntype == 'INFOBOX':
+        for x in safe_dict(node, 'components'):
+            return f'<blockquote>{process_node(x)}</blockquote>'
+    elif ntype:
+        print('** ', ntype)
+    return ''


 def safe_dict(data, *names):
     ans = data
@@ -28,33 +54,29 @@ class JSONHasNoContent(ValueError):
     pass


-def load_article_from_json(raw, root):
+def load_article_from_json(raw):
     # open('/t/raw.json', 'w').write(raw)
+    body = ''
+    data = json.loads(raw)['props']['pageProps']['cp2Content']
+    body += f'<div style="color: red; font-size:small; font-weight:bold;">{data.get("flyTitle", "")}</div>'
+    body += f'<h1>{data["headline"]}</h1>'
+    body += f'<div style="font-style: italic; color:#202020;">{data.get("rubric", "")}</div>'
     try:
-        data = json.loads(raw)['props']['pageProps']['content']
-    except KeyError as e:
-        raise JSONHasNoContent(e)
-    if isinstance(data, list):
-        data = data[0]
-    body = root.xpath('//body')[0]
-    for child in tuple(body):
-        body.remove(child)
-    article = E(body, 'article')
-    E(article, 'div', data['subheadline'], id='subhead')
-    E(article, 'h1', data['headline'])
-    E(article, 'p', data['description'], id='desc')
-    if data['dateline'] is None:
-        E(article, 'p', (data['datePublishedString'] or ''), id='date')
+        date = data['dateModified']
+    except Exception:
+        date = data['datePublished']
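+    # article timestamps are ISO-8601 UTC strings with a trailing 'Z'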
+    dt = datetime.fromisoformat(date[:-1]) + timedelta(seconds=time.timezone)
+    dt = dt.strftime('%b %d, %Y %I:%M %p')
+    if data.get('dateline') is None:
+        body += f'<p style="color: gray; font-size: small;">{dt}</p>'
     else:
-        E(article, 'p', (data['datePublishedString'] or '') + ' | ' + (data['dateline']), id='date')
-    main_image_url = safe_dict(data, 'image', 'main', 'url').get('canonical')
+        body += f'<p style="color: gray; font-size: small;">{dt + " | " + (data["dateline"])}</p>'
+    main_image_url = safe_dict(data, 'leadComponent') or ''
     if main_image_url:
-        div = E(article, 'div')
-        try:
-            E(div, 'img', src=main_image_url)
-        except Exception:
-            pass
-    E(article, 'section', id='body')
+        body += process_node(data['leadComponent'])
+    for node in data.get('body'):
+        body += process_node(node)
+    return '<html><body><article>' + body + '</article></body></html>'


 def cleanup_html_article(root):
     main = root.xpath('//main')[0]
@@ -87,11 +109,10 @@ def process_url(url):


 class econ_search(BasicNewsRecipe):
-
     title = 'The Economist - Search'
     language = 'en'
     encoding = 'utf-8'
-    __author__ = "Kovid Goyal"
+    __author__ = "unkn0wn"
     description = (
         'Use the Advanced section of the recipe to search.'
     )
@@ -100,11 +121,11 @@ class econ_search(BasicNewsRecipe):
     no_stylesheets = True
     ignore_duplicate_articles = {'url'}
     extra_css = '''
-        em, blockquote { color:#202020; }
+        em { color:#202020; }
         img {display:block; margin:0 auto;}
         .sub { font-size:small; }
         #subhead { color: #404040; font-size:small; font-weight:bold; }'
-        #desc { font-style: italic; color:#202020; }
+        #descrip { font-style: italic; color:#202020; }
         #date { color: gray; font-size:small; }
     '''
@@ -137,37 +158,32 @@ class econ_search(BasicNewsRecipe):
     remove_attributes = ['data-reactid', 'width', 'height']
     # economist.com has started throttling after about 60% of the total has
     # downloaded with connection reset by peer (104) errors.
-    delay = 1
-
-    def __init__(self, *args, **kwargs):
-        BasicNewsRecipe.__init__(self, *args, **kwargs)
-        if self.output_profile.short_name.startswith('kindle'):
-            # Reduce image sizes to get file size below amazon's email
-            # sending threshold
-            self.web2disk_options.compress_news_images = True
-            self.web2disk_options.compress_news_images_auto_size = 5
-            self.log.warn('Kindle Output profile being used, reducing image quality to keep file size below amazon email threshold')
+    delay = 3

     def get_browser(self, *args, **kwargs):
-        # Needed to bypass cloudflare
-        kwargs['user_agent'] = 'common_words/based'
+        kwargs['user_agent'] = (
+            'Mozilla/5.0 (Linux; Android 14; 330333QCG Build/AP1A.140705.005; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/125.0.6422.165 Mobile Safari/537.36 Lamarr/3.37.0-3037003 (android)'  # noqa
+        )
         br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
-        br.addheaders += [('Accept-Language', 'en-GB,en-US;q=0.9,en;q=0.8')]
+        br.addheaders += [('x-requested-with', 'com.economist.lamarr')]
         return br

     def preprocess_raw_html(self, raw, url):
         # open('/t/raw.html', 'wb').write(raw.encode('utf-8'))
-        root = parse(raw)
+        root_ = parse(raw)
         if '/interactive/' in url:
-            return '<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1><em>' \
-                + 'This article is supposed to be read in a browser' \
-                + '</em></article></body></html>'
-        script = root.xpath('//script[@id="__NEXT_DATA__"]')
-        if script:
-            try:
-                load_article_from_json(script[0].text, root)
-            except JSONHasNoContent:
-                cleanup_html_article(root)
+            return (
+                '<html><body><article><h1>'
+                + root_.xpath('//h1')[0].text + '</h1><em>'
+                + 'This article is supposed to be read in a browser'
+                + '</em></article></body></html>'
+            )
+
+        script = root_.xpath('//script[@id="__NEXT_DATA__"]')
+
+        html = load_article_from_json(script[0].text)
+
+        root = parse(html)
         for div in root.xpath('//div[@class="lazy-image"]'):
             noscript = list(div.iter('noscript'))
             if noscript and noscript[0].text:
@@ -179,45 +195,32 @@ class econ_search(BasicNewsRecipe):
                     p.remove(noscript[0])
         for x in root.xpath('//*[name()="script" or name()="style" or name()="source" or name()="meta"]'):
             x.getparent().remove(x)
+        # the economist uses <small> for small caps with a custom font
+        for init in root.xpath('//span[@data-caps="initial"]'):
+            init.set('style', 'font-weight:bold;')
+        for x in root.xpath('//small'):
+            if x.text and len(x) == 0:
+                x.text = x.text.upper()
+                x.tag = 'span'
+                x.set('style', 'font-variant: small-caps')
+        for h2 in root.xpath('//h2'):
+            h2.tag = 'h4'
+        for x in root.xpath('//figcaption'):
+            x.set('style', 'text-align:center; font-size:small;')
+        for x in root.xpath('//cite'):
+            x.tag = 'blockquote'
+            x.set('style', 'color:#404040;')
         raw = etree.tostring(root, encoding='unicode')
-
-        raw_ar = read_url([], 'https://archive.is/latest/' + url)
-        archive = BeautifulSoup(str(raw_ar))
-        art = archive.find('article')
-        if art:
-            bdy = art.findAll('section')
-            if len(bdy) != 0:
-                content = bdy[-1]
-            else:
-                content = archive.find('div', attrs={'itemprop':'text'})
-            soup = BeautifulSoup(raw)
-            article = soup.find('section', attrs={'id':'body'})
-            if not article:
-                article = soup.find('div', attrs={'itemprop':'text'})
-            if not article:
-                article = soup.find(attrs={'itemprop':'blogPost'})
-            if article and content:
-                self.log('**fetching archive content')
-                article.append(content)
-
-                div = soup.findAll(attrs={'style': lambda x: x and x.startswith(
-                    ('color:rgb(13, 13, 13);', 'color: rgb(18, 18, 18);')
-                )})
-                for p in div:
-                    p.name = 'p'
-                return str(soup)
-            return raw
         return raw
         return raw

     def preprocess_html(self, soup):
-        for img in soup.findAll('img', attrs={'old-src':True}):
-            img['src'] = img['old-src']
-        for a in soup.findAll('a', href=True):
-            a['href'] = 'http' + a['href'].split('http')[-1]
-        for fig in soup.findAll('figure'):
-            fig['class'] = 'sub'
-        for sty in soup.findAll(attrs={'style':True}):
-            del sty['style']
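+        # serve images via economist.com's Cloudflare image resizer (cdn-cgi/image),
+        # sized according to the optional 'res' setting (default 600px wide)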
+        width = '600'
+        w = self.recipe_specific_options.get('res')
+        if w and isinstance(w, str):
+            width = w
+        for img in soup.findAll('img', src=True):
+            qua = 'economist.com/cdn-cgi/image/width=' + width + ',quality=80,format=auto/'
+            img['src'] = img['src'].replace('economist.com/', qua)
         return soup

     recipe_specific_options = {
@@ -234,7 +237,12 @@
             'short': 'number of pages',
             'long': 'number of pages of search results you want',
             'default': '2'
-        }
+        },
+        'res': {
+            'short': 'For hi-res images, select a resolution from the\nfollowing options: 834, 960, 1096, 1280, 1424',
+            'long': 'This is useful for non e-ink devices, and for a lower file size\nthan the default, use from 480, 384, 360, 256.',
+            'default': '600',
+        },
     }

     def parse_index(self):