From 2af98c556b8abb5d170ee1e5713f32f6b42db050 Mon Sep 17 00:00:00 2001
From: unkn0w7n <51942695+unkn0w7n@users.noreply.github.com>
Date: Fri, 29 Nov 2024 17:43:30 +0530
Subject: [PATCH 1/3] Update economist.recipe and economist_world_ahead.recipe
---
recipes/economist.recipe | 150 +++++++++++++++++----------
recipes/economist_world_ahead.recipe | 9 +-
2 files changed, 99 insertions(+), 60 deletions(-)
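Notes: the web path now renders each node of the cp2Content JSON (pulled
from the page's __NEXT_DATA__ script) straight to an HTML snippet instead
of rebuilding the lxml tree with E(). A rough sketch of what
process_web_node() yields, using hypothetical node values:

  process_web_node({'type': 'PARAGRAPH', 'textHtml': 'Some <b>text</b>'})
  # -> '<p>Some <b>text</b></p>'
  process_web_node({'type': 'DIVIDER'})
  # -> '<hr>'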
diff --git a/recipes/economist.recipe b/recipes/economist.recipe
index 406b8f9503..9f25b40a3e 100644
--- a/recipes/economist.recipe
+++ b/recipes/economist.recipe
@@ -85,33 +85,60 @@ def load_article_from_json(raw, root):
process_node(node, article)
-def load_article_from_web_json(raw, root):
+def process_web_node(node):
+ ntype = node.get('type', '')
+ if ntype == 'CROSSHEAD':
+ if node.get('textHtml'):
+ return f'<h4>{node.get("textHtml")}</h4>'
+ return f'<h4>{node.get("text", "")}</h4>'
+ elif ntype == 'PARAGRAPH':
+ if node.get('textHtml'):
+ return f'<p>{node.get("textHtml")}</p>'
+ return f'<p>{node.get("text", "")}</p>'
+ elif ntype == 'IMAGE':
+ alt = "" if node.get("altText") is None else node.get("altText")
+ cap = ""
+ if node.get('caption'):
+ if node['caption'].get('textHtml') is not None:
+ cap = node['caption']['textHtml']
+ return f'<div><img src="{node["url"]}" title="{alt}"></div><div style="text-align:center; font-size:small;">{cap}</div>'
+ elif ntype == 'PULL_QUOTE':
+ if node.get('textHtml'):
+ return f'<blockquote>{node.get("textHtml")}</blockquote>'
+ return f'<blockquote>{node.get("text", "")}</blockquote>'
+ elif ntype == 'DIVIDER':
+ return '<hr>'
+ elif ntype == 'INFOBOX':
+ for x in safe_dict(node, 'components'):
+ return f'<blockquote>{process_web_node(x)}</blockquote>'
+ elif ntype:
+ print('** ', ntype)
+ return ''
+
+
+def load_article_from_web_json(raw):
# open('/t/raw.json', 'w').write(raw)
+ body = ''
+ data = json.loads(raw)['props']['pageProps']['cp2Content']
+ body += f'<div style="color: red; font-size:small; font-weight:bold;">{data.get("flyTitle", "")}</div>'
+ body += f'<h1>{data["headline"]}</h1>'
+ body += f'<div style="font-style: italic; color:#202020;">{data.get("rubric", "")}</div>'
try:
- data = json.loads(raw)['props']['pageProps']['content']
- except KeyError as e:
- raise JSONHasNoContent(e)
- if isinstance(data, list):
- data = data[0]
- body = root.xpath('//body')[0]
- for child in tuple(body):
- body.remove(child)
- article = E(body, 'article')
- E(article, 'div', data['subheadline'], id='subhead')
- E(article, 'h1', data['headline'])
- E(article, 'p', data['description'], id='desc')
- if data['dateline'] is None:
- E(article, 'p', (data['datePublishedString'] or ''), id='date')
+ date = data['dateModified']
+ except Exception:
+ date = data['datePublished']
+ dt = datetime.fromisoformat(date[:-1]) + timedelta(seconds=time.timezone)
+ dt = dt.strftime('%b %d, %Y %I:%M %p')
+ if data.get('dateline') is None:
+ body += f'<p style="color: gray; font-size: small;">{dt}</p>'
else:
- E(article, 'p', (data['datePublishedString'] or '') + ' | ' + (data['dateline']), id='date')
- main_image_url = safe_dict(data, 'image', 'main', 'url').get('canonical')
+ body += f'<p style="color: gray; font-size: small;">{dt + " | " + (data["dateline"])}</p>'
+ main_image_url = safe_dict(data, 'leadComponent') or ''
if main_image_url:
- div = E(article, 'div')
- try:
- E(div, 'img', src=main_image_url)
- except Exception:
- pass
- E(article, 'section', id='body')
+ body += process_web_node(data['leadComponent'])
+ for node in data.get('body'):
+ body += process_web_node(node)
+ return '<html><body><article>' + body + '</article></body></html>'
def cleanup_html_article(root):
@@ -214,27 +241,28 @@ class Economist(BasicNewsRecipe):
'long': 'This is useful for non e-ink devices, and for a lower file size\nthan the default, use from 480, 384, 360, 256.',
'default': '600',
},
- 'archive': {
- 'short': 'Past Edition fails?',
- 'long': 'enter yes, this will fetch content from wayback machine.',
- 'default': 'no',
- },
}
def __init__(self, *args, **kwargs):
BasicNewsRecipe.__init__(self, *args, **kwargs)
- c = self.recipe_specific_options.get('archive')
+ c = self.recipe_specific_options.get('date')
if c and isinstance(c, str):
- if c.lower() == 'yes':
- self.from_archive = True
+ self.from_archive = True
needs_subscription = False
def get_browser(self, *args, **kwargs):
- # Needed to bypass cloudflare
- kwargs['user_agent'] = 'common_words/based'
- br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
- br.addheaders += [('Accept-Language', 'en-GB,en-US;q=0.9,en;q=0.8')]
+ if self.from_archive:
+ kwargs['user_agent'] = (
+ 'Mozilla/5.0 (Linux; Android 14; 330333QCG Build/AP1A.140705.005; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/125.0.6422.165 Mobile Safari/537.36 Lamarr/3.37.0-3037003 (android)' # noqa
+ )
+ br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
+ br.addheaders += [('x-requested-with', 'com.economist.lamarr')]
+ else:
+ # Needed to bypass cloudflare
+ kwargs['user_agent'] = 'common_words/based'
+ br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
+ br.addheaders += [('Accept-Language', 'en-GB,en-US;q=0.9,en;q=0.8')]
return br
def publication_date(self):
@@ -313,11 +341,12 @@ class Economist(BasicNewsRecipe):
return self.economist_return_index(ans)
def economist_parse_index(self, raw):
- edition_date = self.recipe_specific_options.get('date')
- if edition_date and isinstance(edition_date, str):
- data = json.loads(raw)['data']['section']
- else:
- data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
+ # edition_date = self.recipe_specific_options.get('date')
+ # if edition_date and isinstance(edition_date, str):
+ # data = json.loads(raw)['data']['section']
+ # else:
+ # data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
+ data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
dt = datetime.fromisoformat(data['datePublished'][:-1]) + timedelta(seconds=time.timezone)
dt = dt.strftime('%b %d, %Y')
self.timefmt = ' [' + dt + ']'
@@ -364,8 +393,6 @@ class Economist(BasicNewsRecipe):
article.url = soup.find('h1')['title']
def preprocess_html(self, soup):
- if self.from_archive:
- return self.preprocess_web_html(soup)
width = '600'
w = self.recipe_specific_options.get('res')
if w and isinstance(w, str):
@@ -529,17 +556,20 @@ class Economist(BasicNewsRecipe):
def preprocess_raw_web_html(self, raw, url):
# open('/t/raw.html', 'wb').write(raw.encode('utf-8'))
- root = parse(raw)
+ root_ = parse(raw)
if '/interactive/' in url:
- return '<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1><em>' \
- + 'This article is supposed to be read in a browser' \
- + '</em></article></body></html>'
- script = root.xpath('//script[@id="__NEXT_DATA__"]')
- if script:
- try:
- load_article_from_web_json(script[0].text, root)
- except JSONHasNoContent:
- cleanup_html_article(root)
+ return (
+ '<html><body><article><h1>'
+ + root_.xpath('//h1')[0].text + '</h1><em>'
+ + 'This article is supposed to be read in a browser'
+ + '</em></article></body></html>'
+ )
+
+ script = root_.xpath('//script[@id="__NEXT_DATA__"]')
+
+ html = load_article_from_web_json(script[0].text)
+
+ root = parse(html)
for div in root.xpath('//div[@class="lazy-image"]'):
noscript = list(div.iter('noscript'))
if noscript and noscript[0].text:
@@ -551,7 +581,23 @@ class Economist(BasicNewsRecipe):
p.remove(noscript[0])
for x in root.xpath('//*[name()="script" or name()="style" or name()="source" or name()="meta"]'):
x.getparent().remove(x)
+ # the economist uses <small> for small caps with a custom font
+ for init in root.xpath('//span[@data-caps="initial"]'):
+ init.set('style', 'font-weight:bold;')
+ for x in root.xpath('//small'):
+ if x.text and len(x) == 0:
+ x.text = x.text.upper()
+ x.tag = 'span'
+ x.set('style', 'font-variant: small-caps')
+ for h2 in root.xpath('//h2'):
+ h2.tag = 'h4'
+ for x in root.xpath('//figcaption'):
+ x.set('style', 'text-align:center; font-size:small;')
+ for x in root.xpath('//cite'):
+ x.tag = 'blockquote'
+ x.set('style', 'color:#404040;')
raw = etree.tostring(root, encoding='unicode')
+ return raw
raw_ar = read_url([], 'https://archive.is/latest/' + url)
archive = BeautifulSoup(str(raw_ar))
diff --git a/recipes/economist_world_ahead.recipe b/recipes/economist_world_ahead.recipe
index a3706a6b30..9d340c405f 100644
--- a/recipes/economist_world_ahead.recipe
+++ b/recipes/economist_world_ahead.recipe
@@ -12,20 +12,13 @@ from calibre.ebooks.BeautifulSoup import NavigableString, Tag
from calibre.web.feeds.news import BasicNewsRecipe
-def E(parent, name, text='', **attrs):
- ans = parent.makeelement(name, **attrs)
- ans.text = text
- parent.append(ans)
- return ans
-
-
def process_node(node):
ntype = node.get('type', '')
if ntype == 'CROSSHEAD':
if node.get('textHtml'):
return f'<h4>{node.get("textHtml")}</h4>'
return f'<h4>{node.get("text", "")}</h4>'
- if ntype == 'PARAGRAPH':
+ elif ntype == 'PARAGRAPH':
if node.get('textHtml'):
return f'<p>{node.get("textHtml")}</p>'
return f'<p>{node.get("text", "")}</p>'
From bcd9326c521c291e9d32f3f6dfa126beb08a909c Mon Sep 17 00:00:00 2001
From: unkn0w7n <51942695+unkn0w7n@users.noreply.github.com>
Date: Fri, 29 Nov 2024 17:44:39 +0530
Subject: [PATCH 2/3] Update economist_free.recipe
---
recipes/economist_free.recipe | 150 ++++++++++++++++++++++------------
1 file changed, 98 insertions(+), 52 deletions(-)
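Notes: same rework as economist.recipe, applied to the free recipe.
Timestamps now come from dateModified (falling back to datePublished) and
are shifted by time.timezone before formatting; a rough sketch with a
hypothetical timestamp:

  from datetime import datetime, timedelta
  import time
  date = '2024-11-29T12:13:30Z'  # hypothetical value
  dt = datetime.fromisoformat(date[:-1]) + timedelta(seconds=time.timezone)
  dt.strftime('%b %d, %Y %I:%M %p')  # 'Nov 29, 2024 12:13 PM' when TZ is UTC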
diff --git a/recipes/economist_free.recipe b/recipes/economist_free.recipe
index 406b8f9503..9f25b40a3e 100644
--- a/recipes/economist_free.recipe
+++ b/recipes/economist_free.recipe
@@ -85,33 +85,60 @@ def load_article_from_json(raw, root):
process_node(node, article)
-def load_article_from_web_json(raw, root):
+def process_web_node(node):
+ ntype = node.get('type', '')
+ if ntype == 'CROSSHEAD':
+ if node.get('textHtml'):
+ return f'<h4>{node.get("textHtml")}</h4>'
+ return f'<h4>{node.get("text", "")}</h4>'
+ elif ntype == 'PARAGRAPH':
+ if node.get('textHtml'):
+ return f'<p>{node.get("textHtml")}</p>'
+ return f'<p>{node.get("text", "")}</p>'
+ elif ntype == 'IMAGE':
+ alt = "" if node.get("altText") is None else node.get("altText")
+ cap = ""
+ if node.get('caption'):
+ if node['caption'].get('textHtml') is not None:
+ cap = node['caption']['textHtml']
+ return f'<div><img src="{node["url"]}" title="{alt}"></div><div style="text-align:center; font-size:small;">{cap}</div>'
+ elif ntype == 'PULL_QUOTE':
+ if node.get('textHtml'):
+ return f'<blockquote>{node.get("textHtml")}</blockquote>'
+ return f'<blockquote>{node.get("text", "")}</blockquote>'
+ elif ntype == 'DIVIDER':
+ return '<hr>'
+ elif ntype == 'INFOBOX':
+ for x in safe_dict(node, 'components'):
+ return f'<blockquote>{process_web_node(x)}</blockquote>'
+ elif ntype:
+ print('** ', ntype)
+ return ''
+
+
+def load_article_from_web_json(raw):
# open('/t/raw.json', 'w').write(raw)
+ body = ''
+ data = json.loads(raw)['props']['pageProps']['cp2Content']
+ body += f'<div style="color: red; font-size:small; font-weight:bold;">{data.get("flyTitle", "")}</div>'
+ body += f'<h1>{data["headline"]}</h1>'
+ body += f'<div style="font-style: italic; color:#202020;">{data.get("rubric", "")}</div>'
try:
- data = json.loads(raw)['props']['pageProps']['content']
- except KeyError as e:
- raise JSONHasNoContent(e)
- if isinstance(data, list):
- data = data[0]
- body = root.xpath('//body')[0]
- for child in tuple(body):
- body.remove(child)
- article = E(body, 'article')
- E(article, 'div', data['subheadline'], id='subhead')
- E(article, 'h1', data['headline'])
- E(article, 'p', data['description'], id='desc')
- if data['dateline'] is None:
- E(article, 'p', (data['datePublishedString'] or ''), id='date')
+ date = data['dateModified']
+ except Exception:
+ date = data['datePublished']
+ dt = datetime.fromisoformat(date[:-1]) + timedelta(seconds=time.timezone)
+ dt = dt.strftime('%b %d, %Y %I:%M %p')
+ if data.get('dateline') is None:
+ body += f'<p style="color: gray; font-size: small;">{dt}</p>'
else:
- E(article, 'p', (data['datePublishedString'] or '') + ' | ' + (data['dateline']), id='date')
- main_image_url = safe_dict(data, 'image', 'main', 'url').get('canonical')
+ body += f'<p style="color: gray; font-size: small;">{dt + " | " + (data["dateline"])}</p>'
+ main_image_url = safe_dict(data, 'leadComponent') or ''
if main_image_url:
- div = E(article, 'div')
- try:
- E(div, 'img', src=main_image_url)
- except Exception:
- pass
- E(article, 'section', id='body')
+ body += process_web_node(data['leadComponent'])
+ for node in data.get('body'):
+ body += process_web_node(node)
+ return '<html><body><article>' + body + '</article></body></html>'
def cleanup_html_article(root):
@@ -214,27 +241,28 @@ class Economist(BasicNewsRecipe):
'long': 'This is useful for non e-ink devices, and for a lower file size\nthan the default, use from 480, 384, 360, 256.',
'default': '600',
},
- 'archive': {
- 'short': 'Past Edition fails?',
- 'long': 'enter yes, this will fetch content from wayback machine.',
- 'default': 'no',
- },
}
def __init__(self, *args, **kwargs):
BasicNewsRecipe.__init__(self, *args, **kwargs)
- c = self.recipe_specific_options.get('archive')
+ c = self.recipe_specific_options.get('date')
if c and isinstance(c, str):
- if c.lower() == 'yes':
- self.from_archive = True
+ self.from_archive = True
needs_subscription = False
def get_browser(self, *args, **kwargs):
- # Needed to bypass cloudflare
- kwargs['user_agent'] = 'common_words/based'
- br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
- br.addheaders += [('Accept-Language', 'en-GB,en-US;q=0.9,en;q=0.8')]
+ if self.from_archive:
+ kwargs['user_agent'] = (
+ 'Mozilla/5.0 (Linux; Android 14; 330333QCG Build/AP1A.140705.005; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/125.0.6422.165 Mobile Safari/537.36 Lamarr/3.37.0-3037003 (android)' # noqa
+ )
+ br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
+ br.addheaders += [('x-requested-with', 'com.economist.lamarr')]
+ else:
+ # Needed to bypass cloudflare
+ kwargs['user_agent'] = 'common_words/based'
+ br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
+ br.addheaders += [('Accept-Language', 'en-GB,en-US;q=0.9,en;q=0.8')]
return br
def publication_date(self):
@@ -313,11 +341,12 @@ class Economist(BasicNewsRecipe):
return self.economist_return_index(ans)
def economist_parse_index(self, raw):
- edition_date = self.recipe_specific_options.get('date')
- if edition_date and isinstance(edition_date, str):
- data = json.loads(raw)['data']['section']
- else:
- data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
+ # edition_date = self.recipe_specific_options.get('date')
+ # if edition_date and isinstance(edition_date, str):
+ # data = json.loads(raw)['data']['section']
+ # else:
+ # data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
+ data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
dt = datetime.fromisoformat(data['datePublished'][:-1]) + timedelta(seconds=time.timezone)
dt = dt.strftime('%b %d, %Y')
self.timefmt = ' [' + dt + ']'
@@ -364,8 +393,6 @@ class Economist(BasicNewsRecipe):
article.url = soup.find('h1')['title']
def preprocess_html(self, soup):
- if self.from_archive:
- return self.preprocess_web_html(soup)
width = '600'
w = self.recipe_specific_options.get('res')
if w and isinstance(w, str):
@@ -529,17 +556,20 @@ class Economist(BasicNewsRecipe):
def preprocess_raw_web_html(self, raw, url):
# open('/t/raw.html', 'wb').write(raw.encode('utf-8'))
- root = parse(raw)
+ root_ = parse(raw)
if '/interactive/' in url:
- return '<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1><em>' \
- + 'This article is supposed to be read in a browser' \
- + '</em></article></body></html>'
- script = root.xpath('//script[@id="__NEXT_DATA__"]')
- if script:
- try:
- load_article_from_web_json(script[0].text, root)
- except JSONHasNoContent:
- cleanup_html_article(root)
+ return (
+ '<html><body><article><h1>'
+ + root_.xpath('//h1')[0].text + '</h1><em>'
+ + 'This article is supposed to be read in a browser'
+ + '</em></article></body></html>'
+ )
+
+ script = root_.xpath('//script[@id="__NEXT_DATA__"]')
+
+ html = load_article_from_web_json(script[0].text)
+
+ root = parse(html)
for div in root.xpath('//div[@class="lazy-image"]'):
noscript = list(div.iter('noscript'))
if noscript and noscript[0].text:
@@ -551,7 +581,23 @@ class Economist(BasicNewsRecipe):
p.remove(noscript[0])
for x in root.xpath('//*[name()="script" or name()="style" or name()="source" or name()="meta"]'):
x.getparent().remove(x)
+ # the economist uses <small> for small caps with a custom font
+ for init in root.xpath('//span[@data-caps="initial"]'):
+ init.set('style', 'font-weight:bold;')
+ for x in root.xpath('//small'):
+ if x.text and len(x) == 0:
+ x.text = x.text.upper()
+ x.tag = 'span'
+ x.set('style', 'font-variant: small-caps')
+ for h2 in root.xpath('//h2'):
+ h2.tag = 'h4'
+ for x in root.xpath('//figcaption'):
+ x.set('style', 'text-align:center; font-size:small;')
+ for x in root.xpath('//cite'):
+ x.tag = 'blockquote'
+ x.set('style', 'color:#404040;')
raw = etree.tostring(root, encoding='unicode')
+ return raw
raw_ar = read_url([], 'https://archive.is/latest/' + url)
archive = BeautifulSoup(str(raw_ar))
From 82faac326bf427bbbed90b9171152bc13577083c Mon Sep 17 00:00:00 2001
From: unkn0w7n <51942695+unkn0w7n@users.noreply.github.com>
Date: Fri, 29 Nov 2024 17:57:21 +0530
Subject: [PATCH 3/3] Update economist_search.recipe
---
recipes/economist_search.recipe | 194 +++++++++++++++++---------------
1 file changed, 101 insertions(+), 93 deletions(-)
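Notes: article images are now routed through the Economist's cdn-cgi
resizer, so the new 'res' option controls the downloaded width; a rough
sketch with a hypothetical image URL:

  width = '600'
  qua = 'economist.com/cdn-cgi/image/width=' + width + ',quality=80,format=auto/'
  src = 'https://www.economist.com/content-assets/images/20241130_DE.jpg'
  src.replace('economist.com/', qua)
  # -> 'https://www.economist.com/cdn-cgi/image/width=600,quality=80,format=auto/content-assets/images/20241130_DE.jpg'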
diff --git a/recipes/economist_search.recipe b/recipes/economist_search.recipe
index 6e61bde70b..f9ff27ec93 100644
--- a/recipes/economist_search.recipe
+++ b/recipes/economist_search.recipe
@@ -2,20 +2,46 @@
# License: GPLv3 Copyright: 2008, Kovid Goyal
import json
+import time
+from datetime import datetime, timedelta
from html5_parser import parse
from lxml import etree
-from calibre.ebooks.BeautifulSoup import BeautifulSoup, NavigableString, Tag
-from calibre.scraper.simple import read_url
+from calibre.ebooks.BeautifulSoup import NavigableString, Tag
from calibre.web.feeds.news import BasicNewsRecipe
-def E(parent, name, text='', **attrs):
- ans = parent.makeelement(name, **attrs)
- ans.text = text
- parent.append(ans)
- return ans
+def process_node(node):
+ ntype = node.get('type', '')
+ if ntype == 'CROSSHEAD':
+ if node.get('textHtml'):
+ return f'<h4>{node.get("textHtml")}</h4>'
+ return f'<h4>{node.get("text", "")}</h4>'
+ elif ntype == 'PARAGRAPH':
+ if node.get('textHtml'):
+ return f'<p>{node.get("textHtml")}</p>'
+ return f'<p>{node.get("text", "")}</p>'
+ elif ntype == 'IMAGE':
+ alt = "" if node.get("altText") is None else node.get("altText")
+ cap = ""
+ if node.get('caption'):
+ if node['caption'].get('textHtml') is not None:
+ cap = node['caption']['textHtml']
+ return f'<div><img src="{node["url"]}" title="{alt}"></div><div style="text-align:center; font-size:small;">{cap}</div>'
+ elif ntype == 'PULL_QUOTE':
+ if node.get('textHtml'):
+ return f'<blockquote>{node.get("textHtml")}</blockquote>'
+ return f'<blockquote>{node.get("text", "")}</blockquote>'
+ elif ntype == 'DIVIDER':
+ return '<hr>'
+ elif ntype == 'INFOBOX':
+ for x in safe_dict(node, 'components'):
+ return f'<blockquote>{process_node(x)}</blockquote>'
+ elif ntype:
+ print('** ', ntype)
+ return ''
+
def safe_dict(data, *names):
ans = data
@@ -28,33 +54,29 @@ class JSONHasNoContent(ValueError):
pass
-def load_article_from_json(raw, root):
+def load_article_from_json(raw):
# open('/t/raw.json', 'w').write(raw)
+ body = ''
+ data = json.loads(raw)['props']['pageProps']['cp2Content']
+ body += f'<div style="color: red; font-size:small; font-weight:bold;">{data.get("flyTitle", "")}</div>'
+ body += f'<h1>{data["headline"]}</h1>'
+ body += f'<div style="font-style: italic; color:#202020;">{data.get("rubric", "")}</div>'
try:
- data = json.loads(raw)['props']['pageProps']['content']
- except KeyError as e:
- raise JSONHasNoContent(e)
- if isinstance(data, list):
- data = data[0]
- body = root.xpath('//body')[0]
- for child in tuple(body):
- body.remove(child)
- article = E(body, 'article')
- E(article, 'div', data['subheadline'], id='subhead')
- E(article, 'h1', data['headline'])
- E(article, 'p', data['description'], id='desc')
- if data['dateline'] is None:
- E(article, 'p', (data['datePublishedString'] or ''), id='date')
+ date = data['dateModified']
+ except Exception:
+ date = data['datePublished']
+ dt = datetime.fromisoformat(date[:-1]) + timedelta(seconds=time.timezone)
+ dt = dt.strftime('%b %d, %Y %I:%M %p')
+ if data.get('dateline') is None:
+ body += f'<p style="color: gray; font-size: small;">{dt}</p>'
else:
- E(article, 'p', (data['datePublishedString'] or '') + ' | ' + (data['dateline']), id='date')
- main_image_url = safe_dict(data, 'image', 'main', 'url').get('canonical')
+ body += f'<p style="color: gray; font-size: small;">{dt + " | " + (data["dateline"])}</p>'
+ main_image_url = safe_dict(data, 'leadComponent') or ''
if main_image_url:
- div = E(article, 'div')
- try:
- E(div, 'img', src=main_image_url)
- except Exception:
- pass
- E(article, 'section', id='body')
+ body += process_node(data['leadComponent'])
+ for node in data.get('body'):
+ body += process_node(node)
+ return '<html><body><article>' + body + '</article></body></html>'
def cleanup_html_article(root):
main = root.xpath('//main')[0]
@@ -87,11 +109,10 @@ def process_url(url):
class econ_search(BasicNewsRecipe):
-
title = 'The Economist - Search'
language = 'en'
encoding = 'utf-8'
- __author__ = "Kovid Goyal"
+ __author__ = "unkn0wn"
description = (
'Use the Advanced section of the recipe to search.'
)
@@ -100,11 +121,11 @@ class econ_search(BasicNewsRecipe):
no_stylesheets = True
ignore_duplicate_articles = {'url'}
extra_css = '''
- em, blockquote { color:#202020; }
+ em { color:#202020; }
img {display:block; margin:0 auto;}
.sub { font-size:small; }
#subhead { color: #404040; font-size:small; font-weight:bold; }'
- #desc { font-style: italic; color:#202020; }
+ #descrip { font-style: italic; color:#202020; }
#date { color: gray; font-size:small; }
'''
@@ -137,37 +158,32 @@ class econ_search(BasicNewsRecipe):
remove_attributes = ['data-reactid', 'width', 'height']
# economist.com has started throttling after about 60% of the total has
# downloaded with connection reset by peer (104) errors.
- delay = 1
-
- def __init__(self, *args, **kwargs):
- BasicNewsRecipe.__init__(self, *args, **kwargs)
- if self.output_profile.short_name.startswith('kindle'):
- # Reduce image sizes to get file size below amazon's email
- # sending threshold
- self.web2disk_options.compress_news_images = True
- self.web2disk_options.compress_news_images_auto_size = 5
- self.log.warn('Kindle Output profile being used, reducing image quality to keep file size below amazon email threshold')
+ delay = 3
def get_browser(self, *args, **kwargs):
- # Needed to bypass cloudflare
- kwargs['user_agent'] = 'common_words/based'
+ kwargs['user_agent'] = (
+ 'Mozilla/5.0 (Linux; Android 14; 330333QCG Build/AP1A.140705.005; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/125.0.6422.165 Mobile Safari/537.36 Lamarr/3.37.0-3037003 (android)' # noqa
+ )
br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
- br.addheaders += [('Accept-Language', 'en-GB,en-US;q=0.9,en;q=0.8')]
+ br.addheaders += [('x-requested-with', 'com.economist.lamarr')]
return br
def preprocess_raw_html(self, raw, url):
# open('/t/raw.html', 'wb').write(raw.encode('utf-8'))
- root = parse(raw)
+ root_ = parse(raw)
if '/interactive/' in url:
- return '<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1><em>' \
- + 'This article is supposed to be read in a browser' \
- + '</em></article></body></html>'
- script = root.xpath('//script[@id="__NEXT_DATA__"]')
- if script:
- try:
- load_article_from_json(script[0].text, root)
- except JSONHasNoContent:
- cleanup_html_article(root)
+ return (
+ '<html><body><article><h1>'
+ + root_.xpath('//h1')[0].text + '</h1><em>'
+ + 'This article is supposed to be read in a browser'
+ + '</em></article></body></html>'
+ )
+
+ script = root_.xpath('//script[@id="__NEXT_DATA__"]')
+
+ html = load_article_from_json(script[0].text)
+
+ root = parse(html)
for div in root.xpath('//div[@class="lazy-image"]'):
noscript = list(div.iter('noscript'))
if noscript and noscript[0].text:
@@ -179,45 +195,32 @@ class econ_search(BasicNewsRecipe):
p.remove(noscript[0])
for x in root.xpath('//*[name()="script" or name()="style" or name()="source" or name()="meta"]'):
x.getparent().remove(x)
+ # the economist uses <small> for small caps with a custom font
+ for init in root.xpath('//span[@data-caps="initial"]'):
+ init.set('style', 'font-weight:bold;')
+ for x in root.xpath('//small'):
+ if x.text and len(x) == 0:
+ x.text = x.text.upper()
+ x.tag = 'span'
+ x.set('style', 'font-variant: small-caps')
+ for h2 in root.xpath('//h2'):
+ h2.tag = 'h4'
+ for x in root.xpath('//figcaption'):
+ x.set('style', 'text-align:center; font-size:small;')
+ for x in root.xpath('//cite'):
+ x.tag = 'blockquote'
+ x.set('style', 'color:#404040;')
raw = etree.tostring(root, encoding='unicode')
-
- raw_ar = read_url([], 'https://archive.is/latest/' + url)
- archive = BeautifulSoup(str(raw_ar))
- art = archive.find('article')
- if art:
- bdy = art.findAll('section')
- if len(bdy) != 0:
- content = bdy[-1]
- else:
- content = archive.find('div', attrs={'itemprop':'text'})
- soup = BeautifulSoup(raw)
- article = soup.find('section', attrs={'id':'body'})
- if not article:
- article = soup.find('div', attrs={'itemprop':'text'})
- if not article:
- article = soup.find(attrs={'itemprop':'blogPost'})
- if article and content:
- self.log('**fetching archive content')
- article.append(content)
-
- div = soup.findAll(attrs={'style': lambda x: x and x.startswith(
- ('color:rgb(13, 13, 13);', 'color: rgb(18, 18, 18);')
- )})
- for p in div:
- p.name = 'p'
- return str(soup)
- return raw
return raw
def preprocess_html(self, soup):
- for img in soup.findAll('img', attrs={'old-src':True}):
- img['src'] = img['old-src']
- for a in soup.findAll('a', href=True):
- a['href'] = 'http' + a['href'].split('http')[-1]
- for fig in soup.findAll('figure'):
- fig['class'] = 'sub'
- for sty in soup.findAll(attrs={'style':True}):
- del sty['style']
+ width = '600'
+ w = self.recipe_specific_options.get('res')
+ if w and isinstance(w, str):
+ width = w
+ for img in soup.findAll('img', src=True):
+ qua = 'economist.com/cdn-cgi/image/width=' + width + ',quality=80,format=auto/'
+ img['src'] = img['src'].replace('economist.com/', qua)
return soup
recipe_specific_options = {
@@ -234,7 +237,12 @@ class econ_search(BasicNewsRecipe):
'short': 'number of pages',
'long': 'number of pages of search results you want',
'default': '2'
- }
+ },
+ 'res': {
+ 'short': 'For hi-res images, select a resolution from the\nfollowing options: 834, 960, 1096, 1280, 1424',
+ 'long': 'This is useful for non e-ink devices, and for a lower file size\nthan the default, use from 480, 384, 360, 256.',
+ 'default': '600',
+ },
}
def parse_index(self):