commit f0839afb68
Kovid Goyal, 2024-11-29 18:33:50 +05:30
No known key found for this signature in database
GPG Key ID: 06BC317B515ACE7C
4 changed files with 298 additions and 205 deletions

View File

@@ -85,33 +85,60 @@ def load_article_from_json(raw, root):
         process_node(node, article)

-def load_article_from_web_json(raw, root):
+def process_web_node(node):
+    ntype = node.get('type', '')
+    if ntype == 'CROSSHEAD':
+        if node.get('textHtml'):
+            return f'<h4>{node.get("textHtml")}</h4>'
+        return f'<h4>{node.get("text", "")}</h4>'
+    elif ntype == 'PARAGRAPH':
+        if node.get('textHtml'):
+            return f'<p>{node.get("textHtml")}</p>'
+        return f'<p>{node.get("text", "")}</p>'
+    elif ntype == 'IMAGE':
+        alt = "" if node.get("altText") is None else node.get("altText")
+        cap = ""
+        if node.get('caption'):
+            if node['caption'].get('textHtml') is not None:
+                cap = node['caption']['textHtml']
+        return f'<div><img src="{node["url"]}" title="{alt}"></div><div style="text-align:center; font-size:small;">{cap}</div>'
+    elif ntype == 'PULL_QUOTE':
+        if node.get('textHtml'):
+            return f'<blockquote>{node.get("textHtml")}</blockquote>'
+        return f'<blockquote>{node.get("text", "")}</blockquote>'
+    elif ntype == 'DIVIDER':
+        return '<hr>'
+    elif ntype == 'INFOBOX':
+        for x in safe_dict(node, 'components'):
+            return f'<blockquote>{process_web_node(x)}</blockquote>'
+    elif ntype:
+        print('** ', ntype)
+    return ''
+
+def load_article_from_web_json(raw):
     # open('/t/raw.json', 'w').write(raw)
+    body = ''
+    data = json.loads(raw)['props']['pageProps']['cp2Content']
+    body += f'<div style="color: red; font-size:small; font-weight:bold;">{data.get("flyTitle", "")}</div>'
+    body += f'<h1>{data["headline"]}</h1>'
+    body += f'<div style="font-style: italic; color:#202020;">{data.get("rubric", "")}</div>'
     try:
-        data = json.loads(raw)['props']['pageProps']['content']
-    except KeyError as e:
-        raise JSONHasNoContent(e)
-    if isinstance(data, list):
-        data = data[0]
-    body = root.xpath('//body')[0]
-    for child in tuple(body):
-        body.remove(child)
-    article = E(body, 'article')
-    E(article, 'div', data['subheadline'], id='subhead')
-    E(article, 'h1', data['headline'])
-    E(article, 'p', data['description'], id='desc')
-    if data['dateline'] is None:
-        E(article, 'p', (data['datePublishedString'] or ''), id='date')
+        date = data['dateModified']
+    except Exception:
+        date = data['datePublished']
+    dt = datetime.fromisoformat(date[:-1]) + timedelta(seconds=time.timezone)
+    dt = dt.strftime('%b %d, %Y %I:%M %p')
+    if data.get('dateline') is None:
+        body += f'<p style="color: gray; font-size: small;">{dt}</p>'
     else:
-        E(article, 'p', (data['datePublishedString'] or '') + ' | ' + (data['dateline']), id='date')
-    main_image_url = safe_dict(data, 'image', 'main', 'url').get('canonical')
-    if main_image_url:
-        div = E(article, 'div')
-        try:
-            E(div, 'img', src=main_image_url)
-        except Exception:
-            pass
-    E(article, 'section', id='body')
+        body += f'<p style="color: gray; font-size: small;">{dt + " | " + (data["dateline"])}</p>'
+    main_image_url = safe_dict(data, 'leadComponent') or ''
+    if main_image_url:
+        body += process_web_node(data['leadComponent'])
+    for node in data.get('body'):
+        body += process_web_node(node)
+    return '<html><body><article>' + body + '</article></body></html>'

 def cleanup_html_article(root):

@@ -214,27 +241,28 @@ class Economist(BasicNewsRecipe):
             'long': 'This is useful for non e-ink devices, and for a lower file size\nthan the default, use from 480, 384, 360, 256.',
             'default': '600',
         },
-        'archive': {
-            'short': 'Past Edition fails?',
-            'long': 'enter yes, this will fetch content from wayback machine.',
-            'default': 'no',
-        },
     }

     def __init__(self, *args, **kwargs):
         BasicNewsRecipe.__init__(self, *args, **kwargs)
-        c = self.recipe_specific_options.get('archive')
+        c = self.recipe_specific_options.get('date')
         if c and isinstance(c, str):
-            if c.lower() == 'yes':
-                self.from_archive = True
+            self.from_archive = True

     needs_subscription = False

     def get_browser(self, *args, **kwargs):
-        # Needed to bypass cloudflare
-        kwargs['user_agent'] = 'common_words/based'
-        br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
-        br.addheaders += [('Accept-Language', 'en-GB,en-US;q=0.9,en;q=0.8')]
+        if self.from_archive:
+            kwargs['user_agent'] = (
+                'Mozilla/5.0 (Linux; Android 14; 330333QCG Build/AP1A.140705.005; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/125.0.6422.165 Mobile Safari/537.36 Lamarr/3.37.0-3037003 (android)'  # noqa
+            )
+            br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
+            br.addheaders += [('x-requested-with', 'com.economist.lamarr')]
+        else:
+            # Needed to bypass cloudflare
+            kwargs['user_agent'] = 'common_words/based'
+            br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
+            br.addheaders += [('Accept-Language', 'en-GB,en-US;q=0.9,en;q=0.8')]
         return br

     def publication_date(self):

@@ -313,11 +341,12 @@ class Economist(BasicNewsRecipe):
         return self.economist_return_index(ans)

     def economist_parse_index(self, raw):
-        edition_date = self.recipe_specific_options.get('date')
-        if edition_date and isinstance(edition_date, str):
-            data = json.loads(raw)['data']['section']
-        else:
-            data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
+        # edition_date = self.recipe_specific_options.get('date')
+        # if edition_date and isinstance(edition_date, str):
+        #     data = json.loads(raw)['data']['section']
+        # else:
+        #     data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
+        data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
         dt = datetime.fromisoformat(data['datePublished'][:-1]) + timedelta(seconds=time.timezone)
         dt = dt.strftime('%b %d, %Y')
         self.timefmt = ' [' + dt + ']'

@@ -364,8 +393,6 @@ class Economist(BasicNewsRecipe):
         article.url = soup.find('h1')['title']

     def preprocess_html(self, soup):
-        if self.from_archive:
-            return self.preprocess_web_html(soup)
         width = '600'
         w = self.recipe_specific_options.get('res')
         if w and isinstance(w, str):

@@ -529,17 +556,20 @@ class Economist(BasicNewsRecipe):
     def preprocess_raw_web_html(self, raw, url):
         # open('/t/raw.html', 'wb').write(raw.encode('utf-8'))
-        root = parse(raw)
+        root_ = parse(raw)
         if '/interactive/' in url:
-            return '<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1><em>' \
-                + 'This article is supposed to be read in a browser' \
-                + '</em></article></body></html>'
-        script = root.xpath('//script[@id="__NEXT_DATA__"]')
-        if script:
-            try:
-                load_article_from_web_json(script[0].text, root)
-            except JSONHasNoContent:
-                cleanup_html_article(root)
+            return (
+                '<html><body><article><h1>'
+                + root_.xpath('//h1')[0].text + '</h1><em>'
+                + 'This article is supposed to be read in a browser'
+                + '</em></article></body></html>'
+            )
+        script = root_.xpath('//script[@id="__NEXT_DATA__"]')
+        html = load_article_from_web_json(script[0].text)
+        root = parse(html)
         for div in root.xpath('//div[@class="lazy-image"]'):
             noscript = list(div.iter('noscript'))
             if noscript and noscript[0].text:

@@ -551,7 +581,23 @@ class Economist(BasicNewsRecipe):
                 p.remove(noscript[0])
         for x in root.xpath('//*[name()="script" or name()="style" or name()="source" or name()="meta"]'):
             x.getparent().remove(x)
+        # the economist uses <small> for small caps with a custom font
+        for init in root.xpath('//span[@data-caps="initial"]'):
+            init.set('style', 'font-weight:bold;')
+        for x in root.xpath('//small'):
+            if x.text and len(x) == 0:
+                x.text = x.text.upper()
+                x.tag = 'span'
+                x.set('style', 'font-variant: small-caps')
+        for h2 in root.xpath('//h2'):
+            h2.tag = 'h4'
+        for x in root.xpath('//figcaption'):
+            x.set('style', 'text-align:center; font-size:small;')
+        for x in root.xpath('//cite'):
+            x.tag = 'blockquote'
+            x.set('style', 'color:#404040;')
         raw = etree.tostring(root, encoding='unicode')
+        return raw
         raw_ar = read_url([], 'https://archive.is/latest/' + url)
         archive = BeautifulSoup(str(raw_ar))
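Note: the new web-JSON path above replaces lxml tree building (the old E() helper) with plain HTML string assembly. A minimal sketch of what process_web_node produces, using hypothetical node dicts that are only shaped like the ones this code reads (not a real __NEXT_DATA__ capture):

# Assumes process_web_node() from the diff above is in scope.
# The sample nodes are made up; only their structure matches the code.
sample_nodes = [
    {'type': 'PARAGRAPH', 'textHtml': 'A first paragraph.'},
    {'type': 'CROSSHEAD', 'text': 'A section heading'},
    {'type': 'IMAGE', 'url': 'https://example.com/lead.jpg',
     'altText': 'Lead image', 'caption': {'textHtml': 'A caption'}},
    {'type': 'DIVIDER'},
]
body = ''.join(process_web_node(n) for n in sample_nodes)
# body is now '<p>A first paragraph.</p><h4>A section heading</h4>
# <div><img ...></div><div ...>A caption</div><hr>', which
# load_article_from_web_json() wraps in <html><body><article>...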

View File

@@ -2,20 +2,46 @@
 # License: GPLv3 Copyright: 2008, Kovid Goyal <kovid at kovidgoyal.net>

 import json
+import time
+from datetime import datetime, timedelta

 from html5_parser import parse
 from lxml import etree

-from calibre.ebooks.BeautifulSoup import BeautifulSoup, NavigableString, Tag
-from calibre.scraper.simple import read_url
+from calibre.ebooks.BeautifulSoup import NavigableString, Tag
 from calibre.web.feeds.news import BasicNewsRecipe

-def E(parent, name, text='', **attrs):
-    ans = parent.makeelement(name, **attrs)
-    ans.text = text
-    parent.append(ans)
-    return ans
+def process_node(node):
+    ntype = node.get('type', '')
+    if ntype == 'CROSSHEAD':
+        if node.get('textHtml'):
+            return f'<h4>{node.get("textHtml")}</h4>'
+        return f'<h4>{node.get("text", "")}</h4>'
+    elif ntype == 'PARAGRAPH':
+        if node.get('textHtml'):
+            return f'<p>{node.get("textHtml")}</p>'
+        return f'<p>{node.get("text", "")}</p>'
+    elif ntype == 'IMAGE':
+        alt = "" if node.get("altText") is None else node.get("altText")
+        cap = ""
+        if node.get('caption'):
+            if node['caption'].get('textHtml') is not None:
+                cap = node['caption']['textHtml']
+        return f'<div><img src="{node["url"]}" title="{alt}"></div><div style="text-align:center; font-size:small;">{cap}</div>'
+    elif ntype == 'PULL_QUOTE':
+        if node.get('textHtml'):
+            return f'<blockquote>{node.get("textHtml")}</blockquote>'
+        return f'<blockquote>{node.get("text", "")}</blockquote>'
+    elif ntype == 'DIVIDER':
+        return '<hr>'
+    elif ntype == 'INFOBOX':
+        for x in safe_dict(node, 'components'):
+            return f'<blockquote>{process_node(x)}</blockquote>'
+    elif ntype:
+        print('** ', ntype)
+    return ''

 def safe_dict(data, *names):
     ans = data

@@ -28,33 +54,29 @@ class JSONHasNoContent(ValueError):
     pass

-def load_article_from_json(raw, root):
+def load_article_from_json(raw):
     # open('/t/raw.json', 'w').write(raw)
+    body = ''
+    data = json.loads(raw)['props']['pageProps']['cp2Content']
+    body += f'<div style="color: red; font-size:small; font-weight:bold;">{data.get("flyTitle", "")}</div>'
+    body += f'<h1>{data["headline"]}</h1>'
+    body += f'<div style="font-style: italic; color:#202020;">{data.get("rubric", "")}</div>'
     try:
-        data = json.loads(raw)['props']['pageProps']['content']
-    except KeyError as e:
-        raise JSONHasNoContent(e)
-    if isinstance(data, list):
-        data = data[0]
-    body = root.xpath('//body')[0]
-    for child in tuple(body):
-        body.remove(child)
-    article = E(body, 'article')
-    E(article, 'div', data['subheadline'], id='subhead')
-    E(article, 'h1', data['headline'])
-    E(article, 'p', data['description'], id='desc')
-    if data['dateline'] is None:
-        E(article, 'p', (data['datePublishedString'] or ''), id='date')
+        date = data['dateModified']
+    except Exception:
+        date = data['datePublished']
+    dt = datetime.fromisoformat(date[:-1]) + timedelta(seconds=time.timezone)
+    dt = dt.strftime('%b %d, %Y %I:%M %p')
+    if data.get('dateline') is None:
+        body += f'<p style="color: gray; font-size: small;">{dt}</p>'
     else:
-        E(article, 'p', (data['datePublishedString'] or '') + ' | ' + (data['dateline']), id='date')
-    main_image_url = safe_dict(data, 'image', 'main', 'url').get('canonical')
-    if main_image_url:
-        div = E(article, 'div')
-        try:
-            E(div, 'img', src=main_image_url)
-        except Exception:
-            pass
-    E(article, 'section', id='body')
+        body += f'<p style="color: gray; font-size: small;">{dt + " | " + (data["dateline"])}</p>'
+    main_image_url = safe_dict(data, 'leadComponent') or ''
+    if main_image_url:
+        body += process_node(data['leadComponent'])
+    for node in data.get('body'):
+        body += process_node(node)
+    return '<html><body><article>' + body + '</article></body></html>'

 def cleanup_html_article(root):
     main = root.xpath('//main')[0]

@@ -87,11 +109,10 @@ def process_url(url):
 class econ_search(BasicNewsRecipe):

     title = 'The Economist - Search'
     language = 'en'
     encoding = 'utf-8'
-    __author__ = "Kovid Goyal"
+    __author__ = "unkn0wn"
     description = (
         'Use the Advanced section of the recipe to search.'
     )

@@ -100,11 +121,11 @@ class econ_search(BasicNewsRecipe):
     no_stylesheets = True
     ignore_duplicate_articles = {'url'}
     extra_css = '''
-        em, blockquote { color:#202020; }
+        em { color:#202020; }
         img {display:block; margin:0 auto;}
         .sub { font-size:small; }
         #subhead { color: #404040; font-size:small; font-weight:bold; }'
-        #desc { font-style: italic; color:#202020; }
+        #descrip { font-style: italic; color:#202020; }
         #date { color: gray; font-size:small; }
     '''

@@ -137,37 +158,32 @@ class econ_search(BasicNewsRecipe):
     remove_attributes = ['data-reactid', 'width', 'height']
     # economist.com has started throttling after about 60% of the total has
     # downloaded with connection reset by peer (104) errors.
-    delay = 1
+    delay = 3

-    def __init__(self, *args, **kwargs):
-        BasicNewsRecipe.__init__(self, *args, **kwargs)
-        if self.output_profile.short_name.startswith('kindle'):
-            # Reduce image sizes to get file size below amazon's email
-            # sending threshold
-            self.web2disk_options.compress_news_images = True
-            self.web2disk_options.compress_news_images_auto_size = 5
-            self.log.warn('Kindle Output profile being used, reducing image quality to keep file size below amazon email threshold')

     def get_browser(self, *args, **kwargs):
-        # Needed to bypass cloudflare
-        kwargs['user_agent'] = 'common_words/based'
+        kwargs['user_agent'] = (
+            'Mozilla/5.0 (Linux; Android 14; 330333QCG Build/AP1A.140705.005; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/125.0.6422.165 Mobile Safari/537.36 Lamarr/3.37.0-3037003 (android)'  # noqa
+        )
         br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
-        br.addheaders += [('Accept-Language', 'en-GB,en-US;q=0.9,en;q=0.8')]
+        br.addheaders += [('x-requested-with', 'com.economist.lamarr')]
         return br

     def preprocess_raw_html(self, raw, url):
         # open('/t/raw.html', 'wb').write(raw.encode('utf-8'))
-        root = parse(raw)
+        root_ = parse(raw)
         if '/interactive/' in url:
-            return '<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1><em>' \
-                + 'This article is supposed to be read in a browser' \
-                + '</em></article></body></html>'
-        script = root.xpath('//script[@id="__NEXT_DATA__"]')
-        if script:
-            try:
-                load_article_from_json(script[0].text, root)
-            except JSONHasNoContent:
-                cleanup_html_article(root)
+            return (
+                '<html><body><article><h1>'
+                + root_.xpath('//h1')[0].text + '</h1><em>'
+                + 'This article is supposed to be read in a browser'
+                + '</em></article></body></html>'
+            )
+        script = root_.xpath('//script[@id="__NEXT_DATA__"]')
+        html = load_article_from_json(script[0].text)
+        root = parse(html)
         for div in root.xpath('//div[@class="lazy-image"]'):
             noscript = list(div.iter('noscript'))
             if noscript and noscript[0].text:

@@ -179,45 +195,32 @@ class econ_search(BasicNewsRecipe):
                 p.remove(noscript[0])
         for x in root.xpath('//*[name()="script" or name()="style" or name()="source" or name()="meta"]'):
             x.getparent().remove(x)
+        # the economist uses <small> for small caps with a custom font
+        for init in root.xpath('//span[@data-caps="initial"]'):
+            init.set('style', 'font-weight:bold;')
+        for x in root.xpath('//small'):
+            if x.text and len(x) == 0:
+                x.text = x.text.upper()
+                x.tag = 'span'
+                x.set('style', 'font-variant: small-caps')
+        for h2 in root.xpath('//h2'):
+            h2.tag = 'h4'
+        for x in root.xpath('//figcaption'):
+            x.set('style', 'text-align:center; font-size:small;')
+        for x in root.xpath('//cite'):
+            x.tag = 'blockquote'
+            x.set('style', 'color:#404040;')
         raw = etree.tostring(root, encoding='unicode')
-        raw_ar = read_url([], 'https://archive.is/latest/' + url)
-        archive = BeautifulSoup(str(raw_ar))
-        art = archive.find('article')
-        if art:
-            bdy = art.findAll('section')
-            if len(bdy) != 0:
-                content = bdy[-1]
-            else:
-                content = archive.find('div', attrs={'itemprop':'text'})
-            soup = BeautifulSoup(raw)
-            article = soup.find('section', attrs={'id':'body'})
-            if not article:
-                article = soup.find('div', attrs={'itemprop':'text'})
-            if not article:
-                article = soup.find(attrs={'itemprop':'blogPost'})
-            if article and content:
-                self.log('**fetching archive content')
-                article.append(content)
-                div = soup.findAll(attrs={'style': lambda x: x and x.startswith(
-                    ('color:rgb(13, 13, 13);', 'color: rgb(18, 18, 18);')
-                )})
-                for p in div:
-                    p.name = 'p'
-                return str(soup)
-            return raw
         return raw

     def preprocess_html(self, soup):
-        for img in soup.findAll('img', attrs={'old-src':True}):
-            img['src'] = img['old-src']
-        for a in soup.findAll('a', href=True):
-            a['href'] = 'http' + a['href'].split('http')[-1]
-        for fig in soup.findAll('figure'):
-            fig['class'] = 'sub'
-        for sty in soup.findAll(attrs={'style':True}):
-            del sty['style']
+        width = '600'
+        w = self.recipe_specific_options.get('res')
+        if w and isinstance(w, str):
+            width = w
+        for img in soup.findAll('img', src=True):
+            qua = 'economist.com/cdn-cgi/image/width=' + width + ',quality=80,format=auto/'
+            img['src'] = img['src'].replace('economist.com/', qua)
         return soup

     recipe_specific_options = {

@@ -234,7 +237,12 @@ class econ_search(BasicNewsRecipe):
            'short': 'number of pages',
            'long': 'number of pages of search results you want',
            'default': '2'
-        }
+        },
+        'res': {
+            'short': 'For hi-res images, select a resolution from the\nfollowing options: 834, 960, 1096, 1280, 1424',
+            'long': 'This is useful for non e-ink devices, and for a lower file size\nthan the default, use from 480, 384, 360, 256.',
+            'default': '600',
+        },
     }

     def parse_index(self):
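
For reference, the rewritten preprocess_html above no longer strips inline styles; it routes every image through economist.com's cdn-cgi resizing endpoint instead. A minimal sketch of the URL rewrite it performs (the image path is a made-up example):

width = '600'  # or the user-supplied 'res' option
qua = 'economist.com/cdn-cgi/image/width=' + width + ',quality=80,format=auto/'
src = 'https://www.economist.com/media-assets/image/example.jpg'  # hypothetical
print(src.replace('economist.com/', qua))
# https://www.economist.com/cdn-cgi/image/width=600,quality=80,format=auto/media-assets/image/example.jpg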

View File

@@ -12,20 +12,13 @@ from calibre.ebooks.BeautifulSoup import NavigableString, Tag
 from calibre.web.feeds.news import BasicNewsRecipe

-def E(parent, name, text='', **attrs):
-    ans = parent.makeelement(name, **attrs)
-    ans.text = text
-    parent.append(ans)
-    return ans

 def process_node(node):
     ntype = node.get('type', '')
     if ntype == 'CROSSHEAD':
         if node.get('textHtml'):
             return f'<h4>{node.get("textHtml")}</h4>'
         return f'<h4>{node.get("text", "")}</h4>'
-    if ntype == 'PARAGRAPH':
+    elif ntype == 'PARAGRAPH':
         if node.get('textHtml'):
             return f'<p>{node.get("textHtml")}</p>'
         return f'<p>{node.get("text", "")}</p>'
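
The new imports (time, datetime, timedelta) exist for the timestamp handling these recipes now share: the API returns Zulu-time ISO strings, the trailing 'Z' is sliced off because datetime.fromisoformat() on Pythons older than 3.11 cannot parse it, and time.timezone shifts the result before formatting. A runnable sketch with a hypothetical timestamp:

import time
from datetime import datetime, timedelta

date = '2024-11-29T13:03:50Z'  # hypothetical dateModified/datePublished value
dt = datetime.fromisoformat(date[:-1]) + timedelta(seconds=time.timezone)
print(dt.strftime('%b %d, %Y %I:%M %p'))  # 'Nov 29, 2024 01:03 PM' on a UTC machine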