calibre/recipes/natgeomag.recipe
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals

import json
from datetime import date
from pprint import pformat

from calibre import prepare_string_for_xml as escape
from calibre.utils.iso8601 import parse_iso8601
from calibre.web.feeds.news import BasicNewsRecipe
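

# Build an attrs matcher for soup.find()/findAll() that accepts any tag whose
# class list shares at least one entry with the given space-separated classes.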
def classes(classes):
    q = frozenset(classes.split(' '))
    return dict(attrs={
        'class': lambda x: x and frozenset(x.split()).intersection(q)})
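

# Pull the JSON payload that the page embeds in a window['__natgeo__'] script
# tag and return its page.content.prismarticle section.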
def extract_json(raw):
    s = raw.find("window['__natgeo__']")
    script = raw[s:raw.find('</script>', s)]
    return json.loads(script[script.find('{'):].rstrip(';'))['page']['content']['prismarticle']
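

# Render each contributor group (its title plus the contributor names) as an
# 'auth' div.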
def parse_contributors(grp):
    for item in grp:
        line = '<div class="auth">' + escape(item['title']) + ' '
        for c in item['contributors']:
            line += escape(c['displayName'])
        yield line + '</div>'
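

# Emit the lead-image markup: the image itself plus optional caption and credit.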
def parse_lead_image(media):
    if 'image' in media:
        yield '<p>'
        if 'dsc' in media['image']:
            yield '<div><img src="{}" alt="{}"></div>'.format(
                escape(media['image']['src'], True), escape(media['image']['dsc'], True)
            )
        else:
            yield '<div><img src="{}"></div>'.format(escape(media['image']['src'], True))
        if 'caption' in media and 'credit' in media:
            yield '<div class="cap">' + media['caption'] + '<span class="cred"> ' + media['credit'] + '</span></div>'
        elif 'caption' in media:
            yield '<div class="cap">' + media['caption'] + '</div>'
        yield '</p>'
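

# Render inline media from the article body: single images ('Image') and image
# galleries ('ImageGroup'), each with optional caption and credit.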
def parse_inline(inl):
    if inl.get('content', {}).get('name', '') == 'Image':
        props = inl['content']['props']
        yield '<p>'
        if 'image' in props:
            yield '<div class="img"><img src="{}"></div>'.format(props['image']['src'])
        if 'caption' in props:
            yield '<div class="cap">{}<span class="cred">{}</span></div>'.format(
                props['caption'].get('text', ''), ' ' + props['caption'].get('credit', '')
            )
        yield '</p>'
    if inl.get('content', {}).get('name', '') == 'ImageGroup':
        if 'images' in inl['content']['props']:
            for imgs in inl['content']['props']['images']:
                yield '<p>'
                if 'src' in imgs:
                    yield '<div class="img"><img src="{}"></div>'.format(imgs['src'])
                if 'caption' in imgs:
                    yield '<div class="cap">{}<span class="cred">{}</span></div>'.format(
                        imgs['caption'].get('text', ''), ' ' + imgs['caption'].get('credit', '')
                    )
                yield '</p>'
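

# Recurse into a node's 'content' list, delegating dicts to parse_body() and
# passing plain strings through unchanged.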
def parse_cont(content):
    for cont in content.get('content', {}):
        if isinstance(cont, dict):
            yield from parse_body(cont)
        if isinstance(cont, str):
            yield cont
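

# Convert a body node into HTML: inline media is handed to parse_inline();
# anything else becomes an open/close tag pair (with href when present)
# wrapped around its parsed children.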
def parse_body(x):
    if isinstance(x, dict):
        if 'type' in x:
            tag = x['type']
            if tag == 'inline':
                yield ''.join(parse_inline(x))
            elif 'attrs' in x and 'href' in x.get('attrs', ''):
                yield '<' + tag + ' href="{}">'.format(x['attrs']['href'])
                for yld in parse_cont(x):
                    yield yld
                yield '</' + tag + '>'
            else:
                yield '<' + tag + '>'
                for yld in parse_cont(x):
                    yield yld
                yield '</' + tag + '>'
    elif isinstance(x, list):
        for y in x:
            if isinstance(y, dict):
                yield from parse_body(y)
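

# Assemble a full article: section label, title, description, contributors,
# publication date, read time, lead image and the main body components.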
def parse_article(edg):
    sc = edg['schma']
    yield '<div class="sub">' + escape(edg['sctn']) + '</div>'
    yield '<h1>' + escape(sc['sclTtl']) + '</h1>'
    yield '<div class="byline">' + escape(sc['sclDsc']) + '</div>'
    yield '<p>'
    for line in parse_contributors(edg.get('cntrbGrp', {})):
        yield line
    ts = parse_iso8601(edg['mdDt'], as_utc=False).strftime('%B %d, %Y')
    yield '<div class="time">Published: ' + escape(ts) + '</div>'
    if 'readTime' in edg:
        yield '<div class="time">' + escape(edg['readTime']) + '</div>'
    yield '</p>'
    if edg.get('ldMda', {}).get('cmsType') == 'image':
        for line in parse_lead_image(edg['ldMda']):
            yield line
    for main in edg['prismData']['mainComponents']:
        if main['name'] == 'Body':
            for item in main['props']['body']:
                if isinstance(item, dict):
                    if item.get('type', '') == 'inline':
                        yield ''.join(parse_inline(item))
                elif isinstance(item, list):
                    for line in item:
                        yield ''.join(parse_body(line))
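

# Walk the top-level frames/modules/edges of the extracted JSON and stitch the
# lead-image and article-body tiles into a single HTML document.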
def article_parse(data):
    yield "<html><body>"
    for frm in data['frms']:
        if not frm:
            continue
        for mod in frm.get('mods', ()):
            for edg in mod.get('edgs', ()):
                if edg.get('cmsType') == 'ImmersiveLeadTile':
                    if 'image' in edg.get('cmsImage', {}):
                        for line in parse_lead_image(edg['cmsImage']):
                            yield line
                if edg.get('cmsType') == 'ArticleBodyTile':
                    for line in parse_article(edg):
                        yield line
    yield "</body></html>"
class NatGeo(BasicNewsRecipe):
    title = u'National Geographic Magazine'
    description = (
        'The National Geographic, an American monthly magazine. '
        'Inspiring people to care about the planet since 1888.'
    )
    language = 'en'
    encoding = 'utf8'
    publisher = 'nationalgeographic.com'
    category = 'science, nat geo'
    __author__ = 'Kovid Goyal, unkn0wn'
    timefmt = ' [%a, %d %b, %Y]'
    no_stylesheets = True
    use_embedded_content = False
    remove_attributes = ['style']
    remove_javascript = False
    masthead_url = 'https://i.natgeofe.com/n/e76f5368-6797-4794-b7f6-8d757c79ea5c/ng-logo-2fl.png?w=600&h=600'
    remove_empty_feeds = True
    resolve_internal_links = True
    extra_css = '''
        blockquote { color:#404040; }
        .byline, i { font-style:italic; color:#202020; }
        .cap { font-size:small; }
        img {display:block; margin:0 auto;}
        .cred { font-style:italic; font-size:small; color:#404040; }
        .auth, .time, .sub { font-size:small; color:#5c5c5c; }
    '''
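
    # Options a user can override when downloading this recipe: the issue to
    # fetch and the image width requested from the image server.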
    recipe_specific_options = {
        'date': {
            'short': 'The date of the edition to download (Month-YYYY format)',
            'long': 'For example, March-2023'
        },
        'res': {
            'short': 'For hi-res images, select a resolution from the\nfollowing options: 800, 1000, 1200 or 1500',
            'long': 'This is useful for non e-ink devices. For a lower file size\nthan the default, use 400 or 300.',
            'default': '600'
        }
    }
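
    # Build the (section, list-of-articles) mapping by scraping the issue's
    # index page; the cover is taken from the page's og:image metadata.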
    def parse_index(self):
        edition = date.today().strftime('%B-%Y')
        d = self.recipe_specific_options.get('date')
        if d and isinstance(d, str):
            edition = d
        url = 'https://www.nationalgeographic.com/magazine/issue/' + edition.lower()
        self.log('Downloading ', url)
        self.timefmt = ' [' + edition + ']'
        soup = self.index_to_soup(url)
        # png = re.findall('https://i\.natgeofe\.com\S+?national-geographic-\S+?\.jpg', soup.decode('utf-8'))
        # self.cover_url = png[0] + '?w=1000&h=1000'
        self.cover_url = soup.find('meta', attrs={'property':'og:image'})['content'].split('?')[0] + '?w=1000'
        # self.title = 'National Geographic ' + self.tag_to_string(name)
        ans = {}
        if photoart := soup.find(attrs={'class':lambda x: x and 'BgImagePromo__Container__Text__Link' in x.split()}):
            section = 'Photo Essay'
            title = self.tag_to_string(photoart)
            url = photoart['href']
            if url.startswith('/'):
                url = 'https://www.nationalgeographic.com' + url
            articles = ans.setdefault(section, [])
            articles.append({'title': title, 'url': url})
        for promo in soup.findAll(**classes('OneUpPromoCard__Content')):
            url = promo.a['href']
            section = self.tag_to_string(promo.find(**classes('SectionLabel')))
            title = self.tag_to_string(promo.find(**classes('Card__Content__Heading')))
            articles = ans.setdefault(section, [])
            articles.append({'title': title, 'url': url})
        for grid in soup.findAll(attrs={'class':'GridPromoTile'}):
            # Collect the articles listed inside each grid promo tile.
            for article in grid.findAll('article'):
                a = article.find('a')
                url = a['href']
                if url.startswith('/'):
                    url = 'https://www.nationalgeographic.com' + url
                if '/graphics/' in url:
                    continue
                section = self.tag_to_string(article.find(**classes('SectionLabel')))
                title = self.tag_to_string(article.find(**classes('PromoTile__Title--truncated')))
                articles = ans.setdefault(section, [])
                articles.append({'title': title, 'url': url})
        self.log(pformat(ans))
        return list(ans.items())
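
    # The article HTML is rebuilt from the JSON embedded in the page rather
    # than from the served markup.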
    def preprocess_raw_html(self, raw_html, url):
        data = extract_json(raw_html)
        return '\n'.join(article_parse(data))
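
    # Demote h2 headings and append the requested '?w=' width to every image
    # URL so images are fetched at the chosen resolution.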
    def preprocess_html(self, soup):
        for h2 in soup.findAll('h2'):
            h2.name = 'h4'
        for img in soup.findAll('img', src=True):
            res = '?w=600'
            w = self.recipe_specific_options.get('res')
            if w and isinstance(w, str):
                res = '?w=' + w
            img['src'] = img['src'] + res
        return soup
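
    # Reuse the article description (the 'byline' div) as the summary shown in
    # the generated ebook's metadata.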
    def populate_article_metadata(self, article, soup, first):
        summ = soup.find(attrs={'class':'byline'})
        if summ:
            article.summary = self.tag_to_string(summ)
            article.text_summary = self.tag_to_string(summ)