#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals

import json, re
from datetime import date
from pprint import pformat

from calibre.web.feeds.news import BasicNewsRecipe
from calibre import prepare_string_for_xml as escape
from calibre.utils.iso8601 import parse_iso8601

# The issue to download, e.g. 'March-2023'; defaults to the current month.
edition = date.today().strftime('%B-%Y')

# Uncomment and edit to fetch a specific back issue instead:
# edition = 'March-2023'


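# Build a BeautifulSoup attrs dict that matches tags carrying any of the
# given space-separated CSS classes, for use as find()/findAll() kwargs.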
def classes(classes):
    q = frozenset(classes.split(' '))
    return dict(attrs={
        'class': lambda x: x and frozenset(x.split()).intersection(q)})


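# NatGeo pages ship the article data as JSON assigned to window['__natgeo__']
# in a <script> tag; grab that object and return its
# ['page']['content']['article'] subtree.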
def extract_json(raw):
    s = raw.find("window['__natgeo__']")
    script = raw[s:raw.find('</script>', s)]
    return json.loads(
        script[script.find('{'):].rstrip(';'))['page']['content']['article']


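# Render contributor groups as HTML. Going by the keys accessed below, each
# group item presumably looks like:
#   {'title': 'By', 'contributors': [{'displayName': 'Jane Doe'}]}
# (the example values here are illustrative, not taken from a real feed).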
def parse_contributors(grp):
    for item in grp:
        line = '<div class="auth">' + escape(item['title']) + ' '
        for c in item['contributors']:
            line += escape(c['displayName'])
        yield line + '</div>'


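# Render an image block: the 'image' dict is expected to carry 'src' and an
# optional description under 'dsc', with optional 'caption' and 'credit'
# HTML alongside it (shape inferred from the accesses below).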
def parse_lead_image(media):
    if 'dsc' in media['image']:
        yield '<p><div><img src="{}" alt="{}"></div>'.format(
            escape(media['image']['src'], True), escape(media['image']['dsc'], True))
    else:
        yield '<p><div><img src="{}"></div>'.format(escape(media['image']['src'], True))
    if 'caption' in media:
        yield '<div class="cap">' + media['caption'] + '</div>'
    if 'credit' in media:
        yield '<div class="cred">' + media['credit'] + '</div>'


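# Convert one body item to HTML. Inline items are dispatched on the
# 'cmsType' of their 'cntnt' payload (listicle, image, imagegroup,
# pullquote, editorsNote); anything else carries markup under 'mrkup',
# wrapped in the item's 'type' tag unless it is already an HTML fragment.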
def parse_body(item):
    c = item['cntnt']
    if item.get('type') == 'inline':
        if c.get('cmsType') == 'listicle':
            if 'title' in c:
                yield '<h3>' + escape(c['title']) + '</h3>'
            yield c['text']
        elif c.get('cmsType') == 'image':
            for line in parse_lead_image(c):
                yield line
        elif c.get('cmsType') == 'imagegroup':
            for imgs in c['images']:
                for line in parse_lead_image(imgs):
                    yield line
        elif c.get('cmsType') == 'pullquote':
            if 'quote' in c:
                yield '<blockquote>' + c['quote'] + '</blockquote>'
        elif c.get('cmsType') == 'editorsNote':
            if 'note' in c:
                yield '<blockquote>' + c['note'] + '</blockquote>'
    else:
        if c['mrkup'].strip().startswith('<'):
            yield c['mrkup']
        else:
            yield '<{tag}>{markup}</{tag}>'.format(
                tag=item['type'], markup=c['mrkup'])


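# Assemble a complete article: section label, title, description, byline,
# publication date, optional reading time, lead image, then the body items.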
def parse_article(edg):
    sc = edg['schma']
    yield '<h3 class="sub">' + escape(edg['sctn']) + '</h3>'
    yield '<h1>' + escape(sc['sclTtl']) + '</h1>'
    yield '<div class="byline">' + escape(sc['sclDsc']) + '</div><br>'
    for line in parse_contributors(edg['cntrbGrp']):
        yield line
    ts = parse_iso8601(edg['mdDt'], as_utc=False).strftime('%B %d, %Y')
    yield '<div class="time">Published: ' + escape(ts) + '</div>'
    if 'readTime' in edg:
        yield '<div class="time">' + escape(edg['readTime']) + '</div><br>'
    if edg.get('ldMda', {}).get('cmsType') == 'image':
        for line in parse_lead_image(edg['ldMda']):
            yield line
    for item in edg['bdy']:
        for line in parse_body(item):
            yield line


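# Walk the top-level frames ('frms') of the page JSON and emit a full HTML
# document: lead tiles contribute their image, article body tiles the
# article itself.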
def article_parse(data):
    yield '<html><body>'
    for frm in data['frms']:
        if not frm:
            continue
        for mod in frm.get('mods', ()):
            for edg in mod.get('edgs', ()):
                if edg.get('cmsType') == 'ImmersiveLeadTile':
                    if 'image' in edg.get('cmsImage', {}):
                        for line in parse_lead_image(edg['cmsImage']):
                            yield line
                if edg.get('cmsType') == 'ArticleBodyTile':
                    for line in parse_article(edg):
                        yield line
    yield '</body></html>'


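# Downloads the issue's table of contents, then rebuilds every article from
# its embedded JSON payload instead of scraping the rendered markup.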
class NatGeo(BasicNewsRecipe):
    title = 'National Geographic Magazine'
    description = ('The National Geographic, an American monthly magazine. '
                   'Inspiring people to care about the planet since 1888.')
    language = 'en'
    encoding = 'utf8'
    publisher = 'nationalgeographic.com'
    category = 'science, nat geo'
    __author__ = 'Kovid Goyal, unkn0wn'
    timefmt = ' [%a, %d %b, %Y]'
    no_stylesheets = True
    use_embedded_content = False
    remove_attributes = ['style']
    remove_javascript = False
    masthead_url = 'https://i.natgeofe.com/n/e76f5368-6797-4794-b7f6-8d757c79ea5c/ng-logo-2fl.png?w=600&h=600'
    remove_empty_feeds = True
    resolve_internal_links = True

    extra_css = '''
        .sub, blockquote { color:#404040; }
        .byline, i { font-style:italic; color:#202020; }
        .cap { text-align:center; font-size:small; }
        .cred { text-align:center; font-size:small; color:#404040; }
        .auth, .time { font-size:small; color:#5c5c5c; }
    '''

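    # Scrape the issue page for the cover image, the issue title and the
    # article list, grouped by section label. Note the walrus operator
    # below requires Python >= 3.8.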
    def parse_index(self):
        url = 'https://www.nationalgeographic.com/magazine/issue/' + edition.lower()
        self.log('Downloading ', url)
        self.timefmt = ' [' + edition + ']'
        soup = self.index_to_soup(url)
        # The cover is the first magazine image referenced on the issue page.
        cover = re.findall(
            r'https://i\.natgeofe\.com\S+?national-geographic-magazine-\S+?\.jpg',
            soup.decode('utf-8'))
        self.cover_url = cover[0] + '?w=1000&h=1000'

        name = soup.find(attrs={'class': lambda x: x and 'Header__Description' in x.split()})
        self.title = 'National Geographic ' + self.tag_to_string(name)
        ans = {}
        ans2 = None
        if photoart := soup.find(attrs={'class': lambda x: x and 'BgImagePromo__Container__Text__Link' in x.split()}):
            ans2 = []
            title = self.tag_to_string(photoart)
            url = 'https://www.nationalgeographic.com' + photoart['href']
            ans2.append(('Photo Essay', [{'title': title, 'url': url}]))
        for tile in soup.findAll(attrs={'class': 'GridPromoTile'}):
            # Collect only the articles inside this tile, skipping
            # interactive graphics pages, which have no article JSON.
            for article in tile.findAll('article'):
                a = article.find('a')
                url = a['href']
                if '/graphics/' in url:
                    continue
                section = self.tag_to_string(article.find(**classes('SectionLabel')))
                title = self.tag_to_string(article.find(**classes('PromoTile__Title--truncated')))
                articles = ans.setdefault(section, [])
                articles.append({'title': title, 'url': url})
        self.log(pformat(ans))
        if ans2:
            return list(ans.items()) + ans2
        return list(ans.items())

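    # Each article is rebuilt wholesale from its embedded JSON, so the
    # downloaded HTML is replaced rather than cleaned up in place.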
    def preprocess_raw_html(self, raw_html, url):
        data = extract_json(raw_html)
        return '\n'.join(article_parse(data))

    def preprocess_html(self, soup):
        # Request reasonably sized renditions from the natgeofe.com image server.
        for img in soup.findAll('img', src=True):
            img['src'] = img['src'] + '?w=1000&h=1000'
        return soup