Wall Street Journal Magazine by unkn0wn

Merge branch 'master' of https://github.com/unkn0w7n/calibre
Kovid Goyal 2024-06-16 20:53:00 +05:30
commit 0d611c65f7
No known key found for this signature in database
GPG Key ID: 06BC317B515ACE7C

recipes/wsj_mag.recipe | 156 lines (new file)

@@ -0,0 +1,156 @@
import json
import time
from datetime import datetime, timedelta
from itertools import zip_longest
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.web.feeds.news import BasicNewsRecipe, classes
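

# Turn one entry from an article's JSON "media bucket" into an HTML snippet:
# images embed their manifest URL, videos fall back to their thumbnail image,
# and any other media type is skipped (the function returns None for it).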
def media_bucket(x):
    if x.get('type', '') == 'image':
        return '<img src="{}"><div class="figc">{}</div>\n'.format(
            x['manifest-url'], x['caption'] + ' ' + x['credit']
        )
    if x.get('type', '') == 'video':
        return '<img src="{}"><div class="figc">{}</div>\n'.format(
            x['thumbnail_url'], x['caption'] + ' ' + x['credit']
        )
    return


class WSJ(BasicNewsRecipe):
    title = 'Wall Street Journal Magazine'
    __author__ = 'unkn0wn'
    description = (
        'Eight times a year the print edition of WSJ. Magazine covers contemporary culture '
        'and the luminaries shaping it—with exclusive features and award-winning photography '
        'documenting the worlds of entertainment, fashion, design, art, food, travel and more.'
    )
    language = 'en_US'
    encoding = 'utf-8'
    no_javascript = True
    no_stylesheets = True
    remove_attributes = ['style', 'height', 'width']
    resolve_internal_links = True

    extra_css = '''
        #subhed, em { font-style:italic; color:#202020; }
        #byline, #time-to-read, #orig-pubdate-string, .article-byline, time, #flashline { font-size:small; }
        .figc { font-size:small; text-align:center; }
        img {display:block; margin:0 auto;}
    '''
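
    # Strip the inline-ad panels, ticker embeds and the metadata paragraphs
    # that the mobile feed embeds in every article before conversion.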
    remove_tags = [
        dict(name='panel', attrs={'id':'summary-image'}),
        dict(name='panel', attrs={'layout':'inline'}),
        dict(name='panel', attrs={'embed':'inner-article-ad'}),
        dict(name='span', attrs={'embed':'ticker'}),
        classes('lamrelated-articles-inset-panel'),
        dict(name='p', attrs={'id':[
            'keywords', 'orig-pubdate-number', 'type', 'is-custom-flashline', 'grouphed', 'author-ids', 'article-manifest',
            'body-extract', 'category', 'sub-category', 'socialhed', 'summary', 'deckline', 'article-flashline'
        ]}),
    ]

    remove_tags_before = [
        dict(name='p', attrs={'id':'orig-pubdate-string'})
    ]
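
    # The downloaded articles arrive as JPML (a <jpml> root with <panel> and
    # id-tagged <p> elements) rather than plain HTML, so the markup is renamed
    # and re-ordered here, and the media-bucket JSON carried in the 'metadata'
    # panel is expanded into inline images with captions.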
    def preprocess_html(self, soup):
        jpml = soup.find('jpml')
        if jpml:
            jpml.name = 'article'
        h1 = soup.find('p', attrs={'id':'headline'})
        if h1:
            h1.name = 'h1'
        for h2 in soup.findAll('h2'):
            h2.name = 'h4'
        dt = soup.find('p', attrs={'id':'orig-pubdate-string'})
        read = soup.find('p', attrs={'id':'time-to-read'})
        byl = soup.find('p', attrs={'id':'byline'})
        if dt and byl and read:
            dt.name = read.name = byl.name = 'div'
            byl.insert(0, dt)
            byl.insert(0, read)
        url = soup.find('p', attrs={'id':'share-link'})
        if url:
            url['title'] = self.tag_to_string(url).strip()
            url.string = ''
        panel = soup.find('panel', attrs={'id':'metadata'})
        if panel:
            buck = panel.find('p', attrs={'id':'media-bucket'})
            if buck:
                data = json.loads(buck.string)
                buck.extract()
                i_lst = [media_bucket(x) for x in data['items']]
                m_itm = soup.findAll('panel', attrs={'class':'media-item'})
                if i_lst and m_itm:
                    for x, y in list(zip_longest(m_itm, i_lst)):
                        x.insert_after(BeautifulSoup(y, 'html.parser'))
        return soup
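
    # The bartender endpoints appear to serve the WSJ mobile app, so requests
    # are made to look like that client: an okhttp user agent plus the app's
    # x-api-key header (the key is simply split across adjacent string
    # literals below).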
    def get_browser(self, *args, **kw):
        kw['user_agent'] = 'okhttp/4.10.0'
        br = BasicNewsRecipe.get_browser(self, *args, **kw)
        br.addheaders += [
            ('Accept-Encoding', 'gzip'),
            ('cache-control', 'no-cache'),
            ('x-api-key', ('e''b''2''4''0''8''c''d''2''7''f''8''9''1''3''d''4''2''1''f''a''3''d''5''c''3''d''0''7''c''c''f''0''3''4''c''b''4''4''8')),
        ]
        return br
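
    # Issue discovery: the catalog JSON lists available editions; the 'MAG'
    # entry points at a manifest whose '-pages_' items name the magazine's
    # sections, and each section JSON contributes its 'articles' and 'decos'
    # entries as feed articles.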
    def parse_index(self):
        index = 'https://bartender.mobile.dowjones.io'
        catalog = json.loads(self.index_to_soup(index + '/catalogs/v1/wsj/us/catalog.json', raw=True))

        for itm in catalog['items']:
            if itm['type'] == 'MAG':
                date = itm['date']
                key = itm['key']
                manifest = itm['manifest']

        dt = datetime.fromisoformat(date[:-1]) + timedelta(seconds=time.timezone)
        dt = dt.strftime('%b, %Y')
        self.log('Downloading ', dt)
        self.timefmt = ' [' + dt + ']'

        feeds = []

        manif = json.loads(self.index_to_soup(index + manifest, raw=True))
        for itm in manif['items']:
            for k, v in itm.items():
                if 'WSJMAG_PP' in k:
                    self.cover_url = v
                if '-pages_' in k:
                    section = k.split('-pages_')[0].replace('_', ' ')
                    if 'MAGAZINE' not in section:
                        continue
                    self.log(section)
                    articles = []

                    sec_parse = json.loads(self.index_to_soup(index + v, raw=True))
                    data = sec_parse['articles']
                    for art in data:
                        title = data[art]['headline']
                        desc = data[art]['summary']
                        url = index + '/contents/v1/wsj/us/' + key + '/' + data[art]['filename']
                        self.log(' ', title, '\n\t', desc)
                        articles.append({'title': title, 'description': desc, 'url': url})
                    data = sec_parse['decos']
                    for art in data:
                        title = data[art]['headline']
                        desc = data[art]['summary']
                        url = index + '/contents/v1/wsj/us/' + key + '/' + data[art]['filename']
                        self.log(' ', title, '\n\t', desc)
                        articles.append({'title': title, 'description': desc, 'url': url})
                    feeds.append((section, articles))
        return feeds
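
    # Prettifying the raw payload gives the cleaner well-formed markup to work
    # with, and the share link stashed in the 'share-link' paragraph's title
    # during preprocess_html becomes the article's URL in the book metadata.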
    def preprocess_raw_html(self, raw, url):
        return BeautifulSoup(raw).prettify()

    def populate_article_metadata(self, article, soup, first):
        lnk = soup.find('p', attrs={'id':'share-link'})
        if lnk:
            article.url = lnk['title']