Update wsj.recipe

unkn0w7n 2024-05-15 11:16:07 +05:30
parent d7cedee366
commit b96480d5ab


@@ -8,6 +8,11 @@ from calibre.web.feeds.news import BasicNewsRecipe, classes
 from html5_parser import parse
 
+# Past 6 editions are available for download.
+# For available past editions, see the log and set past_edition to, for example, '20240513'.
+past_edition = None
+
 
 class WSJ(BasicNewsRecipe):
     title = 'The Wall Street Journal'
     __author__ = 'unkn0wn'
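
The new module-level past_edition switch drives everything else in this commit. A minimal usage sketch, using the example date from the comment itself:

    # in wsj.recipe, before the class definition
    past_edition = '20240513'   # yyyymmdd string; leave as None for the latest edition
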
@@ -21,6 +26,7 @@ class WSJ(BasicNewsRecipe):
     no_javascript = True
     no_stylesheets = True
     remove_attributes = ['style', 'height', 'width']
+    resolve_internal_links = True
 
     extra_css = '''
         #subhed, em { font-style:italic; color:#202020; }
@@ -81,21 +87,22 @@ class WSJ(BasicNewsRecipe):
             div.extract()
         return soup
 
-    def _download_cover(self):
-        import os
-        from calibre import browser
-        from contextlib import closing
-        from calibre.utils.img import add_borders_to_image, image_to_data, save_cover_data_to
-        br = browser()
-        raw = br.open('https://www.frontpages.com/the-wall-street-journal/')
-        soup = BeautifulSoup(raw.read())
-        cu = 'https://www.frontpages.com' + soup.find('img', attrs={'id':'giornale-img'})['src']
-        self.report_progress(1, _('Downloading cover from %s')%cu)
-        with closing(br.open(cu, timeout=self.timeout)) as r:
-            cdata = r.read()
-        cpath = os.path.join(self.output_dir, 'cover.jpg')
-        save_cover_data_to(cdata, cpath)
-        self.cover_path = cpath
+    if not past_edition:
+        def _download_cover(self):
+            import os
+            from calibre import browser
+            from contextlib import closing
+            from calibre.utils.img import save_cover_data_to
+            br = browser()
+            raw = br.open('https://www.frontpages.com/the-wall-street-journal/')
+            soup = BeautifulSoup(raw.read())
+            cu = 'https://www.frontpages.com' + soup.find('img', attrs={'id':'giornale-img'})['src']
+            self.report_progress(1, _('Downloading cover from %s')%cu)
+            with closing(br.open(cu, timeout=self.timeout)) as r:
+                cdata = r.read()
+            cpath = os.path.join(self.output_dir, 'cover.jpg')
+            save_cover_data_to(cdata, cpath)
+            self.cover_path = cpath
 
     def get_browser(self, *args, **kw):
         kw['user_agent'] = 'okhttp/4.10.0'
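
This hunk gates the cover download on the current edition, presumably because frontpages.com only shows today's front page; the unused add_borders_to_image and image_to_data imports are dropped along the way. Note the unusual placement of the if at class scope: a class body is ordinary executable code, so the method only comes into existence when past_edition is unset at class-creation time. A toy sketch of that pattern, with hypothetical names not taken from the recipe:

    FETCH_COVER = True   # hypothetical stand-in for `not past_edition`

    class Demo:
        # this `if` runs while the class body executes, so the method
        # is only defined when the condition held at class creation
        if FETCH_COVER:
            def cover(self):
                return 'cover.jpg'

    print(hasattr(Demo(), 'cover'))   # True; False if FETCH_COVER were False
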
@@ -110,13 +117,27 @@ class WSJ(BasicNewsRecipe):
     def parse_index(self):
         index = 'https://bartender.mobile.dowjones.io'
         catalog = json.loads(self.index_to_soup(index + '/catalogs/v1/wsj/us/catalog.json', raw=True))
+        edit = []
         for itm in catalog['items']:
             if itm['type'] == 'ITP':
+                edit.append(itm['key'][3:])
+        self.log('**Past Editions available :', ', '.join(edit))
+
+        for itm in catalog['items']:
+            if past_edition:
+                if itm['key'] == 'ITP' + past_edition:
+                    key = itm['key']
+                    manifest = itm['manifest']
+                    dt = datetime.fromisoformat(itm['date'][:-1]) + timedelta(seconds=time.timezone)
+                    dt = dt.strftime('%b %d, %Y')
+                    self.log('Downloading Past Edition ', dt)
+                    self.timefmt = ' [' + dt + ']'
+                    break
+            elif itm['type'] == 'ITP':
                 key = itm['key']
                 manifest = itm['manifest']
                 dt = datetime.fromisoformat(itm['date'][:-1]) + timedelta(seconds=time.timezone)
                 dt = dt.strftime('%b %d, %Y')
-                self.log(dt)
+                self.log('Downloading ', dt)
                 self.timefmt = ' [' + dt + ']'
                 break
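
parse_index now logs every available back issue before picking one. Catalog keys are 'ITP' plus a yyyymmdd date, so itm['key'][3:] yields exactly the string a user should assign to past_edition; itm['date'][:-1] strips the trailing 'Z' that datetime.fromisoformat() could not parse before Python 3.11. A minimal sketch of the listing step, with hypothetical sample items in the shape the diff assumes (real data comes from catalog.json):

    catalog = {'items': [   # hypothetical sample data
        {'type': 'ITP', 'key': 'ITP20240515', 'date': '2024-05-15T04:00:00Z'},
        {'type': 'ITP', 'key': 'ITP20240514', 'date': '2024-05-14T04:00:00Z'},
    ]}

    # same collection logic as the new loop above
    edit = [itm['key'][3:] for itm in catalog['items'] if itm['type'] == 'ITP']
    print('**Past Editions available :', ', '.join(edit))
    # -> **Past Editions available : 20240515, 20240514
    # any listed value is a valid setting for past_edition
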
@@ -151,9 +172,20 @@ class WSJ(BasicNewsRecipe):
             for x in root.xpath('//image'):
                 x.tag = 'img'
             return BeautifulSoup(raw).prettify()
-        return raw
+        else:
+            soup = BeautifulSoup(raw)
+            url = soup.find('meta', attrs={'property':'og:url'})
+            if url:
+                h1 = soup.find('h1')
+                if h1:
+                    h1['title'] = url['content']
+            return soup.prettify()
 
     def populate_article_metadata(self, article, soup, first):
         lnk = soup.find('p', attrs={'id':'share-link'})
         if lnk:
             article.url = lnk['title']
+        art = soup.find('h1', attrs={'title':True})
+        if art:
+            self.log('found art ', art['title'])
+            article.url = art['title']
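
These last two hunks work as a pair: preprocess_raw_html stashes the article's canonical URL from the og:url meta tag in the h1's title attribute, and populate_article_metadata reads it back into article.url, overriding the share-link fallback because it is assigned last. A condensed, self-contained sketch of the round trip, using bs4 directly rather than calibre's BeautifulSoup wrapper:

    from bs4 import BeautifulSoup

    raw = ('<html><head><meta property="og:url" content="https://www.wsj.com/a1"/>'
           '</head><body><h1>Headline</h1></body></html>')
    soup = BeautifulSoup(raw, 'html.parser')

    # preprocess_raw_html step: stash the canonical URL on the <h1>
    url = soup.find('meta', attrs={'property': 'og:url'})
    if url:
        h1 = soup.find('h1')
        if h1:
            h1['title'] = url['content']

    # populate_article_metadata step: recover it for article.url
    art = soup.find('h1', attrs={'title': True})
    if art:
        print(art['title'])   # -> https://www.wsj.com/a1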