Kovid Goyal 2023-10-18 06:36:22 +05:30
commit 9d7c20c267
No known key found for this signature in database
GPG Key ID: 06BC317B515ACE7C
2 changed files with 99 additions and 145 deletions

View File

@@ -22,6 +22,8 @@ try:
 except ImportError:
     from urllib import quote
+from calibre.scraper.simple import read_url
+from calibre.ptempfile import PersistentTemporaryFile
 
 needs_subscription = True
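The two new imports drive the archive.is fallback added below: read_url() fetches a page through calibre's headless scraper browser, and PersistentTemporaryFile keeps the fetched HTML on disk so the download engine can parse it later. A minimal sketch of the pattern, assuming a calibre environment and a hypothetical target URL:

    from calibre.ptempfile import PersistentTemporaryFile
    from calibre.scraper.simple import read_url

    storage = []  # read_url() caches scraper state in this list between calls
    raw = read_url(storage, 'https://archive.is/latest/https://www.wsj.com/')  # hypothetical URL
    pt = PersistentTemporaryFile('.html')  # persists after close(), unlike a NamedTemporaryFile
    pt.write(raw.encode('utf-8'))
    pt.close()
    # pt.name is a filesystem path the recipe framework can open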
@@ -59,55 +61,59 @@ class WSJ(BasicNewsRecipe):
     needs_subscription = needs_subscription
     WSJ_ITP = 'https://www.wsj.com/print-edition/today'
+    storage = []
 
     extra_css = '''
-        .imageCaption{font-size:small; text-align:center;}
-        .sub-head{font-style:italic; color:#404040;}
-        .bylineWrap{font-size:small; text-align:left;}
+        #big-top-caption { font-size:small; text-align:center; }
+        [data-type="tagline"] { font-style:italic; color:#202020; }
     '''
 
     keep_only_tags = [
-        dict(attrs={'class': lambda x: x and 'HeadlineContainer' in ''.join(x)}),
-        dict(name='main'),
+        dict(name=['h1', 'h2']),
+        dict(attrs={'aria-describedby':'big-top-caption'}),
+        dict(attrs={'id':'big-top-caption'}),
+        dict(name='article')
     ]
     remove_tags = [
-        classes(
-            'wsj-ad newsletter-inset media-object-video media-object-podcast print-header article-body-tools'
-            ' podcast--iframe dynamic-inset-overflow-button snippet-logo'),
-        dict(role=["toolbar", "complementary"]),
-        dict(attrs={"aria-label": ["Sponsored Offers", "What to Read Next", "breadcrumbs", "Listen To Article"]}),
-        dict(name='amp-iframe'),  # interactive graphics
+        dict(name=['button', 'svg', 'ufc-follow-author-widget']),
+        dict(attrs={'aria-label':['Sponsored Offers', 'Listen To Article', 'What to Read Next']}),
+        dict(attrs={'data-type':'inset'}),
+        dict(attrs={'id':lambda x: x and x.startswith(('wrapper-INLINE', 'audio-tag-inner-audio-'))})
     ]
 
-    def preprocess_html(self, soup):
-        for by in soup.findAll(**classes('bylineWrap')):
-            for p in by.findAll('p'):
-                p.name = 'span'
-        for img in soup.findAll('amp-img'):
-            img.name = 'img'
-            if img['src'] == 'https://s.wsj.net/img/meta/wsj-social-share.png':
-                img.extract()
-        h2 = soup.find('h2', attrs={'class':'sub-head'})
-        if h2:
-            h2.name = 'p'
-        return soup
-
-    def get_cover_url(self):
-        from datetime import date
-        cover = 'https://img.kiosko.net/' + date.today().strftime('%Y/%m/%d') + '/us/wsj.750.jpg'
-        br = BasicNewsRecipe.get_browser(self, verify_ssl_certificates=False)
-        try:
-            br.open(cover)
-        except:
-            index = 'https://en.kiosko.net/us/np/wsj.html'
-            soup = self.index_to_soup(index)
-            for image in soup.find('img', attrs={'src': lambda x: x and x.endswith('750.jpg')}):
-                if image['src'].startswith('/'):
-                    return 'https:' + image['src']
-                return image['src']
-            self.log("\nCover unavailable")
-            cover = None
-        return cover
+    articles_are_obfuscated = True
+
+    def get_obfuscated_article(self, url):
+        br = self.get_browser()
+        br.set_handle_redirect(False)
+        try:
+            br.open(url)
+        except Exception as e:
+            url = e.hdrs.get('location')
+        raw = read_url(self.storage, 'https://archive.is/latest/' + url)
+        pt = PersistentTemporaryFile('.html')
+        pt.write(raw.encode('utf-8'))
+        pt.close()
+        return pt.name
+
+    def preprocess_html(self, soup):
+        for img in soup.findAll('img', attrs={'old-src':True}):
+            img['src'] = img['old-src']
+        for p in soup.findAll('div', attrs={'data-type':['paragraph', 'image']}):
+            p.name = 'p'
+        for a in soup.findAll('a', href=True):
+            a['href'] = 'http' + a['href'].split('http')[-1]
+        for fig in soup.findAll('figure'):
+            if fig.find('video'):
+                fig.extract()
+        for figc in soup.findAll('figcaption'):
+            figc['id'] = 'big-top-caption'
+        if name := soup.find('h2', attrs={'itemprop':'name'}):
+            name.extract()
+        for h2 in soup.findAll('h2'):
+            if self.tag_to_string(h2).startswith('What to Read Next'):
+                h2.extract()
+        return soup
 
     # login {{{
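The new get_obfuscated_article() works by turning off redirect handling so that the 30x response from wsj.com surfaces as an exception whose headers carry the canonical article URL; that URL is then looked up on archive.is. A standalone sketch of the redirect capture, assuming plain mechanize and a hypothetical article URL (in the recipe the browser comes from self.get_browser() instead):

    import mechanize

    br = mechanize.Browser()
    br.set_handle_robots(False)
    br.set_handle_redirect(False)  # a 30x response now raises HTTPError instead of being followed
    url = 'https://www.wsj.com/articles/some-slug'  # hypothetical
    try:
        br.open(url)
    except Exception as e:
        url = e.hdrs.get('location')  # HTTPError.hdrs exposes the response headers
    # the recipe then fetches 'https://archive.is/latest/' + url via read_url()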
@@ -212,10 +218,10 @@ class WSJ(BasicNewsRecipe):
     def abs_wsj_url(self, href, modify_query=True):
         if not href.startswith('http'):
-            href = 'https://www.wsj.com' + href.replace('/articles/', '/amp/articles/')
+            href = 'https://www.wsj.com' + href
         if modify_query:
-            href = href.replace('/articles/', '/amp/articles/')
-        return href
+            href = href
+        return href.split('?')[0]
 
     def wsj_find_articles(self, url, ahed=False):
         root = self.index_to_soup(url, as_tree=True)
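The AMP rewriting is gone: abs_wsj_url() now only prefixes the domain and strips the query string (the modify_query branch is left as a no-op). Illustrative inputs and outputs, with self omitted and made-up slugs:

    abs_wsj_url('/articles/foo-93059a66')
    # -> 'https://www.wsj.com/articles/foo-93059a66'   (no /amp/ insertion any more)
    abs_wsj_url('https://www.wsj.com/articles/foo-93059a66?mod=hp_lead_pos1')
    # -> 'https://www.wsj.com/articles/foo-93059a66'   (tracking query stripped)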
@@ -243,30 +249,6 @@ class WSJ(BasicNewsRecipe):
         return articles
 
-    def wsj_find_wn_articles(self, feeds, root, CSSSelect):
-        articles = []
-        for a in CSSSelect('.style--strap--ND8Cuaip'):
-            if 'WHAT\'S NEWS' in self.tag_to_string(a).upper():
-                whats_news = a.getparent()
-                break
-        else:
-            self.log.error('Failed to find Whats News section')
-            return
-        for li in CSSSelect('li', whats_news):
-            a = next(CSSSelect('a', li))
-            if '/articles/' not in a.get('href', ''):
-                continue
-            title = self.tag_to_string(a).strip()
-            url = self.abs_wsj_url(a.get('href'))
-            desc = self.tag_to_string(li)
-            articles.append({'title': title, 'url': url,
-                             'description': desc, 'date': ''})
-            self.log('\tFound WN article:', title)
-            self.log('\t\t', desc + " " + url)
-        return articles
-
     def wsj_add_feed(self, feeds, title, url):
         try:
             for i in range(5):
@@ -299,17 +281,12 @@ class WSJ(BasicNewsRecipe):
         feeds = []
         for container in root.xpath('descendant::*[contains(@class, "WSJTheme--top-menu-item--")]'):
             for a in container.xpath('descendant::a[contains(@class, "WSJTheme--section-link--")]'):
-                frontpage = a.get('href').endswith('frontpage')
                 title = self.tag_to_string(a).capitalize().strip().replace('U.s.', 'U.S.')
                 if not title:
                     continue
                 url = self.abs_wsj_url(a.get('href'), modify_query=False)
                 self.log('Found section:', title, 'at', url)
                 self.wsj_add_feed(feeds, title, url)
-                if frontpage:
-                    articles = self.wsj_find_wn_articles(feeds, root, CSSSelect)
-                    if articles:
-                        feeds.append(("What's News", articles))
                 if self.test and len(feeds) >= self.test[0]:
                     break
         return feeds
@@ -318,6 +295,6 @@ class WSJ(BasicNewsRecipe):
         return [
             ('Testing', [
                 {'title': 'Subscriber Article',
-                 'url': self.abs_wsj_url('https://www.wsj.com/articles/egg-prices-jump-as-bird-flu-hits-poultry-flocks-11648900800')},
+                 'url': self.abs_wsj_url('https://www.wsj.com/articles/remington-gun-call-of-duty-video-game-93059a66')},
             ]),
         ]
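For context, articles_are_obfuscated / get_obfuscated_article is the documented BasicNewsRecipe hook this commit adopts: the method is called with each article URL and must return a path to a local file whose HTML is processed in place of the live page. A skeletal sketch of that contract (not code from this commit):

    class SomeRecipe(BasicNewsRecipe):
        articles_are_obfuscated = True

        def get_obfuscated_article(self, url):
            # fetch `url` by whatever means work, write the HTML to disk,
            # and return the file path; calibre parses that file instead of url
            return path_to_local_html  # hypothetical variable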

View File

@@ -22,6 +22,8 @@ try:
 except ImportError:
     from urllib import quote
+from calibre.scraper.simple import read_url
+from calibre.ptempfile import PersistentTemporaryFile
 
 needs_subscription = False
@@ -59,55 +61,59 @@ class WSJ(BasicNewsRecipe):
     needs_subscription = needs_subscription
     WSJ_ITP = 'https://www.wsj.com/print-edition/today'
+    storage = []
 
     extra_css = '''
-        .imageCaption{font-size:small; text-align:center;}
-        .sub-head{font-style:italic; color:#404040;}
-        .bylineWrap{font-size:small; text-align:left;}
+        #big-top-caption { font-size:small; text-align:center; }
+        [data-type="tagline"] { font-style:italic; color:#202020; }
    '''
 
     keep_only_tags = [
-        dict(attrs={'class': lambda x: x and 'HeadlineContainer' in ''.join(x)}),
-        dict(name='main'),
+        dict(name=['h1', 'h2']),
+        dict(attrs={'aria-describedby':'big-top-caption'}),
+        dict(attrs={'id':'big-top-caption'}),
+        dict(name='article')
     ]
     remove_tags = [
-        classes(
-            'wsj-ad newsletter-inset media-object-video media-object-podcast print-header article-body-tools'
-            ' podcast--iframe dynamic-inset-overflow-button snippet-logo'),
-        dict(role=["toolbar", "complementary"]),
-        dict(attrs={"aria-label": ["Sponsored Offers", "What to Read Next", "breadcrumbs", "Listen To Article"]}),
-        dict(name='amp-iframe'),  # interactive graphics
+        dict(name=['button', 'svg', 'ufc-follow-author-widget']),
+        dict(attrs={'aria-label':['Sponsored Offers', 'Listen To Article', 'What to Read Next']}),
+        dict(attrs={'data-type':'inset'}),
+        dict(attrs={'id':lambda x: x and x.startswith(('wrapper-INLINE', 'audio-tag-inner-audio-'))})
     ]
 
-    def preprocess_html(self, soup):
-        for by in soup.findAll(**classes('bylineWrap')):
-            for p in by.findAll('p'):
-                p.name = 'span'
-        for img in soup.findAll('amp-img'):
-            img.name = 'img'
-            if img['src'] == 'https://s.wsj.net/img/meta/wsj-social-share.png':
-                img.extract()
-        h2 = soup.find('h2', attrs={'class':'sub-head'})
-        if h2:
-            h2.name = 'p'
-        return soup
-
-    def get_cover_url(self):
-        from datetime import date
-        cover = 'https://img.kiosko.net/' + date.today().strftime('%Y/%m/%d') + '/us/wsj.750.jpg'
-        br = BasicNewsRecipe.get_browser(self, verify_ssl_certificates=False)
-        try:
-            br.open(cover)
-        except:
-            index = 'https://en.kiosko.net/us/np/wsj.html'
-            soup = self.index_to_soup(index)
-            for image in soup.find('img', attrs={'src': lambda x: x and x.endswith('750.jpg')}):
-                if image['src'].startswith('/'):
-                    return 'https:' + image['src']
-                return image['src']
-            self.log("\nCover unavailable")
-            cover = None
-        return cover
+    articles_are_obfuscated = True
+
+    def get_obfuscated_article(self, url):
+        br = self.get_browser()
+        br.set_handle_redirect(False)
+        try:
+            br.open(url)
+        except Exception as e:
+            url = e.hdrs.get('location')
+        raw = read_url(self.storage, 'https://archive.is/latest/' + url)
+        pt = PersistentTemporaryFile('.html')
+        pt.write(raw.encode('utf-8'))
+        pt.close()
+        return pt.name
+
+    def preprocess_html(self, soup):
+        for img in soup.findAll('img', attrs={'old-src':True}):
+            img['src'] = img['old-src']
+        for p in soup.findAll('div', attrs={'data-type':['paragraph', 'image']}):
+            p.name = 'p'
+        for a in soup.findAll('a', href=True):
+            a['href'] = 'http' + a['href'].split('http')[-1]
+        for fig in soup.findAll('figure'):
+            if fig.find('video'):
+                fig.extract()
+        for figc in soup.findAll('figcaption'):
+            figc['id'] = 'big-top-caption'
+        if name := soup.find('h2', attrs={'itemprop':'name'}):
+            name.extract()
+        for h2 in soup.findAll('h2'):
+            if self.tag_to_string(h2).startswith('What to Read Next'):
+                h2.extract()
+        return soup
 
     # login {{{
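This is the same preprocess_html() rework as in the first file. A sketch of what it does to archive.is markup, using plain BeautifulSoup on a made-up snippet (recipes receive soup already parsed):

    from bs4 import BeautifulSoup

    html = '''<div data-type="paragraph">Body text</div>
    <img old-src="https://images.wsj.net/im-1"/>
    <a href="https://archive.is/o/abc12/https://www.wsj.com/articles/foo">link</a>'''
    soup = BeautifulSoup(html, 'html.parser')
    for img in soup.find_all('img', attrs={'old-src': True}):
        img['src'] = img['old-src']  # restore the original image source kept in old-src
    for div in soup.find_all('div', attrs={'data-type': ['paragraph', 'image']}):
        div.name = 'p'  # turn wrapper divs back into paragraphs
    for a in soup.find_all('a', href=True):
        a['href'] = 'http' + a['href'].split('http')[-1]  # unwrap the archive.is link prefix
    print(soup.a['href'])  # -> https://www.wsj.com/articles/foo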
@@ -212,10 +218,10 @@ class WSJ(BasicNewsRecipe):
     def abs_wsj_url(self, href, modify_query=True):
         if not href.startswith('http'):
-            href = 'https://www.wsj.com' + href.replace('/articles/', '/amp/articles/')
+            href = 'https://www.wsj.com' + href
         if modify_query:
-            href = href.replace('/articles/', '/amp/articles/')
-        return href
+            href = href
+        return href.split('?')[0]
 
     def wsj_find_articles(self, url, ahed=False):
         root = self.index_to_soup(url, as_tree=True)
@@ -243,30 +249,6 @@ class WSJ(BasicNewsRecipe):
         return articles
 
-    def wsj_find_wn_articles(self, feeds, root, CSSSelect):
-        articles = []
-        for a in CSSSelect('.style--strap--ND8Cuaip'):
-            if 'WHAT\'S NEWS' in self.tag_to_string(a).upper():
-                whats_news = a.getparent()
-                break
-        else:
-            self.log.error('Failed to find Whats News section')
-            return
-        for li in CSSSelect('li', whats_news):
-            a = next(CSSSelect('a', li))
-            if '/articles/' not in a.get('href', ''):
-                continue
-            title = self.tag_to_string(a).strip()
-            url = self.abs_wsj_url(a.get('href'))
-            desc = self.tag_to_string(li)
-            articles.append({'title': title, 'url': url,
-                             'description': desc, 'date': ''})
-            self.log('\tFound WN article:', title)
-            self.log('\t\t', desc + " " + url)
-        return articles
-
     def wsj_add_feed(self, feeds, title, url):
         try:
             for i in range(5):
@@ -299,17 +281,12 @@ class WSJ(BasicNewsRecipe):
         feeds = []
         for container in root.xpath('descendant::*[contains(@class, "WSJTheme--top-menu-item--")]'):
             for a in container.xpath('descendant::a[contains(@class, "WSJTheme--section-link--")]'):
-                frontpage = a.get('href').endswith('frontpage')
                 title = self.tag_to_string(a).capitalize().strip().replace('U.s.', 'U.S.')
                 if not title:
                     continue
                 url = self.abs_wsj_url(a.get('href'), modify_query=False)
                 self.log('Found section:', title, 'at', url)
                 self.wsj_add_feed(feeds, title, url)
-                if frontpage:
-                    articles = self.wsj_find_wn_articles(feeds, root, CSSSelect)
-                    if articles:
-                        feeds.append(("What's News", articles))
                 if self.test and len(feeds) >= self.test[0]:
                     break
         return feeds
@@ -318,6 +295,6 @@ class WSJ(BasicNewsRecipe):
         return [
             ('Testing', [
                 {'title': 'Subscriber Article',
-                 'url': self.abs_wsj_url('https://www.wsj.com/articles/egg-prices-jump-as-bird-flu-hits-poultry-flocks-11648900800')},
+                 'url': self.abs_wsj_url('https://www.wsj.com/articles/remington-gun-call-of-duty-video-game-93059a66')},
             ]),
         ]