Update bloomberg.recipe

unkn0w7n 2024-07-14 14:09:43 +05:30
parent db03191457
commit 01f770da8d

bloomberg.recipe

@@ -1,64 +1,44 @@
 import json
-import random
 import time
+from datetime import datetime, timedelta

-from calibre.ptempfile import PersistentTemporaryFile
+from calibre.ebooks.BeautifulSoup import BeautifulSoup
 from calibre.web.feeds.news import BasicNewsRecipe
-from html5_parser import parse

 def get_contents(x):
     if x == '':
         return ''
-    otype = x.get('type', '')
-    if otype == 'text':
-        if 'attributes' in x:
-            if 'strong' in x['attributes']:
-                return '<strong>' + x.get('value', '') + ''.join(map(get_contents, x.get('content', ''))) + '</strong>'
-            if 'emphasis' in x['attributes']:
-                return '<em>' + x.get('value', '') + ''.join(map(get_contents, x.get('content', ''))) + '</em>'
-            return '<i>' + x.get('value', '') + ''.join(map(get_contents, x.get('content', ''))) + '</i>'
-        return x.get('value', '') + ''.join(map(get_contents, x.get('content', '')))
+    otype = x.get('role', '')
+    if otype == 'p':
+        return '<p>' + ''.join(map(get_contents, x.get('parts', ''))) + '</p>'
+    elif otype == 'text':
+        if 'style' in x:
+            return '<' + x['style'] + '>' + ''.join(map(get_contents, x.get('parts', ''))) + '</' + x['style'] + '>'
+        return x.get('text', '') + ''.join(map(get_contents, x.get('parts', '')))
     elif otype == 'br':
         return '<br>'
-    elif otype == 'paragraph':
-        return '<p>' + x.get('value', '') + ''.join(map(get_contents, x.get('content', ''))) + '</p>'
-    elif otype == 'heading':
-        return '<h3>' + x.get('value', '') + ''.join(map(get_contents, x.get('content', ''))) + '</h3>'
-    elif otype == 'list':
-        return '<ul>' + ''.join(map(get_contents, x.get('content', ''))) + '</ul>'
-    elif otype == 'listItem':
-        return '<li>' + x.get('value', '') + ''.join(map(get_contents, x.get('content', ''))) + '</li>'
-    elif otype == 'quote':
-        return '<blockquote class="col">' + x.get('value', '') + ''.join(map(get_contents, x.get('content', ''))) + '</blockquote>'
-    elif otype == 'media':
-        if x['subType'] == 'photo':
-            return '<div><div class="img"><img src="{}"></div><div class="cap">{} <span>{}</span></div></div>'.format(
-                x['data']['photo']['src'], x['data']['photo']['caption'], x['data']['photo']['credit'])
-        elif x['subType'] == 'chart':
-            if x['data'] and x['data']['chart']:
-                return '<div class="img"><img src="{}"></div>'.format(x['data']['chart']['fallback'])
-    elif otype == 'link':
-        if 'data' in x:
-            if 'href' in x['data']:
-                return '<a href="' + x['data']['href'] + '">' + x.get('value', '') + ''.join(map(get_contents, x.get('content', ''))) + '</a>'
-            return '<i>' + x.get('value', '') + ''.join(map(get_contents, x.get('content', ''))) + '</i>'
-        return '<i>' + x.get('value', '') + ''.join(map(get_contents, x.get('content', ''))) + '</i>'
-    elif otype == 'entity':
-        if x['subType'] == 'story':
-            if x['data'] and x['data']['link'] and x['data']['link']['destination']:
-                if 'web' in x['data']['link']['destination']:
-                    return '<a href="' + x['data']['link']['destination']['web'] + '">' + x.get('value', '') + ''.join(
-                        map(get_contents, x.get('content', ''))) + '</a>'
-                return '<i>' + x.get('value', '') + ''.join(map(get_contents, x.get('content', ''))) + '</i>'
-            return '<i>' + x.get('value', '') + ''.join(map(get_contents, x.get('content', ''))) + '</i>'
-        return '<i>' + x.get('value', '') + ''.join(map(get_contents, x.get('content', ''))) + '</i>'
-    elif otype in {'div', 'callout'}:
-        return '<div>' + x.get('value', '') + ''.join(map(get_contents, x.get('content', ''))) + '</div>'
-    elif not any(x == otype for x in ['', 'ad', 'inline-newsletter', 'tabularData']):
-        if any(b in x for b in ['value', 'content']):
-            return '<i>' + x.get('value', '') + ''.join(map(get_contents, x.get('content', ''))) + '</i>'
+    elif otype == 'anchor':
+        return '<em>' + ''.join(map(get_contents, x.get('parts', ''))) + '</em>'
+    elif otype == 'h3':
+        return '<h4>' + ''.join(map(get_contents, x.get('parts', ''))) + '</h4>'
+    elif otype == 'ul':
+        return '<ul>' + ''.join(map(get_contents, x.get('parts', ''))) + '</ul>'
+    elif otype == 'li':
+        return '<li>' + ''.join(map(get_contents, x.get('parts', ''))) + '</li>'
+    elif otype == 'webview':
+        return '<br>' + x['html'] + ''.join(map(get_contents, x.get('parts', '')))
+    elif otype == 'blockquote':
+        return '<blockquote>' + ''.join(map(get_contents, x.get('parts', ''))) + '</blockquote>'
+    elif otype in {'image', 'video'}:
+        return '<br><img src="{}"><div class="img">{}</div>\n'.format(
+            x['imageURLs']['default'], x['caption'] + '<i> ' + x['credit'] + '</i>'
+        )
+    elif otype in {'correction', 'disclaimer'}:
+        return '<p class="corr">' + ''.join(map(get_contents, x.get('parts', ''))) + '</p>'
+    elif otype not in ('', 'ad', 'inline-newsletter', 'tabularData'):
+        return '<i>' + ''.join(map(get_contents, x.get('parts', ''))) + '</i>'
     return ''
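
For reference: the new get_contents() recurses over a story's component tree, dispatching on each node's 'role' and concatenating HTML rendered from its 'parts'. A minimal hand-written sanity check, using the get_contents() defined above (the payload shape is inferred from the branches in the diff, not captured from the Bloomberg API):

    if __name__ == '__main__':
        # Hypothetical component tree in the shape get_contents() dispatches on.
        sample = {
            'role': 'p',
            'parts': [
                {'role': 'text', 'text': 'Markets were '},
                {'role': 'text', 'style': 'strong',
                 'parts': [{'role': 'text', 'text': 'volatile'}]},
            ],
        }
        print(get_contents(sample))
        # -> <p>Markets were <strong>volatile</strong></p>

Note that a styled 'text' node renders only its nested 'parts', so the emphasized text must sit one level down, as in the sample.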
@@ -67,126 +47,94 @@ class Bloomberg(BasicNewsRecipe):
     language = 'en'
     __author__ = 'unkn0wn'
     no_stylesheets = True
-    use_embedded_content = False
     remove_attributes = ['style', 'height', 'width']
+    encoding = 'utf-8'
     ignore_duplicate_articles = {'url', 'title'}
     masthead_url = 'https://assets.bbhub.io/company/sites/70/2022/09/logoBBGblck.svg'
-    description = ('Bloomberg delivers business and markets news, data, analysis, and video'
-                   ' to the world, featuring stories from Businessweek and Bloomberg News.')
-    simultaneous_downloads = 1
+    description = (
+        'Bloomberg delivers business and markets news, data, analysis, and video'
+        ' to the world, featuring stories from Businessweek and Bloomberg News.'
+    )
+    oldest_article = 1  # days
+    resolve_internal_links = True
+    remove_empty_feeds = True

     extra_css = '''
-        .auth {font-size:small; font-weight:bold;}
-        .time, .chart {font-size:small;}
-        .subhead, .cap span {font-style:italic; color:#404040;}
-        em, .col {color:#202020;}
-        .cat {font-size:small; color:gray;}
-        .news-figure-caption-text, .cap, .img {font-size:small; text-align:center;}
+        .auth { font-size:small; font-weight:bold; }
+        .subhead, .cap span { font-style:italic; color:#202020; }
+        em, blockquote { color:#202020; }
+        .cat { font-size:small; color:gray; }
+        .img, .news-figure-caption-text { font-size:small; text-align:center; }
+        .corr { font-size:small; font-style:italic; color:#404040; }
+        .chart { font-size:small; }
         .news-figure-credit {font-size:small; text-align:center; color:#202020;}
     '''

-    articles_are_obfuscated = True
-    resolve_internal_links = True
-
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        try:
-            br.open(url)
-        except Exception as e:
-            url = e.hdrs.get('location')
-        soup = self.index_to_soup(url)
-        link = soup.find('a', attrs={'href':lambda x: x and x.startswith('https://www.bloomberg.com')})
-        skip_sections =[ # add sections you want to skip
-            '/video/', '/videos/', '/media/', 'podcast'
-        ]
-        if any(x in link['href'] for x in skip_sections):
-            self.abort_article('Aborting Video article')
-        self.log('Found link: ', link['href'])
-        html = br.open(link['href']).read()
-        pt = PersistentTemporaryFile('.html')
-        pt.write(html)
-        pt.close()
-        return pt.name
-
-    def get_browser(self, *a, **kw):
-        kw['user_agent'] = 'Mozilla/5.0 (Windows NT 10.0; rv:109.0) Gecko/20100101 Firefox/119.0'
-        br = BasicNewsRecipe.get_browser(self, *a, **kw)
-        br.set_handle_redirect(False)
-        return br
-
-    feeds = [
-        ('Features',
-         'https://news.google.com/rss/search?q=when:27h+allinurl:bloomberg.com%2Fnews%2Ffeatures%2F&hl=en-US&gl=US&ceid=US:en'),
-        ('Opinion', 'https://news.google.com/rss/search?q=when:27h+allinurl:bloomberg.com%2Fopinion%2F&hl=en-US&gl=US&ceid=US:en'),
-        ('Newsletters',
-         'https://news.google.com/rss/search?q=when:27h+allinurl:bloomberg.com%2Fnews%2Fnewsletters%2F&hl=en-US&gl=US&ceid=US:en'),
-        ('News',
-         'https://news.google.com/rss/search?q=when:27h+allinurl:bloomberg.com%2Fnews%2Farticles%2F&hl=en-US&gl=US&ceid=US:en'),
-        ('Others', 'https://news.google.com/rss/search?q=when:27h+allinurl:https%3A%2F%2Fwww.bloomberg.com&hl=en-US&gl=US&ceid=US:en')
-    ]
+    def parse_index(self):
+        inx = 'https://cdn-mobapi.bloomberg.com'
+        sec = self.index_to_soup(inx + '/wssmobile/v1/navigation/bloomberg_app/search-v2', raw=True)
+        sec_data = json.loads(sec)['searchNav'][0]['items']
+
+        feeds = []
+        for sects in sec_data:
+            section = sects['title']
+            sec_slug = sects['links']['self']['href']
+            self.log(section)
+
+            articles = []
+            art_soup = self.index_to_soup(inx + sec_slug, raw=True)
+            for arts in json.loads(art_soup)['modules']:
+                if arts['stories']:
+                    for x in arts['stories']:
+                        if x.get('type', '') == 'article':
+                            dt = datetime.fromtimestamp(x['published'] + time.timezone)
+                            if (datetime.now() - dt) > timedelta(self.oldest_article):
+                                continue
+                            title = x['title']
+                            desc = x['autoGeneratedSummary']
+                            url = inx + '/wssmobile/v1/stories/' + x['internalID']
+                            self.log(' ', title, '\n\t', desc)
+                            articles.append({'title': title, 'description': desc, 'url': url})
+            feeds.append((section, articles))
+        return feeds

     def preprocess_raw_html(self, raw, *a):
-        root = parse(raw)
-        m = root.xpath('//script[@data-component-props="ArticleBody"]')
-        if not m:
-            m = root.xpath('//script[@data-component-props="FeatureBody"]')
-            if not m:
-                m2 = root.xpath('//script[@id="__NEXT_DATA__"]')
-
-        if m:
-            data = json.loads(m[0].text)
-            data = data['story']
-        elif m2:
-            data = json.loads(m2[0].text)
-            data = data['props']['pageProps']['story']
-
-        art_url = data['url']
-        if not art_url.startswith('http'):
-            art_url = 'https://www.bloomberg.com' + art_url
-
-        title = '<h1 title="{}">'.format(art_url) + data['headline'] + '</h1>'
+        data = json.loads(raw)
+
+        title = '<h1 title="{}">'.format(data['longURL']) + data['title'] + '</h1>'

         cat = subhead = lede = auth = caption = ''

         if 'primaryCategory' in data and data['primaryCategory'] is not None:
             cat = '<p class="cat">' + data['primaryCategory'] + '</p>'

-        if len(data['abstract']) != 0 and len(data['abstract']) == 2:
-            subhead = '<div class="subhead"><p>' + data['abstract'][0] + ' </p><p>' + data['abstract'][1] + '</p></div>'
-        else:
-            if 'summary' in data:
-                subhead = '<div class="subhead"><p>' + data['summary'] + '</p></div>'
+        if 'abstract' in data and data['abstract']:
+            subhead = '<div class="subhead"><ul><li>' + '</li><li>'.join(data['abstract']) + '</li></ul></div>'
+        elif 'summary' in data and data['summary']:
+            subhead = '<div class="subhead"><p>' + data['summary'] + '</p></div>'

         if 'byline' in data and data['byline'] is not None:
-            auth = '<div><span class="auth">' + data['byline']\
-                   + '</span> | <span class="time">' + data['publishedAt'][:-14] + '</span></div>'
+            dt = datetime.fromtimestamp(data['updatedAt'] + time.timezone)
+            auth = '<p class="auth">' + 'By ' + data['byline'] + ' | Updated on ' + dt.strftime('%b %d, %Y at %I:%M %p') + '</p>'

-        if 'ledeImageUrl' in data and data['ledeImageUrl'] is not None:
-            lede = '<p class="img"><img src="{}">'.format(data['ledeImageUrl'])
-
-        if 'ledeDescription' in data and data['ledeDescription'] is not None:
-            caption = '<span class="cap">' + data['ledeDescription'] + '</span>'
-        else:
-            if 'lede' in data and data['lede'] is not None:
-                if 'alt' in data['lede'] and data['lede']['alt'] is not None:
-                    caption = '<span class="cap">' + data['lede']['alt'] + '</span>'
-
-        if m:
-            time.sleep(3)
-            body = data['body']
-        elif m2:
-            body = ''
-            body_data = data['body']['content']
-            for x in body_data:
-                body += get_contents(x)
-            pause = random.choice((5, 6, 7, 8, 9))
-            self.log('Delay: ', pause, ' seconds')
-            time.sleep(pause)
-        return '<html><body>' + cat + title + subhead + auth + lede + caption + '<div>' + body + '</div></body></html>'
+        if 'ledeImage' in data and data['ledeImage'] is not None:
+            x = data['ledeImage']
+            lede = '<br><img src="{}"><div class="img">{}</div>\n'.format(
+                x['imageURLs']['default'], x['caption'] + '<i> ' + x['credit'] + '</i>'
+            )
+
+        body = ''
+        body_data = data['components']
+        for x in body_data:
+            body += get_contents(x)
+        html = '<html><body>' + cat + title + subhead + auth + lede + caption + '<div>' + body + '</div></body></html>'
+        return BeautifulSoup(html).prettify()

     def preprocess_html(self, soup):
-        for h3 in soup.findAll('h3'):
-            h3.name = 'h4'
         for icon in soup.findAll('img', attrs={'class':'video-player__play-icon'}):
             icon.decompose()
         for div in soup.findAll('div', attrs={'class':'chart'}):
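
The rewritten recipe no longer scrapes bloomberg.com HTML via Google News; it walks Bloomberg's mobile-app JSON API in two steps. A minimal sketch of that walk outside calibre (endpoint paths and field names are taken from the diff above; that the CDN answers plain unauthenticated GETs like this is an assumption, and the recipe itself goes through self.index_to_soup instead):

    import json
    from urllib.request import urlopen

    API = 'https://cdn-mobapi.bloomberg.com'

    def fetch(path):
        # Stand-in for self.index_to_soup(..., raw=True); any headers or
        # tokens the real endpoints might expect are not modeled here.
        with urlopen(API + path) as f:
            return json.loads(f.read())

    # Step 1: the app navigation endpoint lists the sections.
    nav = fetch('/wssmobile/v1/navigation/bloomberg_app/search-v2')
    for item in nav['searchNav'][0]['items']:
        print(item['title'], item['links']['self']['href'])

    # Step 2: each section href returns 'modules' of 'stories'; a story's
    # 'internalID' keys the article JSON at /wssmobile/v1/stories/<internalID>,
    # which preprocess_raw_html() above renders through get_contents().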
@@ -204,14 +152,3 @@ class Bloomberg(BasicNewsRecipe):
     def populate_article_metadata(self, article, soup, first):
         article.url = soup.find('h1')['title']
-        article.summary = self.tag_to_string(soup.find('div', attrs={'class':'subhead'}))
-        article.text_summary = self.tag_to_string(soup.find('div', attrs={'class':'subhead'}))
-        article.title = article.title.replace(' - Bloomberg', '')
-
-    def get_browser(self):
-        # -- Handle Google's cookies consent page
-        br = BasicNewsRecipe.get_browser(self)
-        br.open('https://news.google.com')
-        br.select_form(action="https://consent.google.com/save")
-        br.submit()
-        return br
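
A change like this can be smoke-tested with calibre's standard recipe-testing invocation (the generic workflow from the calibre manual, not anything specific to this commit):

    ebook-convert bloomberg.recipe .epub --test -vv --debug-pipeline debug

--test fetches only a couple of articles per feed, which exercises parse_index() and preprocess_raw_html() against the mobile API without a full download.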