Merge branch 'master' of https://github.com/unkn0w7n/calibre
commit 43e6b36e15

@@ -1,5 +1,6 @@
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 from calibre import browser
+from html5_parser import parse
 import json
 import re

@@ -19,7 +20,7 @@ class Bloomberg(BasicNewsRecipe):
         #auth {font-size:small; font-weight:bold;}
         #time, .chart {font-size:small;}
         #subhead {font-style:italic; color:#404040;}
-        .news-figure-caption-text, #cap {font-size:small; text-align:center;}
+        .news-figure-caption-text, #cap, #img {font-size:small; text-align:center;}
         .news-figure-credit {font-size:small; text-align:center; color:#202020;}
     '''

@@ -70,17 +71,22 @@ class Bloomberg(BasicNewsRecipe):
         return feeds

     def preprocess_raw_html(self, raw, *a):
-        m = re.search('data-component-props="ArticleBody">', raw)
+        root = parse(raw)
+        m = root.xpath('//script[@data-component-props="ArticleBody"]')
         if not m:
-            m = re.search('data-component-props="FeatureBody">', raw)
+            m = root.xpath('//script[@data-component-props="FeatureBody"]')
             if not m:
-                return raw
-
-        raw = raw[m.start():]
-        raw = raw.split('>', 1)[1]
-        data = json.JSONDecoder().raw_decode(raw)[0]
-        data = data['story']
+                m2 = root.xpath('//script[@id="__NEXT_DATA__"]')
+                if not m2:
+                    return raw
+        if m:
+            data = json.loads(m[0].text)
+            data = data['story']
+
+        else:
+            data = json.loads(m2[0].text)
+            data = data['props']['pageProps']['story']

         title = '<h1>' + data['headline'] + '</h1>'

         cat = subhead = lede = auth = caption = ''
@@ -104,11 +110,39 @@ class Bloomberg(BasicNewsRecipe):
         if 'ledeImageUrl' in data:
             if data['ledeImageUrl'] is not None:
                 lede = '<p><img src="{}">'.format(data['ledeImageUrl'])

-        if data['ledeDescription'] is not None:
-            caption = '<span id="cap">' + data['ledeDescription'] + '</span>'
+        if 'ledeDescription' in data:
+            if data['ledeDescription'] is not None:
+                caption = '<span id="cap">' + data['ledeDescription'] + '</span>'
+        else:
+            if 'lede' in data:
+                if data['lede'] is not None:
+                    if 'alt' in data['lede']:
+                        if data['lede']['alt'] is not None:
+                            caption = '<span id="cap">' + data['lede']['alt'] + '</span>'

-        body = data['body']
+        if m:
+            body = data['body']
+        else:
+            body = ''
+            body_data = data['body']['content']
+            for objects in body_data:
+                if objects['type'] == 'media':
+                    if objects['subType'] == 'photo':
+                        body += '<p id="img"><img src="{}">'.format(objects['data']['photo']['src'])
+                        body += '<span id="cap">' + objects['data']['photo']['caption'] + '</span></p>'
+                if objects['type'] == 'paragraph' or 'heading':
+                    body += '<p>'
+                    if not 'content' in objects:
+                        continue
+                    for content in objects['content']:
+                        if 'value' in content:
+                            body += content['value']
+                        elif 'content' in content:
+                            for val_cont in content['content']:
+                                if 'value' in val_cont:
+                                    body += val_cont['value']
+
         html = '<html><body>' + cat + title + subhead + auth + lede + caption + '<div>' + body
         return html

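For context, a minimal standalone sketch of the extraction approach this patch moves to: parse the page with html5_parser, look for the ArticleBody/FeatureBody script tags, and fall back to the Next.js __NEXT_DATA__ payload. The helper name extract_story is illustrative and not part of the commit.

import json
from html5_parser import parse

def extract_story(raw_html):
    # Parse once, then try the older data-component-props script tags first.
    root = parse(raw_html)
    for props in ('ArticleBody', 'FeatureBody'):
        m = root.xpath('//script[@data-component-props="{}"]'.format(props))
        if m:
            return json.loads(m[0].text)['story']
    # Newer Next.js pages expose the same story JSON under __NEXT_DATA__.
    m = root.xpath('//script[@id="__NEXT_DATA__"]')
    if m:
        return json.loads(m[0].text)['props']['pageProps']['story']
    return None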

@@ -1,5 +1,6 @@
 from calibre.web.feeds.news import BasicNewsRecipe
 from calibre import browser
+from html5_parser import parse
 from calibre.ptempfile import PersistentTemporaryFile
 import json
 import re
@@ -21,7 +22,7 @@ class Bloomberg(BasicNewsRecipe):
         #time, .chart {font-size:small;}
         #subhead {font-style:italic; color:#404040;}
         #cat {font-size:small; color:gray;}
-        .news-figure-caption-text, #cap {font-size:small; text-align:center;}
+        .news-figure-caption-text, #cap, #img {font-size:small; text-align:center;}
         .news-figure-credit {font-size:small; text-align:center; color:#202020;}
     '''

@@ -35,7 +36,10 @@ class Bloomberg(BasicNewsRecipe):
             url = e.hdrs.get('location')
         soup = self.index_to_soup(url)
         link = soup.find('a', attrs={'href':lambda x: x and x.startswith('https://www.bloomberg.com')})
-        if '/videos/' in link['href']:
+        skip_sections =[ # add sections you want to skip
+            '/video/', '/videos/', '/media/', 'podcast'
+        ]
+        if any(x in link['href'] for x in skip_sections):
             self.abort_article('Aborting Video article')
         self.log('Found link: ', link['href'])
         html = br.open(link['href']).read()
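The skip_sections list generalizes the old '/videos/' test so podcast and other media URLs are also aborted. A tiny sketch of the same check, with a hypothetical helper name:

def is_skippable(href, skip_sections=('/video/', '/videos/', '/media/', 'podcast')):
    # True when the resolved Bloomberg URL points at video/audio content.
    return any(x in href for x in skip_sections)

# is_skippable('https://www.bloomberg.com/news/videos/...')  -> True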
@@ -58,18 +62,25 @@ class Bloomberg(BasicNewsRecipe):
             'https://news.google.com/rss/search?q=when:27h+allinurl:bloomberg.com%2Fnews%2Fnewsletters%2F&hl=en-US&gl=US&ceid=US:en'),
         ('News',
             'https://news.google.com/rss/search?q=when:27h+allinurl:bloomberg.com%2Fnews%2Farticles%2F&hl=en-US&gl=US&ceid=US:en'),
-        ('Others', 'https://news.google.com/rss/search?q=when:27h+allinurl:bloomberg.com&hl=en-US&gl=US&ceid=US:en')
+        ('Others', 'https://news.google.com/rss/search?q=when:27h+allinurl:bloomberg.com%2F&hl=en-US&gl=US&ceid=US:en')
     ]

     def preprocess_raw_html(self, raw, *a):
-        m = re.search('data-component-props="ArticleBody">', raw)
+        root = parse(raw)
+        m = root.xpath('//script[@data-component-props="ArticleBody"]')
         if not m:
-            m = re.search('data-component-props="FeatureBody">', raw)
+            m = root.xpath('//script[@data-component-props="FeatureBody"]')
             if not m:
-                return raw
-
-        raw = raw[m.start():]
-        raw = raw.split('>', 1)[1]
-        data = json.JSONDecoder().raw_decode(raw)[0]
-        data = data['story']
+                m2 = root.xpath('//script[@id="__NEXT_DATA__"]')
+                if not m2:
+                    return raw
+        if m:
+            data = json.loads(m[0].text)
+            data = data['story']
+        else:
+            data = json.loads(m2[0].text)
+            data = data['props']['pageProps']['story']

         title = '<h1>' + data['headline'] + '</h1>'

@@ -95,10 +106,38 @@ class Bloomberg(BasicNewsRecipe):
             if data['ledeImageUrl'] is not None:
                 lede = '<p><img src="{}">'.format(data['ledeImageUrl'])

-        if data['ledeDescription'] is not None:
-            caption = '<span id="cap">' + data['ledeDescription'] + '</span>'
+        if 'ledeDescription' in data:
+            if data['ledeDescription'] is not None:
+                caption = '<span id="cap">' + data['ledeDescription'] + '</span>'
+        else:
+            if 'lede' in data:
+                if data['lede'] is not None:
+                    if 'alt' in data['lede']:
+                        if data['lede']['alt'] is not None:
+                            caption = '<span id="cap">' + data['lede']['alt'] + '</span>'

-        body = data['body']
+        if m:
+            body = data['body']
+        else:
+            body = ''
+            body_data = data['body']['content']
+            for objects in body_data:
+                if objects['type'] == 'media':
+                    if objects['subType'] == 'photo':
+                        body += '<p id="img"><img src="{}">'.format(objects['data']['photo']['src'])
+                        body += '<span id="cap">' + objects['data']['photo']['caption'] + '</span></p>'
+                if objects['type'] == 'paragraph' or 'heading':  # lists are missed :(
+                    body += '<p>'
+                    if not 'content' in objects:
+                        continue
+                    for content in objects['content']:
+                        if 'value' in content:
+                            body += content['value']
+                        elif 'content' in content:
+                            for val_cont in content['content']:
+                                if 'value' in val_cont:
+                                    body += val_cont['value']
+
         html = '<html><body>' + cat + title + subhead + auth + lede + caption + '<div>' + body
         return html

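When only the structured body nodes are available (the __NEXT_DATA__ branch, where m is empty), the patch assembles HTML by walking data['body']['content']. Below is a condensed sketch of that walk; it uses a membership test for the node type, since the committed `== 'paragraph' or 'heading'` comparison is always truthy, and the node shape shown in the trailing comment is illustrative.

def render_body(nodes):
    body = ''
    for node in nodes:
        # Photos become an <img> plus caption span.
        if node.get('type') == 'media' and node.get('subType') == 'photo':
            photo = node['data']['photo']
            body += '<p id="img"><img src="{}">'.format(photo['src'])
            body += '<span id="cap">' + photo['caption'] + '</span></p>'
        # Paragraphs and headings flatten their (possibly nested) text runs.
        if node.get('type') in ('paragraph', 'heading'):
            body += '<p>'
            for content in node.get('content', []):
                if 'value' in content:
                    body += content['value']
                elif 'content' in content:
                    body += ''.join(c['value'] for c in content['content'] if 'value' in c)
    return body

# Illustrative node shape:
# {'type': 'paragraph', 'content': [{'value': 'Plain text '}, {'content': [{'value': 'linked text'}]}]}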

@@ -138,6 +138,9 @@ class LiveMint(BasicNewsRecipe):
         return raw

     def preprocess_html(self, soup):
+        for embed in soup.findAll('div', attrs={'class':'embed'}):
+            if nos := embed.find('noscript'):
+                nos.name = 'span'
         for span in soup.findAll('figcaption'):
             span['id'] = 'img-cap'
         for auth in soup.findAll('span', attrs={'class':lambda x: x and 'articleInfo' in x.split()}):
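The LiveMint change keeps embed fallbacks: the markup inside <noscript> would otherwise be dropped, so renaming the tag to <span> preserves it. A minimal standalone BeautifulSoup (bs4) sketch of the same transform; the sample HTML is made up.

from bs4 import BeautifulSoup

sample = '<div class="embed"><noscript><img src="chart.png"></noscript></div>'  # made-up sample
soup = BeautifulSoup(sample, 'html.parser')

for embed in soup.find_all('div', attrs={'class': 'embed'}):
    nos = embed.find('noscript')
    if nos is not None:
        nos.name = 'span'  # rename so the fallback content survives later cleanup

print(soup.div)  # the <noscript> wrapper is now a <span>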