diff --git a/recipes/bloomberg.recipe b/recipes/bloomberg.recipe
index 8ac2aca586..d24e2f9696 100644
--- a/recipes/bloomberg.recipe
+++ b/recipes/bloomberg.recipe
@@ -1,5 +1,6 @@
 from calibre.web.feeds.news import BasicNewsRecipe
 from calibre import browser
+from html5_parser import parse
 from calibre.ptempfile import PersistentTemporaryFile
 import json
 import re
@@ -21,7 +22,7 @@ class Bloomberg(BasicNewsRecipe):
         #time, .chart {font-size:small;}
         #subhead {font-style:italic; color:#404040;}
         #cat {font-size:small; color:gray;}
-        .news-figure-caption-text, #cap {font-size:small; text-align:center;}
+        .news-figure-caption-text, #cap, #img {font-size:small; text-align:center;}
         .news-figure-credit {font-size:small; text-align:center; color:#202020;}
     '''
@@ -35,7 +36,10 @@ class Bloomberg(BasicNewsRecipe):
             url = e.hdrs.get('location')
         soup = self.index_to_soup(url)
         link = soup.find('a', attrs={'href':lambda x: x and x.startswith('https://www.bloomberg.com')})
-        if '/videos/' in link['href']:
+        skip_sections =[ # add sections you want to skip
+            '/video/', '/videos/', '/media/', 'podcast'
+        ]
+        if any(x in link['href'] for x in skip_sections):
             self.abort_article('Aborting Video article')
         self.log('Found link: ', link['href'])
         html = br.open(link['href']).read()
@@ -58,18 +62,25 @@ class Bloomberg(BasicNewsRecipe):
          'https://news.google.com/rss/search?q=when:27h+allinurl:bloomberg.com%2Fnews%2Fnewsletters%2F&hl=en-US&gl=US&ceid=US:en'),
         ('News',
          'https://news.google.com/rss/search?q=when:27h+allinurl:bloomberg.com%2Fnews%2Farticles%2F&hl=en-US&gl=US&ceid=US:en'),
-        ('Others', 'https://news.google.com/rss/search?q=when:27h+allinurl:bloomberg.com&hl=en-US&gl=US&ceid=US:en')
+        ('Others', 'https://news.google.com/rss/search?q=when:27h+allinurl:bloomberg.com%2F&hl=en-US&gl=US&ceid=US:en')
     ]
 
     def preprocess_raw_html(self, raw, *a):
-        m = re.search('data-component-props="ArticleBody">', raw)
+        root = parse(raw)
+        m = root.xpath('//script[@data-component-props="ArticleBody"]')
 
         if not m:
-            m = re.search('data-component-props="FeatureBody">', raw)
+            m = root.xpath('//script[@data-component-props="FeatureBody"]')
+            if not m:
+                m2 = root.xpath('//script[@id="__NEXT_DATA__"]')
+                if not m2:
+                    return raw
 
+        if m:
+            data = json.loads(m[0].text)
+            data = data['story']
-        raw = raw[m.start():]
-        raw = raw.split('>', 1)[1]
-        data = json.JSONDecoder().raw_decode(raw)[0]
-        data = data['story']
+        else:
+            data = json.loads(m2[0].text)
+            data = data['props']['pageProps']['story']
 
         title = '<h1>' + data['headline'] + '</h1>'
@@ ... @@ class Bloomberg(BasicNewsRecipe):
             lede = '<p id="img"><img src="{}">'.format(data['ledeImageUrl'])
 
-        if data['ledeDescription'] is not None:
-            caption = '<span id="cap">' + data['ledeDescription'] + '</span>'
+        if 'ledeDescription' in data:
+            if data['ledeDescription'] is not None:
+                caption = '<span id="cap">' + data['ledeDescription'] + '</span>'
+        else:
+            if 'lede' in data:
+                if data['lede'] is not None:
+                    if 'alt' in data['lede']:
+                        if data['lede']['alt'] is not None:
+                            caption = '<span id="cap">' + data['lede']['alt'] + '</span>'
+
+        if m:
+            body = data['body']
+        else:
+            body = ''
+            body_data = data['body']['content']
+            for objects in body_data:
+                if objects['type'] == 'media':
+                    if objects['subType'] == 'photo':
+                        body += '<p id="img"><img src="{}">'.format(objects['data']['photo']['src'])
+                        body += '<span id="cap">' + objects['data']['photo']['caption'] + '</span></p>'
+                if not 'content' in objects:
+                    continue
+                for content in objects['content']:
+                    if 'value' in content:
+                        body += content['value']
+                    elif 'content' in content:
+                        for val_cont in content['content']:
+                            if 'value' in val_cont:
+                                body += val_cont['value']
 
-        body = data['body']
 
         html = '<html><body>' + cat + title + subhead + auth + lede + caption + '<div>' + body + '</div></body></html>'
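
Note (illustration, not part of the patch): the sketch below restates the new extraction path in standalone form, assuming `raw` holds the HTML of a downloaded Bloomberg article page. The helper names `extract_story` and `flatten_body` are hypothetical; the recipe does this work inline in `preprocess_raw_html`. It mirrors the fallback order introduced above: the legacy `data-component-props` scripts are tried first, and only then the Next.js `__NEXT_DATA__` payload, whose story body arrives as a content tree rather than ready-made HTML.

import json
from html5_parser import parse


def extract_story(raw):
    # Same lookup order as the patched preprocess_raw_html.
    root = parse(raw)
    m = root.xpath('//script[@data-component-props="ArticleBody"]')
    if not m:
        m = root.xpath('//script[@data-component-props="FeatureBody"]')
    if m:
        return json.loads(m[0].text)['story']
    m2 = root.xpath('//script[@id="__NEXT_DATA__"]')
    if m2:
        return json.loads(m2[0].text)['props']['pageProps']['story']
    return None


def flatten_body(story):
    # For __NEXT_DATA__ stories, stitch the nested content tree back into
    # HTML, roughly as the new else-branch of the patch does.
    body = ''
    for node in story['body']['content']:
        if node['type'] == 'media' and node.get('subType') == 'photo':
            photo = node['data']['photo']
            body += '<p id="img"><img src="{}">'.format(photo['src'])
            body += '<span id="cap">' + photo['caption'] + '</span></p>'
        for child in node.get('content', []):
            if 'value' in child:
                body += child['value']
            elif 'content' in child:
                body += ''.join(c['value'] for c in child['content'] if 'value' in c)
    return body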