mirror of
https://github.com/kovidgoyal/calibre.git
synced 2025-07-09 03:04:10 -04:00
Merge branch 'master' of https://github.com/unkn0w7n/calibre
This commit is contained in:
commit
fcad6379dd
BIN
recipes/icons/nytfeeds.png
Normal file
BIN
recipes/icons/nytfeeds.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 416 B |
184
recipes/nytfeeds.recipe
Normal file
184
recipes/nytfeeds.recipe
Normal file
@ -0,0 +1,184 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
import re
|
||||||
|
import json
|
||||||
|
import time
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
from calibre.web.feeds.news import BasicNewsRecipe
|
||||||
|
|
||||||
|
|
||||||
|
def extract_json(raw):
    '''
    Pull the article payload out of an NYT page's preloaded-data script tag.

    raw: full HTML of an NYT article page.
    Returns the list of content blocks found at
    initialData.data.article.sprinkledBody.content.
    Raises AttributeError if the preloaded-data script tag is absent.
    '''
    pre = re.search(r'<script>window.__preloadedData = ({.+)', raw).group(1)
    # The payload is JavaScript, not strict JSON: bare `undefined` appears
    # where data is missing.  Replace it with `[]` only in value position
    # (immediately after ':', ',' or '[') so occurrences of the word
    # "undefined" inside article text are left intact -- a plain
    # re.sub('undefined', ...) would corrupt the article body.
    js = json.JSONDecoder().raw_decode(
        re.sub(r'(?<=[:,\[])\s*undefined\b', '[]', pre)
    )[0]
    return js['initialData']['data']['article']['sprinkledBody']['content']
|
||||||
|
|
||||||
|
def parse_image(i):
    '''
    Yield HTML fragments rendering one image content node.

    Nodes whose __typename is not 'Image' produce nothing.  The image is
    wrapped in a <div>; a caption (with optional credit) is appended when
    present.
    '''
    if i['__typename'] != 'Image':
        return
    yield '<div>'
    yield '<img src="{}">'.format(i['crops'][0]['renditions'][0]['url'])
    caption = i.get('caption')
    if caption:
        yield '<div class="cap">{}'.format(caption.get('text', ''))
        credit = i.get('credit')
        if credit:
            yield '<span class="cred"> ' + credit + '</span>'
        yield '</div>'
    yield '</div>'
|
||||||
|
|
||||||
|
def parse_img_grid(g):
    '''
    Yield HTML for a GridBlock: each grid image in turn, then one shared
    caption/credit line when the block carries one.
    '''
    for media in g.get('gridMedia', {}):
        yield ''.join(parse_image(media))
    caption = g.get('caption')
    if caption:
        yield '<div class="cap">{}'.format(caption)
        credit = g.get('credit')
        if credit:
            yield '<span class="cred"> ' + credit + '</span>'
        yield '</div>'
|
||||||
|
|
||||||
|
def parse_cnt(cnt):
    '''
    Yield the text of a TextInline content node.

    When the node carries formats, each LinkFormat entry wraps the text in
    an anchor; other format entries emit the plain text.  Nodes that are
    not TextInline produce nothing.
    '''
    if cnt['__typename'] != 'TextInline':
        return
    formats = cnt.get('formats')
    if not formats:
        yield cnt['text']
        return
    for fmt in formats:
        if fmt['__typename'] == 'LinkFormat':
            yield '<a href="{}">{}</a>'.format(fmt['url'], cnt['text'])
        else:
            yield cnt['text']
|
||||||
|
|
||||||
|
def parse_byline(byl):
    '''
    Yield HTML for an article's byline block: one <div> per author's
    rendered representation, then one italic <div> per role entry.
    '''
    for author in byl.get('bylines', {}):
        yield '<div>{}</div>'.format(author['renderedRepresentation'])
    for role in byl.get('role', {}):
        yield '<div><i>' + ''.join(parse_cnt(role)) + '</i></div>'
|
||||||
|
|
||||||
|
def iso_date(x):
|
||||||
|
dt = datetime.fromisoformat(x[:-1]) + timedelta(seconds=time.timezone)
|
||||||
|
return dt.strftime('%b %d, %Y at %I:%M %p')
|
||||||
|
|
||||||
|
def header_parse(h):
    '''
    Yield HTML for an article header block: headline, optional summary,
    optional lede image, and an optional byline div that also carries the
    timestamp when present.
    '''
    for node in h['headline']['content']:
        yield '<h1>' + ''.join(parse_cnt(node)) + '</h1>'
    if h.get('summary'):
        for node in h['summary']['content']:
            yield '<p class="sub">' + ''.join(parse_cnt(node)) + '</p>'
    lede = h.get('ledeMedia')
    if lede and lede.get('__typename', '') == 'ImageBlock':
        yield ''.join(parse_image(lede['media']))
    byline = h.get('byline')
    if byline:
        yield '<br><div class="byl">'
        yield '\t'.join(parse_byline(byline))
        if h.get('timestampBlock'):
            yield '\t<div>' + iso_date(h['timestampBlock']['timestamp']) + '</div>'
        yield '</div>'
|
||||||
|
|
||||||
|
def article_parse(data):
    '''
    Yield the pieces of a complete HTML document built from a list of NYT
    sprinkledBody content blocks.  Header blocks are delegated to
    header_parse, paragraphs/headings to parse_cnt, images and grids to
    their dedicated parsers; any other block with content is rendered as
    an italic paragraph.
    '''
    yield "<html><body>"
    for blk in data:
        typ = blk.get('__typename', '')
        if typ in {'HeaderBasicBlock', 'HeaderFullBleedVerticalBlock'}:
            yield '\n'.join(header_parse(blk))
        elif typ == 'ParagraphBlock':
            yield '<p>'
            for node in blk['content']:
                yield '\t'.join(parse_cnt(node))
            yield '</p>'
        elif typ == 'Heading2Block':
            yield '<h4>'
            for node in blk['content']:
                yield '\t'.join(parse_cnt(node))
            yield '</h4>'
        elif typ == 'ImageBlock':
            yield ''.join(parse_image(blk['media']))
        elif typ == 'GridBlock':
            yield ''.join(parse_img_grid(blk))
        elif blk.get('content'):
            yield '<p><i>'
            for node in blk['content']:
                yield '\t'.join(parse_cnt(node))
            yield '</i></p>'
    yield "</body></html>"
|
||||||
|
|
||||||
|
|
||||||
|
class nytFeeds(BasicNewsRecipe):
    '''
    RSS-feed based NYT recipe.  Articles are fetched with a Googlebot
    user agent and rendered from the page's preloaded JSON payload rather
    than from the served HTML.
    '''
    title = 'NYT News'
    __author__ = 'unkn0wn'
    description = (
        'The New York Times is dedicated to helping people understand the world through '
        'on-the-ground, expert and deeply reported independent journalism. Feeds based recipe.'
    )
    oldest_article = 1
    encoding = 'utf-8'
    use_embedded_content = False
    language = 'en_US'
    remove_empty_feeds = True
    resolve_internal_links = True
    ignore_duplicate_articles = {'title', 'url'}
    masthead_url = 'https://static01.nytimes.com/newsgraphics/2015-12-23-masthead-2016/b15c3d81d3d7b59065fff9a3f3afe85aa2e2dff5/_assets/nyt-logo.png'

    def get_cover_url(self):
        # Today's front page scan from frontpages.com.
        soup = self.index_to_soup('https://www.frontpages.com/the-new-york-times/')
        img = soup.find('img', attrs={'id': 'giornale-img'})
        return 'https://www.frontpages.com' + img['src']

    # User-tunable knobs surfaced in calibre's "recipe specific options" UI.
    recipe_specific_options = {
        'days': {
            'short': 'Oldest article to download from this news source. In days ',
            'long': 'For example, 0.5, gives you articles from the past 12 hours',
            'default': str(oldest_article)
        },
        'comp': {
            'short': 'Compress News Images?',
            'long': 'enter yes',
            'default': 'no'
        },
        'rev': {
            'short': 'Reverse the order of articles in each feed?',
            'long': 'enter yes',
            'default': 'no'
        }
    }

    def __init__(self, *args, **kwargs):
        BasicNewsRecipe.__init__(self, *args, **kwargs)
        opts = self.recipe_specific_options
        # Each option arrives as a string when the user has set it.
        days = opts.get('days')
        if days and isinstance(days, str):
            self.oldest_article = float(days)
        rev = opts.get('rev')
        if rev and isinstance(rev, str) and rev.lower() == 'yes':
            self.reverse_article_order = True
        comp = opts.get('comp')
        if comp and isinstance(comp, str) and comp.lower() == 'yes':
            self.compress_news_images = True

    extra_css = '''
        .byl { font-size:small; color:#202020; }
        .cap { font-size:small; text-align:center; }
        .cred { font-style:italic; font-size:small; }
        .sub { font-style:italic; }
    '''

    feeds = [
        ('World', 'https://rss.nytimes.com/services/xml/rss/nyt/World.xml'),
        ('US', 'https://rss.nytimes.com/services/xml/rss/nyt/US.xml'),
        ('Business', 'https://rss.nytimes.com/services/xml/rss/nyt/Business.xml'),
        ('Technology', 'https://rss.nytimes.com/services/xml/rss/nyt/Technology.xml'),
        ('Science', 'https://rss.nytimes.com/services/xml/rss/nyt/Science.xml'),
        ('Arts', 'https://rss.nytimes.com/services/xml/rss/nyt/Arts.xml'),
        ('Fashion & Style', 'https://rss.nytimes.com/services/xml/rss/nyt/FashionandStyle.xml'),
        ('TMagazine', 'https://rss.nytimes.com/services/xml/rss/nyt/tmagazine.xml'),
        ('Travel', 'https://www.nytimes.com/services/xml/rss/nyt/Travel.xml'),
        ('Sunday Review', 'https://rss.nytimes.com/services/xml/rss/nyt/sunday-review.xml'),
    ]

    def get_browser(self, *args, **kwargs):
        # Fetch as Googlebot with a Google referer and a Googlebot-range
        # X-Forwarded-For so the paywall serves the full article.
        kwargs['user_agent'] = 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
        br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
        br.addheaders.extend([
            ('Referer', 'https://www.google.com/'),
            ('X-Forwarded-For', '66.249.66.1')
        ])
        return br

    def preprocess_raw_html(self, raw_html, url):
        # Render the article from the embedded JSON payload instead of the
        # served markup.
        return '\n'.join(article_parse(extract_json(raw_html)))
|
Loading…
x
Reference in New Issue
Block a user