This commit is contained in:
Kovid Goyal 2024-10-06 08:26:01 +05:30
commit 2016d80094
No known key found for this signature in database
GPG Key ID: 06BC317B515ACE7C
9 changed files with 381 additions and 352 deletions

BIN
recipes/icons/nyt_tmag.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 416 B

Binary file not shown.

Before

Width:  |  Height:  |  Size: 9.5 KiB

After

Width:  |  Height:  |  Size: 416 B

127
recipes/nyt_tmag.recipe Normal file
View File

@ -0,0 +1,127 @@
#!/usr/bin/env python
from calibre.web.feeds.news import BasicNewsRecipe
# Module-level switch: when True, article pages are downloaded via
# nyt_parser.download_url with a fresh browser (presumably through the
# Wayback Machine, per the name — see NytMag.get_nyt_page below) and
# articles_are_obfuscated is enabled; when False, pages come straight
# from index_to_soup.
use_wayback_machine = False
class NytMag(BasicNewsRecipe):
    """Calibre recipe for The New York Times Style Magazine (T Magazine).

    Pulls articles from the T Magazine and Fashion & Style RSS feeds and
    renders them through calibre's live-updatable NYT site parser.
    """

    title = 'NYT T Magazine'
    __author__ = 'unkn0wn'
    description = 'The latest from The New York Times Style Magazine.'
    oldest_article = 30  # days
    encoding = 'utf-8'
    use_embedded_content = False
    language = 'en_US'
    remove_empty_feeds = True
    resolve_internal_links = True
    ignore_duplicate_articles = {'title', 'url'}
    masthead_url = 'https://static01.nytimes.com/newsgraphics/2015-12-23-masthead-2016/b15c3d81d3d7b59065fff9a3f3afe85aa2e2dff5/_assets/nyt-logo.png'

    feeds = [
        'https://rss.nytimes.com/services/xml/rss/nyt/tmagazine.xml',
        'https://rss.nytimes.com/services/xml/rss/nyt/FashionandStyle.xml',
    ]

    # User-tunable knobs exposed in calibre's "Advanced" tab for this recipe.
    recipe_specific_options = {
        'days': {
            'short': 'Oldest article to download from this news source. In days ',
            'long': 'For example, 0.5, gives you articles from the past 12 hours',
            'default': str(oldest_article),
        },
        'comp': {'short': 'Compress News Images?', 'long': 'enter yes', 'default': 'no'},
        'rev': {
            'short': 'Reverse the order of articles in each feed?',
            'long': 'enter yes',
            'default': 'no',
        },
        'res': {
            'short': (
                'For hi-res images, select a resolution from the following\noptions: '
                'popup, jumbo, mobileMasterAt3x, superJumbo'
            ),
            'long': (
                'This is useful for non e-ink devices, and for a lower file size\nthan '
                'the default, use mediumThreeByTwo440, mediumThreeByTwo225, articleInline.'
            ),
        },
    }

    def __init__(self, *args, **kwargs):
        BasicNewsRecipe.__init__(self, *args, **kwargs)
        # Apply the user-supplied overrides, when present and string-valued.
        opts = self.recipe_specific_options
        days = opts.get('days')
        if days and isinstance(days, str):
            self.oldest_article = float(days)
        rev = opts.get('rev')
        if rev and isinstance(rev, str) and rev.lower() == 'yes':
            self.reverse_article_order = True
        comp = opts.get('comp')
        if comp and isinstance(comp, str) and comp.lower() == 'yes':
            self.compress_news_images = True

    extra_css = """
        .byl, .time { font-size:small; color:#202020; }
        .cap { font-size:small; text-align:center; }
        .cred { font-style:italic; font-size:small; }
        em, blockquote { color: #202020; }
        .sc { font-variant: small-caps; }
        .lbl { font-size:small; color:#404040; }
        img { display:block; margin:0 auto; }
    """

    @property
    def nyt_parser(self):
        # Lazily load (and cache) the live-updatable NYT site parser module.
        parser = getattr(self, '_nyt_parser', None)
        if parser is None:
            from calibre.live import load_module
            parser = load_module('calibre.web.site_parsers.nytimes')
            self._nyt_parser = parser
        return parser

    def get_nyt_page(self, url, skip_wayback=False):
        # Direct fetch unless the wayback path is both enabled and requested.
        if not use_wayback_machine or skip_wayback:
            return self.index_to_soup(url, raw=True)
        from calibre import browser
        return self.nyt_parser.download_url(url, browser())

    articles_are_obfuscated = use_wayback_machine

    if use_wayback_machine:

        def get_obfuscated_article(self, url):
            # Stash the downloaded page in a temp file for calibre to pick up.
            from calibre.ptempfile import PersistentTemporaryFile

            with PersistentTemporaryFile() as tf:
                tf.write(self.get_nyt_page(url))
            return tf.name

    def preprocess_raw_html(self, raw_html, url):
        return self.nyt_parser.extract_html(self.index_to_soup(raw_html), url)

    def get_browser(self, *args, **kwargs):
        # Present as Googlebot coming from Google with a Google crawler IP;
        # NYT serves full pages to crawlers.
        kwargs['user_agent'] = (
            'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
        )
        br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
        extra_headers = [
            ('Referer', 'https://www.google.com/'),
            ('X-Forwarded-For', '66.249.66.1'),
        ]
        br.addheaders += extra_headers
        return br

    def preprocess_html(self, soup):
        res_opt = self.recipe_specific_options.get('res')
        if res_opt and isinstance(res_opt, str):
            # Swap the default '-article...' rendition suffix in image URLs
            # for the user-chosen resolution, keeping the file extension.
            suffix = '-' + res_opt
            for image in soup.findAll('img', attrs={'src': True}):
                src = image['src']
                if '-article' in src:
                    ext = src.split('?')[0].split('.')[-1]
                    image['src'] = src.rsplit('-article', 1)[0] + suffix + '.' + ext
        # Flatten caption paragraphs/divs into spans so captions stay inline.
        for cap in soup.findAll('div', attrs={'class': 'cap'}):
            for child in cap.findAll(['p', 'div']):
                child.name = 'span'
        return soup

View File

@ -1,179 +1,9 @@
#!/usr/bin/env python #!/usr/bin/env python
import json
import re import re
from calibre.utils.iso8601 import parse_iso8601
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
def extract_json(raw): use_wayback_machine = False
pre = re.search(r'<script>window.__preloadedData = ({.+)', raw).group(1)
js = json.JSONDecoder().raw_decode(re.sub('undefined', '[]', pre))[0]
return js['initialData']['data']['article']['sprinkledBody']['content']
def parse_image(i):
crop = i.get('crops') or i.get('spanImageCrops')
if crop:
yield f'<div><img src="{crop[0]["renditions"][0]["url"]}" title="{i.get("altText", "")}">'
if i.get('caption'):
yield f'<div class="cap">{"".join(parse_types(i["caption"]))}'
if i.get('credit'):
yield f'<span class="cred"> {i["credit"]}</span>'
yield '</div>'
elif i.get('legacyHtmlCaption'):
if i['legacyHtmlCaption'].strip():
yield f'<div class="cap">{i["legacyHtmlCaption"]}</div>'
yield '</div>'
def parse_img_grid(g):
for grd in g.get('gridMedia', {}):
yield ''.join(parse_image(grd))
if g.get('caption'):
yield f'<div class="cap">{g["caption"]}'
if g.get('credit'):
yield f'<span class="cred"> {g["credit"]}</span>'
yield '</div>'
def parse_vid(v):
if v.get('promotionalMedia'):
headline = v.get("headline", {}).get("default", "")
rendition = v.get('renditions')
yield (f'<div><b><a href="{rendition[0]["url"]}">Video</a>: {headline}</b></div>'
if rendition else f'<div><b>{headline}</b></div>')
yield ''.join(parse_types(v["promotionalMedia"]))
if v.get('promotionalSummary'):
yield f'<div class="cap">{v["promotionalSummary"]}</div>'
def parse_emb(e):
if e.get('html') and 'datawrapper.dwcdn.net' in e.get('html', ''):
dw = re.search(r'datawrapper.dwcdn.net/(.{5})', e['html']).group(1)
yield f'<div><img src="https://datawrapper.dwcdn.net/{dw}/full.png"></div>'
elif e.get('promotionalMedia'):
if e.get('headline'):
yield f'<div><b>{e["headline"]["default"]}</b></div>'
yield ''.join(parse_types(e["promotionalMedia"]))
if e.get('note'):
yield f'<div class="cap">{e["note"]}</div>'
def parse_byline(byl):
for b in byl.get('bylines', {}):
yield f'<div>{b["renderedRepresentation"]}</div>'
yield '<div><b><i>'
for rl in byl.get('role', {}):
if ''.join(parse_cnt(rl)).strip():
yield ''.join(parse_cnt(rl))
yield '</i></b></div>'
def iso_date(x):
dt = parse_iso8601(x, as_utc=False)
return dt.strftime('%b %d, %Y at %I:%M %p')
def parse_header(h):
if h.get('label'):
yield f'<div class="lbl">{"".join(parse_types(h["label"]))}</div>'
if h.get('headline'):
yield ''.join(parse_types(h["headline"]))
if h.get('summary'):
yield f'<p><i>{"".join(parse_types(h["summary"]))}</i></p>'
if h.get('ledeMedia'):
yield ''.join(parse_types(h["ledeMedia"]))
if h.get('byline'):
yield ''.join(parse_types(h["byline"]))
if h.get('timestampBlock'):
yield ''.join(parse_types(h["timestampBlock"]))
def parse_fmt_type(fm):
for f in fm.get('formats', {}):
ftype = f.get("__typename", "")
if ftype == "BoldFormat":
yield '<strong>'
if ftype == "ItalicFormat":
yield '<em>'
if ftype == "LinkFormat":
hrf = f["url"]
yield f'<a href="{hrf}">'
yield fm.get("text", "")
for f in reversed(fm.get('formats', {})):
ftype = f.get("__typename", "")
if ftype == "BoldFormat":
yield '</strong>'
if ftype == "ItalicFormat":
yield '</em>'
if ftype == "LinkFormat":
yield '</a>'
def parse_cnt(cnt):
for k in cnt:
if isinstance(cnt[k], list):
if k == 'formats':
yield ''.join(parse_fmt_type(cnt))
else:
for cnt_ in cnt[k]:
yield ''.join(parse_types(cnt_))
if isinstance(cnt[k], dict):
yield ''.join(parse_types(cnt[k]))
if cnt.get('text') and 'formats' not in cnt and 'content' not in cnt:
if isinstance(cnt['text'], str):
yield cnt['text']
def parse_types(x):
typename = x.get('__typename', '')
if 'Header' in typename:
yield '\n'.join(parse_header(x))
elif typename.startswith('Heading'):
htag = 'h' + re.match(r'Heading([1-6])Block', typename).group(1)
yield f'<{htag}>{"".join(parse_cnt(x))}</{htag}>'
elif typename == 'ParagraphBlock':
yield f'<p>{"".join(parse_cnt(x))}</p>'
elif typename == 'BylineBlock':
yield f'<div class="byl"><br/>{"".join(parse_byline(x))}</div>'
elif typename == 'LabelBlock':
yield f'<div class="sc">{"".join(parse_cnt(x))}</div>'
elif typename == 'BlockquoteBlock':
yield f'<blockquote>{"".join(parse_cnt(x))}</blockquote>'
elif typename == 'TimestampBlock':
yield f'<div class="time">{iso_date(x["timestamp"])}</div>'
elif typename == 'LineBreakInline':
yield '<br/>'
elif typename == 'RuleBlock':
yield '<hr/>'
elif typename == 'Image':
yield "".join(parse_image(x))
elif typename == 'GridBlock':
yield "".join(parse_img_grid(x))
elif typename == 'Video':
yield "".join(parse_vid(x))
elif typename == 'EmbeddedInteractive':
yield "".join(parse_emb(x))
elif typename == 'ListBlock':
yield f'<ul>{"".join(parse_cnt(x))}</ul>'
elif typename == 'ListItemBlock':
yield f'<li>{"".join(parse_cnt(x))}</li>'
elif typename == 'TextInline':
yield "".join(parse_cnt(x))
elif typename in {'DetailBlock', 'TextRunKV'}:
yield f'<p><i>{"".join(parse_cnt(x))}</i></p>'
elif typename and typename not in {'RelatedLinksBlock', 'Dropzone'}:
if "".join(parse_cnt(x)).strip():
yield "".join(parse_cnt(x))
def article_parse(data):
yield "<html><body>"
for d in data:
yield from parse_types(d)
yield "</body></html>"
class NytFeeds(BasicNewsRecipe): class NytFeeds(BasicNewsRecipe):
@ -192,30 +22,59 @@ class NytFeeds(BasicNewsRecipe):
ignore_duplicate_articles = {'title', 'url'} ignore_duplicate_articles = {'title', 'url'}
masthead_url = 'https://static01.nytimes.com/newsgraphics/2015-12-23-masthead-2016/b15c3d81d3d7b59065fff9a3f3afe85aa2e2dff5/_assets/nyt-logo.png' masthead_url = 'https://static01.nytimes.com/newsgraphics/2015-12-23-masthead-2016/b15c3d81d3d7b59065fff9a3f3afe85aa2e2dff5/_assets/nyt-logo.png'
# https://www.nytimes.com/rss
# https://developer.nytimes.com/docs/rss-api/1/overview
feeds = [
# to filter out all opinions from other sections first
'https://rss.nytimes.com/services/xml/rss/nyt/Opinion.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/World.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/US.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/Business.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/YourMoney.xml',
# 'https://rss.nytimes.com/services/xml/rss/nyt/Technology.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/Science.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/Climate.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/Health.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/Arts.xml',
# 'https://rss.nytimes.com/services/xml/rss/nyt/FashionandStyle.xml',
# 'https://rss.nytimes.com/services/xml/rss/nyt/tmagazine.xml',
# 'https://rss.nytimes.com/services/xml/rss/nyt/books.xml',
'https://www.nytimes.com/services/xml/rss/nyt/Travel.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/well.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/Sports.xml',
# 'http://nytimes.com/timeswire/feeds/',
]
def get_cover_url(self): def get_cover_url(self):
soup = self.index_to_soup('https://www.frontpages.com/the-new-york-times/') soup = self.index_to_soup('https://www.frontpages.com/the-new-york-times/')
return 'https://www.frontpages.com' + soup.find('img', attrs={'id':'giornale-img'})['src'] return (
'https://www.frontpages.com'
+ soup.find('img', attrs={'id': 'giornale-img'})['src']
)
recipe_specific_options = { recipe_specific_options = {
'days': { 'days': {
'short': 'Oldest article to download from this news source. In days ', 'short': 'Oldest article to download from this news source. In days ',
'long': 'For example, 0.5, gives you articles from the past 12 hours', 'long': 'For example, 0.5, gives you articles from the past 12 hours',
'default': str(oldest_article) 'default': str(oldest_article),
},
'comp': {
'short': 'Compress News Images?',
'long': 'enter yes',
'default': 'no'
}, },
'comp': {'short': 'Compress News Images?', 'long': 'enter yes', 'default': 'no'},
'rev': { 'rev': {
'short': 'Reverse the order of articles in each feed?', 'short': 'Reverse the order of articles in each feed?',
'long': 'enter yes', 'long': 'enter yes',
'default': 'no' 'default': 'no',
}, },
'res': { 'res': {
'short': 'For hi-res images, select a resolution from the following\noptions: popup, jumbo, mobileMasterAt3x, superJumbo', 'short': (
'long': 'This is useful for non e-ink devices, and for a lower file size\nthan the default, use articleInline.', 'For hi-res images, select a resolution from the following\noptions: '
} 'popup, jumbo, mobileMasterAt3x, superJumbo'
),
'long': (
'This is useful for non e-ink devices, and for a lower file size\nthan '
'the default, use mediumThreeByTwo440, mediumThreeByTwo225, articleInline.'
),
},
} }
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
@ -232,67 +91,66 @@ class NytFeeds(BasicNewsRecipe):
if c.lower() == 'yes': if c.lower() == 'yes':
self.compress_news_images = True self.compress_news_images = True
extra_css = ''' extra_css = """
.byl, .time { font-size:small; color:#202020; } .byl, .time { font-size:small; color:#202020; }
.cap { font-size:small; } .cap { font-size:small; text-align:center; }
.cred { font-style:italic; font-size:small; } .cred { font-style:italic; font-size:small; }
em, blockquote { color: #202020; } em, blockquote { color: #202020; }
.sc { font-variant: small-caps; } .sc { font-variant: small-caps; }
.lbl { font-size:small; color:#404040; } .lbl { font-size:small; color:#404040; }
img { display:block; margin:0 auto; } img { display:block; margin:0 auto; }
''' """
# https://www.nytimes.com/rss @property
# https://developer.nytimes.com/docs/rss-api/1/overview def nyt_parser(self):
feeds = [ ans = getattr(self, '_nyt_parser', None)
# to filter out all opinions from other sections first if ans is None:
'https://rss.nytimes.com/services/xml/rss/nyt/Opinion.xml', from calibre.live import load_module
'https://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml', self._nyt_parser = ans = load_module('calibre.web.site_parsers.nytimes')
'https://rss.nytimes.com/services/xml/rss/nyt/World.xml', return ans
'https://rss.nytimes.com/services/xml/rss/nyt/US.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/Business.xml', def get_nyt_page(self, url, skip_wayback=False):
'https://rss.nytimes.com/services/xml/rss/nyt/YourMoney.xml', if use_wayback_machine and not skip_wayback:
'https://rss.nytimes.com/services/xml/rss/nyt/Technology.xml', from calibre import browser
'https://rss.nytimes.com/services/xml/rss/nyt/Science.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/Climate.xml', return self.nyt_parser.download_url(url, browser())
'https://rss.nytimes.com/services/xml/rss/nyt/Health.xml', return self.index_to_soup(url, raw=True)
'https://rss.nytimes.com/services/xml/rss/nyt/Arts.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/FashionandStyle.xml', articles_are_obfuscated = use_wayback_machine
'https://rss.nytimes.com/services/xml/rss/nyt/tmagazine.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/books.xml', if use_wayback_machine:
'https://www.nytimes.com/services/xml/rss/nyt/Travel.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/well.xml', def get_obfuscated_article(self, url):
'https://rss.nytimes.com/services/xml/rss/nyt/Sports.xml', from calibre.ptempfile import PersistentTemporaryFile
'http://nytimes.com/timeswire/feeds/'
] with PersistentTemporaryFile() as tf:
tf.write(self.get_nyt_page(url))
return tf.name
def preprocess_raw_html(self, raw_html, url):
return self.nyt_parser.extract_html(self.index_to_soup(raw_html), url)
def get_browser(self, *args, **kwargs): def get_browser(self, *args, **kwargs):
kwargs['user_agent'] = 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)' kwargs['user_agent'] = (
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
)
br = BasicNewsRecipe.get_browser(self, *args, **kwargs) br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
br.addheaders += [ br.addheaders += [
('Referer', 'https://www.google.com/'), ('Referer', 'https://www.google.com/'),
('X-Forwarded-For', '66.249.66.1') ('X-Forwarded-For', '66.249.66.1'),
] ]
return br return br
def preprocess_raw_html(self, raw_html, url):
if '/interactive/' in url:
return '<html><body><p><em>'\
+ 'This is an interactive article, which is supposed to be read in a browser.'\
+ '</p></em></body></html>'
data = extract_json(raw_html)
return '\n'.join(article_parse(data))
def preprocess_html(self, soup): def preprocess_html(self, soup):
w = self.recipe_specific_options.get('res') w = self.recipe_specific_options.get('res')
if w and isinstance(w, str): if w and isinstance(w, str):
res = '-' + w res = '-' + w
for img in soup.findAll('img', attrs={'src':True}): for img in soup.findAll('img', attrs={'src': True}):
if '-article' in img['src']: if '-article' in img['src']:
ext = img['src'].split('?')[0].split('.')[-1] ext = img['src'].split('?')[0].split('.')[-1]
img['src'] = img['src'].rsplit('-article', 1)[0] + res + '.' + ext img['src'] = img['src'].rsplit('-article', 1)[0] + res + '.' + ext
for c in soup.findAll('div', attrs={'class':'cap'}): for c in soup.findAll('div', attrs={'class': 'cap'}):
for p in c.findAll(['p', 'div']): for p in c.findAll(['p', 'div']):
p.name = 'span' p.name = 'span'
return soup return soup

View File

@ -88,7 +88,7 @@ class NewYorkTimes(BasicNewsRecipe):
is_web_edition = True is_web_edition = True
oldest_web_edition_article = 7 # days oldest_web_edition_article = 7 # days
extra_css = ''' extra_css = """
.byl, .time { font-size:small; color:#202020; } .byl, .time { font-size:small; color:#202020; }
.cap { font-size:small; text-align:center; } .cap { font-size:small; text-align:center; }
.cred { font-style:italic; font-size:small; } .cred { font-style:italic; font-size:small; }
@ -96,7 +96,7 @@ class NewYorkTimes(BasicNewsRecipe):
.sc { font-variant: small-caps; } .sc { font-variant: small-caps; }
.lbl { font-size:small; color:#404040; } .lbl { font-size:small; color:#404040; }
img { display:block; margin:0 auto; } img { display:block; margin:0 auto; }
''' """
@property @property
def nyt_parser(self): def nyt_parser(self):
@ -113,12 +113,7 @@ class NewYorkTimes(BasicNewsRecipe):
return self.index_to_soup(url, raw=True) return self.index_to_soup(url, raw=True)
def preprocess_raw_html(self, raw_html, url): def preprocess_raw_html(self, raw_html, url):
if '/interactive/' in url: return self.nyt_parser.extract_html(self.index_to_soup(raw_html), url)
return '<html><body><p><em>'\
+ 'This is an interactive article, which is supposed to be read in a browser.'\
+ '</p></em></body></html>'
html = self.nyt_parser.extract_html(self.index_to_soup(raw_html))
return html
articles_are_obfuscated = use_wayback_machine articles_are_obfuscated = use_wayback_machine
@ -144,8 +139,14 @@ class NewYorkTimes(BasicNewsRecipe):
'long': 'For example, 2024/07/16' 'long': 'For example, 2024/07/16'
}, },
'res': { 'res': {
'short': 'For hi-res images, select a resolution from the following\noptions: popup, jumbo, mobileMasterAt3x, superJumbo', 'short': (
'long': 'This is useful for non e-ink devices, and for a lower file size\nthan the default, use articleInline.', 'For hi-res images, select a resolution from the following\noptions: '
'popup, jumbo, mobileMasterAt3x, superJumbo'
),
'long': (
'This is useful for non e-ink devices, and for a lower file size\nthan '
'the default, use mediumThreeByTwo440, mediumThreeByTwo225, articleInline.'
),
}, },
'comp': { 'comp': {
'short': 'Compress News Images?', 'short': 'Compress News Images?',

View File

@ -88,7 +88,7 @@ class NewYorkTimes(BasicNewsRecipe):
is_web_edition = False is_web_edition = False
oldest_web_edition_article = 7 # days oldest_web_edition_article = 7 # days
extra_css = ''' extra_css = """
.byl, .time { font-size:small; color:#202020; } .byl, .time { font-size:small; color:#202020; }
.cap { font-size:small; text-align:center; } .cap { font-size:small; text-align:center; }
.cred { font-style:italic; font-size:small; } .cred { font-style:italic; font-size:small; }
@ -96,7 +96,7 @@ class NewYorkTimes(BasicNewsRecipe):
.sc { font-variant: small-caps; } .sc { font-variant: small-caps; }
.lbl { font-size:small; color:#404040; } .lbl { font-size:small; color:#404040; }
img { display:block; margin:0 auto; } img { display:block; margin:0 auto; }
''' """
@property @property
def nyt_parser(self): def nyt_parser(self):
@ -113,12 +113,7 @@ class NewYorkTimes(BasicNewsRecipe):
return self.index_to_soup(url, raw=True) return self.index_to_soup(url, raw=True)
def preprocess_raw_html(self, raw_html, url): def preprocess_raw_html(self, raw_html, url):
if '/interactive/' in url: return self.nyt_parser.extract_html(self.index_to_soup(raw_html), url)
return '<html><body><p><em>'\
+ 'This is an interactive article, which is supposed to be read in a browser.'\
+ '</p></em></body></html>'
html = self.nyt_parser.extract_html(self.index_to_soup(raw_html))
return html
articles_are_obfuscated = use_wayback_machine articles_are_obfuscated = use_wayback_machine
@ -144,8 +139,14 @@ class NewYorkTimes(BasicNewsRecipe):
'long': 'For example, 2024/07/16' 'long': 'For example, 2024/07/16'
}, },
'res': { 'res': {
'short': 'For hi-res images, select a resolution from the following\noptions: popup, jumbo, mobileMasterAt3x, superJumbo', 'short': (
'long': 'This is useful for non e-ink devices, and for a lower file size\nthan the default, use articleInline.', 'For hi-res images, select a resolution from the following\noptions: '
'popup, jumbo, mobileMasterAt3x, superJumbo'
),
'long': (
'This is useful for non e-ink devices, and for a lower file size\nthan '
'the default, use mediumThreeByTwo440, mediumThreeByTwo225, articleInline.'
),
}, },
'comp': { 'comp': {
'short': 'Compress News Images?', 'short': 'Compress News Images?',

View File

@ -1,23 +1,67 @@
#!/usr/bin/env python #!/usr/bin/env python
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
use_wayback_machine = False use_wayback_machine = False
class NYTimesTechnology(BasicNewsRecipe): class NytTech(BasicNewsRecipe):
title = 'New York Times Technology Beat' title = 'New York Times Technology Beat'
language = 'en_US'
description = 'The latest in technology - Gadgetwise'
__author__ = 'unkn0wn' __author__ = 'unkn0wn'
oldest_article = 14 # days description = 'The latest in technology - Gadgetwise'
no_stylesheets = True oldest_article = 14 # days
no_javascript = True
ignore_duplicate_articles = {'title', 'url'}
encoding = 'utf-8' encoding = 'utf-8'
use_embedded_content = False
language = 'en_US'
remove_empty_feeds = True
resolve_internal_links = True
ignore_duplicate_articles = {'title', 'url'}
masthead_url = 'https://static01.nytimes.com/newsgraphics/2015-12-23-masthead-2016/b15c3d81d3d7b59065fff9a3f3afe85aa2e2dff5/_assets/nyt-logo.png'
extra_css = ''' feeds = [
'https://rss.nytimes.com/services/xml/rss/nyt/Technology.xml',
]
recipe_specific_options = {
'days': {
'short': 'Oldest article to download from this news source. In days ',
'long': 'For example, 0.5, gives you articles from the past 12 hours',
'default': str(oldest_article),
},
'comp': {'short': 'Compress News Images?', 'long': 'enter yes', 'default': 'no'},
'rev': {
'short': 'Reverse the order of articles in each feed?',
'long': 'enter yes',
'default': 'no',
},
'res': {
'short': (
'For hi-res images, select a resolution from the following\noptions: '
'popup, jumbo, mobileMasterAt3x, superJumbo'
),
'long': (
'This is useful for non e-ink devices, and for a lower file size\nthan '
'the default, use mediumThreeByTwo440, mediumThreeByTwo225, articleInline.'
),
},
}
def __init__(self, *args, **kwargs):
BasicNewsRecipe.__init__(self, *args, **kwargs)
d = self.recipe_specific_options.get('days')
if d and isinstance(d, str):
self.oldest_article = float(d)
r = self.recipe_specific_options.get('rev')
if r and isinstance(r, str):
if r.lower() == 'yes':
self.reverse_article_order = True
c = self.recipe_specific_options.get('comp')
if c and isinstance(c, str):
if c.lower() == 'yes':
self.compress_news_images = True
extra_css = """
.byl, .time { font-size:small; color:#202020; } .byl, .time { font-size:small; color:#202020; }
.cap { font-size:small; text-align:center; } .cap { font-size:small; text-align:center; }
.cred { font-style:italic; font-size:small; } .cred { font-style:italic; font-size:small; }
@ -25,76 +69,46 @@ class NYTimesTechnology(BasicNewsRecipe):
.sc { font-variant: small-caps; } .sc { font-variant: small-caps; }
.lbl { font-size:small; color:#404040; } .lbl { font-size:small; color:#404040; }
img { display:block; margin:0 auto; } img { display:block; margin:0 auto; }
''' """
articles_are_obfuscated = use_wayback_machine
if use_wayback_machine:
def get_obfuscated_article(self, url):
from calibre.ptempfile import PersistentTemporaryFile
with PersistentTemporaryFile() as tf:
tf.write(self.get_nyt_page(url))
return tf.name
@property @property
def nyt_parser(self): def nyt_parser(self):
ans = getattr(self, '_nyt_parser', None) ans = getattr(self, '_nyt_parser', None)
if ans is None: if ans is None:
from calibre.live import load_module from calibre.live import load_module
self._nyt_parser = ans = load_module('calibre.web.site_parsers.nytimes') self._nyt_parser = ans = load_module('calibre.web.site_parsers.nytimes')
return ans return ans
def get_nyt_page(self, url, skip_wayback=False): def get_nyt_page(self, url, skip_wayback=False):
if use_wayback_machine and not skip_wayback: if use_wayback_machine and not skip_wayback:
from calibre import browser from calibre import browser
return self.nyt_parser.download_url(url, browser()) return self.nyt_parser.download_url(url, browser())
return self.index_to_soup(url, raw=True) return self.index_to_soup(url, raw=True)
articles_are_obfuscated = use_wayback_machine
if use_wayback_machine:
def get_obfuscated_article(self, url):
from calibre.ptempfile import PersistentTemporaryFile
with PersistentTemporaryFile() as tf:
tf.write(self.get_nyt_page(url))
return tf.name
def preprocess_raw_html(self, raw_html, url): def preprocess_raw_html(self, raw_html, url):
if '/interactive/' in url: return self.nyt_parser.extract_html(self.index_to_soup(raw_html), url)
return '<html><body><p><em>'\
+ 'This is an interactive article, which is supposed to be read in a browser.'\
+ '</p></em></body></html>'
html = self.nyt_parser.extract_html(self.index_to_soup(raw_html))
return html
recipe_specific_options = {
'days': {
'short': 'Oldest article to download from this news source. In days ',
'long': 'For example, 0.5, gives you articles from the past 12 hours',
'default': str(oldest_article)
},
'res': {
'short': 'For hi-res images, select a resolution from the following\noptions: popup, jumbo, mobileMasterAt3x, superJumbo',
'long': 'This is useful for non e-ink devices, and for a lower file size\nthan the default, use articleInline.',
},
'comp': {
'short': 'Compress News Images?',
'long': 'enter yes',
'default': 'no'
}
}
def __init__(self, *args, **kwargs):
BasicNewsRecipe.__init__(self, *args, **kwargs)
c = self.recipe_specific_options.get('comp')
if c and isinstance(c, str):
if c.lower() == 'yes':
self.compress_news_images = True
d = self.recipe_specific_options.get('days')
if d and isinstance(d, str):
self.oldest_article = float(d)
feeds = [
(u'Gadgetwise', u'http://gadgetwise.blogs.nytimes.com/feed/'),
]
def get_browser(self, *args, **kwargs): def get_browser(self, *args, **kwargs):
kwargs['user_agent'] = 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)' kwargs['user_agent'] = (
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
)
br = BasicNewsRecipe.get_browser(self, *args, **kwargs) br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
br.addheaders += [ br.addheaders += [
('Referer', 'https://www.google.com/'), ('Referer', 'https://www.google.com/'),
('X-Forwarded-For', '66.249.66.1') ('X-Forwarded-For', '66.249.66.1'),
] ]
return br return br
@ -102,11 +116,11 @@ class NYTimesTechnology(BasicNewsRecipe):
w = self.recipe_specific_options.get('res') w = self.recipe_specific_options.get('res')
if w and isinstance(w, str): if w and isinstance(w, str):
res = '-' + w res = '-' + w
for img in soup.findAll('img', attrs={'src':True}): for img in soup.findAll('img', attrs={'src': True}):
if '-article' in img['src']: if '-article' in img['src']:
ext = img['src'].split('?')[0].split('.')[-1] ext = img['src'].split('?')[0].split('.')[-1]
img['src'] = img['src'].rsplit('-article', 1)[0] + res + '.' + ext img['src'] = img['src'].rsplit('-article', 1)[0] + res + '.' + ext
for c in soup.findAll('div', attrs={'class':'cap'}): for c in soup.findAll('div', attrs={'class': 'cap'}):
for p in c.findAll(['p', 'div']): for p in c.findAll(['p', 'div']):
p.name = 'span' p.name = 'span'
return soup return soup

View File

@ -24,7 +24,7 @@ class NewYorkTimesBookReview(BasicNewsRecipe):
ignore_duplicate_articles = {'title', 'url'} ignore_duplicate_articles = {'title', 'url'}
encoding = 'utf-8' encoding = 'utf-8'
extra_css = ''' extra_css = """
.byl, .time { font-size:small; color:#202020; } .byl, .time { font-size:small; color:#202020; }
.cap { font-size:small; text-align:center; } .cap { font-size:small; text-align:center; }
.cred { font-style:italic; font-size:small; } .cred { font-style:italic; font-size:small; }
@ -32,7 +32,7 @@ class NewYorkTimesBookReview(BasicNewsRecipe):
.sc { font-variant: small-caps; } .sc { font-variant: small-caps; }
.lbl { font-size:small; color:#404040; } .lbl { font-size:small; color:#404040; }
img { display:block; margin:0 auto; } img { display:block; margin:0 auto; }
''' """
articles_are_obfuscated = use_wayback_machine articles_are_obfuscated = use_wayback_machine
@ -58,17 +58,18 @@ class NewYorkTimesBookReview(BasicNewsRecipe):
return self.index_to_soup(url, raw=True) return self.index_to_soup(url, raw=True)
def preprocess_raw_html(self, raw_html, url): def preprocess_raw_html(self, raw_html, url):
if '/interactive/' in url: return self.nyt_parser.extract_html(self.index_to_soup(raw_html), url)
return '<html><body><p><em>'\
+ 'This is an interactive article, which is supposed to be read in a browser.'\
+ '</p></em></body></html>'
html = self.nyt_parser.extract_html(self.index_to_soup(raw_html))
return html
recipe_specific_options = { recipe_specific_options = {
'res': { 'res': {
'short': 'For hi-res images, select a resolution from the following\noptions: popup, jumbo, mobileMasterAt3x, superJumbo', 'short': (
'long': 'This is useful for non e-ink devices, and for a lower file size\nthan the default, use articleInline.', 'For hi-res images, select a resolution from the following\noptions: '
'popup, jumbo, mobileMasterAt3x, superJumbo'
),
'long': (
'This is useful for non e-ink devices, and for a lower file size\nthan '
'the default, use mediumThreeByTwo440, mediumThreeByTwo225, articleInline.'
),
}, },
'comp': { 'comp': {
'short': 'Compress News Images?', 'short': 'Compress News Images?',

View File

@ -9,7 +9,7 @@ from xml.sax.saxutils import escape, quoteattr
from calibre.utils.iso8601 import parse_iso8601 from calibre.utils.iso8601 import parse_iso8601
module_version = 8 # needed for live updates module_version = 9 # needed for live updates
pprint pprint
@ -27,6 +27,7 @@ def parse_image(i):
yield f'<div class="cap">{i["legacyHtmlCaption"]}</div>' yield f'<div class="cap">{i["legacyHtmlCaption"]}</div>'
yield '</div>' yield '</div>'
def parse_img_grid(g): def parse_img_grid(g):
for grd in g.get('gridMedia', {}): for grd in g.get('gridMedia', {}):
yield ''.join(parse_image(grd)) yield ''.join(parse_image(grd))
@ -36,16 +37,21 @@ def parse_img_grid(g):
yield f'<span class="cred"> {g["credit"]}</span>' yield f'<span class="cred"> {g["credit"]}</span>'
yield '</div>' yield '</div>'
def parse_vid(v): def parse_vid(v):
if v.get('promotionalMedia'): if v.get('promotionalMedia'):
headline = v.get("headline", {}).get("default", "") headline = v.get('headline', {}).get('default', '')
rendition = v.get('renditions') rendition = v.get('renditions')
yield (f'<div><b><a href="{rendition[0]["url"]}">Video</a>: {headline}</b></div>' yield (
if rendition else f'<div><b>{headline}</b></div>') f'<div><b><a href="{rendition[0]["url"]}">Video</a>: {headline}</b></div>'
yield ''.join(parse_types(v["promotionalMedia"])) if rendition
else f'<div><b>{headline}</b></div>'
)
yield ''.join(parse_types(v['promotionalMedia']))
if v.get('promotionalSummary'): if v.get('promotionalSummary'):
yield f'<div class="cap">{v["promotionalSummary"]}</div>' yield f'<div class="cap">{v["promotionalSummary"]}</div>'
def parse_emb(e): def parse_emb(e):
if e.get('html') and 'datawrapper.dwcdn.net' in e.get('html', ''): if e.get('html') and 'datawrapper.dwcdn.net' in e.get('html', ''):
dw = re.search(r'datawrapper.dwcdn.net/(.{5})', e['html']).group(1) dw = re.search(r'datawrapper.dwcdn.net/(.{5})', e['html']).group(1)
@ -53,57 +59,62 @@ def parse_emb(e):
elif e.get('promotionalMedia'): elif e.get('promotionalMedia'):
if e.get('headline'): if e.get('headline'):
yield f'<div><b>{e["headline"]["default"]}</b></div>' yield f'<div><b>{e["headline"]["default"]}</b></div>'
yield ''.join(parse_types(e["promotionalMedia"])) yield ''.join(parse_types(e['promotionalMedia']))
if e.get('note'): if e.get('note'):
yield f'<div class="cap">{e["note"]}</div>' yield f'<div class="cap">{e["note"]}</div>'
def parse_byline(byl): def parse_byline(byl):
for b in byl.get('bylines', {}): for b in byl.get('bylines', {}):
yield f'<div>{b["renderedRepresentation"]}</div>' yield f'<div><b>{b["renderedRepresentation"]}</b></div>'
yield '<div><b><i>' yield '<div><i>'
for rl in byl.get('role', {}): for rl in byl.get('role', {}):
if ''.join(parse_cnt(rl)).strip(): if ''.join(parse_cnt(rl)).strip():
yield ''.join(parse_cnt(rl)) yield ''.join(parse_cnt(rl))
yield '</i></b></div>' yield '</i></div>'
def iso_date(x): def iso_date(x):
dt = parse_iso8601(x, as_utc=False) dt = parse_iso8601(x, as_utc=False)
return dt.strftime('%b %d, %Y at %I:%M %p') return dt.strftime('%b %d, %Y at %I:%M %p')
def parse_header(h): def parse_header(h):
if h.get('label'): if h.get('label'):
yield f'<div class="lbl">{"".join(parse_types(h["label"]))}</div>' yield f'<div class="lbl">{"".join(parse_types(h["label"]))}</div>'
if h.get('headline'): if h.get('headline'):
yield ''.join(parse_types(h["headline"])) yield ''.join(parse_types(h['headline']))
if h.get('summary'): if h.get('summary'):
yield f'<p><i>{"".join(parse_types(h["summary"]))}</i></p>' yield f'<p><i>{"".join(parse_types(h["summary"]))}</i></p>'
if h.get('ledeMedia'): if h.get('ledeMedia'):
yield ''.join(parse_types(h["ledeMedia"])) yield ''.join(parse_types(h['ledeMedia']))
if h.get('byline'): if h.get('byline'):
yield ''.join(parse_types(h["byline"])) yield ''.join(parse_types(h['byline']))
if h.get('timestampBlock'): if h.get('timestampBlock'):
yield ''.join(parse_types(h["timestampBlock"])) yield ''.join(parse_types(h['timestampBlock']))
def parse_fmt_type(fm): def parse_fmt_type(fm):
for f in fm.get('formats', {}): for f in fm.get('formats', {}):
ftype = f.get("__typename", "") ftype = f.get('__typename', '')
if ftype == "BoldFormat": if ftype == 'BoldFormat':
yield '<strong>' yield '<strong>'
if ftype == "ItalicFormat": if ftype == 'ItalicFormat':
yield '<em>' yield '<em>'
if ftype == "LinkFormat": if ftype == 'LinkFormat':
hrf = f["url"] hrf = f['url']
yield f'<a href="{hrf}">' yield f'<a href="{hrf}">'
yield fm.get("text", "") yield fm.get('text', '')
for f in reversed(fm.get('formats', {})): for f in reversed(fm.get('formats', {})):
ftype = f.get("__typename", "") ftype = f.get('__typename', '')
if ftype == "BoldFormat": if ftype == 'BoldFormat':
yield '</strong>' yield '</strong>'
if ftype == "ItalicFormat": if ftype == 'ItalicFormat':
yield '</em>' yield '</em>'
if ftype == "LinkFormat": if ftype == 'LinkFormat':
yield '</a>' yield '</a>'
def parse_cnt(cnt): def parse_cnt(cnt):
for k in cnt: for k in cnt:
if isinstance(cnt[k], list): if isinstance(cnt[k], list):
@ -118,17 +129,22 @@ def parse_cnt(cnt):
if isinstance(cnt['text'], str): if isinstance(cnt['text'], str):
yield cnt['text'] yield cnt['text']
def parse_types(x): def parse_types(x):
typename = x.get('__typename', '') typename = x.get('__typename', '')
align = ''
if x.get('textAlign'):
align = f' style="text-align: {x["textAlign"].lower()};"'
if 'Header' in typename: if 'Header' in typename:
yield '\n'.join(parse_header(x)) yield '\n'.join(parse_header(x))
elif typename.startswith('Heading'): elif typename.startswith('Heading'):
htag = 'h' + re.match(r'Heading([1-6])Block', typename).group(1) htag = 'h' + re.match(r'Heading([1-6])Block', typename).group(1)
yield f'<{htag}>{"".join(parse_cnt(x))}</{htag}>' yield f'<{htag}{align}>{"".join(parse_cnt(x))}</{htag}>'
elif typename == 'ParagraphBlock': elif typename in {'ParagraphBlock', 'DetailBlock', 'TextRunKV'}:
yield f'<p>{"".join(parse_cnt(x))}</p>' yield f'<p>{"".join(parse_cnt(x))}</p>'
elif typename == 'BylineBlock': elif typename == 'BylineBlock':
@ -145,16 +161,16 @@ def parse_types(x):
yield '<hr/>' yield '<hr/>'
elif typename == 'Image': elif typename == 'Image':
yield "".join(parse_image(x)) yield ''.join(parse_image(x))
elif typename == 'GridBlock': elif typename == 'GridBlock':
yield "".join(parse_img_grid(x)) yield ''.join(parse_img_grid(x))
elif typename == 'Video': elif typename == 'Video':
yield "".join(parse_vid(x)) yield ''.join(parse_vid(x))
elif typename == 'EmbeddedInteractive': elif typename == 'EmbeddedInteractive':
yield "".join(parse_emb(x)) yield ''.join(parse_emb(x))
elif typename == 'ListBlock': elif typename == 'ListBlock':
yield f'<ul>{"".join(parse_cnt(x))}</ul>' yield f'<ul>{"".join(parse_cnt(x))}</ul>'
@ -162,20 +178,22 @@ def parse_types(x):
yield f'\n<li>{"".join(parse_cnt(x))}</li>' yield f'\n<li>{"".join(parse_cnt(x))}</li>'
elif typename == 'TextInline': elif typename == 'TextInline':
yield "".join(parse_cnt(x)) yield ''.join(parse_cnt(x))
elif typename in {'DetailBlock', 'TextRunKV'}: elif typename and typename not in {
yield f'<p><i>{"".join(parse_cnt(x))}</i></p>' 'RelatedLinksBlock',
'EmailSignupBlock',
'Dropzone',
}:
if ''.join(parse_cnt(x)).strip():
yield ''.join(parse_cnt(x))
elif typename and typename not in {'RelatedLinksBlock', 'Dropzone'}:
if "".join(parse_cnt(x)).strip():
yield "".join(parse_cnt(x))
def article_parse(data): def article_parse(data):
yield "<html><body>" yield '<html><body>'
for d in data: for d in data:
yield from parse_types(d) yield from parse_types(d)
yield "</body></html>" yield '</body></html>'
def json_to_html(raw): def json_to_html(raw):
@ -232,7 +250,7 @@ def add_live_item(item, item_type, lines):
def live_json_to_html(data): def live_json_to_html(data):
for k, v in data["ROOT_QUERY"].items(): for k, v in data['ROOT_QUERY'].items():
if isinstance(v, dict) and 'id' in v: if isinstance(v, dict) and 'id' in v:
root = data[v['id']] root = data[v['id']]
s = data[root['storylines'][0]['id']] s = data[root['storylines'][0]['id']]
@ -249,24 +267,32 @@ def live_json_to_html(data):
return '<html><body>' + '\n'.join(lines) + '</body></html>' return '<html><body>' + '\n'.join(lines) + '</body></html>'
def extract_html(soup): def extract_html(soup, url):
if '/interactive/' in url:
return (
'<html><body><p><em>'
+ 'This is an interactive article, which is supposed to be read in a browser.'
+ '</p></em></body></html>'
)
script = soup.findAll('script', text=lambda x: x and 'window.__preloadedData' in x)[0] script = soup.findAll('script', text=lambda x: x and 'window.__preloadedData' in x)[0]
script = str(script) script = str(script)
raw = script[script.find('{'):script.rfind(';')].strip().rstrip(';') raw = script[script.find('{') : script.rfind(';')].strip().rstrip(';')
return json_to_html(raw) return json_to_html(raw)
def download_url_from_wayback(category, url, br=None): def download_url_from_wayback(category, url, br=None):
from mechanize import Request from mechanize import Request
host = 'http://localhost:8090' host = 'http://localhost:8090'
host = 'https://wayback1.calibre-ebook.com' host = 'https://wayback1.calibre-ebook.com'
rq = Request( rq = Request(
host + '/' + category, host + '/' + category,
data=json.dumps({"url": url}), data=json.dumps({'url': url}),
headers={'User-Agent': 'calibre', 'Content-Type': 'application/json'} headers={'User-Agent': 'calibre', 'Content-Type': 'application/json'},
) )
if br is None: if br is None:
from calibre import browser from calibre import browser
br = browser() br = browser()
br.set_handle_gzip(True) br.set_handle_gzip(True)
return br.open_novisit(rq, timeout=3 * 60).read() return br.open_novisit(rq, timeout=3 * 60).read()
@ -284,6 +310,7 @@ if __name__ == '__main__':
raw = open(f).read() raw = open(f).read()
if f.endswith('.html'): if f.endswith('.html'):
from calibre.ebooks.BeautifulSoup import BeautifulSoup from calibre.ebooks.BeautifulSoup import BeautifulSoup
soup = BeautifulSoup(raw) soup = BeautifulSoup(raw)
print(extract_html(soup)) print(extract_html(soup))
else: else: