create NYT Magazine

This commit is contained in:
unkn0w7n 2024-10-09 09:56:19 +05:30
parent 55e6ef52ad
commit 579a6c7b97
7 changed files with 146 additions and 11 deletions

Binary file not shown.

After

Width:  |  Height:  |  Size: 416 B

131
recipes/nyt_magazine.recipe Normal file
View File

@@ -0,0 +1,131 @@
#!/usr/bin/env python
from calibre.web.feeds.news import BasicNewsRecipe

# When True, article pages are fetched through the Wayback Machine instead of
# directly from nytimes.com; also controls articles_are_obfuscated below.
use_wayback_machine = False
class NytMag(BasicNewsRecipe):
    """
    The New York Times Magazine.

    Pulls articles from the Magazine and Well RSS feeds; article HTML is
    extracted by calibre's live-updated nytimes site parser
    (calibre.web.site_parsers.nytimes).
    """

    title = 'NYT Magazine'
    __author__ = 'unkn0wn'
    description = 'The latest from The New York Times Magazine.'
    oldest_article = 30  # days
    encoding = 'utf-8'
    use_embedded_content = False
    language = 'en_US'
    remove_empty_feeds = True
    resolve_internal_links = True
    ignore_duplicate_articles = {'title', 'url'}
    masthead_url = 'https://static01.nytimes.com/newsgraphics/2015-12-23-masthead-2016/b15c3d81d3d7b59065fff9a3f3afe85aa2e2dff5/_assets/nyt-logo.png'

    def get_cover_url(self):
        # The magazine section page carries an "issue-promo" block for the
        # current print issue; use its promo image as the cover.
        soup = self.index_to_soup('https://www.nytimes.com/section/magazine')
        issue = soup.find(attrs={'class': 'issue-promo'})
        return issue.find(attrs={'class': 'promo-image'}).img['src']

    feeds = [
        'https://rss.nytimes.com/services/xml/rss/nyt/Magazine.xml',
        'https://rss.nytimes.com/services/xml/rss/nyt/well.xml',
    ]

    # User-tunable settings exposed in calibre's recipe-options UI.
    recipe_specific_options = {
        'days': {
            'short': 'Oldest article to download from this news source. In days ',
            'long': 'For example, 0.5, gives you articles from the past 12 hours',
            'default': str(oldest_article),
        },
        'comp': {'short': 'Compress News Images?', 'long': 'enter yes', 'default': 'no'},
        'rev': {
            'short': 'Reverse the order of articles in each feed?',
            'long': 'enter yes',
            'default': 'no',
        },
        'res': {
            'short': (
                'For hi-res images, select a resolution from the following\noptions: '
                'popup, jumbo, mobileMasterAt3x, superJumbo'
            ),
            'long': (
                'This is useful for non e-ink devices, and for a lower file size\nthan '
                'the default, use mediumThreeByTwo440, mediumThreeByTwo225, articleInline.'
            ),
        },
    }

    def __init__(self, *args, **kwargs):
        BasicNewsRecipe.__init__(self, *args, **kwargs)
        # Apply the user's recipe-specific overrides, if any were set.
        d = self.recipe_specific_options.get('days')
        if d and isinstance(d, str):
            self.oldest_article = float(d)
        r = self.recipe_specific_options.get('rev')
        if r and isinstance(r, str):
            if r.lower() == 'yes':
                self.reverse_article_order = True
        c = self.recipe_specific_options.get('comp')
        if c and isinstance(c, str):
            if c.lower() == 'yes':
                self.compress_news_images = True

    extra_css = """
        .byl, .time { font-size:small; color:#202020; }
        .cap { font-size:small; text-align:center; }
        .cred { font-style:italic; font-size:small; }
        em, blockquote { color: #202020; }
        .sc { font-variant: small-caps; }
        .lbl { font-size:small; color:#404040; }
        img { display:block; margin:0 auto; }
    """

    @property
    def nyt_parser(self):
        # Lazily load calibre's live-updatable NYT site parser and cache it
        # on the instance (as _nyt_parser) so it is loaded at most once.
        ans = getattr(self, '_nyt_parser', None)
        if ans is None:
            from calibre.live import load_module
            self._nyt_parser = ans = load_module('calibre.web.site_parsers.nytimes')
        return ans

    def get_nyt_page(self, url, skip_wayback=False):
        # Fetch raw page bytes, going through the Wayback Machine when the
        # module-level use_wayback_machine flag is on (unless skipped).
        if use_wayback_machine and not skip_wayback:
            from calibre import browser
            return self.nyt_parser.download_url(url, browser())
        return self.index_to_soup(url, raw=True)

    articles_are_obfuscated = use_wayback_machine

    if use_wayback_machine:
        # Only defined in wayback mode: download the page to a temp file and
        # hand calibre the path to read back.
        def get_obfuscated_article(self, url):
            from calibre.ptempfile import PersistentTemporaryFile
            with PersistentTemporaryFile() as tf:
                tf.write(self.get_nyt_page(url))
            return tf.name

    def preprocess_raw_html(self, raw_html, url):
        # Delegate article extraction to the live NYT site parser.
        return self.nyt_parser.extract_html(self.index_to_soup(raw_html), url)

    def get_browser(self, *args, **kwargs):
        # Present as Googlebot (UA, Google referer and a Googlebot address in
        # X-Forwarded-For) so the site serves full article pages.
        kwargs['user_agent'] = (
            'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
        )
        br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
        br.addheaders += [
            ('Referer', 'https://www.google.com/'),
            ('X-Forwarded-For', '66.249.66.1'),
        ]
        return br

    def preprocess_html(self, soup):
        # Rewrite image URLs to the user-selected resolution, if one was set.
        w = self.recipe_specific_options.get('res')
        if w and isinstance(w, str):
            res = '-' + w
            for img in soup.findAll('img', attrs={'src': True}):
                if '-article' in img['src']:
                    ext = img['src'].split('?')[0].split('.')[-1]
                    img['src'] = img['src'].rsplit('-article', 1)[0] + res + '.' + ext
        # Flatten caption paragraphs/divs to spans so captions render inline.
        for c in soup.findAll('div', attrs={'class': 'cap'}):
            for p in c.findAll(['p', 'div']):
                p.name = 'span'
        return soup

View File

@@ -18,6 +18,11 @@ class NytMag(BasicNewsRecipe):
ignore_duplicate_articles = {'title', 'url'} ignore_duplicate_articles = {'title', 'url'}
masthead_url = 'https://static01.nytimes.com/newsgraphics/2015-12-23-masthead-2016/b15c3d81d3d7b59065fff9a3f3afe85aa2e2dff5/_assets/nyt-logo.png' masthead_url = 'https://static01.nytimes.com/newsgraphics/2015-12-23-masthead-2016/b15c3d81d3d7b59065fff9a3f3afe85aa2e2dff5/_assets/nyt-logo.png'
def get_cover_url(self):
soup = self.index_to_soup('https://www.nytimes.com/section/t-magazine')
issue = soup.find(attrs={'class':'issue-promo'})
return issue.a.img['src']
feeds = [ feeds = [
'https://rss.nytimes.com/services/xml/rss/nyt/tmagazine.xml', 'https://rss.nytimes.com/services/xml/rss/nyt/tmagazine.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/FashionandStyle.xml', 'https://rss.nytimes.com/services/xml/rss/nyt/FashionandStyle.xml',

View File

@@ -41,8 +41,9 @@ class NytFeeds(BasicNewsRecipe):
# 'https://rss.nytimes.com/services/xml/rss/nyt/tmagazine.xml', # 'https://rss.nytimes.com/services/xml/rss/nyt/tmagazine.xml',
# 'https://rss.nytimes.com/services/xml/rss/nyt/books.xml', # 'https://rss.nytimes.com/services/xml/rss/nyt/books.xml',
'https://www.nytimes.com/services/xml/rss/nyt/Travel.xml', 'https://www.nytimes.com/services/xml/rss/nyt/Travel.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/well.xml', # 'https://rss.nytimes.com/services/xml/rss/nyt/well.xml',
'https://rss.nytimes.com/services/xml/rss/nyt/Sports.xml', 'https://rss.nytimes.com/services/xml/rss/nyt/Sports.xml',
# 'https://rss.nytimes.com/services/xml/rss/nyt/Magazine.xml',
# 'http://nytimes.com/timeswire/feeds/', # 'http://nytimes.com/timeswire/feeds/',
] ]

View File

@@ -31,7 +31,7 @@ web_sections = [
('Health', 'health'), ('Health', 'health'),
('Opinion', 'opinion'), ('Opinion', 'opinion'),
('Arts', 'arts'), ('Arts', 'arts'),
('Books', 'books'), # ('Books', 'books'),
('Movies', 'movies'), ('Movies', 'movies'),
('Music', 'arts/music'), ('Music', 'arts/music'),
('Television', 'arts/television'), ('Television', 'arts/television'),

View File

@@ -31,7 +31,7 @@ web_sections = [
('Health', 'health'), ('Health', 'health'),
('Opinion', 'opinion'), ('Opinion', 'opinion'),
('Arts', 'arts'), ('Arts', 'arts'),
('Books', 'books'), # ('Books', 'books'),
('Movies', 'movies'), ('Movies', 'movies'),
('Music', 'arts/music'), ('Music', 'arts/music'),
('Television', 'arts/television'), ('Television', 'arts/television'),

View File

@@ -9,7 +9,7 @@ from xml.sax.saxutils import escape, quoteattr
from calibre.utils.iso8601 import parse_iso8601 from calibre.utils.iso8601 import parse_iso8601
module_version = 9 # needed for live updates module_version = 10 # needed for live updates
pprint pprint
@@ -144,8 +144,10 @@ def parse_types(x):
htag = 'h' + re.match(r'Heading([1-6])Block', typename).group(1) htag = 'h' + re.match(r'Heading([1-6])Block', typename).group(1)
yield f'<{htag}{align}>{"".join(parse_cnt(x))}</{htag}>' yield f'<{htag}{align}>{"".join(parse_cnt(x))}</{htag}>'
elif typename in {'ParagraphBlock', 'DetailBlock', 'TextRunKV'}: elif typename == 'ParagraphBlock':
yield f'<p>{"".join(parse_cnt(x))}</p>' yield f'<p>{"".join(parse_cnt(x))}</p>'
elif typename in {'DetailBlock', 'TextRunKV'}:
yield f'<p style="font-size: small;">{"".join(parse_cnt(x))}</p>'
elif typename == 'BylineBlock': elif typename == 'BylineBlock':
yield f'<div class="byl"><br/>{"".join(parse_byline(x))}</div>' yield f'<div class="byl"><br/>{"".join(parse_byline(x))}</div>'
@@ -173,19 +175,15 @@ def parse_types(x):
yield ''.join(parse_emb(x)) yield ''.join(parse_emb(x))
elif typename == 'ListBlock': elif typename == 'ListBlock':
yield f'<ul>{"".join(parse_cnt(x))}</ul>' yield f'\n<ul>{"".join(parse_cnt(x))}</ul>'
elif typename == 'ListItemBlock': elif typename == 'ListItemBlock':
yield f'\n<li>{"".join(parse_cnt(x))}</li>' yield f'\n<li>{"".join(parse_cnt(x))}</li>'
elif typename == 'TextInline':
yield ''.join(parse_cnt(x))
elif typename and typename not in { elif typename and typename not in {
'RelatedLinksBlock', 'RelatedLinksBlock',
'EmailSignupBlock', 'EmailSignupBlock',
'Dropzone', 'Dropzone',
}: }:
if ''.join(parse_cnt(x)).strip():
yield ''.join(parse_cnt(x)) yield ''.join(parse_cnt(x))