#!/usr/bin/env python
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2015, Kovid Goyal <kovid at kovidgoyal.net>

import json

import mechanize

from calibre.web.feeds.news import BasicNewsRecipe
from polyglot.urllib import urlencode

use_wayback_machine = False
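# When True, article pages are fetched through the Wayback Machine instead of
# nytimes.com directly; see get_obfuscated_article() and get_nyt_page() below.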

# This is an Apollo persisted query hash, which you can get by inspecting
# the XHR requests made by https://www.nytimes.com/section/todayspaper
# or https://www.nytimes.com/section/world
persistedQuery = '1f99120a11e94dd62a9474f68ee1255537ee3cf7eac20a0377819edb2fa1fef7'
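# The hash identifies a query stored server-side, so no GraphQL query text is
# sent. nyt_graphql_query() below builds a GET request of roughly this shape:
#   <gqlUrlClient>?operationName=CollectionsQuery
#     &variables={"id": "/section/books/review", ...}
#     &extensions={"persistedQuery": {"version": 1, "sha256Hash": persistedQuery}}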


def absolutize(url):
    if url.startswith('/'):
        url = 'https://www.nytimes.com' + url
    return url
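# e.g. absolutize('/section/books/review') -> 'https://www.nytimes.com/section/books/review'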


class NewYorkTimesBookReview(BasicNewsRecipe):
    title = 'New York Times Book Review'
    language = 'en_US'
    description = 'The New York Times Sunday Book Review'
    __author__ = 'Kovid Goyal'

    no_stylesheets = True
    no_javascript = True
    ignore_duplicate_articles = {'title', 'url'}
    encoding = 'utf-8'

    extra_css = '''
        .byl, .time { font-size:small; color:#202020; }
        .cap { font-size:small; text-align:center; }
        .cred { font-style:italic; font-size:small; }
        em, blockquote { color: #202020; }
        .sc { font-variant: small-caps; }
        .lbl { font-size:small; color:#404040; }
        img { display:block; margin:0 auto; }
    '''

    articles_are_obfuscated = use_wayback_machine
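    # When articles_are_obfuscated is True, calibre fetches each article by
    # calling get_obfuscated_article() and reads the returned file path instead
    # of downloading the URL itself.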

    if use_wayback_machine:
        def get_obfuscated_article(self, url):
            from calibre.ptempfile import PersistentTemporaryFile
            with PersistentTemporaryFile() as tf:
                tf.write(self.get_nyt_page(url))
            return tf.name

    @property
    def nyt_parser(self):
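        # Lazily load calibre's shared NYT site parser; load_module() pulls the
        # live-updatable calibre.web.site_parsers.nytimes module, so parsing
        # fixes can land without a full calibre release.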
        ans = getattr(self, '_nyt_parser', None)
        if ans is None:
            from calibre.live import load_module
            self._nyt_parser = ans = load_module('calibre.web.site_parsers.nytimes')
        return ans

    def get_nyt_page(self, url, skip_wayback=False):
        if use_wayback_machine and not skip_wayback:
            from calibre import browser
            return self.nyt_parser.download_url(url, browser())
        return self.index_to_soup(url, raw=True)

    def preprocess_raw_html(self, raw_html, url):
        return self.nyt_parser.extract_html(self.index_to_soup(raw_html), url)

    recipe_specific_options = {
        'res': {
            'short': (
                'For hi-res images, select a resolution from the following\noptions: '
                'popup, jumbo, mobileMasterAt3x, superJumbo'
            ),
            'long': (
                'Hi-res images are useful for non e-ink devices. For a lower file size\nthan '
                'the default, use mediumThreeByTwo440, mediumThreeByTwo225 or articleInline.'
            ),
        },
        'comp': {
            'short': 'Compress News Images?',
            'long': 'enter yes to enable compression of downloaded images',
            'default': 'no'
        }
    }
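    # Option values entered in calibre's download dialog arrive as strings,
    # hence the isinstance checks before they are applied.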

    def __init__(self, *args, **kwargs):
        BasicNewsRecipe.__init__(self, *args, **kwargs)
        c = self.recipe_specific_options.get('comp')
        if c and isinstance(c, str):
            if c.lower() == 'yes':
                self.compress_news_images = True

    def read_nyt_metadata(self):
        soup = self.index_to_soup('https://www.nytimes.com/pages/books/review/index.html')
        script = soup.findAll('script', text=lambda x: x and 'window.__preloadedData' in x)[0]
        script = str(script)
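        # Slice out the JSON object assigned to window.__preloadedData: from the
        # first '{' up to the trailing ';' that ends the assignment.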
        json_data = script[script.find('{'):script.rfind(';')].strip().rstrip(';')  # }}
        self.nytimes_graphql_config = json.loads(self.nyt_parser.clean_js_json(json_data))['config']
        return soup

    def nyt_graphql_query(self, qid, operationName='CollectionsQuery'):
        query = {
            'operationName': operationName,
            'variables': json.dumps({
                'id': qid,
                'first': 10,
                'exclusionMode': 'HIGHLIGHTS_AND_EMBEDDED',
                'isFetchMore': False,
                'isTranslatable': False,
                'isEspanol': False,
                'highlightsListUri': 'nyt://per/personalized-list/__null__',
                'highlightsListFirst': 0,
                'hasHighlightsList': False,
            }, separators=',:'),
            'extensions': json.dumps({
                'persistedQuery': {
                    'version': 1,
                    'sha256Hash': persistedQuery,
                },
            }, separators=',:'),
        }
        url = self.nytimes_graphql_config['gqlUrlClient'] + '?' + urlencode(query)
        br = self.browser
        # br.set_debug_http(True)
        headers = dict(self.nytimes_graphql_config['gqlRequestHeaders'])
        headers['Accept'] = 'application/json'
        req = mechanize.Request(url, headers=headers)
        raw = br.open(req).read()
        # open('/t/raw.json', 'wb').write(raw)
        return json.loads(raw)

    def parse_index(self):
        # return [('Articles', [{'url': 'https://www.nytimes.com/2022/09/08/books/review/karen-armstrong-by-the-book-interview.html', 'title': 'test'}])]
        self.read_nyt_metadata()
        query_id = '/section/books/review'
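        # other section TOCs appear to use the same CollectionsQuery,
        # e.g. query_id = '/section/world'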
        data = self.nyt_graphql_query(query_id)
        return parse_toc(data, self.log)

    def get_browser(self, *args, **kwargs):
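        # Identify as the Wayback Machine's live-record crawler, which
        # nytimes.com appears to allow where ordinary bot user agents are blocked.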
        kwargs['user_agent'] = 'Mozilla/5.0 (compatible; archive.org_bot; Wayback Machine Live Record; +http://archive.org/details/archive.org_bot)'
        br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
        return br

    def preprocess_html(self, soup):
        w = self.recipe_specific_options.get('res')
        if w and isinstance(w, str):
            res = '-' + w
            for img in soup.findAll('img', attrs={'src': True}):
                if '-article' in img['src']:
                    ext = img['src'].split('?')[0].split('.')[-1]
                    img['src'] = img['src'].rsplit('-article', 1)[0] + res + '.' + ext
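                    # e.g. '...-articleLarge.jpg' with res 'superJumbo'
                    # becomes '...-superJumbo.jpg'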
        for c in soup.findAll('div', attrs={'class': 'cap'}):
            for p in c.findAll(['p', 'div']):
                p.name = 'span'
        return soup


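# Each Article asset in the GraphQL response is shaped roughly like (abridged):
#   {'headline': {'default': '...'}, 'url': 'https://...', 'summary': '...'}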
def asset_to_article(asset):
    title = asset['headline']['default']
    return {'title': title, 'url': asset['url'], 'description': asset['summary']}


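# Build the feed list from the GraphQL response: the first grouping carries the
# print Book Review TOC, while collectionsPage/embeddedCollections carries the
# web-only 'Books of the Times' stream.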
def parse_toc(data, log=print):
    containers = data['data']['legacyCollection']['groupings'][0]['containers']
    articles = []
    log('Book reviews')
    for cont in containers:
        if cont['__typename'] != 'LegacyCollectionContainer':
            continue
        for rel in cont['relations']:
            if rel.get('__typename') == 'LegacyCollectionRelation':
                asset = rel['asset']
                if asset['__typename'] == 'Article':
                    articles.append(asset_to_article(asset))
                    log(' ', articles[-1]['title'] + ':', articles[-1]['url'])
    feeds = [('Book reviews', articles)]
    articles = []
    log('Books of the Times')
    try:
        containers = data['data']['legacyCollection']['collectionsPage']
        if containers.get('embeddedCollections'):
            containers = containers['embeddedCollections']
        else:
            containers = [containers]
    except Exception as e:
        log('Failed to parse web section', 'Books of the Times', 'with error:', e)
        return feeds
    for cont in containers:
        for s in cont['stream']['edges']:
            asset = s['node']
            if asset['__typename'] == 'Article':
                articles.append(asset_to_article(asset))
                log(' ', articles[-1]['title'] + ':', articles[-1]['url'])
    if articles:
        feeds.append(('Books of the Times', articles))
    return feeds


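# Debug helper: run this file directly on a saved GraphQL response (see the
# commented-out open('/t/raw.json', 'wb') dump in nyt_graphql_query above).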
if __name__ == '__main__':
    import sys
    with open(sys.argv[-1], 'rb') as f:
        data = json.loads(f.read())
    parse_toc(data)