#!/usr/bin/env python
# vim:fileencoding=utf-8
'''
washingtonpost.com
'''

import json

from html5_parser import parse

from calibre.web.feeds.news import BasicNewsRecipe

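
# Recipe for the print edition of The Washington Post, assembled from the
# "Today's Paper" index page rather than from RSS feeds.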
class wapoprint(BasicNewsRecipe):
    title = 'The Washington Post | Print Edition'
    __author__ = 'unkn0wn'
    description = (
        'Leading source for news, video and opinion on politics, business, '
        'world and national news, science, travel, entertainment and more. '
        'Our local coverage includes reporting on education, crime, weather, '
        'traffic, real estate, jobs and cars for DC, Maryland and Virginia. '
        'Offering award-winning opinion writing, entertainment information '
        'and restaurant reviews.'
    )
    publisher = 'The Washington Post Company'
    category = 'news, politics, USA'
    no_stylesheets = True
    use_embedded_content = False
    encoding = 'utf-8'
    language = 'en_US'
    remove_attributes = ['style', 'height', 'width']
    publication_type = 'newspaper'
    resolve_internal_links = True
    ignore_duplicate_articles = {'title', 'url'}
    masthead_url = 'https://upload.wikimedia.org/wikipedia/commons/9/93/The_Logo_of_The_Washington_Post_Newspaper.svg'
    extra_css = '''
        .img { text-align:center; font-size:small; }
        .auth { font-weight:bold; font-size:small; }
        .time { font-size:small; color: #202020; }
        .subt { font-style: italic; }
    '''

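    # Fetch pages as Googlebot (matching user agent, Google referer and a
    # crawler-range X-Forwarded-For), presumably so the site serves the
    # full article rather than a metered paywall page.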
    def get_browser(self, *args, **kwargs):
        kwargs['user_agent'] = (
            'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
        )
        br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
        br.addheaders += [
            ('Referer', 'https://www.google.com/'),
            ('X-Forwarded-For', '66.249.66.1'),
        ]
        return br

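    # Build the section and article lists from the "Today's Paper" page;
    # its front-page thumbnail doubles as the cover image.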
    def parse_index(self):
        soup = self.index_to_soup('https://www.washingtonpost.com/todays_paper/updates/')
        if img := soup.find(
            'img', attrs={'src': lambda x: x and x.endswith('_FrontPage.png')}
        ):
            self.cover_url = img['src']
        if h2 := soup.find(
            'h2', attrs={'class': lambda x: x and 'font--subhead' in x.split()}
        ):
            self.title = 'WaPo Print | ' + self.tag_to_string(h2)

        feeds = []

        for div in soup.findAll('section', attrs={'id': True}):
            secname = self.tag_to_string(div.find('label')).strip()
            self.log(secname)
            articles = []
            for a in div.findAll('a', href=True):
                url = a['href']
                title = self.tag_to_string(a).strip()
                if not title or not url:
                    continue
                self.log('\t', title)
                self.log('\t\t', url)
                articles.append({'title': title, 'url': url})
            if articles:
                feeds.append((secname, articles))
        return feeds

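    # Rebuild each article from the page's embedded JSON payload instead of
    # scraping the rendered markup; interactive pieces get a stub pointing
    # readers to the browser.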
    def preprocess_raw_html(self, raw, url):
        root = parse(raw)
        if '/interactive/' in url:
            return ('<html><body><article><h1>' + root.xpath('//h1')[0].text + '</h1><em>'
                    'This article is supposed to be read in a browser.'
                    '</em></article></body></html>')
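        # Regular articles ship their full content as JSON inside the
        # Next.js __NEXT_DATA__ script tag; pull it out and rebuild the HTML.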
        m = root.xpath('//script[@id="__NEXT_DATA__"]')

        data = json.loads(m[0].text)
        data = data['props']['pageProps']['globalContent']

        text = data.get('label', {}).get('basic', {}).get('text', '')
        label = f'<p class="time">{text}</p>' if text else ''
        title = '<h1>' + data['headlines']['basic'] + '</h1>'
        subhead = '<p class="subt">' + data.get('description', {}).get('basic', '') + '</p>'

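        # Keep the lede image separate so identical copies that reappear in
        # content_elements can be skipped below.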
        promo_img = ''
        if data.get('promo_items', {}).get('basic', {}).get('type', '') == 'image':
            pi = data['promo_items']['basic']
            promo_img = (
                '<p><div class="img"><img src="{}"><div>{}</div></div></p>'.format(
                    pi['url'], pi['credits_caption_display']
                )
            )

        author = ''
        if 'credits' in data:
            author = (
                '<div><span class="auth">'
                + 'By '
                + ', '.join(x['name'] for x in data['credits']['by'])
                + '</span> | <span class="time">'
                + data['publish_date'][:-14]  # keep the date, drop the time portion
                + '</span></div>'
            )

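        # Flatten content_elements into simple HTML: paragraphs, video
        # stills, images and bullet lists.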
        body = ''
        for x in data['content_elements']:
            if x['type'] == 'text':
                body += '<p>' + x['content'] + '</p>'
            elif x['type'] == 'video':
                if 'promo_image' in x:
                    body += '<p><div class="img"><img src="{}"><div>{}</div></div></p>'.format(
                        x['promo_image']['url'], x['description'].get('basic', '')
                    )
            elif x['type'] == 'image':
                img_ = (
                    '<p><div class="img"><img src="{}"><div>{}</div></div></p>'.format(
                        x['url'], x['credits_caption_display']
                    )
                )
                if img_ != promo_img:
                    body += img_
            elif x['type'] == 'list':
                body += '<ul>'
                for li in x['items']:
                    if li.get('content', '') != '':
                        body += f'<li>{li["content"]}</li>'
                body += '</ul>'

        return (
            '<html><body><div>'
            + label
            + title
            + subhead
            + promo_img
            + author
            + body
            + '</div></body></html>'
        )

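    # Rewrite image URLs to go through the Post's imrs.php endpoint, which
    # serves resized (here 600px wide) copies.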
    def preprocess_html(self, soup):
        for img in soup.findAll('img', attrs={'src': True}):
            img['src'] = (
                'https://www.washingtonpost.com/wp-apps/imrs.php?src='
                + img['src']
                + '&w=600'
            )
        return soup

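    # Use the styled subtitle paragraph as the article summary.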
    def populate_article_metadata(self, article, soup, first):
        article.summary = article.text_summary = self.tag_to_string(
            soup.find('p', attrs={'class': 'subt'})
        )