#!/usr/bin/env python
# vim:fileencoding=utf-8
'''
washingtonpost.com
'''

import json

from html5_parser import parse

from calibre.web.feeds.news import BasicNewsRecipe


class wapoprint(BasicNewsRecipe):
    title = 'The Washington Post | Print Edition'
    __author__ = 'unkn0wn'
    description = (
        'Leading source for news, video and opinion on politics, business, world and national news, science,'
        ' travel, entertainment and more. Our local coverage includes reporting on education, crime, weather,'
        ' traffic, real estate, jobs and cars for DC, Maryland and Virginia. Offering award-winning opinion writing,'
        ' entertainment information and restaurant reviews.'
    )
    publisher = 'The Washington Post Company'
    category = 'news, politics, USA'
    no_stylesheets = True
    use_embedded_content = False
    encoding = 'utf-8'
    language = 'en_US'
    remove_attributes = ['style', 'height', 'width']
    publication_type = 'newspaper'
    ignore_duplicate_articles = {'title', 'url'}
    masthead_url = 'https://www.washingtonpost.com/pb/resources/img/twp-masthead-415x57.svg'
    extra_css = '''
        .img { text-align:center; font-size:small; }
        .auth { font-weight:bold; font-size:small; }
        .time { font-size:small; color: #202020; }
    '''
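    # Fetch pages with a desktop Firefox user agent; SSL certificate
    # verification is turned off.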
    def get_browser(self):
        return BasicNewsRecipe.get_browser(
            self, verify_ssl_certificates=False, user_agent='Mozilla/5.0 (Windows NT 10.0; rv:128.0) Gecko/20100101 Firefox/128.0')
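    # Build the section list from the "Today's Paper" page: the front-page
    # image becomes the cover, the page subhead is appended to the title,
    # and each <section id=...> becomes one feed made up of the article
    # links it contains.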
    def parse_index(self):
        soup = self.index_to_soup('https://www.washingtonpost.com/todays_paper/updates/')
        if img := soup.find('img', attrs={'src': lambda x: x and x.endswith('_FrontPage.png')}):
            self.cover_url = img['src']
        if h2 := soup.find('h2', attrs={'class': lambda x: x and 'font--subhead' in x.split()}):
            self.title = 'WaPo Print | ' + self.tag_to_string(h2)

        feeds = []

        for div in soup.findAll('section', attrs={'id': True}):
            secname = self.tag_to_string(div.find('label')).strip()
            self.log(secname)
            articles = []
            for a in div.findAll('a', href=True):
                url = a['href']
                title = self.tag_to_string(a).strip()
                if not title or not url:
                    continue
                self.log('\t', title)
                self.log('\t\t', url)
                articles.append({'title': title, 'url': url})
            if articles:
                feeds.append((secname, articles))
        return feeds
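    # Article pages embed their full content as JSON inside the
    # <script id="__NEXT_DATA__"> tag; rebuild a minimal HTML document
    # (headline, subhead, byline, body text, images) from that payload
    # instead of scraping the rendered markup.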
    def preprocess_raw_html(self, raw, *a):
        root = parse(raw)
        m = root.xpath('//script[@id="__NEXT_DATA__"]')

        data = json.loads(m[0].text)
        data = data['props']['pageProps']['globalContent']

        title = '<h1>' + data['headlines']['basic'] + '</h1>'
        subhead = '<h3>' + data['description'].get('basic', '') + '</h3>'

        author = ''
        if 'credits' in data:
            author = '<div><span class="auth">' + 'By ' + ', '.join(x['name'] for x in data['credits']['by']) \
                + '</span> | <span class="time">' + data['publish_date'][:-14] + '</span></div>'

        body = ''
        for x in data['content_elements']:
            if x['type'] == 'text':
                body += '<p>' + x['content'] + '</p>'
            elif x['type'] == 'video':
                if 'promo_image' in x:
                    body += '<p><div class="img"><img src="{}"><div>{}</div></div></p>'.format(
                        x['promo_image']['url'], x['description'].get('basic', '')
                    )
            elif x['type'] == 'image':
                body += '<p><div class="img"><img src="{}"><div>{}</div></div></p>'.format(x['url'], x['credits_caption_display'])

        return '<html><body><div>' + title + subhead + author + body + '</div></body></html>'
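    # Serve every image through washingtonpost.com's imrs.php resizing
    # endpoint, capped at 916 pixels wide.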
    def preprocess_html(self, soup):
        for img in soup.findAll('img', attrs={'src': True}):
            img['src'] = 'https://www.washingtonpost.com/wp-apps/imrs.php?src=' + img['src'] + '&w=916'
        return soup
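    # Reuse the subhead (the <h3> built in preprocess_raw_html) as the
    # article summary shown in the generated e-book.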
    def populate_article_metadata(self, article, soup, first):
        article.summary = self.tag_to_string(soup.find('h3'))
        article.text_summary = self.tag_to_string(soup.find('h3'))
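
# A quick way to test a recipe like this one, assuming it is saved as
# wapo_print.recipe (the filename is illustrative), is calibre's
# command-line converter, which downloads only a couple of articles in
# test mode:
#
#   ebook-convert wapo_print.recipe output.epub --test -vv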