#!/usr/bin/env python
# vim:fileencoding=utf-8

__license__ = 'GPL v3'
__copyright__ = '2011, Darko Miletic'

'''
www.washingtonpost.com
'''

import json

from calibre.web.feeds.news import BasicNewsRecipe
from html5_parser import parse


class TheWashingtonPost(BasicNewsRecipe):
    title = 'The Washington Post'
    __author__ = 'Darko Miletic, unkn0wn'
    description = (
        'Leading source for news, video and opinion on politics, business, world and'
        ' national news, science, travel, entertainment and more. Our local coverage'
        ' includes reporting on education, crime, weather, traffic, real estate, jobs'
        ' and cars for DC, Maryland and Virginia. Offering award-winning opinion'
        ' writing, entertainment information and restaurant reviews.'
    )
    publisher = 'The Washington Post Company'
    category = 'news, politics, USA'
    oldest_article = 1.2
    max_articles_per_feed = 200
    no_stylesheets = True
    encoding = 'utf8'
    use_embedded_content = False
    language = 'en_US'
    remove_empty_feeds = True
    ignore_duplicate_articles = {'url'}
    masthead_url = 'https://www.washingtonpost.com/pb/resources/img/twp-masthead-415x57.svg'
    publication_type = 'newspaper'
    remove_attributes = ['style', 'width', 'height']

    recipe_specific_options = {
        'days': {
            'short': 'Oldest article to download from this news source. In days',
            'long': 'For example, 0.5 gives you articles from the past 12 hours',
            'default': str(oldest_article)
        }
    }

    def __init__(self, *args, **kwargs):
        BasicNewsRecipe.__init__(self, *args, **kwargs)
        # Let the user-supplied 'days' option override oldest_article.
        d = self.recipe_specific_options.get('days')
        if d and isinstance(d, str):
            self.oldest_article = float(d)

    extra_css = '''
        .img { text-align:center; font-size:small; }
        .auth { font-weight:bold; font-size:small; }
        .time { font-size:small; color: #202020; }
    '''

    def get_browser(self):
        # Skip SSL certificate verification and present a desktop Firefox UA.
        return BasicNewsRecipe.get_browser(
            self,
            verify_ssl_certificates=False,
            user_agent='Mozilla/5.0 (Windows NT 10.0; rv:128.0) Gecko/20100101 Firefox/128.0'
        )

    # Official feeds: https://www.washingtonpost.com/discussions/2018/10/12/washington-post-rss-feeds/
    feeds = [
        (u'Politics', u'http://feeds.washingtonpost.com/rss/politics'),
        (u'Opinions', u'http://feeds.washingtonpost.com/rss/opinions'),
        (u'Local', u'http://feeds.washingtonpost.com/rss/local'),
        (u'Sports', u'http://feeds.washingtonpost.com/rss/sports'),
        (u'Technology', u'http://feeds.washingtonpost.com/rss/business/technology'),
        (u'National', u'http://feeds.washingtonpost.com/rss/national'),
        (u'World', u'http://feeds.washingtonpost.com/rss/world'),
        (u'Business', u'http://feeds.washingtonpost.com/rss/business'),
        (u'Lifestyle', u'http://feeds.washingtonpost.com/rss/lifestyle'),
        (u'Entertainment', u'http://feeds.washingtonpost.com/rss/entertainment'),
        # Undocumented feeds.
        (u'White House', u'http://feeds.washingtonpost.com/rss/politics/whitehouse'),
        (u'Commanders', u'http://feeds.washingtonpost.com/rss/sports/redskins'),
    ]
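    # Article pages embed the full story as JSON inside a
    # <script id="__NEXT_DATA__"> tag; preprocess_raw_html() below rebuilds a
    # clean article page from that payload. A rough sketch of the subset of
    # fields the recipe reads, inferred from the accessors used below (the
    # real payload carries many more keys):
    #
    #   {"props": {"pageProps": {"globalContent": {
    #       "headlines": {"basic": "<headline>"},
    #       "description": {"basic": "<subheadline>"},
    #       "credits": {"by": [{"name": "<author>"}, ...]},
    #       "publish_date": "2011-01-01T12:00:00.000Z",
    #       "content_elements": [
    #           {"type": "text", "content": "<paragraph html>"},
    #           {"type": "image", "url": "...", "credits_caption_display": "..."},
    #           {"type": "video", "promo_image": {"url": "..."},
    #            "description": {"basic": "..."}},
    #           ...
    #       ]}}}}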

    def preprocess_raw_html(self, raw, *a):
        root = parse(raw)
        m = root.xpath('//script[@id="__NEXT_DATA__"]')

        data = json.loads(m[0].text)
        data = data['props']['pageProps']['globalContent']

        title = '<h1>' + data['headlines']['basic'] + '</h1>'
        subhead = '<h3>' + data['description'].get('basic', '') + '</h3>'

        author = ''
        if 'credits' in data:
            # publish_date looks like 2011-01-01T12:00:00.000Z; dropping the
            # last 14 characters leaves just the YYYY-MM-DD date.
            author = '<div><span class="auth">' + 'By ' + ', '.join(x['name'] for x in data['credits']['by']) \
                + '</span> | <span class="time">' + data['publish_date'][:-14] + '</span></div>'
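
        # The article body arrives as an ordered list of typed blocks. Only
        # the three types handled below are rendered; any other block type
        # the payload may carry is silently skipped.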
        body = ''
        for x in data['content_elements']:
            if x['type'] == 'text':
                body += '<p>' + x['content'] + '</p>'
            elif x['type'] == 'video':
                # Videos cannot play in the e-book; fall back to the promo
                # image with the video description as a caption.
                if 'promo_image' in x:
                    body += '<div class="img"><img src="{}"><div>{}</div></div>'.format(
                        x['promo_image']['url'], x['description'].get('basic', '')
                    )
            elif x['type'] == 'image':
                body += '<div class="img"><img src="{}"><div>{}</div></div>'.format(
                    x['url'], x['credits_caption_display']
                )
        return '<html><body><div>' + title + subhead + author + body + '</div></body></html>'
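
    # Image URLs in the JSON point at full-size originals. Routing them
    # through the site's imrs.php resizing endpoint keeps downloads small;
    # w=916 is simply the width this recipe requests, not a documented limit.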
    def preprocess_html(self, soup):
        for img in soup.findAll('img', attrs={'src': True}):
            img['src'] = 'https://www.washingtonpost.com/wp-apps/imrs.php?src=' + img['src'] + '&w=916'
        return soup
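
# To test changes, the standard calibre recipe workflow applies (assuming the
# file is saved as wapo.recipe; --test fetches only a few articles per feed):
#
#   ebook-convert wapo.recipe output.epub --test -vv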