#!/usr/bin/env python

__license__ = 'GPL v3'
__copyright__ = '2009, Darko Miletic'
'''
politico.eu
'''

import re

from calibre.web.feeds.news import BasicNewsRecipe


class Politico(BasicNewsRecipe):

    title = 'Politico.eu'
    __author__ = 'unkn0wn, Darko Miletic and Sujata Raman'
    description = ('We connect and empower professionals through nonpartisan journalism and actionable'
                   ' intelligence about European politics and policy. Download Weekly.')
    publisher = 'Axel Springer SE.'
    category = 'news, politics, Europe'
    oldest_article = 7  # days
    max_articles_per_feed = 20
    use_embedded_content = False
    no_stylesheets = True
    remove_javascript = True
    encoding = 'UTF-8'
    language = 'en'

    recipe_specific_options = {
        'days': {
            'short': 'Oldest article to download from this news source. In days ',
            'long': 'For example, 0.5 gives you articles from the past 12 hours',
            'default': str(oldest_article)
        }
    }

    def __init__(self, *args, **kwargs):
        BasicNewsRecipe.__init__(self, *args, **kwargs)
        d = self.recipe_specific_options.get('days')
        if d and isinstance(d, str):
            self.oldest_article = float(d)

    remove_empty_feeds = True
    ignore_duplicate_articles = ['url']

    html2lrf_options = [
        '--comment', description, '--category', category, '--publisher', publisher, '--ignore-tables'
    ]

    html2epub_options = 'publisher="' + publisher + '"\ncomments="' + \
        description + '"\ntags="' + category + '"\nlinearize_tables=True'

    keep_only_tags = [
        dict(name='div', attrs={'class': ['container']}),
    ]

    remove_tags = [
        dict(name=['notags', 'embed', 'aside', 'object', 'link']),
        dict(name='div', attrs={'class': 'module-article-inline related-articles'}),
        dict(name='header', attrs={'class': 'summary-header'}),
        dict(name='div', attrs={'class': 'amazon-polly-wrapper'}),
        dict(name='div', attrs={'class': lambda x: x and 'story-meta' in x.split()}),
        dict(attrs={'class': lambda x: x and 'story-tools' in x.split()}),
        dict(attrs={'class': lambda x: x and 'social-sharing' in x.split()}),
        dict(attrs={'class': lambda x: x and 'story-meta__authors' in x.split()}),
        dict(attrs={'class': lambda x: x and 'story-meta__timestamp' in x.split()}),
        dict(attrs={'class': lambda x: x and 'story-continued' in x.split()}),
        dict(attrs={'class': lambda x: x and 'story-supplement' in x.split()}),
        dict(attrs={'class': lambda x: x and 'story-share' in x.split()}),
        dict(attrs={'class': lambda x: x and 'suggested' in x.split()}),
        dict(attrs={'class': lambda x: x and 'article__sidebar' in x.split()}),
        dict(name='svg'),
        dict(name='footer'),
        dict(name='span', attrs={'class': 'ad-label'}),
        dict(name='div', attrs={'class': 'pro-pill'}),
        dict(name='ol', attrs={'class': 'breadcrumbs__list'}),
    ]

    # Unwrap links, keeping only their text. The original pattern was damaged
    # (its HTML-like tags were stripped); this reconstruction assumes it
    # matched anchor tags.
    preprocess_regexps = [
        (re.compile(r'<a[^>]*>([^<]*)</a>', re.DOTALL | re.IGNORECASE), lambda match: match.group(1))
    ]

    remove_tags_after = dict(attrs={'class': lambda x: x and 'article__more-from' in x.split()})

    extra_css = '''
        body{font-family:Arial,Sans-serif;}
        element.style{color:#FF0000;font-family:Arial,Sans-serif;}
        .author{color:#808080;font-size:x-small;}
        a{color:#003399;}
        .byline{color:#696969; font-size:x-small;}
        .story{color:#000000;}
        td{color:#000000;}
        .figcaption__inner, .article-meta, .authors, .datetime {font-size:small;}
    '''

    def get_article_url(self, article):
        # Strip tracking query strings; skip articles that point to politico.com.
        url = BasicNewsRecipe.get_article_url(self, article)
        if 'politico.com' not in url:
            return url.split('?')[0]

    masthead_url = 'https://www.politico.eu/cdn-cgi/image/width=573,quality=80,onerror=redirect,format=auto/wp-content/uploads/2021/02/25/Politico_logo-01.png'

    def get_cover_url(self):
        soup = self.index_to_soup('https://www.politico.eu/')
        for cov in soup.findAll(attrs={'class': 'cta__image'}):
            if cov.find('a', attrs={'href': lambda x: x and 'edition.pagesuite-professional' in x}):
                return cov.a.img['src']

    feeds = [
        ('Policy', 'https://www.politico.eu/section/policy/feed'),
        ('Opinion', 'https://www.politico.eu/section/opinion/feed'),
        ('Newsletter', 'https://www.politico.eu/newsletter/feed'),
        ('Others', 'https://www.politico.eu/feed')
    ]

    def postprocess_html(self, soup, first):
        # Flatten table markup into divs so layout tables reflow cleanly.
        for tag in soup.findAll(name=['table', 'tr', 'td']):
            tag.name = 'div'
        return soup
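
# A minimal usage sketch, assuming the recipe above is saved as
# politico_eu.recipe (the filename is arbitrary): calibre's ebook-convert
# accepts .recipe files directly, so the feeds can be fetched and built
# into an EPUB from the command line, e.g.
#
#   ebook-convert politico_eu.recipe politico_eu.epub --test -vv
#
# --test limits the run to a couple of articles per feed while adjusting
# keep_only_tags/remove_tags; drop it for a full download.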