#!/usr/bin/env python2
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2016, Aimylios

from __future__ import unicode_literals, division, absolute_import, print_function

'''
handelsblatt.com
'''

import re
from calibre.web.feeds.news import BasicNewsRecipe


class Handelsblatt(BasicNewsRecipe):
    title = 'Handelsblatt'
    __author__ = 'Aimylios'  # based on the work of malfi and Hegi
    description = 'RSS-Feeds von Handelsblatt.com'
    publisher = 'Verlagsgruppe Handelsblatt GmbH'
    publication_type = 'newspaper'
    needs_subscription = 'optional'
    language = 'de'

    oldest_article = 2
    max_articles_per_feed = 15
    simultaneous_downloads = 10
    no_stylesheets = True
    remove_javascript = True
    remove_empty_feeds = True
    ignore_duplicate_articles = {'title', 'url'}

    conversion_options = {'smarten_punctuation': True,
                          'publisher': publisher}

    # uncomment this to reduce file size
    # compress_news_images          = True
    # compress_news_images_max_size = 16

    cover_source = 'https://kaufhaus.handelsblatt.com/downloads/handelsblatt-epaper-p1951.html'
    masthead_url = 'http://www.handelsblatt.com/images/logo_handelsblatt/11002806/7-formatOriginal.png'

    feeds = [
        ('Top-Themen', 'http://www.handelsblatt.com/contentexport/feed/top-themen'),
        ('Politik', 'http://www.handelsblatt.com/contentexport/feed/politik'),
        ('Unternehmen', 'http://www.handelsblatt.com/contentexport/feed/unternehmen'),
        ('Finanzen', 'http://www.handelsblatt.com/contentexport/feed/finanzen'),
        ('Technologie', 'http://www.handelsblatt.com/contentexport/feed/technologie'),
        ('Panorama', 'http://www.handelsblatt.com/contentexport/feed/panorama'),
        ('Sport', 'http://www.handelsblatt.com/contentexport/feed/sport')
    ]

    keep_only_tags = [
        dict(name='div', attrs={'class': ['vhb-article-container']})
    ]

    remove_tags = [
        dict(name='span', attrs={'class': ['vhb-colon', 'vhb-label-premium']}),
        dict(name='aside', attrs={'class': ['vhb-article-element vhb-left',
                                            'vhb-article-element vhb-left vhb-teasergallery',
                                            'vhb-article-element vhb-left vhb-shorttexts']}),
        dict(name='aside', attrs={'class': re.compile('vhb-club-events')}),
        dict(name='article', attrs={'class': ['vhb-imagegallery vhb-teaser',
                                              'vhb-teaser vhb-type-video']}),
        dict(name='small', attrs={'class': ['vhb-credit']}),
        dict(name='div', attrs={'class': ['white_content', 'fb-post',
                                          'opinary-widget-wrapper',
                                          'vhb-hollow-area vhb-hollow-area--col-1']}),
        dict(name='div', attrs={'class': re.compile('vhb-imagegallery')}),
        dict(name='div', attrs={'id': ['highcharts_infografik']}),
        dict(name='div', attrs={'id': re.compile('dax-sentiment')}),
        dict(name=['div', 'section'], attrs={'class': re.compile('slider')}),
        dict(name='a', attrs={'class': ['twitter-follow-button']}),
        dict(name='img', attrs={'alt': re.compile('Kolumnenkabinet')}),
        dict(name=['link', 'blockquote'])
    ]

    preprocess_regexps = [
        # Insert ". " after "Place" in <span class="hcf-location-mark">Place</span>
        (re.compile(r'(<span class="hcf-location-mark">[^<]+)(</span>)',
                    re.DOTALL | re.IGNORECASE),
         lambda match: match.group(1) + '. ' + match.group(2)),
        # Insert ": " after "Title" in <em itemprop="name">Title</em>
        (re.compile(r'(<em itemprop="name">[^<]+)(</em>)',
                    re.DOTALL | re.IGNORECASE),
         lambda match: match.group(1) + ': ' + match.group(2))
    ]
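    # A minimal illustration of the two rules above, using hypothetical input
    # strings (not taken from a live page):
    #   '<span class="hcf-location-mark">Berlin</span>'
    #       -> '<span class="hcf-location-mark">Berlin. </span>'
    #   '<em itemprop="name">Morning Briefing</em>'
    #       -> '<em itemprop="name">Morning Briefing: </em>'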
    extra_css = 'h2 {font-size: 1em; text-align: left} \
                 h3 {font-size: 1em; text-align: left} \
                 h4 {font-size: 1em; text-align: left; margin-bottom: 0em} \
                 em {font-style: normal; font-weight: bold} \
                 .vhb-subline {font-weight: normal; text-transform: uppercase} \
                 .vhb-headline {font-size: 1.6em} \
                 .vhb-teaser-head {margin-top: 1em; margin-bottom: 1em} \
                 .vhb-caption-wrapper {font-size: 0.6em} \
                 .hcf-location-mark {font-weight: bold} \
                 .panel-body p {margin-top: 0em}'

    def get_browser(self):
        br = BasicNewsRecipe.get_browser(self)
        if self.username is not None and self.password is not None:
            br.open(
                'https://profil.vhb.de/sso/login?service=http://www.handelsblatt.com')
            br.select_form(nr=0)
            br['username'] = self.username
            br['password'] = self.password
            br.submit()
        return br

    def get_cover_url(self):
        soup = self.index_to_soup(self.cover_source)
        style = soup.find('img', alt='Handelsblatt ePaper', style=True)['style']
        self.cover_url = style.partition('(')[-1].rpartition(')')[0]
        return self.cover_url

    def print_version(self, url):
        main, sep, id = url.rpartition('/')
        return main + '/v_detail_tab_print/' + id
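    # For reference, given a hypothetical article URL such as
    #   http://www.handelsblatt.com/politik/deutschland/titel/12345678.html
    # print_version() above would request the printer-friendly variant at
    #   http://www.handelsblatt.com/politik/deutschland/titel/v_detail_tab_print/12345678.html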
    def preprocess_html(self, soup):
        # remove all articles without relevant content (e.g., videos)
        article_container = soup.find(
            'div', {'class': 'vhb-article-container'})
        if article_container is None:
            self.abort_article()
        return soup

    def postprocess_html(self, soup, first_fetch):
        # convert lists of author(s) and date(s) into simple text
        for cap in soup.findAll('div', {'class': re.compile('vhb-article-caption')}):
            cap.replaceWith(cap.renderContents().decode('utf-8').strip() + ' ')
        for row in soup.findAll('div', {'class': 'vhb-article-author-row'}):
            for ul in row.findAll('ul'):
                entry = ''
                for li in ul.findAll(lambda tag: tag.name == 'li' and not tag.attrs):
                    entry = entry + self.tag_to_string(li).strip() + ', '
                for li in ul.findAll(lambda tag: tag.name == 'li' and tag.attrs):
                    entry = entry + self.tag_to_string(li) + '<br/>'
                ul.parent.replaceWith(entry)
        # remove all local hyperlinks
        for a in soup.findAll('a', {'href': True}):
            if a['href'] and a['href'][0] in ['/', '#']:
                a.replaceWith(a.renderContents().decode('utf-8'))
        # make sure that all figure captions (including the source) are shown
        # without line breaks by using the alternative text given within <img/>
        # instead of the original text (which is oddly formatted)
        for fig in soup.findAll('figcaption', {'class': 'vhb-inline-picture'}):
            cap = fig.find('img')['alt']
            fig.find('div', {'class': 'vhb-caption'}).replaceWith(cap)
        # clean up remainders of embedded content
        for div in soup.findAll('div', {'style': True}):
            if len(div.attrs) == 1:
                del div['style']
        return soup
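# A quick way to try changes to this recipe from the command line (a sketch;
# the output file name is arbitrary, and --test restricts the download to a
# couple of articles per feed so the run finishes quickly):
#   ebook-convert handelsblatt.recipe output.epub --test -vv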