#!/usr/bin/env python
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2016, Aimylios

from __future__ import absolute_import, division, print_function, unicode_literals

'''
handelsblatt.com
'''

import re

from calibre.web.feeds.news import BasicNewsRecipe


class Handelsblatt(BasicNewsRecipe):
    title = 'Handelsblatt'
    __author__ = 'Aimylios'  # based on the work of malfi and Hegi
    description = 'RSS-Feeds von Handelsblatt.com'
    publisher = 'Verlagsgruppe Handelsblatt GmbH'
    publication_type = 'newspaper'
    needs_subscription = 'optional'
    language = 'de'

    oldest_article = 2
    max_articles_per_feed = 15
    simultaneous_downloads = 10
    no_stylesheets = True
    remove_javascript = True
    remove_empty_feeds = True
    ignore_duplicate_articles = {'title', 'url'}

    conversion_options = {'smarten_punctuation': True,
                          'publisher': publisher}

    # uncomment this to reduce file size
    # compress_news_images = True
    # compress_news_images_max_size = 16

    login_url = 'https://id.handelsblatt.com/login?service=https://www.handelsblatt.com'
    cover_source = 'https://www.ikiosk.de/shop/epaper/handelsblatt.html'
    masthead_url = 'https://www.handelsblatt.com/images/logo_handelsblatt/11002806/8-formatOriginal.png'

    feeds = [
        ('Top-Themen', 'http://www.handelsblatt.com/contentexport/feed/top-themen'),
        ('Politik', 'http://www.handelsblatt.com/contentexport/feed/politik'),
        ('Unternehmen', 'http://www.handelsblatt.com/contentexport/feed/unternehmen'),
        ('Finanzen', 'http://www.handelsblatt.com/contentexport/feed/finanzen'),
        ('Technologie', 'http://www.handelsblatt.com/contentexport/feed/technologie'),
        ('Panorama', 'http://www.handelsblatt.com/contentexport/feed/panorama')
    ]

    keep_only_tags = [
        dict(name='div', attrs={'class': ['vhb-article-container']})
    ]

    remove_tags = [
        dict(name='span', attrs={'class': ['vhb-colon', 'vhb-label-premium']}),
        dict(name='aside', attrs={'class': ['vhb-article-element vhb-left',
                                            'vhb-article-element vhb-left vhb-teasergallery',
                                            'vhb-article-element vhb-left vhb-shorttexts']}),
        dict(name='aside', attrs={'class': re.compile(r'vhb-club-events')}),
        dict(name='article', attrs={'class': ['vhb-imagegallery vhb-teaser',
                                              'vhb-teaser vhb-type-video']}),
        dict(name='small', attrs={'class': ['vhb-credit']}),
        dict(name='div', attrs={'class': ['dg_health', 'fb-post', 'header-bar',
                                          'lb_consent--placeholder',
                                          'lb-item embed', 'lb-post-actions',
                                          'mod--displaynone', 'white_content',
                                          'opinary-widget-wrapper',
                                          'vhb-article__content-element--shorttextgallery',
                                          'vhb-hollow-area vhb-hollow-area--col-1']}),
        dict(name='div', attrs={'class': re.compile(r'stepstone')}),
        dict(name='div', attrs={'class': re.compile(r'vhb-imagegallery')}),
        dict(name='div', attrs={'id': ['highcharts_infografik']}),
        dict(name='div', attrs={'id': re.compile(r'dax-sentiment')}),
        dict(name=['div', 'section'], attrs={'class': re.compile(r'slider')}),
        dict(name='a', attrs={'class': ['twitter-follow-button']}),
        dict(name='img', attrs={'class': ['highlight-icon', 'lb-author__avatar',
                                          'pin-icon']}),
        dict(name='img', attrs={'alt': re.compile(r'Handelsblatt Morning Briefing')}),
        dict(name=['blockquote', 'button', 'link'])
    ]

    preprocess_regexps = [
        # insert ". " after the location at the beginning of an article
        # (NOTE: the tag pattern is an assumption, inferred from the
        # .hcf-location-mark CSS rule in extra_css below)
        (re.compile(r'(<span class="hcf-location-mark">[^<]+)(</span>)',
                    re.IGNORECASE),
         lambda match: match.group(1) + '. ' + match.group(2)),
        # insert ": " between title and text of captions
        # (NOTE: the vhb-caption-title class is likewise an assumption,
        # inferred from the vhb-caption markup handled in postprocess_html)
        (re.compile(r'(<span class="vhb-caption-title">[^<]+)(</span>)',
                    re.IGNORECASE),
         lambda match: match.group(1) + ': ' + match.group(2)),
        # convert "data-src" to "src" attributes
        (re.compile(r'( data-src=")([^"]*")', re.IGNORECASE),
         lambda match: ' src="' + match.group(2))
    ]

    extra_css = '''h2 {font-size: 1em; text-align: left}
                   h3 {font-size: 1em; text-align: left}
                   h4 {font-size: 1em; text-align: left; margin-bottom: 0em}
                   em {font-style: normal; font-weight: bold}
                   .vhb-subline {font-weight: normal; text-transform: uppercase}
                   .vhb-headline {font-size: 1.6em}
                   .vhb-teaser-head {margin-top: 1em; margin-bottom: 1em}
                   .vhb-hollow-area--innercontent {font-size: 0.6em}
                   .hcf-location-mark {font-weight: bold}
                   .lb-post-header {margin-top: 1em}
                   .panel-body p {margin-top: 0em}'''

    def get_browser(self):
        br = BasicNewsRecipe.get_browser(self)
        if self.username is not None and self.password is not None:
            try:
                br.open(self.login_url)
                br.select_form(nr=0)
                br['login'] = self.username
                br['password'] = self.password
                br.submit()
            except Exception as e:
                self.log('Login failed with error:', str(e))
        return br

    def get_cover_url(self):
        cover_url = ''
        soup = self.index_to_soup(self.cover_source)
        img = soup.find('img', {'alt': 'Handelsblatt - ePaper;', 'src': True})
        if img and img['src']:
            cover_url = img['src']
        return cover_url

    def print_version(self, url):
        main, sep, name = url.rpartition('/')
        return main + '/v_detail_tab_print/' + name

    def preprocess_html(self, soup):
        # remove all articles without relevant content (e.g., videos)
        article = soup.find('div', {'class': 'vhb-article-container'})
        if article is None:
            self.abort_article()
        return soup

    def postprocess_html(self, soup, first_fetch):
        # convert lists of author(s) and date(s) into simple text
        for cap in soup.find_all('div', {'class': re.compile(r'vhb-article-caption')}):
            cap.replace_with(cap.encode_contents().decode('utf-8').strip() + ' ')
        for row in soup.find_all('div', {'class': 'vhb-article-author-row'}):
            for ul in row.find_all('ul'):
                entry = ''
                for li in ul.find_all(lambda tag: tag.name == 'li' and not tag.attrs):
                    entry += self.tag_to_string(li).strip() + ', '
                for li in ul.find_all(lambda tag: tag.name == 'li' and tag.attrs):
                    entry += self.tag_to_string(li)
                ul.parent.replace_with(entry)
        # remove all local hyperlinks
        for a in soup.find_all('a', {'href': True}):
            if '.handelsblatt.com/' in a['href']:
                a.replace_with(a.encode_contents().decode('utf-8'))
        # make sure that all figure captions (including the source) are shown
        # without linebreaks by using the alternative text given within the
        # <img> tag instead of the original text (which is oddly formatted)
        for fig in soup.find_all('figcaption', {'class': 'vhb-inline-picture'}):
            cap = fig.find('img')['alt']
            fig.find('div', {'class': 'vhb-caption'}).replace_with(cap)
        # remove references to related articles
        for strong in soup.find_all('strong'):
            if strong.string and (re.match(r'^Mehr:? ?', strong.string) or
                                  re.match(r'^>>.*', strong.string)):
                p_parent = strong.find_parent('p')
                if p_parent:
                    p_parent.decompose()
        # clean up remainders of embedded content
        for div in soup.find_all('div', {'style': True}):
            if len(div.attrs) == 1:
                del div['style']
        return soup
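
# A recipe like this can be sanity-checked from the command line with
# calibre's ebook-convert tool; --test limits the run to a couple of
# articles per feed, and -vv prints verbose progress output. The file
# name below is illustrative; save the recipe under any name ending in
# .recipe:
#
#   ebook-convert handelsblatt.recipe output.epub --test -vv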