diff --git a/recipes/handelsblatt.recipe b/recipes/handelsblatt.recipe
index 89555271cd..1be19d89b5 100644
--- a/recipes/handelsblatt.recipe
+++ b/recipes/handelsblatt.recipe
@@ -1,88 +1,111 @@
+#!/usr/bin/env python2
+
+__license__ = 'GPL v3'
+__copyright__ = '2016, Aimylios'
+
+'''
+handelsblatt.com
+'''
+
import re
from calibre.web.feeds.news import BasicNewsRecipe
class Handelsblatt(BasicNewsRecipe):
- title = u'Handelsblatt'
- __author__ = 'malfi' # modified by Hegi, last change 2013-05-20
- description = u'Handelsblatt - basierend auf den RRS-Feeds von Handelsblatt.de'
- tags = 'Nachrichten, Blog, Wirtschaft'
- publisher = 'Verlagsgruppe Handelsblatt GmbH'
- category = 'business, economy, news, Germany'
- publication_type = 'daily newspaper'
- language = 'de_DE'
- oldest_article = 7
- max_articles_per_feed = 100
- simultaneous_downloads= 20
+ title = u'Handelsblatt'
+ __author__ = 'Aimylios' # based on the work of malfi and Hegi
+ description = u'RSS-Feeds von Handelsblatt.com'
+ publisher = 'Verlagsgruppe Handelsblatt GmbH'
+ category = 'news, politics, business, economy, Germany'
+ publication_type = 'newspaper'
+ language = 'de'
- auto_cleanup = False
- no_stylesheets = True
- remove_javascript = True
- remove_empty_feeds = True
-
- # don't duplicate articles from "Schlagzeilen" / "Exklusiv" to other rubrics
+ encoding = 'utf8'
+ oldest_article = 4
+ max_articles_per_feed = 30
+ simultaneous_downloads = 20
+ no_stylesheets = True
+ remove_javascript = True
+ remove_empty_feeds = True
ignore_duplicate_articles = {'title', 'url'}
- # if you want to reduce size for an b/w or E-ink device, uncomment this:
- # compress_news_images = True
- # compress_news_images_auto_size = 16
- # scale_news_images = (400,300)
+ conversion_options = {'smarten_punctuation' : True,
+ 'publisher' : publisher}
- timefmt = ' [%a, %d %b %Y]'
+ # uncomment this to reduce file size
+ # compress_news_images = True
- conversion_options = {'smarten_punctuation' : True,
- 'authors' : publisher,
- 'publisher' : publisher}
- language = 'de_DE'
- encoding = 'UTF-8'
+ cover_source = 'https://kaufhaus.handelsblatt.com/downloads/handelsblatt-epaper-p1951.html'
+ masthead_url = 'http://www.handelsblatt.com/images/logo_handelsblatt/11002806/7-formatOriginal.png'
- cover_source = 'http://www.handelsblatt-shop.com/epaper/482/'
- # masthead_url = 'http://www.handelsblatt.com/images/hb_logo/6543086/1-format3.jpg'
- masthead_url = 'http://www.handelsblatt-chemie.de/wp-content/uploads/2012/01/hb-logo.gif'
+ feeds = [
+ (u'Top-Themen', u'http://www.handelsblatt.com/contentexport/feed/top-themen'),
+ (u'Politik', u'http://www.handelsblatt.com/contentexport/feed/politik'),
+ (u'Unternehmen', u'http://www.handelsblatt.com/contentexport/feed/unternehmen'),
+ (u'Finanzen', u'http://www.handelsblatt.com/contentexport/feed/finanzen'),
+ (u'Technologie', u'http://www.handelsblatt.com/contentexport/feed/technologie'),
+ (u'Panorama', u'http://www.handelsblatt.com/contentexport/feed/panorama'),
+ (u'Sport', u'http://www.handelsblatt.com/contentexport/feed/sport')
+ ]
- def get_cover_url(self):
- cover_source_soup = self.index_to_soup(self.cover_source)
- preview_image_div = cover_source_soup.find(attrs={'class':'vorschau'})
- return 'http://www.handelsblatt-shop.com'+preview_image_div.a.img['src']
-
- # remove_tags_before = dict(attrs={'class':'hcf-overline'})
- # remove_tags_after = dict(attrs={'class':'hcf-footer'})
- # Alternatively use this:
-
- keep_only_tags = [
- dict(name='div', attrs={'class':['hcf-column hcf-column1 hcf-teasercontainer hcf-maincol']}),
- dict(name='div', attrs={'id':['contentMain']})
- ]
+ keep_only_tags = [dict(name='div', attrs={'class':['vhb-article-container']})]
remove_tags = [
- dict(name='div', attrs={'class':['hcf-link-block hcf-faq-open', 'hcf-article-related']})
+ dict(name='span', attrs={'class':['vhb-media', 'vhb-colon']}),
+ dict(name='small', attrs={'class':['vhb-credit']}),
+ dict(name='aside', attrs={'class':['vhb-article-element vhb-left',
+ 'vhb-article-element vhb-left vhb-teasergallery',
+ 'vhb-article-element vhb-left vhb-shorttexts']}),
+ dict(name='article', attrs={'class':['vhb-imagegallery vhb-teaser', 'vhb-teaser vhb-type-video']}),
+ dict(name='div', attrs={'class':['fb-post']}),
+ dict(name='blockquote', attrs={'class':['twitter-tweet']}),
+ dict(name='a', attrs={'class':['twitter-follow-button']})
]
- feeds = [
- (u'Handelsblatt Exklusiv',u'http://www.handelsblatt.com/rss/exklusiv'),
- (u'Handelsblatt Top-Themen',u'http://www.handelsblatt.com/rss/top-themen'),
- (u'Handelsblatt Schlagzeilen',u'http://www.handelsblatt.com/rss/ticker/'),
- (u'Handelsblatt Finanzen',u'http://www.handelsblatt.com/rss/finanzen/'),
- (u'Handelsblatt Unternehmen',u'http://www.handelsblatt.com/rss/unternehmen/'),
- (u'Handelsblatt Politik',u'http://www.handelsblatt.com/rss/politik/'),
- (u'Handelsblatt Technologie',u'http://www.handelsblatt.com/rss/technologie/'),
- (u'Handelsblatt Meinung',u'http://www.handelsblatt.com/rss/meinung'),
- (u'Handelsblatt Magazin',u'http://www.handelsblatt.com/rss/magazin/'),
- (u'Handelsblatt Weblogs',u'http://www.handelsblatt.com/rss/blogs')
- ]
+ preprocess_regexps = [
+ # Insert ". " after "Place" in <span class="hcf-location-mark">Place</span>
+ (re.compile(r'(<span class="hcf-location-mark">[^<]+)(</span>)',
+ re.DOTALL|re.IGNORECASE), lambda match: match.group(1) + '. ' + match.group(2)),
+ # Insert ": " after "Title" in <em>Title</em>
+ (re.compile(r'(<em>[^<]+)(</em>)',
+ re.DOTALL|re.IGNORECASE), lambda match: match.group(1) + ': ' + match.group(2))
+ ]
- # Insert ". " after "Place" in <span class="hcf-location-mark">Place</span>
- # If you use .epub format you could also do this as extra_css '.hcf-location-mark:after {content: ". "}'
- preprocess_regexps = [(re.compile(r'(<span class="hcf-location-mark">[^<]*)(</span>)',
- re.DOTALL|re.IGNORECASE), lambda match: match.group(1) + '. ' + match.group(2))]
+ extra_css = 'h2 {text-align: left} \
+ h3 {font-size: 1em; text-align: left} \
+ h4 {font-size: 1em; text-align: left; margin-bottom: 0em} \
+ em {font-style: normal; font-weight: bold} \
+ .vhb-subline {font-size: 0.6em; text-transform: uppercase} \
+ .vhb-article-caption {float: left; padding-right: 0.2em} \
+ .vhb-article-author-cell ul {list-style-type: none; margin: 0em} \
+ .vhb-teaser-head {margin-top: 1em; margin-bottom: 1em} \
+ .vhb-caption-wrapper {font-size: 0.6em} \
+ .hcf-location-mark {font-weight: bold} \
+ .panel-link {color: black; text-decoration: none} \
+ .panel-body p {margin-top: 0em}'
- extra_css = 'h1 {font-size: 1.6em; text-align: left} \
- h2 {font-size: 1em; font-style: italic; font-weight: normal} \
- h3 {font-size: 1.3em;text-align: left} \
- h4, h5, h6, a {font-size: 1em;text-align: left} \
- .hcf-caption {font-size: 1em;text-align: left; font-style: italic} \
- .hcf-location-mark {font-style: italic}'
+ def get_cover_url(self):
+ soup = self.index_to_soup(self.cover_source)
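+ # the shop page embeds the cover image URL in a CSS url(...) value
+ # inside the img tag's style attribute; extract the part between the
+ # first '(' and the last ')'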
+ style = soup.find('img', alt='Handelsblatt ePaper', style=True)['style']
+ self.cover_url = style.partition('(')[-1].rpartition(')')[0]
+ return self.cover_url
def print_version(self, url):
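+ # rewrite the article URL so it points to the print view
+ # by inserting the v_detail_tab_print segment before the article id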
main, sep, id = url.rpartition('/')
return main + '/v_detail_tab_print/' + id
+ def preprocess_html(self, soup):
+ # remove all articles without relevant content (e.g., videos)
+ article_container = soup.find('div', {'class':'vhb-article-container'})
+ if article_container is None:
+ self.abort_article()
+ else:
+ return soup
+
+ def postprocess_html(self, soup, first_fetch):
+ # make sure that all figure captions (including the source) are shown
+ # without linebreaks by using the alternative text given within
+ # <img alt=''/> instead of the original text (which is oddly formatted)
+ article_figures = soup.findAll('figure', {'class':'vhb-image'})
+ for fig in article_figures:
+ fig.find('div', {'class':'vhb-caption'}).replaceWith(fig.find('img')['alt'])
+ return soup