Update Handelsblatt

Kovid Goyal 2016-04-04 07:01:11 +05:30
parent fb9b1c2885
commit 11a52bb0d7


@@ -1,88 +1,111 @@
#!/usr/bin/env python2

__license__ = 'GPL v3'
__copyright__ = '2016, Aimylios'

'''
handelsblatt.com
'''

import re

from calibre.web.feeds.news import BasicNewsRecipe


class Handelsblatt(BasicNewsRecipe):
    title = u'Handelsblatt'
    __author__ = 'Aimylios'  # based on the work of malfi and Hegi
    description = u'RSS-Feeds von Handelsblatt.com'
    publisher = 'Verlagsgruppe Handelsblatt GmbH'
    category = 'news, politics, business, economy, Germany'
    publication_type = 'newspaper'
    language = 'de'

    encoding = 'utf8'
    oldest_article = 4
    max_articles_per_feed = 30
    simultaneous_downloads = 20
    no_stylesheets = True
    remove_javascript = True
    remove_empty_feeds = True
    ignore_duplicate_articles = {'title', 'url'}

    conversion_options = {'smarten_punctuation' : True,
                          'publisher' : publisher}

    # uncomment this to reduce file size
    # compress_news_images = True

    cover_source = 'https://kaufhaus.handelsblatt.com/downloads/handelsblatt-epaper-p1951.html'
    masthead_url = 'http://www.handelsblatt.com/images/logo_handelsblatt/11002806/7-formatOriginal.png'

    feeds = [
        (u'Top-Themen', u'http://www.handelsblatt.com/contentexport/feed/top-themen'),
        (u'Politik', u'http://www.handelsblatt.com/contentexport/feed/politik'),
        (u'Unternehmen', u'http://www.handelsblatt.com/contentexport/feed/unternehmen'),
        (u'Finanzen', u'http://www.handelsblatt.com/contentexport/feed/finanzen'),
        (u'Technologie', u'http://www.handelsblatt.com/contentexport/feed/technologie'),
        (u'Panorama', u'http://www.handelsblatt.com/contentexport/feed/panorama'),
        (u'Sport', u'http://www.handelsblatt.com/contentexport/feed/sport')
    ]
    keep_only_tags = [dict(name='div', attrs={'class':['vhb-article-container']})]

    remove_tags = [
        dict(name='span', attrs={'class':['vhb-media', 'vhb-colon']}),
        dict(name='small', attrs={'class':['vhb-credit']}),
        dict(name='aside', attrs={'class':['vhb-article-element vhb-left',
                                           'vhb-article-element vhb-left vhb-teasergallery',
                                           'vhb-article-element vhb-left vhb-shorttexts']}),
        dict(name='article', attrs={'class':['vhb-imagegallery vhb-teaser', 'vhb-teaser vhb-type-video']}),
        dict(name='div', attrs={'class':['fb-post']}),
        dict(name='blockquote', attrs={'class':['twitter-tweet']}),
        dict(name='a', attrs={'class':['twitter-follow-button']})
    ]
    preprocess_regexps = [
        # Insert ". " after "Place" in <span class="hcf-location-mark">Place</span>
        (re.compile(r'(<span class="hcf-location-mark">[^<]+)(</span>)',
                    re.DOTALL|re.IGNORECASE),
         lambda match: match.group(1) + '.&nbsp;' + match.group(2)),
        # Insert ": " after "Title" in <em itemtype="text" itemprop="name" class="vhb-title">Title</em>
        (re.compile(r'(<em itemtype="text" itemprop="name" class="vhb-title">[^<]+)(</em>)',
                    re.DOTALL|re.IGNORECASE),
         lambda match: match.group(1) + ':&nbsp;' + match.group(2))
    ]
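    # For example (hypothetical input), the first rule above rewrites
    #   <span class="hcf-location-mark">Duesseldorf</span>
    # into
    #   <span class="hcf-location-mark">Duesseldorf.&nbsp;</span>
    # so that the dateline reads as the start of the first sentence.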
# Insert ". " after "Place" in <span class="hcf-location-mark">Place</span> extra_css = 'h2 {text-align: left} \
# If you use .epub format you could also do this as extra_css '.hcf-location-mark:after {content: ". "}' h3 {font-size: 1em; text-align: left} \
preprocess_regexps = [(re.compile(r'(<span class="hcf-location-mark">[^<]*)(</span>)', h4 {font-size: 1em; text-align: left; margin-bottom: 0em} \
re.DOTALL|re.IGNORECASE), lambda match: match.group(1) + '. ' + match.group(2))] em {font-style: normal; font-weight: bold} \
.vhb-subline {font-size: 0.6em; text-transform: uppercase} \
.vhb-article-caption {float: left; padding-right: 0.2em} \
.vhb-article-author-cell ul {list-style-type: none; margin: 0em} \
.vhb-teaser-head {margin-top: 1em; margin-bottom: 1em} \
.vhb-caption-wrapper {font-size: 0.6em} \
.hcf-location-mark {font-weight: bold} \
.panel-link {color: black; text-decoration: none} \
.panel-body p {margin-top: 0em}'
    def get_cover_url(self):
        soup = self.index_to_soup(self.cover_source)
        style = soup.find('img', alt='Handelsblatt ePaper', style=True)['style']
        self.cover_url = style.partition('(')[-1].rpartition(')')[0]
        return self.cover_url
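    # The cover page embeds the ePaper image via an inline style; given a
    # hypothetical value like 'background-image: url(http://.../cover.jpg)',
    # partition('(') and rpartition(')') above cut out the bare URL.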
    def print_version(self, url):
        main, sep, id = url.rpartition('/')
        return main + '/v_detail_tab_print/' + id
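    # For example (hypothetical URL), print_version maps
    #   http://www.handelsblatt.com/politik/ein-artikel/13398448.html
    # to
    #   http://www.handelsblatt.com/politik/ein-artikel/v_detail_tab_print/13398448.html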
    def preprocess_html(self, soup):
        # remove all articles without relevant content (e.g., videos)
        article_container = soup.find('div', {'class':'vhb-article-container'})
        if article_container is None:
            self.abort_article()
        else:
            return soup

    def postprocess_html(self, soup, first_fetch):
        # make sure that all figure captions (including the source) are shown
        # without linebreaks by using the alternative text given within <img/>
        # instead of the original text (which is oddly formatted)
        article_figures = soup.findAll('figure', {'class':'vhb-image'})
        for fig in article_figures:
            fig.find('div', {'class':'vhb-caption'}).replaceWith(fig.find('img')['alt'])
        return soup
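
The string handling in get_cover_url and the preprocess_regexps rules can be sanity-checked outside calibre. A minimal sketch; the style value and the HTML snippet are made-up inputs, not taken from handelsblatt.com:

    import re

    # hypothetical inline style, shaped like the one get_cover_url parses
    style = 'background-image: url(http://example.com/epaper-cover.jpg);'
    print(style.partition('(')[-1].rpartition(')')[0])
    # -> http://example.com/epaper-cover.jpg

    # the first preprocess rule applied to a made-up article snippet
    pattern = re.compile(r'(<span class="hcf-location-mark">[^<]+)(</span>)',
                         re.DOTALL|re.IGNORECASE)
    html = '<span class="hcf-location-mark">Berlin</span>Die Kurse steigen.'
    print(pattern.sub(lambda m: m.group(1) + '.&nbsp;' + m.group(2), html))
    # -> <span class="hcf-location-mark">Berlin.&nbsp;</span>Die Kurse steigen.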