#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import unicode_literals

__license__ = 'GPL v3'
__version__ = '0.2'

'''
brandeins.de
'''
from calibre.web.feeds.news import BasicNewsRecipe
from collections import OrderedDict


class BrandEins(BasicNewsRecipe):

    title = u'brand eins'
    __author__ = 'Nikolas Mangold-Takao, Thomas Schlenkhoff'
    language = 'de'
    description = u'brand eins beschreibt den momentanen Wandel in Wirtschaft und Gesellschaft.'
    publisher = u'brand eins Verlag GmbH & Co. oHG'
    category = 'politics, business, wirtschaft, Germany'

    PREFIX = 'http://www.brandeins.de/'
    INDEX = PREFIX + 'archiv/listeansicht.html'

    use_embedded_content = False
    resolve_internal_links = True

    no_stylesheets = True
    needs_subscription = False

    delay = 1
    summary_length = 200
    simultaneous_downloads = 5
    remove_javascript = True

    keep_only_tags = dict(name='div', attrs={'id': 'content'})

    # remove ad slots and sharing widgets from articles
    remove_tags = [dict(name='div', attrs={'id': 'oms_gpt_billboard'}),
                   dict(name='div', attrs={'id': 'oms_gpt_rectangle'}),
                   dict(name='h3', attrs={'class': 'sharing-headline'}),
                   dict(name='div', attrs={'class': 'sharing-links'}),
                   dict(name='aside', attrs={'class': 'articleAside'})]

    remove_tags_before = dict(
        name='div', attrs={'class': 'innerContent typeArticle'})
    remove_tags_after = dict(name='div', attrs={'id': 'socialshareprivacy'})

    extra_css = '''
        body, p {text-align: left;}
        .headline {font-size: x-large;}
        h2 {font-size: medium;}
        h1 {font-size: large;}
        em.Bold {font-weight: bold; font-style: normal;}
        em.Italic {font-style: italic;}
    '''

    def parse_index(self):
        issue = ""

        soup = self.index_to_soup(self.INDEX)
        issue_list = soup.findAll('div', attrs={'class': 'details'})

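        # Build a map from 'YYYYMM' to each issue's relative URL, then pick
        # the fourth most recent issue (see the note inside the loop).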
        issue_map = {}
        i = 0
        for entry in issue_list:
            title = self.tag_to_string(entry.find(
                'h3', attrs={'class': 'like-h1'}))
            issue_string = self.tag_to_string(
                entry.find('span', attrs={'class': 'meta'}))
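            # Assumption (inferred from the slices below): the meta span
            # reads like 'Heft MM/YYYY', so [8:] yields the year and
            # [5:-5] the two-digit month.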
            year = issue_string[8:]
            month = issue_string[5:-5]
            yyyymm = "{}{}".format(year, month)
            link = entry.findAll('a')[0]
            issue_map[yyyymm] = link.get('href')
            self.log('- ', year, month, title, link.get('href'))

            # Issue 1 (most recent) has only a few articles online, and
            # issues 2 and 3 (2nd and 3rd most recent) are not completely
            # online. Issue 4 (4th most recent) is completely online,
            # hence i == 3.
            if issue == "" and i == 3:
                issue = yyyymm
            i += 1

        url = 'http://brandeins.de/' + issue_map[issue]
        self.log('Issue to get: ', issue, url)
        self.issue_url = url  # save to extract cover

        return self.parse_issue(url)

    def parse_issue(self, url):
        soup = self.index_to_soup(url)
        feeds = OrderedDict()

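        # Group articles into feeds keyed by the category label shown on
        # the issue page (the last <span> inside 'ihv_page_category').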
        for item in soup.findAll(attrs={'class': lambda x: 'ihv_item' in (x or '').split()}):
            a = item.findParent('a', href=True)
            if a is None:
                continue
            url = self.PREFIX + a['href']
            title = self.tag_to_string(item.find(attrs={'class': 'ihv_title'}))
            sec = self.tag_to_string(
                item.find(attrs={'class': 'ihv_page_category'}).findAll('span')[-1])
            if sec not in feeds:
                feeds[sec] = []
            desc = ''
            for p in item.findAll('p'):
                desc += self.tag_to_string(p)
            feeds[sec].append(
                {'title': title, 'url': url, 'description': desc})
            self.log('Found article:', title, 'at', url)

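        # Drop sections that ended up without any articles.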
        return [(st, articles) for st, articles in feeds.items() if articles]

    def get_cover_url(self):
        # the index page does not contain a usable cover, but the
        # 'Welt in Zahlen' article does
        cover_article = "{}{}".format(
            self.issue_url, 'die-welt-in-zahlen.html')
        self.log('Cover article URL: %s' % cover_article)
        soup = self.index_to_soup(cover_article)
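        # The cover image sits in the article's aside section; the class
        # name below reflects the site's markup at the time of writing.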
        img = soup.find('section', 'asideSection no-content').find('img')
        self.log('Found cover image url: %s' % img['src'])
        return self.PREFIX + img['src']

    def preprocess_raw_html(self, raw_html, url):
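        # Strip the leading bullet character ('• ') that some paragraphs
        # on brandeins.de carry.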
        return raw_html.replace('<p>• ', '<p>')