#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2014, Nikolas Mangold-Takao <nmangold at gmail.com>'
__version__ = '0.10'

''' http://brandeins.de - Wirtschaftsmagazin '''
from calibre.web.feeds.recipes import BasicNewsRecipe


class BrandEins(BasicNewsRecipe):

    title = u'brand eins'
    __author__ = 'Nikolas Mangold-Takao'
    description = u'brand eins beschreibt den momentanen Wandel in Wirtschaft und Gesellschaft, den Übergang vom Informations- zum Wissenszeitalter.'
    publisher = u'brand eins Verlag GmbH & Co. oHG'
    category = 'politics, business, wirtschaft, Germany'

    PREFIX = 'http://www.brandeins.de/'
    INDEX = PREFIX + 'archiv/listeansicht.html'

    use_embedded_content = False
    lang = 'de-DE'
    no_stylesheets = True
    encoding = 'utf-8'
    language = 'de'
    publication_type = 'magazine'
    needs_subscription = 'optional'
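    # Subscription is 'optional' because the username/password fields are
    # (mis)used to select a back issue; see parse_index() below.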
    timefmt = ''

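    # Keep only the main content <div>; drop everything before the article
    # body and everything from the social-share widget onwards.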
    keep_only_tags = dict(name='div', attrs={'id': 'content'})
    remove_tags_before = dict(name='div', attrs={'class': 'innerContent typeArticle'})
    remove_tags_after = dict(name='div', attrs={'id': 'socialshareprivacy'})

    issue_url = ''

    def parse_index(self):
        # Allow access to a past issue by (mis)using the username and
        # password fields:
        # username = year [yyyy, e.g. 2012]
        # password = month [MM, e.g. 10 for October]
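        # e.g. username = '2012', password = '10' -> issue = '201210',
        # which is matched against the yyyymm keys built from the archive
        # listing below.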
        issue = ""
        if self.username is not None and self.password is not None:
            try:
                issue = "{}{}".format(self.username, self.password)  # yyyyMM
            except Exception:
                pass

        soup = self.index_to_soup(self.INDEX)
        issue_list = soup.findAll('div', attrs={'class': 'details'})

        issue_map = {}
        i = 0
        for entry in issue_list:
            title = self.tag_to_string(entry.find('h3', attrs={'class': 'like-h1'}))
            issue_string = self.tag_to_string(entry.find('span', attrs={'class': 'meta'}))
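            # The meta string is assumed to have a fixed-width form like
            # '<word> MM yyyy' (e.g. 'Heft 10 2012'); the slices below cut
            # month and year out of it by position.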
            year = issue_string[8:]
            month = issue_string[5:-5]
            yyyymm = "{}{}".format(year, month)
            link = entry.findAll('a')[0]
            issue_map[yyyymm] = link.get('href')
            self.log('- ', year, month, title, link.get('href'))

            # Issue 1 (most recent) has only a few articles online,
            # issue 2 (2nd most recent) is not completely online, and
            # issue 3 (3rd most recent) is completely online, hence i == 2.
            if issue == "" and i == 2:
                issue = yyyymm
            i += 1

        self.log('Issue to get: ', issue)
        url = 'http://brandeins.de/' + issue_map[issue]
        self.issue_url = url  # saved so get_cover_url() can find the cover

        return self.parse_issue(url)

    def parse_issue(self, url):
        soup = self.index_to_soup(url)
        index = soup.find('div', attrs={'class': 'ihv_list'})

        feeds = []
        sections = index.findAll('section')

        # Special treatment for the editorial: it is not grouped in a
        # <section> and its title is not in an <h3>.
        inhalt_section = index.find('h1', attrs={'class': 'reset'})
        section_ttl = self.tag_to_string(inhalt_section)
        # self.log('+++ Found section', section_ttl)
        editorial_article = inhalt_section.parent.findNextSibling('a')
        ttl = self.tag_to_string(editorial_article.find('h2', attrs={'class': 'ihv_title'}))
        url = self.PREFIX + editorial_article['href']
        # self.log('--- Found article', ttl, url)
        feeds.append((section_ttl, [{'title': ttl, 'url': url}]))

        # self.log('NUMBER OF SECTIONS', len(sections))
        for section in sections:
            section_ttl = self.tag_to_string(section.find('h3'))
            # self.log('+++ Found section', section_ttl)

            articles = []
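            # The articles of a section are the sibling <a> tags that
            # follow it; stop as soon as the next <section> begins.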
            for article in section.findNextSiblings(['a', 'section']):
                if article.name == 'section':
                    break

                ttl = self.tag_to_string(article.find('h2', attrs={'class': 'ihv_title'}))
                url = self.PREFIX + article['href']
                # self.log('--- Found article', ttl, url)
                articles.append({'title': ttl, 'url': url})
            feeds.append((section_ttl, articles))
        return feeds

    def get_cover_url(self):
        # The issue index page does not contain a usable cover, but the
        # "Welt in Zahlen" article of the issue does (as og:image).
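        # self.issue_url ends in '.html'; strip that suffix ([:-5]) and
        # append the article name, giving e.g. (assumed URL shape)
        # '<issue-page>/die-welt-in-zahlen.html'.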
        cover_article = "{}/{}".format(self.issue_url[:-5], 'die-welt-in-zahlen.html')
        self.log(cover_article)
        soup = self.index_to_soup(cover_article)
        cover_meta = soup.find('meta', attrs={'property': 'og:image'})
        if cover_meta:
            return cover_meta['content']
        else:
            self.log('ERROR: Could not return cover url')