calibre/recipes/spektrum.recipe

#!/usr/bin/env python
# vim:fileencoding=utf-8
##
## Written: October 2012 (new coding)
## Version: 10.0
## Last update: 2025-01-15
##
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = ''

'''
Fetch RSS feeds from spektrum.de
'''

from calibre.web.feeds.recipes import BasicNewsRecipe


def classes(classes):
    # Build a tag-matching dict (for keep_only_tags / remove_tags) that selects
    # elements whose class list shares at least one entry with the given
    # space-separated class names.
    q = frozenset(classes.split(' '))
    return dict(
        attrs={'class': lambda x: x and frozenset(x.split()).intersection(q)}
    )


class Spektrum(BasicNewsRecipe):
    title = u'Spektrum der Wissenschaft'
    __author__ = 'Armin Geller, Bratzzo, Rainer Zenz, update epubli'
    description = u'German online portal of Spektrum der Wissenschaft'
    publisher = 'Spektrum der Wissenschaft Verlagsgesellschaft mbH'
    category = 'science news, Germany'

    oldest_article = 3
    max_articles_per_feed = 100
    no_stylesheets = True
    remove_javascript = True
    remove_empty_feeds = True
    language = 'de'
    encoding = 'utf8'
    ignore_duplicate_articles = {'title'}
    scale_news_images_to_device = True
    compress_news_images = True

    cover_url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/1/1d/Spektrum_der_Wissenschaft_Logo_seit_2016.svg/640px-Spektrum_der_Wissenschaft_Logo_seit_2016.svg.png'
    masthead_url = 'http://www.spektrum.de/fm/861/spektrum.de.png'

    feeds = [
        (u'Spektrum.de', u'http://www.spektrum.de/alias/rss/spektrum-de-rss-feed/996406'),
    ]

    keep_only_tags = [
        dict(name='article', attrs={'class': 'content'}),
        classes('callout-box'),
    ]

    remove_tags = [
        classes('hide-for-print'),
        classes('content__meta'),
        classes('content__author'),
        classes('content__video'),
        dict(name='div', attrs={'role': 'navigation'}),
        dict(name='span', attrs={'class': 'sr-only'}),
    ]

    def parse_feeds(self):
        # URL keywords of article types that should be skipped entirely.
        unwanted_article_types = [
            'podcast',
            'video',
            'raetsel',
            'leseprobe',
            # 'kolumne',
            # 'rezension',
            # 'news',
        ]
        # Let the parent class build the feed list first.
        feeds = BasicNewsRecipe.parse_feeds(self)
        for feed in feeds:
            # Iterate over a copy so articles can be removed while looping.
            for article in feed.articles[:]:
                if 'VIDEO' in article.title:
                    feed.articles.remove(article)
                    continue
                # Drop articles whose URL contains one of the unwanted keywords.
                for keyword in unwanted_article_types:
                    if keyword in article.url:
                        feed.articles.remove(article)
                        break
        return feeds

    def preprocess_html(self, soup):
        # Turn <noscript> blocks into plain <div> elements so their fallback
        # content (typically images) is rendered in the converted book.
        for noscript in soup.findAll('noscript'):
            noscript.name = 'div'
        return soup

    def preprocess_raw_html(self, raw, url):
        # Abort paywalled articles that require a login.
        unwantedtag = 'content pw-premium'
        if unwantedtag in raw:
            self.abort_article('Skipping unwanted article with tag: ' + unwantedtag)
        return raw
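
# A minimal way to try this recipe locally is calibre's standard recipe test
# workflow (the output file name below is just an example):
#
#   ebook-convert spektrum.recipe spektrum.epub --test -vv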