from calibre.web.feeds.news import BasicNewsRecipe, classes


class DeutscheWelle(BasicNewsRecipe):
    title = 'Deutsche Welle'
    description = 'Nachrichten der Deutschen Welle (DW)'
    publisher = 'DW - info@dw.com'
    language = 'de'
    __author__ = 'unkn0wn'
    oldest_article = 2
    max_articles_per_feed = 200
    no_stylesheets = True
    remove_javascript = True
    remove_empty_feeds = True
    ignore_duplicate_articles = {'title', 'url'}
    remove_attributes = ['height', 'width', 'style']
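
    # Keep only the <article> element; footers, source credits, sharing
    # widgets, kickers, ads and video player wrappers are stripped by
    # remove_tags below.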
    keep_only_tags = [
        dict(name='article')
    ]

    recipe_specific_options = {
        'days': {
            'short': 'Oldest article to download from this news source. In days ',
            'long': 'For example, 0.5 gives you articles from the past 12 hours',
            'default': str(oldest_article)
        }
    }
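
    # The 'days' option arrives as a string (for example when set in calibre's
    # scheduled news download dialog); when supplied, it overrides the default
    # oldest_article for this run.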
    def __init__(self, *args, **kwargs):
        BasicNewsRecipe.__init__(self, *args, **kwargs)
        d = self.recipe_specific_options.get('days')
        if d and isinstance(d, str):
            self.oldest_article = float(d)

    remove_tags = [
        dict(name=['footer', 'source']),
        dict(attrs={'data-tracking-name': 'sharing-icons-inline'}),
        classes('kicker advertisement vjs-wrapper')
    ]

    # See https://www.dw.com/de/service/rss/s-9773 for an overview of the available RSS feeds
    feeds = [
        ('Nachrichten', 'http://rss.dw.com/xml/rss-de-news'),
        ('Wissenschaft', 'http://rss.dw.com/xml/rss-de-wissenschaft'),
        ('Sport', 'http://rss.dw.com/xml/rss-de-sport'),
        ('Deutschland entdecken', 'http://rss.dw.com/xml/rss-de-deutschlandentdecken'),
        ('Presse', 'http://rss.dw.com/xml/presse'),
        ('Politik', 'http://rss.dw.com/xml/rss_de_politik'),
        ('Wirtschaft', 'http://rss.dw.com/xml/rss-de-eco'),
        ('Kultur und Leben', 'http://rss.dw.com/xml/rss-de-cul'),
        ('Thema des Tages', 'http://rss.dw.com/xml/rss-de-top'),
    ]

    def preprocess_html(self, soup):
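        # Images come with a srcset of "URL width," candidates; pick one fixed
        # candidate as the plain src for the e-book. Splitting on whitespace
        # yields alternating URL and width tokens, so index 6 is the fourth
        # candidate URL (this assumes every srcset lists at least four).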
        for img in soup.findAll('img', srcset=True):
            img['src'] = img['srcset'].split()[6]
        return soup