Update Wolne Media

Remove various discontinued news sources.

Merge branch 'master' of https://github.com/t3d/calibre
This commit is contained in:
Kovid Goyal 2018-10-06 06:51:59 +05:30
commit de1b0d1cb6
No known key found for this signature in database
GPG Key ID: 06BC317B515ACE7C
10 changed files with 3 additions and 267 deletions

Binary file not shown.

Before

Width:  |  Height:  |  Size: 498 B

Binary file not shown.

Before

Width:  |  Height:  |  Size: 625 B

Binary file not shown.

Before

Width:  |  Height:  |  Size: 625 B

Binary file not shown.

Before

Width:  |  Height:  |  Size: 204 B

View File

@ -1,31 +0,0 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from calibre.web.feeds.news import BasicNewsRecipe
class TVPINFO(BasicNewsRecipe):
    """Recipe for the Polish public-TV news service TVP.INFO."""

    title = u'TVP.INFO'
    __author__ = 'fenuks'
    description = u'Serwis informacyjny TVP.INFO'
    category = 'news'
    language = 'pl'
    cover_url = 'http://s.v3.tvp.pl/files/tvp-info/gfx/logo.png'

    # Download window and article cleanup behaviour.
    oldest_article = 7
    max_articles_per_feed = 100
    no_stylesheets = True
    remove_empty_feeds = True
    remove_javascript = True
    use_embedded_content = False
    ignore_duplicate_articles = {'title', 'url'}

    # Keep only the article container; drop toolbars, social widgets,
    # the inline video player and the top bar ('belka').
    keep_only_tags = [dict(id='contentNews')]
    remove_tags = [
        dict(attrs={'class': [
            'toolbox', 'modulBox read', 'modulBox social', 'videoPlayerBox']}),
        dict(id='belka'),
    ]

    feeds = [
        (u'Wiadomo\u015bci', u'http://tvp.info/informacje?xslt=tvp-info/news/rss.xslt&src_id=191865'),
        (u'\u015awiat', u'http://tvp.info/informacje/swiat?xslt=tvp-info/news/rss.xslt&src_id=191867'),
        (u'Biznes', u'http://tvp.info/informacje/biznes?xslt=tvp-info/news/rss.xslt&src_id=191868'),
        (u'Nauka', u'http://tvp.info/informacje/nauka?xslt=tvp-info/news/rss.xslt&src_id=191870'),
        (u'Kultura', u'http://tvp.info/informacje/kultura?xslt=tvp-info/news/rss.xslt&src_id=191869'),
        (u'Rozmaito\u015bci', u'http://tvp.info/informacje/rozmaitosci?xslt=tvp-info/news/rss.xslt&src_id=191872'),
        (u'Opinie', u'http://tvp.info/opinie?xslt=tvp-info/news/rss.xslt&src_id=191875'),
        (u'Komentarze', u'http://tvp.info/opinie/komentarze?xslt=tvp-info/news/rss.xslt&src_id=238200'),
        (u'Wywiady', u'http://tvp.info/opinie/wywiady?xslt=tvp-info/news/rss.xslt&src_id=236644'),
    ]

View File

@ -1,25 +0,0 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe
class UbuntuPomoc(BasicNewsRecipe):
    """Recipe for Ubuntu-pomoc.org, a Polish blog with Ubuntu Linux guides."""

    title = u'Ubuntu-pomoc.org'
    __author__ = 'fenuks'
    description = u'Strona poświęcona systemowi Ubuntu Linux. Znajdziesz tutaj przydatne i sprawdzone poradniki oraz sposoby rozwiązywania wielu popularnych problemów. Ten blog rozwiąże każdy Twój problem - jeśli nie teraz, to wkrótce! :)' # noqa
    category = 'Linux, Ubuntu, open source'
    language = 'pl'
    cover_url = 'http://www.ubuntu-pomoc.org/grafika/ubuntupomoc.png'

    # Cut the trailing "ciekawostka" (trivia) box and everything after it.
    preprocess_regexps = [
        (re.compile(r'<div class="ciekawostka">.+',
                    re.IGNORECASE | re.DOTALL),
         lambda m: ''),
    ]

    # Download window and article cleanup behaviour.
    oldest_article = 7
    max_articles_per_feed = 100
    no_stylesheets = True
    remove_javascript = True
    remove_empty_feeds = True
    use_embedded_content = False
    remove_attrs = ['style']

    # Keep the <article> element; drop related-posts, video and share widgets.
    keep_only_tags = [dict(name='article')]
    remove_tags = [
        dict(attrs={'class': ['yarpp-related', 'youtube_sc', 'share']}),
        dict(name='footer'),
    ]

    feeds = [
        (u'Ca\u0142o\u015b\u0107', u'http://feeds.feedburner.com/Ubuntu-Pomoc'),
    ]

View File

@ -23,6 +23,9 @@ class wolne_media(BasicNewsRecipe):
remove_javascript = True
no_stylesheets = True
auto_cleanup = True
ignore_duplicate_articles = {'url'}
remove_tags = [dict(name='p', attrs={'class': 'tags'})]
feeds = [
(u'Wiadomości z kraju', u'http://wolnemedia.net/category/wiadomosci-z-kraju/feed/'),

View File

@ -1,100 +0,0 @@
#!/usr/bin/env python2
__license__ = 'GPL v3'
__copyright__ = '''2010, matek09, matek09@gmail.com
Modified 2011, Mariusz Wolek <mariusz_dot_wolek @ gmail dot com>
Modified 2012, Artur Stachecki <artur.stachecki@gmail.com>'''
from calibre.web.feeds.news import BasicNewsRecipe
import re
class Wprost(BasicNewsRecipe):
    """Recipe for the Polish weekly 'Wprost', scraped from the archive pages.

    ``find_last_issue()`` locates the newest issue (optionally only issues
    whose full table of contents is readable) and ``parse_index()`` builds
    the section/article structure from that issue's page.
    """

    # Issue id; filled in by find_last_issue(), 0 until then.
    EDITION = 0
    # When True, pick the newest issue that appears AFTER the last
    # "blocked" (paywall) icon in the archive listing.
    FIND_LAST_FULL_ISSUE = True
    # Skip individual articles marked with the paywall icon.
    EXCLUDE_LOCKED = True
    ICO_BLOCKED = 'http://www.wprost.pl/G/layout2/ico_blocked.png'

    title = u'Wprost'
    __author__ = 'matek09'
    description = u'Popularny tygodnik ogólnopolski - Wprost. Najlepszy wśród polskich tygodników - opiniotwórczy - społeczno-informacyjny - społeczno-kulturalny.' # noqa
    encoding = 'ISO-8859-2'
    no_stylesheets = True
    language = 'pl'
    remove_javascript = True
    recursions = 0
    # Fix: the original wrapped these in a redundant dict(dict(...));
    # dict(mapping) just copies the mapping, so a single dict() is identical.
    remove_tags_before = dict(name='div', attrs={'id': 'print-layer'})
    remove_tags_after = dict(name='div', attrs={'id': 'print-layer'})

    # Strip inline show/hide styles and unwrap the print-page table layout.
    preprocess_regexps = [(re.compile(r'style="display: none;"'), lambda match: ''),
                          (re.compile(r'display: block;'), lambda match: ''),
                          (re.compile(r'\<td\>\<tr\>\<\/table\>'), lambda match: ''),
                          (re.compile(r'\<table .*?\>'), lambda match: ''),
                          (re.compile(r'\<tr>'), lambda match: ''),
                          (re.compile(r'\<td .*?\>'), lambda match: ''),
                          (re.compile(r'\<div id="footer"\>.*?\</footer\>'), lambda match: '')]

    remove_tags = [
        dict(name='div', attrs={'class': 'def element-date'}),
        dict(name='div', attrs={'class': 'def silver'}),
        dict(name='div', attrs={'id': 'content-main-column-right'}),
    ]

    extra_css = '''.div-header {font-size: x-small; font-weight: bold}'''
    # h2 {font-size: x-large; font-weight: bold}

    def is_blocked(self, a):
        """Return True if link *a* is followed by the paywall marker image."""
        return a.findNextSibling('img') is not None

    def find_last_issue(self):
        """Find the newest issue in the archive and remember its id and cover.

        Sets ``self.EDITION``, ``self.EDITION_SHORT`` and ``self.cover_url``.
        """
        soup = self.index_to_soup('http://www.wprost.pl/archiwum/')
        if self.FIND_LAST_FULL_ISSUE:
            # Issues listed after the last blocked icon are fully readable.
            ico_blocked = soup.findAll('img', attrs={'src': self.ICO_BLOCKED})
            a = ico_blocked[-1].findNext(
                'a', attrs={'title': re.compile(r'Spis *', re.IGNORECASE | re.DOTALL)})
        else:
            a = soup.find('a', attrs={'title': re.compile(
                r'Spis *', re.IGNORECASE | re.DOTALL)})
        self.EDITION = a['href'].replace('/tygodnik/?I=', '')
        # NOTE(review): stripping the literal '/tygodnik/?I=15' (not just the
        # prefix) looks suspicious but is preserved from the original.
        self.EDITION_SHORT = a['href'].replace('/tygodnik/?I=15', '')
        self.cover_url = a.img['src']

    def parse_index(self):
        """Build the (section, articles) list for the newest issue."""
        self.find_last_issue()
        soup = self.index_to_soup(
            'http://www.wprost.pl/tygodnik/?I=' + self.EDITION)
        feeds = []
        headers = soup.findAll(
            attrs={'class': 'block-header block-header-left mtop20 mbottom20'})
        articles_list = soup.findAll(attrs={'class': 'standard-box'})
        # Section headers and article boxes appear pairwise on the page.
        for header, box in zip(headers, articles_list):
            articles = self.find_articles(box)
            if articles:
                feeds.append((header.find('a').string, articles))
        return feeds

    def find_articles(self, main_block):
        """Collect article dicts from *main_block*, skipping locked articles."""
        articles = []
        for a in main_block.findAll('a'):
            # Fix: the original tested ``a.name in "td"`` — substring
            # membership, which also matches 't', 'd' and '' — where a
            # plain equality test was clearly intended.
            if a.name == 'td':
                break
            if self.EXCLUDE_LOCKED and self.is_blocked(a):
                continue
            articles.append({
                'title': self.tag_to_string(a),
                'url': 'http://www.wprost.pl' + a['href'],
                'date': '',
                'description': ''
            })
        return articles

View File

@ -1,51 +0,0 @@
#!/usr/bin/env python2
__license__ = 'GPL v3'
__copyright__ = '''2010, matek09, matek09@gmail.com
Modified 2011, Mariusz Wolek <mariusz_dot_wolek @ gmail dot com>
Modified 2012, Artur Stachecki <artur.stachecki@gmail.com>'''
from calibre.web.feeds.news import BasicNewsRecipe
class Wprost(BasicNewsRecipe):
    """RSS-based recipe for the Polish news portal Wprost."""

    title = u'Wprost (RSS)'
    __author__ = 'matek09'
    description = u'Portal informacyjny. Najświeższe wiadomości, najciekawsze komentarze i opinie. Blogi najlepszych publicystów.'
    encoding = 'ISO-8859-2'
    no_stylesheets = True
    language = 'pl'
    remove_javascript = True
    use_embedded_content = False
    ignore_duplicate_articles = {'title', 'url'}
    remove_empty_feeds = True

    # Keep the article area; drop the 300x250 ad slot.
    keep_only_tags = [dict(attrs={'class': 'art-area'})]
    remove_tags = [dict(attrs={'class': 'add300x250'})]

    extra_css = '''.div-header {font-size: x-small; font-weight: bold}'''
    # h2 {font-size: x-large; font-weight: bold}

    feeds = [(u'Tylko u nas', u'http://www.wprost.pl/rss/rss_wprostextra.php'),
             (u'Wydarzenia', u'http://www.wprost.pl/rss/rss.php'),
             (u'Komentarze', u'http://www.wprost.pl/rss/rss_komentarze.php'),
             (u'Wydarzenia: Kraj', u'http://www.wprost.pl/rss/rss_kraj.php'),
             (u'Komentarze: Kraj', u'http://www.wprost.pl/rss/rss_komentarze_kraj.php'),
             (u'Wydarzenia: Świat', u'http://www.wprost.pl/rss/rss_swiat.php'),
             (u'Komentarze: Świat', u'http://www.wprost.pl/rss/rss_komentarze_swiat.php'),
             (u'Wydarzenia: Gospodarka',
              u'http://www.wprost.pl/rss/rss_gospodarka.php'),
             (u'Komentarze: Gospodarka',
              u'http://www.wprost.pl/rss/rss_komentarze_gospodarka.php'),
             (u'Wydarzenia: Życie', u'http://www.wprost.pl/rss/rss_zycie.php'),
             (u'Komentarze: Życie', u'http://www.wprost.pl/rss/rss_komentarze_zycie.php'),
             (u'Wydarzenia: Sport', u'http://www.wprost.pl/rss/rss_sport.php'),
             (u'Komentarze: Sport', u'http://www.wprost.pl/rss/rss_komentarze_sport.php'),
             (u'Przegląd prasy', u'http://www.wprost.pl/rss/rss_prasa.php')
             ]

    def get_cover_url(self):
        """Fetch the current weekly cover image URL, if one is published."""
        soup = self.index_to_soup('http://www.wprost.pl/tygodnik')
        cover = soup.find(attrs={'class': 'wprost-cover'})
        if cover:
            self.cover_url = cover['src']
        # Fix: the original returned getattr(self, 'cover_url', self.cover_url);
        # the default argument is evaluated eagerly, so the getattr was a no-op.
        return self.cover_url

View File

@ -1,60 +0,0 @@
#!/usr/bin/env python2
__license__ = 'GPL v3'
from calibre.web.feeds.news import BasicNewsRecipe
class WysokieObcasyRecipe(BasicNewsRecipe):
    """Recipe for 'Wysokie Obcasy', the Saturday supplement of Gazeta Wyborcza."""

    __author__ = u'Artur Stachecki <artur.stachecki@gmail.com>'
    version = 1
    title = u'Wysokie Obcasy'
    publisher = 'Agora SA'
    description = u'Serwis sobotniego dodatku do Gazety Wyborczej'
    category = 'magazine'
    # Fix: the original assigned ``language`` twice with the same value.
    language = 'pl'
    publication_type = 'magazine'
    cover_url = ''
    remove_empty_feeds = True
    # Fix: the original assigned ``no_stylesheets`` twice with the same value.
    no_stylesheets = True
    oldest_article = 7
    max_articles_per_feed = 100000
    recursions = 0
    remove_javascript = True
    simultaneous_downloads = 5

    # Keep the article container; drop images and the info paragraph.
    keep_only_tags = [dict(name='div', attrs={'id': 'article'})]
    remove_tags = [
        dict(name='img'),
        dict(name='p', attrs={'class': 'info'}),
    ]

    extra_css = '''
    body {font-family: verdana, arial, helvetica, geneva, sans-serif ;}
    h1{text-align: left;}
    '''

    feeds = [
        ('Wszystkie Artykuly', 'feed://www.wysokieobcasy.pl/pub/rss/wysokieobcasy.xml'),
    ]

    def print_version(self, url):
        """Map an article URL to its print version.

        Article URLs are comma-separated; the print page lives under
        ``/wysokie-obcasy/2029020,<id1>,<id2>.html``.
        """
        baseURL = 'http://www.wysokieobcasy.pl/wysokie-obcasy'
        segments = url.split(',')
        subPath = '/2029020,'
        printVerString = segments[1] + ',' + segments[2]
        return baseURL + subPath + printVerString + '.html'

    def get_cover_url(self):
        """Fetch the current cover image URL from the section front page."""
        soup = self.index_to_soup(
            'http://www.wysokieobcasy.pl/wysokie-obcasy/0,0.html')
        self.cover_url = soup.find(
            attrs={'class': 'holder_cr'}).find('img')['src']
        # Fix: the original returned getattr(self, 'cover_url', self.cover_url),
        # whose eagerly-evaluated default made the getattr a no-op.
        return self.cover_url