remove old broken recipes

This commit is contained in:
Tomasz Długosz 2018-10-05 23:46:14 +02:00
parent 79d7658ea3
commit a629a69c6e
3 changed files with 0 additions and 211 deletions

View File

@ -1,100 +0,0 @@
#!/usr/bin/env python2
__license__ = 'GPL v3'
__copyright__ = '''2010, matek09, matek09@gmail.com
Modified 2011, Mariusz Wolek <mariusz_dot_wolek @ gmail dot com>
Modified 2012, Artur Stachecki <artur.stachecki@gmail.com>'''
from calibre.web.feeds.news import BasicNewsRecipe
import re
class Wprost(BasicNewsRecipe):
    """Download a full issue of the Polish weekly 'Wprost'.

    Locates the newest issue on http://www.wprost.pl/archiwum/ and builds
    the section/article index from that issue's table of contents.
    """

    # Issue number; filled in by find_last_issue() before parse_index() runs.
    EDITION = 0
    # When True, step back to the newest issue that is fully unlocked
    # (i.e. the one after the last padlock icon in the archive listing).
    FIND_LAST_FULL_ISSUE = True
    # When True, paywalled articles (marked with the padlock icon) are skipped.
    EXCLUDE_LOCKED = True
    # Padlock icon marking locked (paywalled) content in the archive page.
    ICO_BLOCKED = 'http://www.wprost.pl/G/layout2/ico_blocked.png'

    title = u'Wprost'
    __author__ = 'matek09'
    description = u'Popularny tygodnik ogólnopolski - Wprost. Najlepszy wśród polskich tygodników - opiniotwórczy - społeczno-informacyjny - społeczno-kulturalny.'  # noqa
    encoding = 'ISO-8859-2'
    no_stylesheets = True
    language = 'pl'
    remove_javascript = True
    recursions = 0

    # Fix: the originals were wrapped in a redundant dict(dict(...)).
    remove_tags_before = dict(name='div', attrs={'id': 'print-layer'})
    remove_tags_after = dict(name='div', attrs={'id': 'print-layer'})

    # Strip layout tables and hidden/blocked markup from the print view.
    preprocess_regexps = [(re.compile(r'style="display: none;"'), lambda match: ''),
                          (re.compile(r'display: block;'), lambda match: ''),
                          (re.compile(r'\<td\>\<tr\>\<\/table\>'), lambda match: ''),
                          (re.compile(r'\<table .*?\>'), lambda match: ''),
                          (re.compile(r'\<tr>'), lambda match: ''),
                          (re.compile(r'\<td .*?\>'), lambda match: ''),
                          (re.compile(r'\<div id="footer"\>.*?\</footer\>'), lambda match: '')]

    remove_tags = []
    remove_tags.append(dict(name='div', attrs={'class': 'def element-date'}))
    remove_tags.append(dict(name='div', attrs={'class': 'def silver'}))
    remove_tags.append(
        dict(name='div', attrs={'id': 'content-main-column-right'}))

    extra_css = '''.div-header {font-size: x-small; font-weight: bold}'''
    # h2 {font-size: x-large; font-weight: bold}

    def is_blocked(self, a):
        """Return True if the article link *a* is paywalled.

        A locked article link is followed by a sibling padlock <img> icon.
        """
        return a.findNextSibling('img') is not None

    def find_last_issue(self):
        """Find the newest (optionally fully unlocked) issue in the archive.

        Sets self.EDITION, self.EDITION_SHORT and self.cover_url as a side
        effect; returns nothing.
        """
        soup = self.index_to_soup('http://www.wprost.pl/archiwum/')
        a = 0
        if self.FIND_LAST_FULL_ISSUE:
            # The last padlock icon marks the newest partially-locked issue;
            # the next table-of-contents link after it is fully open.
            ico_blocked = soup.findAll('img', attrs={'src': self.ICO_BLOCKED})
            a = ico_blocked[-1].findNext(
                'a', attrs={'title': re.compile(r'Spis *', re.IGNORECASE | re.DOTALL)})
        else:
            a = soup.find('a', attrs={'title': re.compile(
                r'Spis *', re.IGNORECASE | re.DOTALL)})
        self.EDITION = a['href'].replace('/tygodnik/?I=', '')
        self.EDITION_SHORT = a['href'].replace('/tygodnik/?I=15', '')
        self.cover_url = a.img['src']

    def parse_index(self):
        """Build the calibre feed list: [(section_title, [article_dict, ...])]."""
        self.find_last_issue()
        soup = self.index_to_soup(
            'http://www.wprost.pl/tygodnik/?I=' + self.EDITION)
        feeds = []
        headers = soup.findAll(
            attrs={'class': 'block-header block-header-left mtop20 mbottom20'})
        # Section headers and article boxes appear pairwise in page order.
        articles_list = soup.findAll(attrs={'class': 'standard-box'})
        for i in range(len(headers)):
            articles = self.find_articles(articles_list[i])
            if len(articles) > 0:
                section = headers[i].find('a').string
                feeds.append((section, articles))
        return feeds

    def find_articles(self, main_block):
        """Collect article dicts from one section box, honouring EXCLUDE_LOCKED."""
        articles = []
        for a in main_block.findAll('a'):
            # Fix: original used `a.name in "td"` (substring membership);
            # tag-name equality is what was intended.
            if a.name == 'td':
                break
            if self.EXCLUDE_LOCKED and self.is_blocked(a):
                continue
            articles.append({
                'title': self.tag_to_string(a),
                'url': 'http://www.wprost.pl' + a['href'],
                'date': '',
                'description': ''
            })
        return articles

View File

@ -1,51 +0,0 @@
#!/usr/bin/env python2
__license__ = 'GPL v3'
__copyright__ = '''2010, matek09, matek09@gmail.com
Modified 2011, Mariusz Wolek <mariusz_dot_wolek @ gmail dot com>
Modified 2012, Artur Stachecki <artur.stachecki@gmail.com>'''
from calibre.web.feeds.news import BasicNewsRecipe
class Wprost(BasicNewsRecipe):
    """RSS-based recipe for the Polish weekly 'Wprost' (www.wprost.pl)."""

    title = u'Wprost (RSS)'
    __author__ = 'matek09'
    description = u'Portal informacyjny. Najświeższe wiadomości, najciekawsze komentarze i opinie. Blogi najlepszych publicystów.'
    encoding = 'ISO-8859-2'
    no_stylesheets = True
    language = 'pl'
    remove_javascript = True
    use_embedded_content = False
    ignore_duplicate_articles = {'title', 'url'}
    remove_empty_feeds = True

    keep_only_tags = [dict(attrs={'class': 'art-area'})]
    remove_tags = [dict(attrs={'class': 'add300x250'})]

    extra_css = '''.div-header {font-size: x-small; font-weight: bold}'''
    # h2 {font-size: x-large; font-weight: bold}

    feeds = [(u'Tylko u nas', u'http://www.wprost.pl/rss/rss_wprostextra.php'),
             (u'Wydarzenia', u'http://www.wprost.pl/rss/rss.php'),
             (u'Komentarze', u'http://www.wprost.pl/rss/rss_komentarze.php'),
             (u'Wydarzenia: Kraj', u'http://www.wprost.pl/rss/rss_kraj.php'),
             (u'Komentarze: Kraj', u'http://www.wprost.pl/rss/rss_komentarze_kraj.php'),
             (u'Wydarzenia: Świat', u'http://www.wprost.pl/rss/rss_swiat.php'),
             (u'Komentarze: Świat', u'http://www.wprost.pl/rss/rss_komentarze_swiat.php'),
             (u'Wydarzenia: Gospodarka',
              u'http://www.wprost.pl/rss/rss_gospodarka.php'),
             (u'Komentarze: Gospodarka',
              u'http://www.wprost.pl/rss/rss_komentarze_gospodarka.php'),
             (u'Wydarzenia: Życie', u'http://www.wprost.pl/rss/rss_zycie.php'),
             (u'Komentarze: Życie', u'http://www.wprost.pl/rss/rss_komentarze_zycie.php'),
             (u'Wydarzenia: Sport', u'http://www.wprost.pl/rss/rss_sport.php'),
             (u'Komentarze: Sport', u'http://www.wprost.pl/rss/rss_komentarze_sport.php'),
             (u'Przegląd prasy', u'http://www.wprost.pl/rss/rss_prasa.php')
             ]

    def get_cover_url(self):
        """Scrape the current issue's cover image from the weekly's page."""
        soup = self.index_to_soup('http://www.wprost.pl/tygodnik')
        cover = soup.find(attrs={'class': 'wprost-cover'})
        if cover:
            self.cover_url = cover['src']
        # Fix: original returned getattr(self, 'cover_url', self.cover_url),
        # whose default was evaluated eagerly — it raised exactly when the
        # fallback would be needed, so the getattr was a no-op.
        return self.cover_url

View File

@ -1,60 +0,0 @@
#!/usr/bin/env python2
__license__ = 'GPL v3'
from calibre.web.feeds.news import BasicNewsRecipe
class WysokieObcasyRecipe(BasicNewsRecipe):
    """Recipe for 'Wysokie Obcasy', the Saturday supplement of Gazeta Wyborcza."""

    __author__ = u'Artur Stachecki <artur.stachecki@gmail.com>'
    version = 1
    title = u'Wysokie Obcasy'
    publisher = 'Agora SA'
    description = u'Serwis sobotniego dodatku do Gazety Wyborczej'
    category = 'magazine'
    # Fix: `language` and `no_stylesheets` were each assigned twice in the
    # original; the duplicates are removed.
    language = 'pl'
    publication_type = 'magazine'
    cover_url = ''
    remove_empty_feeds = True
    no_stylesheets = True
    oldest_article = 7
    max_articles_per_feed = 100000
    recursions = 0
    remove_javascript = True
    simultaneous_downloads = 5

    keep_only_tags = []
    keep_only_tags.append(dict(name='div', attrs={'id': 'article'}))

    remove_tags = []
    remove_tags.append(dict(name='img'))
    remove_tags.append(dict(name='p', attrs={'class': 'info'}))

    extra_css = '''
        body {font-family: verdana, arial, helvetica, geneva, sans-serif ;}
        h1{text-align: left;}
        '''

    feeds = [
        ('Wszystkie Artykuly', 'feed://www.wysokieobcasy.pl/pub/rss/wysokieobcasy.xml'),
    ]

    def print_version(self, url):
        """Rewrite an article URL into its print-friendly counterpart.

        Assumes article URLs are comma-separated with the two numeric IDs at
        positions 1 and 2 — TODO confirm against the live site.
        """
        baseURL = 'http://www.wysokieobcasy.pl/wysokie-obcasy'
        segments = url.split(',')
        # '/2029020,' appears to be the site's print-view section id —
        # NOTE(review): magic constant inherited from the original recipe.
        subPath = '/2029020,'
        articleURL1 = segments[1]
        articleURL2 = segments[2]
        printVerString = articleURL1 + ',' + articleURL2
        return baseURL + subPath + printVerString + '.html'

    def get_cover_url(self):
        """Scrape the current issue's cover image from the magazine index."""
        soup = self.index_to_soup(
            'http://www.wysokieobcasy.pl/wysokie-obcasy/0,0.html')
        self.cover_url = soup.find(
            attrs={'class': 'holder_cr'}).find('img')['src']
        # Fix: original wrapped this in a redundant
        # getattr(self, 'cover_url', self.cover_url) — the attribute is
        # always set on the line above, so return it directly.
        return self.cover_url