Various new and updated Polish news sources
@@ -38,6 +38,8 @@ calibre_plugins/
 recipes/.git
 recipes/.gitignore
 recipes/README.md
+recipes/icon_checker.py
+recipes/readme_updater.py
 recipes/katalog_egazeciarz.recipe
 recipes/tv_axnscifi.recipe
 recipes/tv_comedycentral.recipe
@@ -60,6 +62,7 @@ recipes/tv_tvpkultura.recipe
 recipes/tv_tvppolonia.recipe
 recipes/tv_tvpuls.recipe
 recipes/tv_viasathistory.recipe
+recipes/icons/katalog_egazeciarz.png
 recipes/icons/tv_axnscifi.png
 recipes/icons/tv_comedycentral.png
 recipes/icons/tv_discoveryscience.png
@@ -21,35 +21,24 @@ class Adventure_zone(BasicNewsRecipe):
     extra_css = '.main-bg{text-align: left;} td.capmain{ font-size: 22px; }'
     feeds = [(u'Nowinki', u'http://www.adventure-zone.info/fusion/feeds/news.php')]

-    '''def parse_feeds (self):
-        feeds = BasicNewsRecipe.parse_feeds(self)
-        soup=self.index_to_soup(u'http://www.adventure-zone.info/fusion/feeds/news.php')
-        tag=soup.find(name='channel')
-        titles=[]
-        for r in tag.findAll(name='image'):
-            r.extract()
-        art=tag.findAll(name='item')
-        for i in art:
-            titles.append(i.title.string)
-        for feed in feeds:
-            for article in feed.articles[:]:
-                article.title=titles[feed.articles.index(article)]
-        return feeds'''

     '''def get_cover_url(self):
         soup = self.index_to_soup('http://www.adventure-zone.info/fusion/news.php')
         cover=soup.find(id='box_OstatninumerAZ')
         self.cover_url='http://www.adventure-zone.info/fusion/'+ cover.center.a.img['src']
         return getattr(self, 'cover_url', self.cover_url)'''

     def populate_article_metadata(self, article, soup, first):
         result = re.search('(.+) - Adventure Zone', soup.title.string)
         if result:
-            article.title = result.group(1)
+            result = result.group(1)
         else:
             result = soup.body.find('strong')
             if result:
-                article.title = result.string
+                result = result.string
+        if result:
+            result = result.replace('&amp;', '&')
+            result = result.replace('&#39;', '’')
+            article.title = result

     def skip_ad_pages(self, soup):
         skip_tag = soup.body.find(name='td', attrs={'class':'main-bg'})
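The reworked populate_article_metadata above now derives the title once and unescapes leftover HTML entities before assigning it. A standalone sketch of the same cleanup; the two entity strings are assumptions, reconstructed because the extracted diff shows them already decoded:

    import re

    def clean_az_title(page_title, fallback=None):
        # Prefer the '<something> - Adventure Zone' pattern from the page <title>.
        m = re.search(r'(.+) - Adventure Zone', page_title or '')
        result = m.group(1) if m else fallback
        if result:
            # Assumed entities: '&amp;' and '&#39;', as in the hunk above.
            result = result.replace('&amp;', '&')
            result = result.replace('&#39;', '\u2019')
        return result

    # clean_az_title('Gry &amp; konwenty - Adventure Zone') -> 'Gry & konwenty'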
@@ -1,5 +1,7 @@
 from calibre.web.feeds.news import BasicNewsRecipe
 import re
+from calibre.ebooks.BeautifulSoup import Comment
+
 class BenchmarkPl(BasicNewsRecipe):
     title = u'Benchmark.pl'
     __author__ = 'fenuks'
@@ -14,8 +16,8 @@ class BenchmarkPl(BasicNewsRecipe):
     remove_attributes = ['style']
     preprocess_regexps = [(re.compile(ur'<h3><span style="font-size: small;"> Zobacz poprzednie <a href="http://www.benchmark.pl/news/zestawienie/grupa_id/135">Opinie dnia:</a></span>.*</body>', re.DOTALL|re.IGNORECASE), lambda match: '</body>'), (re.compile(ur'Więcej o .*?</ul>', re.DOTALL|re.IGNORECASE), lambda match: '')]
     keep_only_tags = [dict(name='div', attrs={'class':['m_zwykly', 'gallery']}), dict(id='article')]
-    remove_tags_after=dict(name='div', attrs={'class':'body'})
-    remove_tags=[dict(name='div', attrs={'class':['kategoria', 'socialize', 'thumb', 'panelOcenaObserwowane', 'categoryNextToSocializeGallery', 'breadcrumb', 'footer', 'moreTopics']}), dict(name='table', attrs={'background':'http://www.benchmark.pl/uploads/backend_img/a/fotki_newsy/opinie_dnia/bg.png'}), dict(name='table', attrs={'width':'210', 'cellspacing':'1', 'cellpadding':'4', 'border':'0', 'align':'right'})]
+    remove_tags_after = dict(id='article')
+    remove_tags = [dict(name='div', attrs={'class':['comments', 'body', 'kategoria', 'socialize', 'thumb', 'panelOcenaObserwowane', 'categoryNextToSocializeGallery', 'breadcrumb', 'footer', 'moreTopics']}), dict(name='table', attrs = {'background':'http://www.benchmark.pl/uploads/backend_img/a/fotki_newsy/opinie_dnia/bg.png'}), dict(name='table', attrs={'width':'210', 'cellspacing':'1', 'cellpadding':'4', 'border':'0', 'align':'right'})]
     INDEX = 'http://www.benchmark.pl'
     feeds = [(u'Aktualności', u'http://www.benchmark.pl/rss/aktualnosci-pliki.xml'),
              (u'Testy i recenzje', u'http://www.benchmark.pl/rss/testy-recenzje-minirecenzje.xml')]
@@ -27,7 +29,12 @@ class BenchmarkPl(BasicNewsRecipe):
             soup2 = self.index_to_soup(nexturl['href'])
             nexturl = soup2.find(attrs={'class':'next'})
             pagetext = soup2.find(name='div', attrs={'class':'body'})
-            appendtag.find('div', attrs={'class':'k_ster'}).extract()
+            tag = appendtag.find('div', attrs={'class':'k_ster'})
+            if tag:
+                tag.extract()
+            comments = pagetext.findAll(text=lambda text:isinstance(text, Comment))
+            for comment in comments:
+                comment.extract()
             pos = len(appendtag.contents)
             appendtag.insert(pos, pagetext)
         if appendtag.find('div', attrs={'class':'k_ster'}):
@@ -38,14 +45,14 @@ class BenchmarkPl(BasicNewsRecipe):

     def image_article(self, soup, appendtag):
         nexturl = soup.find('div', attrs={'class':'preview'})
-        if nexturl is not None:
+        if nexturl:
             nexturl = nexturl.find('a', attrs={'class':'move_next'})
             image = appendtag.find('div', attrs={'class':'preview'}).div['style'][16:]
             image = self.INDEX + image[:image.find("')")]
             appendtag.find(attrs={'class':'preview'}).name='img'
             appendtag.find(attrs={'class':'preview'})['src']=image
             appendtag.find('a', attrs={'class':'move_next'}).extract()
-        while nexturl is not None:
+        while nexturl:
             nexturl = self.INDEX + nexturl['href']
             soup2 = self.index_to_soup(nexturl)
             nexturl = soup2.find('a', attrs={'class':'move_next'})
@@ -57,20 +64,24 @@ class BenchmarkPl(BasicNewsRecipe):
             pagetext.find('div', attrs={'class':'title'}).extract()
             pagetext.find('div', attrs={'class':'thumb'}).extract()
             pagetext.find('div', attrs={'class':'panelOcenaObserwowane'}).extract()
-            if nexturl is not None:
+            if nexturl:
                 pagetext.find('a', attrs={'class':'move_next'}).extract()
             pagetext.find('a', attrs={'class':'move_back'}).extract()
+            comments = pagetext.findAll(text=lambda text:isinstance(text, Comment))
+            for comment in comments:
+                comment.extract()
             pos = len(appendtag.contents)
             appendtag.insert(pos, pagetext)


     def preprocess_html(self, soup):
-        if soup.find('div', attrs={'class':'preview'}) is not None:
+        if soup.find('div', attrs={'class':'preview'}):
             self.image_article(soup, soup.body)
         else:
             self.append_page(soup, soup.body)
         for a in soup('a'):
-            if a.has_key('href') and 'http://' not in a['href'] and 'https://' not in a['href']:
+            if a.has_key('href') and not a['href'].startswith('http'):
                 a['href'] = self.INDEX + a['href']
+        for r in soup.findAll(attrs={'class':['comments', 'body']}):
+            r.extract()
         return soup
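Benchmark.pl's append_page above is the pagination idiom this PR applies across many recipes: follow the 'next' link, graft each page's body onto the first page, and purge HTML comment nodes so calibre's later cleanup does not trip over them. A minimal sketch of the idiom, with placeholder selectors rather than Benchmark.pl's real markup:

    from calibre.web.feeds.news import BasicNewsRecipe
    from calibre.ebooks.BeautifulSoup import Comment

    class MultiPageSketch(BasicNewsRecipe):
        title = 'Pagination sketch'  # placeholder recipe, not part of this PR

        def append_page(self, soup, appendtag):
            nexturl = soup.find(attrs={'class': 'next'})            # placeholder selector
            while nexturl:
                soup2 = self.index_to_soup(nexturl['href'])
                nexturl = soup2.find(attrs={'class': 'next'})
                pagetext = soup2.find('div', attrs={'class': 'body'})  # placeholder selector
                for comment in pagetext.findAll(text=lambda t: isinstance(t, Comment)):
                    comment.extract()                               # drop HTML comments
                appendtag.insert(len(appendtag.contents), pagetext)

        def preprocess_html(self, soup):
            self.append_page(soup, soup.body)
            return soup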
@@ -14,7 +14,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
 class biweekly(BasicNewsRecipe):
     __author__ = u'Łukasz Grąbczewski'
     title = 'Biweekly'
-    language = 'en'
+    language = 'en_PL'
     publisher = 'National Audiovisual Institute'
     publication_type = 'magazine'
     description = u'link with culture [English edition of Polish magazine]: literature, theatre, film, art, music, views, talks'
recipes/blog_biszopa.recipe (new file)
@@ -0,0 +1,30 @@
+__license__ = 'GPL v3'
+from calibre.web.feeds.news import BasicNewsRecipe
+
+
+class BlogBiszopa(BasicNewsRecipe):
+    title = u'Blog Biszopa'
+    __author__ = 'fenuks'
+    description = u'Zapiski z Granitowego Miasta'
+    category = 'history'
+    #publication_type = ''
+    language = 'pl'
+    #encoding = ''
+    #extra_css = ''
+    cover_url = 'http://blogbiszopa.pl/wp-content/themes/biszop/images/logo.png'
+    masthead_url = ''
+    use_embedded_content = False
+    oldest_article = 7
+    max_articles_per_feed = 100
+    no_stylesheets = True
+    remove_empty_feeds = True
+    remove_javascript = True
+    remove_attributes = ['style', 'font']
+    ignore_duplicate_articles = {'title', 'url'}
+
+    keep_only_tags = [dict(id='main-content')]
+    remove_tags = [dict(name='footer')]
+    #remove_tags_after = {}
+    #remove_tags_before = {}
+
+    feeds = [(u'Artyku\u0142y', u'http://blogbiszopa.pl/feed/')]
@@ -1,5 +1,6 @@
 from calibre.web.feeds.news import BasicNewsRecipe
 import re
+
 class Ciekawostki_Historyczne(BasicNewsRecipe):
     title = u'Ciekawostki Historyczne'
     oldest_article = 7
@@ -10,39 +11,27 @@ class Ciekawostki_Historyczne(BasicNewsRecipe):
     masthead_url = 'http://ciekawostkihistoryczne.pl/wp-content/themes/Wordpress_Magazine/images/logo-ciekawostki-historyczne-male.jpg'
     cover_url = 'http://ciekawostkihistoryczne.pl/wp-content/themes/Wordpress_Magazine/images/logo-ciekawostki-historyczne-male.jpg'
     max_articles_per_feed = 100
+    oldest_article = 140000
     preprocess_regexps = [(re.compile(ur'Ten artykuł ma kilka stron.*?</fb:like>', re.DOTALL), lambda match: ''), (re.compile(ur'<h2>Zobacz też:</h2>.*?</ol>', re.DOTALL), lambda match: '')]
     no_stylesheets = True
     remove_empty_feeds = True
     keep_only_tags = [dict(name='div', attrs={'class':'post'})]
+    recursions = 5
     remove_tags = [dict(id='singlepostinfo')]

     feeds = [(u'Staro\u017cytno\u015b\u0107', u'http://ciekawostkihistoryczne.pl/tag/starozytnosc/feed/'), (u'\u015aredniowiecze', u'http://ciekawostkihistoryczne.pl/tag/sredniowiecze/feed/'), (u'Nowo\u017cytno\u015b\u0107', u'http://ciekawostkihistoryczne.pl/tag/nowozytnosc/feed/'), (u'XIX wiek', u'http://ciekawostkihistoryczne.pl/tag/xix-wiek/feed/'), (u'1914-1939', u'http://ciekawostkihistoryczne.pl/tag/1914-1939/feed/'), (u'1939-1945', u'http://ciekawostkihistoryczne.pl/tag/1939-1945/feed/'), (u'Powojnie (od 1945)', u'http://ciekawostkihistoryczne.pl/tag/powojnie/feed/'), (u'Recenzje', u'http://ciekawostkihistoryczne.pl/category/recenzje/feed/')]

-    def append_page(self, soup, appendtag):
-        tag=soup.find(name='h7')
-        if tag:
-            if tag.br:
-                pass
-            elif tag.nextSibling.name=='p':
-                tag=tag.nextSibling
-            nexturl = tag.findAll('a')
-            for nextpage in nexturl:
-                tag.extract()
-                nextpage= nextpage['href']
-                soup2 = self.index_to_soup(nextpage)
-                pagetext = soup2.find(name='div', attrs={'class':'post'})
-                for r in pagetext.findAll('div', attrs={'id':'singlepostinfo'}):
-                    r.extract()
-                for r in pagetext.findAll('div', attrs={'class':'wp-caption alignright'}):
-                    r.extract()
-                for r in pagetext.findAll('h1'):
-                    r.extract()
-                pagetext.find('h6').nextSibling.extract()
-                pagetext.find('h7').nextSibling.extract()
-                pos = len(appendtag.contents)
-                appendtag.insert(pos, pagetext)
+    def is_link_wanted(self, url, tag):
+        return 'ciekawostkihistoryczne' in url and url[-2] in {'2', '3', '4', '5', '6'}

-    def preprocess_html(self, soup):
-        self.append_page(soup, soup.body)
+    def postprocess_html(self, soup, first_fetch):
+        tag = soup.find('h7')
+        if tag:
+            tag.nextSibling.extract()
+        if not first_fetch:
+            for r in soup.findAll(['h1']):
+                r.extract()
+            soup.find('h6').nextSibling.extract()
         return soup
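Ciekawostki Historyczne swaps its hand-written append_page for calibre's built-in recursive fetcher: recursions = 5 lets the downloader follow links found on the article page, is_link_wanted narrows those links to pagination URLs (pages 2 through 6), and postprocess_html prunes the headers repeated on every fetched page. The division of labour, in isolation:

    from calibre.web.feeds.news import BasicNewsRecipe

    class RecursiveFetchSketch(BasicNewsRecipe):
        title = 'Recursion sketch'  # placeholder, illustrating the hunk above
        recursions = 5              # follow links inside fetched pages, up to 5 deep

        def is_link_wanted(self, url, tag):
            # Keep only same-site links whose path ends in page number 2..6
            # (url[-2] assumes URLs shaped like .../2/, as in the hunk above).
            return 'ciekawostkihistoryczne' in url and url[-2] in {'2', '3', '4', '5', '6'}

        def postprocess_html(self, soup, first_fetch):
            if not first_fetch:
                # Follow-up pages repeat the article header; keep it only once.
                for r in soup.findAll(['h1']):
                    r.extract()
            return soup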
@@ -1,5 +1,5 @@
 # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
+import re
 from calibre.web.feeds.news import BasicNewsRecipe

 class Computerworld_pl(BasicNewsRecipe):
     title = u'Computerworld.pl'
@@ -12,8 +12,16 @@ class Computerworld_pl(BasicNewsRecipe):
     no_stylesheets = True
     oldest_article = 7
     max_articles_per_feed = 100
-    keep_only_tags = [dict(attrs={'class':['tyt_news', 'prawo', 'autor', 'tresc']})]
-    remove_tags_after = dict(name='div', attrs={'class':'rMobi'})
-    remove_tags = [dict(name='div', attrs={'class':['nnav', 'rMobi']}), dict(name='table', attrs={'class':'ramka_slx'})]
+    remove_attributes = ['style',]
+    preprocess_regexps = [(re.compile(u'Zobacz również:', re.IGNORECASE), lambda m: ''), (re.compile(ur'[*]+reklama[*]+', re.IGNORECASE), lambda m: ''),]
+    keep_only_tags = [dict(id=['szpaltaL', 's2011'])]
+    remove_tags_after = dict(name='div', attrs={'class':'tresc'})
+    remove_tags = [dict(attrs={'class':['nnav', 'rMobi', 'tagi', 'rec']}),]
     feeds = [(u'Wiadomo\u015bci', u'http://rssout.idg.pl/cw/news_iso.xml')]
+
+    def skip_ad_pages(self, soup):
+        if soup.title.string.lower() == 'advertisement':
+            tag = soup.find(name='a')
+            if tag:
+                new_soup = self.index_to_soup(tag['href'], raw=True)
+                return new_soup
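The new skip_ad_pages hook handles IDG's interstitial ad pages: when a fetched page's title is literally 'advertisement', the first anchor on it points at the real article, and returning that page's raw bytes from skip_ad_pages makes calibre process them in place of the ad page. On normal pages the hook falls through and returns None, which leaves the page untouched.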
@@ -1,5 +1,6 @@
 from calibre.web.feeds.news import BasicNewsRecipe
-from calibre.ebooks.BeautifulSoup import BeautifulSoup
+from calibre.ebooks.BeautifulSoup import BeautifulSoup, Comment
+
 class CoNowegoPl(BasicNewsRecipe):
     title = u'conowego.pl'
     __author__ = 'fenuks'
@@ -35,6 +36,9 @@ class CoNowegoPl(BasicNewsRecipe):
             pos = len(appendtag.contents)
             appendtag.insert(pos, pagetext)

+            comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+            for comment in comments:
+                comment.extract()
         for r in appendtag.findAll(attrs={'class':['pages', 'paginationWrap']}):
             r.extract()

@@ -8,6 +8,7 @@ class BasicUserRecipe1337668045(BasicNewsRecipe):
     cover_url = 'http://drytooling.com.pl/images/drytooling-kindle.png'
     description = u'Drytooling.com.pl jest serwisem wspinaczki zimowej, alpinizmu i himalaizmu. Jeśli uwielbiasz zimę, nie możesz doczekać się aż wyciągniesz szpej z szafki i uderzysz w Tatry, Alpy, czy może Himalaje, to znajdziesz tutaj naprawdę dużo interesujących Cię treści! Zapraszamy!'
     __author__ = u'Damian Granowski'
+    language = 'pl'
     oldest_article = 100
     max_articles_per_feed = 20
     auto_cleanup = True
@@ -1,4 +1,5 @@
 from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import Comment

 class Dzieje(BasicNewsRecipe):
     title = u'dzieje.pl'
@@ -28,11 +29,14 @@ class Dzieje(BasicNewsRecipe):
             pagetext = soup2.find(id='content-area').find(attrs={'class':'content'})
             for r in pagetext.findAll(attrs={'class':['fieldgroup group-groupkul', 'fieldgroup group-zdjeciekult', 'fieldgroup group-zdjecieciekaw', 'fieldgroup group-zdjecieksiazka', 'fieldgroup group-zdjeciedu', 'field field-type-filefield field-field-zdjecieglownawyd']}):
                 r.extract()
-            pos = len(appendtag.contents)
-            appendtag.insert(pos, pagetext)
+            comments = pagetext.findAll(text=lambda text:isinstance(text, Comment))
+            # appendtag.insert(pos, pagetext)
             tag = soup2.find('li', attrs={'class':'pager-next'})
         for r in appendtag.findAll(attrs={'class':['item-list', 'field field-type-computed field-field-tagi', ]}):
             r.extract()
+        comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+        for comment in comments:
+            comment.extract()

     def find_articles(self, url):
         articles = []
@@ -64,7 +68,7 @@ class Dzieje(BasicNewsRecipe):

     def preprocess_html(self, soup):
         for a in soup('a'):
-            if a.has_key('href') and 'http://' not in a['href'] and 'https://' not in a['href']:
+            if a.has_key('href') and not a['href'].startswith('http'):
                 a['href'] = self.index + a['href']
         self.append_page(soup, soup.body)
         return soup
@@ -2,6 +2,8 @@

 from calibre.web.feeds.news import BasicNewsRecipe
 import re
+from calibre.ebooks.BeautifulSoup import Comment
+
 class Dziennik_pl(BasicNewsRecipe):
     title = u'Dziennik.pl'
     __author__ = 'fenuks'
@@ -54,6 +56,9 @@ class Dziennik_pl(BasicNewsRecipe):
                 v = pagetext.findAll(name=dictionary['name'], attrs=dictionary['attrs'])
                 for delete in v:
                     delete.extract()
+            comments = pagetext.findAll(text=lambda text:isinstance(text, Comment))
+            for comment in comments:
+                comment.extract()
             pos = len(appendtag.contents)
             appendtag.insert(pos, pagetext)
         if appendtag.find('div', attrs={'class':'article_paginator'}):
@@ -1,5 +1,7 @@
 import re
 from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import Comment
+
 class DziennikWschodni(BasicNewsRecipe):
     title = u'Dziennik Wschodni'
     __author__ = 'fenuks'
@@ -73,6 +75,10 @@ class DziennikWschodni(BasicNewsRecipe):
             pos = len(appendtag.contents)
             appendtag.insert(pos, pagetext)
+
+        comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+        for comment in comments:
+            comment.extract()

     def preprocess_html(self, soup):
         self.append_page(soup, soup.body)
         return soup
@@ -1,5 +1,6 @@
 import re
 from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import Comment

 class EchoDnia(BasicNewsRecipe):
     title = u'Echo Dnia'
@@ -69,6 +70,10 @@ class EchoDnia(BasicNewsRecipe):
             pos = len(appendtag.contents)
             appendtag.insert(pos, pagetext)
+
+        comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+        for comment in comments:
+            comment.extract()

     def preprocess_html(self, soup):
         self.append_page(soup, soup.body)
         return soup
@@ -3,29 +3,37 @@
 __license__ = 'GPL v3'
 __copyright__ = '2010, matek09, matek09@gmail.com'

-from calibre.web.feeds.news import BasicNewsRecipe
 import re
+from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import BeautifulSoup, Comment

 class Esensja(BasicNewsRecipe):

     title = u'Esensja'
-    __author__ = 'matek09'
-    description = 'Monthly magazine'
+    __author__ = 'matek09 & fenuks'
+    description = 'Magazyn kultury popularnej'
     encoding = 'utf-8'
     no_stylesheets = True
     language = 'pl'
     remove_javascript = True
+    masthead_url = 'http://esensja.pl/img/wrss.gif'
+    oldest_article = 1
+    URL = 'http://esensja.pl'
     HREF = '0'
+    remove_attributes = ['style', 'bgcolor', 'alt', 'color']
-    #keep_only_tags =[]
+    keep_only_tags = [dict(attrs={'class':'sekcja'}), ]
     #keep_only_tags.append(dict(name = 'div', attrs = {'class' : 'article'})
-    remove_tags_before = dict(dict(name = 'div', attrs = {'class' : 't-title'}))
-    remove_tags_after = dict(dict(name = 'img', attrs = {'src' : '../../../2000/01/img/tab_bot.gif'}))
+    #remove_tags_before = dict(dict(name = 'div', attrs = {'class' : 't-title'}))
+    remove_tags_after = dict(id='tekst')

-    remove_tags =[]
-    remove_tags.append(dict(name = 'img', attrs = {'src' : '../../../2000/01/img/tab_top.gif'}))
-    remove_tags.append(dict(name = 'img', attrs = {'src' : '../../../2000/01/img/tab_bot.gif'}))
-    remove_tags.append(dict(name = 'div', attrs = {'class' : 't-title2 nextpage'}))
+    remove_tags = [dict(name = 'img', attrs = {'src' : ['../../../2000/01/img/tab_top.gif', '../../../2000/01/img/tab_bot.gif']}),
+            dict(name = 'div', attrs = {'class' : 't-title2 nextpage'}),
+            #dict(attrs={'rel':'lightbox[galeria]'})
+            dict(attrs={'class':['tekst_koniec', 'ref', 'wykop']}),
+            dict(attrs={'itemprop':['copyrightHolder', 'publisher']}),
+            dict(id='komentarze')
+            ]

     extra_css = '''
     .t-title {font-size: x-large; font-weight: bold; text-align: left}
@@ -35,8 +43,9 @@ class Esensja(BasicNewsRecipe):
     .annot-ref {font-style: italic; text-align: left}
     '''

-    preprocess_regexps = [(re.compile(r'alt="[^"]*"'),
-                lambda match: '')]
+    preprocess_regexps = [(re.compile(r'alt="[^"]*"'), lambda match: ''),
+        (re.compile(ur'(title|alt)="[^"]*?"', re.DOTALL), lambda match: ''),
+    ]

     def parse_index(self):
         soup = self.index_to_soup('http://www.esensja.pl/magazyn/')
@@ -47,15 +56,19 @@ class Esensja(BasicNewsRecipe):
         soup = self.index_to_soup(self.HREF + '01.html')
         self.cover_url = 'http://www.esensja.pl/magazyn/' + year + '/' + month + '/img/ilustr/cover_b.jpg'
         feeds = []
+        chapter = ''
+        subchapter = ''
+        articles = []
         intro = soup.find('div', attrs={'class' : 'n-title'})
+        '''
         introduction = {'title' : self.tag_to_string(intro.a),
                         'url' : self.HREF + intro.a['href'],
                         'date' : '',
                         'description' : ''}
         chapter = 'Wprowadzenie'
-        subchapter = ''
-        articles = []
         articles.append(introduction)
+        '''
+
         for tag in intro.findAllNext(attrs={'class': ['chapter', 'subchapter', 'n-title']}):
             if tag.name in 'td':
                 if len(articles) > 0:
@@ -71,17 +84,72 @@ class Esensja(BasicNewsRecipe):
                 subchapter = self.tag_to_string(tag)
                 subchapter = self.tag_to_string(tag)
                 continue
-            articles.append({'title' : self.tag_to_string(tag.a), 'url' : self.HREF + tag.a['href'], 'date' : '', 'description' : ''})
-            a = self.index_to_soup(self.HREF + tag.a['href'])
+
+            finalurl = tag.a['href']
+            if not finalurl.startswith('http'):
+                finalurl = self.HREF + finalurl
+            articles.append({'title' : self.tag_to_string(tag.a), 'url' : finalurl, 'date' : '', 'description' : ''})
+
+            a = self.index_to_soup(finalurl)
             i = 1
+
             while True:
                 div = a.find('div', attrs={'class' : 't-title2 nextpage'})
                 if div is not None:
-                    a = self.index_to_soup(self.HREF + div.a['href'])
-                    articles.append({'title' : self.tag_to_string(tag.a) + ' c. d. ' + str(i), 'url' : self.HREF + div.a['href'], 'date' : '', 'description' : ''})
+                    link = div.a['href']
+                    if not link.startswith('http'):
+                        link = self.HREF + link
+                    a = self.index_to_soup(link)
+                    articles.append({'title' : self.tag_to_string(tag.a) + ' c. d. ' + str(i), 'url' : link, 'date' : '', 'description' : ''})
                     i = i + 1
                 else:
                     break

         return feeds

+    def append_page(self, soup, appendtag):
+        r = appendtag.find(attrs={'class':'wiecej_xxx'})
+        if r:
+            nr = r.findAll(attrs={'class':'tn-link'})[-1]
+            try:
+                nr = int(nr.a.string)
+            except:
+                return
+            baseurl = soup.find(attrs={'property':'og:url'})['content'] + '&strona={0}'
+            for number in range(2, nr+1):
+                soup2 = self.index_to_soup(baseurl.format(number))
+                pagetext = soup2.find(attrs={'class':'tresc'})
+                pos = len(appendtag.contents)
+                appendtag.insert(pos, pagetext)
+                for r in appendtag.findAll(attrs={'class':['wiecej_xxx', 'tekst_koniec']}):
+                    r.extract()
+                for r in appendtag.findAll('script'):
+                    r.extract()
+
+                comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+                for comment in comments:
+                    comment.extract()
+
+    def preprocess_html(self, soup):
+        self.append_page(soup, soup.body)
+        for tag in soup.findAll(attrs={'class':'img_box_right'}):
+            temp = tag.find('img')
+            src = ''
+            if temp:
+                src = temp.get('src', '')
+            for r in tag.findAll('a', recursive=False):
+                r.extract()
+            info = tag.find(attrs={'class':'img_info'})
+            text = str(tag)
+            if not src:
+                src = re.search('src="[^"]*?"', text)
+                if src:
+                    src = src.group(0)
+                    src = src[5:].replace('//', '/')
+            if src:
+                tag.contents = []
+                tag.insert(0, BeautifulSoup('<img src="{0}{1}" />'.format(self.URL, src)))
+                if info:
+                    tag.insert(len(tag.contents), info)
+        return soup
recipes/esensja_(rss).recipe (new file)
@@ -0,0 +1,109 @@
+__license__ = 'GPL v3'
+import re
+from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import BeautifulSoup, Comment
+
+
+class EsensjaRSS(BasicNewsRecipe):
+    title = u'Esensja (RSS)'
+    __author__ = 'fenuks'
+    description = u'Magazyn kultury popularnej'
+    category = 'reading, fantasy, reviews, boardgames, culture'
+    #publication_type = ''
+    language = 'pl'
+    encoding = 'utf-8'
+    INDEX = 'http://www.esensja.pl'
+    extra_css = '''.t-title {font-size: x-large; font-weight: bold; text-align: left}
+                   .t-author {font-size: x-small; text-align: left}
+                   .t-title2 {font-size: x-small; font-style: italic; text-align: left}
+                   .text {font-size: small; text-align: left}
+                   .annot-ref {font-style: italic; text-align: left}
+                   '''
+    cover_url = ''
+    masthead_url = 'http://esensja.pl/img/wrss.gif'
+    use_embedded_content = False
+    oldest_article = 7
+    max_articles_per_feed = 100
+    no_stylesheets = True
+    remove_empty_feeds = True
+    remove_javascript = True
+    ignore_duplicate_articles = {'title', 'url'}
+    preprocess_regexps = [(re.compile(r'alt="[^"]*"'), lambda match: ''),
+                          (re.compile(ur'(title|alt)="[^"]*?"', re.DOTALL), lambda match: ''),
+                          ]
+    remove_attributes = ['style', 'bgcolor', 'alt', 'color']
+    keep_only_tags = [dict(attrs={'class':'sekcja'}), ]
+    remove_tags_after = dict(id='tekst')
+
+    remove_tags = [dict(name = 'img', attrs = {'src' : ['../../../2000/01/img/tab_top.gif', '../../../2000/01/img/tab_bot.gif']}),
+                   dict(name = 'div', attrs = {'class' : 't-title2 nextpage'}),
+                   #dict(attrs={'rel':'lightbox[galeria]'})
+                   dict(attrs={'class':['tekst_koniec', 'ref', 'wykop']}),
+                   dict(attrs={'itemprop':['copyrightHolder', 'publisher']}),
+                   dict(id='komentarze')
+                   ]
+
+    feeds = [(u'Książka', u'http://esensja.pl/rss/ksiazka.rss'),
+             (u'Film', u'http://esensja.pl/rss/film.rss'),
+             (u'Komiks', u'http://esensja.pl/rss/komiks.rss'),
+             (u'Gry', u'http://esensja.pl/rss/gry.rss'),
+             (u'Muzyka', u'http://esensja.pl/rss/muzyka.rss'),
+             (u'Twórczość', u'http://esensja.pl/rss/tworczosc.rss'),
+             (u'Varia', u'http://esensja.pl/rss/varia.rss'),
+             (u'Zgryźliwi Tetrycy', u'http://esensja.pl/rss/tetrycy.rss'),
+             (u'Nowe książki', u'http://esensja.pl/rss/xnowosci.rss'),
+             (u'Ostatnio dodane książki', u'http://esensja.pl/rss/xdodane.rss'),
+             ]
+
+    def get_cover_url(self):
+        soup = self.index_to_soup(self.INDEX)
+        cover = soup.find(id='panel_1')
+        self.cover_url = self.INDEX + cover.find('a')['href'].replace('index.html', '') + 'img/ilustr/cover_b.jpg'
+        return getattr(self, 'cover_url', self.cover_url)
+
+    def append_page(self, soup, appendtag):
+        r = appendtag.find(attrs={'class':'wiecej_xxx'})
+        if r:
+            nr = r.findAll(attrs={'class':'tn-link'})[-1]
+            try:
+                nr = int(nr.a.string)
+            except:
+                return
+            baseurl = soup.find(attrs={'property':'og:url'})['content'] + '&strona={0}'
+            for number in range(2, nr+1):
+                soup2 = self.index_to_soup(baseurl.format(number))
+                pagetext = soup2.find(attrs={'class':'tresc'})
+                pos = len(appendtag.contents)
+                appendtag.insert(pos, pagetext)
+                for r in appendtag.findAll(attrs={'class':['wiecej_xxx', 'tekst_koniec']}):
+                    r.extract()
+                for r in appendtag.findAll('script'):
+                    r.extract()
+
+                comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+                for comment in comments:
+                    comment.extract()
+
+    def preprocess_html(self, soup):
+        self.append_page(soup, soup.body)
+        for tag in soup.findAll(attrs={'class':'img_box_right'}):
+            temp = tag.find('img')
+            src = ''
+            if temp:
+                src = temp.get('src', '')
+            for r in tag.findAll('a', recursive=False):
+                r.extract()
+            info = tag.find(attrs={'class':'img_info'})
+            text = str(tag)
+            if not src:
+                src = re.search('src="[^"]*?"', text)
+                if src:
+                    src = src.group(0)
+                    src = src[5:].replace('//', '/')
+            if src:
+                tag.contents = []
+                tag.insert(0, BeautifulSoup('<img src="{0}{1}" />'.format(self.INDEX, src)))
+                if info:
+                    tag.insert(len(tag.contents), info)
+        return soup
@@ -1,6 +1,7 @@
-from calibre.web.feeds.news import BasicNewsRecipe
 import re
+from calibre.web.feeds.news import BasicNewsRecipe
 from calibre.ebooks.BeautifulSoup import BeautifulSoup

 class FilmWebPl(BasicNewsRecipe):
     title = u'FilmWeb'
     __author__ = 'fenuks'
@@ -9,12 +10,14 @@ class FilmWebPl(BasicNewsRecipe):
     category = 'movies'
     language = 'pl'
     index = 'http://www.filmweb.pl'
+    #extra_css = '.MarkupPhotoHTML-7 {float:left; margin-right: 10px;}'
     oldest_article = 8
     max_articles_per_feed = 100
     no_stylesheets = True
     remove_empty_feeds = True
     ignore_duplicate_articles = {'title', 'url'}
-    preprocess_regexps = [(re.compile(u'\(kliknij\,\ aby powiększyć\)', re.IGNORECASE), lambda m: ''), ]#(re.compile(ur' | ', re.IGNORECASE), lambda m: '')]
+    remove_javascript = True
+    preprocess_regexps = [(re.compile(u'\(kliknij\,\ aby powiększyć\)', re.IGNORECASE), lambda m: ''), (re.compile(ur'(<br ?/?>\s*?<br ?/?>\s*?)+', re.IGNORECASE), lambda m: '<br />')]#(re.compile(ur' | ', re.IGNORECASE), lambda m: '')]
     extra_css = '.hdrBig {font-size:22px;} ul {list-style-type:none; padding: 0; margin: 0;}'
     remove_tags = [dict(name='div', attrs={'class':['recommendOthers']}), dict(name='ul', attrs={'class':'fontSizeSet'}), dict(attrs={'class':'userSurname anno'})]
     remove_attributes = ['style',]
@@ -42,6 +45,11 @@ class FilmWebPl(BasicNewsRecipe):
         if skip_tag is not None:
             return self.index_to_soup(skip_tag['href'], raw=True)

+    def postprocess_html(self, soup, first_fetch):
+        for r in soup.findAll(attrs={'class':'singlephoto'}):
+            r['style'] = 'float:left; margin-right: 10px;'
+        return soup
+
     def preprocess_html(self, soup):
         for a in soup('a'):
             if a.has_key('href') and 'http://' not in a['href'] and 'https://' not in a['href']:
@@ -56,4 +64,8 @@ class FilmWebPl(BasicNewsRecipe):
             tag.name = 'div'
             for t in tag.findAll('li'):
                 t.name = 'div'
+        for r in soup.findAll(id=re.compile('photo-\d+')):
+            r.extract()
+        for r in soup.findAll(style=re.compile('float: ?left')):
+            r['class'] = 'singlephoto'
         return soup
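FilmWeb's float handling is a deliberate round trip: remove_attributes strips every inline style during cleanup, so preprocess_html (which runs before cleanup) tags floated nodes with a 'singlephoto' class, and postprocess_html (which runs after) re-applies the single float rule the layout needs. Reduced to just that mechanism:

    import re
    from calibre.web.feeds.news import BasicNewsRecipe

    class FloatRoundTripSketch(BasicNewsRecipe):
        title = 'Float round-trip sketch'   # placeholder, mirroring the hunks above
        remove_attributes = ['style']       # cleanup wipes all inline styles

        def preprocess_html(self, soup):
            # Runs before cleanup: remember which nodes were floated.
            for r in soup.findAll(style=re.compile('float: ?left')):
                r['class'] = 'singlephoto'
            return soup

        def postprocess_html(self, soup, first_fetch):
            # Runs after cleanup: restore the one style rule that matters.
            for r in soup.findAll(attrs={'class': 'singlephoto'}):
                r['style'] = 'float:left; margin-right: 10px;'
            return soup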
@@ -1,5 +1,6 @@
 import re
 from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import Comment

 class GazetaLubuska(BasicNewsRecipe):
     title = u'Gazeta Lubuska'
@@ -59,6 +60,10 @@ class GazetaLubuska(BasicNewsRecipe):
             pos = len(appendtag.contents)
             appendtag.insert(pos, pagetext)
+
+        comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+        for comment in comments:
+            comment.extract()

     def preprocess_html(self, soup):
         self.append_page(soup, soup.body)
         return soup
@@ -1,5 +1,6 @@
 import re
 from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import Comment

 class GazetaPomorska(BasicNewsRecipe):
     title = u'Gazeta Pomorska'
@@ -86,6 +87,10 @@ class GazetaPomorska(BasicNewsRecipe):
             pos = len(appendtag.contents)
             appendtag.insert(pos, pagetext)
+
+        comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+        for comment in comments:
+            comment.extract()

     def preprocess_html(self, soup):
         self.append_page(soup, soup.body)
         return soup
@@ -1,5 +1,6 @@
 import re
 from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import Comment

 class GazetaWspolczesna(BasicNewsRecipe):
     title = u'Gazeta Wsp\xf3\u0142czesna'
@@ -58,6 +59,10 @@ class GazetaWspolczesna(BasicNewsRecipe):
             pos = len(appendtag.contents)
             appendtag.insert(pos, pagetext)
+
+        comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+        for comment in comments:
+            comment.extract()

     def preprocess_html(self, soup):
         self.append_page(soup, soup.body)
         return soup
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import Comment

 class Gazeta_Wyborcza(BasicNewsRecipe):
     title = u'Gazeta.pl'
@@ -16,6 +16,7 @@ class Gazeta_Wyborcza(BasicNewsRecipe):
     max_articles_per_feed = 100
     remove_javascript = True
     no_stylesheets = True
+    ignore_duplicate_articles = {'title', 'url'}
     remove_tags_before = dict(id='k0')
     remove_tags_after = dict(id='banP4')
     remove_tags = [dict(name='div', attrs={'class':'rel_box'}), dict(attrs={'class':['date', 'zdjP', 'zdjM', 'pollCont', 'rel_video', 'brand', 'txt_upl']}), dict(name='div', attrs={'id':'footer'})]
@@ -48,6 +49,9 @@ class Gazeta_Wyborcza(BasicNewsRecipe):
             url = self.INDEX + link['href']
             soup2 = self.index_to_soup(url)
             pagetext = soup2.find(id='artykul')
+            comments = pagetext.findAll(text=lambda text:isinstance(text, Comment))
+            for comment in comments:
+                comment.extract()
             pos = len(appendtag.contents)
             appendtag.insert(pos, pagetext)
             tag = soup2.find('div', attrs={'id': 'Str'})
@@ -65,6 +69,9 @@ class Gazeta_Wyborcza(BasicNewsRecipe):
         nexturl = pagetext.find(id='gal_btn_next')
         if nexturl:
             nexturl = nexturl.a['href']
+        comments = pagetext.findAll(text=lambda text:isinstance(text, Comment))
+        for comment in comments:
+            comment.extract()
         pos = len(appendtag.contents)
         appendtag.insert(pos, pagetext)
         rem = appendtag.find(id='gal_navi')
@@ -105,3 +112,7 @@ class Gazeta_Wyborcza(BasicNewsRecipe):
         soup = self.index_to_soup('http://wyborcza.pl/' + cover.contents[3].a['href'])
         self.cover_url = 'http://wyborcza.pl' + soup.img['src']
         return getattr(self, 'cover_url', self.cover_url)
+
+    '''def image_url_processor(self, baseurl, url):
+        print "@@@@@@@@", url
+        return url.replace('http://wyborcza.pl/ ', '')'''
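The triple-quoted image_url_processor kept at the end of the Wyborcza recipe is calibre's hook for rewriting image URLs before download. If the stray-space URLs it was written against reappear, the re-enabled method, minus the debug print, would read:

    def image_url_processor(self, baseurl, url):
        # Wyborcza image URLs were observed with an embedded stray space.
        return url.replace('http://wyborcza.pl/ ', '')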
@@ -1,5 +1,6 @@
 import re
 from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import Comment

 class GCN(BasicNewsRecipe):
     title = u'Gazeta Codziennej Nowiny'
@@ -16,7 +17,7 @@ class GCN(BasicNewsRecipe):
     remove_empty_feeds = True
     no_stylesheets = True
     ignore_duplicate_articles = {'title', 'url'}
+    remove_attributes = ['style']
     preprocess_regexps = [(re.compile(ur'Czytaj:.*?</a>', re.DOTALL), lambda match: ''), (re.compile(ur'Przeczytaj także:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''),
         (re.compile(ur'Przeczytaj również:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''), (re.compile(ur'Zobacz też:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: '')]

@@ -78,6 +79,10 @@ class GCN(BasicNewsRecipe):
             pos = len(appendtag.contents)
             appendtag.insert(pos, pagetext)
+
+        comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+        for comment in comments:
+            comment.extract()

     def preprocess_html(self, soup):
         self.append_page(soup, soup.body)
         return soup
@@ -15,6 +15,7 @@ class Gildia(BasicNewsRecipe):
     no_stylesheets = True
     ignore_duplicate_articles = {'title', 'url'}
     preprocess_regexps = [(re.compile(ur'</?sup>'), lambda match: '') ]
+    ignore_duplicate_articles = {'title', 'url'}
     remove_tags = [dict(name='div', attrs={'class':'backlink'}), dict(name='div', attrs={'class':'im_img'}), dict(name='div', attrs={'class':'addthis_toolbox addthis_default_style'})]
     keep_only_tags = dict(name='div', attrs={'class':'widetext'})
     feeds = [(u'Gry', u'http://www.gry.gildia.pl/rss'), (u'Literatura', u'http://www.literatura.gildia.pl/rss'), (u'Film', u'http://www.film.gildia.pl/rss'), (u'Horror', u'http://www.horror.gildia.pl/rss'), (u'Konwenty', u'http://www.konwenty.gildia.pl/rss'), (u'Plansz\xf3wki', u'http://www.planszowki.gildia.pl/rss'), (u'Manga i anime', u'http://www.manga.gildia.pl/rss'), (u'Star Wars', u'http://www.starwars.gildia.pl/rss'), (u'Techno', u'http://www.techno.gildia.pl/rss'), (u'Historia', u'http://www.historia.gildia.pl/rss'), (u'Magia', u'http://www.magia.gildia.pl/rss'), (u'Bitewniaki', u'http://www.bitewniaki.gildia.pl/rss'), (u'RPG', u'http://www.rpg.gildia.pl/rss'), (u'LARP', u'http://www.larp.gildia.pl/rss'), (u'Muzyka', u'http://www.muzyka.gildia.pl/rss'), (u'Nauka', u'http://www.nauka.gildia.pl/rss')]
@@ -34,7 +35,7 @@ class Gildia(BasicNewsRecipe):

     def preprocess_html(self, soup):
         for a in soup('a'):
-            if a.has_key('href') and 'http://' not in a['href'] and 'https://' not in a['href']:
+            if a.has_key('href') and not a['href'].startswith('http'):
                 if '/gry/' in a['href']:
                     a['href']='http://www.gry.gildia.pl' + a['href']
                 elif u'książk' in soup.title.string.lower() or u'komiks' in soup.title.string.lower():
@@ -1,5 +1,6 @@
 from calibre.web.feeds.news import BasicNewsRecipe
 from calibre.ebooks.BeautifulSoup import BeautifulSoup
+
 class Gram_pl(BasicNewsRecipe):
     title = u'Gram.pl'
     __author__ = 'fenuks'
@@ -1,5 +1,6 @@
 import time
 from calibre.web.feeds.recipes import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import Comment

 class GryOnlinePl(BasicNewsRecipe):
     title = u'Gry-Online.pl'
@@ -40,10 +41,14 @@ class GryOnlinePl(BasicNewsRecipe):
                 r.extract()
             for r in pagetext.findAll(attrs={'itemprop':'description'}):
                 r.extract()
+
             pos = len(appendtag.contents)
             appendtag.insert(pos, pagetext)
             for r in appendtag.findAll(attrs={'class':['n5p', 'add-info', 'twitter-share-button', 'lista lista3 lista-gry']}):
                 r.extract()
+            comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+            for comment in comments:
+                comment.extract()
         else:
             tag = appendtag.find('div', attrs={'class':'S018stronyr'})
             if tag:
@@ -70,10 +75,16 @@ class GryOnlinePl(BasicNewsRecipe):
                 r.extract()
             for r in pagetext.findAll(attrs={'itemprop':'description'}):
                 r.extract()
+
+            comments = pagetext.findAll(text=lambda text:isinstance(text, Comment))
+            [comment.extract() for comment in comments]
             pos = len(appendtag.contents)
             appendtag.insert(pos, pagetext)
             for r in appendtag.findAll(attrs={'class':['n5p', 'add-info', 'twitter-share-button', 'lista lista3 lista-gry', 'S018strony']}):
                 r.extract()
+            comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+            for comment in comments:
+                comment.extract()

     def image_url_processor(self, baseurl, url):
         if url.startswith('..'):
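
Note: this comment-stripping block recurs almost verbatim in the recipes below, which is why each of them now imports Comment from calibre.ebooks.BeautifulSoup. A standalone sketch of the pattern (bs4 used here purely for illustration; calibre's older bundled BeautifulSoup spells find_all as findAll):

    from bs4 import BeautifulSoup, Comment

    def strip_comments(tag):
        # find_all(text=...) yields NavigableString nodes; Comment is a
        # subclass, so the isinstance() filter selects exactly the
        # <!-- ... --> nodes.
        for comment in tag.find_all(text=lambda text: isinstance(text, Comment)):
            comment.extract()   # detach the node from the tree

    soup = BeautifulSoup('<p>keep<!-- ad marker --></p>', 'html.parser')
    strip_comments(soup)
    print(soup)   # -> <p>keep</p>
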
BIN  recipes/icons/bachormagazyn.png   (new file, 898 B)
BIN  recipes/icons/blog_biszopa.png    (new file, 755 B)
BIN  recipes/icons/esenja.png          (new file, 329 B)
BIN  recipes/icons/esensja_(rss).png   (new file, 329 B)
BIN  (modified icon, filename not shown: 806 B -> 869 B)
BIN  recipes/icons/ksiazka_pl.png      (new file, 1.3 KiB)
     recipes/icons/nowy_obywatel.png   (mode change: executable -> normal file, 480 B)
BIN  recipes/icons/websecurity_pl.png  (new file, 863 B)
@@ -1,5 +1,7 @@
 from calibre.web.feeds.news import BasicNewsRecipe
 import re
+from calibre.ebooks.BeautifulSoup import Comment
+
 class in4(BasicNewsRecipe):
     title = u'IN4.pl'
     oldest_article = 7

@@ -28,6 +30,9 @@ class in4(BasicNewsRecipe):
         while nexturl:
             soup2 = self.index_to_soup(nexturl)
             pagetext = soup2.find(id='news')
+            comments = pagetext.findAll(text=lambda text:isinstance(text, Comment))
+            for comment in comments:
+                comment.extract()
             pos = len(appendtag.contents)
             appendtag.insert(pos, pagetext)
             nexturl = None
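
Note: in4's append_page, like the append_page methods in the recipes below, stitches follow-up pages into the first one, and the new Comment cleanup runs on each page before it is spliced in. A condensed sketch of that shared shape; MultiPageSketch and find_next_url are hypothetical stand-ins for each recipe's own class and pager lookup:

    from calibre.web.feeds.news import BasicNewsRecipe
    from calibre.ebooks.BeautifulSoup import Comment

    class MultiPageSketch(BasicNewsRecipe):
        title = 'sketch'   # hypothetical recipe, for illustration only

        def append_page(self, soup, appendtag):
            nexturl = self.find_next_url(soup)
            while nexturl:
                soup2 = self.index_to_soup(nexturl)
                pagetext = soup2.find(id='news')   # body container, as in in4
                # strip <!-- ... --> nodes before splicing the page in
                for c in pagetext.findAll(text=lambda t: isinstance(t, Comment)):
                    c.extract()
                appendtag.insert(len(appendtag.contents), pagetext)
                nexturl = self.find_next_url(soup2)   # falsy once the pager ends

        def find_next_url(self, soup):
            # hypothetical helper: each recipe locates its own "next page" link
            a = soup.find('a', attrs={'class': 'next'})
            return a['href'] if a else None
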
@@ -15,7 +15,7 @@ class INFRA(BasicNewsRecipe):
     remove_tags_before=dict(name='h2', attrs={'class':'contentheading'})
     remove_tags_after=dict(attrs={'class':'pagenav'})
     remove_tags=[dict(attrs={'class':'pagenav'})]
-    feeds = [(u'Najnowsze wiadomo\u015bci', u'http://www.infra.org.pl/index.php?option=com_rd_rss&id=1')]
+    feeds = [(u'Najnowsze wiadomo\u015bci', u'http://www.infra.org.pl/rss')]

     def preprocess_html(self, soup):
         for item in soup.findAll(style=True):
@@ -12,6 +12,8 @@ class Kosmonauta(BasicNewsRecipe):
     INDEX = 'http://www.kosmonauta.net'
     oldest_article = 7
     no_stylesheets = True
+    remove_javascript = True
+    remove_attributes = ['style']
     max_articles_per_feed = 100
     keep_only_tags = [dict(name='div', attrs={'class':'item-page'})]
     remove_tags = [dict(attrs={'class':['article-tools clearfix', 'cedtag', 'nav clearfix', 'jwDisqusForm']})]
@@ -1,5 +1,6 @@
 from calibre.web.feeds.news import BasicNewsRecipe
-from calibre.ebooks.BeautifulSoup import BeautifulSoup as bs
+from calibre.ebooks.BeautifulSoup import BeautifulSoup as bs, Comment
+
 class KurierGalicyjski(BasicNewsRecipe):
     title = u'Kurier Galicyjski'
     __author__ = 'fenuks'

@@ -42,6 +43,9 @@ class KurierGalicyjski(BasicNewsRecipe):
             r.extract()
         for r in appendtag.findAll(attrs={'style':'border-top-width: thin; border-top-style: dashed; border-top-color: #CCC; border-bottom-width: thin; border-bottom-style: dashed; border-bottom-color: #CCC; padding-top:5px; padding-bottom:5px; text-align:right; margin-top:10px; height:20px;'}):
             r.extract()
+        comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+        for comment in comments:
+            comment.extract()

     def preprocess_html(self, soup):
         self.append_page(soup, soup.body)
@@ -1,5 +1,6 @@
 import re
 from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import Comment

 class KurierPoranny(BasicNewsRecipe):
     title = u'Kurier Poranny'

@@ -73,6 +74,11 @@ class KurierPoranny(BasicNewsRecipe):
         pos = len(appendtag.contents)
         appendtag.insert(pos, pagetext)

+        comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+        for comment in comments:
+            comment.extract()
+
+
     def preprocess_html(self, soup):
         self.append_page(soup, soup.body)
         return soup
@@ -1,4 +1,5 @@
 from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import Comment

 class LinuxJournal(BasicNewsRecipe):
     title = u'Linux Journal'

@@ -25,6 +26,9 @@ class LinuxJournal(BasicNewsRecipe):
             soup2 = self.index_to_soup('http://www.linuxjournal.com'+ nexturl)
             pagetext = soup2.find(attrs={'class':'node-inner'}).find(attrs={'class':'content'})
             next = appendtag.find('li', attrs={'class':'pager-next'})
+            comments = pagetext.findAll(text=lambda text:isinstance(text, Comment))
+            for comment in comments:
+                comment.extract()
             pos = len(appendtag.contents)
             appendtag.insert(pos, pagetext)
             tag = appendtag.find('div', attrs={'class':'links'})
@@ -1,5 +1,6 @@
 import re
 from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import Comment

 class NTO(BasicNewsRecipe):
     title = u'Nowa Trybuna Opolska'

@@ -58,6 +59,10 @@ class NTO(BasicNewsRecipe):
         pos = len(appendtag.contents)
         appendtag.insert(pos, pagetext)

+        comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+        for comment in comments:
+            comment.extract()
+
     def preprocess_html(self, soup):
         self.append_page(soup, soup.body)
         return soup
@@ -1,4 +1,6 @@
 from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import Comment
+
 class OCLab(BasicNewsRecipe):
     title = u'OCLab.pl'
     oldest_article = 7

@@ -26,6 +28,10 @@ class OCLab(BasicNewsRecipe):
         appendtag.insert(pos, pagetext)
         for r in appendtag.findAll(attrs={'class':'post-nav-bottom-list'}):
             r.extract()
+        comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+        for comment in comments:
+            comment.extract()
+
     def preprocess_html(self, soup):
         self.append_page(soup, soup.body)
         return soup
@@ -1,5 +1,6 @@
 import re
 from calibre.web.feeds.news import BasicNewsRecipe
+
 class Overclock_pl(BasicNewsRecipe):
     title = u'Overclock.pl'
     oldest_article = 7
@@ -1,4 +1,8 @@
 from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import Comment
+
+#currently recipe is not working
+
 class PC_Foster(BasicNewsRecipe):
     title = u'PC Foster'
     oldest_article = 7

@@ -29,6 +33,9 @@ class PC_Foster(BasicNewsRecipe):
         appendtag.insert(pos, pagetext)
         for r in appendtag.findAll(attrs={'class':'review_content double'}):
             r.extract()
+        comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+        for comment in comments:
+            comment.extract()

     def preprocess_html(self, soup):
         self.append_page(soup, soup.body)
@@ -1,4 +1,6 @@
 from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import Comment
+
 class PurePC(BasicNewsRecipe):
     title = u'PurePC'
     oldest_article = 7

@@ -27,6 +29,9 @@ class PurePC(BasicNewsRecipe):
         appendtag.insert(pos, pagetext)
         for r in appendtag.findAll(attrs={'class':['PageMenuList', 'pager', 'fivestar-widget']}):
             r.extract()
+        comments = appendtag.findAll(text=lambda text:isinstance(text, Comment))
+        for comment in comments:
+            comment.extract()

     def preprocess_html(self, soup):
         self.append_page(soup, soup.body)
@@ -1,5 +1,6 @@
 import re
 from calibre.web.feeds.news import BasicNewsRecipe
+
 class UbuntuPomoc(BasicNewsRecipe):
     title = u'Ubuntu-pomoc.org'
     __author__ = 'fenuks'

@@ -15,8 +16,8 @@ class UbuntuPomoc(BasicNewsRecipe):
     remove_empty_feeds = True
     use_embedded_content = False
     remove_attrs = ['style']
-    keep_only_tags = [dict(attrs={'class':'post'})]
-    remove_tags_after = dict(attrs={'class':'underEntry'})
-    remove_tags = [dict(attrs={'class':['underPostTitle', 'yarpp-related', 'underEntry', 'social', 'tags', 'commentlist', 'youtube_sc']}), dict(id=['wp_rp_first', 'commentReply'])]
+    keep_only_tags = [dict(name='article')]
+    #remove_tags_after = dict(attrs={'class':'underEntry'})
+    remove_tags = [dict(attrs={'class':['yarpp-related', 'youtube_sc', 'share']}), dict(name='footer')]
     feeds = [(u'Ca\u0142o\u015b\u0107', u'http://feeds.feedburner.com/Ubuntu-Pomoc'),
-             (u'Gry', u'http://feeds.feedburner.com/GryUbuntu-pomoc')]
+             ]
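
Note: the Ubuntu-pomoc.org update tracks a site redesign: the post body now lives in an HTML5 article element and the old class-based hooks are gone. When selectors rot like this, a quick standalone probe of the live page saves a full conversion round-trip (bs4 for illustration only; the URL and class names are taken from the hunk above):

    import urllib.request
    from bs4 import BeautifulSoup

    html = urllib.request.urlopen('http://www.ubuntu-pomoc.org/').read()
    soup = BeautifulSoup(html, 'html.parser')
    print(soup.find('article') is not None)                       # keep_only_tags target present?
    print(len(soup.find_all(attrs={'class': 'yarpp-related'})))   # remove_tags hit count
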
recipes/websecurity_pl.recipe  (new file, 28 lines)
@@ -0,0 +1,28 @@
+__license__ = 'GPL v3'
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class WebSecurity(BasicNewsRecipe):
+    title = u'WebSecurity'
+    __author__ = 'fenuks'
+    description = u'WebSecurity.pl to największy w Polsce portal o bezpieczeństwie sieciowym.'
+    category = ''
+    #publication_type = ''
+    language = 'pl'
+    #encoding = ''
+    #extra_css = ''
+    cover_url = 'http://websecurity.pl/images/websecurity-logo.png'
+    masthead_url = ''
+    use_embedded_content = False
+    oldest_article = 7
+    max_articles_per_feed = 100
+    no_stylesheets = True
+    remove_empty_feeds = True
+    remove_javascript = True
+    remove_attributes = ['style', 'font']
+    ignore_duplicate_articles = {'title', 'url'}
+
+    keep_only_tags = [dict(attrs={'class':'article single'}), dict(id='content')]
+    remove_tags = [dict(attrs={'class':['sociable', 'no-comments']})]
+    remove_tags_after = dict(attrs={'class':'sociable'})
+    feeds = [(u'Wszystkie', u'http://websecurity.pl/feed/'), (u'Aktualno\u015bci', u'http://websecurity.pl/aktualnosci/feed/'), (u'Artyku\u0142y', u'http://websecurity.pl/artykuly/feed/'), (u'Blogosfera', u'http://websecurity.pl/blogosfera/wpisy/feed/')]
+
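
Note: a new recipe like websecurity_pl.recipe can be smoke-tested before shipping with calibre's own converter, e.g. "ebook-convert recipes/websecurity_pl.recipe out.epub --test -vv"; --test limits the download to a couple of articles from at most two feeds, so broken selectors or feed URLs show up quickly in the verbose log. Note also that the 'Wszystkie' feed overlaps the three topical feeds, which is what ignore_duplicate_articles is there to absorb.
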
@@ -159,6 +159,7 @@ _extra_lang_codes = {
     'en_CZ' : _('English (Czech Republic)'),
     'en_PH' : _('English (Philippines)'),
     'en_PK' : _('English (Pakistan)'),
+    'en_PL' : _('English (Poland)'),
     'en_HR' : _('English (Croatia)'),
     'en_HK' : _('English (Hong Kong)'),
     'en_HU' : _('English (Hungary)'),