Various new and updated Polish recipes

Kovid Goyal 2013-03-07 09:25:07 +05:30
commit e0007f533c
82 changed files with 1246 additions and 176 deletions

View File

@@ -3,7 +3,7 @@ import re
class Adventure_zone(BasicNewsRecipe):
title = u'Adventure Zone'
__author__ = 'fenuks'
-description = u'Adventure zone - adventure games from A to Z'
+description = u'Czytaj więcej o przygodzie - codzienne nowinki. Szukaj u nas solucji i poradników, czytaj recenzje i zapowiedzi. Także galeria, pliki oraz forum dla wszystkich fanów gier przygodowych.'
category = 'games'
language = 'pl'
no_stylesheets = True

View File

@@ -5,6 +5,7 @@ class Archeowiesci(BasicNewsRecipe):
__author__ = 'fenuks'
category = 'archeology'
language = 'pl'
+description = u'Z pasją o przeszłości'
cover_url='http://archeowiesci.pl/wp-content/uploads/2011/05/Archeowiesci2-115x115.jpg'
oldest_article = 7
needs_subscription='optional'

View File

@@ -2,7 +2,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class AstroNEWS(BasicNewsRecipe):
title = u'AstroNEWS'
__author__ = 'fenuks'
-description = 'AstroNEWS- astronomy every day'
+description = u'AstroNEWS regularnie dostarcza wiadomości o wydarzeniach związanych z astronomią i astronautyką. Informujemy o aktualnych odkryciach i wydarzeniach naukowych, zapowiadamy ciekawe zjawiska astronomiczne. Serwis jest częścią portalu astronomicznego AstroNET prowadzonego przez miłośników astronomii i zawodowych astronomów.'
category = 'astronomy, science'
language = 'pl'
oldest_article = 8

View File

@@ -13,6 +13,7 @@ class Astroflesz(BasicNewsRecipe):
max_articles_per_feed = 100
no_stylesheets = True
use_embedded_content = False
+remove_attributes = ['style']
keep_only_tags = [dict(id="k2Container")]
remove_tags_after = dict(name='div', attrs={'class':'itemLinks'})
remove_tags = [dict(name='div', attrs={'class':['itemLinks', 'itemToolbar', 'itemRatingBlock']})]

View File

@@ -3,7 +3,7 @@ import re
class Astronomia_pl(BasicNewsRecipe):
title = u'Astronomia.pl'
__author__ = 'fenuks'
-description = 'Astronomia - polish astronomy site'
+description = u'Astronomia.pl jest edukacyjnym portalem skierowanym do uczniów, studentów i miłośników astronomii. Przedstawiamy gwiazdy, planety, galaktyki, czarne dziury i wiele innych tajemnic Wszechświata.'
masthead_url = 'http://www.astronomia.pl/grafika/logo.gif'
cover_url = 'http://www.astronomia.pl/grafika/logo.gif'
category = 'astronomy, science'

View File

@@ -3,7 +3,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class Bash_org_pl(BasicNewsRecipe):
title = u'Bash.org.pl'
__author__ = 'fenuks'
-description = 'Bash.org.pl - funny quotations from IRC discussions'
+description = 'Bash.org.pl - zabawne cytaty z IRC'
category = 'funny quotations, humour'
language = 'pl'
cover_url = u'http://userlogos.org/files/logos/dzikiosiol/none_0.png'

View File

@@ -3,14 +3,15 @@ import re
class BenchmarkPl(BasicNewsRecipe):
title = u'Benchmark.pl'
__author__ = 'fenuks'
-description = u'benchmark.pl -IT site'
+description = u'benchmark.pl, recenzje i testy sprzętu, aktualności, rankingi, sterowniki, porady, opinie'
masthead_url = 'http://www.benchmark.pl/i/logo-footer.png'
-cover_url = 'http://www.ieaddons.pl/benchmark/logo_benchmark_new.gif'
+cover_url = 'http://www.benchmark.pl/i/logo-dark.png'
category = 'IT'
language = 'pl'
oldest_article = 8
max_articles_per_feed = 100
-no_stylesheets=True
+no_stylesheets = True
+remove_attributes = ['style']
preprocess_regexps = [(re.compile(ur'<h3><span style="font-size: small;">&nbsp;Zobacz poprzednie <a href="http://www.benchmark.pl/news/zestawienie/grupa_id/135">Opinie dnia:</a></span>.*</body>', re.DOTALL|re.IGNORECASE), lambda match: '</body>'), (re.compile(ur'Więcej o .*?</ul>', re.DOTALL|re.IGNORECASE), lambda match: '')]
keep_only_tags=[dict(name='div', attrs={'class':['m_zwykly', 'gallery']}), dict(id='article')]
remove_tags_after=dict(name='div', attrs={'class':'body'})
@@ -21,17 +22,18 @@ class BenchmarkPl(BasicNewsRecipe):
def append_page(self, soup, appendtag):
-nexturl = soup.find('span', attrs={'class':'next'})
-while nexturl is not None:
-nexturl= self.INDEX + nexturl.parent['href']
-soup2 = self.index_to_soup(nexturl)
-nexturl=soup2.find('span', attrs={'class':'next'})
+nexturl = soup.find(attrs={'class':'next'})
+while nexturl:
+soup2 = self.index_to_soup(nexturl['href'])
+nexturl = soup2.find(attrs={'class':'next'})
pagetext = soup2.find(name='div', attrs={'class':'body'})
appendtag.find('div', attrs={'class':'k_ster'}).extract()
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
-if appendtag.find('div', attrs={'class':'k_ster'}) is not None:
+if appendtag.find('div', attrs={'class':'k_ster'}):
appendtag.find('div', attrs={'class':'k_ster'}).extract()
+for r in appendtag.findAll(attrs={'class':'changePage'}):
+r.extract()
def image_article(self, soup, appendtag):

recipes/biweekly.recipe (new file, 55 lines)
View File

@@ -0,0 +1,55 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = u'Łukasz Grąbczewski 2011'
__version__ = '2.0'
import re, os
from calibre import walk
from calibre.utils.zipfile import ZipFile
from calibre.ptempfile import PersistentTemporaryFile
from calibre.web.feeds.news import BasicNewsRecipe
class biweekly(BasicNewsRecipe):
__author__ = u'Łukasz Grąbczewski'
title = 'Biweekly'
language = 'en'
publisher = 'National Audiovisual Institute'
publication_type = 'magazine'
description = u'link with culture [English edition of Polish magazine]: literature, theatre, film, art, music, views, talks'
conversion_options = {
'authors' : 'Biweekly.pl'
,'publisher' : publisher
,'language' : language
,'comments' : description
,'no_default_epub_cover' : True
,'preserve_cover_aspect_ratio': True
}
def build_index(self):
browser = self.get_browser()
browser.open('http://www.biweekly.pl/')
# find the link
epublink = browser.find_link(text_regex=re.compile('ePUB VERSION'))
# download ebook
self.report_progress(0,_('Downloading ePUB'))
response = browser.follow_link(epublink)
book_file = PersistentTemporaryFile(suffix='.epub')
book_file.write(response.read())
book_file.close()
# convert
self.report_progress(0.2,_('Converting to OEB'))
oeb = self.output_dir + '/INPUT/'
if not os.path.exists(oeb):
os.makedirs(oeb)
with ZipFile(book_file.name) as f:
f.extractall(path=oeb)
for f in walk(oeb):
if f.endswith('.opf'):
return f
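
Side note (not part of the commit): Biweekly above and its Polish-language twin Dwutygodnik below share the same build_index() trick: instead of parsing RSS feeds, the recipe downloads the ePub the publisher already offers, unpacks it into calibre's OEB input directory and returns the path of the .opf, so the normal conversion pipeline takes over from there. A minimal sketch of that shared logic as a standalone helper (fetch_publisher_epub and its parameters are illustrative names, not calibre API):

    import os
    import re
    from calibre import walk
    from calibre.utils.zipfile import ZipFile
    from calibre.ptempfile import PersistentTemporaryFile

    def fetch_publisher_epub(recipe, index_url, link_regex):
        # Locate the publisher-provided ePub link on the front page.
        browser = recipe.get_browser()
        browser.open(index_url)
        epublink = browser.find_link(text_regex=re.compile(link_regex))
        recipe.report_progress(0, 'Downloading ePUB')
        response = browser.follow_link(epublink)
        # Save the ePub to a temporary file.
        book_file = PersistentTemporaryFile(suffix='.epub')
        book_file.write(response.read())
        book_file.close()
        # Unpack it into the recipe's OEB input directory and hand back the OPF
        # so calibre continues the conversion from there.
        recipe.report_progress(0.2, 'Converting to OEB')
        oeb = os.path.join(recipe.output_dir, 'INPUT')
        if not os.path.exists(oeb):
            os.makedirs(oeb)
        with ZipFile(book_file.name) as f:
            f.extractall(path=oeb)
        for name in walk(oeb):
            if name.endswith('.opf'):
                return name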

View File

@@ -3,7 +3,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class CD_Action(BasicNewsRecipe):
title = u'CD-Action'
__author__ = 'fenuks'
-description = 'cdaction.pl - polish games magazine site'
+description = 'Strona CD-Action (CDA), największego w Polsce pisma dla graczy.Pełne wersje gier, newsy, recenzje, zapowiedzi, konkursy, forum, opinie, galerie screenów,trailery, filmiki, patche, teksty. Gry komputerowe (PC) oraz na konsole (PS3, XBOX 360).'
category = 'games'
language = 'pl'
index='http://www.cdaction.pl'

View File

@@ -7,17 +7,13 @@ class Computerworld_pl(BasicNewsRecipe):
description = u'Serwis o IT w przemyśle, finansach, handlu, administracji oraz rynku IT i telekomunikacyjnym - wiadomości, opinie, analizy, porady prawne'
category = 'IT'
language = 'pl'
-masthead_url= 'http://g1.computerworld.pl/cw/beta_gfx/cw2.gif'
-no_stylesheets=True
+masthead_url = 'http://g1.computerworld.pl/cw/beta_gfx/cw2.gif'
+cover_url = 'http://g1.computerworld.pl/cw/beta_gfx/cw2.gif'
+no_stylesheets = True
oldest_article = 7
max_articles_per_feed = 100
-keep_only_tags=[dict(attrs={'class':['tyt_news', 'prawo', 'autor', 'tresc']})]
-remove_tags_after=dict(name='div', attrs={'class':'rMobi'})
-remove_tags=[dict(name='div', attrs={'class':['nnav', 'rMobi']}), dict(name='table', attrs={'class':'ramka_slx'})]
+keep_only_tags = [dict(attrs={'class':['tyt_news', 'prawo', 'autor', 'tresc']})]
+remove_tags_after = dict(name='div', attrs={'class':'rMobi'})
+remove_tags = [dict(name='div', attrs={'class':['nnav', 'rMobi']}), dict(name='table', attrs={'class':'ramka_slx'})]
feeds = [(u'Wiadomo\u015bci', u'http://rssout.idg.pl/cw/news_iso.xml')]
-def get_cover_url(self):
-soup = self.index_to_soup('http://www.computerworld.pl/')
-cover=soup.find(name='img', attrs={'class':'prawo'})
-self.cover_url=cover['src']
-return getattr(self, 'cover_url', self.cover_url)

View File

@@ -4,11 +4,12 @@ class CoNowegoPl(BasicNewsRecipe):
title = u'conowego.pl'
__author__ = 'fenuks'
description = u'Nowy wortal technologiczny oraz gazeta internetowa. Testy najnowszych produktów, fachowe porady i recenzje. U nas znajdziesz wszystko o elektronice użytkowej !'
-cover_url = 'http://www.conowego.pl/fileadmin/templates/main/images/logo_top.png'
+#cover_url = 'http://www.conowego.pl/fileadmin/templates/main/images/logo_top.png'
category = 'IT, news'
language = 'pl'
oldest_article = 7
max_articles_per_feed = 100
+INDEX = 'http://www.conowego.pl/'
no_stylesheets = True
remove_empty_feeds = True
use_embedded_content = False
@@ -36,3 +37,10 @@ class CoNowegoPl(BasicNewsRecipe):
for r in appendtag.findAll(attrs={'class':['pages', 'paginationWrap']}):
r.extract()
+def get_cover_url(self):
+soup = self.index_to_soup('http://www.conowego.pl/magazyn/')
+tag = soup.find(attrs={'class':'ms_left'})
+if tag:
+self.cover_url = self.INDEX + tag.find('img')['src']
+return getattr(self, 'cover_url', self.cover_url)

View File

@@ -1,4 +1,5 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
+import re
from calibre.web.feeds.news import BasicNewsRecipe
class CzasGentlemanow(BasicNewsRecipe):
@@ -13,8 +14,9 @@ class CzasGentlemanow(BasicNewsRecipe):
max_articles_per_feed = 100
no_stylesheets = True
remove_empty_feeds = True
+preprocess_regexps = [(re.compile(u'<h3>Może Cię też zainteresować:</h3>'), lambda m: '')]
use_embedded_content = False
keep_only_tags = [dict(name='div', attrs={'class':'content'})]
-remove_tags = [dict(attrs={'class':'meta_comments'})]
-remove_tags_after = dict(name='div', attrs={'class':'fblikebutton_button'})
+remove_tags = [dict(attrs={'class':'meta_comments'}), dict(id=['comments', 'related_posts_thumbnails'])]
+remove_tags_after = dict(id='comments')
feeds = [(u'M\u0119ski \u015awiat', u'http://czasgentlemanow.pl/category/meski-swiat/feed/'), (u'Styl', u'http://czasgentlemanow.pl/category/styl/feed/'), (u'Vademecum Gentlemana', u'http://czasgentlemanow.pl/category/vademecum/feed/'), (u'Dom i rodzina', u'http://czasgentlemanow.pl/category/dom-i-rodzina/feed/'), (u'Honor', u'http://czasgentlemanow.pl/category/honor/feed/'), (u'Gad\u017cety Gentlemana', u'http://czasgentlemanow.pl/category/gadzety-gentlemana/feed/')]

View File

@@ -18,7 +18,7 @@ class Dobreprogramy_pl(BasicNewsRecipe):
max_articles_per_feed = 100
preprocess_regexps = [(re.compile(ur'<div id="\S+360pmp4">Twoja przeglądarka nie obsługuje Flasha i HTML5 lub wyłączono obsługę JavaScript...</div>'), lambda match: '') ]
keep_only_tags=[dict(attrs={'class':['news', 'entry single']})]
-remove_tags = [dict(attrs={'class':['newsOptions', 'noPrint', 'komentarze', 'tags font-heading-master']}), dict(id='komentarze')]
+remove_tags = [dict(attrs={'class':['newsOptions', 'noPrint', 'komentarze', 'tags font-heading-master']}), dict(id='komentarze'), dict(name='iframe')]
#remove_tags = [dict(name='div', attrs={'class':['komentarze', 'block', 'portalInfo', 'menuBar', 'topBar']})]
feeds = [(u'Aktualności', 'http://feeds.feedburner.com/dobreprogramy/Aktualnosci'),
('Blogi', 'http://feeds.feedburner.com/dobreprogramy/BlogCzytelnikow')]

View File

@@ -0,0 +1,56 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = u'Łukasz Grąbczewski 2011'
__version__ = '2.0'
import re, os
from calibre import walk
from calibre.utils.zipfile import ZipFile
from calibre.ptempfile import PersistentTemporaryFile
from calibre.web.feeds.news import BasicNewsRecipe
class dwutygodnik(BasicNewsRecipe):
__author__ = u'Łukasz Grąbczewski'
title = 'Dwutygodnik'
language = 'pl'
publisher = 'Narodowy Instytut Audiowizualny'
publication_type = 'magazine'
description = u'Strona Kultury: literatura, teatr, film, sztuka, muzyka, felietony, rozmowy'
conversion_options = {
'authors' : 'Dwutygodnik.com'
,'publisher' : publisher
,'language' : language
,'comments' : description
,'no_default_epub_cover' : True
,'preserve_cover_aspect_ratio': True
}
def build_index(self):
browser = self.get_browser()
browser.open('http://www.dwutygodnik.com/')
# find the link
epublink = browser.find_link(text_regex=re.compile('Wersja ePub'))
# download ebook
self.report_progress(0,_('Downloading ePUB'))
response = browser.follow_link(epublink)
book_file = PersistentTemporaryFile(suffix='.epub')
book_file.write(response.read())
book_file.close()
# convert
self.report_progress(0.2,_('Converting to OEB'))
oeb = self.output_dir + '/INPUT/'
if not os.path.exists(oeb):
os.makedirs(oeb)
with ZipFile(book_file.name) as f:
f.extractall(path=oeb)
for f in walk(oeb):
if f.endswith('.opf'):
return f

View File

@@ -3,7 +3,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class Dzieje(BasicNewsRecipe):
title = u'dzieje.pl'
__author__ = 'fenuks'
-description = 'Dzieje - history of Poland'
+description = 'Dzieje.pl - najlepszy portal informacyjno-edukacyjny dotyczący historii Polski XX wieku. Archiwalne fotografie, filmy, katalog postaci, quizy i konkursy.'
cover_url = 'http://www.dzieje.pl/sites/default/files/dzieje_logo.png'
category = 'history'
language = 'pl'

View File

@@ -0,0 +1,34 @@
from calibre.web.feeds.news import BasicNewsRecipe
class DziennikBaltycki(BasicNewsRecipe):
title = u'Dziennik Ba\u0142tycki'
__author__ = 'fenuks'
description = u'Gazeta Regionalna Dziennik Bałtycki. Najnowsze Wiadomości Trójmiasto i Wiadomości Pomorskie. Czytaj!'
category = 'newspaper'
language = 'pl'
encoding = 'iso-8859-2'
masthead_url = 'http://s.polskatimes.pl/g/logo_naglowek/dziennikbaltycki.png?24'
oldest_article = 7
max_articles_per_feed = 100
remove_empty_feeds= True
no_stylesheets = True
use_embedded_content = False
ignore_duplicate_articles = {'title', 'url'}
#preprocess_regexps = [(re.compile(ur'<b>Czytaj także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur',<b>Czytaj też:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>Zobacz także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<center><h4><a.*?</a></h4></center>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TEŻ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ WIĘCEJ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TAKŻE:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>\* CZYTAJ KONIECZNIE:.*', re.DOTALL), lambda match: '</body>'), (re.compile(ur'<b>Nasze serwisy:</b>.*', re.DOTALL), lambda match: '</body>') ]
remove_tags_after= dict(attrs={'src':'http://nm.dz.com.pl/dz.png'})
remove_tags=[dict(id='mat-podobne'), dict(name='a', attrs={'class':'czytajDalej'}), dict(attrs={'src':'http://nm.dz.com.pl/dz.png'})]
feeds = [(u'Wiadomo\u015bci', u'http://www.dziennikbaltycki.pl/rss/dziennikbaltycki_wiadomosci.xml?201302'), (u'Sport', u'http://dziennikbaltycki.feedsportal.com/c/32980/f/533756/index.rss?201302'), (u'Rejsy', u'http://www.dziennikbaltycki.pl/rss/dziennikbaltycki_rejsy.xml?201302'), (u'Biznes na Pomorzu', u'http://www.dziennikbaltycki.pl/rss/dziennikbaltycki_biznesnapomorzu.xml?201302'), (u'GOM', u'http://www.dziennikbaltycki.pl/rss/dziennikbaltycki_gom.xml?201302'), (u'Opinie', u'http://www.dziennikbaltycki.pl/rss/dziennikbaltycki_opinie.xml?201302'), (u'Pitawal Pomorski', u'http://www.dziennikbaltycki.pl/rss/dziennikbaltycki_pitawalpomorski.xml?201302')]
def print_version(self, url):
return url.replace('artykul', 'drukuj')
def skip_ad_pages(self, soup):
if 'Advertisement' in soup.title:
nexturl=soup.find('a')['href']
return self.index_to_soup(nexturl, raw=True)
def get_cover_url(self):
soup = self.index_to_soup('http://www.prasa24.pl/gazeta/dziennik-baltycki/')
self.cover_url=soup.find(id='pojemnik').img['src']
return getattr(self, 'cover_url', self.cover_url)

View File

@@ -0,0 +1,35 @@
from calibre.web.feeds.news import BasicNewsRecipe
class DziennikLodzki(BasicNewsRecipe):
title = u'Dziennik \u0141\xf3dzki'
__author__ = 'fenuks'
description = u'Gazeta Regionalna Dziennik Łódzki. Najnowsze Wiadomości Łódź. Czytaj Wiadomości Łódzkie!'
category = 'newspaper'
language = 'pl'
encoding = 'iso-8859-2'
masthead_url = 'http://s.polskatimes.pl/g/logo_naglowek/dzienniklodzki.png?24'
oldest_article = 7
max_articles_per_feed = 100
remove_empty_feeds = True
no_stylesheets = True
use_embedded_content = False
ignore_duplicate_articles = {'title', 'url'}
#preprocess_regexps = [(re.compile(ur'<b>Czytaj także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur',<b>Czytaj też:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>Zobacz także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<center><h4><a.*?</a></h4></center>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TEŻ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ WIĘCEJ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TAKŻE:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>\* CZYTAJ KONIECZNIE:.*', re.DOTALL), lambda match: '</body>'), (re.compile(ur'<b>Nasze serwisy:</b>.*', re.DOTALL), lambda match: '</body>') ]
remove_tags_after= dict(attrs={'src':'http://nm.dz.com.pl/dz.png'})
remove_tags=[dict(id='mat-podobne'), dict(name='a', attrs={'class':'czytajDalej'}), dict(attrs={'src':'http://nm.dz.com.pl/dz.png'})]
feeds = [(u'Na sygnale', u'http://www.dzienniklodzki.pl/rss/dzienniklodzki_nasygnale.xml?201302'), (u'\u0141\xf3d\u017a', u'http://www.dzienniklodzki.pl/rss/dzienniklodzki_lodz.xml?201302'), (u'Opinie', u'http://www.dzienniklodzki.pl/rss/dzienniklodzki_opinie.xml?201302'), (u'Pieni\u0105dze', u'http://dzienniklodzki.feedsportal.com/c/32980/f/533763/index.rss?201302'), (u'Kultura', u'http://dzienniklodzki.feedsportal.com/c/32980/f/533762/index.rss?201302'), (u'Sport', u'http://dzienniklodzki.feedsportal.com/c/32980/f/533761/index.rss?201302'), (u'Akcje', u'http://www.dzienniklodzki.pl/rss/dzienniklodzki_akcje.xml?201302'), (u'M\xf3j Reporter', u'http://www.dzienniklodzki.pl/rss/dzienniklodzki_mojreporter.xml?201302'), (u'Studni\xf3wki', u'http://www.dzienniklodzki.pl/rss/dzienniklodzki_studniowki.xml?201302'), (u'Kraj', u'http://www.dzienniklodzki.pl/rss/dzienniklodzki_kraj.xml?201302'), (u'Zdrowie', u'http://www.dzienniklodzki.pl/rss/dzienniklodzki_zdrowie.xml?201302')]
def print_version(self, url):
return url.replace('artykul', 'drukuj')
def skip_ad_pages(self, soup):
if 'Advertisement' in soup.title:
nexturl=soup.find('a')['href']
return self.index_to_soup(nexturl, raw=True)
def get_cover_url(self):
soup = self.index_to_soup('http://www.prasa24.pl/gazeta/dziennik-lodzki/')
self.cover_url=soup.find(id='pojemnik').img['src']
return getattr(self, 'cover_url', self.cover_url)

View File

@@ -0,0 +1,78 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe
class DziennikWschodni(BasicNewsRecipe):
title = u'Dziennik Wschodni'
__author__ = 'fenuks'
description = u'Dziennik Wschodni - portal regionalny województwa lubelskiego.'
category = 'newspaper'
language = 'pl'
encoding = 'iso-8859-2'
extra_css = 'ul {list-style: none; padding:0; margin:0;}'
INDEX = 'http://www.dziennikwschodni.pl'
masthead_url = INDEX + '/images/top_logo.png'
oldest_article = 7
max_articles_per_feed = 100
remove_empty_feeds = True
no_stylesheets = True
ignore_duplicate_articles = {'title', 'url'}
preprocess_regexps = [(re.compile(ur'Czytaj:.*?</a>', re.DOTALL), lambda match: ''), (re.compile(ur'Przeczytaj także:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''),
(re.compile(ur'Przeczytaj również:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''), (re.compile(ur'Zobacz też:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: '')]
keep_only_tags = [dict(id=['article', 'cover', 'photostory'])]
remove_tags = [dict(id=['articleTags', 'articleMeta', 'boxReadIt', 'articleGalleries', 'articleConnections',
'ForumArticleComments', 'articleRecommend', 'jedynkiLinks', 'articleGalleryConnections',
'photostoryConnections', 'articleEpaper', 'articlePoll', 'articleAlarm', 'articleByline']),
dict(attrs={'class':'articleFunctions'})]
feeds = [(u'Wszystkie', u'http://www.dziennikwschodni.pl/rss.xml'),
(u'Lublin', u'http://www.dziennikwschodni.pl/lublin.xml'),
(u'Zamość', u'http://www.dziennikwschodni.pl/zamosc.xml'),
(u'Biała Podlaska', u'http://www.dziennikwschodni.pl/biala_podlaska.xml'),
(u'Chełm', u'http://www.dziennikwschodni.pl/chelm.xml'),
(u'Kraśnik', u'http://www.dziennikwschodni.pl/krasnik.xml'),
(u'Puławy', u'http://www.dziennikwschodni.pl/pulawy.xml'),
(u'Świdnik', u'http://www.dziennikwschodni.pl/swidnik.xml'),
(u'Łęczna', u'http://www.dziennikwschodni.pl/leczna.xml'),
(u'Lubartów', u'http://www.dziennikwschodni.pl/lubartow.xml'),
(u'Sport', u'http://www.dziennikwschodni.pl/sport.xml'),
(u'Praca', u'http://www.dziennikwschodni.pl/praca.xml'),
(u'Dom', u'http://www.dziennikwschodni.pl/dom.xml'),
(u'Moto', u'http://www.dziennikwschodni.pl/moto.xml'),
(u'Zdrowie', u'http://www.dziennikwschodni.pl/zdrowie.xml'),
]
def get_cover_url(self):
soup = self.index_to_soup(self.INDEX + '/apps/pbcs.dll/section?Category=JEDYNKI')
nexturl = self.INDEX + soup.find(id='covers').find('a')['href']
soup = self.index_to_soup(nexturl)
self.cover_url = self.INDEX + soup.find(id='cover').find(name='img')['src']
return getattr(self, 'cover_url', self.cover_url)
def append_page(self, soup, appendtag):
tag = soup.find('span', attrs={'class':'photoNavigationPages'})
if tag:
number = int(tag.string.rpartition('/')[-1].replace('&nbsp;', ''))
baseurl = self.INDEX + soup.find(attrs={'class':'photoNavigationNext'})['href'][:-1]
for r in appendtag.findAll(attrs={'class':'photoNavigation'}):
r.extract()
for nr in range(2, number+1):
soup2 = self.index_to_soup(baseurl + str(nr))
pagetext = soup2.find(id='photoContainer')
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
pagetext = soup2.find(attrs={'class':'photoMeta'})
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
pagetext = soup2.find(attrs={'class':'photoStoryText'})
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
def preprocess_html(self, soup):
self.append_page(soup, soup.body)
return soup
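
Side note (not part of the commit): Dziennik Wschodni, Echo Dnia, Gazeta Lubuska, Gazeta Współczesna and the rewritten Gazeta Pomorska further down all share this skeleton, including the preprocess_regexps that strip the "Czytaj:" / "Zobacz też:" cross-links. As a reminder of the mechanics, each entry is a (compiled regex, substitution callable) pair that BasicNewsRecipe runs over the raw HTML of every downloaded article before parsing; a minimal sketch of that assumed behaviour (not copied from calibre's source):

    def apply_preprocess_regexps(recipe, html):
        # Each pair is (compiled_pattern, callable_taking_a_match); re's sub()
        # accepts a callable replacement, so the hook boils down to repeated sub().
        for pattern, repl in recipe.preprocess_regexps:
            html = pattern.sub(repl, html)
        return html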

View File

@@ -0,0 +1,34 @@
from calibre.web.feeds.news import BasicNewsRecipe
class DziennikZachodni(BasicNewsRecipe):
title = u'Dziennik Zachodni'
__author__ = 'fenuks'
description = u'Gazeta Regionalna Dziennik Zachodni. Najnowsze Wiadomości Śląskie. Wiadomości Śląsk. Czytaj!'
category = 'newspaper'
language = 'pl'
encoding = 'iso-8859-2'
masthead_url = 'http://s.polskatimes.pl/g/logo_naglowek/dziennikzachodni.png?24'
oldest_article = 7
max_articles_per_feed = 100
remove_empty_feeds= True
no_stylesheets = True
use_embedded_content = False
ignore_duplicate_articles = {'title', 'url'}
#preprocess_regexps = [(re.compile(ur'<b>Czytaj także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur',<b>Czytaj też:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>Zobacz także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<center><h4><a.*?</a></h4></center>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TEŻ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ WIĘCEJ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TAKŻE:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>\* CZYTAJ KONIECZNIE:.*', re.DOTALL), lambda match: '</body>'), (re.compile(ur'<b>Nasze serwisy:</b>.*', re.DOTALL), lambda match: '</body>') ]
remove_tags_after= dict(attrs={'src':'http://nm.dz.com.pl/dz.png'})
remove_tags=[dict(id='mat-podobne'), dict(name='a', attrs={'class':'czytajDalej'}), dict(attrs={'src':'http://nm.dz.com.pl/dz.png'}), dict(attrs={'href':'http://www.dziennikzachodni.pl/piano'})]
feeds = [(u'Wszystkie', u'http://dziennikzachodni.feedsportal.com/c/32980/f/533764/index.rss?201302'), (u'Wiadomo\u015bci', u'http://dziennikzachodni.feedsportal.com/c/32980/f/533765/index.rss?201302'), (u'Regiony', u'http://www.dziennikzachodni.pl/rss/dziennikzachodni_regiony.xml?201302'), (u'Opinie', u'http://www.dziennikzachodni.pl/rss/dziennikzachodni_regiony.xml?201302'), (u'Blogi', u'http://www.dziennikzachodni.pl/rss/dziennikzachodni_blogi.xml?201302'), (u'Serwisy', u'http://www.dziennikzachodni.pl/rss/dziennikzachodni_serwisy.xml?201302'), (u'Sport', u'http://dziennikzachodni.feedsportal.com/c/32980/f/533766/index.rss?201302'), (u'M\xf3j Reporter', u'http://www.dziennikzachodni.pl/rss/dziennikzachodni_mojreporter.xml?201302'), (u'Na narty', u'http://www.dziennikzachodni.pl/rss/dziennikzachodni_nanarty.xml?201302'), (u'Drogi', u'http://www.dziennikzachodni.pl/rss/dziennikzachodni_drogi.xml?201302'), (u'Pieni\u0105dze', u'http://dziennikzachodni.feedsportal.com/c/32980/f/533768/index.rss?201302')]
def print_version(self, url):
return url.replace('artykul', 'drukuj')
def skip_ad_pages(self, soup):
if 'Advertisement' in soup.title:
nexturl=soup.find('a')['href']
return self.index_to_soup(nexturl, raw=True)
def get_cover_url(self):
soup = self.index_to_soup('http://www.prasa24.pl/gazeta/dziennik-zachodni/')
self.cover_url=soup.find(id='pojemnik').img['src']
return getattr(self, 'cover_url', self.cover_url)

recipes/echo_dnia.recipe (new file, 74 lines)
View File

@@ -0,0 +1,74 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe
class EchoDnia(BasicNewsRecipe):
title = u'Echo Dnia'
__author__ = 'fenuks'
description = u'Echo Dnia - portal regionalny świętokrzyskiego radomskiego i podkarpackiego. Najnowsze wiadomości z Twojego regionu, galerie, video, mp3.'
category = 'newspaper'
language = 'pl'
encoding = 'iso-8859-2'
extra_css = 'ul {list-style: none; padding:0; margin:0;}'
INDEX = 'http://www.echodnia.eu'
masthead_url = INDEX + '/images/top_logo.png'
oldest_article = 7
max_articles_per_feed = 100
remove_empty_feeds = True
no_stylesheets = True
ignore_duplicate_articles = {'title', 'url'}
preprocess_regexps = [(re.compile(ur'Czytaj:.*?</a>', re.DOTALL), lambda match: ''), (re.compile(ur'Przeczytaj także:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''),
(re.compile(ur'Przeczytaj również:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''), (re.compile(ur'Zobacz też:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: '')]
keep_only_tags = [dict(id=['article', 'cover', 'photostory'])]
remove_tags = [dict(id=['articleTags', 'articleMeta', 'boxReadIt', 'articleGalleries', 'articleConnections',
'ForumArticleComments', 'articleRecommend', 'jedynkiLinks', 'articleGalleryConnections',
'photostoryConnections', 'articleEpaper', 'articlePoll', 'articleAlarm', 'articleByline']),
dict(attrs={'class':'articleFunctions'})]
feeds = [(u'Wszystkie', u'http://www.echodnia.eu/rss.xml'),
(u'Świętokrzyskie', u'http://www.echodnia.eu/swietokrzyskie.xml'),
(u'Radomskie', u'http://www.echodnia.eu/radomskie.xml'),
(u'Podkarpackie', u'http://www.echodnia.eu/podkarpackie.xml'),
(u'Sport \u015bwi\u0119tokrzyski', u'http://www.echodnia.eu/sport_swi.xml'),
(u'Sport radomski', u'http://www.echodnia.eu/sport_rad.xml'),
(u'Sport podkarpacki', u'http://www.echodnia.eu/sport_pod.xml'),
(u'Pi\u0142ka no\u017cna', u'http://www.echodnia.eu/pilka.xml'),
(u'Praca', u'http://www.echodnia.eu/praca.xml'),
(u'Dom', u'http://www.echodnia.eu/dom.xml'),
(u'Auto', u'http://www.echodnia.eu/auto.xml'),
(u'Zdrowie', u'http://www.echodnia.eu/zdrowie.xml')]
def get_cover_url(self):
soup = self.index_to_soup(self.INDEX + '/apps/pbcs.dll/section?Category=JEDYNKI')
nexturl = self.INDEX + soup.find(id='covers').find('a')['href']
soup = self.index_to_soup(nexturl)
self.cover_url = self.INDEX + soup.find(id='cover').find(name='img')['src']
return getattr(self, 'cover_url', self.cover_url)
def append_page(self, soup, appendtag):
tag = soup.find('span', attrs={'class':'photoNavigationPages'})
if tag:
number = int(tag.string.rpartition('/')[-1].replace('&nbsp;', ''))
baseurl = self.INDEX + soup.find(attrs={'class':'photoNavigationNext'})['href'][:-1]
for r in appendtag.findAll(attrs={'class':'photoNavigation'}):
r.extract()
for nr in range(2, number+1):
soup2 = self.index_to_soup(baseurl + str(nr))
pagetext = soup2.find(id='photoContainer')
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
pagetext = soup2.find(attrs={'class':'photoMeta'})
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
pagetext = soup2.find(attrs={'class':'photoStoryText'})
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
def preprocess_html(self, soup):
self.append_page(soup, soup.body)
return soup

View File

@@ -4,6 +4,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class eioba(BasicNewsRecipe):
title = u'eioba'
__author__ = 'fenuks'
+description = u'eioba.pl - daj się przeczytać!'
cover_url = 'http://www.eioba.org/lay/logo_pl_v3.png'
language = 'pl'
oldest_article = 7

View File

@@ -5,7 +5,7 @@ class Elektroda(BasicNewsRecipe):
title = u'Elektroda'
oldest_article = 8
__author__ = 'fenuks'
-description = 'Elektroda.pl'
+description = 'Międzynarodowy portal elektroniczny udostępniający bogate zasoby z dziedziny elektroniki oraz forum dyskusyjne.'
cover_url = 'http://demotywatory.elektroda.pl/Thunderpic/logo.gif'
category = 'electronics'
language = 'pl'

View File

@@ -12,6 +12,7 @@ class eMuzyka(BasicNewsRecipe):
no_stylesheets = True
oldest_article = 7
max_articles_per_feed = 100
+remove_attributes = ['style']
keep_only_tags=[dict(name='div', attrs={'id':'news_container'}), dict(name='h3'), dict(name='div', attrs={'class':'review_text'})]
remove_tags=[dict(name='span', attrs={'id':'date'})]
feeds = [(u'Aktualno\u015bci', u'http://www.emuzyka.pl/rss.php?f=1'), (u'Recenzje', u'http://www.emuzyka.pl/rss.php?f=2')]

View File

@@ -4,21 +4,21 @@ from calibre.ebooks.BeautifulSoup import BeautifulSoup
class FilmWebPl(BasicNewsRecipe):
title = u'FilmWeb'
__author__ = 'fenuks'
-description = 'FilmWeb - biggest polish movie site'
-cover_url = 'http://userlogos.org/files/logos/crudus/filmweb.png'
+description = 'Filmweb.pl - Filmy takie jak Ty Filmweb to największy i najczęściej odwiedzany polski serwis filmowy. Największa baza filmów, seriali i aktorów, repertuar kin i tv, ...'
+cover_url = 'http://gfx.filmweb.pl/n/logo-filmweb-bevel.jpg'
category = 'movies'
language = 'pl'
-index='http://www.filmweb.pl'
+index = 'http://www.filmweb.pl'
oldest_article = 8
max_articles_per_feed = 100
-no_stylesheets= True
-remove_empty_feeds=True
+no_stylesheets = True
+remove_empty_feeds = True
ignore_duplicate_articles = {'title', 'url'}
preprocess_regexps = [(re.compile(u'\(kliknij\,\ aby powiększyć\)', re.IGNORECASE), lambda m: ''), ]#(re.compile(ur' | ', re.IGNORECASE), lambda m: '')]
extra_css = '.hdrBig {font-size:22px;} ul {list-style-type:none; padding: 0; margin: 0;}'
-remove_tags= [dict(name='div', attrs={'class':['recommendOthers']}), dict(name='ul', attrs={'class':'fontSizeSet'}), dict(attrs={'class':'userSurname anno'})]
+remove_tags = [dict(name='div', attrs={'class':['recommendOthers']}), dict(name='ul', attrs={'class':'fontSizeSet'}), dict(attrs={'class':'userSurname anno'})]
remove_attributes = ['style',]
-keep_only_tags= [dict(name='h1', attrs={'class':['hdrBig', 'hdrEntity']}), dict(name='div', attrs={'class':['newsInfo', 'newsInfoSmall', 'reviewContent description']})]
+keep_only_tags = [dict(name='h1', attrs={'class':['hdrBig', 'hdrEntity']}), dict(name='div', attrs={'class':['newsInfo', 'newsInfoSmall', 'reviewContent description']})]
feeds = [(u'News / Filmy w produkcji', 'http://www.filmweb.pl/feed/news/category/filminproduction'),
(u'News / Festiwale, nagrody i przeglądy', u'http://www.filmweb.pl/feed/news/category/festival'),
(u'News / Seriale', u'http://www.filmweb.pl/feed/news/category/serials'),

View File

@@ -13,7 +13,7 @@ class FocusRecipe(BasicNewsRecipe):
title = u'Focus'
publisher = u'Gruner + Jahr Polska'
category = u'News'
-description = u'Newspaper'
+description = u'Focus.pl - pierwszy w Polsce portal społecznościowy dla miłośników nauki. Tematyka: nauka, historia, cywilizacja, technika, przyroda, sport, gadżety'
category = 'magazine'
cover_url = ''
remove_empty_feeds = True

View File

@@ -3,6 +3,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class Fotoblogia_pl(BasicNewsRecipe):
title = u'Fotoblogia.pl'
__author__ = 'fenuks'
+description = u'Jeden z największych polskich blogów o fotografii.'
category = 'photography'
language = 'pl'
masthead_url = 'http://img.interia.pl/komputery/nimg/u/0/fotoblogia21.jpg'
@@ -11,6 +12,6 @@ class Fotoblogia_pl(BasicNewsRecipe):
max_articles_per_feed = 100
no_stylesheets = True
use_embedded_content = False
-keep_only_tags=[dict(name='div', attrs={'class':'post-view post-standard'})]
+keep_only_tags=[dict(name='div', attrs={'class':['post-view post-standard', 'photo-container']})]
remove_tags=[dict(attrs={'class':['external fotoblogia', 'categories', 'tags']})]
feeds = [(u'Wszystko', u'http://fotoblogia.pl/feed/rss2')]

View File

@@ -0,0 +1,34 @@
from calibre.web.feeds.news import BasicNewsRecipe
class GazetaKrakowska(BasicNewsRecipe):
title = u'Gazeta Krakowska'
__author__ = 'fenuks'
description = u'Gazeta Regionalna Gazeta Krakowska. Najnowsze Wiadomości Kraków. Informacje Kraków. Czytaj!'
category = 'newspaper'
language = 'pl'
encoding = 'iso-8859-2'
masthead_url = 'http://s.polskatimes.pl/g/logo_naglowek/gazetakrakowska.png?24'
oldest_article = 7
max_articles_per_feed = 100
remove_empty_feeds = True
no_stylesheets = True
use_embedded_content = False
ignore_duplicate_articles = {'title', 'url'}
#preprocess_regexps = [(re.compile(ur'<b>Czytaj także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur',<b>Czytaj też:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>Zobacz także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<center><h4><a.*?</a></h4></center>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TEŻ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ WIĘCEJ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TAKŻE:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>\* CZYTAJ KONIECZNIE:.*', re.DOTALL), lambda match: '</body>'), (re.compile(ur'<b>Nasze serwisy:</b>.*', re.DOTALL), lambda match: '</body>') ]
remove_tags_after= dict(attrs={'src':'http://nm.dz.com.pl/dz.png'})
remove_tags=[dict(id='mat-podobne'), dict(name='a', attrs={'class':'czytajDalej'}), dict(attrs={'src':'http://nm.dz.com.pl/dz.png'})]
feeds = [(u'Fakty24', u'http://gazetakrakowska.feedsportal.com/c/32980/f/533770/index.rss?201302'), (u'Krak\xf3w', u'http://www.gazetakrakowska.pl/rss/gazetakrakowska_krakow.xml?201302'), (u'Tarn\xf3w', u'http://www.gazetakrakowska.pl/rss/gazetakrakowska_tarnow.xml?201302'), (u'Nowy S\u0105cz', u'http://www.gazetakrakowska.pl/rss/gazetakrakowska_nsacz.xml?201302'), (u'Ma\u0142. Zach.', u'http://www.gazetakrakowska.pl/rss/gazetakrakowska_malzach.xml?201302'), (u'Podhale', u'http://www.gazetakrakowska.pl/rss/gazetakrakowska_podhale.xml?201302'), (u'Sport', u'http://gazetakrakowska.feedsportal.com/c/32980/f/533771/index.rss?201302'), (u'Kultura', u'http://gazetakrakowska.feedsportal.com/c/32980/f/533772/index.rss?201302'), (u'Opinie', u'http://www.gazetakrakowska.pl/rss/gazetakrakowska_opinie.xml?201302'), (u'Magazyn', u'http://www.gazetakrakowska.pl/rss/gazetakrakowska_magazyn.xml?201302')]
def print_version(self, url):
return url.replace('artykul', 'drukuj')
def skip_ad_pages(self, soup):
if 'Advertisement' in soup.title:
nexturl=soup.find('a')['href']
return self.index_to_soup(nexturl, raw=True)
def get_cover_url(self):
soup = self.index_to_soup('http://www.prasa24.pl/gazeta/gazeta-krakowska/')
self.cover_url=soup.find(id='pojemnik').img['src']
return getattr(self, 'cover_url', self.cover_url)

View File

@@ -0,0 +1,64 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe
class GazetaLubuska(BasicNewsRecipe):
title = u'Gazeta Lubuska'
__author__ = 'fenuks'
description = u'Gazeta Lubuska - portal regionalny województwa lubuskiego.'
category = 'newspaper'
language = 'pl'
encoding = 'iso-8859-2'
extra_css = 'ul {list-style: none; padding:0; margin:0;}'
INDEX = 'http://www.gazetalubuska.pl'
masthead_url = INDEX + '/images/top_logo.png'
oldest_article = 7
max_articles_per_feed = 100
remove_empty_feeds = True
no_stylesheets = True
ignore_duplicate_articles = {'title', 'url'}
preprocess_regexps = [(re.compile(ur'Czytaj:.*?</a>', re.DOTALL), lambda match: ''), (re.compile(ur'Przeczytaj także:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''),
(re.compile(ur'Przeczytaj również:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''), (re.compile(ur'Zobacz też:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: '')]
keep_only_tags = [dict(id=['article', 'cover', 'photostory'])]
remove_tags = [dict(id=['articleTags', 'articleMeta', 'boxReadIt', 'articleGalleries', 'articleConnections',
'ForumArticleComments', 'articleRecommend', 'jedynkiLinks', 'articleGalleryConnections',
'photostoryConnections', 'articleEpaper', 'articlePoll', 'articleAlarm', 'articleByline']),
dict(attrs={'class':'articleFunctions'})]
feeds = [(u'Wszystkie', u'http://www.gazetalubuska.pl/rss.xml'), (u'Dreznenko', u'http://www.gazetalubuska.pl/drezdenko.xml'), (u'G\u0142og\xf3w', u'http://www.gazetalubuska.pl/glogow.xml'), (u'Gorz\xf3w Wielkopolski', u'http://www.gazetalubuska.pl/gorzow-wielkopolski.xml'), (u'Gubin', u'http://www.gazetalubuska.pl/gubin.xml'), (u'Kostrzyn', u'http://www.gazetalubuska.pl/kostrzyn.xml'), (u'Krosno Odrza\u0144skie', u'http://www.gazetalubuska.pl/krosno-odrzanskie.xml'), (u'Lubsko', u'http://www.gazetalubuska.pl/lubsko.xml'), (u'Mi\u0119dzych\xf3d', u'http://www.gazetalubuska.pl/miedzychod.xml'), (u'Mi\u0119dzyrzecz', u'http://www.gazetalubuska.pl/miedzyrzecz.xml'), (u'Nowa S\xf3l', u'http://www.gazetalubuska.pl/nowa-sol.xml'), (u'S\u0142ubice', u'http://www.gazetalubuska.pl/slubice.xml'), (u'Strzelce Kraje\u0144skie', u'http://www.gazetalubuska.pl/strzelce-krajenskie.xml'), (u'Sulech\xf3w', u'http://www.gazetalubuska.pl/sulechow.xml'), (u'Sul\u0119cin', u'http://www.gazetalubuska.pl/sulecin.xml'), (u'\u015awi\u0119bodzin', u'http://www.gazetalubuska.pl/swiebodzin.xml'), (u'Wolsztyn', u'http://www.gazetalubuska.pl/wolsztyn.xml'), (u'Wschowa', u'http://www.gazetalubuska.pl/wschowa.xml'), (u'Zielona G\xf3ra', u'http://www.gazetalubuska.pl/zielona-gora.xml'), (u'\u017baga\u0144', u'http://www.gazetalubuska.pl/zagan.xml'), (u'\u017bary', u'http://www.gazetalubuska.pl/zary.xml'), (u'Sport', u'http://www.gazetalubuska.pl/sport.xml'), (u'Auto', u'http://www.gazetalubuska.pl/auto.xml'), (u'Dom', u'http://www.gazetalubuska.pl/dom.xml'), (u'Praca', u'http://www.gazetalubuska.pl/praca.xml'), (u'Zdrowie', u'http://www.gazetalubuska.pl/zdrowie.xml')]
def get_cover_url(self):
soup = self.index_to_soup(self.INDEX + '/apps/pbcs.dll/section?Category=JEDYNKI')
nexturl = self.INDEX + soup.find(id='covers').find('a')['href']
soup = self.index_to_soup(nexturl)
self.cover_url = self.INDEX + soup.find(id='cover').find(name='img')['src']
return getattr(self, 'cover_url', self.cover_url)
def append_page(self, soup, appendtag):
tag = soup.find('span', attrs={'class':'photoNavigationPages'})
if tag:
number = int(tag.string.rpartition('/')[-1].replace('&nbsp;', ''))
baseurl = self.INDEX + soup.find(attrs={'class':'photoNavigationNext'})['href'][:-1]
for r in appendtag.findAll(attrs={'class':'photoNavigation'}):
r.extract()
for nr in range(2, number+1):
soup2 = self.index_to_soup(baseurl + str(nr))
pagetext = soup2.find(id='photoContainer')
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
pagetext = soup2.find(attrs={'class':'photoMeta'})
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
pagetext = soup2.find(attrs={'class':'photoStoryText'})
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
def preprocess_html(self, soup):
self.append_page(soup, soup.body)
return soup

View File

@@ -1,102 +1,91 @@
-#!/usr/bin/env python
-# # Przed uzyciem przeczytaj komentarz w sekcji "feeds"
-__license__ = 'GPL v3'
-__copyright__ = u'2010, Richard z forum.eksiazki.org'
-'''pomorska.pl'''
import re
from calibre.web.feeds.news import BasicNewsRecipe
class GazetaPomorska(BasicNewsRecipe):
title = u'Gazeta Pomorska'
-publisher = u'Gazeta Pomorska'
-description = u'Kujawy i Pomorze - wiadomo\u015bci'
+__author__ = 'Richard z forum.eksiazki.org, fenuks'
+description = u'Gazeta Pomorska - portal regionalny'
+category = 'newspaper'
language = 'pl'
-__author__ = u'Richard z forum.eksiazki.org'
-# # (dziekuje t3d z forum.eksiazki.org za testy)
-oldest_article = 2
-max_articles_per_feed = 20
+encoding = 'iso-8859-2'
+extra_css = 'ul {list-style: none; padding:0; margin:0;}'
+INDEX = 'http://www.pomorska.pl'
+masthead_url = INDEX + '/images/top_logo.png'
+oldest_article = 7
+max_articles_per_feed = 100
+remove_empty_feeds = True
no_stylesheets = True
-remove_javascript = True
-preprocess_regexps = [
-(re.compile(r'<a href="http://maps.google[^>]*>[^<]*</a>\.*', re.DOTALL|re.IGNORECASE), lambda m: ''),
-(re.compile(r'[<Bb >]*Poznaj opinie[^<]*[</Bb >]*[^<]*<a href[^>]*>[^<]*</a>\.*', re.DOTALL|re.IGNORECASE), lambda m: ''),
-(re.compile(r'[<Bb >]*Przeczytaj[^<]*[</Bb >]*[^<]*<a href[^>]*>[^<]*</a>\.*', re.DOTALL|re.IGNORECASE), lambda m: ''),
-(re.compile(r'[<Bb >]*Wi.cej informacji[^<]*[</Bb >]*[^<]*<a href[^>]*>[^<]*</a>\.*', re.DOTALL|re.IGNORECASE), lambda m: ''),
-(re.compile(r'<a href[^>]*>[<Bb >]*Wideo[^<]*[</Bb >]*[^<]*</a>\.*', re.DOTALL|re.IGNORECASE), lambda m: ''),
-(re.compile(r'<a href[^>]*>[<Bb >]*KLIKNIJ TUTAJ[^<]*[</Bb >]*[^<]*</a>\.*', re.DOTALL|re.IGNORECASE), lambda m: '')
-]
-feeds = [
-# # Tutaj jest wymieniona lista kategorii jakie mozemy otrzymywac z Gazety
-# # Pomorskiej, po jednej kategorii w wierszu. Jesli na poczatku danego wiersza
-# # znajduje sie jeden znak "#", oznacza to ze kategoria jest zakomentowana
-# # i nie bedziemy jej otrzymywac. Jesli chcemy ja otrzymywac nalezy usunac
-# # znak # z jej wiersza.
-# # Jesli subskrybujemy wiecej niz jedna kategorie, na koncu wiersza z kazda
-# # kategoria musi sie znajdowac niezakomentowany przecinek, z wyjatkiem
-# # ostatniego wiersza - ma byc bez przecinka na koncu.
-# # Rekomendowane opcje wyboru kategorii:
-# # 1. PomorskaRSS - wiadomosci kazdego typu, lub
-# # 2. Region + wybrane miasta, lub
-# # 3. Wiadomosci tematyczne.
-# # Lista kategorii:
-# # PomorskaRSS - wiadomosci kazdego typu, zakomentuj znakiem "#"
-# # przed odkomentowaniem wiadomosci wybranego typu:
-(u'PomorskaRSS', u'http://www.pomorska.pl/rss.xml')
-# # wiadomosci z regionu nie przypisane do okreslonego miasta:
-# (u'Region', u'http://www.pomorska.pl/region.xml'),
-# # wiadomosci przypisane do miast:
-# (u'Bydgoszcz', u'http://www.pomorska.pl/bydgoszcz.xml'),
-# (u'Nak\u0142o', u'http://www.pomorska.pl/naklo.xml'),
-# (u'Koronowo', u'http://www.pomorska.pl/koronowo.xml'),
-# (u'Solec Kujawski', u'http://www.pomorska.pl/soleckujawski.xml'),
-# (u'Grudzi\u0105dz', u'http://www.pomorska.pl/grudziadz.xml'),
-# (u'Inowroc\u0142aw', u'http://www.pomorska.pl/inowroclaw.xml'),
-# (u'Toru\u0144', u'http://www.pomorska.pl/torun.xml'),
-# (u'W\u0142oc\u0142awek', u'http://www.pomorska.pl/wloclawek.xml'),
-# (u'Aleksandr\u00f3w Kujawski', u'http://www.pomorska.pl/aleksandrow.xml'),
-# (u'Brodnica', u'http://www.pomorska.pl/brodnica.xml'),
-# (u'Che\u0142mno', u'http://www.pomorska.pl/chelmno.xml'),
-# (u'Chojnice', u'http://www.pomorska.pl/chojnice.xml'),
-# (u'Ciechocinek', u'http://www.pomorska.pl/ciechocinek.xml'),
-# (u'Golub Dobrzy\u0144', u'http://www.pomorska.pl/golubdobrzyn.xml'),
-# (u'Mogilno', u'http://www.pomorska.pl/mogilno.xml'),
-# (u'Radziej\u00f3w', u'http://www.pomorska.pl/radziejow.xml'),
-# (u'Rypin', u'http://www.pomorska.pl/rypin.xml'),
-# (u'S\u0119p\u00f3lno', u'http://www.pomorska.pl/sepolno.xml'),
-# (u'\u015awiecie', u'http://www.pomorska.pl/swiecie.xml'),
-# (u'Tuchola', u'http://www.pomorska.pl/tuchola.xml'),
-# (u'\u017bnin', u'http://www.pomorska.pl/znin.xml')
-# # wiadomosci tematyczne (redundancja z region/miasta):
-# (u'Sport', u'http://www.pomorska.pl/sport.xml'),
-# (u'Zdrowie', u'http://www.pomorska.pl/zdrowie.xml'),
-# (u'Auto', u'http://www.pomorska.pl/moto.xml'),
-# (u'Dom', u'http://www.pomorska.pl/dom.xml'),
-# (u'Reporta\u017c', u'http://www.pomorska.pl/reportaz.xml'),
-# (u'Gospodarka', u'http://www.pomorska.pl/gospodarka.xml')
-]
-keep_only_tags = [dict(name='div', attrs={'id':'article'})]
-remove_tags = [
-dict(name='p', attrs={'id':'articleTags'}),
-dict(name='div', attrs={'id':'articleEpaper'}),
-dict(name='div', attrs={'id':'articleConnections'}),
-dict(name='div', attrs={'class':'articleFacts'}),
-dict(name='div', attrs={'id':'articleExternalLink'}),
-dict(name='div', attrs={'id':'articleMultimedia'}),
-dict(name='div', attrs={'id':'articleGalleries'}),
-dict(name='div', attrs={'id':'articleAlarm'}),
-dict(name='div', attrs={'id':'adholder_srodek1'}),
-dict(name='div', attrs={'id':'articleVideo'}),
-dict(name='a', attrs={'name':'fb_share'})]
-extra_css = '''h1 { font-size: 1.4em; }
-h2 { font-size: 1.0em; }'''
+ignore_duplicate_articles = {'title', 'url'}
+preprocess_regexps = [(re.compile(ur'Czytaj:.*?</a>', re.DOTALL), lambda match: ''), (re.compile(ur'Przeczytaj także:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''),
+(re.compile(ur'Przeczytaj również:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''), (re.compile(ur'Zobacz też:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: '')]
+keep_only_tags = [dict(id=['article', 'cover', 'photostory'])]
+remove_tags = [dict(id=['articleTags', 'articleMeta', 'boxReadIt', 'articleGalleries', 'articleConnections',
+'ForumArticleComments', 'articleRecommend', 'jedynkiLinks', 'articleGalleryConnections',
+'photostoryConnections', 'articleEpaper', 'articlePoll', 'articleAlarm', 'articleByline']),
+dict(attrs={'class':'articleFunctions'})]
+feeds = [(u'Wszystkie', u'http://www.pomorska.pl/rss.xml'),
+(u'Region', u'http://www.pomorska.pl/region.xml'),
+(u'Bydgoszcz', u'http://www.pomorska.pl/bydgoszcz.xml'),
+(u'Nakło', u'http://www.pomorska.pl/naklo.xml'),
+(u'Koronowo', u'http://www.pomorska.pl/koronowo.xml'),
+(u'Solec Kujawski', u'http://www.pomorska.pl/soleckujawski.xml'),
+(u'Grudziądz', u'http://www.pomorska.pl/grudziadz.xml'),
+(u'Inowrocław', u'http://www.pomorska.pl/inowroclaw.xml'),
+(u'Toruń', u'http://www.pomorska.pl/torun.xml'),
+(u'Włocławek', u'http://www.pomorska.pl/wloclawek.xml'),
+(u'Aleksandrów Kujawski', u'http://www.pomorska.pl/aleksandrow.xml'),
+(u'Brodnica', u'http://www.pomorska.pl/brodnica.xml'),
+(u'Chełmno', u'http://www.pomorska.pl/chelmno.xml'),
+(u'Chojnice', u'http://www.pomorska.pl/chojnice.xml'),
+(u'Ciechocinek', u'http://www.pomorska.pl/ciechocinek.xml'),
+(u'Golub-Dobrzyń', u'http://www.pomorska.pl/golubdobrzyn.xml'),
+(u'Mogilno', u'http://www.pomorska.pl/mogilno.xml'),
+(u'Radziejów', u'http://www.pomorska.pl/radziejow.xml'),
+(u'Rypin', u'http://www.pomorska.pl/rypin.xml'),
+(u'Sępólno', u'http://www.pomorska.pl/sepolno.xml'),
+(u'Świecie', u'http://www.pomorska.pl/swiecie.xml'),
+(u'Tuchola', u'http://www.pomorska.pl/tuchola.xml'),
+(u'Żnin', u'http://www.pomorska.pl/znin.xml'),
+(u'Sport', u'http://www.pomorska.pl/sport.xml'),
+(u'Zdrowie', u'http://www.pomorska.pl/zdrowie.xml'),
+(u'Auto', u'http://www.pomorska.pl/moto.xml'),
+(u'Dom', u'http://www.pomorska.pl/dom.xml'),
+#(u'Reporta\u017c', u'http://www.pomorska.pl/reportaz.xml'),
+(u'Gospodarka', u'http://www.pomorska.pl/gospodarka.xml')]
+def get_cover_url(self):
+soup = self.index_to_soup(self.INDEX + '/apps/pbcs.dll/section?Category=JEDYNKI')
+nexturl = self.INDEX + soup.find(id='covers').find('a')['href']
+soup = self.index_to_soup(nexturl)
+self.cover_url = self.INDEX + soup.find(id='cover').find(name='img')['src']
+return getattr(self, 'cover_url', self.cover_url)
+def append_page(self, soup, appendtag):
+tag = soup.find('span', attrs={'class':'photoNavigationPages'})
+if tag:
+number = int(tag.string.rpartition('/')[-1].replace('&nbsp;', ''))
+baseurl = self.INDEX + soup.find(attrs={'class':'photoNavigationNext'})['href'][:-1]
+for r in appendtag.findAll(attrs={'class':'photoNavigation'}):
+r.extract()
+for nr in range(2, number+1):
+soup2 = self.index_to_soup(baseurl + str(nr))
+pagetext = soup2.find(id='photoContainer')
+if pagetext:
+pos = len(appendtag.contents)
+appendtag.insert(pos, pagetext)
+pagetext = soup2.find(attrs={'class':'photoMeta'})
+if pagetext:
+pos = len(appendtag.contents)
+appendtag.insert(pos, pagetext)
+pagetext = soup2.find(attrs={'class':'photoStoryText'})
+if pagetext:
+pos = len(appendtag.contents)
+appendtag.insert(pos, pagetext)
+def preprocess_html(self, soup):
+self.append_page(soup, soup.body)
+return soup

View File

@@ -0,0 +1,34 @@
from calibre.web.feeds.news import BasicNewsRecipe
class GazetaWroclawska(BasicNewsRecipe):
title = u'Gazeta Wroc\u0142awska'
__author__ = 'fenuks'
description = u'Gazeta Regionalna Gazeta Wrocławska. Najnowsze Wiadomości Wrocław, Informacje Wrocław. Czytaj!'
category = 'newspaper'
language = 'pl'
encoding = 'iso-8859-2'
masthead_url = 'http://s.polskatimes.pl/g/logo_naglowek/gazetawroclawska.png?24'
oldest_article = 7
max_articles_per_feed = 100
remove_empty_feeds = True
no_stylesheets = True
use_embedded_content = False
ignore_duplicate_articles = {'title', 'url'}
#preprocess_regexps = [(re.compile(ur'<b>Czytaj także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur',<b>Czytaj też:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>Zobacz także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<center><h4><a.*?</a></h4></center>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TEŻ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ WIĘCEJ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TAKŻE:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>\* CZYTAJ KONIECZNIE:.*', re.DOTALL), lambda match: '</body>'), (re.compile(ur'<b>Nasze serwisy:</b>.*', re.DOTALL), lambda match: '</body>') ]
remove_tags_after= dict(attrs={'src':'http://nm.dz.com.pl/dz.png'})
remove_tags=[dict(id='mat-podobne'), dict(name='a', attrs={'class':'czytajDalej'}), dict(attrs={'src':'http://nm.dz.com.pl/dz.png'})]
feeds = [(u'Fakty24', u'http://gazetawroclawska.feedsportal.com/c/32980/f/533775/index.rss?201302'), (u'Region', u'http://www.gazetawroclawska.pl/rss/gazetawroclawska_region.xml?201302'), (u'Kultura', u'http://gazetawroclawska.feedsportal.com/c/32980/f/533777/index.rss?201302'), (u'Sport', u'http://gazetawroclawska.feedsportal.com/c/32980/f/533776/index.rss?201302'), (u'Z archiwum', u'http://www.gazetawroclawska.pl/rss/gazetawroclawska_zarchiwum.xml?201302'), (u'M\xf3j reporter', u'http://www.gazetawroclawska.pl/rss/gazetawroclawska_mojreporter.xml?201302'), (u'Historia', u'http://www.gazetawroclawska.pl/rss/gazetawroclawska_historia.xml?201302'), (u'Listy do redakcji', u'http://www.gazetawroclawska.pl/rss/gazetawroclawska_listydoredakcji.xml?201302'), (u'Na drogach', u'http://www.gazetawroclawska.pl/rss/gazetawroclawska_nadrogach.xml?201302')]
def print_version(self, url):
return url.replace('artykul', 'drukuj')
def skip_ad_pages(self, soup):
if 'Advertisement' in soup.title:
nexturl=soup.find('a')['href']
return self.index_to_soup(nexturl, raw=True)
def get_cover_url(self):
soup = self.index_to_soup('http://www.prasa24.pl/gazeta/gazeta-wroclawska/')
self.cover_url=soup.find(id='pojemnik').img['src']
return getattr(self, 'cover_url', self.cover_url)

View File

@ -0,0 +1,63 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe
class GazetaWspolczesna(BasicNewsRecipe):
title = u'Gazeta Wsp\xf3\u0142czesna'
__author__ = 'fenuks'
description = u'Gazeta Współczesna - portal regionalny.'
category = 'newspaper'
language = 'pl'
encoding = 'iso-8859-2'
extra_css = 'ul {list-style: none; padding:0; margin:0;}'
INDEX = 'http://www.wspolczesna.pl'
masthead_url = INDEX + '/images/top_logo.png'
oldest_article = 7
max_articles_per_feed = 100
remove_empty_feeds = True
no_stylesheets = True
ignore_duplicate_articles = {'title', 'url'}
preprocess_regexps = [(re.compile(ur'Czytaj:.*?</a>', re.DOTALL), lambda match: ''), (re.compile(ur'Przeczytaj także:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''),
(re.compile(ur'Przeczytaj również:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''), (re.compile(ur'Zobacz też:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: '')]
keep_only_tags = [dict(id=['article', 'cover', 'photostory'])]
remove_tags = [dict(id=['articleTags', 'articleMeta', 'boxReadIt', 'articleGalleries', 'articleConnections',
'ForumArticleComments', 'articleRecommend', 'jedynkiLinks', 'articleGalleryConnections',
'photostoryConnections', 'articleEpaper', 'articlePoll', 'articleAlarm', 'articleByline']),
dict(attrs={'class':'articleFunctions'})]
feeds = [(u'Wszystkie', u'http://www.wspolczesna.pl/rss.xml'), (u'August\xf3w', u'http://www.wspolczesna.pl/augustow.xml'), (u'Bia\u0142ystok', u'http://www.wspolczesna.pl/bialystok.xml'), (u'Bielsk Podlaski', u'http://www.wspolczesna.pl/bielsk.xml'), (u'E\u0142k', u'http://www.wspolczesna.pl/elk.xml'), (u'Grajewo', u'http://www.wspolczesna.pl/grajewo.xml'), (u'Go\u0142dap', u'http://www.wspolczesna.pl/goldap.xml'), (u'Hajn\xf3wka', u'http://www.wspolczesna.pl/hajnowka.xml'), (u'Kolno', u'http://www.wspolczesna.pl/kolno.xml'), (u'\u0141om\u017ca', u'http://www.wspolczesna.pl/lomza.xml'), (u'Mo\u0144ki', u'http://www.wspolczesna.pl/monki.xml'), (u'Olecko', u'http://www.wspolczesna.pl/olecko.xml'), (u'Ostro\u0142\u0119ka', u'http://www.wspolczesna.pl/ostroleka.xml'), (u'Powiat Bia\u0142ostocki', u'http://www.wspolczesna.pl/powiat.xml'), (u'Sejny', u'http://www.wspolczesna.pl/sejny.xml'), (u'Siemiatycze', u'http://www.wspolczesna.pl/siemiatycze.xml'), (u'Sok\xf3\u0142ka', u'http://www.wspolczesna.pl/sokolka.xml'), (u'Suwa\u0142ki', u'http://www.wspolczesna.pl/suwalki.xml'), (u'Wysokie Mazowieckie', u'http://www.wspolczesna.pl/wysokie.xml'), (u'Zambr\xf3w', u'http://www.wspolczesna.pl/zambrow.xml'), (u'Sport', u'http://www.wspolczesna.pl/sport.xml'), (u'Praca', u'http://www.wspolczesna.pl/praca.xml'), (u'Dom', u'http://www.wspolczesna.pl/dom.xml'), (u'Auto', u'http://www.wspolczesna.pl/auto.xml'), (u'Zdrowie', u'http://www.wspolczesna.pl/zdrowie.xml')]
def get_cover_url(self):
soup = self.index_to_soup(self.INDEX + '/apps/pbcs.dll/section?Category=JEDYNKI')
nexturl = self.INDEX + soup.find(id='covers').find('a')['href']
soup = self.index_to_soup(nexturl)
self.cover_url = self.INDEX + soup.find(id='cover').find(name='img')['src']
return getattr(self, 'cover_url', self.cover_url)
def append_page(self, soup, appendtag):
tag = soup.find('span', attrs={'class':'photoNavigationPages'})
if tag:
number = int(tag.string.rpartition('/')[-1].replace('&nbsp;', ''))
baseurl = self.INDEX + soup.find(attrs={'class':'photoNavigationNext'})['href'][:-1]
for r in appendtag.findAll(attrs={'class':'photoNavigation'}):
r.extract()
for nr in range(2, number+1):
soup2 = self.index_to_soup(baseurl + str(nr))
pagetext = soup2.find(id='photoContainer')
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
pagetext = soup2.find(attrs={'class':'photoMeta'})
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
pagetext = soup2.find(attrs={'class':'photoStoryText'})
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
def preprocess_html(self, soup):
self.append_page(soup, soup.body)
return soup

View File

@ -6,7 +6,7 @@ class Gazeta_Wyborcza(BasicNewsRecipe):
title = u'Gazeta.pl' title = u'Gazeta.pl'
__author__ = 'fenuks, Artur Stachecki' __author__ = 'fenuks, Artur Stachecki'
language = 'pl' language = 'pl'
description = 'news from gazeta.pl' description = 'Wiadomości z Polski i ze świata. Serwisy tematyczne i lokalne w 20 miastach.'
category = 'newspaper' category = 'newspaper'
publication_type = 'newspaper' publication_type = 'newspaper'
masthead_url = 'http://bi.gazeta.pl/im/5/10285/z10285445AA.jpg' masthead_url = 'http://bi.gazeta.pl/im/5/10285/z10285445AA.jpg'

83
recipes/gcn.recipe Normal file
View File

@ -0,0 +1,83 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe
class GCN(BasicNewsRecipe):
title = u'Gazeta Codziennej Nowiny'
__author__ = 'fenuks'
description = u'nowiny24.pl - portal regionalny województwa podkarpackiego.'
category = 'newspaper'
language = 'pl'
encoding = 'iso-8859-2'
extra_css = 'ul {list-style: none; padding:0; margin:0;}'
INDEX = 'http://www.nowiny24.pl'
masthead_url = INDEX + '/images/top_logo.png'
oldest_article = 7
max_articles_per_feed = 100
remove_empty_feeds = True
no_stylesheets = True
ignore_duplicate_articles = {'title', 'url'}
preprocess_regexps = [(re.compile(ur'Czytaj:.*?</a>', re.DOTALL), lambda match: ''), (re.compile(ur'Przeczytaj także:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''),
(re.compile(ur'Przeczytaj również:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''), (re.compile(ur'Zobacz też:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: '')]
keep_only_tags = [dict(id=['article', 'cover', 'photostory'])]
remove_tags = [dict(id=['articleTags', 'articleMeta', 'boxReadIt', 'articleGalleries', 'articleConnections',
'ForumArticleComments', 'articleRecommend', 'jedynkiLinks', 'articleGalleryConnections',
'photostoryConnections', 'articleEpaper', 'articlePoll', 'articleAlarm', 'articleByline']),
dict(attrs={'class':'articleFunctions'})]
feeds = [(u'Wszystkie', u'http://www.nowiny24.pl/rss.xml'),
(u'Podkarpacie', u'http://www.nowiny24.pl/podkarpacie.xml'),
(u'Bieszczady', u'http://www.nowiny24.pl/bieszczady.xml'),
(u'Rzeszów', u'http://www.nowiny24.pl/rzeszow.xml'),
(u'Przemyśl', u'http://www.nowiny24.pl/przemysl.xml'),
(u'Leżajsk', u'http://www.nowiny24.pl/lezajsk.xml'),
(u'Łańcut', u'http://www.nowiny24.pl/lancut.xml'),
(u'Dębica', u'http://www.nowiny24.pl/debica.xml'),
(u'Jarosław', u'http://www.nowiny24.pl/jaroslaw.xml'),
(u'Krosno', u'http://www.nowiny24.pl/krosno.xml'),
(u'Mielec', u'http://www.nowiny24.pl/mielec.xml'),
(u'Nisko', u'http://www.nowiny24.pl/nisko.xml'),
(u'Sanok', u'http://www.nowiny24.pl/sanok.xml'),
(u'Stalowa Wola', u'http://www.nowiny24.pl/stalowawola.xml'),
(u'Tarnobrzeg', u'http://www.nowiny24.pl/tarnobrzeg.xml'),
(u'Sport', u'http://www.nowiny24.pl/sport.xml'),
(u'Dom', u'http://www.nowiny24.pl/dom.xml'),
(u'Auto', u'http://www.nowiny24.pl/auto.xml'),
(u'Praca', u'http://www.nowiny24.pl/praca.xml'),
(u'Zdrowie', u'http://www.nowiny24.pl/zdrowie.xml'),
(u'Wywiady', u'http://www.nowiny24.pl/wywiady.xml')]
def get_cover_url(self):
soup = self.index_to_soup(self.INDEX + '/apps/pbcs.dll/section?Category=JEDYNKI')
nexturl = self.INDEX + soup.find(id='covers').find('a')['href']
soup = self.index_to_soup(nexturl)
self.cover_url = self.INDEX + soup.find(id='cover').find(name='img')['src']
return getattr(self, 'cover_url', self.cover_url)
def append_page(self, soup, appendtag):
tag = soup.find('span', attrs={'class':'photoNavigationPages'})
if tag:
number = int(tag.string.rpartition('/')[-1].replace('&nbsp;', ''))
baseurl = self.INDEX + soup.find(attrs={'class':'photoNavigationNext'})['href'][:-1]
for r in appendtag.findAll(attrs={'class':'photoNavigation'}):
r.extract()
for nr in range(2, number+1):
soup2 = self.index_to_soup(baseurl + str(nr))
pagetext = soup2.find(id='photoContainer')
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
pagetext = soup2.find(attrs={'class':'photoMeta'})
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
pagetext = soup2.find(attrs={'class':'photoStoryText'})
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
def preprocess_html(self, soup):
self.append_page(soup, soup.body)
return soup

View File

@ -0,0 +1,34 @@
from calibre.web.feeds.news import BasicNewsRecipe
class GlosWielkopolski(BasicNewsRecipe):
title = u'G\u0142os Wielkopolski'
__author__ = 'fenuks'
description = u'Gazeta Regionalna Głos Wielkopolski. Najnowsze Wiadomości Poznań. Czytaj Informacje Poznań!'
category = 'newspaper'
language = 'pl'
encoding = 'iso-8859-2'
masthead_url = 'http://s.polskatimes.pl/g/logo_naglowek/gloswielkopolski.png?24'
oldest_article = 7
max_articles_per_feed = 100
remove_empty_feeds= True
no_stylesheets = True
use_embedded_content = False
ignore_duplicate_articles = {'title', 'url'}
#preprocess_regexps = [(re.compile(ur'<b>Czytaj także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur',<b>Czytaj też:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>Zobacz także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<center><h4><a.*?</a></h4></center>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TEŻ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ WIĘCEJ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TAKŻE:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>\* CZYTAJ KONIECZNIE:.*', re.DOTALL), lambda match: '</body>'), (re.compile(ur'<b>Nasze serwisy:</b>.*', re.DOTALL), lambda match: '</body>') ]
remove_tags_after= dict(attrs={'src':'http://nm.dz.com.pl/dz.png'})
remove_tags=[dict(id='mat-podobne'), dict(name='a', attrs={'class':'czytajDalej'}), dict(attrs={'src':'http://nm.dz.com.pl/dz.png'})]
feeds = [(u'Wszystkie', u'http://gloswielkopolski.feedsportal.com/c/32980/f/533779/index.rss?201302'), (u'Wiadomo\u015bci', u'http://gloswielkopolski.feedsportal.com/c/32980/f/533780/index.rss?201302'), (u'Sport', u'http://gloswielkopolski.feedsportal.com/c/32980/f/533781/index.rss?201302'), (u'Kultura', u'http://gloswielkopolski.feedsportal.com/c/32980/f/533782/index.rss?201302'), (u'Porady', u'http://www.gloswielkopolski.pl/rss/gloswielkopolski_porady.xml?201302'), (u'Blogi', u'http://www.gloswielkopolski.pl/rss/gloswielkopolski_blogi.xml?201302'), (u'Nasze akcje', u'http://www.gloswielkopolski.pl/rss/gloswielkopolski_naszeakcje.xml?201302'), (u'Opinie', u'http://www.gloswielkopolski.pl/rss/gloswielkopolski_opinie.xml?201302'), (u'Magazyn', u'http://www.gloswielkopolski.pl/rss/gloswielkopolski_magazyn.xml?201302')]
def print_version(self, url):
return url.replace('artykul', 'drukuj')
def skip_ad_pages(self, soup):
if 'Advertisement' in soup.title:
nexturl=soup.find('a')['href']
return self.index_to_soup(nexturl, raw=True)
def get_cover_url(self):
soup = self.index_to_soup('http://www.prasa24.pl/gazeta/glos-wielkopolski/')
self.cover_url=soup.find(id='pojemnik').img['src']
return getattr(self, 'cover_url', self.cover_url)

View File

@ -11,14 +11,13 @@ class Gram_pl(BasicNewsRecipe):
max_articles_per_feed = 100 max_articles_per_feed = 100
ignore_duplicate_articles = {'title', 'url'} ignore_duplicate_articles = {'title', 'url'}
no_stylesheets= True no_stylesheets= True
remove_empty_feeds = True
#extra_css = 'h2 {font-style: italic; font-size:20px;} .picbox div {float: left;}' #extra_css = 'h2 {font-style: italic; font-size:20px;} .picbox div {float: left;}'
cover_url=u'http://www.gram.pl/www/01/img/grampl_zima.png' cover_url=u'http://www.gram.pl/www/01/img/grampl_zima.png'
keep_only_tags= [dict(id='articleModule')] keep_only_tags= [dict(id='articleModule')]
remove_tags = [dict(attrs={'class':['breadCrump', 'dymek', 'articleFooter']})] remove_tags = [dict(attrs={'class':['breadCrump', 'dymek', 'articleFooter', 'twitter-share-button']})]
feeds = [(u'Informacje', u'http://www.gram.pl/feed_news.asp'), feeds = [(u'Informacje', u'http://www.gram.pl/feed_news.asp'),
(u'Publikacje', u'http://www.gram.pl/feed_news.asp?type=articles'), (u'Publikacje', u'http://www.gram.pl/feed_news.asp?type=articles')
(u'Kolektyw- Indie Games', u'http://indie.gram.pl/feed/'),
#(u'Kolektyw- Moto Games', u'http://www.motogames.gram.pl/news.rss')
] ]
def parse_feeds (self): def parse_feeds (self):

View File

@ -1,20 +1,23 @@
import time
from calibre.web.feeds.recipes import BasicNewsRecipe from calibre.web.feeds.recipes import BasicNewsRecipe
class GryOnlinePl(BasicNewsRecipe): class GryOnlinePl(BasicNewsRecipe):
title = u'Gry-Online.pl' title = u'Gry-Online.pl'
__author__ = 'fenuks' __author__ = 'fenuks'
description = 'Gry-Online.pl - computer games' description = u'Wiadomości o grach, recenzje, zapowiedzi. Encyklopedia Gier zawiera opisy gier na PC, konsole Xbox360, PS3 i inne platformy.'
category = 'games' category = 'games'
language = 'pl' language = 'pl'
oldest_article = 13 oldest_article = 13
INDEX= 'http://www.gry-online.pl/' INDEX = 'http://www.gry-online.pl/'
masthead_url='http://www.gry-online.pl/im/gry-online-logo.png' masthead_url = 'http://www.gry-online.pl/im/gry-online-logo.png'
cover_url='http://www.gry-online.pl/im/gry-online-logo.png' cover_url = 'http://www.gry-online.pl/im/gry-online-logo.png'
max_articles_per_feed = 100 max_articles_per_feed = 100
no_stylesheets= True no_stylesheets = True
keep_only_tags=[dict(name='div', attrs={'class':['gc660', 'gc660 S013']})] keep_only_tags = [dict(name='div', attrs={'class':['gc660', 'gc660 S013', 'news_endpage_tit', 'news_container', 'news']})]
remove_tags=[dict({'class':['nav-social', 'add-info', 'smlb', 'lista lista3 lista-gry', 'S013po', 'S013-npb', 'zm_gfx_cnt_bottom', 'ocen-txt', 'wiecej-txt', 'wiecej-txt2']})] remove_tags = [dict({'class':['nav-social', 'add-info', 'smlb', 'lista lista3 lista-gry', 'S013po', 'S013-npb', 'zm_gfx_cnt_bottom', 'ocen-txt', 'wiecej-txt', 'wiecej-txt2']})]
feeds = [(u'Newsy', 'http://www.gry-online.pl/rss/news.xml'), ('Teksty', u'http://www.gry-online.pl/rss/teksty.xml')] feeds = [
(u'Newsy', 'http://www.gry-online.pl/rss/news.xml'),
('Teksty', u'http://www.gry-online.pl/rss/teksty.xml')]
def append_page(self, soup, appendtag): def append_page(self, soup, appendtag):
@ -24,7 +27,14 @@ class GryOnlinePl(BasicNewsRecipe):
url_part = soup.find('link', attrs={'rel':'canonical'})['href'] url_part = soup.find('link', attrs={'rel':'canonical'})['href']
url_part = url_part[25:].rpartition('?')[0] url_part = url_part[25:].rpartition('?')[0]
for nexturl in nexturls[1:-1]: for nexturl in nexturls[1:-1]:
soup2 = self.index_to_soup('http://www.gry-online.pl/' + url_part + nexturl['href']) finalurl = 'http://www.gry-online.pl/' + url_part + nexturl['href']
for i in range(10):
try:
soup2 = self.index_to_soup(finalurl)
break
except:
print 'retrying in 0.5s'
time.sleep(0.5)
pagetext = soup2.find(attrs={'class':'gc660'}) pagetext = soup2.find(attrs={'class':'gc660'})
for r in pagetext.findAll(name='header'): for r in pagetext.findAll(name='header'):
r.extract() r.extract()
@ -34,7 +44,42 @@ class GryOnlinePl(BasicNewsRecipe):
appendtag.insert(pos, pagetext) appendtag.insert(pos, pagetext)
for r in appendtag.findAll(attrs={'class':['n5p', 'add-info', 'twitter-share-button', 'lista lista3 lista-gry']}): for r in appendtag.findAll(attrs={'class':['n5p', 'add-info', 'twitter-share-button', 'lista lista3 lista-gry']}):
r.extract() r.extract()
else:
tag = appendtag.find('div', attrs={'class':'S018stronyr'})
if tag:
nexturl = tag.a
url_part = soup.find('link', attrs={'rel':'canonical'})['href']
url_part = url_part[25:].rpartition('?')[0]
while tag:
end = tag.find(attrs={'class':'right left-dead'})
if end:
break
else:
nexturl = tag.a
finalurl = 'http://www.gry-online.pl/' + url_part + nexturl['href']
for i in range(10):
try:
soup2 = self.index_to_soup(finalurl)
break
except:
print 'retrying in 0.5s'
time.sleep(0.5)
tag = soup2.find('div', attrs={'class':'S018stronyr'})
pagetext = soup2.find(attrs={'class':'gc660'})
for r in pagetext.findAll(name='header'):
r.extract()
for r in pagetext.findAll(attrs={'itemprop':'description'}):
r.extract()
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
for r in appendtag.findAll(attrs={'class':['n5p', 'add-info', 'twitter-share-button', 'lista lista3 lista-gry', 'S018strony']}):
r.extract()
def image_url_processor(self, baseurl, url):
if url.startswith('..'):
return url[2:]
else:
return url
def preprocess_html(self, soup): def preprocess_html(self, soup):
self.append_page(soup, soup.body) self.append_page(soup, soup.body)

View File

@ -8,7 +8,6 @@ hatalska.com
''' '''
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
import re
class hatalska(BasicNewsRecipe): class hatalska(BasicNewsRecipe):
title = u'Hatalska' title = u'Hatalska'

BIN  New binary icon files (not shown): recipes/icons/biweekly.png (603 B), recipes/icons/echo_dnia.png (1.1 KiB), recipes/icons/gcn.png (554 B), recipes/icons/nto.png (416 B), plus additional recipe icons added in this commit.

55
recipes/jazzpress.recipe Normal file
View File

@ -0,0 +1,55 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = u'Łukasz Grąbczewski 2011-2013'
__version__ = '2.0'
import re, os
from calibre import walk
from calibre.utils.zipfile import ZipFile
from calibre.ptempfile import PersistentTemporaryFile
from calibre.web.feeds.news import BasicNewsRecipe
class jazzpress(BasicNewsRecipe):
__author__ = u'Łukasz Grąbczewski'
title = 'JazzPRESS'
language = 'pl'
publisher = 'Fundacja Popularyzacji Muzyki Jazzowej EuroJAZZ'
publication_type = 'magazine'
description = u'Internetowa gazeta poświęcona muzyce improwizowanej'
conversion_options = {
'authors' : 'Fundacja Popularyzacji Muzyki Jazzowej EuroJAZZ'
,'publisher' : publisher
,'language' : language
,'preserve_cover_aspect_ratio': True
,'remove_first_image': True
}
def build_index(self):
browser = self.get_browser()
browser.open('http://radiojazz.fm/')
# find the link
epublink = browser.find_link(url_regex=re.compile('e_jazzpress\d\d\d\d\_epub'))
# download ebook
self.report_progress(0,_('Downloading ePUB'))
response = browser.follow_link(epublink)
book_file = PersistentTemporaryFile(suffix='.epub')
book_file.write(response.read())
book_file.close()
# convert
self.report_progress(0.2,_('Converting to OEB'))
oeb = self.output_dir + '/INPUT/'
if not os.path.exists(oeb):
os.makedirs(oeb)
with ZipFile(book_file.name) as f:
f.extractall(path=oeb)
for f in walk(oeb):
if f.endswith('.opf'):
return f # convert

View File

@ -7,7 +7,7 @@ class Konflikty(BasicNewsRecipe):
__author__ = 'fenuks' __author__ = 'fenuks'
cover_url = 'http://www.konflikty.pl/images/tapety_logo.jpg' cover_url = 'http://www.konflikty.pl/images/tapety_logo.jpg'
language = 'pl' language = 'pl'
description ='military news' description = u'Zbiór ciekawych artykułów historycznych, militarnych oraz recenzji książek, gier i filmów. Najświeższe informacje o lotnictwie, wojskach lądowych i polityce.'
category='military, history' category='military, history'
oldest_article = 7 oldest_article = 7
max_articles_per_feed = 100 max_articles_per_feed = 100

View File

@ -7,7 +7,7 @@ class Kosmonauta(BasicNewsRecipe):
description = u'polskojęzyczny portal w całości dedykowany misjom kosmicznym i badaniom kosmosu.' description = u'polskojęzyczny portal w całości dedykowany misjom kosmicznym i badaniom kosmosu.'
category = 'astronomy' category = 'astronomy'
language = 'pl' language = 'pl'
cover_url='http://bi.gazeta.pl/im/4/10393/z10393414X,Kosmonauta-net.jpg' cover_url = 'http://bi.gazeta.pl/im/4/10393/z10393414X,Kosmonauta-net.jpg'
no_stylesheets = True no_stylesheets = True
INDEX = 'http://www.kosmonauta.net' INDEX = 'http://www.kosmonauta.net'
oldest_article = 7 oldest_article = 7
@ -24,6 +24,5 @@ class Kosmonauta(BasicNewsRecipe):
href = a['href'] href = a['href']
if not href.startswith('http'): if not href.startswith('http'):
a['href'] = self.INDEX + href a['href'] = self.INDEX + href
print '%%%%%%%%%%%%%%%%%%%%%%%%%', a['href']
return soup return soup

View File

@ -0,0 +1,34 @@
from calibre.web.feeds.news import BasicNewsRecipe
class KurierLubelski(BasicNewsRecipe):
title = u'Kurier Lubelski'
__author__ = 'fenuks'
description = u'Gazeta Regionalna Kurier Lubelski. Najnowsze Wiadomości Lublin. Czytaj Informacje Lublin!'
category = 'newspaper'
language = 'pl'
encoding = 'iso-8859-2'
masthead_url = 'http://s.polskatimes.pl/g/logo_naglowek/kurierlubelski.png?24'
oldest_article = 7
max_articles_per_feed = 100
remove_empty_feeds = True
no_stylesheets = True
use_embedded_content = False
ignore_duplicate_articles = {'title', 'url'}
#preprocess_regexps = [(re.compile(ur'<b>Czytaj także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur',<b>Czytaj też:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>Zobacz także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<center><h4><a.*?</a></h4></center>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TEŻ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ WIĘCEJ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TAKŻE:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>\* CZYTAJ KONIECZNIE:.*', re.DOTALL), lambda match: '</body>'), (re.compile(ur'<b>Nasze serwisy:</b>.*', re.DOTALL), lambda match: '</body>') ]
remove_tags_after= dict(attrs={'src':'http://nm.dz.com.pl/dz.png'})
remove_tags=[dict(id='mat-podobne'), dict(name='a', attrs={'class':'czytajDalej'}), dict(attrs={'src':'http://nm.dz.com.pl/dz.png'})]
feeds = [(u'Wiadomo\u015bci', u'http://kurierlubelski.feedsportal.com/c/32980/f/533785/index.rss?201302'), (u'Region', u'http://www.kurierlubelski.pl/rss/kurierlubelski_region.xml?201302'), (u'Sport', u'http://kurierlubelski.feedsportal.com/c/32980/f/533786/index.rss?201302'), (u'Kultura', u'http://kurierlubelski.feedsportal.com/c/32980/f/533787/index.rss?201302'), (u'Rozmaito\u015bci', u'http://www.kurierlubelski.pl/rss/kurierlubelski_rozmaitosci.xml?201302'), (u'Dom', u'http://www.kurierlubelski.pl/rss/kurierlubelski_dom.xml?201302'), (u'Serwisy', u'http://www.kurierlubelski.pl/rss/kurierlubelski_serwisy.xml?201302'), (u'Motofakty', u'http://www.kurierlubelski.pl/rss/kurierlubelski_motofakty.xml?201302'), (u'M\xf3j Reporter', u'http://www.kurierlubelski.pl/rss/kurierlubelski_mojreporter.xml?201302'), (u'Praca', u'http://www.kurierlubelski.pl/rss/kurierlubelski_praca.xml?201302')]
def print_version(self, url):
return url.replace('artykul', 'drukuj')
def skip_ad_pages(self, soup):
if 'Advertisement' in soup.title:
nexturl=soup.find('a')['href']
return self.index_to_soup(nexturl, raw=True)
def get_cover_url(self):
soup = self.index_to_soup('http://www.prasa24.pl/gazeta/kurier-lubelski/')
self.cover_url=soup.find(id='pojemnik').img['src']
return getattr(self, 'cover_url', self.cover_url)

View File

@ -0,0 +1,78 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe
class KurierPoranny(BasicNewsRecipe):
title = u'Kurier Poranny'
__author__ = 'fenuks'
description = u'Kurier Poranny | poranny.pl - portal miejski Białegostoku,informacje,wydarzenia'
category = 'newspaper'
language = 'pl'
encoding = 'iso-8859-2'
extra_css = 'ul {list-style: none; padding:0; margin:0;}'
INDEX = 'http://www.poranny.pl'
masthead_url = INDEX + '/images/top_logo.png'
oldest_article = 7
max_articles_per_feed = 100
remove_empty_feeds = True
no_stylesheets = True
ignore_duplicate_articles = {'title', 'url'}
preprocess_regexps = [(re.compile(ur'Czytaj:.*?</a>', re.DOTALL), lambda match: ''), (re.compile(ur'Przeczytaj także:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''),
(re.compile(ur'Przeczytaj również:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''), (re.compile(ur'Zobacz też:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: '')]
keep_only_tags = [dict(id=['article', 'cover', 'photostory'])]
remove_tags = [dict(id=['articleTags', 'articleMeta', 'boxReadIt', 'articleGalleries', 'articleConnections',
'ForumArticleComments', 'articleRecommend', 'jedynkiLinks', 'articleGalleryConnections',
'photostoryConnections', 'articleEpaper', 'articlePoll', 'articleAlarm', 'articleByline']),
dict(attrs={'class':'articleFunctions'})]
feeds = [(u'Wszystkie', u'http://www.poranny.pl/rss.xml'),
(u'Białystok', u'http://www.poranny.pl/bialystok.xml'),
(u'Bielsk Podlaski', u'http://www.poranny.pl/bielskpodlaski.xml'),
(u'Czarna Białostocka', u'http://www.poranny.pl/czarnabialostocka.xml'),
(u'Hajnówka', u'http://www.poranny.pl/hajnowka.xml'),
(u'Łapy', u'http://www.poranny.pl/lapy.xml'),
(u'Sokółka', u'http://www.poranny.pl/sokolka.xml'),
(u'Supraśl', u'http://www.poranny.pl/suprasl.xml'),
(u'Wasilków', u'http://www.poranny.pl/wasilkow.xml'),
(u'Sport', u'http://www.poranny.pl/sport.xml'),
(u'Praca', u'http://www.poranny.pl/praca.xml'),
(u'Kultura', u'http://www.poranny.pl/kultura.xml'),
(u'Dom', u'http://www.poranny.pl/dom.xml'),
(u'Auto', u'http://www.poranny.pl/auto.xml'),
(u'Polityka', u'http://www.poranny.pl/polityka.xml')]
def get_cover_url(self):
soup = self.index_to_soup(self.INDEX + '/apps/pbcs.dll/section?Category=JEDYNKI')
nexturl = self.INDEX + soup.find(id='covers').find('a')['href']
soup = self.index_to_soup(nexturl)
self.cover_url = self.INDEX + soup.find(id='cover').find(name='img')['src']
return getattr(self, 'cover_url', self.cover_url)
def append_page(self, soup, appendtag):
tag = soup.find('span', attrs={'class':'photoNavigationPages'})
if tag:
number = int(tag.string.rpartition('/')[-1].replace('&nbsp;', ''))
baseurl = self.INDEX + soup.find(attrs={'class':'photoNavigationNext'})['href'][:-1]
for r in appendtag.findAll(attrs={'class':'photoNavigation'}):
r.extract()
for nr in range(2, number+1):
soup2 = self.index_to_soup(baseurl + str(nr))
pagetext = soup2.find(id='photoContainer')
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
pagetext = soup2.find(attrs={'class':'photoMeta'})
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
pagetext = soup2.find(attrs={'class':'photoStoryText'})
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
def preprocess_html(self, soup):
self.append_page(soup, soup.body)
return soup

View File

@ -0,0 +1,27 @@
from calibre.web.feeds.news import BasicNewsRecipe
class KurierSzczecinski(BasicNewsRecipe):
title = u'Kurier Szczeci\u0144ski'
__author__ = 'fenuks'
description = u'24Kurier jest portalem Kuriera Szczecińskiego. Zawiera aktualności ze Szczecina oraz wiadomości regionalne z województwa zachodniopomorskiego. '
category = 'newspaper'
#publication_type = ''
language = 'pl'
#encoding = ''
#extra_css = ''
cover_url = 'http://www.24kurier.pl/Administracja/Img/24kurier_logo-copy-po-zapis'
#masthead_url = ''
use_embedded_content = False
oldest_article = 7
max_articles_per_feed = 100
no_stylesheets = True
remove_empty_feeds = True
remove_javascript = True
remove_attributes = ['style', 'font']
ignore_duplicate_articles = {'title', 'url'}
keep_only_tags = [dict(attrs={'class':'section'})]
remove_tags = [dict(attrs={'class':['Ikonki', 'rek', 'artComments']})]
remove_tags_after = dict(attrs={'class':'artComments'})
#remove_tags_before = dict()
feeds = [(u'Aktualno\u015bci', u'http://www.24kurier.pl/cmspages/articles_rss.aspx'), (u'Kraj', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=kraj'), (u'\u015awiat', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=swiat'), (u'Sport', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=sport'), (u'Kultura', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=kultura'), (u'Gospodarka', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=gospodarka'), (u'Nauka', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=nauka'), (u'Region', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=region'), (u'Szczecin', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=szczecin'), (u'Bia\u0142ogard', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=bialogard'), (u'Choszczno', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=choszczno'), (u'Drawsko', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=drawsko'), (u'Goleni\xf3w', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=goleniow'), (u'Gryfice', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=gryfice'), (u'Gryfino', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=gryfino'), (u'Kamie\u0144 Pomorski', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=kamien'), (u'Ko\u0142obrzeg', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=kolobrzeg'), (u'Koszalin', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=koszalin'), (u'\u0141obez', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=lobez'), (u'My\u015blib\xf3rz', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=mysliborz'), (u'Police', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=police'), (u'Pyrzyce', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=pyrzyce'), (u'S\u0142awno', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=slawno'), (u'Stargard', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=stargard'), (u'Szczecinek', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=szczecinek'), (u'\u015awidwin', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=swidwin'), (u'\u015awinouj\u015bcie', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=swinoujscie'), (u'Wa\u0142cz', u'http://www.24kurier.pl/cmspages/articles_rss.aspx?dzial=walcz')]

View File

@ -8,7 +8,6 @@ www.lifehacking.pl
''' '''
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
import re
class lifehacking(BasicNewsRecipe): class lifehacking(BasicNewsRecipe):
title = u'Lifehacker Polska' title = u'Lifehacker Polska'

View File

@ -3,7 +3,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class Lomza(BasicNewsRecipe): class Lomza(BasicNewsRecipe):
title = u'4Lomza' title = u'4Lomza'
__author__ = 'fenuks' __author__ = 'fenuks'
description = u'4Łomża - regional site' description = u'Regionalny portal. Najświeższe informacje z regionu, kulturalne, sportowe. Ogłoszenia, baza biznesu, forum.'
cover_url = 'http://www.4lomza.pl/i/logo4lomza_m.jpg' cover_url = 'http://www.4lomza.pl/i/logo4lomza_m.jpg'
language = 'pl' language = 'pl'
oldest_article = 15 oldest_article = 15

View File

@ -7,7 +7,7 @@ class Mlody_technik(BasicNewsRecipe):
description = u'Młody technik' description = u'Młody technik'
category = 'science' category = 'science'
language = 'pl' language = 'pl'
cover_url='http://science-everywhere.pl/wp-content/uploads/2011/10/mt12.jpg' #cover_url = 'http://science-everywhere.pl/wp-content/uploads/2011/10/mt12.jpg'
no_stylesheets = True no_stylesheets = True
preprocess_regexps = [(re.compile(r"<h4>Podobne</h4>", re.IGNORECASE), lambda m: '')] preprocess_regexps = [(re.compile(r"<h4>Podobne</h4>", re.IGNORECASE), lambda m: '')]
oldest_article = 7 oldest_article = 7
@ -18,10 +18,17 @@ class Mlody_technik(BasicNewsRecipe):
remove_tags = [dict(attrs={'class':'st-related-posts'})] remove_tags = [dict(attrs={'class':'st-related-posts'})]
remove_tags_after = dict(attrs={'class':'entry-content clearfix'}) remove_tags_after = dict(attrs={'class':'entry-content clearfix'})
feeds = [(u'Wszystko', u'http://www.mt.com.pl/feed'), feeds = [(u'Wszystko', u'http://www.mt.com.pl/feed'),
(u'MT NEWS 24/7', u'http://www.mt.com.pl/kategoria/mt-newsy-24-7/feed'), #(u'MT NEWS 24/7', u'http://www.mt.com.pl/kategoria/mt-newsy-24-7/feed'),
(u'Info zoom', u'http://www.mt.com.pl/kategoria/info-zoom/feed'), (u'Info zoom', u'http://www.mt.com.pl/kategoria/info-zoom/feed'),
(u'm.technik', u'http://www.mt.com.pl/kategoria/m-technik/feed'), (u'm.technik', u'http://www.mt.com.pl/kategoria/m-technik/feed'),
(u'Szkoła', u'http://www.mt.com.pl/kategoria/szkola-2/feed'), (u'Szkoła', u'http://www.mt.com.pl/kategoria/szkola-2/feed'),
(u'Na Warsztacie', u'http://www.mt.com.pl/kategoria/na-warsztacie/feed'), (u'Na Warsztacie', u'http://www.mt.com.pl/kategoria/na-warsztacie/feed'),
(u'Z pasji do...', u'http://www.mt.com.pl/kategoria/z-pasji-do/feed'), (u'Z pasji do...', u'http://www.mt.com.pl/kategoria/z-pasji-do/feed'),
(u'MT testuje', u'http://www.mt.com.pl/kategoria/mt-testuje/feed')] (u'MT testuje', u'http://www.mt.com.pl/kategoria/mt-testuje/feed')]
def get_cover_url(self):
soup = self.index_to_soup('http://www.mt.com.pl/')
tag = soup.find(attrs={'class':'xoxo'})
if tag:
self.cover_url = tag.find('img')['src']
return getattr(self, 'cover_url', self.cover_url)

View File

@ -9,8 +9,8 @@ class Niebezpiecznik_pl(BasicNewsRecipe):
oldest_article = 8 oldest_article = 8
max_articles_per_feed = 100 max_articles_per_feed = 100
no_stylesheets = True no_stylesheets = True
cover_url =u'http://userlogos.org/files/logos/Karmody/niebezpiecznik_01.png' cover_url = u'http://userlogos.org/files/logos/Karmody/niebezpiecznik_01.png'
remove_tags=[dict(name='div', attrs={'class':['sociable']}), dict(name='h4'), dict(attrs={'class':'similar-posts'})] remove_tags = [dict(name='div', attrs={'class':['sociable']}), dict(name='h4'), dict(attrs={'class':'similar-posts'})]
keep_only_tags= [dict(name='div', attrs={'class':['title', 'entry']})] keep_only_tags = [dict(name='div', attrs={'class':['title', 'entry']})]
feeds = [(u'Wiadomości', u'http://feeds.feedburner.com/niebezpiecznik/'), feeds = [(u'Wiadomości', u'http://feeds.feedburner.com/niebezpiecznik/'),
('Blog', 'http://feeds.feedburner.com/niebezpiecznik/linkblog/')] ('Blog', 'http://feeds.feedburner.com/niebezpiecznik/linkblog/')]

View File

@ -9,7 +9,7 @@ class Nowa_Fantastyka(BasicNewsRecipe):
__modified_by__ = 'zaslav' __modified_by__ = 'zaslav'
language = 'pl' language = 'pl'
encoding='latin2' encoding='latin2'
description ='site for fantasy readers' description = u'Strona dla miłośników fantastyki'
category='fantasy' category='fantasy'
masthead_url='http://farm5.static.flickr.com/4133/4956658792_7ba7fbf562.jpg' masthead_url='http://farm5.static.flickr.com/4133/4956658792_7ba7fbf562.jpg'
#extra_css='.tytul {font-size: 20px;}' #not working #extra_css='.tytul {font-size: 20px;}' #not working

63
recipes/nto.recipe Normal file
View File

@ -0,0 +1,63 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe
class NTO(BasicNewsRecipe):
title = u'Nowa Trybuna Opolska'
__author__ = 'fenuks'
description = u'Nowa Trybuna Opolska - portal regionalny województwa opolskiego.'
category = 'newspaper'
language = 'pl'
encoding = 'iso-8859-2'
extra_css = 'ul {list-style: none; padding:0; margin:0;}'
INDEX = 'http://www.nto.pl'
masthead_url = INDEX + '/images/top_logo.png'
oldest_article = 7
max_articles_per_feed = 100
remove_empty_feeds = True
no_stylesheets = True
ignore_duplicate_articles = {'title', 'url'}
preprocess_regexps = [(re.compile(ur'Czytaj:.*?</a>', re.DOTALL), lambda match: ''), (re.compile(ur'Przeczytaj także:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''),
(re.compile(ur'Przeczytaj również:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''), (re.compile(ur'Zobacz też:.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: '')]
keep_only_tags = [dict(id=['article', 'cover', 'photostory'])]
remove_tags = [dict(id=['articleTags', 'articleMeta', 'boxReadIt', 'articleGalleries', 'articleConnections',
'ForumArticleComments', 'articleRecommend', 'jedynkiLinks', 'articleGalleryConnections',
'photostoryConnections', 'articleEpaper', 'articlePoll', 'articleAlarm', 'articleByline']),
dict(attrs={'class':'articleFunctions'})]
feeds = [(u'Wszystkie', u'http://www.nto.pl/rss.xml'), (u'Region', u'http://www.nto.pl/region.xml'), (u'Brzeg', u'http://www.nto.pl/brzeg.xml'), (u'G\u0142ubczyce', u'http://www.nto.pl/glubczyce.xml'), (u'K\u0119dzierzyn-Ko\u017ale', u'http://www.nto.pl/kedzierzynkozle.xml'), (u'Kluczbork', u'http://www.nto.pl/kluczbork.xml'), (u'Krapkowice', u'http://www.nto.pl/krapkowice.xml'), (u'Namys\u0142\xf3w', u'http://www.nto.pl/namyslow.xml'), (u'Nysa', u'http://www.nto.pl/nysa.xml'), (u'Olesno', u'http://www.nto.pl/olesno.xml'), (u'Opole', u'http://www.nto.pl/opole.xml'), (u'Prudnik', u'http://www.nto.pl/prudnik.xml'), (u'Strzelce Opolskie', u'http://www.nto.pl/strzelceopolskie.xml'), (u'Sport', u'http://www.nto.pl/sport.xml'), (u'Polska i \u015bwiat', u'http://www.nto.pl/apps/pbcs.dll/section?Category=RSS&channel=KRAJSWIAT'), (u'Zdrowy styl', u'http://www.nto.pl/apps/pbcs.dll/section?Category=rss_zdrowystyl'), (u'Reporta\u017c', u'http://www.nto.pl/reportaz.xml'), (u'Studia', u'http://www.nto.pl/akademicka.xml')]
def get_cover_url(self):
soup = self.index_to_soup(self.INDEX + '/apps/pbcs.dll/section?Category=JEDYNKI')
nexturl = self.INDEX + soup.find(id='covers').find('a')['href']
soup = self.index_to_soup(nexturl)
self.cover_url = self.INDEX + soup.find(id='cover').find(name='img')['src']
return getattr(self, 'cover_url', self.cover_url)
def append_page(self, soup, appendtag):
tag = soup.find('span', attrs={'class':'photoNavigationPages'})
if tag:
number = int(tag.string.rpartition('/')[-1].replace('&nbsp;', ''))
baseurl = self.INDEX + soup.find(attrs={'class':'photoNavigationNext'})['href'][:-1]
for r in appendtag.findAll(attrs={'class':'photoNavigation'}):
r.extract()
for nr in range(2, number+1):
soup2 = self.index_to_soup(baseurl + str(nr))
pagetext = soup2.find(id='photoContainer')
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
pagetext = soup2.find(attrs={'class':'photoMeta'})
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
pagetext = soup2.find(attrs={'class':'photoStoryText'})
if pagetext:
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
def preprocess_html(self, soup):
self.append_page(soup, soup.body)
return soup

View File

@ -7,12 +7,12 @@ class PC_Foster(BasicNewsRecipe):
description = u'Vortal technologiczny: testy, recenzje sprzętu komputerowego i telefonów, nowinki hardware, programy i gry dla Windows. Podkręcanie, modding i Overclocking.' description = u'Vortal technologiczny: testy, recenzje sprzętu komputerowego i telefonów, nowinki hardware, programy i gry dla Windows. Podkręcanie, modding i Overclocking.'
category = 'IT' category = 'IT'
language = 'pl' language = 'pl'
masthead_url='http://pcfoster.pl/public/images/logo.png' masthead_url = 'http://pcfoster.pl/public/images/logo.png'
cover_url= 'http://pcfoster.pl/public/images/logo.png' cover_url = 'http://pcfoster.pl/public/images/logo.png'
no_stylesheets= True no_stylesheets = True
remove_empty_feeds= True remove_empty_feeds = True
keep_only_tags= [dict(id=['news_details', 'review_details']), dict(attrs={'class':'pager more_top'})] keep_only_tags = [dict(id=['news_details', 'review_details']), dict(attrs={'class':'pager more_top'})]
remove_tags=[dict(name='p', attrs={'class':'right'})] remove_tags = [dict(name='p', attrs={'class':'right'})]
feeds = [(u'G\u0142\xf3wny', u'http://pcfoster.pl/public/rss/main.xml')] feeds = [(u'G\u0142\xf3wny', u'http://pcfoster.pl/public/rss/main.xml')]

View File

@ -7,9 +7,11 @@ class PolskaTimes(BasicNewsRecipe):
language = 'pl' language = 'pl'
masthead_url = 'http://s.polskatimes.pl/g/logo_naglowek/polska.gif?17' masthead_url = 'http://s.polskatimes.pl/g/logo_naglowek/polska.gif?17'
oldest_article = 7 oldest_article = 7
encoding = 'iso-8859-2'
max_articles_per_feed = 100 max_articles_per_feed = 100
remove_emty_feeds= True remove_empty_feeds = True
no_stylesheets = True no_stylesheets = True
use_embedded_content = False
ignore_duplicate_articles = {'title', 'url'} ignore_duplicate_articles = {'title', 'url'}
#preprocess_regexps = [(re.compile(ur'<b>Czytaj także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur',<b>Czytaj też:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>Zobacz także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<center><h4><a.*?</a></h4></center>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TEŻ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ WIĘCEJ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TAKŻE:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>\* CZYTAJ KONIECZNIE:.*', re.DOTALL), lambda match: '</body>'), (re.compile(ur'<b>Nasze serwisy:</b>.*', re.DOTALL), lambda match: '</body>') ] #preprocess_regexps = [(re.compile(ur'<b>Czytaj także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur',<b>Czytaj też:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>Zobacz także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<center><h4><a.*?</a></h4></center>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TEŻ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ WIĘCEJ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TAKŻE:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>\* CZYTAJ KONIECZNIE:.*', re.DOTALL), lambda match: '</body>'), (re.compile(ur'<b>Nasze serwisy:</b>.*', re.DOTALL), lambda match: '</body>') ]
remove_tags_after= dict(attrs={'src':'http://nm.dz.com.pl/dz.png'}) remove_tags_after= dict(attrs={'src':'http://nm.dz.com.pl/dz.png'})

View File

@ -4,7 +4,7 @@ class SpidersWeb(BasicNewsRecipe):
title = u"Spider's Web" title = u"Spider's Web"
oldest_article = 7 oldest_article = 7
__author__ = 'fenuks' __author__ = 'fenuks'
description = u'Opinie i analizy na temat technologii' description = u'Autorskie teksty popularnych blogerów, testy sprzętu i aplikacji, oraz wiele więcej.'
cover_url = 'http://www.spidersweb.pl/wp-content/themes/new_sw/images/spidersweb.png' cover_url = 'http://www.spidersweb.pl/wp-content/themes/new_sw/images/spidersweb.png'
category = 'IT, WEB' category = 'IT, WEB'
language = 'pl' language = 'pl'

View File

@ -3,7 +3,7 @@ import re
class Tablety_pl(BasicNewsRecipe): class Tablety_pl(BasicNewsRecipe):
title = u'Tablety.pl' title = u'Tablety.pl'
__author__ = 'fenuks' __author__ = 'fenuks'
description = u'tablety.pl - latest tablet news' description = u'Tablety, gry i aplikacje na tablety.'
masthead_url= 'http://www.tablety.pl/wp-content/themes/kolektyw/img/logo.png' masthead_url= 'http://www.tablety.pl/wp-content/themes/kolektyw/img/logo.png'
cover_url = 'http://www.tablety.pl/wp-content/themes/kolektyw/img/logo.png' cover_url = 'http://www.tablety.pl/wp-content/themes/kolektyw/img/logo.png'
category = 'IT' category = 'IT'

View File

@ -4,6 +4,7 @@ class tanuki(BasicNewsRecipe):
title = u'Tanuki' title = u'Tanuki'
oldest_article = 7 oldest_article = 7
__author__ = 'fenuks' __author__ = 'fenuks'
description = u'Tanuki - portal o anime i mandze.'
category = 'anime, manga' category = 'anime, manga'
language = 'pl' language = 'pl'
max_articles_per_feed = 100 max_articles_per_feed = 100

View File

@ -3,7 +3,6 @@
__license__ = 'GPL v3' __license__ = 'GPL v3'
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
import re
class telepolis(BasicNewsRecipe): class telepolis(BasicNewsRecipe):

View File

@ -0,0 +1,37 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe
class Trojmiasto(BasicNewsRecipe):
title = u'Tr\xf3jmiasto.pl'
__author__ = 'fenuks'
description = u'Wiadomości, imprezy, wydarzenia, spektakle.Gdańsk, Gdynia, Sopot - NOCLEGI, Katalog firm, repertuar kin, wydarzenia, przewodnik, mapa, kwatery, hotele. Portal regionalny trojmiasto.pl'
category = ''
#publication_type = ''
language = 'pl'
encoding = 'iso-8859-2'
extra_css = 'ul {list-style: none; padding:0; margin:0;}'
cover_url = 'http://www.trojmiasto.pl/_img/toplong2/logo_trojmiasto.gif'
#masthead_url = ''
use_embedded_content = False
oldest_article = 7
max_articles_per_feed = 100
no_stylesheets = True
remove_empty_feeds = True
remove_javascript = True
remove_attributes = ['style', 'font']
ignore_duplicate_articles = {'title', 'url'}
preprocess_regexps = [(re.compile(ur'<strong>Czytaj więcej.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''), (re.compile(ur'<strong>Zobacz też.*?</a>', re.DOTALL|re.IGNORECASE), lambda match: ''),
(re.compile(ur'<b>[A-ZĄĆĘŁŃÓŚŹŻ \-,.:]*?</b>', re.DOTALL), lambda match: ''),]
#keep_only_tags = []
remove_tags = [dict(id=['logo', 'font_small', 'font_big']), dict(attrs={'class':['title-long', 'ankieta', 'newsletter-inside-content newsletter-wrap', 'copyright_box',
'logo', 'btn btn-photo-add', 'related-info-wrap', 'nTabs', 'article-list', 'rate-player horizontal', 'type-box', 'rate-player',
'hover-nav', 'live-head tC', 'prev-link', 'next-link', 'ie6']}), dict(attrs={'title':[u'drukuj artykuł', u'podziel się na Facebooku', u'prześlij artykuł']})]
remove_tags_after = dict(attrs={'class':'author-wrap'})
remove_tags_before = dict(attrs={'class':'text-container'})
feeds = [(u'Wszystkie', u'http://rss.trojmiasto.pl/rss,0.xml'), (u'Fakty i opinie', u'http://rss.trojmiasto.pl/rss,1.xml'), (u'Sport', u'http://rss.trojmiasto.pl/rss,2.xml'), (u'Dom', u'http://rss.trojmiasto.pl/rss,3.xml'), (u'Moto', u'http://rss.trojmiasto.pl/rss,4.xml'), (u'Nauka', u'http://rss.trojmiasto.pl/rss,5.xml'), (u'Rozrywka', u'http://rss.trojmiasto.pl/rss,6.xml'), (u'Kultura', u'http://rss.trojmiasto.pl/rss,7.xml'), (u'Rowery', u'http://rss.trojmiasto.pl/rss,8.xml'), (u'Dziecko', u'http://rss.trojmiasto.pl/rss,9.xml'), (u'Zdrowie i uroda', u'http://rss.trojmiasto.pl/rss,10.xml'), (u'Praca', u'http://rss.trojmiasto.pl/rss,11.xml'), (u'Artyku\u0142y czytelnik\xf3w', u'http://rss.trojmiasto.pl/rss,12.xml'), (u'Korki', u'http://rss.trojmiasto.pl/rss,13.xml'), (u'Historia', u'http://rss.trojmiasto.pl/rss,14.xml'), (u'Biznes', u'http://rss.trojmiasto.pl/rss,16.xml'), (u'Kryminalne Tr\xf3jmiasto', u'http://rss.trojmiasto.pl/rss,17.xml'), (u'Przewodnik', u'http://rss.trojmiasto.pl/rss,18.xml'), (u'Aktywne Tr\xf3jmiasto', u'http://rss.trojmiasto.pl/rss,19.xml'), (u'Delux', u'http://rss.trojmiasto.pl/rss,20.xml')]
def print_version(self, url):
return url + '?print=1'

View File

@ -8,8 +8,8 @@ class tvn24(BasicNewsRecipe):
description = u'Sport, Biznes, Gospodarka, Informacje, Wiadomości Zawsze aktualne wiadomości z Polski i ze świata' description = u'Sport, Biznes, Gospodarka, Informacje, Wiadomości Zawsze aktualne wiadomości z Polski i ze świata'
category = 'news' category = 'news'
language = 'pl' language = 'pl'
masthead_url= 'http://www.tvn24.pl/_d/topmenu/logo2.gif' #masthead_url= 'http://www.tvn24.pl/_d/topmenu/logo2.gif'
cover_url= 'http://www.tvn24.pl/_d/topmenu/logo2.gif' cover_url= 'http://www.qzdrowiu.pl/Upload/KnowQZdrowiu_PressOffice/TVN24_logo_575702b7-edce-4b6f-a41b-4395f9456f96_ff6d6ccf-528a-4b94-9e61-2fed727aba35.png'
extra_css= 'ul {list-style: none; padding: 0; margin: 0;} li {float: left;margin: 0 0.15em;}' extra_css= 'ul {list-style: none; padding: 0; margin: 0;} li {float: left;margin: 0 0.15em;}'
remove_empty_feeds = True remove_empty_feeds = True
remove_javascript = True remove_javascript = True

View File

@ -3,7 +3,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class Ubuntu_pl(BasicNewsRecipe): class Ubuntu_pl(BasicNewsRecipe):
title = u'UBUNTU.pl' title = u'UBUNTU.pl'
__author__ = 'fenuks' __author__ = 'fenuks'
description = 'UBUNTU.pl - polish ubuntu community site' description = 'Polskie forum użytkowników Ubuntu Linux. Projekty, porady i dyskusje, gotowe rozwiązania problemów.'
masthead_url= 'http://ubuntu.pl/img/logo.jpg' masthead_url= 'http://ubuntu.pl/img/logo.jpg'
cover_url = 'http://ubuntu.pl/img/logo.jpg' cover_url = 'http://ubuntu.pl/img/logo.jpg'
category = 'linux, IT' category = 'linux, IT'

View File

@ -0,0 +1,46 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = u'Łukasz Grąbczewski 2012-2013'
__version__ = '1.1'
'''
zw.com.pl
'''
from calibre.web.feeds.news import BasicNewsRecipe
class zyciewarszawy(BasicNewsRecipe):
__author__ = u'Łukasz Grączewski'
title = u'Życie Warszawy'
description = u'Wiadomości z Warszawy'
language = 'pl'
publisher = 'Presspublica'
publication_type = 'newspaper'
masthead_url = 'http://www.zw.com.pl/static/img/logo_zw.gif'
no_stylesheets = True
remove_javascript = True
oldest_article = 1 #daily news only
max_articles_per_feed = 100
feeds = [(u'Najnowsze', u'http://www.zw.com.pl/rss/1.html')]
keep_only_tags = []
keep_only_tags.append(dict(name = 'div', attrs = {'id' : 'storyp'}))
remove_tags = []
remove_tags.append(dict(name = 'div', attrs = {'class' : 'authordate'}))
remove_tags.append(dict(name = 'div', attrs = {'class' : 'author'}))
'''remove_tags.append(dict(name = 'div', attrs = {'class' : 'seealso'}))'''
remove_tags.append(dict(name = 'div', attrs = {'class' : 'more'}))
remove_tags.append(dict(name = 'div', attrs = {'class' : 'clr'}))
remove_tags.append(dict(name = 'div', attrs = {'id' : 'adk_0'}))
remove_tags.append(dict(name = 'div', attrs = {'id' : 'adsense_0'}))
remove_tags.append(dict(name = 'div', attrs = {'id' : 'share_bottom'}))
remove_tags.append(dict(name = 'div', attrs = {'id' : 'copyright_law'}))
def print_version(self, url):
url += "?print=tak"
return url