calibre/recipes/gazeta_pomorska.recipe

import re
from calibre.web.feeds.news import BasicNewsRecipe


class GazetaPomorska(BasicNewsRecipe):
    title = u'Gazeta Pomorska'
    __author__ = 'Richard z forum.eksiazki.org, fenuks'
    description = u'Gazeta Pomorska - portal regionalny'
    category = 'newspaper'
    language = 'pl'
    encoding = 'iso-8859-2'
    extra_css = 'ul {list-style: none; padding:0; margin:0;}'
    INDEX = 'http://www.pomorska.pl'
    masthead_url = INDEX + '/images/top_logo.png'
    oldest_article = 7
    max_articles_per_feed = 100
    remove_empty_feeds = True
    no_stylesheets = True
    ignore_duplicate_articles = {'title', 'url'}
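
    # Strip inline "Czytaj:", "Przeczytaj także:", "Przeczytaj również:"
    # and "Zobacz też:" cross-promotion links from article bodies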
    preprocess_regexps = [
        (re.compile(ur'Czytaj:.*?</a>', re.DOTALL), lambda match: ''),
        (re.compile(ur'Przeczytaj także:.*?</a>', re.DOTALL | re.IGNORECASE), lambda match: ''),
        (re.compile(ur'Przeczytaj również:.*?</a>', re.DOTALL | re.IGNORECASE), lambda match: ''),
        (re.compile(ur'Zobacz też:.*?</a>', re.DOTALL | re.IGNORECASE), lambda match: ''),
    ]

    keep_only_tags = [dict(id=['article', 'cover', 'photostory'])]
    remove_tags = [
        dict(id=['articleTags', 'articleMeta', 'boxReadIt', 'articleGalleries', 'articleConnections',
                 'ForumArticleComments', 'articleRecommend', 'jedynkiLinks', 'articleGalleryConnections',
                 'photostoryConnections', 'articleEpaper', 'articlePoll', 'articleAlarm', 'articleByline']),
        dict(attrs={'class': 'articleFunctions'}),
    ]

    feeds = [
        (u'Wszystkie', u'http://www.pomorska.pl/rss.xml'),
        (u'Region', u'http://www.pomorska.pl/region.xml'),
        (u'Bydgoszcz', u'http://www.pomorska.pl/bydgoszcz.xml'),
        (u'Nakło', u'http://www.pomorska.pl/naklo.xml'),
        (u'Koronowo', u'http://www.pomorska.pl/koronowo.xml'),
        (u'Solec Kujawski', u'http://www.pomorska.pl/soleckujawski.xml'),
        (u'Grudziądz', u'http://www.pomorska.pl/grudziadz.xml'),
        (u'Inowrocław', u'http://www.pomorska.pl/inowroclaw.xml'),
        (u'Toruń', u'http://www.pomorska.pl/torun.xml'),
        (u'Włocławek', u'http://www.pomorska.pl/wloclawek.xml'),
        (u'Aleksandrów Kujawski', u'http://www.pomorska.pl/aleksandrow.xml'),
        (u'Brodnica', u'http://www.pomorska.pl/brodnica.xml'),
        (u'Chełmno', u'http://www.pomorska.pl/chelmno.xml'),
        (u'Chojnice', u'http://www.pomorska.pl/chojnice.xml'),
        (u'Ciechocinek', u'http://www.pomorska.pl/ciechocinek.xml'),
        (u'Golub-Dobrzyń', u'http://www.pomorska.pl/golubdobrzyn.xml'),
        (u'Mogilno', u'http://www.pomorska.pl/mogilno.xml'),
        (u'Radziejów', u'http://www.pomorska.pl/radziejow.xml'),
        (u'Rypin', u'http://www.pomorska.pl/rypin.xml'),
        (u'Sępólno', u'http://www.pomorska.pl/sepolno.xml'),
        (u'Świecie', u'http://www.pomorska.pl/swiecie.xml'),
        (u'Tuchola', u'http://www.pomorska.pl/tuchola.xml'),
        (u'Żnin', u'http://www.pomorska.pl/znin.xml'),
        (u'Sport', u'http://www.pomorska.pl/sport.xml'),
        (u'Zdrowie', u'http://www.pomorska.pl/zdrowie.xml'),
        (u'Auto', u'http://www.pomorska.pl/moto.xml'),
        (u'Dom', u'http://www.pomorska.pl/dom.xml'),
        # (u'Reportaż', u'http://www.pomorska.pl/reportaz.xml'),
        (u'Gospodarka', u'http://www.pomorska.pl/gospodarka.xml'),
    ]
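
    # Follow the 'covers' listing in the JEDYNKI (front pages) section to today's front-page image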
    def get_cover_url(self):
        soup = self.index_to_soup(self.INDEX + '/apps/pbcs.dll/section?Category=JEDYNKI')
        nexturl = self.INDEX + soup.find(id='covers').find('a')['href']
        soup = self.index_to_soup(nexturl)
        self.cover_url = self.INDEX + soup.find(id='cover').find(name='img')['src']
        return getattr(self, 'cover_url', self.cover_url)
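
    # Photostories are paginated; fetch pages 2..N and append their content
    # (photoContainer, photoMeta, photoStoryText) to the first page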
    def append_page(self, soup, appendtag):
        tag = soup.find('span', attrs={'class': 'photoNavigationPages'})
        if tag:
            # Total page count is the number after the slash, e.g. "1/5"
            number = int(tag.string.rpartition('/')[-1].replace('&nbsp;', ''))
            baseurl = self.INDEX + soup.find(attrs={'class': 'photoNavigationNext'})['href'][:-1]
            # Drop the page-navigation widgets from the output
            for r in appendtag.findAll(attrs={'class': 'photoNavigation'}):
                r.extract()
            for nr in range(2, number + 1):
                soup2 = self.index_to_soup(baseurl + str(nr))
                pagetext = soup2.find(id='photoContainer')
                if pagetext:
                    pos = len(appendtag.contents)
                    appendtag.insert(pos, pagetext)
                pagetext = soup2.find(attrs={'class': 'photoMeta'})
                if pagetext:
                    pos = len(appendtag.contents)
                    appendtag.insert(pos, pagetext)
                pagetext = soup2.find(attrs={'class': 'photoStoryText'})
                if pagetext:
                    pos = len(appendtag.contents)
                    appendtag.insert(pos, pagetext)
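
    # Stitch multi-page photostories together before the article is converted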
    def preprocess_html(self, soup):
        self.append_page(soup, soup.body)
        return soup