from calibre.web.feeds.news import BasicNewsRecipe


class Gram_pl(BasicNewsRecipe):
    title = u'Gram.pl'
    __author__ = 'fenuks'
    description = 'Gram.pl - site about computer games'
    category = 'games'
    language = 'pl'
    oldest_article = 8
    index = 'http://www.gram.pl'
    max_articles_per_feed = 100
    no_stylesheets = True
    extra_css = 'h2 {font-style: italic; font-size:20px;} .picbox div {float: left;}'
    cover_url = u'http://www.gram.pl/www/01/img/grampl_zima.png'
    remove_tags = [
        dict(name='p', attrs={'class': ['extraText', 'must-log-in']}),
        dict(attrs={'class': ['el', 'headline', 'post-info', 'entry-footer clearfix']}),
        dict(name='div', attrs={'class': ['twojaOcena', 'comment-body', 'comment-author vcard',
                                          'comment-meta commentmetadata', 'tw_button',
                                          'entry-comment-counter',
                                          'snap_nopreview sharing robots-nocontent']}),
        dict(id=['igit_rpwt_css', 'comments', 'reply-title', 'igit_title'])
    ]
    keep_only_tags = [
        dict(name='div', attrs={'class': ['main', 'arkh-postmetadataheader', 'arkh-postcontent',
                                          'post', 'content', 'news_header', 'news_subheader',
                                          'news_text']}),
        dict(attrs={'class': ['contentheading', 'contentpaneopen']}),
        dict(name='article')
    ]

    feeds = [
        (u'Informacje', u'http://www.gram.pl/feed_news.asp'),
        (u'Publikacje', u'http://www.gram.pl/feed_news.asp?type=articles'),
        (u'Kolektyw- Indie Games', u'http://indie.gram.pl/feed/'),
        # (u'Kolektyw- Moto Games', u'http://www.motogames.gram.pl/news.rss')
    ]

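    # Drop unwanted feed entries: shop advertisements ('REKLAMA SKLEP')
    # and entries whose titles start with 'ARTYKUŁ:'.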
    def parse_feeds(self):
        feeds = BasicNewsRecipe.parse_feeds(self)
        for feed in feeds:
            for article in feed.articles[:]:
                if 'REKLAMA SKLEP' in article.title.upper() or u'ARTYKUŁ:' in article.title.upper():
                    feed.articles.remove(article)
        return feeds

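    # Follow the 'next page' link (class 'cpn') of paginated articles and
    # append each subsequent page to the first, stripping pager boxes
    # (id 'pgbox'), repeated headings and scripts along the way.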
    def append_page(self, soup, appendtag):
        nexturl = appendtag.find('a', attrs={'class': 'cpn'})
        while nexturl:
            soup2 = self.index_to_soup('http://www.gram.pl' + nexturl['href'])
            r = appendtag.find(id='pgbox')
            if r:
                r.extract()
            pagetext = soup2.find(attrs={'class': 'main'})
            r = pagetext.find('h1')
            if r:
                r.extract()
            r = pagetext.find('h2')
            if r:
                r.extract()
            for r in pagetext.findAll('script'):
                r.extract()
            pos = len(appendtag.contents)
            appendtag.insert(pos, pagetext)
            nexturl = appendtag.find('a', attrs={'class': 'cpn'})
        r = appendtag.find(id='pgbox')
        if r:
            r.extract()

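    # Assemble multi-page articles, float picture boxes to the left and
    # turn site-relative links into absolute ones.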
    def preprocess_html(self, soup):
        self.append_page(soup, soup.body)
        for tag in soup.findAll(name='div', attrs={'class': 'picbox'}):
            tag['style'] = 'float: left;'
        for a in soup('a'):
            if a.get('href') and 'http://' not in a['href'] and 'https://' not in a['href']:
                a['href'] = self.index + a['href']
        return soup