Kovid Goyal 2015-10-18 08:39:29 +05:30
commit f2c01547f8
7 changed files with 2 additions and 162 deletions


@@ -1,43 +0,0 @@
#!/usr/bin/env python2
# -*- coding: utf-8 -*-

__license__ = 'GPL v3'
__copyright__ = u'Łukasz Grąbczewski 2013'
__version__ = '1.0'

'''
bachormagazyn.pl
'''

from calibre.web.feeds.news import BasicNewsRecipe


class bachormagazyn(BasicNewsRecipe):
    __author__ = u'Łukasz Grączewski'
    title = u'Bachor Magazyn'
    description = u'Alternatywny magazyn o alternatywach rodzicielstwa'
    language = 'pl'
    publisher = 'Bachor Mag.'
    publication_type = 'magazine'
    masthead_url = 'http://bachormagazyn.pl/wp-content/uploads/2011/10/bachor_header1.gif'

    no_stylesheets = True
    remove_javascript = True
    use_embedded_content = False
    remove_empty_feeds = True

    oldest_article = 32  # monthly +1
    max_articles_per_feed = 100

    feeds = [
        (u'Bezradnik dla nieudacznych rodziców', u'http://bachormagazyn.pl/feed/')
    ]

    keep_only_tags = []
    keep_only_tags.append(dict(name='div', attrs={'id': 'content'}))

    remove_tags = []
    remove_tags.append(dict(attrs={'id': 'nav-above'}))
    remove_tags.append(dict(attrs={'id': 'nav-below'}))
    remove_tags.append(dict(attrs={'id': 'comments'}))
    remove_tags.append(dict(attrs={'class': 'entry-info'}))
    remove_tags.append(dict(attrs={'class': 'comments-link'}))
    remove_tags.append(dict(attrs={'class': 'sharedaddy sd-sharing-enabled'}))


@@ -1,28 +0,0 @@
from calibre.web.feeds.news import BasicNewsRecipe


class blognexto(BasicNewsRecipe):
    title = 'BLOG.NEXTO.pl'
    __author__ = 'MrStefan <mrstefaan@gmail.com>'
    language = 'pl'
    description = 'o e-publikacjach prawie wszystko'
    masthead_url = 'http://blog.nexto.pl/wp-content/uploads/2012/04/logo-blog-nexto.pl_.jpg'

    remove_empty_feeds = True
    oldest_article = 7
    max_articles_per_feed = 100
    remove_javascript = True
    no_stylesheets = True

    keep_only_tags = []
    keep_only_tags.append(dict(name='div', attrs={'id': 'content'}))

    remove_tags = []
    remove_tags.append(dict(name='div', attrs={'class': 'comment-cloud'}))
    remove_tags.append(dict(name='p', attrs={'class': 'post-date1'}))
    remove_tags.append(dict(name='div', attrs={'class': 'fb-like'}))
    remove_tags.append(dict(name='div', attrs={'class': 'tags'}))
    remove_tags.append(dict(name='div', attrs={'class': 'postnavi'}))
    remove_tags.append(dict(name='div', attrs={'class': 'commments-box'}))
    remove_tags.append(dict(name='div', attrs={'id': 'respond'}))

    feeds = [('Artykuly', 'http://feeds.feedburner.com/blognexto')]


@@ -4,7 +4,7 @@ import re
 class Ciekawostki_Historyczne(BasicNewsRecipe):
     title = u'Ciekawostki Historyczne'
     oldest_article = 7
-    __author__ = 'fenuks'
+    __author__ = u'fenuks & Tomasz Długosz'
     description = u'Serwis popularnonaukowy - odkrycia, kontrowersje, historia, ciekawostki, badania, ciekawostki z przeszłości.'
     category = 'history'
     language = 'pl'
@@ -18,7 +18,7 @@ class Ciekawostki_Historyczne(BasicNewsRecipe):
     remove_empty_feeds = True
     keep_only_tags = [dict(name='div', attrs={'class':'post'})]
     recursions = 5
-    remove_tags = [dict(id='singlepostinfo')]
+    remove_tags = [dict(id='singlepostinfo'), dict(attrs={'class':['books short floatRight','unprintable','booksTable','bawmrp']})]
     feeds = [(u'Staro\u017cytno\u015b\u0107', u'http://ciekawostkihistoryczne.pl/tag/starozytnosc/feed/'), (u'\u015aredniowiecze', u'http://ciekawostkihistoryczne.pl/tag/sredniowiecze/feed/'), (u'Nowo\u017cytno\u015b\u0107', u'http://ciekawostkihistoryczne.pl/tag/nowozytnosc/feed/'), (u'XIX wiek', u'http://ciekawostkihistoryczne.pl/tag/xix-wiek/feed/'), (u'1914-1939', u'http://ciekawostkihistoryczne.pl/tag/1914-1939/feed/'), (u'1939-1945', u'http://ciekawostkihistoryczne.pl/tag/1939-1945/feed/'), (u'Powojnie (od 1945)', u'http://ciekawostkihistoryczne.pl/tag/powojnie/feed/'), (u'Recenzje', u'http://ciekawostkihistoryczne.pl/category/recenzje/feed/')]
@@ -35,4 +35,3 @@ class Ciekawostki_Historyczne(BasicNewsRecipe):
         soup.find('h6').nextSibling.extract()
         return soup

Binary file not shown. (before: 620 B)

Binary file not shown. (before: 533 B)

Binary file not shown. (before: 636 B)


@@ -1,88 +0,0 @@
#!/usr/bin/env python2

__license__ = 'GPL v3'

'''
magazynconsido.pl/
'''

from calibre.web.feeds.news import BasicNewsRecipe
from calibre.utils.magick import Image


class magazynconsido(BasicNewsRecipe):
    title = u'Magazyn Consido'
    __author__ = 'Artur Stachecki <artur.stachecki@gmail.com>, teepel <teepel44@gmail.com>'
    language = 'pl'
    description = u'Portal dla architektów i projektantów'
    masthead_url = 'http://qualitypixels.pl/wp-content/themes/airlock/advance/inc/timthumb.php?src=http://qualitypixels.pl/wp-content/uploads/2012/01/logotyp-magazynconsido-11.png&w=455&zc=1'
    oldest_article = 7
    max_articles_per_feed = 100
    remove_javascript = True
    no_stylesheets = True
    use_embedded_content = False

    keep_only_tags = []
    keep_only_tags.append(dict(name='h1'))
    keep_only_tags.append(dict(name='p'))
    keep_only_tags.append(dict(attrs={'class': 'navigation'}))
    remove_tags = [dict(attrs={'style': 'font-size: x-small;'})]
    remove_tags_after = [dict(attrs={'class': 'navigation'})]

    extra_css = '''img {max-width:30%; max-height:30%; display: block; margin-left: auto; margin-right: auto;}
                   h1 {text-align: center;}'''

    def parse_index(self):  # (kk)
        # Build a section -> article-list mapping from the Feedburner feed.
        soup = self.index_to_soup('http://feeds.feedburner.com/magazynconsido?format=xml')
        feeds = []
        articles = {}
        sections = []
        section = ''

        for item in soup.findAll('item'):
            section = self.tag_to_string(item.category)
            if not articles.has_key(section):
                sections.append(section)
                articles[section] = []
            article_url = self.tag_to_string(item.guid)
            article_title = self.tag_to_string(item.title)
            article_date = self.tag_to_string(item.pubDate)
            article_description = self.tag_to_string(item.description)
            articles[section].append({'title': article_title, 'url': article_url, 'date': article_date, 'description': article_description})

        for section in sections:
            if section == 'Video':
                # The 'Video' section is appended and immediately popped, i.e. skipped.
                feeds.append((section, articles[section]))
                feeds.pop()
            else:
                feeds.append((section, articles[section]))
        return feeds

    def append_page(self, soup, appendtag):
        # Follow the wp-pagenavi "next" link and merge the next page's paragraphs.
        apage = soup.find('div', attrs={'class': 'wp-pagenavi'})
        if apage is not None:
            nexturl = soup.find('a', attrs={'class': 'nextpostslink'})
            soup2 = self.index_to_soup(nexturl['href'])
            pagetext = soup2.findAll('p')
            for tag in pagetext:
                pos = len(appendtag.contents)
                appendtag.insert(pos, tag)

        # Strip sharing widgets, comments and navigation leftovers.
        while appendtag.find('div', attrs={'class': ['height: 35px;', 'post-meta', 'addthis_toolbox addthis_default_style addthis_', 'post-meta-bottom', 'block_recently_post', 'fbcomments', 'pin-it-button', 'pages', 'navigation']}) is not None:
            appendtag.find('div', attrs={'class': ['height: 35px;', 'post-meta', 'addthis_toolbox addthis_default_style addthis_', 'post-meta-bottom', 'block_recently_post', 'fbcomments', 'pin-it-button', 'pages', 'navigation']}).replaceWith('')

    def preprocess_html(self, soup):  # (kk)
        self.append_page(soup, soup.body)
        return self.adeify_images(soup)

    def postprocess_html(self, soup, first):
        # Convert all article images to grayscale.
        for tag in soup.findAll(lambda tag: tag.name.lower() == 'img' and tag.has_key('src')):
            iurl = tag['src']
            img = Image()
            img.open(iurl)
            if img < 0:
                raise RuntimeError('Out of memory')
            img.type = "GrayscaleType"
            img.save(iurl)
        return soup