update esensja and add rss version

Tomasz Długosz 2013-03-21 00:27:17 +01:00
parent c7635f262c
commit 672b991af4
4 changed files with 248 additions and 71 deletions

View File

@@ -3,85 +3,153 @@
__license__ = 'GPL v3'
__copyright__ = '2010, matek09, matek09@gmail.com'

import re
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup, Comment


class Esensja(BasicNewsRecipe):
    title = u'Esensja'
    __author__ = 'matek09 & fenuks'
    description = 'Magazyn kultury popularnej'
    encoding = 'utf-8'
    no_stylesheets = True
    language = 'pl'
    remove_javascript = True
    masthead_url = 'http://esensja.pl/img/wrss.gif'
    oldest_article = 1
    URL = 'http://esensja.pl'
    HREF = '0'
    remove_attributes = ['style', 'bgcolor', 'alt', 'color']
    keep_only_tags = [dict(attrs={'class': 'sekcja'}), ]
    #keep_only_tags.append(dict(name='div', attrs={'class': 'article'}))
    #remove_tags_before = dict(name='div', attrs={'class': 't-title'})
    remove_tags_after = dict(id='tekst')
    remove_tags = [dict(name='img', attrs={'src': ['../../../2000/01/img/tab_top.gif', '../../../2000/01/img/tab_bot.gif']}),
                   dict(name='div', attrs={'class': 't-title2 nextpage'}),
                   #dict(attrs={'rel': 'lightbox[galeria]'})
                   dict(attrs={'class': ['tekst_koniec', 'ref', 'wykop']}),
                   dict(attrs={'itemprop': ['copyrightHolder', 'publisher']}),
                   dict(id='komentarze')
                   ]

    extra_css = '''
        .t-title {font-size: x-large; font-weight: bold; text-align: left}
        .t-author {font-size: x-small; text-align: left}
        .t-title2 {font-size: x-small; font-style: italic; text-align: left}
        .text {font-size: small; text-align: left}
        .annot-ref {font-style: italic; text-align: left}
    '''

    preprocess_regexps = [(re.compile(r'alt="[^"]*"'), lambda match: ''),
                          (re.compile(ur'(title|alt)="[^"]*?"', re.DOTALL), lambda match: ''),
                          ]

    def parse_index(self):
        # Locate the current issue and build section/article lists from
        # its table of contents.
        soup = self.index_to_soup('http://www.esensja.pl/magazyn/')
        a = soup.find('a', attrs={'href': re.compile('.*/index.html')})
        year = a['href'].split('/')[0]
        month = a['href'].split('/')[1]
        self.HREF = 'http://www.esensja.pl/magazyn/' + year + '/' + month + '/iso/'
        soup = self.index_to_soup(self.HREF + '01.html')
        self.cover_url = 'http://www.esensja.pl/magazyn/' + year + '/' + month + '/img/ilustr/cover_b.jpg'
        feeds = []
        chapter = ''
        subchapter = ''
        articles = []
        intro = soup.find('div', attrs={'class': 'n-title'})

        for tag in intro.findAllNext(attrs={'class': ['chapter', 'subchapter', 'n-title']}):
            if tag.name == 'td':
                # A chapter/subchapter header closes the previous section.
                if len(articles) > 0:
                    section = chapter
                    if len(subchapter) > 0:
                        section += ' - ' + subchapter
                    feeds.append((section, articles))
                    articles = []
                if tag['class'] == 'chapter':
                    chapter = self.tag_to_string(tag).capitalize()
                    subchapter = ''
                else:
                    subchapter = self.tag_to_string(tag)
                continue
            finalurl = tag.a['href']
            if not finalurl.startswith('http'):
                finalurl = self.HREF + finalurl
            articles.append({'title': self.tag_to_string(tag.a), 'url': finalurl, 'date': '', 'description': ''})
            # Add each continuation page of a multi-page article as its own
            # entry ('c. d.' = continuation).
            a = self.index_to_soup(finalurl)
            i = 1
            while True:
                div = a.find('div', attrs={'class': 't-title2 nextpage'})
                if div is not None:
                    link = div.a['href']
                    if not link.startswith('http'):
                        link = self.HREF + link
                    a = self.index_to_soup(link)
                    articles.append({'title': self.tag_to_string(tag.a) + ' c. d. ' + str(i), 'url': link, 'date': '', 'description': ''})
                    i = i + 1
                else:
                    break
        return feeds

    def append_page(self, soup, appendtag):
        # Follow the 'wiecej' (more) pager and append the remaining pages
        # of the article to the first page.
        r = appendtag.find(attrs={'class': 'wiecej_xxx'})
        if r:
            nr = r.findAll(attrs={'class': 'tn-link'})[-1]
            try:
                nr = int(nr.a.string)
            except:
                return
            baseurl = soup.find(attrs={'property': 'og:url'})['content'] + '&strona={0}'
            for number in range(2, nr + 1):
                soup2 = self.index_to_soup(baseurl.format(number))
                pagetext = soup2.find(attrs={'class': 'tresc'})
                pos = len(appendtag.contents)
                appendtag.insert(pos, pagetext)
            for r in appendtag.findAll(attrs={'class': ['wiecej_xxx', 'tekst_koniec']}):
                r.extract()
            for r in appendtag.findAll('script'):
                r.extract()
            comments = appendtag.findAll(text=lambda text: isinstance(text, Comment))
            for comment in comments:
                comment.extract()

    def preprocess_html(self, soup):
        self.append_page(soup, soup.body)
        # Replace the image-box markup with a bare <img> plus its caption.
        for tag in soup.findAll(attrs={'class': 'img_box_right'}):
            temp = tag.find('img')
            src = ''
            if temp:
                src = temp.get('src', '')
            for r in tag.findAll('a', recursive=False):
                r.extract()
            info = tag.find(attrs={'class': 'img_info'})
            text = str(tag)
            if not src:
                src = re.search('src="[^"]*?"', text)
                if src:
                    src = src.group(0)
                    src = src[5:-1].replace('//', '/')
            if src:
                tag.contents = []
                tag.insert(0, BeautifulSoup('<img src="{0}{1}" />'.format(self.URL, src)))
            if info:
                tag.insert(len(tag.contents), info)
        return soup
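For readers unfamiliar with calibre's recipe API: parse_index must return the structure BasicNewsRecipe expects, a list of (section title, article list) tuples in which every article is a dict with the four keys used above. A minimal sketch of that shape; the section name, title and URL below are hypothetical:

    feeds = [
        (u'Książka - Recenzje', [                # hypothetical section
            {'title': u'Przykładowa recenzja',   # hypothetical article title
             'url': 'http://www.esensja.pl/magazyn/2013/03/iso/01_01.html',  # hypothetical URL
             'date': '',                         # may stay empty, as in this recipe
             'description': ''},                 # may stay empty, as in this recipe
        ]),
    ]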

View File

@@ -0,0 +1,109 @@
__license__ = 'GPL v3'

import re
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup, Comment


class EsensjaRSS(BasicNewsRecipe):
    title = u'Esensja (RSS)'
    __author__ = 'fenuks'
    description = u'Magazyn kultury popularnej'
    category = 'reading, fantasy, reviews, boardgames, culture'
    #publication_type = ''
    language = 'pl'
    encoding = 'utf-8'
    INDEX = 'http://www.esensja.pl'
    extra_css = '''
        .t-title {font-size: x-large; font-weight: bold; text-align: left}
        .t-author {font-size: x-small; text-align: left}
        .t-title2 {font-size: x-small; font-style: italic; text-align: left}
        .text {font-size: small; text-align: left}
        .annot-ref {font-style: italic; text-align: left}
    '''
    cover_url = ''
    masthead_url = 'http://esensja.pl/img/wrss.gif'
    use_embedded_content = False
    oldest_article = 7
    max_articles_per_feed = 100
    no_stylesheets = True
    remove_empty_feeds = True
    remove_javascript = True
    ignore_duplicate_articles = {'title', 'url'}
    preprocess_regexps = [(re.compile(r'alt="[^"]*"'), lambda match: ''),
                          (re.compile(ur'(title|alt)="[^"]*?"', re.DOTALL), lambda match: ''),
                          ]
    remove_attributes = ['style', 'bgcolor', 'alt', 'color']
    keep_only_tags = [dict(attrs={'class': 'sekcja'}), ]
    remove_tags_after = dict(id='tekst')
    remove_tags = [dict(name='img', attrs={'src': ['../../../2000/01/img/tab_top.gif', '../../../2000/01/img/tab_bot.gif']}),
                   dict(name='div', attrs={'class': 't-title2 nextpage'}),
                   #dict(attrs={'rel': 'lightbox[galeria]'})
                   dict(attrs={'class': ['tekst_koniec', 'ref', 'wykop']}),
                   dict(attrs={'itemprop': ['copyrightHolder', 'publisher']}),
                   dict(id='komentarze')
                   ]

    feeds = [(u'Książka', u'http://esensja.pl/rss/ksiazka.rss'),
             (u'Film', u'http://esensja.pl/rss/film.rss'),
             (u'Komiks', u'http://esensja.pl/rss/komiks.rss'),
             (u'Gry', u'http://esensja.pl/rss/gry.rss'),
             (u'Muzyka', u'http://esensja.pl/rss/muzyka.rss'),
             (u'Twórczość', u'http://esensja.pl/rss/tworczosc.rss'),
             (u'Varia', u'http://esensja.pl/rss/varia.rss'),
             (u'Zgryźliwi Tetrycy', u'http://esensja.pl/rss/tetrycy.rss'),
             (u'Nowe książki', u'http://esensja.pl/rss/xnowosci.rss'),
             (u'Ostatnio dodane książki', u'http://esensja.pl/rss/xdodane.rss'),
             ]

    def get_cover_url(self):
        # The current issue's cover is linked from the 'panel_1' box on
        # the front page.
        soup = self.index_to_soup(self.INDEX)
        cover = soup.find(id='panel_1')
        self.cover_url = self.INDEX + cover.find('a')['href'].replace('index.html', '') + 'img/ilustr/cover_b.jpg'
        return self.cover_url

    def append_page(self, soup, appendtag):
        # Follow the 'wiecej' (more) pager and append the remaining pages
        # of the article to the first page.
        r = appendtag.find(attrs={'class': 'wiecej_xxx'})
        if r:
            nr = r.findAll(attrs={'class': 'tn-link'})[-1]
            try:
                nr = int(nr.a.string)
            except:
                return
            baseurl = soup.find(attrs={'property': 'og:url'})['content'] + '&strona={0}'
            for number in range(2, nr + 1):
                soup2 = self.index_to_soup(baseurl.format(number))
                pagetext = soup2.find(attrs={'class': 'tresc'})
                pos = len(appendtag.contents)
                appendtag.insert(pos, pagetext)
            for r in appendtag.findAll(attrs={'class': ['wiecej_xxx', 'tekst_koniec']}):
                r.extract()
            for r in appendtag.findAll('script'):
                r.extract()
            comments = appendtag.findAll(text=lambda text: isinstance(text, Comment))
            for comment in comments:
                comment.extract()

    def preprocess_html(self, soup):
        self.append_page(soup, soup.body)
        # Replace the image-box markup with a bare <img> plus its caption.
        for tag in soup.findAll(attrs={'class': 'img_box_right'}):
            temp = tag.find('img')
            src = ''
            if temp:
                src = temp.get('src', '')
            for r in tag.findAll('a', recursive=False):
                r.extract()
            info = tag.find(attrs={'class': 'img_info'})
            text = str(tag)
            if not src:
                src = re.search('src="[^"]*?"', text)
                if src:
                    src = src.group(0)
                    src = src[5:-1].replace('//', '/')
            if src:
                tag.contents = []
                tag.insert(0, BeautifulSoup('<img src="{0}{1}" />'.format(self.INDEX, src)))
            if info:
                tag.insert(len(tag.contents), info)
        return soup
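Both recipes end their append_page with the same BeautifulSoup clean-up idiom. A standalone sketch of what the script and comment stripping does, run on an assumed one-line HTML snippet:

    from calibre.ebooks.BeautifulSoup import BeautifulSoup, Comment

    # Assumed input: article text followed by an ad comment and an inline script.
    soup = BeautifulSoup('<div>tekst<!-- reklama --><script>ad()</script></div>')
    for s in soup.findAll('script'):
        s.extract()                 # drop inline scripts
    for c in soup.findAll(text=lambda t: isinstance(t, Comment)):
        c.extract()                 # drop HTML comments
    print(soup)                     # -> <div>tekst</div>

Either recipe can be smoke-tested from the command line with ebook-convert esensja.recipe .epub --test -vv (substitute whatever file name the recipe was saved under); --test fetches only a couple of articles per feed.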

BIN
recipes/icons/esenja.png Normal file
Binary file not shown. (new icon, 329 B)

BIN
Binary file not shown. (new icon, 329 B)