Updated recipe for Spiegel international
commit 339df810b8
parent fb0062dd69
@@ -17,7 +17,7 @@ class Barrons(BasicNewsRecipe):
     needs_subscription = True
     language = 'en'

-    __author__ = 'Kovid Goyal'
+    __author__ = 'Kovid Goyal and Sujata Raman'
     description = 'Weekly publication for investors from the publisher of the Wall Street Journal'
     timefmt = ' [%a, %b %d, %Y]'
     use_embedded_content = False
@@ -8,9 +8,10 @@ spiegel.de

 from calibre.web.feeds.news import BasicNewsRecipe

+
 class Spiegel_int(BasicNewsRecipe):
     title = 'Spiegel Online International'
-    __author__ = 'Darko Miletic'
+    __author__ = 'Darko Miletic and Sujata Raman'
     description = "News and POV from Europe's largest newsmagazine"
     oldest_article = 7
     max_articles_per_feed = 100
@@ -21,7 +22,8 @@ class Spiegel_int(BasicNewsRecipe):
     publisher = 'SPIEGEL ONLINE GmbH'
     category = 'news, politics, Germany'
     lang = 'en'
+    recursions = 1
+    match_regexps = [r'http://www.spiegel.de/.*-[1-9],00.html']
     conversion_options = {
           'comments' : description
          ,'tags'     : category
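A note on the two attributes this hunk adds: in calibre's BasicNewsRecipe, recursions sets how many levels of links are followed from each downloaded article page, and match_regexps limits which of those links are actually fetched. Together they let the recipe pull in the ",00.html" continuation pages of multi-page Spiegel articles instead of stopping at page one. A minimal sketch of the idea (the recipe name and feed URL below are illustrative, not part of this commit):

from calibre.web.feeds.news import BasicNewsRecipe

class MultiPageSketch(BasicNewsRecipe):
    # Illustrative recipe, not part of the calibre sources.
    title = 'Multi-page article sketch'
    feeds = [(u'Example feed', u'http://example.com/rss.xml')]

    # Follow links found inside each article page, one level deep ...
    recursions = 1
    # ... but only links whose URL looks like a spiegel.de continuation
    # page (the trailing "-2,00.html", "-3,00.html", ... pages).
    match_regexps = [r'http://www.spiegel.de/.*-[1-9],00.html']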
@@ -30,11 +32,63 @@ class Spiegel_int(BasicNewsRecipe):
         ,'pretty_print': True
     }

-    remove_tags_after = dict(name='div', attrs={'id':'spArticleBody'})
+    extra_css = '''
+        #spArticleColumn{font-family:verdana,arial,helvetica,geneva,sans-serif ; }
+        h1{color:#666666; font-weight:bold;}
+        h2{color:#990000;}
+        h3{color:#990000;}
+        h4 {color:#990000;}
+        a{color:#990000;}
+        .spAuthor{font-style:italic;}
+        #spIntroTeaser{font-weight:bold;}
+        .spCredit{color:#666666; font-size:x-small;}
+        .spShortDate{font-size:x-small;}
+        .spArticleImageBox {font-size:x-small;}
+        .spPhotoGallery{font-size:x-small; color:#990000 ;}
+        '''
+
+    keep_only_tags = [
+        dict(name ='div', attrs={'id': ['spArticleImageBox spAssetAlignleft','spArticleColumn']}),
+    ]
+
+    remove_tags = [
+        dict(name='div', attrs={'id':['spSocialBookmark','spArticleFunctions','spMultiPagerHeadlines',]}),
+        dict(name='div', attrs={'class':['spCommercial spM520','spArticleCredit','spPicZoom']}),
+    ]
+
     feeds = [(u'Spiegel Online', u'http://www.spiegel.de/schlagzeilen/rss/0,5291,676,00.xml')]

-    def print_version(self, url):
-        main, sep, rest = url.rpartition(',')
-        rmain, rsep, rrest = main.rpartition(',')
-        return rmain + ',druck-' + rrest + ',' + rest
+    def postprocess_html(self, soup, first):
+
+        for tag in soup.findAll(name='div', attrs={'id':"spMultiPagerControl"}):
+            tag.extract()
+
+        p = soup.find(name='p', attrs={'id':'spIntroTeaser'})
+
+        if p.string is not None:
+            t = p.string.rpartition(':')[0]
+
+            if 'Part' in t:
+                if soup.h1 is not None:
+                    soup.h1.extract()
+                if soup.h2 is not None:
+                    soup.h2.extract()
+                functag = soup.find(name='div', attrs={'id':"spArticleFunctions"})
+                if functag is not None:
+                    functag.extract()
+                auttag = soup.find(name='p', attrs={'class':"spAuthor"})
+                if auttag is not None:
+                    auttag.extract()
+
+        pictag = soup.find(name='div', attrs={'id':"spArticleTopAsset"})
+        if pictag is not None:
+            pictag.extract()
+
+        return soup
+
+    # def print_version(self, url):
+    #     main, sep, rest = url.rpartition(',')
+    #     rmain, rsep, rrest = main.rpartition(',')
+    #     return rmain + ',druck-' + rrest + ',' + rest