Sueddeutsche Zeitung by Darko Miletic

Kovid Goyal 2010-02-11 14:11:22 -07:00
parent b0cb18f0c5
commit 2129993f9b
2 changed files with 107 additions and 0 deletions

New binary image file added (492 B; contents not shown).

@@ -0,0 +1,107 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
www.sueddeutsche.de/sz/
'''
import urllib
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe

class SueddeutcheZeitung(BasicNewsRecipe):
    title                = 'Sueddeutsche Zeitung'
    __author__           = 'Darko Miletic'
    description          = 'News from Germany. Access to paid content.'
    publisher            = 'Sueddeutsche Zeitung'
    category             = 'news, politics, Germany'
    no_stylesheets       = True
    oldest_article       = 2
    encoding             = 'cp1252'
    needs_subscription   = True
    remove_empty_feeds   = True
    PREFIX               = 'http://www.sueddeutsche.de'
    INDEX                = PREFIX + strftime('/sz/%Y-%m-%d/')
    LOGIN                = PREFIX + '/app/lbox/index.html'
    use_embedded_content = False
    masthead_url         = 'http://pix.sueddeutsche.de/img/g_.gif'
    language             = 'de_DE'
    extra_css            = ' body{font-family: Arial,Helvetica,sans-serif} '

    conversion_options = {
                           'comment'          : description
                         , 'tags'             : category
                         , 'publisher'        : publisher
                         , 'language'         : language
                         , 'linearize_tables' : True
                         }

    remove_attributes = ['height', 'width']
    def get_browser(self):
        br = BasicNewsRecipe.get_browser()
        br.open(self.INDEX)
        if self.username is not None and self.password is not None:
            # Encode and POST the subscriber credentials to the login form
            data = urllib.urlencode({ 'login_name'     : self.username
                                     ,'login_passwort' : self.password
                                     ,'lboxaction'     : 'doLogin'
                                     ,'passtxt'        : 'Passwort'
                                     ,'referer'        : self.INDEX
                                     ,'x'              : '22'
                                     ,'y'              : '7'
                                    })
            br.open(self.LOGIN, data)
        return br
    remove_tags = [
                    dict(attrs={'class':'hidePrint'})
                   ,dict(name=['link','object','embed','base','iframe'])
                  ]
    remove_tags_before = dict(name='h2')
    remove_tags_after  = dict(attrs={'class':'author'})
    feeds = [
              (u'Politik'      , INDEX + 'politik/'      )
             ,(u'Seite drei'   , INDEX + 'seitedrei/'    )
             ,(u'Meinungsseite', INDEX + 'meinungsseite/')
             ,(u'Wissen'       , INDEX + 'wissen/'       )
             ,(u'Panorama'     , INDEX + 'panorama/'     )
             ,(u'Feuilleton'   , INDEX + 'feuilleton/'   )
             ,(u'Medien'       , INDEX + 'medien/'       )
             ,(u'Wirtschaft'   , INDEX + 'wirtschaft/'   )
             ,(u'Sport'        , INDEX + 'sport/'        )
             ,(u'Bayern'       , INDEX + 'bayern/'       )
             ,(u'Muenchen'     , INDEX + 'muenchen/'     )
             ,(u'jetzt.de'     , INDEX + 'jetzt.de/'     )
            ]
    def parse_index(self):
        totalfeeds = []
        lfeeds = self.get_feeds()
        for feedobj in lfeeds:
            feedtitle, feedurl = feedobj
            self.report_progress(0, _('Fetching feed') + ' %s...' % (feedtitle if feedtitle else feedurl))
            articles = []
            soup = self.index_to_soup(feedurl)
            # Each section index page lists its articles inside a table of class 'szprintd'
            tbl = soup.find(attrs={'class':'szprintd'})
            for item in tbl.findAll(name='td', attrs={'class':'topthema'}):
                atag = item.find(attrs={'class':'Titel'}).a
                ptag = item.find('p')
                stag = ptag.find('script')
                if stag:
                    stag.extract()
                url         = self.PREFIX + atag['href']
                title       = self.tag_to_string(atag)
                description = self.tag_to_string(ptag)
                articles.append({
                                  'title'       : title
                                 ,'date'        : strftime(self.timefmt)
                                 ,'url'         : url
                                 ,'description' : description
                                })
            totalfeeds.append((feedtitle, articles))
        return totalfeeds
    def print_version(self, url):
        return url + 'print.html'
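
A minimal way to try a recipe like this locally, assuming the source above is saved as sueddeutsche.recipe (hypothetical filename) and calibre's ebook-convert is on the PATH; the --username and --password options supply the subscription credentials that needs_subscription expects:

    ebook-convert sueddeutsche.recipe sueddeutsche.epub --username YOUR_LOGIN --password YOUR_PASSWORD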