Updated Sueddeutsche Zeitung

Kovid Goyal 2010-04-28 07:05:07 -06:00
parent 3fa5efd5d6
commit a174d25164
2 changed files with 32 additions and 35 deletions

View File

@@ -5,9 +5,8 @@ __copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
 www.sueddeutsche.de/sz/
 '''
-import urllib
-from calibre import strftime
 from calibre.web.feeds.news import BasicNewsRecipe
+from calibre import strftime

 class SueddeutcheZeitung(BasicNewsRecipe):
     title = 'Sueddeutche Zeitung'
@@ -20,12 +19,13 @@ class SueddeutcheZeitung(BasicNewsRecipe):
     encoding              = 'cp1252'
     needs_subscription    = True
     remove_empty_feeds    = True
+    delay                 = 2
     PREFIX                = 'http://www.sueddeutsche.de'
-    INDEX                 = PREFIX + strftime('/sz/%Y-%m-%d/')
-    LOGIN                 = PREFIX + '/app/lbox/index.html'
+    INDEX                 = PREFIX + '/app/epaper/textversion/'
     use_embedded_content  = False
-    masthead_url          = 'http://pix.sueddeutsche.de/img/g_.gif'
+    masthead_url          = 'http://pix.sueddeutsche.de/img/layout/header/logo.gif'
     language              = 'de'
+    publication_type      = 'newspaper'
     extra_css             = ' body{font-family: Arial,Helvetica,sans-serif} '

     conversion_options = {
@@ -40,49 +40,49 @@ class SueddeutcheZeitung(BasicNewsRecipe):

     def get_browser(self):
         br = BasicNewsRecipe.get_browser()
-        br.open(self.INDEX)
         if self.username is not None and self.password is not None:
-            data = urllib.urlencode({ 'login_name':self.username
-                                     ,'login_passwort':self.password
-                                     ,'lboxaction':'doLogin'
-                                     ,'passtxt':'Passwort'
-                                     ,'referer':self.INDEX
-                                     ,'x':'22'
-                                     ,'y':'7'
-                                    })
-            br.open(self.LOGIN,data)
+            br.open(self.INDEX)
+            br.select_form(name='lbox')
+            br['login_name'    ] = self.username
+            br['login_passwort'] = self.password
+            br.submit()
         return br

     remove_tags =[
            dict(attrs={'class':'hidePrint'})
           ,dict(name=['link','object','embed','base','iframe'])
         ]
-    remove_tags_before = dict(name='h2')
-    remove_tags_after  = dict(attrs={'class':'author'})
+    keep_only_tags     = [dict(attrs={'class':'artikelBox'})]
+    remove_tags_before = dict(attrs={'class':'artikelTitel'})
+    remove_tags_after  = dict(attrs={'class':'author'})

     feeds = [
-              (u'Politik'      , INDEX + 'politik/'      )
-             ,(u'Seite drei'   , INDEX + 'seitedrei/'    )
-             ,(u'Meinungsseite', INDEX + 'meinungsseite/')
-             ,(u'Wissen'       , INDEX + 'wissen/'       )
-             ,(u'Panorama'     , INDEX + 'panorama/'     )
-             ,(u'Feuilleton'   , INDEX + 'feuilleton/'   )
-             ,(u'Medien'       , INDEX + 'medien/'       )
-             ,(u'Wirtschaft'   , INDEX + 'wirtschaft/'   )
-             ,(u'Sport'        , INDEX + 'sport/'        )
-             ,(u'Bayern'       , INDEX + 'bayern/'       )
-             ,(u'Muenchen'     , INDEX + 'muenchen/'     )
-             ,(u'jetzt.de'     , INDEX + 'jetzt.de/'     )
+              (u'Politik'      , INDEX + 'Politik/'      )
+             ,(u'Seite drei'   , INDEX + 'Seite+drei/'   )
+             ,(u'Meinungsseite', INDEX + 'Meinungsseite/')
+             ,(u'Wissen'       , INDEX + 'Wissen/'       )
+             ,(u'Panorama'     , INDEX + 'Panorama/'     )
+             ,(u'Feuilleton'   , INDEX + 'Feuilleton/'   )
+             ,(u'Medien'       , INDEX + 'Medien/'       )
+             ,(u'Wirtschaft'   , INDEX + 'Wirtschaft/'   )
+             ,(u'Sport'        , INDEX + 'Sport/'        )
+             ,(u'Bayern'       , INDEX + 'Bayern/'       )
+             ,(u'Muenchen'     , INDEX + 'M%FCnchen/'    )
             ]

     def parse_index(self):
+        src = self.index_to_soup(self.INDEX)
+        id = ''
+        for itt in src.findAll('a',href=True):
+            if itt['href'].startswith('/app/epaper/textversion/inhalt/'):
+                id = itt['href'].rpartition('/inhalt/')[2]
         totalfeeds = []
         lfeeds = self.get_feeds()
         for feedobj in lfeeds:
             feedtitle, feedurl = feedobj
             self.report_progress(0, _('Fetching feed')+' %s...'%(feedtitle if feedtitle else feedurl))
             articles = []
-            soup = self.index_to_soup(feedurl)
+            soup = self.index_to_soup(feedurl + id)
             tbl = soup.find(attrs={'class':'szprintd'})
             for item in tbl.findAll(name='td',attrs={'class':'topthema'}):
                 atag = item.find(attrs={'class':'Titel'}).a
@@ -101,7 +101,3 @@ class SueddeutcheZeitung(BasicNewsRecipe):
                               })
             totalfeeds.append((feedtitle, articles))
         return totalfeeds
-
-    def print_version(self, url):
-        return url + 'print.html'
-
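The recipe change replaces two fragile pieces. The login no longer hand-builds an urlencoded POST against a separate LOGIN endpoint, with hard-coded hidden fields such as lboxaction, passtxt, x and y, but fills the site's own 'lbox' form through mechanize, which carries along whatever hidden fields the form actually contains. And the date-based /sz/YYYY-MM-DD/ index is replaced by scraping the current issue id from the e-paper text-version page and appending it to each section URL. A minimal standalone sketch of both steps, under the assumption of Python 2-era mechanize and BeautifulSoup 3 (the stack calibre used at the time); the form name, field names and URL pattern are taken from the diff above, the standalone wrapper functions are mine:

    import mechanize
    from BeautifulSoup import BeautifulSoup   # BeautifulSoup 3, as bundled with calibre

    PREFIX = 'http://www.sueddeutsche.de'
    INDEX  = PREFIX + '/app/epaper/textversion/'

    def login(username, password):
        br = mechanize.Browser()
        br.open(INDEX)
        br.select_form(name='lbox')            # the site's login box; hidden fields come along for free
        br['login_name']     = username
        br['login_passwort'] = password
        br.submit()                            # mechanize POSTs the form and keeps the session cookie
        return br

    def find_issue_id(br):
        # Section links on the index look like /app/epaper/textversion/inhalt/<id>;
        # the part after '/inhalt/' identifies the current issue and is appended
        # to every section URL before fetching it.
        soup = BeautifulSoup(br.open(INDEX).read())
        for a in soup.findAll('a', href=True):
            if a['href'].startswith('/app/epaper/textversion/inhalt/'):
                return a['href'].rpartition('/inhalt/')[2]
        return ''

Letting mechanize fill the real form is the more robust design: if the site renames or adds a hidden field, the submission still matches what a browser would send, whereas the old urlencoded dict would silently go stale.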

View File

@@ -61,7 +61,8 @@ class EXTHHeader(object):
                 # last update time
                 pass
             elif id == 503: # Long title
-                if not title or title == _('Unknown') or 'USER_CONTENT' in title:
+                if not title or title == _('Unknown') or \
+                        'USER_CONTENT' in title or title.startswith('dtp_'):
                     try:
                         title = content.decode(codec)
                     except:
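The second file tightens the rule for when the MOBI reader prefers the long title stored in EXTH record 503 over the one from the file header: besides empty, 'Unknown', and 'USER_CONTENT' placeholders, titles beginning with 'dtp_' are now also treated as junk. A sketch of the resulting rule in isolation; the function names and the exception choices are illustrative, not calibre's:

    def is_placeholder(title):
        # Titles matching any of these are considered machine-generated junk
        # and should be replaced by the EXTH 503 long title.
        return (not title
                or title == 'Unknown'
                or 'USER_CONTENT' in title
                or title.startswith('dtp_'))

    def apply_long_title(title, content, codec):
        if is_placeholder(title):
            try:
                return content.decode(codec)   # record payload, decoded with the book's codec
            except (UnicodeDecodeError, LookupError):
                pass                           # undecodable payload: keep the old title
        return title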