diff --git a/recipes/faznet.recipe b/recipes/faznet.recipe
index e2138dab6f..97b4cf14f1 100644
--- a/recipes/faznet.recipe
+++ b/recipes/faznet.recipe
@@ -8,7 +8,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
 
 class FazNet(BasicNewsRecipe):
     title = 'FAZ.NET'
-    __author__ = 'Kovid Goyal, Darko Miletic'
+    __author__ = 'Kovid Goyal, Darko Miletic, Armin Geller'  # AGe upd. V4 2014-01-10
     description = 'Frankfurter Allgemeine Zeitung'
     publisher = 'Frankfurter Allgemeine Zeitung GmbH'
     category = 'news, politics, Germany'
@@ -23,10 +23,11 @@ class FazNet(BasicNewsRecipe):
     keep_only_tags = [{'class':'FAZArtikelEinleitung'},
                       {'id':'ArtikelTabContent_0'}]
 
-    remove_tags_after = dict(name='div', attrs={'class':['ArtikelFooter']})  # AGe add 2013-12-19
-    remove_tags = [dict(name='div', attrs={'class':['ArtikelFooter']})]  # AGe add 2013-12-19
-    recursions = 1
-    match_regexps = [r'-p[2-9].html$']
+    remove_tags_after = [dict(name='div', attrs={'class':['ArtikelFooter']})]
+    remove_tags = [dict(name='div', attrs={'class':['ArtikelFooter']})]
+
+#    recursions = 1  # AGe 2014-01-10
+#    match_regexps = [r'-p[2-9].html$']  # AGe 2014-01-10
 
     feeds = [
         ('FAZ.NET Aktuell', 'http://www.faz.net/aktuell/?rssview=1'),
@@ -34,7 +35,7 @@ class FazNet(BasicNewsRecipe):
         ('Wirtschaft', 'http://www.faz.net/aktuell/wirtschaft/?rssview=1'),
         ('Feuilleton', 'http://www.faz.net/aktuell/feuilleton/?rssview=1'),
         ('Sport', 'http://www.faz.net/aktuell/sport/?rssview=1'),
-        ('Lebensstil', 'http://www.faz.net/aktuell/lebensstil/?rssview=1'),  # AGe add 2013-12-19
+        ('Lebensstil', 'http://www.faz.net/aktuell/lebensstil/?rssview=1'),
         ('Gesellschaft', 'http://www.faz.net/aktuell/gesellschaft/?rssview=1'),
         ('Finanzen', 'http://www.faz.net/aktuell/finanzen/?rssview=1'),
         ('Technik & Motor', 'http://www.faz.net/aktuell/technik-motor/?rssview=1'),
@@ -43,3 +44,29 @@ class FazNet(BasicNewsRecipe):
         ('Beruf & Chance', 'http://www.faz.net/aktuell/beruf-chance/?rssview=1'),
         ('Rhein-Main', 'http://www.faz.net/aktuell/rhein-main/?rssview=1')
     ]
+
+# AGe 2014-01-10 New for multipages
+    INDEX = ''
+    def append_page(self, soup, appendtag, position):
+        pager = soup.find('a', attrs={'title':'Nächste Seite'})
+        if pager:
+            nexturl = self.INDEX + pager['href']
+            soup2 = self.index_to_soup(nexturl)
+            texttag = soup2.find('div', attrs={'class':'FAZArtikelContent'})
+            texttag.find('div', attrs={'class':'ArtikelFooter'}).extract()
+            texttag.find('div', attrs={'class':'ArtikelAbbinder'}).extract()
+            texttag.find('div', attrs={'class':'ArtikelKommentieren Artikelfuss GETS;tk;boxen.top-lesermeinungen;tp;content'}).extract()
+            texttag.find('div', attrs={'class':'Anzeige GoogleAdsBuehne'}).extract()
+            texttag.find('div', attrs={'id':'ArticlePagerBottom'}).extract()
+            newpos = len(texttag.contents)
+            self.append_page(soup2, texttag, newpos)
+            texttag.extract()
+            pager.extract()
+            appendtag.insert(position, texttag)
+
+    def preprocess_html(self, soup):
+        self.append_page(soup, soup.body, 3)
+        pager = soup.find('div', attrs={'id':'ArticlePagerBottom'})
+        if pager:
+            pager.extract()
+        return self.adeify_images(soup)
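
Note on the new multipage handling (not part of the patch above): append_page() follows the 'Nächste Seite' pager link, fetches the continuation page, strips footer, ad and comment blocks from its FAZArtikelContent div, recurses for any further pages, and splices the result into the first page's body. Because .extract() is called unconditionally on each soup2.find(...) result, a continuation page that lacks one of those divs raises AttributeError and aborts that article. A minimal, None-safe sketch of the same cleanup, assuming the class/id names used in the patch and intended as a drop-in method inside the same FazNet class, could look like this:

    def append_page(self, soup, appendtag, position):
        # Follow the 'Nächste Seite' pager link, if the article has more pages
        pager = soup.find('a', attrs={'title': 'Nächste Seite'})
        if pager is None:
            return
        soup2 = self.index_to_soup(self.INDEX + pager['href'])
        texttag = soup2.find('div', attrs={'class': 'FAZArtikelContent'})
        if texttag is None:
            return
        # Drop footer/ad/comment blocks only when they are actually present
        for attrs in ({'class': 'ArtikelFooter'},
                      {'class': 'ArtikelAbbinder'},
                      {'class': 'ArtikelKommentieren Artikelfuss GETS;tk;boxen.top-lesermeinungen;tp;content'},
                      {'class': 'Anzeige GoogleAdsBuehne'},
                      {'id': 'ArticlePagerBottom'}):
            tag = texttag.find('div', attrs=attrs)
            if tag is not None:
                tag.extract()
        # Recurse first so deeper pages land inside texttag, then splice it in
        self.append_page(soup2, texttag, len(texttag.contents))
        texttag.extract()
        pager.extract()
        appendtag.insert(position, texttag)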