Author: Kovid Goyal
Date:   2012-05-03 21:00:13 +05:30
parent  c006e9cdca
commit  6ad106525c

@@ -11,7 +11,7 @@ import datetime
class Newsweek(BasicNewsRecipe):
    # how many issues to go back, 0 means get the most current one
    BACK_ISSUES = 1
@@ -26,8 +26,8 @@ class Newsweek(BasicNewsRecipe):
    language = 'pl'
    remove_javascript = True
    temp_files = []
    articles_are_obfuscated = True
    #
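These two attributes drive calibre's obfuscated-article machinery: with articles_are_obfuscated set, the downloader calls the recipe's get_obfuscated_article(url) hook and fetches from the local file path it returns, and temp_files keeps those files referenced. A minimal sketch of the hook (the class name, suffix, and page handling below are invented for illustration, not this recipe's code):

from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ptempfile import PersistentTemporaryFile

class ObfuscatedSketch(BasicNewsRecipe):
    title = 'Sketch'
    articles_are_obfuscated = True
    temp_files = []

    def get_obfuscated_article(self, url):
        # Fetch and (optionally) rewrite the page ourselves, then hand
        # calibre a local file to ingest instead of the original URL.
        raw = self.browser.open(url).read()
        self.temp_files.append(PersistentTemporaryFile('_sketch.html'))
        self.temp_files[-1].write(raw)
        self.temp_files[-1].close()
        return self.temp_files[-1].name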
@@ -40,7 +40,7 @@ class Newsweek(BasicNewsRecipe):
        page = self.index_to_soup(source)
        main_section = page.find(id='mainSection')
        title = main_section.find('h1')
        info = main_section.find('ul', attrs={'class' : 'articleInfo'})
        authors = info.find('li').find('h4')
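For readers unfamiliar with BeautifulSoup's find(), the calls in this hunk navigate the page roughly like this (the HTML snippet is invented for illustration):

from calibre.ebooks.BeautifulSoup import BeautifulSoup

page = BeautifulSoup('''<div id="mainSection">
  <h1>Headline</h1>
  <ul class="articleInfo"><li><h4>Jan Kowalski</h4></li></ul>
</div>''')
main_section = page.find(id='mainSection')                      # match by id
title = main_section.find('h1')                                 # first <h1> inside
info = main_section.find('ul', attrs={'class': 'articleInfo'})  # match by class
authors = info.find('li').find('h4')                            # <h4>Jan Kowalski</h4>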
@@ -50,25 +50,25 @@ class Newsweek(BasicNewsRecipe):
        related = article.find('div', attrs={'class' : 'relatedBox'})
        if related is not None:
            related.extract()

        # remove div with social networking links and links to
        # other articles in web version
        for div in article.findAll('div'):
            if div.find('span', attrs={'class' : 'google-plus'}):
                div.extract()

            for p in div.findAll('p'):
                if p.find('span', attrs={'style' : 'color: rgb(255, 0, 0);'}):
                    p.extract()
                    continue
                for a in p.findAll('a'):
                    if a.find('span', attrs={'style' : 'font-size: larger;'}):
                        a.extract()

        html = unicode(title) + unicode(authors) + unicode(article)
        next = main_section.find('li', attrs={'class' : 'next'})
        while next:
            url = next.find('a')['href']
            br.open(url)
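The while loop starting here stitches a multi-page article together by following the li.next link until it disappears. The same pattern as a standalone sketch ('recipe' stands in for self, the 'article' div class is assumed for illustration, and index_to_soup is fed the raw bytes the browser returns):

def stitch_pages(recipe, br, first_url):
    html = u''
    url = first_url
    while url:
        soup = recipe.index_to_soup(br.open(url).read())
        main_section = soup.find(id='mainSection')
        html += unicode(main_section.find('div', attrs={'class': 'article'}))
        nxt = main_section.find('li', attrs={'class': 'next'})
        url = nxt.find('a')['href'] if nxt else None  # no link on the last page
    return html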
@@ -81,11 +81,11 @@ class Newsweek(BasicNewsRecipe):
                aside.extract()
            html = html + unicode(article)
            next = main_section.find('li', attrs={'class' : 'next'})

        self.temp_files.append(PersistentTemporaryFile('_temparse.html'))
        self.temp_files[-1].write(html)
        self.temp_files[-1].close()
        return self.temp_files[-1].name
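PersistentTemporaryFile (from calibre.ptempfile) is used here rather than the standard tempfile module because the file must survive close(): get_obfuscated_article returns a path that calibre opens later, and calibre removes these files itself on exit. Minimal usage, assuming only that the positional argument is the filename suffix:

from calibre.ptempfile import PersistentTemporaryFile

tf = PersistentTemporaryFile('_temparse.html')  # positional arg = suffix
tf.write(b'<html>stitched article</html>')
tf.close()
print(tf.name)  # the path is still readable after close()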
@@ -102,9 +102,9 @@ class Newsweek(BasicNewsRecipe):
        if len(options) > self.BACK_ISSUES:
            option = options[self.BACK_ISSUES];
            self.EDITION = option['value'].replace('http://www.newsweek.pl/wydania/','')
-           issue_soup = self.index_to_soup('http://www.newsweek.pl/wydania/' + self.EDITION)
+           self.index_to_soup('http://www.newsweek.pl/wydania/' + self.EDITION)
        else:
            self.BACK_ISSUES = self.BACK_ISSUES - len(options)
            self.YEAR = self.YEAR - 1
            self.find_last_issue(archive_url + ',' + str(self.YEAR))
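The recursion in this hunk walks backwards through yearly archive pages: each page lists one year's issues, newest first, so when BACK_ISSUES points past the current year's list, the remainder is carried into the previous year. The same logic as a pure function (issues_for_year is hypothetical, and this returns a value instead of setting self.EDITION):

def find_issue(year, back_issues):
    options = issues_for_year(year)    # one year's issues, newest first
    if len(options) > back_issues:
        return options[back_issues]    # 0 == the most recent issue
    # fewer issues than needed this year: skip them all, go one year back
    return find_issue(year - 1, back_issues - len(options))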
@@ -139,14 +139,14 @@ class Newsweek(BasicNewsRecipe):
                article = self.create_article(h2)
                if article is None:
                    continue

                if articles.has_key(section):
                    articles[section].append(article)
                else:
                    articles[section] = [article]
                    sections.append(section)

        for section in sections:
            feeds.append((section, articles[section]))
        return feeds
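One portability note on this hunk: dict.has_key() exists only in Python 2 (it was removed in Python 3); 'section in articles' is the equivalent that works in both, and the separate sections list is what preserves first-seen order, since plain dicts of that era made no ordering guarantee. A self-contained rendering of the same grouping:

articles = {}
sections = []
for section, article in [('News', 'a1'), ('Sport', 'a2'), ('News', 'a3')]:
    if section in articles:            # portable spelling of has_key()
        articles[section].append(article)
    else:
        articles[section] = [article]
        sections.append(section)       # remember first-seen order
feeds = [(section, articles[section]) for section in sections]
# feeds == [('News', ['a1', 'a3']), ('Sport', ['a2'])]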
@@ -161,7 +161,7 @@ class Newsweek(BasicNewsRecipe):
        a = h2.find('a')
        if a is None:
            return None
        article['title'] = self.tag_to_string(a)
        article['url'] = a['href']
        article['date'] = self.DATE
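For reference, these assignments fill in the article dictionary that calibre's parse_index() contract expects: a list of (section title, list of article dicts) pairs, with each dict keyed as below. The literal values are placeholders:

article = {
    'title':       'Some headline',          # tag_to_string(a)
    'url':         'http://example.com/a1',  # a['href']
    'date':        '',                       # self.DATE in this recipe
    'description': '',                       # optional summary
}
feeds = [('Section name', [article])]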