Update Smithsonian and Business Week Magazine

Kovid Goyal 2013-04-27 09:41:00 +05:30
parent 71f61ac049
commit bd9e63ca92
2 changed files with 30 additions and 24 deletions

@@ -12,7 +12,7 @@ class BusinessWeekMagazine(BasicNewsRecipe):
     category = 'news'
     encoding = 'UTF-8'
     keep_only_tags = [
-        dict(name='div', attrs={'id':'article_body_container'}),
+        dict(name='div', attrs={'id':['article_body_container','story_body']}),
         ]
     remove_tags = [dict(name='ui'),dict(name='li'),dict(name='div', attrs={'id':['share-email']})]
     no_javascript = True
@@ -26,43 +26,45 @@ class BusinessWeekMagazine(BasicNewsRecipe):
         #Find date
         mag=soup.find('h2',text='Magazine')
         self.log(mag)
         dates=self.tag_to_string(mag.findNext('h3'))
         self.timefmt = u' [%s]'%dates
 
         #Go to the main body
-        div0 = soup.find ('div', attrs={'class':'column left'})
+        div0 = soup.find('div', attrs={'class':'column left'})
         section_title = ''
         feeds = OrderedDict()
-        for div in div0.findAll(['h4','h5']):
+        for div in div0.findAll('a', attrs={'class': None}):
             articles = []
             section_title = self.tag_to_string(div.findPrevious('h3')).strip()
-            title=self.tag_to_string(div.a).strip()
-            url=div.a['href']
+            title=self.tag_to_string(div).strip()
+            url=div['href']
             soup0 = self.index_to_soup(url)
-            urlprint=soup0.find('a', attrs={'href':re.compile('.*printer.*')})['href']
-            articles.append({'title':title, 'url':urlprint, 'description':'', 'date':''})
+            urlprint=soup0.find('a', attrs={'href':re.compile('.*printer.*')})
+            if urlprint is not None:
+                url=urlprint['href']
+            articles.append({'title':title, 'url':url, 'description':'', 'date':''})
 
             if articles:
                 if section_title not in feeds:
                     feeds[section_title] = []
                 feeds[section_title] += articles
-        div1 = soup.find ('div', attrs={'class':'column center'})
+        div1 = soup.find('div', attrs={'class':'column center'})
         section_title = ''
-        for div in div1.findAll(['h4','h5']):
+        for div in div1.findAll('a'):
             articles = []
             desc=self.tag_to_string(div.findNext('p')).strip()
             section_title = self.tag_to_string(div.findPrevious('h3')).strip()
-            title=self.tag_to_string(div.a).strip()
-            url=div.a['href']
+            title=self.tag_to_string(div).strip()
+            url=div['href']
             soup0 = self.index_to_soup(url)
-            urlprint=soup0.find('a', attrs={'href':re.compile('.*printer.*')})['href']
-            articles.append({'title':title, 'url':urlprint, 'description':desc, 'date':''})
+            urlprint=soup0.find('a', attrs={'href':re.compile('.*printer.*')})
+            if urlprint is not None:
+                url=urlprint['href']
+            articles.append({'title':title, 'url':url, 'description':desc, 'date':''})
 
             if articles:
                 if section_title not in feeds:
                     feeds[section_title] = []
                 feeds[section_title] += articles
 
         ans = [(key, val) for key, val in feeds.iteritems()]
         return ans
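Note on the change above: both loops previously did soup0.find(...)['href'] in a single step, which raises a TypeError whenever an article page has no printer-friendly link; the new code keeps the article URL as a fallback and only switches to the printer version when the link is found. A minimal standalone sketch of the pattern, using plain BeautifulSoup and hypothetical HTML outside the recipe class:

    import re
    from bs4 import BeautifulSoup

    # Hypothetical article page with no printer-friendly link.
    html = '<div><a href="/story/1">Read more</a></div>'
    soup0 = BeautifulSoup(html, 'html.parser')

    url = '/story/1'  # default: keep the original article URL
    urlprint = soup0.find('a', attrs={'href': re.compile('.*printer.*')})
    if urlprint is not None:    # old code indexed ['href'] directly and crashed here
        url = urlprint['href']  # prefer the printer-friendly page when one exists
    print(url)                  # -> /story/1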

@@ -25,7 +25,7 @@ class Smithsonian(BasicNewsRecipe):
         soup = self.index_to_soup(current_issue_url)
 
         #Go to the main body
-        div = soup.find ('div', attrs={'id':'article-body'})
+        div = soup.find('div', attrs={'id':'article-body'})
 
         #Find date
         date = re.sub('.*\:\W*', "", self.tag_to_string(div.find('h2')).strip())
@@ -49,16 +49,20 @@ class Smithsonian(BasicNewsRecipe):
                     self.log('Found section:', section_title)
                 else:
                     link=post.find('a',href=True)
+                    article_cat=link.findPrevious('p', attrs={'class':'article-cat'})
                     url=link['href']+'?c=y&story=fullstory'
-                    description=self.tag_to_string(post.find('p')).strip()
-                    desc=re.sub('\sBy\s.*', '', description, re.DOTALL)
-                    author=re.sub('.*By\s', '', description, re.DOTALL)
-                    title=self.tag_to_string(link).strip()+ u' (%s)'%author
+                    description=self.tag_to_string(post.findAll('p')[-1]).strip()
+                    title=self.tag_to_string(link).strip()
+                    if article_cat is not None:
+                        title += u' (%s)'%self.tag_to_string(article_cat).strip()
                     self.log('\tFound article:', title)
-                    articles.append({'title':title, 'url':url, 'description':desc, 'date':''})
+                    articles.append({'title':title, 'url':url, 'description':description, 'date':''})
 
             if articles:
-                feeds[section_title] = articles
+                if section_title not in feeds:
+                    feeds[section_title] = []
+                feeds[section_title] += articles
+                articles = []
 
         ans = [(key, val) for key, val in feeds.iteritems()]
         return ans
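Note on the last hunk: feeds[section_title] = articles overwrote a section's article list whenever the same section title appeared more than once in the index; the new code accumulates into the OrderedDict instead, matching the pattern the Business Week recipe already uses. A minimal sketch with made-up section data:

    from collections import OrderedDict

    # Hypothetical index data: the 'History' section appears twice.
    posts = [('History', {'title': 'A'}),
             ('Science', {'title': 'B'}),
             ('History', {'title': 'C'})]

    feeds = OrderedDict()
    for section_title, article in posts:
        if section_title not in feeds:     # create each section's list on first sight
            feeds[section_title] = []
        feeds[section_title] += [article]  # accumulate; plain '=' would drop 'A' for 'C'

    print(list(feeds.items()))
    # [('History', [{'title': 'A'}, {'title': 'C'}]), ('Science', [{'title': 'B'}])]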