mirror of
https://github.com/kovidgoyal/calibre.git
synced 2025-08-07 09:01:38 -04:00
Update Smithsonian Magazine
This commit is contained in:
parent
6d1139ba2d
commit
733ab716af
@ -5,64 +5,57 @@ from collections import OrderedDict
|
|||||||
class Smithsonian(BasicNewsRecipe):
    # Recipe for the monthly Smithsonian Magazine. The index is built by
    # scraping the magazine's online issue archive (no RSS feed is used).
    title = 'Smithsonian Magazine'
    __author__ = 'Kovid Goyal'
    description = 'This magazine chronicles the arts, environment, sciences and popular culture of the times. It is edited for modern, well-rounded individuals with diverse, general interests. With your order, you become a National Associate Member of the Smithsonian. Membership benefits include your subscription to Smithsonian magazine, a personalized membership card, discounts from the Smithsonian catalog, and more.' # noqa
    language = 'en'
    category = 'news'
    encoding = 'UTF-8'

    # Keep only the main article container; strip site chrome.
    keep_only_tags = [dict(name='main', attrs={'class':'main'})]
    remove_tags = [
        # Elements served only for specific device classes (responsive dupes).
        dict(attrs={'class':lambda x: x and set(x.split()).intersection({'hidden-phone', 'hidden-tablet', 'hidden-desktop'})}),
        dict(attrs={'class':['slideshow-nav', 'associated-container']}),
    ]
    remove_tags_after = dict(name='div', attrs={'class':lambda x:x and 'article-body' in x.split()})
    no_javascript = True
    no_stylesheets = True

    def parse_index(self):
        """Build the issue index.

        Returns a list of ``(section_title, [article, ...])`` tuples, where
        each article is a dict with ``title``, ``url`` and ``description``
        keys. Also sets ``self.timefmt`` (issue date) and ``self.cover_url``
        as side effects.
        """
        # Locate the most recent issue in the archive listing.
        soup = self.index_to_soup('http://www.smithsonianmag.com/issue/archive/?no-ist')
        li = soup.find('li', attrs={'class':'issue'})
        url_prefix = 'http://www.smithsonianmag.com'
        current_issue_url = url_prefix + li.find('a', href=True)['href'] + '?no-ist'
        self.log('Downloading issue:', current_issue_url)
        soup = self.index_to_soup(current_issue_url)

        # Main content container of the issue page.
        div = soup.find('div', id='Page-Content')

        # The issue date is the part of the <h1> heading after the colon.
        # Raw string avoids the invalid '\:' escape (same match: re treats
        # '\:' as a literal ':').
        date = re.sub(r'.*:\W*', '', self.tag_to_string(div.find('h1')).strip())
        self.timefmt = u' [%s]' % date

        # Cover image: the <img> whose alt text mentions 'Cover'.
        self.cover_url = div.find('img', alt=lambda x: x and 'Cover' in x, src=True)['src']

        feeds = OrderedDict()
        for div in soup.findAll('div', attrs={'class':'article-list'}):
            section_title = self.tag_to_string(div.find('h2', attrs={'class':'headline'})).capitalize()
            self.log('\n\nFound section:', section_title)
            # New list shared between feeds[...] and the local accumulator.
            articles = feeds[section_title] = []
            for sec in div.findAll('section', attrs={'class':lambda x:x and 'article-teaser' in x.split()}):
                head = sec.find(attrs={'class':'headline'})
                # '?all' fetches the single-page version of the article.
                url = head.find('a', href=True)['href'] + '?all&no-ist'
                if url.startswith('/'):
                    url = url_prefix + url
                title = self.tag_to_string(head)
                desc = sec.find(attrs={'class':'sub-title'})
                desc = '' if desc is None else self.tag_to_string(desc)
                self.log('Found article:', title)
                self.log('\t', url)
                articles.append({'title':title, 'url':url, 'description':desc})

        # items() preserves OrderedDict insertion order and, unlike the
        # Python-2-only iteritems(), works on both Python 2 and 3.
        return list(feeds.items())
|
Loading…
x
Reference in New Issue
Block a user