Mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-09 03:04:10 -04:00)
Update Fortune Magazine

commit 2d9a45db03 (parent 146b3373eb)
@@ -1,5 +1,10 @@
 from calibre.web.feeds.recipes import BasicNewsRecipe
-from collections import OrderedDict
+
+
+def classes(classes):
+    q = frozenset(classes.split(' '))
+    return dict(attrs={
+        'class': lambda x: x and frozenset(x.split()).intersection(q)})
 
 
 class Fortune(BasicNewsRecipe):
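The classes() helper added above builds a findAll-style attrs matcher: it accepts any tag whose class attribute shares at least one name with the space-separated list it is given. A minimal sketch of the predicate it produces (the sample class strings are made up for illustration):

    def classes(classes):
        q = frozenset(classes.split(' '))
        return dict(attrs={
            'class': lambda x: x and frozenset(x.split()).intersection(q)})

    match = classes('lead-media author')['attrs']['class']
    print(bool(match('lead-media large')))  # True: shares 'lead-media'
    print(bool(match('sidebar promo')))     # False: no class name in common
    print(bool(match(None)))                # False: tag has no class attribute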
@@ -11,20 +16,24 @@ class Fortune(BasicNewsRecipe):
     language = 'en'
     category = 'news'
     encoding = 'UTF-8'
-    keep_only_tags = [dict(attrs={'id': ['storycontent']})]
-    remove_tags = [
-        dict(attrs={'class': ['hed_side', 'socialMediaToolbarContainer']})]
+    keep_only_tags = [
+        dict(name='h1', attrs={'class': lambda x: x and 'headline' in x}),
+        classes('lead-media author'),
+        dict(id='article-body'),
+    ]
 
     no_javascript = True
     no_stylesheets = True
-    needs_subscription = True
+    needs_subscription = 'optional'
 
     def get_browser(self):
         br = BasicNewsRecipe.get_browser(self)
-        br.open('http://fortune.com')
-        br.select_form(id='sign-in-form')
-        br['username'] = self.username
-        br['password'] = self.password
-        br.submit()
+        if self.username and self.password:
+            br.open('http://fortune.com')
+            br.select_form(id='sign-in-form')
+            br['username'] = self.username
+            br['password'] = self.password
+            br.submit()
         return br
 
     def parse_index(self):
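Changing needs_subscription from True to 'optional' lets calibre run the recipe with no credentials at all, which is why get_browser() now opens and submits the sign-in form only when both a username and a password were supplied. When testing locally, the login path can be exercised with something like ebook-convert fortune.recipe out.epub --username ... --password ... (the recipe filename here is illustrative).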
@@ -32,48 +41,19 @@ class Fortune(BasicNewsRecipe):
 
         # Go to the latestissue
         soup = self.index_to_soup('http://fortune.com/section/magazine/')
 
-        # Find cover & date
-        cover_item = soup.find('div', attrs={'id': 'cover-story'})
-        cover = cover_item.find('img', src=True)
-        self.cover_url = cover['src']
-        date = self.tag_to_string(cover_item.find(
-            'div', attrs={'class': 'tocDate'})).strip()
-        self.timefmt = u' [%s]' % date
-
-        feeds = OrderedDict()
-        section_title = ''
-
-        # checkout the cover story
-        articles = []
-        coverstory = soup.find('div', attrs={'class': 'cnnHeadline'})
-        title = self.tag_to_string(coverstory.a).strip()
-        url = coverstory.a['href']
-        desc = self.tag_to_string(coverstory.findNext(
-            'p', attrs={'class': 'cnnBlurbTxt'}))
-        articles.append({'title': title, 'url': url,
-                         'description': desc, 'date': ''})
-        feeds['Cover Story'] = []
-        feeds['Cover Story'] += articles
-
-        for post in soup.findAll('div', attrs={'class': 'cnnheader'}):
-            section_title = self.tag_to_string(post).strip()
-            articles = []
-
-            ul = post.findNext('ul')
-            for link in ul.findAll('li'):
-                links = link.find('h2')
-                title = self.tag_to_string(links.a).strip()
-                url = links.a['href']
-                desc = self.tag_to_string(
-                    link.find('p', attrs={'class': 'cnnBlurbTxt'}))
-                articles.append({'title': title, 'url': url,
-                                 'description': desc, 'date': ''})
-
-            if articles:
-                if section_title not in feeds:
-                    feeds[section_title] = []
-                feeds[section_title] += articles
-
-        ans = [(key, val) for key, val in feeds.iteritems()]
-        return ans
+        articles = []
+        for i, article in enumerate(soup.findAll('article', attrs={'class': lambda x: x and 'type-article' in x.split()})):
+            div = article.find('div', attrs={'class': lambda x: x and 'article-info' in x.split()})
+            a = div.find('a', href=True)
+            url = a['href']
+            if url.startswith('/'):
+                url = 'http://fortune.com' + url
+            title = self.tag_to_string(a)
+            ai = div.find('div', attrs={'class': lambda x: x and 'article-info-extended' in x.split()})
+            desc = ''
+            if ai:
+                desc = self.tag_to_string(desc)
+            self.log('Article:', title, 'at', url)
+            articles.append({'title': title, 'url': url, 'description': desc})
+
+        return [('Articles', articles)]
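The rewritten parse_index() drops the per-section OrderedDict bookkeeping and returns a single feed. The value calibre expects from parse_index() is a list of (feed title, article list) pairs, where each article is a dict with at least 'title' and 'url'; a minimal sketch of the shape the new code produces (title and URL made up for illustration):

    index = [
        ('Articles', [
            {'title': 'Example story',
             'url': 'http://fortune.com/example-story/',
             'description': ''},
        ]),
    ]

Note that the added line desc = self.tag_to_string(desc) feeds the empty string back through tag_to_string, so descriptions stay empty as committed; presumably self.tag_to_string(ai) was intended.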