calibre/recipes/mit_technology_review.recipe
Commit a80d6302ec by Kovid Goyal (2016-10-20 23:12:14 +05:30): Update MIT Technology Review
Fixes #1635338 [MIT Technology Review magazine doesn't download main article](https://bugs.launchpad.net/calibre/+bug/1635338)


#!/usr/bin/env python2
from __future__ import unicode_literals
__license__ = 'GPL v3'
__copyright__ = '2015 Michael Marotta <mikefm at gmail.net>'
# Written April 2015
# Last edited 4/18/15
'''
technologyreview.com
'''
from calibre.web.feeds.news import BasicNewsRecipe


def absurl(x):
    # Make scheme-relative ('//...') and site-relative links absolute
    if x.startswith('//'):
        x = 'http:' + x
    elif not x.startswith('http'):
        x = "http://www.technologyreview.com" + x
    return x
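
# Illustrative examples (not part of the original recipe) of how absurl()
# resolves the URL shapes it handles; the sample paths are invented:
#   absurl('//wp.technologyreview.com/cover.jpg') -> 'http://wp.technologyreview.com/cover.jpg'
#   absurl('/magazine/')  -> 'http://www.technologyreview.com/magazine/'
#   absurl('https://example.com/a')  -> 'https://example.com/a' (already absolute)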


def classes(classes):
    # Build a findAll() attrs matcher that accepts any tag whose class list
    # shares at least one name with the given space-separated class names
    q = frozenset(classes.split(' '))
    return dict(attrs={'class': lambda x: x and frozenset(x.split()).intersection(q)})


class MitTechnologyReview(BasicNewsRecipe):

    title = 'MIT Technology Review Magazine'
    __author__ = 'Michael Marotta'
    description = ('Bi-monthly magazine version of MIT Technology Review.'
                   ' This is different than the recipe named simply "Technology Review"'
                   ' which downloads the rss feed with daily articles from the website.')
    INDEX = 'http://www.technologyreview.com/magazine/'
    language = 'en'
    encoding = 'utf-8'
    simultaneous_downloads = 20
    tags = 'news, technology, science'
    no_stylesheets = True

    keep_only_tags = [
        classes('article-topper__topic article-topper__title article-topper__media-wrap article-body__content'),
    ]
    remove_tags = [
        classes('l-article-list'),
    ]

    def parse_index(self):
        soup = self.index_to_soup(self.INDEX)
        # find cover
        self.cover_url = absurl(soup.find(
            'a', attrs={'class': 'magazine-topper__cover'}).find('img', src=True)['src'])
        # parse articles
        current_articles = []
        for div in soup.findAll(attrs={'class': lambda x: x in
                'magazine-topper__title magazine-features-item__top-title magazine-features-item-title author-tz__title feed-tz__title'.split()}):
            a = div.find('a', href=True)
            title = self.tag_to_string(a).strip()
            href = absurl(a['href'])
            if href and title:
                current_articles.append({'title': title, 'url': href})
                self.log(title, '[%s]' % href)
        return [('Articles', current_articles)]

    def preprocess_html(self, soup):
        # Use the first srcset candidate as each img's src, then drop srcset
        for img in soup.findAll(srcset=True):
            img['src'] = absurl(img['srcset'].split()[0])
            del img['srcset']
        return soup
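
# Usage note (illustrative, not part of the original recipe): a recipe file like
# this can be test-built from the command line with calibre's converter, e.g.
#     ebook-convert mit_technology_review.recipe output.epub --test -vv
# where output.epub is an arbitrary output name and --test limits the download
# to a couple of articles per feed for quicker iteration.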