#!/usr/bin/env python2
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
__license__ = 'GPL v3'

'''
calibre recipe for slate.com
'''

from calibre.web.feeds.recipes import BasicNewsRecipe

class Slate(BasicNewsRecipe):

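    # Download/cleanup settings: site stylesheets are ignored, inline style
    # attributes are stripped, and downloaded images are recompressed to keep
    # the generated book small.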
    title = 'Slate'
    description = 'A general-interest publication offering analysis and commentary about politics, news and culture.'
    __author__ = 'Kovid Goyal'
    timefmt = ''
    no_stylesheets = True
    language = 'en'
    encoding = 'utf-8'
    masthead_url = 'http://img.slate.com/images/redesign2008/slate_logo.gif'
    remove_attributes = ['style']
    INDEX = 'http://slate.com'
    compress_news_images = True

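    # Keep only the article header and the main content section of each page;
    # the social/byline clutter listed in remove_tags is dropped as well.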
    keep_only_tags = [
        dict(name='header', attrs={'class': 'article-header'}),
        dict(name='section', attrs={
            'class': lambda x: x and 'content' == x.strip()}),
    ]
    remove_tags = [
        dict(id='header_social'),
        dict(attrs={'class': ['prop-name', 'prop-desc', 'authorbox',
                              'twitter', 'email', 'facebook', 'follow-links',
                              'join-in']}),
        dict(attrs={'class': lambda x: x and 'sharing-buttons' in x.split()}),
    ]

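    # Fetch the single-page ('.single.html') version of each article so the
    # full text comes down in one request. Illustrative example:
    #   http://slate.com/articles/foo.html -> http://slate.com/articles/foo.single.html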
    def print_version(self, url):
        return url.replace('.html', '.single.html')

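    # Build the index calibre downloads: a list of (section title, articles)
    # tuples, where each article is a dict with 'title', 'url', 'description'
    # and 'date' keys.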
    def parse_index(self):
        ans = []
        for sectitle, url in (
                ('News & Politics', '/articles/news_and_politics.html'),
                ('Technology', '/articles/technology.html'),
                ('Business', '/articles/business.html'),
                ('Arts', '/articles/arts.html'),
                ('Life', '/articles/life.html'),
                ('Health & Science', '/articles/health_and_science.html'),
                ('Sports', '/articles/sports.html'),
                ('Double X', '/articles/double_x.html'),
        ):
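            # Fetch each section's landing page and scan it for article links.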
            url = self.INDEX + url
            self.log('\nFound section:', sectitle)
            articles = self.slate_section_articles(self.index_to_soup(url))
            if articles:
                ans.append((sectitle, articles))
            if self.test and len(ans) > 1:
                break
        return ans

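    # Extract article metadata (headline, URL, byline) from a parsed section page.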
    def slate_section_articles(self, soup):
        ans = []
        main = soup.find('article', attrs={'class': 'main'})
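        # Links with class 'primary' are candidate articles; entries without a
        # headline ('hed') or whose URL ends in '/' are skipped.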
        for a in main.findAll('a', attrs={'class': 'primary'}):
            url = a['href']
            if url.endswith('/'):
                continue
            p = a.parent
            title = p.find(attrs={'class': 'hed'})
            if title is None:
                continue
            title = self.tag_to_string(title)
            span = p.find(attrs={'class': 'byline'})
            desc = ''
            if span is not None:
                desc = self.tag_to_string(span)
            self.log('\t' + title)
            self.log('\t\t' + url)
            ans.append({'title': title, 'description': desc,
                        'date': '', 'url': url})
        return ans
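
# A quick way to test this recipe locally (assuming calibre's command-line
# tools are installed and the file is saved as, say, slate.recipe):
#
#   ebook-convert slate.recipe slate.epub --test
#
# The --test flag limits the download to a couple of articles per section,
# which keeps the run short while still exercising the selectors above.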