from calibre.web.feeds.recipes import BasicNewsRecipe
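# class_sel() builds an attribute matcher for BeautifulSoup lookups: the
# returned predicate is True when `cls` appears in an element's
# space-separated class attribute.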
def class_sel(cls):

    def f(x):
        return x and cls in x.split()
    return f


class Spectator(BasicNewsRecipe):
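    # Fetches the current issue of Spectator Magazine from
    # https://www.spectator.co.uk/magazine/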
    title = 'Spectator Magazine'
    __author__ = 'Kovid Goyal'
    description = 'Magazine'
    language = 'en'

    no_stylesheets = True

    keep_only_tags = dict(name='div', attrs={
        'class': ['article-header__text', 'featured-image', 'article-content']})
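    # Strip comment threads (Disqus), promos, sharing widgets, audio player
    # chrome and analytics outbound-tracking links from the article body.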
    remove_tags = [
        dict(name='div', attrs={'id': ['disqus_thread']}),
        dict(attrs={'class': ['middle-promo',
                              'sharing', 'mejs-player-holder']}),
        dict(name='a', onclick=lambda x: x and '__gaTracker' in x and 'outbound-article' in x),
    ]
    remove_tags_after = [
        dict(name='hr', attrs={'class': 'sticky-clear'}),
    ]
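
    # Parse one section block of the magazine index: the <h2> heading gives
    # the section title, and each child div with an id of the form "post-*"
    # yields an article with its title, link and optional excerpt.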
    def parse_spec_section(self, div):
        h2 = div.find('h2')
        sectitle = self.tag_to_string(h2)
        self.log('Section:', sectitle)
        articles = []
        for div in div.findAll('div', id=lambda x: x and x.startswith('post-')):
            h2 = div.find('h2', attrs={'class': class_sel('term-item__title')})
            if h2 is None:
                h2 = div.find(attrs={'class': class_sel('news-listing__title')})
            title = self.tag_to_string(h2)
            a = h2.find('a')
            url = a['href']
            desc = ''
            self.log('\tArticle:', title)
            p = div.find(attrs={'class': class_sel('term-item__excerpt')})
            if p is not None:
                desc = self.tag_to_string(p)
            articles.append({'title': title, 'url': url, 'description': desc})
        return sectitle, articles
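
    # Build the issue: take the cover image and issue title from the magazine
    # index page, then collect articles from every magazine-section-holder.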
    def parse_index(self):
        soup = self.index_to_soup('https://www.spectator.co.uk/magazine/')
        a = soup.find('a', attrs={'class': 'issue-details__cover-link'})
        self.timefmt = ' [%s]' % a['title']
        self.cover_url = a['href']
        if self.cover_url.startswith('//'):
            self.cover_url = 'http:' + self.cover_url

        feeds = []

        div = soup.find(attrs={'class': class_sel('content-area')})
        for x in div.findAll(attrs={'class': class_sel('magazine-section-holder')}):
            title, articles = self.parse_spec_section(x)
            if articles:
                feeds.append((title, articles))
        return feeds