#!/usr/bin/env python
# vim:fileencoding=utf-8

from calibre.web.feeds.news import BasicNewsRecipe, classes
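

# Make site-relative links absolute by prefixing the Spectator domain.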
def absurl(url):
    if url.startswith('/'):
        url = 'https://www.spectator.co.uk' + url
    return url


class spectator(BasicNewsRecipe):
    title = 'Spectator Magazine'
    __author__ = 'unkn0wn'
    description = 'The Spectator was established in 1828, and is the best-written and most influential weekly in the English language.'
    language = 'en_GB'
    no_stylesheets = True
    remove_attributes = ['height', 'width', 'style']
    ignore_duplicate_articles = {'url'}
    masthead_url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/c/c7/The_Spectator_logo.svg/320px-The_Spectator_logo.svg.png'
    encoding = 'utf-8'
    remove_empty_feeds = True
    resolve_internal_links = True

    extra_css = '''
        .writers-link__text, .author-bio__content {font-size:small; color:#404040;}
        #fig-c {text-align:center; font-size:small;}
        blockquote, em, i {color:#202020;}
        img {display:block; margin:0 auto;}
    '''
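
    # Keep only the article header, body and author bio blocks.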
    keep_only_tags = [
        classes(
            'writers-link entry-header__author entry-header__title entry-header__thumbnail entry-content '
            'author-bio__content '
        )
    ]
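
    # Drop icons, buttons, article metadata, audio players, ads and subscription banners.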
    remove_tags = [
        dict(name=['svg', 'button']),
        classes(
            'entry-meta audio-read-block insert--most-popular ad-slot ad-slot--in-content ad-content '
            'subscription-banner '
        )
    ]
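
    # Give figure captions the id used by the centering rule in extra_css.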
    def preprocess_html(self, soup):
        for fc in soup.findAll('figcaption'):
            fc['id'] = 'fig-c'
        return soup
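
    # Identify as Googlebot (user agent, Google referer and a crawler address in
    # X-Forwarded-For) so the site serves full article pages to the fetcher.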
    def get_browser(self, *args, **kwargs):
        kwargs['user_agent'] = 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
        br = BasicNewsRecipe.get_browser(self, *args, **kwargs)
        br.addheaders += [
            ('Referer', 'https://www.google.com/'),
            ('X-Forwarded-For', '66.249.66.1')
        ]
        return br
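
    # Optional 'date' setting lets the user download a specific back issue (DD-MM-YYYY).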
    recipe_specific_options = {
        'date': {
            'short': 'The date of the edition to download (DD-MM-YYYY format)',
            'long': 'For example, 20-07-2024'
        }
    }
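
    # Build the issue: load the magazine index page, pick up the cover and issue
    # title, then walk each section listed in the issue navigation.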
    def parse_index(self):
        index = 'https://www.spectator.co.uk/magazine'
        d = self.recipe_specific_options.get('date')
        if d and isinstance(d, str):
            index = index + '/' + d + '/'
        soup = self.index_to_soup(index)
        self.cover_url = soup.find(**classes(
            'magazine-header__container')).img['src'].split('?')[0]
        issue = self.tag_to_string(soup.find(**classes(
            'magazine-header__title'))).strip()
        time = soup.find('time')
        self.title = 'The Spectator ' + issue
        self.timefmt = ' [' + self.tag_to_string(time) + ']'
        self.log('Downloading Issue: ', self.title, self.timefmt)
        nav_div = soup.find('ul', **classes('archive-entry__nav-list'))
        section_list = []

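        # Collect (section title, section URL) pairs from the issue navigation list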
        for x in nav_div.findAll(['a']):
            section_list.append((
                self.tag_to_string(x).strip(), absurl(x['href'])))
        feeds = []

        # For each section title, fetch the article urls
        for section in section_list:
            section_title = section[0]
            section_url = section[1]
            self.log(section_title, section_url)
            soup = self.index_to_soup(section_url)
            articles = self.articles_from_soup(soup)
            if articles:
                feeds.append((section_title, articles))
        return feeds
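
    # Extract title, URL and a short description (section | author | teaser)
    # from every article tile on a section page.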
    def articles_from_soup(self, soup):
        ans = []
        for div in soup.findAll('div', **classes(
            'mosaic__tile mosaic__tile--lead-up'
        )):
            a = div.find('a', href=True, attrs={'class': 'article__title-link'})
            url = absurl(a['href'])
            title = self.tag_to_string(a).strip()
            teaser = div.find('p', **classes('article__excerpt-text'))
            desc = ''
            if teaser:
                desc = self.tag_to_string(teaser).strip()
            obj = div.find('a', **classes('article__author article__author--link'))
            if obj:
                desc = self.tag_to_string(obj).strip() + ' | ' + desc
            sec = div.findParent('div').find('a', attrs={'class': 'magazine-issue__entry-link'})
            if sec:
                desc = self.tag_to_string(sec).strip() + ' | ' + desc

            self.log('\t', title, '\n\t', desc, '\n\t\t', url)
            ans.append({'title': title, 'description': desc, 'url': url})
        return ans