Update 1843

This commit is contained in:
Kovid Goyal 2023-10-16 16:13:30 +05:30
parent 32d80a0cfe
commit c7948acdc1
No known key found for this signature in database
GPG Key ID: 06BC317B515ACE7C

View File

@ -2,14 +2,7 @@
# vim:fileencoding=utf-8 # vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net> # License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import absolute_import, division, print_function, unicode_literals from calibre.web.feeds.news import BasicNewsRecipe, classes
from calibre.web.feeds.recipes import BasicNewsRecipe
def classes(classes):
q = frozenset(classes.split(' '))
return dict(attrs={
'class': lambda x: x and frozenset(x.split()).intersection(q)})
class E1843(BasicNewsRecipe): class E1843(BasicNewsRecipe):
@ -20,34 +13,41 @@ class E1843(BasicNewsRecipe):
no_stylesheets = True no_stylesheets = True
remove_javascript = True remove_javascript = True
encoding = 'utf-8' encoding = 'utf-8'
# economist.com has started throttling after about 60% of the total has
# downloaded with connection reset by peer (104) errors.
delay = 1
keep_only_tags = [ keep_only_tags = [
dict(id='content') dict(id='content')
] ]
remove_tags = [ remove_tags = [
classes('advert ad ds-share-list article__wordmark related-articles newsletter-signup') classes('advert ad ds-share-list article__wordmark related-articles newsletter-signup'),
dict(attrs={'data-test-id':'sharing-modal'}),
] ]
def parse_index(self):
    """Build the article list for the 1843 section front.

    Scans every <h3> inside the page's #content container, takes its
    child link as the article, and uses the following sibling <p> of the
    link's parent (when present) as the description.

    :return: a single-section feed list ``[('Articles', [...])]`` where each
        entry is a dict with ``title``, ``url`` and ``description`` keys.
    """
    soup = self.index_to_soup('https://economist.com/1843')
    ans = []
    main = soup.find(id='content')
    for h3 in main.find_all('h3'):
        a = h3.find('a')
        if a is None:
            # Defensive: skip headings that carry no article link.
            continue
        url = a['href']
        if url.startswith('/'):
            # Site-relative links need the host prefixed.
            url = 'https://economist.com' + url
        title = self.tag_to_string(a)
        self.log(title, ' at ', url)
        desc = ''
        # The teaser paragraph follows the link's parent element.
        d = a.parent.findNextSibling('p')
        if d is not None:
            desc = self.tag_to_string(d)
        ans.append({'title': title, 'url': url, 'description': desc})
    return [('Articles', ans)]
def postprocess_html(self, soup, *a):
    """Strip the trailing 'More from 1843 magazine' promo block.

    Finds the anchor by its exact link text and removes its grandparent
    container (the whole promo section). Extra positional args from the
    calibre pipeline are accepted and ignored.

    :return: the modified soup.
    """
    # Renamed local (was ``a``) to avoid shadowing the *a parameter.
    link = soup.find('a', string='More from 1843 magazine')
    if link is not None:
        # The anchor sits two levels inside the promo container.
        link.parent.parent.extract()
    return soup