Mirror of https://github.com/kovidgoyal/calibre.git
Update 1843
Fixes #1886805 [Recipes: 1843.recipe not work well now](https://bugs.launchpad.net/calibre/+bug/1886805)
commit c52574c4d9
parent 869d429cb3
--- a/recipes/1843.recipe
+++ b/recipes/1843.recipe
@@ -19,49 +19,35 @@ class E1843(BasicNewsRecipe):
     language = 'en_GB'
     no_stylesheets = True
     remove_javascript = True
-    oldest_article = 365
     encoding = 'utf-8'
 
-    # feeds = [
-    #     'https://www.1843magazine.com/rss/content',
-    # ]
-
     keep_only_tags = [
-        dict(name='h1', attrs={'class': lambda x: x and 'title' in x.split()}),
-        classes('field-name-field-rubric-summary article-header__overlay-main-image meta-info__author article__body'),
+        dict(id='content')
+    ]
+    remove_tags = [
+        classes('advert ad ds-share-list article__wordmark related-articles newsletter-signup')
     ]
 
     def parse_index(self):
-        soup = self.index_to_soup('https://www.1843magazine.com')
-        a = soup.find(text='Print edition').parent
-        soup = self.index_to_soup(a['href'])
-        h1 = soup.find(**classes('cover-image__main'))
-        self.timefmt = ' [%s]' % self.tag_to_string(h1)
-        img = soup.find(**classes('cover-image__image')).find('img')
-        self.cover_url = img['src']
-
+        soup = self.index_to_soup('https://economist.com/1843/')
         ans = []
-        current_section = articles = None
-
-        for div in soup.findAll(**classes('field-name-field-header node-article')):
-            if 'field-header' in ''.join(div['class']):
-                if current_section and articles:
-                    ans.append((current_section, articles))
-                current_section = self.tag_to_string(div)
-                self.log(current_section)
-                articles = []
-            else:
-                a = div.find('a', href=True)
-                title = self.tag_to_string(a)
-                url = a['href']
-                self.log('\t', title, ' at ', url)
-                desc = ''
-                r = div.find(**classes('article-rubric'))
-                if r is not None:
-                    desc = self.tag_to_string(r)
-                articles.append(
-                    {'title': title, 'url': url, 'description': desc})
 
-        if current_section and articles:
-            ans.append((current_section, articles))
-        return ans
+        for a in soup.findAll(**classes('headline-link')):
+            url = a['href']
+            if url.startswith('/'):
+                url = 'https://economist.com' + url
+            title = self.tag_to_string(a)
+            self.log(title, ' at ', url)
+            desc = ''
+            d = a.parent.findNextSibling(itemprop='description')
+            if d is not None:
+                desc = self.tag_to_string(d)
+            ans.append({'title': title, 'url': url, 'description': desc})
+        return [('Articles', ans)]
+
+    def postprocess_html(self, soup, *a):
+        main = soup.find(id='content')
+        header = soup.find(**classes('article__header'))
+        header.extract()
+        main.insert(0, header)
+        return soup
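The rewritten parse_index builds the article list from the 'headline-link' anchors on https://economist.com/1843/ and returns everything as a single 'Articles' section. A standalone sketch of that scraping logic follows, handy for checking the selectors outside calibre; it is not part of the commit, and it assumes the requests and beautifulsoup4 packages plus the markup the recipe targets (class 'headline-link', itemprop="description").

```python
# Standalone check of the selectors the updated recipe relies on.
# Not part of the commit: a sketch assuming requests and beautifulsoup4 are
# installed and that https://economist.com/1843/ still uses the
# 'headline-link' class and itemprop="description" markup.
import requests
from bs4 import BeautifulSoup

html = requests.get('https://economist.com/1843/',
                    headers={'User-Agent': 'Mozilla/5.0'}).text
soup = BeautifulSoup(html, 'html.parser')

articles = []
for a in soup.find_all(class_='headline-link', href=True):
    url = a['href']
    if url.startswith('/'):
        url = 'https://economist.com' + url
    # Mirror the recipe: the description sits in a sibling of the link's parent.
    d = a.parent.find_next_sibling(itemprop='description')
    desc = d.get_text(strip=True) if d is not None else ''
    articles.append({'title': a.get_text(strip=True), 'url': url, 'description': desc})

# parse_index() must return a list of (section_title, article_list) tuples;
# the updated recipe groups everything under one 'Articles' section.
print(('Articles', articles))
```

If the loop prints nothing, the site markup has likely changed again and the classes() selectors in the recipe will need another update.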