Mirror of https://github.com/kovidgoyal/calibre.git
Update Entrepreneur Magazine
Fixes #1945569 [Fetching news from Entrepreneur Magazine fails](https://bugs.launchpad.net/calibre/+bug/1945569)
This commit is contained in:
parent 5862e8057a
commit afe7d69681
@@ -23,40 +23,31 @@ class EntrepeneurMagRecipe(BasicNewsRecipe):
     remove_javascript = True

     keep_only_tags = [
-        dict(attrs={'class': ['headline', 'hero topimage']}),
-        dict(itemprop='articlebody'),
+        dict(attrs={'data-word-count': True}),
     ]
     remove_tags = [
         dict(attrs={'class': ['related-content']}),
     ]
     remove_attributes = ['style']

-    INDEX = 'http://www.entrepreneur.com'
+    INDEX = 'https://www.entrepreneur.com'

     def parse_index(self):
-        root = self.index_to_soup(
-            self.INDEX + '/magazine/index.html', as_tree=True)
-        for href in root.xpath('//h2[@class="sectiontitle nb"]/a/@href'):
-            return self.parse_ent_index(self.INDEX + href)
-
-    def parse_ent_index(self, url):
-        root = self.index_to_soup(url, as_tree=True)
-        img = root.xpath('//a[@class="hero"]/img[@class="lazy"]')[0]
-        self.cover_url = img.get('data-original')
-        self.timefmt = ' [%s]' % img.get('alt').rpartition('-')[-1].strip()
-        body = root.xpath('//div[@id="latest"]')[0]
-        ans = []
-        for x in body.xpath('descendant::h3'):
-            title = self.tag_to_string(x)
-            try:
-                a = x.xpath('./a')[0]
-            except IndexError:
-                continue
-            url = self.INDEX + a.get('href')
-            d = x.getnext()
-            desc = self.tag_to_string(d) if d is not None else ''
-            self.log('\t', title, 'at:', url)
-            self.log('\t\t', desc)
-            ans.append({'title': title, 'url': url, 'description': desc})
-
-        return [('Articles', ans)]
+        soup = self.index_to_soup(self.INDEX + '/latest')
+        articles = []
+        for h3 in soup.findAll('h3'):
+            a = h3.parent
+            if a.name == 'a' and a.get('href'):
+                url = self.INDEX + a['href']
+                title = self.tag_to_string(h3)
+                desc = ''
+                if a.next_sibling and a.next_sibling.name == 'p':
+                    desc = self.tag_to_string(a.next_sibling)
+                articles.append({'title': title, 'url': url, 'description': desc})
+                self.log(title, url)
+        return [('Articles', articles)]
+
+    def preprocess_html(self, soup):
+        for img in soup.findAll('img', attrs={'data-src': True}):
+            img['src'] = img['data-src']
+        return soup
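For context, the rewritten parse_index walks every h3 heading on the /latest listing page, treats an enclosing anchor tag as the article link, and takes an adjacent paragraph as the description. Below is a minimal standalone sketch of that same pattern using requests and bs4 instead of calibre's BasicNewsRecipe helpers (index_to_soup, tag_to_string); the function name, the use of requests, and the find_next_sibling call are illustrative assumptions, not part of the recipe.

import requests
from bs4 import BeautifulSoup

INDEX = 'https://www.entrepreneur.com'


def list_latest_articles():
    # Fetch the /latest listing page, as the updated parse_index does.
    soup = BeautifulSoup(requests.get(INDEX + '/latest').text, 'html.parser')
    articles = []
    for h3 in soup.find_all('h3'):
        a = h3.parent
        # Article headlines are <h3> tags wrapped directly in an <a> link.
        if a.name == 'a' and a.get('href'):
            url = INDEX + a['href']
            title = h3.get_text(strip=True)
            desc = ''
            # An optional <p> sibling of the link carries the teaser text.
            sib = a.find_next_sibling()
            if sib is not None and sib.name == 'p':
                desc = sib.get_text(strip=True)
            articles.append({'title': title, 'url': url, 'description': desc})
    return articles


if __name__ == '__main__':
    for art in list_latest_articles():
        print(art['title'], '->', art['url'])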
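The new preprocess_html hook copies data-src into src on every image, which suggests the site lazy-loads images, keeping the real URL in data-src while src holds a placeholder. A tiny sketch of that transformation on a made-up HTML fragment (the sample markup is invented for illustration; the attribute handling matches the diff):

from bs4 import BeautifulSoup

html = '<p><img src="spacer.gif" data-src="https://assets.entrepreneur.com/article.jpg"></p>'
soup = BeautifulSoup(html, 'html.parser')

# Promote the lazy-load URL stored in data-src to src so the real image gets downloaded.
for img in soup.find_all('img', attrs={'data-src': True}):
    img['src'] = img['data-src']

print(soup)  # the <img> tag now carries its real URL in src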