#!/usr/bin/env python
__license__ = 'GPL v3'
'''
sciencenews.org
'''

import re

from calibre.web.feeds.news import BasicNewsRecipe, prefixed_classes


class ScienceNewsIssue(BasicNewsRecipe):
    title = u'Science News'
    description = ("Science News is an award-winning bi-weekly newsmagazine covering the most important research"
                   " in all fields of science. This recipe downloads all the articles from the latest issue.")
    category = u'Science, Technology, News'
    publisher = u'Society for Science & the Public'
    language = 'en'
    no_stylesheets = True
    use_embedded_content = False
    auto_cleanup = False
    remove_attributes = ['height', 'width', 'style']
    ignore_duplicate_articles = {'url'}
    resolve_internal_links = True

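    # The site's CSS class names carry build-generated suffixes, so the content
    # containers below are matched on their stable prefixes only.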
    keep_only_tags = [
        dict(
            attrs={
                'class':
                lambda x: x and (
                    'single__content___' in x or 'header-default__title___' in x or
                    'header-default__deck___' in x or 'header-default__figure___' in x
                )
            }
        )
    ]
    remove_tags = [
        dict(name=['svg', 'button']),
        dict(
            attrs={'class': lambda x: x and ('newsletter-signup__wrapper___' in x)}
        )
    ]

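    # parse_index builds the issue from the magazine archive page rather than an RSS
    # feed; it returns the list of (section title, article list) tuples calibre expects.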
    def parse_index(self):
        # The first issue thumbnail on the archive page is taken as the latest issue;
        # the last segment of its URL is reused as the issue label in timefmt.
        index = self.index_to_soup('https://www.sciencenews.org/sn-magazine')
        a = index.find(**prefixed_classes('magazine-archive__issue-thumbnail___'))
        url = a['href']
        self.timefmt = ' [' + url.split('/')[-1] + ']'
        self.cover_url = a.img['src']

        # Get the articles from the issue's table of contents
        soup = self.index_to_soup(url)
        soup = soup.find('main', attrs={'id': 'content'})
        re_article = re.compile(r'https://www\.sciencenews\.org/article/')
        stories = []
        past_urls = set()
        for sec in soup.find_all(href=re_article):
            article_url = sec['href']
            article_title = sec.text.strip()

            # Skip image links that carry no text title
            if article_title == '':
                continue

            # Skip duplicate links
            if article_url in past_urls:
                continue

            past_urls.add(article_url)
            self.log('\t', article_title, ' ', article_url)
            article_info = {
                'url': article_url,
                'title': article_title,
            }
            stories.append(article_info)

        index = [
            ('Articles', stories),
        ]
        return index
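
# A quick way to try a modified copy of this recipe from the command line, assuming it
# is saved as science_news.recipe (the filename here is only an example):
#   ebook-convert science_news.recipe .epub --test -vv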