Unify the two Atlantic recipes

This commit is contained in:
Kovid Goyal 2018-04-01 13:05:53 +05:30
parent 5320035eff
commit 2b487cc2f1
No known key found for this signature in database
GPG Key ID: 06BC317B515ACE7C
2 changed files with 243 additions and 98 deletions

View File

@ -3,13 +3,14 @@ from __future__ import unicode_literals
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>' __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
''' '''
theatlantic.com Recipe for web and magazine versions of The Atlantic
''' '''
import html5lib
from lxml import html
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
web_version = False
def classes(classes): def classes(classes):
q = frozenset(classes.split(' ')) q = frozenset(classes.split(' '))
return dict( return dict(
@ -19,10 +20,15 @@ def classes(classes):
class TheAtlantic(BasicNewsRecipe): class TheAtlantic(BasicNewsRecipe):
title = 'The Atlantic' if web_version:
title = 'TheAtlantic.com'
description = 'News and editorial about politics, culture, entertainment, tech, etc. Contains many articles not seen in The Atlantic magazine'
else:
title = 'The Atlantic'
description = 'Current affairs and politics focussed on the US'
INDEX = 'http://www.theatlantic.com/magazine/'
__author__ = 'Kovid Goyal' __author__ = 'Kovid Goyal'
description = 'Current affairs and politics focussed on the US'
INDEX = 'http://www.theatlantic.com/magazine/'
language = 'en' language = 'en'
encoding = 'utf-8' encoding = 'utf-8'
@ -71,67 +77,85 @@ class TheAtlantic(BasicNewsRecipe):
br.set_cookie('inEuropeanUnion', '0', '.theatlantic.com') br.set_cookie('inEuropeanUnion', '0', '.theatlantic.com')
return br return br
def preprocess_raw_html(self, raw, url):
return html.tostring(
html5lib.parse(raw, treebuilder='lxml', namespaceHTMLElements=False),
method='html',
encoding=unicode
)
def print_version(self, url):
return url + '?single_page=true'
def preprocess_html(self, soup): def preprocess_html(self, soup):
for img in soup.findAll('img', attrs={'data-srcset': True}): for img in soup.findAll('img', attrs={'data-srcset': True}):
img['src'] = img['data-srcset'].split()[0] img['src'] = img['data-srcset'].split()[0]
return soup return soup
def parse_index(self): def print_version(self, url):
soup = self.index_to_soup(self.INDEX) return url.partition('?')[0] + '?single_page=true'
figure = soup.find('figure', id='cover-image')
if figure is not None: if web_version:
img = figure.find('img', src=True)
if img: use_embedded_content = False
self.cover_url = img['src']
current_section, current_articles = 'Cover Story', [] feeds = [
feeds = [] ('The Atlantic', 'http://www.theatlantic.com/feed/all/'),
for div in soup.findAll('div', attrs={'class': lambda x: x and set(x.split()).intersection({'top-sections', 'bottom-sections'})}): ('Best of The Atlantic', 'http://www.theatlantic.com/feed/best-of/'),
for h2 in div.findAll('h2', attrs={'class': True}): ('Politics | The Atlantic', 'http://www.theatlantic.com/feed/channel/politics/'),
if 'section-name' in h2['class'].split(): ('Business | The Atlantic', 'http://www.theatlantic.com/feed/channel/business/'),
if current_articles: ('Culture | The Atlantic', 'http://www.theatlantic.com/feed/channel/entertainment/'),
feeds.append((current_section, current_articles)) ('Global | The Atlantic', 'http://www.theatlantic.com/feed/channel/international/'),
current_articles = [] ('Technology | The Atlantic', 'http://www.theatlantic.com/feed/channel/technology/'),
current_section = self.tag_to_string(h2) ('U.S. | The Atlantic', 'http://www.theatlantic.com/feed/channel/national/'),
self.log('\nFound section:', current_section) ('Health | The Atlantic', 'http://www.theatlantic.com/feed/channel/health/'),
elif 'hed' in h2['class'].split(): ('Video | The Atlantic', 'http://www.theatlantic.com/feed/channel/video/'),
title = self.tag_to_string(h2) ('Sexes | The Atlantic', 'http://www.theatlantic.com/feed/channel/sexes/'),
a = h2.findParent('a', href=True) ('Education | The Atlantic', 'http://www.theatlantic.com/feed/channel/education/'),
url = a['href'] ('Science | The Atlantic', 'http://www.theatlantic.com/feed/channel/science/'),
if url.startswith('/'): ('News | The Atlantic', 'http://www.theatlantic.com/feed/channel/news/'),
url = 'http://www.theatlantic.com' + url ('Press Releases | The Atlantic', 'http://www.theatlantic.com/feed/channel/press-releases/'),
li = a.findParent( ('Newsletters | The Atlantic', 'http://www.theatlantic.com/feed/channel/newsletters/'),
'li', ('The Atlantic Photo', 'http://feeds.feedburner.com/theatlantic/infocus'),
attrs={'class': lambda x: x and 'article' in x.split()} ('Notes | The Atlantic', 'http://feeds.feedburner.com/TheAtlanticNotes'),
) ]
desc = '' else:
dek = li.find( def parse_index(self):
attrs={'class': lambda x: x and 'dek' in x.split()} soup = self.index_to_soup(self.INDEX)
) figure = soup.find('figure', id='cover-image')
if dek is not None: if figure is not None:
desc += self.tag_to_string(dek) img = figure.find('img', src=True)
byline = li.find( if img:
attrs={'class': lambda x: x and 'byline' in x.split()} self.cover_url = img['src']
) current_section, current_articles = 'Cover Story', []
if byline is not None: feeds = []
desc += ' -- ' + self.tag_to_string(byline) for div in soup.findAll('div', attrs={'class': lambda x: x and set(x.split()).intersection({'top-sections', 'bottom-sections'})}):
self.log('\t', title, 'at', url) for h2 in div.findAll('h2', attrs={'class': True}):
if desc: if 'section-name' in h2['class'].split():
self.log('\t\t', desc) if current_articles:
current_articles.append({ feeds.append((current_section, current_articles))
'title': title, current_articles = []
'url': url, current_section = self.tag_to_string(h2)
'description': desc self.log('\nFound section:', current_section)
}) elif 'hed' in h2['class'].split():
if current_articles: title = self.tag_to_string(h2)
feeds.append((current_section, current_articles)) a = h2.findParent('a', href=True)
return feeds url = a['href']
if url.startswith('/'):
url = 'http://www.theatlantic.com' + url
li = a.findParent(
'li',
attrs={'class': lambda x: x and 'article' in x.split()}
)
desc = ''
dek = li.find(
attrs={'class': lambda x: x and 'dek' in x.split()}
)
if dek is not None:
desc += self.tag_to_string(dek)
byline = li.find(
attrs={'class': lambda x: x and 'byline' in x.split()}
)
if byline is not None:
desc += ' -- ' + self.tag_to_string(byline)
self.log('\t', title, 'at', url)
if desc:
self.log('\t\t', desc)
current_articles.append({
'title': title,
'url': url,
'description': desc
})
if current_articles:
feeds.append((current_section, current_articles))
return feeds

View File

@ -1,40 +1,161 @@
#!/usr/bin/env python2 #!/usr/bin/env python2
# vim:fileencoding=utf-8 from __future__ import unicode_literals
from __future__ import unicode_literals, division, absolute_import, print_function __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''
Recipe for web and magazine versions of The Atlantic
'''
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1421956712(BasicNewsRecipe): web_version = True
title = 'TheAtlantic.com'
__author__ = 'ebrandon'
def classes(classes):
q = frozenset(classes.split(' '))
return dict(
attrs={'class': lambda x: x and frozenset(x.split()).intersection(q)}
)
class TheAtlantic(BasicNewsRecipe):
if web_version:
title = 'TheAtlantic.com'
description = 'News and editorial about politics, culture, entertainment, tech, etc. Contains many articles not seen in The Atlantic magazine'
else:
title = 'The Atlantic'
description = 'Current affairs and politics focussed on the US'
INDEX = 'http://www.theatlantic.com/magazine/'
__author__ = 'Kovid Goyal'
language = 'en' language = 'en'
description = 'News and editorial about politics, culture, entertainment, tech, etc. Contains many articles not seen in The Atlantic magazine' encoding = 'utf-8'
oldest_article = 7
max_articles_per_feed = 100 keep_only_tags = [
auto_cleanup = True classes(
ignore_duplicate_articles = {'title', 'url'} 'article-header lead-img article-cover-extra article-body article-magazine article-cover-content'
),
{
'name': ['img']
},
]
remove_tags = [
classes(
'social-kit-top letter-writer-info callout secondary-byline embed-wrapper offset-wrapper boxtop-most-popular'
),
{
'name': ['meta', 'link', 'noscript', 'aside', 'h3']
},
{
'attrs': {
'class': ['offset-wrapper', 'boxtop-most-popular']
}
},
{
'attrs': {
'class': lambda x: x and 'article-tools' in x
}
},
{
'src': lambda x: x and 'spotxchange.com' in x
},
]
remove_tags_after = classes('article-body')
no_stylesheets = True
remove_attributes = ['style']
extra_css = '''
.credit { text-align: right; font-size: 75%; display: block }
.figcaption { font-size: 75% }
.caption { font-size: 75% }
.lead-img { display: block }
'''
def get_browser(self):
br = BasicNewsRecipe.get_browser(self)
br.set_cookie('inEuropeanUnion', '0', '.theatlantic.com')
return br
def preprocess_html(self, soup):
for img in soup.findAll('img', attrs={'data-srcset': True}):
img['src'] = img['data-srcset'].split()[0]
return soup
def print_version(self, url): def print_version(self, url):
return url.replace('/archive/', '/print/') return url.partition('?')[0] + '?single_page=true'
# Feed are found here: http://www.theatlantic.com/follow-the-atlantic/#follow-rssfeeds if web_version:
feeds = [
('The Atlantic', 'http://www.theatlantic.com/feed/all/'), use_embedded_content = False
('Best of The Atlantic', 'http://www.theatlantic.com/feed/best-of/'),
('Politics | The Atlantic', 'http://www.theatlantic.com/feed/channel/politics/'), feeds = [
('Business | The Atlantic', 'http://www.theatlantic.com/feed/channel/business/'), ('The Atlantic', 'http://www.theatlantic.com/feed/all/'),
('Culture | The Atlantic', 'http://www.theatlantic.com/feed/channel/entertainment/'), ('Best of The Atlantic', 'http://www.theatlantic.com/feed/best-of/'),
('Global | The Atlantic', 'http://www.theatlantic.com/feed/channel/international/'), ('Politics | The Atlantic', 'http://www.theatlantic.com/feed/channel/politics/'),
('Technology | The Atlantic', 'http://www.theatlantic.com/feed/channel/technology/'), ('Business | The Atlantic', 'http://www.theatlantic.com/feed/channel/business/'),
('U.S. | The Atlantic', 'http://www.theatlantic.com/feed/channel/national/'), ('Culture | The Atlantic', 'http://www.theatlantic.com/feed/channel/entertainment/'),
('Health | The Atlantic', 'http://www.theatlantic.com/feed/channel/health/'), ('Global | The Atlantic', 'http://www.theatlantic.com/feed/channel/international/'),
('Video | The Atlantic', 'http://www.theatlantic.com/feed/channel/video/'), ('Technology | The Atlantic', 'http://www.theatlantic.com/feed/channel/technology/'),
('Sexes | The Atlantic', 'http://www.theatlantic.com/feed/channel/sexes/'), ('U.S. | The Atlantic', 'http://www.theatlantic.com/feed/channel/national/'),
('Education | The Atlantic', 'http://www.theatlantic.com/feed/channel/education/'), ('Health | The Atlantic', 'http://www.theatlantic.com/feed/channel/health/'),
('Science | The Atlantic', 'http://www.theatlantic.com/feed/channel/science/'), ('Video | The Atlantic', 'http://www.theatlantic.com/feed/channel/video/'),
('News | The Atlantic', 'http://www.theatlantic.com/feed/channel/news/'), ('Sexes | The Atlantic', 'http://www.theatlantic.com/feed/channel/sexes/'),
('Press Releases | The Atlantic', 'http://www.theatlantic.com/feed/channel/press-releases/'), ('Education | The Atlantic', 'http://www.theatlantic.com/feed/channel/education/'),
('Newsletters | The Atlantic', 'http://www.theatlantic.com/feed/channel/newsletters/'), ('Science | The Atlantic', 'http://www.theatlantic.com/feed/channel/science/'),
('The Atlantic Photo', 'http://feeds.feedburner.com/theatlantic/infocus'), ('News | The Atlantic', 'http://www.theatlantic.com/feed/channel/news/'),
('Notes | The Atlantic', 'http://feeds.feedburner.com/TheAtlanticNotes'), ('Press Releases | The Atlantic', 'http://www.theatlantic.com/feed/channel/press-releases/'),
] ('Newsletters | The Atlantic', 'http://www.theatlantic.com/feed/channel/newsletters/'),
('The Atlantic Photo', 'http://feeds.feedburner.com/theatlantic/infocus'),
('Notes | The Atlantic', 'http://feeds.feedburner.com/TheAtlanticNotes'),
]
else:
def parse_index(self):
soup = self.index_to_soup(self.INDEX)
figure = soup.find('figure', id='cover-image')
if figure is not None:
img = figure.find('img', src=True)
if img:
self.cover_url = img['src']
current_section, current_articles = 'Cover Story', []
feeds = []
for div in soup.findAll('div', attrs={'class': lambda x: x and set(x.split()).intersection({'top-sections', 'bottom-sections'})}):
for h2 in div.findAll('h2', attrs={'class': True}):
if 'section-name' in h2['class'].split():
if current_articles:
feeds.append((current_section, current_articles))
current_articles = []
current_section = self.tag_to_string(h2)
self.log('\nFound section:', current_section)
elif 'hed' in h2['class'].split():
title = self.tag_to_string(h2)
a = h2.findParent('a', href=True)
url = a['href']
if url.startswith('/'):
url = 'http://www.theatlantic.com' + url
li = a.findParent(
'li',
attrs={'class': lambda x: x and 'article' in x.split()}
)
desc = ''
dek = li.find(
attrs={'class': lambda x: x and 'dek' in x.split()}
)
if dek is not None:
desc += self.tag_to_string(dek)
byline = li.find(
attrs={'class': lambda x: x and 'byline' in x.split()}
)
if byline is not None:
desc += ' -- ' + self.tag_to_string(byline)
self.log('\t', title, 'at', url)
if desc:
self.log('\t\t', desc)
current_articles.append({
'title': title,
'url': url,
'description': desc
})
if current_articles:
feeds.append((current_section, current_articles))
return feeds