#!/usr/bin/env python2
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'

import json
from urllib import unquote
from collections import defaultdict

from calibre.web.feeds.news import BasicNewsRecipe
from calibre import browser


def absurl(x):
    if x.startswith('/') and not x.startswith('//'):
        x = 'https://www.newyorker.com' + x
    return x
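
# A minimal sketch of the behaviour (hypothetical values, not part of the
# recipe):
#   absurl('/magazine/2017/07/10')
#   # -> 'https://www.newyorker.com/magazine/2017/07/10'
#   # Protocol-relative ('//...') and already-absolute URLs pass through
#   # unchanged.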


class Tag(list):
    # A minimal HTML element: children are the list items, attributes are
    # kept in a dict, and __str__ serializes the whole subtree to markup.
    # Used by deserialize() below to rebuild article HTML from JSON.

    def __init__(self, name, **attrs):
        self.name = name
        self.attrs = attrs

    def __str__(self):
        ans = ['<' + self.name]
        for k, v in self.attrs.iteritems():
            ans.append(' {}="{}"'.format(k, v))
        ans.append('>')
        for child in self:
            ans.append(unicode(child))
        ans.append('</{}>'.format(self.name))
        return ''.join(ans)
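
# A quick sketch of how Tag serializes (hypothetical values, for
# illustration only):
#   t = Tag('p', id='lede')
#   t.append('A quick ')
#   em = Tag('em')
#   em.append('example')
#   t.append(em)
#   str(t)  # -> '<p id="lede">A quick <em>example</em></p>'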


def deserialize(node):
    # Convert one node of the site's JSON article tree (a list whose first
    # item is the tag name, followed by child nodes) into a Tag.
    name = node.pop(0)
    if name == 'inline-embed':
        meta = node.pop(0)
        t = meta['type']
        if t in ('image', 'cartoon'):
            meta = json.loads(unquote(meta['meta']))
            ans = Tag('img', src=absurl(meta['url']))
        elif t == 'section':
            ans = Tag('div')
        else:
            ans = Tag('span')
    else:
        ans = Tag(name)
    for child in node:
        if isinstance(child, list):
            ans.append(deserialize(child))
        elif isinstance(child, basestring):
            ans.append(child)
    return ans
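
# Sketch of the node shape deserialize() consumes (hypothetical input,
# for illustration only):
#   deserialize(['p', 'Read ', ['em', 'this']])
#   # -> a Tag that str() renders as '<p>Read <em>this</em></p>'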


class NewYorker(BasicNewsRecipe):

    title = u'New Yorker Magazine'
    description = u'Content from the New Yorker website'

    url_list = []
    language = 'en'
    __author__ = 'Kovid Goyal'
    no_stylesheets = True
    timefmt = ' [%b %d]'
    encoding = 'utf-8'
    extra_css = '''
        .byline { font-size:xx-small; font-weight: bold;}
        h3 { margin-bottom: 6px; }
        .caption { font-size: xx-small; font-style: italic; font-weight: normal; }
    '''
    needs_subscription = 'optional'

    keep_only_tags = [
        dict(attrs={'class': lambda x: x and 'ArticleHeader__hed___' in x}),
        dict(attrs={'class': lambda x: x and 'ArticleHeader__dek___' in x}),
        dict(attrs={'class': lambda x: x and 'Byline__articleHeader___' in x}),
        dict(attrs={'class': lambda x: x and 'ArticleLedeImage__container___' in x}),
        dict(itemprop=['headline', 'alternativeHeadline']),
        dict(name='h1'),
        dict(attrs={'class': lambda x: x and 'byline-and-date' in x}),
        dict(attrs={'class': lambda x: x and 'inset-mobile-crop-image' in x}),
        dict(attrs={'class': lambda x: x and 'hero-image-caption' in x}),
        dict(id='articleBody'),
        dict(attrs={'class': lambda x: x and 'ArticleDisclaimer__articleDisclaimer___' in x}),
        dict(attrs={'class': lambda x: x and 'ArticleContributors__bio___' in x}),
    ]
    remove_tags = [
        dict(attrs={'class': lambda x: x and set(x.split()).intersection(
            {'content-ad-wrapper', 'social-hover', 'background-image'})}),
        dict(id=['newsletter-signup']),
        dict(name='meta links source'.split()),
    ]

    # def preprocess_raw_html(self, raw, url):
    #     import re
    #     try:
    #         raw = re.search(r'window.__TNY__.INITIAL_STATE = ({.+?)</script', raw).group(1)
    #     except AttributeError:
    #         return raw
    #     data = json.loads(raw.strip().rstrip(';'))
    #     return '<html><body><div id="articleBody">' + unicode(deserialize(data['primary']['body']))
    #
    def parse_index(self):
        soup = self.index_to_soup(
            'https://www.newyorker.com/magazine?intcid=magazine')
        # soup = self.index_to_soup('file:///t/raw.html')
        cover_img = soup.find(attrs={'class': lambda x: x and 'MagazineCover__cover___' in x})
        if cover_img is not None:
            cover_img = cover_img.find('img')
            if cover_img is not None:
                self.cover_url = cover_img.get('src', cover_img.get('data-src', cover_img.get('srcset').split()[0]))
                self.log('Found cover:', self.cover_url)
        stories = defaultdict(list)
        last_section = 'Unknown'
        for story in soup.findAll(attrs={'class': lambda x: x and 'River__riverItemContent___' in x}):
            try:
                section = self.tag_to_string(story.find('a')['title']) or last_section
            except KeyError:
                section = last_section
            last_section = section
            a = story.find('h4').find('a')
            title = a.contents[1]
            url = absurl(a['href'])
            desc = ''
            body = story.find(attrs={'class': 'River__dek___CayIg'})
            if body is not None:
                desc = body.contents[0]
            self.log('Found article:', title)
            self.log('\t' + url)
            self.log('\t' + desc)
            self.log('')
            stories[section].append({'title': title, 'url': url, 'description': desc})

        return [(k, stories[k]) for k in sorted(stories)]
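
    # The return value is the index structure BasicNewsRecipe expects:
    # a list of (section_title, articles) tuples, e.g. (values hypothetical):
    #   [('Fiction', [{'title': 'A Story', 'url': 'https://...',
    #                  'description': '...'}]), ...]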

    def preprocess_html(self, soup):
        # Promote lazy-loaded image URLs (first srcset candidate or the
        # mobile data attribute) into src so the images survive conversion.
        for attr in 'srcset data-src-mobile'.split():
            for img in soup.findAll('img'):
                try:
                    ds = img[attr].split()[0]
                    del img[attr]
                except KeyError:
                    continue
                if ds:
                    img['src'] = ds
        return soup
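
    # For example (hypothetical markup), an image such as
    #   <img srcset="https://example.com/a.jpg 640w, https://example.com/b.jpg 1280w">
    # comes out as
    #   <img src="https://example.com/a.jpg">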

    # The New Yorker changes the content it delivers based on cookies, so the
    # following ensures that we send no cookies
    def get_browser(self, *args, **kwargs):
        return self

    def clone_browser(self, *args, **kwargs):
        return self.get_browser()

    def open_novisit(self, *args, **kwargs):
        br = browser()
        return br.open_novisit(*args, **kwargs)
    open = open_novisit
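
    # Note: the recipe object stands in for the browser here. Every call to
    # open()/open_novisit() builds a fresh browser via calibre's browser(),
    # so no cookie jar persists between requests.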