mirror of https://github.com/kovidgoyal/calibre.git
synced 2025-06-23 15:30:45 -04:00
Update New York Magazine
This commit is contained in:
parent 42423d730b
commit d46c192c4c
@@ -23,8 +23,6 @@ class NewYorkMagazine(BasicNewsRecipe):
     no_stylesheets = True
     remove_javascript = True
     encoding = 'utf-8'
-    recursions = 1
-    match_regexps = [r'http://nymag.com/.+/index[0-9]{1,2}.html$']
     keep_only_tags = [
         classes('lede-text headline-primary article-timestamp by-authors'),
         dict(id='main'),
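Note on the hunk above: dropping recursions and match_regexps removes the old multi-page crawl, and the narrowed keep_only_tags relies on calibre's classes() helper rather than hand-written attrs dictionaries. As a rough, non-authoritative sketch (the real helper ships in calibre.web.feeds.news), classes() builds a BeautifulSoup attribute matcher along these lines:

# Approximation of the classes() selector helper used in keep_only_tags above;
# illustrative only, not copied from calibre.
def classes(class_names):
    wanted = frozenset(class_names.split())
    # Match any tag whose class attribute shares at least one name with `wanted`.
    return dict(attrs={
        'class': lambda x: x and frozenset(x.split()).intersection(wanted)})

# e.g. soup.find(**classes('lede-text headline-primary')) matches a tag such as
# <div class="lede-text extra"> because the class sets intersect.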
@@ -35,50 +33,41 @@ class NewYorkMagazine(BasicNewsRecipe):
         dict(id=['minibrowserbox', 'article-related', 'article-tools'])
     ]
     remove_attributes = ['srcset']
-    handle_gzip = True
-
-    PREFIX = 'http://nymag.com'
 
     def nymag_get_index(self):
-        return self.index_to_soup('http://nymag.com/includes/tableofcontents.htm')
+        return self.index_to_soup('https://nymag.com/includes/tableofcontents.htm')
 
     def parse_index(self):
         soup = self.nymag_get_index()
-        self.cover_url = soup.find(attrs={'class': 'cover'}).find('img',
-            src=True).get('src')
+        cdiv = soup.find(**classes('magazine-toc-cover-image-wrap'))
+        if cdiv is not None:
+            for source in cdiv.findAll('source', srcset=True):
+                self.cover_url = source['srcset'].split()[0]
+                self.log('Cover:', self.cover_url)
+                break
         feeds = []
-        current_section = 'Cover Story'
-        current_articles = []
-        for h in soup.findAll(['h4', 'h5']):
-            if h.name == 'h4':
-                if current_section and current_articles:
-                    feeds.append((current_section, current_articles))
-                current_section = self.tag_to_string(h)
-                self.log('\tFound section:', current_section)
-                current_articles = []
-            elif h.name == 'h5':
-                title = self.tag_to_string(h)
-                a = h.find('a', href=True)
-                if a is not None:
-                    url = a.get('href')
-                    if url.startswith('/'):
-                        url = self.PREFIX + url
-                    if title and url:
-                        self.log('\t\tFound article:', title)
-                        self.log('\t\t\t', url)
-                        desc = ''
-                        p = h.findNextSibling('p')
-                        if p is not None:
-                            desc = self.tag_to_string(p)
-                            self.log('\t\t\t', desc)
-                        current_articles.append({'title': title, 'url': url,
-                            'date': '', 'description': desc})
+        for div in soup.findAll(attrs={'data-editable': 'settingTitle'}):
+            section = self.tag_to_string(div).strip().capitalize()
+            articles = []
+            self.log(section)
+            ul = div.findNextSibling('ul')
+            for li in ul.findAll('li'):
+                a = li.find(**classes('article-link'))
+                url = a['href']
+                h3 = li.find('h3')
+                title = self.tag_to_string(h3)
+                desc = ''
+                teaser = h3.findNextSibling(**classes('teaser'))
+                if teaser is not None:
+                    desc = self.tag_to_string(teaser)
+                self.log('\t', title, url)
+                articles.append({'title': title, 'url': url, 'description': desc})
+            if articles:
+                feeds.append((section, articles))
         return feeds
 
-    def postprocess_html(self, soup, first):
-        for x in soup.findAll(attrs={'class': 'page-navigation'}):
-            x.extract()
-        if not first:
-            for x in soup.findAll(attrs={'class': 'header-spacing'}):
-                x.extract()
+    def preprocess_html(self, soup):
+        for img in soup.findAll('img', attrs={'data-src': True}):
+            img['src'] = img['data-src']
         return soup
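The rewritten parse_index pairs each section-title div (data-editable="settingTitle") in the magazine table of contents with the <ul> of article-link entries that follows it. A minimal standalone sketch of that traversal, using BeautifulSoup directly and invented sample markup (the class names mirror the recipe; the HTML itself is illustrative, not taken from nymag.com):

# Standalone sketch of the new TOC traversal; sample markup is made up.
from bs4 import BeautifulSoup

SAMPLE_TOC = '''
<div data-editable="settingTitle">FEATURES</div>
<ul>
  <li>
    <a class="article-link" href="https://nymag.com/example-article/">
      <h3>Example headline</h3>
      <p class="teaser">Example teaser text.</p>
    </a>
  </li>
</ul>
'''

soup = BeautifulSoup(SAMPLE_TOC, 'html.parser')
feeds = []
for div in soup.find_all(attrs={'data-editable': 'settingTitle'}):
    section = div.get_text().strip().capitalize()
    articles = []
    ul = div.find_next_sibling('ul')          # the list of articles for this section
    for li in ul.find_all('li'):
        a = li.find('a', class_='article-link')
        h3 = li.find('h3')
        teaser = h3.find_next_sibling(class_='teaser')
        articles.append({
            'title': h3.get_text(strip=True),
            'url': a['href'],
            'description': teaser.get_text(strip=True) if teaser else '',
        })
    if articles:
        feeds.append((section, articles))
print(feeds)  # [('Features', [{'title': 'Example headline', ...}])]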
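Finally, the multi-page cleanup in postprocess_html is gone; the new preprocess_html simply promotes lazily loaded images by copying data-src into src so the real image URLs get downloaded. The same idea in isolation, again with BeautifulSoup and made-up markup:

# Illustrative only: the data-src -> src promotion performed by the new
# preprocess_html, applied to sample markup rather than a real nymag.com page.
from bs4 import BeautifulSoup

html = '<p><img data-src="https://example.com/photo.jpg" src="placeholder.gif"></p>'
soup = BeautifulSoup(html, 'html.parser')

for img in soup.find_all('img', attrs={'data-src': True}):
    img['src'] = img['data-src']  # point src at the real image URL

print(soup.img['src'])  # https://example.com/photo.jpg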