Update New York Review of Books

Fixes #1235790 [Private bug](https://bugs.launchpad.net/calibre/+bug/1235790)
This commit is contained in:
Kovid Goyal 2013-10-09 10:31:11 +05:30
parent 5246859ad0
commit e1b25b6f52
2 changed files with 76 additions and 60 deletions

View File

@ -1,4 +1,3 @@
#!/usr/bin/env python #!/usr/bin/env python
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net' __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
@ -11,6 +10,9 @@ import re
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
def find_header(tag):
    """Return True if *tag* is the article's <header> element.

    Used as a ``name=`` predicate in ``keep_only_tags``, so it is probed
    against every tag in the soup.  It matches a <header> whose parent
    carries class="article".  Uses ``.get()`` so tags whose parent has no
    class attribute are simply rejected instead of raising KeyError, and
    guards against parentless tags.
    """
    parent = tag.parent
    return (tag.name == 'header' and parent is not None
            and parent.get('class') == 'article')
class NewYorkReviewOfBooks(BasicNewsRecipe): class NewYorkReviewOfBooks(BasicNewsRecipe):
title = u'New York Review of Books' title = u'New York Review of Books'
@ -23,65 +25,70 @@ class NewYorkReviewOfBooks(BasicNewsRecipe):
no_javascript = True no_javascript = True
needs_subscription = True needs_subscription = True
keep_only_tags = [dict(id=['article-body','page-title'])] keep_only_tags = [
remove_tags = [dict(attrs={'class':['article-tools', 'article-links', dict(name='section', attrs={'class':'article_body'}),
'center advertisement']})] dict(name=find_header),
dict(name='div', attrs={'class':'for-subscribers-only'}),
]
preprocess_regexps = [(re.compile(r'<head>.*?</head>', re.DOTALL), lambda preprocess_regexps = [(re.compile(r'<head>.*?</head>', re.DOTALL), lambda
m:'<head></head>')] m:'<head></head>')]
def print_version(self, url):
    """Return the printable, single-page variant of *url* by appending
    the site's pagination=false query flag."""
    suffix = '?pagination=false'
    return url + suffix
def get_browser(self): def get_browser(self):
br = BasicNewsRecipe.get_browser(self) br = BasicNewsRecipe.get_browser(self)
br.open('http://www.nybooks.com/account/signin/') br.open('http://www.nybooks.com/account/signin/')
br.select_form(nr = 1) br.select_form(nr=2)
br['username'] = self.username br['username'] = self.username
br['password'] = self.password br['password'] = self.password
br.submit() br.submit()
return br return br
def print_version(self, url): def preprocess_html(self, soup):
return url+'?pagination=false' header = soup.find('header')
body = soup.find('body')
body.insert(0, header)
header.find('div', attrs={'class':'details'}).extract()
for i in soup.findAll('input'):
i.extract()
return soup
def parse_index(self): def parse_index(self):
soup = self.index_to_soup('http://www.nybooks.com/current-issue') soup = self.index_to_soup('http://www.nybooks.com/current-issue')
# Find cover # Find cover
sidebar = soup.find(id='sidebar') sidebar = soup.find('div', attrs={'class':'issue_cover'})
if sidebar is not None: if sidebar is not None:
a = sidebar.find('a', href=lambda x: x and 'view-photo' in x) img = sidebar.find('img', src=True)
if a is not None: self.cover_url = 'http://www.nybooks.com' + img['src']
psoup = self.index_to_soup('http://www.nybooks.com'+a['href']) self.log('Found cover at:', self.cover_url)
cover = psoup.find('img', src=True)
self.cover_url = cover['src']
self.log('Found cover at:', self.cover_url)
# Find date # Find date
div = soup.find(id='page-title') div = soup.find('time', pubdate='pubdate')
if div is not None: if div is not None:
h5 = div.find('h5') text = self.tag_to_string(div)
if h5 is not None: date = text.partition(u'\u2022')[0].strip()
text = self.tag_to_string(h5) self.timefmt = u' [%s]'%date
date = text.partition(u'\u2022')[0].strip() self.log('Issue date:', date)
self.timefmt = u' [%s]'%date
self.log('Issue date:', date)
# Find TOC # Find TOC
tocs = soup.findAll('ul', attrs={'class':'issue-article-list'}) toc = soup.find('div', attrs={'class':'current_issue'}).find('div', attrs={'class':'articles_list'})
articles = [] articles = []
for toc in tocs: for div in toc.findAll('div', attrs={'class':'row'}):
for li in toc.findAll('li'): h2 = div.find('h2')
h3 = li.find('h3') title = self.tag_to_string(h2).strip()
title = self.tag_to_string(h3) author = self.tag_to_string(div.find('div', attrs={'class':'author'})).strip()
author = self.tag_to_string(li.find('h4')) title = title + u' (%s)'%author
title = title + u' (%s)'%author url = 'http://www.nybooks.com' + h2.find('a', href=True)['href']
url = 'http://www.nybooks.com'+h3.find('a', href=True)['href'] desc = ''
desc = '' for p in div.findAll('p', attrs={'class':lambda x: x and 'quiet' in x}):
for p in li.findAll('p'): desc += self.tag_to_string(p)
desc += self.tag_to_string(p) self.log('Found article:', title)
self.log('Found article:', title) self.log('\t', url)
self.log('\t', url) self.log('\t', desc)
self.log('\t', desc) articles.append({'title':title, 'url':url, 'date':'',
articles.append({'title':title, 'url':url, 'date':'',
'description':desc}) 'description':desc})
return [('Current Issue', articles)] return [('Current Issue', articles)]

View File

@ -10,6 +10,9 @@ import re
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
def find_header(tag):
    """Return True if *tag* is the article's <header> element.

    Used as a ``name=`` predicate in ``keep_only_tags``, so it is probed
    against every tag in the soup.  It matches a <header> whose parent
    carries class="article".  Uses ``.get()`` so tags whose parent has no
    class attribute are simply rejected instead of raising KeyError, and
    guards against parentless tags.
    """
    parent = tag.parent
    return (tag.name == 'header' and parent is not None
            and parent.get('class') == 'article')
class NewYorkReviewOfBooks(BasicNewsRecipe): class NewYorkReviewOfBooks(BasicNewsRecipe):
title = u'New York Review of Books (no subscription)' title = u'New York Review of Books (no subscription)'
@ -21,9 +24,11 @@ class NewYorkReviewOfBooks(BasicNewsRecipe):
no_stylesheets = True no_stylesheets = True
no_javascript = True no_javascript = True
keep_only_tags = [dict(id=['article-body', 'page-title'])] keep_only_tags = [
remove_tags = [dict(attrs={'class':['article-tools', 'article-links', dict(name='section', attrs={'class':'article_body'}),
'center advertisement']})] dict(name=find_header),
dict(name='div', attrs={'class':'for-subscribers-only'}),
]
preprocess_regexps = [(re.compile(r'<head>.*?</head>', re.DOTALL), lambda preprocess_regexps = [(re.compile(r'<head>.*?</head>', re.DOTALL), lambda
m:'<head></head>')] m:'<head></head>')]
@ -31,40 +36,44 @@ class NewYorkReviewOfBooks(BasicNewsRecipe):
def print_version(self, url): def print_version(self, url):
return url+'?pagination=false' return url+'?pagination=false'
def preprocess_html(self, soup):
    """Clean up a downloaded article page.

    Moves the article <header> to the top of <body>, removes the header's
    "details" block, and strips all <input> form elements.  Every lookup
    is guarded so a page lacking any of these elements is passed through
    unchanged instead of raising AttributeError on None and aborting the
    article download.
    """
    header = soup.find('header')
    body = soup.find('body')
    if header is not None and body is not None:
        body.insert(0, header)
        # Drop the byline/metadata box that would otherwise be duplicated.
        details = header.find('div', attrs={'class': 'details'})
        if details is not None:
            details.extract()
    # Remove leftover form controls (search boxes, subscription widgets).
    for inp in soup.findAll('input'):
        inp.extract()
    return soup
def parse_index(self): def parse_index(self):
soup = self.index_to_soup('http://www.nybooks.com/current-issue') soup = self.index_to_soup('http://www.nybooks.com/current-issue')
# Find cover # Find cover
sidebar = soup.find(id='sidebar') sidebar = soup.find('div', attrs={'class':'issue_cover'})
if sidebar is not None: if sidebar is not None:
a = sidebar.find('a', href=lambda x: x and 'view-photo' in x) img = sidebar.find('img', src=True)
if a is not None: self.cover_url = 'http://www.nybooks.com' + img['src']
psoup = self.index_to_soup('http://www.nybooks.com'+a['href']) self.log('Found cover at:', self.cover_url)
cover = psoup.find('img', src=True)
self.cover_url = cover['src']
self.log('Found cover at:', self.cover_url)
# Find date # Find date
div = soup.find(id='page-title') div = soup.find('time', pubdate='pubdate')
if div is not None: if div is not None:
h5 = div.find('h5') text = self.tag_to_string(div)
if h5 is not None: date = text.partition(u'\u2022')[0].strip()
text = self.tag_to_string(h5) self.timefmt = u' [%s]'%date
date = text.partition(u'\u2022')[0].strip() self.log('Issue date:', date)
self.timefmt = u' [%s]'%date
self.log('Issue date:', date)
# Find TOC # Find TOC
toc = soup.find('ul', attrs={'class':'issue-article-list'}) toc = soup.find('div', attrs={'class':'current_issue'}).find('div', attrs={'class':'articles_list'})
articles = [] articles = []
for li in toc.findAll('li'): for div in toc.findAll('div', attrs={'class':'row'}):
h3 = li.find('h3') h2 = div.find('h2')
title = self.tag_to_string(h3) title = self.tag_to_string(h2).strip()
author = self.tag_to_string(li.find('h4')) author = self.tag_to_string(div.find('div', attrs={'class':'author'})).strip()
title = title + u' (%s)'%author title = title + u' (%s)'%author
url = 'http://www.nybooks.com'+h3.find('a', href=True)['href'] url = 'http://www.nybooks.com' + h2.find('a', href=True)['href']
desc = '' desc = ''
for p in li.findAll('p'): for p in div.findAll('p', attrs={'class':lambda x: x and 'quiet' in x}):
desc += self.tag_to_string(p) desc += self.tag_to_string(p)
self.log('Found article:', title) self.log('Found article:', title)
self.log('\t', url) self.log('\t', url)