mirror of https://github.com/kovidgoyal/calibre.git
Business Week Magazine and Chronicle of Higher Education by Rick Shang
parent 8c050a4355
commit 11e0d74428
69 recipes/bwmagazine2.recipe (new file)
@@ -0,0 +1,69 @@
from calibre.web.feeds.recipes import BasicNewsRecipe
from collections import OrderedDict


class BusinessWeekMagazine(BasicNewsRecipe):

    title = 'Business Week Magazine'
    __author__ = 'Rick Shang'
    description = 'A renowned business publication. Business news, trends and profiles of successful businesspeople.'
    language = 'en'
    category = 'news'
    encoding = 'UTF-8'
    keep_only_tags = [
        dict(name='div', attrs={'id': 'article_body_container'}),
    ]
    remove_tags = [dict(name='ul'), dict(name='li')]
    remove_javascript = True
    no_stylesheets = True

    cover_url = 'http://images.businessweek.com/mz/covers/current_120x160.jpg'

    def parse_index(self):
        # Go to the issue
        soup = self.index_to_soup('http://www.businessweek.com/magazine/news/articles/business_news.htm')

        # Find the issue date and use it as the edition timestamp
        mag = soup.find('h2', text='Magazine')
        self.log(mag)
        dates = self.tag_to_string(mag.findNext('h3'))
        self.timefmt = u' [%s]' % dates

        # Walk the left column: each link is an article, filed under the
        # nearest preceding h3 section heading
        div0 = soup.find('div', attrs={'class': 'column left'})
        feeds = OrderedDict()
        for div in div0.findAll('a'):
            articles = []
            section_title = self.tag_to_string(div.findPrevious('h3')).strip()
            title = self.tag_to_string(div).strip()
            url = div['href']
            # Fetch the article page and use its printer-friendly URL
            soup0 = self.index_to_soup(url)
            urlprint = soup0.find('li', attrs={'class': 'print'}).a['href']
            articles.append({'title': title, 'url': urlprint, 'description': '', 'date': ''})

            if articles:
                if section_title not in feeds:
                    feeds[section_title] = []
                feeds[section_title] += articles

        # Walk the center column the same way; these entries also carry a
        # short description taken from the paragraph that follows the link
        div1 = soup.find('div', attrs={'class': 'column center'})
        for div in div1.findAll('a'):
            articles = []
            desc = self.tag_to_string(div.findNext('p')).strip()
            section_title = self.tag_to_string(div.findPrevious('h3')).strip()
            title = self.tag_to_string(div).strip()
            url = div['href']
            soup0 = self.index_to_soup(url)
            urlprint = soup0.find('li', attrs={'class': 'print'}).a['href']
            articles.append({'title': title, 'url': urlprint, 'description': desc, 'date': ''})

            if articles:
                if section_title not in feeds:
                    feeds[section_title] = []
                feeds[section_title] += articles

        ans = [(key, val) for key, val in feeds.iteritems()]
        return ans
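For reference, the structure parse_index hands back to calibre is a list of (section title, article list) pairs, where each article is a dict with at least 'title' and 'url' keys. A minimal sketch of the shape this recipe builds (the section names and URLs below are illustrative placeholders, not real site data):

    [
        ('Features', [
            {'title': 'Example story', 'url': 'http://www.businessweek.com/printer/example',
             'description': '', 'date': ''},
        ]),
        ('Global Economics', [
            {'title': 'Another story', 'url': 'http://www.businessweek.com/printer/another',
             'description': 'One-line teaser.', 'date': ''},
        ]),
    ]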
75 recipes/chronicle_higher_ed.recipe (new file)
@@ -0,0 +1,75 @@
from calibre.web.feeds.recipes import BasicNewsRecipe
from collections import OrderedDict


class Chronicle(BasicNewsRecipe):

    title = 'The Chronicle of Higher Education'
    __author__ = 'Rick Shang'
    description = 'Weekly news and job-information source for college and university faculty members, administrators, and students.'
    language = 'en'
    category = 'news'
    encoding = 'UTF-8'
    keep_only_tags = [
        dict(name='div', attrs={'class': 'article'}),
    ]
    remove_tags = [dict(name='div', attrs={'class': 'related module1'})]
    remove_javascript = True
    no_stylesheets = True

    needs_subscription = True

    def get_browser(self):
        br = BasicNewsRecipe.get_browser(self)
        if self.username is not None and self.password is not None:
            # Log in first; issue and article pages sit behind the paywall
            br.open('http://chronicle.com/myaccount/login')
            br.select_form(nr=1)
            br['username'] = self.username
            br['password'] = self.password
            br.submit()
        return br

    def parse_index(self):
        # Go to the archives page and pick the most recent issue
        soup0 = self.index_to_soup('http://chronicle.com/section/Archives/39/')
        issue = soup0.find('ul', attrs={'class': 'feature-promo-list'}).li
        issueurl = 'http://chronicle.com' + issue.a['href']

        # Find the issue date and use it as the edition timestamp
        dates = self.tag_to_string(issue.a).split(': ')[-1]
        self.timefmt = u' [%s]' % dates

        # Go to the main body and collect articles, grouped by the h3
        # heading of the module each one appears in
        soup = self.index_to_soup(issueurl)
        div0 = soup.find('div', attrs={'id': 'article-body'})

        feeds = OrderedDict()
        for div in div0.findAll('div', attrs={'class': 'module1'}):
            section_title = self.tag_to_string(div.find('h3'))
            for post in div.findAll('li', attrs={'class': 'sub-promo'}):
                articles = []
                a = post.find('a', href=True)
                title = self.tag_to_string(a)
                url = 'http://chronicle.com' + a['href'].strip()
                desc = self.tag_to_string(post.find('p'))
                articles.append({'title': title, 'url': url, 'description': desc, 'date': ''})

                if articles:
                    if section_title not in feeds:
                        feeds[section_title] = []
                    feeds[section_title] += articles

        ans = [(key, val) for key, val in feeds.iteritems()]
        return ans

    def preprocess_html(self, soup):
        # Swap interactive Tableau embeds for the static fallback image
        # linked from their noscript block, then drop the Tableau credit
        for div in soup.findAll('div', attrs={'class': 'tableauPlaceholder'}):
            noscripts = div.find('noscript').a
            div.replaceWith(noscripts)
        for div0 in soup.findAll('div', text='Powered by Tableau'):
            div0.extract()
        return soup
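The Chronicle recipe's get_browser override drives calibre's built-in browser, which wraps mechanize, so the login flow can be exercised on its own outside calibre. A minimal sketch using plain mechanize, assuming the login page still serves the credentials form as its second form (nr=1) with fields named 'username' and 'password'; the credentials below are placeholders:

    import mechanize

    br = mechanize.Browser()
    br.set_handle_robots(False)          # ignore robots.txt for this test
    br.open('http://chronicle.com/myaccount/login')
    br.select_form(nr=1)                 # second form on the page, as in the recipe
    br['username'] = 'you@example.com'   # placeholder credentials
    br['password'] = 'secret'
    resp = br.submit()
    print resp.geturl()                  # landing URL hints at whether login worked

Either recipe can also be tried end to end with calibre's converter, e.g. ebook-convert chronicle_higher_ed.recipe out.epub --username USER --password PASS.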