Remove Business Week as it no longer exists (behind a paywall)

Kovid Goyal 2015-02-15 20:32:58 +05:30
parent a30fb54406
commit 4db96c9f47
3 changed files with 0 additions and 124 deletions


@@ -1,51 +0,0 @@
__license__ = 'GPL v3'
__copyright__ = '2008 Kovid Goyal kovid@kovidgoyal.net, 2010 Darko Miletic <darko.miletic at gmail.com>'
'''
www.businessweek.com
'''

from calibre.web.feeds.news import BasicNewsRecipe


class BusinessWeek(BasicNewsRecipe):
    title = 'Business Week'
    __author__ = 'Kovid Goyal and Darko Miletic'
    description = 'Read the latest international business news & stock market news. Get updated company profiles, financial advice, global economy and technology news.'
    publisher = 'Bloomberg L.P.'
    category = 'Business, business news, stock market, stock market news, financial advice, company profiles, financial advice, global economy, technology news'
    oldest_article = 7
    max_articles_per_feed = 200
    no_stylesheets = True
    auto_cleanup = True
    encoding = 'utf8'
    use_embedded_content = False
    language = 'en'
    remove_empty_feeds = True
    publication_type = 'magazine'
    cover_url = 'http://images.businessweek.com/mz/covers/current_120x160.jpg'
    masthead_url = 'http://assets.businessweek.com/images/bw-logo.png'
    extra_css = """
        body{font-family: Helvetica,Arial,sans-serif }
        img{margin-bottom: 0.4em; display:block}
        .tagline{color: gray; font-style: italic}
        .photoCredit{font-size: small; color: gray}
    """

    conversion_options = {
        'comment': description,
        'tags': category,
        'publisher': publisher,
        'language': language,
    }

    feeds = [
        (u'Top Stories', u'http://www.businessweek.com/feeds/most-popular.rss'),
    ]

    def print_version(self, url):
        soup = self.index_to_soup(url)
        prntver = soup.find('li', attrs={'class': 'print tracked'})
        rurl = prntver.find('a', href=True)['href']
        return rurl
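
For context, print_version() is the BasicNewsRecipe hook that maps each article URL to the URL that is actually downloaded; the recipe above used it to follow the page's "print" link. A minimal sketch of the same pattern against a hypothetical site (the feed URL and the '/print' suffix are placeholders, not Businessweek endpoints):

from calibre.web.feeds.news import BasicNewsRecipe


class PrintVersionSketch(BasicNewsRecipe):
    # Hypothetical recipe illustrating the print_version() hook; the feed URL
    # and URL scheme below are placeholders for illustration only.
    title = 'Print Version Sketch'
    feeds = [(u'News', u'http://example.com/feed.rss')]

    def print_version(self, url):
        # Assume the site serves a printer-friendly page at <article URL>/print
        return url + '/print'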


@@ -1,73 +0,0 @@
import re
from collections import OrderedDict

from calibre.web.feeds.recipes import BasicNewsRecipe


class BusinessWeekMagazine(BasicNewsRecipe):
    title = 'Business Week Magazine'
    __author__ = 'Rick Shang, Armin Geller'  # AGE Upd 2014-01-18
    description = 'A renowned business publication. Business news, trends and profiles of successful businesspeople.'
    language = 'en'
    category = 'news'
    encoding = 'UTF-8'
    keep_only_tags = [
        dict(name='div', attrs={'id': ['content']}),  # AGE 2014-01-18
    ]
    remove_tags = [
        dict(name='hr'),
        dict(name='a', attrs={'class': 'sub_sales'}),
        dict(name='div', attrs={'class': 'fieldset'}),
        dict(name='div', attrs={'id': 'taboola_wrapper'}),  # AGE 2014-01-18
    ]
    no_javascript = True
    no_stylesheets = True
    cover_url = 'http://images.businessweek.com/mz/covers/current_120x160.jpg'

    def parse_index(self):
        # Go to the issue
        soup = self.index_to_soup('http://www.businessweek.com/magazine/news/articles/business_news.htm')

        # Find date
        mag = soup.find('h2', text='Magazine')
        dates = self.tag_to_string(mag.findNext('h3'))
        self.timefmt = u' [%s]' % dates

        # Go to the main body
        div0 = soup.find('div', attrs={'class': 'column left'})
        section_title = ''
        feeds = OrderedDict()
        for div in div0.findAll('a', attrs={'class': None}):
            articles = []
            section_title = self.tag_to_string(div.findPrevious('h3')).strip()
            title = self.tag_to_string(div).strip()
            url = div['href']
            soup0 = self.index_to_soup(url)
            urlprint = soup0.find('a', attrs={'href': re.compile('.*printer.*')})
            if urlprint is not None:
                url = urlprint['href']
            articles.append({'title': title, 'url': url, 'description': '', 'date': ''})

            if articles:
                if section_title not in feeds:
                    feeds[section_title] = []
                feeds[section_title] += articles

        div1 = soup.find('div', attrs={'class': 'column center'})
        section_title = ''
        for div in div1.findAll('a'):
            articles = []
            desc = self.tag_to_string(div.findNext('p')).strip()
            section_title = self.tag_to_string(div.findPrevious('h3')).strip()
            title = self.tag_to_string(div).strip()
            url = div['href']
            soup0 = self.index_to_soup(url)
            urlprint = soup0.find('a', attrs={'href': re.compile('.*printer.*')})
            if urlprint is not None:
                url = urlprint['href']
            articles.append({'title': title, 'url': url, 'description': desc, 'date': ''})

            if articles:
                if section_title not in feeds:
                    feeds[section_title] = []
                feeds[section_title] += articles

        ans = [(key, val) for key, val in feeds.iteritems()]
        return ans
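
For reference, parse_index() is expected to return a list of (section title, article list) pairs, where each article is a dict with at least 'title' and 'url' (plus optional 'description' and 'date'); that is the structure the loops above build in the OrderedDict. A minimal sketch with placeholder section name and URL:

from calibre.web.feeds.news import BasicNewsRecipe


class ParseIndexSketch(BasicNewsRecipe):
    # Hypothetical recipe illustrating the parse_index() return contract; the
    # section name and article URL below are placeholders.
    title = 'Parse Index Sketch'

    def parse_index(self):
        articles = [{
            'title': 'Example article',
            'url': 'http://example.com/example-article',
            'description': '',
            'date': '',
        }]
        return [('Example Section', articles)]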

Binary file not shown.
