Remove no longer working recipe
This commit is contained in:
parent 3273a53358, commit 91be3ebc2d
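To inspect this change locally from a clone of the repository, the full deleted recipe can be shown with a plain git command (using the commit hash above):

    git show 91be3ebc2d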
@@ -1,124 +0,0 @@
#!/usr/bin/env python2

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'

'''
discovermagazine.com
'''

import re
import mechanize
import json
import cookielib

from calibre.web.feeds.news import BasicNewsRecipe


class DiscoverMagazine(BasicNewsRecipe):

    title = u'Discover Magazine'
    description = u'Science, Technology and the Future'
    __author__ = 'Starson17'
    language = 'en'

    oldest_article = 33
    max_articles_per_feed = 20
    no_stylesheets = True
    remove_javascript = True
    use_embedded_content = False
    encoding = 'utf-8'
    extra_css = '.headline {font-size: x-large;} \n .fact {padding-top: 10pt}'

    remove_tags = [
        dict(name='div', attrs={
            'id': ['searchModule', 'mainMenu', 'tool-box']}),
        dict(name='div', attrs={'id': [
            'footer', 'teaser', 'already-subscriber', 'teaser-suite',
            'related-articles']}),
        dict(name='div', attrs={'class': ['column']}),
        dict(name='img', attrs={'src': 'http://discovermagazine.com/onebyone.gif'})]

    remove_tags_after = [dict(name='div', attrs={'class': 'listingBar'})]

    # Login stuff
    needs_subscription = True

    def get_browser(self):
        br = BasicNewsRecipe.get_browser(self)
        # The site authenticates through a JSON API on secure.kalmbach.com;
        # a successful response carries a session id that must then be set
        # as the KSERV cookie on discovermagazine.com.
        rq = mechanize.Request(
            'https://secure.kalmbach.com/kserv/api/authentication/login',
            headers={
                'Content-Type': 'application/json; charset=UTF-8',
                'Referer': 'http://discovermagazine.com',
                'Accept': 'application/json, text/javascript, */*; q=0.01',
                'Accept-Language': 'en-US,en;q=0.5',
                'Origin': 'http://discovermagazine.com',
            }, data=json.dumps(
                {'appId': '2', 'email': self.username, 'password': self.password}))
        br.set_debug_http(True)
        # Submit the login request once and parse the JSON reply
        data = json.loads(br.open(rq).read())
        if not data.get('success'):
            raise ValueError('Failed to login')
        session_id = data['sessionId']
        if hasattr(br, 'set_cookie'):
            br.set_cookie('KSERV', session_id, 'discovermagazine.com')
        else:
            c = cookielib.Cookie(
                None, 'KSERV', session_id,
                None, False,
                'discovermagazine.com', True, False,
                '/', True,
                False, None, False, None, None, None)
            br.cookiejar.set_cookie(c)
        # Verify the session by loading the home page and checking for
        # a logout link
        res = br.open('http://discovermagazine.com')
        br.set_debug_http(False)
        raw = res.read()
        if '>Logout<' not in raw:
            raise ValueError('Failed to login')
        return br

    # End login stuff
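    # Aside, not part of the original recipe (a hedged sketch only): on
    # Python 3 the cookielib module is named http.cookiejar, so the
    # fallback branch above would be written roughly as:
    #
    #     from http.cookiejar import Cookie
    #     c = Cookie(None, 'KSERV', session_id, None, False,
    #                'discovermagazine.com', True, False, '/', True,
    #                False, None, False, None, None, None)
    #     br.cookiejar.set_cookie(c)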
    def append_page(self, soup, appendtag, position):
        # Follow the "next" pager link and splice each continuation
        # page's article body into the first page, recursively
        pager = soup.find('span', attrs={'class': 'next'})
        if pager:
            nexturl = pager.a['href']
            soup2 = self.index_to_soup(nexturl)
            texttag = soup2.find('div', attrs={'class': 'articlebody'})
            newpos = len(texttag.contents)
            self.append_page(soup2, texttag, newpos)
            texttag.extract()
            appendtag.insert(position, texttag)

    def preprocess_html(self, soup):
        mtag = '<meta http-equiv="Content-Language" content="en-US"/>\n<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>'
        soup.head.insert(0, mtag)
        self.append_page(soup, soup.body, 3)
        pager = soup.find('div', attrs={'class': 'listingBar'})
        if pager:
            pager.extract()
        return soup

    def postprocess_html(self, soup, first_fetch):
        # Drop the subscription teaser, flatten layout tables into divs,
        # and strip ads, horizontal rules and stray line breaks
        for tag in soup.findAll(text=re.compile('^This article is a sample')):
            tag.parent.extract()
        for tag in soup.findAll(['table', 'tr', 'td']):
            tag.name = 'div'
        for tag in soup.findAll('div', attrs={'class': 'discreet advert'}):
            tag.extract()
        for tag in soup.findAll('hr', attrs={'size': '1'}):
            tag.extract()
        for tag in soup.findAll('br'):
            tag.extract()
        return soup
    feeds = [
        (u'Technology', u'http://feeds.feedburner.com/DiscoverTechnology'),
        (u'Health & Medicine', u'http://feeds.feedburner.com/DiscoverHealthMedicine'),
        (u'Mind Brain', u'http://feeds.feedburner.com/DiscoverMindBrain'),
        (u'Space & Physics', u'http://feeds.feedburner.com/DiscoverSpace'),
        (u'Living World', u'http://feeds.feedburner.com/DiscoverLivingWorld'),
        (u'Environment', u'http://feeds.feedburner.com/DiscoverEnvironment'),
        (u"20 Things you didn't know about...",
         u'http://feeds.feedburner.com/20ThingsYouDidntKnowAbout'),
        (u'Vital Signs', u'http://feeds.feedburner.com/discovermagazine/VitalSigns'),
    ]
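For context, a recipe such as this one was normally exercised from the command line with calibre's ebook-convert tool. A minimal sketch, assuming the file above were saved as discover_magazine.recipe (the filename is illustrative) and the site still accepted subscription credentials:

    ebook-convert discover_magazine.recipe .epub --test --username you@example.com --password secret

The --test flag fetches only a couple of articles per feed, which keeps login and scraping failures quick to reproduce.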