Mirror of https://github.com/kovidgoyal/calibre.git
Update Outlook Magazine
This commit is contained in: parent 3db4c98315, commit 2246aeede3
@@ -1,67 +1,53 @@
 #!/usr/bin/env python
-__license__ = 'GPL v3'
-__copyright__ = '2009, Kovid Goyal <kovid at kovidgoyal.net>'
+import json, re
 
-from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.web.feeds.news import BasicNewsRecipe, classes
 
 
 def absurl(x):
     if x.startswith('/'):
         x = 'http://www.outlookindia.com' + x
     return x
 
 
-class OutlookIndia(BasicNewsRecipe):
-
-    title = 'Outlook India'
-    __author__ = 'Kovid Goyal'
-    description = 'Weekly news and current affairs in India'
-    no_stylesheets = True
-    encoding = 'utf-8'
+class outlook(BasicNewsRecipe):
+    title = 'Outlook Magazine'
+    __author__ = 'unkn0wn'
+    description = ''
     language = 'en_IN'
-    ignore_duplicate_articles = {'title', 'url'}
-
-    keep_only_tags = [
-        dict(name='h1'),
-        dict(
-            attrs={'class': ['sub_head', 'magzine_stry_image', 'mainContent']}),
-        dict(attrs={'class': lambda x: x and set(
-            x.split()).intersection({'writter', 'covr_wr'})}),
-    ]
-    remove_tags = [
-        dict(name='meta'),
-    ]
-
-    def get_browser(self):
-        br = BasicNewsRecipe.get_browser(self)
-        # This site sends article titles in the cookie which occasionally
-        # contain non ascii characters causing httplib to fail. Instead just
-        # disable cookies as they're not needed for download. Proper solution
-        # would be to implement a unicode aware cookie jar
-        br.set_cookiejar(None)
-        return br
-
-    def preprocess_raw_html(self, raw_html, url):
-        import html5lib
-        from lxml import html
-        root = html5lib.parse(raw_html, treebuilder='lxml',
-                              namespaceHTMLElements=False)
-        return html.tostring(root)
+    use_embedded_content = False
+    no_stylesheets = True
+    remove_javascript = True
+    remove_attributes = ['height', 'width', 'style']
+    ignore_duplicate_articles = {'url'}
 
     def parse_index(self):
-        soup = self.index_to_soup('http://www.outlookindia.com/magazine')
-        for img in soup.findAll('img', src=lambda x: x and 'Latest-Cover.jpg' in x):
-            self.cover_url = absurl(img['src'])
-            self.log('Found cover:', self.cover_url)
-
-        articles = []
-        for h3 in soup.findAll(['h3', 'h4'],
-                               attrs={'class': 'tk-kepler-std-condensed-subhead'}):
-            a = h3.find('a', href=lambda x: x)
-            url = absurl(a['href'])
-            title = self.tag_to_string(a)
-            desc = ''
-            div = a.parent.findNextSibling(attrs={'class': 'descriptn'})
-            if div is not None:
-                desc = self.tag_to_string(div)
-            self.log('Found article:', title, 'at', url)
-            articles.append({'title': title, 'url': url, 'description': desc})
-        return [('Current Issue', articles)]
+        soup = self.index_to_soup('https://www.outlookindia.com/magazine/archive')
+        issue = soup.find(**classes('issue_listing'))
+        a = issue.find('a', href=lambda x: x and x.startswith('/magazine/issue/'))
+        url = a['href']
+        self.log('Downloading issue:', url)
+        self.cover_url = a.find('img', attrs={'src': True})['src']
+        soup = self.index_to_soup('https://www.outlookindia.com' + url)
+        ans = []
+
+        for a in soup.findAll('a', href=lambda x: x and x.startswith('/magazine/story/')):
+            url = absurl(a['href'])
+            title = self.tag_to_string(a)
+            self.log('\t\tFound article:', title)
+            self.log('\t\t\t', url)
+            ans.append({
+                'title': title,
+                'url': url,
+            })
+        return [('Articles', ans)]
+
+    def preprocess_raw_html(self, raw, *a):
+        m = re.search('<!-- NewsArticle Schema -->.*?script.*?>', raw, flags=re.DOTALL)
+        raw = raw[m.end():].lstrip()
+        data = json.JSONDecoder().raw_decode(raw)[0]
+        title = data['headline']
+        body = data['articleBody']
+        body = body.replace('\r\n', '<p>')
+        author = ' and '.join(x['name'] for x in data['author'])
+        image = desc = ''
+        if data.get('image'):
+            image = '<p><img src="{}">'.format(data['image']['url'])
+        if data.get('description'):
+            desc = '<h2>' + data['description'] + '</h2>'
+        html = '<html><body><h1>' + title + '</h1>' + desc + '<h3>' + author + '</h3>' + image + '<p>' + body
+        return html
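A note on the new import: classes, from calibre.web.feeds.news, builds a keyword query for BeautifulSoup's find/findAll that matches tags whose class attribute contains any of the given space-separated names; that is how soup.find(**classes('issue_listing')) locates the issue listing above. A rough sketch of the idea (classes_query is a hypothetical stand-in, not calibre's actual implementation; it mirrors the set-intersection lambda the old keep_only_tags already used):

    def classes_query(names):
        # Match any tag whose class attribute shares at least one name with
        # the requested set; use as soup.find(**classes_query('issue_listing')).
        q = set(names.split())
        return {'attrs': {'class': lambda x: x and set(x.split()).intersection(q)}}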
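The deleted preprocess_raw_html shows the technique the old recipe relied on: reparse the site's frequently malformed markup with html5lib, which recovers from broken tags the way a browser does, then serialize the resulting lxml tree back to well-formed HTML for the rest of the pipeline. A minimal self-contained sketch of that round-trip, assuming html5lib and lxml are available (both ship with calibre):

    import html5lib
    from lxml import html

    # The unclosed tags are repaired with browser-grade error recovery and
    # come back as a complete, well-formed document.
    root = html5lib.parse('<p>unclosed <b>tags', treebuilder='lxml',
                          namespaceHTMLElements=False)
    print(html.tostring(root))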
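The replacement preprocess_raw_html takes the opposite approach: instead of cleaning the markup, it reads the article straight out of the NewsArticle JSON-LD blob the site embeds and rebuilds a minimal HTML document from its fields. The key trick is json.JSONDecoder().raw_decode, which parses one JSON value off the front of a string and reports where it stopped, so the trailing </script> and the rest of the page can be ignored without ever locating the closing tag. A standalone sketch of that technique (the field name mirrors the recipe; a real page carries more fields):

    import json

    # A trimmed stand-in for what follows the '<!-- NewsArticle Schema -->'
    # marker: one JSON object, then the remainder of the page.
    raw = '{"headline": "Sample story"}</script><div>rest of page</div>'

    data, end = json.JSONDecoder().raw_decode(raw)
    print(data['headline'])  # Sample story
    print(raw[end:])         # </script><div>rest of page</div>

To check a recipe change like this locally, calibre's recipe development docs suggest running it through ebook-convert in test mode (ebook-convert myrecipe.recipe .epub --test -vv), which fetches only a couple of articles.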