#!/usr/bin/env python

import json

from calibre.web.feeds.news import BasicNewsRecipe


def absurl(url):
    # Resolve site-relative links against the infzm.com root; absolute
    # URLs pass through unchanged.
    if url.startswith('/'):
        return 'https://www.infzm.com' + url
    return url
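# e.g. absurl('/contents/283041') returns
# 'https://www.infzm.com/contents/283041' (the article id is made up).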


def json_to_html(raw, link):
    # Assemble a minimal HTML document from the JSON payload returned by
    # the mobile API, keeping the page URL in the <h1> title attribute.
    data = json.loads(raw)
    data = data['data']['content']
    title = '<h1 title="{}">'.format(link) + data['subject'] + '</h1>'
    auth = '<p><b>' + data['author'] + '</b></p>'
    sub = '<p class="intro"><i>' + data['introtext'] + '</i></p>'
    body = data['fulltext']
    return '<html><body><div>' + title + auth + sub + body + '</div></body></html>'
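

# A sketch of the response shape consumed above, inferred from the fields
# json_to_html() reads (the real payload carries additional keys):
#
#   {"data": {"content": {"subject":   "<headline>",
#                         "author":    "<byline>",
#                         "introtext": "<standfirst>",
#                         "fulltext":  "<body HTML>"}}}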


class infzm(BasicNewsRecipe):
    title = '南方周末'
    __author__ = 'unkn0wn'
    description = 'Southern Weekly (infzm.com), founded in 1984, is a Chinese weekly newspaper based in Guangzhou. Download weekly.'
    language = 'zh'
    encoding = 'utf-8'
    no_stylesheets = True
    remove_javascript = True
    ignore_duplicate_articles = {'title', 'url'}
    remove_empty_feeds = True
    use_embedded_content = False
    remove_attributes = ['style', 'height', 'width']
    masthead_url = 'http://ssimg.kkod.cn/web/02/14227.gif'

    remove_tags = [dict(name=['video', 'svg', 'button'])]

    # Articles are fetched from the mobile JSON API rather than scraped from
    # the website, so every download goes through get_obfuscated_article().
    articles_are_obfuscated = True

    def get_obfuscated_article(self, url):
        br = self.get_browser()
        link = url
        # Map the public article URL onto the mobile API endpoint.
        res_link = (link.replace('https://www.infzm.com', 'https://api.infzm.com/mobile') +
                    '?platform=wap&version=1.89.0&machine_id=35458aa29603f2b246636e5492122b50&user_id=&token=&member_type=')
        # if article is paywalled, add code to figure out machine_id
        raw = br.open(res_link).read()
        html = json_to_html(raw, link)
        return {'data': html, 'url': link}
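
    # get_obfuscated_article() may return either a path to a downloaded file
    # or, as here, a dict carrying the prefetched 'data' and original 'url'.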

    extra_css = '''
        img {display:block; margin:0 auto;}
        .cm_pic_caption, .cm_pic_author { font-size:small; text-align:center; }
    '''

    def parse_index(self):
        index = 'https://www.infzm.com/'
        sections = [
            'contents'
        ]
        feeds = []
        soup = self.index_to_soup(index)
        for sec in sections:
            section = sec.capitalize()
            self.log(section)
            articles = []
            # Collect every article link under the section path, dropping
            # query strings and skipping the section landing page itself.
            for a in soup.findAll('a', attrs={'href': lambda x: x and x.startswith('/' + sec + '/')}):
                url = absurl(a['href'].split('?')[0])
                if url in {index + sec + '/', index + sec}:
                    continue
                title = self.tag_to_string(a)
                self.log('\t', title, '\n\t\t', url)
                articles.append({'title': title, 'url': url})
            if articles:
                feeds.append((section, articles))
        return feeds
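
    # calibre expects parse_index() to return a list of
    # (section_title, articles) tuples, each article being a dict with at
    # least 'title' and 'url'.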

    def populate_article_metadata(self, article, soup, first):
        # Reuse the intro paragraph injected by json_to_html() as the
        # article summary shown in the book's table of contents.
        intro = soup.find(attrs={'class': 'intro'})
        if intro is not None:
            article.summary = article.text_summary = self.tag_to_string(intro)