Update global times

commit 00f14d6ee2, parent 0f0ac817c7
@@ -1,88 +1,95 @@
-import re
-
-from calibre.web.feeds.news import BasicNewsRecipe
-
-
-def classes(classes):
-    q = frozenset(classes.split(' '))
-    return dict(
-        attrs={'class': lambda x: x and frozenset(x.split()).intersection(q)}
-    )
+from calibre.web.feeds.news import BasicNewsRecipe, classes
+from datetime import datetime, timedelta, timezone
+from calibre.utils.date import parse_date
+
+index = 'https://www.globaltimes.cn/'
 
 
 class GlobalTimes(BasicNewsRecipe):
-    title = u'Global Times'
-    __author__ = 'Jose Ortiz'  # lui1 at mobileread.com
+    title = 'Global Times'
+    __author__ = 'unkn0wn'
+    description = 'DISCOVER CHINA, DISCOVER THE WORLD'
     language = 'en_CN'
-    oldest_article = 7
-    max_articles_per_feed = 100
     no_stylesheets = True
-    keep_only_tags = [classes('article-title article-source row-content')]
-
-    preprocess_regexps = [(
-        re.compile(
-            r'(?:<(?:br(?:\s*/)?|/br\s*)>(?:\s|'
-            '\xA0'
-            r'| )*){2,9}', re.U | re.I
-        ), lambda match: '<p>'
-    )]
+    remove_attributes = ['height', 'width', 'style']
+    ignore_duplicate_articles = {'url'}
+    masthead_url = 'https://www.globaltimes.cn/img/logo1@3x.png'
+    encoding = 'utf-8'
+    remove_empty_feeds = True
+    resolve_internal_links = True
+    oldest_article = 1 # days
+
+    def get_cover_url(self):
+        soup = self.index_to_soup('https://en.kiosko.net/cn/np/cn_global_times.html')
+        return 'https:' + soup.find('img', attrs={'id':'portada'})['src']
 
     extra_css = '''
-        :root {
-            font-family: Arial, Helvetica, sans-serif;
-        }
-        .article-title {
-            font-weight: bold;
-            font-size: large;
-        }
-        .article-source, .row-content {
-            font-size:small;
-        }
+        .article_column {font-size:small; color:#404040;}
+        .author_share_left, .picture, .with_name_card, .pub_time {font-size:small; color:#202020;}
+        blockquote, em {color:#202020;}
     '''
 
+    keep_only_tags = [
+        classes(
+            'article_column article_title author_share_left article_content'
+        )
+    ]
+
+    remove_tags = [classes('author_card')]
+
+    def preprocess_raw_html(self, raw, *a):
+        return raw.replace('<br /><br />', '</p><p>').replace('<br><br>', '</p><p>')
+
+    def preprocess_html(self, soup):
+        h1 = soup.find(attrs={'class':'article_title'})
+        if h1:
+            h1.name = 'h1'
+        for div in soup.findAll(attrs={'class':'picture'}):
+            div.name = 'div'
+        p = soup.find(attrs={'class':'author_share_left'})
+        if p:
+            p.name = 'p'
+        return soup
+
     def parse_index(self):
-        catnames = {}
-        catnames["https://www.globaltimes.cn/china/politics/"] = "China Politics"
-        catnames["https://www.globaltimes.cn/china/diplomacy/"] = "China Diplomacy"
-        catnames["https://www.globaltimes.cn/china/military/"] = "China Military"
-        catnames["https://www.globaltimes.cn/world/asia-pacific/"] = "Asia Pacific"
-        catnames["https://www.globaltimes.cn/sci-tech"] = "Sci-Tech"
+        sec_url = index + '{}/index.html'
+
+        section_list = [
+            'china', 'source', 'opinion', 'In-depth', 'world', 'life', 'sport', 'cartoon'
+        ]
+
         feeds = []
 
-        for cat in catnames:
-            articles = []
-            self.log(cat)
-            soup = self.index_to_soup(cat)
-            for a in soup.findAll(
-                'a',
-                attrs={
-                    'href':
-                    re.compile(
-                        r'https?://www.globaltimes.cn/content/[0-9]{4,10}[.]shtml'
-                    )
-                }
-            ):
-                # Typical url http://www.globaltimes.cn/content/5555555.shtml
-                url = a['href'].strip()
-                title = self.tag_to_string(a).strip()
-                if not title:
-                    continue
-                myarticle = ({
-                    'title': title,
-                    'url': url,
-                    'description': '',
-                    'date': ''
-                })
-                self.log("found '%s'" % title)
-                articles.append(myarticle)
-                self.log("Adding URL %s\n" % url)
+        for section in section_list:
+            section_title = section.capitalize()
+            section_url = sec_url.format(section)
+            self.log(section_title, section_url)
+            soup = self.index_to_soup(section_url)
+            articles = self.articles_from_soup(soup)
             if articles:
-                feeds.append((catnames[cat], articles))
+                feeds.append((section_title, articles))
         return feeds
 
-    def postprocess_html(self, soup, first_fetch):
-        for p in [p for p in soup('p') if len(p) == 0]:
-            p.extract()
-        return soup
+    def articles_from_soup(self, soup):
+        ans = []
+        dt = datetime.today().strftime('%Y%m')
+        for a in soup.findAll('a', attrs={'href':lambda x: x and x.startswith(index + 'page/' + dt + '/')}):
+            if a.find('img'):
+                continue
+            url = a['href']
+            title = self.tag_to_string(a).strip()
+            desc = ''
+            p = a.find_next_sibling('p')
+            if p:
+                desc = self.tag_to_string(p).strip()
+            src_time = a.find_next_sibling(attrs={'class':'source_time'})
+            if src_time:
+                time = self.tag_to_string(src_time).strip()
+                if '|' in time:
+                    time = time.split('|')[1].strip()
+                date = parse_date(time)
+                today = (datetime.now(timezone.utc)).replace(microsecond=0)
+                if (today - date) > timedelta(self.oldest_article):
+                    continue
+            self.log('\t', title, '\n\t', desc, '\n\t\t', url)
+            ans.append({'title': title, 'url': url, 'description': desc})
+        return ans
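For reference, a minimal standalone sketch of what the classes() helper does
(defined inline in the old recipe, imported from calibre.web.feeds.news in the
new one): a tag matches when any of its CSS classes intersects the queried
set. The sample HTML and the multi_valued_attributes=None setting (which keeps
the class attribute a single string, as the lambda's x.split() expects) are
illustrative assumptions, not part of the commit.

    from bs4 import BeautifulSoup


    def classes(classes):
        q = frozenset(classes.split(' '))
        return dict(
            attrs={'class': lambda x: x and frozenset(x.split()).intersection(q)}
        )


    html = '<h1 class="article_title big">Headline</h1><div class="nav">Menu</div>'
    soup = BeautifulSoup(html, 'html.parser', multi_valued_attributes=None)
    # Only the h1 matches: {'article_title', 'big'} intersects the query set.
    print(soup.find_all(**classes('article_title article_content')))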
@@ -89,7 +89,6 @@ class LiveMint(BasicNewsRecipe):
         font-weight:normal !important; font-style:italic; color:#202020;
         }
         h2 {font-size:normal !important;}
-        .author-widget {font-size:small; font-style:italic; color:#404040;}
         em, blockquote {color:#202020;}
         .moreAbout, .articleInfo, .metaData, .psTopicsHeading, .topicsTag {font-size:small;}
     '''
@@ -102,7 +101,7 @@ class LiveMint(BasicNewsRecipe):
         dict(name=['meta', 'link', 'svg', 'button', 'iframe']),
         classes(
             'trendingSimilarHeight moreNews mobAppDownload label msgError msgOk taboolaHeight gadgetSlider'
-            ' socialHolder imgbig disclamerText disqus-comment-count openinApp2 lastAdSlot bs_logo'
+            ' socialHolder imgbig disclamerText disqus-comment-count openinApp2 lastAdSlot bs_logo author-widget'
             ' datePublish sepStory premiumSlider moreStory Joinus moreAbout milestone benefitText'
         )
     ]
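The net effect of these two hunks is that author-widget elements are now
stripped outright by remove_tags rather than merely restyled through the
extra_css rule the first hunk deletes. A rough sketch of that removal
mechanic, using plain bs4 and made-up markup rather than calibre's pipeline:

    from bs4 import BeautifulSoup

    html = '<div class="author-widget">byline box</div><p>Story text</p>'
    soup = BeautifulSoup(html, 'html.parser', multi_valued_attributes=None)
    q = frozenset('author-widget datePublish'.split())
    for tag in soup.find_all(attrs={'class': lambda x: x and frozenset(x.split()).intersection(q)}):
        tag.decompose()  # drop the element entirely, as remove_tags does
    print(soup)  # -> <p>Story text</p>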
@@ -85,7 +85,7 @@ class TheWeek(BasicNewsRecipe):
         for sec in sections:
             a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=en-US&gl=US&ceid=US:en'
             feeds.append((sec.capitalize(), a.format(when, quote(index + sec, safe=''))))
-        feeds.append(('Others', a.format(when, quote(index, safe=''), '')))
+        feeds.append(('Others', a.format(when, quote(index, safe=''))))
 
     def populate_article_metadata(self, article, soup, first):
         article.title = article.title.replace(' - The Week', '')
@@ -85,7 +85,7 @@ class TheWeek(BasicNewsRecipe):
         for sec in sections:
             a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=en-GB&gl=GB&ceid=GB:en'
             feeds.append((sec.capitalize(), a.format(when, quote(index + sec, safe=''))))
-        feeds.append(('Others', a.format(when, quote(index, safe=''), '')))
+        feeds.append(('Others', a.format(when, quote(index, safe=''))))
 
     def populate_article_metadata(self, article, soup, first):
         article.title = article.title.replace(' - The Week', '')
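Both TheWeek hunks drop a stray third argument to str.format; since format()
silently ignores surplus positional arguments, the old line already produced
the same URL, so this is a cleanup rather than a behavior change. A quick
check (the when and index values here are hypothetical stand-ins for the
recipe's real ones):

    from urllib.parse import quote

    a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=en-US&gl=US&ceid=US:en'
    when, index = 24, 'https://example.com/'  # illustrative values only
    assert a.format(when, quote(index, safe='')) == a.format(when, quote(index, safe=''), '')
    print(a.format(when, quote(index, safe='')))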