from urllib.parse import quote

from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.scraper.simple import read_url
from calibre.web.feeds.news import BasicNewsRecipe, prefixed_classes
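

# Cap image width: rewrite the resize/crop query parameters that Times image
# URLs carry so pictures are fetched at a maximum width of 600px.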
def resize(x):
    if 'resize' in x:
        return x.split('&resize')[0] + '&resize=600'
    elif '?crop=' in x:
        return x + '&resize=600'
    return x  # leave URLs without resize/crop parameters unchanged


class times(BasicNewsRecipe):
    title = 'The Times and Sunday Times'
    __author__ = 'unkn0wn'
    description = (
        'The Times, founded in 1785 as the Daily Universal Register, is the oldest national daily newspaper '
        'in the UK and holds an important place as the “paper of record” on public life, from politics and world '
        'affairs to business and sport.'
    )
    language = 'en_GB'
    encoding = 'utf-8'
    no_stylesheets = True
    remove_javascript = True
    remove_attributes = ['width', 'height', 'style']
    masthead_url = 'https://www.thetimes.com/d/img/logos/times-black-ee1e0ce4ed.png'

    ignore_duplicate_articles = {'title', 'url'}
    remove_empty_feeds = True
    resolve_internal_links = True
    simultaneous_downloads = 1
    oldest_article = 1  # days
    web_url = ''
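
    # Use today's front-page scan from frontpages.com as the cover image.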
    def get_cover_url(self):
        soup = self.index_to_soup('https://www.frontpages.com/the-times/')
        return 'https://www.frontpages.com' + soup.find('img', attrs={'id': 'giornale-img'})['src']

    extra_css = '''
        .tc-view__TcView-nuazoi-0, [class^="keylines__KeylineItem-"], .sub { font-size:small; }
        [class^="responsive__StandfirstContainer-"] { font-style:italic; }
    '''
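
    # Keep only the headline, standfirst, lead asset and article body; strip
    # ads, embedded widgets and everything after the paywall footer.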
    keep_only_tags = [
        prefixed_classes(
            'responsive__HeadlineContainer- keylines__KeylineItem- responsive__StandfirstContainer- '
            'responsive__LeadAsset- responsive__ArticleContent-'
        )
    ]

    remove_tags = [
        dict(name=['svg', 'times-datawrapper']),
        dict(attrs={'id': 'iframe-wrapper'}),
        dict(attrs={'old-position': 'sticky'}),
        prefixed_classes(
            'responsive__InlineAdWrapper-'
        )
    ]

    remove_tags_after = [
        dict(name='div', attrs={'id': 'paywall-portal-article-footer'})
    ]
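
    # Tidy the article HTML: demote the standfirst to a paragraph, drop
    # advertisement blocks, normalise image URLs and clean up links.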
    def preprocess_html(self, soup):
        h2 = soup.find(**prefixed_classes('responsive__StandfirstContainer-'))
        if h2:
            h2.name = 'p'
        for h2 in soup.findAll('h2'):
            if h2.text == 'Advertisement':
                div = h2.findParent('div')
                if div:
                    div.extract()
        for img in soup.findAll('img', src=True):
            img['src'] = resize(img['src'])
        for img in soup.findAll('img', attrs={'old-src': True}):
            img['src'] = resize(img['old-src'])
        for a in soup.findAll('a', href=True):
            a['href'] = 'http' + a['href'].split('http')[-1]
        div = soup.findAll(attrs={'style': lambda x: x and x.startswith(
            'color:rgb(51, 51, 51);font-family:TimesDigitalW04-Regular'
        )})
        for p in div:
            p.name = 'p'
        for d in soup.findAll(attrs={'id': lambda x: x and '.' in x}):
            d['class'] = 'sub'
        for fig in soup.findAll('figure'):
            fig['class'] = 'sub'
        return soup
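
    # Feed entries point at Google News redirect pages, so pull the real
    # Times URL out of each entry before downloading it.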
    articles_are_obfuscated = True

    def get_obfuscated_article(self, url):
        soup = self.index_to_soup(url)
        link = soup.a['href']
        skip_sections = [  # add sections you want to skip
            '/video/', '/videos/', '/multimedia/',
        ]
        if any(x in link for x in skip_sections):
            self.abort_article('skipping video link: ' + link)
        self.web_url = link
        html = self.index_to_soup(link, raw=True)
        return {'data': html, 'url': link}
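
    # Build the feed list from Google News RSS searches scoped to each
    # thetimes.com section and restricted to the last `oldest_article` days.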
    feeds = []
    when = oldest_article * 24
    index = 'https://www.thetimes.com/'
    sections = [
        'politics', 'world', 'uk/politics', 'uk/scotland', 'uk', 'comment', 'business-money', 'sport',
        'life-style', 'culture', 'magazine', 'travel', 'sunday-times', 'edition', 'article'
    ]
    for sec in sections:
        a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=en-GB&gl=GB&ceid=GB:en'
        feeds.append((sec.capitalize(), a.format(when, quote(index + sec, safe=''))))
    feeds.append(('Others', a.format(when, quote(index, safe=''))))
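
    # If the downloaded page is paywalled (no access flag in the embedded
    # user state), try to swap in the article body from archive.is instead.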
    def preprocess_raw_html(self, raw, url):
        access = '"userState":{"isLoggedIn":false,"isMetered":false,"hasAccess":true}'
        if access not in raw and 'comment/cartoons' not in url:
            raw_ar = read_url([], 'https://archive.is/latest/' + url)
            archive = BeautifulSoup(str(raw_ar))
            if archive.find('div', attrs={'id': 'top'}):
                content = archive.find('article', attrs={'id': False})
                soup = BeautifulSoup(raw)
                article = soup.find(**prefixed_classes('responsive__ArticleContent-'))
                if article and content:
                    self.log('**fetching archive content')
                    article.clear()
                    article.append(content)
                    return str(soup)
                return raw
            return raw
        return raw
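
    # Clean up the title, use the standfirst as the summary and point the
    # article URL at the real Times page rather than the Google News link.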
    def populate_article_metadata(self, article, soup, first):
        article.title = article.title.replace(' - The Times', '')
        desc = soup.find(**prefixed_classes('responsive__StandfirstContainer-'))
        if desc:
            article.summary = self.tag_to_string(desc)
            article.text_summary = article.summary
        article.url = self.web_url
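

# A quick way to test changes to this recipe locally (assuming calibre's
# command line tools are on PATH and the file is saved as, say,
# times_online.recipe) is roughly:
#   ebook-convert times_online.recipe .epub --test -vv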