#!/usr/bin/env python
import random

from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.web.feeds.news import BasicNewsRecipe, prefixed_classes


def resize(x):
    # Normalise image URLs to a 600px-wide rendition.
    if 'resize' in x:
        return x.split('&resize')[0] + '&resize=600'
    elif '?crop=' in x:
        return x + '&resize=600'
    # Leave URLs without resize/crop parameters untouched.
    return x


def absurl(url):
    # Turn site-relative links into absolute ones.
    if url.startswith('/'):
        url = 'https://www.thetimes.com' + url
    return url


class times(BasicNewsRecipe):
    title = 'The Times and Sunday Times'
    __author__ = 'unkn0wn'
    description = (
        'The Times, founded in 1785 as the Daily Universal Register, is the oldest national daily newspaper '
        'in the UK and holds an important place as the “paper of record” on public life, from politics and world '
        'affairs to business and sport.'
    )
    language = 'en_GB'
    encoding = 'utf-8'
    no_stylesheets = True
    remove_javascript = True
    remove_attributes = ['width', 'height', 'style']
    masthead_url = 'https://www.thetimes.com/d/img/logos/times-black-ee1e0ce4ed.png'
    ignore_duplicate_articles = {'title', 'url'}
    remove_empty_feeds = True
    resolve_internal_links = True
    simultaneous_downloads = 1
    browser_type = 'webengine'

    def get_cover_url(self):
        # Use the day's front-page scan from frontpages.com as the cover.
        soup = self.index_to_soup('https://www.frontpages.com/the-times/')
        return 'https://www.frontpages.com' + soup.find('img', attrs={'id': 'giornale-img'})['src']

    extra_css = '''
        .tc-view__TcView-nuazoi-0, [class^="keylines__KeylineItem-"], .sub { font-size:small; }
        [class^="responsive__StandfirstContainer-"] { font-style:italic; }
    '''

    keep_only_tags = [
        prefixed_classes(
            'responsive__HeadlineContainer- keylines__KeylineItem- responsive__StandfirstContainer- '
            'responsive__LeadAsset- responsive__ArticleContent-'
        )
    ]

    remove_tags = [
        dict(name=['svg', 'times-datawrapper']),
        dict(attrs={'id': 'iframe-wrapper'}),
        dict(attrs={'old-position': 'sticky'}),
        prefixed_classes('responsive__InlineAdWrapper-'),
    ]

    remove_tags_after = [
        dict(name='div', attrs={'id': 'paywall-portal-article-footer'}),
    ]

    def preprocess_html(self, soup):
        # Render the standfirst as an ordinary paragraph.
        h2 = soup.find(**prefixed_classes('responsive__StandfirstContainer-'))
        if h2:
            h2.name = 'p'
        # Drop advertisement blocks.
        for h2 in soup.findAll('h2'):
            if h2.text == 'Advertisement':
                div = h2.findParent('div')
                if div:
                    div.extract()
        # Request smaller image renditions, including lazy-loaded images whose
        # real URL is stashed in the old-src attribute.
        for img in soup.findAll('img', src=True):
            img['src'] = resize(img['src'])
        for img in soup.findAll('img', attrs={'old-src': True}):
            img['src'] = resize(img['old-src'])
        # Strip redirect prefixes so links point straight at their targets.
        for a in soup.findAll('a', href=True):
            a['href'] = 'http' + a['href'].split('http')[-1]
        # Body text sometimes arrives as inline-styled elements; treat them as paragraphs.
        for tag in soup.findAll(attrs={'style': lambda x: x and x.startswith(
            'color:rgb(51, 51, 51);font-family:TimesDigitalW04-Regular'
        )}):
            tag.name = 'p'
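        # Elements whose id contains a dot, along with figures, get the .sub class
        # so the extra_css rule above renders them in a smaller font.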
        for d in soup.findAll(attrs={'id': lambda x: x and '.' in x}):
            d['class'] = 'sub'
        for fig in soup.findAll('figure'):
            fig['class'] = 'sub'
        return soup

    def parse_index(self):
        soup = self.index_to_soup('https://www.thetimes.com/')
        main = soup.find('div', attrs={'id': 'main-container', 'data-edition-date': True})
        # Show the edition date alongside the recipe title.
        self.timefmt = ' [%s]' % main['data-edition-date']
        feeds = []
        # Each top-level <section id="section-..."> on the homepage becomes a feed.
        for sec in main.findAll('section', attrs={'id': lambda x: x and x.startswith('section-')}, recursive=False):
            section = sec['id'].replace('section-', '').capitalize()
            self.log(section)
            articles = []
            for a in sec.findAll(**prefixed_classes('Item-headline')):
                if not a.find('a'):
                    continue
                url = absurl(a.a['href']).split('?')[0]
                title = self.tag_to_string(a)
                self.log(' ', title, '\n\t', url)
                articles.append({'title': title, 'url': url})
            feeds.append((section, articles))
        return feeds

    def preprocess_raw_html(self, raw, url):
        # If this user-state marker is absent (and the page is not a cartoons page),
        # the article body is fetched from a random archive.today mirror and spliced
        # into the original page.
        access = '"userState":{"isLoggedIn":false,"isMetered":false,"hasAccess":true}'
        if access not in raw and 'comment/cartoons' not in url:
            dom = random.choice(('fo', 'is', 'li', 'md', 'ph', 'vn'))
            raw_ar = self.index_to_soup('https://archive.' + dom + '/latest/' + url)
            archive = BeautifulSoup(str(raw_ar))
            if archive.find('div', attrs={'id': 'top'}):
                content = archive.find('article', attrs={'id': False})
                soup = BeautifulSoup(raw)
                article = soup.find(**prefixed_classes('responsive__ArticleContent-'))
                if article and content:
                    self.log('**fetching archive content')
                    article.clear()
                    article.append(content)
                    return str(soup)
        return raw

    def populate_article_metadata(self, article, soup, first):
        # Use the standfirst as the article summary.
        desc = soup.find(**prefixed_classes('responsive__StandfirstContainer-'))
        if desc:
            article.summary = self.tag_to_string(desc)
            article.text_summary = article.summary
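
# A quick way to try out changes to this recipe (assuming calibre's command-line
# tools are installed and the file is saved as, say, thetimes.recipe) is calibre's
# standard recipe test run, which fetches only a couple of articles from a couple
# of feeds and prints verbose logs:
#
#   ebook-convert thetimes.recipe .epub --test -vv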