__license__ = 'GPL v3'
__copyright__ = '2010-2017, Bobby Steel, Darko Miletic'
'''
www.thetimes.co.uk
'''

import urllib

import html5lib
from lxml import html

from calibre.web.feeds.news import BasicNewsRecipe


def classes(classes):
    q = frozenset(classes.split(' '))
    return dict(attrs={
        'class': lambda x: x and frozenset(x.split()).intersection(q)})


class TimesOnline(BasicNewsRecipe):
    title = 'The Times & Sunday Times (UK)'
    __author__ = 'Bobby Steel'
    description = 'news from United Kingdom and World'
    language = 'en_GB'
    publisher = 'Times Newspapers Ltd'
    category = 'news, politics, UK'
    excludeSections = ['Puzzles']
    oldest_article = 1
    max_articles_per_feed = 100
    no_stylesheets = True
    use_embedded_content = False
    encoding = 'utf-8'
    delay = 1
    needs_subscription = True
    publication_type = 'newspaper'
    INDEX = 'http://www.thetimes.co.uk/'
    PREFIX = u'http://www.thetimes.co.uk'
    extra_css = """
        .author-name,.authorName{font-style: italic}
        .published-date,.multi-position-photo-text{
            font-family: Arial,Helvetica,sans-serif; font-size: small;
            color: gray; display: block; margin-bottom: 0.5em}
        body{font-family: Georgia,"Times New Roman",Times,serif}
    """

    conversion_options = {
        'comment': description,
        'tags': category,
        'publisher': publisher,
        'language': language
    }

    def get_browser(self):
        # Log in to the subscription site before any articles are fetched
        br = BasicNewsRecipe.get_browser(self)
        br.open('http://www.thetimes.co.uk/')
        if self.username is not None and self.password is not None:
            data = urllib.urlencode({
                'gotoUrl': self.INDEX,
                'username': self.username,
                'password': self.password
            })
            br.open('https://login.thetimes.co.uk/', data)
        return br

    remove_tags = [
        {'name': ['object', 'link', 'iframe', 'base', 'meta', 'script']},
        {'attrs': {'class': [
            'tools comments-parent', 'u-hide', 'RelatedLinks', 'Tooltip',
            'Toolbar Toolbar--bottom', 'Comments Article-container',
            'ArticlePager', 'Media-caption']}},
        {'attrs': {'class': lambda x: x and 'Toolbar' in x}}
    ]
    remove_attributes = ['lang']
    keep_only_tags = [
        dict(attrs={'class': 'Article Article--default'}),
        dict(attrs={'class': 'f-author'}),
        dict(attrs={'id': 'bodycopy'})
    ]
    remove_tags_after = dict(attrs={'class': 'Article-content'})

    feeds = [
        (u'All News', u'http://www.thetimes.co.uk/')
    ]

    def preprocess_raw_html(self, raw, url):
        # Re-parse the raw page with html5lib so that malformed markup is cleaned up
        return html.tostring(
            html5lib.parse(raw, treebuilder='lxml', namespaceHTMLElements=False),
            method='html', encoding=unicode)

    def preprocess_html(self, soup):
        for item in soup.findAll(style=True):
            del item['style']
        return self.adeify_images(soup)

    def parse_index(self):
        # Walk the front page: each section carries its name in the data-text attribute
        soup = self.index_to_soup(self.INDEX)
        totalfeeds = []
        current_section = []
        div = []
        for div in soup.findAll('section', attrs={'data-text': True}):
            current_articles = []
            self.log('in section: ', div['data-text'])
            current_section = div['data-text']
            if current_section not in self.excludeSections:
                for article in div.findAll('div', attrs={'class': 'Item-content'}):
                    h3 = article.find('h3')
                    if h3 is not None:
                        title = self.tag_to_string(h3)
                        aurl = h3.find('a')
                        if aurl is not None:
                            url = aurl['href']
                            if url.startswith('/'):
                                url = 'http://www.thetimes.co.uk' + url
                            desc = title
                            self.log(
                                'section: ', current_section, 'title: ', title,
                                'url: ', url, 'desc: ', desc, '\n')
                            current_articles.append({
                                'title': title, 'url': url, 'description': desc})
            if current_articles:
                totalfeeds.append((current_section, current_articles))
        return totalfeeds