#!/usr/bin/env python2
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2016, Kovid Goyal

from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

import html5lib
from lxml import html

from calibre.web.feeds.news import BasicNewsRecipe


def CSSSelect(expr):
    expr = {
        'div.whatsNews-simple': '''descendant-or-self::div[@class and contains(concat(' ', normalize-space(@class), ' '), ' whatsNews-simple ')]''',
        'a.mjLinkItem[href]': '''descendant-or-self::a[@class and contains(concat(' ', normalize-space(@class), ' '), ' mjLinkItem ') and (@href)]''',
        '.meta_sectionName': '''descendant-or-self::*[@class and contains(concat(' ', normalize-space(@class), ' '), ' meta_sectionName ')]''',
        'p': 'descendant-or-self::p',
        'div.whatsNews-simple.whatsNews-itp': '''descendant-or-self::div[@class and contains(concat(' ', normalize-space(@class), ' '), ' whatsNews-simple ') and (@class and contains(concat(' ', normalize-space(@class), ' '), ' whatsNews-itp '))]''',  # noqa
        'a[href]': 'descendant-or-self::a[@href]',
        'span.date-date': "descendant-or-self::span[@class and contains(concat(' ', normalize-space(@class), ' '), ' date-date ')]",
        'div.itpSectionHeaderPdf a[href]': "descendant-or-self::div[@class and contains(concat(' ', normalize-space(@class), ' '), ' itpSectionHeaderPdf ')]/descendant-or-self::*/a[@href]",  # noqa
        'div.itpHeader ul.tab a[href]': "descendant-or-self::div[@class and contains(concat(' ', normalize-space(@class), ' '), ' itpHeader ')]/descendant-or-self::*/ul[@class and contains(concat(' ', normalize-space(@class), ' '), ' tab ')]/descendant-or-self::*/a[@href]",  # noqa
    }[expr]
    from lxml.etree import XPath
    return XPath(expr)


def classes(classes):
    q = frozenset(classes.split(' '))
    return dict(attrs={
        'class': lambda x: x and frozenset(x.split()).intersection(q)})


USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0'


class WSJ(BasicNewsRecipe):

    title = 'The Wall Street Journal (free)'
    __author__ = 'Kovid Goyal'
    description = 'News and current affairs'
    language = 'en'

    compress_news_images = True
    compress_news_images_auto_size = 7
    timefmt = ' [%a, %b %d, %Y]'
    no_stylesheets = True
    ignore_duplicate_articles = {'url'}
    remove_attributes = ['style', 'data-scrim']
    WSJ_ITP = 'http://online.wsj.com/itp/today'

    keep_only_tags = [
        dict(classes('wsj-article-headline-wrap article_header')),
        dict(name='span', itemprop='author', rel='author'),
        dict(name='article', id='article-contents articleBody'.split()),
        dict(name='div', id='article_story_body ncTitleArea snipper-ad-login'.split()),
        dict(classes('nc-exp-artbody errorNotFound')),
        dict(attrs={'data-module-zone': 'article_snippet'}),
    ]

    remove_tags = [
        classes(
            'insetButton insettipBox author-info media-object-video article_tools nc-exp-artmeta category'),
        dict(name='span', attrs={
            'data-country-code': True, 'data-ticker-code': True}),
        dict(name='meta link'.split()),
    ]

    def preprocess_raw_html(self, raw_html, url):
        root = html5lib.parse(
            raw_html, treebuilder='lxml', namespaceHTMLElements=False)
        raw_html = html.tostring(root)
        # open('/t/art.html', 'w').write(raw_html)
        return raw_html

    def preprocess_soup(self, soup):
        # Slideshow and expandable images need to be processed here to
        # set the src attribute correctly
        found = 0
        for img in soup.findAll('img', attrs={'data-in-base-data-lazy': True}):
            img['src'] = img['data-in-base-data-lazy']
            found += 1
        for img in soup.findAll('img', attrs={'data-enlarge': True}):
            img['src'] = img['data-enlarge']
            found += 1
        if found:
            self.log.debug('Found %d dynamic images in:' % found)
        return soup
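
    # A minimal standalone sketch of the lazy-image rewrite above, for
    # experimenting outside calibre. BeautifulSoup stands in for the soup
    # object the recipe receives; the markup and URLs are illustrative
    # assumptions, not real WSJ output:
    #
    #   from bs4 import BeautifulSoup
    #   soup = BeautifulSoup('<img data-enlarge="http://x/big.jpg">', 'html.parser')
    #   for img in soup.find_all('img', attrs={'data-enlarge': True}):
    #       img['src'] = img['data-enlarge']
    #   print(soup)  # <img data-enlarge="http://x/big.jpg" src="http://x/big.jpg"/>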

    def get_browser(self):
        br = BasicNewsRecipe.get_browser(self, user_agent=USER_AGENT)
        self.wsj_itp_page = br.open(self.WSJ_ITP).read()
        return br

    def abs_wsj_url(self, href):
        if not href.startswith('http'):
            href = 'http://online.wsj.com' + href
        return href

    def wsj_find_articles(self, url, ahed=False):
        root = self.index_to_soup(url, as_tree=True)

        for x in CSSSelect('div.whatsNews-simple')(root):
            x.getparent().remove(x)

        articles = []

        for container in root.xpath('//li[contains(@class, "mjItemMain")]'):
            meta = container.xpath('descendant::span[@class="meta_sectionName"]')
            if not meta:
                continue
            meta = meta[0]
            a = meta.xpath('ancestor::a')[0]
            meta.getparent().remove(meta)
            meta = self.tag_to_string(meta)
            title = self.tag_to_string(a)
            if meta:
                title += ' [%s]' % meta
            url = self.abs_wsj_url(a.get('href'))
            desc = ''
            if container:
                for p in container.xpath('descendant::p'):
                    q = self.tag_to_string(p)
                    if 'Subscriber Content' in q:
                        continue
                    desc += q
                    break

            articles.append({'title': title, 'url': url,
                             'description': desc, 'date': ''})

            self.log('\tFound article:', title)
            self.log('\t\t', desc)

        if ahed:
            for h2 in root.xpath('//li[@class="ahed_listitem"]/h2'):
                a = h2.xpath('descendant::a')[0]
                title = self.tag_to_string(a)
                url = self.abs_wsj_url(a.get('href'))
                desc = ''
                p = h2.xpath('following-sibling::p')
                if p:
                    desc = self.tag_to_string(p[0])
                articles.append({'title': title, 'url': url,
                                 'description': desc, 'date': ''})
                self.log('Found article:', title)
                self.log('\t\t', desc)

        return articles

    def wsj_find_wn_articles(self, url):
        root = self.index_to_soup(url, as_tree=True)
        articles = []

        whats_news = CSSSelect('div.whatsNews-simple.whatsNews-itp')(root)
        if whats_news:
            for a in CSSSelect('a[href]')(whats_news[-1]):
                if '/articles/' not in a.get('href', ''):
                    continue
                container = a.xpath('ancestor::p')
                for meta in CSSSelect('.meta_sectionName')(a):
                    meta.getparent().remove(meta)
                title = self.tag_to_string(a).strip()
                url = self.abs_wsj_url(a.get('href'))
                desc = ''
                if container:
                    desc = self.tag_to_string(container[0])

                articles.append({'title': title, 'url': url,
                                 'description': desc, 'date': ''})

                self.log('\tFound WN article:', title)
                self.log('\t\t', desc)

        return articles

    def wsj_add_feed(self, feeds, title, url):
        self.log('Found section:', title, '[' + url + ']')
        try:
            if url.endswith('whatsnews'):
                articles = self.wsj_find_wn_articles(url)
            else:
                articles = self.wsj_find_articles(
                    url, ahed=title == 'Front Section')
        except Exception:
            self.log.exception('Failed to parse section:', title)
            articles = []
        if articles:
            feeds.append((title, articles))

    def parse_index(self):
        # return self.test_wsj_index()
        root = self.index_to_soup(self.wsj_itp_page, as_tree=True)
        for span in CSSSelect('span.date-date')(root):
            if span.text and span.text.strip():
                self.timefmt = ' [%s]' % span.text.strip()
                break
        for a in CSSSelect('div.itpSectionHeaderPdf a[href]')(root):
            self.cover_url = a.get('href')
            break

        feeds = []
        for a in CSSSelect('div.itpHeader ul.tab a[href]')(root):
            if '/itp/' not in a.get('href', ''):
                continue
            pageone = a.get('href').endswith('pageone')
            if pageone:
                title = 'Front Section'
                url = self.abs_wsj_url(a.get('href'))
                self.wsj_add_feed(feeds, title, url)
                title = "What's News"
                url = url.replace('pageone', 'whatsnews')
                self.wsj_add_feed(feeds, title, url)
            else:
                title = self.tag_to_string(a)
                url = self.abs_wsj_url(a.get('href'))
                self.wsj_add_feed(feeds, title, url)
        return feeds
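
    # For reference, parse_index() must return the nested structure that
    # BasicNewsRecipe expects: a list of (section title, list of article
    # dicts) pairs. The values below are illustrative only, not real output:
    #
    #   [
    #       ('Front Section', [
    #           {'title': 'Some headline [World News]',
    #            'url': 'http://online.wsj.com/articles/some-slug',
    #            'description': 'First paragraph of the summary',
    #            'date': ''},
    #       ]),
    #   ]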

    def test_wsj_index(self):
        return [
            ('Testing', [
                {'title': 'Article One',
                 'url': 'http://online.wsj.com/articles/the-end-of-the-impulse-shopper-1416872108'},  # noqa
                {'title': 'Article Two',
                 'url': 'http://online.wsj.com/articles/ferguson-police-officer-not-charged-in-black-teens-shooting-1416882438'},  # noqa
                {'title': 'Article Three',
                 'url': 'http://online.wsj.com/article/SB10634695869867284248804580297251334393676.html'},  # noqa
            ]),
        ]
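
    # To exercise this recipe outside the calibre GUI, the usual workflow is
    # to save it as a .recipe file and feed it to ebook-convert; the filename
    # here is an assumption, while --test (fetch only a couple of articles
    # per feed) and -vv (verbose logging) are standard calibre flags:
    #
    #   ebook-convert wsj_free.recipe wsj.epub --test -vv
    #
    # Alternatively, uncomment the 'return self.test_wsj_index()' line in
    # parse_index() to run article cleanup against the three fixed URLs in
    # test_wsj_index() without scraping the full ITP index.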