diff --git a/recipes/berlin_policy_journal.recipe b/recipes/berlin_policy_journal.recipe
deleted file mode 100644
index 91e502f973..0000000000
--- a/recipes/berlin_policy_journal.recipe
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/usr/bin/env python
-# vim:fileencoding=utf-8
-# License: GPLv3 Copyright: 2016, Aimylios
-
-from __future__ import unicode_literals, division, absolute_import, print_function
-
-'''
-berlinpolicyjournal.com
-'''
-
-import re
-import time
-from calibre.web.feeds.news import BasicNewsRecipe
-
-
-class BerlinPolicyJournal(BasicNewsRecipe):
-    title = 'Berlin Policy Journal'
-    __author__ = 'Aimylios'
-    description = 'Articles from berlinpolicyjournal.com'
-    publisher = 'Deutsche Gesellschaft für Auswärtige Politik e.V.'
-    publication_type = 'magazine'
-    language = 'en_DE'
-
-    oldest_article = 50
-    max_articles_per_feed = 30
-    simultaneous_downloads = 5
-    no_stylesheets = True
-    remove_javascript = True
-
-    conversion_options = {'smarten_punctuation': True,
-                          'publisher': publisher}
-
-    INDEX = 'http://berlinpolicyjournal.com/'
-    masthead_url = INDEX + 'IP/wp-content/uploads/2015/04/logo_bpj_header.gif'
-
-    keep_only_tags = [
-        dict(name='article')
-    ]
-
-    remove_tags = [
-        dict(name='div', attrs={
-            'class': ['hidden', 'meta-count', 'meta-share']}),
-        dict(name='span', attrs={'class': 'ava-auth'}),
-        dict(name='img', attrs={'alt': re.compile('_store_120px_width$')}),
-        dict(name='img', attrs={'alt': re.compile('^bpj_app_')}),
-        dict(name='img', attrs={'alt': re.compile('^BPJ-Montage_')}),
-        dict(name=['link', 'footer', 'br'])
-    ]
-
-    remove_attributes = ['sizes', 'width', 'height', 'align']
-
-    extra_css = 'h1 {font-size: 1.6em; text-align: left} \
-                 .entry-subtitle {font-style: italic; margin-bottom: 1em} \
-                 .wp-caption {margin-top: 1em} \
-                 .wp-caption-text {font-size: 0.6em; margin-top: 0em}'
-
-    def parse_index(self):
-        soup = self.index_to_soup(self.INDEX)
-        img_div = soup.find('div', {'id': 'text-2'})
-        self.cover_url = img_div.find('img', src=True)['src']
-        menu = soup.find('ul', {'id': re.compile('menu-ip')})
-        submenus = menu.findAll(
-            'li', {'class': re.compile('item-has-children')})
-        mag = submenus[0].find('li')
-        mag_name = self.tag_to_string(mag.a)
-        mag_url = mag.a['href']
-        categories = [{'name': mag_name, 'url': mag_url, 'type': 'magazine'}]
-        for blog in submenus[1].findAll('li'):
-            blog_name = self.tag_to_string(blog.a)
-            blog_url = blog.a['href']
-            categories.append(
-                {'name': blog_name, 'url': blog_url, 'type': 'blog'})
-        feeds = []
-        for cat in categories:
-            cat['articles'] = []
-            for i in ['1', '2']:
-                soup = self.index_to_soup(cat['url'] + '/page/' + i)
-                for div in soup.findAll('div', {'class': 'post-box-big'}):
-                    timestamp = time.strptime(div.find('time')['datetime'][
-                        :15], '%Y-%m-%dT%H:%M')
-                    age = (time.time() - time.mktime(timestamp)) / (24 * 3600)
-                    if age > self.oldest_article and cat['type'] == 'blog':
-                        continue
-                    article_title = self.tag_to_string(
-                        div.find('h3', {'class': 'entry-title'}).a)
-                    article_url = div.find(
-                        'h3', {'class': 'entry-title'}).a['href']
-                    article_date = type(u'')(time.strftime(
-                        ' [%a, %d %b %H:%M]', timestamp))
-                    article_desc = self.tag_to_string(
-                        div.find('div', {'class': 'i-summary'}).p)
-                    cat['articles'].append({'title': article_title,
-                                            'url': article_url,
-                                            'date': article_date,
-                                            'description': article_desc})
-                if soup.find('div', {'class': 'pagination'}) is None:
-                    break
-            if cat['articles']:
-                feeds.append((cat['name'], cat['articles']))
-        return feeds
-
-    def postprocess_html(self, soup, first_fetch):
-        # clean up formatting of author(s) and date
-        div = soup.find('div', {'class': 'meta-info'})
-        authors = ''
-        for entry in div.findAll('span', {'class': 'entry-author'}):
-            authors = authors + entry.a.span.renderContents().decode('utf-8').strip() + ', '
-        date = div.find('time').renderContents().decode('utf-8').strip()
-        div.replaceWith('<div>' + date + ' | ' + authors[:-2] + '</div>')
-        return soup
diff --git a/recipes/icons/le_monde.png b/recipes/icons/le_monde.png
index 5166fcf3d2..a23b99db92 100644
Binary files a/recipes/icons/le_monde.png and b/recipes/icons/le_monde.png differ
diff --git a/recipes/icons/le_monde_sub.png b/recipes/icons/le_monde_sub.png
index e654a68651..097386a2ac 100644
Binary files a/recipes/icons/le_monde_sub.png and b/recipes/icons/le_monde_sub.png differ
diff --git a/recipes/icons/le_monde_sub_paper.png b/recipes/icons/le_monde_sub_paper.png
new file mode 100644
index 0000000000..b7d95717c6
Binary files /dev/null and b/recipes/icons/le_monde_sub_paper.png differ
diff --git a/recipes/le_monde.recipe b/recipes/le_monde.recipe
index a443c254c8..bbcd6d87c3 100644
--- a/recipes/le_monde.recipe
+++ b/recipes/le_monde.recipe
@@ -1,9 +1,16 @@
+#!/usr/bin/env python
+# vim:fileencoding=utf-8
+from __future__ import absolute_import, division, print_function, unicode_literals
+
 __license__ = 'GPL v3'
 __copyright__ = '2012'
+
 '''
 lemonde.fr
 '''
-from calibre.web.feeds.recipes import BasicNewsRecipe
+
+from calibre.web.feeds.news import BasicNewsRecipe
+import re
 
 
 def classes(classes):
@@ -15,64 +22,112 @@ def classes(classes):
 class LeMonde(BasicNewsRecipe):
     title = 'Le Monde'
     __author__ = 'veezh'
-    description = u'Actualités'
-    oldest_article = 1
-    max_articles_per_feed = 100
-    no_stylesheets = True
-    use_embedded_content = False
-    encoding = 'utf-8'
-    publisher = 'lemonde.fr'
-    category = 'news, France, world'
+    description = 'Les flux RSS du Monde.fr'
+    publisher = 'Société Editrice du Monde'
+    publication_type = 'newspaper'
+    needs_subscription = 'optional'
     language = 'fr'
 
-    extra_css = '''
-                    img{max-width:100%}
-                    h1{font-size:1.2em !important; line-height:1.2em !important; }
-                    h2{font-size:1em !important; line-height:1em !important; }
-                    h3{font-size:1em !important; text-transform:uppercase !important; color:#666;}
-                    #photo{text-align:center !important; margin:10px 0 -8px;}
-                    #lgd{font-size:1em !important; line-height:1em !important; font-style:italic; color:#333;} '''
-    keep_only_tags = [
-        dict(itemprop=['Headline', 'description']),
-        classes('bloc_signature'),
-        dict(itemprop=['articleBody']),
-    ]
+    oldest_article = 2
+    max_articles_per_feed = 15
+    no_stylesheets = True
     remove_empty_feeds = True
+    ignore_duplicate_articles = {'title', 'url'}
 
-    def preprocess_html(self, soup):
-        for lgd in soup.findAll(id="lgd"):
-            lgd.contents[-1].extract()
-        for img in soup.findAll('img', attrs={'data-src': True}):
-            img['src'] = img['data-src']
-        return soup
+    conversion_options = {
+        'publisher': publisher
+    }
 
-    def get_article_url(self, article):
-        url = article.get('guid', None)
-        if '/chat/' in url or '.blog' in url or '/video/' in url or '/sport/' in url or '/portfolio/' in url or '/visuel/' in url:
-            url = None
-        return url
+    masthead_url = 'http://upload.wikimedia.org/wikipedia/commons/thumb/5/54/Le_monde_logo.svg/800px-Le_monde_logo.svg.png'
 
     feeds = [
-        ('A la une', 'http://www.lemonde.fr/rss/une.xml'),
-        ('International', 'http://www.lemonde.fr/rss/tag/international.xml'),
-        ('Europe', 'http://www.lemonde.fr/rss/tag/europe.xml'),
-        (u'Société', 'http://www.lemonde.fr/rss/tag/societe.xml'),
-        ('Economie', 'http://www.lemonde.fr/rss/tag/economie.xml'),
-        (u'Médias', 'http://www.lemonde.fr/rss/tag/actualite-medias.xml'),
-        (u'Planète', 'http://www.lemonde.fr/rss/tag/planete.xml'),
-        ('Culture', 'http://www.lemonde.fr/rss/tag/culture.xml'),
-        ('Technologies', 'http://www.lemonde.fr/rss/tag/technologies.xml'),
-        ('Livres', 'http://www.lemonde.fr/rss/tag/livres.xml'),
-
+        ('A la une', 'https://www.lemonde.fr/rss/une.xml'),
+        ('International', 'https://www.lemonde.fr/international/rss_full.xml'),
+        ('Politique', 'https://www.lemonde.fr/politique/rss_full.xml'),
+        ('Société', 'https://www.lemonde.fr/societe/rss_full.xml'),
+        ('Economie', 'https://www.lemonde.fr/economie/rss_full.xml'),
+        ('Planète', 'https://www.lemonde.fr/planete/rss_full.xml'),
+        ('Sciences', 'https://www.lemonde.fr/sciences/rss_full.xml'),
+        ('Pixels', 'https://www.lemonde.fr/pixels/rss_full.xml'),
+        ('Culture', 'https://www.lemonde.fr/culture/rss_full.xml'),
+        ('Idées', 'https://www.lemonde.fr/idees/rss_full.xml')
     ]
 
-    def get_cover_url(self):
-        cover_url = None
-        soup = self.index_to_soup(
-            'http://www.lemonde.fr/web/monde_pdf/0,33-0,1-0,0.html')
-        link_item = soup.find('div', attrs={'class': 'pg-gch'})
+    keep_only_tags = [
+        classes('article__header'),
+        dict(name='section', attrs={'class': ['article__content', 'article__heading',
+                                              'article__wrapper']})
+    ]
 
-        if link_item and link_item.img:
-            cover_url = link_item.img['src']
+    remove_tags = [
+        classes('article__status meta__date meta__reading-time meta__social multimedia-embed'),
+        dict(name=['footer', 'link']),
+        dict(name='section', attrs={'class': ['article__reactions', 'author', 'catcher',
+                                              'portfolio', 'services-inread']})
+    ]
 
-        return cover_url
+    remove_attributes = [
+        'data-sizes', 'height', 'sizes', 'width'
+    ]
+
+    preprocess_regexps = [
+        # insert space between author name and description
+        (re.compile(r'(]*>)([^<]*)',
+                    re.IGNORECASE), lambda match: match.group(1) + ' ' + match.group(2)),
+        # insert " | " between article type and description
+        (re.compile(r'(]*>[^<]*)()',
+                    re.IGNORECASE), lambda match: match.group(1) + ' | ' + match.group(2))
+    ]
+
+    extra_css = '''
+        h2 { font-size: 1em; }
+        h3 { font-size: 1em; }
+        .article__desc { font-weight: bold; }
+        .article__fact { font-weight: bold; text-transform: uppercase; }
+        .article__kicker { text-transform: uppercase; }
+        .article__legend { font-size: 0.6em; margin-bottom: 1em; }
+        .article__title { margin-top: 0em; }
+    '''
+
+    def get_browser(self):
+        br = BasicNewsRecipe.get_browser(self)
+        if self.username is not None and self.password is not None:
+            br.open('https://secure.lemonde.fr/sfuser/connexion')
+            br.select_form(name='connection')
+            br['connection[mail]'] = self.username
+            br['connection[password]'] = self.password
+            br.submit()
+        return br
+
+    def get_article_url(self, article):
+        url = BasicNewsRecipe.get_article_url(self, article)
+        # skip articles without relevant content (e.g., videos)
+        for el in 'blog chat live podcasts portfolio video visuel'.split():
+            if '/' + el + '/' in url:
+                self.log(url)
+                self.abort_article()
+        return url
+
+    def preprocess_html(self, soup):
+        # when an image is available in multiple sizes, select the smallest one
+        for img in soup.find_all('img', {'data-srcset': True}):
+            data_srcset = img['data-srcset'].split()
+            if len(data_srcset) > 1:
+                img['src'] = data_srcset[-2]
+                del img['data-srcset']
+        return soup
+
+    def postprocess_html(self, soup, first_fetch):
+        # remove local hyperlinks
+        for a in soup.find_all('a', {'href': True}):
+            if '.lemonde.fr/' in a['href']:
+                a.replace_with(self.tag_to_string(a))
+        # clean up header
+        for ul in soup.find_all('ul', {'class': 'breadcrumb'}):
+            div = soup.new_tag('div')
+            category = ''
+            for li in ul.find_all('li', {'class': True}):
+                category += self.tag_to_string(li).strip().upper() + ' - '
+            div.string = category[:-3]
+            ul.replace_with(div)
+        return soup
diff --git a/recipes/le_monde_sub.recipe b/recipes/le_monde_sub.recipe
index db0271d14d..faf2db09b5 100644
--- a/recipes/le_monde_sub.recipe
+++ b/recipes/le_monde_sub.recipe
@@ -1,8 +1,16 @@
+#!/usr/bin/env python
 # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
+from __future__ import absolute_import, division, print_function, unicode_literals
+
 __author__ = 'S. Durand '
 __license__ = 'GPL v3'
 
+'''
+lemonde.fr
+'''
+
 from calibre.web.feeds.news import BasicNewsRecipe
+import re
 
 
 def classes(classes):
@@ -11,29 +19,23 @@ def classes(classes):
         'class': lambda x: x and frozenset(x.split()).intersection(q)})
 
 
-class LeMonde(BasicNewsRecipe):
-
-    title = u'Le Monde: Édition abonnés'
+class LeMondeNumerique(BasicNewsRecipe):
+    title = 'Le Monde: Édition abonnés'
     __author__ = 'Sylvain Durand'
-    description = u'La version papier du quotidien Le Monde, disponible du lundi au samedi à partir de 14 heures environ, avec tous ses cahiers.'
-    language = 'fr'
-    encoding = 'utf8'
-
+    description = 'La version numérique du quotidien Le Monde'
+    publisher = 'Société Editrice du Monde'
+    publication_type = 'newspaper'
     needs_subscription = True
+    language = 'fr'
 
-    extra_css = '''
-                    img{max-width:100%}
-                    h1{font-size:1.2em !important; line-height:1.2em !important; }
-                    h2{font-size:1em !important; line-height:1em !important; }
-                    h3{font-size:1em !important; text-transform:uppercase !important; color:#666;}
-                    #photo{text-align:center !important; margin:10px 0 -8px;}
-                    #lgd{font-size:1em !important; line-height:1em !important; font-style:italic; color:#333;} '''
+    no_stylesheets = True
+    ignore_duplicate_articles = {'title', 'url'}
 
-    keep_only_tags = [
-        dict(itemprop=['Headline', 'description']),
-        classes('bloc_signature'),
-        dict(itemprop=['articleBody']),
-    ]
+    conversion_options = {
+        'publisher': publisher
+    }
+
+    masthead_url = 'http://upload.wikimedia.org/wikipedia/commons/thumb/5/54/Le_monde_logo.svg/800px-Le_monde_logo.svg.png'
 
     lm_sections = [
         'international:International',
@@ -49,6 +51,42 @@ class LeMonde(BasicNewsRecipe):
         'campus:Campus'
     ]
 
+    keep_only_tags = [
+        classes('article__header'),
+        dict(name='section', attrs={'class': ['article__content', 'article__heading',
+                                              'article__wrapper']})
+    ]
+
+    remove_tags = [
+        classes('article__status meta__date meta__reading-time meta__social multimedia-embed'),
+        dict(name=['footer', 'link']),
+        dict(name='section', attrs={'class': ['article__reactions', 'author', 'catcher',
+                                              'portfolio', 'services-inread']})
+    ]
+
+    remove_attributes = [
+        'data-sizes', 'height', 'sizes', 'width'
+    ]
+
+    preprocess_regexps = [
+        # insert space between author name and description
+        (re.compile(r'(]*>)([^<]*)',
+                    re.IGNORECASE), lambda match: match.group(1) + ' ' + match.group(2)),
+        # insert " | " between article type and description
+        (re.compile(r'(]*>[^<]*)()',
+                    re.IGNORECASE), lambda match: match.group(1) + ' | ' + match.group(2))
+    ]
+
+    extra_css = '''
+        h2 { font-size: 1em; }
+        h3 { font-size: 1em; }
+        .article__desc { font-weight: bold; }
+        .article__fact { font-weight: bold; text-transform: uppercase; }
+        .article__kicker { text-transform: uppercase; }
+        .article__legend { font-size: 0.6em; margin-bottom: 1em; }
+        .article__title { margin-top: 0em; }
+    '''
+
     def get_browser(self):
         br = BasicNewsRecipe.get_browser(self)
         br.open('https://secure.lemonde.fr/sfuser/connexion')
@@ -58,45 +96,61 @@ class LeMonde(BasicNewsRecipe):
         br.submit()
         return br
 
-    def preprocess_html(self, soup):
-        for lgd in soup.findAll(id="lgd"):
-            lgd.contents[-1].extract()
-        for img in soup.findAll('img', attrs={'data-src': True}):
-            img['src'] = img['data-src']
-        return soup
-
     def parse_index(self):
         ans = []
         for x in self.lm_sections:
             s, section_title = x.partition(':')[::2]
             self.log('Processing section', section_title, '...')
-            articles = list(self.parse_section('http://www.lemonde.fr/%s/' % s))
+            articles = list(self.parse_section('https://www.lemonde.fr/%s/' % s))
             if articles:
                 ans.append((section_title, articles))
         return ans
 
     def parse_section(self, url):
         soup = self.index_to_soup(url)
-        container = soup.find(attrs={'class':lambda x: x and 'grid_12 alpha' in x})
-        for article in container.findAll('article'):
-            h2 = article.find('h2')
-            if h2 is None:
-                h2 = article.find('h3')
-            if h2 is None:
-                continue
-            a = h2.find('a', href=True)
+        for article in soup.find_all('section', {'class': 'teaser'}):
+            # extract URL
+            a = article.find('a', {'class': 'teaser__link'})
             if a is None:
-                a = h2.findParents('a', href=True)
-                if not a:
-                    continue
-                a = a[0]
+                continue
             url = a['href']
-            if url.startswith('/'):
-                url = 'http://www.lemonde.fr' + url
-            title = self.tag_to_string(a)
+            # skip articles without relevant content (e.g., videos)
+            for el in 'blog chat live podcasts portfolio video visuel'.split():
+                if '/' + el + '/' in url:
+                    continue
+            # extract title
+            h3 = article.find('h3', {'class': 'teaser__title'})
+            if h3 is None:
+                continue
+            title = self.tag_to_string(h3)
+            # extract description
             desc = ''
-            p = article.find('p')
+            p = article.find('p', {'class': 'teaser__desc'})
            if p is not None:
                 desc = self.tag_to_string(p)
             self.log('\tFound article', title, 'at', url)
             yield {'title': title, 'url': url, 'description': desc}
+
+    def preprocess_html(self, soup):
+        # when an image is available in multiple sizes, select the smallest one
+        for img in soup.find_all('img', {'data-srcset': True}):
+            data_srcset = img['data-srcset'].split()
+            if len(data_srcset) > 1:
+                img['src'] = data_srcset[-2]
+                del img['data-srcset']
+        return soup
+
+    def postprocess_html(self, soup, first_fetch):
+        # remove local hyperlinks
+        for a in soup.find_all('a', {'href': True}):
+            if '.lemonde.fr/' in a['href']:
+                a.replace_with(self.tag_to_string(a))
+        # clean up header
+        for ul in soup.find_all('ul', {'class': 'breadcrumb'}):
+            div = soup.new_tag('div')
+            category = ''
+            for li in ul.find_all('li', {'class': True}):
+                category += self.tag_to_string(li).strip().upper() + ' - '
+            div.string = category[:-3]
+            ul.replace_with(div)
+        return soup