Update Le Monde

This commit is contained in:
Aimylios 2021-07-11 12:31:38 +02:00
parent fea8c7988b
commit 5385ee0808
5 changed files with 201 additions and 94 deletions

Binary file not shown.

Before

Width:  |  Height:  |  Size: 293 B

After

Width:  |  Height:  |  Size: 400 B

Binary file not shown.

Before

Width:  |  Height:  |  Size: 210 B

After

Width:  |  Height:  |  Size: 717 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 404 B

View File

@ -1,9 +1,15 @@
#!/usr/bin/env python
# vim:fileencoding=utf-8
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '2012' __copyright__ = '2012'
''' '''
lemonde.fr lemonde.fr
''' '''
from calibre.web.feeds.recipes import BasicNewsRecipe
from calibre.web.feeds.news import BasicNewsRecipe
import re
def classes(classes): def classes(classes):
@ -15,64 +21,112 @@ def classes(classes):
class LeMonde(BasicNewsRecipe): class LeMonde(BasicNewsRecipe):
title = 'Le Monde' title = 'Le Monde'
__author__ = 'veezh' __author__ = 'veezh'
description = u'Actualités' description = 'Les flux RSS du Monde.fr'
oldest_article = 1 publisher = 'Société Editrice du Monde'
max_articles_per_feed = 100 publication_type = 'newspaper'
no_stylesheets = True needs_subscription = 'optional'
use_embedded_content = False
encoding = 'utf-8'
publisher = 'lemonde.fr'
category = 'news, France, world'
language = 'fr' language = 'fr'
extra_css = '''
img{max-width:100%}
h1{font-size:1.2em !important; line-height:1.2em !important; }
h2{font-size:1em !important; line-height:1em !important; }
h3{font-size:1em !important; text-transform:uppercase !important; color:#666;}
#photo{text-align:center !important; margin:10px 0 -8px;}
#lgd{font-size:1em !important; line-height:1em !important; font-style:italic; color:#333;} '''
keep_only_tags = [ oldest_article = 2
dict(itemprop=['Headline', 'description']), max_articles_per_feed = 15
classes('bloc_signature'), no_stylesheets = True
dict(itemprop=['articleBody']),
]
remove_empty_feeds = True remove_empty_feeds = True
ignore_duplicate_articles = {'title', 'url'}
def preprocess_html(self, soup): conversion_options = {
for lgd in soup.findAll(id="lgd"): 'publisher': publisher
lgd.contents[-1].extract() }
for img in soup.findAll('img', attrs={'data-src': True}):
img['src'] = img['data-src']
return soup
def get_article_url(self, article): masthead_url = 'http://upload.wikimedia.org/wikipedia/commons/thumb/5/54/Le_monde_logo.svg/800px-Le_monde_logo.svg.png'
url = article.get('guid', None)
if '/chat/' in url or '.blog' in url or '/video/' in url or '/sport/' in url or '/portfolio/' in url or '/visuel/' in url:
url = None
return url
feeds = [ feeds = [
('A la une', 'http://www.lemonde.fr/rss/une.xml'), ('A la une', 'https://www.lemonde.fr/rss/une.xml'),
('International', 'http://www.lemonde.fr/rss/tag/international.xml'), ('International', 'https://www.lemonde.fr/international/rss_full.xml'),
('Europe', 'http://www.lemonde.fr/rss/tag/europe.xml'), ('Politique', 'https://www.lemonde.fr/politique/rss_full.xml'),
(u'Société', 'http://www.lemonde.fr/rss/tag/societe.xml'), ('Société', 'https://www.lemonde.fr/societe/rss_full.xml'),
('Economie', 'http://www.lemonde.fr/rss/tag/economie.xml'), ('Economie', 'https://www.lemonde.fr/economie/rss_full.xml'),
(u'Médias', 'http://www.lemonde.fr/rss/tag/actualite-medias.xml'), ('Planète', 'https://www.lemonde.fr/planete/rss_full.xml'),
(u'Planète', 'http://www.lemonde.fr/rss/tag/planete.xml'), ('Sciences', 'https://www.lemonde.fr/sciences/rss_full.xml'),
('Culture', 'http://www.lemonde.fr/rss/tag/culture.xml'), ('Pixels', 'https://www.lemonde.fr/pixels/rss_full.xml'),
('Technologies', 'http://www.lemonde.fr/rss/tag/technologies.xml'), ('Culture', 'https://www.lemonde.fr/culture/rss_full.xml'),
('Livres', 'http://www.lemonde.fr/rss/tag/livres.xml'), ('Idées', 'https://www.lemonde.fr/idees/rss_full.xml')
] ]
def get_cover_url(self): keep_only_tags = [
cover_url = None classes('article__header'),
soup = self.index_to_soup( dict(name='section', attrs={'class': ['article__content', 'article__heading',
'http://www.lemonde.fr/web/monde_pdf/0,33-0,1-0,0.html') 'article__wrapper']})
link_item = soup.find('div', attrs={'class': 'pg-gch'}) ]
if link_item and link_item.img: remove_tags = [
cover_url = link_item.img['src'] classes('article__status meta__date meta__reading-time meta__social multimedia-embed'),
dict(name=['footer', 'link']),
dict(name='section', attrs={'class': ['article__reactions', 'author', 'catcher',
'portfolio', 'services-inread']})
]
return cover_url remove_attributes = [
'data-sizes', 'height', 'sizes', 'width'
]
preprocess_regexps = [
# insert space between author name and description
(re.compile(r'(<span class="[^"]*author__desc[^>]*>)([^<]*</span>)',
re.IGNORECASE), lambda match: match.group(1) + ' ' + match.group(2)),
# insert " | " between article type and description
(re.compile(r'(<span class="[^"]*article__kicker[^>]*>[^<]*)(</span>)',
re.IGNORECASE), lambda match: match.group(1) + ' | ' + match.group(2))
]
extra_css = '''
h2 { font-size: 1em; }
h3 { font-size: 1em; }
.article__desc { font-weight: bold; }
.article__fact { font-weight: bold; text-transform: uppercase; }
.article__kicker { text-transform: uppercase; }
.article__legend { font-size: 0.6em; margin-bottom: 1em; }
.article__title { margin-top: 0em; }
'''
def get_browser(self):
    """Return a browser, logged in at lemonde.fr when credentials are set.

    With no username/password the plain browser is returned and only
    freely accessible articles will download.
    """
    browser = BasicNewsRecipe.get_browser(self)
    if self.username is None or self.password is None:
        # anonymous access: skip the login round-trip entirely
        return browser
    browser.open('https://secure.lemonde.fr/sfuser/connexion')
    browser.select_form(name='connection')
    browser['connection[mail]'] = self.username
    browser['connection[password]'] = self.password
    browser.submit()
    return browser
def get_article_url(self, article):
    """Resolve the feed item's URL, aborting items with no usable body.

    Sections such as videos, live blogs or portfolios carry no
    extractable article text, so those items are dropped.
    """
    url = BasicNewsRecipe.get_article_url(self, article)
    unwanted = 'blog chat live podcasts portfolio video visuel'.split()
    if any('/' + section + '/' in url for section in unwanted):
        self.log(url)
        # abort_article raises, so no further processing happens here
        self.abort_article()
    return url
def preprocess_html(self, soup):
    """Collapse multi-size image candidates to a single src attribute.

    srcset entries alternate "URL size"; index -2 is the URL of the
    final entry (presumably the intended variant — behaviour kept as-is).
    """
    for img in soup.find_all('img', {'data-srcset': True}):
        candidates = img['data-srcset'].split()
        if len(candidates) > 1:
            img['src'] = candidates[-2]
        # always drop the attribute so the downloader ignores it
        del img['data-srcset']
    return soup
def postprocess_html(self, soup, first_fetch):
    """Strip site-internal hyperlinks and condense the breadcrumb header."""
    # replace links back into lemonde.fr with their plain text
    for anchor in soup.find_all('a', {'href': True}):
        if '.lemonde.fr/' in anchor['href']:
            anchor.replace_with(self.tag_to_string(anchor))
    # turn the breadcrumb <ul> into a single "A - B - C" line
    for crumbs in soup.find_all('ul', {'class': 'breadcrumb'}):
        labels = [
            self.tag_to_string(item).strip().upper()
            for item in crumbs.find_all('li', {'class': True})
        ]
        replacement = soup.new_tag('div')
        replacement.string = ' - '.join(labels)
        crumbs.replace_with(replacement)
    return soup

View File

@ -1,8 +1,15 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
__author__ = 'S. Durand <sylvaindurand@users.noreply.github.com>' __author__ = 'S. Durand <sylvaindurand@users.noreply.github.com>'
__license__ = 'GPL v3' __license__ = 'GPL v3'
'''
lemonde.fr
'''
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
import re
def classes(classes): def classes(classes):
@ -11,29 +18,23 @@ def classes(classes):
'class': lambda x: x and frozenset(x.split()).intersection(q)}) 'class': lambda x: x and frozenset(x.split()).intersection(q)})
class LeMonde(BasicNewsRecipe): class LeMondeNumerique(BasicNewsRecipe):
title = 'Le Monde: Édition abonnés'
title = u'Le Monde: Édition abonnés'
__author__ = 'Sylvain Durand' __author__ = 'Sylvain Durand'
description = u'La version papier du quotidien Le Monde, disponible du lundi au samedi à partir de 14 heures environ, avec tous ses cahiers.' description = 'La version numérique du quotidien Le Monde'
language = 'fr' publisher = 'Société Editrice du Monde'
encoding = 'utf8' publication_type = 'newspaper'
needs_subscription = True needs_subscription = True
language = 'fr'
extra_css = ''' no_stylesheets = True
img{max-width:100%} ignore_duplicate_articles = {'title', 'url'}
h1{font-size:1.2em !important; line-height:1.2em !important; }
h2{font-size:1em !important; line-height:1em !important; }
h3{font-size:1em !important; text-transform:uppercase !important; color:#666;}
#photo{text-align:center !important; margin:10px 0 -8px;}
#lgd{font-size:1em !important; line-height:1em !important; font-style:italic; color:#333;} '''
keep_only_tags = [ conversion_options = {
dict(itemprop=['Headline', 'description']), 'publisher': publisher
classes('bloc_signature'), }
dict(itemprop=['articleBody']),
] masthead_url = 'http://upload.wikimedia.org/wikipedia/commons/thumb/5/54/Le_monde_logo.svg/800px-Le_monde_logo.svg.png'
lm_sections = [ lm_sections = [
'international:International', 'international:International',
@ -49,6 +50,42 @@ class LeMonde(BasicNewsRecipe):
'campus:Campus' 'campus:Campus'
] ]
keep_only_tags = [
classes('article__header'),
dict(name='section', attrs={'class': ['article__content', 'article__heading',
'article__wrapper']})
]
remove_tags = [
classes('article__status meta__date meta__reading-time meta__social multimedia-embed'),
dict(name=['footer', 'link']),
dict(name='section', attrs={'class': ['article__reactions', 'author', 'catcher',
'portfolio', 'services-inread']})
]
remove_attributes = [
'data-sizes', 'height', 'sizes', 'width'
]
preprocess_regexps = [
# insert space between author name and description
(re.compile(r'(<span class="[^"]*author__desc[^>]*>)([^<]*</span>)',
re.IGNORECASE), lambda match: match.group(1) + ' ' + match.group(2)),
# insert " | " between article type and description
(re.compile(r'(<span class="[^"]*article__kicker[^>]*>[^<]*)(</span>)',
re.IGNORECASE), lambda match: match.group(1) + ' | ' + match.group(2))
]
extra_css = '''
h2 { font-size: 1em; }
h3 { font-size: 1em; }
.article__desc { font-weight: bold; }
.article__fact { font-weight: bold; text-transform: uppercase; }
.article__kicker { text-transform: uppercase; }
.article__legend { font-size: 0.6em; margin-bottom: 1em; }
.article__title { margin-top: 0em; }
'''
def get_browser(self): def get_browser(self):
br = BasicNewsRecipe.get_browser(self) br = BasicNewsRecipe.get_browser(self)
br.open('https://secure.lemonde.fr/sfuser/connexion') br.open('https://secure.lemonde.fr/sfuser/connexion')
@ -58,45 +95,61 @@ class LeMonde(BasicNewsRecipe):
br.submit() br.submit()
return br return br
def preprocess_html(self, soup):
for lgd in soup.findAll(id="lgd"):
lgd.contents[-1].extract()
for img in soup.findAll('img', attrs={'data-src': True}):
img['src'] = img['data-src']
return soup
def parse_index(self): def parse_index(self):
ans = [] ans = []
for x in self.lm_sections: for x in self.lm_sections:
s, section_title = x.partition(':')[::2] s, section_title = x.partition(':')[::2]
self.log('Processing section', section_title, '...') self.log('Processing section', section_title, '...')
articles = list(self.parse_section('http://www.lemonde.fr/%s/' % s)) articles = list(self.parse_section('https://www.lemonde.fr/%s/' % s))
if articles: if articles:
ans.append((section_title, articles)) ans.append((section_title, articles))
return ans return ans
def parse_section(self, url): def parse_section(self, url):
soup = self.index_to_soup(url) soup = self.index_to_soup(url)
container = soup.find(attrs={'class':lambda x: x and 'grid_12 alpha' in x}) for article in soup.find_all('section', {'class': 'teaser'}):
for article in container.findAll('article'): # extract URL
h2 = article.find('h2') a = article.find('a', {'class': 'teaser__link'})
if h2 is None:
h2 = article.find('h3')
if h2 is None:
continue
a = h2.find('a', href=True)
if a is None: if a is None:
a = h2.findParents('a', href=True)
if not a:
continue continue
a = a[0]
url = a['href'] url = a['href']
if url.startswith('/'): # skip articles without relevant content (e.g., videos)
url = 'http://www.lemonde.fr' + url for el in 'blog chat live podcasts portfolio video visuel'.split():
title = self.tag_to_string(a) if '/' + el + '/' in url:
continue
# extract title
h3 = article.find('h3', {'class': 'teaser__title'})
if h3 is None:
continue
title = self.tag_to_string(h3)
# extract description
desc = '' desc = ''
p = article.find('p') p = article.find('p', {'class': 'teaser__desc'})
if p is not None: if p is not None:
desc = self.tag_to_string(p) desc = self.tag_to_string(p)
self.log('\tFound article', title, 'at', url) self.log('\tFound article', title, 'at', url)
yield {'title': title, 'url': url, 'description': desc} yield {'title': title, 'url': url, 'description': desc}
def preprocess_html(self, soup):
    """Pick one src out of an image's data-srcset candidate list.

    Entries in data-srcset alternate URL and size token, so the
    second-to-last token is the URL of the last candidate; that one is
    used unchanged to preserve existing behaviour.
    """
    for image in soup.find_all('img', {'data-srcset': True}):
        tokens = image['data-srcset'].split()
        if len(tokens) > 1:
            image['src'] = tokens[-2]
        del image['data-srcset']
    return soup
def postprocess_html(self, soup, first_fetch):
    """Remove lemonde.fr-internal links and flatten the breadcrumb list."""
    # internal hyperlinks become plain text so the ebook has no dead links
    for link in soup.find_all('a', {'href': True}):
        if '.lemonde.fr/' in link['href']:
            link.replace_with(self.tag_to_string(link))
    # breadcrumb: collect upper-cased section names into one div
    for breadcrumb in soup.find_all('ul', {'class': 'breadcrumb'}):
        parts = []
        for entry in breadcrumb.find_all('li', {'class': True}):
            parts.append(self.tag_to_string(entry).strip().upper())
        header = soup.new_tag('div')
        header.string = ' - '.join(parts)
        breadcrumb.replace_with(header)
    return soup