Mirror of https://github.com/kovidgoyal/calibre.git
commit d16c51bedf

Update Le Monde
Remove non-working recipe
Merge branch 'lemonde-jul2021' of https://github.com/aimylios/calibre
Deleted recipe: Berlin Policy Journal (berlinpolicyjournal.com)

@@ -1,110 +0,0 @@
-#!/usr/bin/env python
-# vim:fileencoding=utf-8
-# License: GPLv3 Copyright: 2016, Aimylios <aimylios at gmx.de>
-
-from __future__ import unicode_literals, division, absolute_import, print_function
-
-'''
-berlinpolicyjournal.com
-'''
-
-import re
-import time
-from calibre.web.feeds.news import BasicNewsRecipe
-
-
-class BerlinPolicyJournal(BasicNewsRecipe):
-    title = 'Berlin Policy Journal'
-    __author__ = 'Aimylios'
-    description = 'Articles from berlinpolicyjournal.com'
-    publisher = 'Deutsche Gesellschaft für Auswärtige Politik e.V.'
-    publication_type = 'magazine'
-    language = 'en_DE'
-
-    oldest_article = 50
-    max_articles_per_feed = 30
-    simultaneous_downloads = 5
-    no_stylesheets = True
-    remove_javascript = True
-
-    conversion_options = {'smarten_punctuation': True,
-                          'publisher': publisher}
-
-    INDEX = 'http://berlinpolicyjournal.com/'
-    masthead_url = INDEX + 'IP/wp-content/uploads/2015/04/logo_bpj_header.gif'
-
-    keep_only_tags = [
-        dict(name='article')
-    ]
-
-    remove_tags = [
-        dict(name='div', attrs={
-            'class': ['hidden', 'meta-count', 'meta-share']}),
-        dict(name='span', attrs={'class': 'ava-auth'}),
-        dict(name='img', attrs={'alt': re.compile('_store_120px_width$')}),
-        dict(name='img', attrs={'alt': re.compile('^bpj_app_')}),
-        dict(name='img', attrs={'alt': re.compile('^BPJ-Montage_')}),
-        dict(name=['link', 'footer', 'br'])
-    ]
-
-    remove_attributes = ['sizes', 'width', 'height', 'align']
-
-    extra_css = 'h1 {font-size: 1.6em; text-align: left} \
-                 .entry-subtitle {font-style: italic; margin-bottom: 1em} \
-                 .wp-caption {margin-top: 1em} \
-                 .wp-caption-text {font-size: 0.6em; margin-top: 0em}'
-
-    def parse_index(self):
-        soup = self.index_to_soup(self.INDEX)
-        img_div = soup.find('div', {'id': 'text-2'})
-        self.cover_url = img_div.find('img', src=True)['src']
-        menu = soup.find('ul', {'id': re.compile('menu-ip')})
-        submenus = menu.findAll(
-            'li', {'class': re.compile('item-has-children')})
-        mag = submenus[0].find('li')
-        mag_name = self.tag_to_string(mag.a)
-        mag_url = mag.a['href']
-        categories = [{'name': mag_name, 'url': mag_url, 'type': 'magazine'}]
-        for blog in submenus[1].findAll('li'):
-            blog_name = self.tag_to_string(blog.a)
-            blog_url = blog.a['href']
-            categories.append(
-                {'name': blog_name, 'url': blog_url, 'type': 'blog'})
-        feeds = []
-        for cat in categories:
-            cat['articles'] = []
-            for i in ['1', '2']:
-                soup = self.index_to_soup(cat['url'] + '/page/' + i)
-                for div in soup.findAll('div', {'class': 'post-box-big'}):
-                    timestamp = time.strptime(div.find('time')['datetime'][
-                                              :15], '%Y-%m-%dT%H:%M')
-                    age = (time.time() - time.mktime(timestamp)) / (24 * 3600)
-                    if age > self.oldest_article and cat['type'] == 'blog':
-                        continue
-                    article_title = self.tag_to_string(
-                        div.find('h3', {'class': 'entry-title'}).a)
-                    article_url = div.find(
-                        'h3', {'class': 'entry-title'}).a['href']
-                    article_date = type(u'')(time.strftime(
-                        ' [%a, %d %b %H:%M]', timestamp))
-                    article_desc = self.tag_to_string(
-                        div.find('div', {'class': 'i-summary'}).p)
-                    cat['articles'].append({'title': article_title,
-                                            'url': article_url,
-                                            'date': article_date,
-                                            'description': article_desc})
                if soup.find('div', {'class': 'pagination'}) is None:
-                    break
-            if cat['articles']:
-                feeds.append((cat['name'], cat['articles']))
-        return feeds
-
-    def postprocess_html(self, soup, first_fetch):
-        # clean up formatting of author(s) and date
-        div = soup.find('div', {'class': 'meta-info'})
-        authors = ''
-        for entry in div.findAll('span', {'class': 'entry-author'}):
-            authors = authors + entry.a.span.renderContents().decode('utf-8').strip() + ', '
-        date = div.find('time').renderContents().decode('utf-8').strip()
-        div.replaceWith('<div>' + date + ' | ' + authors[:-2] + '<br/></div>')
-        return soup
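For reference, the deleted recipe did its scrubbing declaratively through calibre's keep_only_tags and remove_tags lists. A rough standalone sketch of what those declarations amount to, using plain BeautifulSoup on invented sample HTML (calibre's own machinery differs in detail):

from bs4 import BeautifulSoup

# Invented sample page: only the <article> survives, minus the meta blocks.
html = '''<html><body><nav>site chrome</nav>
<article><h1>Title</h1>
<div class="meta-share">share buttons</div>
<p>Body text.</p></article></body></html>'''

soup = BeautifulSoup(html, 'html.parser')
article = soup.find('article')          # keep_only_tags: dict(name='article')
for div in article.find_all('div', {'class': ['hidden', 'meta-count', 'meta-share']}):
    div.decompose()                     # remove_tags: drop hidden/meta/share divs
print(article)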
Binary file changed (not shown): 293 B before → 400 B after
Binary file changed (not shown): 210 B before → 717 B after
New binary file (not shown): recipes/icons/le_monde_sub_paper.png, 404 B
Updated recipe: Le Monde

@@ -1,9 +1,16 @@
+#!/usr/bin/env python
+# vim:fileencoding=utf-8
+from __future__ import absolute_import, division, print_function, unicode_literals
+
 __license__ = 'GPL v3'
 __copyright__ = '2012'
 
 '''
 lemonde.fr
 '''
-from calibre.web.feeds.recipes import BasicNewsRecipe
+from calibre.web.feeds.news import BasicNewsRecipe
+import re
 
 
 def classes(classes):
@@ -15,64 +22,112 @@ def classes(classes):
 class LeMonde(BasicNewsRecipe):
     title = 'Le Monde'
     __author__ = 'veezh'
-    description = u'Actualités'
-    oldest_article = 1
-    max_articles_per_feed = 100
-    no_stylesheets = True
-    use_embedded_content = False
-    encoding = 'utf-8'
-    publisher = 'lemonde.fr'
-    category = 'news, France, world'
+    description = 'Les flux RSS du Monde.fr'
+    publisher = 'Société Editrice du Monde'
+    publication_type = 'newspaper'
+    needs_subscription = 'optional'
     language = 'fr'
-    extra_css = '''
-    img{max-width:100%}
-    h1{font-size:1.2em !important; line-height:1.2em !important; }
-    h2{font-size:1em !important; line-height:1em !important; }
-    h3{font-size:1em !important; text-transform:uppercase !important; color:#666;}
-    #photo{text-align:center !important; margin:10px 0 -8px;}
-    #lgd{font-size:1em !important; line-height:1em !important; font-style:italic; color:#333;} '''
 
-    keep_only_tags = [
-        dict(itemprop=['Headline', 'description']),
-        classes('bloc_signature'),
-        dict(itemprop=['articleBody']),
-    ]
+    oldest_article = 2
+    max_articles_per_feed = 15
+    no_stylesheets = True
     remove_empty_feeds = True
+    ignore_duplicate_articles = {'title', 'url'}
 
-    def preprocess_html(self, soup):
-        for lgd in soup.findAll(id="lgd"):
-            lgd.contents[-1].extract()
-        for img in soup.findAll('img', attrs={'data-src': True}):
-            img['src'] = img['data-src']
-        return soup
+    conversion_options = {
+        'publisher': publisher
+    }
 
-    def get_article_url(self, article):
-        url = article.get('guid', None)
-        if '/chat/' in url or '.blog' in url or '/video/' in url or '/sport/' in url or '/portfolio/' in url or '/visuel/' in url:
-            url = None
-        return url
+    masthead_url = 'http://upload.wikimedia.org/wikipedia/commons/thumb/5/54/Le_monde_logo.svg/800px-Le_monde_logo.svg.png'
 
     feeds = [
-        ('A la une', 'http://www.lemonde.fr/rss/une.xml'),
-        ('International', 'http://www.lemonde.fr/rss/tag/international.xml'),
-        ('Europe', 'http://www.lemonde.fr/rss/tag/europe.xml'),
-        (u'Société', 'http://www.lemonde.fr/rss/tag/societe.xml'),
-        ('Economie', 'http://www.lemonde.fr/rss/tag/economie.xml'),
-        (u'Médias', 'http://www.lemonde.fr/rss/tag/actualite-medias.xml'),
-        (u'Planète', 'http://www.lemonde.fr/rss/tag/planete.xml'),
-        ('Culture', 'http://www.lemonde.fr/rss/tag/culture.xml'),
-        ('Technologies', 'http://www.lemonde.fr/rss/tag/technologies.xml'),
-        ('Livres', 'http://www.lemonde.fr/rss/tag/livres.xml'),
+        ('A la une', 'https://www.lemonde.fr/rss/une.xml'),
+        ('International', 'https://www.lemonde.fr/international/rss_full.xml'),
+        ('Politique', 'https://www.lemonde.fr/politique/rss_full.xml'),
+        ('Société', 'https://www.lemonde.fr/societe/rss_full.xml'),
+        ('Economie', 'https://www.lemonde.fr/economie/rss_full.xml'),
+        ('Planète', 'https://www.lemonde.fr/planete/rss_full.xml'),
+        ('Sciences', 'https://www.lemonde.fr/sciences/rss_full.xml'),
+        ('Pixels', 'https://www.lemonde.fr/pixels/rss_full.xml'),
+        ('Culture', 'https://www.lemonde.fr/culture/rss_full.xml'),
+        ('Idées', 'https://www.lemonde.fr/idees/rss_full.xml')
 
     ]
 
-    def get_cover_url(self):
-        cover_url = None
-        soup = self.index_to_soup(
-            'http://www.lemonde.fr/web/monde_pdf/0,33-0,1-0,0.html')
-        link_item = soup.find('div', attrs={'class': 'pg-gch'})
-
-        if link_item and link_item.img:
-            cover_url = link_item.img['src']
-
-        return cover_url
+    keep_only_tags = [
+        classes('article__header'),
+        dict(name='section', attrs={'class': ['article__content', 'article__heading',
+                                              'article__wrapper']})
+    ]
+
+    remove_tags = [
+        classes('article__status meta__date meta__reading-time meta__social multimedia-embed'),
+        dict(name=['footer', 'link']),
+        dict(name='section', attrs={'class': ['article__reactions', 'author', 'catcher',
+                                              'portfolio', 'services-inread']})
+    ]
+
+    remove_attributes = [
+        'data-sizes', 'height', 'sizes', 'width'
+    ]
+
+    preprocess_regexps = [
+        # insert space between author name and description
+        (re.compile(r'(<span class="[^"]*author__desc[^>]*>)([^<]*</span>)',
+                    re.IGNORECASE), lambda match: match.group(1) + ' ' + match.group(2)),
+        # insert " | " between article type and description
+        (re.compile(r'(<span class="[^"]*article__kicker[^>]*>[^<]*)(</span>)',
+                    re.IGNORECASE), lambda match: match.group(1) + ' | ' + match.group(2))
+    ]
+
+    extra_css = '''
+        h2 { font-size: 1em; }
+        h3 { font-size: 1em; }
+        .article__desc { font-weight: bold; }
+        .article__fact { font-weight: bold; text-transform: uppercase; }
+        .article__kicker { text-transform: uppercase; }
+        .article__legend { font-size: 0.6em; margin-bottom: 1em; }
+        .article__title { margin-top: 0em; }
+    '''
+
+    def get_browser(self):
+        br = BasicNewsRecipe.get_browser(self)
+        if self.username is not None and self.password is not None:
+            br.open('https://secure.lemonde.fr/sfuser/connexion')
+            br.select_form(name='connection')
+            br['connection[mail]'] = self.username
+            br['connection[password]'] = self.password
+            br.submit()
+        return br
+
+    def get_article_url(self, article):
+        url = BasicNewsRecipe.get_article_url(self, article)
+        # skip articles without relevant content (e.g., videos)
+        for el in 'blog chat live podcasts portfolio video visuel'.split():
+            if '/' + el + '/' in url:
+                self.log(url)
+                self.abort_article()
+        return url
+
+    def preprocess_html(self, soup):
+        # when an image is available in multiple sizes, select the smallest one
+        for img in soup.find_all('img', {'data-srcset': True}):
+            data_srcset = img['data-srcset'].split()
+            if len(data_srcset) > 1:
+                img['src'] = data_srcset[-2]
+            del img['data-srcset']
+        return soup
+
+    def postprocess_html(self, soup, first_fetch):
+        # remove local hyperlinks
+        for a in soup.find_all('a', {'href': True}):
+            if '.lemonde.fr/' in a['href']:
+                a.replace_with(self.tag_to_string(a))
+        # clean up header
+        for ul in soup.find_all('ul', {'class': 'breadcrumb'}):
+            div = soup.new_tag('div')
+            category = ''
+            for li in ul.find_all('li', {'class': True}):
+                category += self.tag_to_string(li).strip().upper() + ' - '
+            div.string = category[:-3]
+            ul.replace_with(div)
+        return soup
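Both preprocess_regexps rules added above are (compiled pattern, replacement function) pairs; calibre applies each pair to the raw HTML before it is parsed. A standalone sketch of the same substitutions on an invented snippet:

import re

# Sketch of the two preprocess_regexps pairs outside calibre; the markup is invented.
snippet = ('<span class="article__kicker">Analyse</span>'
           '<span class="author__desc">Correspondante</span>')

rules = [
    # insert a space between author name and description
    (re.compile(r'(<span class="[^"]*author__desc[^>]*>)([^<]*</span>)',
                re.IGNORECASE), lambda m: m.group(1) + ' ' + m.group(2)),
    # insert " | " after the article type (kicker)
    (re.compile(r'(<span class="[^"]*article__kicker[^>]*>[^<]*)(</span>)',
                re.IGNORECASE), lambda m: m.group(1) + ' | ' + m.group(2)),
]
for pattern, repl in rules:
    snippet = pattern.sub(repl, snippet)
print(snippet)
# <span class="article__kicker">Analyse | </span><span class="author__desc"> Correspondante</span>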
Updated recipe: Le Monde: Édition abonnés

@@ -1,8 +1,16 @@
+#!/usr/bin/env python
 # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
+from __future__ import absolute_import, division, print_function, unicode_literals
+
 __author__ = 'S. Durand <sylvaindurand@users.noreply.github.com>'
 __license__ = 'GPL v3'
 
+'''
+lemonde.fr
+'''
+
 from calibre.web.feeds.news import BasicNewsRecipe
+import re
 
 
 def classes(classes):
@@ -11,29 +19,23 @@ def classes(classes):
         'class': lambda x: x and frozenset(x.split()).intersection(q)})
 
 
-class LeMonde(BasicNewsRecipe):
-    title = u'Le Monde: Édition abonnés'
+class LeMondeNumerique(BasicNewsRecipe):
+    title = 'Le Monde: Édition abonnés'
     __author__ = 'Sylvain Durand'
-    description = u'La version papier du quotidien Le Monde, disponible du lundi au samedi à partir de 14 heures environ, avec tous ses cahiers.'
-    language = 'fr'
-    encoding = 'utf8'
+    description = 'La version numérique du quotidien Le Monde'
+    publisher = 'Société Editrice du Monde'
+    publication_type = 'newspaper'
 
     needs_subscription = True
+    language = 'fr'
 
-    extra_css = '''
-    img{max-width:100%}
-    h1{font-size:1.2em !important; line-height:1.2em !important; }
-    h2{font-size:1em !important; line-height:1em !important; }
-    h3{font-size:1em !important; text-transform:uppercase !important; color:#666;}
-    #photo{text-align:center !important; margin:10px 0 -8px;}
-    #lgd{font-size:1em !important; line-height:1em !important; font-style:italic; color:#333;} '''
+    no_stylesheets = True
+    ignore_duplicate_articles = {'title', 'url'}
 
-    keep_only_tags = [
-        dict(itemprop=['Headline', 'description']),
-        classes('bloc_signature'),
-        dict(itemprop=['articleBody']),
-    ]
+    conversion_options = {
+        'publisher': publisher
+    }
+
+    masthead_url = 'http://upload.wikimedia.org/wikipedia/commons/thumb/5/54/Le_monde_logo.svg/800px-Le_monde_logo.svg.png'
 
     lm_sections = [
         'international:International',
@@ -49,6 +51,42 @@ class LeMonde(BasicNewsRecipe):
         'campus:Campus'
     ]
 
+    keep_only_tags = [
+        classes('article__header'),
+        dict(name='section', attrs={'class': ['article__content', 'article__heading',
+                                              'article__wrapper']})
+    ]
+
+    remove_tags = [
+        classes('article__status meta__date meta__reading-time meta__social multimedia-embed'),
+        dict(name=['footer', 'link']),
+        dict(name='section', attrs={'class': ['article__reactions', 'author', 'catcher',
+                                              'portfolio', 'services-inread']})
+    ]
+
+    remove_attributes = [
+        'data-sizes', 'height', 'sizes', 'width'
+    ]
+
+    preprocess_regexps = [
+        # insert space between author name and description
+        (re.compile(r'(<span class="[^"]*author__desc[^>]*>)([^<]*</span>)',
+                    re.IGNORECASE), lambda match: match.group(1) + ' ' + match.group(2)),
+        # insert " | " between article type and description
+        (re.compile(r'(<span class="[^"]*article__kicker[^>]*>[^<]*)(</span>)',
+                    re.IGNORECASE), lambda match: match.group(1) + ' | ' + match.group(2))
+    ]
+
+    extra_css = '''
+        h2 { font-size: 1em; }
+        h3 { font-size: 1em; }
+        .article__desc { font-weight: bold; }
+        .article__fact { font-weight: bold; text-transform: uppercase; }
+        .article__kicker { text-transform: uppercase; }
+        .article__legend { font-size: 0.6em; margin-bottom: 1em; }
+        .article__title { margin-top: 0em; }
+    '''
+
     def get_browser(self):
         br = BasicNewsRecipe.get_browser(self)
         br.open('https://secure.lemonde.fr/sfuser/connexion')
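The get_browser override shown in the context lines above performs a form-based login. A rough standalone sketch of that flow with a bare mechanize browser and placeholder credentials; inside calibre, BasicNewsRecipe.get_browser supplies a preconfigured browser and the credentials come from the recipe's subscription settings:

import mechanize

# Placeholder sketch of the login flow; the credentials are dummies,
# the form and field names are taken from the recipe above.
br = mechanize.Browser()
br.set_handle_robots(False)
br.open('https://secure.lemonde.fr/sfuser/connexion')
br.select_form(name='connection')
br['connection[mail]'] = 'user@example.com'
br['connection[password]'] = 'not-a-real-password'
br.submit()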
@@ -58,45 +96,61 @@ class LeMonde(BasicNewsRecipe):
         br.submit()
         return br
 
-    def preprocess_html(self, soup):
-        for lgd in soup.findAll(id="lgd"):
-            lgd.contents[-1].extract()
-        for img in soup.findAll('img', attrs={'data-src': True}):
-            img['src'] = img['data-src']
-        return soup
-
     def parse_index(self):
         ans = []
         for x in self.lm_sections:
             s, section_title = x.partition(':')[::2]
             self.log('Processing section', section_title, '...')
-            articles = list(self.parse_section('http://www.lemonde.fr/%s/' % s))
+            articles = list(self.parse_section('https://www.lemonde.fr/%s/' % s))
             if articles:
                 ans.append((section_title, articles))
         return ans
 
     def parse_section(self, url):
         soup = self.index_to_soup(url)
-        container = soup.find(attrs={'class':lambda x: x and 'grid_12 alpha' in x})
-        for article in container.findAll('article'):
-            h2 = article.find('h2')
-            if h2 is None:
-                h2 = article.find('h3')
-            if h2 is None:
-                continue
-            a = h2.find('a', href=True)
+        for article in soup.find_all('section', {'class': 'teaser'}):
+            # extract URL
+            a = article.find('a', {'class': 'teaser__link'})
             if a is None:
-                a = h2.findParents('a', href=True)
-                if not a:
-                    continue
-                a = a[0]
+                continue
             url = a['href']
-            if url.startswith('/'):
-                url = 'http://www.lemonde.fr' + url
-            title = self.tag_to_string(a)
+            # skip articles without relevant content (e.g., videos)
+            for el in 'blog chat live podcasts portfolio video visuel'.split():
+                if '/' + el + '/' in url:
+                    continue
+            # extract title
+            h3 = article.find('h3', {'class': 'teaser__title'})
+            if h3 is None:
+                continue
+            title = self.tag_to_string(h3)
+            # extract description
             desc = ''
-            p = article.find('p')
+            p = article.find('p', {'class': 'teaser__desc'})
             if p is not None:
                 desc = self.tag_to_string(p)
             self.log('\tFound article', title, 'at', url)
             yield {'title': title, 'url': url, 'description': desc}
+
+    def preprocess_html(self, soup):
+        # when an image is available in multiple sizes, select the smallest one
+        for img in soup.find_all('img', {'data-srcset': True}):
+            data_srcset = img['data-srcset'].split()
+            if len(data_srcset) > 1:
+                img['src'] = data_srcset[-2]
+            del img['data-srcset']
+        return soup
+
+    def postprocess_html(self, soup, first_fetch):
+        # remove local hyperlinks
+        for a in soup.find_all('a', {'href': True}):
+            if '.lemonde.fr/' in a['href']:
+                a.replace_with(self.tag_to_string(a))
+        # clean up header
+        for ul in soup.find_all('ul', {'class': 'breadcrumb'}):
+            div = soup.new_tag('div')
+            category = ''
+            for li in ul.find_all('li', {'class': True}):
+                category += self.tag_to_string(li).strip().upper() + ' - '
+            div.string = category[:-3]
+            ul.replace_with(div)
+        return soup
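The data-srcset handling added to preprocess_html in both recipes leans on the "url widthw, url widthw, ..." shape of the attribute: after split(), the second-to-last token is the URL of the last-listed candidate, which these recipes take to be the smallest rendition. A standalone sketch on an invented attribute value:

from bs4 import BeautifulSoup

# Sketch of the data-srcset selection outside calibre; the markup is invented.
html = '<img data-srcset="big.jpg 1024w, medium.jpg 640w, small.jpg 320w" alt="">'
soup = BeautifulSoup(html, 'html.parser')
for img in soup.find_all('img', {'data-srcset': True}):
    tokens = img['data-srcset'].split()
    if len(tokens) > 1:
        img['src'] = tokens[-2]  # URL of the last-listed candidate: 'small.jpg'
    del img['data-srcset']
print(soup)  # <img alt="" src="small.jpg"/>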