From 5385ee080880c6a9d3f96b9f6b32ec51aabfb6c9 Mon Sep 17 00:00:00 2001
From: Aimylios <20016942+aimylios@users.noreply.github.com>
Date: Sun, 11 Jul 2021 12:31:38 +0200
Subject: [PATCH] Update Le Monde

---
 recipes/icons/le_monde.png           | Bin 293 -> 400 bytes
 recipes/icons/le_monde_sub.png       | Bin 210 -> 717 bytes
 recipes/icons/le_monde_sub_paper.png | Bin 0 -> 404 bytes
 recipes/le_monde.recipe              | 156 ++++++++++++++++++---------
 recipes/le_monde_sub.recipe          | 139 ++++++++++++++++--------
 5 files changed, 201 insertions(+), 94 deletions(-)
 create mode 100644 recipes/icons/le_monde_sub_paper.png

diff --git a/recipes/icons/le_monde.png b/recipes/icons/le_monde.png
index 5166fcf3d2aecd6ce934fec261b4d16949dc556c..a23b99db921011c6e72c581315456a4c96ef21a1 100644
Binary files a/recipes/icons/le_monde.png and b/recipes/icons/le_monde.png differ
diff --git a/recipes/icons/le_monde_sub.png b/recipes/icons/le_monde_sub.png
Binary files a/recipes/icons/le_monde_sub.png and b/recipes/icons/le_monde_sub.png differ
diff --git a/recipes/icons/le_monde_sub_paper.png b/recipes/icons/le_monde_sub_paper.png
new file mode 100644
Binary files /dev/null and b/recipes/icons/le_monde_sub_paper.png differ
diff --git a/recipes/le_monde.recipe b/recipes/le_monde.recipe
--- a/recipes/le_monde.recipe
+++ b/recipes/le_monde.recipe
+    preprocess_regexps = [
+        # insert space between author name and description
+        (re.compile(r'(<span class="author__desc[^>]*>)([^<]*)',
+            re.IGNORECASE), lambda match: match.group(1) + ' ' + match.group(2)),
+        # insert " | " between article type and description
+        (re.compile(r'(<span class="article__kicker[^>]*>[^<]*)(<span class="article__desc">)',
+            re.IGNORECASE), lambda match: match.group(1) + ' | ' + match.group(2))
+    ]
+
+    extra_css = '''
+        h2 { font-size: 1em; }
+        h3 { font-size: 1em; }
+        .article__desc { font-weight: bold; }
+        .article__fact { font-weight: bold; text-transform: uppercase; }
+        .article__kicker { text-transform: uppercase; }
+        .article__legend { font-size: 0.6em; margin-bottom: 1em; }
+        .article__title { margin-top: 0em; }
+    '''
+
+    def get_browser(self):
+        br = BasicNewsRecipe.get_browser(self)
+        if self.username is not None and self.password is not None:
+            br.open('https://secure.lemonde.fr/sfuser/connexion')
+            br.select_form(name='connection')
+            br['connection[mail]'] = self.username
+            br['connection[password]'] = self.password
+            br.submit()
+        return br
+
+    def get_article_url(self, article):
+        url = BasicNewsRecipe.get_article_url(self, article)
+        # skip articles without relevant content (e.g., videos)
+        for el in 'blog chat live podcasts portfolio video visuel'.split():
+            if '/' + el + '/' in url:
+                self.log(url)
+                self.abort_article()
+        return url
+
+    def preprocess_html(self, soup):
+        # when an image is available in multiple sizes, select the smallest one
+        for img in soup.find_all('img', {'data-srcset': True}):
+            data_srcset = img['data-srcset'].split()
+            if len(data_srcset) > 1:
+                img['src'] = data_srcset[-2]
+            del img['data-srcset']
+        return soup
+
+    def postprocess_html(self, soup, first_fetch):
+        # remove local hyperlinks
+        for a in soup.find_all('a', {'href': True}):
+            if '.lemonde.fr/' in a['href']:
+                a.replace_with(self.tag_to_string(a))
+        # clean up header
+        for ul in soup.find_all('ul', {'class': 'breadcrumb'}):
+            div = soup.new_tag('div')
+            category = ''
+            for li in ul.find_all('li', {'class': True}):
+                category += self.tag_to_string(li).strip().upper() + ' - '
+            div.string = category[:-3]
+            ul.replace_with(div)
+        return soup
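The preprocess_regexps pair above (and the identical pair in le_monde_sub.recipe below) rewrites the raw page HTML before parsing. The author__desc, article__kicker and article__desc class names in those patterns are assumptions inferred from the classes styled in extra_css, not verified against Le Monde's live markup. A minimal standalone sketch of what the first substitution does under that assumption:

    import re

    # made-up byline markup; the real Le Monde class names are an assumption
    html = ('<span class="author__name">Jane Doe</span>'
            '<span class="author__desc">Journaliste</span>')

    pattern = re.compile(r'(<span class="author__desc[^>]*>)([^<]*)', re.IGNORECASE)
    print(pattern.sub(lambda m: m.group(1) + ' ' + m.group(2), html))
    # -> ...<span class="author__desc"> Journaliste</span>

Because the two spans render inline with no whitespace between them, inserting a space at the start of the description text keeps "Jane Doe" and "Journaliste" from running together in the converted article.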
diff --git a/recipes/le_monde_sub.recipe b/recipes/le_monde_sub.recipe
index db0271d14d..ec7118fc1f 100644
--- a/recipes/le_monde_sub.recipe
+++ b/recipes/le_monde_sub.recipe
@@ -1,8 +1,15 @@
+#!/usr/bin/env python
 # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
+
 __author__ = 'S. Durand <sylvain.durand@ens-lyon.org>'
 __license__ = 'GPL v3'
+'''
+lemonde.fr
+'''
+
 from calibre.web.feeds.news import BasicNewsRecipe
+import re
 
 
 def classes(classes):
@@ -11,29 +18,23 @@ def classes(classes):
         'class': lambda x: x and frozenset(x.split()).intersection(q)})
 
 
-class LeMonde(BasicNewsRecipe):
-
-    title = u'Le Monde: Édition abonnés'
+class LeMondeNumerique(BasicNewsRecipe):
+    title = 'Le Monde: Édition abonnés'
     __author__ = 'Sylvain Durand'
-    description = u'La version papier du quotidien Le Monde, disponible du lundi au samedi à partir de 14 heures environ, avec tous ses cahiers.'
-    language = 'fr'
-    encoding = 'utf8'
-
+    description = 'La version numérique du quotidien Le Monde'
+    publisher = 'Société Editrice du Monde'
+    publication_type = 'newspaper'
     needs_subscription = True
+    language = 'fr'
 
-    extra_css = '''
-        img{max-width:100%}
-        h1{font-size:1.2em !important; line-height:1.2em !important; }
-        h2{font-size:1em !important; line-height:1em !important; }
-        h3{font-size:1em !important; text-transform:uppercase !important; color:#666;}
-        #photo{text-align:center !important; margin:10px 0 -8px;}
-        #lgd{font-size:1em !important; line-height:1em !important; font-style:italic; color:#333;}
-    '''
+    no_stylesheets = True
+    ignore_duplicate_articles = {'title', 'url'}
 
-    keep_only_tags = [
-        dict(itemprop=['Headline', 'description']),
-        classes('bloc_signature'),
-        dict(itemprop=['articleBody']),
-    ]
+    conversion_options = {
+        'publisher': publisher
+    }
+
+    masthead_url = 'http://upload.wikimedia.org/wikipedia/commons/thumb/5/54/Le_monde_logo.svg/800px-Le_monde_logo.svg.png'
 
     lm_sections = [
         'international:International',
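Each lm_sections entry packs a URL slug and a human-readable section title into a single 'slug:Title' string; parse_index further down unpacks them with x.partition(':')[::2]. A quick illustration of that idiom:

    x = 'international:International'
    s, section_title = x.partition(':')[::2]
    print(s, section_title)   # -> international International

str.partition returns a (head, separator, tail) triple, and the [::2] stride keeps only head and tail; since it splits on the first colon only, a title that itself contained a colon would survive intact.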
@@ -49,6 +50,42 @@ class LeMonde(BasicNewsRecipe):
         'campus:Campus'
     ]
 
+    keep_only_tags = [
+        classes('article__header'),
+        dict(name='section', attrs={'class': ['article__content', 'article__heading',
+                                              'article__wrapper']})
+    ]
+
+    remove_tags = [
+        classes('article__status meta__date meta__reading-time meta__social multimedia-embed'),
+        dict(name=['footer', 'link']),
+        dict(name='section', attrs={'class': ['article__reactions', 'author', 'catcher',
+                                              'portfolio', 'services-inread']})
+    ]
+
+    remove_attributes = [
+        'data-sizes', 'height', 'sizes', 'width'
+    ]
+
+    preprocess_regexps = [
+        # insert space between author name and description
+        (re.compile(r'(<span class="author__desc[^>]*>)([^<]*)',
+            re.IGNORECASE), lambda match: match.group(1) + ' ' + match.group(2)),
+        # insert " | " between article type and description
+        (re.compile(r'(<span class="article__kicker[^>]*>[^<]*)(<span class="article__desc">)',
+            re.IGNORECASE), lambda match: match.group(1) + ' | ' + match.group(2))
+    ]
+
+    extra_css = '''
+        h2 { font-size: 1em; }
+        h3 { font-size: 1em; }
+        .article__desc { font-weight: bold; }
+        .article__fact { font-weight: bold; text-transform: uppercase; }
+        .article__kicker { text-transform: uppercase; }
+        .article__legend { font-size: 0.6em; margin-bottom: 1em; }
+        .article__title { margin-top: 0em; }
+    '''
+
     def get_browser(self):
         br = BasicNewsRecipe.get_browser(self)
         br.open('https://secure.lemonde.fr/sfuser/connexion')
@@ -58,45 +95,61 @@ class LeMonde(BasicNewsRecipe):
         br.submit()
         return br
 
-    def preprocess_html(self, soup):
-        for lgd in soup.findAll(id="lgd"):
-            lgd.contents[-1].extract()
-        for img in soup.findAll('img', attrs={'data-src': True}):
-            img['src'] = img['data-src']
-        return soup
-
     def parse_index(self):
         ans = []
         for x in self.lm_sections:
             s, section_title = x.partition(':')[::2]
             self.log('Processing section', section_title, '...')
-            articles = list(self.parse_section('http://www.lemonde.fr/%s/' % s))
+            articles = list(self.parse_section('https://www.lemonde.fr/%s/' % s))
             if articles:
                 ans.append((section_title, articles))
         return ans
 
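For context on the method above: calibre expects parse_index to return a list of (section title, article list) pairs, each article being a plain dict with at least 'title' and 'url' keys ('description' is optional). A self-contained sketch with dummy data:

    def parse_index():
        articles = [{
            'title': 'Un exemple',
            'url': 'https://www.lemonde.fr/international/',   # placeholder URL
            'description': 'Description facultative',
        }]
        return [('International', articles)]

    for section_title, articles in parse_index():
        for art in articles:
            print(section_title, '->', art['title'])

The 'if articles:' guard in the real method drops sections whose parse_section generator yielded nothing, so empty sections never produce empty feeds in the output e-book.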
     def parse_section(self, url):
         soup = self.index_to_soup(url)
-        container = soup.find(attrs={'class':lambda x: x and 'grid_12 alpha' in x})
-        for article in container.findAll('article'):
-            h2 = article.find('h2')
-            if h2 is None:
-                h2 = article.find('h3')
-            if h2 is None:
-                continue
-            a = h2.find('a', href=True)
+        for article in soup.find_all('section', {'class': 'teaser'}):
+            # extract URL
+            a = article.find('a', {'class': 'teaser__link'})
             if a is None:
-                a = h2.findParents('a', href=True)
-                if not a:
-                    continue
-                a = a[0]
+                continue
             url = a['href']
-            if url.startswith('/'):
-                url = 'http://www.lemonde.fr' + url
-            title = self.tag_to_string(a)
+            # skip articles without relevant content (e.g., videos)
+            skips = 'blog chat live podcasts portfolio video visuel'.split()
+            if any('/' + el + '/' in url for el in skips):
+                continue
+            # extract title
+            h3 = article.find('h3', {'class': 'teaser__title'})
+            if h3 is None:
+                continue
+            title = self.tag_to_string(h3)
+            # extract description
             desc = ''
-            p = article.find('p')
+            p = article.find('p', {'class': 'teaser__desc'})
             if p is not None:
                 desc = self.tag_to_string(p)
             self.log('\tFound article', title, 'at', url)
             yield {'title': title, 'url': url, 'description': desc}
+
+    def preprocess_html(self, soup):
+        # when an image is available in multiple sizes, select the smallest one
+        for img in soup.find_all('img', {'data-srcset': True}):
+            data_srcset = img['data-srcset'].split()
+            if len(data_srcset) > 1:
+                img['src'] = data_srcset[-2]
+            del img['data-srcset']
+        return soup
+
+    def postprocess_html(self, soup, first_fetch):
+        # remove local hyperlinks
+        for a in soup.find_all('a', {'href': True}):
+            if '.lemonde.fr/' in a['href']:
+                a.replace_with(self.tag_to_string(a))
+        # clean up header
+        for ul in soup.find_all('ul', {'class': 'breadcrumb'}):
+            div = soup.new_tag('div')
+            category = ''
+            for li in ul.find_all('li', {'class': True}):
+                category += self.tag_to_string(li).strip().upper() + ' - '
+            div.string = category[:-3]
+            ul.replace_with(div)
+        return soup
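The breadcrumb cleanup in both postprocess_html implementations accumulates 'SECTION - ' fragments and trims the trailing separator with category[:-3]. Joining with ' - ' is an equivalent formulation; a self-contained sketch on made-up markup, assuming BeautifulSoup 4 as used by calibre recipes:

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<ul class="breadcrumb"><li class="s">Monde</li>'
                         '<li class="s">Europe</li></ul>', 'html.parser')

    for ul in soup.find_all('ul', {'class': 'breadcrumb'}):
        div = soup.new_tag('div')
        div.string = ' - '.join(li.get_text().strip().upper()
                                for li in ul.find_all('li', {'class': True}))
        ul.replace_with(div)

    print(soup)   # -> <div>MONDE - EUROPE</div>

Either form flattens the breadcrumb list into a single uppercase header line; the recipe's version simply builds the same string incrementally.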