Add the Rue89 recipe by Louis Gesbert and update the Mediapart recipe

This commit is contained in:
Kovid Goyal 2010-10-30 16:02:55 -06:00
parent eaf640df17
commit b7ac30b890
2 changed files with 107 additions and 28 deletions

View File

@@ -1,53 +1,79 @@
#!/usr/bin/env python
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '2009, Mathieu Godlewski <mathieu at godlewski.fr>' __copyright__ = '2009, Mathieu Godlewski <mathieu at godlewski.fr>; 2010, Louis Gesbert <meta at antislash dot info>'
''' '''
Mediapart Mediapart
''' '''
import re, string from calibre.ebooks.BeautifulSoup import Tag
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
class Mediapart(BasicNewsRecipe): class Mediapart(BasicNewsRecipe):
title = 'Mediapart' title = 'Mediapart'
__author__ = 'Mathieu Godlewski <mathieu at godlewski.fr>' __author__ = 'Mathieu Godlewski'
description = 'Global news in french from online newspapers' description = 'Global news in french from online newspapers'
oldest_article = 7 oldest_article = 7
language = 'fr' language = 'fr'
needs_subscription = True
max_articles_per_feed = 50 max_articles_per_feed = 50
no_stylesheets = True no_stylesheets = True
html2lrf_options = ['--base-font-size', '10'] cover_url = 'http://www.mediapart.fr/sites/all/themes/mediapart/mediapart/images/annonce.jpg'
feeds = [ feeds = [
('Les articles', 'http://www.mediapart.fr/articles/feed'), ('Les articles', 'http://www.mediapart.fr/articles/feed'),
] ]
preprocess_regexps = [ (re.compile(i[0], re.IGNORECASE|re.DOTALL), i[1]) for i in # -- print-version has poor quality on this website, better do the conversion ourselves
[ #
(r'<div class="print-title">([^>]+)</div>', lambda match : '<h2>'+match.group(1)+'</h2>'), # preprocess_regexps = [ (re.compile(i[0], re.IGNORECASE|re.DOTALL), i[1]) for i in
(r'<p>Mediapart\.fr</p>', lambda match : ''), # [
(r'<p[^>]*>[\s]*</p>', lambda match : ''), # (r'<div class="print-title">([^>]+)</div>', lambda match : '<h2>'+match.group(1)+'</h2>'),
(r'<p><a href="[^\.]+\.pdf">[^>]*</a></p>', lambda match : ''), # (r'<span class=\'auteur_staff\'>[^>]+<a title=\'[^\']*\'[^>]*>([^<]*)</a>[^<]*</span>',
# lambda match : '<i>'+match.group(1)+'</i>'),
# (r'\'', lambda match: '&rsquo;'),
# ]
# ]
#
# remove_tags = [ dict(name='div', attrs={'class':'print-source_url'}),
# dict(name='div', attrs={'class':'print-links'}),
# dict(name='img', attrs={'src':'entete_article.png'}),
# dict(name='br') ]
#
# def print_version(self, url):
# raw = self.browser.open(url).read()
# soup = BeautifulSoup(raw.decode('utf8', 'replace'))
# div = soup.find('div', {'id':re.compile('node-\d+')})
# if div is None:
# return None
# article_id = string.replace(div['id'], 'node-', '')
# if article_id is None:
# return None
# return 'http://www.mediapart.fr/print/'+article_id
# -- Non-print version [dict(name='div', attrs={'class':'advert'})]
keep_only_tags = [
dict(name='h1', attrs={'class':'title'}),
dict(name='div', attrs={'class':'page_papier_detail'}),
] ]
]
remove_tags = [ dict(name='div', attrs={'class':'print-source_url'}), def preprocess_html(self,soup):
dict(name='div', attrs={'class':'print-links'}), for title in soup.findAll('div', {'class':'titre'}):
dict(name='img', attrs={'src':'entete_article.png'}), tag = Tag(soup, 'h3')
] title.replaceWith(tag)
tag.insert(0,title)
return soup
# -- Log the subscriber in before any feed is fetched
def get_browser(self):
    """Return a browser, authenticated against mediapart.fr when
    credentials were supplied by the user."""
    br = BasicNewsRecipe.get_browser()
    # Guard clause: without both credentials there is nothing to submit.
    if self.username is None or self.password is None:
        return br
    br.open('http://www.mediapart.fr/')
    # The login form is the second form on the landing page.
    br.select_form(nr=1)
    br['name'] = self.username
    br['pass'] = self.password
    br.submit()
    return br
def print_version(self, url):
    """Map an article URL to its printer-friendly counterpart.

    The print URL is built from the article's Drupal node id, which is
    read off the article page itself.  Returns None when the page layout
    does not match (so the regular page is used instead).
    """
    raw = self.browser.open(url).read()
    soup = BeautifulSoup(raw.decode('utf8', 'replace'))
    div = soup.find('div', {'class':'node node-type-article'})
    if div is None:
        return None
    # Tag.get() avoids a KeyError when the div carries no id attribute.
    node_id = div.get('id')
    if not node_id:
        return None
    # str.replace() instead of the deprecated string.replace(): the
    # 'string' module is no longer imported by this recipe, and
    # str.replace never returns None, so the old post-check was dead code.
    return 'http://www.mediapart.fr/print/' + node_id.replace('node-', '')

View File

@@ -0,0 +1,53 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Louis Gesbert <meta at antislash dot info>'
'''
Rue89
'''
__author__ = '2010, Louis Gesbert <meta at antislash dot info>'
import re
from calibre.ebooks.BeautifulSoup import Tag
from calibre.web.feeds.news import BasicNewsRecipe
class Rue89(BasicNewsRecipe):
    """Recipe for rue89.com, a popular free french news website."""
    # NOTE: 'title' was previously assigned twice ('Rue89' then u'Rue89');
    # a single assignment keeps the same effective value.
    title = u'Rue89'
    __author__ = 'Louis Gesbert'
    description = 'Popular free french news website'
    language = 'fr'
    oldest_article = 7
    max_articles_per_feed = 50

    feeds = [(u'La Une', u'http://www.rue89.com/homepage/feed')]

    no_stylesheets = True

    preprocess_regexps = [
        # Demote existing h2 headings so the promoted article title (h2
        # below) stays the top-level heading of the article body.
        (re.compile(r'<(/?)h2>', re.IGNORECASE|re.DOTALL),
            lambda match : '<'+match.group(1)+'h3>'),
        # Turn the print-title div into a real heading.
        (re.compile(r'<div class="print-title">([^>]+)</div>', re.IGNORECASE|re.DOTALL),
            lambda match : '<h2>'+match.group(1)+'</h2>'),
        # Replace the issue-number gif with styled text.
        # Fix: 'padding=2pt' was not valid CSS; use 'padding: 2pt'.
        (re.compile(r'<img[^>]+src="[^"]*/numeros/(\d+)[^0-9.">]*.gif"[^>]*/>', re.IGNORECASE|re.DOTALL),
            lambda match : '<span style="font-family: Sans-serif; color: red; font-size:24pt; padding: 2pt;">'+match.group(1)+'</span>'),
        # Typographic apostrophe.
        (re.compile(r'\''), lambda match: '&rsquo;'),
        ]

    def preprocess_html(self, soup):
        # Rebuild the body with only the article title and its content.
        title = soup.find('h1', {'class':'title'})
        content = soup.find('div', {'class':'content'})
        if title is None or content is None:
            # Unexpected page layout: return the page untouched rather
            # than crash on a None element.
            return soup
        body = Tag(soup, 'body')
        soup.body.replaceWith(body)
        body.insert(0, title)
        body.insert(1, content)
        return soup

    remove_tags = [
        dict(name='div', attrs={'class':'content_top'}),
        dict(name='div', attrs={'id':'sidebar-left'}),
        ]

    # -- print-version has poor quality on this website, better do the
    # conversion ourselves in preprocess_html above.
    # def print_version(self, url):
    #     return re.sub('^.*-([0-9]+)$', 'http://www.rue89.com/print/\\1', url)