New recipe for Mediapart.fr by Mathieu Godlewski

This commit is contained in:
Kovid Goyal 2009-03-07 09:30:14 -08:00
parent 25bc8cc1ef
commit 32a66a5e28
2 changed files with 54 additions and 1 deletion

View File

@ -33,7 +33,7 @@ recipe_modules = ['recipe_' + r for r in (
'la_republica', 'physics_today', 'chicago_tribune', 'e_novine',
'al_jazeera', 'winsupersite', 'borba', 'courrierinternational',
'lamujerdemivida', 'soldiers', 'theonion', 'news_times',
'el_universal', 'mediapart',
)]
import re, imp, inspect, time, os

View File

@ -0,0 +1,53 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Mathieu Godlewski <mathieu at godlewski.fr>'
'''
Mediapart
'''
import re, string
from datetime import date
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.web.feeds.news import BasicNewsRecipe
class Mediapart(BasicNewsRecipe):
    '''Fetch articles from Mediapart (http://www.mediapart.fr) via its RSS feed.'''
    title = 'Mediapart'
    __author__ = 'Mathieu Godlewski <mathieu at godlewski.fr>'
    description = 'Global news in french from online newspapers'
    oldest_article = 7  # days
    language = _('French')
    max_articles_per_feed = 50
    no_stylesheets = True
    html2lrf_options = ['--base-font-size', '10']

    feeds = [
        ('Les articles', 'http://www.mediapart.fr/articles/feed'),
    ]

    # Clean up the print page: promote the title div to an <h2>, drop the
    # 'Mediapart.fr' boilerplate, empty paragraphs, and PDF attachment links.
    preprocess_regexps = [(re.compile(i[0], re.IGNORECASE|re.DOTALL), i[1]) for i in
        [
            (r'<div class="print-title">([^>]+)</div>', lambda match : '<h2>'+match.group(1)+'</h2>'),
            (r'<p>Mediapart\.fr</p>', lambda match : ''),
            (r'<p[^>]*>[\s]*</p>', lambda match : ''),
            (r'<p><a href="[^\.]+\.pdf">[^>]*</a></p>', lambda match : ''),
        ]
    ]

    remove_tags = [
        dict(name='div', attrs={'class':'print-source_url'}),
        dict(name='div', attrs={'class':'print-links'}),
        dict(name='img', attrs={'src':'entete_article.png'}),
    ]

    def print_version(self, url):
        '''Return the printer-friendly URL for an article page, or None.

        The article page contains a div whose id is 'node-<number>'; that
        number addresses the print view at /print/<number>.
        '''
        raw = self.browser.open(url).read()
        soup = BeautifulSoup(raw.decode('utf8', 'replace'))
        div = soup.find('div', {'class':'node node-type-article'})
        if div is None:
            return None
        # str.replace instead of the deprecated string.replace module
        # function (removed in Python 3); .get() avoids a KeyError when the
        # div carries no id attribute.  The original 'is None' check was
        # dead code: replace() always returns a string.
        article_id = div.get('id', '').replace('node-', '')
        if not article_id:
            return None
        return 'http://www.mediapart.fr/print/'+article_id