Fix Mediapart

Kovid Goyal 2012-02-19 21:54:28 +05:30
parent 6df4b8ff39
commit c7f706348c


@@ -1,16 +1,17 @@
 __license__ = 'GPL v3'
-__copyright__ = '2009, Mathieu Godlewski <mathieu at godlewski.fr>; 2010, Louis Gesbert <meta at antislash dot info>'
+__copyright__ = '2009, Mathieu Godlewski <mathieu at godlewski.fr>; 2010, 2011, Louis Gesbert <meta at antislash dot info>'
 '''
 Mediapart
 '''
 
-from calibre.ebooks.BeautifulSoup import Tag
+import re
+from calibre.ebooks.BeautifulSoup import BeautifulSoup
 from calibre.web.feeds.news import BasicNewsRecipe
 
 class Mediapart(BasicNewsRecipe):
     title = 'Mediapart'
-    __author__ = 'Mathieu Godlewski'
-    description = 'Global news in french from online newspapers'
+    __author__ = 'Mathieu Godlewski, Louis Gesbert'
+    description = 'Global news in french from news site Mediapart'
     oldest_article = 7
     language = 'fr'
     needs_subscription = True
@@ -18,52 +19,30 @@ class Mediapart(BasicNewsRecipe):
     max_articles_per_feed = 50
     no_stylesheets = True
 
-    cover_url = 'http://www.mediapart.fr/sites/all/themes/mediapart/mediapart/images/annonce.jpg'
+    cover_url = 'http://static.mediapart.fr/files/pave_mediapart.jpg'
 
     feeds = [
         ('Les articles', 'http://www.mediapart.fr/articles/feed'),
     ]
 
-# -- print-version has poor quality on this website, better do the conversion ourselves
-#
-#    preprocess_regexps = [ (re.compile(i[0], re.IGNORECASE|re.DOTALL), i[1]) for i in
-#        [
-#            (r'<div class="print-title">([^>]+)</div>', lambda match : '<h2>'+match.group(1)+'</h2>'),
-#            (r'<span class=\'auteur_staff\'>[^>]+<a title=\'[^\']*\'[^>]*>([^<]*)</a>[^<]*</span>',
-#                lambda match : '<i>'+match.group(1)+'</i>'),
-#            (r'\'', lambda match: '&rsquo;'),
-#        ]
-#    ]
-#
-#    remove_tags = [ dict(name='div', attrs={'class':'print-source_url'}),
-#                    dict(name='div', attrs={'class':'print-links'}),
-#                    dict(name='img', attrs={'src':'entete_article.png'}),
-#                    dict(name='br') ]
-#
-#    def print_version(self, url):
-#        raw = self.browser.open(url).read()
-#        soup = BeautifulSoup(raw.decode('utf8', 'replace'))
-#        div = soup.find('div', {'id':re.compile('node-\d+')})
-#        if div is None:
-#            return None
-#        article_id = string.replace(div['id'], 'node-', '')
-#        if article_id is None:
-#            return None
-#        return 'http://www.mediapart.fr/print/'+article_id
-
-# -- Non-print version
-    remove_tags = [ dict(name='div', attrs={'class':'advert'}) ]
-    keep_only_tags = [
-        dict(name='h1', attrs={'class':'title'}),
-        dict(name='div', attrs={'class':'page_papier_detail'}),
-    ]
-
-    def preprocess_html(self,soup):
-        for title in soup.findAll('div', {'class':'titre'}):
-            tag = Tag(soup, 'h3')
-            title.replaceWith(tag)
-            tag.insert(0,title)
-        return soup
+# -- print-version
+
+    preprocess_regexps = [ (re.compile(i[0], re.IGNORECASE|re.DOTALL), i[1]) for i in
+        [
+            (r'<div class="print-title">([^>]+)</div>', lambda match : '<h2>'+match.group(1)+'</h2>'),
+            (r'\'', lambda match: '&rsquo;')
+        ]
+    ]
+
+    remove_tags = [ dict(name='div', attrs={'class':'print-source_url'}) ]
+
+    def print_version(self, url):
+        raw = self.browser.open(url).read()
+        soup = BeautifulSoup(raw.decode('utf8', 'replace'))
+        link = soup.find('a', {'title':'Imprimer'})
+        if link is None:
+            return None
+        return link['href']
 
 # -- Handle login
 
@@ -76,4 +55,3 @@ class Mediapart(BasicNewsRecipe):
             br['pass'] = self.password
             br.submit()
         return br
-
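
The heart of this change is the new print_version(): rather than cleaning up the regular article page, the recipe now follows the site's own "Imprimer" (print) link. Below is a minimal standalone sketch of that lookup, assuming calibre is installed and using its bundled browser() and BeautifulSoup as the recipe does; the article URL is a placeholder, and a subscriber login may be required for the print link to be present.

    # Sketch only: reproduces the lookup done by print_version() above.
    from calibre import browser                           # mechanize-based browser
    from calibre.ebooks.BeautifulSoup import BeautifulSoup

    def find_print_url(article_url):
        raw = browser().open(article_url).read()
        soup = BeautifulSoup(raw.decode('utf8', 'replace'))
        link = soup.find('a', {'title': 'Imprimer'})       # the page's print link
        return link['href'] if link is not None else None

    # Placeholder URL; the real recipe gets article URLs from the RSS feed.
    print(find_print_url('http://www.mediapart.fr/journal/international/some-article'))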