Pull from trunk

Kovid Goyal 2009-03-07 11:23:17 -08:00
commit aa47275846
7 changed files with 90 additions and 15 deletions

View File

@@ -37,6 +37,7 @@ def freeze():
     '/usr/lib/libpoppler.so.4',
     '/usr/lib/libxml2.so.2',
     '/usr/lib/libdbus-1.so.3',
+    '/usr/lib/libopenjpeg.so.2',
     '/usr/lib/libxslt.so.1',
     '/usr/lib/libxslt.so.1',
     '/usr/lib/libgthread-2.0.so.0',
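
The added line bundles OpenJPEG's shared library into the frozen Linux build alongside the others. A minimal sketch of how such a list of shared libraries is typically consumed during freezing (the list subset and destination names here are assumptions for illustration, not part of this diff):

import os, shutil

binary_includes = ['/usr/lib/libopenjpeg.so.2']   # illustrative subset
frozen_lib_dir = '/tmp/frozen/lib'                # hypothetical destination

if not os.path.exists(frozen_lib_dir):
    os.makedirs(frozen_lib_dir)
for lib in binary_includes:
    # copy each library next to the frozen binaries so the bundled
    # executables do not depend on the host system's versions
    shutil.copy2(lib, frozen_lib_dir)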

View File

@@ -164,8 +164,8 @@ to auto-generate a Table of Contents.
               'an overview of the NCX format.'))
     toc('use_auto_toc', ['--use-auto-toc'], default=False,
         help=_('Normally, if the source file already has a Table of Contents, '
-               'it is used in preference to the autodetected one. '
-               'With this option, the autodetected one is always used.'))
+               'it is used in preference to the auto-generated one. '
+               'With this option, the auto-generated one is always used.'))
     layout = c.add_group('page layout', _('Control page layout'))
     layout('margin_top', ['--margin-top'], default=5.0,
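
The reworded help text distinguishes a ToC auto-generated by the converter from one already present in the source file. Roughly equivalent flag behaviour expressed with plain optparse, for illustration only (calibre registers the option through its own config framework, as shown above):

from optparse import OptionParser

parser = OptionParser()
parser.add_option('--use-auto-toc', action='store_true', default=False,
                  help='Always use the auto-generated Table of Contents, '
                       'even if the source file already has one.')
opts, args = parser.parse_args(['--use-auto-toc'])
print opts.use_auto_toc  # -> True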

View File

@@ -46,7 +46,7 @@ class AddFiles(Add):

     def metadata_delivered(self, id, mi):
         if self.is_canceled():
-            self.reading.wakeAll()
+            self.wake_up()
             return
         if not mi.title:
             mi.title = os.path.splitext(self.names[id])[0]

@@ -163,7 +163,7 @@ class AddRecursive(Add):

     def metadata_delivered(self, id, mi):
         if self.is_canceled():
-            self.reading.wakeAll()
+            self.wake_up()
             return
         self.emit(SIGNAL('processed(PyQt_PyObject,PyQt_PyObject)'),
                   mi.title, id)
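
Both call sites now go through a single wake_up() helper instead of poking the wait condition directly. The helper's definition is not part of this diff; a minimal sketch of what it presumably looks like on the Add base class, assuming the QWaitCondition is still stored as self.reading:

from PyQt4.QtCore import QThread, QWaitCondition

class Add(QThread):
    # simplified sketch; the real class carries much more state
    def __init__(self):
        QThread.__init__(self)
        self.reading = QWaitCondition()

    def wake_up(self):
        # wake any thread blocked waiting for metadata to be read
        self.reading.wakeAll()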

View File

@@ -194,7 +194,11 @@ class ConfigDialog(QDialog, Ui_Dialog):
         lang = get_lang()
         if lang is not None and language_codes.has_key(lang):
             self.language.addItem(language_codes[lang], QVariant(lang))
-        items = [(l, language_codes[l]) for l in translations.keys() if l != lang]
+        else:
+            lang = 'en'
+            self.language.addItem('English', 'en')
+        items = [(l, language_codes[l]) for l in translations.keys() \
+                 if l != lang]
         if lang != 'en':
             items.append(('en', 'English'))
         items.sort(cmp=lambda x, y: cmp(x[1], y[1]))

@@ -320,11 +324,17 @@ class ConfigDialog(QDialog, Ui_Dialog):
         layout.addWidget(QLabel(_('Error log:')))
         el = QPlainTextEdit(d)
         layout.addWidget(el)
-        el.setPlainText(open(log_error_file, 'rb').read().decode('utf8', 'replace'))
+        try:
+            el.setPlainText(open(log_error_file, 'rb').read().decode('utf8', 'replace'))
+        except IOError:
+            el.setPlainText('No error log found')
         layout.addWidget(QLabel(_('Access log:')))
         al = QPlainTextEdit(d)
         layout.addWidget(al)
-        al.setPlainText(open(log_access_file, 'rb').read().decode('utf8', 'replace'))
+        try:
+            al.setPlainText(open(log_access_file, 'rb').read().decode('utf8', 'replace'))
+        except IOError:
+            al.setPlainText('No access log found')
         d.show()

     def set_server_options(self):
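
The two try/except blocks guard against the log files not existing yet (for example, if the content server has never been started). Since the pattern repeats, it could be factored into a helper; a small sketch, where read_log is a hypothetical name, not part of this commit:

def read_log(path, missing_msg):
    try:
        return open(path, 'rb').read().decode('utf8', 'replace')
    except IOError:
        # the log file does not exist yet
        return missing_msg

el.setPlainText(read_log(log_error_file, 'No error log found'))
al.setPlainText(read_log(log_access_file, 'No access log found'))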

View File

@@ -33,7 +33,7 @@ recipe_modules = ['recipe_' + r for r in (
         'la_republica', 'physics_today', 'chicago_tribune', 'e_novine',
         'al_jazeera', 'winsupersite', 'borba', 'courrierinternational',
         'lamujerdemivida', 'soldiers', 'theonion', 'news_times',
-        'el_universal',
+        'el_universal', 'mediapart',
     )]

import re, imp, inspect, time, os
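
For the new entry to resolve, a module named recipe_mediapart must exist in this package; the new file below provides it. The list comprehension simply prefixes each name:

# Illustration of the expansion performed above:
print ['recipe_' + r for r in ('el_universal', 'mediapart')]
# -> ['recipe_el_universal', 'recipe_mediapart']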

View File

@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+__license__   = 'GPL v3'
+__copyright__ = '2009, Mathieu Godlewski <mathieu at godlewski.fr>'
+'''
+Mediapart
+'''
+import re, string
+from datetime import date
+
+from calibre.ebooks.BeautifulSoup import BeautifulSoup
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class Mediapart(BasicNewsRecipe):
+    title = 'Mediapart'
+    __author__ = 'Mathieu Godlewski <mathieu at godlewski.fr>'
+    description = 'Global news in french from online newspapers'
+    oldest_article = 7
+    language = _('French')
+    max_articles_per_feed = 50
+    no_stylesheets = True
+
+    html2lrf_options = ['--base-font-size', '10']
+
+    feeds = [
+        ('Les articles', 'http://www.mediapart.fr/articles/feed'),
+    ]
+
+    preprocess_regexps = [ (re.compile(i[0], re.IGNORECASE|re.DOTALL), i[1]) for i in
+        [
+            (r'<div class="print-title">([^>]+)</div>', lambda match : '<h2>'+match.group(1)+'</h2>'),
+            (r'<p>Mediapart\.fr</p>', lambda match : ''),
+            (r'<p[^>]*>[\s]*</p>', lambda match : ''),
+            (r'<p><a href="[^\.]+\.pdf">[^>]*</a></p>', lambda match : ''),
+        ]
+    ]
+
+    remove_tags = [ dict(name='div', attrs={'class':'print-source_url'}),
+                    dict(name='div', attrs={'class':'print-links'}),
+                    dict(name='img', attrs={'src':'entete_article.png'}),
+    ]
+
+    def print_version(self, url):
+        raw = self.browser.open(url).read()
+        soup = BeautifulSoup(raw.decode('utf8', 'replace'))
+        div = soup.find('div', {'class':'node node-type-article'})
+        if div is None:
+            return None
+        article_id = string.replace(div['id'], 'node-', '')
+        if article_id is None:
+            return None
+        return 'http://www.mediapart.fr/print/' + article_id
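
print_version derives the print URL from the node id on the article page. A quick illustration with a made-up id (the HTML fragment is hypothetical; this uses the same BeautifulSoup calls as the recipe and must run inside calibre's environment):

from calibre.ebooks.BeautifulSoup import BeautifulSoup

raw = '<div class="node node-type-article" id="node-12345">...</div>'
soup = BeautifulSoup(raw)
div = soup.find('div', {'class': 'node node-type-article'})
print 'http://www.mediapart.fr/print/' + div['id'].replace('node-', '')
# -> http://www.mediapart.fr/print/12345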

View File

@@ -3,7 +3,7 @@
 __license__ = 'GPL v3'
 __copyright__ = '2008-2009, Darko Miletic <darko.miletic at gmail.com>'
 '''
-nin.co.yu
+nin.co.rs
 '''

 import re, urllib

@@ -19,14 +19,17 @@ class Nin(BasicNewsRecipe):
     oldest_article = 15
     simultaneous_downloads = 1
     delay = 1
-    encoding = 'utf8'
+    encoding = 'utf-8'
     needs_subscription = True
-    PREFIX = 'http://www.nin.co.yu'
+    PREFIX = 'http://www.nin.co.rs'
     INDEX = PREFIX + '/?change_lang=ls'
     LOGIN = PREFIX + '/?logout=true'
+    FEED = PREFIX + '/misc/rss.php?feed=RSS2.0'
     remove_javascript = True
     use_embedded_content = False
     language = _('Serbian')
+    lang = 'sr-RS'
+    direction = 'ltr'
     extra_css = '@font-face {font-family: "serif1";src:url(res:///opt/sony/ebook/FONT/tt0011m_.ttf)} @font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)} body{text-align: justify; font-family: serif1, serif} .article_description{font-family: sans1, sans-serif}'

     html2lrf_options = [

@@ -54,7 +57,7 @@ class Nin(BasicNewsRecipe):
     keep_only_tags =[dict(name='td', attrs={'width':'520'})]
     remove_tags_after =dict(name='html')
-    feeds =[(u'NIN', u'http://www.nin.co.yu/misc/rss.php?feed=RSS2.0')]
+    feeds =[(u'NIN', FEED)]

     def get_cover_url(self):
         cover_url = None

@@ -65,8 +68,16 @@ class Nin(BasicNewsRecipe):
         return cover_url

     def preprocess_html(self, soup):
-        mtag = '<meta http-equiv="Content-Language" content="sr-Latn-RS"/>'
+        soup.html['lang'] = self.lang
+        soup.html['dir' ] = self.direction
+        mtag = '<meta http-equiv="Content-Language" content="' + self.lang + '"/>'
+        mtag += '\n<meta http-equiv="Content-Type" content="text/html; charset=' + self.encoding + '"/>'
         soup.head.insert(0,mtag)
         for item in soup.findAll(style=True):
             del item['style']
         return soup
+
+    def get_article_url(self, article):
+        raw = article.get('link', None)
+        return raw.replace('.co.yu', '.co.rs')
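
The new get_article_url rewrites feed links that still point at the old Yugoslav domain. Illustration with a made-up article link:

print 'http://www.nin.co.yu/2009/some-article'.replace('.co.yu', '.co.rs')
# -> http://www.nin.co.rs/2009/some-article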