Pull from trunk

Kovid Goyal 2009-03-08 13:50:01 -07:00
commit 8ef6067724
10 changed files with 159 additions and 38 deletions

View File

@@ -3,11 +3,14 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, os, re, logging, time, subprocess, atexit, mimetypes, \
__builtin__
__builtin__, warnings
__builtin__.__dict__['dynamic_property'] = lambda(func): func(None)
from htmlentitydefs import name2codepoint
from math import floor
warnings.simplefilter('ignore', DeprecationWarning)
from PyQt4.QtCore import QUrl
from PyQt4.QtGui import QDesktopServices
from calibre.startup import plugins, winutil, winutilerror

View File

@@ -857,7 +857,7 @@ class Processor(Parser):
except ValueError:
setting = ''
face = font.attrib.pop('face', None)
if face is not None:
if face:
faces = []
for face in face.split(','):
face = face.strip()

View File

@@ -389,7 +389,13 @@ class MobiReader(object):
opf.cover = 'images/%05d.jpg'%(self.book_header.exth.cover_offset+1)
elif mi.cover is not None:
opf.cover = mi.cover
else:
opf.cover = 'images/%05d.jpg'%1
if not os.path.exists(os.path.join(os.path.dirname(htmlfile),
*opf.cover.split('/'))):
opf.cover = None
manifest = [(htmlfile, 'text/x-oeb1-document'),
(os.path.abspath('styles.css'), 'text/css')]
bp = os.path.dirname(htmlfile)
for i in getattr(self, 'image_names', []):
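
A minimal sketch of the fallback added above (the paths here are hypothetical, not from the commit): when neither the EXTH record nor the metadata supplies a cover, the reader now guesses the first extracted image and discards the guess if that file is not actually on disk.

import os

htmlfile = '/tmp/book/book.html'   # hypothetical extraction location
cover = 'images/%05d.jpg' % 1      # guessed first image -> 'images/00001.jpg'
if not os.path.exists(os.path.join(os.path.dirname(htmlfile), *cover.split('/'))):
    cover = None                   # guess was wrong, fall back to no cover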

Binary file not shown (new image, 951 B)

View File

@@ -393,43 +393,27 @@ def option_parser():
help='Save a manifest of all installed files to the specified location')
return parser
def install_man_pages(fatal_errors):
from bz2 import compress
import subprocess
def install_man_pages(fatal_errors, use_destdir=False):
from calibre.utils.help2man import create_man_page
prefix = os.environ.get('DESTDIR', '/') if use_destdir else '/'
manpath = os.path.join(prefix, 'usr/share/man/man1')
if not os.path.exists(manpath):
os.makedirs(manpath)
print 'Installing MAN pages...'
manpath = '/usr/share/man/man1'
f = NamedTemporaryFile()
f.write('[see also]\nhttp://%s.kovidgoyal.net\n'%__appname__)
f.flush()
manifest = []
os.environ['PATH'] += ':'+os.path.expanduser('~/bin')
for src in entry_points['console_scripts']:
prog = src[:src.index('=')].strip()
if prog in ('ebook-device', 'markdown-calibre',
'calibre-fontconfig', 'calibre-parallel'):
prog, right = src.split('=')
prog = prog.strip()
module = __import__(right.split(':')[0].strip(), fromlist=['a'])
parser = getattr(module, 'option_parser', None)
if parser is None:
continue
help2man = ('help2man', prog, '--name', 'part of %s'%__appname__,
'--section', '1', '--no-info', '--include',
f.name, '--manual', __appname__)
parser = parser()
raw = create_man_page(prog, parser)
manfile = os.path.join(manpath, prog+'.1'+__appname__+'.bz2')
print '\tInstalling MAN page for', prog
try:
p = subprocess.Popen(help2man, stdout=subprocess.PIPE)
except OSError, err:
import errno
if err.errno != errno.ENOENT:
raise
print 'Failed to install MAN pages as help2man is missing from your system'
break
o = p.stdout.read()
raw = re.compile(r'^\.IP\s*^([A-Z :]+)$', re.MULTILINE).sub(r'.SS\n\1', o)
if not raw.strip():
print 'Unable to create MAN page for', prog
continue
f2 = open_file(manfile)
manifest.append(f2.name)
f2.write(compress(raw))
open(manfile, 'wb').write(raw)
manifest.append(manfile)
return manifest
def post_install():
@@ -441,9 +425,9 @@ def post_install():
manifest = []
setup_desktop_integration(opts.fatal_errors)
if opts.no_root or os.geteuid() == 0:
manifest += install_man_pages(opts.fatal_errors, use_destdir)
manifest += setup_udev_rules(opts.group_file, not opts.dont_reload, opts.fatal_errors)
manifest += setup_completion(opts.fatal_errors)
manifest += install_man_pages(opts.fatal_errors)
else:
print "Skipping udev, completion, and man-page install for non-root user."

View File

@@ -18,7 +18,6 @@ DEPENDENCIES = [
('lxml', '2.1.5', 'lxml', 'python-lxml', 'python-lxml'),
('python-dateutil', '1.4.1', 'python-dateutil', 'python-dateutil', 'python-dateutil'),
('BeautifulSoup', '3.0.5', 'beautifulsoup', 'python-beautifulsoup', 'python-BeautifulSoup'),
('help2man', '1.36.4', 'help2man', 'help2man', 'help2man'),
]

View File

@@ -88,7 +88,7 @@ sudo python -c "import urllib2; exec urllib2.urlopen('http://calibre.kovidgoyal.
be ignored.
</li>
<li>
You must have help2man and xdg-utils installed
You must have xdg-utils installed
on your system before running the installer.
</li>
<li>

View File

@@ -0,0 +1,59 @@
from __future__ import with_statement
__license__ = 'GPL 3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import time, bz2
from calibre.constants import __version__, __appname__, __author__
def create_man_page(prog, parser):
usage = parser.usage.splitlines()
for i, line in enumerate(list(usage)):
if not line.strip():
usage[i] = '.PP'
else:
usage[i] = line.replace('%prog', prog)
lines = [
'.TH ' + prog.upper() + ' "1" ' + time.strftime('"%B %Y"') +
' "%s (%s %s)" "%s"'%(prog, __appname__, __version__, __appname__),
'.SH NAME',
prog + r' \- part of '+__appname__,
'.SH SYNOPSIS',
'.B "%s"'%prog + r'\fR '+' '.join(usage[0].split()[1:]),
'.SH DESCRIPTION',
]
lines += usage[1:]
lines += [
'.SH OPTIONS'
]
def format_option(opt):
ans = ['.TP']
opts = []
opts += opt._short_opts
opts.append(opt.get_opt_string())
opts = [r'\fB'+x.replace('-', r'\-')+r'\fR' for x in opts]
ans.append(', '.join(opts))
help = opt.help if opt.help else ''
ans.append(help.replace('%prog', prog).replace('%default', str(opt.default)))
return ans
for opt in parser.option_list:
lines.extend(format_option(opt))
for group in parser.option_groups:
lines.append('.SS '+group.title)
if group.description:
lines.extend(['.PP', group.description])
for opt in group.option_list:
lines.extend(format_option(opt))
lines += ['.SH SEE ALSO',
'The User Manual is available at '
'http://calibre.kovidgoyal.net/user_manual',
'.PP', '.B Created by '+__author__]
return bz2.compress('\n'.join(lines))
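
A minimal usage sketch for the new helper (the parser and output path below are stand-ins, not part of the commit): create_man_page accepts any optparse-style parser and returns bz2-compressed troff source, which linux.py then writes straight into the man1 directory.

import os
from optparse import OptionParser
from calibre.utils.help2man import create_man_page

parser = OptionParser(usage='%prog [options] file')      # stand-in for a calibre option_parser()
parser.add_option('--verbose', default=False, help='Print more information')
raw = create_man_page('example-prog', parser)            # bz2-compressed troff text
manfile = os.path.join('/tmp', 'example-prog.1calibre.bz2')  # hypothetical destination
open(manfile, 'wb').write(raw)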

View File

@@ -33,7 +33,7 @@ recipe_modules = ['recipe_' + r for r in (
'la_republica', 'physics_today', 'chicago_tribune', 'e_novine',
'al_jazeera', 'winsupersite', 'borba', 'courrierinternational',
'lamujerdemivida', 'soldiers', 'theonion', 'news_times',
'el_universal', 'mediapart',
'el_universal', 'mediapart', 'wikinews_en',
)]
import re, imp, inspect, time, os

View File

@@ -0,0 +1,70 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>'
'''
en.wikinews.org
'''
from calibre.web.feeds.news import BasicNewsRecipe
class WikiNews(BasicNewsRecipe):
title = 'Wikinews'
__author__ = 'Darko Miletic'
description = 'News from wikipedia'
category = 'news, world'
oldest_article = 7
max_articles_per_feed = 100
publisher = 'Wiki'
no_stylesheets = True
use_embedded_content = False
encoding = 'utf-8'
remove_javascript = True
language = _('English')
html2lrf_options = [
'--comment', description
, '--category', category
, '--publisher', publisher
]
html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"'
keep_only_tags = [
dict(name='h1', attrs={'id':'firstHeading'})
,dict(name='div', attrs={'id':'bodyContent'})
]
remove_tags = [
dict(name='link')
,dict(name='div',attrs={'id':['printfooter','catlinks','footer']})
,dict(name='div',attrs={'class':['thumb left','thumb right']})
]
remove_tags_after = dict(name='h2')
feeds = [(u'News', u'http://feeds.feedburner.com/WikinewsLatestNews')]
def get_article_url(self, article):
artl = article.get('link', None)
rest, sep, article_id = artl.rpartition('/')
return 'http://en.wikinews.org/wiki/' + article_id
def print_version(self, url):
rest, sep, article_id = url.rpartition('/')
return 'http://en.wikinews.org/w/index.php?title=' + article_id + '&printable=yes'
def preprocess_html(self, soup):
mtag = '<meta http-equiv="Content-Language" content="en"/><meta http-equiv="Content-Type" content="text/html; charset=utf-8">'
soup.head.insert(0,mtag)
btag = soup.find('div',attrs={'id':'bodyContent'})
for item in btag.findAll('div'):
item.extract()
for item in btag.findAll('h2'):
item.extract()
for item in soup.findAll(style=True):
del item['style']
for item in soup.findAll(font=True):
del item['font']
return soup
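
A tiny sketch of the URL rewriting this recipe performs (the article slug below is made up): both get_article_url and print_version keep only the part of the feed link after the last slash and rebuild a wikinews.org URL from it.

url = 'http://en.wikinews.org/wiki/Some_article_title'   # hypothetical article link
rest, sep, article_id = url.rpartition('/')
print('http://en.wikinews.org/w/index.php?title=' + article_id + '&printable=yes')
# http://en.wikinews.org/w/index.php?title=Some_article_title&printable=yes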