py3: Calls to UUID(hexkey) require strings
Also various misc cleanups I came across while reviewing the last py3 merge PR
commit 422537926d
parent 015ff1611b
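
The commit title refers to the obfuscation-key handling in the EPUB code paths below: Python 3's uuid.UUID() accepts only a hex string, so the key must not be passed through as_bytes() first. A minimal sketch of the difference (standard-library behaviour, not calibre code; the sample key is made up):

    import uuid

    # A hypothetical identifier in the same urn:uuid: form the EPUB code sees.
    key = 'urn:uuid:c3a9ed0a-1f2b-4c3d-9e4f-5a6b7c8d9e0f'.rpartition(':')[-1]

    print(uuid.UUID(key).bytes)          # works on py2 and py3: the 16-byte key
    try:
        uuid.UUID(key.encode('ascii'))   # roughly what as_bytes(key) passed in
    except TypeError as err:
        print('Python 3 rejects a bytes key:', err)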
@@ -174,9 +174,8 @@ class CHMInput(InputFormatPlugin):
         return htmlpath, toc

     def _read_file(self, name):
-        f = open(name, 'rb')
-        data = f.read()
-        f.close()
+        with lopen(name, 'rb') as f:
+            data = f.read()
         return data

     def add_node(self, node, toc, ancestor_map):
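
Most of the remaining hunks repeat the cleanup shown above: an explicit open()/read()/close() sequence, or a bare open() call, becomes a with-block using calibre's lopen(). A rough sketch of why the with-block is preferable (illustrative only; lopen is assumed to behave as a drop-in replacement for the builtin open()):

    def read_file_old(name):
        f = open(name, 'rb')
        data = f.read()
        f.close()           # skipped if read() raises, leaking the handle
        return data

    def read_file_new(name):
        with open(name, 'rb') as f:   # the diff uses lopen() here
            data = f.read()           # handle is closed even on error
        return data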
@@ -8,7 +8,7 @@ import os, re, posixpath
 from itertools import cycle

 from calibre.customize.conversion import InputFormatPlugin, OptionRecommendation
-from polyglot.builtins import as_bytes, getcwd
+from polyglot.builtins import getcwd

 ADOBE_OBFUSCATION = 'http://ns.adobe.com/pdf/enc#RC'
 IDPF_OBFUSCATION = 'http://www.idpf.org/2008/embedding'
@@ -24,7 +24,7 @@ def decrypt_font_data(key, data, algorithm):


 def decrypt_font(key, path, algorithm):
-    with open(path, 'r+b') as f:
+    with lopen(path, 'r+b') as f:
         data = decrypt_font_data(key, f.read(), algorithm)
         f.seek(0), f.truncate(), f.write(data)

@@ -57,7 +57,7 @@ class EPUBInput(InputFormatPlugin):
                         (item.text and item.text.startswith('urn:uuid:')):
                     try:
                         key = item.text.rpartition(':')[-1]
-                        key = uuid.UUID(as_bytes(key)).bytes
+                        key = uuid.UUID(key).bytes
                     except:
                         import traceback
                         traceback.print_exc()
@@ -223,8 +223,8 @@ class EPUBInput(InputFormatPlugin):
             if os.path.exists(guide_cover):
                 renderer = render_html_svg_workaround(guide_cover, log)
                 if renderer is not None:
-                    open('calibre_raster_cover.jpg', 'wb').write(
-                        renderer)
+                    with lopen('calibre_raster_cover.jpg', 'wb') as f:
+                        f.write(renderer)

             # Set the titlepage guide entry
             self.set_guide_type(opf, 'titlepage', guide_cover, 'Title Page')
@@ -238,7 +238,7 @@ class EPUBInput(InputFormatPlugin):
                 if k.endswith(attr):
                     return v
         try:
-            with open('META-INF/container.xml', 'rb') as f:
+            with lopen('META-INF/container.xml', 'rb') as f:
                 root = etree.fromstring(f.read())
             for r in root.xpath('//*[local-name()="rootfile"]'):
                 if attr(r, 'media-type') != "application/oebps-package+xml":
@@ -348,7 +348,7 @@ class EPUBInput(InputFormatPlugin):
         with lopen('content.opf', 'wb') as nopf:
             nopf.write(opf.render())

-        return os.path.abspath(u'content.opf')
+        return os.path.abspath('content.opf')

     def convert_epub3_nav(self, nav_path, opf, log, opts):
         from lxml import etree
@@ -421,7 +421,7 @@ class EPUBInput(InputFormatPlugin):
                     changed = True
                     elem.set('data-calibre-removed-titlepage', '1')
         if changed:
-            with open(nav_path, 'wb') as f:
+            with lopen(nav_path, 'wb') as f:
                 f.write(serialize(root, 'application/xhtml+xml'))

     def postprocess_book(self, oeb, opts, log):
@@ -191,7 +191,7 @@ class FB2Output(OutputFormatPlugin):
             close = True
             if not os.path.exists(os.path.dirname(output_path)) and os.path.dirname(output_path) != '':
                 os.makedirs(os.path.dirname(output_path))
-            out_stream = open(output_path, 'wb')
+            out_stream = lopen(output_path, 'wb')
         else:
             out_stream = output_path

@@ -53,14 +53,14 @@ class MOBIInput(InputFormatPlugin):
             if raw:
                 if isinstance(raw, unicode_type):
                     raw = raw.encode('utf-8')
-                with open('debug-raw.html', 'wb') as f:
+                with lopen('debug-raw.html', 'wb') as f:
                     f.write(raw)
         from calibre.ebooks.oeb.base import close_self_closing_tags
         for f, root in parse_cache.items():
             raw = html.tostring(root, encoding='utf-8', method='xml',
                                 include_meta_content_type=False)
             raw = close_self_closing_tags(raw)
-            with open(f, 'wb') as q:
+            with lopen(f, 'wb') as q:
                 q.write(raw)
         accelerators['pagebreaks'] = '//h:div[@class="mbp_pagebreak"]'
         return mr.created_opf_path
@@ -53,7 +53,7 @@ class OEBOutput(OutputFormatPlugin):
                     # Needed as I can't get lxml to output opf:role and
                     # not output <opf:metadata> as well
                     raw = re.sub(br'(<[/]{0,1})opf:', br'\1', raw)
-                    with open(href, 'wb') as f:
+                    with lopen(href, 'wb') as f:
                         f.write(raw)

         for item in oeb_book.manifest:
@@ -65,7 +65,7 @@ class OEBOutput(OutputFormatPlugin):
             dir = os.path.dirname(path)
             if not os.path.exists(dir):
                 os.makedirs(dir)
-            with open(path, 'wb') as f:
+            with lopen(path, 'wb') as f:
                 f.write(item.bytes_representation)
             item.unload_data_from_memory(memory=path)

@@ -39,9 +39,9 @@ class PDBOutput(OutputFormatPlugin):
         close = False
         if not hasattr(output_path, 'write'):
             close = True
-            if not os.path.exists(os.path.dirname(output_path)) and os.path.dirname(output_path) != '':
+            if not os.path.exists(os.path.dirname(output_path)) and os.path.dirname(output_path):
                 os.makedirs(os.path.dirname(output_path))
-            out_stream = open(output_path, 'wb')
+            out_stream = lopen(output_path, 'wb')
         else:
             out_stream = output_path

@@ -36,7 +36,7 @@ class PDFInput(InputFormatPlugin):
         from calibre.ebooks.pdf.reflow import PDFDocument

         pdftohtml(getcwd(), stream.name, self.opts.no_images, as_xml=True)
-        with open(u'index.xml', 'rb') as f:
+        with lopen('index.xml', 'rb') as f:
             xml = clean_ascii_chars(f.read())
         PDFDocument(xml, self.opts, self.log)
         return os.path.join(getcwd(), 'metadata.opf')
@@ -69,12 +69,12 @@ class PDFInput(InputFormatPlugin):

         opf.create_spine(['index.html'])
         log.debug('Rendering manifest...')
-        with open('metadata.opf', 'wb') as opffile:
+        with lopen('metadata.opf', 'wb') as opffile:
             opf.render(opffile)
         if os.path.exists('toc.ncx'):
             ncxid = opf.manifest.id_for_path('toc.ncx')
             if ncxid:
-                with open('metadata.opf', 'r+b') as f:
+                with lopen('metadata.opf', 'r+b') as f:
                     raw = f.read().replace(b'<spine', b'<spine toc="%s"' % as_bytes(ncxid))
                     f.seek(0)
                     f.write(raw)
@@ -20,8 +20,8 @@ from polyglot.builtins import iteritems, unicode_type
 UNITS = ['millimeter', 'centimeter', 'point', 'inch' , 'pica' , 'didot',
          'cicero', 'devicepixel']

-PAPER_SIZES = [u'a0', u'a1', u'a2', u'a3', u'a4', u'a5', u'a6', u'b0', u'b1',
-               u'b2', u'b3', u'b4', u'b5', u'b6', u'legal', u'letter']
+PAPER_SIZES = ['a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'b0', 'b1',
+               'b2', 'b3', 'b4', 'b5', 'b6', 'legal', 'letter']


 class PDFMetadata(object):  # {{{
@@ -30,14 +30,14 @@ class PMLInput(InputFormatPlugin):
         hclose = False

         if not hasattr(pml_path, 'read'):
-            pml_stream = open(pml_path, 'rb')
+            pml_stream = lopen(pml_path, 'rb')
             pclose = True
         else:
             pml_stream = pml_path
             pml_stream.seek(0)

         if not hasattr(html_path, 'write'):
-            html_stream = open(html_path, 'wb')
+            html_stream = lopen(html_path, 'wb')
             hclose = True
         else:
             html_stream = html_path
@@ -138,8 +138,8 @@ class PMLInput(InputFormatPlugin):
         opf.create_manifest(manifest_items)
         opf.create_spine(pages)
         opf.set_toc(toc)
-        with open('metadata.opf', 'wb') as opffile:
-            with open('toc.ncx', 'wb') as tocfile:
+        with lopen('metadata.opf', 'wb') as opffile:
+            with lopen('toc.ncx', 'wb') as tocfile:
                 opf.render(opffile, tocfile, 'toc.ncx')

         return os.path.join(getcwd(), 'metadata.opf')
@@ -43,7 +43,7 @@ class PMLOutput(OutputFormatPlugin):
         with TemporaryDirectory('_pmlz_output') as tdir:
             pmlmlizer = PMLMLizer(log)
             pml = unicode_type(pmlmlizer.extract_content(oeb_book, opts))
-            with open(os.path.join(tdir, 'index.pml'), 'wb') as out:
+            with lopen(os.path.join(tdir, 'index.pml'), 'wb') as out:
                 out.write(pml.encode(opts.pml_output_encoding, 'replace'))

             img_path = os.path.join(tdir, 'index_img')
@@ -77,5 +77,5 @@ class PMLOutput(OutputFormatPlugin):

                 path = os.path.join(out_dir, image_hrefs[item.href])

-                with open(path, 'wb') as out:
+                with lopen(path, 'wb') as out:
                     out.write(data)
@@ -28,9 +28,9 @@ class RBOutput(OutputFormatPlugin):
         close = False
         if not hasattr(output_path, 'write'):
             close = True
-            if not os.path.exists(os.path.dirname(output_path)) and os.path.dirname(output_path) != '':
+            if not os.path.exists(os.path.dirname(output_path)) and os.path.dirname(output_path):
                 os.makedirs(os.path.dirname(output_path))
-            out_stream = open(output_path, 'wb')
+            out_stream = lopen(output_path, 'wb')
         else:
             out_stream = output_path

@@ -65,7 +65,7 @@ class RecipeInput(InputFormatPlugin):
                 zf = ZipFile(recipe_or_file, 'r')
                 zf.extractall()
                 zf.close()
-                with open('download.recipe', 'rb') as f:
+                with lopen('download.recipe', 'rb') as f:
                     self.recipe_source = f.read()
                 recipe = compile_recipe(self.recipe_source)
                 recipe.needs_subscription = False
@@ -88,7 +88,7 @@ class RecipeInput(InputFormatPlugin):
                     self.recipe_source = self.recipe_source.encode('utf-8')
                 recipe = compile_recipe(self.recipe_source)
             elif os.access(recipe_or_file, os.R_OK):
-                with open(recipe_or_file, 'rb') as f:
+                with lopen(recipe_or_file, 'rb') as f:
                     self.recipe_source = f.read()
                 recipe = compile_recipe(self.recipe_source)
                 log('Using custom recipe')
|
@ -28,7 +28,7 @@ class RTFOutput(OutputFormatPlugin):
|
||||
close = True
|
||||
if not os.path.exists(os.path.dirname(output_path)) and os.path.dirname(output_path) != '':
|
||||
os.makedirs(os.path.dirname(output_path))
|
||||
out_stream = open(output_path, 'wb')
|
||||
out_stream = lopen(output_path, 'wb')
|
||||
else:
|
||||
out_stream = output_path
|
||||
|
||||
|
@ -31,9 +31,9 @@ class TCROutput(OutputFormatPlugin):
|
||||
close = False
|
||||
if not hasattr(output_path, 'write'):
|
||||
close = True
|
||||
if not os.path.exists(os.path.dirname(output_path)) and os.path.dirname(output_path) != '':
|
||||
if not os.path.exists(os.path.dirname(output_path)) and os.path.dirname(output_path):
|
||||
os.makedirs(os.path.dirname(output_path))
|
||||
out_stream = open(output_path, 'wb')
|
||||
out_stream = lopen(output_path, 'wb')
|
||||
else:
|
||||
out_stream = output_path
|
||||
|
||||
|
@ -297,8 +297,8 @@ OptionRecommendation(name='toc_threshold',
|
||||
OptionRecommendation(name='max_toc_links',
|
||||
recommended_value=50, level=OptionRecommendation.LOW,
|
||||
help=_('Maximum number of links to insert into the TOC. Set to 0 '
|
||||
'to disable. Default is: %default. Links are only added to the '
|
||||
'TOC if less than the threshold number of chapters were detected.'
|
||||
'to disable. Default is: %default. Links are only added to the '
|
||||
'TOC if less than the threshold number of chapters were detected.'
|
||||
)
|
||||
),
|
||||
|
||||
@@ -537,8 +537,8 @@ OptionRecommendation(name='asciiize',
             '(characters shared by Chinese and Japanese for instance) the '
             'representation based on the current calibre interface language will be '
             'used.')%
-            u'\u041c\u0438\u0445\u0430\u0438\u043b '
-            u'\u0413\u043e\u0440\u0431\u0430\u0447\u0451\u0432'
+            '\u041c\u0438\u0445\u0430\u0438\u043b '
+            '\u0413\u043e\u0440\u0431\u0430\u0447\u0451\u0432'
             )
             ),

@@ -948,8 +948,8 @@ OptionRecommendation(name='search_replace',
         if self.opts.read_metadata_from_opf is not None:
             self.opts.read_metadata_from_opf = os.path.abspath(
                                             self.opts.read_metadata_from_opf)
-            opf = OPF(open(self.opts.read_metadata_from_opf, 'rb'),
-                      os.path.dirname(self.opts.read_metadata_from_opf))
+            with lopen(self.opts.read_metadata_from_opf, 'rb') as stream:
+                opf = OPF(stream, os.path.dirname(self.opts.read_metadata_from_opf))
             mi = opf.to_book_metadata()
             self.opts_to_mi(mi)
             if mi.cover:
@@ -958,7 +958,8 @@ OptionRecommendation(name='search_replace',
                 ext = mi.cover.rpartition('.')[-1].lower().strip()
                 if ext not in ('png', 'jpg', 'jpeg', 'gif'):
                     ext = 'jpg'
-                mi.cover_data = (ext, open(mi.cover, 'rb').read())
+                with lopen(mi.cover, 'rb') as stream:
+                    mi.cover_data = (ext, stream.read())
                 mi.cover = None
             self.user_metadata = mi

@@ -1014,7 +1015,7 @@ OptionRecommendation(name='search_replace',
             try:
                 sys.stdout.flush()
                 sys.stderr.flush()
-            except:
+            except Exception:
                 pass

     def dump_oeb(self, oeb, out_dir):
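
The hunk above is one of several that narrow a bare except: to except Exception:. A short sketch of the practical difference (illustrative only, not calibre code): ordinary errors are still swallowed, but SystemExit and KeyboardInterrupt are no longer trapped, so Ctrl-C during the flush is not silently eaten.

    import sys

    try:
        sys.stdout.flush()
        sys.stderr.flush()
    except Exception:   # a bare "except:" would also swallow KeyboardInterrupt
        pass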
@@ -1063,7 +1064,7 @@ OptionRecommendation(name='search_replace',
             self.opts.debug_pipeline = os.path.abspath(self.opts.debug_pipeline)
             if not os.path.exists(self.opts.debug_pipeline):
                 os.makedirs(self.opts.debug_pipeline)
-            with open(os.path.join(self.opts.debug_pipeline, 'README.txt'), 'wb') as f:
+            with lopen(os.path.join(self.opts.debug_pipeline, 'README.txt'), 'wb') as f:
                 f.write(DEBUG_README)
             for x in ('input', 'parsed', 'structure', 'processed'):
                 x = os.path.join(self.opts.debug_pipeline, x)
@@ -1081,7 +1082,7 @@ OptionRecommendation(name='search_replace',

         tdir = PersistentTemporaryDirectory('_plumber')
         stream = self.input if self.input_fmt == 'recipe' else \
-                open(self.input, 'rb')
+                lopen(self.input, 'rb')
         if self.input_fmt == 'recipe':
             self.opts.original_recipe_input_arg = self.original_input_arg

@@ -1175,7 +1176,7 @@ OptionRecommendation(name='search_replace',
             else:
                 try:
                     fkey = list(map(float, fkey.split(',')))
-                except:
+                except Exception:
                     self.log.error('Invalid font size key: %r ignoring'%fkey)
                     fkey = self.opts.dest.fkey

@@ -14,7 +14,7 @@ import time
 import unicodedata
 import uuid
 from collections import defaultdict
-from polyglot.builtins import iteritems, unicode_type, zip, as_bytes, map
+from polyglot.builtins import iteritems, unicode_type, zip, map
 from io import BytesIO
 from itertools import count

@@ -1290,7 +1290,7 @@ class EpubContainer(Container):
                     (item.text and item.text.startswith('urn:uuid:')):
                 try:
                     key = item.text.rpartition(':')[-1]
-                    key = uuid.UUID(as_bytes(key)).bytes
+                    key = uuid.UUID(key).bytes
                 except Exception:
                     self.log.exception('Failed to parse obfuscation key')
                     key = None