Mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-09 03:04:10 -04:00)

Commit 18d57d6298 (parent 5c95a0ad18)

Automated translation to f-strings using flynt with --aggressive and patched to not pointlessly call int().
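For illustration, a minimal sketch (not part of the commit) of the kind of rewrite flynt performs throughout this diff: printf-style `%` formatting is replaced by an equivalent f-string, with values interpolated directly rather than wrapped in a redundant int() call. The helper names below are hypothetical.

```python
# Hypothetical before/after pair mirroring the pattern changed in this commit.

def compiled_msg_before(num: int) -> str:
    # Old style: printf-style interpolation with %d.
    return 'Compiled %d forms' % num

def compiled_msg_after(num: int) -> str:
    # New style: the flynt rewrite -- same output, no pointless int(num).
    return f'Compiled {num} forms'

assert compiled_msg_before(12) == compiled_msg_after(12)
```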
@@ -87,7 +87,7 @@ def build_forms(srcdir, info=None, summary=False, check_for_migration=False, che
 open(compiled_form, 'wb').write(dat)
 num += 1
 if num:
-info('Compiled %d forms' % num)
+info(f'Compiled {num} forms')
 if check_icons:
 resource_dir = os.path.join(os.path.dirname(srcdir), 'resources')
 ensure_icons_built(resource_dir, force_compile, info)
@@ -808,7 +808,7 @@ def initialize_plugins(perf=False):
 sys.stdout, sys.stderr = ostdout, ostderr
 if perf:
 for x in sorted(times, key=lambda x: times[x]):
-print('%50s: %.3f'%(x, times[x]))
+print(f'{x:50}: {times[x]:.3f}')
 _initialized_plugins.sort(key=lambda x: x.priority, reverse=True)
 reread_filetype_plugins()
 reread_metadata_plugins()
@@ -350,7 +350,7 @@ class CalibrePluginFinder:
 c = 0
 while True:
 c += 1
-plugin_name = 'dummy%d'%c
+plugin_name = f'dummy{c}'
 if plugin_name not in self.loaded_plugins:
 break
 else:
@@ -93,7 +93,7 @@ def get_data_as_dict(self, prefix=None, authors_as_string=False, ids=None, conve
 'languages'}.union(set(fdata))
 for x, data in iteritems(fdata):
 if data['datatype'] == 'series':
-FIELDS.add('%d_index'%x)
+FIELDS.add(f'{x}_index')
 data = []
 for record in self.data:
 if record is None:
@@ -614,11 +614,11 @@ class DB:
 from calibre.library.coloring import migrate_old_rule
 old_rules = []
 for i in range(1, 6):
-col = self.prefs.get('column_color_name_%d' % i, None)
-templ = self.prefs.get('column_color_template_%d' % i, None)
+col = self.prefs.get(f'column_color_name_{i}', None)
+templ = self.prefs.get(f'column_color_template_{i}', None)
 if col and templ:
 try:
-del self.prefs['column_color_name_%d' % i]
+del self.prefs[f'column_color_name_{i}']
 rules = migrate_old_rule(self.field_metadata, templ)
 for templ in rules:
 old_rules.append((col, templ))
@@ -1410,7 +1410,7 @@ class DB:
 with closing(Connection(tmpdb)) as conn:
 shell = Shell(db=conn, encoding='utf-8')
 shell.process_command('.read ' + fname.replace(os.sep, '/'))
-conn.execute('PRAGMA user_version=%d;'%uv)
+conn.execute(f'PRAGMA user_version={uv};')

 self.close(unload_formatter_functions=False)
 try:
@@ -1495,7 +1495,7 @@ class DB:
 # windows).
 l = (self.PATH_LIMIT - (extlen // 2) - 2) if iswindows else ((self.PATH_LIMIT - extlen - 2) // 2)
 if l < 5:
-raise ValueError('Extension length too long: %d' % extlen)
+raise ValueError(f'Extension length too long: {extlen}')
 author = ascii_filename(author)[:l]
 title = ascii_filename(title.lstrip())[:l].rstrip()
 if not title:
@@ -1510,7 +1510,7 @@ class DB:
 # Database layer API {{{

 def custom_table_names(self, num):
-return 'custom_column_%d'%num, 'books_custom_column_%d_link'%num
+return f'custom_column_{num}', f'books_custom_column_{num}_link'

 @property
 def custom_tables(self):
@@ -1628,7 +1628,7 @@ class DB:
 def format_hash(self, book_id, fmt, fname, path):
 path = self.format_abspath(book_id, fmt, fname, path)
 if path is None:
-raise NoSuchFormat('Record %d has no fmt: %s'%(book_id, fmt))
+raise NoSuchFormat(f'Record {book_id} has no fmt: {fmt}')
 sha = hashlib.sha256()
 with open(path, 'rb') as f:
 while True:
@@ -992,7 +992,7 @@ class Cache:
 name = self.fields['formats'].format_fname(book_id, fmt)
 path = self._field_for('path', book_id).replace('/', os.sep)
 except:
-raise NoSuchFormat('Record %d has no fmt: %s'%(book_id, fmt))
+raise NoSuchFormat(f'Record {book_id} has no fmt: {fmt}')
 return self.backend.format_hash(book_id, fmt, name, path)

 @api
@@ -1222,7 +1222,7 @@ class Cache:
 name = self.fields['formats'].format_fname(book_id, fmt)
 path = self._field_for('path', book_id).replace('/', os.sep)
 except (KeyError, AttributeError):
-raise NoSuchFormat('Record %d has no %s file'%(book_id, fmt))
+raise NoSuchFormat(f'Record {book_id} has no {fmt} file')

 return self.backend.copy_format_to(book_id, fmt, name, path, dest,
 use_hardlink=use_hardlink, report_file_size=report_file_size)
@@ -2374,7 +2374,7 @@ class Cache:
 removed. '''
 missing = frozenset(val_map) - self._all_book_ids()
 if missing:
-raise ValueError('add_custom_book_data: no such book_ids: %d'%missing)
+raise ValueError(f'add_custom_book_data: no such book_ids: {missing}')
 self.backend.add_custom_data(name, val_map, delete_first)

 @read_api
@@ -43,5 +43,5 @@ def main(opts, args, dbctx):
 prints(pformat(data))
 print('\n')
 else:
-prints(col, '(%d)'%data['num'])
+prints(col, f"({data['num']})")
 return 0
@@ -654,7 +654,7 @@ class LibraryDatabase:
 book_id = index if index_is_id else self.id(index)
 ans = self.new_api.format_abspath(book_id, fmt)
 if ans is None:
-raise NoSuchFormat('Record %d has no format: %s'%(book_id, fmt))
+raise NoSuchFormat(f'Record {book_id} has no format: {fmt}')
 return ans

 def format_files(self, index, index_is_id=False):
@@ -23,13 +23,13 @@ class SchemaUpgrade:
 try:
 while True:
 uv = next(self.db.execute('pragma user_version'))[0]
-meth = getattr(self, 'upgrade_version_%d'%uv, None)
+meth = getattr(self, f'upgrade_version_{uv}', None)
 if meth is None:
 break
 else:
-prints('Upgrading database to version %d...'%(uv+1))
+prints(f'Upgrading database to version {uv + 1}...')
 meth()
-self.db.execute('pragma user_version=%d'%(uv+1))
+self.db.execute(f'pragma user_version={uv + 1}')
 except:
 self.db.execute('ROLLBACK')
 raise
@@ -279,7 +279,7 @@ class FilesystemTest(BaseTest):
 self.assertFalse(importer.corrupted_files)
 self.assertEqual(cache.all_book_ids(), ic.all_book_ids())
 for book_id in cache.all_book_ids():
-self.assertEqual(cache.cover(book_id), ic.cover(book_id), 'Covers not identical for book: %d' % book_id)
+self.assertEqual(cache.cover(book_id), ic.cover(book_id), f'Covers not identical for book: {book_id}')
 for fmt in cache.formats(book_id):
 self.assertEqual(cache.format(book_id, fmt), ic.format(book_id, fmt))
 self.assertEqual(cache.format_metadata(book_id, fmt)['mtime'], cache.format_metadata(book_id, fmt)['mtime'])
@@ -544,7 +544,7 @@ class LegacyTest(BaseTest):
 n = now()
 ndb = self.init_legacy(self.cloned_library)
 amap = ndb.new_api.get_id_map('authors')
-sorts = [(aid, 's%d' % aid) for aid in amap]
+sorts = [(aid, f's{aid}') for aid in amap]
 db = self.init_old(self.cloned_library)
 run_funcs(self, db, ndb, (
 ('+format_metadata', 1, 'FMT1', itemgetter('size')),
@@ -126,8 +126,7 @@ class ReadingTest(BaseTest):
 if isinstance(val, tuple) and 'authors' not in field and 'languages' not in field:
 val, expected_val = set(val), set(expected_val)
 self.assertEqual(expected_val, val,
-'Book id: %d Field: %s failed: %r != %r'%(
-book_id, field, expected_val, val))
+f'Book id: {book_id} Field: {field} failed: {expected_val!r} != {val!r}')
 # }}}

 def test_sorting(self): # {{{
@@ -206,7 +205,7 @@ class ReadingTest(BaseTest):
 ('title', True)]), 'Subsort failed')
 from calibre.ebooks.metadata.book.base import Metadata
 for i in range(7):
-cache.create_book_entry(Metadata('title%d' % i), apply_import_tags=False)
+cache.create_book_entry(Metadata(f'title{i}'), apply_import_tags=False)
 cache.create_custom_column('one', 'CC1', 'int', False)
 cache.create_custom_column('two', 'CC2', 'int', False)
 cache.create_custom_column('three', 'CC3', 'int', False)
@@ -27,7 +27,7 @@ class UtilsTest(BaseTest):
 total = 0
 for i in range(1, num+1):
 sz = i * 1000
-c.insert(i, i, (('%d'%i) * sz).encode('ascii'))
+c.insert(i, i, (f'{i}' * sz).encode('ascii'))
 total += sz
 return total
@@ -44,7 +44,7 @@ class UtilsTest(BaseTest):
 for i in (3, 4, 2, 5, 1):
 data, ts = c[i]
 self.assertEqual(i, ts, 'timestamp not correct')
-self.assertEqual((('%d'%i) * (i*1000)).encode('ascii'), data)
+self.assertEqual((f'{i}' * (i*1000)).encode('ascii'), data)
 c.set_group_id('a')
 self.basic_fill(c)
 order = tuple(c.items)
@@ -387,7 +387,7 @@ class WritingTest(BaseTest):
 for book_id in book_ids:
 raw = cache.read_backup(book_id)
 opf = OPF(BytesIO(raw))
-ae(opf.title, 'title%d'%book_id)
+ae(opf.title, f'title{book_id}')
 ae(opf.authors, ['author1', 'author2'])
 tested_fields = 'title authors tags'.split()
 before = {f:cache.all_field_for(f, book_ids) for f in tested_fields}
@@ -439,9 +439,9 @@ class WritingTest(BaseTest):
 ae(cache.set_cover({bid:img for bid in (1, 2, 3)}), {1, 2, 3})
 old = self.init_old()
 for book_id in (1, 2, 3):
-ae(cache.cover(book_id), img, 'Cover was not set correctly for book %d' % book_id)
+ae(cache.cover(book_id), img, f'Cover was not set correctly for book {book_id}')
 ae(cache.field_for('cover', book_id), 1)
-ae(old.cover(book_id, index_is_id=True), img, 'Cover was not set correctly for book %d' % book_id)
+ae(old.cover(book_id, index_is_id=True), img, f'Cover was not set correctly for book {book_id}')
 self.assertTrue(old.has_cover(book_id))
 old.close()
 old.break_cycles()
@@ -771,9 +771,9 @@ class WritingTest(BaseTest):
 conn.execute('INSERT INTO publishers (name) VALUES ("MŪS")')
 uid = conn.last_insert_rowid()
 conn.execute('DELETE FROM books_publishers_link')
-conn.execute('INSERT INTO books_publishers_link (book,publisher) VALUES (1, %d)' % lid)
-conn.execute('INSERT INTO books_publishers_link (book,publisher) VALUES (2, %d)' % uid)
-conn.execute('INSERT INTO books_publishers_link (book,publisher) VALUES (3, %d)' % uid)
+conn.execute(f'INSERT INTO books_publishers_link (book,publisher) VALUES (1, {lid})')
+conn.execute(f'INSERT INTO books_publishers_link (book,publisher) VALUES (2, {uid})')
+conn.execute(f'INSERT INTO books_publishers_link (book,publisher) VALUES (3, {uid})')
 cache.reload_from_db()
 t = cache.fields['publisher'].table
 for x in (lid, uid):
@@ -295,9 +295,7 @@ class ThumbnailCache:
 self._load_index()
 self._invalidate_sizes()
 ts = (f'{timestamp:.2f}').replace('.00', '')
-path = '%s%s%s%s%d-%s-%d-%dx%d' % (
-self.group_id, os.sep, book_id % 100, os.sep,
-book_id, ts, len(data), self.thumbnail_size[0], self.thumbnail_size[1])
+path = f'{self.group_id}{os.sep}{book_id % 100}{os.sep}{book_id}-{ts}-{len(data)}-{self.thumbnail_size[0]}x{self.thumbnail_size[1]}'
 path = os.path.join(self.location, path)
 key = (self.group_id, book_id)
 e = self.items.pop(key, None)
@@ -371,7 +369,7 @@ class ThumbnailCache:
 self._remove((self.group_id, book_id))
 elif os.path.exists(self.location):
 try:
-raw = '\n'.join('%s %d' % (self.group_id, book_id) for book_id in book_ids)
+raw = '\n'.join(f'{self.group_id} {book_id}' for book_id in book_ids)
 with open(os.path.join(self.location, 'invalidate'), 'ab') as f:
 f.write(raw.encode('ascii'))
 except OSError as err:
@@ -153,7 +153,7 @@ class Bookmark: # {{{
 marker_found = 0
 text = ''
 search_str1 = f'{mi.title}'
-search_str2 = '- Highlight Loc. %d' % (displayed_location)
+search_str2 = f'- Highlight Loc. {displayed_location}'
 for line in f2:
 if marker_found == 0:
 if line.startswith(search_str1):
@@ -830,7 +830,7 @@ class KOBO(USBMS):
 cursor.close()

 def set_readstatus(self, connection, ContentID, ReadStatus):
-debug_print('Kobo::set_readstatus - ContentID=%s, ReadStatus=%d' % (ContentID, ReadStatus))
+debug_print(f'Kobo::set_readstatus - ContentID={ContentID}, ReadStatus={ReadStatus}')
 cursor = connection.cursor()
 t = (ContentID,)
 cursor.execute('select DateLastRead, ReadStatus from Content where BookID is Null and ContentID = ?', t)
@@ -851,7 +851,7 @@ class KOBO(USBMS):
 t = (ReadStatus, datelastread, ContentID,)

 try:
-debug_print('Kobo::set_readstatus - Making change - ContentID=%s, ReadStatus=%d, DateLastRead=%s' % (ContentID, ReadStatus, datelastread))
+debug_print(f'Kobo::set_readstatus - Making change - ContentID={ContentID}, ReadStatus={ReadStatus}, DateLastRead={datelastread}')
 cursor.execute("update content set ReadStatus=?,FirstTimeReading='false',DateLastRead=? where BookID is Null and ContentID = ?", t)
 except:
 debug_print('    Database Exception:  Unable to update ReadStatus')
@ -1742,8 +1742,7 @@ class KOBOTOUCH(KOBO):
|
||||
if show_debug:
|
||||
debug_print(f"KoboTouch:update_booklist - title='{title}'", f'ContentType={ContentType}', 'isdownloaded=', isdownloaded)
|
||||
debug_print(
|
||||
' prefix=%s, DateCreated=%s, readstatus=%d, MimeType=%s, expired=%d, favouritesindex=%d, accessibility=%d, isdownloaded=%s'%
|
||||
(prefix, DateCreated, readstatus, MimeType, expired, favouritesindex, accessibility, isdownloaded,))
|
||||
f' prefix={prefix}, DateCreated={DateCreated}, readstatus={readstatus}, MimeType={MimeType}, expired={expired}, favouritesindex={favouritesindex}, accessibility={accessibility}, isdownloaded={isdownloaded}')
|
||||
changed = False
|
||||
try:
|
||||
lpath = path.partition(self.normalize_path(prefix))[2]
|
||||
@ -1845,7 +1844,7 @@ class KOBOTOUCH(KOBO):
|
||||
if idx is not None: # and not (accessibility == 1 and isdownloaded == 'false'):
|
||||
if show_debug:
|
||||
self.debug_index = idx
|
||||
debug_print('KoboTouch:update_booklist - idx=%d'%idx)
|
||||
debug_print(f'KoboTouch:update_booklist - idx={idx}')
|
||||
debug_print(f'KoboTouch:update_booklist - lpath={lpath}')
|
||||
debug_print('KoboTouch:update_booklist - bl[idx].device_collections=', bl[idx].device_collections)
|
||||
debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map)
|
||||
@ -2090,7 +2089,7 @@ class KOBOTOUCH(KOBO):
|
||||
# self.report_progress((i) / float(books_on_device), _('Getting list of books on device...'))
|
||||
show_debug = self.is_debugging_title(row['Title'])
|
||||
if show_debug:
|
||||
debug_print('KoboTouch:books - looping on database - row=%d' % i)
|
||||
debug_print(f'KoboTouch:books - looping on database - row={i}')
|
||||
debug_print("KoboTouch:books - title='{}'".format(row['Title']), 'authors=', row['Attribution'])
|
||||
debug_print('KoboTouch:books - row=', row)
|
||||
if not hasattr(row['ContentID'], 'startswith') or row['ContentID'].lower().startswith(
|
||||
@ -2506,7 +2505,7 @@ class KOBOTOUCH(KOBO):
|
||||
if self._card_a_prefix is not None:
|
||||
ContentID = ContentID.replace(self._card_a_prefix, 'file:///mnt/sd/')
|
||||
else: # ContentType = 16
|
||||
debug_print("KoboTouch:contentid_from_path ContentType other than 6 - ContentType='%d'"%ContentType, f"path='{path}'")
|
||||
debug_print(f"KoboTouch:contentid_from_path ContentType other than 6 - ContentType='{ContentType}'", f"path='{path}'")
|
||||
ContentID = path
|
||||
ContentID = ContentID.replace(self._main_prefix, 'file:///mnt/onboard/')
|
||||
if self._card_a_prefix is not None:
|
||||
@ -2720,9 +2719,8 @@ class KOBOTOUCH(KOBO):
|
||||
if not prefs['manage_device_metadata'] == 'manual' and delete_empty_collections:
|
||||
debug_print('KoboTouch:update_device_database_collections - about to clear empty bookshelves')
|
||||
self.delete_empty_bookshelves(connection)
|
||||
debug_print('KoboTouch:update_device_database_collections - Number of series set=%d Number of books=%d' % (self.series_set, books_in_library))
|
||||
debug_print('KoboTouch:update_device_database_collections - Number of core metadata set=%d Number of books=%d' % (
|
||||
self.core_metadata_set, books_in_library))
|
||||
debug_print(f'KoboTouch:update_device_database_collections - Number of series set={self.series_set} Number of books={books_in_library}')
|
||||
debug_print(f'KoboTouch:update_device_database_collections - Number of core metadata set={self.core_metadata_set} Number of books={books_in_library}')
|
||||
|
||||
self.dump_bookshelves(connection)
|
||||
|
||||
@ -2916,8 +2914,7 @@ class KOBOTOUCH(KOBO):
|
||||
for ending, cover_options in self.cover_file_endings().items():
|
||||
kobo_size, min_dbversion, max_dbversion, is_full_size = cover_options
|
||||
if show_debug:
|
||||
debug_print('KoboTouch:_upload_cover - library_cover_size=%s -> kobo_size=%s, min_dbversion=%d max_dbversion=%d, is_full_size=%s' % (
|
||||
library_cover_size, kobo_size, min_dbversion, max_dbversion, is_full_size))
|
||||
debug_print(f'KoboTouch:_upload_cover - library_cover_size={library_cover_size} -> kobo_size={kobo_size}, min_dbversion={min_dbversion} max_dbversion={max_dbversion}, is_full_size={is_full_size}')
|
||||
|
||||
if self.dbversion >= min_dbversion and self.dbversion <= max_dbversion:
|
||||
if show_debug:
|
||||
@ -4229,7 +4226,7 @@ class KOBOTOUCH(KOBO):
|
||||
if i == 0:
|
||||
prints('No shelves found!!')
|
||||
else:
|
||||
prints('Number of shelves=%d'%i)
|
||||
prints(f'Number of shelves={i}')
|
||||
|
||||
prints('\nBooks on shelves on device:')
|
||||
cursor.execute(shelfcontent_query)
|
||||
@ -4241,7 +4238,7 @@ class KOBOTOUCH(KOBO):
|
||||
if i == 0:
|
||||
prints('No books are on any shelves!!')
|
||||
else:
|
||||
prints('Number of shelved books=%d'%i)
|
||||
prints(f'Number of shelved books={i}')
|
||||
|
||||
cursor.close()
|
||||
debug_print('KoboTouch:dump_bookshelves - end')
|
||||
|
@ -218,7 +218,7 @@ class MTP_DEVICE(MTPDeviceBase):
|
||||
self.dev = self._filesystem_cache = None
|
||||
|
||||
def format_errorstack(self, errs):
|
||||
return '\n'.join('%d:%s'%(code, as_unicode(msg)) for code, msg in errs)
|
||||
return '\n'.join(f'{code}:{as_unicode(msg)}' for code, msg in errs)
|
||||
|
||||
@synchronous
|
||||
def open(self, connected_device, library_uuid):
|
||||
|
@ -122,7 +122,7 @@ class PALADIN(USBMS):
|
||||
|
||||
try:
|
||||
device_offset = max(time_offsets, key=lambda a: time_offsets.get(a))
|
||||
debug_print('Device Offset: %d ms'%device_offset)
|
||||
debug_print(f'Device Offset: {device_offset} ms')
|
||||
self.device_offset = device_offset
|
||||
except ValueError:
|
||||
debug_print('No Books To Detect Device Offset.')
|
||||
@ -249,7 +249,7 @@ class PALADIN(USBMS):
|
||||
sequence_max = sequence_min
|
||||
sequence_dirty = 0
|
||||
|
||||
debug_print('Book Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
|
||||
debug_print(f'Book Sequence Min: {sequence_min}, Source Id: {source_id}')
|
||||
|
||||
try:
|
||||
cursor = connection.cursor()
|
||||
@ -283,7 +283,7 @@ class PALADIN(USBMS):
|
||||
|
||||
# If the database is 'dirty', then we should fix up the Ids and the sequence number
|
||||
if sequence_dirty == 1:
|
||||
debug_print('Book Sequence Dirty for Source Id: %d'%source_id)
|
||||
debug_print(f'Book Sequence Dirty for Source Id: {source_id}')
|
||||
sequence_max = sequence_max + 1
|
||||
for book, bookId in db_books.items():
|
||||
if bookId < sequence_min:
|
||||
@ -302,7 +302,7 @@ class PALADIN(USBMS):
|
||||
cursor.execute(query, t)
|
||||
|
||||
self.set_database_sequence_id(connection, 'books', sequence_max)
|
||||
debug_print('Book Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
|
||||
debug_print(f'Book Sequence Max: {sequence_max}, Source Id: {source_id}')
|
||||
|
||||
cursor.close()
|
||||
return db_books
|
||||
@ -355,7 +355,7 @@ class PALADIN(USBMS):
|
||||
book.mime or mime_type_ext(path_to_ext(lpath)))
|
||||
cursor.execute(query, t)
|
||||
book.bookId = connection.last_insert_rowid()
|
||||
debug_print('Inserted New Book: (%u) '%book.bookId + book.title)
|
||||
debug_print(f'Inserted New Book: ({book.bookId}) ' + book.title)
|
||||
else:
|
||||
query = '''
|
||||
UPDATE books
|
||||
@ -386,7 +386,7 @@ class PALADIN(USBMS):
|
||||
sequence_max = sequence_min
|
||||
sequence_dirty = 0
|
||||
|
||||
debug_print('Collection Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
|
||||
debug_print(f'Collection Sequence Min: {sequence_min}, Source Id: {source_id}')
|
||||
|
||||
try:
|
||||
cursor = connection.cursor()
|
||||
@ -415,7 +415,7 @@ class PALADIN(USBMS):
|
||||
|
||||
# If the database is 'dirty', then we should fix up the Ids and the sequence number
|
||||
if sequence_dirty == 1:
|
||||
debug_print('Collection Sequence Dirty for Source Id: %d'%source_id)
|
||||
debug_print(f'Collection Sequence Dirty for Source Id: {source_id}')
|
||||
sequence_max = sequence_max + 1
|
||||
for collection, collectionId in db_collections.items():
|
||||
if collectionId < sequence_min:
|
||||
@ -434,13 +434,13 @@ class PALADIN(USBMS):
|
||||
cursor.execute(query, t)
|
||||
|
||||
self.set_database_sequence_id(connection, 'tags', sequence_max)
|
||||
debug_print('Collection Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
|
||||
debug_print(f'Collection Sequence Max: {sequence_max}, Source Id: {source_id}')
|
||||
|
||||
# Fix up the collections table now...
|
||||
sequence_dirty = 0
|
||||
sequence_max = sequence_min
|
||||
|
||||
debug_print('Collections Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
|
||||
debug_print(f'Collections Sequence Min: {sequence_min}, Source Id: {source_id}')
|
||||
|
||||
query = 'SELECT _id FROM booktags'
|
||||
cursor.execute(query)
|
||||
@ -454,7 +454,7 @@ class PALADIN(USBMS):
|
||||
sequence_max = max(sequence_max, row[0])
|
||||
|
||||
if sequence_dirty == 1:
|
||||
debug_print('Collections Sequence Dirty for Source Id: %d'%source_id)
|
||||
debug_print(f'Collections Sequence Dirty for Source Id: {source_id}')
|
||||
sequence_max = sequence_max + 1
|
||||
for pairId in db_collection_pairs:
|
||||
if pairId < sequence_min:
|
||||
@ -465,7 +465,7 @@ class PALADIN(USBMS):
|
||||
sequence_max = sequence_max + 1
|
||||
|
||||
self.set_database_sequence_id(connection, 'booktags', sequence_max)
|
||||
debug_print('Collections Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
|
||||
debug_print(f'Collections Sequence Max: {sequence_max}, Source Id: {source_id}')
|
||||
|
||||
cursor.close()
|
||||
return db_collections
|
||||
@ -483,7 +483,7 @@ class PALADIN(USBMS):
|
||||
t = (collection,)
|
||||
cursor.execute(query, t)
|
||||
db_collections[collection] = connection.last_insert_rowid()
|
||||
debug_print('Inserted New Collection: (%u) '%db_collections[collection] + collection)
|
||||
debug_print(f'Inserted New Collection: ({db_collections[collection]}) ' + collection)
|
||||
|
||||
# Get existing books in collection
|
||||
query = '''
|
||||
|
@ -434,8 +434,7 @@ class XMLCache:
|
||||
book.lpath, book.thumbnail)
|
||||
self.periodicalize_book(book, ext_record)
|
||||
|
||||
debug_print('Timezone votes: %d GMT, %d LTZ, use_tz_var=%s'%
|
||||
(gtz_count, ltz_count, use_tz_var))
|
||||
debug_print(f'Timezone votes: {gtz_count} GMT, {ltz_count} LTZ, use_tz_var={use_tz_var}')
|
||||
self.update_playlists(i, root, booklist, collections_attributes)
|
||||
# Update the device collections because update playlist could have added
|
||||
# some new ones.
|
||||
|
@ -210,7 +210,7 @@ class PRST1(USBMS):
|
||||
|
||||
try:
|
||||
device_offset = max(time_offsets, key=lambda a: time_offsets.get(a))
|
||||
debug_print('Device Offset: %d ms'%device_offset)
|
||||
debug_print(f'Device Offset: {device_offset} ms')
|
||||
self.device_offset = device_offset
|
||||
except ValueError:
|
||||
debug_print('No Books To Detect Device Offset.')
|
||||
@ -362,7 +362,7 @@ class PRST1(USBMS):
|
||||
sequence_max = sequence_min
|
||||
sequence_dirty = 0
|
||||
|
||||
debug_print('Book Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
|
||||
debug_print(f'Book Sequence Min: {sequence_min}, Source Id: {source_id}')
|
||||
|
||||
try:
|
||||
cursor = connection.cursor()
|
||||
@ -396,7 +396,7 @@ class PRST1(USBMS):
|
||||
|
||||
# If the database is 'dirty', then we should fix up the Ids and the sequence number
|
||||
if sequence_dirty == 1:
|
||||
debug_print('Book Sequence Dirty for Source Id: %d'%source_id)
|
||||
debug_print(f'Book Sequence Dirty for Source Id: {source_id}')
|
||||
sequence_max = sequence_max + 1
|
||||
for book, bookId in db_books.items():
|
||||
if bookId < sequence_min:
|
||||
@ -433,7 +433,7 @@ class PRST1(USBMS):
|
||||
cursor.execute(query, t)
|
||||
|
||||
self.set_database_sequence_id(connection, 'books', sequence_max)
|
||||
debug_print('Book Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
|
||||
debug_print(f'Book Sequence Max: {sequence_max}, Source Id: {source_id}')
|
||||
|
||||
cursor.close()
|
||||
return db_books
|
||||
@ -495,7 +495,7 @@ class PRST1(USBMS):
|
||||
book.bookId = self.get_lastrowid(cursor)
|
||||
if upload_covers:
|
||||
self.upload_book_cover(connection, book, source_id)
|
||||
debug_print('Inserted New Book: (%u) '%book.bookId + book.title)
|
||||
debug_print(f'Inserted New Book: ({book.bookId}) ' + book.title)
|
||||
else:
|
||||
query = '''
|
||||
UPDATE books
|
||||
@ -534,7 +534,7 @@ class PRST1(USBMS):
|
||||
sequence_max = sequence_min
|
||||
sequence_dirty = 0
|
||||
|
||||
debug_print('Collection Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
|
||||
debug_print(f'Collection Sequence Min: {sequence_min}, Source Id: {source_id}')
|
||||
|
||||
try:
|
||||
cursor = connection.cursor()
|
||||
@ -563,7 +563,7 @@ class PRST1(USBMS):
|
||||
|
||||
# If the database is 'dirty', then we should fix up the Ids and the sequence number
|
||||
if sequence_dirty == 1:
|
||||
debug_print('Collection Sequence Dirty for Source Id: %d'%source_id)
|
||||
debug_print(f'Collection Sequence Dirty for Source Id: {source_id}')
|
||||
sequence_max = sequence_max + 1
|
||||
for collection, collectionId in db_collections.items():
|
||||
if collectionId < sequence_min:
|
||||
@ -582,13 +582,13 @@ class PRST1(USBMS):
|
||||
cursor.execute(query, t)
|
||||
|
||||
self.set_database_sequence_id(connection, 'collection', sequence_max)
|
||||
debug_print('Collection Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
|
||||
debug_print(f'Collection Sequence Max: {sequence_max}, Source Id: {source_id}')
|
||||
|
||||
# Fix up the collections table now...
|
||||
sequence_dirty = 0
|
||||
sequence_max = sequence_min
|
||||
|
||||
debug_print('Collections Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
|
||||
debug_print(f'Collections Sequence Min: {sequence_min}, Source Id: {source_id}')
|
||||
|
||||
query = 'SELECT _id FROM collections'
|
||||
cursor.execute(query)
|
||||
@ -602,7 +602,7 @@ class PRST1(USBMS):
|
||||
sequence_max = max(sequence_max, row[0])
|
||||
|
||||
if sequence_dirty == 1:
|
||||
debug_print('Collections Sequence Dirty for Source Id: %d'%source_id)
|
||||
debug_print(f'Collections Sequence Dirty for Source Id: {source_id}')
|
||||
sequence_max = sequence_max + 1
|
||||
for pairId in db_collection_pairs:
|
||||
if pairId < sequence_min:
|
||||
@ -613,7 +613,7 @@ class PRST1(USBMS):
|
||||
sequence_max = sequence_max + 1
|
||||
|
||||
self.set_database_sequence_id(connection, 'collections', sequence_max)
|
||||
debug_print('Collections Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
|
||||
debug_print(f'Collections Sequence Max: {sequence_max}, Source Id: {source_id}')
|
||||
|
||||
cursor.close()
|
||||
return db_collections
|
||||
@ -631,7 +631,7 @@ class PRST1(USBMS):
|
||||
t = (collection, source_id)
|
||||
cursor.execute(query, t)
|
||||
db_collections[collection] = self.get_lastrowid(cursor)
|
||||
debug_print('Inserted New Collection: (%u) '%db_collections[collection] + collection)
|
||||
debug_print(f'Inserted New Collection: ({db_collections[collection]}) ' + collection)
|
||||
|
||||
# Get existing books in collection
|
||||
query = '''
|
||||
|
@ -219,8 +219,7 @@ def test_for_mem_leak():
|
||||
for i in range(3):
|
||||
gc.collect()
|
||||
usedmem = memory(startmem)
|
||||
prints('Memory used in %d repetitions of scan(): %.5f KB'%(reps,
|
||||
1024*usedmem))
|
||||
prints(f'Memory used in {reps} repetitions of scan(): {1024 * usedmem:.5f} KB')
|
||||
prints('Differences in python object counts:')
|
||||
diff_hists(h1, gc_histogram())
|
||||
prints()
|
||||
|
@ -853,7 +853,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
|
||||
json_metadata[key]['book'] = self.json_codec.encode_book_metadata(book['book'])
|
||||
json_metadata[key]['last_used'] = book['last_used']
|
||||
result = as_bytes(json.dumps(json_metadata, indent=2, default=to_json))
|
||||
fd.write(('%0.7d\n'%(len(result)+1)).encode('ascii'))
|
||||
fd.write(f'{len(result) + 1:007}\n'.encode('ascii'))
|
||||
fd.write(result)
|
||||
fd.write(b'\n')
|
||||
count += 1
|
||||
@ -1943,7 +1943,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
|
||||
try:
|
||||
self.listen_socket.listen(1)
|
||||
except:
|
||||
message = 'listen on port %d failed' % port
|
||||
message = f'listen on port {port} failed'
|
||||
self._debug(message)
|
||||
self._close_listen_socket()
|
||||
return message
|
||||
|
@ -28,7 +28,7 @@ def node_mountpoint(node):
|
||||
|
||||
|
||||
def basic_mount_options():
|
||||
return ['rw', 'noexec', 'nosuid', 'nodev', 'uid=%d'%os.geteuid(), 'gid=%d'%os.getegid()]
|
||||
return ['rw', 'noexec', 'nosuid', 'nodev', f'uid={os.geteuid()}', f'gid={os.getegid()}']
|
||||
|
||||
|
||||
class UDisks:
|
||||
|
@ -597,7 +597,7 @@ class Device(DeviceConfig, DevicePlugin):
|
||||
continue
|
||||
mp, ret = mount(card, typ)
|
||||
if mp is None:
|
||||
print('Unable to mount card (Error code: %d)'%ret, file=sys.stderr)
|
||||
print(f'Unable to mount card (Error code: {ret})', file=sys.stderr)
|
||||
else:
|
||||
if not mp.endswith('/'):
|
||||
mp += '/'
|
||||
|
@ -179,10 +179,7 @@ class USB_DEVICE_DESCRIPTOR(Structure):
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return 'USBDevice(class=0x%x sub_class=0x%x protocol=0x%x vendor_id=0x%x product_id=0x%x bcd=0x%x manufacturer=%d product=%d serial_number=%d)' % (
|
||||
self.bDeviceClass, self.bDeviceSubClass, self.bDeviceProtocol,
|
||||
self.idVendor, self.idProduct, self.bcdDevice, self.iManufacturer,
|
||||
self.iProduct, self.iSerialNumber)
|
||||
return f'USBDevice(class=0x{self.bDeviceClass:x} sub_class=0x{self.bDeviceSubClass:x} protocol=0x{self.bDeviceProtocol:x} vendor_id=0x{self.idVendor:x} product_id=0x{self.idProduct:x} bcd=0x{self.bcdDevice:x} manufacturer={self.iManufacturer} product={self.iProduct} serial_number={self.iSerialNumber})'
|
||||
|
||||
|
||||
class USB_ENDPOINT_DESCRIPTOR(Structure):
|
||||
@ -935,7 +932,7 @@ def get_usb_info(usbdev, debug=False): # {{{
|
||||
# randomly after some time of my Kindle being
|
||||
# connected. Disconnecting and reconnecting causes
|
||||
# it to start working again.
|
||||
prints('Failed to read %s from device, with error: [%d] %s' % (name, err.winerror, as_unicode(err)))
|
||||
prints(f'Failed to read {name} from device, with error: [{err.winerror}] {as_unicode(err)}')
|
||||
finally:
|
||||
CloseHandle(handle)
|
||||
return ans
|
||||
|
@ -236,7 +236,7 @@ class PageProcessor(list): # {{{
|
||||
final_fmt = QImage.Format.Format_Indexed8 if uses_256_colors else QImage.Format.Format_Grayscale16
|
||||
if img.format() != final_fmt:
|
||||
img = img.convertToFormat(final_fmt)
|
||||
dest = '%d_%d.%s'%(self.num, i, self.opts.output_format)
|
||||
dest = f'{self.num}_{i}.{self.opts.output_format}'
|
||||
dest = os.path.join(self.dest, dest)
|
||||
with open(dest, 'wb') as f:
|
||||
f.write(image_to_data(img, fmt=self.opts.output_format))
|
||||
|
@ -302,7 +302,7 @@ class ProgressBar:
|
||||
def __call__(self, frac, msg=''):
|
||||
if msg:
|
||||
percent = int(frac*100)
|
||||
self.log('%d%% %s'%(percent, msg))
|
||||
self.log(f'{percent}% {msg}')
|
||||
|
||||
|
||||
def create_option_parser(args, log):
|
||||
|
@ -131,7 +131,7 @@ class CHMInput(InputFormatPlugin):
|
||||
# print('Printing hhcroot')
|
||||
# print(etree.tostring(hhcroot, pretty_print=True))
|
||||
# print('=============================')
|
||||
log.debug('Found %d section nodes' % toc.count())
|
||||
log.debug(f'Found {toc.count()} section nodes')
|
||||
htmlpath = os.path.splitext(hhcpath)[0] + '.html'
|
||||
base = os.path.dirname(os.path.abspath(htmlpath))
|
||||
|
||||
|
@ -175,7 +175,7 @@ class ComicInput(InputFormatPlugin):
|
||||
num_pages_per_comic = []
|
||||
for i, x in enumerate(comics_):
|
||||
title, fname = x
|
||||
cdir = 'comic_%d'%(i+1) if len(comics_) > 1 else '.'
|
||||
cdir = f'comic_{i + 1}' if len(comics_) > 1 else '.'
|
||||
cdir = os.path.abspath(cdir)
|
||||
if not os.path.exists(cdir):
|
||||
os.makedirs(cdir)
|
||||
@ -228,11 +228,11 @@ class ComicInput(InputFormatPlugin):
|
||||
wrapper_page_href = href(wrappers[0])
|
||||
for i in range(num_pages_per_comic[0]):
|
||||
toc.add_item(f'{wrapper_page_href}#page_{i+1}', None,
|
||||
_('Page')+' %d'%(i+1), play_order=i)
|
||||
_('Page')+f' {i + 1}', play_order=i)
|
||||
|
||||
else:
|
||||
for i, x in enumerate(wrappers):
|
||||
toc.add_item(href(x), None, _('Page')+' %d'%(i+1),
|
||||
toc.add_item(href(x), None, _('Page')+f' {i + 1}',
|
||||
play_order=i)
|
||||
else:
|
||||
po = 0
|
||||
@ -246,12 +246,12 @@ class ComicInput(InputFormatPlugin):
|
||||
wrapper_page_href = href(wrappers[0])
|
||||
for i in range(num_pages):
|
||||
stoc.add_item(f'{wrapper_page_href}#page_{i+1}', None,
|
||||
_('Page')+' %d'%(i+1), play_order=po)
|
||||
_('Page')+f' {i + 1}', play_order=po)
|
||||
po += 1
|
||||
else:
|
||||
for i, x in enumerate(wrappers):
|
||||
stoc.add_item(href(x), None,
|
||||
_('Page')+' %d'%(i+1), play_order=po)
|
||||
_('Page')+f' {i + 1}', play_order=po)
|
||||
po += 1
|
||||
opf.set_toc(toc)
|
||||
with open('metadata.opf', 'wb') as m, open('toc.ncx', 'wb') as n:
|
||||
@ -282,7 +282,7 @@ class ComicInput(InputFormatPlugin):
|
||||
dir = os.path.dirname(pages[0])
|
||||
for i, page in enumerate(pages):
|
||||
wrapper = WRAPPER%(XHTML_NS, i+1, os.path.basename(page), i+1)
|
||||
page = os.path.join(dir, 'page_%d.xhtml'%(i+1))
|
||||
page = os.path.join(dir, f'page_{i + 1}.xhtml')
|
||||
with open(page, 'wb') as f:
|
||||
f.write(wrapper.encode('utf-8'))
|
||||
wrappers.append(page)
|
||||
|
@ -41,7 +41,7 @@ class DJVUInput(InputFormatPlugin):
|
||||
c = 0
|
||||
while os.path.exists(htmlfile):
|
||||
c += 1
|
||||
htmlfile = os.path.join(base, 'index%d.html'%c)
|
||||
htmlfile = os.path.join(base, f'index{c}.html')
|
||||
with open(htmlfile, 'wb') as f:
|
||||
f.write(html.encode('utf-8'))
|
||||
odi = options.debug_pipeline
|
||||
|
@ -110,10 +110,10 @@ class FB2Input(InputFormatPlugin):
|
||||
note = notes.get(cite, None)
|
||||
if note:
|
||||
c = 1
|
||||
while 'cite%d' % c in all_ids:
|
||||
while f'cite{c}' in all_ids:
|
||||
c += 1
|
||||
if not note.get('id', None):
|
||||
note.set('id', 'cite%d' % c)
|
||||
note.set('id', f'cite{c}')
|
||||
all_ids.add(note.get('id'))
|
||||
a.set('href', '#{}'.format(note.get('id')))
|
||||
for x in result.xpath('//*[@link_note or @link_cite]'):
|
||||
|
@ -89,7 +89,7 @@ class HTMLZInput(InputFormatPlugin):
|
||||
c = 0
|
||||
while os.path.exists(htmlfile):
|
||||
c += 1
|
||||
htmlfile = 'index%d.html'%c
|
||||
htmlfile = f'index{c}.html'
|
||||
with open(htmlfile, 'wb') as f:
|
||||
f.write(html.encode('utf-8'))
|
||||
odi = options.debug_pipeline
|
||||
|
@ -141,7 +141,7 @@ class RTFInput(InputFormatPlugin):
|
||||
if fmt is None:
|
||||
fmt = 'wmf'
|
||||
count += 1
|
||||
name = '%04d.%s' % (count, fmt)
|
||||
name = f'{count:04}.{fmt}'
|
||||
with open(name, 'wb') as f:
|
||||
f.write(data)
|
||||
imap[count] = name
|
||||
@ -243,7 +243,7 @@ class RTFInput(InputFormatPlugin):
|
||||
if style not in border_styles:
|
||||
border_styles.append(style)
|
||||
idx = border_styles.index(style)
|
||||
cls = 'border_style%d'%idx
|
||||
cls = f'border_style{idx}'
|
||||
style_map[cls] = style
|
||||
elem.set('class', cls)
|
||||
return style_map
|
||||
|
@ -90,7 +90,7 @@ class SNBInput(InputFormatPlugin):
|
||||
for ch in toc.find('.//body'):
|
||||
chapterName = ch.text
|
||||
chapterSrc = ch.get('src')
|
||||
fname = 'ch_%d.htm' % i
|
||||
fname = f'ch_{i}.htm'
|
||||
data = snbFile.GetFileStream('snbc/' + chapterSrc)
|
||||
if data is None:
|
||||
continue
|
||||
|
@ -498,7 +498,7 @@ class HTMLPreProcessor:
|
||||
|
||||
# search / replace using the sr?_search / sr?_replace options
|
||||
for i in range(1, 4):
|
||||
search, replace = 'sr%d_search'%i, 'sr%d_replace'%i
|
||||
search, replace = f'sr{i}_search', f'sr{i}_replace'
|
||||
search_pattern = getattr(self.extra_opts, search, '')
|
||||
replace_txt = getattr(self.extra_opts, replace, '')
|
||||
if search_pattern:
|
||||
@ -559,7 +559,7 @@ class HTMLPreProcessor:
|
||||
name, i = None, 0
|
||||
while not name or os.path.exists(os.path.join(odir, name)):
|
||||
i += 1
|
||||
name = '%04d.html'%i
|
||||
name = f'{i:04}.html'
|
||||
with open(os.path.join(odir, name), 'wb') as f:
|
||||
f.write(raw.encode('utf-8'))
|
||||
|
||||
|
@ -140,7 +140,7 @@ class HeuristicProcessor:
|
||||
name, i = None, 0
|
||||
while not name or os.path.exists(os.path.join(odir, name)):
|
||||
i += 1
|
||||
name = '%04d.html'%i
|
||||
name = f'{i:04}.html'
|
||||
with open(os.path.join(odir, name), 'wb') as f:
|
||||
f.write(raw.encode('utf-8'))
|
||||
|
||||
|
@ -45,8 +45,7 @@ class DjvuChunk:
|
||||
print('found', self.type, self.subtype, pos, self.size)
|
||||
if self.type in b'FORM'.split():
|
||||
if verbose > 0:
|
||||
print('processing substuff %d %d (%x)' % (pos, self.dataend,
|
||||
self.dataend))
|
||||
print(f'processing substuff {pos} {self.dataend} ({self.dataend:x})')
|
||||
numchunks = 0
|
||||
while pos < self.dataend:
|
||||
x = DjvuChunk(buf, pos, start+self.size, verbose=verbose)
|
||||
@ -54,11 +53,10 @@ class DjvuChunk:
|
||||
self._subchunks.append(x)
|
||||
newpos = pos + x.size + x.headersize + (1 if (x.size % 2) else 0)
|
||||
if verbose > 0:
|
||||
print('newpos %d %d (%x, %x) %d' % (newpos, self.dataend,
|
||||
newpos, self.dataend, x.headersize))
|
||||
print(f'newpos {newpos} {self.dataend} ({newpos:x}, {self.dataend:x}) {x.headersize}')
|
||||
pos = newpos
|
||||
if verbose > 0:
|
||||
print(' end of chunk %d (%x)' % (pos, pos))
|
||||
print(f' end of chunk {pos} ({pos:x})')
|
||||
|
||||
def dump(self, verbose=0, indent=1, out=None, txtout=None, maxlevel=100):
|
||||
if out:
|
||||
|
@ -155,7 +155,7 @@ def cleanup_markup(log, root, styles, dest_dir, detect_cover, XPath, uuid):
|
||||
|
||||
# Process dir attributes
|
||||
class_map = dict(itervalues(styles.classes))
|
||||
parents = ('p', 'div') + tuple('h%d' % i for i in range(1, 7))
|
||||
parents = ('p', 'div') + tuple(f'h{i}' for i in range(1, 7))
|
||||
for parent in root.xpath('//*[({})]'.format(' or '.join(f'name()="{t}"' for t in parents))):
|
||||
# Ensure that children of rtl parents that are not rtl have an
|
||||
# explicit dir set. Also, remove dir from children if it is the same as
|
||||
|
@ -110,7 +110,7 @@ class Fields:
|
||||
c = 0
|
||||
while self.index_bookmark_prefix in all_ids:
|
||||
c += 1
|
||||
self.index_bookmark_prefix = self.index_bookmark_prefix.replace('-', '%d-' % c)
|
||||
self.index_bookmark_prefix = self.index_bookmark_prefix.replace('-', f'{c}-')
|
||||
stack = []
|
||||
for elem in self.namespace.XPath(
|
||||
'//*[name()="w:p" or name()="w:r" or'
|
||||
@ -209,7 +209,7 @@ class Fields:
|
||||
def WORD(x):
|
||||
return self.namespace.expand('w:' + x)
|
||||
self.index_bookmark_counter += 1
|
||||
bmark = xe['anchor'] = '%s%d' % (self.index_bookmark_prefix, self.index_bookmark_counter)
|
||||
bmark = xe['anchor'] = f'{self.index_bookmark_prefix}{self.index_bookmark_counter}'
|
||||
p = field.start.getparent()
|
||||
bm = p.makeelement(WORD('bookmarkStart'))
|
||||
bm.set(WORD('id'), bmark), bm.set(WORD('name'), bmark)
|
||||
|
@ -48,7 +48,7 @@ class Footnotes:
|
||||
note = notes.get(fid, None)
|
||||
if note is not None and note.type == 'normal':
|
||||
self.counter += 1
|
||||
anchor = 'note_%d' % self.counter
|
||||
anchor = f'note_{self.counter}'
|
||||
self.notes[anchor] = (str(self.counter), note)
|
||||
return anchor, str(self.counter)
|
||||
return None, None
|
||||
|
@ -183,7 +183,7 @@ class Images:
|
||||
name = base
|
||||
while name in exists:
|
||||
n, e = base.rpartition('.')[0::2]
|
||||
name = '%s-%d.%s' % (n, c, e)
|
||||
name = f'{n}-{c}.{e}'
|
||||
c += 1
|
||||
return name
|
||||
|
||||
@ -191,7 +191,7 @@ class Images:
|
||||
resized, img = resize_to_fit(raw, max_width, max_height)
|
||||
if resized:
|
||||
base, ext = os.path.splitext(base)
|
||||
base = base + '-%dx%d%s' % (max_width, max_height, ext)
|
||||
base = base + f'-{max_width}x{max_height}{ext}'
|
||||
raw = image_to_data(img, fmt=ext[1:])
|
||||
return raw, base, resized
|
||||
|
||||
|
@ -93,7 +93,7 @@ def generate_anchor(name, existing):
|
||||
x = y = 'id_' + re.sub(r'[^0-9a-zA-Z_]', '', ascii_text(name)).lstrip('_')
|
||||
c = 1
|
||||
while y in existing:
|
||||
y = '%s_%d' % (x, c)
|
||||
y = f'{x}_{c}'
|
||||
c += 1
|
||||
return y
|
||||
|
||||
|
@ -40,7 +40,7 @@ def alphabet(val, lower=True):
|
||||
alphabet_map = {
|
||||
'lower-alpha':alphabet, 'upper-alpha':partial(alphabet, lower=False),
|
||||
'lower-roman':lambda x: roman(x).lower(), 'upper-roman':roman,
|
||||
'decimal-leading-zero': lambda x: '0%d' % x
|
||||
'decimal-leading-zero': lambda x: f'0{x}'
|
||||
}
|
||||
|
||||
|
||||
@ -73,7 +73,7 @@ class Level:
|
||||
if x > ilvl or x not in counter:
|
||||
return ''
|
||||
val = counter[x] - (0 if x == ilvl else 1)
|
||||
formatter = alphabet_map.get(self.fmt, lambda x: '%d' % x)
|
||||
formatter = alphabet_map.get(self.fmt, lambda x: f'{x}')
|
||||
return formatter(val)
|
||||
return re.sub(r'%(\d+)', sub, template).rstrip() + '\xa0'
|
||||
|
||||
|
@ -427,7 +427,7 @@ class Styles:
|
||||
ans, _ = self.classes.get(h, (None, None))
|
||||
if ans is None:
|
||||
self.counter[prefix] += 1
|
||||
ans = '%s_%d' % (prefix, self.counter[prefix])
|
||||
ans = f'{prefix}_{self.counter[prefix]}'
|
||||
self.classes[h] = (ans, css)
|
||||
return ans
|
||||
|
||||
|
@ -460,9 +460,9 @@ class Table:
|
||||
return (m - (m % n)) // n
|
||||
if c is not None:
|
||||
odd_column_band = (divisor(c, self.table_style.col_band_size) % 2) == 1
|
||||
overrides.append('band%dVert' % (1 if odd_column_band else 2))
|
||||
overrides.append(f'band{1 if odd_column_band else 2}Vert')
|
||||
odd_row_band = (divisor(r, self.table_style.row_band_size) % 2) == 1
|
||||
overrides.append('band%dHorz' % (1 if odd_row_band else 2))
|
||||
overrides.append(f'band{1 if odd_row_band else 2}Horz')
|
||||
|
||||
# According to the OOXML spec columns should have higher override
|
||||
# priority than rows, but Word seems to do it the other way around.
|
||||
|
@ -518,7 +518,7 @@ class Convert:
|
||||
m = re.match(r'heading\s+(\d+)$', style.style_name or '', re.IGNORECASE)
|
||||
if m is not None:
|
||||
n = min(6, max(1, int(m.group(1))))
|
||||
dest.tag = 'h%d' % n
|
||||
dest.tag = f'h{n}'
|
||||
dest.set('data-heading-level', str(n))
|
||||
|
||||
if style.bidi is True:
|
||||
|
@ -30,7 +30,7 @@ def from_headings(body, log, namespace, num_levels=3):
|
||||
def ensure_id(elem):
|
||||
ans = elem.get('id', None)
|
||||
if not ans:
|
||||
ans = 'toc_id_%d' % (next(idcount) + 1)
|
||||
ans = f'toc_id_{next(idcount) + 1}'
|
||||
elem.set('id', ans)
|
||||
return ans
|
||||
|
||||
|
@ -134,7 +134,7 @@ class DocumentRelationships:
|
||||
def add_relationship(self, target, rtype, target_mode=None):
|
||||
ans = self.get_relationship_id(target, rtype, target_mode)
|
||||
if ans is None:
|
||||
ans = 'rId%d' % (len(self.rmap) + 1)
|
||||
ans = f'rId{len(self.rmap) + 1}'
|
||||
self.rmap[(target, rtype, target_mode)] = ans
|
||||
return ans
|
||||
|
||||
|
@ -67,8 +67,8 @@ class FontsManager:
|
||||
item = ef['item']
|
||||
rid = rel_map.get(item)
|
||||
if rid is None:
|
||||
rel_map[item] = rid = 'rId%d' % num
|
||||
fname = 'fonts/font%d.odttf' % num
|
||||
rel_map[item] = rid = f'rId{num}'
|
||||
fname = f'fonts/font{num}.odttf'
|
||||
makeelement(embed_relationships, 'Relationship', Id=rid, Type=self.namespace.names['EMBEDDED_FONT'], Target=fname)
|
||||
font_data_map['word/' + fname] = obfuscate_font_data(item.data, key)
|
||||
makeelement(font, 'w:embed' + tag, r_id=rid,
|
||||
|
@ -92,7 +92,7 @@ class LinksManager:
|
||||
i, bname = 0, name
|
||||
while name in self.used_bookmark_names:
|
||||
i += 1
|
||||
name = bname + ('_%d' % i)
|
||||
name = bname + f'_{i}'
|
||||
self.anchor_map[key] = name
|
||||
self.used_bookmark_names.add(name)
|
||||
return name
|
||||
|
@ -84,7 +84,7 @@ class NumberingDefinition:
|
||||
makeelement = self.namespace.makeelement
|
||||
an = makeelement(parent, 'w:abstractNum', w_abstractNumId=str(self.num_id))
|
||||
makeelement(an, 'w:multiLevelType', w_val='hybridMultilevel')
|
||||
makeelement(an, 'w:name', w_val='List %d' % (self.num_id + 1))
|
||||
makeelement(an, 'w:name', w_val=f'List {self.num_id + 1}')
|
||||
for level in self.levels:
|
||||
level.serialize(an, makeelement)
|
||||
|
||||
|
@ -744,7 +744,7 @@ class StylesManager:
|
||||
if style.outline_level is None:
|
||||
val = f'Para %0{snum}d' % i
|
||||
else:
|
||||
val = 'Heading %d' % (style.outline_level + 1)
|
||||
val = f'Heading {style.outline_level + 1}'
|
||||
heading_styles.append(style)
|
||||
style.id = style.name = val
|
||||
style.seq = i
|
||||
@ -764,7 +764,7 @@ class StylesManager:
|
||||
ds_counts[run.descendant_style] += run.style_weight
|
||||
rnum = len(str(max(1, len(ds_counts) - 1)))
|
||||
for i, (text_style, count) in enumerate(ds_counts.most_common()):
|
||||
text_style.id = 'Text%d' % i
|
||||
text_style.id = f'Text{i}'
|
||||
text_style.name = f'%0{rnum}d Text' % i
|
||||
text_style.seq = i
|
||||
self.descendant_text_styles = sorted(descendant_style_map, key=attrgetter('seq'))
|
||||
|
@ -48,7 +48,7 @@ def add_page_map(opfpath, opts):
|
||||
oeb = OEBBook(opfpath)
|
||||
selector = XPath(opts.page, namespaces=NSMAP)
|
||||
name_for = build_name_for(opts.page_names)
|
||||
idgen = ('calibre-page-%d' % n for n in count(1))
|
||||
idgen = (f'calibre-page-{n}' for n in count(1))
|
||||
for item in oeb.spine:
|
||||
data = item.data
|
||||
for elem in selector(data):
|
||||
|
@ -137,7 +137,7 @@ def sony_metadata(oeb):
|
||||
for i, section in enumerate(toc):
|
||||
if not section.href:
|
||||
continue
|
||||
secid = 'section%d'%i
|
||||
secid = f'section{i}'
|
||||
sectitle = section.title
|
||||
if not sectitle:
|
||||
sectitle = _('Unknown')
|
||||
@ -170,7 +170,7 @@ def sony_metadata(oeb):
|
||||
desc = section.description
|
||||
if not desc:
|
||||
desc = ''
|
||||
aid = 'article%d'%j
|
||||
aid = f'article{j}'
|
||||
|
||||
entries.append(SONY_ATOM_ENTRY.format(
|
||||
title=xml(atitle),
|
||||
|
@ -116,7 +116,7 @@ class FB2MLizer:
|
||||
metadata['title'] = self.oeb_book.metadata.title[0].value
|
||||
metadata['appname'] = __appname__
|
||||
metadata['version'] = __version__
|
||||
metadata['date'] = '%i.%i.%i' % (datetime.now().day, datetime.now().month, datetime.now().year)
|
||||
metadata['date'] = f'{datetime.now().day}.{datetime.now().month}.{datetime.now().year}'
|
||||
if self.oeb_book.metadata.language:
|
||||
lc = lang_as_iso639_1(self.oeb_book.metadata.language[0].value)
|
||||
if not lc:
|
||||
|
@ -153,7 +153,7 @@ class HTMLFile:
|
||||
return hash(self.path)
|
||||
|
||||
def __str__(self):
|
||||
return 'HTMLFile:%d:%s:%r'%(self.level, 'b' if self.is_binary else 'a', self.path)
|
||||
return f"HTMLFile:{self.level}:{'b' if self.is_binary else 'a'}:{self.path!r}"
|
||||
|
||||
def __repr__(self):
|
||||
return str(self)
|
||||
|
@ -242,7 +242,7 @@ class UnBinary:
|
||||
if flags & FLAG_ATOM:
|
||||
if not self.tag_atoms or tag not in self.tag_atoms:
|
||||
raise LitError(
|
||||
'atom tag %d not in atom tag list' % tag)
|
||||
f'atom tag {tag} not in atom tag list')
|
||||
tag_name = self.tag_atoms[tag]
|
||||
current_map = self.attr_atoms
|
||||
elif tag < len(self.tag_map):
|
||||
@ -257,8 +257,7 @@ class UnBinary:
|
||||
buf.write(encode(tag_name))
|
||||
elif flags & FLAG_CLOSING:
|
||||
if depth == 0:
|
||||
raise LitError('Extra closing tag %s at %d'%(tag_name,
|
||||
self.cpos))
|
||||
raise LitError(f'Extra closing tag {tag_name} at {self.cpos}')
|
||||
break
|
||||
|
||||
elif state == 'get attr':
|
||||
@ -290,7 +289,7 @@ class UnBinary:
|
||||
attr = self.attr_map[oc]
|
||||
if not attr or not isinstance(attr, string_or_bytes):
|
||||
raise LitError(
|
||||
'Unknown attribute %d in tag %s' % (oc, tag_name))
|
||||
f'Unknown attribute {oc} in tag {tag_name}')
|
||||
if attr.startswith('%'):
|
||||
in_censorship = True
|
||||
state = 'get value length'
|
||||
@ -315,7 +314,7 @@ class UnBinary:
|
||||
if oc == 0xffff:
|
||||
continue
|
||||
if count < 0 or count > (len(bin) - self.cpos):
|
||||
raise LitError('Invalid character count %d' % count)
|
||||
raise LitError(f'Invalid character count {count}')
|
||||
|
||||
elif state == 'get value':
|
||||
if count == 0xfffe:
|
||||
@ -342,7 +341,7 @@ class UnBinary:
|
||||
elif state == 'get custom length':
|
||||
count = oc - 1
|
||||
if count <= 0 or count > len(bin)-self.cpos:
|
||||
raise LitError('Invalid character count %d' % count)
|
||||
raise LitError(f'Invalid character count {count}')
|
||||
dynamic_tag += 1
|
||||
state = 'get custom'
|
||||
tag_name = ''
|
||||
@ -357,7 +356,7 @@ class UnBinary:
|
||||
elif state == 'get attr length':
|
||||
count = oc - 1
|
||||
if count <= 0 or count > (len(bin) - self.cpos):
|
||||
raise LitError('Invalid character count %d' % count)
|
||||
raise LitError(f'Invalid character count {count}')
|
||||
buf.write(b' ')
|
||||
state = 'get custom attr'
|
||||
|
||||
@ -371,7 +370,7 @@ class UnBinary:
|
||||
elif state == 'get href length':
|
||||
count = oc - 1
|
||||
if count <= 0 or count > (len(bin) - self.cpos):
|
||||
raise LitError('Invalid character count %d' % count)
|
||||
raise LitError(f'Invalid character count {count}')
|
||||
href = ''
|
||||
state = 'get href'
|
||||
|
||||
@ -397,8 +396,7 @@ class DirectoryEntry:
|
||||
self.size = size
|
||||
|
||||
def __repr__(self):
|
||||
return 'DirectoryEntry(name=%s, section=%d, offset=%d, size=%d)' \
|
||||
% (repr(self.name), self.section, self.offset, self.size)
|
||||
return f'DirectoryEntry(name={repr(self.name)}, section={self.section}, offset={self.offset}, size={self.size})'
|
||||
|
||||
def __str__(self):
|
||||
return repr(self)
|
||||
@ -429,9 +427,7 @@ class ManifestItem:
|
||||
return self.internal == other
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
'ManifestItem(internal=%r, path=%r, mime_type=%r, offset=%d, root=%r, state=%r)'
|
||||
) % (self.internal, self.path, self.mime_type, self.offset, self.root, self.state)
|
||||
return f"ManifestItem(internal={self.internal!r}, path={self.path!r}, mime_type={self.mime_type!r}, offset={self.offset}, root={self.root!r}, state={self.state!r})"
|
||||
|
||||
|
||||
def preserve(function):
|
||||
@ -462,7 +458,7 @@ class LitFile:
|
||||
if self.magic != b'ITOLITLS':
|
||||
raise LitError('Not a valid LIT file')
|
||||
if self.version != 1:
|
||||
raise LitError('Unknown LIT version %d' % (self.version,))
|
||||
raise LitError(f'Unknown LIT version {self.version}')
|
||||
self.read_secondary_header()
|
||||
self.read_header_pieces()
|
||||
self.read_section_names()
|
||||
@ -553,7 +549,7 @@ class LitFile:
|
||||
if blocktype == b'CAOL':
|
||||
if blockver != 2:
|
||||
raise LitError(
|
||||
'Unknown CAOL block format %d' % blockver)
|
||||
f'Unknown CAOL block format {blockver}')
|
||||
self.creator_id = u32(byts[offset+12:])
|
||||
self.entry_chunklen = u32(byts[offset+20:])
|
||||
self.count_chunklen = u32(byts[offset+24:])
|
||||
@ -563,7 +559,7 @@ class LitFile:
|
||||
elif blocktype == b'ITSF':
|
||||
if blockver != 4:
|
||||
raise LitError(
|
||||
'Unknown ITSF block format %d' % blockver)
|
||||
f'Unknown ITSF block format {blockver}')
|
||||
if u32(byts[offset+4+16:]):
|
||||
raise LitError('This file has a 64bit content offset')
|
||||
self.content_offset = u32(byts[offset+16:])
|
||||
|
@ -138,9 +138,9 @@ class TextBlock(etree.XSLTExtension):
|
||||
classes = []
|
||||
bs = node.get('blockstyle')
|
||||
if bs in self.styles.block_style_map:
|
||||
classes.append('bs%d'%self.styles.block_style_map[bs])
|
||||
classes.append(f'bs{self.styles.block_style_map[bs]}')
|
||||
if ts in self.styles.text_style_map:
|
||||
classes.append('ts%d'%self.styles.text_style_map[ts])
|
||||
classes.append(f'ts{self.styles.text_style_map[ts]}')
|
||||
if classes:
|
||||
root.set('class', ' '.join(classes))
|
||||
objid = node.get('objid', None)
|
||||
@ -218,7 +218,7 @@ class TextBlock(etree.XSLTExtension):
|
||||
def process_container(self, child, tgt):
|
||||
idx = self.styles.get_text_styles(child)
|
||||
if idx is not None:
|
||||
tgt.set('class', 'ts%d'%idx)
|
||||
tgt.set('class', f'ts{idx}')
|
||||
self.parent.append(tgt)
|
||||
orig_parent = self.parent
|
||||
self.parent = tgt
|
||||
@ -305,7 +305,7 @@ class Styles(etree.XSLTExtension):
|
||||
for i, s in enumerate(w):
|
||||
if not s:
|
||||
continue
|
||||
rsel = '.%s%d'%(sel, i)
|
||||
rsel = f'.{sel}{i}'
|
||||
s = join(s)
|
||||
f.write(as_bytes(rsel + ' {\n\t' + s + '\n}\n\n'))
|
||||
|
||||
@ -331,8 +331,8 @@ class Styles(etree.XSLTExtension):
|
||||
if a == 255:
|
||||
return None
|
||||
if a == 0:
|
||||
return 'rgb(%d,%d,%d)'%(r,g,b)
|
||||
return 'rgba(%d,%d,%d,%f)'%(r,g,b,1.-a/255.)
|
||||
return f'rgb({r},{g},{b})'
|
||||
return f'rgba({r},{g},{b},{1.0 - a / 255.0:f})'
|
||||
except:
|
||||
return None
|
||||
|
||||
|
@ -116,7 +116,7 @@ class LRFDocument(LRFMetaFile):
|
||||
close = '</Main>\n'
|
||||
pt_id = page_tree.id
|
||||
else:
|
||||
pages += '<PageTree objid="%d">\n'%(page_tree.id,)
|
||||
pages += f'<PageTree objid="{page_tree.id}">\n'
|
||||
close = '</PageTree>\n'
|
||||
for page in page_tree:
|
||||
pages += str(page)
|
||||
|
@ -261,7 +261,7 @@ class Color:
|
||||
return (self.r, self.g, self.b, 0xff-self.a)[i] # In Qt 0xff is opaque while in LRS 0x00 is opaque
|
||||
|
||||
def to_html(self):
|
||||
return 'rgb(%d, %d, %d)'%(self.r, self.g, self.b)
|
||||
return f'rgb({self.r}, {self.g}, {self.b})'
|
||||
|
||||
|
||||
class EmptyPageElement:
|
||||
@ -303,7 +303,7 @@ class Wait(EmptyPageElement):
|
||||
self.time = time
|
||||
|
||||
def __str__(self):
|
||||
return '\n<Wait time="%d" />\n'%(self.time)
|
||||
return f'\n<Wait time="{self.time}" />\n'
|
||||
|
||||
|
||||
class Locate(EmptyPageElement):
|
||||
@ -323,8 +323,7 @@ class BlockSpace(EmptyPageElement):
|
||||
self.xspace, self.yspace = xspace, yspace
|
||||
|
||||
def __str__(self):
|
||||
return '\n<BlockSpace xspace="%d" yspace="%d" />\n'%\
|
||||
(self.xspace, self.yspace)
|
||||
return f'\n<BlockSpace xspace="{self.xspace}" yspace="{self.yspace}" />\n'
|
||||
|
||||
|
||||
class Page(LRFStream):
|
||||
@ -420,7 +419,7 @@ class Page(LRFStream):
|
||||
yield from self.content
|
||||
|
||||
def __str__(self):
|
||||
s = '\n<Page pagestyle="%d" objid="%d">\n'%(self.style_id, self.id)
|
||||
s = f'\n<Page pagestyle="{self.style_id}" objid="{self.id}">\n'
|
||||
for i in self:
|
||||
s += str(i)
|
||||
s += '\n</Page>\n'
|
||||
@ -470,11 +469,11 @@ class BlockAttr(StyleObject, LRFObject):
|
||||
margin = str(obj.sidemargin) + 'px'
|
||||
ans += item('margin-left: {m}; margin-right: {m};'.format(**dict(m=margin)))
|
||||
if hasattr(obj, 'topskip'):
|
||||
ans += item('margin-top: %dpx;'%obj.topskip)
|
||||
ans += item(f'margin-top: {obj.topskip}px;')
|
||||
if hasattr(obj, 'footskip'):
|
||||
ans += item('margin-bottom: %dpx;'%obj.footskip)
|
||||
ans += item(f'margin-bottom: {obj.footskip}px;')
|
||||
if hasattr(obj, 'framewidth'):
|
||||
ans += item('border: solid %dpx'%obj.framewidth)
|
||||
ans += item(f'border: solid {obj.framewidth}px')
|
||||
if hasattr(obj, 'framecolor') and obj.framecolor.a < 255:
|
||||
ans += item(f'border-color: {obj.framecolor.to_html()};')
|
||||
if hasattr(obj, 'bgcolor') and obj.bgcolor.a < 255:
|
||||
@ -602,9 +601,9 @@ class Block(LRFStream, TextCSS):
|
||||
self.attrs[attr] = getattr(self, attr)
|
||||
|
||||
def __str__(self):
|
||||
s = '\n<%s objid="%d" blockstyle="%s" '%(self.name, self.id, getattr(self, 'style_id', ''))
|
||||
s = f"\n<{self.name} objid=\"{self.id}\" blockstyle=\"{getattr(self, 'style_id', '')}\" "
|
||||
if hasattr(self, 'textstyle_id'):
|
||||
s += 'textstyle="%d" '%(self.textstyle_id,)
|
||||
s += f'textstyle="{self.textstyle_id}" '
|
||||
for attr in self.attrs:
|
||||
s += f'{attr}="{self.attrs[attr]}" '
|
||||
if self.name != 'ImageBlock':
|
||||
@ -933,8 +932,7 @@ class Image(LRFObject):
|
||||
data = property(fget=lambda self: self._document.objects[self.refstream].stream)
|
||||
|
||||
def __str__(self):
|
||||
return '<Image objid="%s" x0="%d" y0="%d" x1="%d" y1="%d" xsize="%d" ysize="%d" refstream="%d" />\n'%\
|
||||
(self.id, self.x0, self.y0, self.x1, self.y1, self.xsize, self.ysize, self.refstream)
|
||||
return f'<Image objid="{self.id}" x0="{self.x0}" y0="{self.y0}" x1="{self.x1}" y1="{self.y1}" xsize="{self.xsize}" ysize="{self.ysize}" refstream="{self.refstream}" />\n'
|
||||
|
||||
|
||||
class PutObj(EmptyPageElement):
|
||||
@ -944,7 +942,7 @@ class PutObj(EmptyPageElement):
|
||||
self.object = objects[refobj]
|
||||
|
||||
def __str__(self):
|
||||
return '<PutObj x1="%d" y1="%d" refobj="%d" />'%(self.x1, self.y1, self.refobj)
|
||||
return f'<PutObj x1="{self.x1}" y1="{self.y1}" refobj="{self.refobj}" />'
|
||||
|
||||
|
||||
class Canvas(LRFStream):
|
||||
|
@ -341,7 +341,7 @@ class LrsObject:
|
||||
if labelName is None:
|
||||
labelName = name
|
||||
if labelDecorate:
|
||||
label = '%s.%d' % (labelName, self.objId)
|
||||
label = f'{labelName}.{self.objId}'
|
||||
else:
|
||||
label = str(self.objId)
|
||||
element.attrib[objlabel] = label
|
||||
|
@ -188,7 +188,7 @@ class Tag:
|
||||
self.offset = stream.tell()
|
||||
tag_id = struct.unpack('<BB', stream.read(2))
|
||||
if tag_id[1] != 0xF5:
|
||||
raise LRFParseError('Bad tag ID %02X at %d'%(tag_id[1], self.offset))
|
||||
raise LRFParseError(f'Bad tag ID {tag_id[1]:02X} at {self.offset}')
|
||||
if tag_id[0] not in self.__class__.tags:
|
||||
raise LRFParseError(f'Unknown tag ID: F5{tag_id[0]:02X}')
|
||||
|
||||
|
@ -227,7 +227,7 @@ class MetadataUpdater:
|
||||
# Fetch the existing title
|
||||
title_offset, = unpack('>L', self.record0[0x54:0x58])
|
||||
title_length, = unpack('>L', self.record0[0x58:0x5c])
|
||||
title_in_file, = unpack('%ds' % (title_length), self.record0[title_offset:title_offset + title_length])
|
||||
title_in_file, = unpack(f'{title_length}s', self.record0[title_offset:title_offset + title_length])
|
||||
|
||||
# Adjust length to accommodate PrimaryINDX if necessary
|
||||
mobi_header_length, = unpack('>L', self.record0[0x14:0x18])
|
||||
|
@ -226,7 +226,7 @@ class ManifestItem(Resource): # {{{
|
||||
return self.href()
|
||||
if index == 1:
|
||||
return self.media_type
|
||||
raise IndexError('%d out of bounds.'%index)
|
||||
raise IndexError(f'{index} out of bounds.')
|
||||
|
||||
# }}}
|
||||
|
||||
@ -237,7 +237,7 @@ class Manifest(ResourceCollection): # {{{
|
||||
self.append(ManifestItem.from_opf_manifest_item(item, dir))
|
||||
id = item.get('id', '')
|
||||
if not id:
|
||||
id = 'id%d'%self.next_id
|
||||
id = f'id{self.next_id}'
|
||||
self[-1].id = id
|
||||
self.next_id += 1
|
||||
|
||||
@ -261,7 +261,7 @@ class Manifest(ResourceCollection): # {{{
|
||||
mi = ManifestItem(path, is_path=True)
|
||||
if mt:
|
||||
mi.mime_type = mt
|
||||
mi.id = 'id%d'%m.next_id
|
||||
mi.id = f'id{m.next_id}'
|
||||
m.next_id += 1
|
||||
m.append(mi)
|
||||
return m
|
||||
@ -270,7 +270,7 @@ class Manifest(ResourceCollection): # {{{
|
||||
mi = ManifestItem(path, is_path=True)
|
||||
if mime_type:
|
||||
mi.mime_type = mime_type
|
||||
mi.id = 'id%d'%self.next_id
|
||||
mi.id = f'id{self.next_id}'
|
||||
self.next_id += 1
|
||||
self.append(mi)
|
||||
return mi.id
|
||||
@ -787,7 +787,7 @@ class OPF: # {{{
|
||||
c = 1
|
||||
while manifest_id in ids:
|
||||
c += 1
|
||||
manifest_id = 'id%d'%c
|
||||
manifest_id = f'id{c}'
|
||||
if not media_type:
|
||||
media_type = 'application/xhtml+xml'
|
||||
ans = etree.Element('{{{}}}item'.format(self.NAMESPACES['opf']),
|
||||
@ -801,7 +801,7 @@ class OPF: # {{{
|
||||
def replace_manifest_item(self, item, items):
|
||||
items = [self.create_manifest_item(*i) for i in items]
|
||||
for i, item2 in enumerate(items):
|
||||
item2.set('id', item.get('id')+'.%d'%(i+1))
|
||||
item2.set('id', item.get('id')+f'.{i + 1}')
|
||||
manifest = item.getparent()
|
||||
index = manifest.index(item)
|
||||
manifest[index:index+1] = items
|
||||
|
@ -39,7 +39,7 @@ def read_info(outputdir, get_cover):
|
||||
try:
|
||||
raw = subprocess.check_output([pdfinfo, '-enc', 'UTF-8', '-isodates', 'src.pdf'])
|
||||
except subprocess.CalledProcessError as e:
|
||||
prints('pdfinfo errored out with return code: %d'%e.returncode)
|
||||
prints(f'pdfinfo errored out with return code: {e.returncode}')
|
||||
return None
|
||||
try:
|
||||
info_raw = raw.decode('utf-8')
|
||||
@ -63,7 +63,7 @@ def read_info(outputdir, get_cover):
|
||||
try:
|
||||
raw = subprocess.check_output([pdfinfo, '-meta', 'src.pdf']).strip()
|
||||
except subprocess.CalledProcessError as e:
|
||||
prints('pdfinfo failed to read XML metadata with return code: %d'%e.returncode)
|
||||
prints(f'pdfinfo failed to read XML metadata with return code: {e.returncode}')
|
||||
else:
|
||||
parts = re.split(br'^Metadata:', raw, 1, flags=re.MULTILINE)
|
||||
if len(parts) > 1:
|
||||
@ -77,7 +77,7 @@ def read_info(outputdir, get_cover):
|
||||
subprocess.check_call([pdftoppm, '-singlefile', '-jpeg', '-cropbox',
|
||||
'src.pdf', 'cover'])
|
||||
except subprocess.CalledProcessError as e:
|
||||
prints('pdftoppm errored out with return code: %d'%e.returncode)
|
||||
prints(f'pdftoppm errored out with return code: {e.returncode}')
|
||||
|
||||
return ans
|
||||
|
||||
|
@ -263,7 +263,7 @@ class TOC(list):
|
||||
if not text:
|
||||
text = ''
|
||||
c[1] += 1
|
||||
item_id = 'num_%d'%c[1]
|
||||
item_id = f'num_{c[1]}'
|
||||
text = clean_xml_chars(text)
|
||||
elem = E.navPoint(
|
||||
E.navLabel(E.text(re.sub(r'\s+', ' ', text))),
|
||||
|
@ -148,7 +148,7 @@ class MetadataUpdater:
|
||||
for tag in self.topaz_headers:
|
||||
print(f'{tag}: ')
|
||||
num_recs = len(self.topaz_headers[tag]['blocks'])
|
||||
print(' num_recs: %d' % num_recs)
|
||||
print(f' num_recs: {num_recs}')
|
||||
if num_recs:
|
||||
print(' starting offset: 0x{:x}'.format(self.topaz_headers[tag]['blocks'][0]['offset']))
|
||||
|
||||
|
@ -81,7 +81,7 @@ def ensure_unique(template, existing):
|
||||
c = 0
|
||||
while q in existing:
|
||||
c += 1
|
||||
q = '%s-%d%s' % (b, c, e)
|
||||
q = f'{b}-{c}{e}'
|
||||
return q
|
||||
|
||||
|
||||
|
@ -585,7 +585,7 @@ def find_nsmap(elems):
|
||||
ans[pp] = ns
|
||||
else:
|
||||
i += 1
|
||||
ans['ns%d' % i] = ns
|
||||
ans[f'ns{i}'] = ns
|
||||
return ans
|
||||
|
||||
|
||||
|
@ -46,19 +46,19 @@ class ContainerHeader:
|
||||
def __str__(self):
|
||||
ans = [('*'*10) + ' Container Header ' + ('*'*10)]
|
||||
a = ans.append
|
||||
a('Record size: %d' % self.record_size)
|
||||
a('Type: %d' % self.type)
|
||||
a('Total number of records in this container: %d' % self.count)
|
||||
a(f'Record size: {self.record_size}')
|
||||
a(f'Type: {self.type}')
|
||||
a(f'Total number of records in this container: {self.count}')
|
||||
a(f'Encoding: {self.encoding}')
|
||||
a(f'Unknowns1: {self.unknowns1}')
|
||||
a('Num of resource records: %d' % self.num_of_resource_records)
|
||||
a('Num of non-dummy resource records: %d' % self.num_of_non_dummy_resource_records)
|
||||
a('Offset to href record: %d' % self.offset_to_href_record)
|
||||
a(f'Num of resource records: {self.num_of_resource_records}')
|
||||
a(f'Num of non-dummy resource records: {self.num_of_non_dummy_resource_records}')
|
||||
a(f'Offset to href record: {self.offset_to_href_record}')
|
||||
a(f'Unknowns2: {self.unknowns2}')
|
||||
a('Header length: %d' % self.header_length)
|
||||
a(f'Header length: {self.header_length}')
|
||||
a(f'Title Length: {self.title_length}')
|
||||
a(f'hrefs: {self.hrefs}')
|
||||
a('Null bytes after EXTH: %d' % self.null_bytes_after_exth)
|
||||
a(f'Null bytes after EXTH: {self.null_bytes_after_exth}')
|
||||
if len(self.bytes_after_exth) != self.null_bytes_after_exth:
|
||||
a('Non-null bytes present after EXTH header!!!!')
|
||||
return '\n'.join(ans) + '\n\n' + str(self.exth) + '\n\n' + (f'Title: {self.title}')
|
||||
|
@ -116,8 +116,7 @@ class Record: # {{{
|
||||
|
||||
@property
|
||||
def header(self):
|
||||
return 'Offset: %d Flags: %d UID: %d First 4 bytes: %r Size: %d'%(self.offset, self.flags,
|
||||
self.uid, self.raw[:4], len(self.raw))
|
||||
return f'Offset: {self.offset} Flags: {self.flags} UID: {self.uid} First 4 bytes: {self.raw[:4]!r} Size: {len(self.raw)}'
|
||||
# }}}
|
||||
|
||||
|
||||
@ -213,7 +212,7 @@ class EXTHRecord:
|
||||
self.data = binascii.hexlify(self.data)
|
||||
|
||||
def __str__(self):
|
||||
return '%s (%d): %r'%(self.name, self.type, self.data)
|
||||
return f'{self.name} ({self.type}): {self.data!r}'
|
||||
|
||||
|
||||
class EXTHHeader:
|
||||
@ -254,8 +253,8 @@ class EXTHHeader:
|
||||
|
||||
def __str__(self):
|
||||
ans = ['*'*20 + ' EXTH Header '+ '*'*20]
|
||||
ans.append('EXTH header length: %d'%self.length)
|
||||
ans.append('Number of EXTH records: %d'%self.count)
|
||||
ans.append(f'EXTH header length: {self.length}')
|
||||
ans.append(f'Number of EXTH records: {self.count}')
|
||||
ans.append('EXTH records...')
|
||||
for r in self.records:
|
||||
ans.append(str(r))
|
||||
@ -416,7 +415,7 @@ class MOBIHeader: # {{{
|
||||
self.last_resource_record = self.exth.kf8_header_index - 2
|
||||
|
||||
def __str__(self):
|
||||
ans = ['*'*20 + ' MOBI %d Header '%self.file_version+ '*'*20]
|
||||
ans = ['*'*20 + f' MOBI {self.file_version} Header '+ '*'*20]
|
||||
|
||||
a = ans.append
|
||||
|
||||
@ -427,39 +426,39 @@ class MOBIHeader: # {{{
|
||||
def r(d, attr):
|
||||
x = getattr(self, attr)
|
||||
if attr in self.relative_records and x != NULL_INDEX:
|
||||
a('%s: Absolute: %d Relative: %d'%(d, x, x-self.header_offset))
|
||||
a(f'{d}: Absolute: {x} Relative: {x - self.header_offset}')
|
||||
else:
|
||||
i(d, x)
|
||||
|
||||
a(f'Compression: {self.compression}')
|
||||
a(f'Unused: {self.unused!r}')
|
||||
a('Text length: %d'%self.text_length)
|
||||
a('Number of text records: %d'%self.number_of_text_records)
|
||||
a('Text record size: %d'%self.text_record_size)
|
||||
a(f'Text length: {self.text_length}')
|
||||
a(f'Number of text records: {self.number_of_text_records}')
|
||||
a(f'Text record size: {self.text_record_size}')
|
||||
a(f'Encryption: {self.encryption_type}')
|
||||
a(f'Unknown: {self.unknown!r}')
|
||||
a(f'Identifier: {self.identifier!r}')
|
||||
a('Header length: %d'% self.length)
|
||||
a(f'Header length: {self.length}')
|
||||
a(f'Type: {self.type}')
|
||||
a(f'Encoding: {self.encoding}')
|
||||
a(f'UID: {self.uid!r}')
|
||||
a('File version: %d'%self.file_version)
|
||||
a(f'File version: {self.file_version}')
|
||||
r('Meta Orth Index', 'meta_orth_indx')
|
||||
r('Meta Infl Index', 'meta_infl_indx')
|
||||
r('Secondary index record', 'secondary_index_record')
|
||||
a(f'Reserved: {self.reserved!r}')
|
||||
r('First non-book record', 'first_non_book_record')
|
||||
a('Full name offset: %d'%self.fullname_offset)
|
||||
a('Full name length: %d bytes'%self.fullname_length)
|
||||
a(f'Full name offset: {self.fullname_offset}')
|
||||
a(f'Full name length: {self.fullname_length} bytes')
|
||||
a(f'Langcode: {self.locale_raw!r}')
|
||||
a(f'Language: {self.language}')
|
||||
a(f'Sub language: {self.sublanguage}')
|
||||
a(f'Input language: {self.input_language!r}')
|
||||
a(f'Output language: {self.output_langauage!r}')
|
||||
a('Min version: %d'%self.min_version)
|
||||
a(f'Min version: {self.min_version}')
|
||||
r('First Image index', 'first_image_index')
|
||||
r('Huffman record offset', 'huffman_record_offset')
|
||||
a('Huffman record count: %d'%self.huffman_record_count)
|
||||
a(f'Huffman record count: {self.huffman_record_count}')
|
||||
r('Huffman table offset', 'datp_record_offset')
|
||||
a(f'Huffman table length: {self.datp_record_count!r}')
|
||||
a(f'EXTH flags: {bin(self.exth_flags)[2:]} ({self.has_exth})')
|
||||
@ -472,18 +471,18 @@ class MOBIHeader: # {{{
|
||||
if self.has_extra_data_flags:
|
||||
a(f'Unknown4: {self.unknown4!r}')
|
||||
if hasattr(self, 'first_text_record'):
|
||||
a('First content record: %d'%self.first_text_record)
|
||||
a('Last content record: %d'%self.last_text_record)
|
||||
a(f'First content record: {self.first_text_record}')
|
||||
a(f'Last content record: {self.last_text_record}')
|
||||
else:
|
||||
r('FDST Index', 'fdst_idx')
|
||||
a('FDST Count: %d'% self.fdst_count)
|
||||
a(f'FDST Count: {self.fdst_count}')
|
||||
r('FCIS number', 'fcis_number')
|
||||
a('FCIS count: %d'% self.fcis_count)
|
||||
a(f'FCIS count: {self.fcis_count}')
|
||||
r('FLIS number', 'flis_number')
|
||||
a('FLIS count: %d'% self.flis_count)
|
||||
a(f'FLIS count: {self.flis_count}')
|
||||
a(f'Unknown6: {self.unknown6!r}')
|
||||
r('SRCS record index', 'srcs_record_index')
|
||||
a('Number of SRCS records?: %d'%self.num_srcs_records)
|
||||
a(f'Number of SRCS records?: {self.num_srcs_records}')
|
||||
a(f'Unknown7: {self.unknown7!r}')
|
||||
a(f'Extra data flags: {bin(self.extra_data_flags)} (has multibyte: {self.has_multibytes}) '
|
||||
f'(has indexing: {self.has_indexing_bytes}) (has uncrossable breaks: {self.has_uncrossable_breaks})')
|
||||
@ -502,8 +501,7 @@ class MOBIHeader: # {{{
|
||||
ans += '\n\n' + str(self.exth)
|
||||
ans += f'\n\nBytes after EXTH ({len(self.bytes_after_exth)} bytes): {format_bytes(self.bytes_after_exth)}'
|
||||
|
||||
ans += '\nNumber of bytes after full name: %d' % (len(self.raw) - (self.fullname_offset +
|
||||
self.fullname_length))
|
||||
ans += f'\nNumber of bytes after full name: {len(self.raw) - (self.fullname_offset + self.fullname_length)}'
|
||||
|
||||
ans += f'\nRecord 0 length: {len(self.raw)}'
|
||||
return ans
|
||||
@ -599,13 +597,12 @@ class TextRecord: # {{{
|
||||
|
||||
for typ, val in iteritems(self.trailing_data):
|
||||
if isinstance(typ, numbers.Integral):
|
||||
print('Record %d has unknown trailing data of type: %d : %r'%
|
||||
(idx, typ, val))
|
||||
print(f'Record {idx} has unknown trailing data of type: {typ} : {val!r}')
|
||||
|
||||
self.idx = idx
|
||||
|
||||
def dump(self, folder):
|
||||
name = '%06d'%self.idx
|
||||
name = f'{self.idx:06}'
|
||||
with open(os.path.join(folder, name+'.txt'), 'wb') as f:
|
||||
f.write(self.raw)
|
||||
with open(os.path.join(folder, name+'.trailing_data'), 'wb') as f:
|
||||
|
@ -100,14 +100,14 @@ class Index:
|
||||
ans.extend(['', ''])
|
||||
ans += ['*'*10 + f' Index Record Headers ({len(self.index_headers)} records) ' + '*'*10]
|
||||
for i, header in enumerate(self.index_headers):
|
||||
ans += ['*'*10 + ' Index Record %d ' % i + '*'*10]
|
||||
ans += ['*'*10 + f' Index Record {i} ' + '*'*10]
|
||||
for field in INDEX_HEADER_FIELDS:
|
||||
a('%-12s: %r'%(FIELD_NAMES.get(field, field), header[field]))
|
||||
|
||||
if self.cncx:
|
||||
a('*'*10 + ' CNCX ' + '*'*10)
|
||||
for offset, val in iteritems(self.cncx):
|
||||
a('%10s: %s'%(offset, val))
|
||||
a(f'{offset:10}: {val}')
|
||||
ans.extend(['', ''])
|
||||
|
||||
if self.table is not None:
|
||||
|
@ -30,8 +30,7 @@ class TagX: # {{{
|
||||
self.is_eof = (self.eof == 1 and self.tag == 0 and self.num_values == 0 and self.bitmask == 0)
|
||||
|
||||
def __repr__(self):
|
||||
return 'TAGX(tag=%02d, num_values=%d, bitmask=%r, eof=%d)' % (self.tag,
|
||||
self.num_values, bin(self.bitmask), self.eof)
|
||||
return f'TAGX(tag={self.tag:02}, num_values={self.num_values}, bitmask={bin(self.bitmask)!r}, eof={self.eof})'
|
||||
# }}}
|
||||
|
||||
|
||||
@ -55,7 +54,7 @@ class SecondaryIndexHeader: # {{{
|
||||
'cp1252'}.get(self.index_encoding_num, 'unknown')
|
||||
if self.index_encoding == 'unknown':
|
||||
raise ValueError(
|
||||
'Unknown index encoding: %d'%self.index_encoding_num)
|
||||
f'Unknown index encoding: {self.index_encoding_num}')
|
||||
self.unknown2 = raw[32:36]
|
||||
self.num_index_entries, = struct.unpack('>I', raw[36:40])
|
||||
self.ordt_start, = struct.unpack('>I', raw[40:44])
|
||||
@ -102,30 +101,29 @@ class SecondaryIndexHeader: # {{{
|
||||
a('Unknown: %r (%d bytes) (All zeros: %r)'%(w,
|
||||
len(w), not bool(w.replace(b'\0', b''))))
|
||||
|
||||
a('Header length: %d'%self.header_length)
|
||||
a(f'Header length: {self.header_length}')
|
||||
u(self.unknown1)
|
||||
a('Index Type: %s (%d)'%(self.index_type_desc, self.index_type))
|
||||
a('Offset to IDXT start: %d'%self.idxt_start)
|
||||
a('Number of index records: %d'%self.index_count)
|
||||
a('Index encoding: %s (%d)'%(self.index_encoding,
|
||||
self.index_encoding_num))
|
||||
a(f'Index Type: {self.index_type_desc} ({self.index_type})')
|
||||
a(f'Offset to IDXT start: {self.idxt_start}')
|
||||
a(f'Number of index records: {self.index_count}')
|
||||
a(f'Index encoding: {self.index_encoding} ({self.index_encoding_num})')
|
||||
u(self.unknown2)
|
||||
a('Number of index entries: %d'% self.num_index_entries)
|
||||
a('ORDT start: %d'%self.ordt_start)
|
||||
a('LIGT start: %d'%self.ligt_start)
|
||||
a('Number of LIGT entries: %d'%self.num_of_ligt_entries)
|
||||
a('Number of cncx blocks: %d'%self.num_of_cncx_blocks)
|
||||
a(f'Number of index entries: {self.num_index_entries}')
|
||||
a(f'ORDT start: {self.ordt_start}')
|
||||
a(f'LIGT start: {self.ligt_start}')
|
||||
a(f'Number of LIGT entries: {self.num_of_ligt_entries}')
|
||||
a(f'Number of cncx blocks: {self.num_of_cncx_blocks}')
|
||||
u(self.unknown3)
|
||||
a('TAGX offset: %d'%self.tagx_offset)
|
||||
a(f'TAGX offset: {self.tagx_offset}')
|
||||
u(self.unknown4)
|
||||
a('\n\n')
|
||||
a('*'*20 + ' TAGX Header (%d bytes)'%self.tagx_header_length+ '*'*20)
|
||||
a('Header length: %d'%self.tagx_header_length)
|
||||
a('Control byte count: %d'%self.tagx_control_byte_count)
|
||||
a('*'*20 + f' TAGX Header ({self.tagx_header_length} bytes)'+ '*'*20)
|
||||
a(f'Header length: {self.tagx_header_length}')
|
||||
a(f'Control byte count: {self.tagx_control_byte_count}')
|
||||
for i in self.tagx_entries:
|
||||
a('\t' + repr(i))
|
||||
a(f'Index of last IndexEntry in secondary index record: {self.last_entry}')
|
||||
a('Number of entries in the NCX: %d'% self.ncx_count)
|
||||
a(f'Number of entries in the NCX: {self.ncx_count}')
|
||||
|
||||
return '\n'.join(ans)
|
||||
|
||||
@ -154,7 +152,7 @@ class IndexHeader: # {{{
|
||||
'cp1252'}.get(self.index_encoding_num, 'unknown')
|
||||
if self.index_encoding == 'unknown':
|
||||
raise ValueError(
|
||||
'Unknown index encoding: %d'%self.index_encoding_num)
|
||||
f'Unknown index encoding: {self.index_encoding_num}')
|
||||
self.possibly_language = raw[32:36]
|
||||
self.num_index_entries, = struct.unpack('>I', raw[36:40])
|
||||
self.ordt_start, = struct.unpack('>I', raw[40:44])
|
||||
@ -204,31 +202,30 @@ class IndexHeader: # {{{
|
||||
a('Unknown: %r (%d bytes) (All zeros: %r)'%(w,
|
||||
len(w), not bool(w.replace(b'\0', b''))))
|
||||
|
||||
a('Header length: %d'%self.header_length)
|
||||
a(f'Header length: {self.header_length}')
|
||||
u(self.unknown1)
|
||||
a('Header type: %d'%self.header_type)
|
||||
a('Index Type: %s (%d)'%(self.index_type_desc, self.index_type))
|
||||
a('Offset to IDXT start: %d'%self.idxt_start)
|
||||
a('Number of index records: %d'%self.index_count)
|
||||
a('Index encoding: %s (%d)'%(self.index_encoding,
|
||||
self.index_encoding_num))
|
||||
a(f'Header type: {self.header_type}')
|
||||
a(f'Index Type: {self.index_type_desc} ({self.index_type})')
|
||||
a(f'Offset to IDXT start: {self.idxt_start}')
|
||||
a(f'Number of index records: {self.index_count}')
|
||||
a(f'Index encoding: {self.index_encoding} ({self.index_encoding_num})')
|
||||
a(f'Unknown (possibly language?): {self.possibly_language!r}')
|
||||
a('Number of index entries: %d'% self.num_index_entries)
|
||||
a('ORDT start: %d'%self.ordt_start)
|
||||
a('LIGT start: %d'%self.ligt_start)
|
||||
a('Number of LIGT entries: %d'%self.num_of_ligt_entries)
|
||||
a('Number of cncx blocks: %d'%self.num_of_cncx_blocks)
|
||||
a(f'Number of index entries: {self.num_index_entries}')
|
||||
a(f'ORDT start: {self.ordt_start}')
|
||||
a(f'LIGT start: {self.ligt_start}')
|
||||
a(f'Number of LIGT entries: {self.num_of_ligt_entries}')
|
||||
a(f'Number of cncx blocks: {self.num_of_cncx_blocks}')
|
||||
u(self.unknown2)
|
||||
a('TAGX offset: %d'%self.tagx_offset)
|
||||
a(f'TAGX offset: {self.tagx_offset}')
|
||||
u(self.unknown3)
|
||||
a('\n\n')
|
||||
a('*'*20 + ' TAGX Header (%d bytes)'%self.tagx_header_length+ '*'*20)
|
||||
a('Header length: %d'%self.tagx_header_length)
|
||||
a('Control byte count: %d'%self.tagx_control_byte_count)
|
||||
a('*'*20 + f' TAGX Header ({self.tagx_header_length} bytes)'+ '*'*20)
|
||||
a(f'Header length: {self.tagx_header_length}')
|
||||
a(f'Control byte count: {self.tagx_control_byte_count}')
|
||||
for i in self.tagx_entries:
|
||||
a('\t' + repr(i))
|
||||
a(f'Index of last IndexEntry in primary index record: {self.last_entry}')
|
||||
a('Number of entries in the NCX: %d'% self.ncx_count)
|
||||
a(f'Number of entries in the NCX: {self.ncx_count}')
|
||||
|
||||
return '\n'.join(ans)
|
||||
# }}}
|
||||
@ -275,7 +272,7 @@ class Tag: # {{{
|
||||
self.attr, self.desc = self.TAG_MAP[tag_type]
|
||||
else:
|
||||
print('Unknown tag value: %s')
|
||||
self.desc = '??Unknown (tag value: %d)'%tag_type
|
||||
self.desc = f'??Unknown (tag value: {tag_type})'
|
||||
self.attr = 'unknown'
|
||||
|
||||
if '_offset' in self.attr:
|
||||
@ -368,8 +365,7 @@ class IndexEntry: # {{{
|
||||
if tag.value is not None:
|
||||
ans.append('\t'+str(tag))
|
||||
if self.first_child_index != -1:
|
||||
ans.append('\tNumber of children: %d'%(self.last_child_index -
|
||||
self.first_child_index + 1))
|
||||
ans.append(f'\tNumber of children: {self.last_child_index - self.first_child_index + 1}')
|
||||
return '\n'.join(ans)
|
||||
|
||||
# }}}
|
||||
@ -458,8 +454,7 @@ class CNCX: # {{{
|
||||
except:
|
||||
byts = raw[pos:]
|
||||
r = format_bytes(byts)
|
||||
print('CNCX entry at offset %d has unknown format %s'%(
|
||||
pos+record_offset, r))
|
||||
print(f'CNCX entry at offset {pos + record_offset} has unknown format {r}')
|
||||
self.records[pos+record_offset] = r
|
||||
pos = len(raw)
|
||||
pos += consumed+length
|
||||
@ -471,7 +466,7 @@ class CNCX: # {{{
|
||||
def __str__(self):
|
||||
ans = ['*'*20 + f' cncx ({len(self.records)} strings) '+ '*'*20]
|
||||
for k, v in iteritems(self.records):
|
||||
ans.append('%10d : %s'%(k, v))
|
||||
ans.append(f'{k:10} : {v}')
|
||||
return '\n'.join(ans)
|
||||
|
||||
# }}}
|
||||
@ -485,7 +480,7 @@ class ImageRecord: # {{{
|
||||
self.idx = idx
|
||||
|
||||
def dump(self, folder):
|
||||
name = '%06d'%self.idx
|
||||
name = f'{self.idx:06}'
|
||||
with open(os.path.join(folder, name+'.'+self.fmt), 'wb') as f:
|
||||
f.write(self.raw)
|
||||
|
||||
@ -497,7 +492,7 @@ class BinaryRecord: # {{{
|
||||
def __init__(self, idx, record):
|
||||
self.raw = record.raw
|
||||
sig = self.raw[:4]
|
||||
name = '%06d'%idx
|
||||
name = f'{idx:06}'
|
||||
if sig in {b'FCIS', b'FLIS', b'SRCS', b'DATP', b'RESC', b'BOUN',
|
||||
b'FDST', b'AUDI', b'VIDE', b'CRES', b'CONT', b'CMET'}:
|
||||
name += '-' + sig.decode('ascii')
|
||||
@ -516,7 +511,7 @@ class FontRecord: # {{{
|
||||
|
||||
def __init__(self, idx, record):
|
||||
self.raw = record.raw
|
||||
name = '%06d'%idx
|
||||
name = f'{idx:06}'
|
||||
self.font = read_font_record(self.raw)
|
||||
if self.font['err']:
|
||||
raise ValueError('Failed to read font record: {} Headers: {}'.format(
|
||||
@ -564,7 +559,7 @@ class TBSIndexing: # {{{
|
||||
for i in self.indices:
|
||||
if i.index in {idx, str(idx)}:
|
||||
return i
|
||||
raise IndexError('Index %d not found'%idx)
|
||||
raise IndexError(f'Index {idx} not found')
|
||||
|
||||
def __str__(self):
|
||||
ans = ['*'*20 + f' TBS Indexing ({len(self.record_indices)} records) '+ '*'*20]
|
||||
@ -580,13 +575,12 @@ class TBSIndexing: # {{{
|
||||
continue
|
||||
types[tbs_type] += strings
|
||||
for typ, strings in iteritems(types):
|
||||
with open(os.path.join(bdir, 'tbs_type_%d.txt'%typ), 'wb') as f:
|
||||
with open(os.path.join(bdir, f'tbs_type_{typ}.txt'), 'wb') as f:
|
||||
f.write(as_bytes('\n'.join(strings)))
|
||||
|
||||
def dump_record(self, r, dat):
|
||||
ans = []
|
||||
ans.append('\nRecord #%d: Starts at: %d Ends at: %d'%(r.idx,
|
||||
dat['geom'][0], dat['geom'][1]))
|
||||
ans.append(f"\nRecord #{r.idx}: Starts at: {dat['geom'][0]} Ends at: {dat['geom'][1]}")
|
||||
s, e, c = dat['starts'], dat['ends'], dat['complete']
|
||||
ans.append(('\tContains: %d index entries '
|
||||
'(%d ends, %d complete, %d starts)')%tuple(map(len, (s+e+c, e,
|
||||
@ -597,9 +591,7 @@ class TBSIndexing: # {{{
|
||||
if entries:
|
||||
ans.append(f'\t{typ}:')
|
||||
for x in entries:
|
||||
ans.append(('\t\tIndex Entry: %s (Parent index: %s, '
|
||||
'Depth: %d, Offset: %d, Size: %d) [%s]')%(
|
||||
x.index, x.parent_index, x.depth, x.offset, x.size, x.label))
|
||||
ans.append(f"\t\tIndex Entry: {x.index} (Parent index: {x.parent_index}, Depth: {x.depth}, Offset: {x.offset}, Size: {x.size}) [{x.label}]")
|
||||
|
||||
def bin4(num):
|
||||
ans = bin(num)[2:]
|
||||
@ -615,8 +607,8 @@ class TBSIndexing: # {{{
|
||||
byts = byts[consumed:]
|
||||
for k in extra:
|
||||
tbs_type |= k
|
||||
ans.append('\nTBS: %d (%s)'%(tbs_type, bin4(tbs_type)))
|
||||
ans.append('Outermost index: %d'%outermost_index)
|
||||
ans.append(f'\nTBS: {tbs_type} ({bin4(tbs_type)})')
|
||||
ans.append(f'Outermost index: {outermost_index}')
|
||||
ans.append(f'Unknown extra start bytes: {repr_extra(extra)}')
|
||||
if is_periodical: # Hierarchical periodical
|
||||
try:
|
||||
@ -626,7 +618,7 @@ class TBSIndexing: # {{{
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
a = []
|
||||
print('Failed to decode TBS bytes for record: %d'%r.idx)
|
||||
print(f'Failed to decode TBS bytes for record: {r.idx}')
|
||||
ans += a
|
||||
if byts:
|
||||
sbyts = tuple(hex(b)[2:] for b in byts)
|
||||
@ -654,35 +646,25 @@ class TBSIndexing: # {{{
|
||||
raise ValueError('Dont know how to interpret flags'
|
||||
f' {extra!r} while reading section transitions')
|
||||
nsi = self.get_index(psi.index+1)
|
||||
ans.append('Last article in this record of section %d'
|
||||
' (relative to next section index [%d]): '
|
||||
'%d [%d absolute index]'%(psi.index, nsi.index, ai,
|
||||
ai+nsi.index))
|
||||
ans.append(f'Last article in this record of section {psi.index} (relative to next section index [{nsi.index}]): {ai} [{ai + nsi.index} absolute index]')
|
||||
psi = nsi
|
||||
continue
|
||||
|
||||
ans.append('First article in this record of section %d'
|
||||
' (relative to its parent section): '
|
||||
'%d [%d absolute index]'%(psi.index, ai, ai+psi.index))
|
||||
ans.append(f'First article in this record of section {psi.index} (relative to its parent section): {ai} [{ai + psi.index} absolute index]')
|
||||
|
||||
num = extra.get(0b0100, None)
|
||||
if num is None:
|
||||
msg = ('The section %d has at most one article'
|
||||
' in this record')%psi.index
|
||||
msg = f"The section {psi.index} has at most one article in this record"
|
||||
else:
|
||||
msg = ('Number of articles in this record of '
|
||||
'section %d: %d')%(psi.index, num)
|
||||
msg = f"Number of articles in this record of section {psi.index}: {num}"
|
||||
ans.append(msg)
|
||||
|
||||
offset = extra.get(0b0001, None)
|
||||
if offset is not None:
|
||||
if offset == 0:
|
||||
ans.append('This record is spanned by the article:'
|
||||
'%d'%(ai+psi.index))
|
||||
ans.append(f'This record is spanned by the article:{ai + psi.index}')
|
||||
else:
|
||||
ans.append('->Offset to start of next section (%d) from start'
|
||||
' of record: %d [%d absolute offset]'%(psi.index+1,
|
||||
offset, offset+record_offset))
|
||||
ans.append(f'->Offset to start of next section ({psi.index + 1}) from start of record: {offset} [{offset + record_offset} absolute offset]')
|
||||
return byts
|
||||
# }}}
|
||||
|
||||
@ -698,8 +680,7 @@ class TBSIndexing: # {{{
|
||||
f' {si.index}')
|
||||
if 0b0100 in extra:
|
||||
num = extra[0b0100]
|
||||
ans.append('The number of articles from the section %d'
|
||||
' in this record: %s'%(si.index, num))
|
||||
ans.append(f'The number of articles from the section {si.index} in this record: {num}')
|
||||
elif 0b0001 in extra:
|
||||
eof = extra[0b0001]
|
||||
if eof != 0:
|
||||
@ -791,7 +772,7 @@ class MOBIFile: # {{{
|
||||
p()
|
||||
p('Record headers:')
|
||||
for i, r in enumerate(self.records):
|
||||
p('%6d. %s'%(i, r.header))
|
||||
p(f'{i:6}. {r.header}')
|
||||
|
||||
p()
|
||||
p(str(self.mobi_header))
|
||||
|
@ -53,7 +53,7 @@ class FDST:
|
||||
class File:
|
||||
|
||||
def __init__(self, skel, skeleton, text, first_aid, sections):
|
||||
self.name = 'part%04d'%skel.file_number
|
||||
self.name = f'part{skel.file_number:04}'
|
||||
self.skeleton, self.text, self.first_aid = skeleton, text, first_aid
|
||||
self.sections = sections
|
||||
|
||||
@ -66,7 +66,7 @@ class File:
|
||||
with open('skeleton.html', 'wb') as f:
|
||||
f.write(self.skeleton)
|
||||
for i, text in enumerate(self.sections):
|
||||
with open('sect-%04d.html'%i, 'wb') as f:
|
||||
with open(f'sect-{i:04}.html', 'wb') as f:
|
||||
f.write(text)
|
||||
|
||||
|
||||
@ -101,7 +101,7 @@ class MOBIFile:
|
||||
p()
|
||||
p('Record headers:')
|
||||
for i, r in enumerate(self.mf.records):
|
||||
p('%6d. %s'%(i, r.header))
|
||||
p(f'{i:6}. {r.header}')
|
||||
|
||||
p()
|
||||
p(str(self.mf.mobi8_header))
|
||||
@ -151,7 +151,7 @@ class MOBIFile:
|
||||
for i, x in enumerate(boundaries):
|
||||
start, end = x
|
||||
raw = self.raw_text[start:end]
|
||||
with open(os.path.join(ddir, 'flow%04d.txt'%i), 'wb') as f:
|
||||
with open(os.path.join(ddir, f'flow{i:04}.txt'), 'wb') as f:
|
||||
f.write(raw)
|
||||
|
||||
def extract_resources(self, records):
|
||||
@ -221,7 +221,7 @@ class MOBIFile:
|
||||
elif sig in known_types:
|
||||
suffix = '-' + sig.decode('ascii')
|
||||
|
||||
self.resource_map.append(('%s/%06d%s.%s'%(prefix, resource_index, suffix, ext),
|
||||
self.resource_map.append((f'{prefix}/{resource_index:06}{suffix}.{ext}',
|
||||
payload))
|
||||
|
||||
def read_tbs(self):
|
||||
@ -260,9 +260,9 @@ class MOBIFile:
|
||||
for i, strands in enumerate(indexing_data):
|
||||
rec = self.text_records[i]
|
||||
tbs_bytes = rec.trailing_data.get('indexing', b'')
|
||||
desc = ['Record #%d'%i]
|
||||
desc = [f'Record #{i}']
|
||||
for s, strand in enumerate(strands):
|
||||
desc.append('Strand %d'%s)
|
||||
desc.append(f'Strand {s}')
|
||||
for entries in itervalues(strand):
|
||||
for e in entries:
|
||||
desc.append(
|
||||
@ -284,7 +284,7 @@ class MOBIFile:
|
||||
extra = {bin(k):v for k, v in iteritems(extra)}
|
||||
sequences.append((val, extra))
|
||||
for j, seq in enumerate(sequences):
|
||||
desc.append('Sequence #%d: %r %r'%(j, seq[0], seq[1]))
|
||||
desc.append(f'Sequence #{j}: {seq[0]!r} {seq[1]!r}')
|
||||
if tbs_bytes:
|
||||
desc.append(f'Remaining bytes: {format_bytes(tbs_bytes)}')
|
||||
calculated_sequences = encode_strands_as_sequences(strands,
|
||||
@ -294,7 +294,7 @@ class MOBIFile:
|
||||
except:
|
||||
calculated_bytes = b'failed to calculate tbs bytes'
|
||||
if calculated_bytes != otbs:
|
||||
print('WARNING: TBS mismatch for record %d'%i)
|
||||
print(f'WARNING: TBS mismatch for record {i}')
|
||||
desc.append('WARNING: TBS mismatch!')
|
||||
desc.append(f'Calculated sequences: {calculated_sequences!r}')
|
||||
desc.append('')
|
||||
@ -321,7 +321,7 @@ def inspect_mobi(mobi_file, ddir):
|
||||
fo.write(payload)
|
||||
|
||||
for i, container in enumerate(f.containers):
|
||||
with open(os.path.join(ddir, 'container%d.txt' % (i + 1)), 'wb') as cf:
|
||||
with open(os.path.join(ddir, f'container{i + 1}.txt'), 'wb') as cf:
|
||||
cf.write(str(container).encode('utf-8'))
|
||||
|
||||
if f.fdst:
|
||||
|
@ -220,8 +220,7 @@ class BookHeader:
|
||||
}[self.codepage]
|
||||
except (IndexError, KeyError):
|
||||
self.codec = 'cp1252' if not user_encoding else user_encoding
|
||||
log.warn('Unknown codepage %d. Assuming %s' % (self.codepage,
|
||||
self.codec))
|
||||
log.warn(f'Unknown codepage {self.codepage}. Assuming {self.codec}')
|
||||
# Some KF8 files have header length == 264 (generated by kindlegen
|
||||
# 2.9?). See https://bugs.launchpad.net/bugs/1179144
|
||||
max_header_length = 500 # We choose 500 for future versions of kindlegen
|
||||
|
@ -16,7 +16,7 @@ PTagX = namedtuple('PTagX', 'tag value_count value_bytes num_of_values')
|
||||
INDEX_HEADER_FIELDS = (
|
||||
'len', 'nul1', 'type', 'gen', 'start', 'count', 'code',
|
||||
'lng', 'total', 'ordt', 'ligt', 'nligt', 'ncncx'
|
||||
) + tuple('unknown%d'%i for i in range(27)) + ('ocnt', 'oentries',
|
||||
) + tuple(f'unknown{i}' for i in range(27)) + ('ocnt', 'oentries',
|
||||
'ordt1', 'ordt2', 'tagx')
|
||||
|
||||
|
||||
@ -47,7 +47,7 @@ def parse_indx_header(data):
|
||||
check_signature(data, b'INDX')
|
||||
words = INDEX_HEADER_FIELDS
|
||||
num = len(words)
|
||||
values = struct.unpack('>%dL' % num, data[4:4*(num+1)])
|
||||
values = struct.unpack(f'>{num}L', data[4:4*(num+1)])
|
||||
ans = dict(zip(words, values))
|
||||
ans['idx_header_end_pos'] = 4 * (num+1)
|
||||
ordt1, ordt2 = ans['ordt1'], ans['ordt2']
|
||||
@ -103,8 +103,7 @@ class CNCX: # {{{
|
||||
except:
|
||||
byts = raw[pos:]
|
||||
r = format_bytes(byts)
|
||||
print('CNCX entry at offset %d has unknown format %s'%(
|
||||
pos+record_offset, r))
|
||||
print(f'CNCX entry at offset {pos + record_offset} has unknown format {r}')
|
||||
self.records[pos+record_offset] = r
|
||||
pos = len(raw)
|
||||
pos += consumed+length
|
||||
|
@ -525,7 +525,7 @@ class MobiReader:
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
attrib['src'] = 'images/' + image_name_map.get(recindex, '%05d.jpg' % recindex)
|
||||
attrib['src'] = 'images/' + image_name_map.get(recindex, f'{recindex:05}.jpg')
|
||||
for attr in ('width', 'height'):
|
||||
if attr in attrib:
|
||||
val = attrib[attr]
|
||||
@ -577,7 +577,7 @@ class MobiReader:
|
||||
ncls = sel
|
||||
break
|
||||
if ncls is None:
|
||||
ncls = 'calibre_%d' % i
|
||||
ncls = f'calibre_{i}'
|
||||
self.tag_css_rules[ncls] = rule
|
||||
cls = attrib.get('class', '')
|
||||
cls = cls + (' ' if cls else '') + ncls
|
||||
@ -658,7 +658,7 @@ class MobiReader:
|
||||
mi = MetaInformation(self.book_header.title, [_('Unknown')])
|
||||
opf = OPFCreator(os.path.dirname(htmlfile), mi)
|
||||
if hasattr(self.book_header.exth, 'cover_offset'):
|
||||
opf.cover = 'images/%05d.jpg' % (self.book_header.exth.cover_offset + 1)
|
||||
opf.cover = f'images/{self.book_header.exth.cover_offset + 1:05}.jpg'
|
||||
elif mi.cover is not None:
|
||||
opf.cover = mi.cover
|
||||
else:
|
||||
@ -920,7 +920,7 @@ class MobiReader:
|
||||
except OSError:
|
||||
self.log.warn(f'Ignoring undecodeable GIF image at index {image_index}')
|
||||
continue
|
||||
path = os.path.join(output_dir, '%05d.%s' % (image_index, imgfmt))
|
||||
path = os.path.join(output_dir, f'{image_index:05}.{imgfmt}')
|
||||
image_name_map[image_index] = os.path.basename(path)
|
||||
if imgfmt == 'png':
|
||||
with open(path, 'wb') as f:
|
||||
|
@ -200,7 +200,7 @@ class Mobi8Reader:
|
||||
self.elems[divptr]
|
||||
if i == 0:
|
||||
aidtext = idtext[12:-2]
|
||||
filename = 'part%04d.html' % filenum
|
||||
filename = f'part{filenum:04}.html'
|
||||
part = text[baseptr:baseptr + length]
|
||||
insertpos = insertpos - skelpos
|
||||
head = skeleton[:insertpos]
|
||||
@ -256,7 +256,7 @@ class Mobi8Reader:
|
||||
image_tag_pattern = re.compile(br'''(<(?:svg:)?image[^>]*>)''', re.IGNORECASE)
|
||||
for j in range(1, len(self.flows)):
|
||||
flowpart = self.flows[j]
|
||||
nstr = '%04d' % j
|
||||
nstr = f'{j:04}'
|
||||
m = svg_tag_pattern.search(flowpart)
|
||||
if m is not None:
|
||||
# svg
|
||||
@ -320,7 +320,7 @@ class Mobi8Reader:
|
||||
# pos
|
||||
fi = self.get_file_info(pos)
|
||||
if fi.num is None and fi.start is None:
|
||||
raise ValueError('No file contains pos: %d'%pos)
|
||||
raise ValueError(f'No file contains pos: {pos}')
|
||||
textblock = self.parts[fi.num]
|
||||
npos = pos - fi.start
|
||||
pgt = textblock.find(b'>', npos)
|
||||
@ -391,7 +391,7 @@ class Mobi8Reader:
|
||||
pos = entry['pos']
|
||||
fi = self.get_file_info(pos)
|
||||
if fi.filename is None:
|
||||
raise ValueError('Index entry has invalid pos: %d'%pos)
|
||||
raise ValueError(f'Index entry has invalid pos: {pos}')
|
||||
idtag = self.get_id_tag(pos)
|
||||
href = f'{fi.type}/{fi.filename}'
|
||||
else:
|
||||
@ -429,10 +429,9 @@ class Mobi8Reader:
|
||||
pass # Ignore these records
|
||||
elif typ == b'FONT':
|
||||
font = read_font_record(data)
|
||||
href = 'fonts/%05d.%s' % (fname_idx, font['ext'])
|
||||
href = f"fonts/{fname_idx:05}.{font['ext']}"
|
||||
if font['err']:
|
||||
self.log.warn('Reading font record %d failed: %s'%(
|
||||
fname_idx, font['err']))
|
||||
self.log.warn(f"Reading font record {fname_idx} failed: {font['err']}")
|
||||
if font['headers']:
|
||||
self.log.debug('Font record headers: {}'.format(font['headers']))
|
||||
with open(href.replace('/', os.sep), 'wb') as f:
|
||||
@ -448,7 +447,7 @@ class Mobi8Reader:
|
||||
elif typ == b'CRES':
|
||||
data, imgtype = container.load_image(data)
|
||||
if data is not None:
|
||||
href = 'images/%05d.%s'%(container.resource_index, imgtype)
|
||||
href = f'images/{container.resource_index:05}.{imgtype}'
|
||||
with open(href.replace('/', os.sep), 'wb') as f:
|
||||
f.write(data)
|
||||
elif typ == b'\xa0\xa0\xa0\xa0' and len(data) == 4 and container is not None:
|
||||
@ -456,7 +455,7 @@ class Mobi8Reader:
|
||||
elif container is None:
|
||||
if not (len(data) == len(PLACEHOLDER_GIF) and data == PLACEHOLDER_GIF):
|
||||
imgtype = find_imgtype(data)
|
||||
href = 'images/%05d.%s'%(fname_idx, imgtype)
|
||||
href = f'images/{fname_idx:05}.{imgtype}'
|
||||
with open(href.replace('/', os.sep), 'wb') as f:
|
||||
f.write(data)
|
||||
|
||||
|
@ -156,8 +156,7 @@ def test_decint(num):
|
||||
raw = encint(num, forward=d)
|
||||
sz = len(raw)
|
||||
if (num, sz) != decint(raw, forward=d):
|
||||
raise ValueError('Failed for num %d, forward=%r: %r != %r' % (
|
||||
num, d, (num, sz), decint(raw, forward=d)))
|
||||
raise ValueError(f'Failed for num {num}, forward={d!r}: {num, sz!r} != {decint(raw, forward=d)!r}')
|
||||
|
||||
|
||||
def rescale_image(data, maxsizeb=IMAGE_MAX_SIZE, dimen=None):
|
||||
|
@ -390,4 +390,4 @@ class Serializer:
|
||||
self.start_offset = ioff
|
||||
for hoff in hoffs:
|
||||
buf.seek(hoff)
|
||||
buf.write(('%010d' % ioff).encode('utf-8'))
|
||||
buf.write(f'{ioff:010}'.encode('utf-8'))
|
||||
|
@ -267,7 +267,7 @@ class ChunkIndex(Index):
|
||||
self.cncx = CNCX(c.selector for c in chunk_table)
|
||||
|
||||
self.entries = [
|
||||
('%010d'%c.insert_pos, {
|
||||
(f'{c.insert_pos:010}', {
|
||||
|
||||
'cncx_offset':self.cncx[c.selector],
|
||||
'file_number':c.file_number,
|
||||
@ -378,7 +378,7 @@ if __name__ == '__main__':
|
||||
import os
|
||||
import subprocess
|
||||
os.chdir('/t')
|
||||
paras = ['<p>%d</p>' % i for i in range(4000)]
|
||||
paras = [f'<p>{i}</p>' for i in range(4000)]
|
||||
raw = '<html><body>' + '\n\n'.join(paras) + '</body></html>'
|
||||
|
||||
src = 'index.html'
|
||||
|
@ -302,7 +302,7 @@ class KF8Writer:
|
||||
# https://bugs.launchpad.net/bugs/1489495
|
||||
if id_:
|
||||
cid += 1
|
||||
val = 'c%d' % cid
|
||||
val = f'c{cid}'
|
||||
self.id_map[(item.href, id_)] = val
|
||||
tag.set('cid', val)
|
||||
else:
|
||||
|
@ -341,7 +341,7 @@ class Chunker:
|
||||
for s in self.skeletons:
|
||||
s.start_pos = sp
|
||||
sp += len(s)
|
||||
self.skel_table = [Skel(s.file_number, 'SKEL%010d'%s.file_number,
|
||||
self.skel_table = [Skel(s.file_number, f'SKEL{s.file_number:010}',
|
||||
len(s.chunks), s.start_pos, len(s.skeleton)) for s in self.skeletons]
|
||||
|
||||
Chunk = namedtuple('Chunk',
|
||||
@ -426,13 +426,13 @@ class Chunker:
|
||||
error = False
|
||||
for i, skeleton in enumerate(self.skeletons):
|
||||
for j, chunk in enumerate(skeleton.chunks):
|
||||
with open(os.path.join(chunks, 'file-%d-chunk-%d.html'%(i, j)),
|
||||
with open(os.path.join(chunks, f'file-{i}-chunk-{j}.html'),
|
||||
'wb') as f:
|
||||
f.write(chunk.raw)
|
||||
oraw, rraw = orig_dumps[i], skeleton.rebuild()
|
||||
with open(os.path.join(orig, '%04d.html'%i), 'wb') as f:
|
||||
with open(os.path.join(orig, f'{i:04}.html'), 'wb') as f:
|
||||
f.write(oraw)
|
||||
with open(os.path.join(rebuilt, '%04d.html'%i), 'wb') as f:
|
||||
with open(os.path.join(rebuilt, f'{i:04}.html'), 'wb') as f:
|
||||
f.write(rraw)
|
||||
if oraw != rraw:
|
||||
error = True
|
||||
|
@ -200,7 +200,7 @@ class Extract(ODF2XHTML):
|
||||
# Replace all the class selectors with a single class selector
|
||||
# This will be added to the class attribute of all elements
|
||||
# that have one of these selectors.
|
||||
replace_name = 'c_odt%d'%count
|
||||
replace_name = f'c_odt{count}'
|
||||
count += 1
|
||||
for sel in r.selectorList:
|
||||
s = sel.selectorText[1:]
|
||||
|
@ -57,7 +57,7 @@ class BookmarksMixin:
|
||||
dat = []
|
||||
for bm in bookmarks:
|
||||
if bm['type'] == 'legacy':
|
||||
rec = '%s^%d#%s'%(bm['title'], bm['spine'], bm['pos'])
|
||||
rec = f"{bm['title']}^{bm['spine']}#{bm['pos']}"
|
||||
else:
|
||||
pos = bm['pos']
|
||||
if isinstance(pos, numbers.Number):
|
||||
|
@ -103,8 +103,7 @@ def html5_parse(data, max_nesting_depth=100):
|
||||
if isinstance(x.tag, string_or_bytes) and not len(x): # Leaf node
|
||||
depth = node_depth(x)
|
||||
if depth > max_nesting_depth:
|
||||
raise ValueError('HTML 5 parsing resulted in a tree with nesting'
|
||||
' depth > %d'%max_nesting_depth)
|
||||
raise ValueError(f'HTML 5 parsing resulted in a tree with nesting depth > {max_nesting_depth}')
|
||||
return data
|
||||
|
||||
|
||||
|
@ -231,7 +231,7 @@ class MimetypeMismatch(BaseError):
|
||||
c = 0
|
||||
while container.has_name(new_name):
|
||||
c += 1
|
||||
new_name = self.file_name.rpartition('.')[0] + ('%d.' % c) + self.change_ext_to
|
||||
new_name = self.file_name.rpartition('.')[0] + f'{c}.' + self.change_ext_to
|
||||
rename_files(container, {self.file_name:new_name})
|
||||
changed = True
|
||||
else:
|
||||
|
@ -146,7 +146,7 @@ class EscapedName(BaseError):
|
||||
c = 0
|
||||
while self.sname in all_names:
|
||||
c += 1
|
||||
self.sname = '%s_%d.%s' % (bn, c, ext)
|
||||
self.sname = f'{bn}_{c}.{ext}'
|
||||
rename_files(container, {self.name:self.sname})
|
||||
return True
|
||||
|
||||
|
@ -344,7 +344,7 @@ class Container(ContainerBase): # {{{
|
||||
item_id = 'id'
|
||||
while item_id in all_ids:
|
||||
c += 1
|
||||
item_id = 'id' + '%d'%c
|
||||
item_id = 'id' + f'{c}'
|
||||
manifest = self.opf_xpath('//opf:manifest')[0]
|
||||
href = self.name_to_href(name, self.opf_name)
|
||||
item = manifest.makeelement(OPF('item'),
|
||||
@ -369,7 +369,7 @@ class Container(ContainerBase): # {{{
|
||||
base, ext = name.rpartition('.')[::2]
|
||||
if c > 1:
|
||||
base = base.rpartition('-')[0]
|
||||
name = '%s-%d.%s' % (base, c, ext)
|
||||
name = f'{base}-{c}.{ext}'
|
||||
return name
|
||||
|
||||
def add_file(self, name, data, media_type=None, spine_index=None, modify_name_if_needed=False, process_manifest_item=None):
|
||||
|
@ -382,7 +382,7 @@ def create_epub_cover(container, cover_path, existing_image, options=None):
|
||||
container.log.exception('Failed to get width and height of cover')
|
||||
ar = 'xMidYMid meet' if keep_aspect else 'none'
|
||||
templ = CoverManager.SVG_TEMPLATE.replace('__ar__', ar)
|
||||
templ = templ.replace('__viewbox__', '0 0 %d %d'%(width, height))
|
||||
templ = templ.replace('__viewbox__', f'0 0 {width} {height}')
|
||||
templ = templ.replace('__width__', str(width))
|
||||
templ = templ.replace('__height__', str(height))
|
||||
folder = recommended_folders[tname]
|
||||
|
@ -98,7 +98,7 @@ def compress_images(container, report=None, names=None, jpeg_quality=None, webp_
|
||||
if not keep_going:
|
||||
abort.set()
|
||||
progress_callback(0, num_to_process, '')
|
||||
[Worker(abort, 'CompressImage%d' % i, queue, results, jpeg_quality, webp_quality, pc) for i in range(min(detect_ncpus(), num_to_process))]
|
||||
[Worker(abort, f'CompressImage{i}', queue, results, jpeg_quality, webp_quality, pc) for i in range(min(detect_ncpus(), num_to_process))]
|
||||
queue.join()
|
||||
before_total = after_total = 0
|
||||
processed_num = 0
|
||||
|
@ -218,7 +218,7 @@ def replace_file(container, name, path, basename, force_mt=None):
|
||||
b, e = nname.rpartition('.')[0::2]
|
||||
while container.exists(nname):
|
||||
count += 1
|
||||
nname = b + ('_%d.%s' % (count, e))
|
||||
nname = b + f'_{count}.{e}'
|
||||
rename_files(container, {name:nname})
|
||||
mt = force_mt or container.guess_type(nname)
|
||||
container.mime_map[nname] = mt
|
||||
@ -308,7 +308,7 @@ def rationalize_folders(container, folder_type_map):
|
||||
while new_name in all_names or new_name in new_names:
|
||||
c += 1
|
||||
n, ext = bn.rpartition('.')[0::2]
|
||||
new_name = posixpath.join(folder, '%s_%d.%s' % (n, c, ext))
|
||||
new_name = posixpath.join(folder, f'{n}_{c}.{ext}')
|
||||
name_map[name] = new_name
|
||||
new_names.add(new_name)
|
||||
return name_map
|
||||
|
@ -215,7 +215,7 @@ def split(container, name, loc_or_xpath, before=True, totals=None):
|
||||
nname, s = None, 0
|
||||
while not nname or container.exists(nname):
|
||||
s += 1
|
||||
nname = '%s_split%d.%s' % (base, s, ext)
|
||||
nname = f'{base}_split{s}.{ext}'
|
||||
manifest_item = container.generate_item(nname, media_type=container.mime_map[name])
|
||||
bottom_name = container.href_to_name(manifest_item.get('href'), container.opf_name)
|
||||
|
||||
@ -287,7 +287,7 @@ def multisplit(container, name, xpath, before=True):
|
||||
current = name
|
||||
all_names = [name]
|
||||
for i in range(len(nodes)):
|
||||
current = split(container, current, '//*[@calibre-split-point="%d"]' % i, before=before)
|
||||
current = split(container, current, f'//*[@calibre-split-point="{i}"]', before=before)
|
||||
all_names.append(current)
|
||||
|
||||
for x in all_names:
|
||||
@ -345,7 +345,7 @@ def unique_anchor(seen_anchors, current):
|
||||
ans = current
|
||||
while ans in seen_anchors:
|
||||
c += 1
|
||||
ans = '%s_%d' % (current, c)
|
||||
ans = f'{current}_{c}'
|
||||
return ans
|
||||
|
||||
|
||||
|
@ -51,7 +51,7 @@ def create_epub(manifest, spine=(), guide=(), meta_cover=None, ver=3):
|
||||
spine = [x[0] for x in manifest if guess_type(x[0]) in OEB_DOCS]
|
||||
spine = ''.join(f'<itemref idref="{name}"/>' for name in spine)
|
||||
guide = ''.join(f'<reference href="{name}" type="{typ}" title="{title}"/>' for name, typ, title in guide)
|
||||
opf = OPF_TEMPLATE.format(manifest=mo, ver='%d.0'%ver, metadata=metadata, spine=spine, guide=guide)
|
||||
opf = OPF_TEMPLATE.format(manifest=mo, ver=f'{ver}.0', metadata=metadata, spine=spine, guide=guide)
|
||||
buf = BytesIO()
|
||||
with ZipFile(buf, 'w', ZIP_STORED) as zf:
|
||||
zf.writestr('META-INF/container.xml', b'''
|
||||
@ -79,7 +79,7 @@ class Structure(BaseTest):
|
||||
ep = os.path.join(self.tdir, str(n) + 'book.epub')
|
||||
with open(ep, 'wb') as f:
|
||||
f.write(create_epub(*args, **kw).getvalue())
|
||||
c = get_container(ep, tdir=os.path.join(self.tdir, 'container%d' % n), tweak_mode=True)
|
||||
c = get_container(ep, tdir=os.path.join(self.tdir, f'container{n}'), tweak_mode=True)
|
||||
return c
|
||||
|
||||
def test_toc_detection(self):
|
||||
|
@ -622,7 +622,7 @@ def create_ncx(toc, to_href, btitle, lang, uid):
|
||||
def process_node(xml_parent, toc_parent):
|
||||
for child in toc_parent:
|
||||
play_order['c'] += 1
|
||||
point = etree.SubElement(xml_parent, NCX('navPoint'), id='num_%d' % play_order['c'],
|
||||
point = etree.SubElement(xml_parent, NCX('navPoint'), id=f"num_{play_order['c']}",
|
||||
playOrder=str(play_order['c']))
|
||||
label = etree.SubElement(point, NCX('navLabel'))
|
||||
title = child.title
|
||||
@ -853,7 +853,7 @@ def toc_to_html(toc, container, toc_name, title, lang=None):
|
||||
li.append(a)
|
||||
if len(toc) > 0:
|
||||
parent = li.makeelement(XHTML('ul'))
|
||||
parent.set('class', 'level%d' % (style_level))
|
||||
parent.set('class', f'level{style_level}')
|
||||
li.append(parent)
|
||||
a.tail = '\n\n' + (indent*(level+2))
|
||||
parent.text = '\n'+(indent*(level+3))
|
||||
@ -909,7 +909,7 @@ def create_inline_toc(container, title=None):
|
||||
name, c = 'toc.xhtml', 0
|
||||
while container.has_name(name):
|
||||
c += 1
|
||||
name = 'toc%d.xhtml' % c
|
||||
name = f'toc{c}.xhtml'
|
||||
container.add_file(name, raw, spine_index=0)
|
||||
else:
|
||||
with container.open(name, 'wb') as f:
|
||||
|
@ -142,7 +142,7 @@ class CoverManager:
|
||||
# if self.preserve_aspect_ratio:
|
||||
# width, height = 600, 800
|
||||
self.svg_template = self.svg_template.replace('__viewbox__',
|
||||
'0 0 %d %d'%(width, height))
|
||||
f'0 0 {width} {height}')
|
||||
self.svg_template = self.svg_template.replace('__width__',
|
||||
str(width))
|
||||
self.svg_template = self.svg_template.replace('__height__',
|
||||
|
@ -132,7 +132,7 @@ class UniqueFilenames: # {{{
|
||||
c = 0
|
||||
while True:
|
||||
c += 1
|
||||
suffix = '_u%d'%c
|
||||
suffix = f'_u{c}'
|
||||
candidate = base + suffix + ext
|
||||
if candidate not in self.seen_filenames:
|
||||
return suffix
|
||||
|
@ -143,7 +143,7 @@ class RemoveFakeMargins:
|
||||
|
||||
for p in paras(body):
|
||||
level = level_of(p, body)
|
||||
level = '%s_%d'%(barename(p.tag), level)
|
||||
level = f'{barename(p.tag)}_{level}'
|
||||
if level not in self.levels:
|
||||
self.levels[level] = []
|
||||
self.levels[level].append(p)
|
||||
@ -151,7 +151,7 @@ class RemoveFakeMargins:
|
||||
remove = set()
|
||||
for k, v in iteritems(self.levels):
|
||||
num = len(v)
|
||||
self.log.debug('Found %d items of level:'%num, k)
|
||||
self.log.debug(f'Found {num} items of level:', k)
|
||||
level = int(k.split('_')[-1])
|
||||
tag = k.split('_')[0]
|
||||
if tag == 'p' and num < 25:
|
||||
|
@ -217,8 +217,7 @@ class SVGRasterizer:
|
||||
href = self.images[key]
|
||||
else:
|
||||
logger = self.oeb.logger
|
||||
logger.info('Rasterizing %r to %dx%d'
|
||||
% (svgitem.href, size.width(), size.height()))
|
||||
logger.info(f'Rasterizing {svgitem.href!r} to {size.width()}x{size.height()}')
|
||||
image = QImage(size, QImage.Format.Format_ARGB32_Premultiplied)
|
||||
image.fill(QColor('white').rgb())
|
||||
painter = QPainter(image)
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user