automated translation to f-strings using flynt with --aggressive and patched to not pointlessly call int()

Kovid Goyal 2025-02-01 12:40:34 +05:30
parent 5c95a0ad18
commit 18d57d6298
No known key found for this signature in database
GPG Key ID: 06BC317B515ACE7C
210 changed files with 659 additions and 740 deletions
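For context: flynt rewrites printf-style % formatting into f-strings, and its --aggressive flag enables conversions that are not provably behaviour-preserving. Going by the commit message, unpatched flynt would sometimes guard a %d specifier by coercing the argument with int(), which is redundant when the value is already an integer, so the tool was patched to skip that. A minimal sketch of the rewrite pattern, based on a line from the first hunk below (the intermediate form is an assumption about unpatched flynt output, not something shown in this diff):

    num = 3
    # before: printf-style formatting
    print('Compiled %d forms' % num)
    # unpatched flynt --aggressive may guard %d with int() (an assumption
    # based on the commit message, not shown in this diff):
    print(f'Compiled {int(num)} forms')
    # patched flynt, as used for this commit: no pointless int() call
    print(f'Compiled {num} forms')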

View File

@@ -87,7 +87,7 @@ def build_forms(srcdir, info=None, summary=False, check_for_migration=False, che
             open(compiled_form, 'wb').write(dat)
             num += 1
     if num:
-        info('Compiled %d forms' % num)
+        info(f'Compiled {num} forms')
     if check_icons:
         resource_dir = os.path.join(os.path.dirname(srcdir), 'resources')
         ensure_icons_built(resource_dir, force_compile, info)

View File

@@ -808,7 +808,7 @@ def initialize_plugins(perf=False):
     sys.stdout, sys.stderr = ostdout, ostderr
     if perf:
         for x in sorted(times, key=lambda x: times[x]):
-            print('%50s: %.3f'%(x, times[x]))
+            print(f'{x:50}: {times[x]:.3f}')
     _initialized_plugins.sort(key=lambda x: x.priority, reverse=True)
     reread_filetype_plugins()
     reread_metadata_plugins()

View File

@@ -350,7 +350,7 @@ class CalibrePluginFinder:
             c = 0
             while True:
                 c += 1
-                plugin_name = 'dummy%d'%c
+                plugin_name = f'dummy{c}'
                 if plugin_name not in self.loaded_plugins:
                     break
         else:

View File

@@ -93,7 +93,7 @@ def get_data_as_dict(self, prefix=None, authors_as_string=False, ids=None, conve
              'languages'}.union(set(fdata))
    for x, data in iteritems(fdata):
        if data['datatype'] == 'series':
-            FIELDS.add('%d_index'%x)
+            FIELDS.add(f'{x}_index')
    data = []
    for record in self.data:
        if record is None:

View File

@@ -614,11 +614,11 @@ class DB:
             from calibre.library.coloring import migrate_old_rule
             old_rules = []
             for i in range(1, 6):
-                col = self.prefs.get('column_color_name_%d' % i, None)
-                templ = self.prefs.get('column_color_template_%d' % i, None)
+                col = self.prefs.get(f'column_color_name_{i}', None)
+                templ = self.prefs.get(f'column_color_template_{i}', None)
                 if col and templ:
                     try:
-                        del self.prefs['column_color_name_%d' % i]
+                        del self.prefs[f'column_color_name_{i}']
                         rules = migrate_old_rule(self.field_metadata, templ)
                         for templ in rules:
                             old_rules.append((col, templ))
@@ -1410,7 +1410,7 @@ class DB:
         with closing(Connection(tmpdb)) as conn:
             shell = Shell(db=conn, encoding='utf-8')
             shell.process_command('.read ' + fname.replace(os.sep, '/'))
-            conn.execute('PRAGMA user_version=%d;'%uv)
+            conn.execute(f'PRAGMA user_version={uv};')
         self.close(unload_formatter_functions=False)
         try:
@@ -1495,7 +1495,7 @@ class DB:
         # windows).
         l = (self.PATH_LIMIT - (extlen // 2) - 2) if iswindows else ((self.PATH_LIMIT - extlen - 2) // 2)
         if l < 5:
-            raise ValueError('Extension length too long: %d' % extlen)
+            raise ValueError(f'Extension length too long: {extlen}')
         author = ascii_filename(author)[:l]
         title = ascii_filename(title.lstrip())[:l].rstrip()
         if not title:
@@ -1510,7 +1510,7 @@ class DB:
     # Database layer API {{{
     def custom_table_names(self, num):
-        return 'custom_column_%d'%num, 'books_custom_column_%d_link'%num
+        return f'custom_column_{num}', f'books_custom_column_{num}_link'
     @property
     def custom_tables(self):
@@ -1628,7 +1628,7 @@ class DB:
     def format_hash(self, book_id, fmt, fname, path):
         path = self.format_abspath(book_id, fmt, fname, path)
         if path is None:
-            raise NoSuchFormat('Record %d has no fmt: %s'%(book_id, fmt))
+            raise NoSuchFormat(f'Record {book_id} has no fmt: {fmt}')
         sha = hashlib.sha256()
         with open(path, 'rb') as f:
             while True:

View File

@@ -992,7 +992,7 @@ class Cache:
             name = self.fields['formats'].format_fname(book_id, fmt)
             path = self._field_for('path', book_id).replace('/', os.sep)
         except:
-            raise NoSuchFormat('Record %d has no fmt: %s'%(book_id, fmt))
+            raise NoSuchFormat(f'Record {book_id} has no fmt: {fmt}')
         return self.backend.format_hash(book_id, fmt, name, path)
     @api
@@ -1222,7 +1222,7 @@ class Cache:
             name = self.fields['formats'].format_fname(book_id, fmt)
             path = self._field_for('path', book_id).replace('/', os.sep)
         except (KeyError, AttributeError):
-            raise NoSuchFormat('Record %d has no %s file'%(book_id, fmt))
+            raise NoSuchFormat(f'Record {book_id} has no {fmt} file')
         return self.backend.copy_format_to(book_id, fmt, name, path, dest,
                                            use_hardlink=use_hardlink, report_file_size=report_file_size)
@@ -2374,7 +2374,7 @@ class Cache:
         removed. '''
         missing = frozenset(val_map) - self._all_book_ids()
         if missing:
-            raise ValueError('add_custom_book_data: no such book_ids: %d'%missing)
+            raise ValueError(f'add_custom_book_data: no such book_ids: {missing}')
         self.backend.add_custom_data(name, val_map, delete_first)
     @read_api

View File

@@ -43,5 +43,5 @@ def main(opts, args, dbctx):
             prints(pformat(data))
             print('\n')
         else:
-            prints(col, '(%d)'%data['num'])
+            prints(col, f"({data['num']})")
     return 0

View File

@@ -654,7 +654,7 @@ class LibraryDatabase:
         book_id = index if index_is_id else self.id(index)
         ans = self.new_api.format_abspath(book_id, fmt)
         if ans is None:
-            raise NoSuchFormat('Record %d has no format: %s'%(book_id, fmt))
+            raise NoSuchFormat(f'Record {book_id} has no format: {fmt}')
         return ans
     def format_files(self, index, index_is_id=False):

View File

@@ -23,13 +23,13 @@ class SchemaUpgrade:
         try:
             while True:
                 uv = next(self.db.execute('pragma user_version'))[0]
-                meth = getattr(self, 'upgrade_version_%d'%uv, None)
+                meth = getattr(self, f'upgrade_version_{uv}', None)
                 if meth is None:
                     break
                 else:
-                    prints('Upgrading database to version %d...'%(uv+1))
+                    prints(f'Upgrading database to version {uv + 1}...')
                     meth()
-                    self.db.execute('pragma user_version=%d'%(uv+1))
+                    self.db.execute(f'pragma user_version={uv + 1}')
         except:
             self.db.execute('ROLLBACK')
             raise

View File

@@ -279,7 +279,7 @@ class FilesystemTest(BaseTest):
         self.assertFalse(importer.corrupted_files)
         self.assertEqual(cache.all_book_ids(), ic.all_book_ids())
         for book_id in cache.all_book_ids():
-            self.assertEqual(cache.cover(book_id), ic.cover(book_id), 'Covers not identical for book: %d' % book_id)
+            self.assertEqual(cache.cover(book_id), ic.cover(book_id), f'Covers not identical for book: {book_id}')
             for fmt in cache.formats(book_id):
                 self.assertEqual(cache.format(book_id, fmt), ic.format(book_id, fmt))
                 self.assertEqual(cache.format_metadata(book_id, fmt)['mtime'], cache.format_metadata(book_id, fmt)['mtime'])

View File

@@ -544,7 +544,7 @@ class LegacyTest(BaseTest):
         n = now()
         ndb = self.init_legacy(self.cloned_library)
         amap = ndb.new_api.get_id_map('authors')
-        sorts = [(aid, 's%d' % aid) for aid in amap]
+        sorts = [(aid, f's{aid}') for aid in amap]
         db = self.init_old(self.cloned_library)
         run_funcs(self, db, ndb, (
             ('+format_metadata', 1, 'FMT1', itemgetter('size')),

View File

@@ -126,8 +126,7 @@ class ReadingTest(BaseTest):
                 if isinstance(val, tuple) and 'authors' not in field and 'languages' not in field:
                     val, expected_val = set(val), set(expected_val)
                 self.assertEqual(expected_val, val,
-                        'Book id: %d Field: %s failed: %r != %r'%(
-                            book_id, field, expected_val, val))
+                        f'Book id: {book_id} Field: {field} failed: {expected_val!r} != {val!r}')
     # }}}
     def test_sorting(self): # {{{
@@ -206,7 +205,7 @@ class ReadingTest(BaseTest):
             ('title', True)]), 'Subsort failed')
         from calibre.ebooks.metadata.book.base import Metadata
         for i in range(7):
-            cache.create_book_entry(Metadata('title%d' % i), apply_import_tags=False)
+            cache.create_book_entry(Metadata(f'title{i}'), apply_import_tags=False)
         cache.create_custom_column('one', 'CC1', 'int', False)
         cache.create_custom_column('two', 'CC2', 'int', False)
         cache.create_custom_column('three', 'CC3', 'int', False)

View File

@@ -27,7 +27,7 @@ class UtilsTest(BaseTest):
         total = 0
         for i in range(1, num+1):
             sz = i * 1000
-            c.insert(i, i, (('%d'%i) * sz).encode('ascii'))
+            c.insert(i, i, (f'{i}' * sz).encode('ascii'))
             total += sz
         return total
@@ -44,7 +44,7 @@ class UtilsTest(BaseTest):
         for i in (3, 4, 2, 5, 1):
             data, ts = c[i]
             self.assertEqual(i, ts, 'timestamp not correct')
-            self.assertEqual((('%d'%i) * (i*1000)).encode('ascii'), data)
+            self.assertEqual((f'{i}' * (i*1000)).encode('ascii'), data)
         c.set_group_id('a')
         self.basic_fill(c)
         order = tuple(c.items)

View File

@@ -387,7 +387,7 @@ class WritingTest(BaseTest):
         for book_id in book_ids:
             raw = cache.read_backup(book_id)
             opf = OPF(BytesIO(raw))
-            ae(opf.title, 'title%d'%book_id)
+            ae(opf.title, f'title{book_id}')
             ae(opf.authors, ['author1', 'author2'])
         tested_fields = 'title authors tags'.split()
         before = {f:cache.all_field_for(f, book_ids) for f in tested_fields}
@@ -439,9 +439,9 @@ class WritingTest(BaseTest):
         ae(cache.set_cover({bid:img for bid in (1, 2, 3)}), {1, 2, 3})
         old = self.init_old()
         for book_id in (1, 2, 3):
-            ae(cache.cover(book_id), img, 'Cover was not set correctly for book %d' % book_id)
+            ae(cache.cover(book_id), img, f'Cover was not set correctly for book {book_id}')
             ae(cache.field_for('cover', book_id), 1)
-            ae(old.cover(book_id, index_is_id=True), img, 'Cover was not set correctly for book %d' % book_id)
+            ae(old.cover(book_id, index_is_id=True), img, f'Cover was not set correctly for book {book_id}')
             self.assertTrue(old.has_cover(book_id))
         old.close()
         old.break_cycles()
@@ -771,9 +771,9 @@ class WritingTest(BaseTest):
         conn.execute('INSERT INTO publishers (name) VALUES ("MŪS")')
         uid = conn.last_insert_rowid()
         conn.execute('DELETE FROM books_publishers_link')
-        conn.execute('INSERT INTO books_publishers_link (book,publisher) VALUES (1, %d)' % lid)
-        conn.execute('INSERT INTO books_publishers_link (book,publisher) VALUES (2, %d)' % uid)
-        conn.execute('INSERT INTO books_publishers_link (book,publisher) VALUES (3, %d)' % uid)
+        conn.execute(f'INSERT INTO books_publishers_link (book,publisher) VALUES (1, {lid})')
+        conn.execute(f'INSERT INTO books_publishers_link (book,publisher) VALUES (2, {uid})')
+        conn.execute(f'INSERT INTO books_publishers_link (book,publisher) VALUES (3, {uid})')
         cache.reload_from_db()
         t = cache.fields['publisher'].table
         for x in (lid, uid):

View File

@@ -295,9 +295,7 @@ class ThumbnailCache:
         self._load_index()
         self._invalidate_sizes()
         ts = (f'{timestamp:.2f}').replace('.00', '')
-        path = '%s%s%s%s%d-%s-%d-%dx%d' % (
-            self.group_id, os.sep, book_id % 100, os.sep,
-            book_id, ts, len(data), self.thumbnail_size[0], self.thumbnail_size[1])
+        path = f'{self.group_id}{os.sep}{book_id % 100}{os.sep}{book_id}-{ts}-{len(data)}-{self.thumbnail_size[0]}x{self.thumbnail_size[1]}'
         path = os.path.join(self.location, path)
         key = (self.group_id, book_id)
         e = self.items.pop(key, None)
@@ -371,7 +369,7 @@ class ThumbnailCache:
                 self._remove((self.group_id, book_id))
         elif os.path.exists(self.location):
             try:
-                raw = '\n'.join('%s %d' % (self.group_id, book_id) for book_id in book_ids)
+                raw = '\n'.join(f'{self.group_id} {book_id}' for book_id in book_ids)
                 with open(os.path.join(self.location, 'invalidate'), 'ab') as f:
                     f.write(raw.encode('ascii'))
             except OSError as err:

View File

@@ -153,7 +153,7 @@ class Bookmark: # {{{
                 marker_found = 0
                 text = ''
                 search_str1 = f'{mi.title}'
-                search_str2 = '- Highlight Loc. %d' % (displayed_location)
+                search_str2 = f'- Highlight Loc. {displayed_location}'
                 for line in f2:
                     if marker_found == 0:
                         if line.startswith(search_str1):

View File

@@ -830,7 +830,7 @@ class KOBO(USBMS):
         cursor.close()
     def set_readstatus(self, connection, ContentID, ReadStatus):
-        debug_print('Kobo::set_readstatus - ContentID=%s, ReadStatus=%d' % (ContentID, ReadStatus))
+        debug_print(f'Kobo::set_readstatus - ContentID={ContentID}, ReadStatus={ReadStatus}')
         cursor = connection.cursor()
         t = (ContentID,)
         cursor.execute('select DateLastRead, ReadStatus from Content where BookID is Null and ContentID = ?', t)
@@ -851,7 +851,7 @@ class KOBO(USBMS):
         t = (ReadStatus, datelastread, ContentID,)
         try:
-            debug_print('Kobo::set_readstatus - Making change - ContentID=%s, ReadStatus=%d, DateLastRead=%s' % (ContentID, ReadStatus, datelastread))
+            debug_print(f'Kobo::set_readstatus - Making change - ContentID={ContentID}, ReadStatus={ReadStatus}, DateLastRead={datelastread}')
             cursor.execute("update content set ReadStatus=?,FirstTimeReading='false',DateLastRead=? where BookID is Null and ContentID = ?", t)
         except:
             debug_print(' Database Exception: Unable to update ReadStatus')
@@ -1742,8 +1742,7 @@ class KOBOTOUCH(KOBO):
             if show_debug:
                 debug_print(f"KoboTouch:update_booklist - title='{title}'", f'ContentType={ContentType}', 'isdownloaded=', isdownloaded)
                 debug_print(
-                    ' prefix=%s, DateCreated=%s, readstatus=%d, MimeType=%s, expired=%d, favouritesindex=%d, accessibility=%d, isdownloaded=%s'%
-                    (prefix, DateCreated, readstatus, MimeType, expired, favouritesindex, accessibility, isdownloaded,))
+                    f' prefix={prefix}, DateCreated={DateCreated}, readstatus={readstatus}, MimeType={MimeType}, expired={expired}, favouritesindex={favouritesindex}, accessibility={accessibility}, isdownloaded={isdownloaded}')
             changed = False
             try:
                 lpath = path.partition(self.normalize_path(prefix))[2]
@@ -1845,7 +1844,7 @@ class KOBOTOUCH(KOBO):
             if idx is not None: # and not (accessibility == 1 and isdownloaded == 'false'):
                 if show_debug:
                     self.debug_index = idx
-                    debug_print('KoboTouch:update_booklist - idx=%d'%idx)
+                    debug_print(f'KoboTouch:update_booklist - idx={idx}')
                     debug_print(f'KoboTouch:update_booklist - lpath={lpath}')
                     debug_print('KoboTouch:update_booklist - bl[idx].device_collections=', bl[idx].device_collections)
                     debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map)
@@ -2090,7 +2089,7 @@ class KOBOTOUCH(KOBO):
                 # self.report_progress((i) / float(books_on_device), _('Getting list of books on device...'))
                 show_debug = self.is_debugging_title(row['Title'])
                 if show_debug:
-                    debug_print('KoboTouch:books - looping on database - row=%d' % i)
+                    debug_print(f'KoboTouch:books - looping on database - row={i}')
                    debug_print("KoboTouch:books - title='{}'".format(row['Title']), 'authors=', row['Attribution'])
                    debug_print('KoboTouch:books - row=', row)
                if not hasattr(row['ContentID'], 'startswith') or row['ContentID'].lower().startswith(
@@ -2506,7 +2505,7 @@ class KOBOTOUCH(KOBO):
             if self._card_a_prefix is not None:
                 ContentID = ContentID.replace(self._card_a_prefix, 'file:///mnt/sd/')
         else: # ContentType = 16
-            debug_print("KoboTouch:contentid_from_path ContentType other than 6 - ContentType='%d'"%ContentType, f"path='{path}'")
+            debug_print(f"KoboTouch:contentid_from_path ContentType other than 6 - ContentType='{ContentType}'", f"path='{path}'")
             ContentID = path
             ContentID = ContentID.replace(self._main_prefix, 'file:///mnt/onboard/')
             if self._card_a_prefix is not None:
@@ -2720,9 +2719,8 @@ class KOBOTOUCH(KOBO):
         if not prefs['manage_device_metadata'] == 'manual' and delete_empty_collections:
             debug_print('KoboTouch:update_device_database_collections - about to clear empty bookshelves')
             self.delete_empty_bookshelves(connection)
-        debug_print('KoboTouch:update_device_database_collections - Number of series set=%d Number of books=%d' % (self.series_set, books_in_library))
-        debug_print('KoboTouch:update_device_database_collections - Number of core metadata set=%d Number of books=%d' % (
-            self.core_metadata_set, books_in_library))
+        debug_print(f'KoboTouch:update_device_database_collections - Number of series set={self.series_set} Number of books={books_in_library}')
+        debug_print(f'KoboTouch:update_device_database_collections - Number of core metadata set={self.core_metadata_set} Number of books={books_in_library}')
         self.dump_bookshelves(connection)
@@ -2916,8 +2914,7 @@ class KOBOTOUCH(KOBO):
         for ending, cover_options in self.cover_file_endings().items():
             kobo_size, min_dbversion, max_dbversion, is_full_size = cover_options
             if show_debug:
-                debug_print('KoboTouch:_upload_cover - library_cover_size=%s -> kobo_size=%s, min_dbversion=%d max_dbversion=%d, is_full_size=%s' % (
-                    library_cover_size, kobo_size, min_dbversion, max_dbversion, is_full_size))
+                debug_print(f'KoboTouch:_upload_cover - library_cover_size={library_cover_size} -> kobo_size={kobo_size}, min_dbversion={min_dbversion} max_dbversion={max_dbversion}, is_full_size={is_full_size}')
             if self.dbversion >= min_dbversion and self.dbversion <= max_dbversion:
                 if show_debug:
@@ -4229,7 +4226,7 @@ class KOBOTOUCH(KOBO):
             if i == 0:
                 prints('No shelves found!!')
             else:
-                prints('Number of shelves=%d'%i)
+                prints(f'Number of shelves={i}')
             prints('\nBooks on shelves on device:')
             cursor.execute(shelfcontent_query)
@@ -4241,7 +4238,7 @@ class KOBOTOUCH(KOBO):
             if i == 0:
                 prints('No books are on any shelves!!')
             else:
-                prints('Number of shelved books=%d'%i)
+                prints(f'Number of shelved books={i}')
         cursor.close()
         debug_print('KoboTouch:dump_bookshelves - end')

View File

@@ -218,7 +218,7 @@ class MTP_DEVICE(MTPDeviceBase):
         self.dev = self._filesystem_cache = None
     def format_errorstack(self, errs):
-        return '\n'.join('%d:%s'%(code, as_unicode(msg)) for code, msg in errs)
+        return '\n'.join(f'{code}:{as_unicode(msg)}' for code, msg in errs)
     @synchronous
     def open(self, connected_device, library_uuid):

View File

@@ -122,7 +122,7 @@ class PALADIN(USBMS):
         try:
             device_offset = max(time_offsets, key=lambda a: time_offsets.get(a))
-            debug_print('Device Offset: %d ms'%device_offset)
+            debug_print(f'Device Offset: {device_offset} ms')
             self.device_offset = device_offset
         except ValueError:
             debug_print('No Books To Detect Device Offset.')
@@ -249,7 +249,7 @@ class PALADIN(USBMS):
         sequence_max = sequence_min
         sequence_dirty = 0
-        debug_print('Book Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
+        debug_print(f'Book Sequence Min: {sequence_min}, Source Id: {source_id}')
         try:
             cursor = connection.cursor()
@@ -283,7 +283,7 @@ class PALADIN(USBMS):
         # If the database is 'dirty', then we should fix up the Ids and the sequence number
         if sequence_dirty == 1:
-            debug_print('Book Sequence Dirty for Source Id: %d'%source_id)
+            debug_print(f'Book Sequence Dirty for Source Id: {source_id}')
             sequence_max = sequence_max + 1
             for book, bookId in db_books.items():
                 if bookId < sequence_min:
@@ -302,7 +302,7 @@ class PALADIN(USBMS):
                     cursor.execute(query, t)
             self.set_database_sequence_id(connection, 'books', sequence_max)
-            debug_print('Book Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
+            debug_print(f'Book Sequence Max: {sequence_max}, Source Id: {source_id}')
         cursor.close()
         return db_books
@@ -355,7 +355,7 @@ class PALADIN(USBMS):
                                book.mime or mime_type_ext(path_to_ext(lpath)))
                 cursor.execute(query, t)
                 book.bookId = connection.last_insert_rowid()
-                debug_print('Inserted New Book: (%u) '%book.bookId + book.title)
+                debug_print(f'Inserted New Book: ({book.bookId}) ' + book.title)
             else:
                 query = '''
                 UPDATE books
@@ -386,7 +386,7 @@ class PALADIN(USBMS):
         sequence_max = sequence_min
         sequence_dirty = 0
-        debug_print('Collection Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
+        debug_print(f'Collection Sequence Min: {sequence_min}, Source Id: {source_id}')
         try:
             cursor = connection.cursor()
@@ -415,7 +415,7 @@ class PALADIN(USBMS):
         # If the database is 'dirty', then we should fix up the Ids and the sequence number
         if sequence_dirty == 1:
-            debug_print('Collection Sequence Dirty for Source Id: %d'%source_id)
+            debug_print(f'Collection Sequence Dirty for Source Id: {source_id}')
             sequence_max = sequence_max + 1
             for collection, collectionId in db_collections.items():
                 if collectionId < sequence_min:
@@ -434,13 +434,13 @@ class PALADIN(USBMS):
                 cursor.execute(query, t)
             self.set_database_sequence_id(connection, 'tags', sequence_max)
-            debug_print('Collection Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
+            debug_print(f'Collection Sequence Max: {sequence_max}, Source Id: {source_id}')
         # Fix up the collections table now...
         sequence_dirty = 0
         sequence_max = sequence_min
-        debug_print('Collections Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
+        debug_print(f'Collections Sequence Min: {sequence_min}, Source Id: {source_id}')
         query = 'SELECT _id FROM booktags'
         cursor.execute(query)
@@ -454,7 +454,7 @@ class PALADIN(USBMS):
             sequence_max = max(sequence_max, row[0])
         if sequence_dirty == 1:
-            debug_print('Collections Sequence Dirty for Source Id: %d'%source_id)
+            debug_print(f'Collections Sequence Dirty for Source Id: {source_id}')
             sequence_max = sequence_max + 1
             for pairId in db_collection_pairs:
                 if pairId < sequence_min:
@@ -465,7 +465,7 @@ class PALADIN(USBMS):
                 sequence_max = sequence_max + 1
         self.set_database_sequence_id(connection, 'booktags', sequence_max)
-        debug_print('Collections Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
+        debug_print(f'Collections Sequence Max: {sequence_max}, Source Id: {source_id}')
         cursor.close()
         return db_collections
@@ -483,7 +483,7 @@ class PALADIN(USBMS):
                 t = (collection,)
                 cursor.execute(query, t)
                 db_collections[collection] = connection.last_insert_rowid()
-                debug_print('Inserted New Collection: (%u) '%db_collections[collection] + collection)
+                debug_print(f'Inserted New Collection: ({db_collections[collection]}) ' + collection)
             # Get existing books in collection
             query = '''

View File

@@ -434,8 +434,7 @@ class XMLCache:
                         book.lpath, book.thumbnail)
             self.periodicalize_book(book, ext_record)
-        debug_print('Timezone votes: %d GMT, %d LTZ, use_tz_var=%s'%
-                (gtz_count, ltz_count, use_tz_var))
+        debug_print(f'Timezone votes: {gtz_count} GMT, {ltz_count} LTZ, use_tz_var={use_tz_var}')
         self.update_playlists(i, root, booklist, collections_attributes)
         # Update the device collections because update playlist could have added
         # some new ones.

View File

@@ -210,7 +210,7 @@ class PRST1(USBMS):
         try:
             device_offset = max(time_offsets, key=lambda a: time_offsets.get(a))
-            debug_print('Device Offset: %d ms'%device_offset)
+            debug_print(f'Device Offset: {device_offset} ms')
             self.device_offset = device_offset
         except ValueError:
             debug_print('No Books To Detect Device Offset.')
@@ -362,7 +362,7 @@ class PRST1(USBMS):
         sequence_max = sequence_min
         sequence_dirty = 0
-        debug_print('Book Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
+        debug_print(f'Book Sequence Min: {sequence_min}, Source Id: {source_id}')
         try:
             cursor = connection.cursor()
@@ -396,7 +396,7 @@ class PRST1(USBMS):
         # If the database is 'dirty', then we should fix up the Ids and the sequence number
         if sequence_dirty == 1:
-            debug_print('Book Sequence Dirty for Source Id: %d'%source_id)
+            debug_print(f'Book Sequence Dirty for Source Id: {source_id}')
             sequence_max = sequence_max + 1
             for book, bookId in db_books.items():
                 if bookId < sequence_min:
@@ -433,7 +433,7 @@ class PRST1(USBMS):
                 cursor.execute(query, t)
         self.set_database_sequence_id(connection, 'books', sequence_max)
-        debug_print('Book Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
+        debug_print(f'Book Sequence Max: {sequence_max}, Source Id: {source_id}')
         cursor.close()
         return db_books
@@ -495,7 +495,7 @@ class PRST1(USBMS):
                 book.bookId = self.get_lastrowid(cursor)
                 if upload_covers:
                     self.upload_book_cover(connection, book, source_id)
-                debug_print('Inserted New Book: (%u) '%book.bookId + book.title)
+                debug_print(f'Inserted New Book: ({book.bookId}) ' + book.title)
             else:
                 query = '''
                 UPDATE books
@@ -534,7 +534,7 @@ class PRST1(USBMS):
         sequence_max = sequence_min
         sequence_dirty = 0
-        debug_print('Collection Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
+        debug_print(f'Collection Sequence Min: {sequence_min}, Source Id: {source_id}')
         try:
             cursor = connection.cursor()
@@ -563,7 +563,7 @@ class PRST1(USBMS):
         # If the database is 'dirty', then we should fix up the Ids and the sequence number
         if sequence_dirty == 1:
-            debug_print('Collection Sequence Dirty for Source Id: %d'%source_id)
+            debug_print(f'Collection Sequence Dirty for Source Id: {source_id}')
             sequence_max = sequence_max + 1
             for collection, collectionId in db_collections.items():
                 if collectionId < sequence_min:
@@ -582,13 +582,13 @@ class PRST1(USBMS):
                 cursor.execute(query, t)
             self.set_database_sequence_id(connection, 'collection', sequence_max)
-            debug_print('Collection Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
+            debug_print(f'Collection Sequence Max: {sequence_max}, Source Id: {source_id}')
         # Fix up the collections table now...
         sequence_dirty = 0
         sequence_max = sequence_min
-        debug_print('Collections Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
+        debug_print(f'Collections Sequence Min: {sequence_min}, Source Id: {source_id}')
         query = 'SELECT _id FROM collections'
         cursor.execute(query)
@@ -602,7 +602,7 @@ class PRST1(USBMS):
             sequence_max = max(sequence_max, row[0])
         if sequence_dirty == 1:
-            debug_print('Collections Sequence Dirty for Source Id: %d'%source_id)
+            debug_print(f'Collections Sequence Dirty for Source Id: {source_id}')
             sequence_max = sequence_max + 1
             for pairId in db_collection_pairs:
                 if pairId < sequence_min:
@@ -613,7 +613,7 @@ class PRST1(USBMS):
                 sequence_max = sequence_max + 1
         self.set_database_sequence_id(connection, 'collections', sequence_max)
-        debug_print('Collections Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
+        debug_print(f'Collections Sequence Max: {sequence_max}, Source Id: {source_id}')
         cursor.close()
         return db_collections
@@ -631,7 +631,7 @@ class PRST1(USBMS):
                 t = (collection, source_id)
                 cursor.execute(query, t)
                 db_collections[collection] = self.get_lastrowid(cursor)
-                debug_print('Inserted New Collection: (%u) '%db_collections[collection] + collection)
+                debug_print(f'Inserted New Collection: ({db_collections[collection]}) ' + collection)
             # Get existing books in collection
            query = '''
# Get existing books in collection # Get existing books in collection
query = ''' query = '''

View File

@@ -219,8 +219,7 @@ def test_for_mem_leak():
     for i in range(3):
         gc.collect()
     usedmem = memory(startmem)
-    prints('Memory used in %d repetitions of scan(): %.5f KB'%(reps,
-        1024*usedmem))
+    prints(f'Memory used in {reps} repetitions of scan(): {1024 * usedmem:.5f} KB')
     prints('Differences in python object counts:')
     diff_hists(h1, gc_histogram())
     prints()

View File

@@ -853,7 +853,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
                     json_metadata[key]['book'] = self.json_codec.encode_book_metadata(book['book'])
                     json_metadata[key]['last_used'] = book['last_used']
                     result = as_bytes(json.dumps(json_metadata, indent=2, default=to_json))
-                    fd.write(('%0.7d\n'%(len(result)+1)).encode('ascii'))
+                    fd.write(f'{len(result) + 1:007}\n'.encode('ascii'))
                     fd.write(result)
                     fd.write(b'\n')
                     count += 1
@@ -1943,7 +1943,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
             try:
                 self.listen_socket.listen(1)
             except:
-                message = 'listen on port %d failed' % port
+                message = f'listen on port {port} failed'
                 self._debug(message)
                 self._close_listen_socket()
                 return message

View File

@@ -28,7 +28,7 @@ def node_mountpoint(node):
 def basic_mount_options():
-    return ['rw', 'noexec', 'nosuid', 'nodev', 'uid=%d'%os.geteuid(), 'gid=%d'%os.getegid()]
+    return ['rw', 'noexec', 'nosuid', 'nodev', f'uid={os.geteuid()}', f'gid={os.getegid()}']
 class UDisks:

View File

@@ -597,7 +597,7 @@ class Device(DeviceConfig, DevicePlugin):
                     continue
                 mp, ret = mount(card, typ)
                 if mp is None:
-                    print('Unable to mount card (Error code: %d)'%ret, file=sys.stderr)
+                    print(f'Unable to mount card (Error code: {ret})', file=sys.stderr)
                 else:
                     if not mp.endswith('/'):
                         mp += '/'

View File

@@ -179,10 +179,7 @@ class USB_DEVICE_DESCRIPTOR(Structure):
     )
     def __repr__(self):
-        return 'USBDevice(class=0x%x sub_class=0x%x protocol=0x%x vendor_id=0x%x product_id=0x%x bcd=0x%x manufacturer=%d product=%d serial_number=%d)' % (
-            self.bDeviceClass, self.bDeviceSubClass, self.bDeviceProtocol,
-            self.idVendor, self.idProduct, self.bcdDevice, self.iManufacturer,
-            self.iProduct, self.iSerialNumber)
+        return f'USBDevice(class=0x{self.bDeviceClass:x} sub_class=0x{self.bDeviceSubClass:x} protocol=0x{self.bDeviceProtocol:x} vendor_id=0x{self.idVendor:x} product_id=0x{self.idProduct:x} bcd=0x{self.bcdDevice:x} manufacturer={self.iManufacturer} product={self.iProduct} serial_number={self.iSerialNumber})'
 class USB_ENDPOINT_DESCRIPTOR(Structure):
@@ -935,7 +932,7 @@ def get_usb_info(usbdev, debug=False): # {{{
             # randomly after some time of my Kindle being
             # connected. Disconnecting and reconnecting causes
             # it to start working again.
-            prints('Failed to read %s from device, with error: [%d] %s' % (name, err.winerror, as_unicode(err)))
+            prints(f'Failed to read {name} from device, with error: [{err.winerror}] {as_unicode(err)}')
     finally:
         CloseHandle(handle)
     return ans

View File

@@ -236,7 +236,7 @@ class PageProcessor(list): # {{{
                 final_fmt = QImage.Format.Format_Indexed8 if uses_256_colors else QImage.Format.Format_Grayscale16
                 if img.format() != final_fmt:
                     img = img.convertToFormat(final_fmt)
-                dest = '%d_%d.%s'%(self.num, i, self.opts.output_format)
+                dest = f'{self.num}_{i}.{self.opts.output_format}'
                 dest = os.path.join(self.dest, dest)
                 with open(dest, 'wb') as f:
                     f.write(image_to_data(img, fmt=self.opts.output_format))

View File

@@ -302,7 +302,7 @@ class ProgressBar:
     def __call__(self, frac, msg=''):
         if msg:
             percent = int(frac*100)
-            self.log('%d%% %s'%(percent, msg))
+            self.log(f'{percent}% {msg}')
 def create_option_parser(args, log):

View File

@@ -131,7 +131,7 @@ class CHMInput(InputFormatPlugin):
             # print('Printing hhcroot')
             # print(etree.tostring(hhcroot, pretty_print=True))
             # print('=============================')
-            log.debug('Found %d section nodes' % toc.count())
+            log.debug(f'Found {toc.count()} section nodes')
             htmlpath = os.path.splitext(hhcpath)[0] + '.html'
             base = os.path.dirname(os.path.abspath(htmlpath))

View File

@@ -175,7 +175,7 @@ class ComicInput(InputFormatPlugin):
         num_pages_per_comic = []
         for i, x in enumerate(comics_):
             title, fname = x
-            cdir = 'comic_%d'%(i+1) if len(comics_) > 1 else '.'
+            cdir = f'comic_{i + 1}' if len(comics_) > 1 else '.'
             cdir = os.path.abspath(cdir)
             if not os.path.exists(cdir):
                 os.makedirs(cdir)
@@ -228,11 +228,11 @@ class ComicInput(InputFormatPlugin):
                 wrapper_page_href = href(wrappers[0])
                 for i in range(num_pages_per_comic[0]):
                     toc.add_item(f'{wrapper_page_href}#page_{i+1}', None,
-                            _('Page')+' %d'%(i+1), play_order=i)
+                            _('Page')+f' {i + 1}', play_order=i)
             else:
                 for i, x in enumerate(wrappers):
-                    toc.add_item(href(x), None, _('Page')+' %d'%(i+1),
+                    toc.add_item(href(x), None, _('Page')+f' {i + 1}',
                             play_order=i)
         else:
             po = 0
@@ -246,12 +246,12 @@ class ComicInput(InputFormatPlugin):
                     wrapper_page_href = href(wrappers[0])
                     for i in range(num_pages):
                         stoc.add_item(f'{wrapper_page_href}#page_{i+1}', None,
-                                _('Page')+' %d'%(i+1), play_order=po)
+                                _('Page')+f' {i + 1}', play_order=po)
                         po += 1
                 else:
                     for i, x in enumerate(wrappers):
                         stoc.add_item(href(x), None,
-                                _('Page')+' %d'%(i+1), play_order=po)
+                                _('Page')+f' {i + 1}', play_order=po)
                         po += 1
         opf.set_toc(toc)
         with open('metadata.opf', 'wb') as m, open('toc.ncx', 'wb') as n:
@@ -282,7 +282,7 @@ class ComicInput(InputFormatPlugin):
         dir = os.path.dirname(pages[0])
         for i, page in enumerate(pages):
             wrapper = WRAPPER%(XHTML_NS, i+1, os.path.basename(page), i+1)
-            page = os.path.join(dir, 'page_%d.xhtml'%(i+1))
+            page = os.path.join(dir, f'page_{i + 1}.xhtml')
             with open(page, 'wb') as f:
                 f.write(wrapper.encode('utf-8'))
             wrappers.append(page)

View File

@@ -41,7 +41,7 @@ class DJVUInput(InputFormatPlugin):
         c = 0
         while os.path.exists(htmlfile):
             c += 1
-            htmlfile = os.path.join(base, 'index%d.html'%c)
+            htmlfile = os.path.join(base, f'index{c}.html')
         with open(htmlfile, 'wb') as f:
             f.write(html.encode('utf-8'))
         odi = options.debug_pipeline

View File

@@ -110,10 +110,10 @@ class FB2Input(InputFormatPlugin):
             note = notes.get(cite, None)
             if note:
                 c = 1
-                while 'cite%d' % c in all_ids:
+                while f'cite{c}' in all_ids:
                     c += 1
                 if not note.get('id', None):
-                    note.set('id', 'cite%d' % c)
+                    note.set('id', f'cite{c}')
                 all_ids.add(note.get('id'))
                 a.set('href', '#{}'.format(note.get('id')))
         for x in result.xpath('//*[@link_note or @link_cite]'):

View File

@@ -89,7 +89,7 @@ class HTMLZInput(InputFormatPlugin):
         c = 0
         while os.path.exists(htmlfile):
             c += 1
-            htmlfile = 'index%d.html'%c
+            htmlfile = f'index{c}.html'
         with open(htmlfile, 'wb') as f:
             f.write(html.encode('utf-8'))
         odi = options.debug_pipeline

View File

@@ -141,7 +141,7 @@ class RTFInput(InputFormatPlugin):
             if fmt is None:
                 fmt = 'wmf'
             count += 1
-            name = '%04d.%s' % (count, fmt)
+            name = f'{count:04}.{fmt}'
             with open(name, 'wb') as f:
                 f.write(data)
             imap[count] = name
@@ -243,7 +243,7 @@ class RTFInput(InputFormatPlugin):
             if style not in border_styles:
                 border_styles.append(style)
             idx = border_styles.index(style)
-            cls = 'border_style%d'%idx
+            cls = f'border_style{idx}'
             style_map[cls] = style
             elem.set('class', cls)
         return style_map

View File

@@ -90,7 +90,7 @@ class SNBInput(InputFormatPlugin):
             for ch in toc.find('.//body'):
                 chapterName = ch.text
                 chapterSrc = ch.get('src')
-                fname = 'ch_%d.htm' % i
+                fname = f'ch_{i}.htm'
                 data = snbFile.GetFileStream('snbc/' + chapterSrc)
                 if data is None:
                     continue

View File

@@ -498,7 +498,7 @@ class HTMLPreProcessor:
         # search / replace using the sr?_search / sr?_replace options
         for i in range(1, 4):
-            search, replace = 'sr%d_search'%i, 'sr%d_replace'%i
+            search, replace = f'sr{i}_search', f'sr{i}_replace'
             search_pattern = getattr(self.extra_opts, search, '')
             replace_txt = getattr(self.extra_opts, replace, '')
             if search_pattern:
@@ -559,7 +559,7 @@ class HTMLPreProcessor:
             name, i = None, 0
             while not name or os.path.exists(os.path.join(odir, name)):
                 i += 1
-                name = '%04d.html'%i
+                name = f'{i:04}.html'
             with open(os.path.join(odir, name), 'wb') as f:
                 f.write(raw.encode('utf-8'))

View File

@@ -140,7 +140,7 @@ class HeuristicProcessor:
             name, i = None, 0
             while not name or os.path.exists(os.path.join(odir, name)):
                 i += 1
-                name = '%04d.html'%i
+                name = f'{i:04}.html'
             with open(os.path.join(odir, name), 'wb') as f:
                 f.write(raw.encode('utf-8'))

View File

@@ -45,8 +45,7 @@ class DjvuChunk:
             print('found', self.type, self.subtype, pos, self.size)
         if self.type in b'FORM'.split():
             if verbose > 0:
-                print('processing substuff %d %d (%x)' % (pos, self.dataend,
-                    self.dataend))
+                print(f'processing substuff {pos} {self.dataend} ({self.dataend:x})')
             numchunks = 0
             while pos < self.dataend:
                 x = DjvuChunk(buf, pos, start+self.size, verbose=verbose)
@@ -54,11 +53,10 @@ class DjvuChunk:
                 self._subchunks.append(x)
                 newpos = pos + x.size + x.headersize + (1 if (x.size % 2) else 0)
                 if verbose > 0:
-                    print('newpos %d %d (%x, %x) %d' % (newpos, self.dataend,
-                        newpos, self.dataend, x.headersize))
+                    print(f'newpos {newpos} {self.dataend} ({newpos:x}, {self.dataend:x}) {x.headersize}')
                 pos = newpos
             if verbose > 0:
-                print(' end of chunk %d (%x)' % (pos, pos))
+                print(f' end of chunk {pos} ({pos:x})')
     def dump(self, verbose=0, indent=1, out=None, txtout=None, maxlevel=100):
         if out:

View File

@@ -155,7 +155,7 @@ def cleanup_markup(log, root, styles, dest_dir, detect_cover, XPath, uuid):
     # Process dir attributes
     class_map = dict(itervalues(styles.classes))
-    parents = ('p', 'div') + tuple('h%d' % i for i in range(1, 7))
+    parents = ('p', 'div') + tuple(f'h{i}' for i in range(1, 7))
     for parent in root.xpath('//*[({})]'.format(' or '.join(f'name()="{t}"' for t in parents))):
         # Ensure that children of rtl parents that are not rtl have an
         # explicit dir set. Also, remove dir from children if it is the same as

View File

@@ -110,7 +110,7 @@ class Fields:
         c = 0
         while self.index_bookmark_prefix in all_ids:
             c += 1
-            self.index_bookmark_prefix = self.index_bookmark_prefix.replace('-', '%d-' % c)
+            self.index_bookmark_prefix = self.index_bookmark_prefix.replace('-', f'{c}-')
         stack = []
         for elem in self.namespace.XPath(
                 '//*[name()="w:p" or name()="w:r" or'
@@ -209,7 +209,7 @@ class Fields:
         def WORD(x):
             return self.namespace.expand('w:' + x)
         self.index_bookmark_counter += 1
-        bmark = xe['anchor'] = '%s%d' % (self.index_bookmark_prefix, self.index_bookmark_counter)
+        bmark = xe['anchor'] = f'{self.index_bookmark_prefix}{self.index_bookmark_counter}'
         p = field.start.getparent()
         bm = p.makeelement(WORD('bookmarkStart'))
         bm.set(WORD('id'), bmark), bm.set(WORD('name'), bmark)

View File

@@ -48,7 +48,7 @@ class Footnotes:
             note = notes.get(fid, None)
             if note is not None and note.type == 'normal':
                 self.counter += 1
-                anchor = 'note_%d' % self.counter
+                anchor = f'note_{self.counter}'
                 self.notes[anchor] = (str(self.counter), note)
                 return anchor, str(self.counter)
         return None, None

View File

@@ -183,7 +183,7 @@ class Images:
         name = base
         while name in exists:
             n, e = base.rpartition('.')[0::2]
-            name = '%s-%d.%s' % (n, c, e)
+            name = f'{n}-{c}.{e}'
             c += 1
         return name
@@ -191,7 +191,7 @@ class Images:
         resized, img = resize_to_fit(raw, max_width, max_height)
         if resized:
             base, ext = os.path.splitext(base)
-            base = base + '-%dx%d%s' % (max_width, max_height, ext)
+            base = base + f'-{max_width}x{max_height}{ext}'
             raw = image_to_data(img, fmt=ext[1:])
         return raw, base, resized

View File

@@ -93,7 +93,7 @@ def generate_anchor(name, existing):
     x = y = 'id_' + re.sub(r'[^0-9a-zA-Z_]', '', ascii_text(name)).lstrip('_')
     c = 1
     while y in existing:
-        y = '%s_%d' % (x, c)
+        y = f'{x}_{c}'
         c += 1
     return y

View File

@@ -40,7 +40,7 @@ def alphabet(val, lower=True):
 alphabet_map = {
     'lower-alpha':alphabet, 'upper-alpha':partial(alphabet, lower=False),
     'lower-roman':lambda x: roman(x).lower(), 'upper-roman':roman,
-    'decimal-leading-zero': lambda x: '0%d' % x
+    'decimal-leading-zero': lambda x: f'0{x}'
 }
@@ -73,7 +73,7 @@ class Level:
             if x > ilvl or x not in counter:
                 return ''
             val = counter[x] - (0 if x == ilvl else 1)
-            formatter = alphabet_map.get(self.fmt, lambda x: '%d' % x)
+            formatter = alphabet_map.get(self.fmt, lambda x: f'{x}')
             return formatter(val)
         return re.sub(r'%(\d+)', sub, template).rstrip() + '\xa0'

View File

@@ -427,7 +427,7 @@ class Styles:
         ans, _ = self.classes.get(h, (None, None))
         if ans is None:
             self.counter[prefix] += 1
-            ans = '%s_%d' % (prefix, self.counter[prefix])
+            ans = f'{prefix}_{self.counter[prefix]}'
             self.classes[h] = (ans, css)
         return ans

View File

@@ -460,9 +460,9 @@ class Table:
             return (m - (m % n)) // n
         if c is not None:
             odd_column_band = (divisor(c, self.table_style.col_band_size) % 2) == 1
-            overrides.append('band%dVert' % (1 if odd_column_band else 2))
+            overrides.append(f'band{1 if odd_column_band else 2}Vert')
             odd_row_band = (divisor(r, self.table_style.row_band_size) % 2) == 1
-            overrides.append('band%dHorz' % (1 if odd_row_band else 2))
+            overrides.append(f'band{1 if odd_row_band else 2}Horz')
         # According to the OOXML spec columns should have higher override
         # priority than rows, but Word seems to do it the other way around.

View File

@@ -518,7 +518,7 @@ class Convert:
             m = re.match(r'heading\s+(\d+)$', style.style_name or '', re.IGNORECASE)
             if m is not None:
                 n = min(6, max(1, int(m.group(1))))
-                dest.tag = 'h%d' % n
+                dest.tag = f'h{n}'
                 dest.set('data-heading-level', str(n))
                 if style.bidi is True:

View File

@@ -30,7 +30,7 @@ def from_headings(body, log, namespace, num_levels=3):
     def ensure_id(elem):
         ans = elem.get('id', None)
         if not ans:
-            ans = 'toc_id_%d' % (next(idcount) + 1)
+            ans = f'toc_id_{next(idcount) + 1}'
             elem.set('id', ans)
         return ans

View File

@@ -134,7 +134,7 @@ class DocumentRelationships:
     def add_relationship(self, target, rtype, target_mode=None):
         ans = self.get_relationship_id(target, rtype, target_mode)
         if ans is None:
-            ans = 'rId%d' % (len(self.rmap) + 1)
+            ans = f'rId{len(self.rmap) + 1}'
             self.rmap[(target, rtype, target_mode)] = ans
         return ans

View File

@@ -67,8 +67,8 @@ class FontsManager:
             item = ef['item']
             rid = rel_map.get(item)
             if rid is None:
-                rel_map[item] = rid = 'rId%d' % num
-                fname = 'fonts/font%d.odttf' % num
+                rel_map[item] = rid = f'rId{num}'
+                fname = f'fonts/font{num}.odttf'
                 makeelement(embed_relationships, 'Relationship', Id=rid, Type=self.namespace.names['EMBEDDED_FONT'], Target=fname)
                 font_data_map['word/' + fname] = obfuscate_font_data(item.data, key)
             makeelement(font, 'w:embed' + tag, r_id=rid,

@ -92,7 +92,7 @@ class LinksManager:
i, bname = 0, name i, bname = 0, name
while name in self.used_bookmark_names: while name in self.used_bookmark_names:
i += 1 i += 1
name = bname + ('_%d' % i) name = bname + f'_{i}'
self.anchor_map[key] = name self.anchor_map[key] = name
self.used_bookmark_names.add(name) self.used_bookmark_names.add(name)
return name return name

@ -84,7 +84,7 @@ class NumberingDefinition:
makeelement = self.namespace.makeelement makeelement = self.namespace.makeelement
an = makeelement(parent, 'w:abstractNum', w_abstractNumId=str(self.num_id)) an = makeelement(parent, 'w:abstractNum', w_abstractNumId=str(self.num_id))
makeelement(an, 'w:multiLevelType', w_val='hybridMultilevel') makeelement(an, 'w:multiLevelType', w_val='hybridMultilevel')
makeelement(an, 'w:name', w_val='List %d' % (self.num_id + 1)) makeelement(an, 'w:name', w_val=f'List {self.num_id + 1}')
for level in self.levels: for level in self.levels:
level.serialize(an, makeelement) level.serialize(an, makeelement)

@ -744,7 +744,7 @@ class StylesManager:
if style.outline_level is None: if style.outline_level is None:
val = f'Para %0{snum}d' % i val = f'Para %0{snum}d' % i
else: else:
val = 'Heading %d' % (style.outline_level + 1) val = f'Heading {style.outline_level + 1}'
heading_styles.append(style) heading_styles.append(style)
style.id = style.name = val style.id = style.name = val
style.seq = i style.seq = i
@ -764,7 +764,7 @@ class StylesManager:
ds_counts[run.descendant_style] += run.style_weight ds_counts[run.descendant_style] += run.style_weight
rnum = len(str(max(1, len(ds_counts) - 1))) rnum = len(str(max(1, len(ds_counts) - 1)))
for i, (text_style, count) in enumerate(ds_counts.most_common()): for i, (text_style, count) in enumerate(ds_counts.most_common()):
text_style.id = 'Text%d' % i text_style.id = f'Text{i}'
text_style.name = f'%0{rnum}d Text' % i text_style.name = f'%0{rnum}d Text' % i
text_style.seq = i text_style.seq = i
self.descendant_text_styles = sorted(descendant_style_map, key=attrgetter('seq')) self.descendant_text_styles = sorted(descendant_style_map, key=attrgetter('seq'))
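
Reviewer note: the 'Para' and 'Text' names stay two-stage formats: the f-string first builds a %-template with the computed width (e.g. 'Para %03d' when snum is 3), and the % operator then fills in i. A minimal sketch; the nested-spec form f'Para {i:0{snum}d}' would be the single-stage equivalent:

    # Illustrative only: both spellings zero-pad i to snum digits.
    snum, i = 3, 7
    assert f'Para %0{snum}d' % i == 'Para 007'
    assert f'Para {i:0{snum}d}' == 'Para 007'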

@ -48,7 +48,7 @@ def add_page_map(opfpath, opts):
oeb = OEBBook(opfpath) oeb = OEBBook(opfpath)
selector = XPath(opts.page, namespaces=NSMAP) selector = XPath(opts.page, namespaces=NSMAP)
name_for = build_name_for(opts.page_names) name_for = build_name_for(opts.page_names)
idgen = ('calibre-page-%d' % n for n in count(1)) idgen = (f'calibre-page-{n}' for n in count(1))
for item in oeb.spine: for item in oeb.spine:
data = item.data data = item.data
for elem in selector(data): for elem in selector(data):
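
Reviewer note: idgen is now a generator expression over count(1), so page ids are produced lazily on demand. A minimal usage sketch:

    # Illustrative only: ids start at 1 and are generated as consumed.
    from itertools import count
    idgen = (f'calibre-page-{n}' for n in count(1))
    assert next(idgen) == 'calibre-page-1'
    assert next(idgen) == 'calibre-page-2'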

@ -137,7 +137,7 @@ def sony_metadata(oeb):
for i, section in enumerate(toc): for i, section in enumerate(toc):
if not section.href: if not section.href:
continue continue
secid = 'section%d'%i secid = f'section{i}'
sectitle = section.title sectitle = section.title
if not sectitle: if not sectitle:
sectitle = _('Unknown') sectitle = _('Unknown')
@ -170,7 +170,7 @@ def sony_metadata(oeb):
desc = section.description desc = section.description
if not desc: if not desc:
desc = '' desc = ''
aid = 'article%d'%j aid = f'article{j}'
entries.append(SONY_ATOM_ENTRY.format( entries.append(SONY_ATOM_ENTRY.format(
title=xml(atitle), title=xml(atitle),

@ -116,7 +116,7 @@ class FB2MLizer:
metadata['title'] = self.oeb_book.metadata.title[0].value metadata['title'] = self.oeb_book.metadata.title[0].value
metadata['appname'] = __appname__ metadata['appname'] = __appname__
metadata['version'] = __version__ metadata['version'] = __version__
metadata['date'] = '%i.%i.%i' % (datetime.now().day, datetime.now().month, datetime.now().year) metadata['date'] = f'{datetime.now().day}.{datetime.now().month}.{datetime.now().year}'
if self.oeb_book.metadata.language: if self.oeb_book.metadata.language:
lc = lang_as_iso639_1(self.oeb_book.metadata.language[0].value) lc = lang_as_iso639_1(self.oeb_book.metadata.language[0].value)
if not lc: if not lc:

@ -153,7 +153,7 @@ class HTMLFile:
return hash(self.path) return hash(self.path)
def __str__(self): def __str__(self):
return 'HTMLFile:%d:%s:%r'%(self.level, 'b' if self.is_binary else 'a', self.path) return f"HTMLFile:{self.level}:{'b' if self.is_binary else 'a'}:{self.path!r}"
def __repr__(self): def __repr__(self):
return str(self) return str(self)
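
Reviewer note: the outer quotes switch to double quotes here because, before Python 3.12, a replacement field cannot reuse the f-string's own delimiter, and the embedded conditional needs string literals of its own. An illustrative check with made-up values:

    # Illustrative only: inner literals must use the other quote style.
    level, is_binary, path = 0, False, '/tmp/x.html'
    assert f"HTMLFile:{level}:{'b' if is_binary else 'a'}:{path!r}" == "HTMLFile:0:a:'/tmp/x.html'"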

@ -242,7 +242,7 @@ class UnBinary:
if flags & FLAG_ATOM: if flags & FLAG_ATOM:
if not self.tag_atoms or tag not in self.tag_atoms: if not self.tag_atoms or tag not in self.tag_atoms:
raise LitError( raise LitError(
'atom tag %d not in atom tag list' % tag) f'atom tag {tag} not in atom tag list')
tag_name = self.tag_atoms[tag] tag_name = self.tag_atoms[tag]
current_map = self.attr_atoms current_map = self.attr_atoms
elif tag < len(self.tag_map): elif tag < len(self.tag_map):
@ -257,8 +257,7 @@ class UnBinary:
buf.write(encode(tag_name)) buf.write(encode(tag_name))
elif flags & FLAG_CLOSING: elif flags & FLAG_CLOSING:
if depth == 0: if depth == 0:
raise LitError('Extra closing tag %s at %d'%(tag_name, raise LitError(f'Extra closing tag {tag_name} at {self.cpos}')
self.cpos))
break break
elif state == 'get attr': elif state == 'get attr':
@ -290,7 +289,7 @@ class UnBinary:
attr = self.attr_map[oc] attr = self.attr_map[oc]
if not attr or not isinstance(attr, string_or_bytes): if not attr or not isinstance(attr, string_or_bytes):
raise LitError( raise LitError(
'Unknown attribute %d in tag %s' % (oc, tag_name)) f'Unknown attribute {oc} in tag {tag_name}')
if attr.startswith('%'): if attr.startswith('%'):
in_censorship = True in_censorship = True
state = 'get value length' state = 'get value length'
@ -315,7 +314,7 @@ class UnBinary:
if oc == 0xffff: if oc == 0xffff:
continue continue
if count < 0 or count > (len(bin) - self.cpos): if count < 0 or count > (len(bin) - self.cpos):
raise LitError('Invalid character count %d' % count) raise LitError(f'Invalid character count {count}')
elif state == 'get value': elif state == 'get value':
if count == 0xfffe: if count == 0xfffe:
@ -342,7 +341,7 @@ class UnBinary:
elif state == 'get custom length': elif state == 'get custom length':
count = oc - 1 count = oc - 1
if count <= 0 or count > len(bin)-self.cpos: if count <= 0 or count > len(bin)-self.cpos:
raise LitError('Invalid character count %d' % count) raise LitError(f'Invalid character count {count}')
dynamic_tag += 1 dynamic_tag += 1
state = 'get custom' state = 'get custom'
tag_name = '' tag_name = ''
@ -357,7 +356,7 @@ class UnBinary:
elif state == 'get attr length': elif state == 'get attr length':
count = oc - 1 count = oc - 1
if count <= 0 or count > (len(bin) - self.cpos): if count <= 0 or count > (len(bin) - self.cpos):
raise LitError('Invalid character count %d' % count) raise LitError(f'Invalid character count {count}')
buf.write(b' ') buf.write(b' ')
state = 'get custom attr' state = 'get custom attr'
@ -371,7 +370,7 @@ class UnBinary:
elif state == 'get href length': elif state == 'get href length':
count = oc - 1 count = oc - 1
if count <= 0 or count > (len(bin) - self.cpos): if count <= 0 or count > (len(bin) - self.cpos):
raise LitError('Invalid character count %d' % count) raise LitError(f'Invalid character count {count}')
href = '' href = ''
state = 'get href' state = 'get href'
@ -397,8 +396,7 @@ class DirectoryEntry:
self.size = size self.size = size
def __repr__(self): def __repr__(self):
return 'DirectoryEntry(name=%s, section=%d, offset=%d, size=%d)' \ return f'DirectoryEntry(name={repr(self.name)}, section={self.section}, offset={self.offset}, size={self.size})'
% (repr(self.name), self.section, self.offset, self.size)
def __str__(self): def __str__(self):
return repr(self) return repr(self)
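
Reviewer note: repr(self.name) inside the braces is equivalent to the !r conversion; the translation keeps the explicit call. Illustratively:

    # Illustrative only: {x!r} and {repr(x)} render identically.
    name = 'foo'
    assert f'name={name!r}' == f'name={repr(name)}' == "name='foo'"
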
@ -429,9 +427,7 @@ class ManifestItem:
return self.internal == other return self.internal == other
def __repr__(self): def __repr__(self):
return ( return f"ManifestItem(internal={self.internal!r}, path={self.path!r}, mime_type={self.mime_type!r}, offset={self.offset}, root={self.root!r}, state={self.state!r})"
'ManifestItem(internal=%r, path=%r, mime_type=%r, offset=%d, root=%r, state=%r)'
) % (self.internal, self.path, self.mime_type, self.offset, self.root, self.state)
def preserve(function): def preserve(function):
@ -462,7 +458,7 @@ class LitFile:
if self.magic != b'ITOLITLS': if self.magic != b'ITOLITLS':
raise LitError('Not a valid LIT file') raise LitError('Not a valid LIT file')
if self.version != 1: if self.version != 1:
raise LitError('Unknown LIT version %d' % (self.version,)) raise LitError(f'Unknown LIT version {self.version}')
self.read_secondary_header() self.read_secondary_header()
self.read_header_pieces() self.read_header_pieces()
self.read_section_names() self.read_section_names()
@ -553,7 +549,7 @@ class LitFile:
if blocktype == b'CAOL': if blocktype == b'CAOL':
if blockver != 2: if blockver != 2:
raise LitError( raise LitError(
'Unknown CAOL block format %d' % blockver) f'Unknown CAOL block format {blockver}')
self.creator_id = u32(byts[offset+12:]) self.creator_id = u32(byts[offset+12:])
self.entry_chunklen = u32(byts[offset+20:]) self.entry_chunklen = u32(byts[offset+20:])
self.count_chunklen = u32(byts[offset+24:]) self.count_chunklen = u32(byts[offset+24:])
@ -563,7 +559,7 @@ class LitFile:
elif blocktype == b'ITSF': elif blocktype == b'ITSF':
if blockver != 4: if blockver != 4:
raise LitError( raise LitError(
'Unknown ITSF block format %d' % blockver) f'Unknown ITSF block format {blockver}')
if u32(byts[offset+4+16:]): if u32(byts[offset+4+16:]):
raise LitError('This file has a 64bit content offset') raise LitError('This file has a 64bit content offset')
self.content_offset = u32(byts[offset+16:]) self.content_offset = u32(byts[offset+16:])

@ -138,9 +138,9 @@ class TextBlock(etree.XSLTExtension):
classes = [] classes = []
bs = node.get('blockstyle') bs = node.get('blockstyle')
if bs in self.styles.block_style_map: if bs in self.styles.block_style_map:
classes.append('bs%d'%self.styles.block_style_map[bs]) classes.append(f'bs{self.styles.block_style_map[bs]}')
if ts in self.styles.text_style_map: if ts in self.styles.text_style_map:
classes.append('ts%d'%self.styles.text_style_map[ts]) classes.append(f'ts{self.styles.text_style_map[ts]}')
if classes: if classes:
root.set('class', ' '.join(classes)) root.set('class', ' '.join(classes))
objid = node.get('objid', None) objid = node.get('objid', None)
@ -218,7 +218,7 @@ class TextBlock(etree.XSLTExtension):
def process_container(self, child, tgt): def process_container(self, child, tgt):
idx = self.styles.get_text_styles(child) idx = self.styles.get_text_styles(child)
if idx is not None: if idx is not None:
tgt.set('class', 'ts%d'%idx) tgt.set('class', f'ts{idx}')
self.parent.append(tgt) self.parent.append(tgt)
orig_parent = self.parent orig_parent = self.parent
self.parent = tgt self.parent = tgt
@ -305,7 +305,7 @@ class Styles(etree.XSLTExtension):
for i, s in enumerate(w): for i, s in enumerate(w):
if not s: if not s:
continue continue
rsel = '.%s%d'%(sel, i) rsel = f'.{sel}{i}'
s = join(s) s = join(s)
f.write(as_bytes(rsel + ' {\n\t' + s + '\n}\n\n')) f.write(as_bytes(rsel + ' {\n\t' + s + '\n}\n\n'))
@ -331,8 +331,8 @@ class Styles(etree.XSLTExtension):
if a == 255: if a == 255:
return None return None
if a == 0: if a == 0:
return 'rgb(%d,%d,%d)'%(r,g,b) return f'rgb({r},{g},{b})'
return 'rgba(%d,%d,%d,%f)'%(r,g,b,1.-a/255.) return f'rgba({r},{g},{b},{1.0 - a / 255.0:f})'
except: except:
return None return None
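
Reviewer note: the bare :f spec matches %f, i.e. six decimal places by default, so the rgba alpha keeps its old rendering. A quick check:

    # Illustrative only: both default to six decimal places.
    a = 127
    assert '%f' % (1. - a / 255.) == f'{1.0 - a / 255.0:f}' == '0.501961'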

@ -116,7 +116,7 @@ class LRFDocument(LRFMetaFile):
close = '</Main>\n' close = '</Main>\n'
pt_id = page_tree.id pt_id = page_tree.id
else: else:
pages += '<PageTree objid="%d">\n'%(page_tree.id,) pages += f'<PageTree objid="{page_tree.id}">\n'
close = '</PageTree>\n' close = '</PageTree>\n'
for page in page_tree: for page in page_tree:
pages += str(page) pages += str(page)

@ -261,7 +261,7 @@ class Color:
return (self.r, self.g, self.b, 0xff-self.a)[i] # In Qt 0xff is opaque while in LRS 0x00 is opaque return (self.r, self.g, self.b, 0xff-self.a)[i] # In Qt 0xff is opaque while in LRS 0x00 is opaque
def to_html(self): def to_html(self):
return 'rgb(%d, %d, %d)'%(self.r, self.g, self.b) return f'rgb({self.r}, {self.g}, {self.b})'
class EmptyPageElement: class EmptyPageElement:
@ -303,7 +303,7 @@ class Wait(EmptyPageElement):
self.time = time self.time = time
def __str__(self): def __str__(self):
return '\n<Wait time="%d" />\n'%(self.time) return f'\n<Wait time="{self.time}" />\n'
class Locate(EmptyPageElement): class Locate(EmptyPageElement):
@ -323,8 +323,7 @@ class BlockSpace(EmptyPageElement):
self.xspace, self.yspace = xspace, yspace self.xspace, self.yspace = xspace, yspace
def __str__(self): def __str__(self):
return '\n<BlockSpace xspace="%d" yspace="%d" />\n'%\ return f'\n<BlockSpace xspace="{self.xspace}" yspace="{self.yspace}" />\n'
(self.xspace, self.yspace)
class Page(LRFStream): class Page(LRFStream):
@ -420,7 +419,7 @@ class Page(LRFStream):
yield from self.content yield from self.content
def __str__(self): def __str__(self):
s = '\n<Page pagestyle="%d" objid="%d">\n'%(self.style_id, self.id) s = f'\n<Page pagestyle="{self.style_id}" objid="{self.id}">\n'
for i in self: for i in self:
s += str(i) s += str(i)
s += '\n</Page>\n' s += '\n</Page>\n'
@ -470,11 +469,11 @@ class BlockAttr(StyleObject, LRFObject):
margin = str(obj.sidemargin) + 'px' margin = str(obj.sidemargin) + 'px'
ans += item('margin-left: {m}; margin-right: {m};'.format(**dict(m=margin))) ans += item('margin-left: {m}; margin-right: {m};'.format(**dict(m=margin)))
if hasattr(obj, 'topskip'): if hasattr(obj, 'topskip'):
ans += item('margin-top: %dpx;'%obj.topskip) ans += item(f'margin-top: {obj.topskip}px;')
if hasattr(obj, 'footskip'): if hasattr(obj, 'footskip'):
ans += item('margin-bottom: %dpx;'%obj.footskip) ans += item(f'margin-bottom: {obj.footskip}px;')
if hasattr(obj, 'framewidth'): if hasattr(obj, 'framewidth'):
ans += item('border: solid %dpx'%obj.framewidth) ans += item(f'border: solid {obj.framewidth}px')
if hasattr(obj, 'framecolor') and obj.framecolor.a < 255: if hasattr(obj, 'framecolor') and obj.framecolor.a < 255:
ans += item(f'border-color: {obj.framecolor.to_html()};') ans += item(f'border-color: {obj.framecolor.to_html()};')
if hasattr(obj, 'bgcolor') and obj.bgcolor.a < 255: if hasattr(obj, 'bgcolor') and obj.bgcolor.a < 255:
@ -602,9 +601,9 @@ class Block(LRFStream, TextCSS):
self.attrs[attr] = getattr(self, attr) self.attrs[attr] = getattr(self, attr)
def __str__(self): def __str__(self):
s = '\n<%s objid="%d" blockstyle="%s" '%(self.name, self.id, getattr(self, 'style_id', '')) s = f"\n<{self.name} objid=\"{self.id}\" blockstyle=\"{getattr(self, 'style_id', '')}\" "
if hasattr(self, 'textstyle_id'): if hasattr(self, 'textstyle_id'):
s += 'textstyle="%d" '%(self.textstyle_id,) s += f'textstyle="{self.textstyle_id}" '
for attr in self.attrs: for attr in self.attrs:
s += f'{attr}="{self.attrs[attr]}" ' s += f'{attr}="{self.attrs[attr]}" '
if self.name != 'ImageBlock': if self.name != 'ImageBlock':
@ -933,8 +932,7 @@ class Image(LRFObject):
data = property(fget=lambda self: self._document.objects[self.refstream].stream) data = property(fget=lambda self: self._document.objects[self.refstream].stream)
def __str__(self): def __str__(self):
return '<Image objid="%s" x0="%d" y0="%d" x1="%d" y1="%d" xsize="%d" ysize="%d" refstream="%d" />\n'%\ return f'<Image objid="{self.id}" x0="{self.x0}" y0="{self.y0}" x1="{self.x1}" y1="{self.y1}" xsize="{self.xsize}" ysize="{self.ysize}" refstream="{self.refstream}" />\n'
(self.id, self.x0, self.y0, self.x1, self.y1, self.xsize, self.ysize, self.refstream)
class PutObj(EmptyPageElement): class PutObj(EmptyPageElement):
@ -944,7 +942,7 @@ class PutObj(EmptyPageElement):
self.object = objects[refobj] self.object = objects[refobj]
def __str__(self): def __str__(self):
return '<PutObj x1="%d" y1="%d" refobj="%d" />'%(self.x1, self.y1, self.refobj) return f'<PutObj x1="{self.x1}" y1="{self.y1}" refobj="{self.refobj}" />'
class Canvas(LRFStream): class Canvas(LRFStream):

@ -341,7 +341,7 @@ class LrsObject:
if labelName is None: if labelName is None:
labelName = name labelName = name
if labelDecorate: if labelDecorate:
label = '%s.%d' % (labelName, self.objId) label = f'{labelName}.{self.objId}'
else: else:
label = str(self.objId) label = str(self.objId)
element.attrib[objlabel] = label element.attrib[objlabel] = label

@ -188,7 +188,7 @@ class Tag:
self.offset = stream.tell() self.offset = stream.tell()
tag_id = struct.unpack('<BB', stream.read(2)) tag_id = struct.unpack('<BB', stream.read(2))
if tag_id[1] != 0xF5: if tag_id[1] != 0xF5:
raise LRFParseError('Bad tag ID %02X at %d'%(tag_id[1], self.offset)) raise LRFParseError(f'Bad tag ID {tag_id[1]:02X} at {self.offset}')
if tag_id[0] not in self.__class__.tags: if tag_id[0] not in self.__class__.tags:
raise LRFParseError(f'Unknown tag ID: F5{tag_id[0]:02X}') raise LRFParseError(f'Unknown tag ID: F5{tag_id[0]:02X}')
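
Reviewer note: the :02X spec reproduces %02X exactly: zero-padded to width 2, uppercase hex. Illustratively:

    # Illustrative only: identical zero-padded uppercase hex.
    assert '%02X' % 0xf5 == f'{0xf5:02X}' == 'F5'
    assert f'{0xa:02X}' == '0A'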

@ -227,7 +227,7 @@ class MetadataUpdater:
# Fetch the existing title # Fetch the existing title
title_offset, = unpack('>L', self.record0[0x54:0x58]) title_offset, = unpack('>L', self.record0[0x54:0x58])
title_length, = unpack('>L', self.record0[0x58:0x5c]) title_length, = unpack('>L', self.record0[0x58:0x5c])
title_in_file, = unpack('%ds' % (title_length), self.record0[title_offset:title_offset + title_length]) title_in_file, = unpack(f'{title_length}s', self.record0[title_offset:title_offset + title_length])
# Adjust length to accommodate PrimaryINDX if necessary # Adjust length to accommodate PrimaryINDX if necessary
mobi_header_length, = unpack('>L', self.record0[0x14:0x18]) mobi_header_length, = unpack('>L', self.record0[0x14:0x18])
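
Reviewer note: struct format strings can be assembled with f-strings just as with %; the '{n}s' form reads exactly n bytes as a single field. A minimal sketch with a hypothetical title buffer:

    # Illustrative only: read a variable-length string field in one unpack.
    import struct
    raw = b'Moby Dick'
    title, = struct.unpack(f'{len(raw)}s', raw)
    assert title == b'Moby Dick'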

@ -226,7 +226,7 @@ class ManifestItem(Resource): # {{{
return self.href() return self.href()
if index == 1: if index == 1:
return self.media_type return self.media_type
raise IndexError('%d out of bounds.'%index) raise IndexError(f'{index} out of bounds.')
# }}} # }}}
@ -237,7 +237,7 @@ class Manifest(ResourceCollection): # {{{
self.append(ManifestItem.from_opf_manifest_item(item, dir)) self.append(ManifestItem.from_opf_manifest_item(item, dir))
id = item.get('id', '') id = item.get('id', '')
if not id: if not id:
id = 'id%d'%self.next_id id = f'id{self.next_id}'
self[-1].id = id self[-1].id = id
self.next_id += 1 self.next_id += 1
@ -261,7 +261,7 @@ class Manifest(ResourceCollection): # {{{
mi = ManifestItem(path, is_path=True) mi = ManifestItem(path, is_path=True)
if mt: if mt:
mi.mime_type = mt mi.mime_type = mt
mi.id = 'id%d'%m.next_id mi.id = f'id{m.next_id}'
m.next_id += 1 m.next_id += 1
m.append(mi) m.append(mi)
return m return m
@ -270,7 +270,7 @@ class Manifest(ResourceCollection): # {{{
mi = ManifestItem(path, is_path=True) mi = ManifestItem(path, is_path=True)
if mime_type: if mime_type:
mi.mime_type = mime_type mi.mime_type = mime_type
mi.id = 'id%d'%self.next_id mi.id = f'id{self.next_id}'
self.next_id += 1 self.next_id += 1
self.append(mi) self.append(mi)
return mi.id return mi.id
@ -787,7 +787,7 @@ class OPF: # {{{
c = 1 c = 1
while manifest_id in ids: while manifest_id in ids:
c += 1 c += 1
manifest_id = 'id%d'%c manifest_id = f'id{c}'
if not media_type: if not media_type:
media_type = 'application/xhtml+xml' media_type = 'application/xhtml+xml'
ans = etree.Element('{{{}}}item'.format(self.NAMESPACES['opf']), ans = etree.Element('{{{}}}item'.format(self.NAMESPACES['opf']),
@ -801,7 +801,7 @@ class OPF: # {{{
def replace_manifest_item(self, item, items): def replace_manifest_item(self, item, items):
items = [self.create_manifest_item(*i) for i in items] items = [self.create_manifest_item(*i) for i in items]
for i, item2 in enumerate(items): for i, item2 in enumerate(items):
item2.set('id', item.get('id')+'.%d'%(i+1)) item2.set('id', item.get('id')+f'.{i + 1}')
manifest = item.getparent() manifest = item.getparent()
index = manifest.index(item) index = manifest.index(item)
manifest[index:index+1] = items manifest[index:index+1] = items

@ -39,7 +39,7 @@ def read_info(outputdir, get_cover):
try: try:
raw = subprocess.check_output([pdfinfo, '-enc', 'UTF-8', '-isodates', 'src.pdf']) raw = subprocess.check_output([pdfinfo, '-enc', 'UTF-8', '-isodates', 'src.pdf'])
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
prints('pdfinfo errored out with return code: %d'%e.returncode) prints(f'pdfinfo errored out with return code: {e.returncode}')
return None return None
try: try:
info_raw = raw.decode('utf-8') info_raw = raw.decode('utf-8')
@ -63,7 +63,7 @@ def read_info(outputdir, get_cover):
try: try:
raw = subprocess.check_output([pdfinfo, '-meta', 'src.pdf']).strip() raw = subprocess.check_output([pdfinfo, '-meta', 'src.pdf']).strip()
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
prints('pdfinfo failed to read XML metadata with return code: %d'%e.returncode) prints(f'pdfinfo failed to read XML metadata with return code: {e.returncode}')
else: else:
parts = re.split(br'^Metadata:', raw, 1, flags=re.MULTILINE) parts = re.split(br'^Metadata:', raw, 1, flags=re.MULTILINE)
if len(parts) > 1: if len(parts) > 1:
@ -77,7 +77,7 @@ def read_info(outputdir, get_cover):
subprocess.check_call([pdftoppm, '-singlefile', '-jpeg', '-cropbox', subprocess.check_call([pdftoppm, '-singlefile', '-jpeg', '-cropbox',
'src.pdf', 'cover']) 'src.pdf', 'cover'])
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
prints('pdftoppm errored out with return code: %d'%e.returncode) prints(f'pdftoppm errored out with return code: {e.returncode}')
return ans return ans

@ -263,7 +263,7 @@ class TOC(list):
if not text: if not text:
text = '' text = ''
c[1] += 1 c[1] += 1
item_id = 'num_%d'%c[1] item_id = f'num_{c[1]}'
text = clean_xml_chars(text) text = clean_xml_chars(text)
elem = E.navPoint( elem = E.navPoint(
E.navLabel(E.text(re.sub(r'\s+', ' ', text))), E.navLabel(E.text(re.sub(r'\s+', ' ', text))),

@ -148,7 +148,7 @@ class MetadataUpdater:
for tag in self.topaz_headers: for tag in self.topaz_headers:
print(f'{tag}: ') print(f'{tag}: ')
num_recs = len(self.topaz_headers[tag]['blocks']) num_recs = len(self.topaz_headers[tag]['blocks'])
print(' num_recs: %d' % num_recs) print(f' num_recs: {num_recs}')
if num_recs: if num_recs:
print(' starting offset: 0x{:x}'.format(self.topaz_headers[tag]['blocks'][0]['offset'])) print(' starting offset: 0x{:x}'.format(self.topaz_headers[tag]['blocks'][0]['offset']))

@ -81,7 +81,7 @@ def ensure_unique(template, existing):
c = 0 c = 0
while q in existing: while q in existing:
c += 1 c += 1
q = '%s-%d%s' % (b, c, e) q = f'{b}-{c}{e}'
return q return q

@ -585,7 +585,7 @@ def find_nsmap(elems):
ans[pp] = ns ans[pp] = ns
else: else:
i += 1 i += 1
ans['ns%d' % i] = ns ans[f'ns{i}'] = ns
return ans return ans

@ -46,19 +46,19 @@ class ContainerHeader:
def __str__(self): def __str__(self):
ans = [('*'*10) + ' Container Header ' + ('*'*10)] ans = [('*'*10) + ' Container Header ' + ('*'*10)]
a = ans.append a = ans.append
a('Record size: %d' % self.record_size) a(f'Record size: {self.record_size}')
a('Type: %d' % self.type) a(f'Type: {self.type}')
a('Total number of records in this container: %d' % self.count) a(f'Total number of records in this container: {self.count}')
a(f'Encoding: {self.encoding}') a(f'Encoding: {self.encoding}')
a(f'Unknowns1: {self.unknowns1}') a(f'Unknowns1: {self.unknowns1}')
a('Num of resource records: %d' % self.num_of_resource_records) a(f'Num of resource records: {self.num_of_resource_records}')
a('Num of non-dummy resource records: %d' % self.num_of_non_dummy_resource_records) a(f'Num of non-dummy resource records: {self.num_of_non_dummy_resource_records}')
a('Offset to href record: %d' % self.offset_to_href_record) a(f'Offset to href record: {self.offset_to_href_record}')
a(f'Unknowns2: {self.unknowns2}') a(f'Unknowns2: {self.unknowns2}')
a('Header length: %d' % self.header_length) a(f'Header length: {self.header_length}')
a(f'Title Length: {self.title_length}') a(f'Title Length: {self.title_length}')
a(f'hrefs: {self.hrefs}') a(f'hrefs: {self.hrefs}')
a('Null bytes after EXTH: %d' % self.null_bytes_after_exth) a(f'Null bytes after EXTH: {self.null_bytes_after_exth}')
if len(self.bytes_after_exth) != self.null_bytes_after_exth: if len(self.bytes_after_exth) != self.null_bytes_after_exth:
a('Non-null bytes present after EXTH header!!!!') a('Non-null bytes present after EXTH header!!!!')
return '\n'.join(ans) + '\n\n' + str(self.exth) + '\n\n' + (f'Title: {self.title}') return '\n'.join(ans) + '\n\n' + str(self.exth) + '\n\n' + (f'Title: {self.title}')

@ -116,8 +116,7 @@ class Record: # {{{
@property @property
def header(self): def header(self):
return 'Offset: %d Flags: %d UID: %d First 4 bytes: %r Size: %d'%(self.offset, self.flags, return f'Offset: {self.offset} Flags: {self.flags} UID: {self.uid} First 4 bytes: {self.raw[:4]!r} Size: {len(self.raw)}'
self.uid, self.raw[:4], len(self.raw))
# }}} # }}}
@ -213,7 +212,7 @@ class EXTHRecord:
self.data = binascii.hexlify(self.data) self.data = binascii.hexlify(self.data)
def __str__(self): def __str__(self):
return '%s (%d): %r'%(self.name, self.type, self.data) return f'{self.name} ({self.type}): {self.data!r}'
class EXTHHeader: class EXTHHeader:
@ -254,8 +253,8 @@ class EXTHHeader:
def __str__(self): def __str__(self):
ans = ['*'*20 + ' EXTH Header '+ '*'*20] ans = ['*'*20 + ' EXTH Header '+ '*'*20]
ans.append('EXTH header length: %d'%self.length) ans.append(f'EXTH header length: {self.length}')
ans.append('Number of EXTH records: %d'%self.count) ans.append(f'Number of EXTH records: {self.count}')
ans.append('EXTH records...') ans.append('EXTH records...')
for r in self.records: for r in self.records:
ans.append(str(r)) ans.append(str(r))
@ -416,7 +415,7 @@ class MOBIHeader: # {{{
self.last_resource_record = self.exth.kf8_header_index - 2 self.last_resource_record = self.exth.kf8_header_index - 2
def __str__(self): def __str__(self):
ans = ['*'*20 + ' MOBI %d Header '%self.file_version+ '*'*20] ans = ['*'*20 + f' MOBI {self.file_version} Header '+ '*'*20]
a = ans.append a = ans.append
@ -427,39 +426,39 @@ class MOBIHeader: # {{{
def r(d, attr): def r(d, attr):
x = getattr(self, attr) x = getattr(self, attr)
if attr in self.relative_records and x != NULL_INDEX: if attr in self.relative_records and x != NULL_INDEX:
a('%s: Absolute: %d Relative: %d'%(d, x, x-self.header_offset)) a(f'{d}: Absolute: {x} Relative: {x - self.header_offset}')
else: else:
i(d, x) i(d, x)
a(f'Compression: {self.compression}') a(f'Compression: {self.compression}')
a(f'Unused: {self.unused!r}') a(f'Unused: {self.unused!r}')
a('Text length: %d'%self.text_length) a(f'Text length: {self.text_length}')
a('Number of text records: %d'%self.number_of_text_records) a(f'Number of text records: {self.number_of_text_records}')
a('Text record size: %d'%self.text_record_size) a(f'Text record size: {self.text_record_size}')
a(f'Encryption: {self.encryption_type}') a(f'Encryption: {self.encryption_type}')
a(f'Unknown: {self.unknown!r}') a(f'Unknown: {self.unknown!r}')
a(f'Identifier: {self.identifier!r}') a(f'Identifier: {self.identifier!r}')
a('Header length: %d'% self.length) a(f'Header length: {self.length}')
a(f'Type: {self.type}') a(f'Type: {self.type}')
a(f'Encoding: {self.encoding}') a(f'Encoding: {self.encoding}')
a(f'UID: {self.uid!r}') a(f'UID: {self.uid!r}')
a('File version: %d'%self.file_version) a(f'File version: {self.file_version}')
r('Meta Orth Index', 'meta_orth_indx') r('Meta Orth Index', 'meta_orth_indx')
r('Meta Infl Index', 'meta_infl_indx') r('Meta Infl Index', 'meta_infl_indx')
r('Secondary index record', 'secondary_index_record') r('Secondary index record', 'secondary_index_record')
a(f'Reserved: {self.reserved!r}') a(f'Reserved: {self.reserved!r}')
r('First non-book record', 'first_non_book_record') r('First non-book record', 'first_non_book_record')
a('Full name offset: %d'%self.fullname_offset) a(f'Full name offset: {self.fullname_offset}')
a('Full name length: %d bytes'%self.fullname_length) a(f'Full name length: {self.fullname_length} bytes')
a(f'Langcode: {self.locale_raw!r}') a(f'Langcode: {self.locale_raw!r}')
a(f'Language: {self.language}') a(f'Language: {self.language}')
a(f'Sub language: {self.sublanguage}') a(f'Sub language: {self.sublanguage}')
a(f'Input language: {self.input_language!r}') a(f'Input language: {self.input_language!r}')
a(f'Output language: {self.output_langauage!r}') a(f'Output language: {self.output_langauage!r}')
a('Min version: %d'%self.min_version) a(f'Min version: {self.min_version}')
r('First Image index', 'first_image_index') r('First Image index', 'first_image_index')
r('Huffman record offset', 'huffman_record_offset') r('Huffman record offset', 'huffman_record_offset')
a('Huffman record count: %d'%self.huffman_record_count) a(f'Huffman record count: {self.huffman_record_count}')
r('Huffman table offset', 'datp_record_offset') r('Huffman table offset', 'datp_record_offset')
a(f'Huffman table length: {self.datp_record_count!r}') a(f'Huffman table length: {self.datp_record_count!r}')
a(f'EXTH flags: {bin(self.exth_flags)[2:]} ({self.has_exth})') a(f'EXTH flags: {bin(self.exth_flags)[2:]} ({self.has_exth})')
@ -472,18 +471,18 @@ class MOBIHeader: # {{{
if self.has_extra_data_flags: if self.has_extra_data_flags:
a(f'Unknown4: {self.unknown4!r}') a(f'Unknown4: {self.unknown4!r}')
if hasattr(self, 'first_text_record'): if hasattr(self, 'first_text_record'):
a('First content record: %d'%self.first_text_record) a(f'First content record: {self.first_text_record}')
a('Last content record: %d'%self.last_text_record) a(f'Last content record: {self.last_text_record}')
else: else:
r('FDST Index', 'fdst_idx') r('FDST Index', 'fdst_idx')
a('FDST Count: %d'% self.fdst_count) a(f'FDST Count: {self.fdst_count}')
r('FCIS number', 'fcis_number') r('FCIS number', 'fcis_number')
a('FCIS count: %d'% self.fcis_count) a(f'FCIS count: {self.fcis_count}')
r('FLIS number', 'flis_number') r('FLIS number', 'flis_number')
a('FLIS count: %d'% self.flis_count) a(f'FLIS count: {self.flis_count}')
a(f'Unknown6: {self.unknown6!r}') a(f'Unknown6: {self.unknown6!r}')
r('SRCS record index', 'srcs_record_index') r('SRCS record index', 'srcs_record_index')
a('Number of SRCS records?: %d'%self.num_srcs_records) a(f'Number of SRCS records?: {self.num_srcs_records}')
a(f'Unknown7: {self.unknown7!r}') a(f'Unknown7: {self.unknown7!r}')
a(f'Extra data flags: {bin(self.extra_data_flags)} (has multibyte: {self.has_multibytes}) ' a(f'Extra data flags: {bin(self.extra_data_flags)} (has multibyte: {self.has_multibytes}) '
f'(has indexing: {self.has_indexing_bytes}) (has uncrossable breaks: {self.has_uncrossable_breaks})') f'(has indexing: {self.has_indexing_bytes}) (has uncrossable breaks: {self.has_uncrossable_breaks})')
@ -502,8 +501,7 @@ class MOBIHeader: # {{{
ans += '\n\n' + str(self.exth) ans += '\n\n' + str(self.exth)
ans += f'\n\nBytes after EXTH ({len(self.bytes_after_exth)} bytes): {format_bytes(self.bytes_after_exth)}' ans += f'\n\nBytes after EXTH ({len(self.bytes_after_exth)} bytes): {format_bytes(self.bytes_after_exth)}'
ans += '\nNumber of bytes after full name: %d' % (len(self.raw) - (self.fullname_offset + ans += f'\nNumber of bytes after full name: {len(self.raw) - (self.fullname_offset + self.fullname_length)}'
self.fullname_length))
ans += f'\nRecord 0 length: {len(self.raw)}' ans += f'\nRecord 0 length: {len(self.raw)}'
return ans return ans
@ -599,13 +597,12 @@ class TextRecord: # {{{
for typ, val in iteritems(self.trailing_data): for typ, val in iteritems(self.trailing_data):
if isinstance(typ, numbers.Integral): if isinstance(typ, numbers.Integral):
print('Record %d has unknown trailing data of type: %d : %r'% print(f'Record {idx} has unknown trailing data of type: {typ} : {val!r}')
(idx, typ, val))
self.idx = idx self.idx = idx
def dump(self, folder): def dump(self, folder):
name = '%06d'%self.idx name = f'{self.idx:06}'
with open(os.path.join(folder, name+'.txt'), 'wb') as f: with open(os.path.join(folder, name+'.txt'), 'wb') as f:
f.write(self.raw) f.write(self.raw)
with open(os.path.join(folder, name+'.trailing_data'), 'wb') as f: with open(os.path.join(folder, name+'.trailing_data'), 'wb') as f:
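
Reviewer note: for ints, the bare {:06} spec behaves like %06d (the leading zero requests sign-aware zero padding); only non-int values would format differently. Quick check:

    # Illustrative only: equal for ints; strings would pad on the right.
    assert '%06d' % 42 == f'{42:06}' == '000042'
    assert f'{"ab":06}' == 'ab0000'   # str fills on the right instead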

@ -100,14 +100,14 @@ class Index:
ans.extend(['', '']) ans.extend(['', ''])
ans += ['*'*10 + f' Index Record Headers ({len(self.index_headers)} records) ' + '*'*10] ans += ['*'*10 + f' Index Record Headers ({len(self.index_headers)} records) ' + '*'*10]
for i, header in enumerate(self.index_headers): for i, header in enumerate(self.index_headers):
ans += ['*'*10 + ' Index Record %d ' % i + '*'*10] ans += ['*'*10 + f' Index Record {i} ' + '*'*10]
for field in INDEX_HEADER_FIELDS: for field in INDEX_HEADER_FIELDS:
a('%-12s: %r'%(FIELD_NAMES.get(field, field), header[field])) a('%-12s: %r'%(FIELD_NAMES.get(field, field), header[field]))
if self.cncx: if self.cncx:
a('*'*10 + ' CNCX ' + '*'*10) a('*'*10 + ' CNCX ' + '*'*10)
for offset, val in iteritems(self.cncx): for offset, val in iteritems(self.cncx):
a('%10s: %s'%(offset, val)) a(f'{offset:10}: {val}')
ans.extend(['', '']) ans.extend(['', ''])
if self.table is not None: if self.table is not None:
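
Reviewer note: one subtlety in the CNCX dump: %10s right-aligns the str() of any value, while a bare {:10} right-aligns ints but left-aligns strings, so the two agree here only because the CNCX offsets are ints. Illustratively:

    # Illustrative only: alignment defaults differ by type.
    assert '%10s' % 42 == f'{42:10}' == '        42'
    assert f'{"ab":10}' == 'ab        '   # a string left-aligns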

@ -30,8 +30,7 @@ class TagX: # {{{
self.is_eof = (self.eof == 1 and self.tag == 0 and self.num_values == 0 and self.bitmask == 0) self.is_eof = (self.eof == 1 and self.tag == 0 and self.num_values == 0 and self.bitmask == 0)
def __repr__(self): def __repr__(self):
return 'TAGX(tag=%02d, num_values=%d, bitmask=%r, eof=%d)' % (self.tag, return f'TAGX(tag={self.tag:02}, num_values={self.num_values}, bitmask={bin(self.bitmask)!r}, eof={self.eof})'
self.num_values, bin(self.bitmask), self.eof)
# }}} # }}}
@ -55,7 +54,7 @@ class SecondaryIndexHeader: # {{{
'cp1252'}.get(self.index_encoding_num, 'unknown') 'cp1252'}.get(self.index_encoding_num, 'unknown')
if self.index_encoding == 'unknown': if self.index_encoding == 'unknown':
raise ValueError( raise ValueError(
'Unknown index encoding: %d'%self.index_encoding_num) f'Unknown index encoding: {self.index_encoding_num}')
self.unknown2 = raw[32:36] self.unknown2 = raw[32:36]
self.num_index_entries, = struct.unpack('>I', raw[36:40]) self.num_index_entries, = struct.unpack('>I', raw[36:40])
self.ordt_start, = struct.unpack('>I', raw[40:44]) self.ordt_start, = struct.unpack('>I', raw[40:44])
@ -102,30 +101,29 @@ class SecondaryIndexHeader: # {{{
a('Unknown: %r (%d bytes) (All zeros: %r)'%(w, a('Unknown: %r (%d bytes) (All zeros: %r)'%(w,
len(w), not bool(w.replace(b'\0', b'')))) len(w), not bool(w.replace(b'\0', b''))))
a('Header length: %d'%self.header_length) a(f'Header length: {self.header_length}')
u(self.unknown1) u(self.unknown1)
a('Index Type: %s (%d)'%(self.index_type_desc, self.index_type)) a(f'Index Type: {self.index_type_desc} ({self.index_type})')
a('Offset to IDXT start: %d'%self.idxt_start) a(f'Offset to IDXT start: {self.idxt_start}')
a('Number of index records: %d'%self.index_count) a(f'Number of index records: {self.index_count}')
a('Index encoding: %s (%d)'%(self.index_encoding, a(f'Index encoding: {self.index_encoding} ({self.index_encoding_num})')
self.index_encoding_num))
u(self.unknown2) u(self.unknown2)
a('Number of index entries: %d'% self.num_index_entries) a(f'Number of index entries: {self.num_index_entries}')
a('ORDT start: %d'%self.ordt_start) a(f'ORDT start: {self.ordt_start}')
a('LIGT start: %d'%self.ligt_start) a(f'LIGT start: {self.ligt_start}')
a('Number of LIGT entries: %d'%self.num_of_ligt_entries) a(f'Number of LIGT entries: {self.num_of_ligt_entries}')
a('Number of cncx blocks: %d'%self.num_of_cncx_blocks) a(f'Number of cncx blocks: {self.num_of_cncx_blocks}')
u(self.unknown3) u(self.unknown3)
a('TAGX offset: %d'%self.tagx_offset) a(f'TAGX offset: {self.tagx_offset}')
u(self.unknown4) u(self.unknown4)
a('\n\n') a('\n\n')
a('*'*20 + ' TAGX Header (%d bytes)'%self.tagx_header_length+ '*'*20) a('*'*20 + f' TAGX Header ({self.tagx_header_length} bytes)'+ '*'*20)
a('Header length: %d'%self.tagx_header_length) a(f'Header length: {self.tagx_header_length}')
a('Control byte count: %d'%self.tagx_control_byte_count) a(f'Control byte count: {self.tagx_control_byte_count}')
for i in self.tagx_entries: for i in self.tagx_entries:
a('\t' + repr(i)) a('\t' + repr(i))
a(f'Index of last IndexEntry in secondary index record: {self.last_entry}') a(f'Index of last IndexEntry in secondary index record: {self.last_entry}')
a('Number of entries in the NCX: %d'% self.ncx_count) a(f'Number of entries in the NCX: {self.ncx_count}')
return '\n'.join(ans) return '\n'.join(ans)
@ -154,7 +152,7 @@ class IndexHeader: # {{{
'cp1252'}.get(self.index_encoding_num, 'unknown') 'cp1252'}.get(self.index_encoding_num, 'unknown')
if self.index_encoding == 'unknown': if self.index_encoding == 'unknown':
raise ValueError( raise ValueError(
'Unknown index encoding: %d'%self.index_encoding_num) f'Unknown index encoding: {self.index_encoding_num}')
self.possibly_language = raw[32:36] self.possibly_language = raw[32:36]
self.num_index_entries, = struct.unpack('>I', raw[36:40]) self.num_index_entries, = struct.unpack('>I', raw[36:40])
self.ordt_start, = struct.unpack('>I', raw[40:44]) self.ordt_start, = struct.unpack('>I', raw[40:44])
@ -204,31 +202,30 @@ class IndexHeader: # {{{
a('Unknown: %r (%d bytes) (All zeros: %r)'%(w, a('Unknown: %r (%d bytes) (All zeros: %r)'%(w,
len(w), not bool(w.replace(b'\0', b'')))) len(w), not bool(w.replace(b'\0', b''))))
a('Header length: %d'%self.header_length) a(f'Header length: {self.header_length}')
u(self.unknown1) u(self.unknown1)
a('Header type: %d'%self.header_type) a(f'Header type: {self.header_type}')
a('Index Type: %s (%d)'%(self.index_type_desc, self.index_type)) a(f'Index Type: {self.index_type_desc} ({self.index_type})')
a('Offset to IDXT start: %d'%self.idxt_start) a(f'Offset to IDXT start: {self.idxt_start}')
a('Number of index records: %d'%self.index_count) a(f'Number of index records: {self.index_count}')
a('Index encoding: %s (%d)'%(self.index_encoding, a(f'Index encoding: {self.index_encoding} ({self.index_encoding_num})')
self.index_encoding_num))
a(f'Unknown (possibly language?): {self.possibly_language!r}') a(f'Unknown (possibly language?): {self.possibly_language!r}')
a('Number of index entries: %d'% self.num_index_entries) a(f'Number of index entries: {self.num_index_entries}')
a('ORDT start: %d'%self.ordt_start) a(f'ORDT start: {self.ordt_start}')
a('LIGT start: %d'%self.ligt_start) a(f'LIGT start: {self.ligt_start}')
a('Number of LIGT entries: %d'%self.num_of_ligt_entries) a(f'Number of LIGT entries: {self.num_of_ligt_entries}')
a('Number of cncx blocks: %d'%self.num_of_cncx_blocks) a(f'Number of cncx blocks: {self.num_of_cncx_blocks}')
u(self.unknown2) u(self.unknown2)
a('TAGX offset: %d'%self.tagx_offset) a(f'TAGX offset: {self.tagx_offset}')
u(self.unknown3) u(self.unknown3)
a('\n\n') a('\n\n')
a('*'*20 + ' TAGX Header (%d bytes)'%self.tagx_header_length+ '*'*20) a('*'*20 + f' TAGX Header ({self.tagx_header_length} bytes)'+ '*'*20)
a('Header length: %d'%self.tagx_header_length) a(f'Header length: {self.tagx_header_length}')
a('Control byte count: %d'%self.tagx_control_byte_count) a(f'Control byte count: {self.tagx_control_byte_count}')
for i in self.tagx_entries: for i in self.tagx_entries:
a('\t' + repr(i)) a('\t' + repr(i))
a(f'Index of last IndexEntry in primary index record: {self.last_entry}') a(f'Index of last IndexEntry in primary index record: {self.last_entry}')
a('Number of entries in the NCX: %d'% self.ncx_count) a(f'Number of entries in the NCX: {self.ncx_count}')
return '\n'.join(ans) return '\n'.join(ans)
# }}} # }}}
@ -275,7 +272,7 @@ class Tag: # {{{
self.attr, self.desc = self.TAG_MAP[tag_type] self.attr, self.desc = self.TAG_MAP[tag_type]
else: else:
print('Unknown tag value: %s') print('Unknown tag value: %s')
self.desc = '??Unknown (tag value: %d)'%tag_type self.desc = f'??Unknown (tag value: {tag_type})'
self.attr = 'unknown' self.attr = 'unknown'
if '_offset' in self.attr: if '_offset' in self.attr:
@ -368,8 +365,7 @@ class IndexEntry: # {{{
if tag.value is not None: if tag.value is not None:
ans.append('\t'+str(tag)) ans.append('\t'+str(tag))
if self.first_child_index != -1: if self.first_child_index != -1:
ans.append('\tNumber of children: %d'%(self.last_child_index - ans.append(f'\tNumber of children: {self.last_child_index - self.first_child_index + 1}')
self.first_child_index + 1))
return '\n'.join(ans) return '\n'.join(ans)
# }}} # }}}
@ -458,8 +454,7 @@ class CNCX: # {{{
except: except:
byts = raw[pos:] byts = raw[pos:]
r = format_bytes(byts) r = format_bytes(byts)
print('CNCX entry at offset %d has unknown format %s'%( print(f'CNCX entry at offset {pos + record_offset} has unknown format {r}')
pos+record_offset, r))
self.records[pos+record_offset] = r self.records[pos+record_offset] = r
pos = len(raw) pos = len(raw)
pos += consumed+length pos += consumed+length
@ -471,7 +466,7 @@ class CNCX: # {{{
def __str__(self): def __str__(self):
ans = ['*'*20 + f' cncx ({len(self.records)} strings) '+ '*'*20] ans = ['*'*20 + f' cncx ({len(self.records)} strings) '+ '*'*20]
for k, v in iteritems(self.records): for k, v in iteritems(self.records):
ans.append('%10d : %s'%(k, v)) ans.append(f'{k:10} : {v}')
return '\n'.join(ans) return '\n'.join(ans)
# }}} # }}}
@ -485,7 +480,7 @@ class ImageRecord: # {{{
self.idx = idx self.idx = idx
def dump(self, folder): def dump(self, folder):
name = '%06d'%self.idx name = f'{self.idx:06}'
with open(os.path.join(folder, name+'.'+self.fmt), 'wb') as f: with open(os.path.join(folder, name+'.'+self.fmt), 'wb') as f:
f.write(self.raw) f.write(self.raw)
@ -497,7 +492,7 @@ class BinaryRecord: # {{{
def __init__(self, idx, record): def __init__(self, idx, record):
self.raw = record.raw self.raw = record.raw
sig = self.raw[:4] sig = self.raw[:4]
name = '%06d'%idx name = f'{idx:06}'
if sig in {b'FCIS', b'FLIS', b'SRCS', b'DATP', b'RESC', b'BOUN', if sig in {b'FCIS', b'FLIS', b'SRCS', b'DATP', b'RESC', b'BOUN',
b'FDST', b'AUDI', b'VIDE', b'CRES', b'CONT', b'CMET'}: b'FDST', b'AUDI', b'VIDE', b'CRES', b'CONT', b'CMET'}:
name += '-' + sig.decode('ascii') name += '-' + sig.decode('ascii')
@ -516,7 +511,7 @@ class FontRecord: # {{{
def __init__(self, idx, record): def __init__(self, idx, record):
self.raw = record.raw self.raw = record.raw
name = '%06d'%idx name = f'{idx:06}'
self.font = read_font_record(self.raw) self.font = read_font_record(self.raw)
if self.font['err']: if self.font['err']:
raise ValueError('Failed to read font record: {} Headers: {}'.format( raise ValueError('Failed to read font record: {} Headers: {}'.format(
@ -564,7 +559,7 @@ class TBSIndexing: # {{{
for i in self.indices: for i in self.indices:
if i.index in {idx, str(idx)}: if i.index in {idx, str(idx)}:
return i return i
raise IndexError('Index %d not found'%idx) raise IndexError(f'Index {idx} not found')
def __str__(self): def __str__(self):
ans = ['*'*20 + f' TBS Indexing ({len(self.record_indices)} records) '+ '*'*20] ans = ['*'*20 + f' TBS Indexing ({len(self.record_indices)} records) '+ '*'*20]
@ -580,13 +575,12 @@ class TBSIndexing: # {{{
continue continue
types[tbs_type] += strings types[tbs_type] += strings
for typ, strings in iteritems(types): for typ, strings in iteritems(types):
with open(os.path.join(bdir, 'tbs_type_%d.txt'%typ), 'wb') as f: with open(os.path.join(bdir, f'tbs_type_{typ}.txt'), 'wb') as f:
f.write(as_bytes('\n'.join(strings))) f.write(as_bytes('\n'.join(strings)))
def dump_record(self, r, dat): def dump_record(self, r, dat):
ans = [] ans = []
ans.append('\nRecord #%d: Starts at: %d Ends at: %d'%(r.idx, ans.append(f"\nRecord #{r.idx}: Starts at: {dat['geom'][0]} Ends at: {dat['geom'][1]}")
dat['geom'][0], dat['geom'][1]))
s, e, c = dat['starts'], dat['ends'], dat['complete'] s, e, c = dat['starts'], dat['ends'], dat['complete']
ans.append(('\tContains: %d index entries ' ans.append(('\tContains: %d index entries '
'(%d ends, %d complete, %d starts)')%tuple(map(len, (s+e+c, e, '(%d ends, %d complete, %d starts)')%tuple(map(len, (s+e+c, e,
@ -597,9 +591,7 @@ class TBSIndexing: # {{{
if entries: if entries:
ans.append(f'\t{typ}:') ans.append(f'\t{typ}:')
for x in entries: for x in entries:
ans.append(('\t\tIndex Entry: %s (Parent index: %s, ' ans.append(f"\t\tIndex Entry: {x.index} (Parent index: {x.parent_index}, Depth: {x.depth}, Offset: {x.offset}, Size: {x.size}) [{x.label}]")
'Depth: %d, Offset: %d, Size: %d) [%s]')%(
x.index, x.parent_index, x.depth, x.offset, x.size, x.label))
def bin4(num): def bin4(num):
ans = bin(num)[2:] ans = bin(num)[2:]
@ -615,8 +607,8 @@ class TBSIndexing: # {{{
byts = byts[consumed:] byts = byts[consumed:]
for k in extra: for k in extra:
tbs_type |= k tbs_type |= k
ans.append('\nTBS: %d (%s)'%(tbs_type, bin4(tbs_type))) ans.append(f'\nTBS: {tbs_type} ({bin4(tbs_type)})')
ans.append('Outermost index: %d'%outermost_index) ans.append(f'Outermost index: {outermost_index}')
ans.append(f'Unknown extra start bytes: {repr_extra(extra)}') ans.append(f'Unknown extra start bytes: {repr_extra(extra)}')
if is_periodical: # Hierarchical periodical if is_periodical: # Hierarchical periodical
try: try:
@ -626,7 +618,7 @@ class TBSIndexing: # {{{
import traceback import traceback
traceback.print_exc() traceback.print_exc()
a = [] a = []
print('Failed to decode TBS bytes for record: %d'%r.idx) print(f'Failed to decode TBS bytes for record: {r.idx}')
ans += a ans += a
if byts: if byts:
sbyts = tuple(hex(b)[2:] for b in byts) sbyts = tuple(hex(b)[2:] for b in byts)
@ -654,35 +646,25 @@ class TBSIndexing: # {{{
raise ValueError('Dont know how to interpret flags' raise ValueError('Dont know how to interpret flags'
f' {extra!r} while reading section transitions') f' {extra!r} while reading section transitions')
nsi = self.get_index(psi.index+1) nsi = self.get_index(psi.index+1)
ans.append('Last article in this record of section %d' ans.append(f'Last article in this record of section {psi.index} (relative to next section index [{nsi.index}]): {ai} [{ai + nsi.index} absolute index]')
' (relative to next section index [%d]): '
'%d [%d absolute index]'%(psi.index, nsi.index, ai,
ai+nsi.index))
psi = nsi psi = nsi
continue continue
ans.append('First article in this record of section %d' ans.append(f'First article in this record of section {psi.index} (relative to its parent section): {ai} [{ai + psi.index} absolute index]')
' (relative to its parent section): '
'%d [%d absolute index]'%(psi.index, ai, ai+psi.index))
num = extra.get(0b0100, None) num = extra.get(0b0100, None)
if num is None: if num is None:
msg = ('The section %d has at most one article' msg = f"The section {psi.index} has at most one article in this record"
' in this record')%psi.index
else: else:
msg = ('Number of articles in this record of ' msg = f"Number of articles in this record of section {psi.index}: {num}"
'section %d: %d')%(psi.index, num)
ans.append(msg) ans.append(msg)
offset = extra.get(0b0001, None) offset = extra.get(0b0001, None)
if offset is not None: if offset is not None:
if offset == 0: if offset == 0:
ans.append('This record is spanned by the article:' ans.append(f'This record is spanned by the article:{ai + psi.index}')
'%d'%(ai+psi.index))
else: else:
ans.append('->Offset to start of next section (%d) from start' ans.append(f'->Offset to start of next section ({psi.index + 1}) from start of record: {offset} [{offset + record_offset} absolute offset]')
' of record: %d [%d absolute offset]'%(psi.index+1,
offset, offset+record_offset))
return byts return byts
# }}} # }}}
@ -698,8 +680,7 @@ class TBSIndexing: # {{{
f' {si.index}') f' {si.index}')
if 0b0100 in extra: if 0b0100 in extra:
num = extra[0b0100] num = extra[0b0100]
ans.append('The number of articles from the section %d' ans.append(f'The number of articles from the section {si.index} in this record: {num}')
' in this record: %s'%(si.index, num))
elif 0b0001 in extra: elif 0b0001 in extra:
eof = extra[0b0001] eof = extra[0b0001]
if eof != 0: if eof != 0:
@ -791,7 +772,7 @@ class MOBIFile: # {{{
p() p()
p('Record headers:') p('Record headers:')
for i, r in enumerate(self.records): for i, r in enumerate(self.records):
p('%6d. %s'%(i, r.header)) p(f'{i:6}. {r.header}')
p() p()
p(str(self.mobi_header)) p(str(self.mobi_header))

@ -53,7 +53,7 @@ class FDST:
class File: class File:
def __init__(self, skel, skeleton, text, first_aid, sections): def __init__(self, skel, skeleton, text, first_aid, sections):
self.name = 'part%04d'%skel.file_number self.name = f'part{skel.file_number:04}'
self.skeleton, self.text, self.first_aid = skeleton, text, first_aid self.skeleton, self.text, self.first_aid = skeleton, text, first_aid
self.sections = sections self.sections = sections
@ -66,7 +66,7 @@ class File:
with open('skeleton.html', 'wb') as f: with open('skeleton.html', 'wb') as f:
f.write(self.skeleton) f.write(self.skeleton)
for i, text in enumerate(self.sections): for i, text in enumerate(self.sections):
with open('sect-%04d.html'%i, 'wb') as f: with open(f'sect-{i:04}.html', 'wb') as f:
f.write(text) f.write(text)
@ -101,7 +101,7 @@ class MOBIFile:
p() p()
p('Record headers:') p('Record headers:')
for i, r in enumerate(self.mf.records): for i, r in enumerate(self.mf.records):
p('%6d. %s'%(i, r.header)) p(f'{i:6}. {r.header}')
p() p()
p(str(self.mf.mobi8_header)) p(str(self.mf.mobi8_header))
@ -151,7 +151,7 @@ class MOBIFile:
for i, x in enumerate(boundaries): for i, x in enumerate(boundaries):
start, end = x start, end = x
raw = self.raw_text[start:end] raw = self.raw_text[start:end]
with open(os.path.join(ddir, 'flow%04d.txt'%i), 'wb') as f: with open(os.path.join(ddir, f'flow{i:04}.txt'), 'wb') as f:
f.write(raw) f.write(raw)
def extract_resources(self, records): def extract_resources(self, records):
@ -221,7 +221,7 @@ class MOBIFile:
elif sig in known_types: elif sig in known_types:
suffix = '-' + sig.decode('ascii') suffix = '-' + sig.decode('ascii')
self.resource_map.append(('%s/%06d%s.%s'%(prefix, resource_index, suffix, ext), self.resource_map.append((f'{prefix}/{resource_index:06}{suffix}.{ext}',
payload)) payload))
def read_tbs(self): def read_tbs(self):
@ -260,9 +260,9 @@ class MOBIFile:
for i, strands in enumerate(indexing_data): for i, strands in enumerate(indexing_data):
rec = self.text_records[i] rec = self.text_records[i]
tbs_bytes = rec.trailing_data.get('indexing', b'') tbs_bytes = rec.trailing_data.get('indexing', b'')
desc = ['Record #%d'%i] desc = [f'Record #{i}']
for s, strand in enumerate(strands): for s, strand in enumerate(strands):
desc.append('Strand %d'%s) desc.append(f'Strand {s}')
for entries in itervalues(strand): for entries in itervalues(strand):
for e in entries: for e in entries:
desc.append( desc.append(
@ -284,7 +284,7 @@ class MOBIFile:
extra = {bin(k):v for k, v in iteritems(extra)} extra = {bin(k):v for k, v in iteritems(extra)}
sequences.append((val, extra)) sequences.append((val, extra))
for j, seq in enumerate(sequences): for j, seq in enumerate(sequences):
desc.append('Sequence #%d: %r %r'%(j, seq[0], seq[1])) desc.append(f'Sequence #{j}: {seq[0]!r} {seq[1]!r}')
if tbs_bytes: if tbs_bytes:
desc.append(f'Remaining bytes: {format_bytes(tbs_bytes)}') desc.append(f'Remaining bytes: {format_bytes(tbs_bytes)}')
calculated_sequences = encode_strands_as_sequences(strands, calculated_sequences = encode_strands_as_sequences(strands,
@ -294,7 +294,7 @@ class MOBIFile:
except: except:
calculated_bytes = b'failed to calculate tbs bytes' calculated_bytes = b'failed to calculate tbs bytes'
if calculated_bytes != otbs: if calculated_bytes != otbs:
print('WARNING: TBS mismatch for record %d'%i) print(f'WARNING: TBS mismatch for record {i}')
desc.append('WARNING: TBS mismatch!') desc.append('WARNING: TBS mismatch!')
desc.append(f'Calculated sequences: {calculated_sequences!r}') desc.append(f'Calculated sequences: {calculated_sequences!r}')
desc.append('') desc.append('')
@ -321,7 +321,7 @@ def inspect_mobi(mobi_file, ddir):
fo.write(payload) fo.write(payload)
for i, container in enumerate(f.containers): for i, container in enumerate(f.containers):
with open(os.path.join(ddir, 'container%d.txt' % (i + 1)), 'wb') as cf: with open(os.path.join(ddir, f'container{i + 1}.txt'), 'wb') as cf:
cf.write(str(container).encode('utf-8')) cf.write(str(container).encode('utf-8'))
if f.fdst: if f.fdst:

@ -220,8 +220,7 @@ class BookHeader:
}[self.codepage] }[self.codepage]
except (IndexError, KeyError): except (IndexError, KeyError):
self.codec = 'cp1252' if not user_encoding else user_encoding self.codec = 'cp1252' if not user_encoding else user_encoding
log.warn('Unknown codepage %d. Assuming %s' % (self.codepage, log.warn(f'Unknown codepage {self.codepage}. Assuming {self.codec}')
self.codec))
# Some KF8 files have header length == 264 (generated by kindlegen # Some KF8 files have header length == 264 (generated by kindlegen
# 2.9?). See https://bugs.launchpad.net/bugs/1179144 # 2.9?). See https://bugs.launchpad.net/bugs/1179144
max_header_length = 500 # We choose 500 for future versions of kindlegen max_header_length = 500 # We choose 500 for future versions of kindlegen

@ -16,7 +16,7 @@ PTagX = namedtuple('PTagX', 'tag value_count value_bytes num_of_values')
INDEX_HEADER_FIELDS = ( INDEX_HEADER_FIELDS = (
'len', 'nul1', 'type', 'gen', 'start', 'count', 'code', 'len', 'nul1', 'type', 'gen', 'start', 'count', 'code',
'lng', 'total', 'ordt', 'ligt', 'nligt', 'ncncx' 'lng', 'total', 'ordt', 'ligt', 'nligt', 'ncncx'
) + tuple('unknown%d'%i for i in range(27)) + ('ocnt', 'oentries', ) + tuple(f'unknown{i}' for i in range(27)) + ('ocnt', 'oentries',
'ordt1', 'ordt2', 'tagx') 'ordt1', 'ordt2', 'tagx')
@ -47,7 +47,7 @@ def parse_indx_header(data):
check_signature(data, b'INDX') check_signature(data, b'INDX')
words = INDEX_HEADER_FIELDS words = INDEX_HEADER_FIELDS
num = len(words) num = len(words)
values = struct.unpack('>%dL' % num, data[4:4*(num+1)]) values = struct.unpack(f'>{num}L', data[4:4*(num+1)])
ans = dict(zip(words, values)) ans = dict(zip(words, values))
ans['idx_header_end_pos'] = 4 * (num+1) ans['idx_header_end_pos'] = 4 * (num+1)
ordt1, ordt2 = ans['ordt1'], ans['ordt2'] ordt1, ordt2 = ans['ordt1'], ans['ordt2']
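
Reviewer note: the '>{num}L' format expands to a concrete big-endian unsigned-long array spec at run time; struct.calcsize gives the slice length the parser consumes. A minimal sketch with a small count:

    # Illustrative only: a dynamically sized big-endian uint32 array.
    import struct
    num = 3
    assert struct.calcsize(f'>{num}L') == 12
    assert struct.unpack(f'>{num}L', bytes(range(12)))[0] == 0x00010203
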
@ -103,8 +103,7 @@ class CNCX: # {{{
except: except:
byts = raw[pos:] byts = raw[pos:]
r = format_bytes(byts) r = format_bytes(byts)
print('CNCX entry at offset %d has unknown format %s'%( print(f'CNCX entry at offset {pos + record_offset} has unknown format {r}')
pos+record_offset, r))
self.records[pos+record_offset] = r self.records[pos+record_offset] = r
pos = len(raw) pos = len(raw)
pos += consumed+length pos += consumed+length

@ -525,7 +525,7 @@ class MobiReader:
except Exception: except Exception:
pass pass
else: else:
attrib['src'] = 'images/' + image_name_map.get(recindex, '%05d.jpg' % recindex) attrib['src'] = 'images/' + image_name_map.get(recindex, f'{recindex:05}.jpg')
for attr in ('width', 'height'): for attr in ('width', 'height'):
if attr in attrib: if attr in attrib:
val = attrib[attr] val = attrib[attr]
@ -577,7 +577,7 @@ class MobiReader:
ncls = sel ncls = sel
break break
if ncls is None: if ncls is None:
ncls = 'calibre_%d' % i ncls = f'calibre_{i}'
self.tag_css_rules[ncls] = rule self.tag_css_rules[ncls] = rule
cls = attrib.get('class', '') cls = attrib.get('class', '')
cls = cls + (' ' if cls else '') + ncls cls = cls + (' ' if cls else '') + ncls
@ -658,7 +658,7 @@ class MobiReader:
mi = MetaInformation(self.book_header.title, [_('Unknown')]) mi = MetaInformation(self.book_header.title, [_('Unknown')])
opf = OPFCreator(os.path.dirname(htmlfile), mi) opf = OPFCreator(os.path.dirname(htmlfile), mi)
if hasattr(self.book_header.exth, 'cover_offset'): if hasattr(self.book_header.exth, 'cover_offset'):
opf.cover = 'images/%05d.jpg' % (self.book_header.exth.cover_offset + 1) opf.cover = f'images/{self.book_header.exth.cover_offset + 1:05}.jpg'
elif mi.cover is not None: elif mi.cover is not None:
opf.cover = mi.cover opf.cover = mi.cover
else: else:
@ -920,7 +920,7 @@ class MobiReader:
except OSError: except OSError:
self.log.warn(f'Ignoring undecodeable GIF image at index {image_index}') self.log.warn(f'Ignoring undecodeable GIF image at index {image_index}')
continue continue
path = os.path.join(output_dir, '%05d.%s' % (image_index, imgfmt)) path = os.path.join(output_dir, f'{image_index:05}.{imgfmt}')
image_name_map[image_index] = os.path.basename(path) image_name_map[image_index] = os.path.basename(path)
if imgfmt == 'png': if imgfmt == 'png':
with open(path, 'wb') as f: with open(path, 'wb') as f:

View File

@ -200,7 +200,7 @@ class Mobi8Reader:
self.elems[divptr] self.elems[divptr]
if i == 0: if i == 0:
aidtext = idtext[12:-2] aidtext = idtext[12:-2]
filename = 'part%04d.html' % filenum filename = f'part{filenum:04}.html'
part = text[baseptr:baseptr + length] part = text[baseptr:baseptr + length]
insertpos = insertpos - skelpos insertpos = insertpos - skelpos
head = skeleton[:insertpos] head = skeleton[:insertpos]
@ -256,7 +256,7 @@ class Mobi8Reader:
image_tag_pattern = re.compile(br'''(<(?:svg:)?image[^>]*>)''', re.IGNORECASE) image_tag_pattern = re.compile(br'''(<(?:svg:)?image[^>]*>)''', re.IGNORECASE)
for j in range(1, len(self.flows)): for j in range(1, len(self.flows)):
flowpart = self.flows[j] flowpart = self.flows[j]
nstr = '%04d' % j nstr = f'{j:04}'
m = svg_tag_pattern.search(flowpart) m = svg_tag_pattern.search(flowpart)
if m is not None: if m is not None:
# svg # svg
@ -320,7 +320,7 @@ class Mobi8Reader:
# pos # pos
fi = self.get_file_info(pos) fi = self.get_file_info(pos)
if fi.num is None and fi.start is None: if fi.num is None and fi.start is None:
raise ValueError('No file contains pos: %d'%pos) raise ValueError(f'No file contains pos: {pos}')
textblock = self.parts[fi.num] textblock = self.parts[fi.num]
npos = pos - fi.start npos = pos - fi.start
pgt = textblock.find(b'>', npos) pgt = textblock.find(b'>', npos)
@ -391,7 +391,7 @@ class Mobi8Reader:
pos = entry['pos'] pos = entry['pos']
fi = self.get_file_info(pos) fi = self.get_file_info(pos)
if fi.filename is None: if fi.filename is None:
raise ValueError('Index entry has invalid pos: %d'%pos) raise ValueError(f'Index entry has invalid pos: {pos}')
idtag = self.get_id_tag(pos) idtag = self.get_id_tag(pos)
href = f'{fi.type}/{fi.filename}' href = f'{fi.type}/{fi.filename}'
else: else:
@ -429,10 +429,9 @@ class Mobi8Reader:
pass # Ignore these records pass # Ignore these records
elif typ == b'FONT': elif typ == b'FONT':
font = read_font_record(data) font = read_font_record(data)
href = 'fonts/%05d.%s' % (fname_idx, font['ext']) href = f"fonts/{fname_idx:05}.{font['ext']}"
if font['err']: if font['err']:
self.log.warn('Reading font record %d failed: %s'%( self.log.warn(f"Reading font record {fname_idx} failed: {font['err']}")
fname_idx, font['err']))
if font['headers']: if font['headers']:
self.log.debug('Font record headers: {}'.format(font['headers'])) self.log.debug('Font record headers: {}'.format(font['headers']))
with open(href.replace('/', os.sep), 'wb') as f: with open(href.replace('/', os.sep), 'wb') as f:
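
Note the switch to outer double quotes on the converted lines: before Python 3.12 an f-string may not reuse its own quote character inside the braces, so a subscript like font['ext'] forces the outer quotes to differ. For example:

    font = {'ext': 'ttf', 'err': ''}
    fname_idx = 3
    href = f"fonts/{fname_idx:05}.{font['ext']}"  # inner quotes differ: OK
    assert href == 'fonts/00003.ttf'
    # f'fonts/{fname_idx:05}.{font['ext']}' is a SyntaxError before 3.12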
@ -448,7 +447,7 @@ class Mobi8Reader:
elif typ == b'CRES': elif typ == b'CRES':
data, imgtype = container.load_image(data) data, imgtype = container.load_image(data)
if data is not None: if data is not None:
href = 'images/%05d.%s'%(container.resource_index, imgtype) href = f'images/{container.resource_index:05}.{imgtype}'
with open(href.replace('/', os.sep), 'wb') as f: with open(href.replace('/', os.sep), 'wb') as f:
f.write(data) f.write(data)
elif typ == b'\xa0\xa0\xa0\xa0' and len(data) == 4 and container is not None: elif typ == b'\xa0\xa0\xa0\xa0' and len(data) == 4 and container is not None:
@ -456,7 +455,7 @@ class Mobi8Reader:
elif container is None: elif container is None:
if not (len(data) == len(PLACEHOLDER_GIF) and data == PLACEHOLDER_GIF): if not (len(data) == len(PLACEHOLDER_GIF) and data == PLACEHOLDER_GIF):
imgtype = find_imgtype(data) imgtype = find_imgtype(data)
href = 'images/%05d.%s'%(fname_idx, imgtype) href = f'images/{fname_idx:05}.{imgtype}'
with open(href.replace('/', os.sep), 'wb') as f: with open(href.replace('/', os.sep), 'wb') as f:
f.write(data) f.write(data)

View File

@ -156,8 +156,7 @@ def test_decint(num):
raw = encint(num, forward=d) raw = encint(num, forward=d)
sz = len(raw) sz = len(raw)
if (num, sz) != decint(raw, forward=d): if (num, sz) != decint(raw, forward=d):
raise ValueError('Failed for num %d, forward=%r: %r != %r' % ( raise ValueError(f'Failed for num {num}, forward={d!r}: {num, sz!r} != {decint(raw, forward=d)!r}')
num, d, (num, sz), decint(raw, forward=d)))
def rescale_image(data, maxsizeb=IMAGE_MAX_SIZE, dimen=None): def rescale_image(data, maxsizeb=IMAGE_MAX_SIZE, dimen=None):
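
The converted ValueError uses a subtle but valid construct: inside an f-string, num, sz is an expression list that evaluates to a tuple, and the !r conversion applies to that whole tuple. A minimal check:

    num, sz = 300, 2
    assert f'{num, sz!r}' == '(300, 2)'
    # Equivalent, and arguably clearer, with explicit parentheses:
    assert f'{(num, sz)!r}' == '(300, 2)'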

View File

@ -390,4 +390,4 @@ class Serializer:
self.start_offset = ioff self.start_offset = ioff
for hoff in hoffs: for hoff in hoffs:
buf.seek(hoff) buf.seek(hoff)
buf.write(('%010d' % ioff).encode('utf-8')) buf.write(f'{ioff:010}'.encode('utf-8'))
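
The Serializer hunk backpatches offsets into an already-written buffer; the ten-digit zero padding keeps the field a fixed width, so seeking back and overwriting cannot shift later bytes. A sketch of the idea with a toy buffer:

    from io import BytesIO

    buf = BytesIO()
    buf.write(b'offset: ')
    hoff = buf.tell()       # remember where the placeholder sits
    buf.write(b'0' * 10)    # fixed-width placeholder
    buf.write(b' <end>')
    ioff = 1234
    buf.seek(hoff)
    buf.write(f'{ioff:010}'.encode('utf-8'))  # same width: nothing moves
    assert buf.getvalue() == b'offset: 0000001234 <end>'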

View File

@ -267,7 +267,7 @@ class ChunkIndex(Index):
self.cncx = CNCX(c.selector for c in chunk_table) self.cncx = CNCX(c.selector for c in chunk_table)
self.entries = [ self.entries = [
('%010d'%c.insert_pos, { (f'{c.insert_pos:010}', {
'cncx_offset':self.cncx[c.selector], 'cncx_offset':self.cncx[c.selector],
'file_number':c.file_number, 'file_number':c.file_number,
@ -378,7 +378,7 @@ if __name__ == '__main__':
import os import os
import subprocess import subprocess
os.chdir('/t') os.chdir('/t')
paras = ['<p>%d</p>' % i for i in range(4000)] paras = [f'<p>{i}</p>' for i in range(4000)]
raw = '<html><body>' + '\n\n'.join(paras) + '</body></html>' raw = '<html><body>' + '\n\n'.join(paras) + '</body></html>'
src = 'index.html' src = 'index.html'

View File

@ -302,7 +302,7 @@ class KF8Writer:
# https://bugs.launchpad.net/bugs/1489495 # https://bugs.launchpad.net/bugs/1489495
if id_: if id_:
cid += 1 cid += 1
val = 'c%d' % cid val = f'c{cid}'
self.id_map[(item.href, id_)] = val self.id_map[(item.href, id_)] = val
tag.set('cid', val) tag.set('cid', val)
else: else:

View File

@ -341,7 +341,7 @@ class Chunker:
for s in self.skeletons: for s in self.skeletons:
s.start_pos = sp s.start_pos = sp
sp += len(s) sp += len(s)
self.skel_table = [Skel(s.file_number, 'SKEL%010d'%s.file_number, self.skel_table = [Skel(s.file_number, f'SKEL{s.file_number:010}',
len(s.chunks), s.start_pos, len(s.skeleton)) for s in self.skeletons] len(s.chunks), s.start_pos, len(s.skeleton)) for s in self.skeletons]
Chunk = namedtuple('Chunk', Chunk = namedtuple('Chunk',
@ -426,13 +426,13 @@ class Chunker:
error = False error = False
for i, skeleton in enumerate(self.skeletons): for i, skeleton in enumerate(self.skeletons):
for j, chunk in enumerate(skeleton.chunks): for j, chunk in enumerate(skeleton.chunks):
with open(os.path.join(chunks, 'file-%d-chunk-%d.html'%(i, j)), with open(os.path.join(chunks, f'file-{i}-chunk-{j}.html'),
'wb') as f: 'wb') as f:
f.write(chunk.raw) f.write(chunk.raw)
oraw, rraw = orig_dumps[i], skeleton.rebuild() oraw, rraw = orig_dumps[i], skeleton.rebuild()
with open(os.path.join(orig, '%04d.html'%i), 'wb') as f: with open(os.path.join(orig, f'{i:04}.html'), 'wb') as f:
f.write(oraw) f.write(oraw)
with open(os.path.join(rebuilt, '%04d.html'%i), 'wb') as f: with open(os.path.join(rebuilt, f'{i:04}.html'), 'wb') as f:
f.write(rraw) f.write(rraw)
if oraw != rraw: if oraw != rraw:
error = True error = True

View File

@ -200,7 +200,7 @@ class Extract(ODF2XHTML):
# Replace all the class selectors with a single class selector # Replace all the class selectors with a single class selector
# This will be added to the class attribute of all elements # This will be added to the class attribute of all elements
# that have one of these selectors. # that have one of these selectors.
replace_name = 'c_odt%d'%count replace_name = f'c_odt{count}'
count += 1 count += 1
for sel in r.selectorList: for sel in r.selectorList:
s = sel.selectorText[1:] s = sel.selectorText[1:]

View File

@ -57,7 +57,7 @@ class BookmarksMixin:
dat = [] dat = []
for bm in bookmarks: for bm in bookmarks:
if bm['type'] == 'legacy': if bm['type'] == 'legacy':
rec = '%s^%d#%s'%(bm['title'], bm['spine'], bm['pos']) rec = f"{bm['title']}^{bm['spine']}#{bm['pos']}"
else: else:
pos = bm['pos'] pos = bm['pos']
if isinstance(pos, numbers.Number): if isinstance(pos, numbers.Number):
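
Unlike the original %d, the f-string above no longer coerces bm['spine'] to an integer, so the conversion is faithful only if the stored spine value is already an int. Sketched with made-up bookmark data:

    bm = {'title': 'Chapter 1', 'spine': 4, 'pos': '12.5'}
    rec = f"{bm['title']}^{bm['spine']}#{bm['pos']}"
    assert rec == 'Chapter 1^4#12.5'
    # With a float the old and new forms would diverge:
    assert '%s^%d#%s' % ('t', 4.7, 'p') == 't^4#p'  # %d truncates
    assert f"{'t'}^{4.7}#{'p'}" == 't^4.7#p'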

View File

@ -103,8 +103,7 @@ def html5_parse(data, max_nesting_depth=100):
if isinstance(x.tag, string_or_bytes) and not len(x): # Leaf node if isinstance(x.tag, string_or_bytes) and not len(x): # Leaf node
depth = node_depth(x) depth = node_depth(x)
if depth > max_nesting_depth: if depth > max_nesting_depth:
raise ValueError('HTML 5 parsing resulted in a tree with nesting' raise ValueError(f'HTML 5 parsing resulted in a tree with nesting depth > {max_nesting_depth}')
' depth > %d'%max_nesting_depth)
return data return data

View File

@ -231,7 +231,7 @@ class MimetypeMismatch(BaseError):
c = 0 c = 0
while container.has_name(new_name): while container.has_name(new_name):
c += 1 c += 1
new_name = self.file_name.rpartition('.')[0] + ('%d.' % c) + self.change_ext_to new_name = self.file_name.rpartition('.')[0] + f'{c}.' + self.change_ext_to
rename_files(container, {self.file_name:new_name}) rename_files(container, {self.file_name:new_name})
changed = True changed = True
else: else:

View File

@ -146,7 +146,7 @@ class EscapedName(BaseError):
c = 0 c = 0
while self.sname in all_names: while self.sname in all_names:
c += 1 c += 1
self.sname = '%s_%d.%s' % (bn, c, ext) self.sname = f'{bn}_{c}.{ext}'
rename_files(container, {self.name:self.sname}) rename_files(container, {self.name:self.sname})
return True return True

View File

@ -344,7 +344,7 @@ class Container(ContainerBase): # {{{
item_id = 'id' item_id = 'id'
while item_id in all_ids: while item_id in all_ids:
c += 1 c += 1
item_id = 'id' + '%d'%c item_id = 'id' + f'{c}'
manifest = self.opf_xpath('//opf:manifest')[0] manifest = self.opf_xpath('//opf:manifest')[0]
href = self.name_to_href(name, self.opf_name) href = self.name_to_href(name, self.opf_name)
item = manifest.makeelement(OPF('item'), item = manifest.makeelement(OPF('item'),
@ -369,7 +369,7 @@ class Container(ContainerBase): # {{{
base, ext = name.rpartition('.')[::2] base, ext = name.rpartition('.')[::2]
if c > 1: if c > 1:
base = base.rpartition('-')[0] base = base.rpartition('-')[0]
name = '%s-%d.%s' % (base, c, ext) name = f'{base}-{c}.{ext}'
return name return name
def add_file(self, name, data, media_type=None, spine_index=None, modify_name_if_needed=False, process_manifest_item=None): def add_file(self, name, data, media_type=None, spine_index=None, modify_name_if_needed=False, process_manifest_item=None):

View File

@ -382,7 +382,7 @@ def create_epub_cover(container, cover_path, existing_image, options=None):
container.log.exception('Failed to get width and height of cover') container.log.exception('Failed to get width and height of cover')
ar = 'xMidYMid meet' if keep_aspect else 'none' ar = 'xMidYMid meet' if keep_aspect else 'none'
templ = CoverManager.SVG_TEMPLATE.replace('__ar__', ar) templ = CoverManager.SVG_TEMPLATE.replace('__ar__', ar)
templ = templ.replace('__viewbox__', '0 0 %d %d'%(width, height)) templ = templ.replace('__viewbox__', f'0 0 {width} {height}')
templ = templ.replace('__width__', str(width)) templ = templ.replace('__width__', str(width))
templ = templ.replace('__height__', str(height)) templ = templ.replace('__height__', str(height))
folder = recommended_folders[tname] folder = recommended_folders[tname]
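
Only the dynamic parts of the SVG wrapper become f-strings here; the template string keeps its __ar__ and __viewbox__ placeholders since it is defined once and filled in at runtime. A simplified sketch of that pattern (the real SVG_TEMPLATE is longer):

    SVG_TEMPLATE = '<svg viewBox="__viewbox__" preserveAspectRatio="__ar__"/>'
    width, height = 600, 800
    templ = SVG_TEMPLATE.replace('__ar__', 'xMidYMid meet')
    templ = templ.replace('__viewbox__', f'0 0 {width} {height}')
    assert templ == '<svg viewBox="0 0 600 800" preserveAspectRatio="xMidYMid meet"/>'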

View File

@ -98,7 +98,7 @@ def compress_images(container, report=None, names=None, jpeg_quality=None, webp_
if not keep_going: if not keep_going:
abort.set() abort.set()
progress_callback(0, num_to_process, '') progress_callback(0, num_to_process, '')
[Worker(abort, 'CompressImage%d' % i, queue, results, jpeg_quality, webp_quality, pc) for i in range(min(detect_ncpus(), num_to_process))] [Worker(abort, f'CompressImage{i}', queue, results, jpeg_quality, webp_quality, pc) for i in range(min(detect_ncpus(), num_to_process))]
queue.join() queue.join()
before_total = after_total = 0 before_total = after_total = 0
processed_num = 0 processed_num = 0

View File

@ -218,7 +218,7 @@ def replace_file(container, name, path, basename, force_mt=None):
b, e = nname.rpartition('.')[0::2] b, e = nname.rpartition('.')[0::2]
while container.exists(nname): while container.exists(nname):
count += 1 count += 1
nname = b + ('_%d.%s' % (count, e)) nname = b + f'_{count}.{e}'
rename_files(container, {name:nname}) rename_files(container, {name:nname})
mt = force_mt or container.guess_type(nname) mt = force_mt or container.guess_type(nname)
container.mime_map[nname] = mt container.mime_map[nname] = mt
@ -308,7 +308,7 @@ def rationalize_folders(container, folder_type_map):
while new_name in all_names or new_name in new_names: while new_name in all_names or new_name in new_names:
c += 1 c += 1
n, ext = bn.rpartition('.')[0::2] n, ext = bn.rpartition('.')[0::2]
new_name = posixpath.join(folder, '%s_%d.%s' % (n, c, ext)) new_name = posixpath.join(folder, f'{n}_{c}.{ext}')
name_map[name] = new_name name_map[name] = new_name
new_names.add(new_name) new_names.add(new_name)
return name_map return name_map

View File

@ -215,7 +215,7 @@ def split(container, name, loc_or_xpath, before=True, totals=None):
nname, s = None, 0 nname, s = None, 0
while not nname or container.exists(nname): while not nname or container.exists(nname):
s += 1 s += 1
nname = '%s_split%d.%s' % (base, s, ext) nname = f'{base}_split{s}.{ext}'
manifest_item = container.generate_item(nname, media_type=container.mime_map[name]) manifest_item = container.generate_item(nname, media_type=container.mime_map[name])
bottom_name = container.href_to_name(manifest_item.get('href'), container.opf_name) bottom_name = container.href_to_name(manifest_item.get('href'), container.opf_name)
@ -287,7 +287,7 @@ def multisplit(container, name, xpath, before=True):
current = name current = name
all_names = [name] all_names = [name]
for i in range(len(nodes)): for i in range(len(nodes)):
current = split(container, current, '//*[@calibre-split-point="%d"]' % i, before=before) current = split(container, current, f'//*[@calibre-split-point="{i}"]', before=before)
all_names.append(current) all_names.append(current)
for x in all_names: for x in all_names:
@ -345,7 +345,7 @@ def unique_anchor(seen_anchors, current):
ans = current ans = current
while ans in seen_anchors: while ans in seen_anchors:
c += 1 c += 1
ans = '%s_%d' % (current, c) ans = f'{current}_{c}'
return ans return ans
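
The same collide-then-suffix pattern recurs throughout this commit (file names, manifest ids, anchors); unique_anchor above is its smallest instance. A standalone sketch:

    def unique_name(current, seen):
        ans, c = current, 0
        while ans in seen:
            c += 1
            ans = f'{current}_{c}'
        return ans

    assert unique_name('toc', {'toc', 'toc_1'}) == 'toc_2'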

View File

@ -51,7 +51,7 @@ def create_epub(manifest, spine=(), guide=(), meta_cover=None, ver=3):
spine = [x[0] for x in manifest if guess_type(x[0]) in OEB_DOCS] spine = [x[0] for x in manifest if guess_type(x[0]) in OEB_DOCS]
spine = ''.join(f'<itemref idref="{name}"/>' for name in spine) spine = ''.join(f'<itemref idref="{name}"/>' for name in spine)
guide = ''.join(f'<reference href="{name}" type="{typ}" title="{title}"/>' for name, typ, title in guide) guide = ''.join(f'<reference href="{name}" type="{typ}" title="{title}"/>' for name, typ, title in guide)
opf = OPF_TEMPLATE.format(manifest=mo, ver='%d.0'%ver, metadata=metadata, spine=spine, guide=guide) opf = OPF_TEMPLATE.format(manifest=mo, ver=f'{ver}.0', metadata=metadata, spine=spine, guide=guide)
buf = BytesIO() buf = BytesIO()
with ZipFile(buf, 'w', ZIP_STORED) as zf: with ZipFile(buf, 'w', ZIP_STORED) as zf:
zf.writestr('META-INF/container.xml', b''' zf.writestr('META-INF/container.xml', b'''
@ -79,7 +79,7 @@ class Structure(BaseTest):
ep = os.path.join(self.tdir, str(n) + 'book.epub') ep = os.path.join(self.tdir, str(n) + 'book.epub')
with open(ep, 'wb') as f: with open(ep, 'wb') as f:
f.write(create_epub(*args, **kw).getvalue()) f.write(create_epub(*args, **kw).getvalue())
c = get_container(ep, tdir=os.path.join(self.tdir, 'container%d' % n), tweak_mode=True) c = get_container(ep, tdir=os.path.join(self.tdir, f'container{n}'), tweak_mode=True)
return c return c
def test_toc_detection(self): def test_toc_detection(self):

View File

@ -622,7 +622,7 @@ def create_ncx(toc, to_href, btitle, lang, uid):
def process_node(xml_parent, toc_parent): def process_node(xml_parent, toc_parent):
for child in toc_parent: for child in toc_parent:
play_order['c'] += 1 play_order['c'] += 1
point = etree.SubElement(xml_parent, NCX('navPoint'), id='num_%d' % play_order['c'], point = etree.SubElement(xml_parent, NCX('navPoint'), id=f"num_{play_order['c']}",
playOrder=str(play_order['c'])) playOrder=str(play_order['c']))
label = etree.SubElement(point, NCX('navLabel')) label = etree.SubElement(point, NCX('navLabel'))
title = child.title title = child.title
@ -853,7 +853,7 @@ def toc_to_html(toc, container, toc_name, title, lang=None):
li.append(a) li.append(a)
if len(toc) > 0: if len(toc) > 0:
parent = li.makeelement(XHTML('ul')) parent = li.makeelement(XHTML('ul'))
parent.set('class', 'level%d' % (style_level)) parent.set('class', f'level{style_level}')
li.append(parent) li.append(parent)
a.tail = '\n\n' + (indent*(level+2)) a.tail = '\n\n' + (indent*(level+2))
parent.text = '\n'+(indent*(level+3)) parent.text = '\n'+(indent*(level+3))
@ -909,7 +909,7 @@ def create_inline_toc(container, title=None):
name, c = 'toc.xhtml', 0 name, c = 'toc.xhtml', 0
while container.has_name(name): while container.has_name(name):
c += 1 c += 1
name = 'toc%d.xhtml' % c name = f'toc{c}.xhtml'
container.add_file(name, raw, spine_index=0) container.add_file(name, raw, spine_index=0)
else: else:
with container.open(name, 'wb') as f: with container.open(name, 'wb') as f:

View File

@ -142,7 +142,7 @@ class CoverManager:
# if self.preserve_aspect_ratio: # if self.preserve_aspect_ratio:
# width, height = 600, 800 # width, height = 600, 800
self.svg_template = self.svg_template.replace('__viewbox__', self.svg_template = self.svg_template.replace('__viewbox__',
'0 0 %d %d'%(width, height)) f'0 0 {width} {height}')
self.svg_template = self.svg_template.replace('__width__', self.svg_template = self.svg_template.replace('__width__',
str(width)) str(width))
self.svg_template = self.svg_template.replace('__height__', self.svg_template = self.svg_template.replace('__height__',

View File

@ -132,7 +132,7 @@ class UniqueFilenames: # {{{
c = 0 c = 0
while True: while True:
c += 1 c += 1
suffix = '_u%d'%c suffix = f'_u{c}'
candidate = base + suffix + ext candidate = base + suffix + ext
if candidate not in self.seen_filenames: if candidate not in self.seen_filenames:
return suffix return suffix

View File

@ -143,7 +143,7 @@ class RemoveFakeMargins:
for p in paras(body): for p in paras(body):
level = level_of(p, body) level = level_of(p, body)
level = '%s_%d'%(barename(p.tag), level) level = f'{barename(p.tag)}_{level}'
if level not in self.levels: if level not in self.levels:
self.levels[level] = [] self.levels[level] = []
self.levels[level].append(p) self.levels[level].append(p)
@ -151,7 +151,7 @@ class RemoveFakeMargins:
remove = set() remove = set()
for k, v in iteritems(self.levels): for k, v in iteritems(self.levels):
num = len(v) num = len(v)
self.log.debug('Found %d items of level:'%num, k) self.log.debug(f'Found {num} items of level:', k)
level = int(k.split('_')[-1]) level = int(k.split('_')[-1])
tag = k.split('_')[0] tag = k.split('_')[0]
if tag == 'p' and num < 25: if tag == 'p' and num < 25:

View File

@ -217,8 +217,7 @@ class SVGRasterizer:
href = self.images[key] href = self.images[key]
else: else:
logger = self.oeb.logger logger = self.oeb.logger
logger.info('Rasterizing %r to %dx%d' logger.info(f'Rasterizing {svgitem.href!r} to {size.width()}x{size.height()}')
% (svgitem.href, size.width(), size.height()))
image = QImage(size, QImage.Format.Format_ARGB32_Premultiplied) image = QImage(size, QImage.Format.Format_ARGB32_Premultiplied)
image.fill(QColor('white').rgb()) image.fill(QColor('white').rgb())
painter = QPainter(image) painter = QPainter(image)

Some files were not shown because too many files have changed in this diff.