Automated conversion of % format specifiers

Using flynt. The changes have been filtered, because even in safe mode flynt can be too aggressive.
un-pogaz 2025-01-28 09:37:08 +02:00
parent c8d9e0c24e
commit e5a65f69ec
67 changed files with 151 additions and 151 deletions
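For context: flynt rewrites old-style % interpolation into f-strings, carrying the format specification over unchanged so the output is byte-for-byte identical. A minimal illustrative sketch of the kind of rewrite applied throughout this commit (made-up values, not taken from the diff below):

# Old-style % interpolation and the equivalent f-string produce the same text:
assert 'uploading %d books' % 3 == f'uploading {3} books'
# Format specs carry over unchanged after the colon:
assert '%f' % 1.5 == f'{1.5:f}'                  # '1.500000'
assert '%.3gpt' % (115/20) == f'{115/20:.3g}pt'  # '5.75pt'
assert '%08X' % 255 == f'{255:08X}'              # '000000FF'
assert '%.2g' % (9/2) == f'{9/2:.2g}'            # '4.5'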

View File

@@ -116,7 +116,7 @@ class DemoTool(Tool):
num = re.search(r'[0-9.]+', val)
if num is not None:
num = num.group()
- val = val.replace(num, '%f' % (float(num) * factor))
+ val = val.replace(num, f'{float(num)*factor:f}')
style.setProperty('font-size', val)
# We should also be dealing with the font shorthand property and
# font sizes specified as non numbers, but those are left as exercises

View File

@@ -76,5 +76,20 @@ docstring-quotes = 'single'
inline-quotes = 'single'
multiline-quotes = 'single'
+ [tool.flynt]
+ line-length = 400 # over value to catch every case
+ transform-format = false # don't transform already existing format call
+ exclude = [
+     "bypy/",
+     "setup/polib.py",
+     "setup/linux-installer.py",
+     "src/calibre/ebooks/metadata/sources/",
+     "src/calibre/gui2/store/stores/",
+     "src/css_selectors/",
+     "src/polyglot/",
+     "src/templite/",
+     "src/tinycss/",
+ ]
[tool.pylsp-mypy]
enabled = false
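Per the comment in the config above, transform-format = false makes flynt leave existing str.format() calls untouched and rewrite only %-interpolation. A hypothetical snippet (not from this diff) showing the intended effect:

title = 'Moby-Dick'
greeting = 'Reading {}'.format(title)  # left as-is: transform-format = false
status = 'found %s' % title            # still rewritten to: f'found {title}'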

View File

@@ -756,7 +756,7 @@ class WritingTest(BaseTest):
self.assertEqual({1,2,3}, cache.set_sort_for_authors(sdata))
for bid in (1, 2, 3):
self.assertIn(', changed', cache.field_for('author_sort', bid))
- sdata = {aid:'%s, changed' % (aid*2 if aid == max(adata) else aid) for aid in adata}
+ sdata = {aid:f'{aid*2 if aid == max(adata) else aid}, changed' for aid in adata}
self.assertEqual({3}, cache.set_sort_for_authors(sdata),
'Setting the author sort to the same value as before, incorrectly marked some books as dirty')
# }}}

View File

@@ -1721,9 +1721,9 @@ class KOBOTOUCH(KOBO):
debugging_title = self.debugging_title
debug_print(f"KoboTouch:books - set_debugging_title to '{debugging_title}'")
bl.set_debugging_title(debugging_title)
- debug_print('KoboTouch:books - length bl=%d'%len(bl))
+ debug_print(f'KoboTouch:books - length bl={len(bl)}')
need_sync = self.parse_metadata_cache(bl, prefix, self.METADATA_CACHE)
- debug_print('KoboTouch:books - length bl after sync=%d'%len(bl))
+ debug_print(f'KoboTouch:books - length bl after sync={len(bl)}')
# make a dict cache of paths so the lookup in the loop below is faster.
bl_cache = {}
@@ -2266,7 +2266,7 @@ class KOBOTOUCH(KOBO):
def upload_books(self, files, names, on_card=None, end_session=True,
metadata=None):
- debug_print('KoboTouch:upload_books - %d books'%(len(files)))
+ debug_print(f'KoboTouch:upload_books - {len(files)} books')
debug_print('KoboTouch:upload_books - files=', files)
if self.modifying_epub():
@@ -2611,7 +2611,7 @@ class KOBOTOUCH(KOBO):
# debug_print("KoboTouch:update_device_database_collections - self.bookshelvelist=", self.bookshelvelist)
# Process any collections that exist
for category, books in collections.items():
- debug_print("KoboTouch:update_device_database_collections - category='%s' books=%d"%(category, len(books)))
+ debug_print(f"KoboTouch:update_device_database_collections - category='{category}' books={len(books)}")
if create_collections and not (category in supportedcategories or category in readstatuslist or category in accessibilitylist):
self.check_for_bookshelf(connection, category)
# if category in self.bookshelvelist:
@@ -2642,7 +2642,7 @@ class KOBOTOUCH(KOBO):
category_added = True
elif category in self.bookshelvelist and self.supports_bookshelves:
if show_debug:
- debug_print(' length book.device_collections=%d'%len(book.device_collections))
+ debug_print(f' length book.device_collections={len(book.device_collections)}')
if category not in book.device_collections:
if show_debug:
debug_print(' Setting bookshelf on device')

View File

@@ -27,8 +27,7 @@ from calibre.utils.filenames import shorten_components_to
from calibre.utils.icu import lower as icu_lower
from polyglot.builtins import as_bytes, iteritems, itervalues
- BASE = importlib.import_module('calibre.devices.mtp.%s.driver'%(
-     'windows' if iswindows else 'unix')).MTP_DEVICE
+ BASE = importlib.import_module('calibre.devices.mtp.{}.driver'.format('windows' if iswindows else 'unix')).MTP_DEVICE
DEFAULT_THUMBNAIL_HEIGHT = 320

View File

@@ -108,7 +108,7 @@ class FileOrFolder:
path = ''
datum = f'size={self.size}'
if self.is_folder or self.is_storage:
- datum = 'children=%s'%(len(self.files) + len(self.folders))
+ datum = f'children={len(self.files)+len(self.folders)}'
return f'{name}(id={self.object_id}, storage_id={self.storage_id}, {datum}, path={path}, modified={self.last_mod_string})'
__str__ = __repr__

View File

@@ -339,8 +339,7 @@ class MTP_DEVICE(MTPDeviceBase):
prints('There were some errors while getting the '
f' filesystem from {self.current_friendly_name}: {self.format_errorstack(all_errs)}')
self._filesystem_cache = FilesystemCache(storage, all_items)
- debug('Filesystem metadata loaded in %g seconds (%d objects)'%(
-     time.time()-st, len(self._filesystem_cache)))
+ debug(f'Filesystem metadata loaded in {time.time()-st:g} seconds ({len(self._filesystem_cache)} objects)')
return self._filesystem_cache
@synchronous

View File

@@ -289,8 +289,7 @@ class MTP_DEVICE(MTPDeviceBase):
all_storage.append(storage)
items.append(itervalues(id_map))
self._filesystem_cache = FilesystemCache(all_storage, chain(*items))
- debug('Filesystem metadata loaded in %g seconds (%d objects)'%(
-     time.time()-st, len(self._filesystem_cache)))
+ debug(f'Filesystem metadata loaded in {time.time()-st:g} seconds ({len(self._filesystem_cache)} objects)')
return self._filesystem_cache
@same_thread

View File

@@ -1499,12 +1499,12 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
self.report_progress((i + 1) / float(len(files)), _('Transferring books to device...'))
self.report_progress(1.0, _('Transferring books to device...'))
- self._debug('finished uploading %d books' % (len(files)))
+ self._debug(f'finished uploading {len(files)} books')
return paths
@synchronous('sync_lock')
def add_books_to_metadata(self, locations, metadata, booklists):
- self._debug('adding metadata for %d books' % (len(metadata)))
+ self._debug(f'adding metadata for {len(metadata)} books')
metadata = iter(metadata)
for i, location in enumerate(locations):
@@ -1558,7 +1558,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
bl.remove_book(book)
self._set_known_metadata(book, remove=True)
self.report_progress(1.0, _('Removing books from device metadata listing...'))
- self._debug('finished removing metadata for %d books' % (len(paths)))
+ self._debug(f'finished removing metadata for {len(paths)} books')
@synchronous('sync_lock')
def get_file(self, path, outfile, end_session=True, this_book=None, total_books=None):

View File

@@ -299,8 +299,7 @@ class USBMS(CLI, Device):
need_sync = True
del bl[idx]
- debug_print('USBMS: count found in cache: %d, count of files in metadata: %d, need_sync: %s' %
-     (len(bl_cache), len(bl), need_sync))
+ debug_print(f'USBMS: count found in cache: {len(bl_cache)}, count of files in metadata: {len(bl)}, need_sync: {need_sync}')
if need_sync: # self.count_found_in_bl != len(bl) or need_sync:
if oncard == 'cardb':
self.sync_booklists((None, None, bl))
@@ -315,7 +314,7 @@ class USBMS(CLI, Device):
def upload_books(self, files, names, on_card=None, end_session=True,
metadata=None):
- debug_print('USBMS: uploading %d books'%(len(files)))
+ debug_print(f'USBMS: uploading {len(files)} books')
path = self._sanity_check(on_card, files)
@@ -341,7 +340,7 @@ class USBMS(CLI, Device):
self.report_progress((i+1) / float(len(files)), _('Transferring books to device...'))
self.report_progress(1.0, _('Transferring books to device...'))
- debug_print('USBMS: finished uploading %d books'%(len(files)))
+ debug_print(f'USBMS: finished uploading {len(files)} books')
return list(zip(paths, cycle([on_card])))
def upload_cover(self, path, filename, metadata, filepath):
@@ -358,7 +357,7 @@ class USBMS(CLI, Device):
pass
def add_books_to_metadata(self, locations, metadata, booklists):
- debug_print('USBMS: adding metadata for %d books'%(len(metadata)))
+ debug_print(f'USBMS: adding metadata for {len(metadata)} books')
metadata = iter(metadata)
locations = tuple(locations)
@@ -418,7 +417,7 @@ class USBMS(CLI, Device):
pass
def delete_books(self, paths, end_session=True):
- debug_print('USBMS: deleting %d books'%(len(paths)))
+ debug_print(f'USBMS: deleting {len(paths)} books')
for i, path in enumerate(paths):
self.report_progress((i+1) / float(len(paths)), _('Removing books from device...'))
path = self.normalize_path(path)
@@ -428,10 +427,10 @@ class USBMS(CLI, Device):
self.delete_extra_book_files(path)
self.report_progress(1.0, _('Removing books from device...'))
- debug_print('USBMS: finished deleting %d books'%(len(paths)))
+ debug_print(f'USBMS: finished deleting {len(paths)} books')
def remove_books_from_metadata(self, paths, booklists):
- debug_print('USBMS: removing metadata for %d books'%(len(paths)))
+ debug_print(f'USBMS: removing metadata for {len(paths)} books')
for i, path in enumerate(paths):
self.report_progress((i+1) / float(len(paths)), _('Removing books from device metadata listing...'))
@@ -440,7 +439,7 @@ class USBMS(CLI, Device):
if path.endswith(book.path):
bl.remove_book(book)
self.report_progress(1.0, _('Removing books from device metadata listing...'))
- debug_print('USBMS: finished removing metadata for %d books'%(len(paths)))
+ debug_print(f'USBMS: finished removing metadata for {len(paths)} books')
# If you override this method and you use book._new_book, then you must
# complete the processing before you call this method. The flag is cleared

View File

@@ -320,7 +320,7 @@ def create_option_parser(args, log):
log('\t'+title)
except:
log('\t'+repr(title))
- log('%d recipes available'%len(titles))
+ log(f'{len(titles)} recipes available')
raise SystemExit(0)
parser = option_parser()

View File

@@ -163,7 +163,7 @@ class SNBOutput(OutputFormatPlugin):
ch.set('src', ProcessFileName(tocitem.href) + '.snbc')
ch.text = tocitem.title
- etree.SubElement(tocHead, 'chapters').text = '%d' % len(tocBody)
+ etree.SubElement(tocHead, 'chapters').text = str(len(tocBody))
with open(os.path.join(snbfDir, 'toc.snbf'), 'wb') as f:
f.write(etree.tostring(tocInfoTree, pretty_print=True, encoding='utf-8'))

View File

@@ -28,9 +28,9 @@ def _read_width(elem, get):
elif typ == 'auto':
ans = 'auto'
elif typ == 'dxa':
- ans = '%.3gpt' % (w/20)
+ ans = f'{w/20:.3g}pt'
elif typ == 'pct':
- ans = '%.3g%%' % (w/50)
+ ans = f'{w/50:.3g}%'
return ans
@@ -243,7 +243,7 @@ class RowStyle(Style):
rule, val = self.height
if rule != 'auto':
try:
- c['min-height' if rule == 'atLeast' else 'height'] = '%.3gpt' % (int(val)/20)
+ c['min-height' if rule == 'atLeast' else 'height'] = f'{int(val)/20:.3g}pt'
except (ValueError, TypeError):
pass
c.update(self.convert_spacing())
@@ -282,7 +282,7 @@ class CellStyle(Style):
if val not in (inherit, 'auto'):
c[f'padding-{x}'] = val
elif val is inherit and x in {'left', 'right'}:
- c[f'padding-{x}'] = '%.3gpt' % (115/20)
+ c[f'padding-{x}'] = f'{115/20:.3g}pt'
# In Word, tables are apparently rendered with some default top and
# bottom padding irrespective of the cellMargin values. Simulate
# that here.
@@ -353,7 +353,7 @@ class TableStyle(Style):
for x in ('left', 'top', 'right', 'bottom'):
val = self.float.get(f'{x}FromText', 0)
try:
- val = '%.3gpt' % (int(val) / 20)
+ val = f'{int(val)/20:.3g}pt'
except (ValueError, TypeError):
val = '0'
c[f'margin-{x}'] = val

View File

@@ -696,7 +696,7 @@ class Convert:
else:
clear = child.get('clear', None)
if clear in {'all', 'left', 'right'}:
- br = BR(style='clear:%s'%('both' if clear == 'all' else clear))
+ br = BR(style='clear:{}'.format('both' if clear == 'all' else clear))
else:
br = BR()
text.add_elem(br)

View File

@@ -34,7 +34,7 @@ class SpannedCell:
def serialize(self, tr, makeelement):
tc = makeelement(tr, 'w:tc')
tcPr = makeelement(tc, 'w:tcPr')
- makeelement(tcPr, 'w:%sMerge' % ('h' if self.horizontal else 'v'), w_val='continue')
+ makeelement(tcPr, 'w:{}Merge'.format('h' if self.horizontal else 'v'), w_val='continue')
makeelement(tc, 'w:p')
def applicable_borders(self, edge):

View File

@@ -320,7 +320,7 @@ class UnBinary:
elif state == 'get value':
if count == 0xfffe:
if not in_censorship:
- buf.write(encode('%s"' % (oc - 1)))
+ buf.write(encode(f'{oc-1}"'))
in_censorship = False
state = 'get attr'
elif count > 0:

View File

@@ -178,7 +178,7 @@ class LRFStream(LRFObject):
if len(self.stream) != decomp_size:
raise LRFParseError('Stream decompressed size is wrong!')
if stream.read(2) != b'\x06\xF5':
- print('Warning: corrupted end-of-stream tag at %08X; skipping it'%(stream.tell()-2))
+ print(f'Warning: corrupted end-of-stream tag at {stream.tell()-2:08X}; skipping it')
self.end_stream(None, None)
@@ -497,10 +497,10 @@ class TextCSS:
fs = getattr(obj, 'fontsize', None)
if fs is not None:
- ans += item('font-size: %fpt;'%(int(fs)/10))
+ ans += item(f'font-size: {int(fs)/10:f}pt;')
fw = getattr(obj, 'fontweight', None)
if fw is not None:
- ans += item('font-weight: %s;'%('bold' if int(fw) >= 700 else 'normal'))
+ ans += item('font-weight: {};'.format('bold' if int(fw) >= 700 else 'normal'))
fn = getattr(obj, 'fontfacename', None)
if fn is not None:
fn = cls.FONT_MAP[fn]
@@ -519,10 +519,10 @@ class TextCSS:
ans += item(f'text-align: {al};')
lh = getattr(obj, 'linespace', None)
if lh is not None:
- ans += item('text-align: %fpt;'%(int(lh)/10))
+ ans += item(f'text-align: {int(lh)/10:f}pt;')
pi = getattr(obj, 'parindent', None)
if pi is not None:
- ans += item('text-indent: %fpt;'%(int(pi)/10))
+ ans += item(f'text-indent: {int(pi)/10:f}pt;')
return ans

View File

@@ -95,7 +95,7 @@ def writeSignedWord(f, sword):
def writeWords(f, *words):
- f.write(struct.pack('<%dH' % len(words), *words))
+ f.write(struct.pack(f'<{len(words)}H', *words))
def writeDWord(f, dword):
@@ -103,7 +103,7 @@ def writeDWord(f, dword):
def writeDWords(f, *dwords):
- f.write(struct.pack('<%dI' % len(dwords), *dwords))
+ f.write(struct.pack(f'<{len(dwords)}I', *dwords))
def writeQWord(f, qword):

View File

@@ -708,7 +708,7 @@ class Metadata:
elif datatype == 'bool':
res = _('Yes') if res else _('No')
elif datatype == 'rating':
- res = '%.2g'%(res/2)
+ res = f'{res/2:.2g}'
elif datatype in ['int', 'float']:
try:
fmt = cmeta['display'].get('number_format', None)
@@ -748,7 +748,7 @@ class Metadata:
elif datatype == 'datetime':
res = format_date(res, fmeta['display'].get('date_format','dd MMM yyyy'))
elif datatype == 'rating':
- res = '%.2g'%(res/2)
+ res = f'{res/2:.2g}'
elif key == 'size':
res = human_readable(res)
return (name, str(res), orig_res, fmeta)
@@ -785,7 +785,7 @@ class Metadata:
if not self.is_null('languages'):
fmt('Languages', ', '.join(self.languages))
if self.rating is not None:
- fmt('Rating', ('%.2g'%(float(self.rating)/2)) if self.rating
-     else '')
+ fmt('Rating', (f'{float(self.rating)/2:.2g}') if self.rating
+     else '')
if self.timestamp is not None:
fmt('Timestamp', isoformat(self.timestamp))

View File

@@ -500,14 +500,12 @@ class MOBIHeader: # {{{
if self.has_exth:
ans += '\n\n' + str(self.exth)
- ans += '\n\nBytes after EXTH (%d bytes): %s'%(
-     len(self.bytes_after_exth),
-     format_bytes(self.bytes_after_exth))
+ ans += f'\n\nBytes after EXTH ({len(self.bytes_after_exth)} bytes): {format_bytes(self.bytes_after_exth)}'
ans += '\nNumber of bytes after full name: %d' % (len(self.raw) - (self.fullname_offset +
self.fullname_length))
- ans += '\nRecord 0 length: %d'%len(self.raw)
+ ans += f'\nRecord 0 length: {len(self.raw)}'
return ans
# }}}

View File

@@ -98,7 +98,7 @@ class Index:
for field in INDEX_HEADER_FIELDS:
a('%-12s: %r'%(FIELD_NAMES.get(field, field), self.header[field]))
ans.extend(['', ''])
- ans += ['*'*10 + ' Index Record Headers (%d records) ' % len(self.index_headers) + '*'*10]
+ ans += ['*'*10 + f' Index Record Headers ({len(self.index_headers)} records) ' + '*'*10]
for i, header in enumerate(self.index_headers):
ans += ['*'*10 + ' Index Record %d ' % i + '*'*10]
for field in INDEX_HEADER_FIELDS:
@@ -111,7 +111,7 @@ class Index:
ans.extend(['', ''])
if self.table is not None:
- a('*'*10 + ' %d Index Entries '%len(self.table) + '*'*10)
+ a('*'*10 + f' {len(self.table)} Index Entries ' + '*'*10)
for k, v in iteritems(self.table):
a(f'{k}: {v!r}')
@@ -139,8 +139,7 @@ class SKELIndex(Index):
for i, text in enumerate(self.table):
tag_map = self.table[text]
if set(tag_map) != {1, 6}:
- raise ValueError('SKEL Index has unknown tags: %s'%
-     (set(tag_map)-{1,6}))
+ raise ValueError(f'SKEL Index has unknown tags: {set(tag_map)-{1,6}}')
self.records.append(File(
i, # file_number
text, # name
@@ -160,8 +159,7 @@ class SECTIndex(Index):
for i, text in enumerate(self.table):
tag_map = self.table[text]
if set(tag_map) != {2, 3, 4, 6}:
- raise ValueError('Chunk Index has unknown tags: %s'%
-     (set(tag_map)-{2, 3, 4, 6}))
+ raise ValueError(f'Chunk Index has unknown tags: {set(tag_map)-{2,3,4,6}}')
toc_text = self.cncx[tag_map[2][0]]
self.records.append(Elem(

View File

@@ -197,7 +197,7 @@ class IndexHeader: # {{{
# raise ValueError('Non null trailing bytes after IDXT')
def __str__(self):
- ans = ['*'*20 + ' Index Header (%d bytes)'%len(self.record.raw)+ '*'*20]
+ ans = ['*'*20 + f' Index Header ({len(self.record.raw)} bytes)'+ '*'*20]
a = ans.append
def u(w):
@@ -363,8 +363,7 @@ class IndexEntry: # {{{
return [0, 0]
def __str__(self):
- ans = ['Index Entry(index=%s, length=%d)'%(
-     self.index, len(self.tags))]
+ ans = [f'Index Entry(index={self.index}, length={len(self.tags)})']
for tag in self.tags:
if tag.value is not None:
ans.append('\t'+str(tag))
@@ -412,7 +411,7 @@ class IndexRecord: # {{{
continue
def __str__(self):
- ans = ['*'*20 + ' Index Entries (%d entries) '%len(self.indices)+ '*'*20]
+ ans = ['*'*20 + f' Index Entries ({len(self.indices)} entries) '+ '*'*20]
a = ans.append
def u(w):
@@ -470,7 +469,7 @@ class CNCX: # {{{
return self.records.get(offset)
def __str__(self):
- ans = ['*'*20 + ' cncx (%d strings) '%len(self.records)+ '*'*20]
+ ans = ['*'*20 + f' cncx ({len(self.records)} strings) '+ '*'*20]
for k, v in iteritems(self.records):
ans.append('%10d : %s'%(k, v))
return '\n'.join(ans)
@@ -568,7 +567,7 @@ class TBSIndexing: # {{{
raise IndexError('Index %d not found'%idx)
def __str__(self):
- ans = ['*'*20 + ' TBS Indexing (%d records) '%len(self.record_indices)+ '*'*20]
+ ans = ['*'*20 + f' TBS Indexing ({len(self.record_indices)} records) '+ '*'*20]
for r, dat in iteritems(self.record_indices):
ans += self.dump_record(r, dat)[-1]
return '\n'.join(ans)

View File

@@ -43,7 +43,7 @@ class FDST:
return ans.append(f'{k}: {v}')
a('Offset to sections', self.sec_off)
a('Number of section records', self.num_sections)
- ans.append('**** %d Sections ****'% len(self.sections))
+ ans.append(f'**** {len(self.sections)} Sections ****')
for sec in self.sections:
ans.append('Start: %20d End: %d'%sec)

View File

@@ -80,7 +80,7 @@ def remove_kindlegen_markup(parts, aid_anchor_suffix, linked_aids):
aid = None
replacement = ''
if aid in linked_aids:
- replacement = ' id="%s"' % (aid + '-' + aid_anchor_suffix)
+ replacement = ' id="{}"'.format(aid + '-' + aid_anchor_suffix)
tag = within_tag_aid_position_pattern.sub(replacement, tag, 1)
srcpieces[j] = tag
part = ''.join(srcpieces)
@@ -150,7 +150,7 @@ def update_flow_links(mobi8_reader, resource_map, log):
num = int(m.group(1), 32)
href = resource_map[num-1]
if href:
- replacement = '"%s"'%('../'+ href)
+ replacement = '"{}"'.format('../'+ href)
tag = img_index_pattern.sub(replacement, tag, 1)
else:
log.warn(f'Referenced image {num} was not recognized '
@@ -168,7 +168,7 @@ def update_flow_links(mobi8_reader, resource_map, log):
num = int(m.group(1), 32)
href = resource_map[num-1]
if href:
- replacement = '"%s"'%('../'+ href)
+ replacement = '"{}"'.format('../'+ href)
tag = url_img_index_pattern.sub(replacement, tag, 1)
else:
log.warn(f'Referenced image {num} was not recognized as a '
@@ -182,9 +182,9 @@ def update_flow_links(mobi8_reader, resource_map, log):
log.warn(f'Referenced font {num} was not recognized as a '
f'valid font in {tag}')
else:
- replacement = '"%s"'%('../'+ href)
+ replacement = '"{}"'.format('../'+ href)
if href.endswith('.failed'):
- replacement = '"%s"'%('failed-'+href)
+ replacement = '"{}"'.format('failed-'+href)
tag = font_index_pattern.sub(replacement, tag, 1)
# process links to other css pieces
@@ -280,7 +280,7 @@ def insert_images_into_markup(parts, resource_map, log):
except IndexError:
href = ''
if href:
- replacement = '"%s"'%('../' + href)
+ replacement = '"{}"'.format('../' + href)
tag = img_index_pattern.sub(replacement, tag, 1)
else:
log.warn(f'Referenced image {num} was not recognized as '

View File

@@ -447,8 +447,7 @@ class Indexer: # {{{
if self.is_periodical and self.masthead_offset is None:
raise ValueError('Periodicals must have a masthead')
- self.log('Generating MOBI index for a %s'%('periodical' if
-     self.is_periodical else 'book'))
+ self.log('Generating MOBI index for a {}'.format('periodical' if self.is_periodical else 'book'))
self.is_flat_periodical = False
if self.is_periodical:
periodical_node = next(iter(oeb.toc))
@@ -530,7 +529,7 @@ class Indexer: # {{{
ans = header + body
if len(ans) > 0x10000:
- raise ValueError('Too many entries (%d) in the TOC'%len(offsets))
+ raise ValueError(f'Too many entries ({len(offsets)}) in the TOC')
return ans
# }}}

View File

@@ -82,7 +82,7 @@ def subset_all_fonts(container, font_stats, report):
report(_('The font %s was already subset')%font_name)
else:
report(_('Decreased the font {0} to {1} of its original size').format(
- font_name, ('%.1f%%' % (nlen/olen * 100))))
+ font_name, (f'{nlen/olen*100:.1f}%')))
changed = True
f.seek(0), f.truncate(), f.write(nraw)

View File

@@ -184,7 +184,7 @@ class StylizerRules:
if size == 'smallest':
size = 'xx-small'
if size in FONT_SIZE_NAMES:
- style['font-size'] = '%.1frem' % (self.profile.fnames[size] / float(self.profile.fbase))
+ style['font-size'] = f'{self.profile.fnames[size]/float(self.profile.fbase):.1f}rem'
if '-epub-writing-mode' in style:
for x in ('-webkit-writing-mode', 'writing-mode'):
style[x] = style.get(x, style['-epub-writing-mode'])
@@ -419,7 +419,7 @@ class Stylizer:
style['font-size'].endswith('pt'):
style = copy.copy(style)
size = float(style['font-size'][:-2])
- style['font-size'] = '%.2fpt' % (size * font_scale)
+ style['font-size'] = f'{size*font_scale:.2f}pt'
style = ';\n '.join(': '.join(item) for item in style.items())
rules.append(f'{selector} {{\n {style};\n}}')
return '\n'.join(rules)

View File

@@ -350,7 +350,7 @@ class CSSFlattener:
if value == 0 or not isinstance(value, numbers.Number):
continue
if value <= slineh:
- cssdict[property] = '%0.5fem' % (dlineh / fsize)
+ cssdict[property] = f'{dlineh/fsize:0.5f}em'
else:
try:
value = round(value / slineh) * dlineh
@@ -358,7 +358,7 @@ class CSSFlattener:
self.oeb.logger.warning(
'Invalid length:', value)
value = 0.0
- cssdict[property] = '%0.5fem' % (value / fsize)
+ cssdict[property] = f'{value/fsize:0.5f}em'
def flatten_node(self, node, stylizer, names, styles, pseudo_styles, psize, item_id, recurse=True):
if not isinstance(node.tag, string_or_bytes) or namespace(node.tag) not in (XHTML_NS, SVG_NS):
@@ -473,12 +473,12 @@ class CSSFlattener:
dyn_rescale = 1
fsize = self.fmap[_sbase]
fsize *= dyn_rescale
- cssdict['font-size'] = '%0.5fem'%(fsize/psize)
+ cssdict['font-size'] = f'{fsize/psize:0.5f}em'
psize = fsize
elif 'font-size' in cssdict or tag == 'body':
fsize = self.fmap[font_size]
try:
- cssdict['font-size'] = '%0.5fem' % (fsize / psize)
+ cssdict['font-size'] = f'{fsize/psize:0.5f}em'
except ZeroDivisionError:
cssdict['font-size'] = f'{fsize:.1f}pt'
psize = fsize

View File

@@ -234,7 +234,7 @@ class FlowSplitter:
self.was_split = len(self.trees) > 1
if self.was_split:
- self.log('\tSplit into %d parts'%len(self.trees))
+ self.log(f'\tSplit into {len(self.trees)} parts')
self.commit()
def split_on_page_breaks(self, orig_tree):
@@ -309,10 +309,10 @@ class FlowSplitter:
return True
def split_text(self, text, root, size):
- self.log.debug('\t\t\tSplitting text of length: %d'%len(text))
+ self.log.debug(f'\t\t\tSplitting text of length: {len(text)}')
rest = text.replace('\r', '')
parts = rest.split('\n\n')
- self.log.debug('\t\t\t\tFound %d parts'%len(parts))
+ self.log.debug(f'\t\t\t\tFound {len(parts)} parts')
if max(map(len, parts)) > size:
raise SplitError('Cannot split as file contains a <pre> tag '
'with a very large paragraph', root)

View File

@@ -179,8 +179,7 @@ class SubsetFonts:
font['item'].unload_data_from_memory()
if totals[0]:
- self.log('Reduced total font size to %.1f%% of original'%
-     (totals[0]/totals[1] * 100))
+ self.log(f'Reduced total font size to {totals[0]/totals[1]*100:.1f}% of original')
def find_embedded_fonts(self):
'''

View File

@@ -28,7 +28,7 @@ def ereader_header_info(header):
print('')
ereader_header_info202(h0)
else:
- raise EreaderError('Size mismatch. eReader header record size %i KB is not supported.' % len(h0))
+ raise EreaderError(f'Size mismatch. eReader header record size {len(h0)} KB is not supported.')
def pdb_header_info(header):

View File

@@ -696,7 +696,7 @@ class Region:
def dump(self, f):
f.write('############################################################\n')
- f.write('########## Region (%d columns) ###############\n'%len(self.columns))
+ f.write(f'########## Region ({len(self.columns)} columns) ###############\n')
f.write('############################################################\n\n')
for i, col in enumerate(self.columns):
col.dump(f, i)

View File

@@ -107,7 +107,7 @@ class CMap(Stream):
mapping = []
for m in maps:
meat = '\n'.join(f'{k} {v}' for k, v in iteritems(m))
- mapping.append('%d beginbfchar\n%s\nendbfchar'%(len(m), meat))
+ mapping.append(f'{len(m)} beginbfchar\n{meat}\nendbfchar')
try:
name = name.encode('ascii').decode('ascii')
except Exception:
@@ -177,8 +177,8 @@ class Font:
except NoGlyphs:
if self.used_glyphs:
debug(
- 'Subsetting of %s failed, font appears to have no glyphs for the %d characters it is used with, some text may not be rendered in the PDF' %
- (self.metrics.names.get('full_name', 'Unknown'), len(self.used_glyphs)))
+ 'Subsetting of {} failed, font appears to have no glyphs for the {} characters it is used with, some text may not be rendered in the PDF'
+ .format(self.metrics.names.get('full_name', 'Unknown'), len(self.used_glyphs)))
if self.is_otf:
self.font_stream.write(self.metrics.sfnt['CFF '].raw)
else:

View File

@@ -92,24 +92,24 @@ class Page(Stream):
def set_opacity(self, opref):
if opref not in self.opacities:
- self.opacities[opref] = 'Opa%d'%len(self.opacities)
+ self.opacities[opref] = f'Opa{len(self.opacities)}'
name = self.opacities[opref]
serialize(Name(name), self)
self.write(b' gs ')
def add_font(self, fontref):
if fontref not in self.fonts:
- self.fonts[fontref] = 'F%d'%len(self.fonts)
+ self.fonts[fontref] = f'F{len(self.fonts)}'
return self.fonts[fontref]
def add_image(self, imgref):
if imgref not in self.xobjects:
- self.xobjects[imgref] = 'Image%d'%len(self.xobjects)
+ self.xobjects[imgref] = f'Image{len(self.xobjects)}'
return self.xobjects[imgref]
def add_pattern(self, patternref):
if patternref not in self.patterns:
- self.patterns[patternref] = 'Pat%d'%len(self.patterns)
+ self.patterns[patternref] = f'Pat{len(self.patterns)}'
return self.patterns[patternref]
def add_resources(self):

View File

@@ -95,7 +95,7 @@ class Unidecoder:
# Code groups within CODEPOINTS take the form 'xAB'
if not isinstance(character, str):
character = str(character, 'utf-8')
- return 'x%02x' % (ord(character) >> 8)
+ return f'x{ord(character) >> 8:02x}'
def grouped_point(self, character):
'''

View File

@@ -23,10 +23,8 @@ class StoreAction(InterfaceAction):
self.qaction.triggered.connect(self.do_search)
self.store_menu = self.qaction.menu()
cm = partial(self.create_menu_action, self.store_menu)
- for x, t in [('author', _('this author')), ('title', _('this title')),
-     ('book', _('this book'))]:
-     func = getattr(self, 'search_%s'%('author_title' if x == 'book'
-         else x))
+ for x, t in [('author', _('this author')), ('title', _('this title')), ('book', _('this book'))]:
+     func = getattr(self, 'search_{}'.format('author_title' if x == 'book' else x))
ac = cm(x, _('Search for %s')%t, triggered=func)
setattr(self, 'action_search_by_'+x, ac)
self.store_menu.addSeparator()

View File

@@ -474,7 +474,7 @@ class Adder(QObject):
# detection/automerge will fail for this book.
traceback.print_exc()
if DEBUG:
- prints('Added', mi.title, 'to db in: %.1f' % (time.time() - st))
+ prints('Added', mi.title, f'to db in: {time.time()-st:.1f}')
def add_formats(self, book_id, paths, mi, replace=True, is_an_add=False):
fmap = {p.rpartition(os.path.extsep)[-1].lower():p for p in paths}

View File

@@ -23,7 +23,7 @@ class DuplicatesQuestion(QDialog):
self.setLayout(l)
t = ngettext('Duplicate found', 'duplicates found', len(duplicates))
if len(duplicates) > 1:
- t = '%d %s' % (len(duplicates), t)
+ t = f'{len(duplicates)} {t}'
self.setWindowTitle(t)
self.i = i = QIcon.ic('dialog_question.png')
self.setWindowIcon(i)

View File

@@ -888,7 +888,7 @@ class BooksModel(QAbstractTableModel): # {{{
val = fffunc(field_obj, idfunc(idx), default_value=0) or 0
if val == 0:
return None
- ans = '%.1f' % (val * sz_mult)
+ ans = f'{val*sz_mult:.1f}'
return ('<0.1' if ans == '0.0' else ans)
elif field == 'languages':
def func(idx):

View File

@@ -255,7 +255,7 @@ class GuiRunner(QObject):
self.splash_screen.finish(main)
timed_print('splash screen hidden')
self.splash_screen = None
- timed_print('Started up in %.2f seconds'%(monotonic() - self.startup_time), 'with', len(db.data), 'books')
+ timed_print(f'Started up in {monotonic()-self.startup_time:.2f} seconds', 'with', len(db.data), 'books')
main.set_exception_handler()
if len(self.args) > 1:
main.handle_cli_args(self.args[1:])

View File

@@ -273,7 +273,7 @@ def download(all_ids, tf, db, do_identify, covers, ensure_fields,
if abort.is_set():
aborted = True
- log('Download complete, with %d failures'%len(failed_ids))
+ log(f'Download complete, with {len(failed_ids)} failures')
return (aborted, ans, tdir, tf, failed_ids, failed_covers, title_map,
lm_map, all_failed)
finally:

View File

@@ -342,7 +342,7 @@ class Saver(QObject):
def updating_metadata_finished(self):
if DEBUG:
- prints('Saved %d books in %.1f seconds' % (len(self.all_book_ids), time.time() - self.start_time))
+ prints(f'Saved {len(self.all_book_ids)} books in {time.time()-self.start_time:.1f} seconds')
self.pd.close()
self.pd.deleteLater()
self.report()

View File

@@ -1968,7 +1968,7 @@ class TagsModel(QAbstractItemModel): # {{{
if tag.name and tag.name[0] in stars: # char is a star or a half. Assume rating
rnum = len(tag.name)
if tag.name.endswith(stars[-1]):
- rnum = '%s.5' % (rnum - 1)
+ rnum = f'{rnum-1}.5'
ans.append(f'{prefix}{category}:{rnum}')
else:
name = tag.original_name

View File

@@ -904,7 +904,7 @@ class DiffSplit(QSplitter): # {{{
continue
if tag in {'replace', 'insert', 'delete'}:
- fmt = getattr(self.left, '%s_format' % ('replacereplace' if tag == 'replace' else tag))
+ fmt = getattr(self.left, '{}_format'.format('replacereplace' if tag == 'replace' else tag))
f = QTextLayout.FormatRange()
f.start, f.length, f.format = pos, len(word), fmt
fmts.append(f)

View File

@@ -188,7 +188,7 @@ def do_print():
if data['page_numbers']:
args.append('--pdf-page-numbers')
for edge in 'left top right bottom'.split():
- args.append('--pdf-page-margin-' + edge), args.append('%.1f' % (data['margin_' + edge] * 72))
+ args.append('--pdf-page-margin-' + edge), args.append('{:.1f}'.format(data['margin_' + edge] * 72))
from calibre.ebooks.conversion.cli import main
main(args)

View File

@@ -62,16 +62,16 @@ def serialize_string(key, val):
val = str(val).encode('utf-8')
if len(val) > 2**16 - 1:
raise ValueError(f'{key} is too long')
- return struct.pack('=B%dsH%ds' % (len(key), len(val)), len(key), key, len(val), val)
+ return struct.pack(f'=B{len(key)}sH{len(val)}s', len(key), key, len(val), val)
def serialize_file_types(file_types):
key = b'FILE_TYPES'
- buf = [struct.pack('=B%dsH' % len(key), len(key), key, len(file_types))]
+ buf = [struct.pack(f'=B{len(key)}sH', len(key), key, len(file_types))]
def add(x):
x = x.encode('utf-8').replace(b'\0', b'')
- buf.append(struct.pack('=H%ds' % len(x), len(x), x))
+ buf.append(struct.pack(f'=H{len(x)}s', len(x), x))
for name, extensions in file_types:
add(name or _('Files'))
if isinstance(extensions, string_or_bytes):

View File

@@ -315,7 +315,7 @@ class BIBTEX(CatalogPlugin):
log(" --search='{}'".format(opts_dict['search_text']))
if opts_dict['ids']:
- log(' Book count: %d' % len(opts_dict['ids']))
+ log(' Book count: {}'.format(len(opts_dict['ids'])))
if opts_dict['search_text']:
log(' (--search ignored when a subset of the database is specified)')

View File

@@ -75,7 +75,7 @@ class CSV_XML(CatalogPlugin):
log(" --search='{}'".format(opts_dict['search_text']))
if opts_dict['ids']:
- log(' Book count: %d' % len(opts_dict['ids']))
+ log(' Book count: {}'.format(len(opts_dict['ids'])))
if opts_dict['search_text']:
log(' (--search ignored when a subset of the database is specified)')
@@ -153,7 +153,7 @@ class CSV_XML(CatalogPlugin):
item = item.replace('\r\n', ' ')
item = item.replace('\n', ' ')
elif fm.get(field, {}).get('datatype', None) == 'rating' and item:
- item = '%.2g' % (item / 2)
+ item = f'{item/2:.2g}'
# Convert HTML to markdown text
if isinstance(item, str):
@@ -197,7 +197,7 @@ class CSV_XML(CatalogPlugin):
if not isinstance(val, (bytes, str)):
if (fm.get(field, {}).get('datatype', None) ==
'rating' and val):
- val = '%.2g' % (val / 2)
+ val = f'{val/2:.2g}'
val = str(val)
item = getattr(E, field)(val)
record.append(item)

View File

@@ -300,7 +300,7 @@ class EPUB_MOBI(CatalogPlugin):
opts_dict = vars(opts)
if opts_dict['ids']:
- build_log.append(' book count: %d' % len(opts_dict['ids']))
+ build_log.append(' book count: {}'.format(len(opts_dict['ids'])))
sections_list = []
if opts.generate_authors:

View File

@@ -834,11 +834,11 @@ class CatalogBuilder:
self.individual_authors = list(individual_authors)
if self.DEBUG and self.opts.verbose:
- self.opts.log.info('\nfetch_books_by_author(): %d unique authors' % len(unique_authors))
+ self.opts.log.info(f'\nfetch_books_by_author(): {len(unique_authors)} unique authors')
for author in unique_authors:
self.opts.log.info((' %-50s %-25s %2d' % (author[0][0:45], author[1][0:20],
author[2])).encode('utf-8'))
- self.opts.log.info('\nfetch_books_by_author(): %d individual authors' % len(individual_authors))
+ self.opts.log.info(f'\nfetch_books_by_author(): {len(individual_authors)} individual authors')
for author in sorted(individual_authors):
self.opts.log.info(f'{author}')
@@ -865,7 +865,7 @@ class CatalogBuilder:
self.books_by_title = sorted(self.books_to_catalog, key=lambda x: sort_key(x['title_sort'].upper()))
if self.DEBUG and self.opts.verbose:
- self.opts.log.info('fetch_books_by_title(): %d books' % len(self.books_by_title))
+ self.opts.log.info(f'fetch_books_by_title(): {len(self.books_by_title)} books')
self.opts.log.info(' %-40s %-40s' % ('title', 'title_sort'))
for title in self.books_by_title:
self.opts.log.info((' %-40s %-40s' % (title['title'][0:40],
@@ -1202,7 +1202,7 @@ class CatalogBuilder:
else:
yield tag
- ans = '%s%d %s:\n' % (' ' * indent, len(tags), header)
+ ans = '{}{} {}:\n'.format(' '*indent, len(tags), header)
ans += ' ' * (indent + 1)
out_str = ''
sorted_tags = sorted(tags, key=sort_key)
@@ -2119,12 +2119,11 @@ class CatalogBuilder:
if self.opts.verbose:
if len(genre_list):
- self.opts.log.info(' Genre summary: %d active genre tags used in generating catalog with %d titles' %
-     (len(genre_list), len(self.books_to_catalog)))
+ self.opts.log.info(f' Genre summary: {len(genre_list)} active genre tags used in generating catalog with {len(self.books_to_catalog)} titles')
for genre in genre_list:
for key in genre:
- self.opts.log.info(' %s: %d %s' % (self.get_friendly_genre_tag(key),
-     len(genre[key]),
-     'titles' if len(genre[key]) > 1 else 'title'))
+ self.opts.log.info(' {}: {} {}'.format(self.get_friendly_genre_tag(key),
+     len(genre[key]),
+     'titles' if len(genre[key]) > 1 else 'title'))
@@ -3036,7 +3035,7 @@ class CatalogBuilder:
Outputs:
ncx_soup (file): updated
'''
- section_header = '%s [%d]' % (tocTitle, len(self.books_by_description))
+ section_header = f'{tocTitle} [{len(self.books_by_description)}]'
if self.generate_for_kindle_mobi:
section_header = tocTitle
navPointTag = self.generate_ncx_section_header('bydescription-ID', section_header, 'content/book_%d.html' % int(self.books_by_description[0]['id']))
@@ -3115,7 +3114,7 @@ class CatalogBuilder:
series_by_letter.append(current_series_list)
# --- Construct the 'Books By Series' section ---
- section_header = '%s [%d]' % (tocTitle, len(self.all_series))
+ section_header = f'{tocTitle} [{len(self.all_series)}]'
if self.generate_for_kindle_mobi:
section_header = tocTitle
output = 'BySeries'
@@ -3197,7 +3196,7 @@ class CatalogBuilder:
books_by_letter.append(current_book_list)
# --- Construct the 'Books By Title' section ---
- section_header = '%s [%d]' % (tocTitle, len(self.books_by_title))
+ section_header = f'{tocTitle} [{len(self.books_by_title)}]'
if self.generate_for_kindle_mobi:
section_header = tocTitle
output = 'ByAlphaTitle'
@@ -3285,7 +3284,7 @@ class CatalogBuilder:
# --- Construct the 'Books By Author' *section* ---
file_ID = f'{tocTitle.lower()}'
file_ID = file_ID.replace(' ', '')
- section_header = '%s [%d]' % (tocTitle, len(self.individual_authors))
+ section_header = f'{tocTitle} [{len(self.individual_authors)}]'
if self.generate_for_kindle_mobi:
section_header = tocTitle
navPointTag = self.generate_ncx_section_header(f'{file_ID}-ID', section_header, f'{HTML_file}#section_start')
@@ -3563,7 +3562,7 @@ class CatalogBuilder:
# --- Construct the 'Books By Genre' *section* ---
file_ID = f'{tocTitle.lower()}'
file_ID = file_ID.replace(' ', '')
- section_header = '%s [%d]' % (tocTitle, len(self.genres))
+ section_header = f'{tocTitle} [{len(self.genres)}]'
if self.generate_for_kindle_mobi:
section_header = tocTitle
navPointTag = self.generate_ncx_section_header(f'{file_ID}-ID', section_header, 'content/Genre_{}.html#section_start'.format(self.genres[0]['tag']))

View File

@@ -798,7 +798,7 @@ class PostInstall:
os.rmdir(config_dir)
if warn is None and self.warnings:
- self.info('\n\nThere were %d warnings\n'%len(self.warnings))
+ self.info(f'\n\nThere were {len(self.warnings)} warnings\n')
for args, kwargs in self.warnings:
self.info('*', *args, **kwargs)
print()

View File

@@ -79,4 +79,4 @@ def debug_print(*args, **kw):
# Check if debugging is enabled
if is_debugging():
# Print the elapsed time and the provided arguments if debugging is enabled
- prints('DEBUG: %6.1f' % (time.monotonic() - base_time), *args, **kw)
+ prints(f'DEBUG: {time.monotonic()-base_time:6.1f}', *args, **kw)

View File

@@ -271,7 +271,7 @@ class AuthController:
def do_http_auth(self, data, endpoint):
ban_key = data.remote_addr, data.forwarded_for
if self.ban_list.is_banned(ban_key):
- raise HTTPForbidden('Too many login attempts', log='Too many login attempts from: %s' % (ban_key if data.forwarded_for else data.remote_addr))
+ raise HTTPForbidden('Too many login attempts', log=f'Too many login attempts from: {ban_key if data.forwarded_for else data.remote_addr}')
auth = data.inheaders.get('Authorization')
nonce_is_stale = False
log_msg = None

View File

@ -412,7 +412,7 @@ def auto_reload(log, dirs=frozenset(), cmd=None, add_default_dirs=True, listen_o
cmd.insert(1, 'calibre-server') cmd.insert(1, 'calibre-server')
dirs = find_dirs_to_watch(fpath, dirs, add_default_dirs) dirs = find_dirs_to_watch(fpath, dirs, add_default_dirs)
log('Auto-restarting server on changes, press Ctrl-C to quit') log('Auto-restarting server on changes, press Ctrl-C to quit')
log('Watching %d directory trees for changes' % len(dirs)) log(f'Watching {len(dirs)} directory trees for changes')
with ReloadServer(listen_on) as server, Worker(cmd, log, server) as worker: with ReloadServer(listen_on) as server, Worker(cmd, log, server) as worker:
w = Watcher(dirs, worker, log) w = Watcher(dirs, worker, log)
worker.wakeup = w.wakeup worker.wakeup = w.wakeup

View File

@@ -773,7 +773,7 @@ class ServerLoop:
for pool in (self.plugin_pool, self.pool): for pool in (self.plugin_pool, self.pool):
pool.stop(wait_till) pool.stop(wait_till)
if pool.workers: if pool.workers:
self.log.warn('Failed to shutdown %d workers in %s cleanly' % (len(pool.workers), pool.__class__.__name__)) self.log.warn(f'Failed to shutdown {len(pool.workers)} workers in {pool.__class__.__name__} cleanly')
self.jobs_manager.wait_for_shutdown(wait_till) self.jobs_manager.wait_for_shutdown(wait_till)

View File

@@ -216,7 +216,7 @@ def manage_users_cli(path=None, args=()):
prints('%d)' % (i + 1), choice) prints('%d)' % (i + 1), choice)
print() print()
while True: while True:
prompt = question + ' [1-%d]:' % len(choices) prompt = question + f' [1-{len(choices)}]:'
if default is not None: if default is not None:
prompt = question + ' [1-%d %s: %d]' % ( prompt = question + ' [1-%d %s: %d]' % (
len(choices), _('default'), default + 1) len(choices), _('default'), default + 1)
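
The multi-line % expression above is one of the cases the automated pass leaves alone, presumably because joining operands across a continuation line is riskier to do mechanically. A hand conversion would be straightforward; a sketch with hypothetical values:

    choices = ['alice', 'bob', 'carol']   # hypothetical sample values
    default = 0
    _ = lambda s: s                       # stand-in for the gettext lookup
    old = 'Choose a user' + ' [1-%d %s: %d]' % (len(choices), _('default'), default + 1)
    new = 'Choose a user' + f" [1-{len(choices)} {_('default')}: {default + 1}]"
    assert old == new == 'Choose a user [1-3 default: 1]'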

View File

@@ -462,7 +462,7 @@ def process_category_node(
# reflect that in the node structure as well. # reflect that in the node structure as well.
node_data = tag_map.get(id(tag), None) node_data = tag_map.get(id(tag), None)
if node_data is None: if node_data is None:
node_id = 'n%d' % len(tag_map) node_id = f'n{len(tag_map)}'
node_data = items[node_id] = category_item_as_json(tag, clear_rating=clear_rating) node_data = items[node_id] = category_item_as_json(tag, clear_rating=clear_rating)
tag_map[id(tag)] = (node_id, node_data) tag_map[id(tag)] = (node_id, node_data)
node_to_tag_map[node_id] = tag node_to_tag_map[node_id] = tag

View File

@@ -168,7 +168,7 @@ class Route:
if argspec.args[2:len(self.names)+2] != self.names: if argspec.args[2:len(self.names)+2] != self.names:
raise route_error("Function's argument names do not match the variable names in the route") raise route_error("Function's argument names do not match the variable names in the route")
if not frozenset(self.type_checkers).issubset(frozenset(self.names)): if not frozenset(self.type_checkers).issubset(frozenset(self.names)):
raise route_error('There exist type checkers that do not correspond to route variables: %r' % (set(self.type_checkers) - set(self.names))) raise route_error(f'There exist type checkers that do not correspond to route variables: {set(self.type_checkers)-set(self.names)!r}')
self.min_size = found_optional_part if found_optional_part is not False else len(matchers) self.min_size = found_optional_part if found_optional_part is not False else len(matchers)
self.max_size = sys.maxsize if self.soak_up_extra else len(matchers) self.max_size = sys.maxsize if self.soak_up_extra else len(matchers)
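
The %r specifier becomes the !r conversion; both call repr() on the value, so the output is byte-for-byte identical. A sketch with hypothetical route data:

    type_checkers = {'book_id': int}   # hypothetical sample values
    names = ['library_id']
    missing = set(type_checkers) - set(names)
    assert '%r' % (missing,) == f'{missing!r}' == "{'book_id'}"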

View File

@@ -376,14 +376,14 @@ class TestHTTP(BaseTest):
r = conn.getresponse() r = conn.getresponse()
self.ae(r.status, http_client.PARTIAL_CONTENT) self.ae(r.status, http_client.PARTIAL_CONTENT)
self.ae(str(r.getheader('Accept-Ranges')), 'bytes') self.ae(str(r.getheader('Accept-Ranges')), 'bytes')
self.ae(str(r.getheader('Content-Range')), 'bytes 2-25/%d' % len(fdata)) self.ae(str(r.getheader('Content-Range')), f'bytes 2-25/{len(fdata)}')
self.ae(int(r.getheader('Content-Length')), 24) self.ae(int(r.getheader('Content-Length')), 24)
self.ae(r.read(), fdata[2:26]) self.ae(r.read(), fdata[2:26])
conn.request('GET', '/test', headers={'Range':'bytes=100000-'}) conn.request('GET', '/test', headers={'Range':'bytes=100000-'})
r = conn.getresponse() r = conn.getresponse()
self.ae(r.status, http_client.REQUESTED_RANGE_NOT_SATISFIABLE) self.ae(r.status, http_client.REQUESTED_RANGE_NOT_SATISFIABLE)
self.ae(str(r.getheader('Content-Range')), 'bytes */%d' % len(fdata)) self.ae(str(r.getheader('Content-Range')), f'bytes */{len(fdata)}')
conn.request('GET', '/test', headers={'Range':'bytes=25-50', 'If-Range':etag}) conn.request('GET', '/test', headers={'Range':'bytes=25-50', 'If-Range':etag})
r = conn.getresponse() r = conn.getresponse()

View File

@@ -41,7 +41,7 @@ class Index(list):
offsets = b''.join(pack(b'>L', x)[1:] for x in offsets) offsets = b''.join(pack(b'>L', x)[1:] for x in offsets)
else: else:
fmt = {1:'B', 2:'H', 4:'L'}[offsize] fmt = {1:'B', 2:'H', 4:'L'}[offsize]
offsets = pack(('>%d%s'%(len(offsets), fmt)).encode('ascii'), offsets = pack(f'>{len(offsets)}{fmt}'.encode('ascii'),
*offsets) *offsets)
self.raw = prefix + offsets + obj_data self.raw = prefix + offsets + obj_data
@@ -102,7 +102,7 @@ class Charsets(list):
def compile(self): def compile(self):
ans = pack(b'>B', 0) ans = pack(b'>B', 0)
sids = [self.strings(x) for x in self] sids = [self.strings(x) for x in self]
ans += pack(('>%dH'%len(self)).encode('ascii'), *sids) ans += pack(f'>{len(self)}H'.encode('ascii'), *sids)
self.raw = ans self.raw = ans
return ans return ans
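
These two rewrites build the f-string first and encode it afterwards, since struct.pack is handed a bytes format string at these call sites. Both forms pack identically; a standalone check with hypothetical offsets:

    from struct import pack

    offsets = [1, 2, 3]                 # hypothetical sample values
    fmt = {1: 'B', 2: 'H', 4: 'L'}[2]   # two-byte unsigned, as in the 2-byte offsize case
    old = pack(('>%d%s' % (len(offsets), fmt)).encode('ascii'), *offsets)
    new = pack(f'>{len(offsets)}{fmt}'.encode('ascii'), *offsets)
    assert old == new == b'\x00\x01\x00\x02\x00\x03'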

View File

@@ -165,8 +165,7 @@ def test_roundtrip(ff=None):
if data[:12] != rd[:12]: if data[:12] != rd[:12]:
raise ValueError('Roundtripping failed, font header not the same') raise ValueError('Roundtripping failed, font header not the same')
if len(data) != len(rd): if len(data) != len(rd):
raise ValueError('Roundtripping failed, size different (%d vs. %d)'% raise ValueError(f'Roundtripping failed, size different ({len(data)} vs. {len(rd)})')
(len(data), len(rd)))
if __name__ == '__main__': if __name__ == '__main__':
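
This is one of the few rewrites that also reflows code: the % operands sat on a continuation line, and the f-string pulls them inline, collapsing the raise to a single line. Equivalence sketch with hypothetical buffers:

    data = b'x' * 10   # hypothetical sample buffers
    rd = b'x' * 12
    old = 'Roundtripping failed, size different (%d vs. %d)' % (len(data), len(rd))
    new = f'Roundtripping failed, size different ({len(data)} vs. {len(rd)})'
    assert old == new == 'Roundtripping failed, size different (10 vs. 12)'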

View File

@@ -234,7 +234,7 @@ def print_stats(old_stats, new_stats):
np = nsz/new_total * 100 np = nsz/new_total * 100
suffix = ' | same size' suffix = ' | same size'
if nsz != osz: if nsz != osz:
suffix = ' | reduced to %.1f %%'%(nsz/osz * 100) suffix = f' | reduced to {nsz/osz*100:.1f} %'
prints('%4s'%table, ' ', '%10s'%osz, ' ', f'{op:5.1f} %', ' ', prints('%4s'%table, ' ', '%10s'%osz, ' ', f'{op:5.1f} %', ' ',
'%10s'%nsz, ' ', f'{np:5.1f} %', suffix) '%10s'%nsz, ' ', f'{np:5.1f} %', suffix)
prints('='*80) prints('='*80)
@@ -289,7 +289,7 @@ def main(args):
reduced = (len(sf)/len(orig)) * 100 reduced = (len(sf)/len(orig)) * 100
def sz(x): def sz(x):
return '%gKB'%(len(x)/1024.) return f'{len(x)/1024.0:g}KB'
print_stats(old_stats, new_stats) print_stats(old_stats, new_stats)
prints('Original size:', sz(orig), 'Subset size:', sz(sf), f'Reduced to: {reduced:g}%') prints('Original size:', sz(orig), 'Subset size:', sz(sf), f'Reduced to: {reduced:g}%')
prints(f'Subsetting took {taken:g} seconds') prints(f'Subsetting took {taken:g} seconds')
@@ -381,7 +381,7 @@ def all():
print(name, path, err) print(name, path, err)
print() print()
print('Average reduction to: %.1f%%'%(sum(averages)/len(averages))) print(f'Average reduction to: {sum(averages)/len(averages):.1f}%')
print('Total:', total, 'Unsupported:', len(unsupported), 'Failed:', print('Total:', total, 'Unsupported:', len(unsupported), 'Failed:',
len(failed), 'Warnings:', len(warnings)) len(failed), 'Warnings:', len(warnings))
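
Two details in this file are worth noting: %g maps onto :g unchanged, and the doubled %% escape collapses to a bare %, since a literal percent sign needs no escaping inside an f-string. Sketch:

    averages = [40.0, 60.0]   # hypothetical sample values
    pct = sum(averages) / len(averages)
    assert 'Average reduction to: %.1f%%' % pct == f'Average reduction to: {pct:.1f}%'
    assert '%gKB' % (2048 / 1024.0) == f'{2048/1024.0:g}KB' == '2KB'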

View File

@@ -2172,7 +2172,7 @@ returns the empty string.
except: except:
return '' return ''
i = d1 - d2 i = d1 - d2
return '%.1f'%(i.days + (i.seconds/(24.0*60.0*60.0))) return f'{i.days+(i.seconds/(24.0*60.0*60.0)):.1f}'
class BuiltinDateArithmetic(BuiltinFormatterFunction): class BuiltinDateArithmetic(BuiltinFormatterFunction):

View File

@@ -762,7 +762,7 @@ class SMTP:
# Hmmm? what's this? -ddm # Hmmm? what's this? -ddm
# self.esmtp_features['7bit']="" # self.esmtp_features['7bit']=""
if self.has_extn('size'): if self.has_extn('size'):
esmtp_opts.append('size=%d' % len(msg)) esmtp_opts.append(f'size={len(msg)}')
for option in mail_options: for option in mail_options:
esmtp_opts.append(option) esmtp_opts.append(option)
@@ -909,7 +909,7 @@ if __name__ == '__main__':
if not line: if not line:
break break
msg = msg + line msg = msg + line
print('Message length is %d' % len(msg)) print(f'Message length is {len(msg)}')
server = SMTP('localhost') server = SMTP('localhost')
server.set_debuglevel(1) server.set_debuglevel(1)

View File

@@ -183,7 +183,7 @@ class Register(Thread):
prints('Registering with default programs...') prints('Registering with default programs...')
register() register()
if DEBUG: if DEBUG:
prints('Registered with default programs in %.1f seconds' % (time.monotonic() - st)) prints(f'Registered with default programs in {time.monotonic()-st:.1f} seconds')
def __enter__(self): def __enter__(self):
return self return self

View File

@@ -1760,7 +1760,7 @@ class BasicNewsRecipe(Recipe):
url = url.decode('utf-8') url = url.decode('utf-8')
if url.startswith('feed://'): if url.startswith('feed://'):
url = 'http'+url[4:] url = 'http'+url[4:]
self.report_progress(0, _('Fetching feed')+' %s...'%(title if title else url)) self.report_progress(0, _('Fetching feed')+f' {title if title else url}...')
try: try:
purl = urlparse(url, allow_fragments=False) purl = urlparse(url, allow_fragments=False)
if purl.username or purl.password: if purl.username or purl.password:
@@ -1780,7 +1780,7 @@ class BasicNewsRecipe(Recipe):
)) ))
except Exception as err: except Exception as err:
feed = Feed() feed = Feed()
msg = 'Failed feed: %s'%(title if title else url) msg = f'Failed feed: {title if title else url}'
feed.populate_from_preparsed_feed(msg, []) feed.populate_from_preparsed_feed(msg, [])
feed.description = as_unicode(err) feed.description = as_unicode(err)
parsed_feeds.append(feed) parsed_feeds.append(feed)
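
Conditional expressions move into the replacement field verbatim: an f-string accepts any expression, so 'title if title else url' needs no temporary variable. Sketch with hypothetical feed data:

    title = None                       # hypothetical sample values
    url = 'https://example.com/feed'
    old = 'Failed feed: %s' % (title if title else url)
    new = f'Failed feed: {title if title else url}'
    assert old == new == 'Failed feed: https://example.com/feed'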

View File

@@ -94,7 +94,7 @@ class NewsCategory(NewsTreeItem):
def data(self, role): def data(self, role):
if role == Qt.ItemDataRole.DisplayRole: if role == Qt.ItemDataRole.DisplayRole:
return (self.cdata + ' [%d]'%len(self.children)) return (self.cdata + f' [{len(self.children)}]')
elif role == Qt.ItemDataRole.FontRole: elif role == Qt.ItemDataRole.FontRole:
return self.bold_font return self.bold_font
elif role == Qt.ItemDataRole.ForegroundRole and self.category == _('Scheduled'): elif role == Qt.ItemDataRole.ForegroundRole and self.category == _('Scheduled'):