Automated conversion of % format specifiers
Using flynt. The change has been filtered because, even in safe mode, flynt can be too aggressive.
parent c8d9e0c24e
commit e5a65f69ec
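For orientation, this is the shape of rewrite flynt performs throughout the diff below; a minimal sketch with invented values, not code from this commit:

```python
# Illustrative only: the printf-style form and the f-string flynt
# produces are equivalent.
n, name = 3, 'calibre'

old = '%d books by %s' % (n, name)  # printf-style interpolation
new = f'{n} books by {name}'        # flynt's replacement
assert old == new

# A conversion spec such as %.2f maps onto the part after the colon.
assert '%.2f' % (n / 2) == f'{n/2:.2f}'
```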
@@ -116,7 +116,7 @@ class DemoTool(Tool):
             num = re.search(r'[0-9.]+', val)
             if num is not None:
                 num = num.group()
-                val = val.replace(num, '%f' % (float(num) * factor))
+                val = val.replace(num, f'{float(num)*factor:f}')
                 style.setProperty('font-size', val)
         # We should also be dealing with the font shorthand property and
         # font sizes specified as non numbers, but those are left as exercises
@@ -76,5 +76,20 @@ docstring-quotes = 'single'
 inline-quotes = 'single'
 multiline-quotes = 'single'
 
+[tool.flynt]
+line-length = 400 # over value to catch every case
+transform-format = false # don't transform already existing format call
+exclude = [
+    "bypy/",
+    "setup/polib.py",
+    "setup/linux-installer.py",
+    "src/calibre/ebooks/metadata/sources/",
+    "src/calibre/gui2/store/stores/",
+    "src/css_selectors/",
+    "src/polyglot/",
+    "src/templite/",
+    "src/tinycss/",
+]
+
 [tool.pylsp-mypy]
 enabled = false
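A note on the configuration above: with transform-format disabled, flynt rewrites only printf-style % interpolation and leaves pre-existing str.format() calls alone (some hunks below use .format() where an f-string would be awkward). A sketch of the distinction, reusing the MTP driver path from a later hunk:

```python
s = 'unix'

# The % form is what flynt rewrites:
before = 'calibre.devices.mtp.%s.driver' % s
after = f'calibre.devices.mtp.{s}.driver'

# A pre-existing .format() call is left untouched under this config:
kept = 'calibre.devices.mtp.{}.driver'.format(s)

assert before == after == kept
```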
@@ -756,7 +756,7 @@ class WritingTest(BaseTest):
         self.assertEqual({1,2,3}, cache.set_sort_for_authors(sdata))
         for bid in (1, 2, 3):
             self.assertIn(', changed', cache.field_for('author_sort', bid))
-        sdata = {aid:'%s, changed' % (aid*2 if aid == max(adata) else aid) for aid in adata}
+        sdata = {aid:f'{aid*2 if aid == max(adata) else aid}, changed' for aid in adata}
         self.assertEqual({3}, cache.set_sort_for_authors(sdata),
                          'Setting the author sort to the same value as before, incorrectly marked some books as dirty')
     # }}}
@@ -1721,9 +1721,9 @@ class KOBOTOUCH(KOBO):
             debugging_title = self.debugging_title
             debug_print(f"KoboTouch:books - set_debugging_title to '{debugging_title}'")
             bl.set_debugging_title(debugging_title)
-        debug_print('KoboTouch:books - length bl=%d'%len(bl))
+        debug_print(f'KoboTouch:books - length bl={len(bl)}')
         need_sync = self.parse_metadata_cache(bl, prefix, self.METADATA_CACHE)
-        debug_print('KoboTouch:books - length bl after sync=%d'%len(bl))
+        debug_print(f'KoboTouch:books - length bl after sync={len(bl)}')
 
         # make a dict cache of paths so the lookup in the loop below is faster.
         bl_cache = {}
@@ -2266,7 +2266,7 @@ class KOBOTOUCH(KOBO):
 
     def upload_books(self, files, names, on_card=None, end_session=True,
                      metadata=None):
-        debug_print('KoboTouch:upload_books - %d books'%(len(files)))
+        debug_print(f'KoboTouch:upload_books - {len(files)} books')
         debug_print('KoboTouch:upload_books - files=', files)
 
         if self.modifying_epub():
@@ -2611,7 +2611,7 @@ class KOBOTOUCH(KOBO):
         # debug_print("KoboTouch:update_device_database_collections - self.bookshelvelist=", self.bookshelvelist)
         # Process any collections that exist
         for category, books in collections.items():
-            debug_print("KoboTouch:update_device_database_collections - category='%s' books=%d"%(category, len(books)))
+            debug_print(f"KoboTouch:update_device_database_collections - category='{category}' books={len(books)}")
             if create_collections and not (category in supportedcategories or category in readstatuslist or category in accessibilitylist):
                 self.check_for_bookshelf(connection, category)
             # if category in self.bookshelvelist:
@@ -2642,7 +2642,7 @@ class KOBOTOUCH(KOBO):
                         category_added = True
                 elif category in self.bookshelvelist and self.supports_bookshelves:
                     if show_debug:
-                        debug_print('    length book.device_collections=%d'%len(book.device_collections))
+                        debug_print(f'    length book.device_collections={len(book.device_collections)}')
                     if category not in book.device_collections:
                         if show_debug:
                             debug_print('    Setting bookshelf on device')
@@ -27,8 +27,7 @@ from calibre.utils.filenames import shorten_components_to
 from calibre.utils.icu import lower as icu_lower
 from polyglot.builtins import as_bytes, iteritems, itervalues
 
-BASE = importlib.import_module('calibre.devices.mtp.%s.driver'%(
-    'windows' if iswindows else 'unix')).MTP_DEVICE
+BASE = importlib.import_module('calibre.devices.mtp.{}.driver'.format('windows' if iswindows else 'unix')).MTP_DEVICE
 DEFAULT_THUMBNAIL_HEIGHT = 320
 
 
@@ -108,7 +108,7 @@ class FileOrFolder:
             path = ''
         datum = f'size={self.size}'
         if self.is_folder or self.is_storage:
-            datum = 'children=%s'%(len(self.files) + len(self.folders))
+            datum = f'children={len(self.files)+len(self.folders)}'
         return f'{name}(id={self.object_id}, storage_id={self.storage_id}, {datum}, path={path}, modified={self.last_mod_string})'
 
     __str__ = __repr__
@@ -339,8 +339,7 @@ class MTP_DEVICE(MTPDeviceBase):
             prints('There were some errors while getting the '
                    f' filesystem from {self.current_friendly_name}: {self.format_errorstack(all_errs)}')
         self._filesystem_cache = FilesystemCache(storage, all_items)
-        debug('Filesystem metadata loaded in %g seconds (%d objects)'%(
-            time.time()-st, len(self._filesystem_cache)))
+        debug(f'Filesystem metadata loaded in {time.time()-st:g} seconds ({len(self._filesystem_cache)} objects)')
         return self._filesystem_cache
 
     @synchronous
@@ -289,8 +289,7 @@ class MTP_DEVICE(MTPDeviceBase):
             all_storage.append(storage)
             items.append(itervalues(id_map))
         self._filesystem_cache = FilesystemCache(all_storage, chain(*items))
-        debug('Filesystem metadata loaded in %g seconds (%d objects)'%(
-            time.time()-st, len(self._filesystem_cache)))
+        debug(f'Filesystem metadata loaded in {time.time()-st:g} seconds ({len(self._filesystem_cache)} objects)')
         return self._filesystem_cache
 
     @same_thread
@@ -1499,12 +1499,12 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
             self.report_progress((i + 1) / float(len(files)), _('Transferring books to device...'))
 
         self.report_progress(1.0, _('Transferring books to device...'))
-        self._debug('finished uploading %d books' % (len(files)))
+        self._debug(f'finished uploading {len(files)} books')
         return paths
 
     @synchronous('sync_lock')
     def add_books_to_metadata(self, locations, metadata, booklists):
-        self._debug('adding metadata for %d books' % (len(metadata)))
+        self._debug(f'adding metadata for {len(metadata)} books')
 
         metadata = iter(metadata)
         for i, location in enumerate(locations):
@@ -1558,7 +1558,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
                     bl.remove_book(book)
                     self._set_known_metadata(book, remove=True)
         self.report_progress(1.0, _('Removing books from device metadata listing...'))
-        self._debug('finished removing metadata for %d books' % (len(paths)))
+        self._debug(f'finished removing metadata for {len(paths)} books')
 
     @synchronous('sync_lock')
     def get_file(self, path, outfile, end_session=True, this_book=None, total_books=None):
@@ -299,8 +299,7 @@ class USBMS(CLI, Device):
                     need_sync = True
                     del bl[idx]
 
-        debug_print('USBMS: count found in cache: %d, count of files in metadata: %d, need_sync: %s' %
-                    (len(bl_cache), len(bl), need_sync))
+        debug_print(f'USBMS: count found in cache: {len(bl_cache)}, count of files in metadata: {len(bl)}, need_sync: {need_sync}')
         if need_sync: # self.count_found_in_bl != len(bl) or need_sync:
             if oncard == 'cardb':
                 self.sync_booklists((None, None, bl))
@@ -315,7 +314,7 @@ class USBMS(CLI, Device):
 
     def upload_books(self, files, names, on_card=None, end_session=True,
                      metadata=None):
-        debug_print('USBMS: uploading %d books'%(len(files)))
+        debug_print(f'USBMS: uploading {len(files)} books')
 
         path = self._sanity_check(on_card, files)
 
@@ -341,7 +340,7 @@ class USBMS(CLI, Device):
             self.report_progress((i+1) / float(len(files)), _('Transferring books to device...'))
 
         self.report_progress(1.0, _('Transferring books to device...'))
-        debug_print('USBMS: finished uploading %d books'%(len(files)))
+        debug_print(f'USBMS: finished uploading {len(files)} books')
         return list(zip(paths, cycle([on_card])))
 
     def upload_cover(self, path, filename, metadata, filepath):
@@ -358,7 +357,7 @@ class USBMS(CLI, Device):
         pass
 
     def add_books_to_metadata(self, locations, metadata, booklists):
-        debug_print('USBMS: adding metadata for %d books'%(len(metadata)))
+        debug_print(f'USBMS: adding metadata for {len(metadata)} books')
 
         metadata = iter(metadata)
         locations = tuple(locations)
@@ -418,7 +417,7 @@ class USBMS(CLI, Device):
         pass
 
     def delete_books(self, paths, end_session=True):
-        debug_print('USBMS: deleting %d books'%(len(paths)))
+        debug_print(f'USBMS: deleting {len(paths)} books')
         for i, path in enumerate(paths):
             self.report_progress((i+1) / float(len(paths)), _('Removing books from device...'))
             path = self.normalize_path(path)
@@ -428,10 +427,10 @@ class USBMS(CLI, Device):
                 self.delete_extra_book_files(path)
 
         self.report_progress(1.0, _('Removing books from device...'))
-        debug_print('USBMS: finished deleting %d books'%(len(paths)))
+        debug_print(f'USBMS: finished deleting {len(paths)} books')
 
     def remove_books_from_metadata(self, paths, booklists):
-        debug_print('USBMS: removing metadata for %d books'%(len(paths)))
+        debug_print(f'USBMS: removing metadata for {len(paths)} books')
 
         for i, path in enumerate(paths):
             self.report_progress((i+1) / float(len(paths)), _('Removing books from device metadata listing...'))
@@ -440,7 +439,7 @@ class USBMS(CLI, Device):
                 if path.endswith(book.path):
                     bl.remove_book(book)
         self.report_progress(1.0, _('Removing books from device metadata listing...'))
-        debug_print('USBMS: finished removing metadata for %d books'%(len(paths)))
+        debug_print(f'USBMS: finished removing metadata for {len(paths)} books')
 
     # If you override this method and you use book._new_book, then you must
     # complete the processing before you call this method. The flag is cleared
@@ -320,7 +320,7 @@ def create_option_parser(args, log):
                 log('\t'+title)
             except:
                 log('\t'+repr(title))
-        log('%d recipes available'%len(titles))
+        log(f'{len(titles)} recipes available')
         raise SystemExit(0)
 
     parser = option_parser()
@@ -163,7 +163,7 @@ class SNBOutput(OutputFormatPlugin):
                 ch.set('src', ProcessFileName(tocitem.href) + '.snbc')
                 ch.text = tocitem.title
 
-        etree.SubElement(tocHead, 'chapters').text = '%d' % len(tocBody)
+        etree.SubElement(tocHead, 'chapters').text = str(len(tocBody))
 
         with open(os.path.join(snbfDir, 'toc.snbf'), 'wb') as f:
             f.write(etree.tostring(tocInfoTree, pretty_print=True, encoding='utf-8'))
@@ -28,9 +28,9 @@ def _read_width(elem, get):
     elif typ == 'auto':
         ans = 'auto'
     elif typ == 'dxa':
-        ans = '%.3gpt' % (w/20)
+        ans = f'{w/20:.3g}pt'
     elif typ == 'pct':
-        ans = '%.3g%%' % (w/50)
+        ans = f'{w/50:.3g}%'
     return ans
 
 
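The width hunk above also shows that a conversion spec survives the move verbatim: %.3g becomes :.3g after the expression. A sanity check with an invented dxa value:

```python
w = 240  # hypothetical width in dxa (twentieths of a point)

assert '%.3gpt' % (w / 20) == f'{w/20:.3g}pt' == '12pt'
assert '%.3g%%' % (w / 50) == f'{w/50:.3g}%'  # %% becomes a plain % in the f-string
```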
@@ -243,7 +243,7 @@ class RowStyle(Style):
             rule, val = self.height
             if rule != 'auto':
                 try:
-                    c['min-height' if rule == 'atLeast' else 'height'] = '%.3gpt' % (int(val)/20)
+                    c['min-height' if rule == 'atLeast' else 'height'] = f'{int(val)/20:.3g}pt'
                 except (ValueError, TypeError):
                     pass
         c.update(self.convert_spacing())
@@ -282,7 +282,7 @@ class CellStyle(Style):
                 if val not in (inherit, 'auto'):
                     c[f'padding-{x}'] = val
                 elif val is inherit and x in {'left', 'right'}:
-                    c[f'padding-{x}'] = '%.3gpt' % (115/20)
+                    c[f'padding-{x}'] = f'{115/20:.3g}pt'
         # In Word, tables are apparently rendered with some default top and
         # bottom padding irrespective of the cellMargin values. Simulate
         # that here.
@@ -353,7 +353,7 @@ class TableStyle(Style):
             for x in ('left', 'top', 'right', 'bottom'):
                 val = self.float.get(f'{x}FromText', 0)
                 try:
-                    val = '%.3gpt' % (int(val) / 20)
+                    val = f'{int(val)/20:.3g}pt'
                 except (ValueError, TypeError):
                     val = '0'
                 c[f'margin-{x}'] = val
@@ -696,7 +696,7 @@ class Convert:
                 else:
                     clear = child.get('clear', None)
                     if clear in {'all', 'left', 'right'}:
-                        br = BR(style='clear:%s'%('both' if clear == 'all' else clear))
+                        br = BR(style='clear:{}'.format('both' if clear == 'all' else clear))
                     else:
                         br = BR()
                     text.add_elem(br)
@@ -34,7 +34,7 @@ class SpannedCell:
     def serialize(self, tr, makeelement):
        tc = makeelement(tr, 'w:tc')
        tcPr = makeelement(tc, 'w:tcPr')
-        makeelement(tcPr, 'w:%sMerge' % ('h' if self.horizontal else 'v'), w_val='continue')
+        makeelement(tcPr, 'w:{}Merge'.format('h' if self.horizontal else 'v'), w_val='continue')
        makeelement(tc, 'w:p')
 
     def applicable_borders(self, edge):
@@ -320,7 +320,7 @@ class UnBinary:
             elif state == 'get value':
                 if count == 0xfffe:
                     if not in_censorship:
-                        buf.write(encode('%s"' % (oc - 1)))
+                        buf.write(encode(f'{oc-1}"'))
                     in_censorship = False
                     state = 'get attr'
                 elif count > 0:
@@ -178,7 +178,7 @@ class LRFStream(LRFObject):
         if len(self.stream) != decomp_size:
             raise LRFParseError('Stream decompressed size is wrong!')
         if stream.read(2) != b'\x06\xF5':
-            print('Warning: corrupted end-of-stream tag at %08X; skipping it'%(stream.tell()-2))
+            print(f'Warning: corrupted end-of-stream tag at {stream.tell()-2:08X}; skipping it')
         self.end_stream(None, None)
 
 
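Zero-padded hex output converts the same way; %08X and :08X agree, as this check with an invented stream offset shows:

```python
offset = 0x1A2B  # hypothetical file position

assert '%08X' % offset == f'{offset:08X}' == '00001A2B'
```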
@@ -497,10 +497,10 @@ class TextCSS:
 
         fs = getattr(obj, 'fontsize', None)
         if fs is not None:
-            ans += item('font-size: %fpt;'%(int(fs)/10))
+            ans += item(f'font-size: {int(fs)/10:f}pt;')
         fw = getattr(obj, 'fontweight', None)
         if fw is not None:
-            ans += item('font-weight: %s;'%('bold' if int(fw) >= 700 else 'normal'))
+            ans += item('font-weight: {};'.format('bold' if int(fw) >= 700 else 'normal'))
         fn = getattr(obj, 'fontfacename', None)
         if fn is not None:
             fn = cls.FONT_MAP[fn]
@@ -519,10 +519,10 @@ class TextCSS:
         ans += item(f'text-align: {al};')
         lh = getattr(obj, 'linespace', None)
         if lh is not None:
-            ans += item('text-align: %fpt;'%(int(lh)/10))
+            ans += item(f'text-align: {int(lh)/10:f}pt;')
         pi = getattr(obj, 'parindent', None)
         if pi is not None:
-            ans += item('text-indent: %fpt;'%(int(pi)/10))
+            ans += item(f'text-indent: {int(pi)/10:f}pt;')
 
         return ans
 
@@ -95,7 +95,7 @@ def writeSignedWord(f, sword):
 
 
 def writeWords(f, *words):
-    f.write(struct.pack('<%dH' % len(words), *words))
+    f.write(struct.pack(f'<{len(words)}H', *words))
 
 
 def writeDWord(f, dword):
@@ -103,7 +103,7 @@ def writeDWord(f, dword):
 
 
 def writeDWords(f, *dwords):
-    f.write(struct.pack('<%dI' % len(dwords), *dwords))
+    f.write(struct.pack(f'<{len(dwords)}I', *dwords))
 
 
 def writeQWord(f, qword):
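In these two helpers the string being built is a struct format, not display text; the f-string merely reproduces the same characters. A sketch with invented words:

```python
import struct

words = (1, 2, 3)  # hypothetical 16-bit values

assert '<%dH' % len(words) == f'<{len(words)}H' == '<3H'
assert struct.pack('<%dH' % len(words), *words) == struct.pack(f'<{len(words)}H', *words)
```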
@@ -708,7 +708,7 @@ class Metadata:
             elif datatype == 'bool':
                 res = _('Yes') if res else _('No')
             elif datatype == 'rating':
-                res = '%.2g'%(res/2)
+                res = f'{res/2:.2g}'
             elif datatype in ['int', 'float']:
                 try:
                     fmt = cmeta['display'].get('number_format', None)
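Ratings are stored on a 0-10 scale and displayed halved; the %.2g spec transfers directly. A check with an invented rating:

```python
res = 7  # hypothetical rating on the 0-10 scale

assert '%.2g' % (res / 2) == f'{res/2:.2g}' == '3.5'
```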
@@ -748,7 +748,7 @@ class Metadata:
         elif datatype == 'datetime':
             res = format_date(res, fmeta['display'].get('date_format','dd MMM yyyy'))
         elif datatype == 'rating':
-            res = '%.2g'%(res/2)
+            res = f'{res/2:.2g}'
         elif key == 'size':
             res = human_readable(res)
         return (name, str(res), orig_res, fmeta)
@@ -785,7 +785,7 @@ class Metadata:
         if not self.is_null('languages'):
             fmt('Languages', ', '.join(self.languages))
         if self.rating is not None:
-            fmt('Rating', ('%.2g'%(float(self.rating)/2)) if self.rating
+            fmt('Rating', (f'{float(self.rating)/2:.2g}') if self.rating
                 else '')
         if self.timestamp is not None:
             fmt('Timestamp', isoformat(self.timestamp))
|
@ -500,14 +500,12 @@ class MOBIHeader: # {{{
|
||||
|
||||
if self.has_exth:
|
||||
ans += '\n\n' + str(self.exth)
|
||||
ans += '\n\nBytes after EXTH (%d bytes): %s'%(
|
||||
len(self.bytes_after_exth),
|
||||
format_bytes(self.bytes_after_exth))
|
||||
ans += f'\n\nBytes after EXTH ({len(self.bytes_after_exth)} bytes): {format_bytes(self.bytes_after_exth)}'
|
||||
|
||||
ans += '\nNumber of bytes after full name: %d' % (len(self.raw) - (self.fullname_offset +
|
||||
self.fullname_length))
|
||||
|
||||
ans += '\nRecord 0 length: %d'%len(self.raw)
|
||||
ans += f'\nRecord 0 length: {len(self.raw)}'
|
||||
return ans
|
||||
# }}}
|
||||
|
||||
|
@@ -98,7 +98,7 @@ class Index:
             for field in INDEX_HEADER_FIELDS:
                 a('%-12s: %r'%(FIELD_NAMES.get(field, field), self.header[field]))
         ans.extend(['', ''])
-        ans += ['*'*10 + ' Index Record Headers (%d records) ' % len(self.index_headers) + '*'*10]
+        ans += ['*'*10 + f' Index Record Headers ({len(self.index_headers)} records) ' + '*'*10]
         for i, header in enumerate(self.index_headers):
             ans += ['*'*10 + ' Index Record %d ' % i + '*'*10]
             for field in INDEX_HEADER_FIELDS:
@@ -111,7 +111,7 @@ class Index:
             ans.extend(['', ''])
 
         if self.table is not None:
-            a('*'*10 + ' %d Index Entries '%len(self.table) + '*'*10)
+            a('*'*10 + f' {len(self.table)} Index Entries ' + '*'*10)
             for k, v in iteritems(self.table):
                 a(f'{k}: {v!r}')
 
@@ -139,8 +139,7 @@ class SKELIndex(Index):
             for i, text in enumerate(self.table):
                 tag_map = self.table[text]
                 if set(tag_map) != {1, 6}:
-                    raise ValueError('SKEL Index has unknown tags: %s'%
-                            (set(tag_map)-{1,6}))
+                    raise ValueError(f'SKEL Index has unknown tags: {set(tag_map)-{1,6}}')
                 self.records.append(File(
                     i, # file_number
                     text, # name
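This rewrite is one of the trickier ones: the expression inside the f-string contains a set literal, so braces nest within the replacement field. That is valid Python as long as they stay balanced, as a sketch with an invented tag_map shows:

```python
tag_map = {1: [], 6: [], 9: []}  # hypothetical SKEL tag map with one unknown tag

msg = f'SKEL Index has unknown tags: {set(tag_map)-{1,6}}'
assert msg == 'SKEL Index has unknown tags: {9}'
```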
@@ -160,8 +159,7 @@ class SECTIndex(Index):
             for i, text in enumerate(self.table):
                 tag_map = self.table[text]
                 if set(tag_map) != {2, 3, 4, 6}:
-                    raise ValueError('Chunk Index has unknown tags: %s'%
-                            (set(tag_map)-{2, 3, 4, 6}))
+                    raise ValueError(f'Chunk Index has unknown tags: {set(tag_map)-{2,3,4,6}}')
 
                 toc_text = self.cncx[tag_map[2][0]]
                 self.records.append(Elem(
@@ -197,7 +197,7 @@ class IndexHeader: # {{{
             # raise ValueError('Non null trailing bytes after IDXT')
 
     def __str__(self):
-        ans = ['*'*20 + ' Index Header (%d bytes)'%len(self.record.raw)+ '*'*20]
+        ans = ['*'*20 + f' Index Header ({len(self.record.raw)} bytes)'+ '*'*20]
         a = ans.append
 
         def u(w):
@@ -363,8 +363,7 @@ class IndexEntry: # {{{
         return [0, 0]
 
     def __str__(self):
-        ans = ['Index Entry(index=%s, length=%d)'%(
-            self.index, len(self.tags))]
+        ans = [f'Index Entry(index={self.index}, length={len(self.tags)})']
         for tag in self.tags:
             if tag.value is not None:
                 ans.append('\t'+str(tag))
@@ -412,7 +411,7 @@ class IndexRecord: # {{{
                 continue
 
     def __str__(self):
-        ans = ['*'*20 + ' Index Entries (%d entries) '%len(self.indices)+ '*'*20]
+        ans = ['*'*20 + f' Index Entries ({len(self.indices)} entries) '+ '*'*20]
         a = ans.append
 
         def u(w):
@@ -470,7 +469,7 @@ class CNCX: # {{{
         return self.records.get(offset)
 
     def __str__(self):
-        ans = ['*'*20 + ' cncx (%d strings) '%len(self.records)+ '*'*20]
+        ans = ['*'*20 + f' cncx ({len(self.records)} strings) '+ '*'*20]
         for k, v in iteritems(self.records):
             ans.append('%10d : %s'%(k, v))
         return '\n'.join(ans)
@@ -568,7 +567,7 @@ class TBSIndexing: # {{{
         raise IndexError('Index %d not found'%idx)
 
     def __str__(self):
-        ans = ['*'*20 + ' TBS Indexing (%d records) '%len(self.record_indices)+ '*'*20]
+        ans = ['*'*20 + f' TBS Indexing ({len(self.record_indices)} records) '+ '*'*20]
         for r, dat in iteritems(self.record_indices):
             ans += self.dump_record(r, dat)[-1]
         return '\n'.join(ans)
@@ -43,7 +43,7 @@ class FDST:
             return ans.append(f'{k}: {v}')
         a('Offset to sections', self.sec_off)
         a('Number of section records', self.num_sections)
-        ans.append('**** %d Sections ****'% len(self.sections))
+        ans.append(f'**** {len(self.sections)} Sections ****')
         for sec in self.sections:
             ans.append('Start: %20d End: %d'%sec)
 
@@ -80,7 +80,7 @@ def remove_kindlegen_markup(parts, aid_anchor_suffix, linked_aids):
                 aid = None
             replacement = ''
             if aid in linked_aids:
-                replacement = ' id="%s"' % (aid + '-' + aid_anchor_suffix)
+                replacement = ' id="{}"'.format(aid + '-' + aid_anchor_suffix)
             tag = within_tag_aid_position_pattern.sub(replacement, tag, 1)
             srcpieces[j] = tag
         part = ''.join(srcpieces)
@@ -150,7 +150,7 @@ def update_flow_links(mobi8_reader, resource_map, log):
                 num = int(m.group(1), 32)
                 href = resource_map[num-1]
                 if href:
-                    replacement = '"%s"'%('../'+ href)
+                    replacement = '"{}"'.format('../'+ href)
                     tag = img_index_pattern.sub(replacement, tag, 1)
                 else:
                     log.warn(f'Referenced image {num} was not recognized '
@@ -168,7 +168,7 @@ def update_flow_links(mobi8_reader, resource_map, log):
                 num = int(m.group(1), 32)
                 href = resource_map[num-1]
                 if href:
-                    replacement = '"%s"'%('../'+ href)
+                    replacement = '"{}"'.format('../'+ href)
                     tag = url_img_index_pattern.sub(replacement, tag, 1)
                 else:
                     log.warn(f'Referenced image {num} was not recognized as a '
@@ -182,9 +182,9 @@ def update_flow_links(mobi8_reader, resource_map, log):
                     log.warn(f'Referenced font {num} was not recognized as a '
                             f'valid font in {tag}')
                 else:
-                    replacement = '"%s"'%('../'+ href)
+                    replacement = '"{}"'.format('../'+ href)
                     if href.endswith('.failed'):
-                        replacement = '"%s"'%('failed-'+href)
+                        replacement = '"{}"'.format('failed-'+href)
                     tag = font_index_pattern.sub(replacement, tag, 1)
 
         # process links to other css pieces
@@ -280,7 +280,7 @@ def insert_images_into_markup(parts, resource_map, log):
             except IndexError:
                 href = ''
             if href:
-                replacement = '"%s"'%('../' + href)
+                replacement = '"{}"'.format('../' + href)
                 tag = img_index_pattern.sub(replacement, tag, 1)
             else:
                 log.warn(f'Referenced image {num} was not recognized as '
@@ -447,8 +447,7 @@ class Indexer: # {{{
         if self.is_periodical and self.masthead_offset is None:
             raise ValueError('Periodicals must have a masthead')
 
-        self.log('Generating MOBI index for a %s'%('periodical' if
-            self.is_periodical else 'book'))
+        self.log('Generating MOBI index for a {}'.format('periodical' if self.is_periodical else 'book'))
         self.is_flat_periodical = False
         if self.is_periodical:
             periodical_node = next(iter(oeb.toc))
@@ -530,7 +529,7 @@ class Indexer: # {{{
 
         ans = header + body
         if len(ans) > 0x10000:
-            raise ValueError('Too many entries (%d) in the TOC'%len(offsets))
+            raise ValueError(f'Too many entries ({len(offsets)}) in the TOC')
         return ans
     # }}}
 
@@ -82,7 +82,7 @@ def subset_all_fonts(container, font_stats, report):
                 report(_('The font %s was already subset')%font_name)
             else:
                 report(_('Decreased the font {0} to {1} of its original size').format(
-                    font_name, ('%.1f%%' % (nlen/olen * 100))))
+                    font_name, (f'{nlen/olen*100:.1f}%')))
                 changed = True
             f.seek(0), f.truncate(), f.write(nraw)
 
@@ -184,7 +184,7 @@ class StylizerRules:
         if size == 'smallest':
             size = 'xx-small'
         if size in FONT_SIZE_NAMES:
-            style['font-size'] = '%.1frem' % (self.profile.fnames[size] / float(self.profile.fbase))
+            style['font-size'] = f'{self.profile.fnames[size]/float(self.profile.fbase):.1f}rem'
         if '-epub-writing-mode' in style:
             for x in ('-webkit-writing-mode', 'writing-mode'):
                 style[x] = style.get(x, style['-epub-writing-mode'])
@@ -419,7 +419,7 @@ class Stylizer:
                     style['font-size'].endswith('pt'):
                 style = copy.copy(style)
                 size = float(style['font-size'][:-2])
-                style['font-size'] = '%.2fpt' % (size * font_scale)
+                style['font-size'] = f'{size*font_scale:.2f}pt'
             style = ';\n '.join(': '.join(item) for item in style.items())
             rules.append(f'{selector} {{\n {style};\n}}')
         return '\n'.join(rules)
@@ -350,7 +350,7 @@ class CSSFlattener:
             if value == 0 or not isinstance(value, numbers.Number):
                 continue
             if value <= slineh:
-                cssdict[property] = '%0.5fem' % (dlineh / fsize)
+                cssdict[property] = f'{dlineh/fsize:0.5f}em'
             else:
                 try:
                     value = round(value / slineh) * dlineh
@@ -358,7 +358,7 @@ class CSSFlattener:
                     self.oeb.logger.warning(
                         'Invalid length:', value)
                     value = 0.0
-                cssdict[property] = '%0.5fem' % (value / fsize)
+                cssdict[property] = f'{value/fsize:0.5f}em'
 
     def flatten_node(self, node, stylizer, names, styles, pseudo_styles, psize, item_id, recurse=True):
         if not isinstance(node.tag, string_or_bytes) or namespace(node.tag) not in (XHTML_NS, SVG_NS):
@@ -473,12 +473,12 @@ class CSSFlattener:
                     dyn_rescale = 1
                 fsize = self.fmap[_sbase]
                 fsize *= dyn_rescale
-                cssdict['font-size'] = '%0.5fem'%(fsize/psize)
+                cssdict['font-size'] = f'{fsize/psize:0.5f}em'
                 psize = fsize
             elif 'font-size' in cssdict or tag == 'body':
                 fsize = self.fmap[font_size]
                 try:
-                    cssdict['font-size'] = '%0.5fem' % (fsize / psize)
+                    cssdict['font-size'] = f'{fsize/psize:0.5f}em'
                 except ZeroDivisionError:
                     cssdict['font-size'] = f'{fsize:.1f}pt'
                 psize = fsize
@@ -234,7 +234,7 @@ class FlowSplitter:
 
         self.was_split = len(self.trees) > 1
         if self.was_split:
-            self.log('\tSplit into %d parts'%len(self.trees))
+            self.log(f'\tSplit into {len(self.trees)} parts')
         self.commit()
 
     def split_on_page_breaks(self, orig_tree):
@@ -309,10 +309,10 @@ class FlowSplitter:
         return True
 
     def split_text(self, text, root, size):
-        self.log.debug('\t\t\tSplitting text of length: %d'%len(text))
+        self.log.debug(f'\t\t\tSplitting text of length: {len(text)}')
         rest = text.replace('\r', '')
         parts = rest.split('\n\n')
-        self.log.debug('\t\t\t\tFound %d parts'%len(parts))
+        self.log.debug(f'\t\t\t\tFound {len(parts)} parts')
         if max(map(len, parts)) > size:
             raise SplitError('Cannot split as file contains a <pre> tag '
                              'with a very large paragraph', root)
@@ -179,8 +179,7 @@ class SubsetFonts:
             font['item'].unload_data_from_memory()
 
         if totals[0]:
-            self.log('Reduced total font size to %.1f%% of original'%
-                     (totals[0]/totals[1] * 100))
+            self.log(f'Reduced total font size to {totals[0]/totals[1]*100:.1f}% of original')
 
     def find_embedded_fonts(self):
         '''
@@ -28,7 +28,7 @@ def ereader_header_info(header):
         print('')
         ereader_header_info202(h0)
     else:
-        raise EreaderError('Size mismatch. eReader header record size %i KB is not supported.' % len(h0))
+        raise EreaderError(f'Size mismatch. eReader header record size {len(h0)} KB is not supported.')
 
 
 def pdb_header_info(header):
@@ -696,7 +696,7 @@ class Region:
 
     def dump(self, f):
         f.write('############################################################\n')
-        f.write('########## Region (%d columns) ###############\n'%len(self.columns))
+        f.write(f'########## Region ({len(self.columns)} columns) ###############\n')
         f.write('############################################################\n\n')
         for i, col in enumerate(self.columns):
             col.dump(f, i)
@@ -107,7 +107,7 @@ class CMap(Stream):
         mapping = []
         for m in maps:
             meat = '\n'.join(f'{k} {v}' for k, v in iteritems(m))
-            mapping.append('%d beginbfchar\n%s\nendbfchar'%(len(m), meat))
+            mapping.append(f'{len(m)} beginbfchar\n{meat}\nendbfchar')
         try:
             name = name.encode('ascii').decode('ascii')
         except Exception:
@@ -177,8 +177,8 @@ class Font:
         except NoGlyphs:
             if self.used_glyphs:
                 debug(
-                    'Subsetting of %s failed, font appears to have no glyphs for the %d characters it is used with, some text may not be rendered in the PDF' %
-                    (self.metrics.names.get('full_name', 'Unknown'), len(self.used_glyphs)))
+                    'Subsetting of {} failed, font appears to have no glyphs for the {} characters it is used with, some text may not be rendered in the PDF'
+                    .format(self.metrics.names.get('full_name', 'Unknown'), len(self.used_glyphs)))
         if self.is_otf:
             self.font_stream.write(self.metrics.sfnt['CFF '].raw)
         else:
@@ -92,24 +92,24 @@ class Page(Stream):
 
     def set_opacity(self, opref):
         if opref not in self.opacities:
-            self.opacities[opref] = 'Opa%d'%len(self.opacities)
+            self.opacities[opref] = f'Opa{len(self.opacities)}'
         name = self.opacities[opref]
         serialize(Name(name), self)
         self.write(b' gs ')
 
     def add_font(self, fontref):
         if fontref not in self.fonts:
-            self.fonts[fontref] = 'F%d'%len(self.fonts)
+            self.fonts[fontref] = f'F{len(self.fonts)}'
         return self.fonts[fontref]
 
     def add_image(self, imgref):
         if imgref not in self.xobjects:
-            self.xobjects[imgref] = 'Image%d'%len(self.xobjects)
+            self.xobjects[imgref] = f'Image{len(self.xobjects)}'
         return self.xobjects[imgref]
 
     def add_pattern(self, patternref):
         if patternref not in self.patterns:
-            self.patterns[patternref] = 'Pat%d'%len(self.patterns)
+            self.patterns[patternref] = f'Pat{len(self.patterns)}'
         return self.patterns[patternref]
 
     def add_resources(self):
@@ -95,7 +95,7 @@ class Unidecoder:
         # Code groups within CODEPOINTS take the form 'xAB'
         if not isinstance(character, str):
             character = str(character, 'utf-8')
-        return 'x%02x' % (ord(character) >> 8)
+        return f'x{ord(character) >> 8:02x}'
 
     def grouped_point(self, character):
         '''
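Here a shift expression and a format spec share one replacement field; the colon cleanly separates them. A check with an invented character:

```python
c = '€'  # hypothetical non-ASCII character; ord(c) == 0x20AC

assert 'x%02x' % (ord(c) >> 8) == f'x{ord(c) >> 8:02x}' == 'x20'
```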
@@ -23,10 +23,8 @@ class StoreAction(InterfaceAction):
         self.qaction.triggered.connect(self.do_search)
         self.store_menu = self.qaction.menu()
         cm = partial(self.create_menu_action, self.store_menu)
-        for x, t in [('author', _('this author')), ('title', _('this title')),
-                ('book', _('this book'))]:
-            func = getattr(self, 'search_%s'%('author_title' if x == 'book'
-                else x))
+        for x, t in [('author', _('this author')), ('title', _('this title')), ('book', _('this book'))]:
+            func = getattr(self, 'search_{}'.format('author_title' if x == 'book' else x))
             ac = cm(x, _('Search for %s')%t, triggered=func)
             setattr(self, 'action_search_by_'+x, ac)
         self.store_menu.addSeparator()
@@ -474,7 +474,7 @@ class Adder(QObject):
                 # detection/automerge will fail for this book.
                 traceback.print_exc()
             if DEBUG:
-                prints('Added', mi.title, 'to db in: %.1f' % (time.time() - st))
+                prints('Added', mi.title, f'to db in: {time.time()-st:.1f}')
 
     def add_formats(self, book_id, paths, mi, replace=True, is_an_add=False):
         fmap = {p.rpartition(os.path.extsep)[-1].lower():p for p in paths}
@@ -23,7 +23,7 @@ class DuplicatesQuestion(QDialog):
         self.setLayout(l)
         t = ngettext('Duplicate found', 'duplicates found', len(duplicates))
         if len(duplicates) > 1:
-            t = '%d %s' % (len(duplicates), t)
+            t = f'{len(duplicates)} {t}'
         self.setWindowTitle(t)
         self.i = i = QIcon.ic('dialog_question.png')
         self.setWindowIcon(i)
@@ -888,7 +888,7 @@ class BooksModel(QAbstractTableModel): # {{{
                 val = fffunc(field_obj, idfunc(idx), default_value=0) or 0
                 if val == 0:
                     return None
-                ans = '%.1f' % (val * sz_mult)
+                ans = f'{val*sz_mult:.1f}'
                 return ('<0.1' if ans == '0.0' else ans)
         elif field == 'languages':
             def func(idx):
@@ -255,7 +255,7 @@ class GuiRunner(QObject):
         self.splash_screen.finish(main)
         timed_print('splash screen hidden')
         self.splash_screen = None
-        timed_print('Started up in %.2f seconds'%(monotonic() - self.startup_time), 'with', len(db.data), 'books')
+        timed_print(f'Started up in {monotonic()-self.startup_time:.2f} seconds', 'with', len(db.data), 'books')
         main.set_exception_handler()
         if len(self.args) > 1:
             main.handle_cli_args(self.args[1:])
@@ -273,7 +273,7 @@ def download(all_ids, tf, db, do_identify, covers, ensure_fields,
 
         if abort.is_set():
             aborted = True
-        log('Download complete, with %d failures'%len(failed_ids))
+        log(f'Download complete, with {len(failed_ids)} failures')
         return (aborted, ans, tdir, tf, failed_ids, failed_covers, title_map,
                 lm_map, all_failed)
     finally:
@@ -342,7 +342,7 @@ class Saver(QObject):
 
     def updating_metadata_finished(self):
         if DEBUG:
-            prints('Saved %d books in %.1f seconds' % (len(self.all_book_ids), time.time() - self.start_time))
+            prints(f'Saved {len(self.all_book_ids)} books in {time.time()-self.start_time:.1f} seconds')
         self.pd.close()
         self.pd.deleteLater()
         self.report()
@@ -1968,7 +1968,7 @@ class TagsModel(QAbstractItemModel): # {{{
                 if tag.name and tag.name[0] in stars: # char is a star or a half. Assume rating
                     rnum = len(tag.name)
                     if tag.name.endswith(stars[-1]):
-                        rnum = '%s.5' % (rnum - 1)
+                        rnum = f'{rnum-1}.5'
                     ans.append(f'{prefix}{category}:{rnum}')
                 else:
                     name = tag.original_name
@@ -904,7 +904,7 @@ class DiffSplit(QSplitter): # {{{
                 continue
 
             if tag in {'replace', 'insert', 'delete'}:
-                fmt = getattr(self.left, '%s_format' % ('replacereplace' if tag == 'replace' else tag))
+                fmt = getattr(self.left, '{}_format'.format('replacereplace' if tag == 'replace' else tag))
                 f = QTextLayout.FormatRange()
                 f.start, f.length, f.format = pos, len(word), fmt
                 fmts.append(f)
@@ -188,7 +188,7 @@ def do_print():
     if data['page_numbers']:
         args.append('--pdf-page-numbers')
     for edge in 'left top right bottom'.split():
-        args.append('--pdf-page-margin-' + edge), args.append('%.1f' % (data['margin_' + edge] * 72))
+        args.append('--pdf-page-margin-' + edge), args.append('{:.1f}'.format(data['margin_' + edge] * 72))
     from calibre.ebooks.conversion.cli import main
     main(args)
 
@@ -62,16 +62,16 @@ def serialize_string(key, val):
     val = str(val).encode('utf-8')
     if len(val) > 2**16 - 1:
         raise ValueError(f'{key} is too long')
-    return struct.pack('=B%dsH%ds' % (len(key), len(val)), len(key), key, len(val), val)
+    return struct.pack(f'=B{len(key)}sH{len(val)}s', len(key), key, len(val), val)
 
 
 def serialize_file_types(file_types):
     key = b'FILE_TYPES'
-    buf = [struct.pack('=B%dsH' % len(key), len(key), key, len(file_types))]
+    buf = [struct.pack(f'=B{len(key)}sH', len(key), key, len(file_types))]
 
     def add(x):
         x = x.encode('utf-8').replace(b'\0', b'')
-        buf.append(struct.pack('=H%ds' % len(x), len(x), x))
+        buf.append(struct.pack(f'=H{len(x)}s', len(x), x))
     for name, extensions in file_types:
         add(name or _('Files'))
         if isinstance(extensions, string_or_bytes):
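serialize_string and its helpers build pack formats whose field widths depend on the payload length; composing them as f-strings yields byte-identical output. A sketch with an invented key/value pair:

```python
import struct

key, val = b'TITLE', 'Demo'.encode('utf-8')  # hypothetical payload

old = struct.pack('=B%dsH%ds' % (len(key), len(val)), len(key), key, len(val), val)
new = struct.pack(f'=B{len(key)}sH{len(val)}s', len(key), key, len(val), val)
assert old == new
```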
@@ -315,7 +315,7 @@ class BIBTEX(CatalogPlugin):
             log(" --search='{}'".format(opts_dict['search_text']))
 
         if opts_dict['ids']:
-            log(' Book count: %d' % len(opts_dict['ids']))
+            log(' Book count: {}'.format(len(opts_dict['ids'])))
             if opts_dict['search_text']:
                 log(' (--search ignored when a subset of the database is specified)')
 
@@ -75,7 +75,7 @@ class CSV_XML(CatalogPlugin):
             log(" --search='{}'".format(opts_dict['search_text']))
 
         if opts_dict['ids']:
-            log(' Book count: %d' % len(opts_dict['ids']))
+            log(' Book count: {}'.format(len(opts_dict['ids'])))
             if opts_dict['search_text']:
                 log(' (--search ignored when a subset of the database is specified)')
 
@@ -153,7 +153,7 @@ class CSV_XML(CatalogPlugin):
                     item = item.replace('\r\n', ' ')
                     item = item.replace('\n', ' ')
                 elif fm.get(field, {}).get('datatype', None) == 'rating' and item:
-                    item = '%.2g' % (item / 2)
+                    item = f'{item/2:.2g}'
 
                 # Convert HTML to markdown text
                 if isinstance(item, str):
@@ -197,7 +197,7 @@ class CSV_XML(CatalogPlugin):
                 if not isinstance(val, (bytes, str)):
                     if (fm.get(field, {}).get('datatype', None) ==
                             'rating' and val):
-                        val = '%.2g' % (val / 2)
+                        val = f'{val/2:.2g}'
                     val = str(val)
                 item = getattr(E, field)(val)
                 record.append(item)
@@ -300,7 +300,7 @@ class EPUB_MOBI(CatalogPlugin):
 
         opts_dict = vars(opts)
         if opts_dict['ids']:
-            build_log.append(' book count: %d' % len(opts_dict['ids']))
+            build_log.append(' book count: {}'.format(len(opts_dict['ids'])))
 
         sections_list = []
         if opts.generate_authors:
@@ -834,11 +834,11 @@ class CatalogBuilder:
         self.individual_authors = list(individual_authors)
 
         if self.DEBUG and self.opts.verbose:
-            self.opts.log.info('\nfetch_books_by_author(): %d unique authors' % len(unique_authors))
+            self.opts.log.info(f'\nfetch_books_by_author(): {len(unique_authors)} unique authors')
             for author in unique_authors:
                 self.opts.log.info((' %-50s %-25s %2d' % (author[0][0:45], author[1][0:20],
                                     author[2])).encode('utf-8'))
-            self.opts.log.info('\nfetch_books_by_author(): %d individual authors' % len(individual_authors))
+            self.opts.log.info(f'\nfetch_books_by_author(): {len(individual_authors)} individual authors')
             for author in sorted(individual_authors):
                 self.opts.log.info(f'{author}')
 
@@ -865,7 +865,7 @@ class CatalogBuilder:
         self.books_by_title = sorted(self.books_to_catalog, key=lambda x: sort_key(x['title_sort'].upper()))
 
         if self.DEBUG and self.opts.verbose:
-            self.opts.log.info('fetch_books_by_title(): %d books' % len(self.books_by_title))
+            self.opts.log.info(f'fetch_books_by_title(): {len(self.books_by_title)} books')
             self.opts.log.info(' %-40s %-40s' % ('title', 'title_sort'))
             for title in self.books_by_title:
                 self.opts.log.info((' %-40s %-40s' % (title['title'][0:40],
@@ -1202,7 +1202,7 @@ class CatalogBuilder:
             else:
                 yield tag
 
-        ans = '%s%d %s:\n' % (' ' * indent, len(tags), header)
+        ans = '{}{} {}:\n'.format(' '*indent, len(tags), header)
         ans += ' ' * (indent + 1)
         out_str = ''
         sorted_tags = sorted(tags, key=sort_key)
@@ -2119,12 +2119,11 @@ class CatalogBuilder:
 
         if self.opts.verbose:
             if len(genre_list):
-                self.opts.log.info('  Genre summary: %d active genre tags used in generating catalog with %d titles' %
-                                   (len(genre_list), len(self.books_to_catalog)))
+                self.opts.log.info(f'  Genre summary: {len(genre_list)} active genre tags used in generating catalog with {len(self.books_to_catalog)} titles')
 
                 for genre in genre_list:
                     for key in genre:
-                        self.opts.log.info('   %s: %d %s' % (self.get_friendly_genre_tag(key),
+                        self.opts.log.info('   {}: {} {}'.format(self.get_friendly_genre_tag(key),
                                            len(genre[key]),
                                            'titles' if len(genre[key]) > 1 else 'title'))
 
@@ -3036,7 +3035,7 @@ class CatalogBuilder:
         Outputs:
          ncx_soup (file): updated
         '''
-        section_header = '%s [%d]' % (tocTitle, len(self.books_by_description))
+        section_header = f'{tocTitle} [{len(self.books_by_description)}]'
         if self.generate_for_kindle_mobi:
             section_header = tocTitle
         navPointTag = self.generate_ncx_section_header('bydescription-ID', section_header, 'content/book_%d.html' % int(self.books_by_description[0]['id']))
@@ -3115,7 +3114,7 @@ class CatalogBuilder:
             series_by_letter.append(current_series_list)
 
         # --- Construct the 'Books By Series' section ---
-        section_header = '%s [%d]' % (tocTitle, len(self.all_series))
+        section_header = f'{tocTitle} [{len(self.all_series)}]'
         if self.generate_for_kindle_mobi:
             section_header = tocTitle
         output = 'BySeries'
@@ -3197,7 +3196,7 @@ class CatalogBuilder:
             books_by_letter.append(current_book_list)
 
         # --- Construct the 'Books By Title' section ---
-        section_header = '%s [%d]' % (tocTitle, len(self.books_by_title))
+        section_header = f'{tocTitle} [{len(self.books_by_title)}]'
         if self.generate_for_kindle_mobi:
             section_header = tocTitle
         output = 'ByAlphaTitle'
@@ -3285,7 +3284,7 @@ class CatalogBuilder:
         # --- Construct the 'Books By Author' *section* ---
         file_ID = f'{tocTitle.lower()}'
         file_ID = file_ID.replace(' ', '')
-        section_header = '%s [%d]' % (tocTitle, len(self.individual_authors))
+        section_header = f'{tocTitle} [{len(self.individual_authors)}]'
         if self.generate_for_kindle_mobi:
             section_header = tocTitle
         navPointTag = self.generate_ncx_section_header(f'{file_ID}-ID', section_header, f'{HTML_file}#section_start')
@@ -3563,7 +3562,7 @@ class CatalogBuilder:
         # --- Construct the 'Books By Genre' *section* ---
         file_ID = f'{tocTitle.lower()}'
         file_ID = file_ID.replace(' ', '')
-        section_header = '%s [%d]' % (tocTitle, len(self.genres))
+        section_header = f'{tocTitle} [{len(self.genres)}]'
         if self.generate_for_kindle_mobi:
             section_header = tocTitle
         navPointTag = self.generate_ncx_section_header(f'{file_ID}-ID', section_header, 'content/Genre_{}.html#section_start'.format(self.genres[0]['tag']))
@@ -798,7 +798,7 @@ class PostInstall:
                 os.rmdir(config_dir)
 
         if warn is None and self.warnings:
-            self.info('\n\nThere were %d warnings\n'%len(self.warnings))
+            self.info(f'\n\nThere were {len(self.warnings)} warnings\n')
             for args, kwargs in self.warnings:
                 self.info('*', *args, **kwargs)
                 print()
@@ -79,4 +79,4 @@ def debug_print(*args, **kw):
     # Check if debugging is enabled
     if is_debugging():
         # Print the elapsed time and the provided arguments if debugging is enabled
-        prints('DEBUG: %6.1f' % (time.monotonic() - base_time), *args, **kw)
+        prints(f'DEBUG: {time.monotonic()-base_time:6.1f}', *args, **kw)
@@ -271,7 +271,7 @@ class AuthController:
     def do_http_auth(self, data, endpoint):
         ban_key = data.remote_addr, data.forwarded_for
         if self.ban_list.is_banned(ban_key):
-            raise HTTPForbidden('Too many login attempts', log='Too many login attempts from: %s' % (ban_key if data.forwarded_for else data.remote_addr))
+            raise HTTPForbidden('Too many login attempts', log=f'Too many login attempts from: {ban_key if data.forwarded_for else data.remote_addr}')
         auth = data.inheaders.get('Authorization')
         nonce_is_stale = False
         log_msg = None
@@ -412,7 +412,7 @@ def auto_reload(log, dirs=frozenset(), cmd=None, add_default_dirs=True, listen_o
     cmd.insert(1, 'calibre-server')
     dirs = find_dirs_to_watch(fpath, dirs, add_default_dirs)
     log('Auto-restarting server on changes press Ctrl-C to quit')
-    log('Watching %d directory trees for changes' % len(dirs))
+    log(f'Watching {len(dirs)} directory trees for changes')
     with ReloadServer(listen_on) as server, Worker(cmd, log, server) as worker:
         w = Watcher(dirs, worker, log)
         worker.wakeup = w.wakeup
@@ -773,7 +773,7 @@ class ServerLoop:
         for pool in (self.plugin_pool, self.pool):
             pool.stop(wait_till)
             if pool.workers:
-                self.log.warn('Failed to shutdown %d workers in %s cleanly' % (len(pool.workers), pool.__class__.__name__))
+                self.log.warn(f'Failed to shutdown {len(pool.workers)} workers in {pool.__class__.__name__} cleanly')
         self.jobs_manager.wait_for_shutdown(wait_till)
 
 
@@ -216,7 +216,7 @@ def manage_users_cli(path=None, args=()):
             prints('%d)' % (i + 1), choice)
         print()
         while True:
-            prompt = question + ' [1-%d]:' % len(choices)
+            prompt = question + f' [1-{len(choices)}]:'
             if default is not None:
                 prompt = question + ' [1-%d %s: %d]' % (
                     len(choices), _('default'), default + 1)
@@ -462,7 +462,7 @@ def process_category_node(
         # reflect that in the node structure as well.
         node_data = tag_map.get(id(tag), None)
         if node_data is None:
-            node_id = 'n%d' % len(tag_map)
+            node_id = f'n{len(tag_map)}'
            node_data = items[node_id] = category_item_as_json(tag, clear_rating=clear_rating)
            tag_map[id(tag)] = (node_id, node_data)
            node_to_tag_map[node_id] = tag
@@ -168,7 +168,7 @@ class Route:
         if argspec.args[2:len(self.names)+2] != self.names:
             raise route_error("Function's argument names do not match the variable names in the route")
         if not frozenset(self.type_checkers).issubset(frozenset(self.names)):
-            raise route_error('There exist type checkers that do not correspond to route variables: %r' % (set(self.type_checkers) - set(self.names)))
+            raise route_error(f'There exist type checkers that do not correspond to route variables: {set(self.type_checkers)-set(self.names)!r}')
         self.min_size = found_optional_part if found_optional_part is not False else len(matchers)
         self.max_size = sys.maxsize if self.soak_up_extra else len(matchers)
 
@@ -376,14 +376,14 @@ class TestHTTP(BaseTest):
             r = conn.getresponse()
             self.ae(r.status, http_client.PARTIAL_CONTENT)
             self.ae(str(r.getheader('Accept-Ranges')), 'bytes')
-            self.ae(str(r.getheader('Content-Range')), 'bytes 2-25/%d' % len(fdata))
+            self.ae(str(r.getheader('Content-Range')), f'bytes 2-25/{len(fdata)}')
             self.ae(int(r.getheader('Content-Length')), 24)
             self.ae(r.read(), fdata[2:26])
 
             conn.request('GET', '/test', headers={'Range':'bytes=100000-'})
             r = conn.getresponse()
             self.ae(r.status, http_client.REQUESTED_RANGE_NOT_SATISFIABLE)
-            self.ae(str(r.getheader('Content-Range')), 'bytes */%d' % len(fdata))
+            self.ae(str(r.getheader('Content-Range')), f'bytes */{len(fdata)}')
 
             conn.request('GET', '/test', headers={'Range':'bytes=25-50', 'If-Range':etag})
             r = conn.getresponse()
@@ -41,7 +41,7 @@ class Index(list):
             offsets = b''.join(pack(b'>L', x)[1:] for x in offsets)
         else:
             fmt = {1:'B', 2:'H', 4:'L'}[offsize]
-            offsets = pack(('>%d%s'%(len(offsets), fmt)).encode('ascii'),
+            offsets = pack(f'>{len(offsets)}{fmt}'.encode('ascii'),
                 *offsets)
 
         self.raw = prefix + offsets + obj_data
@@ -102,7 +102,7 @@ class Charsets(list):
     def compile(self):
         ans = pack(b'>B', 0)
         sids = [self.strings(x) for x in self]
-        ans += pack(('>%dH'%len(self)).encode('ascii'), *sids)
+        ans += pack(f'>{len(self)}H'.encode('ascii'), *sids)
         self.raw = ans
         return ans
 
@@ -165,8 +165,7 @@ def test_roundtrip(ff=None):
     if data[:12] != rd[:12]:
         raise ValueError('Roundtripping failed, font header not the same')
     if len(data) != len(rd):
-        raise ValueError('Roundtripping failed, size different (%d vs. %d)'%
-                         (len(data), len(rd)))
+        raise ValueError(f'Roundtripping failed, size different ({len(data)} vs. {len(rd)})')
 
 
 if __name__ == '__main__':
@@ -234,7 +234,7 @@ def print_stats(old_stats, new_stats):
         np = nsz/new_total * 100
         suffix = ' | same size'
         if nsz != osz:
-            suffix = ' | reduced to %.1f %%'%(nsz/osz * 100)
+            suffix = f' | reduced to {nsz/osz*100:.1f} %'
         prints('%4s'%table, ' ', '%10s'%osz, ' ', f'{op:5.1f} %', ' ',
                '%10s'%nsz, ' ', f'{np:5.1f} %', suffix)
     prints('='*80)
@@ -289,7 +289,7 @@ def main(args):
     reduced = (len(sf)/len(orig)) * 100
 
     def sz(x):
-        return '%gKB'%(len(x)/1024.)
+        return f'{len(x)/1024.0:g}KB'
     print_stats(old_stats, new_stats)
     prints('Original size:', sz(orig), 'Subset size:', sz(sf), f'Reduced to: {reduced:g}%')
     prints(f'Subsetting took {taken:g} seconds')
@@ -381,7 +381,7 @@ def all():
             print(name, path, err)
             print()
 
-    print('Average reduction to: %.1f%%'%(sum(averages)/len(averages)))
+    print(f'Average reduction to: {sum(averages)/len(averages):.1f}%')
     print('Total:', total, 'Unsupported:', len(unsupported), 'Failed:',
           len(failed), 'Warnings:', len(warnings))
 
@@ -2172,7 +2172,7 @@ returns the empty string.
         except:
             return ''
         i = d1 - d2
-        return '%.1f'%(i.days + (i.seconds/(24.0*60.0*60.0)))
+        return f'{i.days+(i.seconds/(24.0*60.0*60.0)):.1f}'
 
 
 class BuiltinDateArithmetic(BuiltinFormatterFunction):
@@ -762,7 +762,7 @@ class SMTP:
         # Hmmm? what's this? -ddm
         # self.esmtp_features['7bit']=""
         if self.has_extn('size'):
-            esmtp_opts.append('size=%d' % len(msg))
+            esmtp_opts.append(f'size={len(msg)}')
         for option in mail_options:
             esmtp_opts.append(option)
 
@@ -909,7 +909,7 @@ if __name__ == '__main__':
         if not line:
             break
         msg = msg + line
-    print('Message length is %d' % len(msg))
+    print(f'Message length is {len(msg)}')
 
     server = SMTP('localhost')
     server.set_debuglevel(1)
@@ -183,7 +183,7 @@ class Register(Thread):
             prints('Registering with default programs...')
         register()
         if DEBUG:
-            prints('Registered with default programs in %.1f seconds' % (time.monotonic() - st))
+            prints(f'Registered with default programs in {time.monotonic()-st:.1f} seconds')
 
     def __enter__(self):
         return self
@@ -1760,7 +1760,7 @@ class BasicNewsRecipe(Recipe):
             url = url.decode('utf-8')
         if url.startswith('feed://'):
             url = 'http'+url[4:]
-        self.report_progress(0, _('Fetching feed')+' %s...'%(title if title else url))
+        self.report_progress(0, _('Fetching feed')+f' {title if title else url}...')
         try:
             purl = urlparse(url, allow_fragments=False)
             if purl.username or purl.password:
@@ -1780,7 +1780,7 @@ class BasicNewsRecipe(Recipe):
             ))
         except Exception as err:
             feed = Feed()
-            msg = 'Failed feed: %s'%(title if title else url)
+            msg = f'Failed feed: {title if title else url}'
             feed.populate_from_preparsed_feed(msg, [])
             feed.description = as_unicode(err)
             parsed_feeds.append(feed)
@@ -94,7 +94,7 @@ class NewsCategory(NewsTreeItem):
 
     def data(self, role):
         if role == Qt.ItemDataRole.DisplayRole:
-            return (self.cdata + ' [%d]'%len(self.children))
+            return (self.cdata + f' [{len(self.children)}]')
         elif role == Qt.ItemDataRole.FontRole:
             return self.bold_font
         elif role == Qt.ItemDataRole.ForegroundRole and self.category == _('Scheduled'):