Merge branch 'remaining-percent-format' of https://github.com/un-pogaz/calibre
commit dfe88a675e
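Every hunk below follows the same pattern: printf-style '%' formatting (what ruff flags as rule UP031) is rewritten as an f-string or str.format() call, and the few call sites that still genuinely need '%' interpolation (reusable templates, dynamic '%-*s' widths, regex sources built with '% length') keep it and gain a per-line '# noqa: UP031' marker instead. A minimal sketch of the conversion, using invented example values rather than code taken from calibre:

# Illustrative sketch only: the kind of rewrite this commit applies.
name, size = 'cover.jpg', 10240

# Before: printf-style formatting, flagged by ruff as UP031
old = '%-20s: %d KB' % (name, size // 1024)

# After: the equivalent f-string
new = f'{name:<20}: {size // 1024} KB'
assert old == new

# Call sites that keep %-formatting (reusable templates, dynamic widths,
# regex sources combined with "% length") are left unchanged and the lint
# is silenced per line instead:
width = 20
kept = '%-*s: %d KB' % (width, name, size // 1024)  # noqa: UP031
assert kept == old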
@@ -48,7 +48,6 @@ unfixable = ['PIE794', 'ISC001']
 [tool.ruff.lint.per-file-ignores]
 "recipes/*" = ['UP']
 "setup/changelog.py" = ['ISC001']
-"src/calibre/*" = ['UP031']
 "src/calibre/ebooks/unihandecode/*codepoints.py" = ['E501']
 "src/calibre/ebooks/metadata/sources/*" = ['UP']
 "src/calibre/gui2/store/stores/*" = ['UP']
@@ -86,7 +86,7 @@ def _print_check_library_results(checker, check, as_csv=False, out=sys.stdout):
 else:
 print(check[1], file=out)
 for i in list:
-print(' %-40.40s - %-40.40s' % (i[0], i[1]), file=out)
+print(f' {i[0]:<40.40} -{i[1]:<40.40}', file=out)


 def main(opts, args, dbctx):
@@ -231,7 +231,7 @@ def do_list(

 widths = list(base_widths)
 titles = map(
-lambda x, y: '%-*s%s' % (x - len(separator), y, separator), widths,
+lambda x, y: '%-*s%s' % (x - len(separator), y, separator), widths, # noqa: UP031
 [template_title if v == 'template' else v for v in fields]
 )
 with ColoredStream(sys.stdout, fg='green'):
@@ -251,7 +251,7 @@ def do_list(
 ft = text[i][l] if l < len(text[i]) else ''
 stdout.write(ft.encode('utf-8'))
 if i < len(text) - 1:
-filler = ('%*s' % (widths[i] - str_width(ft) - 1, ''))
+filler = ' '*(widths[i] - str_width(ft) - 1)
 stdout.write((filler + separator).encode('utf-8'))
 stdout.write(linesep)

@@ -99,7 +99,7 @@ def do_list(fields, data, opts):

 widths = list(base_widths)
 titles = map(
-lambda x, y: '%-*s%s' % (x - len(separator), y, separator), widths, fields
+lambda x, y: '%-*s%s' % (x - len(separator), y, separator), widths, fields # noqa: UP031
 )
 with ColoredStream(sys.stdout, fg='green'):
 prints(''.join(titles))
@@ -115,7 +115,7 @@ def do_list(fields, data, opts):
 for l in range(lines):
 for i, field in enumerate(text):
 ft = text[i][l] if l < len(text[i]) else ''
-filler = '%*s' % (widths[i] - len(ft) - 1, '')
+filler = ' '*(widths[i] - len(ft) - 1)
 print(ft.encode('utf-8') + filler.encode('utf-8'), end=separator)
 print()

@@ -118,9 +118,9 @@ def get_fields(dbctx):
 def main(opts, args, dbctx):
 if opts.list_fields:
 ans = get_fields(dbctx)
-prints('%-40s' % _('Title'), _('Field name'), '\n')
+prints('{:<40}'.format(_('Title')), _('Field name'), '\n')
 for key, m in ans:
-prints('%-40s' % m['name'], key)
+prints('{:<40}'.format(m['name']), key)
 return 0

 def verify_int(x):
@@ -244,7 +244,7 @@ def main():
 where = ('Memory', 'Card A', 'Card B')
 print('Filesystem\tSize \tUsed \tAvail \tUse%')
 for i in range(3):
-print('%-10s\t%s\t%s\t%s\t%s'%(where[i], human_readable(total[i]), human_readable(total[i]-free[i]), human_readable(free[i]),
+print('{:<10}\t{}\t{}\t{}\t{}'.format(where[i], human_readable(total[i]), human_readable(total[i]-free[i]), human_readable(free[i]),
 str(0 if total[i]==0 else int(100*(total[i]-free[i])/(total[i]*1.)))+'%'))
 elif command == 'eject':
 dev.eject()
@@ -168,7 +168,7 @@ class Bookmark: # {{{
 ans = ['Kobo bookmark:']

 def fmt(x, y):
-ans.append('%-20s: %s'%(str(x), str(y)))
+ans.append(f'{x:<20}: {y}')

 if self.contentId:
 fmt('ContentID', self.contentId)
@@ -101,7 +101,7 @@ class Book(Book_):
 ans = ['Kobo metadata:']

 def fmt(x, y):
-ans.append('%-20s: %s'%(str(x), str(y)))
+ans.append(f'{x:<20}: {y}')

 if self.contentID:
 fmt('Content ID', self.contentID)
@@ -484,7 +484,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
 except:
 today = time.localtime()
 date = (today[0], today[1], today[2])
-template = '{title}_%d-%d-%d' % date
+template = f'{{title}}_{date[0]}-{date[1]}-{date[2]}'
 use_subdirs = self.SUPPORTS_SUB_DIRS and settings.use_subdirs

 from calibre.library.save_to_disk import config, get_components
@@ -91,7 +91,7 @@ def create_upload_path(mdata, fname, template, sanitize,
 except:
 today = time.localtime()
 date = (today[0], today[1], today[2])
-template = '{title}_%d-%d-%d' % date
+template = f'{{title}}_{date[0]}-{date[1]}-{date[2]}'

 fname = sanitize(fname)
 ext = path_type.splitext(fname)[1]
@@ -192,10 +192,8 @@ class RTFInput(InputFormatPlugin):
 return name

 def write_inline_css(self, ic, border_styles):
-font_size_classes = ['span.fs%d { font-size: %spt }'%(i, x) for i, x in
-enumerate(ic.font_sizes)]
-color_classes = ['span.col%d { color: %s }'%(i, x) for i, x in
-enumerate(ic.colors) if x != 'false']
+font_size_classes = [f'span.fs{i} {{ font-size: {x}pt }}' for i, x in enumerate(ic.font_sizes)]
+color_classes = [f'span.col{i} {{ color: {x} }}' for i, x in enumerate(ic.colors) if x != 'false']
 css = textwrap.dedent('''
 span.none {
 text-decoration: none; font-weight: normal;
@@ -265,16 +265,16 @@ class Dehyphenator:
 self.format = format
 if format == 'html':
 intextmatch = re.compile((
-r'(?<=.{%i})(?P<firstpart>[^\W\-]+)(-|‐)\s*(?=<)(?P<wraptags>(</span>)?'
+r'(?<=.{%i})(?P<firstpart>[^\W\-]+)(-|‐)\s*(?=<)(?P<wraptags>(</span>)?' # noqa: UP031
 r'\s*(</[iubp]>\s*){1,2}(?P<up2threeblanks><(p|div)[^>]*>\s*(<p[^>]*>\s*</p>\s*)'
 r'?</(p|div)>\s+){0,3}\s*(<[iubp][^>]*>\s*){1,2}(<span[^>]*>)?)\s*(?P<secondpart>[\w\d]+)') % length)
 elif format == 'pdf':
 intextmatch = re.compile((
-r'(?<=.{%i})(?P<firstpart>[^\W\-]+)(-|‐)\s*(?P<wraptags><p>|'
+r'(?<=.{%i})(?P<firstpart>[^\W\-]+)(-|‐)\s*(?P<wraptags><p>|' # noqa: UP031
 r'</[iub]>\s*<p>\s*<[iub]>)\s*(?P<secondpart>[\w\d]+)')% length)
 elif format == 'txt':
 intextmatch = re.compile(
-r'(?<=.{%i})(?P<firstpart>[^\W\-]+)(-|‐)( |\t)*(?P<wraptags>(\n( |\t)*)+)(?P<secondpart>[\w\d]+)'% length)
+r'(?<=.{%i})(?P<firstpart>[^\W\-]+)(-|‐)( |\t)*(?P<wraptags>(\n( |\t)*)+)(?P<secondpart>[\w\d]+)'% length) # noqa: UP031
 elif format == 'individual_words':
 intextmatch = re.compile(
 r'(?!<)(?P<firstpart>[^\W\-]+)(-|‐)\s*(?P<secondpart>\w+)(?![^<]*?>)', re.UNICODE)
@@ -529,11 +529,11 @@ class HTMLPreProcessor:
 # print('The pdf line length returned is ' + str(length))
 # unwrap em/en dashes
 end_rules.append((re.compile(
-r'(?<=.{%i}[–—])\s*<p>\s*(?=[\[a-z\d])' % length), lambda match: ''))
+r'(?<=.{%i}[–—])\s*<p>\s*(?=[\[a-z\d])' % length), lambda match: '')) # noqa: UP031
 end_rules.append(
 # Un wrap using punctuation
 (re.compile((
-r'(?<=.{%i}([a-zäëïöüàèìòùáćéíĺóŕńśúýâêîôûçąężıãõñæøþðßěľščťžňďřů,:)\\IAß]'
+r'(?<=.{%i}([a-zäëïöüàèìòùáćéíĺóŕńśúýâêîôûçąężıãõñæøþðßěľščťžňďřů,:)\\IAß]' # noqa: UP031
 r'|(?<!\&\w{4});))\s*(?P<ital></(i|b|u)>)?\s*(</p>\s*<p>\s*)+\s*(?=(<(i|b|u)>)?'
 r'\s*[\w\d$(])') % length, re.UNICODE), wrap_lines),
 )
@@ -209,7 +209,7 @@ class DOCX:
 E = ElementMaker(namespace=self.namespace.namespaces['ep'], nsmap={None:self.namespace.namespaces['ep']})
 props = E.Properties(
 E.Application(__appname__),
-E.AppVersion('%02d.%04d' % numeric_version[:2]),
+E.AppVersion(f'{numeric_version[0]:02}.{numeric_version[1]:04}'),
 E.DocSecurity('0'),
 E.HyperlinksChanged('false'),
 E.LinksUpToDate('true'),
@@ -769,8 +769,7 @@ class StylesManager:
 text_style.seq = i
 self.descendant_text_styles = sorted(descendant_style_map, key=attrgetter('seq'))

-self.log.debug('%d Text Styles %d Combined styles' % tuple(map(len, (
-self.descendant_text_styles, self.combined_styles))))
+self.log.debug(f'{len(self.descendant_text_styles)} Text Styles {len(self.combined_styles)} Combined styles')

 self.primary_heading_style = None
 if heading_styles:
@@ -765,7 +765,7 @@ class Metadata:
 ans = []

 def fmt(x, y):
-ans.append('%-20s: %s'%(str(x), str(y)))
+ans.append(f'{x:<20}: {y}')

 fmt('Title', self.title)
 if self.title_sort:
@@ -286,7 +286,7 @@ class MetadataUpdater:
 s, src = src[:length],src[length:]
 hexa = ' '.join([f'{ord(x):02X}' for x in s])
 s = s.translate(FILTER)
-result += '%04X %-*s %s\n' % (N, length*3, hexa, s)
+result += '%04X %-*s %s\n' % (N, length*3, hexa, s) # noqa: UP031
 N+=length
 print(result)

@@ -308,7 +308,7 @@ class MetadataUpdater:
 def dump_pdbrecords(self):
 # Diagnostic
 print('MetadataUpdater.dump_pdbrecords()')
-print('%10s %10s %10s' % ('offset','flags','val'))
+print(f"{'offset':>10} {'flags':>10} {'val':>10}")
 for i in range(len(self.pdbrecords)):
 pdbrecord = self.pdbrecords[i]
 print(f'{pdbrecord[0]:10X} {pdbrecord[1]:10X} {pdbrecord[2]:10X}')
@@ -161,7 +161,7 @@ class MetadataUpdater:
 s, src = src[:length],src[length:]
 hexa = ' '.join([f'{ord(x):02X}' for x in s])
 s = s.translate(FILTER)
-result += '%04X %-*s %s\n' % (N, length*3, hexa, s)
+result += '%04X %-*s %s\n' % (N, length*3, hexa, s) # noqa: UP031
 N+=length
 print(result)

@@ -96,13 +96,13 @@ class Index:
 a = ans.append
 if self.header is not None:
 for field in INDEX_HEADER_FIELDS:
-a('%-12s: %r'%(FIELD_NAMES.get(field, field), self.header[field]))
+a(f'{FIELD_NAMES.get(field, field):<12}: {self.header[field]!r}')
 ans.extend(['', ''])
 ans += ['*'*10 + f' Index Record Headers ({len(self.index_headers)} records) ' + '*'*10]
 for i, header in enumerate(self.index_headers):
 ans += ['*'*10 + f' Index Record {i} ' + '*'*10]
 for field in INDEX_HEADER_FIELDS:
-a('%-12s: %r'%(FIELD_NAMES.get(field, field), header[field]))
+a(f'{FIELD_NAMES.get(field, field):<12}: {header[field]!r}')

 if self.cncx:
 a('*'*10 + ' CNCX ' + '*'*10)
@@ -98,8 +98,8 @@ class SecondaryIndexHeader: # {{{
 a = ans.append

 def u(w):
-a('Unknown: %r (%d bytes) (All zeros: %r)'%(w,
-len(w), not bool(w.replace(b'\0', b''))))
+a('Unknown: {!r} ({} bytes) (All zeros: {!r})'.format(
+w, len(w), not bool(w.replace(b'\0', b''))))

 a(f'Header length: {self.header_length}')
 u(self.unknown1)
@@ -199,7 +199,7 @@ class IndexHeader: # {{{
 a = ans.append

 def u(w):
-a('Unknown: %r (%d bytes) (All zeros: %r)'%(w,
+a('Unknown: {!r} ({} bytes) (All zeros: {!r})'.format(w,
 len(w), not bool(w.replace(b'\0', b''))))

 a(f'Header length: {self.header_length}')
@@ -411,7 +411,7 @@ class IndexRecord: # {{{
 a = ans.append

 def u(w):
-a('Unknown: %r (%d bytes) (All zeros: %r)'%(w,
+a('Unknown: {!r} ({} bytes) (All zeros: {!r})'.format(w,
 len(w), not bool(w.replace(b'\0', b''))))
 for entry in self.indices:
 offset = entry.offset
@@ -582,9 +582,7 @@ class TBSIndexing: # {{{
 ans = []
 ans.append(f"\nRecord #{r.idx}: Starts at: {dat['geom'][0]} Ends at: {dat['geom'][1]}")
 s, e, c = dat['starts'], dat['ends'], dat['complete']
-ans.append(('\tContains: %d index entries '
-'(%d ends, %d complete, %d starts)')%tuple(map(len, (s+e+c, e,
-c, s))))
+ans.append(f'\tContains: {len(s+e+c)} index entries ({len(e)} ends, {len(c)} complete, {len(s)} starts)')
 byts = bytearray(r.trailing_data.get('indexing', b''))
 ans.append(f'TBS bytes: {format_bytes(byts)}')
 for typ, entries in (('Ends', e), ('Complete', c), ('Starts', s)):
@@ -45,7 +45,7 @@ class FDST:
 a('Number of section records', self.num_sections)
 ans.append(f'**** {len(self.sections)} Sections ****')
 for sec in self.sections:
-ans.append('Start: %20d End: %d'%sec)
+ans.append(f'Start: {sec[0]:>20} End: {sec[1]}')

 return '\n'.join(ans)

@@ -266,7 +266,7 @@ class MOBIFile:
 for entries in itervalues(strand):
 for e in entries:
 desc.append(
-' %s%d [%-9s] parent: %s (%d) Geometry: (%d, %d)'%(
+' {}{} [{:<9}] parent: {} ({}) Geometry: ({}, {})'.format(
 e.depth * (' ') + '- ', e.index, e.action, e.parent,
 e.index-(e.parent or 0), e.start-i*RECORD_SIZE,
 e.start+e.length-i*RECORD_SIZE))
@@ -662,7 +662,7 @@ class MobiReader:
 elif mi.cover is not None:
 opf.cover = mi.cover
 else:
-opf.cover = 'images/%05d.jpg' % 1
+opf.cover = f'images/{1:05}.jpg'
 if not os.path.exists(os.path.join(os.path.dirname(htmlfile),
 * opf.cover.split('/'))):
 opf.cover = None
@@ -344,7 +344,7 @@ class Container(ContainerBase): # {{{
 item_id = 'id'
 while item_id in all_ids:
 c += 1
-item_id = 'id' + f'{c}'
+item_id = f'id{c}'
 manifest = self.opf_xpath('//opf:manifest')[0]
 href = self.name_to_href(name, self.opf_name)
 item = manifest.makeelement(OPF('item'),
@@ -366,10 +366,10 @@ class FlowSplitter:
 elif size <= self.max_flow_size:
 self.split_trees.append(t)
 self.log.debug(
-f'\t\t\tCommitted sub-tree #{len(self.split_trees)} ({size / 1024.0} KB)')
+f'\t\t\tCommitted sub-tree #{len(self.split_trees)} ({size//1024} KB)')
 else:
 self.log.debug(
-f'\t\t\tSplit tree still too large: {size / 1024.0} KB')
+f'\t\t\tSplit tree still too large: {size//1024} KB')
 self.split_to_size(t)

 def find_split_point(self, root):
@@ -777,7 +777,7 @@ def add_pagenum_toc(root, toc, opts, page_number_display_map):
 .calibre-pdf-toc .level-%d td:first-of-type { padding-left: %.1gem }
 .calibre-pdf-toc .level-%d td:first-of-type { padding-left: %.1gem }
 .calibre-pdf-toc .level-%d td:first-of-type { padding-left: %.1gem }
-''' % tuple(indents) + (opts.extra_css or '')
+''' % tuple(indents) + (opts.extra_css or '') # noqa: UP031
 style = body.makeelement(XHTML('style'), type='text/css')
 style.text = css
 body.append(style)
@@ -68,7 +68,7 @@ class IndirectObjects:
 stream.write(b'xref'+EOL)
 stream.write(f'0 {1 + len(self._offsets)}')
 stream.write(EOL)
-stream.write('%010d 65535 f '%0)
+stream.write(f'{0:010} 65535 f ')
 stream.write(EOL)

 for offset in self._offsets:
@@ -366,8 +366,7 @@ class Document:
 tag = el.tag

 if weight + content_score < 0:
-self.debug('Cleaned %s with score %6.3f and weight %-3s' %
-(describe(el), content_score, weight, ))
+self.debug(f'Cleaned {describe(el)} with score {content_score:6.3f} and weight {weight:<3}')
 el.drop_tree()
 elif el.text_content().count(',') < 10:
 counts = {}
@@ -322,10 +322,10 @@ class RtfTokenizer:
 l = l + 1
 i = i + 1
 if l > 10:
-raise Exception('Error (at:%d): Too many digits in control word numeric argument.'%[tokenStart])
+raise Exception(f'Error (at:{tokenStart}): Too many digits in control word numeric argument.')

 if not consumed:
-raise Exception('Error (at:%d): Control Word without numeric argument end.'%[tokenStart])
+raise Exception(f'Error (at:{tokenStart}): Control Word without numeric argument end.')

 separator = ''
 if isChar(self.rtfData[i], ' '):
@@ -177,7 +177,7 @@ class RTFMLizer:
 self.log.exception(f'Image {item.href} is corrupted, ignoring')
 repl = '\n\n'
 else:
-repl = '\n\n{\\*\\shppict{\\pict\\jpegblip\\picw%i\\pich%i \n%s\n}}\n\n' % (width, height, data)
+repl = '\n\n{\\*\\shppict{\\pict\\jpegblip\\picw%i\\pich%i \n%s\n}}\n\n' % (width, height, data) # noqa: UP031
 text = text.replace(f'SPECIAL_IMAGE-{src}-REPLACE_ME', repl)
 return text

@@ -98,7 +98,7 @@ def getimagesize(url):
 break
 p.feed(s)
 if p.image:
-return 'width="%i" height="%i"' % p.image.size
+return f'width="{p.image.size[0]}" height="{p.image.size[1]}"'
 except (OSError, ValueError):
 return None

@@ -90,7 +90,7 @@ class RichTextDelegate(QStyledItemDelegate): # {{{
 group = (QPalette.ColorGroup.Active if option.state & QStyle.StateFlag.State_Active else
 QPalette.ColorGroup.Inactive)
 c = p.color(group, QPalette.ColorRole.HighlightedText)
-c = 'rgb(%d, %d, %d)'%c.getRgb()[:3]
+c = 'rgb({}, {}, {})'.format(*c.getRgb()[:3])
 doc.setDefaultStyleSheet(f' * {{ color: {c} }}')
 doc.setHtml(index.data() or '')
 return doc
@@ -202,8 +202,7 @@ class Check(QSplitter):
 fix_tt = _('Try to fix all fixable errors automatically. Only works for some types of error.')
 fix_msg = _('Try to correct all fixable errors automatically')
 run_tt, run_msg = _('Re-run the check'), _('Re-run check')
-header = '<style>a { text-decoration: none}</style><h2>%s [%d / %d]</h2>' % (
-header, self.items.currentRow()+1, self.items.count())
+header = f'<style>a {{text-decoration: none}}</style><h2>{header} [{self.items.currentRow()+1} / {self.items.count()}]</h2>'
 msg = '<p>%s</p>'
 footer = '<div>%s<a href="fix:errors" title="%s">%s</a><br><br> <a href="run:check" title="%s">%s</a></div>'
 if err.has_multiple_locations:
@@ -143,7 +143,7 @@ class CheckExternalLinks(Dialog):
 for i, (locations, err, url) in enumerate(self.errors):
 if i in self.fixed_errors:
 continue
-text += '<li><b>%s</b> \xa0<a href="err:%d">[%s]</a><br>%s<br><ul>' % (url, i, _('Fix this link'), err)
+text += '<li><b>{}</b> \xa0<a href="err:{}">[{}]</a><br>{}<br><ul>'.format(url, i, _('Fix this link'), err)
 for name, href, lnum, col in locations:
 text += '<li>{name} \xa0<a href="loc:{lnum},{name}">[{line}: {lnum}]</a></li>'.format(
 name=name, lnum=lnum, line=_('line number'))
@@ -547,7 +547,7 @@ class NamesDelegate(QStyledItemDelegate):
 positions = sorted(set(positions) - {-1}, reverse=True)
 text = f'<body>{make_highlighted_text(emphasis_style(), text, positions)}</body>'
 doc = QTextDocument()
-c = 'rgb(%d, %d, %d)'%c.getRgb()[:3]
+c = 'rgb({}, {}, {})'.format(*c.getRgb()[:3])
 doc.setDefaultStyleSheet(f' body {{ color: {c} }}')
 doc.setHtml(text)
 doc.setDefaultFont(option.font)
@@ -154,7 +154,7 @@ class Search:
 flags = self.regex_flags
 flags |= regex.DOTALL
 match_any_word = r'(?:\b(?:' + '|'.join(words) + r')\b)'
-joiner = '.{1,%d}' % interval
+joiner = '.{1,%d}' % interval # noqa: UP031
 full_pat = regex.compile(joiner.join(match_any_word for x in words), flags=flags)
 word_pats = tuple(regex.compile(rf'\b{x}\b', flags) for x in words)
 self._nsd = word_pats, full_pat
@@ -399,7 +399,7 @@ class BIBTEX(CatalogPlugin):
 entry['ondevice'] = db.catalog_plugin_on_device_temp_mapping[entry['id']]['ondevice']

 # outfile.write('%%%Calibre catalog\n%%%{0} entries in catalog\n\n'.format(nb_entries))
-outfile.write('@preamble{"This catalog of %d entries was generated by calibre on %s"}\n\n'
+outfile.write('@preamble{"This catalog of %d entries was generated by calibre on %s"}\n\n' # noqa: UP031
 % (nb_entries, strftime('%A, %d. %B %Y %H:%M')))

 for entry in data:
@@ -264,7 +264,7 @@ class CatalogBuilder:
 (str): sort key
 '''
 if not book['series']:
-fs = '{:<%d}!{!s}' % longest_author_sort
+fs = '{:<%d}!{!s}' % longest_author_sort # noqa: UP031
 key = fs.format(capitalize(book['author_sort']),
 capitalize(book['title_sort']))
 else:
@@ -272,7 +272,7 @@ class CatalogBuilder:
 integer = int(index)
 fraction = index - integer
 series_index = f"{integer:04}{str(f'{fraction:0.4f}').lstrip('0')}"
-fs = '{:<%d}~{!s}{!s}' % longest_author_sort
+fs = '{:<%d}~{!s}{!s}' % longest_author_sort # noqa: UP031
 key = fs.format(capitalize(book['author_sort']),
 self.generate_sort_title(book['series']),
 series_index)
@@ -653,9 +653,8 @@ class CatalogBuilder:
 self.opts.log.info(' Custom fields:')
 all_custom_fields = self.db.custom_field_keys()
 for cf in all_custom_fields:
-self.opts.log.info(' %-20s %-20s %s' %
-(cf, "'{}'".format(self.db.metadata_for_field(cf)['name']),
-self.db.metadata_for_field(cf)['datatype']))
+self.opts.log.info(' {:<20} {!r:<20} {}'.format(
+cf, self.db.metadata_for_field(cf)['name'], self.db.metadata_for_field(cf)['datatype']))

 def establish_equivalencies(self, item_list, key=None):
 ''' Return icu equivalent sort letter.
@@ -786,7 +785,7 @@ class CatalogBuilder:
 if self.DEBUG and self.opts.verbose:
 tl = [i['title'] for i in books_by_author]
 lt = max(tl, key=len)
-fs = '{:<6}{:<%d} {:<%d} {}' % (len(lt), len(las))
+fs = '{:<6}{:<%d} {:<%d} {}' % (len(lt), len(las)) # noqa: UP031
 print(fs.format('', 'Title', 'Author', 'Series'))
 for i in books_by_author:
 print(fs.format('', i['title'], i['author_sort'], i['series']))
@@ -835,8 +834,7 @@ class CatalogBuilder:
 if self.DEBUG and self.opts.verbose:
 self.opts.log.info(f'\nfetch_books_by_author(): {len(unique_authors)} unique authors')
 for author in unique_authors:
-self.opts.log.info((' %-50s %-25s %2d' % (author[0][0:45], author[1][0:20],
-author[2])).encode('utf-8'))
+self.opts.log.info(f' {author[0][0:45]:<50} {author[1][0:20]:<25} {author[2]:>2}')
 self.opts.log.info(f'\nfetch_books_by_author(): {len(individual_authors)} individual authors')
 for author in sorted(individual_authors):
 self.opts.log.info(f'{author}')
@@ -865,10 +863,9 @@ class CatalogBuilder:

 if self.DEBUG and self.opts.verbose:
 self.opts.log.info(f'fetch_books_by_title(): {len(self.books_by_title)} books')
-self.opts.log.info(' %-40s %-40s' % ('title', 'title_sort'))
+self.opts.log.info(' {:<40} {:<40}'.format('title', 'title_sort'))
 for title in self.books_by_title:
-self.opts.log.info((' %-40s %-40s' % (title['title'][0:40],
-title['title_sort'][0:40])).encode('utf-8'))
+self.opts.log.info(' {:<40} {:<40}'.format(title['title'][0:40], title['title_sort'][0:40]))
 else:
 error_msg = _("No books to catalog.\nCheck 'Excluded books' rules in the E-book options.\n")
 self.opts.log.error('*** ' + error_msg + ' ***')
@@ -248,7 +248,7 @@ class AuthController:
 self.log = log
 self.secret = as_hex_unicode(os.urandom(random.randint(20, 30)))
 self.max_age_seconds = max_age_seconds
-self.key_order = '{%d}:{%d}:{%d}' % random.choice(tuple(permutations((0,1,2))))
+self.key_order = '{%d}:{%d}:{%d}' % random.choice(tuple(permutations((0,1,2)))) # noqa: UP031
 self.realm = realm
 if '"' in realm:
 raise ValueError('Double-quotes are not allowed in the authentication realm')
@@ -59,8 +59,7 @@ class CustomHelpFormatter(optparse.IndentedHelpFormatter):

 def format_heading(self, heading):
 from calibre.utils.terminal import colored
-return '%*s%s:\n' % (self.current_indent, '',
-colored(heading, fg='blue', bold=True))
+return ' '*self.current_indent + '{}:\n'.format(colored(heading, fg='blue', bold=True))

 def format_option(self, option):
 import textwrap
@@ -71,12 +70,10 @@ class CustomHelpFormatter(optparse.IndentedHelpFormatter):
 opts = self.option_strings[option]
 opt_width = self.help_position - self.current_indent - 2
 if len(opts) > opt_width:
-opts = '%*s%s\n' % (self.current_indent, '',
-colored(opts, fg='green'))
+opts = ' '*self.current_indent + '{}\n'.format(colored(opts, fg='green'))
 indent_first = self.help_position
 else: # start help on same line as opts
-opts = '%*s%-*s ' % (self.current_indent, '', opt_width +
-len(colored('', fg='green')), colored(opts, fg='green'))
+opts = ' '*self.current_indent + '%-*s ' % (opt_width+len(colored('', fg='green')), colored(opts, fg='green')) # noqa: UP031
 indent_first = 0
 result.append(opts)
 if option.help:
@@ -85,9 +82,8 @@ class CustomHelpFormatter(optparse.IndentedHelpFormatter):

 for line in help_text:
 help_lines.extend(textwrap.wrap(line, self.help_width))
-result.append('%*s%s\n' % (indent_first, '', help_lines[0]))
-result.extend(['%*s%s\n' % (self.help_position, '', line)
-for line in help_lines[1:]])
+result.append(' '*indent_first + f'{help_lines[0]}')
+result.extend([' '*self.help_position + f'{line}' for line in help_lines[1:]])
 elif opts[-1] != '\n':
 result.append('\n')
 return ''.join(result)+'\n'
@@ -220,8 +220,7 @@ def option_parser():
 def print_stats(old_stats, new_stats):
 from calibre import prints
 prints('========= Table comparison (original vs. subset) =========')
-prints('Table', ' ', '%10s'%'Size', ' ', 'Percent', ' ', '%10s'%'New Size',
-' New Percent')
+prints('Table', ' ', f"{'Size':>10}", ' ', 'Percent', ' ', f"{'New Size':>10}", ' New Percent')
 prints('='*80)
 old_total = sum(itervalues(old_stats))
 new_total = sum(itervalues(new_stats))
@@ -235,8 +234,8 @@ def print_stats(old_stats, new_stats):
 suffix = ' | same size'
 if nsz != osz:
 suffix = f' | reduced to {nsz/osz*100:.1f} %'
-prints(f'{table:4}', ' ', f'{osz:10}', ' ', f'{op:5.1f} %', ' ',
-f'{nsz:10}', ' ', f'{np:5.1f} %', suffix)
+prints(f'{table:>4}', ' ', f'{osz:>10}', ' ', f'{op:5.1f} %', ' ',
+f'{nsz:>10}', ' ', f'{np:5.1f} %', suffix)
 prints('='*80)


@@ -971,10 +971,11 @@ class ZipFile:

 def printdir(self):
 '''Print a table of contents for the zip file.'''
-print('%-46s %19s %12s' % ('File Name', 'Modified ', 'Size'))
+print(f"{'File Name':<46} {'Modified ':>19} {'Size':>12}")
 for zinfo in self.filelist:
-date = '%d-%02d-%02d %02d:%02d:%02d' % zinfo.date_time[:6]
-print('%-46s %s %12d' % (zinfo.filename, date, zinfo.file_size))
+date = (f'{zinfo.date_time[0]}-{zinfo.date_time[1]:02}-{zinfo.date_time[2]:02} '
+f'{zinfo.date_time[3]:02}:{zinfo.date_time[4]:02}:{zinfo.date_time[5]:02}')
+print(f'{zinfo.filename:<46} {date:>19} {zinfo.file_size:>12}')

 def testzip(self):
 '''Read all the files and check the CRC.'''
@@ -250,7 +250,7 @@ class Feed:
 return len(self.articles)

 def __repr__(self):
-res = [('%20s\n'%'').replace(' ', '_')+repr(art) for art in self]
+res = ['_'*20 + f'\n{art!r}' for art in self]

 return '\n'+'\n'.join(res)+'\n'

@@ -1693,7 +1693,7 @@ class BasicNewsRecipe(Recipe):
 f.title, play_order=po, description=desc, author=auth))

 else:
-entries.append('feed_%d/index.html'%0)
+entries.append('feed_0/index.html')
 feed_index(0, toc)

 for i, p in enumerate(entries):
@@ -427,7 +427,7 @@ class SchedulerConfig:
 schedule = 0.04
 text = f'{schedule:f}'
 elif typ == 'day/time':
-text = '%d:%d:%d'%schedule
+text = f'{int(schedule[0])}:{int(schedule[1])}:{int(schedule[2])}'
 elif typ in ('days_of_week', 'days_of_month'):
 dw = ','.join(map(str, map(int, schedule[0])))
 text = f'{dw}:{int(schedule[1])}:{int(schedule[2])}'