Mirror of https://github.com/kovidgoyal/calibre.git
add noqa for unconvertible % format

commit 1bb7a160f8 (parent 75aff43417)
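
The commit touches call sites where ruff's pyupgrade rule UP031 ("use format specifiers instead of percent format") fires but a mechanical rewrite is impractical: dynamic `*` field widths, and regexes or templates whose literal `{...}` braces would collide with str.format/f-string syntax. Each such line gets a trailing `# noqa: UP031` so the rule stays enabled everywhere else. A minimal sketch of the suppression mechanism, with illustrative strings that are not from the calibre sources:

# Minimal sketch, not calibre code: UP031 flags printf-style '%' formatting;
# a trailing noqa silences it on that one line only.
name, count = 'calibre', 3
flagged = '%s has %d entries' % (name, count)    # ruff UP031 would flag this line
kept = '%s has %d entries' % (name, count)  # noqa: UP031
converted = f'{name} has {count} entries'        # the rewrite UP031 asks for
assert flagged == kept == converted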
@@ -231,7 +231,7 @@ def do_list(
     widths = list(base_widths)
     titles = map(
-        lambda x, y: '%-*s%s' % (x - len(separator), y, separator), widths,
+        lambda x, y: '%-*s%s' % (x - len(separator), y, separator), widths,  # noqa: UP031
         [template_title if v == 'template' else v for v in fields]
     )
     with ColoredStream(sys.stdout, fg='green'):
@@ -99,7 +99,7 @@ def do_list(fields, data, opts):
     widths = list(base_widths)
     titles = map(
-        lambda x, y: '%-*s%s' % (x - len(separator), y, separator), widths, fields
+        lambda x, y: '%-*s%s' % (x - len(separator), y, separator), widths, fields  # noqa: UP031
     )
     with ColoredStream(sys.stdout, fg='green'):
         prints(''.join(titles))
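
In both do_list hunks the `%-*s` conversion takes its field width from the argument tuple, which is exactly the case the UP031 autofix does not handle; a manual rewrite would need a nested width field. A hedged sketch with made-up column values (in calibre the widths come from base_widths):

# Illustrative values only, not calibre's.
separator, title, width = ' | ', 'Authors', 12
old_style = '%-*s%s' % (width - len(separator), title, separator)  # noqa: UP031
new_style = '{:<{}}{}'.format(title, width - len(separator), separator)  # nested width field
assert old_style == new_style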
@@ -265,16 +265,16 @@ class Dehyphenator:
         self.format = format
         if format == 'html':
             intextmatch = re.compile((
-                r'(?<=.{%i})(?P<firstpart>[^\W\-]+)(-|‐)\s*(?=<)(?P<wraptags>(</span>)?'
+                r'(?<=.{%i})(?P<firstpart>[^\W\-]+)(-|‐)\s*(?=<)(?P<wraptags>(</span>)?'  # noqa: UP031
                 r'\s*(</[iubp]>\s*){1,2}(?P<up2threeblanks><(p|div)[^>]*>\s*(<p[^>]*>\s*</p>\s*)'
                 r'?</(p|div)>\s+){0,3}\s*(<[iubp][^>]*>\s*){1,2}(<span[^>]*>)?)\s*(?P<secondpart>[\w\d]+)') % length)
         elif format == 'pdf':
             intextmatch = re.compile((
-                r'(?<=.{%i})(?P<firstpart>[^\W\-]+)(-|‐)\s*(?P<wraptags><p>|'
+                r'(?<=.{%i})(?P<firstpart>[^\W\-]+)(-|‐)\s*(?P<wraptags><p>|'  # noqa: UP031
                 r'</[iub]>\s*<p>\s*<[iub]>)\s*(?P<secondpart>[\w\d]+)')% length)
         elif format == 'txt':
             intextmatch = re.compile(
-                r'(?<=.{%i})(?P<firstpart>[^\W\-]+)(-|‐)( |\t)*(?P<wraptags>(\n( |\t)*)+)(?P<secondpart>[\w\d]+)'% length)
+                r'(?<=.{%i})(?P<firstpart>[^\W\-]+)(-|‐)( |\t)*(?P<wraptags>(\n( |\t)*)+)(?P<secondpart>[\w\d]+)'% length)  # noqa: UP031
         elif format == 'individual_words':
             intextmatch = re.compile(
                 r'(?!<)(?P<firstpart>[^\W\-]+)(-|‐)\s*(?P<secondpart>\w+)(?![^<]*?>)', re.UNICODE)
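
The Dehyphenator patterns interpolate the minimum line length with `%i` into regexes that also contain literal quantifier braces such as `{1,2}` and `{0,3}`; a str.format or f-string rewrite would have to double every one of those braces, so the percent form is kept and the rule silenced. A simplified, hedged sketch of the same trade-off (the pattern below is made up and much shorter than calibre's):

import re

length = 6  # stand-in for the computed line length
# Percent form: the regex braces are plain text, only %i is special.
pat = re.compile(r'(?<=.{%i})(?P<firstpart>\w+)(-|‐)\s*(?P<secondpart>\w+)' % length)  # noqa: UP031
# f-string form of the same pattern: every literal brace must be doubled.
pat_f = re.compile(rf'(?<=.{{{length}}})(?P<firstpart>\w+)(-|‐)\s*(?P<secondpart>\w+)')
assert pat.pattern == pat_f.pattern
m = pat.search('some wrapped exam- ple text')
print(m.group('firstpart'), m.group('secondpart'))  # exam ple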
@@ -529,11 +529,11 @@ class HTMLPreProcessor:
             # print('The pdf line length returned is ' + str(length))
             # unwrap em/en dashes
             end_rules.append((re.compile(
-                r'(?<=.{%i}[–—])\s*<p>\s*(?=[\[a-z\d])' % length), lambda match: ''))
+                r'(?<=.{%i}[–—])\s*<p>\s*(?=[\[a-z\d])' % length), lambda match: ''))  # noqa: UP031
             end_rules.append(
                 # Un wrap using punctuation
                 (re.compile((
-                    r'(?<=.{%i}([a-zäëïöüàèìòùáćéíĺóŕńśúýâêîôûçąężıãõñæøþðßěľščťžňďřů,:)\\IAß]'
+                    r'(?<=.{%i}([a-zäëïöüàèìòùáćéíĺóŕńśúýâêîôûçąężıãõñæøþðßěľščťžňďřů,:)\\IAß]'  # noqa: UP031
                     r'|(?<!\&\w{4});))\s*(?P<ital></(i|b|u)>)?\s*(</p>\s*<p>\s*)+\s*(?=(<(i|b|u)>)?'
                     r'\s*[\w\d$(])') % length, re.UNICODE), wrap_lines),
             )
@@ -286,7 +286,7 @@ class MetadataUpdater:
             s, src = src[:length],src[length:]
             hexa = ' '.join([f'{ord(x):02X}' for x in s])
             s = s.translate(FILTER)
-            result += '%04X %-*s %s\n' % (N, length*3, hexa, s)
+            result += '%04X %-*s %s\n' % (N, length*3, hexa, s)  # noqa: UP031
             N+=length
         print(result)

@@ -161,7 +161,7 @@ class MetadataUpdater:
             s, src = src[:length],src[length:]
             hexa = ' '.join([f'{ord(x):02X}' for x in s])
             s = s.translate(FILTER)
-            result += '%04X %-*s %s\n' % (N, length*3, hexa, s)
+            result += '%04X %-*s %s\n' % (N, length*3, hexa, s)  # noqa: UP031
             N+=length
         print(result)

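
Both MetadataUpdater.hexdump hunks format an offset, a hex column of dynamic width (three characters per byte) and the printable form of the chunk; the `*` width again has no automatic UP031 fix. A self-contained sketch of the same formatting idea, not the calibre implementation (which iterates a str and filters it through a translation table):

def hexdump_sketch(data: bytes, length: int = 16) -> str:
    result = ''
    for offset in range(0, len(data), length):
        chunk = data[offset:offset + length]
        hexa = ' '.join(f'{b:02X}' for b in chunk)
        text = ''.join(chr(b) if 32 <= b < 127 else '.' for b in chunk)
        # '%-*s' pads the hex column to length*3 characters so a short final row still lines up
        result += '%04X %-*s %s\n' % (offset, length * 3, hexa, text)  # noqa: UP031
    return result

print(hexdump_sketch(b'EXTH\x00\x00\x00\x20 metadata record'))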
@@ -777,7 +777,7 @@ def add_pagenum_toc(root, toc, opts, page_number_display_map):
     .calibre-pdf-toc .level-%d td:first-of-type { padding-left: %.1gem }
     .calibre-pdf-toc .level-%d td:first-of-type { padding-left: %.1gem }
     .calibre-pdf-toc .level-%d td:first-of-type { padding-left: %.1gem }
-    ''' % tuple(indents) + (opts.extra_css or '')
+    ''' % tuple(indents) + (opts.extra_css or '')  # noqa: UP031
     style = body.makeelement(XHTML('style'), type='text/css')
     style.text = css
     body.append(style)
@@ -177,7 +177,7 @@ class RTFMLizer:
                 self.log.exception(f'Image {item.href} is corrupted, ignoring')
                 repl = '\n\n'
             else:
-                repl = '\n\n{\\*\\shppict{\\pict\\jpegblip\\picw%i\\pich%i \n%s\n}}\n\n' % (width, height, data)
+                repl = '\n\n{\\*\\shppict{\\pict\\jpegblip\\picw%i\\pich%i \n%s\n}}\n\n' % (width, height, data)  # noqa: UP031
             text = text.replace(f'SPECIAL_IMAGE-{src}-REPLACE_ME', repl)
         return text

@@ -154,7 +154,7 @@ class Search:
         flags = self.regex_flags
         flags |= regex.DOTALL
         match_any_word = r'(?:\b(?:' + '|'.join(words) + r')\b)'
-        joiner = '.{1,%d}' % interval
+        joiner = '.{1,%d}' % interval  # noqa: UP031
         full_pat = regex.compile(joiner.join(match_any_word for x in words), flags=flags)
         word_pats = tuple(regex.compile(rf'\b{x}\b', flags) for x in words)
         self._nsd = word_pats, full_pat
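
In the proximity search, `.{1,%d}` glues the word patterns together with a bounded gap; the `{1,%d}` braces are literal to the % operator but would be parsed as a replacement field by str.format, so the percent form stays. A hedged sketch with illustrative words and interval (calibre uses the third-party regex module; re is enough here):

import re

words = ['quick', 'fox']  # illustrative; calibre derives these from the search query
interval = 10
match_any_word = r'(?:\b(?:' + '|'.join(words) + r')\b)'
joiner = '.{1,%d}' % interval  # noqa: UP031
# joiner is now '.{1,10}': an up-to-interval-characters gap between two word matches
full_pat = re.compile(joiner.join(match_any_word for _ in words), flags=re.DOTALL)
print(bool(full_pat.search('the quick brown fox')))  # True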
@@ -399,7 +399,7 @@ class BIBTEX(CatalogPlugin):
                 entry['ondevice'] = db.catalog_plugin_on_device_temp_mapping[entry['id']]['ondevice']

         # outfile.write('%%%Calibre catalog\n%%%{0} entries in catalog\n\n'.format(nb_entries))
-        outfile.write('@preamble{"This catalog of %d entries was generated by calibre on %s"}\n\n'
+        outfile.write('@preamble{"This catalog of %d entries was generated by calibre on %s"}\n\n'  # noqa: UP031
             % (nb_entries, strftime('%A, %d. %B %Y %H:%M')))

         for entry in data:
@@ -264,7 +264,7 @@ class CatalogBuilder:
         (str): sort key
         '''
         if not book['series']:
-            fs = '{:<%d}!{!s}' % longest_author_sort
+            fs = '{:<%d}!{!s}' % longest_author_sort  # noqa: UP031
             key = fs.format(capitalize(book['author_sort']),
                             capitalize(book['title_sort']))
         else:
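
Here the percent operator builds the template that is later passed to str.format: `'{:<%d}!{!s}' % longest_author_sort` becomes something like `'{:<20}!{!s}'`. The braces are literal to `%` but meaningful to the later .format call, so an automatic UP031 rewrite would have to escape them; suppressing the rule is the simpler fix. A hedged sketch with made-up values:

longest_author_sort = 20  # illustrative; calibre computes this from the catalog data
fs = '{:<%d}!{!s}' % longest_author_sort  # noqa: UP031
# fs is now '{:<20}!{!s}', itself a str.format template
key = fs.format('Austen, Jane', 'Pride and Prejudice')
print(repr(key))  # the author sort padded to 20 columns, '!', then the title sort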
@@ -272,7 +272,7 @@ class CatalogBuilder:
             integer = int(index)
             fraction = index - integer
             series_index = f"{integer:04}{str(f'{fraction:0.4f}').lstrip('0')}"
-            fs = '{:<%d}~{!s}{!s}' % longest_author_sort
+            fs = '{:<%d}~{!s}{!s}' % longest_author_sort  # noqa: UP031
             key = fs.format(capitalize(book['author_sort']),
                             self.generate_sort_title(book['series']),
                             series_index)
@@ -785,7 +785,7 @@ class CatalogBuilder:
         if self.DEBUG and self.opts.verbose:
             tl = [i['title'] for i in books_by_author]
             lt = max(tl, key=len)
-            fs = '{:<6}{:<%d} {:<%d} {}' % (len(lt), len(las))
+            fs = '{:<6}{:<%d} {:<%d} {}' % (len(lt), len(las))  # noqa: UP031
             print(fs.format('', 'Title', 'Author', 'Series'))
             for i in books_by_author:
                 print(fs.format('', i['title'], i['author_sort'], i['series']))
@@ -248,7 +248,7 @@ class AuthController:
         self.log = log
         self.secret = as_hex_unicode(os.urandom(random.randint(20, 30)))
         self.max_age_seconds = max_age_seconds
-        self.key_order = '{%d}:{%d}:{%d}' % random.choice(tuple(permutations((0,1,2))))
+        self.key_order = '{%d}:{%d}:{%d}' % random.choice(tuple(permutations((0,1,2))))  # noqa: UP031
         self.realm = realm
         if '"' in realm:
             raise ValueError('Double-quotes are not allowed in the authentication realm')
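
The same pattern appears in AuthController: `%d` generates the positional str.format fields themselves, producing a template such as `'{1}:{0}:{2}'` with a per-process random field order. A hedged sketch of what the line evaluates to:

import random
from itertools import permutations

key_order = '{%d}:{%d}:{%d}' % random.choice(tuple(permutations((0, 1, 2))))  # noqa: UP031
print(key_order)                        # e.g. '{2}:{0}:{1}'
print(key_order.format('a', 'b', 'c'))  # the same three values in that random order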
@@ -73,7 +73,7 @@ class CustomHelpFormatter(optparse.IndentedHelpFormatter):
             opts = ' '*self.current_indent + '{}\n'.format(colored(opts, fg='green'))
             indent_first = self.help_position
         else:  # start help on same line as opts
-            opts = ' '*self.current_indent + '%-*s ' % (opt_width+len(colored('', fg='green')), colored(opts, fg='green'))
+            opts = ' '*self.current_indent + '%-*s ' % (opt_width+len(colored('', fg='green')), colored(opts, fg='green'))  # noqa: UP031
             indent_first = 0
         result.append(opts)
         if option.help:
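
In the help formatter the `*` width is widened by `len(colored('', fg='green'))`, i.e. by the number of invisible ANSI escape characters that the colouring adds, so the visible columns still line up. A simplified, hedged sketch with a stand-in colored() (not calibre's implementation):

def colored(text, fg='green'):
    # stand-in for calibre's colored(): wrap the text in ANSI green escape codes
    return f'\033[32m{text}\033[39m'

opt_width = 20
for opts, help_text in (('-h, --help', 'show this help message and exit'),
                        ('--version', "show program's version number and exit")):
    # widen the field by the escape-code length so the visible width stays at opt_width
    line = '%-*s %s' % (opt_width + len(colored('', fg='green')), colored(opts, fg='green'), help_text)  # noqa: UP031
    print(line)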