Merge branch 'pyproject-add-codespell' of https://github.com/un-pogaz/calibre

This commit is contained in:
Kovid Goyal 2025-03-24 08:24:25 +05:30
commit 8e90212f67
No known key found for this signature in database
GPG Key ID: 06BC317B515ACE7C
110 changed files with 280 additions and 161 deletions

View File

@ -496,7 +496,7 @@
:: improved recipes :: improved recipes
- Jot Down - Jot Down
- Various Russian and Ukranian news sources - Various Russian and Ukrainian news sources
- Nautilus Magazine - Nautilus Magazine
- Süddeutsche Zeitung - Süddeutsche Zeitung
- The India Forum - The India Forum
@ -974,7 +974,7 @@
- Fix a regression in 7.0 caused by a regression in Qt that would result in calibre hanging rarely when using the cover browser view - Fix a regression in 7.0 caused by a regression in Qt that would result in calibre hanging rarely when using the cover browser view
- [2049992] Fix custom template functions not useable in save to disk templates - [2049992] Fix custom template functions not usable in save to disk templates
- Fix a regression in 7.2 that caused the popup used for editing fields in the book list to be mis-positioned on very wide monitors - Fix a regression in 7.2 that caused the popup used for editing fields in the book list to be mis-positioned on very wide monitors

View File

@ -55,10 +55,10 @@ def binary_includes():
get_dll_path('bz2', 2), j(PREFIX, 'lib', 'libunrar.so'), get_dll_path('bz2', 2), j(PREFIX, 'lib', 'libunrar.so'),
get_dll_path('python' + py_ver, 2), get_dll_path('jbig', 2), get_dll_path('python' + py_ver, 2), get_dll_path('jbig', 2),
# We dont include libstdc++.so as the OpenGL dlls on the target # We don't include libstdc++.so as the OpenGL dlls on the target
# computer fail to load in the QPA xcb plugin if they were compiled # computer fail to load in the QPA xcb plugin if they were compiled
# with a newer version of gcc than the one on the build computer. # with a newer version of gcc than the one on the build computer.
# libstdc++, like glibc is forward compatible and I dont think any # libstdc++, like glibc is forward compatible and I don't think any
# distros do not have libstdc++.so.6, so it should be safe to leave it out. # distros do not have libstdc++.so.6, so it should be safe to leave it out.
# https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html (The current # https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html (The current
# debian stable libstdc++ is libstdc++.so.6.0.17) # debian stable libstdc++ is libstdc++.so.6.0.17)

View File

@ -13,10 +13,10 @@ entitlements = {
# MAP_JIT is used by libpcre which is bundled with Qt # MAP_JIT is used by libpcre which is bundled with Qt
'com.apple.security.cs.allow-jit': True, 'com.apple.security.cs.allow-jit': True,
# v8 and therefore WebEngine need this as they dont use MAP_JIT # v8 and therefore WebEngine need this as they don't use MAP_JIT
'com.apple.security.cs.allow-unsigned-executable-memory': True, 'com.apple.security.cs.allow-unsigned-executable-memory': True,
# calibre itself does not use DYLD env vars, but dont know about its # calibre itself does not use DYLD env vars, but don't know about its
# dependencies. # dependencies.
'com.apple.security.cs.allow-dyld-environment-variables': True, 'com.apple.security.cs.allow-dyld-environment-variables': True,

View File

@ -346,7 +346,7 @@ int WINAPI wWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, PWSTR pCmdLine
return write_bytes(pipe, echo_sz, echo_buf) ? 0 : 1; return write_bytes(pipe, echo_sz, echo_buf) ? 0 : 1;
} }
if (app_uid != NULL) { if (app_uid != NULL) {
// dont check return status as failure is not critical // don't check return status as failure is not critical
set_app_uid(app_uid); set_app_uid(app_uid);
} }

View File

@ -5,16 +5,16 @@
<SummaryInformation Keywords="Installer" Description="{app} Installer" Manufacturer="Kovid Goyal" /> <SummaryInformation Keywords="Installer" Description="{app} Installer" Manufacturer="Kovid Goyal" />
<!-- Disable creation of system restore points on calibre installs. Speeds <!-- Disable creation of system restore points on calibre installs. Speeds
up the install. We dont need system restore since we dont install any up the install. We don't need system restore since we don't install any
system DLLs/components anyway (apart from start menu entries) --> system DLLs/components anyway (apart from start menu entries) -->
<Property Id="MSIFASTINSTALL" Value="3" /> <Property Id="MSIFASTINSTALL" Value="3" />
<Media Id="1" Cabinet="{app}.cab" CompressionLevel="{compression}" EmbedCab="yes" /> <Media Id="1" Cabinet="{app}.cab" CompressionLevel="{compression}" EmbedCab="yes" />
<!-- The following line ensures that DLLs are replaced even if <!-- The following line ensures that DLLs are replaced even if
their version is the same as before or they dont have versions. their version is the same as before or they don't have versions.
Microsoft's brain dead installer will otherwise use file dates to Microsoft's brain dead installer will otherwise use file dates to
determine whether to install a file or not. Simply not robust. And determine whether to install a file or not. Simply not robust. And
since we dont install any system files whatsoever, we can never replace since we don't install any system files whatsoever, we can never replace
a system file with an older version. This way the calibre install a system file with an older version. This way the calibre install
should always result in a consistent set of files being present in the should always result in a consistent set of files being present in the
installation folder, though of course, with Microsoft there are no installation folder, though of course, with Microsoft there are no

View File

@ -402,7 +402,7 @@ V. General Format of a .ZIP file
13 - Acorn Risc 14 - VFAT 13 - Acorn Risc 14 - VFAT
15 - alternate MVS 16 - BeOS 15 - alternate MVS 16 - BeOS
17 - Tandem 18 - OS/400 17 - Tandem 18 - OS/400
19 - OS/X (Darwin) 20 thru 255 - unused 19 - OS/X (Darwin) 20 through 255 - unused
The lower byte indicates the ZIP specification version The lower byte indicates the ZIP specification version
(the version of this document) supported by the software (the version of this document) supported by the software
@ -719,7 +719,7 @@ V. General Format of a .ZIP file
The Header ID field indicates the type of data that is in The Header ID field indicates the type of data that is in
the following data block. the following data block.
Header ID's of 0 thru 31 are reserved for use by PKWARE. Header ID's of 0 through 31 are reserved for use by PKWARE.
The remaining ID's can be used by third party vendors for The remaining ID's can be used by third party vendors for
proprietary usage. proprietary usage.
@ -1769,7 +1769,7 @@ Example: 0x02, 0x42, 0x01, 0x13
This would generate the original bit length array of: This would generate the original bit length array of:
(3, 3, 3, 3, 3, 2, 4, 4) (3, 3, 3, 3, 3, 2, 4, 4)
There are 8 codes in this table for the values 0 thru 7. Using There are 8 codes in this table for the values 0 through 7. Using
the algorithm to obtain the Shannon-Fano codes produces: the algorithm to obtain the Shannon-Fano codes produces:
Reversed Order Original Reversed Order Original
@ -1909,8 +1909,8 @@ The bit lengths for the literal tables are sent first with the number
of entries sent described by the 5 bits sent earlier. There are up of entries sent described by the 5 bits sent earlier. There are up
to 286 literal characters; the first 256 represent the respective 8 to 286 literal characters; the first 256 represent the respective 8
bit character, code 256 represents the End-Of-Block code, the remaining bit character, code 256 represents the End-Of-Block code, the remaining
29 codes represent copy lengths of 3 thru 258. There are up to 30 29 codes represent copy lengths of 3 through 258. There are up to 30
distance codes representing distances from 1 thru 32k as described distance codes representing distances from 1 through 32k as described
below. below.
Length Codes Length Codes
@ -2221,7 +2221,7 @@ keys, based on random data, to render a plaintext attack on the
data ineffective. data ineffective.
Read the 12-byte encryption header into Buffer, in locations Read the 12-byte encryption header into Buffer, in locations
Buffer(0) thru Buffer(11). Buffer(0) through Buffer(11).
loop for i <- 0 to 11 loop for i <- 0 to 11
C <- buffer(i) ^ decrypt_byte() C <- buffer(i) ^ decrypt_byte()

View File

@ -14,7 +14,7 @@ from calibre.utils.config import JSONConfig
# Remember that this name (i.e. plugins/interface_demo) is also # Remember that this name (i.e. plugins/interface_demo) is also
# in a global namespace, so make it as unique as possible. # in a global namespace, so make it as unique as possible.
# You should always prefix your config file name with plugins/, # You should always prefix your config file name with plugins/,
# so as to ensure you dont accidentally clobber a calibre config file # so as to ensure you don't accidentally clobber a calibre config file
prefs = JSONConfig('plugins/interface_demo') prefs = JSONConfig('plugins/interface_demo')
# Set defaults # Set defaults

View File

@ -140,7 +140,7 @@ class DemoDialog(QDialog):
set_metadata(ffile, mi, fmt) set_metadata(ffile, mi, fmt)
ffile.seek(0) ffile.seek(0)
# Now replace the file in the calibre library with the updated # Now replace the file in the calibre library with the updated
# file. We dont use add_format_with_hooks as the hooks were # file. We don't use add_format_with_hooks as the hooks were
# already run when the file was first added to calibre. # already run when the file was first added to calibre.
db.add_format(book_id, fmt, ffile, run_hooks=False) db.add_format(book_id, fmt, ffile, run_hooks=False)

View File

@ -22,7 +22,7 @@ class InterfacePlugin(InterfaceAction):
name = 'Interface Plugin Demo' name = 'Interface Plugin Demo'
# Declare the main action associated with this plugin # Declare the main action associated with this plugin
# The keyboard shortcut can be None if you dont want to use a keyboard # The keyboard shortcut can be None if you don't want to use a keyboard
# shortcut. Remember that currently calibre has no central management for # shortcut. Remember that currently calibre has no central management for
# keyboard shortcuts, so try to use an unusual/unused shortcut. # keyboard shortcuts, so try to use an unusual/unused shortcut.
action_spec = ('Interface Plugin Demo', None, action_spec = ('Interface Plugin Demo', None,

View File

@ -74,6 +74,125 @@ docstring-quotes = 'single'
inline-quotes = 'single' inline-quotes = 'single'
multiline-quotes = 'single' multiline-quotes = 'single'
[tool.codespell]
# calibre will probably never be fully compliant with codespell
# this setting is only to easily find common typo errors
# by filtering a great range of false-positives, but not all
# (if codespell could ignore words per file, it'd be nicer)
count = false
summary = false
quiet-level = 3
regex = '''\b(?<!&)(?<!&amp;)[\w\-']+(?!&(amp;)?)\b'''
builtin = [
'clear',
'rare',
'informal',
'code',
]
ignore-words-list = [
"alo",
"ans",
"clen",
"eto",
"fo",
"nam",
"nd",
"som",
"te",
"atLeast",
"Implementor",
"implementor",
"Implementors",
"implementors",
"missings",
"re-use",
"re-used",
"re-using",
"succeded",
# code
"ws",
"ws",
"ws",
"ws",
"dur",
"ro",
"snd",
"ws",
"deque",
"assertIn",
"atEnd",
"endcode",
"errorString",
"FocusIn",
"iff",
"lets",
"lite",
"NMAKE",
"nmake",
"uDate",
"UINT",
"uInt",
"uint",
"KeyPair",
"Keypair",
"keypair",
"Referer",
"seeked",
"sinc",
"stdio",
"thead",
]
uri-ignore-words-list = '*'
skip = [
"*.svg",
"*.rcc",
"*_ui.py",
"./src/calibre/ebooks/rtf2xml/char_set.py",
"./src/calibre/ebooks/unihandecode/*",
"./src/calibre/ebooks/html_entities.h",
"./src/calibre/ebooks/html_entities.py",
"./src/calibre/utils/icu_test.py",
"./src/calibre/utils/search_query_parser_test.py",
"./Changelog.old.txt",
"./COPYRIGHT",
"./LICENSE",
"./LICENSE.rtf",
"./session.vim",
"./build/*",
"./docs/*",
"./nbproject/*",
"./recipes/*",
"./translations/*",
"./tags/*",
"./manual/generated/*",
"./manual/locale/*",
"./resources/dictionaries/*",
"./resources/localization/*",
"./resources/hyphenation/*",
"./resources/mathjax/*",
"./resources/builtin_recipes.xml",
"./resources/changelog.json",
"./resources/editor.js",
"./resources/editor-functions.json",
"./resources/mime.types",
"./resources/piper-voices.json",
"./resources/stylelint-bundle.min.js",
"./resources/user-manual-translation-stats.json",
"./resources/template-functions.json",
"./resources/viewer.js",
"./resources/viewer.html",
"./resources/content-server/index-generated.html",
"./setup/installer/*",
"./setup/pyqt_enums/*",
"./setup/lc_data.py",
"./setup/linux-installer.py",
"./src/css_selectors/*",
"./src/polyglot/*",
"./src/templite/*",
"./src/tinycss/*",
"./src/unicode_names/*",
]
[tool.flynt] [tool.flynt]
line-length = 400 # over value to catch every case line-length = 400 # over value to catch every case
transform-format = false # don't transform already existing format call transform-format = false # don't transform already existing format call

View File

@ -81,7 +81,7 @@ class BaltimoreSun(BasicNewsRecipe):
(u'Maryland Weather', u'http://www.baltimoresun.com/news/weather/weather-blog/rss2.0.xml'), (u'Maryland Weather', u'http://www.baltimoresun.com/news/weather/weather-blog/rss2.0.xml'),
(u'Second Opinion', u'http://www.baltimoresun.com/news/opinion/second-opinion-blog/rss2.0.xml'), (u'Second Opinion', u'http://www.baltimoresun.com/news/opinion/second-opinion-blog/rss2.0.xml'),
(u'Sun Investigates', u'http://www.baltimoresun.com/news/maryland/sun-investigates/rss2.0.xml'), (u'Sun Investigates', u'http://www.baltimoresun.com/news/maryland/sun-investigates/rss2.0.xml'),
(u'You Dont Say', u'http://www.baltimoresun.com/news/language-blog/rss2.0.xml'), (u"You Don't Say", u'http://www.baltimoresun.com/news/language-blog/rss2.0.xml'),
# Business Blogs ## # Business Blogs ##
(u'BaltTech', u'http://www.baltimoresun.com/business/technology/blog/rss2.0.xml'), (u'BaltTech', u'http://www.baltimoresun.com/business/technology/blog/rss2.0.xml'),

View File

@ -45,7 +45,7 @@ class epw(BasicNewsRecipe):
'/styles/freeissue/public', '' '/styles/freeissue/public', ''
) )
except Exception: except Exception:
# sometimes they dont add img src # sometimes they don't add img src
self.cover_url = 'https://www.epw.in/sites/default/files/cache/cover_images/2022/Cover_4June2022_Big.gif' self.cover_url = 'https://www.epw.in/sites/default/files/cache/cover_images/2022/Cover_4June2022_Big.gif'
feeds = OrderedDict() feeds = OrderedDict()

View File

@ -61,7 +61,7 @@ class ESPN(BasicNewsRecipe):
def get_browser(self): def get_browser(self):
br = BasicNewsRecipe.get_browser(self) br = BasicNewsRecipe.get_browser(self)
if False and self.username and self.password: if False and self.username and self.password:
# ESPN has changed to a JS based login system, cant be bothered # ESPN has changed to a JS based login system, can't be bothered
# reversing it # reversing it
br.set_handle_refresh(False) br.set_handle_refresh(False)
url = ('https://r.espn.go.com/members/v3_1/login') url = ('https://r.espn.go.com/members/v3_1/login')

View File

@ -71,7 +71,7 @@ class ft(BasicNewsRecipe):
# def get_browser(self, *args, **kw): # def get_browser(self, *args, **kw):
# br = super().get_browser(*args, **kw) # br = super().get_browser(*args, **kw)
# if self.username and self.password: # if self.username and self.password:
# # ft.com uses a CAPTCHA on its login page so this sadly doesnt work # # ft.com uses a CAPTCHA on its login page so this sadly doesn't work
# br.open('https://accounts.ft.com/login?location=https%3A%2F%2Fwww.ft.com') # br.open('https://accounts.ft.com/login?location=https%3A%2F%2Fwww.ft.com')
# br.select_form(id='email-form') # br.select_form(id='email-form')
# br['email'] = self.username # br['email'] = self.username

View File

@ -77,7 +77,7 @@ class IndependentAustralia(BasicNewsRecipe):
businessArticles = [] businessArticles = []
lifeArticles = [] lifeArticles = []
australiaArticles = [] australiaArticles = []
# Loop thru the articles in all feeds to find articles with base categories in it # Loop through the articles in all feeds to find articles with base categories in it
for curfeed in feeds: for curfeed in feeds:
delList = [] delList = []
for a, curarticle in enumerate(curfeed.articles): for a, curarticle in enumerate(curfeed.articles):

View File

@ -56,7 +56,7 @@ class TheMiamiHerald(BasicNewsRecipe):
] ]
def get_browser(self, *a, **kw): def get_browser(self, *a, **kw):
# MyClatchy servers dont like the user-agent header, they hang forever # MyClatchy servers don't like the user-agent header, they hang forever
# when it is present # when it is present
br = BasicNewsRecipe.get_browser(self, *a, **kw) br = BasicNewsRecipe.get_browser(self, *a, **kw)
br.addheaders = [x for x in br.addheaders if x[0].lower() != 'user-agent'] br.addheaders = [x for x in br.addheaders if x[0].lower() != 'user-agent']

View File

@ -107,7 +107,7 @@ class SatMagazine(BasicNewsRecipe):
title_number = 0 title_number = 0
# Goes thru all the articles one by one and sort them out # Goes through all the articles one by one and sort them out
for article in articles: for article in articles:
title = self.tag_to_string(article) title = self.tag_to_string(article)

View File

@ -8,7 +8,7 @@
(function() { (function() {
"use strict"; "use strict";
// wrap up long words that dont fit in the page // wrap up long words that don't fit in the page
document.body.style.overflowWrap = 'break-word'; document.body.style.overflowWrap = 'break-word';
var break_avoid_block_styles = { var break_avoid_block_styles = {

View File

@ -12,7 +12,7 @@
var settings = SETTINGS; var settings = SETTINGS;
function onclick(event) { function onclick(event) {
// We dont want this event to trigger onclick on this element's parent // We don't want this event to trigger onclick on this element's parent
// block, if any. // block, if any.
event.stopPropagation(); event.stopPropagation();
var frac = window.pageYOffset/document.body.scrollHeight; var frac = window.pageYOffset/document.body.scrollHeight;

View File

@ -580,7 +580,7 @@ class Build(Command):
if iswindows or env is self.windows_cross_env: if iswindows or env is self.windows_cross_env:
pre_ld_flags = [] pre_ld_flags = []
if ext.uses_icu: if ext.uses_icu:
# windows has its own ICU libs that dont work # windows has its own ICU libs that don't work
pre_ld_flags = elib pre_ld_flags = elib
cmd += pre_ld_flags + env.ldflags + ext.ldflags + elib + xlib + \ cmd += pre_ld_flags + env.ldflags + ext.ldflags + elib + xlib + \
['/EXPORT:' + init_symbol_name(ext.name)] + all_objects + ['/OUT:'+dest] ['/EXPORT:' + init_symbol_name(ext.name)] + all_objects + ['/OUT:'+dest]
@ -623,7 +623,7 @@ class Build(Command):
def build_headless(self): def build_headless(self):
from setup.parallel_build import cpu_count from setup.parallel_build import cpu_count
if iswindows or ishaiku: if iswindows or ishaiku:
return # Dont have headless operation on these platforms return # Don't have headless operation on these platforms
from setup.build_environment import CMAKE, sw from setup.build_environment import CMAKE, sw
self.info('\n####### Building headless QPA plugin', '#'*7) self.info('\n####### Building headless QPA plugin', '#'*7)
a = absolutize a = absolutize

View File

@ -57,7 +57,7 @@ def get_dist(base, which, bitness):
def shutdown_allowed(which, bitness): def shutdown_allowed(which, bitness):
# The ARM64 VM is extremely flakey often booting up to a non-functional # The ARM64 VM is extremely flakey often booting up to a non-functional
# state so dont shut it down as it seems to be more stable once bootup is # state so don't shut it down as it seems to be more stable once boot-up is
# done. # done.
return bitness != 'arm64' return bitness != 'arm64'

View File

@ -286,7 +286,7 @@ class Translations(POT): # {{{
def is_po_file_ok(self, x): def is_po_file_ok(self, x):
bname = os.path.splitext(os.path.basename(x))[0] bname = os.path.splitext(os.path.basename(x))[0]
# sr@latin.po is identical to sr.po. And we dont support country # sr@latin.po is identical to sr.po. And we don't support country
# specific variants except for a few. # specific variants except for a few.
if '_' in bname: if '_' in bname:
return bname.partition('_')[0] in ('pt', 'zh', 'bn') return bname.partition('_')[0] in ('pt', 'zh', 'bn')

View File

@ -45,7 +45,7 @@ class InputProfile(Plugin):
type = _('Input profile') type = _('Input profile')
name = 'Default Input Profile' name = 'Default Input Profile'
short_name = 'default' # Used in the CLI so dont use spaces etc. in it short_name = 'default' # Used in the CLI so don't use spaces etc. in it
description = _('This profile tries to provide sane defaults and is useful ' description = _('This profile tries to provide sane defaults and is useful '
'if you know nothing about the input document.') 'if you know nothing about the input document.')
@ -243,7 +243,7 @@ class OutputProfile(Plugin):
type = _('Output profile') type = _('Output profile')
name = 'Default Output Profile' name = 'Default Output Profile'
short_name = 'default' # Used in the CLI so dont use spaces etc. in it short_name = 'default' # Used in the CLI so don't use spaces etc. in it
description = _('This profile tries to provide sane defaults and is useful ' description = _('This profile tries to provide sane defaults and is useful '
'if you want to produce a document intended to be read at a ' 'if you want to produce a document intended to be read at a '
'computer or on a range of devices.') 'computer or on a range of devices.')

View File

@ -715,7 +715,7 @@ def patch_metadata_plugins(possibly_updated_plugins):
if pup is not None: if pup is not None:
if pup.version > plugin.version and pup.minimum_calibre_version <= numeric_version: if pup.version > plugin.version and pup.minimum_calibre_version <= numeric_version:
patches[i] = pup(None) patches[i] = pup(None)
# Metadata source plugins dont use initialize() but that # Metadata source plugins don't use initialize() but that
# might change in the future, so be safe. # might change in the future, so be safe.
patches[i].initialize() patches[i].initialize()
for i, pup in iteritems(patches): for i, pup in iteritems(patches):

View File

@ -2283,7 +2283,7 @@ class DB:
def remove_trash_formats_dir_if_empty(self, book_id): def remove_trash_formats_dir_if_empty(self, book_id):
bdir = os.path.join(self.trash_dir, 'f', str(book_id)) bdir = os.path.join(self.trash_dir, 'f', str(book_id))
if os.path.isdir(bdir) and len(os.listdir(bdir)) <= 1: # dont count metadata.json if os.path.isdir(bdir) and len(os.listdir(bdir)) <= 1: # don't count metadata.json
self.rmtree(bdir) self.rmtree(bdir)
def list_trash_entries(self): def list_trash_entries(self):

View File

@ -373,7 +373,7 @@ class Cache:
mi.format_metadata = FormatMetadata(self, book_id, formats) mi.format_metadata = FormatMetadata(self, book_id, formats)
good_formats = FormatsList(sorted(formats), mi.format_metadata) good_formats = FormatsList(sorted(formats), mi.format_metadata)
# These three attributes are returned by the db2 get_metadata(), # These three attributes are returned by the db2 get_metadata(),
# however, we dont actually use them anywhere other than templates, so # however, we don't actually use them anywhere other than templates, so
# they have been removed, to avoid unnecessary overhead. The templates # they have been removed, to avoid unnecessary overhead. The templates
# all use _proxy_metadata. # all use _proxy_metadata.
# mi.book_size = self._field_for('size', book_id, default_value=0) # mi.book_size = self._field_for('size', book_id, default_value=0)
@ -3474,7 +3474,7 @@ class Cache:
self._add_extra_files(dest_id, {q: BytesIO(cdata)}, replace=False, auto_rename=True) self._add_extra_files(dest_id, {q: BytesIO(cdata)}, replace=False, auto_rename=True)
break break
for key in self.field_metadata: # loop thru all defined fields for key in self.field_metadata: # loop through all defined fields
fm = self.field_metadata[key] fm = self.field_metadata[key]
if not fm['is_custom']: if not fm['is_custom']:
continue continue

View File

@ -119,7 +119,7 @@ class Notes:
def path_for_resource(self, resource_hash: str) -> str: def path_for_resource(self, resource_hash: str) -> str:
hashalg, digest = resource_hash.split(':', 1) hashalg, digest = resource_hash.split(':', 1)
prefix = digest[:2] prefix = digest[:2]
# Cant use colons in filenames on windows safely # Can't use colons in filenames on windows safely
return os.path.join(self.resources_dir, prefix, f'{hashalg}-{digest}') return os.path.join(self.resources_dir, prefix, f'{hashalg}-{digest}')
def remove_resources(self, conn, note_id, resources_to_potentially_remove, delete_from_link_table=True): def remove_resources(self, conn, note_id, resources_to_potentially_remove, delete_from_link_table=True):

View File

@ -135,7 +135,7 @@ class Restore(Thread):
tdir = TemporaryDirectory('_rlib', dir=basedir) tdir = TemporaryDirectory('_rlib', dir=basedir)
tdir.__enter__() tdir.__enter__()
except OSError: except OSError:
# In case we dont have permissions to create directories in the # In case we don't have permissions to create directories in the
# parent folder of the src library # parent folder of the src library
tdir = TemporaryDirectory('_rlib') tdir = TemporaryDirectory('_rlib')

View File

@ -707,7 +707,7 @@ class Parser(SearchQueryParser): # {{{
continue continue
if fm['search_terms'] and x not in {'series_sort', 'id'}: if fm['search_terms'] and x not in {'series_sort', 'id'}:
if x not in self.virtual_fields and x != 'uuid': if x not in self.virtual_fields and x != 'uuid':
# We dont search virtual fields because if we do, search # We don't search virtual fields because if we do, search
# caching will not be used # caching will not be used
all_locs.add(x) all_locs.add(x)
field_metadata[x] = fm field_metadata[x] = fm

View File

@ -46,8 +46,8 @@ def get_defaults(spec):
def compare_argspecs(old, new, attr): def compare_argspecs(old, new, attr):
# We dont compare the names of the non-keyword arguments as they are often # We don't compare the names of the non-keyword arguments as they are often
# different and they dont affect the usage of the API. # different and they don't affect the usage of the API.
ok = len(old.args) == len(new.args) and get_defaults(old) == get_defaults(new) ok = len(old.args) == len(new.args) and get_defaults(old) == get_defaults(new)
if not ok: if not ok:

View File

@ -188,7 +188,7 @@ def debug(ioreg_to_tmp=False, buf=None, plugins=None,
out(' ') out(' ')
if ioreg_to_tmp: if ioreg_to_tmp:
open('/tmp/ioreg.txt', 'w').write(ioreg) open('/tmp/ioreg.txt', 'w').write(ioreg)
out('Dont forget to send the contents of /tmp/ioreg.txt') out("Don't forget to send the contents of /tmp/ioreg.txt")
out('You can open it with the command: open /tmp/ioreg.txt') out('You can open it with the command: open /tmp/ioreg.txt')
else: else:
out(ioreg) out(ioreg)

View File

@ -299,7 +299,7 @@
DEVICE_FLAG_BROKEN_MTPGETOBJPROPLIST | DEVICE_FLAG_BROKEN_MTPGETOBJPROPLIST |
DEVICE_FLAG_PLAYLIST_SPL_V1 }, DEVICE_FLAG_PLAYLIST_SPL_V1 },
// YP-F3 is NOT MTP - USB mass storage // YP-F3 is NOT MTP - USB mass storage
// From a rouge .INF file // From a rogue .INF file
// this device ID seems to have been recycled for: // this device ID seems to have been recycled for:
// the Samsung SGH-A707 Cingular cellphone // the Samsung SGH-A707 Cingular cellphone
// the Samsung L760-V cellphone // the Samsung L760-V cellphone
@ -1183,7 +1183,7 @@
DEVICE_FLAG_BROKEN_MTPGETOBJPROPLIST_ALL }, DEVICE_FLAG_BROKEN_MTPGETOBJPROPLIST_ALL },
// From: Willy Gardiol (web) <willy@gardiol.org> // From: Willy Gardiol (web) <willy@gardiol.org>
// Spurious errors for getting all objects, lead me to believe // Spurious errors for getting all objects, lead me to believe
// this flag atleast is needed // this flag at least is needed
{ "Nokia", 0x0421, "5800 XpressMusic v2", 0x0155, { "Nokia", 0x0421, "5800 XpressMusic v2", 0x0155,
DEVICE_FLAG_BROKEN_MTPGETOBJPROPLIST_ALL }, DEVICE_FLAG_BROKEN_MTPGETOBJPROPLIST_ALL },
// Yet another version... I think // Yet another version... I think

View File

@ -922,7 +922,7 @@ def get_usb_info(usbdev, debug=False): # {{{
try: try:
buf, dd = get_device_descriptor(handle, device_port) buf, dd = get_device_descriptor(handle, device_port)
if dd.idVendor == usbdev.vendor_id and dd.idProduct == usbdev.product_id and dd.bcdDevice == usbdev.bcd: if dd.idVendor == usbdev.vendor_id and dd.idProduct == usbdev.product_id and dd.bcdDevice == usbdev.bcd:
# Dont need to read language since we only care about english names # Don't need to read language since we only care about english names
# buf, langs = get_device_languages(handle, device_port) # buf, langs = get_device_languages(handle, device_port)
# print(111, langs) # print(111, langs)
for index, name in ((dd.iManufacturer, 'manufacturer'), (dd.iProduct, 'product'), (dd.iSerialNumber, 'serial_number')): for index, name in ((dd.iManufacturer, 'manufacturer'), (dd.iProduct, 'product'), (dd.iSerialNumber, 'serial_number')):

View File

@ -117,7 +117,7 @@ class HTMLZInput(InputFormatPlugin):
if opf: if opf:
opf_parsed = OPF(opf, basedir=os.getcwd()) opf_parsed = OPF(opf, basedir=os.getcwd())
cover_path = opf_parsed.raster_cover or opf_parsed.cover cover_path = opf_parsed.raster_cover or opf_parsed.cover
os.remove(opf) # dont confuse code that searches for OPF files later on the oeb object will create its own OPF os.remove(opf) # don't confuse code that searches for OPF files later on the oeb object will create its own OPF
# Set the cover. # Set the cover.
if cover_path: if cover_path:
cdata = None cdata = None

View File

@ -123,7 +123,7 @@ def read_single_border(parent, edge, XPath, get):
pass pass
sz = get(elem, 'w:sz') sz = get(elem, 'w:sz')
if sz is not None: if sz is not None:
# we dont care about art borders (they are only used for page borders) # we don't care about art borders (they are only used for page borders)
try: try:
width = min(96, max(2, float(sz))) / 8 width = min(96, max(2, float(sz))) / 8
except (ValueError, TypeError): except (ValueError, TypeError):

View File

@ -33,7 +33,7 @@ def read_text_border(parent, dest, XPath, get):
pass pass
sz = get(elem, 'w:sz') sz = get(elem, 'w:sz')
if sz is not None: if sz is not None:
# we dont care about art borders (they are only used for page borders) # we don't care about art borders (they are only used for page borders)
try: try:
# A border of less than 1pt is not rendered by WebKit # A border of less than 1pt is not rendered by WebKit
border_width = min(96, max(8, float(sz))) / 8 border_width = min(96, max(8, float(sz))) / 8

View File

@ -520,7 +520,7 @@ class Convert:
if float_spec is None and is_float: if float_spec is None and is_float:
float_spec = FloatSpec(self.docx.namespace, html_tag, tag_style) float_spec = FloatSpec(self.docx.namespace, html_tag, tag_style)
if display in {'inline', 'inline-block'} or tagname == 'br': # <br> has display:block but we dont want to start a new paragraph if display in {'inline', 'inline-block'} or tagname == 'br': # <br> has display:block but we don't want to start a new paragraph
if is_float and float_spec.is_dropcaps: if is_float and float_spec.is_dropcaps:
self.add_block_tag(tagname, html_tag, tag_style, stylizer, float_spec=float_spec) self.add_block_tag(tagname, html_tag, tag_style, stylizer, float_spec=float_spec)
float_spec = None float_spec = None
@ -539,7 +539,7 @@ class Convert:
self.blocks.start_new_table(html_tag, tag_style) self.blocks.start_new_table(html_tag, tag_style)
else: else:
if tagname == 'img' and is_float: if tagname == 'img' and is_float:
# Image is floating so dont start a new paragraph for it # Image is floating so don't start a new paragraph for it
self.add_inline_tag(tagname, html_tag, tag_style, stylizer) self.add_inline_tag(tagname, html_tag, tag_style, stylizer)
else: else:
if tagname == 'hr': if tagname == 'hr':

View File

@ -9,7 +9,7 @@ import regex
class Parser: class Parser:
''' See epubcfi.ebnf for the specification that this parser tries to ''' See epubcfi.ebnf for the specification that this parser tries to
follow. I have implemented it manually, since I dont want to depend on follow. I have implemented it manually, since I don't want to depend on
grako, and the grammar is pretty simple. This parser is thread-safe, i.e. grako, and the grammar is pretty simple. This parser is thread-safe, i.e.
it can be used from multiple threads simultaneously. ''' it can be used from multiple threads simultaneously. '''

View File

@ -580,7 +580,7 @@ class LitFile:
offset, size = u32(piece), int32(piece[8:]) offset, size = u32(piece), int32(piece[8:])
piece = self.read_raw(offset, size) piece = self.read_raw(offset, size)
if i == 0: if i == 0:
continue # Dont need this piece continue # Don't need this piece
elif i == 1: elif i == 1:
if u32(piece[8:]) != self.entry_chunklen or \ if u32(piece[8:]) != self.entry_chunklen or \
u32(piece[12:]) != self.entry_unknown: u32(piece[12:]) != self.entry_unknown:

View File

@ -814,7 +814,7 @@ class HTMLConverter:
collapse_whitespace = 'white-space' not in css or css['white-space'] != 'pre' collapse_whitespace = 'white-space' not in css or css['white-space'] != 'pre'
if self.process_alignment(css) and collapse_whitespace: if self.process_alignment(css) and collapse_whitespace:
# Dont want leading blanks in a new paragraph # Don't want leading blanks in a new paragraph
src = src.lstrip() src = src.lstrip()
def append_text(src): def append_text(src):

View File

@ -51,7 +51,7 @@ def read_variable_len_data(data, header):
header['tagx_block_size'] = 0 header['tagx_block_size'] = 0
trailing_bytes = data[idxt_offset+idxt_size:] trailing_bytes = data[idxt_offset+idxt_size:]
if trailing_bytes.rstrip(b'\0'): if trailing_bytes.rstrip(b'\0'):
raise ValueError('Traling bytes after last IDXT entry: {!r}'.format(trailing_bytes.rstrip(b'\0'))) raise ValueError('Trailing bytes after last IDXT entry: {!r}'.format(trailing_bytes.rstrip(b'\0')))
header['indices'] = indices header['indices'] = indices

View File

@ -637,11 +637,11 @@ class TBSIndexing: # {{{
ai, extra, consumed = decode_tbs(byts) ai, extra, consumed = decode_tbs(byts)
byts = byts[consumed:] byts = byts[consumed:]
if extra.get(0b0010, None) is not None: if extra.get(0b0010, None) is not None:
raise ValueError('Dont know how to interpret flag 0b0010' raise ValueError("Don't know how to interpret flag 0b0010"
' while reading section transitions') ' while reading section transitions')
if extra.get(0b1000, None) is not None: if extra.get(0b1000, None) is not None:
if len(extra) > 1: if len(extra) > 1:
raise ValueError('Dont know how to interpret flags' raise ValueError("Don't know how to interpret flags"
f' {extra!r} while reading section transitions') f' {extra!r} while reading section transitions')
nsi = self.get_index(psi.index+1) nsi = self.get_index(psi.index+1)
ans.append( ans.append(
@ -675,7 +675,7 @@ class TBSIndexing: # {{{
si, extra, consumed = decode_tbs(byts) si, extra, consumed = decode_tbs(byts)
byts = byts[consumed:] byts = byts[consumed:]
if len(extra) > 1 or 0b0010 in extra or 0b1000 in extra: if len(extra) > 1 or 0b0010 in extra or 0b1000 in extra:
raise ValueError(f'Dont know how to interpret flags {extra!r}' raise ValueError(f"Don't know how to interpret flags {extra!r}"
' when reading starting section') ' when reading starting section')
si = self.get_index(si) si = self.get_index(si)
ans.append('The section at the start of this record is:' ans.append('The section at the start of this record is:'

View File

@ -55,7 +55,7 @@ def parse_indx_header(data):
ans['ordt_map'] = '' ans['ordt_map'] = ''
if ordt1 > 0 and data[ordt1:ordt1+4] == b'ORDT': if ordt1 > 0 and data[ordt1:ordt1+4] == b'ORDT':
# I dont know what this is, but using it seems to be unnecessary, so # I don't know what this is, but using it seems to be unnecessary, so
# just leave it as the raw bytestring # just leave it as the raw bytestring
ans['ordt1_raw'] = data[ordt1+4:ordt1+4+ans['oentries']] ans['ordt1_raw'] = data[ordt1+4:ordt1+4+ans['oentries']]
if ordt2 > 0 and data[ordt2:ordt2+4] == b'ORDT': if ordt2 > 0 and data[ordt2:ordt2+4] == b'ORDT':

View File

@ -221,7 +221,7 @@ class SecondaryIndexEntry(IndexEntry):
tag = self.INDEX_MAP[index] tag = self.INDEX_MAP[index]
# The values for this index entry # The values for this index entry
# I dont know what the 5 means, it is not the number of entries # I don't know what the 5 means, it is not the number of entries
self.secondary = [5 if tag == min( self.secondary = [5 if tag == min(
itervalues(self.INDEX_MAP)) else 0, 0, tag] itervalues(self.INDEX_MAP)) else 0, 0, tag]

View File

@ -20,7 +20,7 @@ from calibre.ebooks.mobi.writer2.serializer import Serializer
from calibre.utils.filenames import ascii_filename from calibre.utils.filenames import ascii_filename
from polyglot.builtins import iteritems from polyglot.builtins import iteritems
# Disabled as I dont care about uncrossable breaks # Disabled as I don't care about uncrossable breaks
WRITE_UNCROSSABLE_BREAKS = False WRITE_UNCROSSABLE_BREAKS = False
NULL_INDEX = 0xffffffff NULL_INDEX = 0xffffffff

View File

@ -25,7 +25,7 @@ def process_jpegs_for_amazon(data: bytes) -> bytes:
img = Image.open(BytesIO(data)) img = Image.open(BytesIO(data))
if img.format == 'JPEG': if img.format == 'JPEG':
# Amazon's MOBI renderer can't render JPEG images without JFIF metadata # Amazon's MOBI renderer can't render JPEG images without JFIF metadata
# and images with EXIF data dont get displayed on the cover screen # and images with EXIF data don't get displayed on the cover screen
changed = not img.info changed = not img.info
has_exif = False has_exif = False
if hasattr(img, 'getexif'): if hasattr(img, 'getexif'):

View File

@ -240,7 +240,7 @@ class Serializer:
if tocref.klass == 'periodical': if tocref.klass == 'periodical':
# This is a section node. # This is a section node.
# For periodical tocs, the section urls are like r'feed_\d+/index.html' # For periodical tocs, the section urls are like r'feed_\d+/index.html'
# We dont want to point to the start of the first article # We don't want to point to the start of the first article
# so we change the href. # so we change the href.
itemhref = re.sub(r'article_\d+/', '', itemhref) itemhref = re.sub(r'article_\d+/', '', itemhref)
self.href_offsets[itemhref].append(buf.tell()) self.href_offsets[itemhref].append(buf.tell())

View File

@ -246,7 +246,7 @@ class SkelIndex(Index):
def __init__(self, skel_table): def __init__(self, skel_table):
self.entries = [ self.entries = [
(s.name, { (s.name, {
# Dont ask me why these entries have to be repeated twice # Don't ask me why these entries have to be repeated twice
'chunk_count':(s.chunk_count, s.chunk_count), 'chunk_count':(s.chunk_count, s.chunk_count),
'geometry':(s.start_pos, s.length, s.start_pos, s.length), 'geometry':(s.start_pos, s.length, s.start_pos, s.length),
}) for s in skel_table }) for s in skel_table
@ -387,7 +387,7 @@ if __name__ == '__main__':
subprocess.check_call(['ebook-convert', src, '.epub', '--level1-toc', '//h:p', '--no-default-epub-cover', '--flow-size', '1000000']) subprocess.check_call(['ebook-convert', src, '.epub', '--level1-toc', '//h:p', '--no-default-epub-cover', '--flow-size', '1000000'])
subprocess.check_call(['ebook-convert', src, '.azw3', '--level1-toc', '//h:p', '--no-inline-toc', '--extract-to=x']) subprocess.check_call(['ebook-convert', src, '.azw3', '--level1-toc', '//h:p', '--no-inline-toc', '--extract-to=x'])
subprocess.call(['kindlegen', 'index.epub']) # kindlegen exit code is not 0 as we dont have a cover subprocess.call(['kindlegen', 'index.epub']) # kindlegen exit code is not 0 as we don't have a cover
subprocess.check_call(['calibre-debug', 'index.mobi']) subprocess.check_call(['calibre-debug', 'index.mobi'])
from calibre.gui2.tweak_book.diff.main import main from calibre.gui2.tweak_book.diff.main import main

View File

@ -500,7 +500,7 @@ class Container(ContainerBase): # {{{
# spec requires all text including filenames to be in NFC form. # spec requires all text including filenames to be in NFC form.
# The proper fix is to implement a VFS that maps between # The proper fix is to implement a VFS that maps between
# canonical names and their file system representation, however, # canonical names and their file system representation, however,
# I dont have the time for that now. Note that the container # I don't have the time for that now. Note that the container
# ensures that all text files are normalized to NFC when # ensures that all text files are normalized to NFC when
# decoding them anyway, so there should be no mismatch between # decoding them anyway, so there should be no mismatch between
# names in the text and NFC canonical file names. # names in the text and NFC canonical file names.
@ -1481,7 +1481,7 @@ def opf_to_azw3(opf, outpath, container):
def _parse_css(self, data): def _parse_css(self, data):
# The default CSS parser used by oeb.base inserts the h namespace # The default CSS parser used by oeb.base inserts the h namespace
# and resolves all @import rules. We dont want that. # and resolves all @import rules. We don't want that.
return container.parse_css(data) return container.parse_css(data)
def specialize(oeb): def specialize(oeb):

View File

@ -131,8 +131,8 @@ def filter_by_weight(fonts, val):
def find_matching_font(fonts, weight='normal', style='normal', stretch='normal'): def find_matching_font(fonts, weight='normal', style='normal', stretch='normal'):
# See https://www.w3.org/TR/css-fonts-3/#font-style-matching # See https://www.w3.org/TR/css-fonts-3/#font-style-matching
# We dont implement the unicode character range testing # We don't implement the unicode character range testing
# We also dont implement bolder, lighter # We also don't implement bolder, lighter
for f, q in ((filter_by_stretch, stretch), (filter_by_style, style), (filter_by_weight, weight)): for f, q in ((filter_by_stretch, stretch), (filter_by_style, style), (filter_by_weight, weight)):
fonts = f(fonts, q) fonts = f(fonts, q)
if len(fonts) == 1: if len(fonts) == 1:

View File

@ -81,7 +81,7 @@ def pretty_opf(root):
try: try:
children = sorted(manifest, key=manifest_key) children = sorted(manifest, key=manifest_key)
except AttributeError: except AttributeError:
continue # There are comments so dont sort since that would mess up the comments continue # There are comments so don't sort since that would mess up the comments
for x in reversed(children): for x in reversed(children):
manifest.insert(0, x) manifest.insert(0, x)

View File

@ -101,7 +101,7 @@ div#book-inner {{ margin-top: 0; margin-bottom: 0; }}</style><script type="text/
# encoding quirks # encoding quirks
'<p>A\xa0nbsp;&nbsp;': '<p>A\xa0nbsp;&nbsp;':
'<p><span class="koboSpan" id="kobo.1.1">A&#160;nbsp;&#160;</span></p>', '<p><span class="koboSpan" id="kobo.1.1">A&#160;nbsp;&#160;</span></p>',
'<div><script>1 < 2 & 3</script>': # escaping with cdata note that kepubify doesnt do this '<div><script>1 < 2 & 3</script>': # escaping with cdata note that kepubify doesn't do this
'<div><script><![CDATA[1 < 2 & 3]]></script></div>', '<div><script><![CDATA[1 < 2 & 3]]></script></div>',
# CSS filtering # CSS filtering

View File

@ -232,7 +232,7 @@ def parse_css(data, fname='<string>', is_declaration=False, decode=None, log_lev
if css_preprocessor is not None: if css_preprocessor is not None:
data = css_preprocessor(data) data = css_preprocessor(data)
parser = CSSParser(loglevel=log_level, parser = CSSParser(loglevel=log_level,
# We dont care about @import rules # We don't care about @import rules
fetcher=lambda x: (None, None), log=_css_logger) fetcher=lambda x: (None, None), log=_css_logger)
if is_declaration: if is_declaration:
data = parser.parseStyle(data, validate=False) data = parser.parseStyle(data, validate=False)

View File

@ -84,7 +84,7 @@ class CoverManager:
def default_cover(self): def default_cover(self):
''' '''
Create a generic cover for books that dont have a cover Create a generic cover for books that don't have a cover
''' '''
if self.no_default_cover: if self.no_default_cover:
return None return None

View File

@ -141,7 +141,7 @@ class EmbedFonts:
for sel in rule.selectorList: for sel in rule.selectorList:
sel = sel.selectorText sel = sel.selectorText
if sel and sel.startswith('.'): if sel and sel.startswith('.'):
# We dont care about pseudo-selectors as the worst that # We don't care about pseudo-selectors as the worst that
# can happen is some extra characters will remain in # can happen is some extra characters will remain in
# the font # the font
sel = sel.partition(':')[0] sel = sel.partition(':')[0]

View File

@ -144,7 +144,7 @@ class MergeMetadata:
self.oeb.guide.remove('cover') self.oeb.guide.remove('cover')
self.oeb.guide.remove('titlepage') self.oeb.guide.remove('titlepage')
elif self.oeb.plumber_output_format in {'mobi', 'azw3'} and old_cover is not None: elif self.oeb.plumber_output_format in {'mobi', 'azw3'} and old_cover is not None:
# The amazon formats dont support html cover pages, so remove them # The amazon formats don't support html cover pages, so remove them
# even if no cover was specified. # even if no cover was specified.
self.oeb.guide.remove('titlepage') self.oeb.guide.remove('titlepage')
do_remove_old_cover = False do_remove_old_cover = False

View File

@ -214,7 +214,7 @@ class SubsetFonts:
for sel in rule.selectorList: for sel in rule.selectorList:
sel = sel.selectorText sel = sel.selectorText
if sel and sel.startswith('.'): if sel and sel.startswith('.'):
# We dont care about pseudo-selectors as the worst that # We don't care about pseudo-selectors as the worst that
# can happen is some extra characters will remain in # can happen is some extra characters will remain in
# the font # the font
sel = sel.partition(':')[0] sel = sel.partition(':')[0]

View File

@ -836,7 +836,7 @@ def fonts_are_identical(fonts):
def merge_font_files(fonts, log): def merge_font_files(fonts, log):
# As of Qt 5.15.1 Chromium has switched to harfbuzz and dropped sfntly. It # As of Qt 5.15.1 Chromium has switched to harfbuzz and dropped sfntly. It
# now produces font descriptors whose W arrays dont match the glyph width # now produces font descriptors whose W arrays don't match the glyph width
# information from the hhea table, in contravention of the PDF spec. So # information from the hhea table, in contravention of the PDF spec. So
# we can no longer merge font descriptors, all we can do is merge the # we can no longer merge font descriptors, all we can do is merge the
# actual sfnt data streams into a single stream and subset it to contain # actual sfnt data streams into a single stream and subset it to contain
@ -1013,7 +1013,7 @@ def add_header_footer(manager, opts, pdf_doc, container, page_number_display_map
toplevel_toc_map = stack_to_map(create_toc_stack(tc())) toplevel_toc_map = stack_to_map(create_toc_stack(tc()))
toplevel_pagenum_map, toplevel_pages_map = page_counts_map(tc()) toplevel_pagenum_map, toplevel_pages_map = page_counts_map(tc())
dpi = 96 # dont know how to query Qt for this, seems to be the same on all platforms dpi = 96 # don't know how to query Qt for this, seems to be the same on all platforms
def pt_to_px(pt): return int(pt * dpi / 72) def pt_to_px(pt): return int(pt * dpi / 72)
def create_container(page_num, margins): def create_container(page_num, margins):

View File

@ -732,21 +732,21 @@ class Region:
class Page: class Page:
def __init__(self, page, font_map, opts, log, idc): def __init__(self, page, font_map, opts, log, idc):
def text_cmp(frst, secnd): def text_cmp(first, second):
# Compare 2 text objects. # Compare 2 text objects.
# Order by line (top/bottom) then left # Order by line (top/bottom) then left
if (frst.top <= secnd.top and frst.bottom >= secnd.bottom-BOTTOM_FACTOR) \ if (first.top <= second.top and first.bottom >= second.bottom-BOTTOM_FACTOR) \
or (secnd.top <= frst.top and secnd.bottom >= frst.bottom-BOTTOM_FACTOR): or (second.top <= first.top and second.bottom >= first.bottom-BOTTOM_FACTOR):
# Overlap = same line # Overlap = same line
if frst.left < secnd.left: if first.left < second.left:
return -1 return -1
elif frst.left == secnd.left: elif first.left == second.left:
return 0 return 0
return 1 return 1
# Different line so sort into line number # Different line so sort into line number
if frst.bottom < secnd.bottom: if first.bottom < second.bottom:
return -1 return -1
elif frst.bottom == secnd.bottom: elif first.bottom == second.bottom:
return 0 return 0
return 1 return 1

View File

@ -121,7 +121,7 @@ class ListNumbers:
return 'ordered' return 'ordered'
# sys.stderr.write('module is list_numbers\n') # sys.stderr.write('module is list_numbers\n')
# sys.stderr.write('method is __determine type\n') # sys.stderr.write('method is __determine type\n')
# sys.stderr.write('Couldn\'t get type of list\n') # sys.stderr.write("Couldn't get type of list\n")
# must be some type of ordered list -- just a guess! # must be some type of ordered list -- just a guess!
return 'unordered' return 'unordered'

View File

@ -1628,7 +1628,7 @@ def ensure_app(headless=True):
# unhandled python exception in a slot or virtual method. Since ensure_app() # unhandled python exception in a slot or virtual method. Since ensure_app()
# is used in worker processes for background work like rendering html # is used in worker processes for background work like rendering html
# or running a headless browser, we circumvent this as I really # or running a headless browser, we circumvent this as I really
# dont feel like going through all the code and making sure no # don't feel like going through all the code and making sure no
# unhandled exceptions ever occur. All the actual GUI apps already # unhandled exceptions ever occur. All the actual GUI apps already
# override sys.excepthook with a proper error handler. # override sys.excepthook with a proper error handler.
sys.excepthook = simple_excepthook sys.excepthook = simple_excepthook
@ -1776,7 +1776,7 @@ def raise_and_focus(self: QWidget) -> None:
def raise_without_focus(self: QWidget) -> None: def raise_without_focus(self: QWidget) -> None:
if QApplication.instance().platformName() == 'wayland': if QApplication.instance().platformName() == 'wayland':
# On fucking Wayland, we cant raise a dialog without also giving it # On fucking Wayland, we can't raise a dialog without also giving it
# keyboard focus. What a joke. # keyboard focus. What a joke.
self.raise_and_focus() self.raise_and_focus()
else: else:

View File

@ -269,7 +269,7 @@ class InterfaceAction(QObject):
else: else:
self.shortcut_action_for_context_menu = shortcut_action self.shortcut_action_for_context_menu = shortcut_action
if ismacos: if ismacos:
# In Qt 5 keyboard shortcuts dont work unless the # In Qt 5 keyboard shortcuts don't work unless the
# action is explicitly added to the main window # action is explicitly added to the main window
self.gui.addAction(shortcut_action) self.gui.addAction(shortcut_action)
@ -338,7 +338,7 @@ class InterfaceAction(QObject):
shortcut_name, default_keys=keys, shortcut_name, default_keys=keys,
action=ac, description=description, group=self.action_spec[0], action=ac, description=description, group=self.action_spec[0],
persist_shortcut=persist_shortcut) persist_shortcut=persist_shortcut)
# In Qt 5 keyboard shortcuts dont work unless the # In Qt 5 keyboard shortcuts don't work unless the
# action is explicitly added to the main window and on OSX and # action is explicitly added to the main window and on OSX and
# Unity since the menu might be exported, the shortcuts won't work # Unity since the menu might be exported, the shortcuts won't work
self.gui.addAction(ac) self.gui.addAction(ac)

View File

@ -288,10 +288,10 @@ class AutoAdder(QObject):
if duplicates: if duplicates:
paths, formats, metadata = [], [], [] paths, formats, metadata = [], [], []
for p, f, mis in duplicates: for p, f, mi in duplicates:
paths.extend(p) paths.extend(p)
formats.extend(f) formats.extend(f)
metadata.extend(mis) metadata.extend(mi)
dups = [(mic, mic.cover, [p]) for mic, p in zip(metadata, paths)] dups = [(mic, mic.cover, [p]) for mic, p in zip(metadata, paths)]
d = DuplicatesQuestion(m.db, dups, parent=gui) d = DuplicatesQuestion(m.db, dups, parent=gui)
dups = tuple(d.duplicates) dups = tuple(d.duplicates)

View File

@ -485,7 +485,7 @@ class CentralContainer(QWidget):
def read_settings(self): def read_settings(self):
before = self.serialized_settings() before = self.serialized_settings()
# sadly self.size() doesnt always return sensible values so look at # sadly self.size() doesn't always return sensible values so look at
# the size of the main window which works perfectly for width, not so # the size of the main window which works perfectly for width, not so
# perfectly for height # perfectly for height
sz = self.size() sz = self.size()

View File

@ -520,7 +520,7 @@ class CoverSettingsWidget(QWidget):
def restore_defaults(self): def restore_defaults(self):
defaults = self.original_prefs.defaults.copy() defaults = self.original_prefs.defaults.copy()
# Dont delete custom color themes when restoring defaults # Don't delete custom color themes when restoring defaults
defaults['color_themes'] = self.custom_colors defaults['color_themes'] = self.custom_colors
self.apply_prefs(defaults) self.apply_prefs(defaults)
self.update_preview() self.update_preview()

View File

@ -287,7 +287,7 @@ class NoteEditorWidget(EditorWidget):
def do_insert_image(self): def do_insert_image(self):
# See https://bugreports.qt.io/browse/QTBUG-118537 # See https://bugreports.qt.io/browse/QTBUG-118537
# for why we cant have a nice margin for floating images # for why we can't have a nice margin for floating images
d = AskImage(self.images, self.db) d = AskImage(self.images, self.db)
if d.exec() == QDialog.DialogCode.Accepted and d.current_digest: if d.exec() == QDialog.DialogCode.Accepted and d.current_digest:
ir = self.images[d.current_digest] ir = self.images[d.current_digest]

View File

@ -153,21 +153,21 @@ def send_mails(jobnames, callback, attachments, to_s, subjects,
attachments, to_s, subjects, texts, attachment_names): attachments, to_s, subjects, texts, attachment_names):
description = _('Email %(name)s to %(to)s') % dict(name=name, to=to) description = _('Email %(name)s to %(to)s') % dict(name=name, to=to)
if isinstance(to, str) and (is_for_kindle(to) or '@pbsync.com' in to): if isinstance(to, str) and (is_for_kindle(to) or '@pbsync.com' in to):
# The PocketBook service is a total joke. It cant handle # The PocketBook service is a total joke. It can't handle
# non-ascii, filenames that are long enough to be split up, commas, and # non-ascii, filenames that are long enough to be split up, commas, and
# the good lord alone knows what else. So use a random filename # the good lord alone knows what else. So use a random filename
# containing only 22 English letters and numbers # containing only 22 English letters and numbers
# #
# And since this email is only going to be processed by automated # And since this email is only going to be processed by automated
# services, make the subject+text random too as at least the amazon # services, make the subject+text random too as at least the amazon
# service cant handle non-ascii text. I dont know what baboons # service can't handle non-ascii text. I don't know what baboons
# these companies employ to write their code. It's the height of # these companies employ to write their code. It's the height of
# irony that they are called "tech" companies. # irony that they are called "tech" companies.
# https://bugs.launchpad.net/calibre/+bug/1989282 # https://bugs.launchpad.net/calibre/+bug/1989282
from calibre.utils.short_uuid import uuid4 from calibre.utils.short_uuid import uuid4
if not is_for_kindle(to): if not is_for_kindle(to):
# Amazon nowadays reads metadata from attachment filename instead of # Amazon nowadays reads metadata from attachment filename instead of
# file internal metadata so dont nuke the filename. # file internal metadata so don't nuke the filename.
# https://www.mobileread.com/forums/showthread.php?t=349290 # https://www.mobileread.com/forums/showthread.php?t=349290
aname = f'{uuid4()}.' + aname.rpartition('.')[-1] aname = f'{uuid4()}.' + aname.rpartition('.')[-1]
subject = uuid4() subject = uuid4()

View File

@ -36,7 +36,7 @@ class FTSDialog(Dialog):
l = QVBoxLayout(self) l = QVBoxLayout(self)
self.fat_warning = fw = QLabel( self.fat_warning = fw = QLabel(
f'<span style="color:red; font-weight: bold">{_("WARNING")}:</span> ' + f'<span style="color:red; font-weight: bold">{_("WARNING")}:</span> ' +
_('The calibre library is on a FAT drive, indexing more than a few hundred books wont work.') + _("The calibre library is on a FAT drive, indexing more than a few hundred books won't work.") +
f' <a href="xxx" style="text-decoration: none">{_("Learn more")}</a>') f' <a href="xxx" style="text-decoration: none">{_("Learn more")}</a>')
# fw.setVisible(False) # fw.setVisible(False)
fw.linkActivated.connect(self.show_fat_details) fw.linkActivated.connect(self.show_fat_details)

View File

@ -359,7 +359,7 @@ class VLTabs(QTabBar): # {{{
def lock_tab(self): def lock_tab(self):
gprefs['vl_tabs_closable'] = False gprefs['vl_tabs_closable'] = False
self.setTabsClosable(False) self.setTabsClosable(False)
# Workaround for Qt bug where it doesnt recalculate the tab size after locking # Workaround for Qt bug where it doesn't recalculate the tab size after locking
for idx in range(self.count()): for idx in range(self.count()):
self.setTabButton(idx, QTabBar.ButtonPosition.RightSide, None) self.setTabButton(idx, QTabBar.ButtonPosition.RightSide, None)
self.setTabButton(idx, QTabBar.ButtonPosition.LeftSide, None) self.setTabButton(idx, QTabBar.ButtonPosition.LeftSide, None)
@ -392,7 +392,7 @@ class VLTabs(QTabBar): # {{{
def tab_close(self, index): def tab_close(self, index):
vl = str(self.tabData(index) or '') vl = str(self.tabData(index) or '')
if vl: # Dont allow closing the All Books tab if vl: # Don't allow closing the All Books tab
self.current_db.new_api.set_pref('virt_libs_hidden', list( self.current_db.new_api.set_pref('virt_libs_hidden', list(
self.current_db.new_api.pref('virt_libs_hidden', ())) + [vl]) self.current_db.new_api.pref('virt_libs_hidden', ())) + [vl])
self.removeTab(index) self.removeTab(index)

View File

@ -646,7 +646,7 @@ class CoverDelegate(QStyledItemDelegate):
if self.title_height != 0: if self.title_height != 0:
self.paint_title(painter, trect, db, book_id) self.paint_title(painter, trect, db, book_id)
if self.emblem_size > 0: if self.emblem_size > 0:
# We dont draw embossed emblems as the ondevice/marked emblems are drawn in the gutter # We don't draw embossed emblems as the ondevice/marked emblems are drawn in the gutter
return return
if marked: if marked:
try: try:
@ -1163,7 +1163,7 @@ class GridView(QListView):
self.thumbnail_cache.set_database(newdb) self.thumbnail_cache.set_database(newdb)
try: try:
# Use a timeout so that if, for some reason, the render thread # Use a timeout so that if, for some reason, the render thread
# gets stuck, we dont deadlock, future covers won't get # gets stuck, we don't deadlock, future covers won't get
# rendered, but this is better than a deadlock # rendered, but this is better than a deadlock
join_with_timeout(self.delegate.render_queue) join_with_timeout(self.delegate.render_queue)
except RuntimeError: except RuntimeError:

View File

@ -243,7 +243,7 @@ class BooksModel(QAbstractTableModel): # {{{
self.bool_yes_icon = QIcon.ic('ok.png').pixmap(icon_height) self.bool_yes_icon = QIcon.ic('ok.png').pixmap(icon_height)
self.bool_no_icon = QIcon.ic('list_remove.png').pixmap(icon_height) self.bool_no_icon = QIcon.ic('list_remove.png').pixmap(icon_height)
self.bool_blank_icon = QIcon.ic('blank.png').pixmap(icon_height) self.bool_blank_icon = QIcon.ic('blank.png').pixmap(icon_height)
# Qt auto-scales marked icon correctly, so we dont need to do it (and # Qt auto-scales marked icon correctly, so we don't need to do it (and
# remember that the cover grid view needs a larger version of the icon, # remember that the cover grid view needs a larger version of the icon,
# anyway) # anyway)
self.marked_icon = QIcon.ic('marked.png') self.marked_icon = QIcon.ic('marked.png')

View File

@ -164,7 +164,7 @@ class PreserveViewState: # {{{
''' '''
Save the set of selected books at enter time. If at exit time there are no Save the set of selected books at enter time. If at exit time there are no
selected books, restore the previous selection, the previous current index selected books, restore the previous selection, the previous current index
and dont affect the scroll position. and don't affect the scroll position.
''' '''
def __init__(self, view, preserve_hpos=True, preserve_vpos=True, require_selected_ids=True): def __init__(self, view, preserve_hpos=True, preserve_vpos=True, require_selected_ids=True):

View File

@ -268,7 +268,7 @@ class MarkdownHighlighter(QSyntaxHighlighter):
elif emphasis: elif emphasis:
self.setFormat(self.offset+offset+ match.start(), match.end() - match.start(), self.MARKDOWN_KWS_FORMAT['Italic']) self.setFormat(self.offset+offset+ match.start(), match.end() - match.start(), self.MARKDOWN_KWS_FORMAT['Italic'])
def recusive(match, extra_offset, bold, emphasis): def recursive(match, extra_offset, bold, emphasis):
apply(match, bold, emphasis) apply(match, bold, emphasis)
if bold and emphasis: if bold and emphasis:
return # max deep => return, do not process extra Bold/Italic return # max deep => return, do not process extra Bold/Italic
@ -278,17 +278,17 @@ class MarkdownHighlighter(QSyntaxHighlighter):
self._highlightBoldEmphasis(sub_txt, cursor, bf, sub_offset, bold, emphasis) self._highlightBoldEmphasis(sub_txt, cursor, bf, sub_offset, bold, emphasis)
for mo in re.finditer(self.MARKDOWN_KEYS_REGEX['Italic'],text): for mo in re.finditer(self.MARKDOWN_KEYS_REGEX['Italic'],text):
recusive(mo, 1, bold, True) recursive(mo, 1, bold, True)
found = True found = True
for mo in re.finditer(self.MARKDOWN_KEYS_REGEX['uItalic'],text): for mo in re.finditer(self.MARKDOWN_KEYS_REGEX['uItalic'],text):
recusive(mo, 1, bold, True) recursive(mo, 1, bold, True)
found = True found = True
for mo in re.finditer(self.MARKDOWN_KEYS_REGEX['Bold'],text): for mo in re.finditer(self.MARKDOWN_KEYS_REGEX['Bold'],text):
recusive(mo, 2, True, emphasis) recursive(mo, 2, True, emphasis)
found = True found = True
for mo in re.finditer(self.MARKDOWN_KEYS_REGEX['uBold'],text): for mo in re.finditer(self.MARKDOWN_KEYS_REGEX['uBold'],text):
recusive(mo, 2, True, emphasis) recursive(mo, 2, True, emphasis)
found = True found = True
for mo in re.finditer(self.MARKDOWN_KEYS_REGEX['BoldItalic'],text): for mo in re.finditer(self.MARKDOWN_KEYS_REGEX['BoldItalic'],text):

View File

@ -253,7 +253,7 @@ def get_notifier(systray=None):
if not ans.ok: if not ans.ok:
ans = DummyNotifier() ans = DummyNotifier()
else: else:
# We dont use Qt's systray based notifier as it uses Growl and is # We don't use Qt's systray based notifier as it uses Growl and is
# broken with different versions of Growl # broken with different versions of Growl
ans = DummyNotifier() ans = DummyNotifier()
elif iswindows: elif iswindows:

View File

@ -965,7 +965,7 @@ QRect PictureFlowPrivate::renderSlide(const SlideInfo &slide, int alpha, int col
if(column < 0) if(column < 0)
continue; continue;
if (preserveAspectRatio && !slide_moving_to_center) { if (preserveAspectRatio && !slide_moving_to_center) {
// We dont want a black border at the edge of narrow images when the images are in the left or right stacks // We don't want a black border at the edge of narrow images when the images are in the left or right stacks
if (slide.slideIndex < centerIndex) { if (slide.slideIndex < centerIndex) {
column = qMin(column + img_offset, sw - 1); column = qMin(column + img_offset, sw - 1);
} else if (slide.slideIndex == centerIndex) { } else if (slide.slideIndex == centerIndex) {

View File

@ -290,7 +290,7 @@ void CalibreStyle::drawPrimitive(PrimitiveElement element, const QStyleOption *
const QStyleOptionViewItem *vopt = NULL; const QStyleOptionViewItem *vopt = NULL;
switch (element) { switch (element) {
case PE_FrameTabBarBase: // {{{ case PE_FrameTabBarBase: // {{{
// dont draw line below tabs in dark mode as it looks bad // don't draw line below tabs in dark mode as it looks bad
if (const QStyleOptionTabBarBase *tbb = qstyleoption_cast<const QStyleOptionTabBarBase *>(option)) { if (const QStyleOptionTabBarBase *tbb = qstyleoption_cast<const QStyleOptionTabBarBase *>(option)) {
if (tbb->shape == QTabBar::RoundedNorth) { if (tbb->shape == QTabBar::RoundedNorth) {
QColor bg = option->palette.color(QPalette::Window); QColor bg = option->palette.color(QPalette::Window);

View File

@ -80,7 +80,7 @@ def beautify_text(raw, syntax):
log.setLevel(logging.WARN) log.setLevel(logging.WARN)
log.raiseExceptions = False log.raiseExceptions = False
parser = CSSParser(loglevel=logging.WARNING, parser = CSSParser(loglevel=logging.WARNING,
# We dont care about @import rules # We don't care about @import rules
fetcher=lambda x: (None, None), log=_css_logger) fetcher=lambda x: (None, None), log=_css_logger)
data = parser.parseString(raw, href='<string>', validate=False) data = parser.parseString(raw, href='<string>', validate=False)
return serialize(data, 'text/css').decode('utf-8') return serialize(data, 'text/css').decode('utf-8')

View File

@ -46,7 +46,7 @@ from polyglot.builtins import codepoint_to_chr, iteritems, itervalues
def string_length(x): def string_length(x):
return strlen(str(x)) # Needed on narrow python builds, as subclasses of unicode dont work return strlen(str(x)) # Needed on narrow python builds, as subclasses of unicode don't work
KEY = Qt.Key.Key_J KEY = Qt.Key.Key_J

View File

@ -25,7 +25,7 @@ JS_IDENT = JS_IDENT_START + '(?:' + JS_IDENT_PART + ')*'
class JavascriptLexer(RegexLexer): class JavascriptLexer(RegexLexer):
''' '''
For JavaScript source code. This is based on the pygments JS highlighter, For JavaScript source code. This is based on the pygments JS highlighter,
bu that does not handle multi-line comments in streaming mode, so we had to but that does not handle multi-line comments in streaming mode, so we had to
modify it. modify it.
''' '''

View File

@ -877,7 +877,7 @@ class TextEdit(PlainTextEdit):
c = self.textCursor() c = self.textCursor()
left = min(c.anchor(), c.position()) left = min(c.anchor(), c.position())
right = max(c.anchor(), c.position()) right = max(c.anchor(), c.position())
# For speed we use QPlainTextEdit's toPlainText as we dont care about # For speed we use QPlainTextEdit's toPlainText as we don't care about
# spaces in this context # spaces in this context
raw = str(QPlainTextEdit.toPlainText(self)) raw = str(QPlainTextEdit.toPlainText(self))
# Make sure the left edge is not within a <> # Make sure the left edge is not within a <>

View File

@ -1346,7 +1346,7 @@ def get_search_function(state):
ans = state['replace'] ans = state['replace']
is_regex = state['mode'] not in ('normal', 'fuzzy') is_regex = state['mode'] not in ('normal', 'fuzzy')
if not is_regex: if not is_regex:
# We dont want backslash escape sequences interpreted in normal mode # We don't want backslash escape sequences interpreted in normal mode
return lambda m: ans return lambda m: ans
if state['mode'] == 'function': if state['mode'] == 'function':
try: try:

View File

@ -862,7 +862,7 @@ class LibraryPage(QWizardPage, LibraryUI):
os.rmdir(dln) os.rmdir(dln)
except Exception: except Exception:
pass pass
# dont leave behind any empty dirs # don't leave behind any empty dirs
for x in self.made_dirs: for x in self.made_dirs:
with suppress(OSError): with suppress(OSError):
os.rmdir(x) os.rmdir(x)

View File

@ -3888,7 +3888,7 @@ class CatalogBuilder:
if zf is not None: if zf is not None:
# Ensure that the read succeeded # Ensure that the read succeeded
# If we failed to open the zip file for reading, # If we failed to open the zip file for reading,
# we dont know if it contained the thumb or not # we don't know if it contained the thumb or not
zf = _open_archive('a') zf = _open_archive('a')
if zf is not None: if zf is not None:
with zf: with zf:

View File

@ -200,7 +200,7 @@ class DigestAuth: # {{{
def validate_request(self, pw, data, log=None): def validate_request(self, pw, data, log=None):
# We should also be checking for replay attacks by using nonce_count, # We should also be checking for replay attacks by using nonce_count,
# however, various HTTP clients, most prominently Firefox dont # however, various HTTP clients, most prominently Firefox don't
# implement nonce-counts correctly, so we cannot do the check. # implement nonce-counts correctly, so we cannot do the check.
# https://bugzil.la/114451 # https://bugzil.la/114451
path = parse_uri(self.uri.encode('utf-8'))[1] path = parse_uri(self.uri.encode('utf-8'))[1]

View File

@ -541,7 +541,7 @@ def get_data_file(ctx, rd, book_id, relpath, library_id):
def strerr(e: Exception): def strerr(e: Exception):
# Dont leak the filepath in the error response # Don't leak the filepath in the error response
if isinstance(e, OSError): if isinstance(e, OSError):
return e.strerror or str(e) return e.strerror or str(e)
return str(e) return str(e)

View File

@ -83,7 +83,7 @@ def parse_uri(uri, parse_query=True, unquote_func=unquote):
try: try:
query = MultiDict.create_from_query_string(qs) query = MultiDict.create_from_query_string(qs)
except Exception: except Exception:
raise HTTPSimpleResponse(http_client.BAD_REQUEST, 'Unparseable query string') raise HTTPSimpleResponse(http_client.BAD_REQUEST, 'Unparsable query string')
else: else:
query = None query = None

View File

@ -256,7 +256,7 @@ class Connection: # {{{
def recv(self, amt): def recv(self, amt):
# If there is data in the read buffer we have to return only that, # If there is data in the read buffer we have to return only that,
# since we dont know if the socket has signalled it is ready for # since we don't know if the socket has signalled it is ready for
# reading # reading
if self.read_buffer.has_data: if self.read_buffer.has_data:
return self.read_buffer.read(amt) return self.read_buffer.read(amt)

View File

@ -233,7 +233,7 @@ class GroupedSearchTerms:
def __init__(self, src): def __init__(self, src):
self.keys = frozenset(src) self.keys = frozenset(src)
self.hash = hash(self.keys) self.hash = hash(self.keys)
# We dont need to store values since this is used as part of a key for # We don't need to store values since this is used as part of a key for
# a cache and if the values have changed the cache will be invalidated # a cache and if the values have changed the cache will be invalidated
# for other reasons anyway (last_modified() will have changed on the # for other reasons anyway (last_modified() will have changed on the
# db) # db)

View File

@ -295,7 +295,7 @@ class Router:
if x: if x:
k, v = x.partition('=')[::2] k, v = x.partition('=')[::2]
if k: if k:
# Since we only set simple hex encoded cookies, we dont # Since we only set simple hex encoded cookies, we don't
# need more sophisticated value parsing # need more sophisticated value parsing
c[k] = v.strip('"') c[k] = v.strip('"')

View File

@ -31,7 +31,7 @@ class ModernHTTPSHandler(HTTPSHandler):
class Browser(B): class Browser(B):
''' '''
A cloneable mechanize browser. Useful for multithreading. The idea is that A clonable mechanize browser. Useful for multithreading. The idea is that
each thread has a browser clone. Every clone uses the same thread safe each thread has a browser clone. Every clone uses the same thread safe
cookie jar. All clones share the same browser configuration. cookie jar. All clones share the same browser configuration.

View File

@ -153,7 +153,7 @@ class WindowsFileCopier:
except OSError as err: except OSError as err:
# Ignore dir not empty errors. Should never happen but we # Ignore dir not empty errors. Should never happen but we
# ignore it as the UNIX semantics are to not delete folders # ignore it as the UNIX semantics are to not delete folders
# during __exit__ anyway and we dont want to leak the handle. # during __exit__ anyway and we don't want to leak the handle.
if err.winerror != winutil.ERROR_DIR_NOT_EMPTY: if err.winerror != winutil.ERROR_DIR_NOT_EMPTY:
raise raise
h.close() h.close()

View File

@ -222,7 +222,7 @@ def case_preserving_open_file(path, mode='wb', mkdir_mode=0o777):
try: try:
candidates = [c for c in os.listdir(cpath) if c.lower() == cl] candidates = [c for c in os.listdir(cpath) if c.lower() == cl]
except: except:
# Dont have permission to do the listdir, assume the case is # Don't have permission to do the listdir, assume the case is
# correct as we have no way to check it. # correct as we have no way to check it.
pass pass
else: else:

View File

@ -315,8 +315,8 @@ class FontScanner(Thread):
self.reload_cache() self.reload_cache()
if isworker: if isworker:
# Dont scan font files in worker processes, use whatever is # Don't scan font files in worker processes, use whatever is
# cached. Font files typically dont change frequently enough to # cached. Font files typically don't change frequently enough to
# justify a rescan in a worker process. # justify a rescan in a worker process.
self.build_families() self.build_families()
return return

View File

@ -192,7 +192,7 @@ class SetGlobalsNode(Node):
class StringCompareNode(Node): class StringCompareNode(Node):
def __init__(self, line_number, operator, left, right): def __init__(self, line_number, operator, left, right):
Node.__init__(self, line_number, 'comparision: ' + operator) Node.__init__(self, line_number, 'comparison: ' + operator)
self.node_type = self.NODE_COMPARE_STRING self.node_type = self.NODE_COMPARE_STRING
self.operator = operator self.operator = operator
self.left = left self.left = left

View File

@ -374,7 +374,7 @@ icu_Collator_contractions(icu_Collator *self, PyObject *args) {
if (pbuf == NULL) { Py_DECREF(ans); ans = NULL; goto end; } if (pbuf == NULL) { Py_DECREF(ans); ans = NULL; goto end; }
PyTuple_SetItem(ans, i, pbuf); PyTuple_SetItem(ans, i, pbuf);
} else { } else {
// Ranges dont make sense for contractions, ignore them // Ranges don't make sense for contractions, ignore them
PyTuple_SetItem(ans, i, Py_None); Py_INCREF(Py_None); PyTuple_SetItem(ans, i, Py_None); Py_INCREF(Py_None);
} }
} }

View File

@ -248,7 +248,7 @@ class INotifyTreeWatcher(INotify):
raise NoSuchDir(f'The dir {base} does not exist') raise NoSuchDir(f'The dir {base} does not exist')
return return
if e.errno == errno.EACCES: if e.errno == errno.EACCES:
# We silently ignore entries for which we dont have permission, # We silently ignore entries for which we don't have permission,
# unless they are the top level dir # unless they are the top level dir
if top_level: if top_level:
raise NoSuchDir(f'You do not have permission to monitor {base}') raise NoSuchDir(f'You do not have permission to monitor {base}')
@ -293,7 +293,7 @@ class INotifyTreeWatcher(INotify):
def process_event(self, wd, mask, cookie, name): def process_event(self, wd, mask, cookie, name):
if wd == -1 and (mask & self.Q_OVERFLOW): if wd == -1 and (mask & self.Q_OVERFLOW):
# We missed some INOTIFY events, so we dont # We missed some INOTIFY events, so we don't
# know the state of any tracked dirs. # know the state of any tracked dirs.
self.watch_tree() self.watch_tree()
self.modified.add(None) self.modified.add(None)

View File

@ -202,7 +202,7 @@ class IP_ADAPTER_ADDRESSES(ctypes.Structure):
('Dhcpv6ClientDuid', ctypes.c_ubyte * MAX_DHCPV6_DUID_LENGTH), ('Dhcpv6ClientDuid', ctypes.c_ubyte * MAX_DHCPV6_DUID_LENGTH),
('Dhcpv6ClientDuidLength', wintypes.ULONG), ('Dhcpv6ClientDuidLength', wintypes.ULONG),
('Dhcpv6Iaid', wintypes.ULONG), ('Dhcpv6Iaid', wintypes.ULONG),
# Vista SP1 and later, so we comment it out as we dont need it # Vista SP1 and later, so we comment it out as we don't need it
# ('FirstDnsSuffix', ctypes.POINTER(IP_ADAPTER_DNS_SUFFIX)), # ('FirstDnsSuffix', ctypes.POINTER(IP_ADAPTER_DNS_SUFFIX)),
] ]

View File

@ -113,8 +113,8 @@ history_length(2000) #value of -1 means no limit
if not os.path.exists(ipydir): if not os.path.exists(ipydir):
os.makedirs(ipydir) os.makedirs(ipydir)
conf = os.path.join(ipydir, 'pyreadline.txt') conf = os.path.join(ipydir, 'pyreadline.txt')
hist = os.path.join(ipydir, 'history.txt') history = os.path.join(ipydir, 'history.txt')
config = config % hist config = config % history
with open(conf, 'wb') as f: with open(conf, 'wb') as f:
f.write(config.encode('utf-8')) f.write(config.encode('utf-8'))
pyreadline.rlmain.config_path = conf pyreadline.rlmain.config_path = conf

View File

@ -743,7 +743,7 @@ class SMTP:
>>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"] >>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"]
>>> msg = '''\\ >>> msg = '''\\
... From: Me@my.org ... From: Me@my.org
... Subject: testin'... ... Subject: testing...
... ...
... This is a test ''' ... This is a test '''
>>> s.sendmail("me@my.org",tolist,msg) >>> s.sendmail("me@my.org",tolist,msg)

View File

@ -293,7 +293,7 @@ def _EndRecData(fpin):
endrec = list(struct.unpack(structEndArchive, recData)) endrec = list(struct.unpack(structEndArchive, recData))
comment = data[start+sizeEndCentDir:] comment = data[start+sizeEndCentDir:]
# check that comment length is correct # check that comment length is correct
# Kovid: Added == 0 check as some zip files apparently dont set this # Kovid: Added == 0 check as some zip files apparently don't set this
if endrec[_ECD_COMMENT_SIZE] == 0 or endrec[_ECD_COMMENT_SIZE] == len(comment): if endrec[_ECD_COMMENT_SIZE] == 0 or endrec[_ECD_COMMENT_SIZE] == len(comment):
# Append the archive comment and start offset # Append the archive comment and start offset
endrec.append(comment) endrec.append(comment)

Some files were not shown because too many files have changed in this diff Show More