mirror of https://github.com/kovidgoyal/calibre.git
synced 2025-11-03 19:17:02 -05:00

Merge branch 'pyproject-add-codespell' of https://github.com/un-pogaz/calibre

commit 8e90212f67
@@ -496,7 +496,7 @@
 :: improved recipes
 - Jot Down
-- Various Russian and Ukranian news sources
+- Various Russian and Ukrainian news sources
 - Nautilus Magazine
 - Süddeutsche Zeitung
 - The India Forum

@@ -974,7 +974,7 @@
 - Fix a regression in 7.0 caused by a regression in Qt that would result in calibre hanging rarely when using the cover browser view

-- [2049992] Fix custom template functions not useable in save to disk templates
+- [2049992] Fix custom template functions not usable in save to disk templates

 - Fix a regression in 7.2 that caused the popup used for editing fields in the book list to be mis-positioned on very wide monitors
@@ -55,10 +55,10 @@ def binary_includes():
             get_dll_path('bz2', 2), j(PREFIX, 'lib', 'libunrar.so'),
             get_dll_path('python' + py_ver, 2), get_dll_path('jbig', 2),

-            # We dont include libstdc++.so as the OpenGL dlls on the target
+            # We don't include libstdc++.so as the OpenGL dlls on the target
             # computer fail to load in the QPA xcb plugin if they were compiled
             # with a newer version of gcc than the one on the build computer.
-            # libstdc++, like glibc is forward compatible and I dont think any
+            # libstdc++, like glibc is forward compatible and I don't think any
             # distros do not have libstdc++.so.6, so it should be safe to leave it out.
             # https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html (The current
             # debian stable libstdc++ is  libstdc++.so.6.0.17)

@@ -13,10 +13,10 @@ entitlements = {
     # MAP_JIT is used by libpcre which is bundled with Qt
     'com.apple.security.cs.allow-jit': True,

-    # v8 and therefore WebEngine need this as they dont use MAP_JIT
+    # v8 and therefore WebEngine need this as they don't use MAP_JIT
     'com.apple.security.cs.allow-unsigned-executable-memory': True,

-    # calibre itself does not use DYLD env vars, but dont know about its
+    # calibre itself does not use DYLD env vars, but don't know about its
     # dependencies.
     'com.apple.security.cs.allow-dyld-environment-variables': True,
@@ -346,7 +346,7 @@ int WINAPI wWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, PWSTR pCmdLine
         return write_bytes(pipe, echo_sz, echo_buf) ? 0 : 1;
     }
 	if (app_uid != NULL) {
-        // dont check return status as failure is not critical
+        // don't check return status as failure is not critical
         set_app_uid(app_uid);
     }

@@ -5,16 +5,16 @@
     <SummaryInformation Keywords="Installer" Description="{app} Installer" Manufacturer="Kovid Goyal" />

     <!-- Disable creation of system restore points on calibre installs. Speeds
-         up the install. We dont need system restore since we dont install any
+         up the install. We don't need system restore since we don't install any
          system DLLs/components anyway (apart from start menu entries) -->
     <Property Id="MSIFASTINSTALL" Value="3" />

     <Media Id="1" Cabinet="{app}.cab" CompressionLevel="{compression}" EmbedCab="yes" />
     <!-- The following line ensures that DLLs are replaced even if
-        their version is the same as before or they dont have versions.
+        their version is the same as before or they don't have versions.
         Microsoft's brain dead installer will otherwise use file dates to
         determine whether to install a file or not. Simply not robust. And
-        since we dont install any system files whatsoever, we can never replace
+        since we don't install any system files whatsoever, we can never replace
         a system file with an older version. This way the calibre install
         should always result in a consistent set of files being present in the
         installation folder, though of course, with Microsoft there are no
@@ -402,7 +402,7 @@ V. General Format of a .ZIP file
          13 - Acorn Risc               14 - VFAT
          15 - alternate MVS            16 - BeOS
          17 - Tandem                   18 - OS/400
-         19 - OS/X (Darwin)            20 thru 255 - unused
+         19 - OS/X (Darwin)            20 through 255 - unused

          The lower byte indicates the ZIP specification version
          (the version of this document) supported by the software

@@ -719,7 +719,7 @@ V. General Format of a .ZIP file
          The Header ID field indicates the type of data that is in
          the following data block.

-          Header ID's of 0 thru 31 are reserved for use by PKWARE.
+          Header ID's of 0 through 31 are reserved for use by PKWARE.
          The remaining ID's can be used by third party vendors for
          proprietary usage.

@@ -1769,7 +1769,7 @@ Example:   0x02, 0x42, 0x01, 0x13
     This would generate the original bit length array of:
     (3, 3, 3, 3, 3, 2, 4, 4)

-    There are 8 codes in this table for the values 0 thru 7.  Using
+    There are 8 codes in this table for the values 0 through 7.  Using
     the algorithm to obtain the Shannon-Fano codes produces:

                                   Reversed     Order     Original

@@ -1909,8 +1909,8 @@ The bit lengths for the literal tables are sent first with the number
 of entries sent described by the 5 bits sent earlier.  There are up
 to 286 literal characters; the first 256 represent the respective 8
 bit character, code 256 represents the End-Of-Block code, the remaining
-29 codes represent copy lengths of 3 thru 258.  There are up to 30
-distance codes representing distances from 1 thru 32k as described
+29 codes represent copy lengths of 3 through 258.  There are up to 30
+distance codes representing distances from 1 through 32k as described
 below.

                              Length Codes

@@ -2221,7 +2221,7 @@ keys, based on random data, to render a plaintext attack on the
 data ineffective.

 Read the 12-byte encryption header into Buffer, in locations
-Buffer(0) thru Buffer(11).
+Buffer(0) through Buffer(11).

 loop for i <- 0 to 11
     C <- buffer(i) ^ decrypt_byte()
@@ -14,7 +14,7 @@ from calibre.utils.config import JSONConfig
 # Remember that this name (i.e. plugins/interface_demo) is also
 # in a global namespace, so make it as unique as possible.
 # You should always prefix your config file name with plugins/,
-# so as to ensure you dont accidentally clobber a calibre config file
+# so as to ensure you don't accidentally clobber a calibre config file
 prefs = JSONConfig('plugins/interface_demo')

 # Set defaults

@@ -140,7 +140,7 @@ class DemoDialog(QDialog):
                 set_metadata(ffile, mi, fmt)
                 ffile.seek(0)
                 # Now replace the file in the calibre library with the updated
-                # file. We dont use add_format_with_hooks as the hooks were
+                # file. We don't use add_format_with_hooks as the hooks were
                 # already run when the file was first added to calibre.
                 db.add_format(book_id, fmt, ffile, run_hooks=False)

@@ -22,7 +22,7 @@ class InterfacePlugin(InterfaceAction):
     name = 'Interface Plugin Demo'

     # Declare the main action associated with this plugin
-    # The keyboard shortcut can be None if you dont want to use a keyboard
+    # The keyboard shortcut can be None if you don't want to use a keyboard
     # shortcut. Remember that currently calibre has no central management for
     # keyboard shortcuts, so try to use an unusual/unused shortcut.
     action_spec = ('Interface Plugin Demo', None,
 pyproject.toml | 119
@@ -74,6 +74,125 @@ docstring-quotes = 'single'
 inline-quotes = 'single'
 multiline-quotes = 'single'

+[tool.codespell]
+# calibre will probably never fully compliant with codespell
+# this setting is only to easily find common typo errors
+# by filtering a great range of false-positives, but not all
+# (if codespell could per-file-ignores words, its be nicer)
+count = false
+summary = false
+quiet-level = 3
+regex = '''\b(?<!&)(?<!&)[\w\-']+(?!&(amp;)?)\b'''
+builtin = [
+    'clear',
+    'rare',
+    'informal',
+    'code',
+]
+ignore-words-list = [
+    "alo",
+    "ans",
+    "clen",
+    "eto",
+    "fo",
+    "nam",
+    "nd",
+    "som",
+    "te",
+    "atLeast",
+    "Implementor",
+    "implementor",
+    "Implementors",
+    "implementors",
+    "missings",
+    "re-use",
+    "re-used",
+    "re-using",
+    "succeded",
+    # code
+    "ws",
+    "ws",
+    "ws",
+    "ws",
+    "dur",
+    "ro",
+    "snd",
+    "ws",
+    "deque",
+    "assertIn",
+    "atEnd",
+    "endcode",
+    "errorString",
+    "FocusIn",
+    "iff",
+    "lets",
+    "lite",
+    "NMAKE",
+    "nmake",
+    "uDate",
+    "UINT",
+    "uInt",
+    "uint",
+    "KeyPair",
+    "Keypair",
+    "keypair",
+    "Referer",
+    "seeked",
+    "sinc",
+    "stdio",
+    "thead",
+]
+uri-ignore-words-list = '*'
+skip = [
+    "*.svg",
+    "*.rcc",
+    "*_ui.py",
+    "./src/calibre/ebooks/rtf2xml/char_set.py",
+    "./src/calibre/ebooks/unihandecode/*",
+    "./src/calibre/ebooks/html_entities.h",
+    "./src/calibre/ebooks/html_entities.py",
+    "./src/calibre/utils/icu_test.py",
+    "./src/calibre/utils/search_query_parser_test.py",
+    "./Changelog.old.txt",
+    "./COPYRIGHT",
+    "./LICENSE",
+    "./LICENSE.rtf",
+    "./session.vim",
+    "./build/*",
+    "./docs/*",
+    "./nbproject/*",
+    "./recipes/*",
+    "./translations/*",
+    "./tags/*",
+    "./manual/generated/*",
+    "./manual/locale/*",
+    "./resources/dictionaries/*",
+    "./resources/localization/*",
+    "./resources/hyphenation/*",
+    "./resources/mathjax/*",
+    "./resources/builtin_recipes.xml",
+    "./resources/changelog.json",
+    "./resources/editor.js",
+    "./resources/editor-functions.json",
+    "./resources/mime.types",
+    "./resources/piper-voices.json",
+    "./resources/stylelint-bundle.min.js",
+    "./resources/user-manual-translation-stats.json",
+    "./resources/template-functions.json",
+    "./resources/viewer.js",
+    "./resources/viewer.html",
+    "./resources/content-server/index-generated.html",
+    "./setup/installer/*",
+    "./setup/pyqt_enums/*",
+    "./setup/lc_data.py",
+    "./setup/linux-installer.py",
+    "./src/css_selectors/*",
+    "./src/polyglot/*",
+    "./src/templite/*",
+    "./src/tinycss/*",
+    "./src/unicode_names/*",
+]
+
 [tool.flynt]
 line-length = 400  # over value to catch every case
 transform-format = false  # don't transform already existing format call
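With this [tool.codespell] table in place, codespell can pick up the configuration directly from pyproject.toml (recent codespell releases read it when TOML support is available), so a whole-tree check needs no extra flags. The commands below are only an illustrative sketch, not something added by this commit:

    pip install codespell            # plus tomli on Python < 3.11 for TOML support
    codespell                        # run from the repository root; uses the regex, skip and ignore lists above
    codespell src/calibre/gui2       # or limit the check to a single sub-tree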
@@ -81,7 +81,7 @@ class BaltimoreSun(BasicNewsRecipe):
         (u'Maryland Weather', u'http://www.baltimoresun.com/news/weather/weather-blog/rss2.0.xml'),
         (u'Second Opinion', u'http://www.baltimoresun.com/news/opinion/second-opinion-blog/rss2.0.xml'),
         (u'Sun Investigates', u'http://www.baltimoresun.com/news/maryland/sun-investigates/rss2.0.xml'),
-        (u'You Dont Say', u'http://www.baltimoresun.com/news/language-blog/rss2.0.xml'),
+        (u"You Don't Say", u'http://www.baltimoresun.com/news/language-blog/rss2.0.xml'),

         # Business Blogs ##
         (u'BaltTech', u'http://www.baltimoresun.com/business/technology/blog/rss2.0.xml'),

@@ -45,7 +45,7 @@ class epw(BasicNewsRecipe):
                 '/styles/freeissue/public', ''
             )
         except Exception:
-            # sometimes they dont add img src
+            # sometimes they don't add img src
             self.cover_url = 'https://www.epw.in/sites/default/files/cache/cover_images/2022/Cover_4June2022_Big.gif'

         feeds = OrderedDict()

@@ -61,7 +61,7 @@ class ESPN(BasicNewsRecipe):
     def get_browser(self):
         br = BasicNewsRecipe.get_browser(self)
         if False and self.username and self.password:
-            # ESPN has changed to a JS based login system, cant be bothered
+            # ESPN has changed to a JS based login system, can't be bothered
             # revering it
             br.set_handle_refresh(False)
             url = ('https://r.espn.go.com/members/v3_1/login')

@@ -71,7 +71,7 @@ class ft(BasicNewsRecipe):
     # def get_browser(self, *args, **kw):
     #     br = super().get_browser(*args, **kw)
     #     if self.username and self.password:
-    #         # ft.com uses a CAPTCHA on its login page so this sadly doesnt work
+    #         # ft.com uses a CAPTCHA on its login page so this sadly doesn't work
     #         br.open('https://accounts.ft.com/login?location=https%3A%2F%2Fwww.ft.com')
     #         br.select_form(id='email-form')
     #         br['email'] = self.username

@@ -77,7 +77,7 @@ class IndependentAustralia(BasicNewsRecipe):
         businessArticles = []
         lifeArticles = []
         australiaArticles = []
-        # Loop thru the articles in all feeds to find articles with base categories in it
+        # Loop through the articles in all feeds to find articles with base categories in it
         for curfeed in feeds:
             delList = []
             for a, curarticle in enumerate(curfeed.articles):
@@ -56,7 +56,7 @@ class TheMiamiHerald(BasicNewsRecipe):
     ]

     def get_browser(self, *a, **kw):
-        # MyClatchy servers dont like the user-agent header, they hang forever
+        # MyClatchy servers don't like the user-agent header, they hang forever
         # when it is present
         br = BasicNewsRecipe.get_browser(self, *a, **kw)
         br.addheaders = [x for x in br.addheaders if x[0].lower() != 'user-agent']

@@ -107,7 +107,7 @@ class SatMagazine(BasicNewsRecipe):
         title_number = 0

-        # Goes thru all the articles one by one and sort them out
+        # Goes through all the articles one by one and sort them out
         for article in articles:

             title = self.tag_to_string(article)

@@ -8,7 +8,7 @@
 (function() {
 "use strict";
-    // wrap up long words that dont fit in the page
+    // wrap up long words that don't fit in the page
     document.body.style.overflowWrap = 'break-word';

     var break_avoid_block_styles = {

@@ -12,7 +12,7 @@
     var settings = SETTINGS;

     function onclick(event) {
-        // We dont want this event to trigger onclick on this element's parent
+        // We don't want this event to trigger onclick on this element's parent
         // block, if any.
         event.stopPropagation();
         var frac = window.pageYOffset/document.body.scrollHeight;
@@ -580,7 +580,7 @@ class Build(Command):
             if iswindows or env is self.windows_cross_env:
                 pre_ld_flags = []
                 if ext.uses_icu:
-                    # windows has its own ICU libs that dont work
+                    # windows has its own ICU libs that don't work
                     pre_ld_flags = elib
                 cmd += pre_ld_flags + env.ldflags + ext.ldflags + elib + xlib + \
                     ['/EXPORT:' + init_symbol_name(ext.name)] + all_objects + ['/OUT:'+dest]

@@ -623,7 +623,7 @@ class Build(Command):
     def build_headless(self):
         from setup.parallel_build import cpu_count
         if iswindows or ishaiku:
-            return  # Dont have headless operation on these platforms
+            return  # Don't have headless operation on these platforms
         from setup.build_environment import CMAKE, sw
         self.info('\n####### Building headless QPA plugin', '#'*7)
         a = absolutize

@@ -57,7 +57,7 @@ def get_dist(base, which, bitness):
 def shutdown_allowed(which, bitness):
     # The ARM64 VM is extremely flakey often booting up to a non-functional
-    # state so dont shut it down as it seems to be more stable once bootup is
+    # state so don't shut it down as it seems to be more stable once boot-up is
     # done.
     return bitness != 'arm64'

@@ -286,7 +286,7 @@ class Translations(POT):  # {{{
     def is_po_file_ok(self, x):
         bname = os.path.splitext(os.path.basename(x))[0]
-        # sr@latin.po is identical to sr.po. And we dont support country
+        # sr@latin.po is identical to sr.po. And we don't support country
         # specific variants except for a few.
         if '_' in bname:
             return bname.partition('_')[0] in ('pt', 'zh', 'bn')
@@ -45,7 +45,7 @@ class InputProfile(Plugin):
     type = _('Input profile')

     name        = 'Default Input Profile'
-    short_name  = 'default'  # Used in the CLI so dont use spaces etc. in it
+    short_name  = 'default'  # Used in the CLI so don't use spaces etc. in it
     description = _('This profile tries to provide sane defaults and is useful '
                     'if you know nothing about the input document.')

@@ -243,7 +243,7 @@ class OutputProfile(Plugin):
     type = _('Output profile')

     name        = 'Default Output Profile'
-    short_name  = 'default'  # Used in the CLI so dont use spaces etc. in it
+    short_name  = 'default'  # Used in the CLI so don't use spaces etc. in it
     description = _('This profile tries to provide sane defaults and is useful '
                     'if you want to produce a document intended to be read at a '
                     'computer or on a range of devices.')

@@ -715,7 +715,7 @@ def patch_metadata_plugins(possibly_updated_plugins):
             if pup is not None:
                 if pup.version > plugin.version and pup.minimum_calibre_version <= numeric_version:
                     patches[i] = pup(None)
-                    # Metadata source plugins dont use initialize() but that
+                    # Metadata source plugins don't use initialize() but that
                     # might change in the future, so be safe.
                     patches[i].initialize()
     for i, pup in iteritems(patches):

@@ -2283,7 +2283,7 @@ class DB:
     def remove_trash_formats_dir_if_empty(self, book_id):
         bdir = os.path.join(self.trash_dir, 'f', str(book_id))
-        if os.path.isdir(bdir) and len(os.listdir(bdir)) <= 1:  # dont count metadata.json
+        if os.path.isdir(bdir) and len(os.listdir(bdir)) <= 1:  # don't count metadata.json
             self.rmtree(bdir)

     def list_trash_entries(self):
@@ -373,7 +373,7 @@ class Cache:
             mi.format_metadata = FormatMetadata(self, book_id, formats)
             good_formats = FormatsList(sorted(formats), mi.format_metadata)
         # These three attributes are returned by the db2 get_metadata(),
-        # however, we dont actually use them anywhere other than templates, so
+        # however, we don't actually use them anywhere other than templates, so
         # they have been removed, to avoid unnecessary overhead. The templates
         # all use _proxy_metadata.
         # mi.book_size   = self._field_for('size', book_id, default_value=0)

@@ -3474,7 +3474,7 @@ class Cache:
                         self._add_extra_files(dest_id, {q: BytesIO(cdata)}, replace=False, auto_rename=True)
                         break

-        for key in self.field_metadata:  # loop thru all defined fields
+        for key in self.field_metadata:  # loop through all defined fields
             fm = self.field_metadata[key]
             if not fm['is_custom']:
                 continue

@@ -119,7 +119,7 @@ class Notes:
     def path_for_resource(self, resource_hash: str) -> str:
         hashalg, digest = resource_hash.split(':', 1)
         prefix = digest[:2]
-        # Cant use colons in filenames on windows safely
+        # Can't use colons in filenames on windows safely
         return os.path.join(self.resources_dir, prefix, f'{hashalg}-{digest}')

     def remove_resources(self, conn, note_id, resources_to_potentially_remove, delete_from_link_table=True):

@@ -135,7 +135,7 @@ class Restore(Thread):
                 tdir = TemporaryDirectory('_rlib', dir=basedir)
                 tdir.__enter__()
             except OSError:
-                # In case we dont have permissions to create directories in the
+                # In case we don't have permissions to create directories in the
                 # parent folder of the src library
                 tdir = TemporaryDirectory('_rlib')

@@ -707,7 +707,7 @@ class Parser(SearchQueryParser):  # {{{
                 continue
             if fm['search_terms'] and x not in {'series_sort', 'id'}:
                 if x not in self.virtual_fields and x != 'uuid':
-                    # We dont search virtual fields because if we do, search
+                    # We don't search virtual fields because if we do, search
                     # caching will not be used
                     all_locs.add(x)
                 field_metadata[x] = fm
@@ -46,8 +46,8 @@ def get_defaults(spec):
 def compare_argspecs(old, new, attr):
-    # We dont compare the names of the non-keyword arguments as they are often
-    # different and they dont affect the usage of the API.
+    # We don't compare the names of the non-keyword arguments as they are often
+    # different and they don't affect the usage of the API.

     ok = len(old.args) == len(new.args) and get_defaults(old) == get_defaults(new)
     if not ok:

@@ -188,7 +188,7 @@ def debug(ioreg_to_tmp=False, buf=None, plugins=None,
                 out(' ')
                 if ioreg_to_tmp:
                     open('/tmp/ioreg.txt', 'w').write(ioreg)
-                    out('Dont forget to send the contents of /tmp/ioreg.txt')
+                    out("Don't forget to send the contents of /tmp/ioreg.txt")
                     out('You can open it with the command: open /tmp/ioreg.txt')
                 else:
                     out(ioreg)

@@ -299,7 +299,7 @@
      DEVICE_FLAG_BROKEN_MTPGETOBJPROPLIST |
      DEVICE_FLAG_PLAYLIST_SPL_V1 },
   // YP-F3 is NOT MTP - USB mass storage
-  // From a rouge .INF file
+  // From a rogue .INF file
   // this device ID seems to have been recycled for:
   // the Samsung SGH-A707 Cingular cellphone
   // the Samsung L760-V cellphone

@@ -922,7 +922,7 @@ def get_usb_info(usbdev, debug=False):  # {{{
     try:
         buf, dd = get_device_descriptor(handle, device_port)
         if dd.idVendor == usbdev.vendor_id and dd.idProduct == usbdev.product_id and dd.bcdDevice == usbdev.bcd:
-            # Dont need to read language since we only care about english names
+            # Don't need to read language since we only care about english names
             # buf, langs = get_device_languages(handle, device_port)
             # print(111, langs)
             for index, name in ((dd.iManufacturer, 'manufacturer'), (dd.iProduct, 'product'), (dd.iSerialNumber, 'serial_number')):

@@ -117,7 +117,7 @@ class HTMLZInput(InputFormatPlugin):
         if opf:
             opf_parsed = OPF(opf, basedir=os.getcwd())
             cover_path = opf_parsed.raster_cover or opf_parsed.cover
-            os.remove(opf)  # dont confuse code that searches for OPF files later on the oeb object will create its own OPF
+            os.remove(opf)  # don't confuse code that searches for OPF files later on the oeb object will create its own OPF
         # Set the cover.
         if cover_path:
             cdata = None
@@ -123,7 +123,7 @@ def read_single_border(parent, edge, XPath, get):
                 pass
         sz = get(elem, 'w:sz')
         if sz is not None:
-            # we dont care about art borders (they are only used for page borders)
+            # we don't care about art borders (they are only used for page borders)
             try:
                 width = min(96, max(2, float(sz))) / 8
             except (ValueError, TypeError):

@@ -33,7 +33,7 @@ def read_text_border(parent, dest, XPath, get):
                 pass
         sz = get(elem, 'w:sz')
         if sz is not None:
-            # we dont care about art borders (they are only used for page borders)
+            # we don't care about art borders (they are only used for page borders)
             try:
                 # A border of less than 1pt is not rendered by WebKit
                 border_width = min(96, max(8, float(sz))) / 8

@@ -520,7 +520,7 @@ class Convert:
             if float_spec is None and is_float:
                 float_spec = FloatSpec(self.docx.namespace, html_tag, tag_style)

-            if display in {'inline', 'inline-block'} or tagname == 'br':  # <br> has display:block but we dont want to start a new paragraph
+            if display in {'inline', 'inline-block'} or tagname == 'br':  # <br> has display:block but we don't want to start a new paragraph
                 if is_float and float_spec.is_dropcaps:
                     self.add_block_tag(tagname, html_tag, tag_style, stylizer, float_spec=float_spec)
                     float_spec = None

@@ -539,7 +539,7 @@ class Convert:
                     self.blocks.start_new_table(html_tag, tag_style)
             else:
                 if tagname == 'img' and is_float:
-                    # Image is floating so dont start a new paragraph for it
+                    # Image is floating so don't start a new paragraph for it
                     self.add_inline_tag(tagname, html_tag, tag_style, stylizer)
                 else:
                     if tagname == 'hr':

@@ -9,7 +9,7 @@ import regex
 class Parser:
     ''' See epubcfi.ebnf for the specification that this parser tries to
-    follow. I have implemented it manually, since I dont want to depend on
+    follow. I have implemented it manually, since I don't want to depend on
     grako, and the grammar is pretty simple. This parser is thread-safe, i.e.
     it can be used from multiple threads simultaneously. '''

@@ -580,7 +580,7 @@ class LitFile:
             offset, size = u32(piece), int32(piece[8:])
             piece = self.read_raw(offset, size)
             if i == 0:
-                continue  # Dont need this piece
+                continue  # Don't need this piece
             elif i == 1:
                 if u32(piece[8:])  != self.entry_chunklen or \
                    u32(piece[12:]) != self.entry_unknown:
@@ -814,7 +814,7 @@ class HTMLConverter:
         collapse_whitespace = 'white-space' not in css or css['white-space'] != 'pre'
         if self.process_alignment(css) and collapse_whitespace:
-            # Dont want leading blanks in a new paragraph
+            # Don't want leading blanks in a new paragraph
             src = src.lstrip()

         def append_text(src):

@@ -51,7 +51,7 @@ def read_variable_len_data(data, header):
         header['tagx_block_size'] = 0
     trailing_bytes = data[idxt_offset+idxt_size:]
     if trailing_bytes.rstrip(b'\0'):
-        raise ValueError('Traling bytes after last IDXT entry: {!r}'.format(trailing_bytes.rstrip(b'\0')))
+        raise ValueError('Trailing bytes after last IDXT entry: {!r}'.format(trailing_bytes.rstrip(b'\0')))
     header['indices'] = indices

@@ -637,11 +637,11 @@ class TBSIndexing:  # {{{
                 ai, extra, consumed = decode_tbs(byts)
                 byts = byts[consumed:]
                 if extra.get(0b0010, None) is not None:
-                    raise ValueError('Dont know how to interpret flag 0b0010'
+                    raise ValueError("Don't know how to interpret flag 0b0010"
                             ' while reading section transitions')
                 if extra.get(0b1000, None) is not None:
                     if len(extra) > 1:
-                        raise ValueError('Dont know how to interpret flags'
+                        raise ValueError("Don't know how to interpret flags"
                                 f' {extra!r} while reading section transitions')
                     nsi = self.get_index(psi.index+1)
                     ans.append(

@@ -675,7 +675,7 @@ class TBSIndexing:  # {{{
             si, extra, consumed = decode_tbs(byts)
             byts = byts[consumed:]
             if len(extra) > 1 or 0b0010 in extra or 0b1000 in extra:
-                raise ValueError(f'Dont know how to interpret flags {extra!r}'
+                raise ValueError(f"Don't know how to interpret flags {extra!r}"
                         ' when reading starting section')
             si = self.get_index(si)
             ans.append('The section at the start of this record is:'

@@ -55,7 +55,7 @@ def parse_indx_header(data):
     ans['ordt_map'] = ''

     if ordt1 > 0 and data[ordt1:ordt1+4] == b'ORDT':
-        # I dont know what this is, but using it seems to be unnecessary, so
+        # I don't know what this is, but using it seems to be unnecessary, so
         # just leave it as the raw bytestring
         ans['ordt1_raw'] = data[ordt1+4:ordt1+4+ans['oentries']]
     if ordt2 > 0 and data[ordt2:ordt2+4] == b'ORDT':
@@ -221,7 +221,7 @@ class SecondaryIndexEntry(IndexEntry):
         tag = self.INDEX_MAP[index]

         # The values for this index entry
-        # I dont know what the 5 means, it is not the number of entries
+        # I don't know what the 5 means, it is not the number of entries
         self.secondary = [5 if tag == min(
             itervalues(self.INDEX_MAP)) else 0, 0, tag]

@@ -20,7 +20,7 @@ from calibre.ebooks.mobi.writer2.serializer import Serializer
 from calibre.utils.filenames import ascii_filename
 from polyglot.builtins import iteritems

-# Disabled as I dont care about uncrossable breaks
+# Disabled as I don't care about uncrossable breaks
 WRITE_UNCROSSABLE_BREAKS = False
 NULL_INDEX = 0xffffffff

@@ -25,7 +25,7 @@ def process_jpegs_for_amazon(data: bytes) -> bytes:
     img = Image.open(BytesIO(data))
     if img.format == 'JPEG':
         # Amazon's MOBI renderer can't render JPEG images without JFIF metadata
-        # and images with EXIF data dont get displayed on the cover screen
+        # and images with EXIF data don't get displayed on the cover screen
         changed = not img.info
         has_exif = False
         if hasattr(img, 'getexif'):

@@ -240,7 +240,7 @@ class Serializer:
                 if tocref.klass == 'periodical':
                     # This is a section node.
                     # For periodical tocs, the section urls are like r'feed_\d+/index.html'
-                    # We dont want to point to the start of the first article
+                    # We don't want to point to the start of the first article
                     # so we change the href.
                     itemhref = re.sub(r'article_\d+/', '', itemhref)
                 self.href_offsets[itemhref].append(buf.tell())

@@ -246,7 +246,7 @@ class SkelIndex(Index):
     def __init__(self, skel_table):
         self.entries = [
                 (s.name, {
-                    # Dont ask me why these entries have to be repeated twice
+                    # Don't ask me why these entries have to be repeated twice
                     'chunk_count':(s.chunk_count, s.chunk_count),
                     'geometry':(s.start_pos, s.length, s.start_pos, s.length),
                     }) for s in skel_table
@@ -387,7 +387,7 @@ if __name__ == '__main__':
     subprocess.check_call(['ebook-convert', src, '.epub', '--level1-toc', '//h:p', '--no-default-epub-cover', '--flow-size', '1000000'])
     subprocess.check_call(['ebook-convert', src, '.azw3', '--level1-toc', '//h:p', '--no-inline-toc', '--extract-to=x'])
-    subprocess.call(['kindlegen', 'index.epub'])  # kindlegen exit code is not 0 as we dont have a cover
+    subprocess.call(['kindlegen', 'index.epub'])  # kindlegen exit code is not 0 as we don't have a cover
     subprocess.check_call(['calibre-debug', 'index.mobi'])

     from calibre.gui2.tweak_book.diff.main import main

@@ -500,7 +500,7 @@ class Container(ContainerBase):  # {{{
         # spec requires all text including filenames to be in NFC form.
         # The proper fix is to implement a VFS that maps between
         # canonical names and their file system representation, however,
-        # I dont have the time for that now. Note that the container
+        # I don't have the time for that now. Note that the container
         # ensures that all text files are normalized to NFC when
         # decoding them anyway, so there should be no mismatch between
         # names in the text and NFC canonical file names.

@@ -1481,7 +1481,7 @@ def opf_to_azw3(opf, outpath, container):
         def _parse_css(self, data):
             # The default CSS parser used by oeb.base inserts the h namespace
-            # and resolves all @import rules. We dont want that.
+            # and resolves all @import rules. We don't want that.
             return container.parse_css(data)

     def specialize(oeb):

@@ -131,8 +131,8 @@ def filter_by_weight(fonts, val):
 def find_matching_font(fonts, weight='normal', style='normal', stretch='normal'):
     # See https://www.w3.org/TR/css-fonts-3/#font-style-matching
-    # We dont implement the unicode character range testing
-    # We also dont implement bolder, lighter
+    # We don't implement the unicode character range testing
+    # We also don't implement bolder, lighter
     for f, q in ((filter_by_stretch, stretch), (filter_by_style, style), (filter_by_weight, weight)):
         fonts = f(fonts, q)
         if len(fonts) == 1:

@@ -81,7 +81,7 @@ def pretty_opf(root):
         try:
             children = sorted(manifest, key=manifest_key)
         except AttributeError:
-            continue  # There are comments so dont sort since that would mess up the comments
+            continue  # There are comments so don't sort since that would mess up the comments
         for x in reversed(children):
             manifest.insert(0, x)

@@ -101,7 +101,7 @@ div#book-inner {{ margin-top: 0; margin-bottom: 0; }}</style><script type="text/
             # encoding quirks
             '<p>A\xa0nbsp; ':
             '<p><span class="koboSpan" id="kobo.1.1">A nbsp; </span></p>',
-            '<div><script>1 < 2 & 3</script>':  # escaping with cdata note that kepubify doesnt do this
+            '<div><script>1 < 2 & 3</script>':  # escaping with cdata note that kepubify doesn't do this
             '<div><script><![CDATA[1 < 2 & 3]]></script></div>',

             # CSS filtering
@@ -232,7 +232,7 @@ def parse_css(data, fname='<string>', is_declaration=False, decode=None, log_lev
     if css_preprocessor is not None:
         data = css_preprocessor(data)
     parser = CSSParser(loglevel=log_level,
-                        # We dont care about @import rules
+                        # We don't care about @import rules
                         fetcher=lambda x: (None, None), log=_css_logger)
     if is_declaration:
         data = parser.parseStyle(data, validate=False)

@@ -84,7 +84,7 @@ class CoverManager:
     def default_cover(self):
         '''
-        Create a generic cover for books that dont have a cover
+        Create a generic cover for books that don't have a cover
         '''
         if self.no_default_cover:
             return None

@@ -141,7 +141,7 @@ class EmbedFonts:
                 for sel in rule.selectorList:
                     sel = sel.selectorText
                     if sel and sel.startswith('.'):
-                        # We dont care about pseudo-selectors as the worst that
+                        # We don't care about pseudo-selectors as the worst that
                         # can happen is some extra characters will remain in
                         # the font
                         sel = sel.partition(':')[0]

@@ -144,7 +144,7 @@ class MergeMetadata:
             self.oeb.guide.remove('cover')
             self.oeb.guide.remove('titlepage')
         elif self.oeb.plumber_output_format in {'mobi', 'azw3'} and old_cover is not None:
-            # The amazon formats dont support html cover pages, so remove them
+            # The amazon formats don't support html cover pages, so remove them
             # even if no cover was specified.
             self.oeb.guide.remove('titlepage')
         do_remove_old_cover = False

@@ -214,7 +214,7 @@ class SubsetFonts:
                 for sel in rule.selectorList:
                     sel = sel.selectorText
                     if sel and sel.startswith('.'):
-                        # We dont care about pseudo-selectors as the worst that
+                        # We don't care about pseudo-selectors as the worst that
                         # can happen is some extra characters will remain in
                         # the font
                         sel = sel.partition(':')[0]
@@ -836,7 +836,7 @@ def fonts_are_identical(fonts):
 def merge_font_files(fonts, log):
     # As of Qt 5.15.1 Chromium has switched to harfbuzz and dropped sfntly. It
-    # now produces font descriptors whose W arrays dont match the glyph width
+    # now produces font descriptors whose W arrays don't match the glyph width
     # information from the hhea table, in contravention of the PDF spec. So
     # we can no longer merge font descriptors, all we can do is merge the
     # actual sfnt data streams into a single stream and subset it to contain

@@ -1013,7 +1013,7 @@ def add_header_footer(manager, opts, pdf_doc, container, page_number_display_map
         toplevel_toc_map = stack_to_map(create_toc_stack(tc()))
         toplevel_pagenum_map, toplevel_pages_map = page_counts_map(tc())

-    dpi = 96  # dont know how to query Qt for this, seems to be the same on all platforms
+    dpi = 96  # don't know how to query Qt for this, seems to be the same on all platforms
     def pt_to_px(pt): return int(pt * dpi / 72)

     def create_container(page_num, margins):

@@ -732,21 +732,21 @@ class Region:
 class Page:

     def __init__(self, page, font_map, opts, log, idc):
-        def text_cmp(frst, secnd):
+        def text_cmp(first, second):
             # Compare 2 text objects.
             # Order by line (top/bottom) then left
-            if (frst.top <= secnd.top and frst.bottom >= secnd.bottom-BOTTOM_FACTOR) \
-              or (secnd.top <= frst.top and secnd.bottom >= frst.bottom-BOTTOM_FACTOR):
+            if (first.top <= second.top and first.bottom >= second.bottom-BOTTOM_FACTOR) \
+              or (second.top <= first.top and second.bottom >= first.bottom-BOTTOM_FACTOR):
                 # Overlap = same line
-                if frst.left < secnd.left:
+                if first.left < second.left:
                     return -1
-                elif frst.left == secnd.left:
+                elif first.left == second.left:
                     return 0
                 return 1
             # Different line so sort into line number
-            if frst.bottom < secnd.bottom:
+            if first.bottom < second.bottom:
                 return -1
-            elif frst.bottom == secnd.bottom:
+            elif first.bottom == second.bottom:
                 return 0
             return 1
@@ -121,7 +121,7 @@ class ListNumbers:
             return 'ordered'
         # sys.stderr.write('module is list_numbers\n')
        # sys.stderr.write('method is __determine type\n')
-        # sys.stderr.write('Couldn\'t get type of list\n')
+        # sys.stderr.write("Couldn't get type of list\n")
         # must be some type of ordered list -- just a guess!
         return 'unordered'

@@ -1628,7 +1628,7 @@ def ensure_app(headless=True):
             # unhandled python exception in a slot or virtual method. Since ensure_app()
             # is used in worker processes for background work like rendering html
             # or running a headless browser, we circumvent this as I really
-            # dont feel like going through all the code and making sure no
+            # don't feel like going through all the code and making sure no
             # unhandled exceptions ever occur. All the actual GUI apps already
             # override sys.excepthook with a proper error handler.
             sys.excepthook = simple_excepthook

@@ -1776,7 +1776,7 @@ def raise_and_focus(self: QWidget) -> None:
 def raise_without_focus(self: QWidget) -> None:
     if QApplication.instance().platformName() == 'wayland':
-        # On fucking Wayland, we cant raise a dialog without also giving it
+        # On fucking Wayland, we can't raise a dialog without also giving it
         # keyboard focus. What a joke.
         self.raise_and_focus()
     else:

@@ -269,7 +269,7 @@ class InterfaceAction(QObject):
                 else:
                     self.shortcut_action_for_context_menu = shortcut_action
                     if ismacos:
-                        # In Qt 5 keyboard shortcuts dont work unless the
+                        # In Qt 5 keyboard shortcuts don't work unless the
                         # action is explicitly added to the main window
                         self.gui.addAction(shortcut_action)

@@ -338,7 +338,7 @@ class InterfaceAction(QObject):
                 shortcut_name, default_keys=keys,
                 action=ac, description=description, group=self.action_spec[0],
                 persist_shortcut=persist_shortcut)
-            # In Qt 5 keyboard shortcuts dont work unless the
+            # In Qt 5 keyboard shortcuts don't work unless the
             # action is explicitly added to the main window and on OSX and
             # Unity since the menu might be exported, the shortcuts won't work
             self.gui.addAction(ac)
@@ -288,10 +288,10 @@ class AutoAdder(QObject):
         if duplicates:
             paths, formats, metadata = [], [], []
-            for p, f, mis in duplicates:
+            for p, f, mi in duplicates:
                 paths.extend(p)
                 formats.extend(f)
-                metadata.extend(mis)
+                metadata.extend(mi)
             dups = [(mic, mic.cover, [p]) for mic, p in zip(metadata, paths)]
             d = DuplicatesQuestion(m.db, dups, parent=gui)
             dups = tuple(d.duplicates)

@@ -485,7 +485,7 @@ class CentralContainer(QWidget):
     def read_settings(self):
         before = self.serialized_settings()
-        # sadly self.size() doesnt always return sensible values so look at
+        # sadly self.size() doesn't always return sensible values so look at
         # the size of the main window which works perfectly for width, not so
         # perfectly for height
         sz = self.size()

@@ -520,7 +520,7 @@ class CoverSettingsWidget(QWidget):
     def restore_defaults(self):
         defaults = self.original_prefs.defaults.copy()
-        # Dont delete custom color themes when restoring defaults
+        # Don't delete custom color themes when restoring defaults
         defaults['color_themes'] = self.custom_colors
         self.apply_prefs(defaults)
         self.update_preview()

@@ -287,7 +287,7 @@ class NoteEditorWidget(EditorWidget):
     def do_insert_image(self):
         # See https://bugreports.qt.io/browse/QTBUG-118537
-        # for why we cant have a nice margin for floating images
+        # for why we can't have a nice margin for floating images
         d = AskImage(self.images, self.db)
         if d.exec() == QDialog.DialogCode.Accepted and d.current_digest:
             ir = self.images[d.current_digest]
@ -153,21 +153,21 @@ def send_mails(jobnames, callback, attachments, to_s, subjects,
            attachments, to_s, subjects, texts, attachment_names):
        description = _('Email %(name)s to %(to)s') % dict(name=name, to=to)
        if isinstance(to, str) and (is_for_kindle(to) or '@pbsync.com' in to):
            # The PocketBook service is a total joke. It cant handle
            # The PocketBook service is a total joke. It can't handle
            # non-ascii, filenames that are long enough to be split up, commas, and
            # the good lord alone knows what else. So use a random filename
            # containing only 22 English letters and numbers
            #
            # And since this email is only going to be processed by automated
            # services, make the subject+text random too as at least the amazon
            # service cant handle non-ascii text. I dont know what baboons
            # service can't handle non-ascii text. I don't know what baboons
            # these companies employ to write their code. It's the height of
            # irony that they are called "tech" companies.
            # https://bugs.launchpad.net/calibre/+bug/1989282
            from calibre.utils.short_uuid import uuid4
            if not is_for_kindle(to):
                # Amazon nowadays reads metadata from attachment filename instead of
                # file internal metadata so dont nuke the filename.
                # file internal metadata so don't nuke the filename.
                # https://www.mobileread.com/forums/showthread.php?t=349290
                aname = f'{uuid4()}.' + aname.rpartition('.')[-1]
            subject = uuid4()

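The idea in the hunk above is to keep only the file extension and replace the rest of the attachment name with a short random ASCII token, so fragile delivery services never see non-ASCII or overly long names. A minimal standard-library sketch of that idea (calibre itself uses its short_uuid helper; the function below is illustrative only):

    import secrets
    import string

    def delivery_safe_name(original_name: str) -> str:
        # Keep the extension so the receiving service can still detect the
        # format; replace everything else with 22 random letters and digits.
        alphabet = string.ascii_letters + string.digits
        token = ''.join(secrets.choice(alphabet) for _ in range(22))
        return f"{token}.{original_name.rpartition('.')[-1]}"
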
@ -36,7 +36,7 @@ class FTSDialog(Dialog):
        l = QVBoxLayout(self)
        self.fat_warning = fw = QLabel(
            f'<span style="color:red; font-weight: bold">{_("WARNING")}:</span> ' +
            _('The calibre library is on a FAT drive, indexing more than a few hundred books wont work.') +
            _("The calibre library is on a FAT drive, indexing more than a few hundred books won't work.") +
            f' <a href="xxx" style="text-decoration: none">{_("Learn more")}</a>')
        # fw.setVisible(False)
        fw.linkActivated.connect(self.show_fat_details)

@ -359,7 +359,7 @@ class VLTabs(QTabBar):  # {{{
    def lock_tab(self):
        gprefs['vl_tabs_closable'] = False
        self.setTabsClosable(False)
        # Workaround for Qt bug where it doesnt recalculate the tab size after locking
        # Workaround for Qt bug where it doesn't recalculate the tab size after locking
        for idx in range(self.count()):
            self.setTabButton(idx, QTabBar.ButtonPosition.RightSide, None)
            self.setTabButton(idx, QTabBar.ButtonPosition.LeftSide, None)
@ -392,7 +392,7 @@ class VLTabs(QTabBar):  # {{{

    def tab_close(self, index):
        vl = str(self.tabData(index) or '')
        if vl:  # Dont allow closing the All Books tab
        if vl:  # Don't allow closing the All Books tab
            self.current_db.new_api.set_pref('virt_libs_hidden', list(
                self.current_db.new_api.pref('virt_libs_hidden', ())) + [vl])
            self.removeTab(index)

@ -646,7 +646,7 @@ class CoverDelegate(QStyledItemDelegate):
                if self.title_height != 0:
                    self.paint_title(painter, trect, db, book_id)
            if self.emblem_size > 0:
                # We dont draw embossed emblems as the ondevice/marked emblems are drawn in the gutter
                # We don't draw embossed emblems as the ondevice/marked emblems are drawn in the gutter
                return
            if marked:
                try:

@ -1163,7 +1163,7 @@ class GridView(QListView):
            self.thumbnail_cache.set_database(newdb)
            try:
                # Use a timeout so that if, for some reason, the render thread
                # gets stuck, we dont deadlock, future covers won't get
                # gets stuck, we don't deadlock, future covers won't get
                # rendered, but this is better than a deadlock
                join_with_timeout(self.delegate.render_queue)
            except RuntimeError:

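join_with_timeout here is a calibre utility; the sketch below shows how such a bounded Queue.join() can be written with just the standard library, raising RuntimeError on timeout so the caller can recover, as the hunk above does:

    import queue
    import time

    def join_with_timeout(q: queue.Queue, timeout: float = 2.0) -> None:
        # Like Queue.join(), but give up after `timeout` seconds instead of
        # blocking forever when a worker thread is stuck.
        deadline = time.monotonic() + timeout
        with q.all_tasks_done:
            while q.unfinished_tasks:
                remaining = deadline - time.monotonic()
                if remaining <= 0:
                    raise RuntimeError('Waiting for queue to empty timed out')
                q.all_tasks_done.wait(remaining)
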
@ -243,7 +243,7 @@ class BooksModel(QAbstractTableModel):  # {{{
        self.bool_yes_icon = QIcon.ic('ok.png').pixmap(icon_height)
        self.bool_no_icon = QIcon.ic('list_remove.png').pixmap(icon_height)
        self.bool_blank_icon = QIcon.ic('blank.png').pixmap(icon_height)
        # Qt auto-scales marked icon correctly, so we dont need to do it (and
        # Qt auto-scales marked icon correctly, so we don't need to do it (and
        # remember that the cover grid view needs a larger version of the icon,
        # anyway)
        self.marked_icon = QIcon.ic('marked.png')

@ -164,7 +164,7 @@ class PreserveViewState:  # {{{
    '''
    Save the set of selected books at enter time. If at exit time there are no
    selected books, restore the previous selection, the previous current index
    and dont affect the scroll position.
    and don't affect the scroll position.
    '''

    def __init__(self, view, preserve_hpos=True, preserve_vpos=True, require_selected_ids=True):

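The docstring describes a save-on-enter, conditionally-restore-on-exit pattern. A generic sketch of the same idea as a context manager (the class and the selected_ids/select_ids methods below are hypothetical, not calibre's API):

    class PreserveSelection:
        # Remember the selection on entry; if nothing is selected on exit,
        # put the old selection back.
        def __init__(self, view):
            self.view = view
            self.saved = []

        def __enter__(self):
            self.saved = list(self.view.selected_ids())
            return self

        def __exit__(self, *exc_info):
            if not self.view.selected_ids():
                self.view.select_ids(self.saved)
            return False
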
@ -268,7 +268,7 @@ class MarkdownHighlighter(QSyntaxHighlighter):
            elif emphasis:
                self.setFormat(self.offset+offset+ match.start(), match.end() - match.start(), self.MARKDOWN_KWS_FORMAT['Italic'])

        def recusive(match, extra_offset, bold, emphasis):
        def recursive(match, extra_offset, bold, emphasis):
            apply(match, bold, emphasis)
            if bold and emphasis:
                return  # max deep => return, do not process extra Bold/Italic
@ -278,17 +278,17 @@ class MarkdownHighlighter(QSyntaxHighlighter):
            self._highlightBoldEmphasis(sub_txt, cursor, bf, sub_offset, bold, emphasis)

        for mo in re.finditer(self.MARKDOWN_KEYS_REGEX['Italic'],text):
            recusive(mo, 1, bold, True)
            recursive(mo, 1, bold, True)
            found = True
        for mo in re.finditer(self.MARKDOWN_KEYS_REGEX['uItalic'],text):
            recusive(mo, 1, bold, True)
            recursive(mo, 1, bold, True)
            found = True

        for mo in re.finditer(self.MARKDOWN_KEYS_REGEX['Bold'],text):
            recusive(mo, 2, True, emphasis)
            recursive(mo, 2, True, emphasis)
            found = True
        for mo in re.finditer(self.MARKDOWN_KEYS_REGEX['uBold'],text):
            recusive(mo, 2, True, emphasis)
            recursive(mo, 2, True, emphasis)
            found = True

        for mo in re.finditer(self.MARKDOWN_KEYS_REGEX['BoldItalic'],text):

@ -253,7 +253,7 @@ def get_notifier(systray=None):
            if not ans.ok:
                ans = DummyNotifier()
        else:
            # We dont use Qt's systray based notifier as it uses Growl and is
            # We don't use Qt's systray based notifier as it uses Growl and is
            # broken with different versions of Growl
            ans = DummyNotifier()
    elif iswindows:

@ -965,7 +965,7 @@ QRect PictureFlowPrivate::renderSlide(const SlideInfo &slide, int alpha, int col
    if(column < 0)
      continue;
    if (preserveAspectRatio && !slide_moving_to_center) {
        // We dont want a black border at the edge of narrow images when the images are in the left or right stacks
        // We don't want a black border at the edge of narrow images when the images are in the left or right stacks
        if (slide.slideIndex < centerIndex) {
            column = qMin(column + img_offset, sw - 1);
        } else if (slide.slideIndex == centerIndex) {

@ -290,7 +290,7 @@ void CalibreStyle::drawPrimitive(PrimitiveElement element, const QStyleOption *
    const QStyleOptionViewItem *vopt = NULL;
    switch (element) {
        case PE_FrameTabBarBase: // {{{
            // dont draw line below tabs in dark mode as it looks bad
            // don't draw line below tabs in dark mode as it looks bad
            if (const QStyleOptionTabBarBase *tbb = qstyleoption_cast<const QStyleOptionTabBarBase *>(option)) {
                if (tbb->shape == QTabBar::RoundedNorth) {
                    QColor bg = option->palette.color(QPalette::Window);

@ -80,7 +80,7 @@ def beautify_text(raw, syntax):
        log.setLevel(logging.WARN)
        log.raiseExceptions = False
        parser = CSSParser(loglevel=logging.WARNING,
                           # We dont care about @import rules
                           # We don't care about @import rules
                           fetcher=lambda x: (None, None), log=_css_logger)
        data = parser.parseString(raw, href='<string>', validate=False)
        return serialize(data, 'text/css').decode('utf-8')

@ -46,7 +46,7 @@ from polyglot.builtins import codepoint_to_chr, iteritems, itervalues


def string_length(x):
    return strlen(str(x))  # Needed on narrow python builds, as subclasses of unicode dont work
    return strlen(str(x))  # Needed on narrow python builds, as subclasses of unicode don't work


KEY = Qt.Key.Key_J

@ -25,7 +25,7 @@ JS_IDENT = JS_IDENT_START + '(?:' + JS_IDENT_PART + ')*'
class JavascriptLexer(RegexLexer):
    '''
    For JavaScript source code. This is based on the pygments JS highlighter,
    bu that does not handle multi-line comments in streaming mode, so we had to
    but that does not handle multi-line comments in streaming mode, so we had to
    modify it.
    '''

@ -877,7 +877,7 @@ class TextEdit(PlainTextEdit):
        c = self.textCursor()
        left = min(c.anchor(), c.position())
        right = max(c.anchor(), c.position())
        # For speed we use QPlainTextEdit's toPlainText as we dont care about
        # For speed we use QPlainTextEdit's toPlainText as we don't care about
        # spaces in this context
        raw = str(QPlainTextEdit.toPlainText(self))
        # Make sure the left edge is not within a <>

@ -1346,7 +1346,7 @@ def get_search_function(state):
    ans = state['replace']
    is_regex = state['mode'] not in ('normal', 'fuzzy')
    if not is_regex:
        # We dont want backslash escape sequences interpreted in normal mode
        # We don't want backslash escape sequences interpreted in normal mode
        return lambda m: ans
    if state['mode'] == 'function':
        try:

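Returning a callable instead of a plain string is what stops re.sub() from interpreting backslash escapes in the replacement text. A standalone illustration:

    import re

    replacement = r'total: \1 \\ units'
    text = 'count 42'
    # String replacements have '\1' and '\\' expanded by re.sub() ...
    print(re.sub(r'(\d+)', replacement, text))            # count total: 42 \ units
    # ... while a callable's return value is used verbatim, escapes and all.
    print(re.sub(r'(\d+)', lambda m: replacement, text))  # count total: \1 \\ units
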
@ -862,7 +862,7 @@ class LibraryPage(QWizardPage, LibraryUI):
                os.rmdir(dln)
        except Exception:
            pass
        # dont leave behind any empty dirs
        # don't leave behind any empty dirs
        for x in self.made_dirs:
            with suppress(OSError):
                os.rmdir(x)

@ -3888,7 +3888,7 @@ class CatalogBuilder:
            if zf is not None:
                # Ensure that the read succeeded
                # If we failed to open the zip file for reading,
                # we dont know if it contained the thumb or not
                # we don't know if it contained the thumb or not
                zf = _open_archive('a')
                if zf is not None:
                    with zf:

@ -200,7 +200,7 @@ class DigestAuth:  # {{{

    def validate_request(self, pw, data, log=None):
        # We should also be checking for replay attacks by using nonce_count,
        # however, various HTTP clients, most prominently Firefox dont
        # however, various HTTP clients, most prominently Firefox don't
        # implement nonce-counts correctly, so we cannot do the check.
        # https://bugzil.la/114451
        path = parse_uri(self.uri.encode('utf-8'))[1]

@ -541,7 +541,7 @@ def get_data_file(ctx, rd, book_id, relpath, library_id):


def strerr(e: Exception):
    # Dont leak the filepath in the error response
    # Don't leak the filepath in the error response
    if isinstance(e, OSError):
        return e.strerror or str(e)
    return str(e)

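The point of preferring OSError.strerror is that str() on an OSError usually embeds the offending filename, which an error response should not reveal. For example (the path is made up):

    try:
        open('/srv/calibre/private/metadata.db')
    except OSError as e:
        print(str(e))      # [Errno 2] No such file or directory: '/srv/calibre/private/metadata.db'
        print(e.strerror)  # No such file or directory
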
@ -83,7 +83,7 @@ def parse_uri(uri, parse_query=True, unquote_func=unquote):
        try:
            query = MultiDict.create_from_query_string(qs)
        except Exception:
            raise HTTPSimpleResponse(http_client.BAD_REQUEST, 'Unparseable query string')
            raise HTTPSimpleResponse(http_client.BAD_REQUEST, 'Unparsable query string')
    else:
        query = None

@ -256,7 +256,7 @@ class Connection:  # {{{

    def recv(self, amt):
        # If there is data in the read buffer we have to return only that,
        # since we dont know if the socket has signalled it is ready for
        # since we don't know if the socket has signalled it is ready for
        # reading
        if self.read_buffer.has_data:
            return self.read_buffer.read(amt)

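The pattern in this hunk, serving already-buffered bytes before touching the socket again, looks like this in isolation (BufferedConnection is an illustrative stand-in, not calibre's Connection class):

    class BufferedConnection:
        def __init__(self, sock):
            self.sock = sock
            self.read_buffer = b''

        def recv(self, amt):
            # Drain the internal buffer first: readiness was signalled for
            # these bytes, so another socket read here could block.
            if self.read_buffer:
                data, self.read_buffer = self.read_buffer[:amt], self.read_buffer[amt:]
                return data
            return self.sock.recv(amt)
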
@ -233,7 +233,7 @@ class GroupedSearchTerms:
    def __init__(self, src):
        self.keys = frozenset(src)
        self.hash = hash(self.keys)
        # We dont need to store values since this is used as part of a key for
        # We don't need to store values since this is used as part of a key for
        # a cache and if the values have changed the cache will be invalidated
        # for other reasons anyway (last_modified() will have changed on the
        # db)

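Hashing a frozenset of the keys is a common way to let such an object participate in a dictionary cache key regardless of insertion order. A self-contained illustration (the class name is hypothetical):

    class TermSetKey:
        # Equality and hashing depend only on the set of term names, which is
        # enough when the cache is invalidated wholesale on any value change.
        def __init__(self, terms):
            self.keys = frozenset(terms)

        def __hash__(self):
            return hash(self.keys)

        def __eq__(self, other):
            return isinstance(other, TermSetKey) and self.keys == other.keys

    cache = {}
    cache[TermSetKey(['authors', 'tags'])] = 'cached result'
    print(cache[TermSetKey(['tags', 'authors'])])  # order does not matter
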
@ -295,7 +295,7 @@ class Router:
                if x:
                    k, v = x.partition('=')[::2]
                    if k:
                        # Since we only set simple hex encoded cookies, we dont
                        # Since we only set simple hex encoded cookies, we don't
                        # need more sophisticated value parsing
                        c[k] = v.strip('"')

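Spelled out on its own, the partition-based parsing above amounts to roughly the following; it deliberately handles only simple name=value pairs, since the server issues nothing fancier (a sketch, not the Router code itself):

    def parse_simple_cookies(header_value: str) -> dict:
        cookies = {}
        for part in header_value.split(';'):
            part = part.strip()
            if not part:
                continue
            k, v = part.partition('=')[::2]
            if k:
                cookies[k] = v.strip('"')
        return cookies

    print(parse_simple_cookies('session=deadbeef; theme="dark"'))  # {'session': 'deadbeef', 'theme': 'dark'}
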
@ -31,7 +31,7 @@ class ModernHTTPSHandler(HTTPSHandler):

class Browser(B):
    '''
    A cloneable mechanize browser. Useful for multithreading. The idea is that
    A clonable mechanize browser. Useful for multithreading. The idea is that
    each thread has a browser clone. Every clone uses the same thread safe
    cookie jar. All clones share the same browser configuration.

@ -153,7 +153,7 @@ class WindowsFileCopier:
                except OSError as err:
                    # Ignore dir not empty errors. Should never happen but we
                    # ignore it as the UNIX semantics are to not delete folders
                    # during __exit__ anyway and we dont want to leak the handle.
                    # during __exit__ anyway and we don't want to leak the handle.
                    if err.winerror != winutil.ERROR_DIR_NOT_EMPTY:
                        raise
            h.close()

@ -222,7 +222,7 @@ def case_preserving_open_file(path, mode='wb', mkdir_mode=0o777):
        try:
            candidates = [c for c in os.listdir(cpath) if c.lower() == cl]
        except:
            # Dont have permission to do the listdir, assume the case is
            # Don't have permission to do the listdir, assume the case is
            # correct as we have no way to check it.
            pass
        else:

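The same case-recovery idea in isolation: find an existing directory entry that matches the wanted name case-insensitively, and fall back to the name as given when the directory cannot be listed (the helper name is made up for illustration):

    import os

    def on_disk_spelling(parent: str, name: str) -> str:
        # Return the existing entry in `parent` matching `name` ignoring case,
        # or `name` unchanged if the directory cannot be listed.
        try:
            entries = os.listdir(parent)
        except OSError:
            return name
        lname = name.lower()
        for entry in entries:
            if entry.lower() == lname:
                return entry
        return name
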
@ -315,8 +315,8 @@ class FontScanner(Thread):
        self.reload_cache()

        if isworker:
            # Dont scan font files in worker processes, use whatever is
            # cached. Font files typically dont change frequently enough to
            # Don't scan font files in worker processes, use whatever is
            # cached. Font files typically don't change frequently enough to
            # justify a rescan in a worker process.
            self.build_families()
            return

@ -192,7 +192,7 @@ class SetGlobalsNode(Node):

class StringCompareNode(Node):
    def __init__(self, line_number, operator, left, right):
        Node.__init__(self, line_number, 'comparision: ' + operator)
        Node.__init__(self, line_number, 'comparison: ' + operator)
        self.node_type = self.NODE_COMPARE_STRING
        self.operator = operator
        self.left = left

@ -374,7 +374,7 @@ icu_Collator_contractions(icu_Collator *self, PyObject *args) {
            if (pbuf == NULL) { Py_DECREF(ans); ans = NULL; goto end; }
            PyTuple_SetItem(ans, i, pbuf);
        } else {
            // Ranges dont make sense for contractions, ignore them
            // Ranges don't make sense for contractions, ignore them
            PyTuple_SetItem(ans, i, Py_None); Py_INCREF(Py_None);
        }
    }

@ -248,7 +248,7 @@ class INotifyTreeWatcher(INotify):
                    raise NoSuchDir(f'The dir {base} does not exist')
                return
            if e.errno == errno.EACCES:
                # We silently ignore entries for which we dont have permission,
                # We silently ignore entries for which we don't have permission,
                # unless they are the top level dir
                if top_level:
                    raise NoSuchDir(f'You do not have permission to monitor {base}')
@ -293,7 +293,7 @@ class INotifyTreeWatcher(INotify):

    def process_event(self, wd, mask, cookie, name):
        if wd == -1 and (mask & self.Q_OVERFLOW):
            # We missed some INOTIFY events, so we dont
            # We missed some INOTIFY events, so we don't
            # know the state of any tracked dirs.
            self.watch_tree()
            self.modified.add(None)

@ -202,7 +202,7 @@ class IP_ADAPTER_ADDRESSES(ctypes.Structure):
        ('Dhcpv6ClientDuid', ctypes.c_ubyte * MAX_DHCPV6_DUID_LENGTH),
        ('Dhcpv6ClientDuidLength', wintypes.ULONG),
        ('Dhcpv6Iaid', wintypes.ULONG),
        # Vista SP1 and later, so we comment it out as we dont need it
        # Vista SP1 and later, so we comment it out as we don't need it
        # ('FirstDnsSuffix', ctypes.POINTER(IP_ADAPTER_DNS_SUFFIX)),
    ]

@ -113,8 +113,8 @@ history_length(2000) #value of -1 means no limit
        if not os.path.exists(ipydir):
            os.makedirs(ipydir)
        conf = os.path.join(ipydir, 'pyreadline.txt')
        hist = os.path.join(ipydir, 'history.txt')
        config = config % hist
        history = os.path.join(ipydir, 'history.txt')
        config = config % history
        with open(conf, 'wb') as f:
            f.write(config.encode('utf-8'))
        pyreadline.rlmain.config_path = conf

@ -743,7 +743,7 @@ class SMTP:
         >>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"]
         >>> msg = '''\\
         ... From: Me@my.org
         ... Subject: testin'...
         ... Subject: testing...
         ...
         ... This is a test '''
         >>> s.sendmail("me@my.org",tolist,msg)

@ -293,7 +293,7 @@ def _EndRecData(fpin):
        endrec = list(struct.unpack(structEndArchive, recData))
        comment = data[start+sizeEndCentDir:]
        # check that comment length is correct
        # Kovid: Added == 0 check as some zip files apparently dont set this
        # Kovid: Added == 0 check as some zip files apparently don't set this
        if endrec[_ECD_COMMENT_SIZE] == 0 or endrec[_ECD_COMMENT_SIZE] == len(comment):
            # Append the archive comment and start offset
            endrec.append(comment)