Fix the remaining typos

Christian Clauss 2021-10-03 09:44:42 +02:00
parent a1982c2c7e
commit fb1d7c40f8
115 changed files with 238 additions and 238 deletions

View File

@@ -263,7 +263,7 @@ def add_catalog(cache, path, title, dbapi=None):
new_book_added = True
else:
cache._set_metadata(db_id, mi)
-cache.add_format(db_id, fmt, stream, dbapi=dbapi) # Cant keep write lock since post-import hooks might run
+cache.add_format(db_id, fmt, stream, dbapi=dbapi) # Can't keep write lock since post-import hooks might run
return db_id, new_book_added
@@ -295,7 +295,7 @@ def add_news(cache, path, arg, dbapi=None):
mi.timestamp = utcnow()
db_id = cache._create_book_entry(mi, apply_import_tags=False)
-cache.add_format(db_id, fmt, stream, dbapi=dbapi) # Cant keep write lock since post-import hooks might run
+cache.add_format(db_id, fmt, stream, dbapi=dbapi) # Can't keep write lock since post-import hooks might run
if not hasattr(path, 'read'):
stream.close()

View File

@@ -23,7 +23,7 @@ class DeleteService(Thread):
by only moving the files/folders to be deleted out of the library in the
main thread, they are deleted to recycle bin in a separate worker thread.
-This has the added advantage that doing a restore from the recycle bin wont
+This has the added advantage that doing a restore from the recycle bin won't
cause metadata.db and the file system to get out of sync. Also, deleting
becomes much faster, since in the common case, the move is done by a simple
os.rename(). The downside is that if the user quits calibre while a long

View File

@@ -108,7 +108,7 @@ class Restore(Thread):
tdir = TemporaryDirectory('_rlib', dir=basedir)
tdir.__enter__()
except EnvironmentError:
-# Incase we dont have permissions to create directories in the
+# In case we dont have permissions to create directories in the
# parent folder of the src library
tdir = TemporaryDirectory('_rlib')

View File

@@ -346,7 +346,7 @@ def main():
dev.rm(args[0])
elif command == "touch":
parser = OptionParser(usage="usage: %prog touch path\nCreate an empty file on the device\n\npath should point to a file on the device and must begin with /,a:/ or b:/\n\n"+ # noqa
-"Unfortunately, I cant figure out how to update file times on the device, so if path already exists, touch does nothing")
+"Unfortunately, I can't figure out how to update file times on the device, so if path already exists, touch does nothing")
options, args = parser.parse_args(args)
if len(args) != 1:
parser.print_help()

View File

@@ -148,7 +148,7 @@ class ControlError(ProtocolError):
class WrongDestinationError(PathError):
''' The user chose the wrong destination to send books to, for example by
-trying to send books to a non existant storage card.'''
+trying to send books to a non existent storage card.'''
pass

View File

@@ -677,7 +677,7 @@ class DevicePlugin(Plugin):
def synchronize_with_db(self, db, book_id, book_metadata, first_call):
'''
Called during book matching when a book on the device is matched with
-a book in calibre's db. The method is responsible for syncronizing
+a book in calibre's db. The method is responsible for synchronizing
data from the device to calibre's db (if needed).
The method must return a two-value tuple. The first value is a set of

View File

@@ -280,7 +280,7 @@ class APNXBuilder:
def get_pages_pagebreak_tag(self, mobi_file_path):
'''
-Determine pages based on the presense of
+Determine pages based on the presence of
<mbp:pagebreak>.
'''
pages = []

View File

@@ -309,7 +309,7 @@ class KOBO(USBMS):
# print 'update_metadata_item returned true'
changed = True
else:
-debug_print(" Strange: The file: ", prefix, lpath, " does mot exist!")
+debug_print(" Strange: The file: ", prefix, lpath, " does not exist!")
if lpath in playlist_map and \
playlist_map[lpath] not in bl[idx].device_collections:
bl[idx].device_collections = playlist_map.get(lpath,[])
@@ -321,7 +321,7 @@ class KOBO(USBMS):
if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
book = self.book_from_path(prefix, lpath, title, authors, mime, date, ContentType, ImageID)
else:
-debug_print(" Strange: The file: ", prefix, lpath, " does mot exist!")
+debug_print(" Strange: The file: ", prefix, lpath, " does not exist!")
title = "FILE MISSING: " + title
book = self.book_class(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=1048576)
@@ -350,10 +350,10 @@ class KOBO(USBMS):
if self.dbversion >= 33:
query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, Accessibility, IsDownloaded from content where '
-'BookID is Null %(previews)s %(recomendations)s and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(
+'BookID is Null %(previews)s %(recommendations)s and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(
expiry=' and ContentType = 6)' if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')',
previews=' and Accessibility <> 6' if not self.show_previews else '',
-recomendations=' and IsDownloaded in (\'true\', 1)' if opts.extra_customization[self.OPT_SHOW_RECOMMENDATIONS] is False else '')
+recommendations=' and IsDownloaded in (\'true\', 1)' if opts.extra_customization[self.OPT_SHOW_RECOMMENDATIONS] is False else '')
elif self.dbversion >= 16 and self.dbversion < 33:
query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, Accessibility, "1" as IsDownloaded from content where '
@@ -600,7 +600,7 @@ class KOBO(USBMS):
prints('in add_books_to_metadata. Prefix is None!', path,
self._main_prefix)
continue
-# print "Add book to metatdata: "
+# print "Add book to metadata: "
# print "prefix: " + prefix
lpath = path.partition(prefix)[2]
if lpath.startswith('/') or lpath.startswith('\\'):
@@ -926,10 +926,10 @@ class KOBO(USBMS):
pass
else: # No collections
# Since no collections exist the ReadStatus needs to be reset to 0 (Unread)
-debug_print("No Collections - reseting ReadStatus")
+debug_print("No Collections - resetting ReadStatus")
self.reset_readstatus(connection, oncard)
if self.dbversion >= 14:
-debug_print("No Collections - reseting FavouritesIndex")
+debug_print("No Collections - resetting FavouritesIndex")
self.reset_favouritesindex(connection, oncard)
# debug_print('Finished update_device_database_collections', collections_attributes)
@@ -1067,11 +1067,11 @@ class KOBO(USBMS):
fsync(f)
else:
-debug_print("ImageID could not be retreived from the database")
+debug_print("ImageID could not be retrieved from the database")
def prepare_addable_books(self, paths):
'''
-The Kobo supports an encrypted epub refered to as a kepub
+The Kobo supports an encrypted epub referred to as a kepub
Unfortunately Kobo decided to put the files on the device
with no file extension. I just hope that decision causes
them as much grief as it does me :-)
@@ -1369,7 +1369,7 @@ class KOBOTOUCH(KOBO):
# build number. A number will be recorded here but it can be safely ignored
# when testing the firmware version.
max_supported_fwversion = (4, 28, 16705)
-# The following document firwmare versions where new function or devices were added.
+# The following document firmware versions where new function or devices were added.
# Not all are used, but this feels a good place to record it.
min_fwversion_shelves = (2, 0, 0)
min_fwversion_images_on_sdcard = (2, 4, 1)
@@ -1667,7 +1667,7 @@ class KOBOTOUCH(KOBO):
if favouritesindex == 1:
playlist_map[lpath].append('Shortlist')
-# The follwing is in flux:
+# The following is in flux:
# - FW2.0.0, DBVersion 53,55 accessibility == 1
# - FW2.1.2 beta, DBVersion == 56, accessibility == -1:
# So, the following should be OK
@@ -1857,7 +1857,7 @@ class KOBOTOUCH(KOBO):
"from ShelfContent " \
"where ContentId = ? " \
"and _IsDeleted = 'false' " \
-"and ShelfName is not null" # This should never be nulll, but it is protection against an error cause by a sync to the Kobo server
+"and ShelfName is not null" # This should never be null, but it is protection against an error cause by a sync to the Kobo server
values = (ContentID, )
cursor.execute(query, values)
for i, row in enumerate(cursor):
@@ -1905,32 +1905,32 @@ class KOBOTOUCH(KOBO):
where_clause = (" WHERE BookID IS NULL "
" AND ((Accessibility = -1 AND IsDownloaded in ('true', 1 )) " # Sideloaded books
" OR (Accessibility IN (%(downloaded_accessibility)s) %(expiry)s) " # Purchased books
-" %(previews)s %(recomendations)s ) " # Previews or Recommendations
+" %(previews)s %(recommendations)s ) " # Previews or Recommendations
) % \
dict(
expiry="" if self.show_archived_books else "and IsDownloaded in ('true', 1)",
previews=" OR (Accessibility in (6) AND ___UserID <> '')" if self.show_previews else "",
-recomendations=" OR (Accessibility IN (-1, 4, 6) AND ___UserId = '')" if self.show_recommendations else "",
+recommendations=" OR (Accessibility IN (-1, 4, 6) AND ___UserId = '')" if self.show_recommendations else "",
downloaded_accessibility="1,2,8,9" if self.supports_overdrive() else "1,2"
)
elif self.supports_series():
where_clause = (" WHERE BookID IS NULL "
-" AND ((Accessibility = -1 AND IsDownloaded IN ('true', 1)) or (Accessibility IN (1,2)) %(previews)s %(recomendations)s )"
+" AND ((Accessibility = -1 AND IsDownloaded IN ('true', 1)) or (Accessibility IN (1,2)) %(previews)s %(recommendations)s )"
" AND NOT ((___ExpirationStatus=3 OR ___ExpirationStatus is Null) %(expiry)s)"
) % \
dict(
expiry=" AND ContentType = 6" if self.show_archived_books else "",
previews=" or (Accessibility IN (6) AND ___UserID <> '')" if self.show_previews else "",
-recomendations=" or (Accessibility in (-1, 4, 6) AND ___UserId = '')" if self.show_recommendations else ""
+recommendations=" or (Accessibility in (-1, 4, 6) AND ___UserId = '')" if self.show_recommendations else ""
)
elif self.dbversion >= 33:
-where_clause = (' WHERE BookID IS NULL %(previews)s %(recomendations)s AND NOT'
+where_clause = (' WHERE BookID IS NULL %(previews)s %(recommendations)s AND NOT'
' ((___ExpirationStatus=3 or ___ExpirationStatus IS NULL) %(expiry)s)'
) % \
dict(
expiry=' AND ContentType = 6' if self.show_archived_books else '',
previews=' AND Accessibility <> 6' if not self.show_previews else '',
-recomendations=' AND IsDownloaded IN (\'true\', 1)' if not self.show_recommendations else ''
+recommendations=' AND IsDownloaded IN (\'true\', 1)' if not self.show_recommendations else ''
)
elif self.dbversion >= 16:
where_clause = (' WHERE BookID IS NULL '
@@ -2527,7 +2527,7 @@ class KOBOTOUCH(KOBO):
elif bookshelf_attribute: # No collections but have set the shelf option
# Since no collections exist the ReadStatus needs to be reset to 0 (Unread)
-debug_print("No Collections - reseting ReadStatus")
+debug_print("No Collections - resetting ReadStatus")
if self.dbversion < 53:
self.reset_readstatus(connection, oncard)
if self.dbversion >= 14 and self.fwversion < self.min_fwversion_shelves:
@@ -2675,7 +2675,7 @@ class KOBOTOUCH(KOBO):
):
'''
This will generate the new cover image from the cover in the library. It is a wrapper
-for save_cover_data_to to allow it to be overriden in a subclass. For this reason,
+for save_cover_data_to to allow it to be overridden in a subclass. For this reason,
options are passed in that are not used by this implementation.
:param cover_data: original cover data
@@ -3525,12 +3525,12 @@ class KOBOTOUCH(KOBO):
@property
def create_bookshelves(self):
-# Only for backwards compatabilty
+# Only for backwards compatibility
return self.manage_collections
@property
def delete_empty_shelves(self):
-# Only for backwards compatabilty
+# Only for backwards compatibility
return self.delete_empty_collections
@property
@@ -3756,7 +3756,7 @@ class KOBOTOUCH(KOBO):
settings.show_recommendations = settings.extra_customization[OPT_SHOW_RECOMMENDATIONS]
# If the configuration hasn't been change for a long time, the last few option will be out
-# of sync. The last two options aare always the support newer firmware and the debugging
+# of sync. The last two options are always the support newer firmware and the debugging
# title. Set seties and Modify CSS were the last two new options. The debugging title is
# a string, so looking for that.
start_subclass_extra_options = OPT_MODIFY_CSS

View File

@@ -171,7 +171,7 @@
*/
#define DEVICE_FLAG_ALWAYS_PROBE_DESCRIPTOR 0x00000800
/**
-* Samsung has implimented its own playlist format as a .spl file
+* Samsung has implemented its own playlist format as a .spl file
* stored in the normal file system, rather than a proper mtp
* playlist. There are multiple versions of the .spl format
* identified by a line in the file: VERSION X.XX
@@ -179,7 +179,7 @@
*/
#define DEVICE_FLAG_PLAYLIST_SPL_V1 0x00001000
/**
-* Samsung has implimented its own playlist format as a .spl file
+* Samsung has implemented its own playlist format as a .spl file
* stored in the normal file system, rather than a proper mtp
* playlist. There are multiple versions of the .spl format
* identified by a line in the file: VERSION X.XX

View File

@@ -296,7 +296,7 @@
DEVICE_FLAG_BROKEN_MTPGETOBJPROPLIST |
DEVICE_FLAG_PLAYLIST_SPL_V1 },
// YP-F3 is NOT MTP - USB mass storage
-// From a rouge .INF file
+// From a rogue .INF file
// this device ID seems to have been recycled for:
// the Samsung SGH-A707 Cingular cellphone
// the Samsung L760-V cellphone
@@ -613,7 +613,7 @@
/*
* SanDisk
-* several devices (c150 for sure) are definately dual-mode and must
+* several devices (c150 for sure) are definitely dual-mode and must
* have the USB mass storage driver that hooks them unloaded first.
* They all have problematic dual-mode making the device unload effect
* uncertain on these devices.
@@ -1071,7 +1071,7 @@
DEVICE_FLAG_BROKEN_MTPGETOBJPROPLIST_ALL },
// From: Willy Gardiol (web) <willy@gardiol.org>
// Spurious errors for getting all objects, lead me to believe
-// this flag atleast is needed
+// this flag at least is needed
{ "Nokia", 0x0421, "5800 XpressMusic v2", 0x0155,
DEVICE_FLAG_BROKEN_MTPGETOBJPROPLIST_ALL },
// Yet another version... I think
@@ -2711,7 +2711,7 @@
#if 1
/* after some review I commented it back in. There was apparently
* only one or two devices misbehaving (having this ID in mass storage mode),
-* but more seem to use it regulary as MTP devices. Marcus 20150401 */
+* but more seem to use it regularly as MTP devices. Marcus 20150401 */
/*
* This had to be commented out - the same VID+PID is used also for
* other modes than MTP, so we need to let mtp-probe do its job on this
@@ -2796,7 +2796,7 @@
#if 1
/* after some review I commented it back in. There was apparently
* only one or two devices misbehaving (having this ID in mass storage mode),
-* but more seem to use it regulary as MTP devices. Marcus 20150401 */
+* but more seem to use it regularly as MTP devices. Marcus 20150401 */
/*
* This had to be commented out - the same VID+PID is used also for
* other modes than MTP, so we need to let mtp-probe do its job on this
@@ -3136,7 +3136,7 @@
DEVICE_FLAGS_ANDROID_BUGS },
/* In update 4 the order of devices was changed for
-better OS X / Windows suport and another device-id
+better OS X / Windows support and another device-id
got assigned for the MTP */
{ "Jolla", 0x2931, "Sailfish (ID2)", 0x0a05,
DEVICE_FLAGS_ANDROID_BUGS },

View File

@@ -136,7 +136,7 @@ wpd_enumerate_devices(PyObject *self, PyObject *args) {
hr = portable_device_manager->GetDevices(NULL, &num_of_devices);
Py_END_ALLOW_THREADS;
if (FAILED(hr)) return hresult_set_exc("Failed to get number of devices on the system", hr);
-num_of_devices += 15; // Incase new devices were connected between this call and the next
+num_of_devices += 15; // In case new devices were connected between this call and the next
pnp_device_ids = (PWSTR*)calloc(num_of_devices, sizeof(PWSTR));
if (pnp_device_ids == NULL) return PyErr_NoMemory();

View File

@@ -27,7 +27,7 @@ Periodical identifier sample from a PRS-650:
<?xml version="1.0" encoding="UTF-8"?>
<cacheExt xmlns="http://www.sony.com/xmlns/product/prs/device/1">
<text conformsTo="http://xmlns.sony.net/e-book/prs/periodicals/1.0/newspaper/1.0" periodicalName="The Atlantic"
-description="Current affairs and politics focussed on the US" publicationDate="Tue, 19 Oct 2010 00:00:00 GMT"
+description="Current affairs and politics focused on the US" publicationDate="Tue, 19 Oct 2010 00:00:00 GMT"
path="database/media/books/calibre/Atlantic [Mon, 18 Oct 2010], The - calibre_1701.epub">
<thumbnail width="167" height="217">main_thumbnail.jpg</thumbnail>
</text>

View File

@@ -45,7 +45,7 @@ class LITInput(InputFormatPlugin):
from calibre.ebooks.chardet import xml_to_unicode
from calibre.utils.xml_parse import safe_xml_fromstring
import copy
-self.log('LIT file with all text in singe <pre> tag detected')
+self.log('LIT file with all text in single <pre> tag detected')
html = separate_paragraphs_single_line(pre.text)
html = convert_basic(html).replace('<html>',
'<html xmlns="%s">'%XHTML_NS)

View File

@@ -121,7 +121,7 @@ class PMLInput(InputFormatPlugin):
if hasattr(stream, 'name'):
images = self.get_images(stream, os.path.abspath(os.path.dirname(stream.name)))
-# We want pages to be orded alphabetically.
+# We want pages to be ordered alphabetically.
pages.sort()
manifest_items = []

View File

@@ -170,7 +170,7 @@ class TXTInput(InputFormatPlugin):
if txt_formatting is not None and txt_formatting.text:
txt_formatting = txt_formatting.text.strip()
if txt_formatting in ('plain', 'textile', 'markdown') and options.formatting_type == 'auto':
-log.info(f'Using metadata from TXTZ archive to set text formating type to: {txt_formatting}')
+log.info(f'Using metadata from TXTZ archive to set text formatting type to: {txt_formatting}')
options.formatting_type = txt_formatting
if txt_formatting != 'plain':
options.paragraph_type = 'off'

View File

@@ -1054,7 +1054,7 @@ OptionRecommendation(name='search_replace',
from calibre.utils.fonts.scanner import font_scanner # noqa
import css_parser, logging
css_parser.log.setLevel(logging.WARN)
-get_types_map() # Ensure the mimetypes module is intialized
+get_types_map() # Ensure the mimetypes module is initialized
if self.opts.debug_pipeline is not None:
self.opts.verbose = max(self.opts.verbose, 4)

View File

@@ -19,7 +19,7 @@ def get_applicable_xe_fields(index, xe_fields, XPath, expand):
lr = index.get('letter-range', None)
if lr is not None:
-sl, el = lr.parition('-')[0::2]
+sl, el = lr.partition('-')[0::2]
sl, el = sl.strip(), el.strip()
if sl and el:
def inrange(text):

View File

@@ -14,7 +14,7 @@ class Parser:
''' See epubcfi.ebnf for the specification that this parser tries to
follow. I have implemented it manually, since I dont want to depend on
grako, and the grammar is pretty simple. This parser is thread-safe, i.e.
-it can be used from multiple threads simulataneously. '''
+it can be used from multiple threads simultaneously. '''
def __init__(self):
# All allowed unicode characters + escaped special characters

View File

@@ -168,7 +168,7 @@ class HTMLFile:
try:
link = self.resolve(url)
except ValueError:
-# Unparseable URL, ignore
+# Unparsable URL, ignore
continue
if link not in self.links:
self.links.append(link)

View File

@@ -383,7 +383,7 @@ sol3d2 so3lic 5solv 3som 3s4on. sona4 son4g s4op 5sophic s5ophiz s5ophy sor5c
sor5d 4sov so5vi 2spa 5spai spa4n spen4d 2s5peo 2sper s2phe 3spher spho5 spil4
sp5ing 4spio s4ply s4pon spor4 4spot squal4l s1r 2ss s1sa ssas3 s2s5c s3sel
s5seng s4ses. s5set s1si s4sie ssi4er ss5ily s4sl ss4li s4sn sspend4 ss2t ssur5a
-ss5w 2st. s2tag s2tal stam4i 5stand s4ta4p 5stat. s4ted stern5i s5tero ste2w
+ss5w 2nd. s2tag s2tal stam4i 5stand s4ta4p 5stat. s4ted stern5i s5tero ste2w
stew5a s3the st2i s4ti. s5tia s1tic 5stick s4tie s3tif st3ing 5stir s1tle 5stock
stom3a 5stone s4top 3store st4r s4trad 5stratu s4tray s4trid 4stry 4st3w s2ty
1su su1al su4b3 su2g3 su5is suit3 s4ul su2m sum3i su2n su2r 4sv sw2 4swo s4y

View File

@@ -66,7 +66,7 @@ def _bytelist2longBigEndian(blist):
def _rotateLeft(x, n):
-"Rotate x (32 bit) left n bits circularly."
+"Rotate x (32 bit) left n bits circular."
return (x << n) | (x >> (32-n))

View File

@@ -236,7 +236,7 @@ class HTMLConverter:
self.id_counter = 0
self.unused_target_blocks = [] # : Used to remove extra TextBlocks
self.link_level = 0 #: Current link level
-self.memory = [] #: Used to ensure that duplicate CSS unhandled erros are not reported
+self.memory = [] #: Used to ensure that duplicate CSS unhandled errors are not reported
self.tops = {} #: element representing the top of each HTML file in the LRF file
self.previous_text = '' # : Used to figure out when to lstrip
self.stripped_space = ''
@@ -937,7 +937,7 @@ class HTMLConverter:
if height <= 0:
height = 1
pt = PersistentTemporaryFile(suffix='_html2lrf_scaled_image_.'+encoding.lower())
-self.image_memory.append(pt) # Neccessary, trust me ;-)
+self.image_memory.append(pt) # Necessary, trust me ;-)
try:
im.resize((int(width), int(height)), PILImage.ANTIALIAS).save(pt, encoding)
pt.close()

View File

@@ -351,7 +351,7 @@ class Book(Delegator):
the Book class in some way or another in order to be rendered as
an LRS or LRF file.
-The following settings are available on the contructor of Book:
+The following settings are available on the constructor of Book:
author="book author" or author=("book author", "sort as")
Author of the book.

View File

@@ -79,7 +79,7 @@ def set_metadata(stream, mi):
if hr.compression not in (2, 10):
return
-# Create a metadata record for the file if one does not alreay exist
+# Create a metadata record for the file if one does not already exist
if not hr.has_metadata:
sections += [b'', b'MeTaInFo\x00']
last_data = len(sections) - 1

View File

@@ -163,7 +163,7 @@ def get_metadata(stream):
def _parse_authors(root, ctx):
authors = []
-# pick up authors but only from 1 secrion <title-info>; otherwise it is not consistent!
+# pick up authors but only from 1 section <title-info>; otherwise it is not consistent!
# Those are fallbacks: <src-title-info>, <document-info>
author = None
for author_sec in ['title-info', 'src-title-info', 'document-info']:
@@ -248,7 +248,7 @@ def _parse_cover_data(root, imgid, mi, ctx):
def _parse_tags(root, mi, ctx):
-# pick up genre but only from 1 secrion <title-info>; otherwise it is not consistent!
+# pick up genre but only from 1 section <title-info>; otherwise it is not consistent!
# Those are fallbacks: <src-title-info>
for genre_sec in ['title-info', 'src-title-info']:
# -- i18n Translations-- ?

View File

@@ -307,7 +307,7 @@ class Worker(Thread): # Get details {{{
text() = "Buscar productos similares por categoría" or
text() = "Ricerca articoli simili per categoria" or
text() = "Rechercher des articles similaires par rubrique" or
-text() = "Procure por itens similares por categoria" or
+text() = "Procure por items similares por categoria" or
text() = "関連商品を探す"
]/../descendant::ul/li
'''

View File

@@ -480,7 +480,7 @@ class Source(Plugin):
The URL is the URL for the book identified by identifiers at this
source. identifier_type, identifier_value specify the identifier
corresponding to the URL.
-This URL must be browseable to by a human using a browser. It is meant
+This URL must be browsable to by a human using a browser. It is meant
to provide a clickable link for the user to easily visit the books page
at this source.
If no URL is found, return None. This method must be quick, and

View File

@@ -25,7 +25,7 @@ PLACEHOLDER_GIF = b'GIF89a\x01\x00\x01\x00\xf0\x00\x00\x00\x00\x00\xff\xff\xff!\
def process_jpegs_for_amazon(data: bytes) -> bytes:
img = Image.open(BytesIO(data))
if img.format == 'JPEG':
-# Amazon's MOBI renderer cant render JPEG images without JFIF metadata
+# Amazon's MOBI renderer can't render JPEG images without JFIF metadata
# and images with EXIF data dont get displayed on the cover screen
changed = not img.info
if hasattr(img, '_getexif') and img._getexif():

View File

@@ -194,7 +194,7 @@ class Serializer:
try:
path, frag = urldefrag(urlnormalize(href))
except ValueError:
-# Unparseable URL
+# Unparsable URL
return False
if path and base:
path = base.abshref(path)

View File

@@ -229,10 +229,10 @@ class Extract(ODF2XHTML):
# only one draw:image allowed in the draw:frame
if len(img) == 1 and img[0].getAttribute('href') == mi.cover:
# ok, this is the right frame with the right image
-# check if there are more childs
+# check if there are more children
if len(frm.childNodes) != 1:
break
-# check if the parent paragraph more childs
+# check if the parent paragraph more children
para = frm.parentNode
if para.tagName != 'text:p' or len(para.childNodes) != 1:
break

View File

@@ -520,7 +520,7 @@ class Container(ContainerBase): # {{{
return unicodedata.normalize('NFC', abspath_to_name(fullpath, root or self.root))
def name_to_abspath(self, name):
-' Convert a canonical name to an absolute OS dependant path '
+' Convert a canonical name to an absolute OS dependent path '
return name_to_abspath(name, self.root)
def exists(self, name):

View File

@@ -27,7 +27,7 @@ def pretty_xml_tree(elem, level=0, indent=' '):
textual content. Also assumes that there is no text immediately after
closing tags. These are true for opf/ncx and container.xml files. If either
of the assumptions are violated, there should be no data loss, but pretty
-printing wont produce optimal results.'''
+printing won't produce optimal results.'''
if (not elem.text and len(elem) > 0) or (elem.text and isspace(elem.text)):
elem.text = '\n' + (indent * (level+1))
for i, child in enumerate(elem):

View File

@@ -124,7 +124,7 @@ class Split:
for i, elem in enumerate(item.data.iter('*')):
try:
elem.set('pb_order', unicode_type(i))
-except TypeError: # Cant set attributes on comment nodes etc.
+except TypeError: # Can't set attributes on comment nodes etc.
continue
page_breaks = list(page_breaks)
@@ -168,7 +168,7 @@ class Split:
try:
href = self.current_item.abshref(href)
except ValueError:
-# Unparseable URL
+# Unparsable URL
return url
try:
href = urlnormalize(href)

View File

@@ -36,7 +36,7 @@ def pdb_header_info(header):
print('PDB Header Info:')
print('')
print('Identity: %s' % header.ident)
-print('Total Sectons: %s' % header.num_sections)
+print('Total Sections: %s' % header.num_sections)
print('Title: %s' % header.title)
print('')

View File

@@ -154,7 +154,7 @@ class Writer(FormatWriter):
if len(data) + len(header) < 65505:
images.append((header, data))
except Exception as e:
-self.log.error('Error: Could not include file %s becuase '
+self.log.error('Error: Could not include file %s because '
'%s.' % (item.href, e))
return images

View File

@@ -74,7 +74,7 @@ class PdbHeaderBuilder:
def build_header(self, section_lengths, out_stream):
'''
-section_lengths = Lenght of each section in file.
+section_lengths = Length of each section in file.
'''
now = int(time.time())

View File

@@ -118,7 +118,7 @@ class HeaderRecord:
def __init__(self, raw):
self.uid, = struct.unpack('>H', raw[0:2])
-# This is labled version in the spec.
+# This is labeled version in the spec.
# 2 is ZLIB compressed,
# 1 is DOC compressed
self.compression, = struct.unpack('>H', raw[2:4])
@@ -182,7 +182,7 @@ class SectionMetadata:
This does not store metadata such as title, or author.
That metadata would be best retrieved with the PDB (plucker)
-metdata reader.
+metadata reader.
This stores document specific information such as the
text encoding.
@@ -358,7 +358,7 @@ class Reader(FormatReader):
def extract_content(self, output_dir):
# Each text record is independent (unless the continuation
# value is set in the previous record). Put each converted
-# text recored into a separate file. We will reference the
+# text recorded into a separate file. We will reference the
# home.html file as the first file and let the HTML input
# plugin assemble the order based on hyperlinks.
with CurrentDir(output_dir):
@@ -452,7 +452,7 @@ class Reader(FormatReader):
odi = self.options.debug_pipeline
self.options.debug_pipeline = None
# Determine the home.html record uid. This should be set in the
-# reserved values in the metadata recored. home.html is the first
+# reserved values in the metadata recorded. home.html is the first
# text record (should have hyper link references to other records)
# in the document.
try:

View File

@@ -60,7 +60,7 @@ class Reader(FormatReader):
self.log.debug('Foud ztxt version: %i.%i' % (vmajor, vminor))
-# Initalize the decompressor
+# Initialize the decompressor
self.uncompressor = zlib.decompressobj()
self.uncompressor.decompress(self.section_data(1))

View File

@@ -88,7 +88,7 @@ class Links:
try:
purl = urlparse(url)
except Exception:
-self.pdf.debug('Ignoring unparseable URL: %r' % url)
+self.pdf.debug('Ignoring unparsable URL: %r' % url)
continue
if purl.scheme and purl.scheme != 'file':
action = Dictionary({

View File

@@ -198,7 +198,7 @@ class PMLMLizer:
text = text.replace('\xa0', ' ')
# Turn all characters that cannot be represented by themself into their
-# PML code equivelent
+# PML code equivalent
text = re.sub('[^\x00-\x7f]', lambda x: unipmlcode(x.group()), text)
# Remove excess spaces at beginning and end of lines
@@ -346,7 +346,7 @@ class PMLMLizer:
except:
pass
-# Proccess text within this tag.
+# Process text within this tag.
if hasattr(elem, 'text') and elem.text:
text.append(self.prepare_string_for_pml(elem.text))

View File

@@ -206,7 +206,7 @@ class RBMLizer:
text.append('<%s>' % style_tag)
tag_stack.append(style_tag)
-# Proccess tags that contain text.
+# Process tags that contain text.
if hasattr(elem, 'text') and elem.text:
text.append(prepare_string_for_xml(elem.text))

View File

@@ -37,9 +37,9 @@ class RBWriter:
def write_content(self, oeb_book, out_stream, metadata=None):
info = [('info.info', self._info_section(metadata))]
images = self._images(oeb_book.manifest)
-text_size, chuncks = self._text(oeb_book)
-chunck_sizes = [len(x) for x in chuncks]
-text = [('index.html', chuncks)]
+text_size, chunks = self._text(oeb_book)
+chunck_sizes = [len(x) for x in chunks]
+text = [('index.html', chunks)]
hidx = [('index.hidx', ' ')]
toc_items = []
@@ -84,8 +84,8 @@ class RBWriter:
out_stream.write(struct.pack('<I', text_size))
for size in chunck_sizes:
out_stream.write(struct.pack('<I', size))
-for chunck in text[0][1]:
-out_stream.write(chunck)
+for chunk in text[0][1]:
+out_stream.write(chunk)
self.log.debug('Writing images...')
for item in hidx+images:
@@ -132,7 +132,7 @@ class RBWriter:
images.append((name, data))
except Exception as e:
-self.log.error('Error: Could not include file %s becuase '
+self.log.error('Error: Could not include file %s because '
'%s.' % (item.href, e))
return images

View File

@@ -71,7 +71,7 @@ def text_length(i):
return len(clean(i.text_content() or ""))
-class Unparseable(ValueError):
+class Unparsable(ValueError):
pass
@@ -156,7 +156,7 @@ class Document:
return cleaned_article
except Exception as e:
self.log.exception('error getting summary: ')
-reraise(Unparseable, Unparseable(unicode_type(e)), sys.exc_info()[2])
+reraise(Unparsable, Unparsable(unicode_type(e)), sys.exc_info()[2])
def get_article(self, candidates, best_candidate):
# Now that we have the top candidate, look through its siblings for content that might also be related.

View File

@@ -11,7 +11,7 @@ RTF tokenizer and token parser. v.1.0 (1/17/2010)
Author: Gerendi Sandor Attila
At this point this will tokenize a RTF file then rebuild it from the tokens.
-In the process the UTF8 tokens are altered to be supported by the RTF2XML and also remain RTF specification compilant.
+In the process the UTF8 tokens are altered to be supported by the RTF2XML and also remain RTF specification compliant.
"""
@@ -235,7 +235,7 @@ class RtfTokenParser():
i = i + 1
j = j + 1
continue
-raise Exception('Error: incorect utf replacement.')
+raise Exception('Error: incorrect utf replacement.')
# calibre rtf2xml does not support utfreplace
replace = []

View File

@@ -275,7 +275,7 @@ class RTFMLizer:
text += '{%s\n' % style_tag
tag_stack.append(style_tag)
-# Proccess tags that contain text.
+# Process tags that contain text.
if hasattr(elem, 'text') and elem.text:
text += txt2rtf(elem.text)

View File

@@ -44,7 +44,7 @@ def Handle_Main():
# determine the run level. The default is 1.
run_level = 3,
# The name of a debug directory, if you are running at
-# run level 3 or higer.
+# run level 3 or higher.
debug = 'debug_dir',
# Convert RTF caps to real caps.
# Default is 1.
@@ -124,7 +124,7 @@ class ParseRtf:
'output' --a file to output the parsed file. (Default is standard
output.)
'temp_dir' --directory for temporary output (If not provided, the
-script tries to output to directory where is script is exectued.)
+script tries to output to directory where is script is executed.)
'deb_dir' --debug directory. If a debug_dir is provided, the script
will copy each run through as a file to examine in the debug_dir
'check_brackets' -- make sure the brackets match up after each run

View File

@@ -111,7 +111,7 @@ class AddBrackets:
2-If an open bracket is found the code inside is ignore
(written without modifications)
3-If an accepted control word is found put the line
-in a buffer then chage state to after cw
+in a buffer then change state to after cw
4-Else simply write the line
"""
if line == 'cb<nu<clos-brack<0001\n' and self.__open_bracket:
@@ -151,7 +151,7 @@ class AddBrackets:
def __write_group(self):
"""
-Write a tempory group after accepted control words end
+Write a temporary group after accepted control words end
But this is mostly useless in my opinion as there is no list of rejected cw
This may be a way to implement future old rtf processing for cw
Utility: open a group to just put brackets but why be so complicated?

View File

@@ -126,7 +126,7 @@ class Colors:
Logic:
Check if the end of the color table has been reached. If so,
change the state to after the color table.
-Othewise, get a function by passing the self.__token_info to the
+Otherwise, get a function by passing the self.__token_info to the
state dictionary.
"""
# mi<mk<clrtbl-beg
@@ -234,7 +234,7 @@ class Colors:
beginning of the color table.
If the state is in the color table, create the color dictionary
and print out the tags.
-If the state if afer the color table, look for lines with color
+If the state if after the color table, look for lines with color
info, and substitute the number with the hex number.
"""
self.__initiate_values()

View File

@@ -31,7 +31,7 @@ class Configure:
if self.__show_config_file and self.__configuration_file:
sys.stderr.write('configuration file is "%s"\n' % self.__configuration_file)
if self.__show_config_file and not self.__configuration_file:
-sys.stderr.write('No configuraiton file found; using default values\n')
+sys.stderr.write('No configuration file found; using default values\n')
if self.__configuration_file:
read_obj = open_for_read(self.__configuration_file)
line_to_read = 1
@@ -111,7 +111,7 @@ class Configure:
return_dict['configure-directory'] = None
else:
if not os.path.isdir(configuration_dir):
-sys.stderr.write('The dirctory "%s" does not appear to be a directory.\n'
+sys.stderr.write('The directory "%s" does not appear to be a directory.\n'
% configuration_dir)
return 1
else:

View File

@@ -107,7 +107,7 @@ class ConvertToTags:
"""
Process lines for open tags that have attributes.
The important info is between [17:-1]. Take this info and split it
-with the delimeter '<'. The first token in this group is the element
+with the delimiter '<'. The first token in this group is the element
name. The rest are attributes, separated fromt their values by '>'. So
read each token one at a time, and split them by '>'.
"""
@@ -256,7 +256,7 @@ class ConvertToTags:
an open function for open tags
an open with attribute function for tags with attributes
an empty with attribute function for tags that are empty but have
-attribtes.
+attributes.
a closed function for closed tags.
an empty tag function.
"""

View File

@@ -19,7 +19,7 @@ from . import open_for_read, open_for_write
class DeleteInfo:
-"""Delete unecessary destination groups"""
+"""Delete unnecessary destination groups"""
def __init__(self,
in_file ,
@@ -110,7 +110,7 @@ class DeleteInfo:
If you find that you are in a delete group, and the previous
token in not an open bracket (self.__ob = 0), that means
that the delete group is nested inside another acceptable
-detination group. In this case, you have already written
+destination group. In this case, you have already written
the open bracket, so you will need to write the closed one
as well.
"""

View File

@@ -255,11 +255,11 @@ class FieldStrings:
def __equation_func(self, field_name, name, line):
"""
-Requried:
+Required:
field_name -- the first word in the string
name --the changed name according to the dictionary
line -- the string to be parse
-Retuns:
+Returns:
The name of the field
Logic:
"""
@@ -272,7 +272,7 @@ class FieldStrings:
field_name -- the first word in the string
name --the changed name according to the dictionary
line -- the string to be parse
-Retuns:
+Returns:
The name of the field
Logic:
"""
@@ -378,11 +378,11 @@ class FieldStrings:
def __simple_info_func(self, field_name, name, line):
"""
-Requried:
+Required:
field_name -- the first word in the string
name --the changed name according to the dictionary
line -- the string to be parse
-Retuns:
+Returns:
The name of the field
Logic:
These fields can only have the following switches:
@@ -406,11 +406,11 @@ class FieldStrings:
def __hyperlink_func(self, field_name, name, line):
"""
-Requried:
+Required:
field_name -- the first word in the string
name --the changed name according to the dictionary
line -- the string to be parse
-Retuns:
+Returns:
The name of the field
"""
self.__link_switch = re.compile(r'\\l\s{1,}"{0,1}(.*?)"{0,1}\s')
@@ -442,11 +442,11 @@ class FieldStrings:
def __include_text_func(self, field_name, name, line):
"""
-Requried:
+Required:
field_name -- the first word in the string
name --the changed name according to the dictionary
line -- the string to be parse
-Retuns:
+Returns:
The name of the field
Logic:
"""
@@ -484,11 +484,11 @@ class FieldStrings:
def __include_pict_func(self, field_name, name, line):
"""
-Requried:
+Required:
field_name -- the first word in the string
name --the changed name according to the dictionary
line -- the string to be parse
-Retuns:
+Returns:
The name of the field
Logic:
"""
@@ -526,7 +526,7 @@ class FieldStrings:
A page reference field looks like this:
PAGEREF _Toc440880424 \\h
I want to extract the second line of info, which is used as an
-achor in the resulting XML file.
+anchor in the resulting XML file.
"""
the_string = name
match_group = re.search(self.__format_text_exp, line)
@@ -776,7 +776,7 @@ class FieldStrings:
name -- the changed name according to the dictionary.
line -- the string to parse.
Returns:
-A string containing font size, font style, and a hexidecimal value.
+A string containing font size, font style, and a hexadecimal value.
Logic:
The SYMBOL field is one of Microsoft's many quirky ways of
entering text. The string that results from this method looks like
@@ -785,7 +785,7 @@ class FieldStrings:
The first word merely tells us that we have encountered a SYMBOL
field.
The next value is the Microsoft decimal value. Change this to
-hexidecimal.
+hexadecimal.
The pattern '\\f "some font' tells us the font.
The pattern '\\s some size' tells us the font size.
Extract all of this information. Store this information in a Extract all of this information. Store this information in a

View File

@ -53,7 +53,7 @@ Examples
<field type = "insert-time"> <field type = "insert-time">
10:34 PM 10:34 PM
</field> </field>
The simple field in the above example conatins no paragraph or sections breaks. The simple field in the above example contains no paragraph or section breaks.
This line of RTF: This line of RTF:
{{\field{\*\fldinst SYMBOL 97 \\f "Symbol" \\s 12}{\fldrslt\f3\fs24}}} {{\field{\*\fldinst SYMBOL 97 \\f "Symbol" \\s 12}{\fldrslt\f3\fs24}}}
Becomes: Becomes:
@ -141,7 +141,7 @@ Examples
} }
self.__field_count = [] # keep track of the brackets self.__field_count = [] # keep track of the brackets
self.__field_instruction = [] # field instruction strings self.__field_instruction = [] # field instruction strings
self.__symbol = 0 # wheter or not the field is really UTF-8 self.__symbol = 0 # whether or not the field is really UTF-8
# (these fields cannot be nested.) # (these fields cannot be nested.)
self.__field_instruction_string = '' # string that collects field instruction self.__field_instruction_string = '' # string that collects field instruction
self.__par_in_field = [] # paragraphs in field? self.__par_in_field = [] # paragraphs in field?
@ -150,7 +150,7 @@ Examples
def __before_body_func(self, line): def __before_body_func(self, line):
""" """
Requried: Required:
line --line ro parse line --line ro parse
Returns: Returns:
nothing (changes an instant and writes a line) nothing (changes an instant and writes a line)
@ -183,7 +183,7 @@ Examples
Returns: Returns:
nothing nothing
Logic: Logic:
Set the values for parseing the field. Four lists have to have Set the values for parsing the field. Four lists have to have
items appended to them. items appended to them.
""" """
self.__state = 'field' self.__state = 'field'
@ -201,7 +201,7 @@ Examples
Returns: Returns:
nothing. nothing.
Logic: Logic:
Check for the end of the field; a paragaph break; a section break; Check for the end of the field; a paragraph break; a section break;
the beginning of another field; or the beginning of the field the beginning of another field; or the beginning of the field
instruction. instruction.
""" """
@ -289,7 +289,7 @@ Examples
Nothing Nothing
Logic: Logic:
Pop the last values in the instructions list, the fields list, the Pop the last values in the instructions list, the fields list, the
paragaph list, and the section list. paragraph list, and the section list.
If the field is a symbol, do not write the tags <field></field>, If the field is a symbol, do not write the tags <field></field>,
since this field is really just UTF-8. since this field is really just UTF-8.
If the field contains paragraph or section breaks, it is a If the field contains paragraph or section breaks, it is a

View File

@ -30,7 +30,7 @@ use to you unless you use it as part of the other modules.)
Method Method
----------- -----------
Look for the beginning of a bookmark, index, or toc entry. When such a token Look for the beginning of a bookmark, index, or toc entry. When such a token
is found, store the opeing bracket count in a variable. Collect all the text is found, store the opening bracket count in a variable. Collect all the text
until the closing bracket entry is found. Send the string to the module until the closing bracket entry is found. Send the string to the module
field_strings to process it. Write the processed string to the output field_strings to process it. Write the processed string to the output
file. file.
@ -170,7 +170,7 @@ file.
A string for a toc instruction field. A string for a toc instruction field.
Logic: Logic:
This method is meant for *both* index and toc entries. This method is meant for *both* index and toc entries.
I want to eleminate paragraph endings, and I want to divide the I want to eliminate paragraph endings, and I want to divide the
entry into a main entry and (if it exists) a sub entry. entry into a main entry and (if it exists) a sub entry.
Split the string by newlines. Read on token at a time. If the Split the string by newlines. Read on token at a time. If the
token is a special colon, end the main entry element and start the token is a special colon, end the main entry element and start the
@ -238,7 +238,7 @@ file.
def __index_bookmark_func(self, my_string): def __index_bookmark_func(self, my_string):
""" """
Requries: Requires:
my_string -- string in all the index my_string -- string in all the index
Returns: Returns:
bookmark_string -- the text string of the book mark bookmark_string -- the text string of the book mark
@ -373,7 +373,7 @@ file.
my_string --string to parse my_string --string to parse
type --type of string type --type of string
Returns: Returns:
A string formated for a field instruction. A string formatted for a field instruction.
Logic: Logic:
The type is the name (either bookmark-end or bookmark-start). The The type is the name (either bookmark-end or bookmark-start). The
id is the complete text string. id is the complete text string.

View File

@ -108,7 +108,7 @@ class GetOptions:
pass pass
""" """
sys.stderr.write( sys.stderr.write(
'You must provide an ouput file with the \'o\' option\n') 'You must provide an output file with the \'o\' option\n')
return_options['valid'] = 0 return_options['valid'] = 0
""" """
if 'level' in the_keys: if 'level' in the_keys:
@ -226,7 +226,7 @@ class GetOptions:
if not smart_output and not return_options['out-file']: if not smart_output and not return_options['out-file']:
""" """
sys.stderr.write( sys.stderr.write(
'Please provide and file to outut with the -o option.\n' 'Please provide a file to output with the -o option.\n'
'Or set \'<smart-output value = "true"/>\'.\n' 'Or set \'<smart-output value = "true"/>\'.\n'
'in the configuration file.\n' 'in the configuration file.\n'
) )

View File

@ -214,7 +214,7 @@ class GroupBorders:
Returns: Returns:
Nothing Nothing
Logic Logic
Look for the start of a paragraph defintion. If one is found, check if Look for the start of a paragraph definition. If one is found, check if
it contains a list-id. If it does, start a list. Change the state to it contains a list-id. If it does, start a list. Change the state to
in_pard. in_pard.
""" """

View File

@ -207,7 +207,7 @@ class GroupStyles:
Returns: Returns:
Nothing Nothing
Logic Logic
Look for the start of a paragraph defintion. If one is found, check if Look for the start of a paragraph definition. If one is found, check if
it contains a list-id. If it does, start a list. Change the state to it contains a list-id. If it does, start a list. Change the state to
in_pard. in_pard.
""" """

View File

@ -149,7 +149,7 @@ class HeadingsToSections:
Returns: Returns:
Nothing Nothing
Logic Logic
Look for the start of a paragraph defintion. If one is found, check if Look for the start of a paragraph definition. If one is found, check if
it contains a list-id. If it does, start a list. Change the state to it contains a list-id. If it does, start a list. Change the state to
in_pard. in_pard.
""" """

View File

@ -23,7 +23,7 @@ from . import open_for_read, open_for_write
class Hex2Utf8: class Hex2Utf8:
""" """
Convert Microsoft hexidecimal numbers to utf-8 Convert Microsoft hexadecimal numbers to utf-8
""" """
def __init__(self, def __init__(self,
@ -54,7 +54,7 @@ class Hex2Utf8:
directory from which the script is run.) directory from which the script is run.)
'symbol'--whether to load the symbol character map 'symbol'--whether to load the symbol character map
'winddings'--whether to load the wingdings character map 'winddings'--whether to load the wingdings character map
'caps'--whether to load the caps characer map 'caps'--whether to load the caps character map
'convert_to_caps'--wether to convert caps to utf-8 'convert_to_caps'--wether to convert caps to utf-8
Returns: Returns:
nothing nothing
@ -110,7 +110,7 @@ class Hex2Utf8:
directory from which the script is run.) directory from which the script is run.)
'symbol'--whether to load the symbol character map 'symbol'--whether to load the symbol character map
'winddings'--whether to load the wingdings character map 'winddings'--whether to load the wingdings character map
'caps'--whether to load the caps characer map 'caps'--whether to load the caps character map
'convert_to_caps'--wether to convert caps to utf-8 'convert_to_caps'--wether to convert caps to utf-8
Returns: Returns:
nothing nothing
@ -145,7 +145,7 @@ class Hex2Utf8:
Set values, including those for the dictionaries. Set values, including those for the dictionaries.
The file that contains the maps is broken down into many different The file that contains the maps is broken down into many different
sets. For example, for the Symbol font, there is the standard part for sets. For example, for the Symbol font, there is the standard part for
hexidecimal numbers, and the part for Microsoft characters. Read hexadecimal numbers, and the part for Microsoft characters. Read
each part in, and then combine them. each part in, and then combine them.
""" """
# the default encoding system, the lower map for characters 0 through # the default encoding system, the lower map for characters 0 through
@ -262,7 +262,7 @@ class Hex2Utf8:
hex_num) hex_num)
if self.__run_level > 4: if self.__run_level > 4:
# msg = 'no dictionary entry for %s\n' # msg = 'no dictionary entry for %s\n'
# msg += 'the hexidecimal num is "%s"\n' % (hex_num) # msg += 'the hexadecimal num is "%s"\n' % (hex_num)
# msg += 'dictionary is %s\n' % self.__current_dict_name # msg += 'dictionary is %s\n' % self.__current_dict_name
msg = 'Character "&#x%s;" does not appear to be valid (or is a control character)\n' % token msg = 'Character "&#x%s;" does not appear to be valid (or is a control character)\n' % token
raise self.__bug_handler(msg) raise self.__bug_handler(msg)
@ -537,7 +537,7 @@ class Hex2Utf8:
new_char_entity = '&#x%s' % hex_num new_char_entity = '&#x%s' % hex_num
converted = self.__caps_uni_dict.get(new_char_entity) converted = self.__caps_uni_dict.get(new_char_entity)
if not converted: if not converted:
# bullets and other entities dont' have capital equivelents # bullets and other entities don't have capital equivalents
return char_entity return char_entity
else: else:
return converted return converted

View File

@ -112,7 +112,7 @@ class Info:
Returns: Returns:
nothing nothing
Logic: Logic:
Check for the beginning of the informatin table. When found, set Check for the beginning of the information table. When found, set
the state to the information table. Always write the line. the state to the information table. Always write the line.
""" """
if self.__token_info == 'mi<mk<doc-in-beg': if self.__token_info == 'mi<mk<doc-in-beg':
@ -127,7 +127,7 @@ class Info:
nothing. nothing.
Logic: Logic:
Check for the end of information. If not found, check if the Check for the end of information. If not found, check if the
token has a special value in the info table dictionay. If it token has a special value in the info table dictionary. If it
does, execute that function. does, execute that function.
Otherwise, output the line to the file. Otherwise, output the line to the file.
""" """
@ -148,7 +148,7 @@ class Info:
Returns: Returns:
nothing nothing
Logic: Logic:
This function marks the beginning of informatin fields that have This function marks the beginning of information fields that have
text that must be collected. Set the type of information field text that must be collected. Set the type of information field
with the tag option. Set the state to collecting text with the tag option. Set the state to collecting text
""" """
@ -212,7 +212,7 @@ class Info:
because it exists in abbreviated form. (num-of-wor) because it exists in abbreviated form. (num-of-wor)
I want to check this information in a dictionary to convert it I want to check this information in a dictionary to convert it
to a longer, readable form. If the key does not exist in the to a longer, readable form. If the key does not exist in the
dictionary, print out an error message. Otherise add the value dictionary, print out an error message. Otherwise add the value
to the text string. to the text string.
(num-of-wor => number-of-words) (num-of-wor => number-of-words)
""" """
@ -265,7 +265,7 @@ class Info:
If the state is in the information table, use other methods to If the state is in the information table, use other methods to
parse the information parse the information
style table, look for lines with style info, and substitute the style table, look for lines with style info, and substitute the
number with the name of the style. If the state if afer the number with the name of the style. If the state is after the
information table, simply write the line to the output file. information table, simply write the line to the output file.
""" """
self.__initiate_values() self.__initiate_values()

View File

@ -175,7 +175,7 @@ class Inline:
Logic: Logic:
If the token is a control word for character info (cw<ci), use another If the token is a control word for character info (cw<ci), use another
method to add to the dictionary. method to add to the dictionary.
Use the dictionary to get the approriate function. Use the dictionary to get the appropriate function.
Always print out the line. Always print out the line.
""" """
if line[0:5] == 'cw<ci': # calibre: bug in original function no diff between cw<ci and cw<pf if line[0:5] == 'cw<ci': # calibre: bug in original function no diff between cw<ci and cw<pf
@ -294,7 +294,7 @@ class Inline:
in waiting. in waiting.
Iterate through this slice, which contains only dictionaries. Iterate through this slice, which contains only dictionaries.
Get the keys in each dictionary. If 'font-style' is in the keys, Get the keys in each dictionary. If 'font-style' is in the keys,
write a marker tag. (I will use this marker tag later when conerting write a marker tag. (I will use this marker tag later when converting
hext text to utf8.) hext text to utf8.)
Write a tag for the inline values. Write a tag for the inline values.
""" """

View File

@ -217,7 +217,7 @@ class ListTable:
nothing nothing
Logic: Logic:
Check for the end of the group. Check for the end of the group.
Otherwise, if the token is hexidecimal, create an attribute. Otherwise, if the token is hexadecimal, create an attribute.
Do so by finding the base-10 value of the number. Then divide Do so by finding the base-10 value of the number. Then divide
this by 2 and round it. Remove the ".0". Sandwwhich the result to this by 2 and round it. Remove the ".0". Sandwwhich the result to
give you something like level1-show-level. give you something like level1-show-level.
@ -249,9 +249,9 @@ class ListTable:
nothing nothing
Logic: Logic:
Check for the end of the group. Check for the end of the group.
Otherwise, if the text is hexidecimal, call on the method Otherwise, if the text is hexadecimal, call on the method
__parse_level_text_length. __parse_level_text_length.
Otheriwse, if the text is regular text, create an attribute. Otherwise, if the text is regular text, create an attribute.
This attribute indicates the puncuation after a certain level. This attribute indicates the puncuation after a certain level.
An example is "level1-marker = '.'" An example is "level1-marker = '.'"
Otherwise, check for a level-template-id. Otherwise, check for a level-template-id.
@ -283,7 +283,7 @@ class ListTable:
def __parse_level_text_length(self, line): def __parse_level_text_length(self, line):
""" """
Requires: Requires:
line --line with hexidecimal number line --line with hexadecimal number
Returns: Returns:
nothing nothing
Logic: Logic:
@ -373,7 +373,7 @@ class ListTable:
a list-in-table tag. Get the dictionary of this list a list-in-table tag. Get the dictionary of this list
(the first item). Print out the key => value pair. (the first item). Print out the key => value pair.
Remove the first item (the dictionary) form this list. Now iterate Remove the first item (the dictionary) form this list. Now iterate
through what is left in the list. Each list will conatin one item, through what is left in the list. Each list will contain one item,
a dictionary. Get this dictionary and print out key => value pair. a dictionary. Get this dictionary and print out key => value pair.
""" """
not_allow = ['list-id',] not_allow = ['list-id',]
@ -440,7 +440,7 @@ class ListTable:
Returns: Returns:
A string and the dictionary of list-table values and attributes. A string and the dictionary of list-table values and attributes.
Logic: Logic:
Call on the __parse_lines metod, which splits the text string into Call on the __parse_lines method, which splits the text string into
lines (which will be tokens) and processes them. lines (which will be tokens) and processes them.
""" """
self.__parse_lines(line) self.__parse_lines(line)

View File

@ -392,7 +392,7 @@ class MakeLists:
Returns: Returns:
Nothing Nothing
Logic Logic
Look for the start of a paragraph defintion. If one is found, check if Look for the start of a paragraph definition. If one is found, check if
it contains a list-id. If it does, start a list. Change the state to it contains a list-id. If it does, start a list. Change the state to
in_pard. in_pard.
""" """

View File

@ -10,7 +10,7 @@ class ParseOptions:
options_dict -- a dictionary with the key equal to the opition, and options_dict -- a dictionary with the key equal to the opition, and
a list describing that option. (See below) a list describing that option. (See below)
Returns: Returns:
A tupple. The first item in the tupple is a dictionary containing A tuple. The first item in the tuple is a dictionary containing
the arguments for each options. The second is a list of the the arguments for each options. The second is a list of the
arguments. arguments.
If invalid options are passed to the module, 0,0 is returned. If invalid options are passed to the module, 0,0 is returned.
@ -193,7 +193,7 @@ class ParseOptions:
list of options list of options
Logic: Logic:
Iterate through the self.__system string, looking for the last Iterate through the self.__system string, looking for the last
option. The options are everything in the sysem string before the option. The options are everything in the system string before the
last option. last option.
Check to see that the options contain no arguments. Check to see that the options contain no arguments.
""" """

View File

@ -316,7 +316,7 @@ if another paragraph_def is found, the state changes to collect_tokens.
Returns: Returns:
nothing nothing
Logic: Logic:
Look for the beginning of a paragaraph definition Look for the beginning of a paragraph definition
""" """
# cw<pf<par-def___<nu<true # cw<pf<par-def___<nu<true
if self.__token_info == 'cw<pf<par-def___': if self.__token_info == 'cw<pf<par-def___':
@ -433,7 +433,7 @@ if another paragraph_def is found, the state changes to collect_tokens.
Nothing Nothing
Logic: Logic:
The previous state was collect tokens, and I have found the start The previous state was collect tokens, and I have found the start
of a paragraph. I want to outut the defintion tag; output the line of a paragraph. I want to output the definition tag; output the line
itself (telling me of the beginning of a paragraph);change the itself (telling me of the beginning of a paragraph);change the
state to 'in_paragraphs'; state to 'in_paragraphs';
""" """
@ -449,7 +449,7 @@ if another paragraph_def is found, the state changes to collect_tokens.
Nothing Nothing
Logic: Logic:
The state was is after_para_def. and I have found the start of a The state was is after_para_def. and I have found the start of a
paragraph. I want to outut the defintion tag; output the line paragraph. I want to output the definition tag; output the line
itself (telling me of the beginning of a paragraph);change the itself (telling me of the beginning of a paragraph);change the
state to 'in_paragraphs'. state to 'in_paragraphs'.
(I now realize that this is absolutely identical to the function above!) (I now realize that this is absolutely identical to the function above!)
@ -517,8 +517,8 @@ if another paragraph_def is found, the state changes to collect_tokens.
out the paragraph definition. If you find another paragraph out the paragraph definition. If you find another paragraph
definition, then you write out the old paragraph dictionary and definition, then you write out the old paragraph dictionary and
print out the string. You change the state to collect tokens. print out the string. You change the state to collect tokens.
If you find any larger block elemens, such as cell, row, If you find any larger block elements, such as cell, row,
field-block, or section, you write out the paragraph defintion and field-block, or section, you write out the paragraph definition and
then the text string. then the text string.
If you find the beginning of a paragraph, then you don't need to If you find the beginning of a paragraph, then you don't need to
write out the paragraph definition. Write out the string, and write out the paragraph definition. Write out the string, and
@ -537,7 +537,7 @@ if another paragraph_def is found, the state changes to collect_tokens.
Nothing Nothing
Logic: Logic:
The state is after the end of a paragraph. You have found the The state is after the end of a paragraph. You have found the
start of a paragaph, so you don't need to print out the paragaph start of a paragraph, so you don't need to print out the paragraph
definition. Print out the string, the line, and change the state definition. Print out the string, the line, and change the state
to in paragraphs. to in paragraphs.
""" """
@ -553,8 +553,8 @@ if another paragraph_def is found, the state changes to collect_tokens.
Returns: Returns:
Nothing Nothing
Logic: Logic:
You have found a new paragraph defintion at the end of a You have found a new paragraph definition at the end of a
paragraph. Output the end of the old paragraph defintion. Output paragraph. Output the end of the old paragraph definition. Output
the text string. Output the line. Change the state to collect the text string. Output the line. Change the state to collect
tokens. (And don't forget to set the text string to ''!) tokens. (And don't forget to set the text string to ''!)
""" """
@ -571,7 +571,7 @@ if another paragraph_def is found, the state changes to collect_tokens.
Logic: Logic:
The state is after a paragraph, and you have found a larger block The state is after a paragraph, and you have found a larger block
than paragraph-definition. You want to write the end tag of the than paragraph-definition. You want to write the end tag of the
old defintion and reset the text string (handled by other old definition and reset the text string (handled by other
methods). methods).
""" """
self.__write_para_def_end_func() self.__write_para_def_end_func()

View File

@ -163,7 +163,7 @@ class PreambleDiv:
def __make_default_font_table(self): def __make_default_font_table(self):
""" """
If not font table is fount, need to write one out. If no font table is found, need to write one out.
""" """
self.__font_table_final = 'mi<tg<open______<font-table\n' self.__font_table_final = 'mi<tg<open______<font-table\n'
self.__font_table_final += 'mi<mk<fonttb-beg\n' self.__font_table_final += 'mi<mk<fonttb-beg\n'

View File

@ -137,7 +137,7 @@ class Preamble:
nothing (changes the original file) nothing (changes the original file)
Logic: Logic:
Read one line in at a time. Determine what action to take based on Read one line in at a time. Determine what action to take based on
the state. The state can either be defaut, the revision table, or the state. The state can either be default, the revision table, or
the list table. the list table.
""" """
self.__initiate_values() self.__initiate_values()

View File

@ -588,7 +588,7 @@ class ProcessTokens:
} }
""" """
# unknown # unknown
# These must get passed on because they occure after \\* # These must get passed on because they occur after \\*
'do' : ('un', 'unknown___', self.default_func), 'do' : ('un', 'unknown___', self.default_func),
'company' : ('un', 'company___', self.default_func), 'company' : ('un', 'company___', self.default_func),
'shpinst' : ('un', 'unknown___', self.default_func), 'shpinst' : ('un', 'unknown___', self.default_func),
@ -755,7 +755,7 @@ class ProcessTokens:
return first, second return first, second
def convert_to_hex(self,number): def convert_to_hex(self,number):
"""Convert a string to uppercase hexidecimal""" """Convert a string to uppercase hexadecimal"""
num = int(number) num = int(number)
try: try:
hex_num = "%X" % num hex_num = "%X" % num

View File

@ -52,8 +52,8 @@ class Sections:
between the section tags. between the section tags.
Start a new section outside the field-block strings. Use the second number in Start a new section outside the field-block strings. Use the second number in
the list; use the second item in the description list. the list; use the second item in the description list.
CHANGE (2004-04-26) No longer write sections that occurr in field-blocks. CHANGE (2004-04-26) No longer write sections that occur in field-blocks.
Instead, ingore all section information in a field-block. Instead, ignore all section information in a field-block.
""" """
def __init__(self, def __init__(self,
@ -151,7 +151,7 @@ class Sections:
nothing nothing
Logic: Logic:
I need to add the right data to the section values dictionary so I I need to add the right data to the section values dictionary so I
can retrive it later. The attribute (or key) is the name; the can retrieve it later. The attribute (or key) is the name; the
value is the last part of the text string. value is the last part of the text string.
ex: cw<tb<columns___<nu<2 ex: cw<tb<columns___<nu<2
""" """
@ -207,7 +207,7 @@ class Sections:
nothing nothing
Logic: Logic:
I have found a section definition. Check if the line is the end of I have found a section definition. Check if the line is the end of
the defnition (a paragraph defintion), or if it contains info that the definition (a paragraph definition), or if it contains info that
should be added to the values dictionary. If neither of these should be added to the values dictionary. If neither of these
cases are true, output the line to a file. cases are true, output the line to a file.
""" """
@ -247,9 +247,9 @@ class Sections:
nothing nothing
Logic: Logic:
Text or control words indicating text have been found Text or control words indicating text have been found
before \\pard. This shoud indicate older RTF. Reset the state before \\pard. This should indicate older RTF. Reset the state
Write the section defintion. Insert a paragraph definition. Write the section definition. Insert a paragraph definition.
Insert {} to mark the end of a paragraph defintion Insert {} to mark the end of a paragraph definition
""" """
if not self.__in_field: if not self.__in_field:
self.__state = 'body' self.__state = 'body'
@ -427,7 +427,7 @@ class Sections:
Change the state. Change the state.
""" """
# change this 2004-04-26 # change this 2004-04-26
# Don't do anyting # Don't do anything
""" """
self.__sec_in_field_string += line self.__sec_in_field_string += line
self.__print_field_sec_attributes() self.__print_field_sec_attributes()

View File

@ -89,7 +89,7 @@ class Styles:
'default-ta' : 'default-tab', 'default-ta' : 'default-tab',
'align_____' : 'align', 'align_____' : 'align',
'widow-cntr' : 'widow-control', 'widow-cntr' : 'widow-control',
# page fomratting mixed in! (Just in older RTF?) # page formatting mixed in! (Just in older RTF?)
'margin-lef' : 'left-indent', 'margin-lef' : 'left-indent',
'margin-rig' : 'right-indent', 'margin-rig' : 'right-indent',
'margin-bot' : 'space-after', 'margin-bot' : 'space-after',
@ -527,7 +527,7 @@ class Styles:
style. I accomplish this by simply looking up the value of 15 in style. I accomplish this by simply looking up the value of 15 in
the styles table. the styles table.
Use two loops. First, check all the paragraph styles. Then check Use two loops. First, check all the paragraph styles. Then check
all the characer styles. all the character styles.
The inner loop: first check 'next-style', then check 'based-on-style'. The inner loop: first check 'next-style', then check 'based-on-style'.
Make sure values exist for the keys to avoid the nasty keyerror message. Make sure values exist for the keys to avoid the nasty keyerror message.
""" """
@ -629,7 +629,7 @@ class Styles:
Returns: Returns:
nothing nothing
Logic: Logic:
Check the line for the beginning of an individaul style. If it is Check the line for the beginning of an individual style. If it is
not found, simply print out the line. not found, simply print out the line.
""" """
action = self.__state_dict.get(self.__token_info) action = self.__state_dict.get(self.__token_info)
@ -698,7 +698,7 @@ class Styles:
beginning of the style table. beginning of the style table.
If the state is in the style table, create the style dictionary If the state is in the style table, create the style dictionary
and print out the tags. and print out the tags.
If the state if afer the style table, look for lines with style If the state is after the style table, look for lines with style
info, and substitute the number with the name of the style. info, and substitute the number with the name of the style.
""" """
self.__initiate_values() self.__initiate_values()

View File

@ -29,8 +29,8 @@ States.
2. 'mi<mk<not-in-tbl', end the table. 2. 'mi<mk<not-in-tbl', end the table.
3. 'cw<tb<row-def___' start a row definition 3. 'cw<tb<row-def___' start a row definition
3. in_row_definition 3. in_row_definition
1. 'mi<mk<not-in-tbl' : end the row defintion. If in table, end the table. 1. 'mi<mk<not-in-tbl' : end the row definition. If in table, end the table.
2. 'mi<mk<pard-start' : end the row defintion 2. 'mi<mk<pard-start' : end the row definition
if already in the table, start a row and cell. if already in the table, start a row and cell.
3. 'cw<tb<row_______' : end the row definition, end the row 3. 'cw<tb<row_______' : end the row definition, end the row
4. 'cw...' use another method to handle the control word 4. 'cw...' use another method to handle the control word
@ -299,7 +299,7 @@ class Table:
the tokens in the row definition contain the following information: the tokens in the row definition contain the following information:
1. row borders. 1. row borders.
2. cell borders for all cells in the row. 2. cell borders for all cells in the row.
3. cell postions for all cells in the row. 3. cell positions for all cells in the row.
Put all information about row borders into a row dictionary. Put all information about row borders into a row dictionary.
Put all information about cell borders into into the dictionary in Put all information about cell borders into into the dictionary in
the last item in the cell list. ([{border:something, width:something}, the last item in the cell list. ([{border:something, width:something},
@ -501,7 +501,7 @@ class Table:
nothing nothing
Logic: Logic:
Write an empty tag with attributes if there are attributes. Write an empty tag with attributes if there are attributes.
Otherwise, writen an empty tag with cell as element. Otherwise, write an empty tag with cell as element.
""" """
if len(self.__cell_list) > 0: if len(self.__cell_list) > 0:
self.__write_obj.write('mi<tg<empty-att_<cell') self.__write_obj.write('mi<tg<empty-att_<cell')

View File

@ -195,7 +195,7 @@ class SNBFile:
def Output(self, outputFile): def Output(self, outputFile):
# Sort the files in file buffer, # Sort the files in file buffer,
# requried by the SNB file format # required by the SNB file format
self.files.sort(key=lambda x: x.fileName) self.files.sort(key=lambda x: x.fileName)
outputFile = open(outputFile, 'wb') outputFile = open(outputFile, 'wb')

View File

@ -59,7 +59,7 @@ class MarkdownMLizer(OEB2HTML):
# pre has 4 spaces. We trimmed 3 so anything with a space left is a pre. # pre has 4 spaces. We trimmed 3 so anything with a space left is a pre.
text = re.sub('(?msu)^[ ]', ' ', text) text = re.sub('(?msu)^[ ]', ' ', text)
# Remove tabs that aren't at the beinning of a line # Remove tabs that aren't at the beginning of a line
new_text = [] new_text = []
for l in text.splitlines(): for l in text.splitlines():
start = re.match('\t+', l) start = re.match('\t+', l)

View File

@ -79,7 +79,7 @@ class TextileMLizer(OEB2HTML):
text = re.sub(r'(\s|[*_\'"])\[('+t+'[a-zA-Z0-9 \'",.*_]+'+t+r')\](\s|[*_\'"?!,.])', r'\1\2\3', text) text = re.sub(r'(\s|[*_\'"])\[('+t+'[a-zA-Z0-9 \'",.*_]+'+t+r')\](\s|[*_\'"?!,.])', r'\1\2\3', text)
return text return text
# Now tidyup links and ids - remove ones that don't have a correponding opposite # Now tidyup links and ids - remove ones that don't have a corresponding opposite
if self.opts.keep_links: if self.opts.keep_links:
for i in self.our_links: for i in self.our_links:
if i[0] == '#': if i[0] == '#':

View File

@ -75,7 +75,7 @@ class Unidecoder:
self.codepoints.update(HANCODES) self.codepoints.update(HANCODES)
def decode(self, text): def decode(self, text):
# Replace characters larger than 127 with their ASCII equivelent. # Replace characters larger than 127 with their ASCII equivalent.
return re.sub('[^\x00-\x7f]',lambda x: self.replace_point(x.group()), text) return re.sub('[^\x00-\x7f]',lambda x: self.replace_point(x.group()), text)
def replace_point(self, codepoint): def replace_point(self, codepoint):
@ -95,7 +95,7 @@ class Unidecoder:
''' '''
Find what group character is a part of. Find what group character is a part of.
''' '''
# Code groups withing CODEPOINTS take the form 'xAB' # Code groups within CODEPOINTS take the form 'xAB'
if not isinstance(character, unicode_type): if not isinstance(character, unicode_type):
character = unicode_type(character, "utf-8") character = unicode_type(character, "utf-8")
return 'x%02x' % (ord(character) >> 8) return 'x%02x' % (ord(character) >> 8)

View File

@ -274,7 +274,7 @@ class InterfaceAction(QObject):
persist_shortcut=persist_shortcut) persist_shortcut=persist_shortcut)
# In Qt 5 keyboard shortcuts dont work unless the # In Qt 5 keyboard shortcuts dont work unless the
# action is explicitly added to the main window and on OSX and # action is explicitly added to the main window and on OSX and
# Unity since the menu might be exported, the shortcuts wont work # Unity since the menu might be exported, the shortcuts won't work
self.gui.addAction(ac) self.gui.addAction(ac)
if triggered is not None: if triggered is not None:
ac.triggered.connect(triggered) ac.triggered.connect(triggered)

View File

@ -675,7 +675,7 @@ class BarsManager(QObject):
''' '''
This shows the correct main toolbar and rebuilds the menubar based on This shows the correct main toolbar and rebuilds the menubar based on
whether a device is connected or not. Note that the toolbars are whether a device is connected or not. Note that the toolbars are
explicitly not rebuilt, this is to workaround a Qt limitation iwth explicitly not rebuilt, this is to workaround a Qt limitation with
QToolButton's popup menus and modal dialogs. If you want the toolbars QToolButton's popup menus and modal dialogs. If you want the toolbars
rebuilt, call init_bars(). rebuilt, call init_bars().
''' '''

View File

@ -976,7 +976,7 @@ class GridView(QListView):
newdb.new_api.add_cover_cache(x) newdb.new_api.add_cover_cache(x)
try: try:
# Use a timeout so that if, for some reason, the render thread # Use a timeout so that if, for some reason, the render thread
# gets stuck, we dont deadlock, future covers wont get # gets stuck, we don't deadlock, future covers won't get
# rendered, but this is better than a deadlock # rendered, but this is better than a deadlock
join_with_timeout(self.delegate.render_queue) join_with_timeout(self.delegate.render_queue)
except RuntimeError: except RuntimeError:

View File

@ -990,7 +990,7 @@ class BooksView(QTableView): # {{{
self.restore_state() self.restore_state()
self.set_ondevice_column_visibility() self.set_ondevice_column_visibility()
# incase there were marked books # in case there were marked books
self.model().set_row_decoration(set()) self.model().set_row_decoration(set())
self.row_header.headerDataChanged(Qt.Orientation.Vertical, 0, self.row_header.count()-1) self.row_header.headerDataChanged(Qt.Orientation.Vertical, 0, self.row_header.count()-1)
self.row_header.geometriesChanged.emit() self.row_header.geometriesChanged.emit()

View File

@ -269,7 +269,7 @@ class MetadataSingleDialogBase(QDialog):
self.fetch_metadata_button = b = RightClickButton(self) self.fetch_metadata_button = b = RightClickButton(self)
# The following rigmarole is needed so that Qt gives the button the # The following rigmarole is needed so that Qt gives the button the
# same height as the other buttons in the dialog. There is no way to # same height as the other buttons in the dialog. There is no way to
# center the text in a QToolButton with an icon, so we cant just set an # center the text in a QToolButton with an icon, so we can't just set an
# icon # icon
b.setIcon(QIcon(I('download-metadata.png'))) b.setIcon(QIcon(I('download-metadata.png')))
b.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextBesideIcon) b.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextBesideIcon)

View File

@ -160,7 +160,7 @@ public:
/*! /*!
Returns QImage of specified slide. Returns QImage of specified slide.
This function will be called only whenever necessary, e.g. the 100th slide This function will be called only whenever necessary, e.g. the 100th slide
will not be retrived when only the first few slides are visible. will not be retrieved when only the first few slides are visible.
*/ */
virtual QImage slide(int index) const; virtual QImage slide(int index) const;

View File

@ -102,8 +102,8 @@ class TextureChooser(QDialog):
return i return i
def update_remove_state(self): def update_remove_state(self):
removeable = bool(self.selected_fname and not self.selected_fname.startswith(':')) removable = bool(self.selected_fname and not self.selected_fname.startswith(':'))
self.remove_button.setEnabled(removeable) self.remove_button.setEnabled(removable)
@property @property
def texture(self): def texture(self):

View File

@ -115,14 +115,14 @@ class StorePlugin: # {{{
disabled by default. disabled by default.
If a store doesn't provide search on it's own use something like a site specific If a store doesn't provide search on it's own use something like a site specific
google search to get search results for this funtion. google search to get search results for this function.
:param query: The string query search with. :param query: The string query search with.
:param max_results: The maximum number of results to return. :param max_results: The maximum number of results to return.
:param timeout: The maximum amount of time in seconds to spend downloading data for search results. :param timeout: The maximum amount of time in seconds to spend downloading data for search results.
:return: :class:`calibre.gui2.store.search_result.SearchResult` objects :return: :class:`calibre.gui2.store.search_result.SearchResult` objects
item_data is plugin specific and is used in :meth:`open` to open to a specifc place in the store. item_data is plugin specific and is used in :meth:`open` to open to a specific place in the store.
''' '''
raise NotImplementedError() raise NotImplementedError()

View File

@ -148,7 +148,7 @@ class Matches(QAbstractItemModel):
query = query.replace('~', '') query = query.replace('~', '')
query = query.replace('>', '') query = query.replace('>', '')
query = query.replace('<', '') query = query.replace('<', '')
# Store the query at this point for comparision later # Store the query at this point for comparison later
mod_query = query mod_query = query
# Remove filter identifiers # Remove filter identifiers
# Remove the prefix. # Remove the prefix.

View File

@ -200,7 +200,7 @@ class SearchDialog(QDialog, Ui_Dialog):
self.searching = True self.searching = True
self.search.setText(self.STOP_TEXT) self.search.setText(self.STOP_TEXT)
# Give the query to the results model so it can do # Give the query to the results model so it can do
# futher filtering. # further filtering.
self.results_view.model().set_query(query) self.results_view.model().set_query(query)
# Plugins are in random order that does not change. # Plugins are in random order that does not change.

View File

@ -53,7 +53,7 @@ class BubokPortugalStore(BasicStoreConfig, StorePlugin):
title = ''.join(data.xpath('.//div[@class="titulo"]/text()')) title = ''.join(data.xpath('.//div[@class="titulo"]/text()'))
author = ''.join(data.xpath('.//div[@class="autor"]/text()')) author = ''.join(data.xpath('.//div[@class="autor"]/text()'))
price = ''.join(data.xpath('.//div[@class="precio"]/text()')) price = ''.join(data.xpath('.//div[@class="precio"]/text()'))

View File

@ -53,7 +53,7 @@ class BubokPublishingStore(BasicStoreConfig, StorePlugin):
title = ''.join(data.xpath('.//div[@class="titulo"]/text()')) title = ''.join(data.xpath('.//div[@class="titulo"]/text()'))
author = ''.join(data.xpath('.//div[@class="autor"]/text()')) author = ''.join(data.xpath('.//div[@class="autor"]/text()'))
price = ''.join(data.xpath('.//div[@class="precio"]/text()')) price = ''.join(data.xpath('.//div[@class="precio"]/text()'))

View File

@ -62,7 +62,7 @@ class RW2010Store(BasicStoreConfig, StorePlugin):
with closing(br.open(id.strip(), timeout=timeout/4)) as nf: with closing(br.open(id.strip(), timeout=timeout/4)) as nf:
idata = html.fromstring(nf.read()) idata = html.fromstring(nf.read())
cover_url = ''.join(idata.xpath('//div[@class="boxa"]//div[@class="img"]/img/@src')) cover_url = ''.join(idata.xpath('//div[@class="boxa"]//div[@class="img"]/img/@src'))
author = ''.join(idata.xpath('//div[@class="boxb"]//h3[text()="Autor: "]/span/text()')) author = ''.join(idata.xpath('//div[@class="boxb"]//h3[text()="Autor: "]/span/text()'))
title = ''.join(idata.xpath('//div[@class="boxb"]/h2[1]/text()')) title = ''.join(idata.xpath('//div[@class="boxb"]/h2[1]/text()'))
title = re.sub(r'\(#.+\)', '', title) title = re.sub(r'\(#.+\)', '', title)
formats = ''.join(idata.xpath('//div[@class="boxb"]//h3[text()="Format pliku: "]/span/text()')) formats = ''.join(idata.xpath('//div[@class="boxb"]//h3[text()="Format pliku: "]/span/text()'))

View File

@ -54,7 +54,7 @@ class ThreadedJob(BaseJob):
Note that it is not called if the user kills the job. Check job.failed Note that it is not called if the user kills the job. Check job.failed
to see if the job succeeded or not. And use job.log to get the job log. to see if the job succeeded or not. And use job.log to get the job log.
:param killable: If False the GUI wont let the user kill this job :param killable: If False the GUI won't let the user kill this job
:param log: Must be a subclass of GUILog or None. If None a default :param log: Must be a subclass of GUILog or None. If None a default
GUILog is created. GUILog is created.

View File

@ -60,7 +60,7 @@ enum {
}; };
/* values from this array need to correspont to the order of the enum above */ /* values from this array need to correspond to the order of the enum above */
static char *opcode_names[] = { static char *opcode_names[] = {
"equal", "equal",
"insert", "insert",
@ -586,7 +586,7 @@ load_lines(PyObject *orig, struct line **lines)
line->data = item; line->data = item;
line->hash = PyObject_Hash(item); line->hash = PyObject_Hash(item);
if (line->hash == (-1)) { if (line->hash == (-1)) {
/* Propogate the hash exception */ /* Propagate the hash exception */
size = -1; size = -1;
goto cleanup; goto cleanup;
} }

View File

@ -401,7 +401,7 @@ class Editor(QMainWindow):
if name == 'insert-tag': if name == 'insert-tag':
w = bar.widgetForAction(ac) w = bar.widgetForAction(ac)
if hasattr(w, 'setPopupMode'): if hasattr(w, 'setPopupMode'):
# For some unknown reason this button is occassionally a # For some unknown reason this button is occasionally a
# QPushButton instead of a QToolButton # QPushButton instead of a QToolButton
w.setPopupMode(QToolButton.ToolButtonPopupMode.MenuButtonPopup) w.setPopupMode(QToolButton.ToolButtonPopupMode.MenuButtonPopup)
w.setMenu(self.insert_tag_menu) w.setMenu(self.insert_tag_menu)
@ -413,7 +413,7 @@ class Editor(QMainWindow):
ac.setMenu(m) ac.setMenu(m)
ch = bar.widgetForAction(ac) ch = bar.widgetForAction(ac)
if hasattr(ch, 'setPopupMode'): if hasattr(ch, 'setPopupMode'):
# For some unknown reason this button is occassionally a # For some unknown reason this button is occasionally a
# QPushButton instead of a QToolButton # QPushButton instead of a QToolButton
ch.setPopupMode(QToolButton.ToolButtonPopupMode.InstantPopup) ch.setPopupMode(QToolButton.ToolButtonPopupMode.InstantPopup)
for name in tuple('h%d' % d for d in range(1, 7)) + ('p',): for name in tuple('h%d' % d for d in range(1, 7)) + ('p',):

View File

@ -62,7 +62,7 @@ class BlockingJob(QWidget):
def start(self): def start(self):
self.setGeometry(0, 0, self.parent().width(), self.parent().height()) self.setGeometry(0, 0, self.parent().width(), self.parent().height())
self.setVisible(True) self.setVisible(True)
# Prevent any actions from being triggerred by key presses # Prevent any actions from being triggered by key presses
self.parent().setEnabled(False) self.parent().setEnabled(False)
self.raise_() self.raise_()
self.setFocus(Qt.FocusReason.OtherFocusReason) self.setFocus(Qt.FocusReason.OtherFocusReason)

View File

@ -176,7 +176,7 @@ class Declaration(QWidget):
]) ])
self.lines_for_copy.append(text + vtext) self.lines_for_copy.append(text + vtext)
if prop.is_overriden: if prop.is_overriden:
self.lines_for_copy[-1] += ' [overriden]' self.lines_for_copy[-1] += ' [overridden]'
ypos += max(br1.height(), br2.height()) + line_spacing ypos += max(br1.height(), br2.height()) + line_spacing
self.lines_for_copy.append('--------------------------\n') self.lines_for_copy.append('--------------------------\n')

View File

@ -472,7 +472,7 @@ class ToolbarSettings(QWidget):
self.sl = l = QGridLayout() self.sl = l = QGridLayout()
gl.addLayout(l, 1, 0, 1, -1) gl.addLayout(l, 1, 0, 1, -1)
self.gb1 = gb1 = QGroupBox(_('A&vailable actions'), self) self.gb1 = gb1 = QGroupBox(_('A&vailable actions'), self)
self.gb2 = gb2 = QGroupBox(_('&Current actions'), self) self.gb2 = gb2 = QGroupBox(_('&Current actions'), self)
gb1.setFlat(True), gb2.setFlat(True) gb1.setFlat(True), gb2.setFlat(True)
gb1.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding) gb1.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)

View File

@ -61,7 +61,7 @@ def save_container(container, path):
except EnvironmentError as err: except EnvironmentError as err:
if err.errno not in (errno.EPERM, errno.EACCES): if err.errno not in (errno.EPERM, errno.EACCES):
# ignore chown failure as user could be editing file belonging # ignore chown failure as user could be editing file belonging
# to a different user, in which case we really cant do anything # to a different user, in which case we really can't do anything
# about it short of making the file update non-atomic # about it short of making the file update non-atomic
raise raise

View File

@ -53,7 +53,7 @@ def get_newest_version():
# certificate verification failed, since the version check contains no # certificate verification failed, since the version check contains no
# critical information, ignore and proceed # critical information, ignore and proceed
# We have to do this as if the calibre CA certificate ever # We have to do this as if the calibre CA certificate ever
# needs to be revoked, then we wont be able to do version checks # needs to be revoked, then we won't be able to do version checks
version = get_https_resource_securely(URL, headers=headers, cacerts=None) version = get_https_resource_securely(URL, headers=headers, cacerts=None)
try: try:
version = version.decode('utf-8').strip() version = version.decode('utf-8').strip()

View File

@ -418,7 +418,7 @@ class ConfigureToolBar(Dialog):
self.h = h = QHBoxLayout() self.h = h = QHBoxLayout()
l.addLayout(h) l.addLayout(h)
self.lg = lg = QGroupBox(_('A&vailable actions'), self) self.lg = lg = QGroupBox(_('A&vailable actions'), self)
lg.v = v = QVBoxLayout(lg) lg.v = v = QVBoxLayout(lg)
v.addWidget(self.available_actions) v.addWidget(self.available_actions)
h.addWidget(lg) h.addWidget(lg)

View File

@ -83,7 +83,7 @@ class FilenamePattern(QWidget, Ui_Form): # {{{
connect_lambda(self.re.lineEdit().textChanged, self, lambda self, x: self.changed_signal.emit()) connect_lambda(self.re.lineEdit().textChanged, self, lambda self, x: self.changed_signal.emit())
def initialize(self, defaults=False): def initialize(self, defaults=False):
# Get all items in the combobox. If we are reseting # Get all items in the combobox. If we are resetting
# to defaults we don't want to lose what the user # to defaults we don't want to lose what the user
# has added. # has added.
val_hist = [unicode_type(self.re.lineEdit().text())] + [unicode_type(self.re.itemText(i)) for i in range(self.re.count())] val_hist = [unicode_type(self.re.lineEdit().text())] + [unicode_type(self.re.itemText(i)) for i in range(self.re.count())]
@ -789,7 +789,7 @@ class EncodingComboBox(QComboBox): # {{{
A combobox that holds text encodings support A combobox that holds text encodings support
by Python. This is only populated with the most by Python. This is only populated with the most
common and standard encodings. There is no good common and standard encodings. There is no good
way to programatically list all supported encodings way to programmatically list all supported encodings
using encodings.aliases.aliases.keys(). It using encodings.aliases.aliases.keys(). It
will not work. will not work.
''' '''

View File

@ -32,7 +32,7 @@ class GenericUnixServices : public QGenericUnixServices {
* leading to a segfault. For example, defaultHintStyleFromMatch() queries * leading to a segfault. For example, defaultHintStyleFromMatch() queries
* the nativeInterface() without checking that it is NULL. See * the nativeInterface() without checking that it is NULL. See
* https://bugreports.qt-project.org/browse/QTBUG-40946 * https://bugreports.qt-project.org/browse/QTBUG-40946
* This is no longer strictly neccessary since we implement our own fontconfig database * This is no longer strictly necessary since we implement our own fontconfig database
* (a patched version of the Qt fontconfig database). However, it is probably a good idea to * (a patched version of the Qt fontconfig database). However, it is probably a good idea to
* keep it unknown, since the headless QPA is used in contexts where a desktop environment * keep it unknown, since the headless QPA is used in contexts where a desktop environment
* does not make sense anyway. * does not make sense anyway.

View File

@ -279,7 +279,7 @@ class BIBTEX(CatalogPlugin):
bibfile_enctag = ['strict', 'replace', 'ignore', 'backslashreplace'] bibfile_enctag = ['strict', 'replace', 'ignore', 'backslashreplace']
bib_entry = ['mixed', 'misc', 'book'] bib_entry = ['mixed', 'misc', 'book']
# Needed beacause CLI return str vs int by widget # Needed because CLI return str vs int by widget
try: try:
bibfile_enc = bibfile_enc[opts.bibfile_enc] bibfile_enc = bibfile_enc[opts.bibfile_enc]
bibfile_enctag = bibfile_enctag[opts.bibfile_enctag] bibfile_enctag = bibfile_enctag[opts.bibfile_enctag]
@ -342,7 +342,7 @@ class BIBTEX(CatalogPlugin):
# Initialize BibTeX class # Initialize BibTeX class
bibtexc = BibTeX() bibtexc = BibTeX()
# Entries writing after Bibtex formating (or not) # Entries writing after Bibtex formatting (or not)
if bibfile_enc != 'ascii' : if bibfile_enc != 'ascii' :
bibtexc.ascii_bibtex = False bibtexc.ascii_bibtex = False
else : else :

Some files were not shown because too many files have changed in this diff.