mirror of
https://github.com/kovidgoyal/calibre.git
synced 2025-07-09 03:04:10 -04:00
Fix the remaining typos
This commit is contained in:
parent
a1982c2c7e
commit
fb1d7c40f8
@ -263,7 +263,7 @@ def add_catalog(cache, path, title, dbapi=None):
|
||||
new_book_added = True
|
||||
else:
|
||||
cache._set_metadata(db_id, mi)
|
||||
cache.add_format(db_id, fmt, stream, dbapi=dbapi) # Cant keep write lock since post-import hooks might run
|
||||
cache.add_format(db_id, fmt, stream, dbapi=dbapi) # Can't keep write lock since post-import hooks might run
|
||||
|
||||
return db_id, new_book_added
|
||||
|
||||
@ -295,7 +295,7 @@ def add_news(cache, path, arg, dbapi=None):
|
||||
mi.timestamp = utcnow()
|
||||
|
||||
db_id = cache._create_book_entry(mi, apply_import_tags=False)
|
||||
cache.add_format(db_id, fmt, stream, dbapi=dbapi) # Cant keep write lock since post-import hooks might run
|
||||
cache.add_format(db_id, fmt, stream, dbapi=dbapi) # Can't keep write lock since post-import hooks might run
|
||||
|
||||
if not hasattr(path, 'read'):
|
||||
stream.close()
|
||||
|
@ -23,7 +23,7 @@ class DeleteService(Thread):
|
||||
by only moving the files/folders to be deleted out of the library in the
|
||||
main thread, they are deleted to recycle bin in a separate worker thread.
|
||||
|
||||
This has the added advantage that doing a restore from the recycle bin wont
|
||||
This has the added advantage that doing a restore from the recycle bin won't
|
||||
cause metadata.db and the file system to get out of sync. Also, deleting
|
||||
becomes much faster, since in the common case, the move is done by a simple
|
||||
os.rename(). The downside is that if the user quits calibre while a long
|
||||
|
@ -346,7 +346,7 @@ def main():
|
||||
dev.rm(args[0])
|
||||
elif command == "touch":
|
||||
parser = OptionParser(usage="usage: %prog touch path\nCreate an empty file on the device\n\npath should point to a file on the device and must begin with /,a:/ or b:/\n\n"+ # noqa
|
||||
"Unfortunately, I cant figure out how to update file times on the device, so if path already exists, touch does nothing")
|
||||
"Unfortunately, I can't figure out how to update file times on the device, so if path already exists, touch does nothing")
|
||||
options, args = parser.parse_args(args)
|
||||
if len(args) != 1:
|
||||
parser.print_help()
|
||||
|
@ -148,7 +148,7 @@ class ControlError(ProtocolError):
|
||||
|
||||
class WrongDestinationError(PathError):
|
||||
''' The user chose the wrong destination to send books to, for example by
|
||||
trying to send books to a non existant storage card.'''
|
||||
trying to send books to a non existent storage card.'''
|
||||
pass
|
||||
|
||||
|
||||
|
@ -677,7 +677,7 @@ class DevicePlugin(Plugin):
|
||||
def synchronize_with_db(self, db, book_id, book_metadata, first_call):
|
||||
'''
|
||||
Called during book matching when a book on the device is matched with
|
||||
a book in calibre's db. The method is responsible for syncronizing
|
||||
a book in calibre's db. The method is responsible for synchronizing
|
||||
data from the device to calibre's db (if needed).
|
||||
|
||||
The method must return a two-value tuple. The first value is a set of
|
||||
|
@ -280,7 +280,7 @@ class APNXBuilder:
|
||||
|
||||
def get_pages_pagebreak_tag(self, mobi_file_path):
|
||||
'''
|
||||
Determine pages based on the presense of
|
||||
Determine pages based on the presence of
|
||||
<mbp:pagebreak>.
|
||||
'''
|
||||
pages = []
|
||||
|
@ -309,7 +309,7 @@ class KOBO(USBMS):
|
||||
# print 'update_metadata_item returned true'
|
||||
changed = True
|
||||
else:
|
||||
debug_print(" Strange: The file: ", prefix, lpath, " does mot exist!")
|
||||
debug_print(" Strange: The file: ", prefix, lpath, " does not exist!")
|
||||
if lpath in playlist_map and \
|
||||
playlist_map[lpath] not in bl[idx].device_collections:
|
||||
bl[idx].device_collections = playlist_map.get(lpath,[])
|
||||
@ -321,7 +321,7 @@ class KOBO(USBMS):
|
||||
if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
|
||||
book = self.book_from_path(prefix, lpath, title, authors, mime, date, ContentType, ImageID)
|
||||
else:
|
||||
debug_print(" Strange: The file: ", prefix, lpath, " does mot exist!")
|
||||
debug_print(" Strange: The file: ", prefix, lpath, " does not exist!")
|
||||
title = "FILE MISSING: " + title
|
||||
book = self.book_class(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=1048576)
|
||||
|
||||
@ -350,10 +350,10 @@ class KOBO(USBMS):
|
||||
if self.dbversion >= 33:
|
||||
query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
|
||||
'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, Accessibility, IsDownloaded from content where '
|
||||
'BookID is Null %(previews)s %(recomendations)s and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(
|
||||
'BookID is Null %(previews)s %(recommendations)s and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(
|
||||
expiry=' and ContentType = 6)' if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')',
|
||||
previews=' and Accessibility <> 6' if not self.show_previews else '',
|
||||
recomendations=' and IsDownloaded in (\'true\', 1)' if opts.extra_customization[self.OPT_SHOW_RECOMMENDATIONS] is False else '')
|
||||
recommendations=' and IsDownloaded in (\'true\', 1)' if opts.extra_customization[self.OPT_SHOW_RECOMMENDATIONS] is False else '')
|
||||
elif self.dbversion >= 16 and self.dbversion < 33:
|
||||
query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
|
||||
'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, Accessibility, "1" as IsDownloaded from content where '
|
||||
@ -600,7 +600,7 @@ class KOBO(USBMS):
|
||||
prints('in add_books_to_metadata. Prefix is None!', path,
|
||||
self._main_prefix)
|
||||
continue
|
||||
# print "Add book to metatdata: "
|
||||
# print "Add book to metadata: "
|
||||
# print "prefix: " + prefix
|
||||
lpath = path.partition(prefix)[2]
|
||||
if lpath.startswith('/') or lpath.startswith('\\'):
|
||||
@ -926,10 +926,10 @@ class KOBO(USBMS):
|
||||
pass
|
||||
else: # No collections
|
||||
# Since no collections exist the ReadStatus needs to be reset to 0 (Unread)
|
||||
debug_print("No Collections - reseting ReadStatus")
|
||||
debug_print("No Collections - resetting ReadStatus")
|
||||
self.reset_readstatus(connection, oncard)
|
||||
if self.dbversion >= 14:
|
||||
debug_print("No Collections - reseting FavouritesIndex")
|
||||
debug_print("No Collections - resetting FavouritesIndex")
|
||||
self.reset_favouritesindex(connection, oncard)
|
||||
|
||||
# debug_print('Finished update_device_database_collections', collections_attributes)
|
||||
@ -1067,11 +1067,11 @@ class KOBO(USBMS):
|
||||
fsync(f)
|
||||
|
||||
else:
|
||||
debug_print("ImageID could not be retreived from the database")
|
||||
debug_print("ImageID could not be retrieved from the database")
|
||||
|
||||
def prepare_addable_books(self, paths):
|
||||
'''
|
||||
The Kobo supports an encrypted epub refered to as a kepub
|
||||
The Kobo supports an encrypted epub referred to as a kepub
|
||||
Unfortunately Kobo decided to put the files on the device
|
||||
with no file extension. I just hope that decision causes
|
||||
them as much grief as it does me :-)
|
||||
@ -1369,7 +1369,7 @@ class KOBOTOUCH(KOBO):
|
||||
# build number. A number will be recorded here but it can be safely ignored
|
||||
# when testing the firmware version.
|
||||
max_supported_fwversion = (4, 28, 16705)
|
||||
# The following document firwmare versions where new function or devices were added.
|
||||
# The following document firmware versions where new function or devices were added.
|
||||
# Not all are used, but this feels a good place to record it.
|
||||
min_fwversion_shelves = (2, 0, 0)
|
||||
min_fwversion_images_on_sdcard = (2, 4, 1)
|
||||
@ -1667,7 +1667,7 @@ class KOBOTOUCH(KOBO):
|
||||
if favouritesindex == 1:
|
||||
playlist_map[lpath].append('Shortlist')
|
||||
|
||||
# The follwing is in flux:
|
||||
# The following is in flux:
|
||||
# - FW2.0.0, DBVersion 53,55 accessibility == 1
|
||||
# - FW2.1.2 beta, DBVersion == 56, accessibility == -1:
|
||||
# So, the following should be OK
|
||||
@ -1857,7 +1857,7 @@ class KOBOTOUCH(KOBO):
|
||||
"from ShelfContent " \
|
||||
"where ContentId = ? " \
|
||||
"and _IsDeleted = 'false' " \
|
||||
"and ShelfName is not null" # This should never be nulll, but it is protection against an error cause by a sync to the Kobo server
|
||||
"and ShelfName is not null" # This should never be null, but it is protection against an error cause by a sync to the Kobo server
|
||||
values = (ContentID, )
|
||||
cursor.execute(query, values)
|
||||
for i, row in enumerate(cursor):
|
||||
@ -1905,32 +1905,32 @@ class KOBOTOUCH(KOBO):
|
||||
where_clause = (" WHERE BookID IS NULL "
|
||||
" AND ((Accessibility = -1 AND IsDownloaded in ('true', 1 )) " # Sideloaded books
|
||||
" OR (Accessibility IN (%(downloaded_accessibility)s) %(expiry)s) " # Purchased books
|
||||
" %(previews)s %(recomendations)s ) " # Previews or Recommendations
|
||||
" %(previews)s %(recommendations)s ) " # Previews or Recommendations
|
||||
) % \
|
||||
dict(
|
||||
expiry="" if self.show_archived_books else "and IsDownloaded in ('true', 1)",
|
||||
previews=" OR (Accessibility in (6) AND ___UserID <> '')" if self.show_previews else "",
|
||||
recomendations=" OR (Accessibility IN (-1, 4, 6) AND ___UserId = '')" if self.show_recommendations else "",
|
||||
recommendations=" OR (Accessibility IN (-1, 4, 6) AND ___UserId = '')" if self.show_recommendations else "",
|
||||
downloaded_accessibility="1,2,8,9" if self.supports_overdrive() else "1,2"
|
||||
)
|
||||
elif self.supports_series():
|
||||
where_clause = (" WHERE BookID IS NULL "
|
||||
" AND ((Accessibility = -1 AND IsDownloaded IN ('true', 1)) or (Accessibility IN (1,2)) %(previews)s %(recomendations)s )"
|
||||
" AND ((Accessibility = -1 AND IsDownloaded IN ('true', 1)) or (Accessibility IN (1,2)) %(previews)s %(recommendations)s )"
|
||||
" AND NOT ((___ExpirationStatus=3 OR ___ExpirationStatus is Null) %(expiry)s)"
|
||||
) % \
|
||||
dict(
|
||||
expiry=" AND ContentType = 6" if self.show_archived_books else "",
|
||||
previews=" or (Accessibility IN (6) AND ___UserID <> '')" if self.show_previews else "",
|
||||
recomendations=" or (Accessibility in (-1, 4, 6) AND ___UserId = '')" if self.show_recommendations else ""
|
||||
recommendations=" or (Accessibility in (-1, 4, 6) AND ___UserId = '')" if self.show_recommendations else ""
|
||||
)
|
||||
elif self.dbversion >= 33:
|
||||
where_clause = (' WHERE BookID IS NULL %(previews)s %(recomendations)s AND NOT'
|
||||
where_clause = (' WHERE BookID IS NULL %(previews)s %(recommendations)s AND NOT'
|
||||
' ((___ExpirationStatus=3 or ___ExpirationStatus IS NULL) %(expiry)s)'
|
||||
) % \
|
||||
dict(
|
||||
expiry=' AND ContentType = 6' if self.show_archived_books else '',
|
||||
previews=' AND Accessibility <> 6' if not self.show_previews else '',
|
||||
recomendations=' AND IsDownloaded IN (\'true\', 1)' if not self.show_recommendations else ''
|
||||
recommendations=' AND IsDownloaded IN (\'true\', 1)' if not self.show_recommendations else ''
|
||||
)
|
||||
elif self.dbversion >= 16:
|
||||
where_clause = (' WHERE BookID IS NULL '
|
||||
@ -2527,7 +2527,7 @@ class KOBOTOUCH(KOBO):
|
||||
|
||||
elif bookshelf_attribute: # No collections but have set the shelf option
|
||||
# Since no collections exist the ReadStatus needs to be reset to 0 (Unread)
|
||||
debug_print("No Collections - reseting ReadStatus")
|
||||
debug_print("No Collections - resetting ReadStatus")
|
||||
if self.dbversion < 53:
|
||||
self.reset_readstatus(connection, oncard)
|
||||
if self.dbversion >= 14 and self.fwversion < self.min_fwversion_shelves:
|
||||
@ -2675,7 +2675,7 @@ class KOBOTOUCH(KOBO):
|
||||
):
|
||||
'''
|
||||
This will generate the new cover image from the cover in the library. It is a wrapper
|
||||
for save_cover_data_to to allow it to be overriden in a subclass. For this reason,
|
||||
for save_cover_data_to to allow it to be overridden in a subclass. For this reason,
|
||||
options are passed in that are not used by this implementation.
|
||||
|
||||
:param cover_data: original cover data
|
||||
@ -3525,12 +3525,12 @@ class KOBOTOUCH(KOBO):
|
||||
|
||||
@property
|
||||
def create_bookshelves(self):
|
||||
# Only for backwards compatabilty
|
||||
# Only for backwards compatibility
|
||||
return self.manage_collections
|
||||
|
||||
@property
|
||||
def delete_empty_shelves(self):
|
||||
# Only for backwards compatabilty
|
||||
# Only for backwards compatibility
|
||||
return self.delete_empty_collections
|
||||
|
||||
@property
|
||||
@ -3756,7 +3756,7 @@ class KOBOTOUCH(KOBO):
|
||||
settings.show_recommendations = settings.extra_customization[OPT_SHOW_RECOMMENDATIONS]
|
||||
|
||||
# If the configuration hasn't been change for a long time, the last few option will be out
|
||||
# of sync. The last two options aare always the support newer firmware and the debugging
|
||||
# of sync. The last two options are always the support newer firmware and the debugging
|
||||
# title. Set seties and Modify CSS were the last two new options. The debugging title is
|
||||
# a string, so looking for that.
|
||||
start_subclass_extra_options = OPT_MODIFY_CSS
|
||||
|
@ -171,7 +171,7 @@
|
||||
*/
|
||||
#define DEVICE_FLAG_ALWAYS_PROBE_DESCRIPTOR 0x00000800
|
||||
/**
|
||||
* Samsung has implimented its own playlist format as a .spl file
|
||||
* Samsung has implemented its own playlist format as a .spl file
|
||||
* stored in the normal file system, rather than a proper mtp
|
||||
* playlist. There are multiple versions of the .spl format
|
||||
* identified by a line in the file: VERSION X.XX
|
||||
@ -179,7 +179,7 @@
|
||||
*/
|
||||
#define DEVICE_FLAG_PLAYLIST_SPL_V1 0x00001000
|
||||
/**
|
||||
* Samsung has implimented its own playlist format as a .spl file
|
||||
* Samsung has implemented its own playlist format as a .spl file
|
||||
* stored in the normal file system, rather than a proper mtp
|
||||
* playlist. There are multiple versions of the .spl format
|
||||
* identified by a line in the file: VERSION X.XX
|
||||
|
@ -296,7 +296,7 @@
|
||||
DEVICE_FLAG_BROKEN_MTPGETOBJPROPLIST |
|
||||
DEVICE_FLAG_PLAYLIST_SPL_V1 },
|
||||
// YP-F3 is NOT MTP - USB mass storage
|
||||
// From a rouge .INF file
|
||||
// From a rogue .INF file
|
||||
// this device ID seems to have been recycled for:
|
||||
// the Samsung SGH-A707 Cingular cellphone
|
||||
// the Samsung L760-V cellphone
|
||||
@ -613,7 +613,7 @@
|
||||
|
||||
/*
|
||||
* SanDisk
|
||||
* several devices (c150 for sure) are definately dual-mode and must
|
||||
* several devices (c150 for sure) are definitely dual-mode and must
|
||||
* have the USB mass storage driver that hooks them unloaded first.
|
||||
* They all have problematic dual-mode making the device unload effect
|
||||
* uncertain on these devices.
|
||||
@ -2711,7 +2711,7 @@
|
||||
#if 1
|
||||
/* after some review I commented it back in. There was apparently
|
||||
* only one or two devices misbehaving (having this ID in mass storage mode),
|
||||
* but more seem to use it regulary as MTP devices. Marcus 20150401 */
|
||||
* but more seem to use it regularly as MTP devices. Marcus 20150401 */
|
||||
/*
|
||||
* This had to be commented out - the same VID+PID is used also for
|
||||
* other modes than MTP, so we need to let mtp-probe do its job on this
|
||||
@ -2796,7 +2796,7 @@
|
||||
#if 1
|
||||
/* after some review I commented it back in. There was apparently
|
||||
* only one or two devices misbehaving (having this ID in mass storage mode),
|
||||
* but more seem to use it regulary as MTP devices. Marcus 20150401 */
|
||||
* but more seem to use it regularly as MTP devices. Marcus 20150401 */
|
||||
/*
|
||||
* This had to be commented out - the same VID+PID is used also for
|
||||
* other modes than MTP, so we need to let mtp-probe do its job on this
|
||||
@ -3136,7 +3136,7 @@
|
||||
DEVICE_FLAGS_ANDROID_BUGS },
|
||||
|
||||
/* In update 4 the order of devices was changed for
|
||||
better OS X / Windows suport and another device-id
|
||||
better OS X / Windows support and another device-id
|
||||
got assigned for the MTP */
|
||||
{ "Jolla", 0x2931, "Sailfish (ID2)", 0x0a05,
|
||||
DEVICE_FLAGS_ANDROID_BUGS },
|
||||
|
@ -27,7 +27,7 @@ Periodical identifier sample from a PRS-650:
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<cacheExt xmlns="http://www.sony.com/xmlns/product/prs/device/1">
|
||||
<text conformsTo="http://xmlns.sony.net/e-book/prs/periodicals/1.0/newspaper/1.0" periodicalName="The Atlantic"
|
||||
description="Current affairs and politics focussed on the US" publicationDate="Tue, 19 Oct 2010 00:00:00 GMT"
|
||||
description="Current affairs and politics focused on the US" publicationDate="Tue, 19 Oct 2010 00:00:00 GMT"
|
||||
path="database/media/books/calibre/Atlantic [Mon, 18 Oct 2010], The - calibre_1701.epub">
|
||||
<thumbnail width="167" height="217">main_thumbnail.jpg</thumbnail>
|
||||
</text>
|
||||
|
@ -45,7 +45,7 @@ class LITInput(InputFormatPlugin):
|
||||
from calibre.ebooks.chardet import xml_to_unicode
|
||||
from calibre.utils.xml_parse import safe_xml_fromstring
|
||||
import copy
|
||||
self.log('LIT file with all text in singe <pre> tag detected')
|
||||
self.log('LIT file with all text in single <pre> tag detected')
|
||||
html = separate_paragraphs_single_line(pre.text)
|
||||
html = convert_basic(html).replace('<html>',
|
||||
'<html xmlns="%s">'%XHTML_NS)
|
||||
|
@ -121,7 +121,7 @@ class PMLInput(InputFormatPlugin):
|
||||
if hasattr(stream, 'name'):
|
||||
images = self.get_images(stream, os.path.abspath(os.path.dirname(stream.name)))
|
||||
|
||||
# We want pages to be orded alphabetically.
|
||||
# We want pages to be ordered alphabetically.
|
||||
pages.sort()
|
||||
|
||||
manifest_items = []
|
||||
|
@ -170,7 +170,7 @@ class TXTInput(InputFormatPlugin):
|
||||
if txt_formatting is not None and txt_formatting.text:
|
||||
txt_formatting = txt_formatting.text.strip()
|
||||
if txt_formatting in ('plain', 'textile', 'markdown') and options.formatting_type == 'auto':
|
||||
log.info(f'Using metadata from TXTZ archive to set text formating type to: {txt_formatting}')
|
||||
log.info(f'Using metadata from TXTZ archive to set text formatting type to: {txt_formatting}')
|
||||
options.formatting_type = txt_formatting
|
||||
if txt_formatting != 'plain':
|
||||
options.paragraph_type = 'off'
|
||||
|
@ -1054,7 +1054,7 @@ OptionRecommendation(name='search_replace',
|
||||
from calibre.utils.fonts.scanner import font_scanner # noqa
|
||||
import css_parser, logging
|
||||
css_parser.log.setLevel(logging.WARN)
|
||||
get_types_map() # Ensure the mimetypes module is intialized
|
||||
get_types_map() # Ensure the mimetypes module is initialized
|
||||
|
||||
if self.opts.debug_pipeline is not None:
|
||||
self.opts.verbose = max(self.opts.verbose, 4)
|
||||
|
@ -19,7 +19,7 @@ def get_applicable_xe_fields(index, xe_fields, XPath, expand):
|
||||
|
||||
lr = index.get('letter-range', None)
|
||||
if lr is not None:
|
||||
sl, el = lr.parition('-')[0::2]
|
||||
sl, el = lr.partition('-')[0::2]
|
||||
sl, el = sl.strip(), el.strip()
|
||||
if sl and el:
|
||||
def inrange(text):
|
||||
|
@ -14,7 +14,7 @@ class Parser:
|
||||
''' See epubcfi.ebnf for the specification that this parser tries to
|
||||
follow. I have implemented it manually, since I dont want to depend on
|
||||
grako, and the grammar is pretty simple. This parser is thread-safe, i.e.
|
||||
it can be used from multiple threads simulataneously. '''
|
||||
it can be used from multiple threads simultaneously. '''
|
||||
|
||||
def __init__(self):
|
||||
# All allowed unicode characters + escaped special characters
|
||||
|
@ -168,7 +168,7 @@ class HTMLFile:
|
||||
try:
|
||||
link = self.resolve(url)
|
||||
except ValueError:
|
||||
# Unparseable URL, ignore
|
||||
# Unparsable URL, ignore
|
||||
continue
|
||||
if link not in self.links:
|
||||
self.links.append(link)
|
||||
|
@ -383,7 +383,7 @@ sol3d2 so3lic 5solv 3som 3s4on. sona4 son4g s4op 5sophic s5ophiz s5ophy sor5c
|
||||
sor5d 4sov so5vi 2spa 5spai spa4n spen4d 2s5peo 2sper s2phe 3spher spho5 spil4
|
||||
sp5ing 4spio s4ply s4pon spor4 4spot squal4l s1r 2ss s1sa ssas3 s2s5c s3sel
|
||||
s5seng s4ses. s5set s1si s4sie ssi4er ss5ily s4sl ss4li s4sn sspend4 ss2t ssur5a
|
||||
ss5w 2st. s2tag s2tal stam4i 5stand s4ta4p 5stat. s4ted stern5i s5tero ste2w
|
||||
ss5w 2nd. s2tag s2tal stam4i 5stand s4ta4p 5stat. s4ted stern5i s5tero ste2w
|
||||
stew5a s3the st2i s4ti. s5tia s1tic 5stick s4tie s3tif st3ing 5stir s1tle 5stock
|
||||
stom3a 5stone s4top 3store st4r s4trad 5stratu s4tray s4trid 4stry 4st3w s2ty
|
||||
1su su1al su4b3 su2g3 su5is suit3 s4ul su2m sum3i su2n su2r 4sv sw2 4swo s4y
|
||||
|
@ -66,7 +66,7 @@ def _bytelist2longBigEndian(blist):
|
||||
|
||||
|
||||
def _rotateLeft(x, n):
|
||||
"Rotate x (32 bit) left n bits circularly."
|
||||
"Rotate x (32 bit) left n bits circular."
|
||||
|
||||
return (x << n) | (x >> (32-n))
|
||||
|
||||
|
@ -236,7 +236,7 @@ class HTMLConverter:
|
||||
self.id_counter = 0
|
||||
self.unused_target_blocks = [] # : Used to remove extra TextBlocks
|
||||
self.link_level = 0 #: Current link level
|
||||
self.memory = [] #: Used to ensure that duplicate CSS unhandled erros are not reported
|
||||
self.memory = [] #: Used to ensure that duplicate CSS unhandled errors are not reported
|
||||
self.tops = {} #: element representing the top of each HTML file in the LRF file
|
||||
self.previous_text = '' # : Used to figure out when to lstrip
|
||||
self.stripped_space = ''
|
||||
@ -937,7 +937,7 @@ class HTMLConverter:
|
||||
if height <= 0:
|
||||
height = 1
|
||||
pt = PersistentTemporaryFile(suffix='_html2lrf_scaled_image_.'+encoding.lower())
|
||||
self.image_memory.append(pt) # Neccessary, trust me ;-)
|
||||
self.image_memory.append(pt) # Necessary, trust me ;-)
|
||||
try:
|
||||
im.resize((int(width), int(height)), PILImage.ANTIALIAS).save(pt, encoding)
|
||||
pt.close()
|
||||
|
@ -351,7 +351,7 @@ class Book(Delegator):
|
||||
the Book class in some way or another in order to be rendered as
|
||||
an LRS or LRF file.
|
||||
|
||||
The following settings are available on the contructor of Book:
|
||||
The following settings are available on the constructor of Book:
|
||||
|
||||
author="book author" or author=("book author", "sort as")
|
||||
Author of the book.
|
||||
|
@ -79,7 +79,7 @@ def set_metadata(stream, mi):
|
||||
if hr.compression not in (2, 10):
|
||||
return
|
||||
|
||||
# Create a metadata record for the file if one does not alreay exist
|
||||
# Create a metadata record for the file if one does not already exist
|
||||
if not hr.has_metadata:
|
||||
sections += [b'', b'MeTaInFo\x00']
|
||||
last_data = len(sections) - 1
|
||||
|
@ -163,7 +163,7 @@ def get_metadata(stream):
|
||||
|
||||
def _parse_authors(root, ctx):
|
||||
authors = []
|
||||
# pick up authors but only from 1 secrion <title-info>; otherwise it is not consistent!
|
||||
# pick up authors but only from 1 section <title-info>; otherwise it is not consistent!
|
||||
# Those are fallbacks: <src-title-info>, <document-info>
|
||||
author = None
|
||||
for author_sec in ['title-info', 'src-title-info', 'document-info']:
|
||||
@ -248,7 +248,7 @@ def _parse_cover_data(root, imgid, mi, ctx):
|
||||
|
||||
|
||||
def _parse_tags(root, mi, ctx):
|
||||
# pick up genre but only from 1 secrion <title-info>; otherwise it is not consistent!
|
||||
# pick up genre but only from 1 section <title-info>; otherwise it is not consistent!
|
||||
# Those are fallbacks: <src-title-info>
|
||||
for genre_sec in ['title-info', 'src-title-info']:
|
||||
# -- i18n Translations-- ?
|
||||
|
@ -307,7 +307,7 @@ class Worker(Thread): # Get details {{{
|
||||
text() = "Buscar productos similares por categoría" or
|
||||
text() = "Ricerca articoli simili per categoria" or
|
||||
text() = "Rechercher des articles similaires par rubrique" or
|
||||
text() = "Procure por itens similares por categoria" or
|
||||
text() = "Procure por items similares por categoria" or
|
||||
text() = "関連商品を探す"
|
||||
]/../descendant::ul/li
|
||||
'''
|
||||
|
@ -480,7 +480,7 @@ class Source(Plugin):
|
||||
The URL is the URL for the book identified by identifiers at this
|
||||
source. identifier_type, identifier_value specify the identifier
|
||||
corresponding to the URL.
|
||||
This URL must be browseable to by a human using a browser. It is meant
|
||||
This URL must be browsable to by a human using a browser. It is meant
|
||||
to provide a clickable link for the user to easily visit the books page
|
||||
at this source.
|
||||
If no URL is found, return None. This method must be quick, and
|
||||
|
@ -25,7 +25,7 @@ PLACEHOLDER_GIF = b'GIF89a\x01\x00\x01\x00\xf0\x00\x00\x00\x00\x00\xff\xff\xff!\
|
||||
def process_jpegs_for_amazon(data: bytes) -> bytes:
|
||||
img = Image.open(BytesIO(data))
|
||||
if img.format == 'JPEG':
|
||||
# Amazon's MOBI renderer cant render JPEG images without JFIF metadata
|
||||
# Amazon's MOBI renderer can't render JPEG images without JFIF metadata
|
||||
# and images with EXIF data dont get displayed on the cover screen
|
||||
changed = not img.info
|
||||
if hasattr(img, '_getexif') and img._getexif():
|
||||
|
@ -194,7 +194,7 @@ class Serializer:
|
||||
try:
|
||||
path, frag = urldefrag(urlnormalize(href))
|
||||
except ValueError:
|
||||
# Unparseable URL
|
||||
# Unparsable URL
|
||||
return False
|
||||
if path and base:
|
||||
path = base.abshref(path)
|
||||
|
@ -229,10 +229,10 @@ class Extract(ODF2XHTML):
|
||||
# only one draw:image allowed in the draw:frame
|
||||
if len(img) == 1 and img[0].getAttribute('href') == mi.cover:
|
||||
# ok, this is the right frame with the right image
|
||||
# check if there are more childs
|
||||
# check if there are more children
|
||||
if len(frm.childNodes) != 1:
|
||||
break
|
||||
# check if the parent paragraph more childs
|
||||
# check if the parent paragraph more children
|
||||
para = frm.parentNode
|
||||
if para.tagName != 'text:p' or len(para.childNodes) != 1:
|
||||
break
|
||||
|
@ -520,7 +520,7 @@ class Container(ContainerBase): # {{{
|
||||
return unicodedata.normalize('NFC', abspath_to_name(fullpath, root or self.root))
|
||||
|
||||
def name_to_abspath(self, name):
|
||||
' Convert a canonical name to an absolute OS dependant path '
|
||||
' Convert a canonical name to an absolute OS dependent path '
|
||||
return name_to_abspath(name, self.root)
|
||||
|
||||
def exists(self, name):
|
||||
|
@ -27,7 +27,7 @@ def pretty_xml_tree(elem, level=0, indent=' '):
|
||||
textual content. Also assumes that there is no text immediately after
|
||||
closing tags. These are true for opf/ncx and container.xml files. If either
|
||||
of the assumptions are violated, there should be no data loss, but pretty
|
||||
printing wont produce optimal results.'''
|
||||
printing won't produce optimal results.'''
|
||||
if (not elem.text and len(elem) > 0) or (elem.text and isspace(elem.text)):
|
||||
elem.text = '\n' + (indent * (level+1))
|
||||
for i, child in enumerate(elem):
|
||||
|
@ -124,7 +124,7 @@ class Split:
|
||||
for i, elem in enumerate(item.data.iter('*')):
|
||||
try:
|
||||
elem.set('pb_order', unicode_type(i))
|
||||
except TypeError: # Cant set attributes on comment nodes etc.
|
||||
except TypeError: # Can't set attributes on comment nodes etc.
|
||||
continue
|
||||
|
||||
page_breaks = list(page_breaks)
|
||||
@ -168,7 +168,7 @@ class Split:
|
||||
try:
|
||||
href = self.current_item.abshref(href)
|
||||
except ValueError:
|
||||
# Unparseable URL
|
||||
# Unparsable URL
|
||||
return url
|
||||
try:
|
||||
href = urlnormalize(href)
|
||||
|
@ -36,7 +36,7 @@ def pdb_header_info(header):
|
||||
print('PDB Header Info:')
|
||||
print('')
|
||||
print('Identity: %s' % header.ident)
|
||||
print('Total Sectons: %s' % header.num_sections)
|
||||
print('Total Sections: %s' % header.num_sections)
|
||||
print('Title: %s' % header.title)
|
||||
print('')
|
||||
|
||||
|
@ -154,7 +154,7 @@ class Writer(FormatWriter):
|
||||
if len(data) + len(header) < 65505:
|
||||
images.append((header, data))
|
||||
except Exception as e:
|
||||
self.log.error('Error: Could not include file %s becuase '
|
||||
self.log.error('Error: Could not include file %s because '
|
||||
'%s.' % (item.href, e))
|
||||
|
||||
return images
|
||||
|
@ -74,7 +74,7 @@ class PdbHeaderBuilder:
|
||||
|
||||
def build_header(self, section_lengths, out_stream):
|
||||
'''
|
||||
section_lengths = Lenght of each section in file.
|
||||
section_lengths = Length of each section in file.
|
||||
'''
|
||||
|
||||
now = int(time.time())
|
||||
|
@ -118,7 +118,7 @@ class HeaderRecord:
|
||||
|
||||
def __init__(self, raw):
|
||||
self.uid, = struct.unpack('>H', raw[0:2])
|
||||
# This is labled version in the spec.
|
||||
# This is labeled version in the spec.
|
||||
# 2 is ZLIB compressed,
|
||||
# 1 is DOC compressed
|
||||
self.compression, = struct.unpack('>H', raw[2:4])
|
||||
@ -182,7 +182,7 @@ class SectionMetadata:
|
||||
|
||||
This does not store metadata such as title, or author.
|
||||
That metadata would be best retrieved with the PDB (plucker)
|
||||
metdata reader.
|
||||
metadata reader.
|
||||
|
||||
This stores document specific information such as the
|
||||
text encoding.
|
||||
@ -358,7 +358,7 @@ class Reader(FormatReader):
|
||||
def extract_content(self, output_dir):
|
||||
# Each text record is independent (unless the continuation
|
||||
# value is set in the previous record). Put each converted
|
||||
# text recored into a separate file. We will reference the
|
||||
# text recorded into a separate file. We will reference the
|
||||
# home.html file as the first file and let the HTML input
|
||||
# plugin assemble the order based on hyperlinks.
|
||||
with CurrentDir(output_dir):
|
||||
@ -452,7 +452,7 @@ class Reader(FormatReader):
|
||||
odi = self.options.debug_pipeline
|
||||
self.options.debug_pipeline = None
|
||||
# Determine the home.html record uid. This should be set in the
|
||||
# reserved values in the metadata recored. home.html is the first
|
||||
# reserved values in the metadata recorded. home.html is the first
|
||||
# text record (should have hyper link references to other records)
|
||||
# in the document.
|
||||
try:
|
||||
|
@ -60,7 +60,7 @@ class Reader(FormatReader):
|
||||
|
||||
self.log.debug('Foud ztxt version: %i.%i' % (vmajor, vminor))
|
||||
|
||||
# Initalize the decompressor
|
||||
# Initialize the decompressor
|
||||
self.uncompressor = zlib.decompressobj()
|
||||
self.uncompressor.decompress(self.section_data(1))
|
||||
|
||||
|
@ -88,7 +88,7 @@ class Links:
|
||||
try:
|
||||
purl = urlparse(url)
|
||||
except Exception:
|
||||
self.pdf.debug('Ignoring unparseable URL: %r' % url)
|
||||
self.pdf.debug('Ignoring unparsable URL: %r' % url)
|
||||
continue
|
||||
if purl.scheme and purl.scheme != 'file':
|
||||
action = Dictionary({
|
||||
|
@ -198,7 +198,7 @@ class PMLMLizer:
|
||||
text = text.replace('\xa0', ' ')
|
||||
|
||||
# Turn all characters that cannot be represented by themself into their
|
||||
# PML code equivelent
|
||||
# PML code equivalent
|
||||
text = re.sub('[^\x00-\x7f]', lambda x: unipmlcode(x.group()), text)
|
||||
|
||||
# Remove excess spaces at beginning and end of lines
|
||||
@ -346,7 +346,7 @@ class PMLMLizer:
|
||||
except:
|
||||
pass
|
||||
|
||||
# Proccess text within this tag.
|
||||
# Process text within this tag.
|
||||
if hasattr(elem, 'text') and elem.text:
|
||||
text.append(self.prepare_string_for_pml(elem.text))
|
||||
|
||||
|
@ -206,7 +206,7 @@ class RBMLizer:
|
||||
text.append('<%s>' % style_tag)
|
||||
tag_stack.append(style_tag)
|
||||
|
||||
# Proccess tags that contain text.
|
||||
# Process tags that contain text.
|
||||
if hasattr(elem, 'text') and elem.text:
|
||||
text.append(prepare_string_for_xml(elem.text))
|
||||
|
||||
|
@ -37,9 +37,9 @@ class RBWriter:
|
||||
def write_content(self, oeb_book, out_stream, metadata=None):
|
||||
info = [('info.info', self._info_section(metadata))]
|
||||
images = self._images(oeb_book.manifest)
|
||||
text_size, chuncks = self._text(oeb_book)
|
||||
chunck_sizes = [len(x) for x in chuncks]
|
||||
text = [('index.html', chuncks)]
|
||||
text_size, chunks = self._text(oeb_book)
|
||||
chunck_sizes = [len(x) for x in chunks]
|
||||
text = [('index.html', chunks)]
|
||||
hidx = [('index.hidx', ' ')]
|
||||
|
||||
toc_items = []
|
||||
@ -84,8 +84,8 @@ class RBWriter:
|
||||
out_stream.write(struct.pack('<I', text_size))
|
||||
for size in chunck_sizes:
|
||||
out_stream.write(struct.pack('<I', size))
|
||||
for chunck in text[0][1]:
|
||||
out_stream.write(chunck)
|
||||
for chunk in text[0][1]:
|
||||
out_stream.write(chunk)
|
||||
|
||||
self.log.debug('Writing images...')
|
||||
for item in hidx+images:
|
||||
@ -132,7 +132,7 @@ class RBWriter:
|
||||
|
||||
images.append((name, data))
|
||||
except Exception as e:
|
||||
self.log.error('Error: Could not include file %s becuase '
|
||||
self.log.error('Error: Could not include file %s because '
|
||||
'%s.' % (item.href, e))
|
||||
|
||||
return images
|
||||
|
@ -71,7 +71,7 @@ def text_length(i):
|
||||
return len(clean(i.text_content() or ""))
|
||||
|
||||
|
||||
class Unparseable(ValueError):
|
||||
class Unparsable(ValueError):
|
||||
pass
|
||||
|
||||
|
||||
@ -156,7 +156,7 @@ class Document:
|
||||
return cleaned_article
|
||||
except Exception as e:
|
||||
self.log.exception('error getting summary: ')
|
||||
reraise(Unparseable, Unparseable(unicode_type(e)), sys.exc_info()[2])
|
||||
reraise(Unparsable, Unparsable(unicode_type(e)), sys.exc_info()[2])
|
||||
|
||||
def get_article(self, candidates, best_candidate):
|
||||
# Now that we have the top candidate, look through its siblings for content that might also be related.
|
||||
|
@ -11,7 +11,7 @@ RTF tokenizer and token parser. v.1.0 (1/17/2010)
|
||||
Author: Gerendi Sandor Attila
|
||||
|
||||
At this point this will tokenize a RTF file then rebuild it from the tokens.
|
||||
In the process the UTF8 tokens are altered to be supported by the RTF2XML and also remain RTF specification compilant.
|
||||
In the process the UTF8 tokens are altered to be supported by the RTF2XML and also remain RTF specification compliant.
|
||||
"""
|
||||
|
||||
|
||||
@ -235,7 +235,7 @@ class RtfTokenParser():
|
||||
i = i + 1
|
||||
j = j + 1
|
||||
continue
|
||||
raise Exception('Error: incorect utf replacement.')
|
||||
raise Exception('Error: incorrect utf replacement.')
|
||||
|
||||
# calibre rtf2xml does not support utfreplace
|
||||
replace = []
|
||||
|
@ -275,7 +275,7 @@ class RTFMLizer:
|
||||
text += '{%s\n' % style_tag
|
||||
tag_stack.append(style_tag)
|
||||
|
||||
# Proccess tags that contain text.
|
||||
# Process tags that contain text.
|
||||
if hasattr(elem, 'text') and elem.text:
|
||||
text += txt2rtf(elem.text)
|
||||
|
||||
|
@ -44,7 +44,7 @@ def Handle_Main():
|
||||
# determine the run level. The default is 1.
|
||||
run_level = 3,
|
||||
# The name of a debug directory, if you are running at
|
||||
# run level 3 or higer.
|
||||
# run level 3 or higher.
|
||||
debug = 'debug_dir',
|
||||
# Convert RTF caps to real caps.
|
||||
# Default is 1.
|
||||
@ -124,7 +124,7 @@ class ParseRtf:
|
||||
'output' --a file to output the parsed file. (Default is standard
|
||||
output.)
|
||||
'temp_dir' --directory for temporary output (If not provided, the
|
||||
script tries to output to directory where is script is exectued.)
|
||||
script tries to output to directory where is script is executed.)
|
||||
'deb_dir' --debug directory. If a debug_dir is provided, the script
|
||||
will copy each run through as a file to examine in the debug_dir
|
||||
'check_brackets' -- make sure the brackets match up after each run
|
||||
|
@ -111,7 +111,7 @@ class AddBrackets:
|
||||
2-If an open bracket is found the code inside is ignore
|
||||
(written without modifications)
|
||||
3-If an accepted control word is found put the line
|
||||
in a buffer then chage state to after cw
|
||||
in a buffer then change state to after cw
|
||||
4-Else simply write the line
|
||||
"""
|
||||
if line == 'cb<nu<clos-brack<0001\n' and self.__open_bracket:
|
||||
@ -151,7 +151,7 @@ class AddBrackets:
|
||||
|
||||
def __write_group(self):
|
||||
"""
|
||||
Write a tempory group after accepted control words end
|
||||
Write a temporary group after accepted control words end
|
||||
But this is mostly useless in my opinion as there is no list of rejected cw
|
||||
This may be a way to implement future old rtf processing for cw
|
||||
Utility: open a group to just put brackets but why be so complicated?
|
||||
|
@ -126,7 +126,7 @@ class Colors:
|
||||
Logic:
|
||||
Check if the end of the color table has been reached. If so,
|
||||
change the state to after the color table.
|
||||
Othewise, get a function by passing the self.__token_info to the
|
||||
Otherwise, get a function by passing the self.__token_info to the
|
||||
state dictionary.
|
||||
"""
|
||||
# mi<mk<clrtbl-beg
|
||||
@ -234,7 +234,7 @@ class Colors:
|
||||
beginning of the color table.
|
||||
If the state is in the color table, create the color dictionary
|
||||
and print out the tags.
|
||||
If the state if afer the color table, look for lines with color
|
||||
If the state if after the color table, look for lines with color
|
||||
info, and substitute the number with the hex number.
|
||||
"""
|
||||
self.__initiate_values()
|
||||
|
@ -31,7 +31,7 @@ class Configure:
|
||||
if self.__show_config_file and self.__configuration_file:
|
||||
sys.stderr.write('configuration file is "%s"\n' % self.__configuration_file)
|
||||
if self.__show_config_file and not self.__configuration_file:
|
||||
sys.stderr.write('No configuraiton file found; using default values\n')
|
||||
sys.stderr.write('No configuration file found; using default values\n')
|
||||
if self.__configuration_file:
|
||||
read_obj = open_for_read(self.__configuration_file)
|
||||
line_to_read = 1
|
||||
@ -111,7 +111,7 @@ class Configure:
|
||||
return_dict['configure-directory'] = None
|
||||
else:
|
||||
if not os.path.isdir(configuration_dir):
|
||||
sys.stderr.write('The dirctory "%s" does not appear to be a directory.\n'
|
||||
sys.stderr.write('The directory "%s" does not appear to be a directory.\n'
|
||||
% configuration_dir)
|
||||
return 1
|
||||
else:
|
||||
|
@ -107,7 +107,7 @@ class ConvertToTags:
|
||||
"""
|
||||
Process lines for open tags that have attributes.
|
||||
The important info is between [17:-1]. Take this info and split it
|
||||
with the delimeter '<'. The first token in this group is the element
|
||||
with the delimiter '<'. The first token in this group is the element
|
||||
name. The rest are attributes, separated fromt their values by '>'. So
|
||||
read each token one at a time, and split them by '>'.
|
||||
"""
|
||||
@ -256,7 +256,7 @@ class ConvertToTags:
|
||||
an open function for open tags
|
||||
an open with attribute function for tags with attributes
|
||||
an empty with attribute function for tags that are empty but have
|
||||
attribtes.
|
||||
attributes.
|
||||
a closed function for closed tags.
|
||||
an empty tag function.
|
||||
"""
|
||||
|
@ -19,7 +19,7 @@ from . import open_for_read, open_for_write
|
||||
|
||||
|
||||
class DeleteInfo:
|
||||
"""Delete unecessary destination groups"""
|
||||
"""Delete unnecessary destination groups"""
|
||||
|
||||
def __init__(self,
|
||||
in_file ,
|
||||
@ -110,7 +110,7 @@ class DeleteInfo:
|
||||
If you find that you are in a delete group, and the previous
|
||||
token in not an open bracket (self.__ob = 0), that means
|
||||
that the delete group is nested inside another acceptable
|
||||
detination group. In this case, you have already written
|
||||
destination group. In this case, you have already written
|
||||
the open bracket, so you will need to write the closed one
|
||||
as well.
|
||||
"""
|
||||
|
@ -255,11 +255,11 @@ class FieldStrings:
|
||||
|
||||
def __equation_func(self, field_name, name, line):
|
||||
"""
|
||||
Requried:
|
||||
Required:
|
||||
field_name -- the first word in the string
|
||||
name --the changed name according to the dictionary
|
||||
line -- the string to be parse
|
||||
Retuns:
|
||||
Returns:
|
||||
The name of the field
|
||||
Logic:
|
||||
"""
|
||||
@ -272,7 +272,7 @@ class FieldStrings:
|
||||
field_name -- the first word in the string
|
||||
name --the changed name according to the dictionary
|
||||
line -- the string to be parse
|
||||
Retuns:
|
||||
Returns:
|
||||
The name of the field
|
||||
Logic:
|
||||
"""
|
||||
@ -378,11 +378,11 @@ class FieldStrings:
|
||||
|
||||
def __simple_info_func(self, field_name, name, line):
|
||||
"""
|
||||
Requried:
|
||||
Required:
|
||||
field_name -- the first word in the string
|
||||
name --the changed name according to the dictionary
|
||||
line -- the string to be parse
|
||||
Retuns:
|
||||
Returns:
|
||||
The name of the field
|
||||
Logic:
|
||||
These fields can only have the following switches:
|
||||
@ -406,11 +406,11 @@ class FieldStrings:
|
||||
|
||||
def __hyperlink_func(self, field_name, name, line):
|
||||
"""
|
||||
Requried:
|
||||
Required:
|
||||
field_name -- the first word in the string
|
||||
name --the changed name according to the dictionary
|
||||
line -- the string to be parse
|
||||
Retuns:
|
||||
Returns:
|
||||
The name of the field
|
||||
"""
|
||||
self.__link_switch = re.compile(r'\\l\s{1,}"{0,1}(.*?)"{0,1}\s')
|
||||
@ -442,11 +442,11 @@ class FieldStrings:
|
||||
|
||||
def __include_text_func(self, field_name, name, line):
|
||||
"""
|
||||
Requried:
|
||||
Required:
|
||||
field_name -- the first word in the string
|
||||
name --the changed name according to the dictionary
|
||||
line -- the string to be parse
|
||||
Retuns:
|
||||
Returns:
|
||||
The name of the field
|
||||
Logic:
|
||||
"""
|
||||
@ -484,11 +484,11 @@ class FieldStrings:
|
||||
|
||||
def __include_pict_func(self, field_name, name, line):
|
||||
"""
|
||||
Requried:
|
||||
Required:
|
||||
field_name -- the first word in the string
|
||||
name --the changed name according to the dictionary
|
||||
line -- the string to be parse
|
||||
Retuns:
|
||||
Returns:
|
||||
The name of the field
|
||||
Logic:
|
||||
"""
|
||||
@ -526,7 +526,7 @@ class FieldStrings:
|
||||
A page reference field looks like this:
|
||||
PAGEREF _Toc440880424 \\h
|
||||
I want to extract the second line of info, which is used as an
|
||||
achor in the resulting XML file.
|
||||
anchor in the resulting XML file.
|
||||
"""
|
||||
the_string = name
|
||||
match_group = re.search(self.__format_text_exp, line)
|
||||
@ -776,7 +776,7 @@ class FieldStrings:
|
||||
name -- the changed name according to the dictionary.
|
||||
line -- the string to parse.
|
||||
Returns:
|
||||
A string containing font size, font style, and a hexidecimal value.
|
||||
A string containing font size, font style, and a hexadecimal value.
|
||||
Logic:
|
||||
The SYMBOL field is one of Microsoft's many quirky ways of
|
||||
entering text. The string that results from this method looks like
|
||||
@ -785,7 +785,7 @@ class FieldStrings:
|
||||
The first word merely tells us that we have encountered a SYMBOL
|
||||
field.
|
||||
The next value is the Microsoft decimal value. Change this to
|
||||
hexidecimal.
|
||||
hexadecimal.
|
||||
The pattern '\\f "some font' tells us the font.
|
||||
The pattern '\\s some size' tells us the font size.
|
||||
Extract all of this information. Store this information in a
|
||||
|
@ -53,7 +53,7 @@ Examples
|
||||
<field type = "insert-time">
|
||||
10:34 PM
|
||||
</field>
|
||||
The simple field in the above example conatins no paragraph or sections breaks.
|
||||
The simple field in the above example contains no paragraph or sections breaks.
|
||||
This line of RTF:
|
||||
{{\field{\*\fldinst SYMBOL 97 \\f "Symbol" \\s 12}{\fldrslt\f3\fs24}}}
|
||||
Becomes:
|
||||
@ -141,7 +141,7 @@ Examples
|
||||
}
|
||||
self.__field_count = [] # keep track of the brackets
|
||||
self.__field_instruction = [] # field instruction strings
|
||||
self.__symbol = 0 # wheter or not the field is really UTF-8
|
||||
self.__symbol = 0 # whether or not the field is really UTF-8
|
||||
# (these fields cannot be nested.)
|
||||
self.__field_instruction_string = '' # string that collects field instruction
|
||||
self.__par_in_field = [] # paragraphs in field?
|
||||
@ -150,7 +150,7 @@ Examples
|
||||
|
||||
def __before_body_func(self, line):
|
||||
"""
|
||||
Requried:
|
||||
Required:
|
||||
line --line ro parse
|
||||
Returns:
|
||||
nothing (changes an instant and writes a line)
|
||||
@ -183,7 +183,7 @@ Examples
|
||||
Returns:
|
||||
nothing
|
||||
Logic:
|
||||
Set the values for parseing the field. Four lists have to have
|
||||
Set the values for parsing the field. Four lists have to have
|
||||
items appended to them.
|
||||
"""
|
||||
self.__state = 'field'
|
||||
@ -201,7 +201,7 @@ Examples
|
||||
Returns:
|
||||
nothing.
|
||||
Logic:
|
||||
Check for the end of the field; a paragaph break; a section break;
|
||||
Check for the end of the field; a paragraph break; a section break;
|
||||
the beginning of another field; or the beginning of the field
|
||||
instruction.
|
||||
"""
|
||||
@ -289,7 +289,7 @@ Examples
|
||||
Nothing
|
||||
Logic:
|
||||
Pop the last values in the instructions list, the fields list, the
|
||||
paragaph list, and the section list.
|
||||
paragraph list, and the section list.
|
||||
If the field is a symbol, do not write the tags <field></field>,
|
||||
since this field is really just UTF-8.
|
||||
If the field contains paragraph or section breaks, it is a
|
||||
|
@ -30,7 +30,7 @@ use to you unless you use it as part of the other modules.)
|
||||
Method
|
||||
-----------
|
||||
Look for the beginning of a bookmark, index, or toc entry. When such a token
|
||||
is found, store the opeing bracket count in a variable. Collect all the text
|
||||
is found, store the opening bracket count in a variable. Collect all the text
|
||||
until the closing bracket entry is found. Send the string to the module
|
||||
field_strings to process it. Write the processed string to the output
|
||||
file.
|
||||
@ -170,7 +170,7 @@ file.
|
||||
A string for a toc instruction field.
|
||||
Logic:
|
||||
This method is meant for *both* index and toc entries.
|
||||
I want to eleminate paragraph endings, and I want to divide the
|
||||
I want to eliminate paragraph endings, and I want to divide the
|
||||
entry into a main entry and (if it exists) a sub entry.
|
||||
Split the string by newlines. Read on token at a time. If the
|
||||
token is a special colon, end the main entry element and start the
|
||||
@ -238,7 +238,7 @@ file.
|
||||
|
||||
def __index_bookmark_func(self, my_string):
|
||||
"""
|
||||
Requries:
|
||||
Requires:
|
||||
my_string -- string in all the index
|
||||
Returns:
|
||||
bookmark_string -- the text string of the book mark
|
||||
@ -373,7 +373,7 @@ file.
|
||||
my_string --string to parse
|
||||
type --type of string
|
||||
Returns:
|
||||
A string formated for a field instruction.
|
||||
A string formatted for a field instruction.
|
||||
Logic:
|
||||
The type is the name (either bookmark-end or bookmark-start). The
|
||||
id is the complete text string.
|
||||
|
@ -108,7 +108,7 @@ class GetOptions:
|
||||
pass
|
||||
"""
|
||||
sys.stderr.write(
|
||||
'You must provide an ouput file with the \'o\' option\n')
|
||||
'You must provide an output file with the \'o\' option\n')
|
||||
return_options['valid'] = 0
|
||||
"""
|
||||
if 'level' in the_keys:
|
||||
@ -226,7 +226,7 @@ class GetOptions:
|
||||
if not smart_output and not return_options['out-file']:
|
||||
"""
|
||||
sys.stderr.write(
|
||||
'Please provide and file to outut with the -o option.\n'
|
||||
'Please provide and file to output with the -o option.\n'
|
||||
'Or set \'<smart-output value = "true"/>\'.\n'
|
||||
'in the configuration file.\n'
|
||||
)
|
||||
|
@ -214,7 +214,7 @@ class GroupBorders:
|
||||
Returns:
|
||||
Nothing
|
||||
Logic
|
||||
Look for the start of a paragraph defintion. If one is found, check if
|
||||
Look for the start of a paragraph definition. If one is found, check if
|
||||
it contains a list-id. If it does, start a list. Change the state to
|
||||
in_pard.
|
||||
"""
|
||||
|
@ -207,7 +207,7 @@ class GroupStyles:
|
||||
Returns:
|
||||
Nothing
|
||||
Logic
|
||||
Look for the start of a paragraph defintion. If one is found, check if
|
||||
Look for the start of a paragraph definition. If one is found, check if
|
||||
it contains a list-id. If it does, start a list. Change the state to
|
||||
in_pard.
|
||||
"""
|
||||
|
@ -149,7 +149,7 @@ class HeadingsToSections:
|
||||
Returns:
|
||||
Nothing
|
||||
Logic
|
||||
Look for the start of a paragraph defintion. If one is found, check if
|
||||
Look for the start of a paragraph definition. If one is found, check if
|
||||
it contains a list-id. If it does, start a list. Change the state to
|
||||
in_pard.
|
||||
"""
|
||||
|
@ -23,7 +23,7 @@ from . import open_for_read, open_for_write
|
||||
|
||||
class Hex2Utf8:
|
||||
"""
|
||||
Convert Microsoft hexidecimal numbers to utf-8
|
||||
Convert Microsoft hexadecimal numbers to utf-8
|
||||
"""
|
||||
|
||||
def __init__(self,
|
||||
@ -54,7 +54,7 @@ class Hex2Utf8:
|
||||
directory from which the script is run.)
|
||||
'symbol'--whether to load the symbol character map
|
||||
'winddings'--whether to load the wingdings character map
|
||||
'caps'--whether to load the caps characer map
|
||||
'caps'--whether to load the caps character map
|
||||
'convert_to_caps'--wether to convert caps to utf-8
|
||||
Returns:
|
||||
nothing
|
||||
@ -110,7 +110,7 @@ class Hex2Utf8:
|
||||
directory from which the script is run.)
|
||||
'symbol'--whether to load the symbol character map
|
||||
'winddings'--whether to load the wingdings character map
|
||||
'caps'--whether to load the caps characer map
|
||||
'caps'--whether to load the caps character map
|
||||
'convert_to_caps'--wether to convert caps to utf-8
|
||||
Returns:
|
||||
nothing
|
||||
@ -145,7 +145,7 @@ class Hex2Utf8:
|
||||
Set values, including those for the dictionaries.
|
||||
The file that contains the maps is broken down into many different
|
||||
sets. For example, for the Symbol font, there is the standard part for
|
||||
hexidecimal numbers, and the part for Microsoft characters. Read
|
||||
hexadecimal numbers, and the part for Microsoft characters. Read
|
||||
each part in, and then combine them.
|
||||
"""
|
||||
# the default encoding system, the lower map for characters 0 through
|
||||
@ -262,7 +262,7 @@ class Hex2Utf8:
|
||||
hex_num)
|
||||
if self.__run_level > 4:
|
||||
# msg = 'no dictionary entry for %s\n'
|
||||
# msg += 'the hexidecimal num is "%s"\n' % (hex_num)
|
||||
# msg += 'the hexadecimal num is "%s"\n' % (hex_num)
|
||||
# msg += 'dictionary is %s\n' % self.__current_dict_name
|
||||
msg = 'Character "&#x%s;" does not appear to be valid (or is a control character)\n' % token
|
||||
raise self.__bug_handler(msg)
|
||||
@ -537,7 +537,7 @@ class Hex2Utf8:
|
||||
new_char_entity = '&#x%s' % hex_num
|
||||
converted = self.__caps_uni_dict.get(new_char_entity)
|
||||
if not converted:
|
||||
# bullets and other entities dont' have capital equivelents
|
||||
# bullets and other entities don't have capital equivalents
|
||||
return char_entity
|
||||
else:
|
||||
return converted
|
||||
|
@ -112,7 +112,7 @@ class Info:
|
||||
Returns:
|
||||
nothing
|
||||
Logic:
|
||||
Check for the beginning of the informatin table. When found, set
|
||||
Check for the beginning of the information table. When found, set
|
||||
the state to the information table. Always write the line.
|
||||
"""
|
||||
if self.__token_info == 'mi<mk<doc-in-beg':
|
||||
@ -127,7 +127,7 @@ class Info:
|
||||
nothing.
|
||||
Logic:
|
||||
Check for the end of information. If not found, check if the
|
||||
token has a special value in the info table dictionay. If it
|
||||
token has a special value in the info table dictionary. If it
|
||||
does, execute that function.
|
||||
Otherwise, output the line to the file.
|
||||
"""
|
||||
@ -148,7 +148,7 @@ class Info:
|
||||
Returns:
|
||||
nothing
|
||||
Logic:
|
||||
This function marks the beginning of informatin fields that have
|
||||
This function marks the beginning of information fields that have
|
||||
text that must be collected. Set the type of information field
|
||||
with the tag option. Set the state to collecting text
|
||||
"""
|
||||
@ -212,7 +212,7 @@ class Info:
|
||||
because it exists in abbreviated form. (num-of-wor)
|
||||
I want to check this information in a dictionary to convert it
|
||||
to a longer, readable form. If the key does not exist in the
|
||||
dictionary, print out an error message. Otherise add the value
|
||||
dictionary, print out an error message. Otherwise add the value
|
||||
to the text string.
|
||||
(num-of-wor => number-of-words)
|
||||
"""
|
||||
@ -265,7 +265,7 @@ class Info:
|
||||
If the state is in the information table, use other methods to
|
||||
parse the information
|
||||
style table, look for lines with style info, and substitute the
|
||||
number with the name of the style. If the state if afer the
|
||||
number with the name of the style. If the state if after the
|
||||
information table, simply write the line to the output file.
|
||||
"""
|
||||
self.__initiate_values()
|
||||
|
@ -175,7 +175,7 @@ class Inline:
|
||||
Logic:
|
||||
If the token is a control word for character info (cw<ci), use another
|
||||
method to add to the dictionary.
|
||||
Use the dictionary to get the approriate function.
|
||||
Use the dictionary to get the appropriate function.
|
||||
Always print out the line.
|
||||
"""
|
||||
if line[0:5] == 'cw<ci': # calibre: bug in original function no diff between cw<ci and cw<pf
|
||||
@ -294,7 +294,7 @@ class Inline:
|
||||
in waiting.
|
||||
Iterate through this slice, which contains only dictionaries.
|
||||
Get the keys in each dictionary. If 'font-style' is in the keys,
|
||||
write a marker tag. (I will use this marker tag later when conerting
|
||||
write a marker tag. (I will use this marker tag later when converting
|
||||
hext text to utf8.)
|
||||
Write a tag for the inline values.
|
||||
"""
|
||||
|
@ -217,7 +217,7 @@ class ListTable:
|
||||
nothing
|
||||
Logic:
|
||||
Check for the end of the group.
|
||||
Otherwise, if the token is hexidecimal, create an attribute.
|
||||
Otherwise, if the token is hexadecimal, create an attribute.
|
||||
Do so by finding the base-10 value of the number. Then divide
|
||||
this by 2 and round it. Remove the ".0". Sandwwhich the result to
|
||||
give you something like level1-show-level.
|
||||
@ -249,9 +249,9 @@ class ListTable:
|
||||
nothing
|
||||
Logic:
|
||||
Check for the end of the group.
|
||||
Otherwise, if the text is hexidecimal, call on the method
|
||||
Otherwise, if the text is hexadecimal, call on the method
|
||||
__parse_level_text_length.
|
||||
Otheriwse, if the text is regular text, create an attribute.
|
||||
Otherwise, if the text is regular text, create an attribute.
|
||||
This attribute indicates the puncuation after a certain level.
|
||||
An example is "level1-marker = '.'"
|
||||
Otherwise, check for a level-template-id.
|
||||
@ -283,7 +283,7 @@ class ListTable:
|
||||
def __parse_level_text_length(self, line):
|
||||
"""
|
||||
Requires:
|
||||
line --line with hexidecimal number
|
||||
line --line with hexadecimal number
|
||||
Returns:
|
||||
nothing
|
||||
Logic:
|
||||
@ -373,7 +373,7 @@ class ListTable:
|
||||
a list-in-table tag. Get the dictionary of this list
|
||||
(the first item). Print out the key => value pair.
|
||||
Remove the first item (the dictionary) form this list. Now iterate
|
||||
through what is left in the list. Each list will conatin one item,
|
||||
through what is left in the list. Each list will contain one item,
|
||||
a dictionary. Get this dictionary and print out key => value pair.
|
||||
"""
|
||||
not_allow = ['list-id',]
|
||||
@ -440,7 +440,7 @@ class ListTable:
|
||||
Returns:
|
||||
A string and the dictionary of list-table values and attributes.
|
||||
Logic:
|
||||
Call on the __parse_lines metod, which splits the text string into
|
||||
Call on the __parse_lines method, which splits the text string into
|
||||
lines (which will be tokens) and processes them.
|
||||
"""
|
||||
self.__parse_lines(line)
|
||||
|
@ -392,7 +392,7 @@ class MakeLists:
|
||||
Returns:
|
||||
Nothing
|
||||
Logic
|
||||
Look for the start of a paragraph defintion. If one is found, check if
|
||||
Look for the start of a paragraph definition. If one is found, check if
|
||||
it contains a list-id. If it does, start a list. Change the state to
|
||||
in_pard.
|
||||
"""
|
||||
|
@ -10,7 +10,7 @@ class ParseOptions:
|
||||
options_dict -- a dictionary with the key equal to the opition, and
|
||||
a list describing that option. (See below)
|
||||
Returns:
|
||||
A tupple. The first item in the tupple is a dictionary containing
|
||||
A tuple. The first item in the tuple is a dictionary containing
|
||||
the arguments for each options. The second is a list of the
|
||||
arguments.
|
||||
If invalid options are passed to the module, 0,0 is returned.
|
||||
@ -193,7 +193,7 @@ class ParseOptions:
|
||||
list of options
|
||||
Logic:
|
||||
Iterate through the self.__system string, looking for the last
|
||||
option. The options are everything in the sysem string before the
|
||||
option. The options are everything in the system string before the
|
||||
last option.
|
||||
Check to see that the options contain no arguments.
|
||||
"""
|
||||
|
@ -316,7 +316,7 @@ if another paragraph_def is found, the state changes to collect_tokens.
|
||||
Returns:
|
||||
nothing
|
||||
Logic:
|
||||
Look for the beginning of a paragaraph definition
|
||||
Look for the beginning of a paragraph definition
|
||||
"""
|
||||
# cw<pf<par-def___<nu<true
|
||||
if self.__token_info == 'cw<pf<par-def___':
|
||||
@ -433,7 +433,7 @@ if another paragraph_def is found, the state changes to collect_tokens.
|
||||
Nothing
|
||||
Logic:
|
||||
The previous state was collect tokens, and I have found the start
|
||||
of a paragraph. I want to outut the defintion tag; output the line
|
||||
of a paragraph. I want to output the definition tag; output the line
|
||||
itself (telling me of the beginning of a paragraph);change the
|
||||
state to 'in_paragraphs';
|
||||
"""
|
||||
@ -449,7 +449,7 @@ if another paragraph_def is found, the state changes to collect_tokens.
|
||||
Nothing
|
||||
Logic:
|
||||
The state was is after_para_def. and I have found the start of a
|
||||
paragraph. I want to outut the defintion tag; output the line
|
||||
paragraph. I want to output the definition tag; output the line
|
||||
itself (telling me of the beginning of a paragraph);change the
|
||||
state to 'in_paragraphs'.
|
||||
(I now realize that this is absolutely identical to the function above!)
|
||||
@ -517,8 +517,8 @@ if another paragraph_def is found, the state changes to collect_tokens.
|
||||
out the paragraph definition. If you find another paragraph
|
||||
definition, then you write out the old paragraph dictionary and
|
||||
print out the string. You change the state to collect tokens.
|
||||
If you find any larger block elemens, such as cell, row,
|
||||
field-block, or section, you write out the paragraph defintion and
|
||||
If you find any larger block elements, such as cell, row,
|
||||
field-block, or section, you write out the paragraph definition and
|
||||
then the text string.
|
||||
If you find the beginning of a paragraph, then you don't need to
|
||||
write out the paragraph definition. Write out the string, and
|
||||
@ -537,7 +537,7 @@ if another paragraph_def is found, the state changes to collect_tokens.
|
||||
Nothing
|
||||
Logic:
|
||||
The state is after the end of a paragraph. You have found the
|
||||
start of a paragaph, so you don't need to print out the paragaph
|
||||
start of a paragraph, so you don't need to print out the paragraph
|
||||
definition. Print out the string, the line, and change the state
|
||||
to in paragraphs.
|
||||
"""
|
||||
@ -553,8 +553,8 @@ if another paragraph_def is found, the state changes to collect_tokens.
|
||||
Returns:
|
||||
Nothing
|
||||
Logic:
|
||||
You have found a new paragraph defintion at the end of a
|
||||
paragraph. Output the end of the old paragraph defintion. Output
|
||||
You have found a new paragraph definition at the end of a
|
||||
paragraph. Output the end of the old paragraph definition. Output
|
||||
the text string. Output the line. Change the state to collect
|
||||
tokens. (And don't forget to set the text string to ''!)
|
||||
"""
|
||||
@ -571,7 +571,7 @@ if another paragraph_def is found, the state changes to collect_tokens.
|
||||
Logic:
|
||||
The state is after a paragraph, and you have found a larger block
|
||||
than paragraph-definition. You want to write the end tag of the
|
||||
old defintion and reset the text string (handled by other
|
||||
old definition and reset the text string (handled by other
|
||||
methods).
|
||||
"""
|
||||
self.__write_para_def_end_func()
|
||||
|
@ -163,7 +163,7 @@ class PreambleDiv:
|
||||
|
||||
def __make_default_font_table(self):
|
||||
"""
|
||||
If not font table is fount, need to write one out.
|
||||
If not font table is found, need to write one out.
|
||||
"""
|
||||
self.__font_table_final = 'mi<tg<open______<font-table\n'
|
||||
self.__font_table_final += 'mi<mk<fonttb-beg\n'
|
||||
|
@ -137,7 +137,7 @@ class Preamble:
|
||||
nothing (changes the original file)
|
||||
Logic:
|
||||
Read one line in at a time. Determine what action to take based on
|
||||
the state. The state can either be defaut, the revision table, or
|
||||
the state. The state can either be default, the revision table, or
|
||||
the list table.
|
||||
"""
|
||||
self.__initiate_values()
|
||||
|
@ -588,7 +588,7 @@ class ProcessTokens:
|
||||
}
|
||||
"""
|
||||
# unknown
|
||||
# These must get passed on because they occure after \\*
|
||||
# These must get passed on because they occurred after \\*
|
||||
'do' : ('un', 'unknown___', self.default_func),
|
||||
'company' : ('un', 'company___', self.default_func),
|
||||
'shpinst' : ('un', 'unknown___', self.default_func),
|
||||
@ -755,7 +755,7 @@ class ProcessTokens:
|
||||
return first, second
|
||||
|
||||
def convert_to_hex(self,number):
|
||||
"""Convert a string to uppercase hexidecimal"""
|
||||
"""Convert a string to uppercase hexadecimal"""
|
||||
num = int(number)
|
||||
try:
|
||||
hex_num = "%X" % num
|
||||
|
@ -52,8 +52,8 @@ class Sections:
|
||||
between the section tags.
|
||||
Start a new section outside the field-block strings. Use the second number in
|
||||
the list; use the second item in the description list.
|
||||
CHANGE (2004-04-26) No longer write sections that occurr in field-blocks.
|
||||
Instead, ingore all section information in a field-block.
|
||||
CHANGE (2004-04-26) No longer write sections that occur in field-blocks.
|
||||
Instead, ignore all section information in a field-block.
|
||||
"""
|
||||
|
||||
def __init__(self,
|
||||
@ -151,7 +151,7 @@ class Sections:
|
||||
nothing
|
||||
Logic:
|
||||
I need to add the right data to the section values dictionary so I
|
||||
can retrive it later. The attribute (or key) is the name; the
|
||||
can retrieve it later. The attribute (or key) is the name; the
|
||||
value is the last part of the text string.
|
||||
ex: cw<tb<columns___<nu<2
|
||||
"""
|
||||
@ -207,7 +207,7 @@ class Sections:
|
||||
nothing
|
||||
Logic:
|
||||
I have found a section definition. Check if the line is the end of
|
||||
the defnition (a paragraph defintion), or if it contains info that
|
||||
the definition (a paragraph definition), or if it contains info that
|
||||
should be added to the values dictionary. If neither of these
|
||||
cases are true, output the line to a file.
|
||||
"""
|
||||
@ -247,9 +247,9 @@ class Sections:
|
||||
nothing
|
||||
Logic:
|
||||
Text or control words indicating text have been found
|
||||
before \\pard. This shoud indicate older RTF. Reset the state
|
||||
Write the section defintion. Insert a paragraph definition.
|
||||
Insert {} to mark the end of a paragraph defintion
|
||||
before \\pard. This should indicate older RTF. Reset the state
|
||||
Write the section definition. Insert a paragraph definition.
|
||||
Insert {} to mark the end of a paragraph definition
|
||||
"""
|
||||
if not self.__in_field:
|
||||
self.__state = 'body'
|
||||
@ -427,7 +427,7 @@ class Sections:
|
||||
Change the state.
|
||||
"""
|
||||
# change this 2004-04-26
|
||||
# Don't do anyting
|
||||
# Don't do anything
|
||||
"""
|
||||
self.__sec_in_field_string += line
|
||||
self.__print_field_sec_attributes()
|
||||
|
@ -89,7 +89,7 @@ class Styles:
|
||||
'default-ta' : 'default-tab',
|
||||
'align_____' : 'align',
|
||||
'widow-cntr' : 'widow-control',
|
||||
# page fomratting mixed in! (Just in older RTF?)
|
||||
# page formatting mixed in! (Just in older RTF?)
|
||||
'margin-lef' : 'left-indent',
|
||||
'margin-rig' : 'right-indent',
|
||||
'margin-bot' : 'space-after',
|
||||
@ -527,7 +527,7 @@ class Styles:
|
||||
style. I accomplish this by simply looking up the value of 15 in
|
||||
the styles table.
|
||||
Use two loops. First, check all the paragraph styles. Then check
|
||||
all the characer styles.
|
||||
all the character styles.
|
||||
The inner loop: first check 'next-style', then check 'based-on-style'.
|
||||
Make sure values exist for the keys to avoid the nasty keyerror message.
|
||||
"""
|
||||
@ -629,7 +629,7 @@ class Styles:
|
||||
Returns:
|
||||
nothing
|
||||
Logic:
|
||||
Check the line for the beginning of an individaul style. If it is
|
||||
Check the line for the beginning of an individual style. If it is
|
||||
not found, simply print out the line.
|
||||
"""
|
||||
action = self.__state_dict.get(self.__token_info)
|
||||
@ -698,7 +698,7 @@ class Styles:
|
||||
beginning of the style table.
|
||||
If the state is in the style table, create the style dictionary
|
||||
and print out the tags.
|
||||
If the state if afer the style table, look for lines with style
|
||||
If the state is after the style table, look for lines with style
|
||||
info, and substitute the number with the name of the style.
|
||||
"""
|
||||
self.__initiate_values()
|
||||
|
@ -29,8 +29,8 @@ States.
|
||||
2. 'mi<mk<not-in-tbl', end the table.
|
||||
3. 'cw<tb<row-def___' start a row definition
|
||||
3. in_row_definition
|
||||
1. 'mi<mk<not-in-tbl' : end the row defintion. If in table, end the table.
|
||||
2. 'mi<mk<pard-start' : end the row defintion
|
||||
1. 'mi<mk<not-in-tbl' : end the row definition. If in table, end the table.
|
||||
2. 'mi<mk<pard-start' : end the row definition
|
||||
if already in the table, start a row and cell.
|
||||
3. 'cw<tb<row_______' : end the row definition, end the row
|
||||
4. 'cw...' use another method to handle the control word
|
||||
@ -299,7 +299,7 @@ class Table:
|
||||
the tokens in the row definition contain the following information:
|
||||
1. row borders.
|
||||
2. cell borders for all cells in the row.
|
||||
3. cell postions for all cells in the row.
|
||||
3. cell positions for all cells in the row.
|
||||
Put all information about row borders into a row dictionary.
|
||||
Put all information about cell borders into into the dictionary in
|
||||
the last item in the cell list. ([{border:something, width:something},
|
||||
@ -501,7 +501,7 @@ class Table:
|
||||
nothing
|
||||
Logic:
|
||||
Write an empty tag with attributes if there are attributes.
|
||||
Otherwise, writen an empty tag with cell as element.
|
||||
Otherwise, write an empty tag with cell as element.
|
||||
"""
|
||||
if len(self.__cell_list) > 0:
|
||||
self.__write_obj.write('mi<tg<empty-att_<cell')
|
||||
|
@ -195,7 +195,7 @@ class SNBFile:
|
||||
def Output(self, outputFile):
|
||||
|
||||
# Sort the files in file buffer,
|
||||
# requried by the SNB file format
|
||||
# required by the SNB file format
|
||||
self.files.sort(key=lambda x: x.fileName)
|
||||
|
||||
outputFile = open(outputFile, 'wb')
|
||||
|
@ -59,7 +59,7 @@ class MarkdownMLizer(OEB2HTML):
|
||||
# pre has 4 spaces. We trimmed 3 so anything with a space left is a pre.
|
||||
text = re.sub('(?msu)^[ ]', ' ', text)
|
||||
|
||||
# Remove tabs that aren't at the beinning of a line
|
||||
# Remove tabs that aren't at the beginning of a line
|
||||
new_text = []
|
||||
for l in text.splitlines():
|
||||
start = re.match('\t+', l)
|
||||
|
@ -79,7 +79,7 @@ class TextileMLizer(OEB2HTML):
|
||||
text = re.sub(r'(\s|[*_\'"])\[('+t+'[a-zA-Z0-9 \'",.*_]+'+t+r')\](\s|[*_\'"?!,.])', r'\1\2\3', text)
|
||||
return text
|
||||
|
||||
# Now tidyup links and ids - remove ones that don't have a correponding opposite
|
||||
# Now tidyup links and ids - remove ones that don't have a corresponding opposite
|
||||
if self.opts.keep_links:
|
||||
for i in self.our_links:
|
||||
if i[0] == '#':
|
||||
|
@ -75,7 +75,7 @@ class Unidecoder:
|
||||
self.codepoints.update(HANCODES)
|
||||
|
||||
def decode(self, text):
|
||||
# Replace characters larger than 127 with their ASCII equivelent.
|
||||
# Replace characters larger than 127 with their ASCII equivalent.
|
||||
return re.sub('[^\x00-\x7f]',lambda x: self.replace_point(x.group()), text)
|
||||
|
||||
def replace_point(self, codepoint):
|
||||
@ -95,7 +95,7 @@ class Unidecoder:
|
||||
'''
|
||||
Find what group character is a part of.
|
||||
'''
|
||||
# Code groups withing CODEPOINTS take the form 'xAB'
|
||||
# Code groups within CODEPOINTS take the form 'xAB'
|
||||
if not isinstance(character, unicode_type):
|
||||
character = unicode_type(character, "utf-8")
|
||||
return 'x%02x' % (ord(character) >> 8)
|
||||
|
@ -274,7 +274,7 @@ class InterfaceAction(QObject):
|
||||
persist_shortcut=persist_shortcut)
|
||||
# In Qt 5 keyboard shortcuts dont work unless the
|
||||
# action is explicitly added to the main window and on OSX and
|
||||
# Unity since the menu might be exported, the shortcuts wont work
|
||||
# Unity since the menu might be exported, the shortcuts won't work
|
||||
self.gui.addAction(ac)
|
||||
if triggered is not None:
|
||||
ac.triggered.connect(triggered)
|
||||
|
@ -675,7 +675,7 @@ class BarsManager(QObject):
|
||||
'''
|
||||
This shows the correct main toolbar and rebuilds the menubar based on
|
||||
whether a device is connected or not. Note that the toolbars are
|
||||
explicitly not rebuilt, this is to workaround a Qt limitation iwth
|
||||
explicitly not rebuilt, this is to workaround a Qt limitation with
|
||||
QToolButton's popup menus and modal dialogs. If you want the toolbars
|
||||
rebuilt, call init_bars().
|
||||
'''
|
||||
|
@ -976,7 +976,7 @@ class GridView(QListView):
|
||||
newdb.new_api.add_cover_cache(x)
|
||||
try:
|
||||
# Use a timeout so that if, for some reason, the render thread
|
||||
# gets stuck, we dont deadlock, future covers wont get
|
||||
# gets stuck, we don't deadlock, future covers won't get
|
||||
# rendered, but this is better than a deadlock
|
||||
join_with_timeout(self.delegate.render_queue)
|
||||
except RuntimeError:
|
||||
|
@ -269,7 +269,7 @@ class MetadataSingleDialogBase(QDialog):
|
||||
self.fetch_metadata_button = b = RightClickButton(self)
|
||||
# The following rigmarole is needed so that Qt gives the button the
|
||||
# same height as the other buttons in the dialog. There is no way to
|
||||
# center the text in a QToolButton with an icon, so we cant just set an
|
||||
# center the text in a QToolButton with an icon, so we can't just set an
|
||||
# icon
|
||||
b.setIcon(QIcon(I('download-metadata.png')))
|
||||
b.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextBesideIcon)
|
||||
|
@ -160,7 +160,7 @@ public:
|
||||
/*!
|
||||
Returns QImage of specified slide.
|
||||
This function will be called only whenever necessary, e.g. the 100th slide
|
||||
will not be retrived when only the first few slides are visible.
|
||||
will not be retrieved when only the first few slides are visible.
|
||||
*/
|
||||
virtual QImage slide(int index) const;
|
||||
|
||||
|
@ -102,8 +102,8 @@ class TextureChooser(QDialog):
|
||||
return i
|
||||
|
||||
def update_remove_state(self):
|
||||
removeable = bool(self.selected_fname and not self.selected_fname.startswith(':'))
|
||||
self.remove_button.setEnabled(removeable)
|
||||
removable = bool(self.selected_fname and not self.selected_fname.startswith(':'))
|
||||
self.remove_button.setEnabled(removable)
|
||||
|
||||
@property
|
||||
def texture(self):
|
||||
|
@ -115,14 +115,14 @@ class StorePlugin: # {{{
|
||||
disabled by default.
|
||||
|
||||
If a store doesn't provide search on it's own use something like a site specific
|
||||
google search to get search results for this funtion.
|
||||
google search to get search results for this function.
|
||||
|
||||
:param query: The string query search with.
|
||||
:param max_results: The maximum number of results to return.
|
||||
:param timeout: The maximum amount of time in seconds to spend downloading data for search results.
|
||||
|
||||
:return: :class:`calibre.gui2.store.search_result.SearchResult` objects
|
||||
item_data is plugin specific and is used in :meth:`open` to open to a specifc place in the store.
|
||||
item_data is plugin specific and is used in :meth:`open` to open to a specific place in the store.
|
||||
'''
|
||||
raise NotImplementedError()
|
||||
|
||||
|
@ -148,7 +148,7 @@ class Matches(QAbstractItemModel):
|
||||
query = query.replace('~', '')
|
||||
query = query.replace('>', '')
|
||||
query = query.replace('<', '')
|
||||
# Store the query at this point for comparision later
|
||||
# Store the query at this point for comparison later
|
||||
mod_query = query
|
||||
# Remove filter identifiers
|
||||
# Remove the prefix.
|
||||
|
@ -200,7 +200,7 @@ class SearchDialog(QDialog, Ui_Dialog):
|
||||
self.searching = True
|
||||
self.search.setText(self.STOP_TEXT)
|
||||
# Give the query to the results model so it can do
|
||||
# futher filtering.
|
||||
# further filtering.
|
||||
self.results_view.model().set_query(query)
|
||||
|
||||
# Plugins are in random order that does not change.
|
||||
|
@ -53,7 +53,7 @@ class BubokPortugalStore(BasicStoreConfig, StorePlugin):
|
||||
|
||||
title = ''.join(data.xpath('.//div[@class="titulo"]/text()'))
|
||||
|
||||
author = ''.join(data.xpath('.//div[@class="autor"]/text()'))
|
||||
author = ''.join(data.xpath('.//div[@class="autor"]/text()'))
|
||||
|
||||
price = ''.join(data.xpath('.//div[@class="precio"]/text()'))
|
||||
|
||||
|
@ -53,7 +53,7 @@ class BubokPublishingStore(BasicStoreConfig, StorePlugin):
|
||||
|
||||
title = ''.join(data.xpath('.//div[@class="titulo"]/text()'))
|
||||
|
||||
author = ''.join(data.xpath('.//div[@class="autor"]/text()'))
|
||||
author = ''.join(data.xpath('.//div[@class="autor"]/text()'))
|
||||
|
||||
price = ''.join(data.xpath('.//div[@class="precio"]/text()'))
|
||||
|
||||
|
@ -62,7 +62,7 @@ class RW2010Store(BasicStoreConfig, StorePlugin):
|
||||
with closing(br.open(id.strip(), timeout=timeout/4)) as nf:
|
||||
idata = html.fromstring(nf.read())
|
||||
cover_url = ''.join(idata.xpath('//div[@class="boxa"]//div[@class="img"]/img/@src'))
|
||||
author = ''.join(idata.xpath('//div[@class="boxb"]//h3[text()="Autor: "]/span/text()'))
|
||||
author = ''.join(idata.xpath('//div[@class="boxb"]//h3[text()="Autor: "]/span/text()'))
|
||||
title = ''.join(idata.xpath('//div[@class="boxb"]/h2[1]/text()'))
|
||||
title = re.sub(r'\(#.+\)', '', title)
|
||||
formats = ''.join(idata.xpath('//div[@class="boxb"]//h3[text()="Format pliku: "]/span/text()'))
|
||||
|
@ -54,7 +54,7 @@ class ThreadedJob(BaseJob):
|
||||
Note that it is not called if the user kills the job. Check job.failed
|
||||
to see if the job succeeded or not. And use job.log to get the job log.
|
||||
|
||||
:param killable: If False the GUI wont let the user kill this job
|
||||
:param killable: If False the GUI won't let the user kill this job
|
||||
|
||||
:param log: Must be a subclass of GUILog or None. If None a default
|
||||
GUILog is created.
|
||||
|
@ -60,7 +60,7 @@ enum {
|
||||
};
|
||||
|
||||
|
||||
/* values from this array need to correspont to the order of the enum above */
|
||||
/* values from this array need to correspond to the order of the enum above */
|
||||
static char *opcode_names[] = {
|
||||
"equal",
|
||||
"insert",
|
||||
@ -586,7 +586,7 @@ load_lines(PyObject *orig, struct line **lines)
|
||||
line->data = item;
|
||||
line->hash = PyObject_Hash(item);
|
||||
if (line->hash == (-1)) {
|
||||
/* Propogate the hash exception */
|
||||
/* Propagate the hash exception */
|
||||
size = -1;
|
||||
goto cleanup;
|
||||
}
|
||||
|
@ -401,7 +401,7 @@ class Editor(QMainWindow):
|
||||
if name == 'insert-tag':
|
||||
w = bar.widgetForAction(ac)
|
||||
if hasattr(w, 'setPopupMode'):
|
||||
# For some unknown reason this button is occassionally a
|
||||
# For some unknown reason this button is occasionally a
|
||||
# QPushButton instead of a QToolButton
|
||||
w.setPopupMode(QToolButton.ToolButtonPopupMode.MenuButtonPopup)
|
||||
w.setMenu(self.insert_tag_menu)
|
||||
@ -413,7 +413,7 @@ class Editor(QMainWindow):
|
||||
ac.setMenu(m)
|
||||
ch = bar.widgetForAction(ac)
|
||||
if hasattr(ch, 'setPopupMode'):
|
||||
# For some unknown reason this button is occassionally a
|
||||
# For some unknown reason this button is occasionally a
|
||||
# QPushButton instead of a QToolButton
|
||||
ch.setPopupMode(QToolButton.ToolButtonPopupMode.InstantPopup)
|
||||
for name in tuple('h%d' % d for d in range(1, 7)) + ('p',):
|
||||
|
@ -62,7 +62,7 @@ class BlockingJob(QWidget):
|
||||
def start(self):
|
||||
self.setGeometry(0, 0, self.parent().width(), self.parent().height())
|
||||
self.setVisible(True)
|
||||
# Prevent any actions from being triggerred by key presses
|
||||
# Prevent any actions from being triggered by key presses
|
||||
self.parent().setEnabled(False)
|
||||
self.raise_()
|
||||
self.setFocus(Qt.FocusReason.OtherFocusReason)
|
||||
|
@ -176,7 +176,7 @@ class Declaration(QWidget):
|
||||
])
|
||||
self.lines_for_copy.append(text + vtext)
|
||||
if prop.is_overriden:
|
||||
self.lines_for_copy[-1] += ' [overriden]'
|
||||
self.lines_for_copy[-1] += ' [overridden]'
|
||||
ypos += max(br1.height(), br2.height()) + line_spacing
|
||||
self.lines_for_copy.append('--------------------------\n')
|
||||
|
||||
|
@ -472,7 +472,7 @@ class ToolbarSettings(QWidget):
|
||||
self.sl = l = QGridLayout()
|
||||
gl.addLayout(l, 1, 0, 1, -1)
|
||||
|
||||
self.gb1 = gb1 = QGroupBox(_('A&vailable actions'), self)
|
||||
self.gb1 = gb1 = QGroupBox(_('A&vailable actions'), self)
|
||||
self.gb2 = gb2 = QGroupBox(_('&Current actions'), self)
|
||||
gb1.setFlat(True), gb2.setFlat(True)
|
||||
gb1.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)
|
||||
|
@ -61,7 +61,7 @@ def save_container(container, path):
|
||||
except EnvironmentError as err:
|
||||
if err.errno not in (errno.EPERM, errno.EACCES):
|
||||
# ignore chown failure as user could be editing file belonging
|
||||
# to a different user, in which case we really cant do anything
|
||||
# to a different user, in which case we really can't do anything
|
||||
# about it short of making the file update non-atomic
|
||||
raise
|
||||
|
||||
|
@ -53,7 +53,7 @@ def get_newest_version():
|
||||
# certificate verification failed, since the version check contains no
|
||||
# critical information, ignore and proceed
|
||||
# We have to do this as if the calibre CA certificate ever
|
||||
# needs to be revoked, then we wont be able to do version checks
|
||||
# needs to be revoked, then we won't be able to do version checks
|
||||
version = get_https_resource_securely(URL, headers=headers, cacerts=None)
|
||||
try:
|
||||
version = version.decode('utf-8').strip()
|
||||
|
@ -418,7 +418,7 @@ class ConfigureToolBar(Dialog):
|
||||
|
||||
self.h = h = QHBoxLayout()
|
||||
l.addLayout(h)
|
||||
self.lg = lg = QGroupBox(_('A&vailable actions'), self)
|
||||
self.lg = lg = QGroupBox(_('A&vailable actions'), self)
|
||||
lg.v = v = QVBoxLayout(lg)
|
||||
v.addWidget(self.available_actions)
|
||||
h.addWidget(lg)
|
||||
|
@ -83,7 +83,7 @@ class FilenamePattern(QWidget, Ui_Form): # {{{
|
||||
connect_lambda(self.re.lineEdit().textChanged, self, lambda self, x: self.changed_signal.emit())
|
||||
|
||||
def initialize(self, defaults=False):
|
||||
# Get all items in the combobox. If we are reseting
|
||||
# Get all items in the combobox. If we are resetting
|
||||
# to defaults we don't want to lose what the user
|
||||
# has added.
|
||||
val_hist = [unicode_type(self.re.lineEdit().text())] + [unicode_type(self.re.itemText(i)) for i in range(self.re.count())]
|
||||
@ -789,7 +789,7 @@ class EncodingComboBox(QComboBox): # {{{
|
||||
A combobox that holds text encodings support
|
||||
by Python. This is only populated with the most
|
||||
common and standard encodings. There is no good
|
||||
way to programatically list all supported encodings
|
||||
way to programmatically list all supported encodings
|
||||
using encodings.aliases.aliases.keys(). It
|
||||
will not work.
|
||||
'''
|
||||
|
@ -32,7 +32,7 @@ class GenericUnixServices : public QGenericUnixServices {
|
||||
* leading to a segfault. For example, defaultHintStyleFromMatch() queries
|
||||
* the nativeInterface() without checking that it is NULL. See
|
||||
* https://bugreports.qt-project.org/browse/QTBUG-40946
|
||||
* This is no longer strictly neccessary since we implement our own fontconfig database
|
||||
* This is no longer strictly necessary since we implement our own fontconfig database
|
||||
* (a patched version of the Qt fontconfig database). However, it is probably a good idea to
|
||||
* keep it unknown, since the headless QPA is used in contexts where a desktop environment
|
||||
* does not make sense anyway.
|
||||
|
@ -279,7 +279,7 @@ class BIBTEX(CatalogPlugin):
|
||||
bibfile_enctag = ['strict', 'replace', 'ignore', 'backslashreplace']
|
||||
bib_entry = ['mixed', 'misc', 'book']
|
||||
|
||||
# Needed beacause CLI return str vs int by widget
|
||||
# Needed because CLI return str vs int by widget
|
||||
try:
|
||||
bibfile_enc = bibfile_enc[opts.bibfile_enc]
|
||||
bibfile_enctag = bibfile_enctag[opts.bibfile_enctag]
|
||||
@ -342,7 +342,7 @@ class BIBTEX(CatalogPlugin):
|
||||
# Initialize BibTeX class
|
||||
bibtexc = BibTeX()
|
||||
|
||||
# Entries writing after Bibtex formating (or not)
|
||||
# Entries writing after Bibtex formatting (or not)
|
||||
if bibfile_enc != 'ascii' :
|
||||
bibtexc.ascii_bibtex = False
|
||||
else :
|
||||
|
@ -129,7 +129,7 @@ class NumberToText: # {{{
|
||||
right = NumberToText(decimal_strings[1]).text
|
||||
self.text = '%s point %s' % (left.capitalize(), right)
|
||||
|
||||
# Test for hypenated
|
||||
# Test for hyphenated
|
||||
elif re.search('-', self.number):
|
||||
if self.verbose:
|
||||
self.log("Hyphenated: %s" % self.number)
|
||||
|
@ -404,7 +404,7 @@ class LibraryDatabase:
|
||||
END;
|
||||
END;
|
||||
CREATE TRIGGER fkc_update_books_series_link_b
|
||||
BEFORE UPDATE OF serie ON books_series_link
|
||||
BEFORE UPDATE OF series ON books_series_link
|
||||
BEGIN
|
||||
SELECT CASE
|
||||
WHEN (SELECT id from series WHERE id=NEW.series) IS NULL
|
||||
|
@ -216,7 +216,7 @@ class Dictionaries:
|
||||
try:
|
||||
ans.obj.add(word)
|
||||
except Exception:
|
||||
# not critical since all it means is that the word wont show up in suggestions
|
||||
# not critical since all it means is that the word won't show up in suggestions
|
||||
prints('Failed to add the word %r to the dictionary for %s' % (word, locale), file=sys.stderr)
|
||||
self.dictionaries[locale] = ans
|
||||
return ans
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user