mirror of https://github.com/kovidgoyal/calibre.git

commit 12cb8b2e58
parent e0022f21cf

    upgrade hidden print to Python 3 (extra-edit)
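    The conversion is mechanical: Python 2's print statement becomes a
    Python 3 print() call, applied here even to commented-out ("hidden")
    debug lines so they remain valid syntax if ever uncommented. A
    minimal sketch of the pattern, reusing the print_url value from the
    MedScrape hunk below (the assigned value is illustrative only):

        # Python 2 statement form, a syntax error under Python 3:
        # print 'the printable version is: ', print_url

        # Python 3 function form used throughout this commit:
        print_url = 'http://www.medscape.com/viewarticle/728955_print'
        print('the printable version is: ', print_url)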
@@ -53,7 +53,7 @@ class AM730(BasicNewsRecipe):
         return self.masthead_url

     def getAMSectionArticles(self, sectionName,url):
-        # print sectionName
+        # print(sectionName)
         soup = self.index_to_soup(url)
         articles = []
         for aTag in soup.findAll('a',attrs={'class':'newsimglink'}):
@@ -67,7 +67,7 @@ class AM730(BasicNewsRecipe):
             print(title)
             try:
                 if articles.index({'title':title,'url':href})>=0:
-                    # print 'already added'
+                    # print('already added')
                     continue  # already added
             except:
                 pass
@@ -66,7 +66,7 @@ class General(BasicNewsRecipe):
         index = 'https://www.elpais.com.uy/impresa/'
         soup = self.index_to_soup(index)
         link_item = soup.find('a', attrs={'class': 'page-link link-module'})
-        # print link_item
+        # print(link_item)
         if link_item:
             cover_url = 'https://www.elpais.com.uy' + link_item.get('href')
         return cover_url
@@ -56,7 +56,7 @@ class MedScrape(BasicNewsRecipe):
         # the original url is: http://www.medscape.com/viewarticle/728955?src=rss
         # the print url is: http://www.medscape.com/viewarticle/728955_print
         print_url = url.partition('?')[0] + '_print'
-        # print 'the printable version is: ',print_url
+        # print('the printable version is: ',print_url)
         return print_url

     def preprocess_html(self, soup):
@@ -66,7 +66,7 @@ class OurDailyBread(BasicNewsRecipe):
             div.set('class', 'calibre-inserted-psalm')
             hr = div.makeelement('hr')
             div.insert(0, hr)
-        # print html.tostring(div)
+        # print(html.tostring(div))
         raw = html.tostring(root, encoding='unicode')
         return raw

@@ -142,6 +142,6 @@ class PressePortalDE(BasicNewsRecipe):
         # and split this at the symbol '.' (if there) : ('3120111',)
         # from this take the first tuple. '3120111'
         side = (url.rpartition('/')[-1]).rsplit('.', 1)[0]
-        # print 'ConvertPrintURL from : ' + url + '/n to ' + side
+        # print('ConvertPrintURL from : ' + url + '/n to ' + side)
         printurl = 'http://www.presseportal.de/print/' + side + '-print.html'
         return printurl
@@ -45,5 +45,5 @@ class AdvancedUserRecipe1283848012(BasicNewsRecipe):
         rg = re.compile(re1 + re2, re.IGNORECASE | re.DOTALL)
         m = rg.search(txt)
         if m:
-            # print 'bad link'
+            # print('bad link')
             return 1
@@ -423,14 +423,14 @@ def upload_to_servers(files, version):  # {{{
     #     print('Uploading to server:', server)
     #     server = '%s.calibre-ebook.com' % server
     #     # Copy the generated index files
-    #     print ('Copying generated index')
+    #     print('Copying generated index')
     #     check_call(['rsync', '-hza', '-e', 'ssh -x', '--include', '*.html',
     #                 '--filter', '-! */', base, 'root@%s:%s' % (server, rdir)])
     #     # Copy the release files
     #     rdir = '%s%s/' % (rdir, version)
     #     for x in files:
     #         start = time.time()
-    #         print ('Uploading', x)
+    #         print('Uploading', x)
     #         for i in range(5):
     #             try:
     #                 check_call(['rsync', '-h', '-z', '--progress', '-e', 'ssh -x', x,
@@ -438,11 +438,11 @@ def upload_to_servers(files, version):  # {{{
     #             except KeyboardInterrupt:
     #                 raise SystemExit(1)
     #             except:
-    #                 print ('\nUpload failed, trying again in 30 seconds')
+    #                 print('\nUpload failed, trying again in 30 seconds')
     #                 time.sleep(30)
     #             else:
     #                 break
-    #         print ('Uploaded in', int(time.time() - start), 'seconds\n\n')
+    #         print('Uploaded in', int(time.time() - start), 'seconds\n\n')
     #

@@ -61,11 +61,11 @@ class Bookmark:  # {{{
             bpl = bpar_offset + 4
             bpar_len, = unpack('>I', data[bpl:bpl+4])
             bpar_len += 8
-            # print 'bpar_len: 0x%x' % bpar_len
+            # print('bpar_len: 0x%x' % bpar_len)
             eo = bpar_offset + bpar_len

             # Walk bookmark entries
-            # print ' --- %s --- ' % self.path
+            # print(' --- %s --- ' % self.path)
             current_entry = 1
             sig = data[eo:eo+4]
             previous_block = None
@@ -80,7 +80,7 @@ class Bookmark:  # {{{
                     current_block = 'data_header'
                     # entry_type = "data_header"
                     location, = unpack('>I', data[eo+0x34:eo+0x38])
-                    # print 'data_header location: %d' % location
+                    # print('data_header location: %d' % location)
                 else:
                     current_block = 'text_block'
                     if previous_block == 'empty_data':
@@ -112,11 +112,11 @@ class Bookmark:  # {{{
                     start, = unpack('>I', data[eo+8:eo+12])
                     user_notes[start] = user_notes[end_loc]
                     '''
-                    print " %s: swapping 0x%x (%d) to 0x%x (%d)" % (user_notes[end_loc]['type'],
+                    print(" %s: swapping 0x%x (%d) to 0x%x (%d)" % (user_notes[end_loc]['type'],
                         end_loc,
                         end_loc/MAGIC_MOBI_CONSTANT + 1,
                         start,
-                        start//MAGIC_MOBI_CONSTANT + 1)
+                        start//MAGIC_MOBI_CONSTANT + 1))
                     '''
                     user_notes[start]['displayed_location'] = start // MAGIC_MOBI_CONSTANT + 1
                     user_notes.pop(end_loc)
@@ -125,7 +125,7 @@ class Bookmark:  # {{{
                     # be the same - cheat by nudging -1
                     # Skip bookmark for last_read_location
                     if end_loc != self.last_read:
-                        # print ' adding Bookmark at 0x%x (%d)' % (end_loc, end_loc/MAGIC_MOBI_CONSTANT + 1)
+                        # print(' adding Bookmark at 0x%x (%d)' % (end_loc, end_loc/MAGIC_MOBI_CONSTANT + 1))
                         displayed_location = end_loc // MAGIC_MOBI_CONSTANT + 1
                         user_notes[end_loc - 1] = dict(id=self.id,
                             displayed_location=displayed_location,
@@ -149,7 +149,7 @@ class Bookmark:  # {{{
             for row in cursor:
                 self.last_read = row['DateLastRead']
                 self.percent_read = 100 if (row['ReadStatus'] == 2) else row['___PercentRead']
-                # print row[1]
+                # print(row[1])
             cursor.close()

             # self.last_read_location = self.last_read - self.pdf_page_offset
@@ -321,7 +321,7 @@ class KOBO(USBMS):
                         playlist_map[lpath].append('Recommendation')

                 path = self.normalize_path(path)
-                # print 'Normalized FileName: ' + path
+                # print('Normalized FileName: ' + path)

                 idx = bl_cache.get(lpath, None)
                 if idx is not None:
@@ -332,7 +332,7 @@ class KOBO(USBMS):
                         # Try the Touch version if the image does not exist
                         imagename = self.normalize_path(self._main_prefix + KOBO_ROOT_DIR_NAME + '/images/' + ImageID + ' - N3_LIBRARY_FULL.parsed')

-                    # print 'Image name Normalized: ' + imagename
+                    # print('Image name Normalized: ' + imagename)
                     if not os.path.exists(imagename):
                         debug_print('Strange - The image name does not exist - title: ', title)
                 if imagename is not None:
@@ -340,7 +340,7 @@ class KOBO(USBMS):
                 if (ContentType != '6' and MimeType != 'Shortcover'):
                     if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
                         if self.update_metadata_item(bl[idx]):
-                            # print 'update_metadata_item returned true'
+                            # print('update_metadata_item returned true')
                             changed = True
                     else:
                         debug_print(' Strange: The file: ', prefix, lpath, ' does not exist!')
@@ -364,7 +364,7 @@ class KOBO(USBMS):
                            'mime: ', mime, 'date: ', date, 'ContentType: ', ContentType, 'ImageID: ', ImageID)
                 raise

-            # print 'Update booklist'
+            # print('Update booklist')
             book.device_collections = playlist_map.get(lpath,[])  # if lpath in playlist_map else []

             if bl.add_book(book, replace_metadata=False):
@@ -452,8 +452,8 @@ class KOBO(USBMS):
                     need_sync = True
                     del bl[idx]

-        # print 'count found in cache: %d, count of files in metadata: %d, need_sync: %s' % \
-        #     (len(bl_cache), len(bl), need_sync)
+        # print('count found in cache: %d, count of files in metadata: %d, need_sync: %s' % \
+        #     (len(bl_cache), len(bl), need_sync))
         if need_sync:  # self.count_found_in_bl != len(bl) or need_sync:
             if oncard == 'cardb':
                 self.sync_booklists((None, None, bl))
@@ -551,7 +551,7 @@ class KOBO(USBMS):
             fpath = self.normalize_path(fpath)

             if os.path.exists(fpath):
-                # print 'Image File Exists: ' + fpath
+                # print('Image File Exists: ' + fpath)
                 os.unlink(fpath)

     def delete_books(self, paths, end_session=True):
@@ -561,33 +561,33 @@ class KOBO(USBMS):
         for i, path in enumerate(paths):
             self.report_progress((i+1) / float(len(paths)), _('Removing books from device...'))
             path = self.normalize_path(path)
-            # print 'Delete file normalized path: ' + path
+            # print('Delete file normalized path: ' + path)
             extension = os.path.splitext(path)[1]
             ContentType = self.get_content_type_from_extension(extension) if extension else self.get_content_type_from_path(path)

             ContentID = self.contentid_from_path(path, ContentType)

             ImageID = self.delete_via_sql(ContentID, ContentType)
-            # print ' We would now delete the Images for' + ImageID
+            # print(' We would now delete the Images for' + ImageID)
             self.delete_images(ImageID, path)

             if os.path.exists(path):
                 # Delete the ebook
-                # print 'Delete the ebook: ' + path
+                # print('Delete the ebook: ' + path)
                 os.unlink(path)

                 filepath = os.path.splitext(path)[0]
                 for ext in self.DELETE_EXTS:
                     if os.path.exists(filepath + ext):
-                        # print 'Filename: ' + filename
+                        # print('Filename: ' + filename)
                         os.unlink(filepath + ext)
                     if os.path.exists(path + ext):
-                        # print 'Filename: ' + filename
+                        # print('Filename: ' + filename)
                         os.unlink(path + ext)

                 if self.SUPPORTS_SUB_DIRS:
                     try:
-                        # print 'removed'
+                        # print('removed')
                         os.removedirs(os.path.dirname(path))
                     except Exception:
                         pass
@@ -601,9 +601,9 @@ class KOBO(USBMS):
             self.report_progress((i+1) / float(len(paths)), _('Removing books from device metadata listing...'))
             for bl in booklists:
                 for book in bl:
-                    # print 'Book Path: ' + book.path
+                    # print('Book Path: ' + book.path)
                     if path.endswith(book.path):
-                        # print ' Remove: ' + book.path
+                        # print(' Remove: ' + book.path)
                         bl.remove_book(book)
         self.report_progress(1.0, _('Removing books from device metadata listing...'))
@@ -634,12 +634,12 @@ class KOBO(USBMS):
                 prints('in add_books_to_metadata. Prefix is None!', path,
                        self._main_prefix)
                 continue
-            # print 'Add book to metadata: '
-            # print 'prefix: ' + prefix
+            # print('Add book to metadata: ')
+            # print('prefix: ' + prefix)
             lpath = path.partition(prefix)[2]
             if lpath.startswith('/') or lpath.startswith('\\'):
                 lpath = lpath[1:]
-            # print 'path: ' + lpath
+            # print('path: ' + lpath)
             book = self.book_class(prefix, lpath, info.title, other=info)
             if book.size is None or book.size == 0:
                 book.size = os.stat(self.normalize_path(path)).st_size
@@ -686,13 +686,13 @@ class KOBO(USBMS):
     def get_content_type_from_extension(self, extension):
         if extension == '.kobo':
             # Kobo books do not have book files. They do have some images though
-            # print 'kobo book'
+            # print('kobo book')
             ContentType = 6
         elif extension == '.pdf' or extension == '.epub':
-            # print 'ePub or pdf'
+            # print('ePub or pdf')
             ContentType = 16
         elif extension == '.rtf' or extension == '.txt' or extension == '.htm' or extension == '.html':
-            # print 'txt'
+            # print('txt')
             if self.fwversion == (1,0) or self.fwversion == (1,4) or self.fwversion == (1,7,4):
                 ContentType = 999
             else:
@@ -708,14 +708,14 @@ class KOBO(USBMS):
             print('path from_contentid cardb')
         elif oncard == 'carda':
             path = path.replace('file:///mnt/sd/', self._card_a_prefix)
-            # print 'SD Card: ' + path
+            # print('SD Card: ' + path)
         else:
             if ContentType == '6' and MimeType == 'Shortcover':
                 # This is a hack as the kobo files do not exist
                 # but the path is required to make a unique id
                 # for calibre's reference
                 path = self._main_prefix + path + '.kobo'
-                # print 'Path: ' + path
+                # print('Path: ' + path)
             elif (ContentType == '6' or ContentType == '10') and (
                 MimeType == 'application/x-kobo-epub+zip' or (
                     MimeType == 'application/epub+zip' and self.isTolinoDevice())
@@ -724,12 +724,12 @@ class KOBO(USBMS):
                     path = self._main_prefix + path.replace('file:///mnt/onboard/', '')
                 else:
                     path = self._main_prefix + KOBO_ROOT_DIR_NAME + '/kepub/' + path
-                # print 'Internal: ' + path
+                # print('Internal: ' + path)
             else:
                 # if path.startswith('file:///mnt/onboard/'):
                 path = path.replace('file:///mnt/onboard/', self._main_prefix)
                 path = path.replace('/mnt/onboard/', self._main_prefix)
-                # print 'Internal: ' + path
+                # print('Internal: ' + path)

         return path

@@ -1820,7 +1820,7 @@ class KOBOTOUCH(KOBO):
         debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map)

         path = self.normalize_path(path)
-        # print 'Normalized FileName: ' + path
+        # print('Normalized FileName: ' + path)

         # Collect the Kobo metadata
         authors_list = [a.strip() for a in authors.split('&')] if authors is not None else [_('Unknown')]
@@ -1929,7 +1929,7 @@ class KOBOTOUCH(KOBO):
             debug_print(' bookshelves:', bookshelves)
             debug_print(' kobo_collections:', kobo_collections)

-            # print 'Update booklist'
+            # print('Update booklist')
             book.device_collections = playlist_map.get(lpath,[])  # if lpath in playlist_map else []
             book.current_shelves = bookshelves
             book.kobo_collections = kobo_collections
@@ -2144,8 +2144,8 @@ class KOBOTOUCH(KOBO):
                 else:
                     debug_print("KoboTouch:books - Book in mtadata.calibre, on file system but not database - bl[idx].title:'%s'"%bl[idx].title)

-        # print 'count found in cache: %d, count of files in metadata: %d, need_sync: %s' % \
-        #     (len(bl_cache), len(bl), need_sync)
+        # print('count found in cache: %d, count of files in metadata: %d, need_sync: %s' % \
+        #     (len(bl_cache), len(bl), need_sync))
         # Bypassing the KOBO sync_booklists as that does things we don't need to do
         # Also forcing sync to see if this solves issues with updating shelves and matching books.
         if need_sync or True:  # self.count_found_in_bl != len(bl) or need_sync:
@@ -2208,7 +2208,7 @@ class KOBOTOUCH(KOBO):
             path = path.replace('file:///mnt/onboard/', self._main_prefix)
             path = path.replace('file:///mnt/sd/', self._card_a_prefix)
             path = path.replace('/mnt/onboard/', self._main_prefix)
-            # print 'Internal: ' + path
+            # print('Internal: ' + path)

         return path

@@ -247,7 +247,7 @@ class USBMS(CLI, Device):
                 if idx is not None:
                     bl_cache[lpath] = None
                     if self.update_metadata_item(bl[idx]):
-                        # print 'update_metadata_item returned true'
+                        # print('update_metadata_item returned true')
                         changed = True
                 else:
                     if bl.add_book(self.book_from_path(prefix, lpath),
@@ -141,14 +141,14 @@ class CHMReader(CHMFile):
                 else:
                     frag = None
                 name = self._deentity(li.object('param', {'name': 'Name'})[0]['value'])
-                # print '========>', name
+                # print('========>', name)
                 toc.add_item(href, frag, name, play_order=self._playorder)
                 self._playorder += 1
                 if li.ul:
                     child = self._parse_toc(li.ul)
                     child.parent = toc
                     toc.append(child)
-        # print toc
+        # print(toc)
         return toc

     def ResolveObject(self, path):
@@ -628,7 +628,7 @@ class HeuristicProcessor:
     def check_paragraph(self, content):
         content = re.sub('\\s*</?span[^>]*>\\s*', '', content)
         if re.match('.*["\'.!?:]$', content):
-            # print 'detected this as a paragraph'
+            # print('detected this as a paragraph')
             return True
         else:
             return False
@@ -641,9 +641,9 @@ class HeuristicProcessor:
         html = re.sub('</?a[^>]*>', '', html)

         def convert_styles(match):
-            # print 'raw styles are: '+match.group('styles')
+            # print('raw styles are: '+match.group('styles'))
             content = match.group('content')
-            # print 'raw content is: '+match.group('content')
+            # print('raw content is: '+match.group('content'))
             image = match.group('image')

             is_paragraph = False
@@ -668,12 +668,12 @@ class HeuristicProcessor:
             else:
                 styles = match.group('styles').split(';')
                 is_paragraph = self.check_paragraph(content)
-                # print 'styles for this line are: '+str(styles)
+                # print('styles for this line are: '+str(styles))
                 split_styles = []
                 for style in styles:
-                    # print 'style is: '+str(style)
+                    # print('style is: '+str(style))
                     newstyle = style.split(':')
-                    # print 'newstyle is: '+str(newstyle)
+                    # print('newstyle is: '+str(newstyle))
                     split_styles.append(newstyle)
                 styles = split_styles
             for style, setting in styles:
@@ -710,18 +710,18 @@ class HeuristicProcessor:
                         self.log.debug('padding bottom is: ' + str(setting[2]))
                         self.log.debug('padding left is: ' +str(setting[3]))

-            # print 'text-align is: '+str(text_align)
-            # print '\n***\nline is:\n '+str(match.group(0))+'\n'
+            # print('text-align is: '+str(text_align))
+            # print('\n***\nline is:\n '+str(match.group(0))+'\n')
             if debugabby:
-                # print 'this line is a paragraph = '+str(is_paragraph)+', previous line was '+str(self.previous_was_paragraph)
+                # print('this line is a paragraph = '+str(is_paragraph)+', previous line was '+str(self.previous_was_paragraph))
                 self.log.debug('styles for this line were:', styles)
                 self.log.debug('newline is:')
                 self.log.debug(blockquote_open_loop+blockquote_close_loop+
                                paragraph_before+'<p style="'+text_indent+text_align+
                                '">'+content+'</p>'+paragraph_after+'\n\n\n\n\n')
-            # print 'is_paragraph is '+str(is_paragraph)+', previous_was_paragraph is '+str(self.previous_was_paragraph)
+            # print('is_paragraph is '+str(is_paragraph)+', previous_was_paragraph is '+str(self.previous_was_paragraph))
             self.previous_was_paragraph = is_paragraph
-            # print 'previous_was_paragraph is now set to '+str(self.previous_was_paragraph)+'\n\n\n'
+            # print('previous_was_paragraph is now set to '+str(self.previous_was_paragraph)+'\n\n\n')
             return blockquote_open_loop+blockquote_close_loop+paragraph_before+'<p style="'+text_indent+text_align+'">'+content+'</p>'+paragraph_after

         html = abbyy_line.sub(convert_styles, html)
@@ -772,7 +772,7 @@ class HeuristicProcessor:
         self.line_close = '\\s*(</[ibu][^>]*>\\s*)?</(?P=outer)>'

         # ADE doesn't render <br />, change to empty paragraphs
-        # html = re.sub('<br[^>]*>', u'<p>\u00a0</p>', html)
+        # html = re.sub('<br[^>]*>', '<p>\u00a0</p>', html)

         # Determine whether the document uses interleaved blank lines
         self.blanks_between_paragraphs = self.analyze_blanks(html)
@@ -37,7 +37,7 @@ class DjvuChunk:
         self.dataend = pos + self.size - (8 if inclheader else 0)
         if self.type == b'FORM':
             oldpos, pos = pos, pos+4
-            # print oldpos, pos
+            # print(oldpos, pos)
             self.subtype = buf[oldpos:pos]
             # self.headersize += 4
         self.datastart = pos
@@ -397,7 +397,7 @@ class LrfTag:
         if p is None:
             return

-        # print ' Writing tag', self.name
+        # print(' Writing tag', self.name)
         for f in self.format:
             if isinstance(f, dict):
                 p = f[p]
@@ -565,7 +565,7 @@ class LrfObject:
                    dotscode)))

     def write(self, lrf, encoding=None):
-        # print 'Writing object', self.name
+        # print('Writing object', self.name)
         LrfTag('ObjectStart', (self.objId, self.type)).write(lrf)

         for tag in self.tags:
@@ -1344,7 +1344,7 @@ class Page(LrsObject, LrsContainer):
             if hasattr(content, 'getReferencedObjIds'):
                 pageContent.update(content.getReferencedObjIds())

-        # print 'page contents:', pageContent
+        # print('page contents:', pageContent)
         # ObjectList not needed and causes slowdown in SONY LRF renderer
         # p.appendLrfTag(LrfTag("ObjectList", pageContent))
         p.appendLrfTag(LrfTag('Link', self.pageStyle.objId))
@@ -1620,7 +1620,7 @@ class Button(LrsObject, LrsContainer):

     def toLrf(self, lrfWriter):
         (refobj, refpage) = self.findJumpToRefs()
-        # print 'Button writing JumpTo refobj=', jumpto.refobj, ', and refpage=', jumpto.refpage
+        # print('Button writing JumpTo refobj=', jumpto.refobj, ', and refpage=', jumpto.refpage)
         button = LrfObject('Button', self.objId)
         button.appendLrfTag(LrfTag('buttonflags', 0x10))  # pushbutton
         button.appendLrfTag(LrfTag('PushButtonStart'))
@@ -439,7 +439,7 @@ def mi_to_html(

     ans = ['<tr id="%s" class="%s">%s</tr>'%(fieldl.replace('#', '_'),
         classname(fieldl), html) for fieldl, html in ans]
-    # print '\n'.join(ans)
+    # print('\n'.join(ans))
     direction = 'rtl' if rtl else 'ltr'
     rans = f'<table class="fields" style="direction: {direction}; '
     if not for_qt:
@@ -281,7 +281,7 @@ class MetadataUpdater:
         offset += 1
         self.md_header['num_recs'] = ord(self.data[offset:offset+1])
         offset += 1
-        # print 'self.md_header: %s' % self.md_header
+        # print('self.md_header: %s' % self.md_header)

         self.metadata = {}
         self.md_seq = []
@@ -108,7 +108,7 @@ class EXTHHeader:  # {{{
                 except Exception:
                     pass
                 # else:
-                #     print 'unknown record', idx, repr(content)
+                #     print('unknown record', idx, repr(content))
         if title:
             self.mi.title = replace_entities(clean_xml_chars(clean_ascii_chars(title)))

@@ -184,7 +184,7 @@ class EXTHHeader:  # {{{
             if self.kf8_header == NULL_INDEX:
                 self.kf8_header = None
         # else:
-        #     print 'unhandled metadata record', idx, repr(content)
+        #     print('unhandled metadata record', idx, repr(content))
 # }}}


@@ -188,7 +188,7 @@ class Hex2Utf8:
         self.__dingbats_dict.update(ms_dingbats_dict)
         # load dictionary for caps, and make a string for the replacement
         self.__caps_uni_dict = char_map_obj.get_char_map(map='caps_uni')
-        # # print self.__caps_uni_dict
+        # # print(self.__caps_uni_dict)
         # don't think I'll need this
         # keys = self.__caps_uni_dict.keys()
         # self.__caps_uni_replace = '|'.join(keys)
@@ -478,7 +478,7 @@ class Hex2Utf8:
         if in caps, convert. Otherwise, print out.
         '''
         text = line[17:-1]
-        # print line
+        # print(line)
         if self.__current_dict_name in ('Symbol', 'Wingdings', 'Zapf Dingbats'):
             the_string = ''
             for letter in text:
@@ -494,7 +494,7 @@ class Hex2Utf8:
                 else:
                     the_string += converted
             self.__write_obj.write('tx<nu<__________<%s\n' % the_string)
-            # print the_string
+            # print(the_string)
         else:
             if self.__caps_list[-1] == 'true' \
                 and self.__convert_caps\
@@ -34,15 +34,15 @@ class ParseOptions:
     def __init__(self, system_string, options_dict):
         self.__system_string = system_string[1:]
         long_list = self.__make_long_list_func(options_dict)
-        # # print long_list
+        # # print(long_list)
         short_list = self.__make_short_list_func(options_dict)
-        # # print short_list
+        # # print(short_list)
         self.__legal_options = long_list + short_list
-        # # print self.__legal_options
+        # # print(self.__legal_options)
         self.__short_long_dict = self.__make_short_long_dict_func(options_dict)
-        # # print self.__short_long_dict
+        # # print(self.__short_long_dict)
         self.__opt_with_args = self.__make_options_with_arg_list(options_dict)
-        # # print self.__opt_with_args
+        # # print(self.__opt_with_args)
         self.__options_okay = 1

     def __make_long_list_func(self, options_dict):
@@ -256,16 +256,16 @@ class ParseOptions:

     def parse_options(self):
         self.__system_string = self.__sub_short_with_long()
-        # # print 'subbed list is %s' % self.__system_string
+        # # print('subbed list is %s' % self.__system_string)
         self.__system_string = self.__pair_arg_with_option()
-        # # print 'list with pairing is %s' % self.__system_string
+        # # print('list with pairing is %s' % self.__system_string)
         options, arguments = self.__get_just_options()
-        # # print 'options are %s ' % options
-        # # print 'arguments are %s ' % arguments
+        # # print('options are %s ' % options)
+        # # print('arguments are %s ' % arguments)
         self.__is_legal_option_func()
         if self.__options_okay:
             options_dict = self.__make_options_dict(options)
-            # # print options_dict
+            # # print(options_dict)
             return options_dict, arguments
         else:
             return 0,0
@@ -552,7 +552,7 @@ class Table:
             line = line_to_read
             self.__token_info = line[:16]
             action = self.__state_dict.get(self.__state[-1])
-            # print self.__state[-1]
+            # print(self.__state[-1])
             if action is None:
                 sys.stderr.write('No matching state in module table.py\n')
                 sys.stderr.write(self.__state[-1] + '\n')
@@ -204,7 +204,7 @@ class Tokenize:
 # import sys
 # def main(args=sys.argv):
 #     if len(args) < 2:
-#         print 'No file'
+#         print('No file')
 #         return
 #     file = 'data_tokens.txt'
 #     if len(args) == 3:
@@ -1521,8 +1521,8 @@ class Editor(QWidget):  # {{{
         self.editor.html = v

     def change_tab(self, index):
-        # print 'reloading:', (index and self.wyswyg_dirty) or (not index and
-        # self.source_dirty)
+        # print('reloading:', (index and self.wyswyg_dirty) or (not index and
+        # self.source_dirty))
         if index == 1:  # changing to code view
             if self.wyswyg_dirty:
                 self.code_edit.setPlainText(self.editor.html)
@@ -1598,4 +1598,4 @@ if __name__ == '__main__':
     i = 'file:///home/kovid/work/calibre/resources/images/'
     w.html = f'<p>Testing <img src="{i}/donate.png"> img and another <img src="{i}/lt.png">file</p>'
     app.exec()
-    # print w.html
+    # print(w.html)
@@ -243,4 +243,4 @@ def ignored_folders():

 if __name__ == '__main__':
     print(browse())
-    # print ('Ignored:', ignored_folders())
+    # print('Ignored:', ignored_folders())
@@ -186,7 +186,7 @@ class MarkdownHighlighter(QSyntaxHighlighter):
             prev = prevBlock.text()
             prevAscii = str(prev.replace('\u2029','\n'))
             if self.offset == 0 and prevAscii.strip():
-                #print 'Its a header'
+                #print('Its a header')
                 prevCursor.select(QTextCursor.SelectionType.LineUnderCursor)
                 #prevCursor.setCharFormat(self.MARKDOWN_KWS_FORMAT['Header'])
                 formatRange = QTextLayout.FormatRange()
@@ -581,7 +581,7 @@ class TagsModel(QAbstractItemModel):  # {{{
             traceback.print_stack()
             return
         # traceback.print_stack()
-        # print ()
+        # print()
         self._build_in_progress = True
         self.beginResetModel()
         self._run_rebuild(state_map=state_map)
@@ -1284,7 +1284,7 @@ class TagsModel(QAbstractItemModel):  # {{{
         self.convert_requested.emit(book_ids, fmt)

     def handle_drop(self, on_node, ids):
-        # print 'Dropped ids:', ids, on_node.tag
+        # print('Dropped ids:', ids, on_node.tag)
         key = on_node.tag.category
         if key == 'formats':
             self.handle_drop_on_format(on_node.tag.name, ids)
@@ -59,7 +59,7 @@ class MetadataBackup(Thread):  # {{{
                 (id_, sequence) = self.db.get_a_dirtied_book()
                 if id_ is None:
                     continue
-                # print 'writer thread', id_, sequence
+                # print('writer thread', id_, sequence)
             except:
                 # Happens during interpreter shutdown
                 break
@@ -1354,7 +1354,7 @@ class CatalogBuilder:
         # massaged = re.sub("&amp;", "&", massaged)

         if massaged.strip() and dest:
-            # print traceback.print_stack(limit=3)
+            # print(traceback.print_stack(limit=3))
             return self.generate_short_description(massaged.strip(), dest=dest)
         else:
             return None
@@ -2032,7 +2032,7 @@ class CatalogBuilder:
         bookmarked_books = []
         for bm_book in self.bookmarked_books:
             book = self.bookmarked_books[bm_book]
-            # print 'bm_book: %s' % bm_book
+            # print('bm_book: %s' % bm_book)
             book[1]['bookmark_timestamp'] = book[0].timestamp
             try:
                 book[1]['percent_read'] = min(float(100 * book[0].last_read / book[0].book_length), 100)
@@ -2139,7 +2139,7 @@ class CatalogBuilder:
         master_genre_list = []
         for genre_tag_set in genre_list:
             for (index, genre) in enumerate(genre_tag_set):
-                # print 'genre: %s \t genre_tag_set[genre]: %s' % (genre, genre_tag_set[genre])
+                # print('genre: %s \t genre_tag_set[genre]: %s' % (genre, genre_tag_set[genre]))

                 # Create sorted_authors[0] = friendly, [1] = author_sort for NCX creation
                 authors = []
@@ -3396,7 +3396,7 @@ class CatalogBuilder:
         for book in self.books_by_date_range:
             book_time = datetime.datetime(book['timestamp'].year, book['timestamp'].month, book['timestamp'].day)
             if (today_time - book_time).days <= date_range_limit:
-                # print 'generate_ncx_by_date_added: %s added %d days ago' % (book['title'], (today_time-book_time).days)
+                # print('generate_ncx_by_date_added: %s added %d days ago' % (book['title'], (today_time-book_time).days))
                 current_titles_list.append(book['title'])
             else:
                 break
@@ -3505,7 +3505,7 @@ class CatalogBuilder:
         for book in self.bookmarked_books_by_date_read:
             bookmark_time = utcfromtimestamp(book['bookmark_timestamp'])
             if (today_time - bookmark_time).days <= date_range_limit:
-                # print 'generate_ncx_by_date_added: %s added %d days ago' % (book['title'], (today_time-book_time).days)
+                # print('generate_ncx_by_date_added: %s added %d days ago' % (book['title'], (today_time-book_time).days))
                 current_titles_list.append(book['title'])
             else:
                 break
@@ -839,9 +839,9 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
         '''
         with self.dirtied_lock:
             dc_sequence = self.dirtied_cache.get(book_id, None)
-            # print 'clear_dirty: check book', book_id, dc_sequence
+            # print('clear_dirty: check book', book_id, dc_sequence)
             if dc_sequence is None or sequence is None or dc_sequence == sequence:
-                # print 'needs to be cleaned'
+                # print('needs to be cleaned')
                 self.conn.execute('DELETE FROM metadata_dirtied WHERE book=?',
                                   (book_id,))
                 self.conn.commit()
@@ -850,7 +850,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
                 except:
                     pass
             elif dc_sequence is not None:
-                # print 'book needs to be done again'
+                # print('book needs to be done again')
                 pass

     def dump_metadata(self, book_ids=None, remove_from_dirtied=True,
@@ -908,12 +908,12 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
         self.update_last_modified(book_ids)
         for book in book_ids:
             with self.dirtied_lock:
-                # print 'dirtied: check id', book
+                # print('dirtied: check id', book)
                 if book in self.dirtied_cache:
                     self.dirtied_cache[book] = self.dirtied_sequence
                     self.dirtied_sequence += 1
                     continue
-                # print 'book not already dirty'
+                # print('book not already dirty')

                 self.conn.execute(
                     'INSERT OR IGNORE INTO metadata_dirtied (book) VALUES (?)',
@@ -964,7 +964,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
         # thread has not done the work between the put and the get_metadata
         with self.dirtied_lock:
             sequence = self.dirtied_cache.get(idx, None)
-            # print 'get_md_for_dump', idx, sequence
+            # print('get_md_for_dump', idx, sequence)
         try:
             # While a book is being created, the path is empty. Don't bother to
             # try to write the opf, because it will go to the wrong folder.
@@ -1864,7 +1864,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
                 md.append((category, cat['rec_index'],
                            cat['is_multiple'].get('cache_to_list', None),
                            cat['datatype'] == 'composite'))
-        # print 'end phase "collection":', time.clock() - last, 'seconds'
+        # print('end phase "collection":', time.clock() - last, 'seconds')
         # last = time.clock()

         # Now scan every book looking for category items.
@@ -1932,7 +1932,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
                         except:
                             prints('get_categories: item', val, 'is not in', cat, 'list!')

-        # print 'end phase "books":', time.clock() - last, 'seconds'
+        # print('end phase "books":', time.clock() - last, 'seconds')
        # last = time.clock()

         # Now do news
@@ -1953,7 +1953,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
             item.set_all(c=r[2], rt=r[2]*r[3], rc=r[2], id=r[0])
             tcategories['news'][r[1]] = item

-        # print 'end phase "news":', time.clock() - last, 'seconds'
+        # print('end phase "news":', time.clock() - last, 'seconds')
         # last = time.clock()

         # Build the real category list by iterating over the temporary copy
@@ -2038,7 +2038,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
                         use_sort_as_name=use_sort_as_name)
                     for r in items]

-        # print 'end phase "tags list":', time.clock() - last, 'seconds'
+        # print('end phase "tags list":', time.clock() - last, 'seconds')
         # last = time.clock()

         # Needed for legacy databases that have multiple ratings that
@@ -2181,8 +2181,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
                 icon_map['search'] = icon_map['search']
             categories['search'] = items

-        # print 'last phase ran in:', time.clock() - last, 'seconds'
-        # print 'get_categories ran in:', time.clock() - start, 'seconds'
+        # print('last phase ran in:', time.clock() - last, 'seconds')
+        # print('get_categories ran in:', time.clock() - start, 'seconds')

         return categories

@@ -38,9 +38,9 @@ class AllIpAddressesGetter(Thread):

     def run(self):
         global _all_ip_addresses
-        # print 'sleeping'
+        # print('sleeping')
         # time.sleep(15)
-        # print 'slept'
+        # print('slept')
         _all_ip_addresses = self.get_all_ips()


@@ -74,7 +74,7 @@ def _get_external_ip():
             break
         except:
             time.sleep(0.3)
-    # print 'ipaddr: %s' % ipaddr
+    # print('ipaddr: %s' % ipaddr)
     return ipaddr


@@ -330,7 +330,7 @@ class TouchscreenFeedTemplate(Template):
         navbar_t.append(navbar_tr)
         top_navbar = navbar_t
         bottom_navbar = copy.copy(navbar_t)
-        # print '\n%s\n' % etree.tostring(navbar_t, pretty_print=True)
+        # print('\n%s\n' % etree.tostring(navbar_t, pretty_print=True))

         # Build the page
         head = HEAD(TITLE(feed.title))
@@ -423,6 +423,6 @@ class TouchscreenNavBarTemplate(Template):
             navbar_tr.append(TD(attrs('article_next'),link))
         navbar_t.append(navbar_tr)
         navbar.append(navbar_t)
-        # print '\n%s\n' % etree.tostring(navbar, pretty_print=True)
+        # print('\n%s\n' % etree.tostring(navbar, pretty_print=True))

         self.root = HTML(head, BODY(navbar))