upgrade hidden print to Python 3 (extra-edit)

un-pogaz 2025-01-24 11:14:16 +01:00
parent e0022f21cf
commit 12cb8b2e58
32 changed files with 117 additions and 117 deletions
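Every hunk below applies the same mechanical rewrite: a commented-out Python 2 print statement becomes a commented-out Python 3 print() call, so the hidden debug line stays valid syntax if it is ever uncommented. A minimal before/after sketch, using a debug line that appears in the KOBO driver hunks further down:

# Python 2 statement form - a SyntaxError if uncommented under Python 3
# print 'Normalized FileName: ' + path

# Python 3 function-call form - prints the same text on either interpreter
# print('Normalized FileName: ' + path)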

View File

@@ -53,7 +53,7 @@ class AM730(BasicNewsRecipe):
return self.masthead_url
def getAMSectionArticles(self, sectionName,url):
-# print sectionName
+# print(sectionName)
soup = self.index_to_soup(url)
articles = []
for aTag in soup.findAll('a',attrs={'class':'newsimglink'}):
@@ -67,7 +67,7 @@ class AM730(BasicNewsRecipe):
print(title)
try:
if articles.index({'title':title,'url':href})>=0:
-# print 'already added'
+# print('already added')
continue # already added
except:
pass

View File

@@ -66,7 +66,7 @@ class General(BasicNewsRecipe):
index = 'https://www.elpais.com.uy/impresa/'
soup = self.index_to_soup(index)
link_item = soup.find('a', attrs={'class': 'page-link link-module'})
-# print link_item
+# print(link_item)
if link_item:
cover_url = 'https://www.elpais.com.uy' + link_item.get('href')
return cover_url

View File

@@ -56,7 +56,7 @@ class MedScrape(BasicNewsRecipe):
# the original url is: http://www.medscape.com/viewarticle/728955?src=rss
# the print url is: http://www.medscape.com/viewarticle/728955_print
print_url = url.partition('?')[0] + '_print'
-# print 'the printable version is: ',print_url
+# print('the printable version is: ',print_url)
return print_url
def preprocess_html(self, soup):
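For reference, the print_version logic above relies on str.partition() discarding everything from the first '?' onward; run on the sample URL quoted in the comments it behaves like this:

url = 'http://www.medscape.com/viewarticle/728955?src=rss'
print_url = url.partition('?')[0] + '_print'
print(print_url)  # http://www.medscape.com/viewarticle/728955_print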

View File

@@ -66,7 +66,7 @@ class OurDailyBread(BasicNewsRecipe):
div.set('class', 'calibre-inserted-psalm')
hr = div.makeelement('hr')
div.insert(0, hr)
-# print html.tostring(div)
+# print(html.tostring(div))
raw = html.tostring(root, encoding='unicode')
return raw

View File

@@ -142,6 +142,6 @@ class PressePortalDE(BasicNewsRecipe):
# and split this at the symbol '.' (if there) : ('3120111',)
# from this take the first tuple. '3120111'
side = (url.rpartition('/')[-1]).rsplit('.', 1)[0]
-# print 'ConvertPrintURL from : ' + url + '/n to ' + side
+# print('ConvertPrintURL from : ' + url + '/n to ' + side)
printurl = 'http://www.presseportal.de/print/' + side + '-print.html'
return printurl
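Standalone, the id extraction above works like this; the '/pm/6344/' path segment is a made-up example, only the '3120111' id comes from the comments:

url = 'http://www.presseportal.de/pm/6344/3120111'  # hypothetical input URL
side = (url.rpartition('/')[-1]).rsplit('.', 1)[0]  # text after the last '/', any '.ext' stripped -> '3120111'
printurl = 'http://www.presseportal.de/print/' + side + '-print.html'
print(printurl)  # http://www.presseportal.de/print/3120111-print.html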

View File

@@ -45,5 +45,5 @@ class AdvancedUserRecipe1283848012(BasicNewsRecipe):
rg = re.compile(re1 + re2, re.IGNORECASE | re.DOTALL)
m = rg.search(txt)
if m:
-# print 'bad link'
+# print('bad link')
return 1

View File

@@ -61,11 +61,11 @@ class Bookmark: # {{{
bpl = bpar_offset + 4
bpar_len, = unpack('>I', data[bpl:bpl+4])
bpar_len += 8
-# print 'bpar_len: 0x%x' % bpar_len
+# print('bpar_len: 0x%x' % bpar_len)
eo = bpar_offset + bpar_len
# Walk bookmark entries
-# print ' --- %s --- ' % self.path
+# print(' --- %s --- ' % self.path)
current_entry = 1
sig = data[eo:eo+4]
previous_block = None
@@ -80,7 +80,7 @@ class Bookmark: # {{{
current_block = 'data_header'
# entry_type = "data_header"
location, = unpack('>I', data[eo+0x34:eo+0x38])
-# print 'data_header location: %d' % location
+# print('data_header location: %d' % location)
else:
current_block = 'text_block'
if previous_block == 'empty_data':
@@ -112,11 +112,11 @@ class Bookmark: # {{{
start, = unpack('>I', data[eo+8:eo+12])
user_notes[start] = user_notes[end_loc]
'''
-print " %s: swapping 0x%x (%d) to 0x%x (%d)" % (user_notes[end_loc]['type'],
+print(" %s: swapping 0x%x (%d) to 0x%x (%d)" % (user_notes[end_loc]['type'],
end_loc,
end_loc/MAGIC_MOBI_CONSTANT + 1,
start,
-start//MAGIC_MOBI_CONSTANT + 1)
+start//MAGIC_MOBI_CONSTANT + 1))
'''
user_notes[start]['displayed_location'] = start // MAGIC_MOBI_CONSTANT + 1
user_notes.pop(end_loc)
@@ -125,7 +125,7 @@ class Bookmark: # {{{
# be the same - cheat by nudging -1
# Skip bookmark for last_read_location
if end_loc != self.last_read:
-# print ' adding Bookmark at 0x%x (%d)' % (end_loc, end_loc/MAGIC_MOBI_CONSTANT + 1)
+# print(' adding Bookmark at 0x%x (%d)' % (end_loc, end_loc/MAGIC_MOBI_CONSTANT + 1))
displayed_location = end_loc // MAGIC_MOBI_CONSTANT + 1
user_notes[end_loc - 1] = dict(id=self.id,
displayed_location=displayed_location,
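The bookmark parser above repeatedly uses struct.unpack with the big-endian unsigned-int format '>I', plus a trailing comma to unpack the one-element result tuple; a self-contained sketch of that idiom with a made-up byte string:

from struct import unpack

data = b'\x00\x00\x01\x2c'   # four bytes, big-endian
value, = unpack('>I', data)  # trailing comma unpacks the 1-tuple
print(value)                 # 300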

View File

@@ -149,7 +149,7 @@ class Bookmark: # {{{
for row in cursor:
self.last_read = row['DateLastRead']
self.percent_read = 100 if (row['ReadStatus'] == 2) else row['___PercentRead']
-# print row[1]
+# print(row[1])
cursor.close()
# self.last_read_location = self.last_read - self.pdf_page_offset

View File

@@ -321,7 +321,7 @@ class KOBO(USBMS):
playlist_map[lpath].append('Recommendation')
path = self.normalize_path(path)
-# print 'Normalized FileName: ' + path
+# print('Normalized FileName: ' + path)
idx = bl_cache.get(lpath, None)
if idx is not None:
@@ -332,7 +332,7 @@ class KOBO(USBMS):
# Try the Touch version if the image does not exist
imagename = self.normalize_path(self._main_prefix + KOBO_ROOT_DIR_NAME + '/images/' + ImageID + ' - N3_LIBRARY_FULL.parsed')
-# print 'Image name Normalized: ' + imagename
+# print('Image name Normalized: ' + imagename)
if not os.path.exists(imagename):
debug_print('Strange - The image name does not exist - title: ', title)
if imagename is not None:
@@ -340,7 +340,7 @@ class KOBO(USBMS):
if (ContentType != '6' and MimeType != 'Shortcover'):
if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
if self.update_metadata_item(bl[idx]):
-# print 'update_metadata_item returned true'
+# print('update_metadata_item returned true')
changed = True
else:
debug_print(' Strange: The file: ', prefix, lpath, ' does not exist!')
@@ -364,7 +364,7 @@ class KOBO(USBMS):
'mime: ', mime, 'date: ', date, 'ContentType: ', ContentType, 'ImageID: ', ImageID)
raise
-# print 'Update booklist'
+# print('Update booklist')
book.device_collections = playlist_map.get(lpath,[]) # if lpath in playlist_map else []
if bl.add_book(book, replace_metadata=False):
@@ -452,8 +452,8 @@ class KOBO(USBMS):
need_sync = True
del bl[idx]
-# print 'count found in cache: %d, count of files in metadata: %d, need_sync: %s' % \
-# (len(bl_cache), len(bl), need_sync)
+# print('count found in cache: %d, count of files in metadata: %d, need_sync: %s' % \
+# (len(bl_cache), len(bl), need_sync))
if need_sync: # self.count_found_in_bl != len(bl) or need_sync:
if oncard == 'cardb':
self.sync_booklists((None, None, bl))
@@ -551,7 +551,7 @@ class KOBO(USBMS):
fpath = self.normalize_path(fpath)
if os.path.exists(fpath):
-# print 'Image File Exists: ' + fpath
+# print('Image File Exists: ' + fpath)
os.unlink(fpath)
def delete_books(self, paths, end_session=True):
@@ -561,33 +561,33 @@ class KOBO(USBMS):
for i, path in enumerate(paths):
self.report_progress((i+1) / float(len(paths)), _('Removing books from device...'))
path = self.normalize_path(path)
-# print 'Delete file normalized path: ' + path
+# print('Delete file normalized path: ' + path)
extension = os.path.splitext(path)[1]
ContentType = self.get_content_type_from_extension(extension) if extension else self.get_content_type_from_path(path)
ContentID = self.contentid_from_path(path, ContentType)
ImageID = self.delete_via_sql(ContentID, ContentType)
-# print ' We would now delete the Images for' + ImageID
+# print(' We would now delete the Images for' + ImageID)
self.delete_images(ImageID, path)
if os.path.exists(path):
# Delete the ebook
-# print 'Delete the ebook: ' + path
+# print('Delete the ebook: ' + path)
os.unlink(path)
filepath = os.path.splitext(path)[0]
for ext in self.DELETE_EXTS:
if os.path.exists(filepath + ext):
-# print 'Filename: ' + filename
+# print('Filename: ' + filename)
os.unlink(filepath + ext)
if os.path.exists(path + ext):
-# print 'Filename: ' + filename
+# print('Filename: ' + filename)
os.unlink(path + ext)
if self.SUPPORTS_SUB_DIRS:
try:
-# print 'removed'
+# print('removed')
os.removedirs(os.path.dirname(path))
except Exception:
pass
@@ -601,9 +601,9 @@ class KOBO(USBMS):
self.report_progress((i+1) / float(len(paths)), _('Removing books from device metadata listing...'))
for bl in booklists:
for book in bl:
-# print 'Book Path: ' + book.path
+# print('Book Path: ' + book.path)
if path.endswith(book.path):
-# print ' Remove: ' + book.path
+# print(' Remove: ' + book.path)
bl.remove_book(book)
self.report_progress(1.0, _('Removing books from device metadata listing...'))
@@ -634,12 +634,12 @@ class KOBO(USBMS):
prints('in add_books_to_metadata. Prefix is None!', path,
self._main_prefix)
continue
-# print 'Add book to metadata: '
-# print 'prefix: ' + prefix
+# print('Add book to metadata: ')
+# print('prefix: ' + prefix)
lpath = path.partition(prefix)[2]
if lpath.startswith('/') or lpath.startswith('\\'):
lpath = lpath[1:]
-# print 'path: ' + lpath
+# print('path: ' + lpath)
book = self.book_class(prefix, lpath, info.title, other=info)
if book.size is None or book.size == 0:
book.size = os.stat(self.normalize_path(path)).st_size
@@ -686,13 +686,13 @@ class KOBO(USBMS):
def get_content_type_from_extension(self, extension):
if extension == '.kobo':
# Kobo books do not have book files. They do have some images though
-# print 'kobo book'
+# print('kobo book')
ContentType = 6
elif extension == '.pdf' or extension == '.epub':
-# print 'ePub or pdf'
+# print('ePub or pdf')
ContentType = 16
elif extension == '.rtf' or extension == '.txt' or extension == '.htm' or extension == '.html':
-# print 'txt'
+# print('txt')
if self.fwversion == (1,0) or self.fwversion == (1,4) or self.fwversion == (1,7,4):
ContentType = 999
else:
@@ -708,14 +708,14 @@ class KOBO(USBMS):
print('path from_contentid cardb')
elif oncard == 'carda':
path = path.replace('file:///mnt/sd/', self._card_a_prefix)
-# print 'SD Card: ' + path
+# print('SD Card: ' + path)
else:
if ContentType == '6' and MimeType == 'Shortcover':
# This is a hack as the kobo files do not exist
# but the path is required to make a unique id
# for calibre's reference
path = self._main_prefix + path + '.kobo'
-# print 'Path: ' + path
+# print('Path: ' + path)
elif (ContentType == '6' or ContentType == '10') and (
MimeType == 'application/x-kobo-epub+zip' or (
MimeType == 'application/epub+zip' and self.isTolinoDevice())
@@ -724,12 +724,12 @@ class KOBO(USBMS):
path = self._main_prefix + path.replace('file:///mnt/onboard/', '')
else:
path = self._main_prefix + KOBO_ROOT_DIR_NAME + '/kepub/' + path
-# print 'Internal: ' + path
+# print('Internal: ' + path)
else:
# if path.startswith('file:///mnt/onboard/'):
path = path.replace('file:///mnt/onboard/', self._main_prefix)
path = path.replace('/mnt/onboard/', self._main_prefix)
-# print 'Internal: ' + path
+# print('Internal: ' + path)
return path
@@ -1820,7 +1820,7 @@ class KOBOTOUCH(KOBO):
debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map)
path = self.normalize_path(path)
-# print 'Normalized FileName: ' + path
+# print('Normalized FileName: ' + path)
# Collect the Kobo metadata
authors_list = [a.strip() for a in authors.split('&')] if authors is not None else [_('Unknown')]
@@ -1929,7 +1929,7 @@ class KOBOTOUCH(KOBO):
debug_print(' bookshelves:', bookshelves)
debug_print(' kobo_collections:', kobo_collections)
-# print 'Update booklist'
+# print('Update booklist')
book.device_collections = playlist_map.get(lpath,[]) # if lpath in playlist_map else []
book.current_shelves = bookshelves
book.kobo_collections = kobo_collections
@@ -2144,8 +2144,8 @@ class KOBOTOUCH(KOBO):
else:
debug_print("KoboTouch:books - Book in mtadata.calibre, on file system but not database - bl[idx].title:'%s'"%bl[idx].title)
-# print 'count found in cache: %d, count of files in metadata: %d, need_sync: %s' % \
-# (len(bl_cache), len(bl), need_sync)
+# print('count found in cache: %d, count of files in metadata: %d, need_sync: %s' % \
+# (len(bl_cache), len(bl), need_sync))
# Bypassing the KOBO sync_booklists as that does things we don't need to do
# Also forcing sync to see if this solves issues with updating shelves and matching books.
if need_sync or True: # self.count_found_in_bl != len(bl) or need_sync:
@@ -2208,7 +2208,7 @@ class KOBOTOUCH(KOBO):
path = path.replace('file:///mnt/onboard/', self._main_prefix)
path = path.replace('file:///mnt/sd/', self._card_a_prefix)
path = path.replace('/mnt/onboard/', self._main_prefix)
-# print 'Internal: ' + path
+# print('Internal: ' + path)
return path

View File

@@ -247,7 +247,7 @@ class USBMS(CLI, Device):
if idx is not None:
bl_cache[lpath] = None
if self.update_metadata_item(bl[idx]):
-# print 'update_metadata_item returned true'
+# print('update_metadata_item returned true')
changed = True
else:
if bl.add_book(self.book_from_path(prefix, lpath),

View File

@@ -141,14 +141,14 @@ class CHMReader(CHMFile):
else:
frag = None
name = self._deentity(li.object('param', {'name': 'Name'})[0]['value'])
-# print '========>', name
+# print('========>', name)
toc.add_item(href, frag, name, play_order=self._playorder)
self._playorder += 1
if li.ul:
child = self._parse_toc(li.ul)
child.parent = toc
toc.append(child)
-# print toc
+# print(toc)
return toc
def ResolveObject(self, path):

View File

@@ -628,7 +628,7 @@ class HeuristicProcessor:
def check_paragraph(self, content):
content = re.sub('\\s*</?span[^>]*>\\s*', '', content)
if re.match('.*["\'.!?:]$', content):
-# print 'detected this as a paragraph'
+# print('detected this as a paragraph')
return True
else:
return False
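check_paragraph() above strips span tags and then treats the text as a paragraph only if it ends in sentence punctuation; a quick standalone check with an illustrative string:

import re

content = '<span class="bold">He closed the door.</span>'
content = re.sub('\\s*</?span[^>]*>\\s*', '', content)  # -> 'He closed the door.'
print(bool(re.match('.*["\'.!?:]$', content)))          # True - ends with '.'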
@@ -641,9 +641,9 @@ class HeuristicProcessor:
html = re.sub('</?a[^>]*>', '', html)
def convert_styles(match):
-# print 'raw styles are: '+match.group('styles')
+# print('raw styles are: '+match.group('styles'))
content = match.group('content')
-# print 'raw content is: '+match.group('content')
+# print('raw content is: '+match.group('content'))
image = match.group('image')
is_paragraph = False
@@ -668,12 +668,12 @@ class HeuristicProcessor:
else:
styles = match.group('styles').split(';')
is_paragraph = self.check_paragraph(content)
-# print 'styles for this line are: '+str(styles)
+# print('styles for this line are: '+str(styles))
split_styles = []
for style in styles:
-# print 'style is: '+str(style)
+# print('style is: '+str(style))
newstyle = style.split(':')
-# print 'newstyle is: '+str(newstyle)
+# print('newstyle is: '+str(newstyle))
split_styles.append(newstyle)
styles = split_styles
for style, setting in styles:
@@ -710,18 +710,18 @@ class HeuristicProcessor:
self.log.debug('padding bottom is: ' + str(setting[2]))
self.log.debug('padding left is: ' +str(setting[3]))
-# print 'text-align is: '+str(text_align)
-# print '\n***\nline is:\n '+str(match.group(0))+'\n'
+# print('text-align is: '+str(text_align))
+# print('\n***\nline is:\n '+str(match.group(0))+'\n')
if debugabby:
-# print 'this line is a paragraph = '+str(is_paragraph)+', previous line was '+str(self.previous_was_paragraph)
+# print('this line is a paragraph = '+str(is_paragraph)+', previous line was '+str(self.previous_was_paragraph))
self.log.debug('styles for this line were:', styles)
self.log.debug('newline is:')
self.log.debug(blockquote_open_loop+blockquote_close_loop+
paragraph_before+'<p style="'+text_indent+text_align+
'">'+content+'</p>'+paragraph_after+'\n\n\n\n\n')
-# print 'is_paragraph is '+str(is_paragraph)+', previous_was_paragraph is '+str(self.previous_was_paragraph)
+# print('is_paragraph is '+str(is_paragraph)+', previous_was_paragraph is '+str(self.previous_was_paragraph))
self.previous_was_paragraph = is_paragraph
-# print 'previous_was_paragraph is now set to '+str(self.previous_was_paragraph)+'\n\n\n'
+# print('previous_was_paragraph is now set to '+str(self.previous_was_paragraph)+'\n\n\n')
return blockquote_open_loop+blockquote_close_loop+paragraph_before+'<p style="'+text_indent+text_align+'">'+content+'</p>'+paragraph_after
html = abbyy_line.sub(convert_styles, html)
@@ -772,7 +772,7 @@ class HeuristicProcessor:
self.line_close = '\\s*(</[ibu][^>]*>\\s*)?</(?P=outer)>'
# ADE doesn't render <br />, change to empty paragraphs
-# html = re.sub('<br[^>]*>', u'<p>\u00a0</p>', html)
+# html = re.sub('<br[^>]*>', '<p>\u00a0</p>', html)
# Determine whether the document uses interleaved blank lines
self.blanks_between_paragraphs = self.analyze_blanks(html)

View File

@@ -37,7 +37,7 @@ class DjvuChunk:
self.dataend = pos + self.size - (8 if inclheader else 0)
if self.type == b'FORM':
oldpos, pos = pos, pos+4
-# print oldpos, pos
+# print(oldpos, pos)
self.subtype = buf[oldpos:pos]
# self.headersize += 4
self.datastart = pos

View File

@@ -397,7 +397,7 @@ class LrfTag:
if p is None:
return
-# print ' Writing tag', self.name
+# print(' Writing tag', self.name)
for f in self.format:
if isinstance(f, dict):
p = f[p]
@@ -565,7 +565,7 @@ class LrfObject:
dotscode)))
def write(self, lrf, encoding=None):
-# print 'Writing object', self.name
+# print('Writing object', self.name)
LrfTag('ObjectStart', (self.objId, self.type)).write(lrf)
for tag in self.tags:

View File

@@ -1344,7 +1344,7 @@ class Page(LrsObject, LrsContainer):
if hasattr(content, 'getReferencedObjIds'):
pageContent.update(content.getReferencedObjIds())
-# print 'page contents:', pageContent
+# print('page contents:', pageContent)
# ObjectList not needed and causes slowdown in SONY LRF renderer
# p.appendLrfTag(LrfTag("ObjectList", pageContent))
p.appendLrfTag(LrfTag('Link', self.pageStyle.objId))
@@ -1620,7 +1620,7 @@ class Button(LrsObject, LrsContainer):
def toLrf(self, lrfWriter):
(refobj, refpage) = self.findJumpToRefs()
-# print 'Button writing JumpTo refobj=', jumpto.refobj, ', and refpage=', jumpto.refpage
+# print('Button writing JumpTo refobj=', jumpto.refobj, ', and refpage=', jumpto.refpage)
button = LrfObject('Button', self.objId)
button.appendLrfTag(LrfTag('buttonflags', 0x10)) # pushbutton
button.appendLrfTag(LrfTag('PushButtonStart'))

View File

@@ -439,7 +439,7 @@ def mi_to_html(
ans = ['<tr id="%s" class="%s">%s</tr>'%(fieldl.replace('#', '_'),
classname(fieldl), html) for fieldl, html in ans]
-# print '\n'.join(ans)
+# print('\n'.join(ans))
direction = 'rtl' if rtl else 'ltr'
rans = f'<table class="fields" style="direction: {direction}; '
if not for_qt:

View File

@@ -281,7 +281,7 @@ class MetadataUpdater:
offset += 1
self.md_header['num_recs'] = ord(self.data[offset:offset+1])
offset += 1
-# print 'self.md_header: %s' % self.md_header
+# print('self.md_header: %s' % self.md_header)
self.metadata = {}
self.md_seq = []

View File

@@ -108,7 +108,7 @@ class EXTHHeader: # {{{
except Exception:
pass
# else:
-# print 'unknown record', idx, repr(content)
+# print('unknown record', idx, repr(content))
if title:
self.mi.title = replace_entities(clean_xml_chars(clean_ascii_chars(title)))
@@ -184,7 +184,7 @@ class EXTHHeader: # {{{
if self.kf8_header == NULL_INDEX:
self.kf8_header = None
# else:
-# print 'unhandled metadata record', idx, repr(content)
+# print('unhandled metadata record', idx, repr(content))
# }}}

View File

@@ -188,7 +188,7 @@ class Hex2Utf8:
self.__dingbats_dict.update(ms_dingbats_dict)
# load dictionary for caps, and make a string for the replacement
self.__caps_uni_dict = char_map_obj.get_char_map(map='caps_uni')
-# # print self.__caps_uni_dict
+# # print(self.__caps_uni_dict)
# don't think I'll need this
# keys = self.__caps_uni_dict.keys()
# self.__caps_uni_replace = '|'.join(keys)
@@ -478,7 +478,7 @@ class Hex2Utf8:
if in caps, convert. Otherwise, print out.
'''
text = line[17:-1]
-# print line
+# print(line)
if self.__current_dict_name in ('Symbol', 'Wingdings', 'Zapf Dingbats'):
the_string = ''
for letter in text:
@@ -494,7 +494,7 @@ class Hex2Utf8:
else:
the_string += converted
self.__write_obj.write('tx<nu<__________<%s\n' % the_string)
-# print the_string
+# print(the_string)
else:
if self.__caps_list[-1] == 'true' \
and self.__convert_caps\

View File

@@ -34,15 +34,15 @@ class ParseOptions:
def __init__(self, system_string, options_dict):
self.__system_string = system_string[1:]
long_list = self.__make_long_list_func(options_dict)
-# # print long_list
+# # print(long_list)
short_list = self.__make_short_list_func(options_dict)
-# # print short_list
+# # print(short_list)
self.__legal_options = long_list + short_list
-# # print self.__legal_options
+# # print(self.__legal_options)
self.__short_long_dict = self.__make_short_long_dict_func(options_dict)
-# # print self.__short_long_dict
+# # print(self.__short_long_dict)
self.__opt_with_args = self.__make_options_with_arg_list(options_dict)
-# # print self.__opt_with_args
+# # print(self.__opt_with_args)
self.__options_okay = 1
def __make_long_list_func(self, options_dict):
@@ -256,16 +256,16 @@ class ParseOptions:
def parse_options(self):
self.__system_string = self.__sub_short_with_long()
-# # print 'subbed list is %s' % self.__system_string
+# # print('subbed list is %s' % self.__system_string)
self.__system_string = self.__pair_arg_with_option()
-# # print 'list with pairing is %s' % self.__system_string
+# # print('list with pairing is %s' % self.__system_string)
options, arguments = self.__get_just_options()
-# # print 'options are %s ' % options
-# # print 'arguments are %s ' % arguments
+# # print('options are %s ' % options)
+# # print('arguments are %s ' % arguments)
self.__is_legal_option_func()
if self.__options_okay:
options_dict = self.__make_options_dict(options)
-# # print options_dict
+# # print(options_dict)
return options_dict, arguments
else:
return 0,0

View File

@@ -552,7 +552,7 @@ class Table:
line = line_to_read
self.__token_info = line[:16]
action = self.__state_dict.get(self.__state[-1])
-# print self.__state[-1]
+# print(self.__state[-1])
if action is None:
sys.stderr.write('No matching state in module table.py\n')
sys.stderr.write(self.__state[-1] + '\n')

View File

@@ -204,7 +204,7 @@ class Tokenize:
# import sys
# def main(args=sys.argv):
# if len(args) < 2:
-# print 'No file'
+# print('No file')
# return
# file = 'data_tokens.txt'
# if len(args) == 3:

View File

@@ -1521,8 +1521,8 @@ class Editor(QWidget): # {{{
self.editor.html = v
def change_tab(self, index):
-# print 'reloading:', (index and self.wyswyg_dirty) or (not index and
-# self.source_dirty)
+# print('reloading:', (index and self.wyswyg_dirty) or (not index and
+# self.source_dirty))
if index == 1: # changing to code view
if self.wyswyg_dirty:
self.code_edit.setPlainText(self.editor.html)
@@ -1598,4 +1598,4 @@ if __name__ == '__main__':
i = 'file:///home/kovid/work/calibre/resources/images/'
w.html = f'<p>Testing <img src="{i}/donate.png"> img and another <img src="{i}/lt.png">file</p>'
app.exec()
-# print w.html
+# print(w.html)

View File

@@ -186,7 +186,7 @@ class MarkdownHighlighter(QSyntaxHighlighter):
prev = prevBlock.text()
prevAscii = str(prev.replace('\u2029','\n'))
if self.offset == 0 and prevAscii.strip():
-#print 'Its a header'
+#print('Its a header')
prevCursor.select(QTextCursor.SelectionType.LineUnderCursor)
#prevCursor.setCharFormat(self.MARKDOWN_KWS_FORMAT['Header'])
formatRange = QTextLayout.FormatRange()

View File

@@ -1284,7 +1284,7 @@ class TagsModel(QAbstractItemModel): # {{{
self.convert_requested.emit(book_ids, fmt)
def handle_drop(self, on_node, ids):
-# print 'Dropped ids:', ids, on_node.tag
+# print('Dropped ids:', ids, on_node.tag)
key = on_node.tag.category
if key == 'formats':
self.handle_drop_on_format(on_node.tag.name, ids)

View File

@@ -59,7 +59,7 @@ class MetadataBackup(Thread): # {{{
(id_, sequence) = self.db.get_a_dirtied_book()
if id_ is None:
continue
-# print 'writer thread', id_, sequence
+# print('writer thread', id_, sequence)
except:
# Happens during interpreter shutdown
break

View File

@@ -1354,7 +1354,7 @@ class CatalogBuilder:
# massaged = re.sub("&", "&#38;", massaged)
if massaged.strip() and dest:
-# print traceback.print_stack(limit=3)
+# print(traceback.print_stack(limit=3))
return self.generate_short_description(massaged.strip(), dest=dest)
else:
return None
@@ -2032,7 +2032,7 @@ class CatalogBuilder:
bookmarked_books = []
for bm_book in self.bookmarked_books:
book = self.bookmarked_books[bm_book]
-# print 'bm_book: %s' % bm_book
+# print('bm_book: %s' % bm_book)
book[1]['bookmark_timestamp'] = book[0].timestamp
try:
book[1]['percent_read'] = min(float(100 * book[0].last_read / book[0].book_length), 100)
@@ -2139,7 +2139,7 @@ class CatalogBuilder:
master_genre_list = []
for genre_tag_set in genre_list:
for (index, genre) in enumerate(genre_tag_set):
-# print 'genre: %s \t genre_tag_set[genre]: %s' % (genre, genre_tag_set[genre])
+# print('genre: %s \t genre_tag_set[genre]: %s' % (genre, genre_tag_set[genre]))
# Create sorted_authors[0] = friendly, [1] = author_sort for NCX creation
authors = []
@@ -3396,7 +3396,7 @@ class CatalogBuilder:
for book in self.books_by_date_range:
book_time = datetime.datetime(book['timestamp'].year, book['timestamp'].month, book['timestamp'].day)
if (today_time - book_time).days <= date_range_limit:
-# print 'generate_ncx_by_date_added: %s added %d days ago' % (book['title'], (today_time-book_time).days)
+# print('generate_ncx_by_date_added: %s added %d days ago' % (book['title'], (today_time-book_time).days))
current_titles_list.append(book['title'])
else:
break
@@ -3505,7 +3505,7 @@ class CatalogBuilder:
for book in self.bookmarked_books_by_date_read:
bookmark_time = utcfromtimestamp(book['bookmark_timestamp'])
if (today_time - bookmark_time).days <= date_range_limit:
-# print 'generate_ncx_by_date_added: %s added %d days ago' % (book['title'], (today_time-book_time).days)
+# print('generate_ncx_by_date_added: %s added %d days ago' % (book['title'], (today_time-book_time).days))
current_titles_list.append(book['title'])
else:
break

View File

@@ -839,9 +839,9 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
'''
with self.dirtied_lock:
dc_sequence = self.dirtied_cache.get(book_id, None)
-# print 'clear_dirty: check book', book_id, dc_sequence
+# print('clear_dirty: check book', book_id, dc_sequence)
if dc_sequence is None or sequence is None or dc_sequence == sequence:
-# print 'needs to be cleaned'
+# print('needs to be cleaned')
self.conn.execute('DELETE FROM metadata_dirtied WHERE book=?',
(book_id,))
self.conn.commit()
@@ -850,7 +850,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
except:
pass
elif dc_sequence is not None:
-# print 'book needs to be done again'
+# print('book needs to be done again')
pass
def dump_metadata(self, book_ids=None, remove_from_dirtied=True,
@@ -908,12 +908,12 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
self.update_last_modified(book_ids)
for book in book_ids:
with self.dirtied_lock:
-# print 'dirtied: check id', book
+# print('dirtied: check id', book)
if book in self.dirtied_cache:
self.dirtied_cache[book] = self.dirtied_sequence
self.dirtied_sequence += 1
continue
-# print 'book not already dirty'
+# print('book not already dirty')
self.conn.execute(
'INSERT OR IGNORE INTO metadata_dirtied (book) VALUES (?)',
@@ -964,7 +964,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
# thread has not done the work between the put and the get_metadata
with self.dirtied_lock:
sequence = self.dirtied_cache.get(idx, None)
-# print 'get_md_for_dump', idx, sequence
+# print('get_md_for_dump', idx, sequence)
try:
# While a book is being created, the path is empty. Don't bother to
# try to write the opf, because it will go to the wrong folder.
@@ -1864,7 +1864,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
md.append((category, cat['rec_index'],
cat['is_multiple'].get('cache_to_list', None),
cat['datatype'] == 'composite'))
-# print 'end phase "collection":', time.clock() - last, 'seconds'
+# print('end phase "collection":', time.clock() - last, 'seconds')
# last = time.clock()
# Now scan every book looking for category items.
@@ -1932,7 +1932,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
except:
prints('get_categories: item', val, 'is not in', cat, 'list!')
-# print 'end phase "books":', time.clock() - last, 'seconds'
+# print('end phase "books":', time.clock() - last, 'seconds')
# last = time.clock()
# Now do news
@@ -1953,7 +1953,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
item.set_all(c=r[2], rt=r[2]*r[3], rc=r[2], id=r[0])
tcategories['news'][r[1]] = item
-# print 'end phase "news":', time.clock() - last, 'seconds'
+# print('end phase "news":', time.clock() - last, 'seconds')
# last = time.clock()
# Build the real category list by iterating over the temporary copy
@@ -2038,7 +2038,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
use_sort_as_name=use_sort_as_name)
for r in items]
-# print 'end phase "tags list":', time.clock() - last, 'seconds'
+# print('end phase "tags list":', time.clock() - last, 'seconds')
# last = time.clock()
# Needed for legacy databases that have multiple ratings that
@@ -2181,8 +2181,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
icon_map['search'] = icon_map['search']
categories['search'] = items
-# print 'last phase ran in:', time.clock() - last, 'seconds'
-# print 'get_categories ran in:', time.clock() - start, 'seconds'
+# print('last phase ran in:', time.clock() - last, 'seconds')
+# print('get_categories ran in:', time.clock() - start, 'seconds')
return categories

View File

@@ -38,9 +38,9 @@ class AllIpAddressesGetter(Thread):
def run(self):
global _all_ip_addresses
-# print 'sleeping'
+# print('sleeping')
# time.sleep(15)
-# print 'slept'
+# print('slept')
_all_ip_addresses = self.get_all_ips()
@@ -74,7 +74,7 @@ def _get_external_ip():
break
except:
time.sleep(0.3)
-# print 'ipaddr: %s' % ipaddr
+# print('ipaddr: %s' % ipaddr)
return ipaddr

View File

@@ -330,7 +330,7 @@ class TouchscreenFeedTemplate(Template):
navbar_t.append(navbar_tr)
top_navbar = navbar_t
bottom_navbar = copy.copy(navbar_t)
-# print '\n%s\n' % etree.tostring(navbar_t, pretty_print=True)
+# print('\n%s\n' % etree.tostring(navbar_t, pretty_print=True))
# Build the page
head = HEAD(TITLE(feed.title))
@@ -423,6 +423,6 @@ class TouchscreenNavBarTemplate(Template):
navbar_tr.append(TD(attrs('article_next'),link))
navbar_t.append(navbar_tr)
navbar.append(navbar_t)
-# print '\n%s\n' % etree.tostring(navbar, pretty_print=True)
+# print('\n%s\n' % etree.tostring(navbar, pretty_print=True))
self.root = HTML(head, BODY(navbar))