Get rid of more xrange

This commit is contained in:
Kovid Goyal 2019-03-13 15:44:45 +05:30
parent d1e30dfcac
commit 6b5c3bd7c9
No known key found for this signature in database
GPG Key ID: 06BC317B515ACE7C
21 changed files with 74 additions and 60 deletions

View File

@ -14,6 +14,7 @@ from operator import itemgetter
from calibre.library.field_metadata import fm_as_dict from calibre.library.field_metadata import fm_as_dict
from calibre.db.tests.base import BaseTest from calibre.db.tests.base import BaseTest
from polyglot.builtins import range
# Utils {{{ # Utils {{{
@ -162,11 +163,11 @@ class LegacyTest(BaseTest):
'comment', 'publisher', 'rating', 'series_index', 'tags', 'comment', 'publisher', 'rating', 'series_index', 'tags',
'timestamp', 'uuid', 'pubdate', 'ondevice', 'timestamp', 'uuid', 'pubdate', 'ondevice',
'metadata_last_modified', 'languages') 'metadata_last_modified', 'languages')
oldvals = {g:tuple(getattr(old, g)(x) for x in xrange(3)) + tuple(getattr(old, g)(x, True) for x in (1,2,3)) for g in getters} oldvals = {g:tuple(getattr(old, g)(x) for x in range(3)) + tuple(getattr(old, g)(x, True) for x in (1,2,3)) for g in getters}
old_rows = {tuple(r)[:5] for r in old} old_rows = {tuple(r)[:5] for r in old}
old.close() old.close()
db = self.init_legacy() db = self.init_legacy()
newvals = {g:tuple(getattr(db, g)(x) for x in xrange(3)) + tuple(getattr(db, g)(x, True) for x in (1,2,3)) for g in getters} newvals = {g:tuple(getattr(db, g)(x) for x in range(3)) + tuple(getattr(db, g)(x, True) for x in (1,2,3)) for g in getters}
new_rows = {tuple(r)[:5] for r in db} new_rows = {tuple(r)[:5] for r in db}
for x in (oldvals, newvals): for x in (oldvals, newvals):
x['tags'] = tuple(set(y.split(',')) if y else y for y in x['tags']) x['tags'] = tuple(set(y.split(',')) if y else y for y in x['tags'])

View File

@ -13,6 +13,7 @@ from time import time
from calibre.utils.date import utc_tz from calibre.utils.date import utc_tz
from calibre.db.tests.base import BaseTest from calibre.db.tests.base import BaseTest
from polyglot.builtins import range
class ReadingTest(BaseTest): class ReadingTest(BaseTest):
@ -198,18 +199,18 @@ class ReadingTest(BaseTest):
ae([3, 2, 1], cache.multisort([('identifiers', True), ae([3, 2, 1], cache.multisort([('identifiers', True),
('title', True)]), 'Subsort failed') ('title', True)]), 'Subsort failed')
from calibre.ebooks.metadata.book.base import Metadata from calibre.ebooks.metadata.book.base import Metadata
for i in xrange(7): for i in range(7):
cache.create_book_entry(Metadata('title%d' % i), apply_import_tags=False) cache.create_book_entry(Metadata('title%d' % i), apply_import_tags=False)
cache.create_custom_column('one', 'CC1', 'int', False) cache.create_custom_column('one', 'CC1', 'int', False)
cache.create_custom_column('two', 'CC2', 'int', False) cache.create_custom_column('two', 'CC2', 'int', False)
cache.create_custom_column('three', 'CC3', 'int', False) cache.create_custom_column('three', 'CC3', 'int', False)
cache.close() cache.close()
cache = self.init_cache() cache = self.init_cache()
cache.set_field('#one', {(i+(5*m)):m for m in (0, 1) for i in xrange(1, 6)}) cache.set_field('#one', {(i+(5*m)):m for m in (0, 1) for i in range(1, 6)})
cache.set_field('#two', {i+(m*3):m for m in (0, 1, 2) for i in (1, 2, 3)}) cache.set_field('#two', {i+(m*3):m for m in (0, 1, 2) for i in (1, 2, 3)})
cache.set_field('#two', {10:2}) cache.set_field('#two', {10:2})
cache.set_field('#three', {i:i for i in xrange(1, 11)}) cache.set_field('#three', {i:i for i in range(1, 11)})
ae(list(xrange(1, 11)), cache.multisort([('#one', True), ('#two', True)], ids_to_sort=sorted(cache.all_book_ids()))) ae(list(range(1, 11)), cache.multisort([('#one', True), ('#two', True)], ids_to_sort=sorted(cache.all_book_ids())))
ae([4, 5, 1, 2, 3, 7,8, 9, 10, 6], cache.multisort([('#one', True), ('#two', False)], ids_to_sort=sorted(cache.all_book_ids()))) ae([4, 5, 1, 2, 3, 7,8, 9, 10, 6], cache.multisort([('#one', True), ('#two', False)], ids_to_sort=sorted(cache.all_book_ids())))
ae([5, 4, 3, 2, 1, 10, 9, 8, 7, 6], cache.multisort([('#one', True), ('#two', False), ('#three', False)], ids_to_sort=sorted(cache.all_book_ids()))) ae([5, 4, 3, 2, 1, 10, 9, 8, 7, 6], cache.multisort([('#one', True), ('#two', False), ('#three', False)], ids_to_sort=sorted(cache.all_book_ids())))
# }}} # }}}
@ -220,7 +221,7 @@ class ReadingTest(BaseTest):
old = LibraryDatabase2(self.library_path) old = LibraryDatabase2(self.library_path)
old_metadata = {i:old.get_metadata( old_metadata = {i:old.get_metadata(
i, index_is_id=True, get_cover=True, cover_as_data=True) for i in i, index_is_id=True, get_cover=True, cover_as_data=True) for i in
xrange(1, 4)} range(1, 4)}
for mi in old_metadata.itervalues(): for mi in old_metadata.itervalues():
mi.format_metadata = dict(mi.format_metadata) mi.format_metadata = dict(mi.format_metadata)
if mi.formats: if mi.formats:
@ -231,7 +232,7 @@ class ReadingTest(BaseTest):
cache = self.init_cache(self.library_path) cache = self.init_cache(self.library_path)
new_metadata = {i:cache.get_metadata( new_metadata = {i:cache.get_metadata(
i, get_cover=True, cover_as_data=True) for i in xrange(1, 4)} i, get_cover=True, cover_as_data=True) for i in range(1, 4)}
cache = None cache = None
for mi2, mi1 in zip(new_metadata.values(), old_metadata.values()): for mi2, mi1 in zip(new_metadata.values(), old_metadata.values()):
self.compare_metadata(mi1, mi2) self.compare_metadata(mi1, mi2)
@ -245,7 +246,7 @@ class ReadingTest(BaseTest):
for d, l in ((json_dumps, json_loads), (msgpack_dumps, msgpack_loads)): for d, l in ((json_dumps, json_loads), (msgpack_dumps, msgpack_loads)):
fm2 = l(d(fm)) fm2 = l(d(fm))
self.assertEqual(fm_as_dict(fm), fm_as_dict(fm2)) self.assertEqual(fm_as_dict(fm), fm_as_dict(fm2))
for i in xrange(1, 4): for i in range(1, 4):
mi = cache.get_metadata(i, get_cover=True, cover_as_data=True) mi = cache.get_metadata(i, get_cover=True, cover_as_data=True)
rmi = msgpack_loads(msgpack_dumps(mi)) rmi = msgpack_loads(msgpack_dumps(mi))
self.compare_metadata(mi, rmi, exclude='format_metadata has_cover formats id'.split()) self.compare_metadata(mi, rmi, exclude='format_metadata has_cover formats id'.split())

File diff suppressed because one or more lines are too long

View File

@ -17,6 +17,7 @@ from calibre.ebooks.mobi.reader.headers import MetadataHeader
from calibre.utils.logging import default_log from calibre.utils.logging import default_log
from calibre import prints, fsync from calibre import prints, fsync
from calibre.constants import DEBUG from calibre.constants import DEBUG
from polyglot.builtins import range
class APNXBuilder(object): class APNXBuilder(object):
@ -267,7 +268,7 @@ class APNXBuilder(object):
p_char_count = 0 p_char_count = 0
# Every 30 lines is a new page # Every 30 lines is a new page
for i in xrange(0, len(lines), 32): for i in range(0, len(lines), 32):
pages.append(lines[i]) pages.append(lines[i])
return pages return pages

View File

@ -13,6 +13,7 @@ from calibre.constants import iswindows, islinux
from calibre.utils.icu import lower from calibre.utils.icu import lower
from calibre.devices.mtp.driver import MTP_DEVICE from calibre.devices.mtp.driver import MTP_DEVICE
from calibre.devices.scanner import DeviceScanner from calibre.devices.scanner import DeviceScanner
from polyglot.builtins import range
class ProgressCallback(object): class ProgressCallback(object):
@ -172,9 +173,9 @@ class TestDeviceInteraction(unittest.TestCase):
gc.disable() gc.disable()
try: try:
start_mem = memory() start_mem = memory()
for i in xrange(repetitions): for i in range(repetitions):
func(*args, **kwargs) func(*args, **kwargs)
for i in xrange(3): for i in range(3):
gc.collect() gc.collect()
end_mem = memory() end_mem = memory()
finally: finally:
@ -262,6 +263,6 @@ def tests():
def run(): def run():
unittest.TextTestRunner(verbosity=2).run(tests()) unittest.TextTestRunner(verbosity=2).run(tests())
if __name__ == '__main__': if __name__ == '__main__':
run() run()

View File

@ -9,6 +9,7 @@ from cStringIO import StringIO
from struct import pack from struct import pack
from calibre.constants import plugins from calibre.constants import plugins
from polyglot.builtins import range
cPalmdoc = plugins['cPalmdoc'][0] cPalmdoc = plugins['cPalmdoc'][0]
if not cPalmdoc: if not cPalmdoc:
raise RuntimeError(('Failed to load required cPalmdoc module: ' raise RuntimeError(('Failed to load required cPalmdoc module: '
@ -56,7 +57,7 @@ def py_compress_doc(data):
if i > 10 and (ldata - i) > 10: if i > 10 and (ldata - i) > 10:
chunk = '' chunk = ''
match = -1 match = -1
for j in xrange(10, 2, -1): for j in range(10, 2, -1):
chunk = data[i:i+j] chunk = data[i:i+j]
try: try:
match = data.rindex(chunk, 0, i) match = data.rindex(chunk, 0, i)
@ -97,4 +98,3 @@ def py_compress_doc(data):
out.write(''.join(binseq)) out.write(''.join(binseq))
i += len(binseq) - 1 i += len(binseq) - 1
return out.getvalue() return out.getvalue()

View File

@ -5,6 +5,7 @@ __copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en' __docformat__ = 'restructuredtext en'
import re import re
from polyglot.builtins import range
class TCRCompressor(object): class TCRCompressor(object):
@ -48,7 +49,7 @@ class TCRCompressor(object):
Look for codes that do not appear in the coded text and add them to Look for codes that do not appear in the coded text and add them to
the list of free codes. the list of free codes.
''' '''
for i in xrange(256): for i in range(256):
if i not in self.unused_codes: if i not in self.unused_codes:
if chr(i) not in self.coded_txt: if chr(i) not in self.coded_txt:
self.unused_codes.add(i) self.unused_codes.add(i)
@ -104,7 +105,7 @@ class TCRCompressor(object):
# Generate the code dictionary. # Generate the code dictionary.
code_dict = [] code_dict = []
for i in xrange(0, 256): for i in range(0, 256):
if i in self.unused_codes: if i in self.unused_codes:
code_dict.append(chr(0)) code_dict.append(chr(0))
else: else:
@ -122,7 +123,7 @@ def decompress(stream):
# Codes that the file contents are broken down into. # Codes that the file contents are broken down into.
entries = [] entries = []
for i in xrange(256): for i in range(256):
entry_len = ord(stream.read(1)) entry_len = ord(stream.read(1))
entries.append(stream.read(entry_len)) entries.append(stream.read(entry_len))

View File

@ -3,6 +3,8 @@
from __future__ import (unicode_literals, division, absolute_import, from __future__ import (unicode_literals, division, absolute_import,
print_function) print_function)
from polyglot.builtins import range
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '2011, Anthon van der Neut <A.van.der.Neut@ruamel.eu>' __copyright__ = '2011, Anthon van der Neut <A.van.der.Neut@ruamel.eu>'
@ -505,7 +507,7 @@ class BZZDecoder():
markerpos = -1 markerpos = -1
zc = lambda i: self.zpcodec_decode(self.ctx, i) zc = lambda i: self.zpcodec_decode(self.ctx, i)
dc = lambda i, bits: self.decode_binary(self.ctx, i, bits) dc = lambda i, bits: self.decode_binary(self.ctx, i, bits)
for i in xrange(self.xsize): for i in range(self.xsize):
ctxid = CTXIDS - 1 ctxid = CTXIDS - 1
if ctxid > mtfno: if ctxid > mtfno:
ctxid = mtfno ctxid = mtfno
@ -737,5 +739,6 @@ def main():
d = plugins['bzzdec'][0] d = plugins['bzzdec'][0]
print (d.decompress(raw)) print (d.decompress(raw))
if __name__ == "__main__": if __name__ == "__main__":
main() main()

View File

@ -7,6 +7,7 @@ __license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os import os
from polyglot.builtins import range
NBSP = '\xa0' NBSP = '\xa0'
@ -134,7 +135,7 @@ def cleanup_markup(log, root, styles, dest_dir, detect_cover, XPath):
# Process dir attributes # Process dir attributes
class_map = dict(styles.classes.itervalues()) class_map = dict(styles.classes.itervalues())
parents = ('p', 'div') + tuple('h%d' % i for i in xrange(1, 7)) parents = ('p', 'div') + tuple('h%d' % i for i in range(1, 7))
for parent in root.xpath('//*[(%s)]' % ' or '.join('name()="%s"' % t for t in parents)): for parent in root.xpath('//*[(%s)]' % ' or '.join('name()="%s"' % t for t in parents)):
# Ensure that children of rtl parents that are not rtl have an # Ensure that children of rtl parents that are not rtl have an
# explicit dir set. Also, remove dir from children if it is the same as # explicit dir set. Also, remove dir from children if it is the same as

View File

@ -14,7 +14,7 @@ from calibre.utils.filenames import ascii_filename
from calibre.utils.fonts.scanner import font_scanner, NoFonts from calibre.utils.fonts.scanner import font_scanner, NoFonts
from calibre.utils.fonts.utils import panose_to_css_generic_family, is_truetype_font from calibre.utils.fonts.utils import panose_to_css_generic_family, is_truetype_font
from calibre.utils.icu import ord_string from calibre.utils.icu import ord_string
from polyglot.builtins import codepoint_to_chr from polyglot.builtins import codepoint_to_chr, range
Embed = namedtuple('Embed', 'name key subsetted') Embed = namedtuple('Embed', 'name key subsetted')
@ -84,7 +84,7 @@ class Family(object):
for x in XPath('./w:panose1[@w:val]')(elem): for x in XPath('./w:panose1[@w:val]')(elem):
try: try:
v = get(x, 'w:val') v = get(x, 'w:val')
v = tuple(int(v[i:i+2], 16) for i in xrange(0, len(v), 2)) v = tuple(int(v[i:i+2], 16) for i in range(0, len(v), 2))
except (TypeError, ValueError, IndexError): except (TypeError, ValueError, IndexError):
pass pass
else: else:
@ -184,9 +184,9 @@ class Fonts(object):
prefix = raw[:32] prefix = raw[:32]
if ef.key: if ef.key:
key = re.sub(r'[^A-Fa-f0-9]', '', ef.key) key = re.sub(r'[^A-Fa-f0-9]', '', ef.key)
key = bytearray(reversed(tuple(int(key[i:i+2], 16) for i in xrange(0, len(key), 2)))) key = bytearray(reversed(tuple(int(key[i:i+2], 16) for i in range(0, len(key), 2))))
prefix = bytearray(prefix) prefix = bytearray(prefix)
prefix = bytes(bytearray(prefix[i]^key[i % len(key)] for i in xrange(len(prefix)))) prefix = bytes(bytearray(prefix[i]^key[i % len(key)] for i in range(len(prefix))))
if not is_truetype_font(prefix): if not is_truetype_font(prefix):
return None return None
ext = 'otf' if prefix.startswith(b'OTTO') else 'ttf' ext = 'otf' if prefix.startswith(b'OTTO') else 'ttf'

View File

@ -10,6 +10,7 @@ from lxml.html.builder import TABLE, TR, TD
from calibre.ebooks.docx.block_styles import inherit, read_shd as rs, read_border, binary_property, border_props, ParagraphStyle, border_to_css from calibre.ebooks.docx.block_styles import inherit, read_shd as rs, read_border, binary_property, border_props, ParagraphStyle, border_to_css
from calibre.ebooks.docx.char_styles import RunStyle from calibre.ebooks.docx.char_styles import RunStyle
from polyglot.builtins import range
# Read from XML {{{ # Read from XML {{{
read_shd = rs read_shd = rs
@ -570,7 +571,7 @@ class Table(object):
return return
# Handle vMerge # Handle vMerge
max_col_num = max(len(r) for r in self.cell_map) max_col_num = max(len(r) for r in self.cell_map)
for c in xrange(max_col_num): for c in range(max_col_num):
cells = [row[c] if c < len(row) else None for row in self.cell_map] cells = [row[c] if c < len(row) else None for row in self.cell_map]
runs = [[]] runs = [[]]
for cell in cells: for cell in cells:

View File

@ -13,7 +13,7 @@ from lxml.etree import tostring
from calibre.ebooks.metadata.toc import TOC from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.oeb.polish.toc import elem_to_toc_text from calibre.ebooks.oeb.polish.toc import elem_to_toc_text
from polyglot.builtins import unicode_type from polyglot.builtins import unicode_type, range
def from_headings(body, log, namespace): def from_headings(body, log, namespace):
@ -22,7 +22,7 @@ def from_headings(body, log, namespace):
headings = ('h1', 'h2', 'h3') headings = ('h1', 'h2', 'h3')
tocroot = TOC() tocroot = TOC()
xpaths = [XPath('//%s' % x) for x in headings] xpaths = [XPath('//%s' % x) for x in headings]
level_prev = {i+1:None for i in xrange(len(xpaths))} level_prev = {i+1:None for i in range(len(xpaths))}
level_prev[0] = tocroot level_prev[0] = tocroot
level_item_map = {i+1:frozenset(xp(body)) for i, xp in enumerate(xpaths)} level_item_map = {i+1:frozenset(xp(body)) for i, xp in enumerate(xpaths)}
item_level_map = {e:i for i, elems in level_item_map.iteritems() for e in elems} item_level_map = {e:i for i, elems in level_item_map.iteritems() for e in elems}
@ -49,7 +49,7 @@ def from_headings(body, log, namespace):
text = elem_to_toc_text(item) text = elem_to_toc_text(item)
toc = parent.add_item('index.html', elem_id, text) toc = parent.add_item('index.html', elem_id, text)
level_prev[lvl] = toc level_prev[lvl] = toc
for i in xrange(lvl+1, len(xpaths)+1): for i in range(lvl+1, len(xpaths)+1):
level_prev[i] = None level_prev[i] = None
if len(tuple(tocroot.flat())) > 1: if len(tuple(tocroot.flat())) > 1:
@ -79,7 +79,7 @@ def structure_toc(entries):
parent = find_parent(level) parent = find_parent(level)
last_found[level] = parent.add_item('index.html', item.anchor, last_found[level] = parent.add_item('index.html', item.anchor,
item.text) item.text)
for i in xrange(level+1, len(last_found)): for i in range(level+1, len(last_found)):
last_found[i] = None last_found[i] = None
return newtoc return newtoc

View File

@ -22,7 +22,7 @@ from calibre.ebooks.oeb.base import urlnormalize, xpath
from calibre.ebooks.oeb.reader import OEBReader from calibre.ebooks.oeb.reader import OEBReader
from calibre.ebooks import DRMError from calibre.ebooks import DRMError
from calibre import plugins from calibre import plugins
from polyglot.builtins import codepoint_to_chr, unicode_type, string_or_bytes from polyglot.builtins import codepoint_to_chr, unicode_type, string_or_bytes, range
lzx, lxzerror = plugins['lzx'] lzx, lxzerror = plugins['lzx']
msdes, msdeserror = plugins['msdes'] msdes, msdeserror = plugins['msdes']
@ -105,7 +105,7 @@ def read_utf8_char(bytes, pos):
if elsize + pos > len(bytes): if elsize + pos > len(bytes):
raise LitError('Invalid UTF8 character: %s' % repr(bytes[pos])) raise LitError('Invalid UTF8 character: %s' % repr(bytes[pos]))
c &= (mask - 1) c &= (mask - 1)
for i in xrange(1, elsize): for i in range(1, elsize):
b = ord(bytes[pos+i]) b = ord(bytes[pos+i])
if (b & 0xC0) != 0x80: if (b & 0xC0) != 0x80:
raise LitError( raise LitError(
@ -117,7 +117,7 @@ def read_utf8_char(bytes, pos):
def consume_sized_utf8_string(bytes, zpad=False): def consume_sized_utf8_string(bytes, zpad=False):
result = [] result = []
slen, pos = read_utf8_char(bytes, 0) slen, pos = read_utf8_char(bytes, 0)
for i in xrange(ord(slen)): for i in range(ord(slen)):
char, pos = read_utf8_char(bytes, pos) char, pos = read_utf8_char(bytes, pos)
result.append(char) result.append(char)
if zpad and bytes[pos] == '\000': if zpad and bytes[pos] == '\000':
@ -166,7 +166,7 @@ class UnBinary(object):
return target return target
target = target.split('/') target = target.split('/')
base = self.dir.split('/') base = self.dir.split('/')
for index in xrange(min(len(base), len(target))): for index in range(min(len(base), len(target))):
if base[index] != target[index]: if base[index] != target[index]:
break break
else: else:
@ -567,7 +567,7 @@ class LitFile(object):
def read_header_pieces(self): def read_header_pieces(self):
src = self.header[self.hdr_len:] src = self.header[self.hdr_len:]
for i in xrange(self.num_pieces): for i in range(self.num_pieces):
piece = src[i * self.PIECE_SIZE:(i + 1) * self.PIECE_SIZE] piece = src[i * self.PIECE_SIZE:(i + 1) * self.PIECE_SIZE]
if u32(piece[4:]) != 0 or u32(piece[12:]) != 0: if u32(piece[4:]) != 0 or u32(piece[12:]) != 0:
raise LitError('Piece %s has 64bit value' % repr(piece)) raise LitError('Piece %s has 64bit value' % repr(piece))
@ -597,7 +597,7 @@ class LitFile(object):
if (32 + (num_chunks * chunk_size)) != len(piece): if (32 + (num_chunks * chunk_size)) != len(piece):
raise LitError('IFCM header has incorrect length') raise LitError('IFCM header has incorrect length')
self.entries = {} self.entries = {}
for i in xrange(num_chunks): for i in range(num_chunks):
offset = 32 + (i * chunk_size) offset = 32 + (i * chunk_size)
chunk = piece[offset:offset + chunk_size] chunk = piece[offset:offset + chunk_size]
tag, chunk = chunk[:4], chunk[4:] tag, chunk = chunk[:4], chunk[4:]
@ -612,7 +612,7 @@ class LitFile(object):
# Hopefully will work even without a correct entries count # Hopefully will work even without a correct entries count
entries = (2 ** 16) - 1 entries = (2 ** 16) - 1
chunk = chunk[40:] chunk = chunk[40:]
for j in xrange(entries): for j in range(entries):
if remaining <= 0: if remaining <= 0:
break break
namelen, chunk, remaining = encint(chunk, remaining) namelen, chunk, remaining = encint(chunk, remaining)
@ -642,7 +642,7 @@ class LitFile(object):
num_sections = u16(raw[2:pos]) num_sections = u16(raw[2:pos])
self.section_names = [""] * num_sections self.section_names = [""] * num_sections
self.section_data = [None] * num_sections self.section_data = [None] * num_sections
for section in xrange(num_sections): for section in range(num_sections):
size = u16(raw[pos:pos+2]) size = u16(raw[pos:pos+2])
pos += 2 pos += 2
size = size*2 + 2 size = size*2 + 2
@ -669,7 +669,7 @@ class LitFile(object):
num_files, raw = int32(raw), raw[4:] num_files, raw = int32(raw), raw[4:]
if num_files == 0: if num_files == 0:
continue continue
for i in xrange(num_files): for i in range(num_files):
if len(raw) < 5: if len(raw) < 5:
raise LitError('Truncated manifest') raise LitError('Truncated manifest')
offset, raw = u32(raw), raw[4:] offset, raw = u32(raw), raw[4:]
@ -740,7 +740,7 @@ class LitFile(object):
hash.update(data) hash.update(data)
digest = hash.digest() digest = hash.digest()
key = [0] * 8 key = [0] * 8
for i in xrange(0, len(digest)): for i in range(0, len(digest)):
key[i % 8] ^= ord(digest[i]) key[i % 8] ^= ord(digest[i])
return ''.join(chr(x) for x in key) return ''.join(chr(x) for x in key)
@ -856,7 +856,7 @@ class LitFile(object):
data = self.get_file(name) data = self.get_file(name)
nentries, data = u32(data), data[4:] nentries, data = u32(data), data[4:]
tags = {} tags = {}
for i in xrange(1, nentries + 1): for i in range(1, nentries + 1):
if len(data) <= 1: if len(data) <= 1:
break break
size, data = ord(data[0]), data[1:] size, data = ord(data[0]), data[1:]
@ -869,7 +869,7 @@ class LitFile(object):
return (tags, {}) return (tags, {})
attrs = {} attrs = {}
nentries, data = u32(data), data[4:] nentries, data = u32(data), data[4:]
for i in xrange(1, nentries + 1): for i in range(1, nentries + 1):
if len(data) <= 4: if len(data) <= 4:
break break
size, data = u32(data), data[4:] size, data = u32(data), data[4:]

View File

@ -21,7 +21,7 @@ from calibre.ebooks.mobi.langcodes import iana2mobi
from calibre.utils.date import now as nowf from calibre.utils.date import now as nowf
from calibre.utils.imghdr import what from calibre.utils.imghdr import what
from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1 from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1
from polyglot.builtins import unicode_type from polyglot.builtins import unicode_type, range
def is_image(ss): def is_image(ss):
@ -163,7 +163,7 @@ class MetadataUpdater(object):
nitems, = unpack('>I', exth[8:12]) nitems, = unpack('>I', exth[8:12])
pos = 12 pos = 12
# Store any EXTH fields not specifiable in GUI # Store any EXTH fields not specifiable in GUI
for i in xrange(nitems): for i in range(nitems):
id, size = unpack('>II', exth[pos:pos + 8]) id, size = unpack('>II', exth[pos:pos + 8])
content = exth[pos + 8: pos + size] content = exth[pos + 8: pos + size]
pos += size pos += size
@ -295,7 +295,7 @@ class MetadataUpdater(object):
def get_pdbrecords(self): def get_pdbrecords(self):
pdbrecords = [] pdbrecords = []
for i in xrange(self.nrecs): for i in range(self.nrecs):
offset, a1,a2,a3,a4 = unpack('>LBBBB', self.data[78+i*8:78+i*8+8]) offset, a1,a2,a3,a4 = unpack('>LBBBB', self.data[78+i*8:78+i*8+8])
flags, val = a1, a2<<16|a3<<8|a4 flags, val = a1, a2<<16|a3<<8|a4
pdbrecords.append([offset, flags, val]) pdbrecords.append([offset, flags, val])
@ -312,7 +312,7 @@ class MetadataUpdater(object):
# Diagnostic # Diagnostic
print("MetadataUpdater.dump_pdbrecords()") print("MetadataUpdater.dump_pdbrecords()")
print("%10s %10s %10s" % ("offset","flags","val")) print("%10s %10s %10s" % ("offset","flags","val"))
for i in xrange(len(self.pdbrecords)): for i in range(len(self.pdbrecords)):
pdbrecord = self.pdbrecords[i] pdbrecord = self.pdbrecords[i]
print("%10X %10X %10X" % (pdbrecord[0], pdbrecord[1], pdbrecord[2])) print("%10X %10X %10X" % (pdbrecord[0], pdbrecord[1], pdbrecord[2]))

View File

@ -25,7 +25,7 @@ from calibre.utils.localization import get_lang, canonicalize_lang
from calibre import prints, guess_type from calibre import prints, guess_type
from calibre.utils.cleantext import clean_ascii_chars, clean_xml_chars from calibre.utils.cleantext import clean_ascii_chars, clean_xml_chars
from calibre.utils.config import tweaks from calibre.utils.config import tweaks
from polyglot.builtins import unicode_type from polyglot.builtins import unicode_type, range
pretty_print_opf = False pretty_print_opf = False
@ -715,7 +715,7 @@ class OPF(object): # {{{
def create_manifest_item(self, href, media_type, append=False): def create_manifest_item(self, href, media_type, append=False):
ids = [i.get('id', None) for i in self.itermanifest()] ids = [i.get('id', None) for i in self.itermanifest()]
id = None id = None
for c in xrange(1, sys.maxint): for c in range(1, sys.maxint):
id = 'id%d'%c id = 'id%d'%c
if id not in ids: if id not in ids:
break break

View File

@ -10,7 +10,7 @@ import sys, os
from lxml import etree from lxml import etree
from polyglot.builtins import unicode_type from polyglot.builtins import unicode_type, range
class Font(object): class Font(object):
@ -623,7 +623,7 @@ class PDFDocument(object):
self.opts, self.log = opts, log self.opts, self.log = opts, log
parser = etree.XMLParser(recover=True) parser = etree.XMLParser(recover=True)
self.root = etree.fromstring(xml, parser=parser) self.root = etree.fromstring(xml, parser=parser)
idc = iter(xrange(sys.maxint)) idc = iter(range(sys.maxint))
self.fonts = [] self.fonts = []
self.font_map = {} self.font_map = {}

View File

@ -14,6 +14,7 @@ from calibre.ebooks.rb import HEADER
from calibre.ebooks.rb import RocketBookError from calibre.ebooks.rb import RocketBookError
from calibre.ebooks.metadata.rb import get_metadata from calibre.ebooks.metadata.rb import get_metadata
from calibre.ebooks.metadata.opf2 import OPFCreator from calibre.ebooks.metadata.opf2 import OPFCreator
from polyglot.builtins import range
class RBToc(list): class RBToc(list):
@ -80,7 +81,7 @@ class Reader(object):
count = self.read_i32() count = self.read_i32()
self.read_i32() # Uncompressed size. self.read_i32() # Uncompressed size.
chunck_sizes = [] chunck_sizes = []
for i in xrange(count): for i in range(count):
chunck_sizes.append(self.read_i32()) chunck_sizes.append(self.read_i32())
for size in chunck_sizes: for size in chunck_sizes:

View File

@ -15,7 +15,7 @@ import os, re
from calibre.ebooks.rtf2xml import copy from calibre.ebooks.rtf2xml import copy
from calibre.utils.mreplace import MReplace from calibre.utils.mreplace import MReplace
from calibre.ptempfile import better_mktemp from calibre.ptempfile import better_mktemp
from polyglot.builtins import codepoint_to_chr from polyglot.builtins import codepoint_to_chr, range
class Tokenize: class Tokenize:
@ -44,7 +44,7 @@ class Tokenize:
self.__uc_bin = False self.__uc_bin = False
def __remove_uc_chars(self, startchar, token): def __remove_uc_chars(self, startchar, token):
for i in xrange(startchar, len(token)): for i in range(startchar, len(token)):
if self.__uc_char: if self.__uc_char:
self.__uc_char -= 1 self.__uc_char -= 1
else: else:

View File

@ -14,6 +14,7 @@ from calibre import guess_type
from calibre.srv.tests.base import BaseTest, TestServer from calibre.srv.tests.base import BaseTest, TestServer
from calibre.srv.utils import eintr_retry_call from calibre.srv.utils import eintr_retry_call
from calibre.utils.monotonic import monotonic from calibre.utils.monotonic import monotonic
from polyglot.builtins import range
is_ci = os.environ.get('CI', '').lower() == 'true' is_ci = os.environ.get('CI', '').lower() == 'true'
@ -271,11 +272,11 @@ class TestHTTP(BaseTest):
conn = server.connect() conn = server.connect()
# Test pipelining # Test pipelining
responses = [] responses = []
for i in xrange(10): for i in range(10):
conn._HTTPConnection__state = httplib._CS_IDLE conn._HTTPConnection__state = httplib._CS_IDLE
conn.request('GET', '/%d'%i) conn.request('GET', '/%d'%i)
responses.append(conn.response_class(conn.sock, strict=conn.strict, method=conn._method)) responses.append(conn.response_class(conn.sock, strict=conn.strict, method=conn._method))
for i in xrange(10): for i in range(10):
r = responses[i] r = responses[i]
r.begin() r.begin()
self.ae(r.read(), ('%d' % i).encode('ascii')) self.ae(r.read(), ('%d' % i).encode('ascii'))
@ -426,7 +427,7 @@ class TestHTTP(BaseTest):
def test_static_generation(self): # {{{ def test_static_generation(self): # {{{
'Test static generation' 'Test static generation'
nums = list(map(str, xrange(10))) nums = list(map(str, range(10)))
def handler(conn): def handler(conn):
return conn.generate_static_output('test', nums.pop) return conn.generate_static_output('test', nums.pop)
@ -435,7 +436,7 @@ class TestHTTP(BaseTest):
conn.request('GET', '/an_etagged_path') conn.request('GET', '/an_etagged_path')
r = conn.getresponse() r = conn.getresponse()
data = r.read() data = r.read()
for i in xrange(5): for i in range(5):
conn.request('GET', '/an_etagged_path') conn.request('GET', '/an_etagged_path')
r = conn.getresponse() r = conn.getresponse()
self.assertEqual(data, r.read()) self.assertEqual(data, r.read())

View File

@ -17,6 +17,7 @@ from calibre.srv.tests.base import BaseTest, TestServer
from calibre.ptempfile import TemporaryDirectory from calibre.ptempfile import TemporaryDirectory
from calibre.utils.certgen import create_server_cert from calibre.utils.certgen import create_server_cert
from calibre.utils.monotonic import monotonic from calibre.utils.monotonic import monotonic
from polyglot.builtins import range
is_ci = os.environ.get('CI', '').lower() == 'true' is_ci = os.environ.get('CI', '').lower() == 'true'
@ -167,7 +168,7 @@ class LoopTest(BaseTest):
self.ae(buf.read(1000), bytes(buf.ba)) self.ae(buf.read(1000), bytes(buf.ba))
self.ae(b'', buf.read(10)) self.ae(b'', buf.read(10))
self.ae(write(b'a'*10), 10) self.ae(write(b'a'*10), 10)
numbers = bytes(bytearray(xrange(10))) numbers = bytes(bytearray(range(10)))
set(numbers, 1, 3, READ) set(numbers, 1, 3, READ)
self.ae(buf.read(1), b'\x01') self.ae(buf.read(1), b'\x01')
self.ae(buf.read(10), b'\x02') self.ae(buf.read(10), b'\x02')

View File

@ -16,6 +16,7 @@ from calibre.srv.web_socket import (
PING, PONG, PROTOCOL_ERROR, CONTINUATION, INCONSISTENT_DATA, CONTROL_CODES) PING, PONG, PROTOCOL_ERROR, CONTINUATION, INCONSISTENT_DATA, CONTROL_CODES)
from calibre.utils.monotonic import monotonic from calibre.utils.monotonic import monotonic
from calibre.utils.socket_inheritance import set_socket_inherit from calibre.utils.socket_inheritance import set_socket_inherit
from polyglot.builtins import range
HANDSHAKE_STR = '''\ HANDSHAKE_STR = '''\
GET / HTTP/1.1\r GET / HTTP/1.1\r
@ -230,7 +231,7 @@ class WebSocketTest(BaseTest):
# connection before the client has finished sending all # connection before the client has finished sending all
# messages, so ignore failures to send packets. # messages, so ignore failures to send packets.
isf_test = partial(simple_test, ignore_send_failures=True) isf_test = partial(simple_test, ignore_send_failures=True)
for rsv in xrange(1, 7): for rsv in range(1, 7):
isf_test([{'rsv':rsv, 'opcode':BINARY}], [], close_code=PROTOCOL_ERROR, send_close=False) isf_test([{'rsv':rsv, 'opcode':BINARY}], [], close_code=PROTOCOL_ERROR, send_close=False)
for opcode in (3, 4, 5, 6, 7, 11, 12, 13, 14, 15): for opcode in (3, 4, 5, 6, 7, 11, 12, 13, 14, 15):
isf_test([{'opcode':opcode}], [], close_code=PROTOCOL_ERROR, send_close=False) isf_test([{'opcode':opcode}], [], close_code=PROTOCOL_ERROR, send_close=False)