Finish getting rid of xrange

Kovid Goyal 2019-03-13 18:44:05 +05:30
parent b4e467ea18
commit 9cf2e2f671
GPG Key ID: 06BC317B515ACE7C (no known key found for this signature in database)
35 changed files with 124 additions and 117 deletions
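
All of the hunks below follow the same pattern: drop Python 2's xrange and import range from calibre's polyglot.builtins compatibility layer, which resolves to the lazy built-in range on Python 3 and to xrange on Python 2. The sketch below shows the general shape of such a shim; it is an illustration of the idea, not the verbatim polyglot.builtins code.

```python
# Minimal sketch of a range shim in the spirit of polyglot.builtins
# (illustrative only; the real calibre module exports many more names).
import sys

if sys.version_info[0] >= 3:
    # Python 3: the builtin range is already the lazy sequence we want.
    range = range
else:
    # Python 2: alias the lazy xrange so callers never build a full list.
    range = xrange

# Call sites then read the same under both interpreters:
#     from polyglot.builtins import range
#     for i in range(count):
#         ...
```

With an alias like that in place, the mechanical rewrites below (xrange(...) to range(...)) need no further changes at the call sites.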

View File

@ -5,6 +5,8 @@ __copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
Microsoft LIT OPF tag and attribute tables, copied from ConvertLIT.
"""
from polyglot.builtins import range
TAGS = [
None,
"package",
@ -76,6 +78,6 @@ ATTRS = {
0x0016: "xml:lang",
}
TAGS_ATTRS = [{} for i in xrange(43)]
TAGS_ATTRS = [{} for i in range(43)]
MAP = (TAGS, ATTRS, TAGS_ATTRS)

View File

@ -14,6 +14,7 @@ from calibre.ebooks.mobi.reader.headers import NULL_INDEX
from calibre.ebooks.mobi.langcodes import main_language, sub_language
from calibre.ebooks.mobi.debug import format_bytes
from calibre.ebooks.mobi.utils import get_trailing_data
from polyglot.builtins import range
# PalmDB {{{
@ -224,7 +225,7 @@ class EXTHHeader(object):
pos = 12
self.records = []
for i in xrange(self.count):
for i in range(self.count):
pos = self.read_record(pos)
self.records.sort(key=lambda x:x.type)
self.rmap = {x.type:x for x in self.records}
@ -517,7 +518,7 @@ class MOBIFile(object):
self.record_headers = []
self.records = []
for i in xrange(self.palmdb.number_of_records):
for i in range(self.palmdb.number_of_records):
pos = 78 + i * 8
offset, a1, a2, a3, a4 = struct.unpack(b'>LBBBB', self.raw[pos:pos+8])
flags, val = a1, a2 << 16 | a3 << 8 | a4
@ -557,7 +558,7 @@ class MOBIFile(object):
from calibre.ebooks.mobi.huffcdic import HuffReader
def huffit(off, cnt):
huffman_record_nums = list(xrange(off, off+cnt))
huffman_record_nums = list(range(off, off+cnt))
huffrecs = [self.records[r].raw for r in huffman_record_nums]
huffs = HuffReader(huffrecs)
return huffman_record_nums, huffs.unpack
@ -616,5 +617,3 @@ class TextRecord(object): # {{{
return len(self.raw)
# }}}
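
One detail worth noting in hunks like `huffman_record_nums = list(xrange(off, off+cnt))` above: the explicit list() wrapper is kept after the switch, because the shimmed range stays lazy on both interpreters, while code that slices, concatenates, or reuses the numbers needs a real list. A small illustrative example (the polyglot import only resolves inside the calibre source tree):

```python
from polyglot.builtins import range  # resolves only inside the calibre tree

# Plain iteration needs no wrapper; the shimmed range is a lazy sequence.
for i in range(5):
    pass

# Concatenation with + and repeated passes need a materialised list,
# hence list(range(off, off + cnt)) in the hunk above.
nums = list(range(10, 13)) + list(range(20, 22))
assert nums == [10, 11, 12, 20, 21]
```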

View File

@ -15,6 +15,7 @@ from calibre.ebooks.mobi.reader.headers import NULL_INDEX
from calibre.ebooks.mobi.reader.index import (CNCX, parse_indx_header,
parse_tagx_section, parse_index_record, INDEX_HEADER_FIELDS)
from calibre.ebooks.mobi.reader.ncx import (tag_fieldname_map, default_entry)
from polyglot.builtins import range
File = namedtuple('File',
'file_number name divtbl_count start_position length')
@ -41,7 +42,7 @@ def read_variable_len_data(data, header):
tagx_block_size = header['tagx_block_size'] = struct.unpack_from(b'>I', data, offset + 4)[0]
header['tagx_block'] = data[offset:offset+tagx_block_size]
offset = idxt_offset + 4
for i in xrange(header['count']):
for i in range(header['count']):
p = struct.unpack_from(b'>H', data, offset)[0]
offset += 2
strlen = bytearray(data[p])[0]
@ -77,7 +78,7 @@ def read_index(sections, idx, codec):
read_variable_len_data(data, indx_header)
index_headers = []
for i in xrange(idx + 1, idx + 1 + indx_count):
for i in range(idx + 1, idx + 1 + indx_count):
# Index record
data = sections[i].raw
index_headers.append(parse_index_record(table, data, control_byte_count, tags, codec,

View File

@ -20,7 +20,7 @@ from calibre.ebooks.mobi.utils import (decode_hex_number, decint,
from calibre.utils.imghdr import what
from calibre.ebooks.mobi.debug import format_bytes
from calibre.ebooks.mobi.debug.headers import TextRecord
from polyglot.builtins import unicode_type
from polyglot.builtins import unicode_type, range
class TagX(object): # {{{
@ -744,7 +744,7 @@ class MOBIFile(object): # {{{
self.index_header.index_encoding)
self.index_record = IndexRecord(self.records[pir+1:pir+1+numi],
self.index_header, self.cncx)
self.indexing_record_nums = set(xrange(pir,
self.indexing_record_nums = set(range(pir,
pir+1+numi+self.index_header.num_of_cncx_blocks))
self.secondary_index_record = self.secondary_index_header = None
sir = self.mobi_header.secondary_index_record
@ -754,17 +754,17 @@ class MOBIFile(object): # {{{
self.indexing_record_nums.add(sir)
self.secondary_index_record = IndexRecord(
self.records[sir+1:sir+1+numi], self.secondary_index_header, self.cncx)
self.indexing_record_nums |= set(xrange(sir+1, sir+1+numi))
self.indexing_record_nums |= set(range(sir+1, sir+1+numi))
ntr = self.mobi_header.number_of_text_records
fii = self.mobi_header.first_image_index
self.text_records = [TextRecord(r, self.records[r],
self.mobi_header.extra_data_flags, mf.decompress6) for r in xrange(1,
self.mobi_header.extra_data_flags, mf.decompress6) for r in range(1,
min(len(self.records), ntr+1))]
self.image_records, self.binary_records = [], []
self.font_records = []
image_index = 0
for i in xrange(self.mobi_header.first_resource_record, min(self.mobi_header.last_resource_record, len(self.records))):
for i in range(self.mobi_header.first_resource_record, min(self.mobi_header.last_resource_record, len(self.records))):
if i in self.indexing_record_nums or i in self.huffman_record_nums:
continue
image_index += 1

View File

@ -12,13 +12,14 @@ from collections import OrderedDict, namedtuple
from calibre.ebooks.mobi.utils import (decint, count_set_bits,
decode_string)
from polyglot.builtins import range
TagX = namedtuple('TagX', 'tag num_of_values bitmask eof')
PTagX = namedtuple('PTagX', 'tag value_count value_bytes num_of_values')
INDEX_HEADER_FIELDS = (
'len', 'nul1', 'type', 'gen', 'start', 'count', 'code',
'lng', 'total', 'ordt', 'ligt', 'nligt', 'ncncx'
) + tuple('unknown%d'%i for i in xrange(27)) + ('ocnt', 'oentries',
) + tuple('unknown%d'%i for i in range(27)) + ('ocnt', 'oentries',
'ordt1', 'ordt2', 'tagx')
@ -73,7 +74,7 @@ def parse_indx_header(data):
# ascii character. If we cannot, we map to the ? char.
parsed = bytearray(ans['oentries'])
for i in xrange(0, 2*ans['oentries'], 2):
for i in range(0, 2*ans['oentries'], 2):
parsed[i//2] = raw[i+1] if 0x20 < raw[i+1] < 0x7f else ord(b'?')
ans['ordt_map'] = bytes(parsed).decode('ascii')
else:
@ -133,7 +134,7 @@ def parse_tagx_section(data):
first_entry_offset, = struct.unpack_from(b'>L', data, 4)
control_byte_count, = struct.unpack_from(b'>L', data, 8)
for i in xrange(12, first_entry_offset, 4):
for i in range(12, first_entry_offset, 4):
vals = list(bytearray(data[i:i+4]))
tags.append(TagX(*vals))
return control_byte_count, tags
@ -177,7 +178,7 @@ def get_tag_map(control_byte_count, tagx, data, strict=False):
values = []
if x.value_count is not None:
# Read value_count * values_per_entry variable width values.
for _ in xrange(x.value_count * x.num_of_values):
for _ in range(x.value_count * x.num_of_values):
byts, consumed = decint(data)
data = data[consumed:]
values.append(byts)
@ -220,7 +221,7 @@ def parse_index_record(table, data, control_byte_count, tags, codec,
# loop through to build up the IDXT position starts
idx_positions= []
for j in xrange(entry_count):
for j in range(entry_count):
pos, = struct.unpack_from(b'>H', data, idxt_pos + 4 + (2 * j))
idx_positions.append(pos)
# The last entry ends before the IDXT tag (but there might be zero fill
@ -229,7 +230,7 @@ def parse_index_record(table, data, control_byte_count, tags, codec,
# For each entry in the IDXT build up the tag map and any associated
# text
for j in xrange(entry_count):
for j in range(entry_count):
start, end = idx_positions[j:j+2]
rec = data[start:end]
# Sometimes (in the guide table if the type attribute has non ascii
@ -266,10 +267,9 @@ def read_index(sections, idx, codec):
tag_section_start = indx_header['tagx']
control_byte_count, tags = parse_tagx_section(data[tag_section_start:])
for i in xrange(idx + 1, idx + 1 + indx_count):
for i in range(idx + 1, idx + 1 + indx_count):
# Index record
data = sections[i][0]
parse_index_record(table, data, control_byte_count, tags, codec,
indx_header['ordt_map'])
return table, cncx

View File

@ -10,7 +10,7 @@ __docformat__ = 'restructuredtext en'
import re, os
from calibre.ebooks.chardet import strip_encoding_declarations
from polyglot.builtins import unicode_type
from polyglot.builtins import unicode_type, range
def update_internal_links(mobi8_reader, log):
@ -31,7 +31,7 @@ def update_internal_links(mobi8_reader, log):
parts = []
for part in mr.parts:
srcpieces = posfid_pattern.split(part)
for j in xrange(1, len(srcpieces), 2):
for j in range(1, len(srcpieces), 2):
tag = srcpieces[j]
if tag.startswith(b'<'):
for m in posfid_index_pattern.finditer(tag):
@ -69,7 +69,7 @@ def remove_kindlegen_markup(parts, aid_anchor_suffix, linked_aids):
re.IGNORECASE)
within_tag_aid_position_pattern = re.compile(r'''\s[ac]id\s*=['"]([^'"]*)['"]''')
for i in xrange(len(parts)):
for i in range(len(parts)):
part = parts[i]
srcpieces = find_tag_with_aid_pattern.split(part)
for j in range(len(srcpieces)):
@ -95,7 +95,7 @@ def remove_kindlegen_markup(parts, aid_anchor_suffix, linked_aids):
within_tag_AmznPageBreak_position_pattern = re.compile(
r'''\sdata-AmznPageBreak=['"]([^'"]*)['"]''')
for i in xrange(len(parts)):
for i in range(len(parts)):
part = parts[i]
srcpieces = find_tag_with_AmznPageBreak_pattern.split(part)
for j in range(len(srcpieces)):
@ -229,7 +229,7 @@ def insert_flows_into_markup(parts, flows, mobi8_reader, log):
# kindle:flow:XXXX?mime=YYYY/ZZZ (used for style sheets, svg images, etc)
tag_pattern = re.compile(r'''(<[^>]*>)''')
flow_pattern = re.compile(r'''['"]kindle:flow:([0-9|A-V]+)\?mime=([^'"]+)['"]''', re.IGNORECASE)
for i in xrange(len(parts)):
for i in range(len(parts)):
part = parts[i]
# flow pattern
@ -265,10 +265,10 @@ def insert_images_into_markup(parts, resource_map, log):
style_pattern = re.compile(r'''(<[a-zA-Z0-9]+\s[^>]*style\s*=\s*[^>]*>)''',
re.IGNORECASE)
for i in xrange(len(parts)):
for i in range(len(parts)):
part = parts[i]
srcpieces = img_pattern.split(part)
for j in xrange(1, len(srcpieces), 2):
for j in range(1, len(srcpieces), 2):
tag = srcpieces[j]
if tag.startswith('<im'):
for m in img_index_pattern.finditer(tag):
@ -286,10 +286,10 @@ def insert_images_into_markup(parts, resource_map, log):
parts[i] = part
# Replace urls used in style attributes
for i in xrange(len(parts)):
for i in range(len(parts)):
part = parts[i]
srcpieces = style_pattern.split(part)
for j in xrange(1, len(srcpieces), 2):
for j in range(1, len(srcpieces), 2):
tag = srcpieces[j]
if 'kindle:embed' in tag:
for m in img_index_pattern.finditer(tag):
@ -312,7 +312,7 @@ def insert_images_into_markup(parts, resource_map, log):
def upshift_markup(parts):
tag_pattern = re.compile(r'''(<(?:svg)[^>]*>)''', re.IGNORECASE)
for i in xrange(len(parts)):
for i in range(len(parts)):
part = parts[i]
# tag pattern

View File

@ -23,7 +23,7 @@ from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.mobi.reader.headers import BookHeader
from calibre.utils.img import save_cover_data_to
from calibre.utils.imghdr import what
from polyglot.builtins import unicode_type
from polyglot.builtins import unicode_type, range
class TopazError(ValueError):
@ -784,7 +784,7 @@ class MobiReader(object):
def extract_text(self, offset=1):
self.log.debug('Extracting text...')
text_sections = [self.text_section(i) for i in xrange(offset,
text_sections = [self.text_section(i) for i in range(offset,
min(self.book_header.records + offset, len(self.sections)))]
processed_records = list(range(offset-1, self.book_header.records +
offset))
@ -793,9 +793,9 @@ class MobiReader(object):
if self.book_header.compression_type == 'DH':
huffs = [self.sections[i][0] for i in
xrange(self.book_header.huff_offset,
range(self.book_header.huff_offset,
self.book_header.huff_offset + self.book_header.huff_number)]
processed_records += list(xrange(self.book_header.huff_offset,
processed_records += list(range(self.book_header.huff_offset,
self.book_header.huff_offset + self.book_header.huff_number))
huff = HuffReader(huffs)
unpack = huff.unpack

View File

@ -25,6 +25,7 @@ from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.mobi.utils import read_font_record
from calibre.ebooks.oeb.parse_utils import parse_html
from calibre.ebooks.oeb.base import XPath, XHTML, xml2text
from polyglot.builtins import range
Part = namedtuple('Part',
'num type filename start end aid')
@ -193,7 +194,7 @@ class Mobi8Reader(object):
baseptr = skelpos + skellen
skeleton = text[skelpos:baseptr]
inspos_warned = False
for i in xrange(divcnt):
for i in range(divcnt):
insertpos, idtext, filenum, seqnum, startpos, length = \
self.elems[divptr]
if i == 0:
@ -253,7 +254,7 @@ class Mobi8Reader(object):
self.flowinfo.append(FlowInfo(None, None, None, None))
svg_tag_pattern = re.compile(br'''(<svg[^>]*>)''', re.IGNORECASE)
image_tag_pattern = re.compile(br'''(<(?:svg:)?image[^>]*>)''', re.IGNORECASE)
for j in xrange(1, len(self.flows)):
for j in range(1, len(self.flows)):
flowpart = self.flows[j]
nstr = '%04d' % j
m = svg_tag_pattern.search(flowpart)

View File

@ -14,6 +14,7 @@ from collections import OrderedDict, defaultdict
from calibre.ebooks.mobi.utils import (encint, encode_number_as_hex,
encode_tbs, align_block, RECORD_SIZE, CNCX as CNCX_)
from polyglot.builtins import range
class CNCX(CNCX_): # {{{
@ -844,7 +845,7 @@ class Indexer(object): # {{{
deepest = max(i.depth for i in self.indices)
for i in xrange(self.number_of_text_records):
for i in range(self.number_of_text_records):
offset = i * RECORD_SIZE
next_offset = offset + RECORD_SIZE
data = {'ends':[], 'completes':[], 'starts':[],
@ -890,5 +891,3 @@ class Indexer(object): # {{{
# }}}
# }}}

View File

@ -20,7 +20,7 @@ from calibre.ebooks.mobi.writer2 import (PALMDOC, UNCOMPRESSED)
from calibre.ebooks.mobi.utils import (encint, encode_trailing_data,
align_block, detect_periodical, RECORD_SIZE, create_text_record)
from calibre.ebooks.mobi.writer2.indexer import Indexer
from polyglot.builtins import unicode_type
from polyglot.builtins import unicode_type, range
# Disabled as I dont care about uncrossable breaks
WRITE_UNCROSSABLE_BREAKS = False
@ -106,7 +106,7 @@ class MobiWriter(object):
self.log.exception('Failed to generate MOBI index:')
else:
self.primary_index_record_idx = len(self.records)
for i in xrange(self.last_text_record_idx + 1):
for i in range(self.last_text_record_idx + 1):
if i == 0:
continue
tbs = self.indexer.get_trailing_byte_sequence(i)
@ -125,7 +125,7 @@ class MobiWriter(object):
breaks = self.serializer.breaks
for i in xrange(1, self.last_text_record_idx+1):
for i in range(1, self.last_text_record_idx+1):
offset = i * RECORD_SIZE
pbreak = 0
running = offset

View File

@ -11,7 +11,7 @@ __docformat__ = 'restructuredtext en'
from collections import namedtuple
from struct import pack
from io import BytesIO
from polyglot.builtins import unicode_type, zip
from polyglot.builtins import unicode_type, zip, range
from calibre.ebooks.mobi.utils import CNCX, encint, align_block
from calibre.ebooks.mobi.writer8.header import Header
@ -380,7 +380,7 @@ if __name__ == '__main__':
# calibre and kindlegen and compare the output
import os, subprocess
os.chdir('/t')
paras = ['<p>%d</p>' % i for i in xrange(4000)]
paras = ['<p>%d</p>' % i for i in range(4000)]
raw = '<html><body>' + '\n\n'.join(paras) + '</body></html>'
src = 'index.html'

View File

@ -11,8 +11,9 @@ from functools import partial
from contextlib import closing
from calibre import detect_ncpus as cpu_count
from polyglot.builtins import range
DEBUG, INFO, WARN, ERROR, CRITICAL = xrange(5)
DEBUG, INFO, WARN, ERROR, CRITICAL = range(5)
class BaseError(object):
@ -55,4 +56,3 @@ def run_checkers(func, args_list):
raise Exception('Failed to run worker: \n%s' % tb)
ans.extend(result)
return ans
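
The `DEBUG, INFO, WARN, ERROR, CRITICAL = range(5)` hunk above is one of the few non-loop uses: tuple unpacking also works with the lazy range, since unpacking only needs an iterable of the right length. A quick sketch (the import path is calibre-specific):

```python
from polyglot.builtins import range  # calibre-specific import path

# Unpacking iterates the range once; no list() is required even though
# the shimmed range is not a list on either interpreter.
DEBUG, INFO, WARN, ERROR, CRITICAL = range(5)
assert (DEBUG, CRITICAL) == (0, 4)
```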

View File

@ -21,6 +21,7 @@ from calibre.ebooks.oeb.polish.replace import remove_links_to
from calibre.ebooks.oeb.polish.cover import get_raster_cover_name
from calibre.ebooks.oeb.polish.utils import guess_type, actual_case_for_name, corrected_case_for_name
from calibre.ebooks.oeb.polish.check.base import BaseError, WARN, INFO
from polyglot.builtins import range
class BadLink(BaseError):
@ -445,7 +446,7 @@ def check_external_links(container, progress_callback=(lambda num, total:None),
done.append(None)
progress_callback(len(done), len(external_links))
workers = [Thread(name="CheckLinks", target=check_links) for i in xrange(min(10, len(external_links)))]
workers = [Thread(name="CheckLinks", target=check_links) for i in range(min(10, len(external_links)))]
for w in workers:
w.daemon = True
w.start()

View File

@ -10,6 +10,7 @@ from threading import Thread, Event
from Queue import Queue, Empty
from calibre import detect_ncpus, human_readable, force_unicode, filesystem_encoding
from polyglot.builtins import range
class Worker(Thread):
@ -88,7 +89,7 @@ def compress_images(container, report=None, names=None, jpeg_quality=None, progr
if not keep_going:
abort.set()
progress_callback(0, len(images), '')
[Worker(abort, 'CompressImage%d' % i, queue, results, container, jpeg_quality, pc) for i in xrange(min(detect_ncpus(), len(images)))]
[Worker(abort, 'CompressImage%d' % i, queue, results, container, jpeg_quality, pc) for i in range(min(detect_ncpus(), len(images)))]
queue.join()
before_total = after_total = 0
changed = False

View File

@ -7,7 +7,7 @@ __license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import copy, os, re
from polyglot.builtins import map, string_or_bytes
from polyglot.builtins import map, string_or_bytes, range
from urlparse import urlparse
from calibre.ebooks.oeb.base import barename, XPNSMAP, XPath, OPF, XHTML, OEB_DOCS
@ -286,7 +286,7 @@ def multisplit(container, name, xpath, before=True):
current = name
all_names = [name]
for i in xrange(len(nodes)):
for i in range(len(nodes)):
current = split(container, current, '//*[@calibre-split-point="%d"]' % i, before=before)
all_names.append(current)

View File

@ -18,6 +18,7 @@ from calibre.ebooks.oeb.polish.cascade import iterrules, resolve_styles, iterdec
from calibre.utils.icu import ord_string, safe_chr
from polyglot.builtins import unicode_type
from tinycss.fonts3 import parse_font_family
from polyglot.builtins import range
def normalize_font_properties(font):
@ -92,10 +93,10 @@ def get_matching_rules(rules, font):
elif fw == 500:
q = [500, 400, 300, 200, 100, 600, 700, 800, 900]
elif fw < 400:
q = [fw] + list(xrange(fw-100, -100, -100)) + list(xrange(fw+100,
q = [fw] + list(range(fw-100, -100, -100)) + list(range(fw+100,
100, 1000))
else:
q = [fw] + list(xrange(fw+100, 100, 1000)) + list(xrange(fw-100,
q = [fw] + list(range(fw+100, 100, 1000)) + list(range(fw-100,
-100, -100))
for wt in q:
m = [f for f in matches if f['weight'] == wt]

View File

@ -15,6 +15,7 @@ from calibre.ebooks.oeb.polish.tests.base import BaseTest
from calibre.ebooks.oeb.polish.parsing import parse_html5 as parse
from calibre.ebooks.oeb.base import XPath, XHTML_NS, SVG_NS, XLINK_NS
from calibre.ebooks.oeb.parse_utils import html5_parse
from polyglot.builtins import range
def nonvoid_cdata_elements(test, parse_function):
@ -214,7 +215,7 @@ def timing():
for name, f in (('calibre', partial(parse, line_numbers=False)), ('html5lib', vanilla), ('calibre-old', html5_parse)):
timings = []
for i in xrange(10):
for i in range(10):
st = monotonic()
f(raw)
timings.append(monotonic() - st)

View File

@ -20,7 +20,7 @@ from calibre.ebooks.epub import rules
from calibre.ebooks.oeb.base import (OEB_STYLES, XPNSMAP as NAMESPACES,
urldefrag, rewrite_links, urlunquote, XHTML, urlnormalize)
from calibre.ebooks.oeb.polish.split import do_split
from polyglot.builtins import unicode_type
from polyglot.builtins import unicode_type, range
from css_selectors import Select, SelectorError
XPath = functools.partial(_XPath, namespaces=NAMESPACES)
@ -245,7 +245,7 @@ class FlowSplitter(object):
while ordered_ids:
pb_id, (pattern, before) = ordered_ids.iteritems().next()
del ordered_ids[pb_id]
for i in xrange(len(self.trees)-1, -1, -1):
for i in range(len(self.trees)-1, -1, -1):
tree = self.trees[i]
elem = pattern(tree)
if elem:

View File

@ -11,7 +11,7 @@ from collections import defaultdict
from calibre.ebooks.oeb.base import urlnormalize
from calibre.utils.fonts.sfnt.subset import subset, NoGlyphs, UnsupportedFont
from polyglot.builtins import unicode_type
from polyglot.builtins import unicode_type, range
from tinycss.fonts3 import parse_font_family
@ -287,10 +287,10 @@ class SubsetFonts(object):
elif fw == 500:
q = [500, 400, 300, 200, 100, 600, 700, 800, 900]
elif fw < 400:
q = [fw] + list(xrange(fw-100, -100, -100)) + list(xrange(fw+100,
q = [fw] + list(range(fw-100, -100, -100)) + list(range(fw+100,
100, 1000))
else:
q = [fw] + list(xrange(fw+100, 100, 1000)) + list(xrange(fw-100,
q = [fw] + list(range(fw+100, 100, 1000)) + list(range(fw-100,
-100, -100))
for wt in q:
matches = [f for f in matching_set if f['weight'] == wt]

View File

@ -13,7 +13,7 @@ from calibre.db.search import _match, CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH
from calibre.utils.config_base import prefs
from calibre.utils.icu import sort_key
from calibre.utils.search_query_parser import SearchQueryParser
from polyglot.builtins import unicode_type
from polyglot.builtins import unicode_type, range
class Matches(QAbstractItemModel):
@ -56,19 +56,19 @@ class Matches(QAbstractItemModel):
self.sort(self.sort_col, self.sort_order)
def enable_all(self):
for i in xrange(len(self.matches)):
for i in range(len(self.matches)):
index = self.createIndex(i, 0)
data = (True)
self.setData(index, data, Qt.CheckStateRole)
def enable_none(self):
for i in xrange(len(self.matches)):
for i in range(len(self.matches)):
index = self.createIndex(i, 0)
data = (False)
self.setData(index, data, Qt.CheckStateRole)
def enable_invert(self):
for i in xrange(len(self.matches)):
for i in range(len(self.matches)):
self.toggle_plugin(self.createIndex(i, 0))
def toggle_plugin(self, index):

View File

@ -13,6 +13,7 @@ from PyQt5.Qt import (Qt, QTreeView, QSize, QMenu)
from calibre.customize.ui import store_plugins
from calibre.gui2.metadata.single_download import RichTextDelegate
from calibre.gui2.store.config.chooser.models import Matches
from polyglot.builtins import range
class ResultsView(QTreeView):
@ -30,7 +31,7 @@ class ResultsView(QTreeView):
for i in self._model.HTML_COLS:
self.setItemDelegateForColumn(i, self.rt_delegate)
for i in xrange(self._model.columnCount()):
for i in range(self._model.columnCount()):
self.resizeColumnToContents(i)
self.model().sort(1, Qt.AscendingOrder)

View File

@ -14,6 +14,7 @@ from Queue import Queue
from calibre import browser
from calibre.constants import DEBUG
from calibre.utils.img import scale_image
from polyglot.builtins import range
class GenericDownloadThreadPool(object):
@ -44,7 +45,7 @@ class GenericDownloadThreadPool(object):
starts any threads necessary to fill the pool if it is
not already full.
'''
for i in xrange(self.thread_count - self.running_threads_count()):
for i in range(self.thread_count - self.running_threads_count()):
t = self.thread_type(self.tasks, self.results)
self.threads.append(t)
t.start()

View File

@ -14,6 +14,7 @@ from calibre.gui2.tweak_book import tprefs
from calibre.gui2.tweak_book.editor.text import get_highlighter as calibre_highlighter, SyntaxHighlighter
from calibre.gui2.tweak_book.editor.themes import get_theme, highlight_to_char_format
from calibre.gui2.tweak_book.editor.syntax.utils import format_for_pygments_token, NULL_FMT
from polyglot.builtins import range
class QtHighlighter(QTextDocument):
@ -59,7 +60,7 @@ class NullHighlighter(object):
self.lines = text.splitlines()
def copy_lines(self, lo, hi, cursor):
for i in xrange(lo, hi):
for i in range(lo, hi):
cursor.insertText(self.lines[i])
cursor.insertBlock()
@ -101,7 +102,7 @@ class PygmentsHighlighter(object):
continue
def copy_lines(self, lo, hi, cursor):
for i in xrange(lo, hi):
for i in range(lo, hi):
for fmt, text in self.lines[i]:
cursor.insertText(text, fmt)
cursor.setCharFormat(NULL_FMT)

View File

@ -12,7 +12,7 @@ from math import ceil
from functools import partial
from collections import namedtuple, OrderedDict
from difflib import SequenceMatcher
from polyglot.builtins import unicode_type, zip
from polyglot.builtins import unicode_type, zip, range
import regex
from PyQt5.Qt import (
@ -160,7 +160,7 @@ class TextBrowser(PlainTextEdit): # {{{
def calculate_metrics(self):
w = self.fontMetrics()
self.number_width = max(map(lambda x:w.width(str(x)), xrange(10)))
self.number_width = max(map(lambda x:w.width(str(x)), range(10)))
self.space_width = w.width(' ')
def show_context_menu(self, pos):
@ -612,7 +612,7 @@ class DiffSplit(QSplitter): # {{{
if size > 0:
c.beginEditBlock()
c.insertText(_('Size: {0} Resolution: {1}x{2}').format(human_readable(size), img.width(), img.height()))
for i in xrange(lines + 1):
for i in range(lines + 1):
c.insertBlock()
change.extend((start, c.block().blockNumber()))
c.insertBlock()
@ -640,7 +640,7 @@ class DiffSplit(QSplitter): # {{{
c.beginEditBlock()
c.movePosition(c.StartOfBlock)
if delta > 0:
for _ in xrange(delta):
for _ in range(delta):
c.insertBlock()
else:
c.movePosition(c.NextBlock, c.KeepAnchor, -delta)
@ -747,7 +747,7 @@ class DiffSplit(QSplitter): # {{{
def do_insert(self, cursor, highlighter, line_number_map, lo, hi):
start_block = cursor.block()
highlighter.copy_lines(lo, hi, cursor)
for num, i in enumerate(xrange(start_block.blockNumber(), cursor.blockNumber())):
for num, i in enumerate(range(start_block.blockNumber(), cursor.blockNumber())):
line_number_map[i] = lo + num + 1
return start_block.blockNumber(), cursor.block().blockNumber()
@ -806,10 +806,10 @@ class DiffSplit(QSplitter): # {{{
# search for the pair that matches best without being identical
# (identical lines must be junk lines, & we don't want to synch up
# on junk -- unless we have to)
for j in xrange(blo, bhi):
for j in range(blo, bhi):
bj = b[j]
cruncher.set_seq2(bj)
for i in xrange(alo, ahi):
for i in range(alo, ahi):
ai = a[i]
if ai == bj:
if eqi is None:

View File

@ -24,7 +24,7 @@ from calibre.gui2.tweak_book.widgets import Dialog, PlainTextEdit
from calibre.utils.config import JSONConfig
from calibre.utils.icu import string_length as strlen
from calibre.utils.localization import localize_user_manual_link
from polyglot.builtins import codepoint_to_chr, unicode_type
from polyglot.builtins import codepoint_to_chr, unicode_type, range
string_length = lambda x: strlen(unicode_type(x)) # Needed on narrow python builds, as subclasses of unicode dont work
KEY = Qt.Key_J
@ -529,7 +529,7 @@ class EditSnippet(QWidget):
self.template.setPlainText(snip.get('template') or '')
ftypes = snip.get('syntaxes', ())
for i in xrange(self.types.count()):
for i in range(self.types.count()):
i = self.types.item(i)
ftype = i.data(Qt.UserRole)
i.setCheckState(Qt.Checked if ftype in ftypes else Qt.Unchecked)
@ -544,7 +544,7 @@ class EditSnippet(QWidget):
def fget(self):
ftypes = []
for i in xrange(self.types.count()):
for i in range(self.types.count()):
i = self.types.item(i)
if i.checkState() == Qt.Checked:
ftypes.append(i.data(Qt.UserRole))
@ -657,7 +657,7 @@ class UserSnippets(Dialog):
else:
error_dialog(self, _('Invalid snippet'), err, show=True)
return
user_snippets['snippets'] = [self.snip_list.item(i).data(Qt.UserRole) for i in xrange(self.snip_list.count())]
user_snippets['snippets'] = [self.snip_list.item(i).data(Qt.UserRole) for i in range(self.snip_list.count())]
snippets(refresh=True)
return Dialog.accept(self)

View File

@ -8,7 +8,7 @@ import os
import re
import textwrap
import unicodedata
from polyglot.builtins import unicode_type, map
from polyglot.builtins import unicode_type, map, range
from PyQt5.Qt import (
QColor, QColorDialog, QFont, QFontDatabase, QKeySequence, QPainter, QPalette,
@ -262,7 +262,7 @@ class TextEdit(PlainTextEdit):
self.setFont(font)
self.highlighter.apply_theme(theme)
w = self.fontMetrics()
self.number_width = max(map(lambda x:w.width(str(x)), xrange(10)))
self.number_width = max(map(lambda x:w.width(str(x)), range(10)))
self.size_hint = QSize(self.expected_geometry[0] * w.averageCharWidth(), self.expected_geometry[1] * w.height())
self.highlight_color = theme_color(theme, 'HighlightRegion', 'bg')
self.highlight_cursor_line()

View File

@ -18,7 +18,7 @@ from calibre.gui2 import error_dialog
from calibre.gui2.tweak_book import tprefs
from calibre.gui2.tweak_book.editor import syntax_text_char_format
from calibre.gui2.tweak_book.widgets import Dialog
from polyglot.builtins import unicode_type
from polyglot.builtins import unicode_type, range
underline_styles = {'single', 'dash', 'dot', 'dash_dot', 'dash_dot_dot', 'wave', 'spell'}
@ -35,8 +35,8 @@ def default_theme():
# The solarized themes {{{
SLDX = {'base03':'1c1c1c', 'base02':'262626', 'base01':'585858', 'base00':'626262', 'base0':'808080', 'base1':'8a8a8a', 'base2':'e4e4e4', 'base3':'ffffd7', 'yellow':'af8700', 'orange':'d75f00', 'red':'d70000', 'magenta':'af005f', 'violet':'5f5faf', 'blue':'0087ff', 'cyan':'00afaf', 'green':'5f8700'} # noqa
SLD = {'base03':'002b36', 'base02':'073642', 'base01':'586e75', 'base00':'657b83', 'base0':'839496', 'base1':'93a1a1', 'base2':'eee8d5', 'base3':'fdf6e3', 'yellow':'b58900', 'orange':'cb4b16', 'red':'dc322f', 'magenta':'d33682', 'violet':'6c71c4', 'blue':'268bd2', 'cyan':'2aa198', 'green':'859900'} # noqa
m = {'base%d'%n:'base%02d'%n for n in xrange(1, 4)}
m.update({'base%02d'%n:'base%d'%n for n in xrange(1, 4)})
m = {'base%d'%n:'base%02d'%n for n in range(1, 4)}
m.update({'base%02d'%n:'base%d'%n for n in range(1, 4)})
SLL = {m.get(k, k) : v for k, v in SLD.iteritems()}
SLLX = {m.get(k, k) : v for k, v in SLDX.iteritems()}
SOLARIZED = \

View File

@ -8,6 +8,7 @@ __copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from struct import unpack, pack
from polyglot.builtins import range
t1_operand_encoding = [None] * 256
t1_operand_encoding[0:32] = (32) * ["do_operator"]
@ -91,7 +92,7 @@ class ByteCode(dict):
if len(nibbles) % 2:
nibbles.append(0xf)
d = bytearray([30])
for i in xrange(0, len(nibbles), 2):
for i in range(0, len(nibbles), 2):
d.append(nibbles[i] << 4 | nibbles[i+1])
return bytes(d)
@ -164,7 +165,7 @@ class Dict(ByteCode):
def handle_operator(self, operator, arg_type):
if isinstance(arg_type, tuple):
value = ()
for i in xrange(len(arg_type)-1, -1, -1):
for i in range(len(arg_type)-1, -1, -1):
arg = arg_type[i]
arghandler = getattr(self, 'arg_' + arg)
value = (arghandler(operator),) + value

View File

@ -15,6 +15,7 @@ from calibre.utils.fonts.sfnt.errors import UnsupportedFont, NoGlyphs
from calibre.utils.fonts.sfnt.cff.dict_data import TopDict, PrivateDict
from calibre.utils.fonts.sfnt.cff.constants import (cff_standard_strings,
STANDARD_CHARSETS)
from polyglot.builtins import range
# Useful links
# http://www.adobe.com/content/dam/Adobe/en/devnet/font/pdfs/5176.CFF.pdf
@ -104,14 +105,14 @@ class Index(list):
offset += 1
if self.offset_size == 3:
offsets = [unpack(b'>L', b'\0' + raw[i:i+3])[0]
for i in xrange(offset, offset+3*(count+1), 3)]
for i in range(offset, offset+3*(count+1), 3)]
else:
fmt = {1:'B', 2:'H', 4:'L'}[self.offset_size]
fmt = ('>%d%s'%(count+1, fmt)).encode('ascii')
offsets = unpack_from(fmt, raw, offset)
offset += self.offset_size * (count+1) - 1
for i in xrange(len(offsets)-1):
for i in range(len(offsets)-1):
off, noff = offsets[i:i+2]
obj = raw[offset+off:offset+noff]
self.append(obj)
@ -166,7 +167,7 @@ class Charset(list):
offset += sz
count += nleft + 1
self.extend('cid%05d'%x if is_CID else strings[x] for x in
xrange(first, first + nleft+1))
range(first, first + nleft+1))
def lookup(self, glyph_id):
if self.standard_charset is None:
@ -219,6 +220,3 @@ class CFFTable(UnknownTable):
CFF(s.raw)
self.raw = s.raw

View File

@ -11,6 +11,7 @@ from struct import pack
from collections import OrderedDict
from calibre.utils.fonts.sfnt.cff.constants import cff_standard_strings
from polyglot.builtins import range
class Index(list):
@ -131,7 +132,7 @@ class Subset(object):
charsets.extend(cff.charset[1:]) # .notdef is not included
endchar_operator = bytes(bytearray([14]))
for i in xrange(self.cff.num_glyphs):
for i in range(self.cff.num_glyphs):
cname = self.cff.charset.safe_lookup(i)
ok = cname in keep_charnames
cs = self.cff.char_strings[i] if ok else endchar_operator
@ -189,5 +190,3 @@ class Subset(object):
self.raw += private_dict.raw
if private_dict.subrs is not None:
self.raw += private_dict.subrs.raw

View File

@ -16,6 +16,7 @@ from collections import OrderedDict
from calibre.utils.fonts.utils import read_bmp_prefix
from calibre.utils.fonts.sfnt import UnknownTable, max_power_of_two
from calibre.utils.fonts.sfnt.errors import UnsupportedFont
from polyglot.builtins import range
def split_range(start_code, end_code, cmap): # {{{
@ -153,7 +154,7 @@ class BMPTable(object):
ans = {}
for i, ec in enumerate(self.end_count):
sc = self.start_count[i]
for code in xrange(sc, ec+1):
for code in range(sc, ec+1):
ro = self.range_offset[i]
if ro == 0:
glyph_id = self.id_delta[i] + code
@ -180,7 +181,7 @@ class CmapTable(UnknownTable):
offset = 4
sz = calcsize(b'>HHL')
recs = []
for i in xrange(self.num_tables):
for i in range(self.num_tables):
platform, encoding, table_offset = unpack_from(b'>HHL', self.raw,
offset)
offset += sz
@ -188,7 +189,7 @@ class CmapTable(UnknownTable):
self.bmp_table = None
for i in xrange(len(recs)):
for i in range(len(recs)):
platform, encoding, offset = recs[i]
try:
next_offset = recs[i+1][-1]
@ -256,9 +257,9 @@ class CmapTable(UnknownTable):
id_delta = []
id_range_offset = []
glyph_index_array = []
for i in xrange(len(end_code)-1): # skip the closing codes (0xffff)
indices = list(cmap[char_code] for char_code in xrange(start_code[i], end_code[i] + 1))
if indices == list(xrange(indices[0], indices[0] + len(indices))):
for i in range(len(end_code)-1): # skip the closing codes (0xffff)
indices = list(cmap[char_code] for char_code in range(start_code[i], end_code[i] + 1))
if indices == list(range(indices[0], indices[0] + len(indices))):
# indices is a contiguous list
id_delta_temp = set_id_delta(indices[0] - start_code[i])
id_delta.append(id_delta_temp)
@ -290,4 +291,3 @@ class CmapTable(UnknownTable):
fmt = b'>4HL'
offset = calcsize(fmt)
self.raw = pack(fmt, self.version, self.num_tables, 3, 1, offset) + self.bmp_table

View File

@ -11,6 +11,7 @@ from struct import unpack_from, calcsize
from collections import OrderedDict, namedtuple
from calibre.utils.fonts.sfnt.errors import UnsupportedFont
from polyglot.builtins import range
class Unpackable(object):
@ -41,7 +42,7 @@ class SimpleListTable(list):
self.read_extra_header(data)
count = data.unpack('H')
for i in xrange(count):
for i in range(count):
offset = data.unpack('H')
self.append(self.child_class(raw, data.start_pos + offset))
self.read_extra_footer(data)
@ -66,7 +67,7 @@ class ListTable(OrderedDict):
self.read_extra_header(data)
count = data.unpack('H')
for i in xrange(count):
for i in range(count):
tag, coffset = data.unpack('4sH')
self[tag] = self.child_class(raw, data.start_pos + coffset)
@ -93,7 +94,7 @@ class IndexTable(list):
self.read_extra_header(data)
count = data.unpack('H')
for i in xrange(count):
for i in range(count):
self.append(data.unpack('H'))
def read_extra_header(self, data):
@ -167,6 +168,7 @@ def ExtensionSubstitution(raw, offset, subtable_map={}):
raise UnsupportedFont('ExtensionSubstitution has unknown format: 0x%x'%subst_format)
return subtable_map[extension_lookup_type](raw, offset+data.start_pos)
CoverageRange = namedtuple('CoverageRange', 'start end start_coverage_index')
@ -186,7 +188,7 @@ class Coverage(object):
else:
self.ranges = []
ranges = data.unpack('%dH'%(3*count), single_special=False)
for i in xrange(count):
for i in range(count):
start, end, start_coverage_index = ranges[i*3:(i+1)*3]
self.ranges.append(CoverageRange(start, end, start_coverage_index))
@ -249,4 +251,3 @@ class UnknownLookupSubTable(object):
items.append(read_item(data))
coverage_to_items_map.append(items)
return coverage_to_items_map

View File

@ -12,6 +12,7 @@ from struct import unpack_from, calcsize, pack, error as struct_error
from calibre.utils.fonts.sfnt import (UnknownTable, FixedProperty,
max_power_of_two)
from calibre.utils.fonts.sfnt.errors import UnsupportedFont
from polyglot.builtins import range
class KernTable(UnknownTable):
@ -30,7 +31,7 @@ class KernTable(UnknownTable):
raise UnsupportedFont('kern table has version: %x'%self._version)
offset = 4 if (self._version == 0) else 8
tables = []
for i in xrange(self.num_tables):
for i in range(self.num_tables):
if self._version == 0:
version, length, coverage = unpack_from(b'>3H', self.raw, offset)
table_format = version
@ -57,7 +58,7 @@ class KernTable(UnknownTable):
offset = calcsize(headerfmt + b'4H')
entries = []
entrysz = calcsize(b'>2Hh')
for i in xrange(npairs):
for i in range(npairs):
try:
left, right, value = unpack_from(b'>2Hh', raw, offset)
except struct_error:
@ -87,4 +88,3 @@ class KernTable(UnknownTable):
header = pack(headerfmt, length, coverage, tuple_index)
return header + pack(b'>4H', npairs, search_range, entry_selector,
range_shift) + entries

View File

@ -11,6 +11,7 @@ from struct import calcsize, unpack_from, pack
from operator import itemgetter
from calibre.utils.fonts.sfnt import UnknownTable
from polyglot.builtins import range
class LocaTable(UnknownTable):
@ -46,7 +47,7 @@ class LocaTable(UnknownTable):
self.offset_map[glyph_id+1] = offset + sz
# Fix all zero entries to be the same as the previous entry, which
# means that if the ith entry is zero, the i-1 glyph is not present.
for i in xrange(1, len(self.offset_map)):
for i in range(1, len(self.offset_map)):
if self.offset_map[i] == 0:
self.offset_map[i] = self.offset_map[i-1]
@ -59,9 +60,7 @@ class LocaTable(UnknownTable):
def dump_glyphs(self, sfnt):
if not hasattr(self, 'offset_map'):
self.load_offsets(sfnt[b'head'], sfnt[b'maxp'])
for i in xrange(len(self.offset_map)-1):
for i in range(len(self.offset_map)-1):
off, noff = self.offset_map[i], self.offset_map[i+1]
if noff != off:
print('Glyph id:', i, 'size:', noff-off)

View File

@ -15,7 +15,7 @@ from functools import partial
from calibre.utils.icu import safe_chr, ord_string
from calibre.utils.fonts.sfnt.container import Sfnt
from calibre.utils.fonts.sfnt.errors import UnsupportedFont, NoGlyphs
from polyglot.builtins import unicode_type
from polyglot.builtins import unicode_type, range
# TrueType outlines {{{
@ -115,7 +115,7 @@ def subset(raw, individual_chars, ranges=(), warnings=None):
chars = set(map(safe_ord, individual_chars))
for r in ranges:
chars |= set(xrange(safe_ord(r[0]), safe_ord(r[1])+1))
chars |= set(range(safe_ord(r[0]), safe_ord(r[1])+1))
# Always add the space character for ease of use from the command line
if safe_ord(' ') not in chars:
@ -307,10 +307,10 @@ def test_mem():
start_mem = memory()
raw = P('fonts/liberation/LiberationSerif-Regular.ttf', data=True)
calls = 1000
for i in xrange(calls):
for i in range(calls):
subset(raw, (), (('a', 'z'),))
del raw
for i in xrange(3):
for i in range(3):
gc.collect()
print ('Leaked memory per call:', (memory() - start_mem)/calls*1024, 'KB')