Mirror of https://github.com/kovidgoyal/calibre.git
Finish getting rid of xrange
commit 9cf2e2f671
parent b4e467ea18
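
The change is mechanical: every use of the Python 2-only xrange builtin becomes range, imported from polyglot.builtins so the same code runs on Python 2 and Python 3. On Python 2 the builtin range builds a full list, so a lazy compatibility alias is needed to keep the old memory behaviour. The sketch below illustrates the kind of shim such a compatibility module typically provides; it is an assumption for illustration, not the actual contents of calibre's polyglot.builtins.

    # Hypothetical sketch of a range shim; not copied from calibre's polyglot.builtins.
    import sys

    if sys.version_info.major >= 3:
        # Python 3: the builtin range is already a lazy sequence, re-export it as-is.
        range = range
    else:
        # Python 2: alias the lazy xrange so call sites can uniformly write range(...).
        range = xrange  # noqa: F821

With such an alias in place, call sites can write for i in range(n): on either interpreter. Where the old code needed an actual list, the diff keeps an explicit wrapper, e.g. list(range(off, off+cnt)), since the compatibility range stays lazy; unpacking such as DEBUG, INFO, WARN, ERROR, CRITICAL = range(5) also works unchanged because unpacking accepts any iterable.
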
@@ -5,6 +5,8 @@ __copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
 Microsoft LIT OPF tag and attribute tables, copied from ConvertLIT.
 """

+from polyglot.builtins import range
+
 TAGS = [
     None,
     "package",
@@ -76,6 +78,6 @@ ATTRS = {
     0x0016: "xml:lang",
 }

-TAGS_ATTRS = [{} for i in xrange(43)]
+TAGS_ATTRS = [{} for i in range(43)]

 MAP = (TAGS, ATTRS, TAGS_ATTRS)
@@ -14,6 +14,7 @@ from calibre.ebooks.mobi.reader.headers import NULL_INDEX
 from calibre.ebooks.mobi.langcodes import main_language, sub_language
 from calibre.ebooks.mobi.debug import format_bytes
 from calibre.ebooks.mobi.utils import get_trailing_data
+from polyglot.builtins import range

 # PalmDB {{{

@@ -224,7 +225,7 @@ class EXTHHeader(object):

         pos = 12
         self.records = []
-        for i in xrange(self.count):
+        for i in range(self.count):
             pos = self.read_record(pos)
         self.records.sort(key=lambda x:x.type)
         self.rmap = {x.type:x for x in self.records}
@@ -517,7 +518,7 @@ class MOBIFile(object):

         self.record_headers = []
         self.records = []
-        for i in xrange(self.palmdb.number_of_records):
+        for i in range(self.palmdb.number_of_records):
             pos = 78 + i * 8
             offset, a1, a2, a3, a4 = struct.unpack(b'>LBBBB', self.raw[pos:pos+8])
             flags, val = a1, a2 << 16 | a3 << 8 | a4
@@ -557,7 +558,7 @@ class MOBIFile(object):
         from calibre.ebooks.mobi.huffcdic import HuffReader

         def huffit(off, cnt):
-            huffman_record_nums = list(xrange(off, off+cnt))
+            huffman_record_nums = list(range(off, off+cnt))
             huffrecs = [self.records[r].raw for r in huffman_record_nums]
             huffs = HuffReader(huffrecs)
             return huffman_record_nums, huffs.unpack
@@ -616,5 +617,3 @@ class TextRecord(object):  # {{{
         return len(self.raw)

 # }}}
-
-
@@ -15,6 +15,7 @@ from calibre.ebooks.mobi.reader.headers import NULL_INDEX
 from calibre.ebooks.mobi.reader.index import (CNCX, parse_indx_header,
         parse_tagx_section, parse_index_record, INDEX_HEADER_FIELDS)
 from calibre.ebooks.mobi.reader.ncx import (tag_fieldname_map, default_entry)
+from polyglot.builtins import range

 File = namedtuple('File',
     'file_number name divtbl_count start_position length')
@@ -41,7 +42,7 @@ def read_variable_len_data(data, header):
         tagx_block_size = header['tagx_block_size'] = struct.unpack_from(b'>I', data, offset + 4)[0]
         header['tagx_block'] = data[offset:offset+tagx_block_size]
     offset = idxt_offset + 4
-    for i in xrange(header['count']):
+    for i in range(header['count']):
         p = struct.unpack_from(b'>H', data, offset)[0]
         offset += 2
         strlen = bytearray(data[p])[0]
@@ -77,7 +78,7 @@ def read_index(sections, idx, codec):
     read_variable_len_data(data, indx_header)
     index_headers = []

-    for i in xrange(idx + 1, idx + 1 + indx_count):
+    for i in range(idx + 1, idx + 1 + indx_count):
         # Index record
         data = sections[i].raw
         index_headers.append(parse_index_record(table, data, control_byte_count, tags, codec,
@@ -20,7 +20,7 @@ from calibre.ebooks.mobi.utils import (decode_hex_number, decint,
 from calibre.utils.imghdr import what
 from calibre.ebooks.mobi.debug import format_bytes
 from calibre.ebooks.mobi.debug.headers import TextRecord
-from polyglot.builtins import unicode_type
+from polyglot.builtins import unicode_type, range


 class TagX(object):  # {{{
@@ -744,7 +744,7 @@ class MOBIFile(object):  # {{{
                     self.index_header.index_encoding)
             self.index_record = IndexRecord(self.records[pir+1:pir+1+numi],
                     self.index_header, self.cncx)
-            self.indexing_record_nums = set(xrange(pir,
+            self.indexing_record_nums = set(range(pir,
                 pir+1+numi+self.index_header.num_of_cncx_blocks))
         self.secondary_index_record = self.secondary_index_header = None
         sir = self.mobi_header.secondary_index_record
@@ -754,17 +754,17 @@ class MOBIFile(object):  # {{{
             self.indexing_record_nums.add(sir)
             self.secondary_index_record = IndexRecord(
                 self.records[sir+1:sir+1+numi], self.secondary_index_header, self.cncx)
-            self.indexing_record_nums |= set(xrange(sir+1, sir+1+numi))
+            self.indexing_record_nums |= set(range(sir+1, sir+1+numi))

         ntr = self.mobi_header.number_of_text_records
         fii = self.mobi_header.first_image_index
         self.text_records = [TextRecord(r, self.records[r],
-            self.mobi_header.extra_data_flags, mf.decompress6) for r in xrange(1,
+            self.mobi_header.extra_data_flags, mf.decompress6) for r in range(1,
             min(len(self.records), ntr+1))]
         self.image_records, self.binary_records = [], []
         self.font_records = []
         image_index = 0
-        for i in xrange(self.mobi_header.first_resource_record, min(self.mobi_header.last_resource_record, len(self.records))):
+        for i in range(self.mobi_header.first_resource_record, min(self.mobi_header.last_resource_record, len(self.records))):
             if i in self.indexing_record_nums or i in self.huffman_record_nums:
                 continue
             image_index += 1
@@ -12,13 +12,14 @@ from collections import OrderedDict, namedtuple

 from calibre.ebooks.mobi.utils import (decint, count_set_bits,
         decode_string)
+from polyglot.builtins import range

 TagX = namedtuple('TagX', 'tag num_of_values bitmask eof')
 PTagX = namedtuple('PTagX', 'tag value_count value_bytes num_of_values')
 INDEX_HEADER_FIELDS = (
         'len', 'nul1', 'type', 'gen', 'start', 'count', 'code',
         'lng', 'total', 'ordt', 'ligt', 'nligt', 'ncncx'
-    ) + tuple('unknown%d'%i for i in xrange(27)) + ('ocnt', 'oentries',
+    ) + tuple('unknown%d'%i for i in range(27)) + ('ocnt', 'oentries',
         'ordt1', 'ordt2', 'tagx')


@@ -73,7 +74,7 @@ def parse_indx_header(data):
         # ascii character. If we cannot, we map to the ? char.

         parsed = bytearray(ans['oentries'])
-        for i in xrange(0, 2*ans['oentries'], 2):
+        for i in range(0, 2*ans['oentries'], 2):
             parsed[i//2] = raw[i+1] if 0x20 < raw[i+1] < 0x7f else ord(b'?')
         ans['ordt_map'] = bytes(parsed).decode('ascii')
     else:
@@ -133,7 +134,7 @@ def parse_tagx_section(data):
     first_entry_offset, = struct.unpack_from(b'>L', data, 4)
     control_byte_count, = struct.unpack_from(b'>L', data, 8)

-    for i in xrange(12, first_entry_offset, 4):
+    for i in range(12, first_entry_offset, 4):
         vals = list(bytearray(data[i:i+4]))
         tags.append(TagX(*vals))
     return control_byte_count, tags
@@ -177,7 +178,7 @@ def get_tag_map(control_byte_count, tagx, data, strict=False):
         values = []
         if x.value_count is not None:
             # Read value_count * values_per_entry variable width values.
-            for _ in xrange(x.value_count * x.num_of_values):
+            for _ in range(x.value_count * x.num_of_values):
                 byts, consumed = decint(data)
                 data = data[consumed:]
                 values.append(byts)
@@ -220,7 +221,7 @@ def parse_index_record(table, data, control_byte_count, tags, codec,

     # loop through to build up the IDXT position starts
     idx_positions= []
-    for j in xrange(entry_count):
+    for j in range(entry_count):
         pos, = struct.unpack_from(b'>H', data, idxt_pos + 4 + (2 * j))
         idx_positions.append(pos)
     # The last entry ends before the IDXT tag (but there might be zero fill
@@ -229,7 +230,7 @@ def parse_index_record(table, data, control_byte_count, tags, codec,

     # For each entry in the IDXT build up the tag map and any associated
     # text
-    for j in xrange(entry_count):
+    for j in range(entry_count):
         start, end = idx_positions[j:j+2]
         rec = data[start:end]
         # Sometimes (in the guide table if the type attribute has non ascii
@@ -266,10 +267,9 @@ def read_index(sections, idx, codec):
     tag_section_start = indx_header['tagx']
     control_byte_count, tags = parse_tagx_section(data[tag_section_start:])

-    for i in xrange(idx + 1, idx + 1 + indx_count):
+    for i in range(idx + 1, idx + 1 + indx_count):
         # Index record
         data = sections[i][0]
         parse_index_record(table, data, control_byte_count, tags, codec,
                 indx_header['ordt_map'])
     return table, cncx
-
@@ -10,7 +10,7 @@ __docformat__ = 'restructuredtext en'
 import re, os

 from calibre.ebooks.chardet import strip_encoding_declarations
-from polyglot.builtins import unicode_type
+from polyglot.builtins import unicode_type, range


 def update_internal_links(mobi8_reader, log):
@@ -31,7 +31,7 @@ def update_internal_links(mobi8_reader, log):
     parts = []
     for part in mr.parts:
         srcpieces = posfid_pattern.split(part)
-        for j in xrange(1, len(srcpieces), 2):
+        for j in range(1, len(srcpieces), 2):
             tag = srcpieces[j]
             if tag.startswith(b'<'):
                 for m in posfid_index_pattern.finditer(tag):
@@ -69,7 +69,7 @@ def remove_kindlegen_markup(parts, aid_anchor_suffix, linked_aids):
         re.IGNORECASE)
     within_tag_aid_position_pattern = re.compile(r'''\s[ac]id\s*=['"]([^'"]*)['"]''')

-    for i in xrange(len(parts)):
+    for i in range(len(parts)):
         part = parts[i]
         srcpieces = find_tag_with_aid_pattern.split(part)
         for j in range(len(srcpieces)):
@@ -95,7 +95,7 @@ def remove_kindlegen_markup(parts, aid_anchor_suffix, linked_aids):
     within_tag_AmznPageBreak_position_pattern = re.compile(
         r'''\sdata-AmznPageBreak=['"]([^'"]*)['"]''')

-    for i in xrange(len(parts)):
+    for i in range(len(parts)):
         part = parts[i]
         srcpieces = find_tag_with_AmznPageBreak_pattern.split(part)
         for j in range(len(srcpieces)):
@@ -229,7 +229,7 @@ def insert_flows_into_markup(parts, flows, mobi8_reader, log):
     # kindle:flow:XXXX?mime=YYYY/ZZZ (used for style sheets, svg images, etc)
     tag_pattern = re.compile(r'''(<[^>]*>)''')
     flow_pattern = re.compile(r'''['"]kindle:flow:([0-9|A-V]+)\?mime=([^'"]+)['"]''', re.IGNORECASE)
-    for i in xrange(len(parts)):
+    for i in range(len(parts)):
         part = parts[i]

         # flow pattern
@@ -265,10 +265,10 @@ def insert_images_into_markup(parts, resource_map, log):
     style_pattern = re.compile(r'''(<[a-zA-Z0-9]+\s[^>]*style\s*=\s*[^>]*>)''',
         re.IGNORECASE)

-    for i in xrange(len(parts)):
+    for i in range(len(parts)):
         part = parts[i]
         srcpieces = img_pattern.split(part)
-        for j in xrange(1, len(srcpieces), 2):
+        for j in range(1, len(srcpieces), 2):
             tag = srcpieces[j]
             if tag.startswith('<im'):
                 for m in img_index_pattern.finditer(tag):
@@ -286,10 +286,10 @@ def insert_images_into_markup(parts, resource_map, log):
         parts[i] = part

     # Replace urls used in style attributes
-    for i in xrange(len(parts)):
+    for i in range(len(parts)):
         part = parts[i]
         srcpieces = style_pattern.split(part)
-        for j in xrange(1, len(srcpieces), 2):
+        for j in range(1, len(srcpieces), 2):
             tag = srcpieces[j]
             if 'kindle:embed' in tag:
                 for m in img_index_pattern.finditer(tag):
@@ -312,7 +312,7 @@ def insert_images_into_markup(parts, resource_map, log):
 def upshift_markup(parts):
     tag_pattern = re.compile(r'''(<(?:svg)[^>]*>)''', re.IGNORECASE)

-    for i in xrange(len(parts)):
+    for i in range(len(parts)):
         part = parts[i]

         # tag pattern
@@ -23,7 +23,7 @@ from calibre.ebooks.metadata.toc import TOC
 from calibre.ebooks.mobi.reader.headers import BookHeader
 from calibre.utils.img import save_cover_data_to
 from calibre.utils.imghdr import what
-from polyglot.builtins import unicode_type
+from polyglot.builtins import unicode_type, range


 class TopazError(ValueError):
@@ -784,7 +784,7 @@ class MobiReader(object):

     def extract_text(self, offset=1):
         self.log.debug('Extracting text...')
-        text_sections = [self.text_section(i) for i in xrange(offset,
+        text_sections = [self.text_section(i) for i in range(offset,
             min(self.book_header.records + offset, len(self.sections)))]
         processed_records = list(range(offset-1, self.book_header.records +
             offset))
@@ -793,9 +793,9 @@ class MobiReader(object):

         if self.book_header.compression_type == 'DH':
             huffs = [self.sections[i][0] for i in
-                xrange(self.book_header.huff_offset,
+                range(self.book_header.huff_offset,
                     self.book_header.huff_offset + self.book_header.huff_number)]
-            processed_records += list(xrange(self.book_header.huff_offset,
+            processed_records += list(range(self.book_header.huff_offset,
                 self.book_header.huff_offset + self.book_header.huff_number))
             huff = HuffReader(huffs)
             unpack = huff.unpack
@@ -25,6 +25,7 @@ from calibre.ebooks.metadata.toc import TOC
 from calibre.ebooks.mobi.utils import read_font_record
 from calibre.ebooks.oeb.parse_utils import parse_html
 from calibre.ebooks.oeb.base import XPath, XHTML, xml2text
+from polyglot.builtins import range

 Part = namedtuple('Part',
     'num type filename start end aid')
@@ -193,7 +194,7 @@ class Mobi8Reader(object):
             baseptr = skelpos + skellen
             skeleton = text[skelpos:baseptr]
             inspos_warned = False
-            for i in xrange(divcnt):
+            for i in range(divcnt):
                 insertpos, idtext, filenum, seqnum, startpos, length = \
                                     self.elems[divptr]
                 if i == 0:
@@ -253,7 +254,7 @@ class Mobi8Reader(object):
         self.flowinfo.append(FlowInfo(None, None, None, None))
         svg_tag_pattern = re.compile(br'''(<svg[^>]*>)''', re.IGNORECASE)
         image_tag_pattern = re.compile(br'''(<(?:svg:)?image[^>]*>)''', re.IGNORECASE)
-        for j in xrange(1, len(self.flows)):
+        for j in range(1, len(self.flows)):
             flowpart = self.flows[j]
             nstr = '%04d' % j
             m = svg_tag_pattern.search(flowpart)
@@ -14,6 +14,7 @@ from collections import OrderedDict, defaultdict

 from calibre.ebooks.mobi.utils import (encint, encode_number_as_hex,
         encode_tbs, align_block, RECORD_SIZE, CNCX as CNCX_)
+from polyglot.builtins import range


 class CNCX(CNCX_):  # {{{
@@ -844,7 +845,7 @@ class Indexer(object):  # {{{

         deepest = max(i.depth for i in self.indices)

-        for i in xrange(self.number_of_text_records):
+        for i in range(self.number_of_text_records):
             offset = i * RECORD_SIZE
             next_offset = offset + RECORD_SIZE
             data = {'ends':[], 'completes':[], 'starts':[],
@@ -890,5 +891,3 @@ class Indexer(object):  # {{{
     # }}}

 # }}}
-
-
@@ -20,7 +20,7 @@ from calibre.ebooks.mobi.writer2 import (PALMDOC, UNCOMPRESSED)
 from calibre.ebooks.mobi.utils import (encint, encode_trailing_data,
         align_block, detect_periodical, RECORD_SIZE, create_text_record)
 from calibre.ebooks.mobi.writer2.indexer import Indexer
-from polyglot.builtins import unicode_type
+from polyglot.builtins import unicode_type, range

 # Disabled as I dont care about uncrossable breaks
 WRITE_UNCROSSABLE_BREAKS = False
@@ -106,7 +106,7 @@ class MobiWriter(object):
             self.log.exception('Failed to generate MOBI index:')
         else:
             self.primary_index_record_idx = len(self.records)
-            for i in xrange(self.last_text_record_idx + 1):
+            for i in range(self.last_text_record_idx + 1):
                 if i == 0:
                     continue
                 tbs = self.indexer.get_trailing_byte_sequence(i)
@@ -125,7 +125,7 @@ class MobiWriter(object):

         breaks = self.serializer.breaks

-        for i in xrange(1, self.last_text_record_idx+1):
+        for i in range(1, self.last_text_record_idx+1):
             offset = i * RECORD_SIZE
             pbreak = 0
             running = offset
@@ -11,7 +11,7 @@ __docformat__ = 'restructuredtext en'
 from collections import namedtuple
 from struct import pack
 from io import BytesIO
-from polyglot.builtins import unicode_type, zip
+from polyglot.builtins import unicode_type, zip, range

 from calibre.ebooks.mobi.utils import CNCX, encint, align_block
 from calibre.ebooks.mobi.writer8.header import Header
@@ -380,7 +380,7 @@ if __name__ == '__main__':
     # calibre and kindlegen and compare the output
     import os, subprocess
     os.chdir('/t')
-    paras = ['<p>%d</p>' % i for i in xrange(4000)]
+    paras = ['<p>%d</p>' % i for i in range(4000)]
     raw = '<html><body>' + '\n\n'.join(paras) + '</body></html>'

     src = 'index.html'
@@ -11,8 +11,9 @@ from functools import partial
 from contextlib import closing

 from calibre import detect_ncpus as cpu_count
+from polyglot.builtins import range

-DEBUG, INFO, WARN, ERROR, CRITICAL = xrange(5)
+DEBUG, INFO, WARN, ERROR, CRITICAL = range(5)


 class BaseError(object):
@@ -55,4 +56,3 @@ def run_checkers(func, args_list):
             raise Exception('Failed to run worker: \n%s' % tb)
         ans.extend(result)
     return ans
-
@@ -21,6 +21,7 @@ from calibre.ebooks.oeb.polish.replace import remove_links_to
 from calibre.ebooks.oeb.polish.cover import get_raster_cover_name
 from calibre.ebooks.oeb.polish.utils import guess_type, actual_case_for_name, corrected_case_for_name
 from calibre.ebooks.oeb.polish.check.base import BaseError, WARN, INFO
+from polyglot.builtins import range


 class BadLink(BaseError):
@@ -445,7 +446,7 @@ def check_external_links(container, progress_callback=(lambda num, total:None),
                 done.append(None)
                 progress_callback(len(done), len(external_links))

-    workers = [Thread(name="CheckLinks", target=check_links) for i in xrange(min(10, len(external_links)))]
+    workers = [Thread(name="CheckLinks", target=check_links) for i in range(min(10, len(external_links)))]
     for w in workers:
         w.daemon = True
         w.start()
@@ -10,6 +10,7 @@ from threading import Thread, Event
 from Queue import Queue, Empty

 from calibre import detect_ncpus, human_readable, force_unicode, filesystem_encoding
+from polyglot.builtins import range


 class Worker(Thread):
@@ -88,7 +89,7 @@ def compress_images(container, report=None, names=None, jpeg_quality=None, progr
         if not keep_going:
             abort.set()
     progress_callback(0, len(images), '')
-    [Worker(abort, 'CompressImage%d' % i, queue, results, container, jpeg_quality, pc) for i in xrange(min(detect_ncpus(), len(images)))]
+    [Worker(abort, 'CompressImage%d' % i, queue, results, container, jpeg_quality, pc) for i in range(min(detect_ncpus(), len(images)))]
     queue.join()
     before_total = after_total = 0
     changed = False
@@ -7,7 +7,7 @@ __license__ = 'GPL v3'
 __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'

 import copy, os, re
-from polyglot.builtins import map, string_or_bytes
+from polyglot.builtins import map, string_or_bytes, range
 from urlparse import urlparse

 from calibre.ebooks.oeb.base import barename, XPNSMAP, XPath, OPF, XHTML, OEB_DOCS
@@ -286,7 +286,7 @@ def multisplit(container, name, xpath, before=True):

     current = name
     all_names = [name]
-    for i in xrange(len(nodes)):
+    for i in range(len(nodes)):
         current = split(container, current, '//*[@calibre-split-point="%d"]' % i, before=before)
         all_names.append(current)

@@ -18,6 +18,7 @@ from calibre.ebooks.oeb.polish.cascade import iterrules, resolve_styles, iterdec
 from calibre.utils.icu import ord_string, safe_chr
 from polyglot.builtins import unicode_type
 from tinycss.fonts3 import parse_font_family
+from polyglot.builtins import range


 def normalize_font_properties(font):
@@ -92,10 +93,10 @@ def get_matching_rules(rules, font):
     elif fw == 500:
         q = [500, 400, 300, 200, 100, 600, 700, 800, 900]
     elif fw < 400:
-        q = [fw] + list(xrange(fw-100, -100, -100)) + list(xrange(fw+100,
+        q = [fw] + list(range(fw-100, -100, -100)) + list(range(fw+100,
             100, 1000))
     else:
-        q = [fw] + list(xrange(fw+100, 100, 1000)) + list(xrange(fw-100,
+        q = [fw] + list(range(fw+100, 100, 1000)) + list(range(fw-100,
             -100, -100))
     for wt in q:
         m = [f for f in matches if f['weight'] == wt]
@@ -15,6 +15,7 @@ from calibre.ebooks.oeb.polish.tests.base import BaseTest
 from calibre.ebooks.oeb.polish.parsing import parse_html5 as parse
 from calibre.ebooks.oeb.base import XPath, XHTML_NS, SVG_NS, XLINK_NS
 from calibre.ebooks.oeb.parse_utils import html5_parse
+from polyglot.builtins import range


 def nonvoid_cdata_elements(test, parse_function):
@@ -214,7 +215,7 @@ def timing():

     for name, f in (('calibre', partial(parse, line_numbers=False)), ('html5lib', vanilla), ('calibre-old', html5_parse)):
         timings = []
-        for i in xrange(10):
+        for i in range(10):
             st = monotonic()
             f(raw)
             timings.append(monotonic() - st)
@@ -20,7 +20,7 @@ from calibre.ebooks.epub import rules
 from calibre.ebooks.oeb.base import (OEB_STYLES, XPNSMAP as NAMESPACES,
         urldefrag, rewrite_links, urlunquote, XHTML, urlnormalize)
 from calibre.ebooks.oeb.polish.split import do_split
-from polyglot.builtins import unicode_type
+from polyglot.builtins import unicode_type, range
 from css_selectors import Select, SelectorError

 XPath = functools.partial(_XPath, namespaces=NAMESPACES)
@@ -245,7 +245,7 @@ class FlowSplitter(object):
         while ordered_ids:
             pb_id, (pattern, before) = ordered_ids.iteritems().next()
             del ordered_ids[pb_id]
-            for i in xrange(len(self.trees)-1, -1, -1):
+            for i in range(len(self.trees)-1, -1, -1):
                 tree = self.trees[i]
                 elem = pattern(tree)
                 if elem:
@@ -11,7 +11,7 @@ from collections import defaultdict

 from calibre.ebooks.oeb.base import urlnormalize
 from calibre.utils.fonts.sfnt.subset import subset, NoGlyphs, UnsupportedFont
-from polyglot.builtins import unicode_type
+from polyglot.builtins import unicode_type, range
 from tinycss.fonts3 import parse_font_family


@@ -287,10 +287,10 @@ class SubsetFonts(object):
         elif fw == 500:
             q = [500, 400, 300, 200, 100, 600, 700, 800, 900]
         elif fw < 400:
-            q = [fw] + list(xrange(fw-100, -100, -100)) + list(xrange(fw+100,
+            q = [fw] + list(range(fw-100, -100, -100)) + list(range(fw+100,
                 100, 1000))
         else:
-            q = [fw] + list(xrange(fw+100, 100, 1000)) + list(xrange(fw-100,
+            q = [fw] + list(range(fw+100, 100, 1000)) + list(range(fw-100,
                 -100, -100))
         for wt in q:
             matches = [f for f in matching_set if f['weight'] == wt]
@@ -13,7 +13,7 @@ from calibre.db.search import _match, CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH
 from calibre.utils.config_base import prefs
 from calibre.utils.icu import sort_key
 from calibre.utils.search_query_parser import SearchQueryParser
-from polyglot.builtins import unicode_type
+from polyglot.builtins import unicode_type, range


 class Matches(QAbstractItemModel):
@@ -56,19 +56,19 @@ class Matches(QAbstractItemModel):
         self.sort(self.sort_col, self.sort_order)

     def enable_all(self):
-        for i in xrange(len(self.matches)):
+        for i in range(len(self.matches)):
             index = self.createIndex(i, 0)
             data = (True)
             self.setData(index, data, Qt.CheckStateRole)

     def enable_none(self):
-        for i in xrange(len(self.matches)):
+        for i in range(len(self.matches)):
             index = self.createIndex(i, 0)
             data = (False)
             self.setData(index, data, Qt.CheckStateRole)

     def enable_invert(self):
-        for i in xrange(len(self.matches)):
+        for i in range(len(self.matches)):
             self.toggle_plugin(self.createIndex(i, 0))

     def toggle_plugin(self, index):
@@ -13,6 +13,7 @@ from PyQt5.Qt import (Qt, QTreeView, QSize, QMenu)
 from calibre.customize.ui import store_plugins
 from calibre.gui2.metadata.single_download import RichTextDelegate
 from calibre.gui2.store.config.chooser.models import Matches
+from polyglot.builtins import range


 class ResultsView(QTreeView):
@@ -30,7 +31,7 @@ class ResultsView(QTreeView):
         for i in self._model.HTML_COLS:
             self.setItemDelegateForColumn(i, self.rt_delegate)

-        for i in xrange(self._model.columnCount()):
+        for i in range(self._model.columnCount()):
             self.resizeColumnToContents(i)

         self.model().sort(1, Qt.AscendingOrder)
@@ -14,6 +14,7 @@ from Queue import Queue
 from calibre import browser
 from calibre.constants import DEBUG
 from calibre.utils.img import scale_image
+from polyglot.builtins import range


 class GenericDownloadThreadPool(object):
@@ -44,7 +45,7 @@ class GenericDownloadThreadPool(object):
         starts any threads necessary to fill the pool if it is
         not already full.
         '''
-        for i in xrange(self.thread_count - self.running_threads_count()):
+        for i in range(self.thread_count - self.running_threads_count()):
             t = self.thread_type(self.tasks, self.results)
             self.threads.append(t)
             t.start()
|
@ -14,6 +14,7 @@ from calibre.gui2.tweak_book import tprefs
|
|||||||
from calibre.gui2.tweak_book.editor.text import get_highlighter as calibre_highlighter, SyntaxHighlighter
|
from calibre.gui2.tweak_book.editor.text import get_highlighter as calibre_highlighter, SyntaxHighlighter
|
||||||
from calibre.gui2.tweak_book.editor.themes import get_theme, highlight_to_char_format
|
from calibre.gui2.tweak_book.editor.themes import get_theme, highlight_to_char_format
|
||||||
from calibre.gui2.tweak_book.editor.syntax.utils import format_for_pygments_token, NULL_FMT
|
from calibre.gui2.tweak_book.editor.syntax.utils import format_for_pygments_token, NULL_FMT
|
||||||
|
from polyglot.builtins import range
|
||||||
|
|
||||||
|
|
||||||
class QtHighlighter(QTextDocument):
|
class QtHighlighter(QTextDocument):
|
||||||
@ -59,7 +60,7 @@ class NullHighlighter(object):
|
|||||||
self.lines = text.splitlines()
|
self.lines = text.splitlines()
|
||||||
|
|
||||||
def copy_lines(self, lo, hi, cursor):
|
def copy_lines(self, lo, hi, cursor):
|
||||||
for i in xrange(lo, hi):
|
for i in range(lo, hi):
|
||||||
cursor.insertText(self.lines[i])
|
cursor.insertText(self.lines[i])
|
||||||
cursor.insertBlock()
|
cursor.insertBlock()
|
||||||
|
|
||||||
@ -101,7 +102,7 @@ class PygmentsHighlighter(object):
|
|||||||
continue
|
continue
|
||||||
|
|
||||||
def copy_lines(self, lo, hi, cursor):
|
def copy_lines(self, lo, hi, cursor):
|
||||||
for i in xrange(lo, hi):
|
for i in range(lo, hi):
|
||||||
for fmt, text in self.lines[i]:
|
for fmt, text in self.lines[i]:
|
||||||
cursor.insertText(text, fmt)
|
cursor.insertText(text, fmt)
|
||||||
cursor.setCharFormat(NULL_FMT)
|
cursor.setCharFormat(NULL_FMT)
|
||||||
|
@@ -12,7 +12,7 @@ from math import ceil
 from functools import partial
 from collections import namedtuple, OrderedDict
 from difflib import SequenceMatcher
-from polyglot.builtins import unicode_type, zip
+from polyglot.builtins import unicode_type, zip, range

 import regex
 from PyQt5.Qt import (
@@ -160,7 +160,7 @@ class TextBrowser(PlainTextEdit):  # {{{

     def calculate_metrics(self):
         w = self.fontMetrics()
-        self.number_width = max(map(lambda x:w.width(str(x)), xrange(10)))
+        self.number_width = max(map(lambda x:w.width(str(x)), range(10)))
         self.space_width = w.width(' ')

     def show_context_menu(self, pos):
@@ -612,7 +612,7 @@ class DiffSplit(QSplitter):  # {{{
         if size > 0:
             c.beginEditBlock()
             c.insertText(_('Size: {0} Resolution: {1}x{2}').format(human_readable(size), img.width(), img.height()))
-            for i in xrange(lines + 1):
+            for i in range(lines + 1):
                 c.insertBlock()
             change.extend((start, c.block().blockNumber()))
             c.insertBlock()
@@ -640,7 +640,7 @@ class DiffSplit(QSplitter):  # {{{
         c.beginEditBlock()
         c.movePosition(c.StartOfBlock)
         if delta > 0:
-            for _ in xrange(delta):
+            for _ in range(delta):
                 c.insertBlock()
         else:
             c.movePosition(c.NextBlock, c.KeepAnchor, -delta)
@@ -747,7 +747,7 @@ class DiffSplit(QSplitter):  # {{{
     def do_insert(self, cursor, highlighter, line_number_map, lo, hi):
         start_block = cursor.block()
         highlighter.copy_lines(lo, hi, cursor)
-        for num, i in enumerate(xrange(start_block.blockNumber(), cursor.blockNumber())):
+        for num, i in enumerate(range(start_block.blockNumber(), cursor.blockNumber())):
             line_number_map[i] = lo + num + 1
         return start_block.blockNumber(), cursor.block().blockNumber()

@@ -806,10 +806,10 @@ class DiffSplit(QSplitter):  # {{{
         # search for the pair that matches best without being identical
         # (identical lines must be junk lines, & we don't want to synch up
         # on junk -- unless we have to)
-        for j in xrange(blo, bhi):
+        for j in range(blo, bhi):
             bj = b[j]
             cruncher.set_seq2(bj)
-            for i in xrange(alo, ahi):
+            for i in range(alo, ahi):
                 ai = a[i]
                 if ai == bj:
                     if eqi is None:
@@ -24,7 +24,7 @@ from calibre.gui2.tweak_book.widgets import Dialog, PlainTextEdit
 from calibre.utils.config import JSONConfig
 from calibre.utils.icu import string_length as strlen
 from calibre.utils.localization import localize_user_manual_link
-from polyglot.builtins import codepoint_to_chr, unicode_type
+from polyglot.builtins import codepoint_to_chr, unicode_type, range

 string_length = lambda x: strlen(unicode_type(x))  # Needed on narrow python builds, as subclasses of unicode dont work
 KEY = Qt.Key_J
@@ -529,7 +529,7 @@ class EditSnippet(QWidget):
             self.template.setPlainText(snip.get('template') or '')

             ftypes = snip.get('syntaxes', ())
-            for i in xrange(self.types.count()):
+            for i in range(self.types.count()):
                 i = self.types.item(i)
                 ftype = i.data(Qt.UserRole)
                 i.setCheckState(Qt.Checked if ftype in ftypes else Qt.Unchecked)
@@ -544,7 +544,7 @@ class EditSnippet(QWidget):

         def fget(self):
             ftypes = []
-            for i in xrange(self.types.count()):
+            for i in range(self.types.count()):
                 i = self.types.item(i)
                 if i.checkState() == Qt.Checked:
                     ftypes.append(i.data(Qt.UserRole))
@@ -657,7 +657,7 @@ class UserSnippets(Dialog):
             else:
                 error_dialog(self, _('Invalid snippet'), err, show=True)
                 return
-        user_snippets['snippets'] = [self.snip_list.item(i).data(Qt.UserRole) for i in xrange(self.snip_list.count())]
+        user_snippets['snippets'] = [self.snip_list.item(i).data(Qt.UserRole) for i in range(self.snip_list.count())]
        snippets(refresh=True)
         return Dialog.accept(self)

@@ -8,7 +8,7 @@ import os
 import re
 import textwrap
 import unicodedata
-from polyglot.builtins import unicode_type, map
+from polyglot.builtins import unicode_type, map, range

 from PyQt5.Qt import (
     QColor, QColorDialog, QFont, QFontDatabase, QKeySequence, QPainter, QPalette,
@@ -262,7 +262,7 @@ class TextEdit(PlainTextEdit):
         self.setFont(font)
         self.highlighter.apply_theme(theme)
         w = self.fontMetrics()
-        self.number_width = max(map(lambda x:w.width(str(x)), xrange(10)))
+        self.number_width = max(map(lambda x:w.width(str(x)), range(10)))
         self.size_hint = QSize(self.expected_geometry[0] * w.averageCharWidth(), self.expected_geometry[1] * w.height())
         self.highlight_color = theme_color(theme, 'HighlightRegion', 'bg')
         self.highlight_cursor_line()
@@ -18,7 +18,7 @@ from calibre.gui2 import error_dialog
 from calibre.gui2.tweak_book import tprefs
 from calibre.gui2.tweak_book.editor import syntax_text_char_format
 from calibre.gui2.tweak_book.widgets import Dialog
-from polyglot.builtins import unicode_type
+from polyglot.builtins import unicode_type, range

 underline_styles = {'single', 'dash', 'dot', 'dash_dot', 'dash_dot_dot', 'wave', 'spell'}

@@ -35,8 +35,8 @@ def default_theme():
 # The solarized themes {{{
 SLDX = {'base03':'1c1c1c', 'base02':'262626', 'base01':'585858', 'base00':'626262', 'base0':'808080', 'base1':'8a8a8a', 'base2':'e4e4e4', 'base3':'ffffd7', 'yellow':'af8700', 'orange':'d75f00', 'red':'d70000', 'magenta':'af005f', 'violet':'5f5faf', 'blue':'0087ff', 'cyan':'00afaf', 'green':'5f8700'}  # noqa
 SLD = {'base03':'002b36', 'base02':'073642', 'base01':'586e75', 'base00':'657b83', 'base0':'839496', 'base1':'93a1a1', 'base2':'eee8d5', 'base3':'fdf6e3', 'yellow':'b58900', 'orange':'cb4b16', 'red':'dc322f', 'magenta':'d33682', 'violet':'6c71c4', 'blue':'268bd2', 'cyan':'2aa198', 'green':'859900'}  # noqa
-m = {'base%d'%n:'base%02d'%n for n in xrange(1, 4)}
-m.update({'base%02d'%n:'base%d'%n for n in xrange(1, 4)})
+m = {'base%d'%n:'base%02d'%n for n in range(1, 4)}
+m.update({'base%02d'%n:'base%d'%n for n in range(1, 4)})
 SLL = {m.get(k, k) : v for k, v in SLD.iteritems()}
 SLLX = {m.get(k, k) : v for k, v in SLDX.iteritems()}
 SOLARIZED = \
@@ -8,6 +8,7 @@ __copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
 __docformat__ = 'restructuredtext en'

 from struct import unpack, pack
+from polyglot.builtins import range

 t1_operand_encoding = [None] * 256
 t1_operand_encoding[0:32] = (32) * ["do_operator"]
@@ -91,7 +92,7 @@ class ByteCode(dict):
         if len(nibbles) % 2:
             nibbles.append(0xf)
         d = bytearray([30])
-        for i in xrange(0, len(nibbles), 2):
+        for i in range(0, len(nibbles), 2):
             d.append(nibbles[i] << 4 | nibbles[i+1])
         return bytes(d)

@@ -164,7 +165,7 @@ class Dict(ByteCode):
     def handle_operator(self, operator, arg_type):
         if isinstance(arg_type, tuple):
             value = ()
-            for i in xrange(len(arg_type)-1, -1, -1):
+            for i in range(len(arg_type)-1, -1, -1):
                 arg = arg_type[i]
                 arghandler = getattr(self, 'arg_' + arg)
                 value = (arghandler(operator),) + value
@@ -15,6 +15,7 @@ from calibre.utils.fonts.sfnt.errors import UnsupportedFont, NoGlyphs
 from calibre.utils.fonts.sfnt.cff.dict_data import TopDict, PrivateDict
 from calibre.utils.fonts.sfnt.cff.constants import (cff_standard_strings,
         STANDARD_CHARSETS)
+from polyglot.builtins import range

 # Useful links
 # http://www.adobe.com/content/dam/Adobe/en/devnet/font/pdfs/5176.CFF.pdf
@@ -104,14 +105,14 @@ class Index(list):
             offset += 1
             if self.offset_size == 3:
                 offsets = [unpack(b'>L', b'\0' + raw[i:i+3])[0]
-                        for i in xrange(offset, offset+3*(count+1), 3)]
+                        for i in range(offset, offset+3*(count+1), 3)]
             else:
                 fmt = {1:'B', 2:'H', 4:'L'}[self.offset_size]
                 fmt = ('>%d%s'%(count+1, fmt)).encode('ascii')
                 offsets = unpack_from(fmt, raw, offset)
             offset += self.offset_size * (count+1) - 1

-            for i in xrange(len(offsets)-1):
+            for i in range(len(offsets)-1):
                 off, noff = offsets[i:i+2]
                 obj = raw[offset+off:offset+noff]
                 self.append(obj)
@@ -166,7 +167,7 @@ class Charset(list):
                 offset += sz
                 count += nleft + 1
                 self.extend('cid%05d'%x if is_CID else strings[x] for x in
-                        xrange(first, first + nleft+1))
+                        range(first, first + nleft+1))

     def lookup(self, glyph_id):
         if self.standard_charset is None:
@@ -219,6 +220,3 @@ class CFFTable(UnknownTable):
         CFF(s.raw)

         self.raw = s.raw
-
-
-
@@ -11,6 +11,7 @@ from struct import pack
 from collections import OrderedDict
 
 from calibre.utils.fonts.sfnt.cff.constants import cff_standard_strings
+from polyglot.builtins import range
 
 
 class Index(list):
@@ -131,7 +132,7 @@ class Subset(object):
         charsets.extend(cff.charset[1:])  # .notdef is not included
 
         endchar_operator = bytes(bytearray([14]))
-        for i in xrange(self.cff.num_glyphs):
+        for i in range(self.cff.num_glyphs):
             cname = self.cff.charset.safe_lookup(i)
             ok = cname in keep_charnames
             cs = self.cff.char_strings[i] if ok else endchar_operator
@@ -189,5 +190,3 @@ class Subset(object):
             self.raw += private_dict.raw
             if private_dict.subrs is not None:
                 self.raw += private_dict.subrs.raw
-
-
@@ -16,6 +16,7 @@ from collections import OrderedDict
 from calibre.utils.fonts.utils import read_bmp_prefix
 from calibre.utils.fonts.sfnt import UnknownTable, max_power_of_two
 from calibre.utils.fonts.sfnt.errors import UnsupportedFont
+from polyglot.builtins import range
 
 
 def split_range(start_code, end_code, cmap):  # {{{
@@ -153,7 +154,7 @@ class BMPTable(object):
         ans = {}
         for i, ec in enumerate(self.end_count):
             sc = self.start_count[i]
-            for code in xrange(sc, ec+1):
+            for code in range(sc, ec+1):
                 ro = self.range_offset[i]
                 if ro == 0:
                     glyph_id = self.id_delta[i] + code
@@ -180,7 +181,7 @@ class CmapTable(UnknownTable):
         offset = 4
         sz = calcsize(b'>HHL')
         recs = []
-        for i in xrange(self.num_tables):
+        for i in range(self.num_tables):
             platform, encoding, table_offset = unpack_from(b'>HHL', self.raw,
                     offset)
             offset += sz
@@ -188,7 +189,7 @@ class CmapTable(UnknownTable):
 
         self.bmp_table = None
 
-        for i in xrange(len(recs)):
+        for i in range(len(recs)):
             platform, encoding, offset = recs[i]
             try:
                 next_offset = recs[i+1][-1]
@@ -256,9 +257,9 @@ class CmapTable(UnknownTable):
         id_delta = []
         id_range_offset = []
         glyph_index_array = []
-        for i in xrange(len(end_code)-1): # skip the closing codes (0xffff)
-            indices = list(cmap[char_code] for char_code in xrange(start_code[i], end_code[i] + 1))
-            if indices == list(xrange(indices[0], indices[0] + len(indices))):
+        for i in range(len(end_code)-1): # skip the closing codes (0xffff)
+            indices = list(cmap[char_code] for char_code in range(start_code[i], end_code[i] + 1))
+            if indices == list(range(indices[0], indices[0] + len(indices))):
                 # indices is a contiguous list
                 id_delta_temp = set_id_delta(indices[0] - start_code[i])
                 id_delta.append(id_delta_temp)
@@ -290,4 +291,3 @@ class CmapTable(UnknownTable):
         fmt = b'>4HL'
         offset = calcsize(fmt)
         self.raw = pack(fmt, self.version, self.num_tables, 3, 1, offset) + self.bmp_table
-
@@ -11,6 +11,7 @@ from struct import unpack_from, calcsize
 from collections import OrderedDict, namedtuple
 
 from calibre.utils.fonts.sfnt.errors import UnsupportedFont
+from polyglot.builtins import range
 
 
 class Unpackable(object):
@@ -41,7 +42,7 @@ class SimpleListTable(list):
         self.read_extra_header(data)
 
         count = data.unpack('H')
-        for i in xrange(count):
+        for i in range(count):
             offset = data.unpack('H')
             self.append(self.child_class(raw, data.start_pos + offset))
         self.read_extra_footer(data)
@@ -66,7 +67,7 @@ class ListTable(OrderedDict):
         self.read_extra_header(data)
 
         count = data.unpack('H')
-        for i in xrange(count):
+        for i in range(count):
             tag, coffset = data.unpack('4sH')
             self[tag] = self.child_class(raw, data.start_pos + coffset)
 
@@ -93,7 +94,7 @@ class IndexTable(list):
         self.read_extra_header(data)
 
         count = data.unpack('H')
-        for i in xrange(count):
+        for i in range(count):
             self.append(data.unpack('H'))
 
     def read_extra_header(self, data):
@@ -167,6 +168,7 @@ def ExtensionSubstitution(raw, offset, subtable_map={}):
         raise UnsupportedFont('ExtensionSubstitution has unknown format: 0x%x'%subst_format)
     return subtable_map[extension_lookup_type](raw, offset+data.start_pos)
 
+
 CoverageRange = namedtuple('CoverageRange', 'start end start_coverage_index')
 
 
@@ -186,7 +188,7 @@ class Coverage(object):
         else:
             self.ranges = []
             ranges = data.unpack('%dH'%(3*count), single_special=False)
-            for i in xrange(count):
+            for i in range(count):
                 start, end, start_coverage_index = ranges[i*3:(i+1)*3]
                 self.ranges.append(CoverageRange(start, end, start_coverage_index))
 
@@ -249,4 +251,3 @@ class UnknownLookupSubTable(object):
             items.append(read_item(data))
             coverage_to_items_map.append(items)
         return coverage_to_items_map
-
@@ -12,6 +12,7 @@ from struct import unpack_from, calcsize, pack, error as struct_error
 from calibre.utils.fonts.sfnt import (UnknownTable, FixedProperty,
         max_power_of_two)
 from calibre.utils.fonts.sfnt.errors import UnsupportedFont
+from polyglot.builtins import range
 
 
 class KernTable(UnknownTable):
@@ -30,7 +31,7 @@ class KernTable(UnknownTable):
             raise UnsupportedFont('kern table has version: %x'%self._version)
         offset = 4 if (self._version == 0) else 8
         tables = []
-        for i in xrange(self.num_tables):
+        for i in range(self.num_tables):
            if self._version == 0:
                version, length, coverage = unpack_from(b'>3H', self.raw, offset)
                table_format = version
@@ -57,7 +58,7 @@ class KernTable(UnknownTable):
         offset = calcsize(headerfmt + b'4H')
         entries = []
         entrysz = calcsize(b'>2Hh')
-        for i in xrange(npairs):
+        for i in range(npairs):
             try:
                 left, right, value = unpack_from(b'>2Hh', raw, offset)
             except struct_error:
@@ -87,4 +88,3 @@ class KernTable(UnknownTable):
         header = pack(headerfmt, length, coverage, tuple_index)
         return header + pack(b'>4H', npairs, search_range, entry_selector,
                 range_shift) + entries
-
@@ -11,6 +11,7 @@ from struct import calcsize, unpack_from, pack
 from operator import itemgetter
 
 from calibre.utils.fonts.sfnt import UnknownTable
+from polyglot.builtins import range
 
 
 class LocaTable(UnknownTable):
@@ -46,7 +47,7 @@ class LocaTable(UnknownTable):
             self.offset_map[glyph_id+1] = offset + sz
         # Fix all zero entries to be the same as the previous entry, which
         # means that if the ith entry is zero, the i-1 glyph is not present.
-        for i in xrange(1, len(self.offset_map)):
+        for i in range(1, len(self.offset_map)):
             if self.offset_map[i] == 0:
                 self.offset_map[i] = self.offset_map[i-1]
 
@@ -59,9 +60,7 @@ class LocaTable(UnknownTable):
     def dump_glyphs(self, sfnt):
         if not hasattr(self, 'offset_map'):
             self.load_offsets(sfnt[b'head'], sfnt[b'maxp'])
-        for i in xrange(len(self.offset_map)-1):
+        for i in range(len(self.offset_map)-1):
             off, noff = self.offset_map[i], self.offset_map[i+1]
             if noff != off:
                 print('Glyph id:', i, 'size:', noff-off)
-
-
@@ -15,7 +15,7 @@ from functools import partial
 from calibre.utils.icu import safe_chr, ord_string
 from calibre.utils.fonts.sfnt.container import Sfnt
 from calibre.utils.fonts.sfnt.errors import UnsupportedFont, NoGlyphs
-from polyglot.builtins import unicode_type
+from polyglot.builtins import unicode_type, range
 
 # TrueType outlines {{{
 
@@ -115,7 +115,7 @@ def subset(raw, individual_chars, ranges=(), warnings=None):
 
     chars = set(map(safe_ord, individual_chars))
     for r in ranges:
-        chars |= set(xrange(safe_ord(r[0]), safe_ord(r[1])+1))
+        chars |= set(range(safe_ord(r[0]), safe_ord(r[1])+1))
 
     # Always add the space character for ease of use from the command line
     if safe_ord(' ') not in chars:
@@ -307,10 +307,10 @@ def test_mem():
     start_mem = memory()
     raw = P('fonts/liberation/LiberationSerif-Regular.ttf', data=True)
     calls = 1000
-    for i in xrange(calls):
+    for i in range(calls):
         subset(raw, (), (('a', 'z'),))
     del raw
-    for i in xrange(3):
+    for i in range(3):
         gc.collect()
     print ('Leaked memory per call:', (memory() - start_mem)/calls*1024, 'KB')
 
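Note on the replacement pattern: every hunk above swaps a call to the Python 2 builtin xrange() for range() imported from polyglot.builtins. The polyglot.builtins module itself is not part of this diff, so the snippet below is only a minimal sketch of the kind of shim such an import assumes: on Python 2 it rebinds the name to the lazy xrange(), on Python 3 it simply re-exports the builtin range(), which is already lazy.

    # Hypothetical sketch of a range shim, not the actual polyglot.builtins source.
    import sys

    if sys.version_info.major >= 3:
        import builtins
        range = builtins.range   # Python 3: the builtin range() is already a lazy sequence
    else:
        range = xrange           # noqa: F821 -- Python 2 only: avoid building a full list

With a shim like this, call sites such as "for i in range(self.num_tables):" behave identically on both interpreters, iterating without materialising an intermediate list.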