Nicer dump headers for joint KF8 files

Kovid Goyal 2012-04-24 09:33:37 +05:30
parent 6b9fc3d0ab
commit 6d3bd67c93

@@ -337,11 +337,12 @@ class MOBIHeader(object): # {{{
         # The following are all relative to the position of the header record
         # make them absolute for ease of debugging
-        for x in ('sect_idx', 'skel_idx', 'datp_idx', 'oth_idx',
+        self.relative_records = {'sect_idx', 'skel_idx', 'datp_idx', 'oth_idx',
                 'meta_orth_indx', 'huffman_record_offset',
                 'first_non_book_record', 'datp_record_offset', 'fcis_number',
                 'flis_number', 'primary_index_record', 'fdst_idx',
-                'first_image_index'):
+                'first_image_index'}
+        for x in self.relative_records:
             if hasattr(self, x) and getattr(self, x) != NULL_INDEX:
                 setattr(self, x, self.header_offset+getattr(self, x))
@@ -355,70 +356,79 @@ class MOBIHeader(object): # {{{
     def __str__(self):
         ans = ['*'*20 + ' MOBI %d Header '%self.file_version+ '*'*20]
         a = ans.append
-        i = lambda d, x : a('%s (null value: %d): %d'%(d, NULL_INDEX, x))
-        ans.append('Compression: %s'%self.compression)
-        ans.append('Unused: %r'%self.unused)
-        ans.append('Number of text records: %d'%self.number_of_text_records)
-        ans.append('Text record size: %d'%self.text_record_size)
-        ans.append('Encryption: %s'%self.encryption_type)
-        ans.append('Unknown: %r'%self.unknown)
-        ans.append('Identifier: %r'%self.identifier)
-        ans.append('Header length: %d'% self.length)
-        ans.append('Type: %s'%self.type)
-        ans.append('Encoding: %s'%self.encoding)
-        ans.append('UID: %r'%self.uid)
-        ans.append('File version: %d'%self.file_version)
-        i('Meta Orth Index (Sections index in KF8)', self.meta_orth_indx)
-        i('Meta Infl Index', self.meta_infl_indx)
-        ans.append('Secondary index record: %d (null val: %d)'%(
-            self.secondary_index_record, NULL_INDEX))
-        ans.append('Reserved: %r'%self.reserved)
-        ans.append('First non-book record (null value: %d): %d'%(NULL_INDEX,
-            self.first_non_book_record))
-        ans.append('Full name offset: %d'%self.fullname_offset)
-        ans.append('Full name length: %d bytes'%self.fullname_length)
-        ans.append('Langcode: %r'%self.locale_raw)
-        ans.append('Language: %s'%self.language)
-        ans.append('Sub language: %s'%self.sublanguage)
-        ans.append('Input language: %r'%self.input_language)
-        ans.append('Output language: %r'%self.output_langauage)
-        ans.append('Min version: %d'%self.min_version)
-        ans.append('First Image index: %d'%self.first_image_index)
-        ans.append('Huffman record offset: %d'%self.huffman_record_offset)
-        ans.append('Huffman record count: %d'%self.huffman_record_count)
-        ans.append('DATP record offset: %r'%self.datp_record_offset)
-        ans.append('DATP record count: %r'%self.datp_record_count)
-        ans.append('EXTH flags: %s (%s)'%(bin(self.exth_flags)[2:], self.has_exth))
+
+        def i(d, x):
+            x = 'NULL' if x == NULL_INDEX else x
+            a('%s: %s'%(d, x))
+
+        def r(d, attr):
+            x = getattr(self, attr)
+            if attr in self.relative_records and x != NULL_INDEX:
+                a('%s: Absolute: %d Relative: %d'%(d, x, x-self.header_offset))
+            else:
+                i(d, x)
+
+        a('Compression: %s'%self.compression)
+        a('Unused: %r'%self.unused)
+        a('Number of text records: %d'%self.number_of_text_records)
+        a('Text record size: %d'%self.text_record_size)
+        a('Encryption: %s'%self.encryption_type)
+        a('Unknown: %r'%self.unknown)
+        a('Identifier: %r'%self.identifier)
+        a('Header length: %d'% self.length)
+        a('Type: %s'%self.type)
+        a('Encoding: %s'%self.encoding)
+        a('UID: %r'%self.uid)
+        a('File version: %d'%self.file_version)
+        r('Meta Orth Index', 'meta_orth_indx')
+        r('Meta Infl Index', 'meta_infl_indx')
+        r('Secondary index record', 'secondary_index_record')
+        a('Reserved: %r'%self.reserved)
+        r('First non-book record', 'first_non_book_record')
+        a('Full name offset: %d'%self.fullname_offset)
+        a('Full name length: %d bytes'%self.fullname_length)
+        a('Langcode: %r'%self.locale_raw)
+        a('Language: %s'%self.language)
+        a('Sub language: %s'%self.sublanguage)
+        a('Input language: %r'%self.input_language)
+        a('Output language: %r'%self.output_langauage)
+        a('Min version: %d'%self.min_version)
+        r('First Image index', 'first_image_index')
+        r('Huffman record offset', 'huffman_record_offset')
+        a('Huffman record count: %d'%self.huffman_record_count)
+        r('DATP record offset', 'datp_record_offset')
+        a('DATP record count: %r'%self.datp_record_count)
+        a('EXTH flags: %s (%s)'%(bin(self.exth_flags)[2:], self.has_exth))
         if self.has_drm_data:
-            ans.append('Unknown3: %r'%self.unknown3)
-            ans.append('DRM Offset: %s'%self.drm_offset)
-            ans.append('DRM Count: %s'%self.drm_count)
-            ans.append('DRM Size: %s'%self.drm_size)
-            ans.append('DRM Flags: %r'%self.drm_flags)
+            a('Unknown3: %r'%self.unknown3)
+            r('DRM Offset', 'drm_offset')
+            a('DRM Count: %s'%self.drm_count)
+            a('DRM Size: %s'%self.drm_size)
+            a('DRM Flags: %r'%self.drm_flags)
         if self.has_extra_data_flags:
-            ans.append('Unknown4: %r'%self.unknown4)
-            ans.append('FDST Index: %d'% self.fdst_idx)
-            ans.append('FDST Count: %d'% self.fdst_count)
-            ans.append('FCIS number: %d'% self.fcis_number)
-            ans.append('FCIS count: %d'% self.fcis_count)
-            ans.append('FLIS number: %d'% self.flis_number)
-            ans.append('FLIS count: %d'% self.flis_count)
-            ans.append('Unknown6: %r'% self.unknown6)
-            ans.append('SRCS record index: %d'%self.srcs_record_index)
-            ans.append('Number of SRCS records?: %d'%self.num_srcs_records)
-            ans.append('Unknown7: %r'%self.unknown7)
-            ans.append(('Extra data flags: %s (has multibyte: %s) '
+            a('Unknown4: %r'%self.unknown4)
+            r('FDST Index', 'fdst_idx')
+            a('FDST Count: %d'% self.fdst_count)
+            r('FCIS number', 'fcis_number')
+            a('FCIS count: %d'% self.fcis_count)
+            r('FLIS number', 'flis_number')
+            a('FLIS count: %d'% self.flis_count)
+            a('Unknown6: %r'% self.unknown6)
+            r('SRCS record index', 'srcs_record_index')
+            a('Number of SRCS records?: %d'%self.num_srcs_records)
+            a('Unknown7: %r'%self.unknown7)
+            a(('Extra data flags: %s (has multibyte: %s) '
                 '(has indexing: %s) (has uncrossable breaks: %s)')%(
                     bin(self.extra_data_flags), self.has_multibytes,
                     self.has_indexing_bytes, self.has_uncrossable_breaks ))
-            ans.append('Primary index record (null value: %d): %d'%(NULL_INDEX,
-                self.primary_index_record))
+            r('NCX index', 'primary_index_record')
         if self.length >= 248:
-            i('Sections Index', self.sect_idx)
-            i('SKEL Index', self.skel_idx)
-            i('DATP Index', self.datp_idx)
-            i('Other Index', self.oth_idx)
+            r('Sections Index', 'sect_idx')
+            r('SKEL Index', 'skel_idx')
+            r('DATP Index', 'datp_idx')
+            r('Other Index', 'oth_idx')
             if self.unknown9:
                 a('Unknown9: %r'%self.unknown9)
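
Illustration (not part of the commit): with the new r() helper, any record offset listed in relative_records is dumped both as an absolute record number and relative to the MOBI header record, and unset offsets print as the word NULL instead of the raw sentinel. The sketch below is a minimal stand-in class, not calibre's MOBIHeader; the attribute values and the NULL_INDEX value of 0xffffffff are assumptions for the demo.

# Minimal sketch of the new dump helpers, using a fake header object.
NULL_INDEX = 0xffffffff  # assumed "no such record" sentinel

class FakeHeader(object):
    header_offset = 10                           # record number of the MOBI header
    relative_records = {'skel_idx', 'sect_idx'}  # offsets stored relative to the header
    skel_idx = 14                                # already made absolute during parsing
    sect_idx = NULL_INDEX                        # index not present in this file
    fdst_count = 3

    def __str__(self):
        ans = []
        a = ans.append

        def i(d, x):
            # print the word NULL instead of the raw sentinel value
            x = 'NULL' if x == NULL_INDEX else x
            a('%s: %s' % (d, x))

        def r(d, attr):
            # header-relative records are shown in both absolute and relative form
            x = getattr(self, attr)
            if attr in self.relative_records and x != NULL_INDEX:
                a('%s: Absolute: %d Relative: %d' % (d, x, x - self.header_offset))
            else:
                i(d, x)

        r('SKEL Index', 'skel_idx')       # -> SKEL Index: Absolute: 14 Relative: 4
        r('Sections Index', 'sect_idx')   # -> Sections Index: NULL
        i('FDST Count', self.fdst_count)  # -> FDST Count: 3
        return '\n'.join(ans)

print(FakeHeader())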