commit 2c7da9966c

    GwR revisions supporting Save to disk for .mbp, .tan
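For orientation, a minimal sketch of the offset-to-location arithmetic the driver below uses for .mbp and .tan annotations, assuming only the constants visible in the diff; the helper names mobi_displayed_location and topaz_displayed_location are illustrative, not calibre APIs:

    # Sketch only -- the constants are the ones used in the diff below.
    MAGIC_MOBI_CONSTANT = 150     # bytes per displayed location for MOBI (.mbp) annotations
    MAGIC_TOPAZ_CONSTANT = 33.33  # approximate divisor for Topaz (.tan) offsets

    def mobi_displayed_location(offset):
        # Mirrors `end_loc/MAGIC_MOBI_CONSTANT + 1` in the .mbp branch.
        return offset // MAGIC_MOBI_CONSTANT + 1

    def topaz_displayed_location(offset):
        # Mirrors `location/MAGIC_TOPAZ_CONSTANT + 1` in the .tan branch.
        return int(offset / MAGIC_TOPAZ_CONSTANT) + 1

    print(mobi_displayed_location(4500))   # -> 31
    print(topaz_displayed_location(4500))  # -> 136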
@@ -12,6 +12,7 @@ from cStringIO import StringIO
 from struct import unpack
 
 from calibre.devices.usbms.driver import USBMS
+from calibre.ebooks.metadata.topaz import get_metadata as get_topaz_metadata
 
 class KINDLE(USBMS):
 
@@ -159,11 +160,12 @@ class Bookmark():
         self.id = id
         self.last_read = 0
         self.last_read_location = 0
+        self.path = path
         self.timestamp = 0
         self.user_notes = None
 
-        self.get_bookmark_data(path)
-        self.get_book_length(path)
+        self.get_bookmark_data()
+        self.get_book_length()
         try:
             self.percent_read = float(100*self.last_read / self.book_length)
         except:
@@ -180,13 +182,13 @@ class Bookmark():
                 stop, = unpack('>I', self.data[offoff + 8:offoff + 12])
             return StreamSlicer(self.stream, start, stop)
 
-    def get_bookmark_data(self, path):
+    def get_bookmark_data(self):
         ''' Return the timestamp and last_read_location '''
         from calibre.ebooks.metadata.mobi import StreamSlicer
         user_notes = {}
         if self.bookmark_extension == 'mbp':
             MAGIC_MOBI_CONSTANT = 150
-            with open(path,'rb') as f:
+            with open(self.path,'rb') as f:
                 stream = StringIO(f.read())
                 data = StreamSlicer(stream)
                 self.timestamp, = unpack('>I', data[0x24:0x28])
@@ -204,7 +206,7 @@ class Bookmark():
                 eo = bpar_offset + bpar_len
 
                 # Walk bookmark entries
-                #print " --- %s --- " % path
+                #print " --- %s --- " % self.path
                 current_entry = 1
                 sig = data[eo:eo+4]
                 previous_block = None
@@ -243,18 +245,28 @@ class Bookmark():
                 while sig == 'BKMK':
                     # Fix start location for Highlights using BKMK data
                     end_loc, = unpack('>I', data[eo+0x10:eo+0x14])
-                    if end_loc in user_notes and user_notes[end_loc]['type'] == 'Highlight':
+
+                    if end_loc in user_notes and \
+                       (user_notes[end_loc]['type'] == 'Highlight' or \
+                        user_notes[end_loc]['type'] == 'Note'):
                         # Switch location to start (0x08:0x0c)
                         start, = unpack('>I', data[eo+8:eo+12])
                         user_notes[start] = user_notes[end_loc]
+                        '''
+                        print " %s: swapping 0x%x (%d) to 0x%x (%d)" % (user_notes[end_loc]['type'],
+                                                                        end_loc,
+                                                                        end_loc/MAGIC_MOBI_CONSTANT + 1,
+                                                                        start,
+                                                                        start//MAGIC_MOBI_CONSTANT + 1)
+                        '''
                         user_notes[start]['displayed_location'] = start/MAGIC_MOBI_CONSTANT + 1
                         user_notes.pop(end_loc)
-                    elif end_loc in user_notes and user_notes[end_loc]['type'] == 'Note':
-                        # Skip duplicate bookmarks for notes
-                        pass
                     else:
                         # If a bookmark coincides with a user annotation, the locs could
                         # be the same - cheat by nudging -1
+                        # Skip bookmark for last_read_location
+                        if end_loc != self.last_read:
+                            # print " adding Bookmark at 0x%x (%d)" % (end_loc, end_loc/MAGIC_MOBI_CONSTANT + 1)
                             displayed_location = end_loc/MAGIC_MOBI_CONSTANT + 1
                             user_notes[end_loc - 1] = dict(id=self.id,
                                                            displayed_location=displayed_location,
@@ -265,10 +277,41 @@ class Bookmark():
                     sig = data[eo:eo+4]
 
         elif self.bookmark_extension == 'tan':
             # TAN bookmarks
+            def get_topaz_highlight(displayed_location):
+                # Parse My Clippings.txt for a matching highlight
+                book_fs = self.path.replace('.%s' % self.bookmark_extension,'.%s' % self.book_format)
+                with open(book_fs,'rb') as f2:
+                    stream = StringIO(f2.read())
+                    mi = get_topaz_metadata(stream)
+                my_clippings = self.path
+                split = my_clippings.find('documents') + len('documents/')
+                my_clippings = my_clippings[:split] + "My Clippings.txt"
+                try:
+                    with open(my_clippings, 'r') as f2:
+                        marker_found = 0
+                        text = ''
+                        search_str1 = '%s (%s)' % (mi.title, str(mi.author[0]))
+                        search_str2 = '- Highlight Loc. %d' % (displayed_location)
+                        for line in f2:
+                            if marker_found == 0:
+                                if line.startswith(search_str1):
+                                    marker_found = 1
+                            elif marker_found == 1:
+                                if line.startswith(search_str2):
+                                    marker_found = 2
+                            elif marker_found == 2:
+                                if line.startswith('=========='):
+                                    break
+                                text += line.strip()
+                        else:
+                            raise error
+                except:
+                    text = '(Unable to extract highlight text from My Clippings.txt)'
+                return text
+
             MAGIC_TOPAZ_CONSTANT = 33.33
-            self.timestamp = os.path.getmtime(path)
-            with open(path,'rb') as f:
+            self.timestamp = os.path.getmtime(self.path)
+            with open(self.path,'rb') as f:
                 stream = StringIO(f.read())
                 data = StreamSlicer(stream)
                 self.last_read = int(unpack('>I', data[5:9])[0])
@@ -285,7 +328,7 @@ class Bookmark():
                     e_type = 'Bookmark'
                 elif e_type == 1:
                     e_type = 'Highlight'
-                    text = "(Topaz highlights not yet supported)"
+                    text = get_topaz_highlight(location/MAGIC_TOPAZ_CONSTANT + 1)
                 elif e_type == 2:
                     e_type = 'Note'
                     text = data[e_base+0x10:e_base+0x10+text_len]
@@ -293,10 +336,9 @@ class Bookmark():
                     e_type = 'Unknown annotation type'
 
                 if self.book_format in ['tpz','azw1']:
-                    # *** This needs fine-tuning
                     displayed_location = location/MAGIC_TOPAZ_CONSTANT + 1
                 elif self.book_format == 'pdf':
-                    # *** This needs testing
+                    # *** This needs implementation
                     displayed_location = location
                 user_notes[location] = dict(id=self.id,
                                             displayed_location=displayed_location,
@@ -315,16 +357,9 @@ class Bookmark():
             print "unsupported bookmark_extension: %s" % self.bookmark_extension
         self.user_notes = user_notes
 
-        '''
-        for location in sorted(user_notes):
-            print ' Location %d: %s\n%s' % (user_notes[location]['displayed_location'],
-                                            user_notes[location]['type'],
-                                            '\n'.join(self.textdump(user_notes[location]['text'])))
-        '''
-
-    def get_book_length(self, path):
+    def get_book_length(self):
         from calibre.ebooks.metadata.mobi import StreamSlicer
-        book_fs = path.replace('.%s' % self.bookmark_extension,'.%s' % self.book_format)
+        book_fs = self.path.replace('.%s' % self.bookmark_extension,'.%s' % self.book_format)
 
         self.book_length = 0
         if self.bookmark_extension == 'mbp':
@@ -25,7 +25,7 @@ class DRMError(ValueError):
 BOOK_EXTENSIONS = ['lrf', 'rar', 'zip', 'rtf', 'lit', 'txt', 'htm', 'xhtm',
                    'html', 'xhtml', 'pdf', 'pdb', 'prc', 'mobi', 'azw', 'doc',
                    'epub', 'fb2', 'djvu', 'lrx', 'cbr', 'cbz', 'cbc', 'oebzip',
-                   'rb', 'imp', 'odt', 'chm', 'tpz', 'azw1', 'pml']
+                   'rb', 'imp', 'odt', 'chm', 'tpz', 'azw1', 'pml', 'mbp', 'tan']
 
 class HTMLRenderer(object):
 
@@ -336,6 +336,13 @@ class MetadataUpdater(object):
         if mi.publisher:
             update_exth_record((101, mi.publisher.encode(self.codec, 'replace')))
         if mi.comments:
+            # Strip user annotations
+            a_offset = mi.comments.find('<div class="user_annotations">')
+            ad_offset = mi.comments.find('<hr class="annotations_divider" />')
+            if a_offset >= 0:
+                mi.comments = mi.comments[:a_offset]
+            if ad_offset >= 0:
+                mi.comments = mi.comments[:ad_offset]
             update_exth_record((103, mi.comments.encode(self.codec, 'replace')))
         if mi.isbn:
             update_exth_record((104, mi.isbn.encode(self.codec, 'replace')))
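Both the EXTH updater above and the catalog code further down trim any previously embedded annotation block from the stored comments before reusing them. A rough, self-contained sketch of that trimming, using the same HTML markers as the diff; the function name strip_user_annotations is illustrative only:

    def strip_user_annotations(comments):
        # Cut comments off at either annotation marker, matching the find()/slice logic in the diff.
        for marker in ('<div class="user_annotations">',
                       '<hr class="annotations_divider" />'):
            offset = comments.find(marker)
            if offset >= 0:
                comments = comments[:offset]
        return comments

    # Example: a previously appended annotations block is dropped, the original text kept.
    print(strip_user_annotations('A fine book.<div class="user_annotations">...</div>'))
    # -> 'A fine book.'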
@@ -1074,6 +1074,11 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI):
                     mi.comments = unicode(user_notes_soup.prettify())
                     # Update library comments
                     self.db.set_comment(id, mi.comments)
+                    '''
+                    # Add bookmark file to id
+                    self.db.add_format_with_hooks(id, bm.bookmark.bookmark_extension,
+                                                  bm.bookmark.path, index_is_id=True)
+                    '''
                     self.update_progress.emit(i)
             self.update_done.emit()
             self.done_callback(self.am.keys())
@@ -1516,6 +1521,12 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI):
         opts = config().parse()
         if single_format is not None:
             opts.formats = single_format
+            # Special case for Kindle annotation files
+            if single_format.lower() == 'mbp' or single_format == 'tan':
+                opts.to_lowercase = False
+                opts.save_cover = False
+                opts.write_opf = False
+                opts.template = opts.send_template
         if single_dir:
             opts.template = opts.template.split('/')[-1].strip()
         if not opts.template:
@@ -1052,7 +1052,16 @@ class EPUB_MOBI(CatalogPlugin):
             this_title['rating'] = record['rating'] if record['rating'] else 0
             this_title['date'] = strftime(u'%B %Y', record['pubdate'].timetuple())
             this_title['timestamp'] = record['timestamp']
 
+            if record['comments']:
+                # Strip annotations
+                a_offset = record['comments'].find('<div class="user_annotations">')
+                ad_offset = record['comments'].find('<hr class="annotations_divider" />')
+                if a_offset >= 0:
+                    record['comments'] = record['comments'][:a_offset]
+                if ad_offset >= 0:
+                    record['comments'] = record['comments'][:ad_offset]
+
             this_title['description'] = self.markdownComments(record['comments'])
             paras = BeautifulSoup(this_title['description']).findAll('p')
             tokens = []
@@ -125,7 +125,7 @@ With recent reader iterations, SONY, in all its wisdom has decided to try to force you to
 use their software. If you install it, it auto-launches whenever you connect the reader.
 If you don't want to uninstall it altogether, there are a couple of tricks you can use. The
 simplest is to simply re-name the executable file that launches the library program. More detail
-`here http://www.mobileread.com/forums/showthread.php?t=65809`_.
+`here <http://www.mobileread.com/forums/showthread.php?t=65809>`_.
 
 Can I use the collections feature of the SONY reader?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
File diff suppressed because it is too large