Fix OPF backup thread

Kovid Goyal 2010-09-26 12:17:37 -06:00
commit d6113a4d6f
3 changed files with 28 additions and 28 deletions

@@ -576,9 +576,9 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, # {{{
                    s.exit()
            except:
                pass
            time.sleep(2)
        except KeyboardInterrupt:
            pass
        time.sleep(2)
        self.hide_windows()
        return True
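
A note on the hunk above: it appears to relocate the time.sleep(2) call from inside the try block to after the except KeyboardInterrupt handler, so the pause runs even when shutdown is interrupted with Ctrl-C; inside the try it would be skipped as soon as the interrupt propagated. A minimal standalone sketch of the two orderings (not calibre code; shutdown_old, shutdown_new and cleanup are made-up names illustrating the control flow only):

import time

def shutdown_old(cleanup):
    # Old ordering: the sleep lives inside the try block, so a
    # KeyboardInterrupt raised during cleanup() jumps straight to the
    # handler and the sleep never happens.
    try:
        cleanup()
        time.sleep(2)
    except KeyboardInterrupt:
        pass

def shutdown_new(cleanup):
    # New ordering: the sleep sits after the whole try/except, so it
    # runs whether or not cleanup() was interrupted.
    try:
        cleanup()
    except KeyboardInterrupt:
        pass
    time.sleep(2)

if __name__ == '__main__':
    shutdown_new(lambda: None)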

@@ -39,6 +39,7 @@ class MetadataBackup(Thread): # {{{
        self.do_write = FunctionDispatcher(self.write)
        self.get_metadata_for_dump = FunctionDispatcher(db.get_metadata_for_dump)
        self.clear_dirtied = FunctionDispatcher(db.clear_dirtied)
        self.set_dirtied = FunctionDispatcher(db.dirtied)

    def stop(self):
        self.keep_running = False
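
The new self.set_dirtied wrapper follows the same pattern as the other database calls in this thread: FunctionDispatcher marshals the call over to the GUI thread, so the backup worker never touches the database object directly. A rough standalone sketch of that marshalling idea, assuming a simple queue-based dispatcher rather than calibre's actual FunctionDispatcher:

import threading
import queue

class SimpleDispatcher:
    # Hypothetical stand-in for a cross-thread dispatcher: calls made from
    # any thread are queued and executed later by the owning thread.
    def __init__(self, func):
        self.func = func
        self.calls = queue.Queue()

    def __call__(self, *args, **kwargs):
        # Called from the worker thread: just enqueue the request.
        self.calls.put((args, kwargs))

    def drain(self):
        # Called from the owning thread: run every queued request.
        while not self.calls.empty():
            args, kwargs = self.calls.get()
            self.func(*args, **kwargs)

def dirtied(book_ids):
    print('re-marking books as dirty:', book_ids)

set_dirtied = SimpleDispatcher(dirtied)

worker = threading.Thread(target=lambda: set_dirtied([42]))
worker.start()
worker.join()
set_dirtied.drain()  # the owning thread actually performs the call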
@@ -68,8 +69,9 @@ class MetadataBackup(Thread): # {{{
                traceback.print_exc()
                continue
            # at this point the dirty indication is off
            if mi is None:
                self.clear_dirtied([id_])
                continue
            # Give the GUI thread a chance to do something. Python threads don't
@@ -79,6 +81,7 @@ class MetadataBackup(Thread): # {{{
            try:
                raw = metadata_to_opf(mi)
            except:
                self.set_dirtied([id_])
                prints('Failed to convert to opf for id:', id_)
                traceback.print_exc()
                continue
@@ -92,16 +95,11 @@ class MetadataBackup(Thread): # {{{
            try:
                self.do_write(path, raw)
            except:
                self.set_dirtied([id_])
                prints('Failed to write backup metadata for id:', id_,
                        'again, giving up')
                continue
            time.sleep(0.1) # Give the GUI thread a chance to do something
            try:
                self.clear_dirtied([id_])
            except:
                prints('Failed to clear dirtied for id:', id_)

    def write(self, path, raw):
        with open(path, 'wb') as f:
            f.write(raw)
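
Taken together, the hunks above appear to invert the thread's bookkeeping: the dirtied flag is now cleared up front (inside get_metadata_for_dump), and each failure path, whether metadata_to_opf or the file write fails, puts the book back on the dirty list via set_dirtied so the backup is retried later, instead of clearing the flag only after a successful write. A simplified, self-contained restatement of that loop body (plain Python, no GUI marshalling; all helper callables are hypothetical):

def backup_one(book_id, get_metadata_for_dump, to_opf, write_file, set_dirtied):
    # The dirtied indicator is assumed to be cleared inside
    # get_metadata_for_dump(); on any failure we re-dirty the book so the
    # backup is attempted again on a later pass.
    path, mi = get_metadata_for_dump(book_id)
    if mi is None:
        return  # book was deleted while the job sat in the queue
    try:
        raw = to_opf(mi)
    except Exception:
        set_dirtied([book_id])
        return
    try:
        write_file(path, raw)
    except Exception:
        set_dirtied([book_id])

if __name__ == '__main__':
    backup_one(
        1,
        get_metadata_for_dump=lambda book_id: ('/tmp/metadata.opf', {'title': 'x'}),
        to_opf=lambda mi: b'<opf/>',
        write_file=lambda path, raw: None,   # pretend write
        set_dirtied=lambda ids: print('re-dirtied', ids),
    )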

@@ -573,8 +573,6 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
        The last step is clearing the indicator
        '''
        for book_id in book_ids:
            if not self.data.has_id(book_id):
                continue
            self.conn.execute('DELETE FROM metadata_dirtied WHERE book=?',
                    (book_id,))
            # if a later exception prevents the commit, then the dirtied
@@ -588,9 +586,6 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
            commit=True):
        '''
        Write metadata for each record to an individual OPF file
        :param dump_to: None or list. If list then instead of writing to file,
            data is append to list
        '''
        if book_ids is None:
            book_ids = [x[0] for x in self.conn.get(
@@ -598,23 +593,19 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
        for book_id in book_ids:
            if not self.data.has_id(book_id):
                continue
            path, mi = self.get_metadata_for_dump(book_id)
            path, mi = self.get_metadata_for_dump(book_id,
                    remove_from_dirtied=remove_from_dirtied)
            if path is None:
                continue
            try:
                raw = metadata_to_opf(mi)
                with open(path, 'wb') as f:
                    f.write(raw)
                if remove_from_dirtied:
                    self.conn.execute('DELETE FROM metadata_dirtied WHERE book=?',
                            (book_id,))
                    # if a later exception prevents the commit, then the dirtied
                    # table will still have the book. No big deal, because the OPF
                    # is there and correct. We will simply do it again on next
                    # start
                    self.dirtied_cache.discard(book_id)
            except:
                # Something went wrong. Put the book back on the dirty list
                self.dirtied([book_id])
        if commit:
            self.conn.commit()
        return True

    def dirtied(self, book_ids, commit=True):
        for book in book_ids:
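
For context on the database side of these hunks: metadata_dirtied is the bookkeeping table of books whose OPF backups are stale, and dirtied_cache mirrors it in memory. Clearing a book removes its row and cache entry; dirtying it puts them back. A toy sqlite3 version of that bookkeeping, assuming a bare one-column table rather than calibre's real schema and using simplified insert logic:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE metadata_dirtied(book INTEGER PRIMARY KEY)')
dirtied_cache = set()

def mark_dirtied(book_ids):
    # Rough analogue of LibraryDatabase2.dirtied(): record that these books
    # need their OPF backups rewritten.
    for book_id in book_ids:
        conn.execute('INSERT OR IGNORE INTO metadata_dirtied(book) VALUES (?)',
                (book_id,))
        dirtied_cache.add(book_id)
    conn.commit()

def clear_dirtied(book_ids):
    # Rough analogue of clear_dirtied(): the OPF on disk is now current, so
    # drop the row and the cache entry.
    for book_id in book_ids:
        conn.execute('DELETE FROM metadata_dirtied WHERE book=?', (book_id,))
        dirtied_cache.discard(book_id)
    conn.commit()

mark_dirtied([1, 2])
clear_dirtied([1])
print(dirtied_cache)  # {2}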
@@ -649,7 +640,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
        self.dirtied_cache = set()
        self.dirtied(book_ids)

    def get_metadata_for_dump(self, idx):
    def get_metadata_for_dump(self, idx, remove_from_dirtied=True):
        try:
            path = os.path.join(self.abspath(idx, index_is_id=True), 'metadata.opf')
            mi = self.get_metadata(idx, index_is_id=True)
@@ -658,7 +649,18 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
            # cover is set/removed
            mi.cover = 'cover.jpg'
        except:
            return (None, None)
            # This almost certainly means that the book has been deleted while
            # the backup operation sat in the queue.
            path, mi = (None, None)
        try:
            # clear the dirtied indicator. The user must put it back if
            # something goes wrong with writing the OPF
            if remove_from_dirtied:
                self.clear_dirtied([idx])
        except:
            # No real problem. We will just do it again.
            pass
        return (path, mi)

    def get_metadata(self, idx, index_is_id=False, get_cover=False):
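
As rewritten here, get_metadata_for_dump takes on two jobs the caller used to handle: a failure to build the path or metadata is treated as "the book was deleted while the job sat in the queue" and yields (None, None), and the dirtied indicator is cleared before the metadata is handed back, trusting the caller to re-dirty the book if the OPF write later fails. A standalone paraphrase of that shape, with made-up helper callables standing in for the library database methods:

import os

def get_metadata_for_dump(book_id, book_dir_for, metadata_for, clear_dirtied,
        remove_from_dirtied=True):
    # book_dir_for, metadata_for and clear_dirtied are hypothetical callables
    # standing in for the database methods used in the real code.
    try:
        path = os.path.join(book_dir_for(book_id), 'metadata.opf')
        mi = metadata_for(book_id)
    except Exception:
        # Most likely the book was deleted while this job waited in the queue.
        path, mi = None, None
    try:
        # Clear the dirty flag now; the caller must set it again if writing
        # the OPF fails.
        if remove_from_dirtied:
            clear_dirtied([book_id])
    except Exception:
        # Not a problem: the flag stays set and we simply try again later.
        pass
    return path, mi

Clearing failures are deliberately swallowed here, matching the comment in the diff: a flag that stays set only means the backup is attempted again on the next pass.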