Mirror of https://github.com/kovidgoyal/calibre.git
Move backup I/O into backup thread from GUI thread to prevent GUI slowdown when the calibre library is on a slow device like a network share
commit 5b8a645050
parent 42ec47607c

@@ -28,6 +28,7 @@ class MetadataBackup(Thread): # {{{
         self.daemon = True
         self.db = db
         self.dump_func = dump_func
+        self.dump_queue = Queue()
         self.keep_running = True
 
     def stop(self):
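The new dump_queue gives the backup thread a thread-safe hand-off point: other code serializes metadata and enqueues (path, raw) pairs, while this daemon thread does all of the actual file writing. Below is a minimal sketch of that structure in modern Python, with illustrative names only (BackupWriter is not calibre's class):

import time
from queue import Queue, Empty
from threading import Thread

class BackupWriter(Thread):
    # Simplified stand-in for MetadataBackup: it owns the dump_queue and
    # performs every file write off the thread that produced the metadata.

    def __init__(self):
        Thread.__init__(self)
        self.daemon = True            # do not block interpreter shutdown
        self.dump_queue = Queue()     # filled elsewhere with (path, raw) pairs
        self.keep_running = True

    def stop(self):
        self.keep_running = False

    def run(self):
        while self.keep_running:
            try:
                path, raw = self.dump_queue.get(timeout=1)
            except Empty:
                continue              # nothing queued yet, keep polling
            with open(path, 'wb') as f:
                f.write(raw)          # slow I/O stays on this thread
            time.sleep(0.2)           # throttle writes, as the real thread does
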
@@ -42,13 +43,32 @@ class MetadataBackup(Thread): # {{{
             except:
                 # Happens during interpreter shutdown
                 break
-            if self.dump_func([id_]) is None:
+            if self.dump_func([id_], dump_queue=self.dump_queue) is None:
                 # An exception occurred in dump_func, retry once
-                prints('Failed to backup metadata for id:', id_, 'once')
+                prints('Failed to get backup metadata for id:', id_, 'once')
                 time.sleep(2)
-                if not self.dump_func([id_]):
-                    prints('Failed to backup metadata for id:', id_, 'again, giving up')
-            time.sleep(0.9) # Limit to one per second
+                if not self.dump_func([id_], dump_queue=self.dump_queue):
+                    prints('Failed to get backup metadata for id:', id_, 'again, giving up')
+            while True:
+                try:
+                    path, raw = self.dump_queue.get_nowait()
+                except:
+                    break
+                else:
+                    try:
+                        with open(path, 'wb') as f:
+                            f.write(raw)
+                    except:
+                        prints('Failed to write backup metadata for id:', id_, 'once')
+                        time.sleep(2)
+                        try:
+                            with open(path, 'wb') as f:
+                                f.write(raw)
+                        except:
+                            prints('Failed to write backup metadata for id:', id_,
+                                    'again, giving up')
+
+            time.sleep(0.2) # Limit to five per second
 
 # }}}
 
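The loop added above drains whatever dump_func queued using get_nowait(), and retries a failed write once (after a two-second pause) before giving up. A condensed sketch of that drain-and-retry pattern follows; the helper name write_queued is mine, not calibre's:

import time
from queue import Queue, Empty

def write_queued(dump_queue, retry_delay=2):
    # Drain all (path, raw) pairs currently in dump_queue; retry each
    # failed write once, mirroring MetadataBackup.run() in simplified form.
    while True:
        try:
            path, raw = dump_queue.get_nowait()
        except Empty:
            break                         # queue is empty, stop draining
        try:
            with open(path, 'wb') as f:
                f.write(raw)
        except OSError:
            time.sleep(retry_delay)       # e.g. a momentarily busy network share
            try:
                with open(path, 'wb') as f:
                    f.write(raw)
            except OSError:
                print('Giving up on metadata backup for', path)

Because get_nowait() never blocks, the backup thread spends no time waiting here; once the queue is empty it goes straight back to waiting for newly dirtied book ids.
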
@@ -566,7 +566,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
     def metadata_for_field(self, key):
         return self.field_metadata[key]
 
-    def dump_metadata(self, book_ids=None, remove_from_dirtied=True, commit=True):
+    def dump_metadata(self, book_ids=None, remove_from_dirtied=True,
+            commit=True, dump_queue=None):
         'Write metadata for each record to an individual OPF file'
         if book_ids is None:
             book_ids = [x[0] for x in self.conn.get(
@@ -580,9 +581,13 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
                 # cover is set/removed
                 mi.cover = 'cover.jpg'
             raw = metadata_to_opf(mi)
-            path = self.abspath(book_id, index_is_id=True)
-            with open(os.path.join(path, 'metadata.opf'), 'wb') as f:
-                f.write(raw)
+            path = os.path.join(self.abspath(book_id, index_is_id=True),
+                    'metadata.opf')
+            if dump_queue is None:
+                with open(path, 'wb') as f:
+                    f.write(raw)
+            else:
+                dump_queue.put((path, raw))
             if remove_from_dirtied:
                 self.conn.execute('DELETE FROM metadata_dirtied WHERE book=?',
                         (book_id,))
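On the producer side, dump_metadata() now only builds the OPF bytes and the target path; when a dump_queue is passed in, the write itself is deferred to the backup thread. A simplified sketch of that branch (dump_one, serialize_opf and library_root are hypothetical stand-ins, not calibre's API):

import os

def dump_one(book_id, library_root, serialize_opf, dump_queue=None):
    # Serialize one book's metadata; write directly only when no queue is
    # supplied, otherwise hand the bytes to the backup thread for writing.
    raw = serialize_opf(book_id)                  # in-memory work, stays fast
    path = os.path.join(library_root, str(book_id), 'metadata.opf')
    if dump_queue is None:
        with open(path, 'wb') as f:               # old behaviour: write here
            f.write(raw)
    else:
        dump_queue.put((path, raw))               # new behaviour: defer the I/O
    return path

Keeping the dump_queue parameter optional preserves the old synchronous behaviour for callers that do not run a backup thread.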