Mirror of https://github.com/kovidgoyal/calibre.git
Rework the metadata backup so that it gets metadata on the GUI thread, computes the OPF on a separate thread, then writes the file on the GUI thread.
This commit is contained in:
parent 3e1cb3b5e0
commit c8dbd70546
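The split works because calibre's FunctionDispatcher (imported from calibre.gui2 in the hunks below) wraps a callable so that a worker thread can call it while the actual execution happens on the GUI thread, with the worker blocking until the result comes back. Below is a minimal sketch of that kind of dispatcher, for illustration only; the GuiDispatcher name, the PyQt5 imports and the Event-based handshake are assumptions of the sketch, not calibre's implementation.

# Minimal sketch of a GUI-thread dispatcher (illustrative, not calibre's
# FunctionDispatcher). It must be constructed in the GUI thread so that the
# connected slot runs there when the signal is emitted from a worker thread.
from threading import Event

from PyQt5.QtCore import QObject, pyqtSignal


class GuiDispatcher(QObject):

    _dispatch = pyqtSignal(object, object)

    def __init__(self, func):
        QObject.__init__(self)
        self.func = func
        # Cross-thread emits use a queued connection, so _run executes in the
        # thread this QObject belongs to (the GUI thread).
        self._dispatch.connect(self._run)

    def _run(self, args, done):
        # Runs in the GUI thread via the Qt event loop
        done.result = self.func(*args)
        done.set()

    def __call__(self, *args):
        # Called from the worker thread; blocks until the GUI thread has
        # finished running func and returns its result.
        done = Event()
        done.result = None
        self._dispatch.emit(args, done)
        done.wait()
        return done.result

In this commit, MetadataBackup wraps db.get_metadata_for_dump, db.clear_dirtied and its own write method this way, so only the OPF serialization itself runs on the backup thread.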
@@ -89,6 +89,7 @@ class BooksModel(QAbstractTableModel): # {{{
         self.alignment_map = {}
         self.buffer_size = buffer
         self.cover_cache = None
+        self.metadata_backup = None
         self.bool_yes_icon = QIcon(I('ok.png'))
         self.bool_no_icon = QIcon(I('list_remove.png'))
         self.bool_blank_icon = QIcon(I('blank.png'))
@@ -19,6 +19,7 @@ from calibre.utils.date import parse_date, now, UNDEFINED_DATE
 from calibre.utils.search_query_parser import SearchQueryParser
 from calibre.utils.pyparsing import ParseException
 from calibre.ebooks.metadata import title_sort
+from calibre.ebooks.metadata.opf2 import metadata_to_opf
 from calibre import fit_image, prints
 
 class MetadataBackup(Thread): # {{{
@@ -36,6 +37,8 @@ class MetadataBackup(Thread): # {{{
         self.keep_running = True
         from calibre.gui2 import FunctionDispatcher
         self.do_write = FunctionDispatcher(self.write)
+        self.get_metadata_for_dump = FunctionDispatcher(db.get_metadata_for_dump)
+        self.clear_dirtied = FunctionDispatcher(db.clear_dirtied)
 
     def stop(self):
         self.keep_running = False
@@ -43,6 +46,7 @@ class MetadataBackup(Thread): # {{{
     def run(self):
         while self.keep_running:
             try:
+                time.sleep(0.5) # Limit to two per second
                 id_ = self.db.dirtied_queue.get(True, 2)
             except Empty:
                 continue
@@ -50,25 +54,27 @@ class MetadataBackup(Thread): # {{{
                 # Happens during interpreter shutdown
                 break
 
-            dump = []
             try:
-                self.db.dump_metadata([id_], dump_to=dump)
+                path, mi = self.get_metadata_for_dump(id_)
             except:
                 prints('Failed to get backup metadata for id:', id_, 'once')
                 import traceback
                 traceback.print_exc()
                 time.sleep(2)
-                dump = []
                 try:
-                    self.db.dump_metadata([id_], dump_to=dump)
+                    path, mi = self.get_metadata_for_dump(id_)
                 except:
                     prints('Failed to get backup metadata for id:', id_, 'again, giving up')
                     traceback.print_exc()
                     continue
 
             try:
-                path, raw = dump[0]
+                print 'now do metadata'
+                raw = metadata_to_opf(mi)
             except:
-                break
+                prints('Failed to convert to opf for id:', id_)
+                traceback.print_exc()
+                continue
+
             try:
                 self.do_write(path, raw)
             except:
@@ -79,8 +85,12 @@ class MetadataBackup(Thread): # {{{
                 except:
                     prints('Failed to write backup metadata for id:', id_,
                             'again, giving up')
+                    continue
 
-            time.sleep(0.5) # Limit to two per second
+            try:
+                self.clear_dirtied([id_])
+            except:
+                prints('Failed to clear dirtied for id:', id_)
 
     def write(self, path, raw):
         with open(path, 'wb') as f:
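The only work left on the backup thread is turning the Metadata object into an OPF document with metadata_to_opf. A minimal usage sketch of that step, assuming calibre is importable; the title, author and output filename are placeholders, not values from this commit:

# Usage sketch of the OPF serialization step (assumes a calibre environment).
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.opf2 import metadata_to_opf

mi = Metadata('Example Title', ['Example Author'])  # placeholder metadata
raw = metadata_to_opf(mi)                           # serialize to an OPF bytestring
with open('metadata.opf', 'wb') as f:               # placeholder path
    f.write(raw)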
@@ -106,7 +116,6 @@ class CoverCache(Thread): # {{{
         self.keep_running = False
 
     def _image_for_id(self, id_):
-        time.sleep(0.050) # Limit 20/second to not overwhelm the GUI
         img = self.cover_func(id_, index_is_id=True, as_image=True)
         if img is None:
             img = QImage()
@@ -122,6 +131,7 @@ class CoverCache(Thread): # {{{
     def run(self):
         while self.keep_running:
             try:
+                time.sleep(0.050) # Limit 20/second to not overwhelm the GUI
                 id_ = self.load_queue.get(True, 2)
             except Empty:
                 continue
@@ -566,6 +566,24 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
     def metadata_for_field(self, key):
         return self.field_metadata[key]
 
+    def clear_dirtied(self, book_ids=None):
+        '''
+        Clear the dirtied indicator for the books. This is used when fetching
+        metadata, creating an OPF, and writing a file are separated into steps.
+        The last step is clearing the indicator
+        '''
+        for book_id in book_ids:
+            if not self.data.has_id(book_id):
+                continue
+            self.conn.execute('DELETE FROM metadata_dirtied WHERE book=?',
+                    (book_id,))
+            # if a later exception prevents the commit, then the dirtied
+            # table will still have the book. No big deal, because the OPF
+            # is there and correct. We will simply do it again on next
+            # start
+            self.dirtied_cache.discard(book_id)
+        self.conn.commit()
+
     def dump_metadata(self, book_ids=None, remove_from_dirtied=True,
             commit=True, dump_to=None):
         '''
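Clearing the dirtied indicator is deliberately the last step: a book only leaves metadata_dirtied after its OPF has been written, so a failure anywhere earlier simply means the backup is retried on the next run. A standalone sketch of the same pattern using plain sqlite3 rather than calibre's connection wrapper; the table layout and function name follow the diff, everything else is illustrative:

# Sketch of the "clear dirtied last" pattern with plain sqlite3.
import sqlite3

def clear_dirtied(conn, dirtied_cache, book_ids):
    # Remove the per-book dirty markers and the cached ids, then commit once.
    for book_id in book_ids:
        conn.execute('DELETE FROM metadata_dirtied WHERE book=?', (book_id,))
        # If a failure prevents the commit the row survives, which is harmless:
        # the OPF on disk is already correct and the backup is redone next run.
        dirtied_cache.discard(book_id)
    conn.commit()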
@@ -638,6 +656,11 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
         self.dirtied_cache = set()
         self.dirtied(book_ids)
 
+    def get_metadata_for_dump(self, idx):
+        path = os.path.join(self.abspath(idx, index_is_id=True), 'metadata.opf')
+        mi = self.get_metadata(idx, index_is_id=True)
+        return ((path, mi))
+
     def get_metadata(self, idx, index_is_id=False, get_cover=False):
         '''
         Convenience method to return metadata as a :class:`Metadata` object.