Send the plugboard cache only once per worker instead of once per job

Kovid Goyal 2014-11-09 21:03:18 +05:30
parent 7a66bbbd40
commit e50915bfe8
3 changed files with 18 additions and 9 deletions
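
The diff below replaces the old scheme, where the plugboards cache was serialized and shipped with every job, with a single Pool.set_common_data() call made before any jobs are queued; each worker then keeps the cache for the lifetime of the run. As a rough, self-contained sketch of the difference (using multiprocessing directly; the queue protocol and names here are invented for illustration and are not calibre's actual Pool API):

    from multiprocessing import Process, Queue

    def worker(jobs, results):
        common_data = None  # received once, reused for every subsequent job
        while True:
            kind, payload = jobs.get()
            if kind == 'common':
                common_data = payload          # sent once per worker
            elif kind == 'job':
                book_id, fmt = payload         # per-job payload no longer carries the cache
                results.put((book_id, common_data.get(fmt)))
            else:
                break                          # 'quit'

    if __name__ == '__main__':
        jobs, results = Queue(), Queue()
        p = Process(target=worker, args=(jobs, results))
        p.start()
        jobs.put(('common', {'EPUB': 'epub plugboard', 'MOBI': 'mobi plugboard'}))
        for book_id, fmt in enumerate(('EPUB', 'MOBI')):
            jobs.put(('job', (book_id, fmt)))
        print([results.get() for _ in range(2)])
        jobs.put(('quit', None))
        p.join()
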

View File

@@ -88,7 +88,6 @@ class Saver(QObject):
         self.do_one_signal.connect(self.tick, type=Qt.QueuedConnection)
         self.do_one = self.do_one_collect
         self.ids_to_collect = iter(self.all_book_ids)
-        self.plugboards_cache = {}
         self.tdir = PersistentTemporaryDirectory('_save_to_disk')
         self.pool = None
@@ -113,7 +112,7 @@ class Saver(QObject):
             setattr(p, 'no_gc_%s' % id(self), None)
         if self.pool is not None:
             self.pool.shutdown()
-        self.jobs = self.pool = self.plugboards_cache = self.plugboards = self.template_functions = self.collected_data = self.all_book_ids = self.pd = self.db = None # noqa
+        self.jobs = self.pool = self.plugboards = self.template_functions = self.collected_data = self.all_book_ids = self.pd = self.db = None # noqa
 
     def book_id_data(self, book_id):
         ans = self._book_id_data.get(book_id)
@@ -153,8 +152,15 @@ class Saver(QObject):
         self.pd.value = 0
         if self.opts.update_metadata:
             all_fmts = {fmt for data in self.collected_data.itervalues() for fmt in data[2]}
-            self.plugboards_cache = {fmt:find_plugboard(plugboard_save_to_disk_value, fmt, self.plugboards) for fmt in all_fmts}
+            plugboards_cache = {fmt:find_plugboard(plugboard_save_to_disk_value, fmt, self.plugboards) for fmt in all_fmts}
             self.pool = Pool(name='SaveToDisk') if self.pool is None else self.pool
+            try:
+                self.pool.set_common_data(plugboards_cache)
+            except Failure as err:
+                error_dialog(self.pd, _('Critical failure'), _(
+                    'Could not save books to disk, click "Show details" for more information'),
+                    det_msg=unicode(err.failure_message) + '\n' + unicode(err.details), show=True)
+                self.pd.canceled = True
         self.do_one_signal.emit()
 
     def do_one_write(self):
@@ -260,11 +266,11 @@ class Saver(QObject):
         if self.opts.update_metadata:
             if d['fmts']:
                 try:
-                    self.pool(book_id, 'calibre.library.save_to_disk', 'update_serialized_metadata', d, self.plugboards_cache)
+                    self.pool(book_id, 'calibre.library.save_to_disk', 'update_serialized_metadata', d)
                 except Failure as err:
                     error_dialog(self.pd, _('Critical failure'), _(
                         'Could not save books to disk, click "Show details" for more information'),
-                        det_msg=unicode(err) + '\n' + unicode(err.details), show=True)
+                        det_msg=unicode(err.failure_message) + '\n' + unicode(err.details), show=True)
                     self.pd.canceled = True
             else:
                 self.pd.value += 1
@@ -298,7 +304,7 @@ class Saver(QObject):
         except Failure as err:
             error_dialog(self.pd, _('Critical failure'), _(
                 'Could not save books to disk, click "Show details" for more information'),
-                det_msg=unicode(err) + '\n' + unicode(err.details), show=True)
+                det_msg=unicode(err.failure_message) + '\n' + unicode(err.details), show=True)
             self.pd.canceled = True
         except RuntimeError:
             pass # tasks not completed

View File

@@ -446,8 +446,9 @@ def read_serialized_metadata(data):
             cdata = f.read()
     return mi, cdata
 
-def update_serialized_metadata(book, plugboard_cache):
+def update_serialized_metadata(book, common_data=None):
     result = []
+    plugboard_cache = common_data
     from calibre.customize.ui import apply_null_metadata
     with apply_null_metadata:
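
The new signature follows the pool's convention that shared state arrives through the common_data keyword argument rather than as an explicit per-job parameter. Presumably the pool's worker resolves the requested module and function by name and forwards whatever was set via set_common_data; a minimal sketch of that dispatch convention (the Job record and dispatch helper are invented for illustration, not calibre's real worker code):

    from collections import namedtuple
    from importlib import import_module

    # Hypothetical job record: dotted module name, function name, per-job payload.
    Job = namedtuple('Job', 'module func data')

    def dispatch(job, common_data):
        # Resolve e.g. calibre.library.save_to_disk.update_serialized_metadata
        # and pass the shared object through the common_data keyword.
        func = getattr(import_module(job.module), job.func)
        return func(job.data, common_data=common_data)
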

View File

@@ -26,6 +26,7 @@ class Failure(Exception):
         Exception.__init__(self, tf.message)
         self.details = tf.tb
         self.job_id = tf.job_id
+        self.failure_message = tf.message
 
 class Worker(object):
@@ -89,7 +90,8 @@ class Pool(Thread):
         needing to be transmitted every time. You must call this method before
         queueing any jobs, otherwise the behavior is undefined. You can call it
         after all jobs are done, then it will be used for the new round of
-        jobs. '''
+        jobs. Can raise the :class:`Failure` exception if data could not be
+        sent to workers.'''
         with self.lock:
             self.common_data = data
             for worker in self.available_workers:
@@ -99,7 +101,7 @@
                     import traceback
                     self.terminal_failure = TerminalFailure('Worker process crashed while sending common data', traceback.format_exc())
                     self.terminal_error()
-                    break
+                    raise Failure(self.terminal_failure)
 
     def start_worker(self):
         try:
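
The failure_message attribute added to Failure above is what the error dialogs in the first file now display (unicode(err.failure_message) instead of unicode(err)), keeping the human-readable worker message separate from the full traceback held in details. A compressed, runnable sketch of that pairing, assuming a simplified TerminalFailure that only carries a message and a traceback string:

    class TerminalFailure(Exception):
        # Simplified stand-in for the real TerminalFailure: message plus worker traceback text.
        def __init__(self, message, tb=''):
            Exception.__init__(self, message)
            self.message, self.tb = message, tb

    class Failure(Exception):
        def __init__(self, tf):
            Exception.__init__(self, tf.message)
            self.details = tf.tb
            self.failure_message = tf.message  # what the error dialogs show

    if __name__ == '__main__':
        err = Failure(TerminalFailure('worker crashed', 'Traceback (most recent call last): ...'))
        print(err.failure_message + '\n' + err.details)
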