mirror of https://github.com/kovidgoyal/calibre.git, synced 2025-06-23 15:30:45 -04:00
Split each author into its own EXTH Creator field, even with the 'Use author_sort as author' setting.
Also, on a slightly unrelated note, sprinkle azw3 awareness in a few places.
parent 8297eebb61
commit ca467cdb2e
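
For context, here is a minimal standalone sketch (not part of the diff) of what the author change aims for: with 'Use author_sort as author' enabled, the single author_sort string is split on the ' & ' separator so that each author ends up in its own EXTH Creator (type 100) record, the way KindleGen writes them. The sample names and record lists below are illustrative only.

    # Minimal sketch, not calibre code: one EXTH Creator (type 100) record
    # per author instead of a single combined record.
    author_sort = 'Doe, Jane & Smith, John'

    # Before this commit: one record holding the whole author_sort string.
    exth_before = [(100, author_sort.encode('utf-8'))]

    # After: split on the ' & ' separator between authors, giving one
    # Creator record per author, matching KindleGen's output.
    exth_after = [(100, name.encode('utf-8'))
                  for name in author_sort.split(' & ')]

    print(exth_before)  # [(100, b'Doe, Jane & Smith, John')]
    print(exth_after)   # [(100, b'Doe, Jane'), (100, b'Smith, John')]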
@@ -163,7 +163,7 @@ def render_html(path_to_html, width=590, height=750, as_xhtml=True):
 def check_ebook_format(stream, current_guess):
     ans = current_guess
-    if current_guess.lower() in ('prc', 'mobi', 'azw', 'azw1'):
+    if current_guess.lower() in ('prc', 'mobi', 'azw', 'azw1', 'azw3'):
         stream.seek(0)
         if stream.read(3) == 'TPZ':
             ans = 'tpz'
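
On the azw3 side, here is a rough sketch of what adding 'azw3' to the tuple above means in practice: a file guessed as azw3 that actually starts with the Topaz magic 'TPZ' is re-identified as 'tpz', while a genuine azw3 keeps its guess. The helper below is a standalone restatement under that assumption, not the calibre function itself, and it uses a bytes literal because the original code is Python 2.

    from io import BytesIO

    def sniff(data, guess):
        # Re-check the Topaz magic for all Mobipocket-family guesses, azw3 included.
        ans = guess
        if guess.lower() in ('prc', 'mobi', 'azw', 'azw1', 'azw3'):
            stream = BytesIO(data)
            stream.seek(0)
            if stream.read(3) == b'TPZ':
                ans = 'tpz'
        return ans

    print(sniff(b'TPZ0 sample', 'azw3'))    # tpz
    print(sniff(b'no magic here', 'azw3'))  # azw3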
@@ -70,7 +70,7 @@ class ArchiveExtract(FileTypePlugin):
         fname = fnames[0]
         ext = os.path.splitext(fname)[1][1:]
         if ext.lower() not in ('lit', 'epub', 'mobi', 'prc', 'rtf', 'pdf',
-                'mp3', 'pdb', 'azw', 'azw1', 'fb2'):
+                'mp3', 'pdb', 'azw', 'azw1', 'azw3', 'fb2'):
             return archive

         of = self.temporary_file('_archive_extract.'+ext)
@@ -15,7 +15,7 @@ _METADATA_PRIORITIES = [
     'html', 'htm', 'xhtml', 'xhtm',
     'rtf', 'fb2', 'pdf', 'prc', 'odt',
     'epub', 'lit', 'lrx', 'lrf', 'mobi',
-    'rb', 'imp', 'azw', 'snb'
+    'rb', 'imp', 'azw', 'azw3', 'azw1' 'snb'
 ]

 # The priorities for loading metadata from different file types
@@ -85,7 +85,7 @@ def _get_metadata(stream, stream_type, use_libprs_metadata,
     if stream_type: stream_type = stream_type.lower()
     if stream_type in ('html', 'html', 'xhtml', 'xhtm', 'xml'):
         stream_type = 'html'
-    if stream_type in ('mobi', 'prc', 'azw'):
+    if stream_type in ('mobi', 'prc', 'azw', 'azw1', 'azw3'):
         stream_type = 'mobi'
     if stream_type in ('odt', 'ods', 'odp', 'odg', 'odf'):
         stream_type = 'odt'
@@ -341,11 +341,14 @@ class MetadataUpdater(object):
         kindle_pdoc = None
         share_not_sync = False
         if mi.author_sort and pas:
-            authors = mi.author_sort
-            update_exth_record((100, normalize(authors).encode(self.codec, 'replace')))
+            # We want an EXTH field per author...
+            authors = mi.author_sort.split(' & ')
+            for author in authors:
+                update_exth_record((100, normalize(author).encode(self.codec, 'replace')))
         elif mi.authors:
-            authors = ';'.join(mi.authors)
-            update_exth_record((100, normalize(authors).encode(self.codec, 'replace')))
+            authors = mi.authors
+            for author in authors:
+                update_exth_record((100, normalize(author).encode(self.codec, 'replace')))
         if mi.publisher:
             update_exth_record((101, normalize(mi.publisher).encode(self.codec, 'replace')))
         if mi.comments:
@@ -360,6 +363,7 @@ class MetadataUpdater(object):
         if mi.isbn:
             update_exth_record((104, mi.isbn.encode(self.codec, 'replace')))
         if mi.tags:
+            # FIXME: Keep a single subject per EXTH field?
             subjects = '; '.join(mi.tags)
             update_exth_record((105, normalize(subjects).encode(self.codec, 'replace')))
@@ -32,7 +32,7 @@ def get_metadata(stream):
     if stream_type:
         stream_type = stream_type[1:]
         if stream_type in ('lit', 'opf', 'prc', 'mobi', 'fb2', 'epub',
-                'rb', 'imp', 'pdf', 'lrf', 'azw'):
+                'rb', 'imp', 'pdf', 'lrf', 'azw', 'azw1', 'azw3'):
             with TemporaryDirectory() as tdir:
                 with CurrentDir(tdir):
                     stream = extract_member(path, match=None, name=f,
@@ -23,7 +23,7 @@ def get_metadata(stream):
     if stream_type:
         stream_type = stream_type[1:]
         if stream_type in ('lit', 'opf', 'prc', 'mobi', 'fb2', 'epub',
-                'rb', 'imp', 'pdf', 'lrf', 'azw'):
+                'rb', 'imp', 'pdf', 'lrf', 'azw', 'azw1', 'azw3'):
             with TemporaryDirectory() as tdir:
                 with CurrentDir(tdir):
                     path = zf.extract(f)
@@ -54,8 +54,16 @@ def build_exth(metadata, prefer_author_sort=False, is_periodical=False,
         items = metadata[term]
         if term == 'creator':
             if prefer_author_sort:
-                creators = [unicode(c.file_as or c) for c in
-                            items]
+                # This is a bit hackish... We only get the first item in the creators list,
+                # because we only care about the file_as property, and it contains *all* the authors in every creator markup,
+                # so we only need one, or we end up with duplicates ;).
+                # We then end up with a single item in our list, that contains every authors, in author sort syntax, separated by an ' & ' character.
+                # That's not good enough, because we want each author in a separate entry in the list, so we just split this on every & ;).
+                # This way, we properly end up with multiple Creator fields in the EXTH header, one for each author, like KindleGen :).
+                all_creators = [unicode(c.file_as or c) for c in
+                            items][:1]
+                for creator in all_creators:
+                    creators = creator.split(' & ')
             else:
                 creators = [unicode(c) for c in items]
             items = creators
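
As a small standalone illustration of the reasoning in the comments above (the Creator class and the names are hypothetical stand-ins for the OPF metadata items): every creator entry carries the same file-as string listing all authors, so reading file_as from only the first item and splitting it on ' & ' yields exactly one entry, and hence one EXTH Creator field, per author.

    class Creator(object):
        def __init__(self, name, file_as):
            self.name = name
            self.file_as = file_as

    # Both creator items carry the same combined file-as string.
    items = [Creator('Jane Doe', 'Doe, Jane & Smith, John'),
             Creator('John Smith', 'Doe, Jane & Smith, John')]

    all_creators = [c.file_as or c.name for c in items][:1]  # keep only the first
    creators = []
    for creator in all_creators:
        creators = creator.split(' & ')

    print(creators)  # ['Doe, Jane', 'Smith, John']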
@@ -30,7 +30,7 @@ def get_filters():
             (_('LRF Books'), ['lrf']),
             (_('HTML Books'), ['htm', 'html', 'xhtm', 'xhtml']),
             (_('LIT Books'), ['lit']),
-            (_('MOBI Books'), ['mobi', 'prc', 'azw']),
+            (_('MOBI Books'), ['mobi', 'prc', 'azw', 'azw3']),
             (_('Topaz books'), ['tpz','azw1']),
             (_('Text books'), ['txt', 'text', 'rtf']),
             (_('PDF Books'), ['pdf', 'azw4']),
@@ -446,7 +446,7 @@ class KindlePage(QWizardPage, KindleUI):
         if not accounts: accounts = {}
         for y in accounts.values():
             y[2] = False
-        accounts[x] = ['AZW, MOBI, TPZ, PRC, AZW1', True, True]
+        accounts[x] = ['AZW, MOBI, TPZ, PRC, AZW1, AZW3', True, True]
         conf.set('accounts', accounts)

     def nextId(self):