iteritems (manual 2)
parent 0d37a96eaa
commit ce13dfbb43
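
The change is mechanical: each call to the Python 2 compatibility helper iteritems() from polyglot.builtins is replaced by the builtin dict.items() method, and the iteritems import is removed (or trimmed from the import list) wherever it is no longer used. A minimal sketch of the pattern, using a hypothetical mapping dict rather than code from the calibre tree:

# Minimal sketch of the transformation applied throughout this commit.
# The 'mapping' dict below is illustrative only, not code from calibre.

# Before (Python 2 compatible, via the polyglot shim):
#     from polyglot.builtins import iteritems
#     for key, value in iteritems(mapping):
#         print(key, value)

# After (plain Python 3):
mapping = {'tags': 3, 'series': 1}
for key, value in mapping.items():
    print(key, value)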
@@ -12,7 +12,7 @@ import os
 import shutil
 import zipfile
 
-from polyglot.builtins import iteritems, only_unicode_recursive
+from polyglot.builtins import only_unicode_recursive
 from setup import Command, basenames, download_securely, dump_json
 
 
@@ -210,7 +210,7 @@ class Resources(Command): # {{{
         dump_json(function_dict, dest)
         self.info('\tCreating user-manual-translation-stats.json')
         d = {}
-        for lc, stats in iteritems(json.load(open(self.j(self.d(self.SRC), 'manual', 'locale', 'completed.json')))):
+        for lc, stats in json.load(open(self.j(self.d(self.SRC), 'manual', 'locale', 'completed.json'))).items():
             total = sum(stats.values())
             d[lc] = stats['translated'] / float(total)
         dump_json(d, self.j(self.RESOURCES, 'user-manual-translation-stats.json'))
@@ -23,7 +23,6 @@ from collections import defaultdict
 from functools import lru_cache, partial
 from locale import normalize as normalize_locale
 
-from polyglot.builtins import iteritems
 from setup import Command, __appname__, __version__, build_cache_dir, dump_json, edit_file, is_ci, require_git_master
 from setup.iso_codes import iso_data
 from setup.parallel_build import batched_parallel_jobs
@@ -500,7 +499,7 @@ class Translations(POT): # {{{
         raw = None
         po_data = data.decode('utf-8')
         data = json.loads(msgfmt(po_data))
-        translated_entries = {k:v for k, v in iteritems(data['entries']) if v and sum(map(len, v))}
+        translated_entries = {k:v for k, v in data['entries'].items() if v and sum(map(len, v))}
         data['entries'] = translated_entries
         data['hash'] = h.hexdigest()
         cdata = b'{}'
@@ -52,7 +52,7 @@ from calibre.utils.filenames import make_long_path_useable
 from calibre.utils.icu import lower as icu_lower
 from calibre.utils.icu import sort_key
 from calibre.utils.localization import canonicalize_lang
-from polyglot.builtins import cmp, iteritems
+from polyglot.builtins import cmp
 
 
 class ExtraFile(NamedTuple):
@@ -936,7 +936,7 @@ class Cache:
         ''' Return a mapping of id to usage count for all values of the specified
         field, which must be a many-one or many-many field. '''
         try:
-            return {k:len(v) for k, v in iteritems(self.fields[field].table.col_book_map)}
+            return {k:len(v) for k, v in self.fields[field].table.col_book_map.items()}
         except AttributeError:
             raise ValueError(f'{field} is not a many-one or many-many field')
 
@@ -2477,7 +2477,7 @@ class Cache:
         insensitive).
 
         '''
-        tag_map = {icu_lower(v):k for k, v in iteritems(self._get_id_map('tags'))}
+        tag_map = {icu_lower(v):k for k, v in self._get_id_map('tags').items()}
         tag = icu_lower(tag.strip())
         mht = icu_lower(must_have_tag.strip()) if must_have_tag else None
         tag_id, mht_id = tag_map.get(tag, None), tag_map.get(mht, None)
@@ -2490,7 +2490,7 @@ class Cache:
             tagged_books = tagged_books.intersection(self._books_for_field('tags', mht_id))
         if tagged_books:
             if must_have_authors is not None:
-                amap = {icu_lower(v):k for k, v in iteritems(self._get_id_map('authors'))}
+                amap = {icu_lower(v):k for k, v in self._get_id_map('authors').items()}
                 books = None
                 for author in must_have_authors:
                     abooks = self._books_for_field('authors', amap.get(icu_lower(author), None))
@@ -3600,7 +3600,7 @@ def import_library(library_key, importer, library_path, progress=None, abort=Non
     cache = Cache(DB(library_path, load_user_formatter_functions=False))
     cache.init()
 
-    format_data = {int(book_id):data for book_id, data in iteritems(metadata['format_data'])}
+    format_data = {int(book_id):data for book_id, data in metadata['format_data'].items()}
     extra_files = {int(book_id):data for book_id, data in metadata.get('extra_files', {}).items()}
     for i, (book_id, fmt_key_map) in enumerate(format_data.items()):
         if abort is not None and abort.is_set():
@@ -5,7 +5,6 @@
 from pprint import pformat
 
 from calibre import prints
-from polyglot.builtins import iteritems
 
 readonly = True
 version = 0 # change this if you change signature of implementation()
@@ -36,7 +35,7 @@ List available custom columns. Shows column labels and ids.
 
 
 def main(opts, args, dbctx):
-    for col, data in iteritems(dbctx.run('custom_columns')):
+    for col, data in dbctx.run('custom_columns').items():
         if opts.details:
             prints(col)
             print()
@@ -7,7 +7,6 @@ version = 0 # change this if you change signature of implementation()
 
 from calibre import prints
 from calibre.srv.changes import saved_searches
-from polyglot.builtins import iteritems
 
 
 def implementation(db, notify_changes, action, *args):
@@ -55,7 +54,7 @@ Syntax for removing:
 def main(opts, args, dbctx):
     args = args or ['list']
     if args[0] == 'list':
-        for name, value in iteritems(dbctx.run('saved_searches', 'list')):
+        for name, value in dbctx.run('saved_searches', 'list').items():
             prints(_('Name:'), name)
             prints(_('Search string:'), value)
             print()
@@ -5,7 +5,6 @@
 from calibre.db.utils import find_identical_books
 from calibre.utils.config import tweaks
 from calibre.utils.date import now
-from polyglot.builtins import iteritems
 
 
 def automerge_book(automerge_action, book_id, mi, identical_book_list, newdb, format_map, extra_file_map):
@@ -87,7 +86,7 @@ def copy_one_book(
         if path:
             format_map[fmt.upper()] = path
     identical_book_list = set()
-    new_authors = {k for k, v in iteritems(newdb.get_item_ids('authors', mi.authors)) if v is None}
+    new_authors = {k for k, v in newdb.get_item_ids('authors', mi.authors).items() if v is None}
     new_book_id = None
     return_data = {
         'book_id': book_id, 'title': mi.title, 'authors': mi.authors, 'author': mi.format_field('authors')[1],
@@ -23,7 +23,6 @@ from calibre.db.write import clean_identifier, get_series_values
 from calibre.utils.date import utcnow
 from calibre.utils.icu import lower as icu_lower
 from calibre.utils.search_query_parser import set_saved_searches
-from polyglot.builtins import iteritems
 
 
 def cleanup_tags(tags):
@@ -531,7 +530,7 @@ class LibraryDatabase:
         ans = set()
         if title:
             title = icu_lower(force_unicode(title))
-            for book_id, x in iteritems(self.new_api.get_id_map('title')):
+            for book_id, x in self.new_api.get_id_map('title').items():
                 if icu_lower(x) == title:
                     ans.add(book_id)
                     if not all_matches:
@@ -636,7 +635,7 @@ class LibraryDatabase:
 
     def delete_tags(self, tags):
         with self.new_api.write_lock:
-            tag_map = {icu_lower(v):k for k, v in iteritems(self.new_api._get_id_map('tags'))}
+            tag_map = {icu_lower(v):k for k, v in self.new_api._get_id_map('tags').items()}
             tag_ids = (tag_map.get(icu_lower(tag), None) for tag in tags)
             tag_ids = tuple(tid for tid in tag_ids if tid is not None)
             if tag_ids:
@@ -961,7 +960,7 @@ for field in ('authors', 'tags', 'publisher', 'series'):
 LibraryDatabase.all_formats = lambda self: self.new_api.all_field_names('formats')
 LibraryDatabase.all_custom = lambda self, label=None, num=None:self.new_api.all_field_names(self.custom_field_name(label, num))
 
-for func, field in iteritems({'all_authors':'authors', 'all_titles':'title', 'all_tags2':'tags', 'all_series':'series', 'all_publishers':'publisher'}):
+for func, field in {'all_authors':'authors', 'all_titles':'title', 'all_tags2':'tags', 'all_series':'series', 'all_publishers':'publisher'}.items():
     def getter(field):
         def func(self):
             return self.field_id_map(field)
@@ -971,7 +970,7 @@ for func, field in iteritems({'all_authors':'authors', 'all_titles':'title', 'al
 LibraryDatabase.all_tags = lambda self: list(self.all_tag_names())
 LibraryDatabase.get_all_identifier_types = lambda self: list(self.new_api.fields['identifiers'].table.all_identifier_types())
 LibraryDatabase.get_authors_with_ids = lambda self: [[aid, adata['name'], adata['sort'], adata['link']] for aid, adata in self.new_api.author_data().items()]
-LibraryDatabase.get_author_id = lambda self, author: {icu_lower(v):k for k, v in iteritems(self.new_api.get_id_map('authors'))}.get(icu_lower(author), None)
+LibraryDatabase.get_author_id = lambda self, author: {icu_lower(v):k for k, v in self.new_api.get_id_map('authors').items()}.get(icu_lower(author), None)
 
 for field in ('tags', 'series', 'publishers', 'ratings', 'languages'):
     def getter(field):
@@ -18,7 +18,6 @@ from calibre.ptempfile import PersistentTemporaryFile
 from calibre.utils.date import UNDEFINED_DATE, now, utcnow
 from calibre.utils.img import image_from_path
 from calibre.utils.resources import get_image_path
-from polyglot.builtins import iteritems
 
 
 def import_test(replacement_data, replacement_fmt=None):
@@ -245,7 +244,7 @@ class AddRemoveTest(BaseTest):
         authorpath = os.path.dirname(bookpath)
         os.mkdir(os.path.join(authorpath, '.DS_Store'))
         open(os.path.join(authorpath, 'Thumbs.db'), 'wb').close()
-        item_id = {v:k for k, v in iteritems(cache.fields['#series'].table.id_map)}['My Series Two']
+        item_id = {v:k for k, v in cache.fields['#series'].table.id_map.items()}['My Series Two']
         cache.remove_books((1,), permanent=True)
         for x in (fmtpath, bookpath, authorpath):
             af(os.path.exists(x), f'The file {x} exists, when it should not')
@@ -276,7 +275,7 @@ class AddRemoveTest(BaseTest):
         os.mkdir(os.path.join(bookpath, 'xyz'))
         open(os.path.join(bookpath, 'xyz', 'abc'), 'w').close()
         authorpath = os.path.dirname(bookpath)
-        item_id = {v:k for k, v in iteritems(cache.fields['#series'].table.id_map)}['My Series Two']
+        item_id = {v:k for k, v in cache.fields['#series'].table.id_map.items()}['My Series Two']
         cache.remove_books((1,))
         for x in (fmtpath, bookpath, authorpath):
             af(os.path.exists(x), f'The file {x} exists, when it should not')
@@ -15,7 +15,6 @@ from operator import itemgetter
 from calibre.db.constants import NOTES_DIR_NAME
 from calibre.db.tests.base import BaseTest
 from calibre.library.field_metadata import fm_as_dict
-from polyglot.builtins import iteritems
 
 # Utils {{{
 
@@ -195,7 +194,7 @@ class LegacyTest(BaseTest):
 
         self.assertEqual(dict(db.prefs), dict(ndb.prefs))
 
-        for meth, args in iteritems({
+        for meth, args in {
             'find_identical_books': [(Metadata('title one', ['author one']),), (Metadata('unknown'),), (Metadata('xxxx'),)],
             'get_books_for_category': [('tags', newstag), ('#formats', 'FMT1')],
             'get_next_series_num_for': [('A Series One',)],
@@ -260,7 +259,7 @@ class LegacyTest(BaseTest):
             'book_on_device_string':[(1,), (2,), (3,)],
             'books_in_series_of':[(0,), (1,), (2,)],
             'books_with_same_title':[(Metadata(db.title(0)),), (Metadata(db.title(1)),), (Metadata('1234'),)],
-        }):
+        }.items():
             if meth[0] in {'!', '@'}:
                 fmt = {'!':dict, '@':frozenset}[meth[0]]
                 meth = meth[1:]
@@ -677,10 +676,10 @@ class LegacyTest(BaseTest):
 
         ndb = self.init_legacy(self.cloned_library)
         db = self.init_old(self.cloned_library)
-        a = {v:k for k, v in iteritems(ndb.new_api.get_id_map('authors'))}['Author One']
-        t = {v:k for k, v in iteritems(ndb.new_api.get_id_map('tags'))}['Tag One']
-        s = {v:k for k, v in iteritems(ndb.new_api.get_id_map('series'))}['A Series One']
-        p = {v:k for k, v in iteritems(ndb.new_api.get_id_map('publisher'))}['Publisher One']
+        a = {v:k for k, v in ndb.new_api.get_id_map('authors').items()}['Author One']
+        t = {v:k for k, v in ndb.new_api.get_id_map('tags').items()}['Tag One']
+        s = {v:k for k, v in ndb.new_api.get_id_map('series').items()}['A Series One']
+        p = {v:k for k, v in ndb.new_api.get_id_map('publisher').items()}['Publisher One']
         run_funcs(self, db, ndb, (
             ('rename_author', a, 'Author Two'),
             ('rename_tag', t, 'News'),
@@ -718,11 +717,11 @@ class LegacyTest(BaseTest):
         run_funcs(self, db, ndb, [(func, idx, label) for idx in range(3)])
 
         # Test renaming/deleting
-        t = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#tags'))}['My Tag One']
-        t2 = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#tags'))}['My Tag Two']
-        a = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#authors'))}['My Author Two']
-        a2 = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#authors'))}['Custom One']
-        s = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#series'))}['My Series One']
+        t = {v:k for k, v in ndb.new_api.get_id_map('#tags').items()}['My Tag One']
+        t2 = {v:k for k, v in ndb.new_api.get_id_map('#tags').items()}['My Tag Two']
+        a = {v:k for k, v in ndb.new_api.get_id_map('#authors').items()}['My Author Two']
+        a2 = {v:k for k, v in ndb.new_api.get_id_map('#authors').items()}['Custom One']
+        s = {v:k for k, v in ndb.new_api.get_id_map('#series').items()}['My Series One']
         run_funcs(self, db, ndb, (
             ('delete_custom_item_using_id', t, 'tags'),
             ('delete_custom_item_using_id', a, 'authors'),
@@ -13,7 +13,6 @@ from time import time
 from calibre.db.tests.base import BaseTest
 from calibre.utils.date import utc_tz
 from calibre.utils.localization import calibre_langcode_to_name
-from polyglot.builtins import iteritems
 
 
 def p(x):
@@ -136,7 +135,7 @@ class ReadingTest(BaseTest):
 
         lmap = {x:cache.field_for('languages', x) for x in (1, 2, 3)}
         lq = sorted(lmap, key=lambda x: calibre_langcode_to_name((lmap[x] or ('',))[0]))
-        for field, order in iteritems({
+        for field, order in {
             'title' : [2, 1, 3],
             'authors': [2, 1, 3],
             'series' : [3, 1, 2],
@@ -160,7 +159,7 @@ class ReadingTest(BaseTest):
             '#yesno':[2, 1, 3],
             '#comments':[3, 2, 1],
             'id': [1, 2, 3],
-        }):
+        }.items():
             x = list(reversed(order))
             ae(order, cache.multisort([(field, True)],
                 ids_to_sort=x),
@@ -16,7 +16,6 @@ from calibre.db.tests.base import IMG, BaseTest
 from calibre.ebooks.metadata import author_to_author_sort, title_sort
 from calibre.ebooks.metadata.book.base import Metadata
 from calibre.utils.date import UNDEFINED_DATE
-from polyglot.builtins import iteritems
 
 
 class WritingTest(BaseTest):
@@ -169,7 +168,7 @@ class WritingTest(BaseTest):
         self.assertEqual(cache.set_field('#enum', {1:None}), {1})
         cache2 = self.init_cache(cl)
         for c in (cache, cache2):
-            for i, val in iteritems({1:None, 2:'One', 3:'Three'}):
+            for i, val in {1:None, 2:'One', 3:'Three'}.items():
                 self.assertEqual(c.field_for('#enum', i), val)
         del cache2
 
@@ -179,9 +178,9 @@ class WritingTest(BaseTest):
         self.assertEqual(cache.set_field('#rating', {1:None, 2:4, 3:8}), {1, 2, 3})
         cache2 = self.init_cache(cl)
         for c in (cache, cache2):
-            for i, val in iteritems({1:None, 2:4, 3:2}):
+            for i, val in {1:None, 2:4, 3:2}.items():
                 self.assertEqual(c.field_for('rating', i), val)
-            for i, val in iteritems({1:None, 2:4, 3:8}):
+            for i, val in {1:None, 2:4, 3:8}.items():
                 self.assertEqual(c.field_for('#rating', i), val)
         del cache2
 
@@ -194,14 +193,14 @@ class WritingTest(BaseTest):
         self.assertEqual(cache.set_field('#series', {2:'Series [0]'}), {2})
         cache2 = self.init_cache(cl)
         for c in (cache, cache2):
-            for i, val in iteritems({1:'A Series One', 2:'A Series One', 3:'Series'}):
+            for i, val in {1:'A Series One', 2:'A Series One', 3:'Series'}.items():
                 self.assertEqual(c.field_for('series', i), val)
             cs_indices = {1:c.field_for('#series_index', 1), 3:c.field_for('#series_index', 3)}
             for i in (1, 2, 3):
                 self.assertEqual(c.field_for('#series', i), 'Series')
-            for i, val in iteritems({1:2, 2:1, 3:3}):
+            for i, val in {1:2, 2:1, 3:3}.items():
                 self.assertEqual(c.field_for('series_index', i), val)
-            for i, val in iteritems({1:cs_indices[1], 2:0, 3:cs_indices[3]}):
+            for i, val in {1:cs_indices[1], 2:0, 3:cs_indices[3]}.items():
                 self.assertEqual(c.field_for('#series_index', i), val)
         del cache2
 
@@ -607,9 +606,9 @@ class WritingTest(BaseTest):
         cl = self.cloned_library
         cache = self.init_cache(cl)
         # Check that renaming authors updates author sort and path
-        a = {v:k for k, v in iteritems(cache.get_id_map('authors'))}['Unknown']
+        a = {v:k for k, v in cache.get_id_map('authors').items()}['Unknown']
         self.assertEqual(cache.rename_items('authors', {a:'New Author'})[0], {3})
-        a = {v:k for k, v in iteritems(cache.get_id_map('authors'))}['Author One']
+        a = {v:k for k, v in cache.get_id_map('authors').items()}['Author One']
         self.assertEqual(cache.rename_items('authors', {a:'Author Two'})[0], {1, 2})
         for c in (cache, self.init_cache(cl)):
             self.assertEqual(c.all_field_names('authors'), {'New Author', 'Author Two'})
@@ -618,7 +617,7 @@ class WritingTest(BaseTest):
             self.assertEqual(c.field_for('authors', 1), ('Author Two',))
             self.assertEqual(c.field_for('author_sort', 1), 'Two, Author')
 
-        t = {v:k for k, v in iteritems(cache.get_id_map('tags'))}['Tag One']
+        t = {v:k for k, v in cache.get_id_map('tags').items()}['Tag One']
         # Test case change
         self.assertEqual(cache.rename_items('tags', {t:'tag one'}), ({1, 2}, {t:t}))
         for c in (cache, self.init_cache(cl)):
@@ -638,14 +637,14 @@ class WritingTest(BaseTest):
             self.assertEqual(set(c.field_for('tags', 1)), {'Tag Two', 'News'})
             self.assertEqual(set(c.field_for('tags', 2)), {'Tag Two'})
         # Test on a custom column
-        t = {v:k for k, v in iteritems(cache.get_id_map('#tags'))}['My Tag One']
+        t = {v:k for k, v in cache.get_id_map('#tags').items()}['My Tag One']
         self.assertEqual(cache.rename_items('#tags', {t:'My Tag Two'})[0], {2})
         for c in (cache, self.init_cache(cl)):
             self.assertEqual(c.all_field_names('#tags'), {'My Tag Two'})
             self.assertEqual(set(c.field_for('#tags', 2)), {'My Tag Two'})
 
         # Test a Many-one field
-        s = {v:k for k, v in iteritems(cache.get_id_map('series'))}['A Series One']
+        s = {v:k for k, v in cache.get_id_map('series').items()}['A Series One']
         # Test case change
         self.assertEqual(cache.rename_items('series', {s:'a series one'}), ({1, 2}, {s:s}))
         for c in (cache, self.init_cache(cl)):
@@ -661,7 +660,7 @@ class WritingTest(BaseTest):
             self.assertEqual(c.field_for('series', 2), 'series')
             self.assertEqual(c.field_for('series_index', 1), 2.0)
 
-        s = {v:k for k, v in iteritems(cache.get_id_map('#series'))}['My Series One']
+        s = {v:k for k, v in cache.get_id_map('#series').items()}['My Series One']
         # Test custom column with rename to existing
         self.assertEqual(cache.rename_items('#series', {s:'My Series Two'})[0], {2})
         for c in (cache, self.init_cache(cl)):
@@ -672,7 +671,7 @@ class WritingTest(BaseTest):
 
         # Test renaming many-many items to multiple items
         cache = self.init_cache(self.cloned_library)
-        t = {v:k for k, v in iteritems(cache.get_id_map('tags'))}['Tag One']
+        t = {v:k for k, v in cache.get_id_map('tags').items()}['Tag One']
         affected_books, id_map = cache.rename_items('tags', {t:'Something, Else, Entirely'})
         self.assertEqual({1, 2}, affected_books)
         tmap = cache.get_id_map('tags')
@@ -687,7 +686,7 @@ class WritingTest(BaseTest):
         # Test with restriction
         cache = self.init_cache()
        cache.set_field('tags', {1:'a,b,c', 2:'x,y,z', 3:'a,x,z'})
-        tmap = {v:k for k, v in iteritems(cache.get_id_map('tags'))}
+        tmap = {v:k for k, v in cache.get_id_map('tags').items()}
         self.assertEqual(cache.rename_items('tags', {tmap['a']:'r'}, restrict_to_book_ids=()), (set(), {}))
         self.assertEqual(cache.rename_items('tags', {tmap['a']:'r', tmap['b']:'q'}, restrict_to_book_ids=(1,))[0], {1})
         self.assertEqual(cache.rename_items('tags', {tmap['x']:'X'}, restrict_to_book_ids=(2,))[0], {2})
@@ -808,7 +807,7 @@ class WritingTest(BaseTest):
         conn.execute('INSERT INTO tags (name) VALUES ("t")')
         norm = conn.last_insert_rowid()
         conn.execute('DELETE FROM books_tags_link')
-        for book_id, vals in iteritems({1:(lid, uid), 2:(uid, mid), 3:(lid, norm)}):
+        for book_id, vals in {1:(lid, uid), 2:(uid, mid), 3:(lid, norm)}.items():
            conn.executemany('INSERT INTO books_tags_link (book,tag) VALUES (?,?)',
                tuple((book_id, x) for x in vals))
         cache.reload_from_db()
@@ -25,7 +25,7 @@ from calibre.devices.mtp.filesystem_cache import FileOrFolder, convert_timestamp
 from calibre.ptempfile import PersistentTemporaryDirectory, SpooledTemporaryFile
 from calibre.utils.filenames import shorten_components_to
 from calibre.utils.icu import lower as icu_lower
-from polyglot.builtins import as_bytes, iteritems
+from polyglot.builtins import as_bytes
 
 BASE = importlib.import_module('calibre.devices.mtp.{}.driver'.format('windows' if iswindows else 'unix')).MTP_DEVICE
 DEFAULT_THUMBNAIL_HEIGHT = 320
@@ -789,7 +789,7 @@ class MTP_DEVICE(BASE):
     def get_user_blacklisted_devices(self):
         bl = frozenset(self.prefs['blacklist'])
         ans = {}
-        for dev, x in iteritems(self.prefs['history']):
+        for dev, x in self.prefs['history'].items():
             name = x[0]
             if dev in bl:
                 ans[dev] = name
@@ -34,7 +34,6 @@ from operator import itemgetter
 from pprint import pformat, pprint
 
 from calibre import as_unicode, prints
-from polyglot.builtins import iteritems
 
 try:
     import winreg
@@ -671,13 +670,13 @@ def get_volume_information(drive_letter):
         'max_component_length': max_component_length.value,
     }
 
-    for name, num in iteritems({'FILE_CASE_PRESERVED_NAMES':0x00000002, 'FILE_CASE_SENSITIVE_SEARCH':0x00000001, 'FILE_FILE_COMPRESSION':0x00000010,
+    for name, num in {'FILE_CASE_PRESERVED_NAMES':0x00000002, 'FILE_CASE_SENSITIVE_SEARCH':0x00000001, 'FILE_FILE_COMPRESSION':0x00000010,
             'FILE_NAMED_STREAMS':0x00040000, 'FILE_PERSISTENT_ACLS':0x00000008, 'FILE_READ_ONLY_VOLUME':0x00080000,
             'FILE_SEQUENTIAL_WRITE_ONCE':0x00100000, 'FILE_SUPPORTS_ENCRYPTION':0x00020000, 'FILE_SUPPORTS_EXTENDED_ATTRIBUTES':0x00800000,
             'FILE_SUPPORTS_HARD_LINKS':0x00400000, 'FILE_SUPPORTS_OBJECT_IDS':0x00010000, 'FILE_SUPPORTS_OPEN_BY_FILE_ID':0x01000000,
             'FILE_SUPPORTS_REPARSE_POINTS':0x00000080, 'FILE_SUPPORTS_SPARSE_FILES':0x00000040, 'FILE_SUPPORTS_TRANSACTIONS':0x00200000,
             'FILE_SUPPORTS_USN_JOURNAL':0x02000000, 'FILE_UNICODE_ON_DISK':0x00000004, 'FILE_VOLUME_IS_COMPRESSED':0x00008000,
-            'FILE_VOLUME_QUOTAS':0x00000020}):
+            'FILE_VOLUME_QUOTAS':0x00000020}.items():
         ans[name] = bool(num & flags)
     return ans
 
@@ -12,7 +12,6 @@ from css_parser.css import CSSRule, Property
 from calibre import force_unicode
 from calibre.ebooks import parse_css_length
 from calibre.ebooks.oeb.normalize_css import normalizers, safe_parser
-from polyglot.builtins import iteritems
 
 
 def compile_pat(pat):
@@ -44,7 +43,7 @@ class StyleDeclaration:
                 yield p, None
             else:
                 if p not in self.expanded_properties:
-                    self.expanded_properties[p] = [Property(k, v, p.literalpriority) for k, v in iteritems(n(p.name, p.propertyValue))]
+                    self.expanded_properties[p] = [Property(k, v, p.literalpriority) for k, v in n(p.name, p.propertyValue).items()]
                 for ep in self.expanded_properties[p]:
                     yield ep, p
 
@@ -7,8 +7,6 @@ __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
 import numbers
 from collections import OrderedDict
 
-from polyglot.builtins import iteritems
-
 
 class Inherit:
 
@@ -136,7 +134,7 @@ def read_border(parent, dest, XPath, get, border_edges=border_edges, name='pBdr'
 
     for border in XPath('./w:' + name)(parent):
         for edge in border_edges:
-            for prop, val in iteritems(read_single_border(border, edge, XPath, get)):
+            for prop, val in read_single_border(border, edge, XPath, get).items():
                 if val is not None:
                     vals[prop % edge] = val
 
@@ -15,7 +15,6 @@ from calibre.ebooks.docx.names import SVG_BLIP_URI, barename
 from calibre.utils.filenames import ascii_filename
 from calibre.utils.img import image_to_data, resize_to_fit
 from calibre.utils.imghdr import what
-from polyglot.builtins import iteritems
 
 
 class LinkedImageNotFound(ValueError):
@@ -87,7 +86,7 @@ def get_image_properties(parent, XPath, get):
 
 def get_image_margins(elem):
     ans = {}
-    for w, css in iteritems({'L':'left', 'T':'top', 'R':'right', 'B':'bottom'}):
+    for w, css in {'L':'left', 'T':'top', 'R':'right', 'B':'bottom'}.items():
         val = elem.get(f'dist{w}', None)
         if val is not None:
             try:
@@ -18,7 +18,6 @@ from calibre.ebooks.pdf.render.common import PAPER_SIZES
 from calibre.utils.date import utcnow
 from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1
 from calibre.utils.zipfile import ZipFile
-from polyglot.builtins import iteritems
 
 
 def xml2str(root, pretty_print=False, with_tail=False):
@@ -120,12 +119,12 @@ class DocumentRelationships:
     def __init__(self, namespace):
         self.rmap = {}
         self.namespace = namespace
-        for typ, target in iteritems({
+        for typ, target in {
             namespace.names['STYLES']: 'styles.xml',
             namespace.names['NUMBERING']: 'numbering.xml',
             namespace.names['WEB_SETTINGS']: 'webSettings.xml',
            namespace.names['FONTS']: 'fontTable.xml',
-        }):
+        }.items():
             self.add_relationship(target, typ)
 
     def get_relationship_id(self, target, rtype, target_mode=None):
@@ -172,7 +171,7 @@ class DOCX:
     def contenttypes(self):
         E = ElementMaker(namespace=self.namespace.namespaces['ct'], nsmap={None:self.namespace.namespaces['ct']})
         types = E.Types()
-        for partname, mt in iteritems({
+        for partname, mt in {
             '/word/footnotes.xml': 'application/vnd.openxmlformats-officedocument.wordprocessingml.footnotes+xml',
             '/word/document.xml': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml',
             '/word/numbering.xml': 'application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml',
@@ -184,15 +183,15 @@ class DOCX:
             '/word/webSettings.xml': 'application/vnd.openxmlformats-officedocument.wordprocessingml.webSettings+xml',
             '/docProps/core.xml': 'application/vnd.openxmlformats-package.core-properties+xml',
             '/docProps/app.xml': 'application/vnd.openxmlformats-officedocument.extended-properties+xml',
-        }):
+        }.items():
             types.append(E.Override(PartName=partname, ContentType=mt))
         added = {'png', 'gif', 'jpeg', 'jpg', 'svg', 'xml'}
         for ext in added:
             types.append(E.Default(Extension=ext, ContentType=guess_type('a.'+ext)[0]))
-        for ext, mt in iteritems({
+        for ext, mt in {
             'rels': 'application/vnd.openxmlformats-package.relationships+xml',
             'odttf': 'application/vnd.openxmlformats-officedocument.obfuscatedFont',
-        }):
+        }.items():
             added.add(ext)
             types.append(E.Default(Extension=ext, ContentType=mt))
         for fname in self.images:
@@ -14,7 +14,6 @@ from tinycss.css21 import CSS21Parser
 from calibre.ebooks import parse_css_length
 from calibre.ebooks.docx.writer.utils import convert_color, int_or_zero
 from calibre.utils.localization import lang_as_iso639_1
-from polyglot.builtins import iteritems
 
 css_parser = CSS21Parser()
 
@@ -589,7 +588,7 @@ class BlockStyle(DOCXStyle):
     def serialize_properties(self, pPr, normal_style):
         makeelement, w = self.makeelement, self.w
         spacing = makeelement(pPr, 'spacing')
-        for edge, attr in iteritems({'top':'before', 'bottom':'after'}):
+        for edge, attr in {'top':'before', 'bottom':'after'}.items():
             getter = attrgetter('css_margin_' + edge)
             css_val, css_unit = parse_css_length(getter(self))
             if css_unit in ('em', 'ex'):
@@ -37,7 +37,6 @@ from calibre.utils.icu import lower as icu_lower
 from calibre.utils.icu import upper as icu_upper
 from calibre.utils.localization import canonicalize_lang, get_lang
 from calibre.utils.xml_parse import safe_xml_fromstring
-from polyglot.builtins import iteritems
 from polyglot.urllib import unquote
 
 pretty_print_opf = False
@@ -1208,7 +1207,7 @@ class OPF: # {{{
     def page_progression_direction(self):
         spine = XPath('descendant::*[re:match(name(), "spine", "i")][1]')(self.root)
         if spine:
-            for k, v in iteritems(spine[0].attrib):
+            for k, v in spine[0].attrib.items():
                 if k == 'page-progression-direction' or k.endswith('}page-progression-direction'):
                     return v
 
@@ -22,7 +22,6 @@ from calibre.utils.date import fix_only_date, is_date_undefined, isoformat, utcn
 from calibre.utils.date import parse_date as parse_date_
 from calibre.utils.iso8601 import parse_iso8601
 from calibre.utils.localization import canonicalize_lang
-from polyglot.builtins import iteritems
 
 # Utils {{{
 _xpath_cache = {}
@@ -1066,7 +1065,7 @@ def read_metadata(root, ver=None, return_extra_data=False):
     ans.series, ans.series_index = s, si
     ans.link_maps = read_link_maps(root, prefixes, refines) or ans.link_maps
     ans.user_categories = read_user_categories(root, prefixes, refines) or ans.user_categories
-    for name, fm in iteritems(read_user_metadata(root, prefixes, refines) or {}):
+    for name, fm in (read_user_metadata(root, prefixes, refines) or {}).items():
         try:
             ans.set_user_metadata(name, fm)
         except Exception:
@@ -13,7 +13,6 @@ from calibre.constants import iswindows
 from calibre.ebooks.metadata import MetaInformation, check_doi, check_isbn, string_to_authors
 from calibre.ptempfile import TemporaryDirectory
 from calibre.utils.ipc.simple_worker import WorkerError, fork_job
-from polyglot.builtins import iteritems
 
 
 def get_tools():
@@ -169,7 +168,7 @@ def get_metadata(stream, cover=True):
 
     # Look for recognizable identifiers in the info dict, if they were not
     # found in the XMP metadata
-    for scheme, check_func in iteritems({'doi':check_doi, 'isbn':check_isbn}):
+    for scheme, check_func in {'doi':check_doi, 'isbn':check_isbn}.items():
         if scheme not in mi.get_identifiers():
             for k, v in info.items():
                 if k != 'xmp_metadata':
@@ -21,7 +21,6 @@ from calibre.ebooks.metadata.opf2 import dump_dict
 from calibre.utils.date import isoformat, now, parse_date
 from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1
 from calibre.utils.xml_parse import safe_xml_fromstring
-from polyglot.builtins import iteritems
 
 _xml_declaration = re.compile(r'<\?xml[^<>]+encoding\s*=\s*[\'"](.*?)[\'"][^<>]*>', re.IGNORECASE)
 
@@ -332,7 +331,7 @@ def metadata_from_xmp_packet(raw_bytes):
            identifiers[scheme] = val
 
     # Check Dublin Core for recognizable identifier types
-    for scheme, check_func in iteritems({'doi':check_doi, 'isbn':check_isbn}):
+    for scheme, check_func in {'doi':check_doi, 'isbn':check_isbn}.items():
         if scheme not in identifiers:
             val = check_func(first_simple('//dc:identifier', root))
             if val:
@@ -482,12 +481,12 @@ def metadata_to_xmp_packet(mi):
     dc = rdf.makeelement(expand('rdf:Description'), nsmap=nsmap('dc'))
     dc.set(expand('rdf:about'), '')
     rdf.append(dc)
-    for prop, tag in iteritems({'title':'dc:title', 'comments':'dc:description'}):
+    for prop, tag in {'title':'dc:title', 'comments':'dc:description'}.items():
         val = mi.get(prop) or ''
         create_alt_property(dc, tag, val)
-    for prop, (tag, ordered) in iteritems({
+    for prop, (tag, ordered) in {
         'authors':('dc:creator', True), 'tags':('dc:subject', False), 'publisher':('dc:publisher', False),
-    }):
+    }.items():
         val = mi.get(prop) or ()
         if isinstance(val, (str, bytes)):
             val = [val]
@@ -11,7 +11,6 @@ from collections import OrderedDict, namedtuple
 from calibre.ebooks.mobi.reader.headers import NULL_INDEX
 from calibre.ebooks.mobi.reader.index import CNCX, INDEX_HEADER_FIELDS, get_tag_section_start, parse_index_record, parse_indx_header, parse_tagx_section
 from calibre.ebooks.mobi.reader.ncx import default_entry, tag_fieldname_map
-from polyglot.builtins import iteritems
 
 File = namedtuple('File',
         'file_number name divtbl_count start_position length')
@@ -219,9 +218,9 @@ class NCXIndex(Index):
                 # offset
                 fieldvalue = tuple(tag_map[tag])
                 entry[fieldname] = fieldvalue
-                for which, name in iteritems({3:'text', 5:'kind', 70:'description',
+                for which, name in {3:'text', 5:'kind', 70:'description',
                         71:'author', 72:'image_caption',
-                        73:'image_attribution'}):
+                        73:'image_attribution'}.items():
                     if tag == which:
                         entry[name] = self.cncx.get(fieldvalue,
                                 default_entry[name])
@@ -29,7 +29,6 @@ from calibre.utils.img import AnimatedGIF, gif_data_to_png_data, save_cover_data
 from calibre.utils.imghdr import what
 from calibre.utils.logging import default_log
 from calibre.utils.xml_parse import safe_html_fromstring
-from polyglot.builtins import iteritems
 
 
 class TopazError(ValueError):
@@ -934,7 +933,7 @@ class MobiReader:
 
 
 def test_mbp_regex():
-    for raw, m in iteritems({
+    for raw, m in {
         '<mbp:pagebreak></mbp:pagebreak>':'',
         '<mbp:pagebreak xxx></mbp:pagebreak>yyy':' xxxyyy',
         '<mbp:pagebreak> </mbp:pagebreak>':'',
@@ -945,7 +944,7 @@ def test_mbp_regex():
         '</mbp:pagebreak>':'',
         '</mbp:pagebreak sdf>':' sdf',
         '</mbp:pagebreak><mbp:pagebreak></mbp:pagebreak>xxx':'xxx',
-    }):
+    }.items():
         ans = MobiReader.PAGE_BREAK_PAT.sub(r'\1', raw)
         if ans != m:
             raise Exception(f'{ans!r} != {m!r} for {raw!r}')
@@ -11,7 +11,6 @@ from calibre import replace_entities
 from calibre.ebooks.metadata.toc import TOC
 from calibre.ebooks.mobi.reader.headers import NULL_INDEX
 from calibre.ebooks.mobi.reader.index import read_index
-from polyglot.builtins import iteritems
 
 tag_fieldname_map = {
         1: ['pos',0],
@@ -69,9 +68,9 @@ def read_ncx(sections, index, codec):
             # offset
             fieldvalue = tuple(tag_map[tag])
             entry[fieldname] = fieldvalue
-            for which, name in iteritems({3:'text', 5:'kind', 70:'description',
+            for which, name in {3:'text', 5:'kind', 70:'description',
                     71:'author', 72:'image_caption',
-                    73:'image_attribution'}):
+                    73:'image_attribution'}.items():
                 if tag == which:
                     entry[name] = cncx.get(fieldvalue,
                             default_entry[name])
@@ -18,7 +18,6 @@ from calibre.ebooks.mobi.writer2 import PALMDOC, UNCOMPRESSED
 from calibre.ebooks.mobi.writer2.indexer import Indexer
 from calibre.ebooks.mobi.writer2.serializer import Serializer
 from calibre.utils.filenames import ascii_filename
-from polyglot.builtins import iteritems

 # Disabled as I don’t care about uncrossable breaks
 WRITE_UNCROSSABLE_BREAKS = False
@@ -423,10 +422,10 @@ class MobiWriter:
             extra_data_flags |= 0b10
         header_fields['extra_data_flags'] = extra_data_flags

-        for k, v in iteritems({'last_text_record':'last_text_record_idx',
+        for k, v in {'last_text_record':'last_text_record_idx',
             'first_non_text_record':'first_non_text_record_idx',
             'ncx_index':'primary_index_record_idx',
-            }):
+            }.items():
             header_fields[k] = getattr(self, v)
         if header_fields['ncx_index'] is None:
             header_fields['ncx_index'] = NULL_INDEX
@@ -13,7 +13,6 @@ from css_parser.css import PropertyValue
 from tinycss.fonts3 import parse_font, serialize_font_family

 from calibre.ebooks.oeb.base import css_text
-from polyglot.builtins import iteritems

 DEFAULTS = {'azimuth': 'center', 'background-attachment': 'scroll', # {{{
     'background-color': 'transparent', 'background-image': 'none',
@@ -281,7 +280,7 @@ def test_normalization(return_tests=False): # {{{
                 ans.update(expected)
                 return ans

-            for raw, expected in iteritems({
+            for raw, expected in {
                 'some_font': {'font-family':'some_font'}, 'inherit':{k:'inherit' for k in font_composition},
                 '1.2pt/1.4 A_Font': {'font-family':'A_Font', 'font-size':'1.2pt', 'line-height':'1.4'},
                 'bad font': {'font-family':'"bad font"'}, '10% serif': {'font-family':'serif', 'font-size':'10%'},
@@ -292,7 +291,7 @@ def test_normalization(return_tests=False): # {{{
                 {'font-family':'serif', 'font-weight':'bold', 'font-style':'italic', 'font-size':'larger',
                     'line-height':'normal', 'font-variant':'small-caps'},
                 '2em A B': {'font-family': '"A B"', 'font-size': '2em'},
-            }):
+            }.items():
                 val = tuple(parseStyle(f'font: {raw}', validate=False))[0].propertyValue
                 style = normalizers['font']('font', val)
                 self.assertDictEqual(font_dict(expected), style, raw)
@@ -316,39 +315,39 @@ def test_normalization(return_tests=False): # {{{
                     ans[f'border-{edge}-{val}'] = expected
                 return ans

-            for raw, expected in iteritems({
+            for raw, expected in {
                 'solid 1px red': {'color':'red', 'width':'1px', 'style':'solid'},
                 '1px': {'width': '1px'}, '#aaa': {'color': '#aaa'},
                 '2em groove': {'width':'2em', 'style':'groove'},
-            }):
+            }.items():
                 for edge in EDGES:
                     br = f'border-{edge}'
                     val = tuple(parseStyle(f'{br}: {raw}', validate=False))[0].propertyValue
                     self.assertDictEqual(border_edge_dict(expected, edge), normalizers[br](br, val))

-            for raw, expected in iteritems({
+            for raw, expected in {
                 'solid 1px red': {'color':'red', 'width':'1px', 'style':'solid'},
                 '1px': {'width': '1px'}, '#aaa': {'color': '#aaa'},
                 'thin groove': {'width':'thin', 'style':'groove'},
-            }):
+            }.items():
                 val = tuple(parseStyle('{}: {}'.format('border', raw), validate=False))[0].propertyValue
                 self.assertDictEqual(border_dict(expected), normalizers['border']('border', val))

-            for name, val in iteritems({
+            for name, val in {
                 'width': '10%', 'color': 'rgb(0, 1, 1)', 'style': 'double',
-            }):
+            }.items():
                 cval = tuple(parseStyle(f'border-{name}: {val}', validate=False))[0].propertyValue
                 self.assertDictEqual(border_val_dict(val, name), normalizers['border-'+name]('border-'+name, cval))

         def test_edge_normalization(self):
             def edge_dict(prefix, expected):
                 return {f'{prefix}-{edge}': x for edge, x in zip(EDGES, expected)}
-            for raw, expected in iteritems({
+            for raw, expected in {
                 '2px': ('2px', '2px', '2px', '2px'),
                 '1em 2em': ('1em', '2em', '1em', '2em'),
                 '1em 2em 3em': ('1em', '2em', '3em', '2em'),
                 '1 2 3 4': ('1', '2', '3', '4'),
-            }):
+            }.items():
                 for prefix in ('margin', 'padding'):
                     cval = tuple(parseStyle(f'{prefix}: {raw}', validate=False))[0].propertyValue
                     self.assertDictEqual(edge_dict(prefix, expected), normalizers[prefix](prefix, cval))
@@ -359,11 +358,11 @@ def test_normalization(return_tests=False): # {{{
                 for k, v in expected.items():
                     ans[f'list-style-{k}'] = v
                 return ans
-            for raw, expected in iteritems({
+            for raw, expected in {
                 'url(http://www.example.com/images/list.png)': {'image': 'url(http://www.example.com/images/list.png)'},
                 'inside square': {'position':'inside', 'type':'square'},
                 'upper-roman url(img) outside': {'position':'outside', 'type':'upper-roman', 'image':'url(img)'},
-            }):
+            }.items():
                 cval = tuple(parseStyle(f'list-style: {raw}', validate=False))[0].propertyValue
                 self.assertDictEqual(ls_dict(expected), normalizers['list-style']('list-style', cval))

@@ -383,7 +382,7 @@ def test_normalization(return_tests=False): # {{{
             ae({'list-style', 'list-style-image', 'list-style-type', 'list-style-position'}, normalize_filter_css({'list-style'}))

         def test_edge_condensation(self):
-            for s, v in iteritems({
+            for s, v in {
                 (1, 1, 3): None,
                 (1, 2, 3, 4): '2pt 3pt 4pt 1pt',
                 (1, 2, 3, 2): '2pt 3pt 2pt 1pt',
@@ -392,7 +391,7 @@ def test_normalization(return_tests=False): # {{{
                 (1, 1, 1, 1): '1pt',
                 ('2%', '2%', '2%', '2%'): '2%',
                 tuple('0 0 0 0'.split()): '0',
-            }):
+            }.items():
                 for prefix in ('margin', 'padding'):
                     css = {f'{prefix}-{x}': str(y)+'pt' if isinstance(y, numbers.Number) else y
                             for x, y in zip(('left', 'top', 'right', 'bottom'), s)}
@@ -18,7 +18,6 @@ from calibre.ebooks.oeb.base import OEB_STYLES, SVG, XHTML, css_text
 from calibre.ebooks.oeb.normalize_css import DEFAULTS, normalizers
 from calibre.ebooks.oeb.stylizer import INHERITED, media_ok
 from calibre.utils.resources import get_path as P
-from polyglot.builtins import iteritems

 _html_css_stylesheet = None

@@ -97,7 +96,7 @@ def iterdeclaration(decl):
     if n is None:
         yield p
     else:
-        for k, v in iteritems(n(p.name, p.propertyValue)):
+        for k, v in n(p.name, p.propertyValue).items():
             yield Property(k, v, p.literalpriority)


@@ -9,7 +9,6 @@ import shutil
 from calibre.ebooks.oeb.base import OEB_DOCS, OPF, XLINK, XPath, xml2text
 from calibre.ebooks.oeb.polish.replace import get_recommended_folders, replace_links
 from calibre.utils.imghdr import identify
-from polyglot.builtins import iteritems


 def set_azw3_cover(container, cover_path, report, options=None):
@@ -521,9 +520,9 @@ def set_epub_cover(container, cover_path, report, options=None, image_callback=N
         report(_('Cover updated') if updated else _('Cover inserted'))

     # Replace links to the old cover image/cover page
-    link_sub = {s:d for s, d in iteritems({
+    link_sub = {s:d for s, d in {
         cover_page:titlepage, wrapped_image:raster_cover,
-        cover_image:raster_cover, extra_cover_page:titlepage})
+        cover_image:raster_cover, extra_cover_page:titlepage}.items()
         if s is not None and s != d}
     if link_sub:
         replace_links(container, link_sub, frag_map=lambda x, y:None)
@@ -13,7 +13,6 @@ from calibre.ebooks.oeb.base import SVG_NS, XHTML_NS, XLINK_NS, XPath
 from calibre.ebooks.oeb.parse_utils import html5_parse
 from calibre.ebooks.oeb.polish.parsing import parse_html5 as parse
 from calibre.ebooks.oeb.polish.tests.base import BaseTest
-from polyglot.builtins import iteritems


 def nonvoid_cdata_elements(test, parse_function):
@@ -190,7 +189,7 @@ class ParsingTests(BaseTest):
         for ds in (False, True):
             src = '\n<html>\n<p>\n<svg><image />\n<b></svg> '
             root = parse(src, discard_namespaces=ds)
-            for tag, lnum in iteritems({'html':2, 'head':3, 'body':3, 'p':3, 'svg':4, 'image':4, 'b':5}):
+            for tag, lnum in {'html':2, 'head':3, 'body':3, 'p':3, 'svg':4, 'image':4, 'b':5}.items():
                 elem = root.xpath(f'//*[local-name()="{tag}"]')[0]
                 self.assertEqual(lnum, elem.sourceline, f'Line number incorrect for {tag}, source: {src}:')

@@ -14,7 +14,6 @@ from calibre import isbytestring, prepare_string_for_xml
 from calibre.ebooks.conversion.preprocess import DocAnalysis
 from calibre.ebooks.metadata.opf2 import OPFCreator
 from calibre.utils.cleantext import clean_ascii_chars
-from polyglot.builtins import iteritems

 HTML_TEMPLATE = '<html><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"/><title>%s </title></head><body>\n%s\n</body></html>'

@@ -146,7 +145,7 @@ def convert_markdown_with_metadata(txt, title='', extensions=DEFAULT_MD_EXTENSIO
     html = md.convert(txt)
     mi = Metadata(title or _('Unknown'))
     m = md.Meta
-    for k, v in iteritems({'date':'pubdate', 'summary':'comments'}):
+    for k, v in {'date':'pubdate', 'summary':'comments'}.items():
         if v not in m and k in m:
             m[v] = m.pop(k)
     for k in 'title authors series tags pubdate comments publisher rating'.split():
@@ -176,8 +176,7 @@ class Manager(QObject): # {{{
             pass

     def finalize(self):
-        custom_keys_map = {un:tuple(keys) for un, keys in iteritems(self.config.get(
-            'map', {}))}
+        custom_keys_map = {un:tuple(keys) for un, keys in self.config.get('map', {}).items()}
         self.keys_map = finalize(self.shortcuts, custom_keys_map=custom_keys_map)

     def replace_action(self, unique_name, new_action):
@@ -309,7 +308,7 @@ class ConfigModel(SearchQueryParser, QAbstractItemModel):
         options_map = {}
         options_map.update(self.keyboard.config.get('options_map', {}))
         # keep mapped keys that are marked persistent.
-        for un, keys in iteritems(self.keyboard.config.get('map', {})):
+        for un, keys in self.keyboard.config.get('map', {}).items():
             if options_map.get(un, {}).get('persist_shortcut',False):
                 kmap[un] = keys
         for node in self.all_shortcuts:
@@ -49,7 +49,6 @@ from calibre.utils.icu import sort_key
 from calibre.utils.localization import calibre_langcode_to_name, ngettext
 from calibre.utils.resources import get_path as P
 from calibre.utils.search_query_parser import ParseException, SearchQueryParser
-from polyglot.builtins import iteritems

 Counts = namedtuple('Counts', 'library_total total current')

@@ -375,7 +374,7 @@ class BooksModel(QAbstractTableModel): # {{{

         if db:
             style_map = {'bold': self.bold_font, 'bi': self.bi_font, 'italic': self.italic_font}
-            self.styled_columns = {k: style_map.get(v, None) for k, v in iteritems(db.new_api.pref('styled_columns', {}))}
+            self.styled_columns = {k: style_map.get(v, None) for k, v in db.new_api.pref('styled_columns', {}).items()}
         self.alignment_map = {}
         self.ids_to_highlight_set = set()
         self.current_highlighted_idx = None
@@ -51,7 +51,6 @@ from calibre.startup import connect_lambda
 from calibre.utils.date import UNDEFINED_DATE
 from calibre.utils.icu import lower as icu_lower
 from calibre.utils.localization import ngettext
-from polyglot.builtins import iteritems

 Widgets = namedtuple('Widgets', 'new old label button')

@@ -279,7 +278,7 @@ class IdentifiersEdit(LineEdit):
     @property
     def as_dict(self):
         parts = (x.strip() for x in self.current_val.split(',') if x.strip())
-        return {k:v for k, v in iteritems({x.partition(':')[0].strip():x.partition(':')[-1].strip() for x in parts}) if k and v}
+        return {k:v for k, v in {x.partition(':')[0].strip():x.partition(':')[-1].strip() for x in parts}.items() if k and v}

     @as_dict.setter
     def as_dict(self, val):
@@ -39,7 +39,6 @@ from calibre.gui2.widgets2 import Dialog
 from calibre.utils.config import JSONConfig
 from calibre.utils.icu import numeric_sort_key as sort_key
 from calibre.utils.resources import get_image_path as I
-from polyglot.builtins import iteritems

 ENTRY_ROLE = Qt.ItemDataRole.UserRole

@@ -491,7 +490,7 @@ def register_keyboard_shortcuts(gui=None, finalize=False):
             gui.removeAction(action)
         registered_shortcuts.clear()

-    for filetype, applications in iteritems(oprefs['entries']):
+    for filetype, applications in oprefs['entries'].items():
         for application in applications:
             text = entry_to_icon_text(application, only_text=True)
             t = _('cover image') if filetype.upper() == 'COVER_IMAGE' else filetype.upper()
@@ -34,7 +34,6 @@ from calibre.gui2.widgets2 import Dialog
 from calibre.startup import connect_lambda
 from calibre.utils.icu import sort_key
 from calibre.utils.resources import set_data
-from polyglot.builtins import iteritems


 class IdLinksRuleEdit(Dialog):
@@ -95,7 +94,7 @@ class IdLinksEditor(Dialog):
         la.setWordWrap(True)
         l.addWidget(la)
         items = []
-        for k, lx in iteritems(msprefs['id_link_rules']):
+        for k, lx in msprefs['id_link_rules'].items():
             for n, t in lx:
                 items.append((k, n, t))
         items.sort(key=lambda x: sort_key(x[1]))
@@ -25,7 +25,6 @@ from calibre.utils.icu import collation_order_for_partitioning, contains, lower,
 from calibre.utils.icu import lower as icu_lower
 from calibre.utils.icu import upper as icu_upper
 from calibre.utils.serialize import json_dumps, json_loads
-from polyglot.builtins import iteritems

 TAG_SEARCH_STATES = {'clear': 0, 'mark_plus': 1, 'mark_plusplus': 2,
                      'mark_minus': 3, 'mark_minusminus': 4}
@@ -397,7 +396,7 @@ class TagsModel(QAbstractItemModel): # {{{
         self.value_icons = {}
         self.value_icon_cache = {}
         self.icon_config_dir = os.path.join(config_dir, 'tb_icons')
-        for k, v in iteritems(self.prefs['tags_browser_category_icons']):
+        for k, v in self.prefs['tags_browser_category_icons'].items():
             icon = QIcon(os.path.join(self.icon_config_dir, v))
             if len(icon.availableSizes()) > 0:
                 self.category_custom_icons[k] = icon
@@ -20,7 +20,6 @@ from calibre.gui2.tweak_book.completion.utils import DataError, control, data
 from calibre.utils.icu import numeric_sort_key
 from calibre.utils.ipc import eintr_retry_call
 from calibre.utils.matcher import Matcher
-from polyglot.builtins import iteritems

 Request = namedtuple('Request', 'id type data query')

@@ -86,7 +85,7 @@ def complete_names(names_data, data_conn):
         names_cache['font'] = frozenset(n for n in all_names if n.mime_type in OEB_FONTS)
         names_cache['css_resource'] = names_cache['image'] | names_cache['font']
         names_cache['descriptions'] = d = {}
-        for x, desc in iteritems({'text_link':_('Text'), 'stylesheet':_('Stylesheet'), 'image':_('Image'), 'font':_('Font')}):
+        for x, desc in {'text_link':_('Text'), 'stylesheet':_('Stylesheet'), 'image':_('Image'), 'font':_('Font')}.items():
             for n in names_cache[x]:
                 d[n] = desc
     names_type, base, root = names_data
@@ -11,7 +11,6 @@ from qt.core import QTextBlockUserData
 from calibre.gui2.tweak_book import verify_link
 from calibre.gui2.tweak_book.editor import CSS_PROPERTY, LINK_PROPERTY, syntax_text_char_format
 from calibre.gui2.tweak_book.editor.syntax.base import SyntaxHighlighter
-from polyglot.builtins import iteritems

 space_pat = re.compile(r'[ \n\t\r\f]+')
 cdo_pat = re.compile(r'/\*')
@@ -295,10 +294,10 @@ def create_formats(highlighter):
         'pseudo_selector': theme['Special'],
         'tag': theme['Identifier'],
     }
-    for name, msg in iteritems({
+    for name, msg in {
         'unknown-normal': _('Invalid text'),
         'unterminated-string': _('Unterminated string'),
-    }):
+    }.items():
         f = formats[name] = syntax_text_char_format(formats['error'])
         f.setToolTip(msg)
     formats['link'] = syntax_text_char_format(theme['Link'])
@@ -28,7 +28,6 @@ from calibre.gui2.tweak_book.editor.syntax.css import state_map as css_state_map
 from calibre.spell.break_iterator import split_into_words_and_positions
 from calibre.spell.dictionary import parse_lang_code
 from calibre_extensions import html_syntax_highlighter as _speedup
-from polyglot.builtins import iteritems

 cdata_tags = frozenset(['title', 'textarea', 'style', 'script', 'xmp', 'iframe', 'noembed', 'noframes', 'noscript'])
 normal_pat = re.compile(r'[^<>&]+')
@@ -469,7 +468,7 @@ def create_formats(highlighter, add_css=True):
         'nbsp': t['SpecialCharacter'],
         'spell': t['SpellError'],
     }
-    for name, msg in iteritems({
+    for name, msg in {
         '<': _('An unescaped < is not allowed. Replace it with &lt;'),
         '&': _('An unescaped ampersand is not allowed. Replace it with &amp;'),
         '>': _('An unescaped > is not allowed. Replace it with &gt;'),
@@ -478,7 +477,7 @@ def create_formats(highlighter, add_css=True):
         'bad-closing': _('A closing tag must contain only the tag name and nothing else'),
         'no-attr-value': _('Expecting an attribute value'),
         'only-prefix': _('A tag name cannot end with a colon'),
-    }):
+    }.items():
         f = formats[name] = syntax_text_char_format(formats['error'])
         f.setToolTip(msg)
     f = formats['title'] = syntax_text_char_format()
@@ -61,7 +61,6 @@ from calibre.utils.icu import numeric_sort_key
 from calibre.utils.localization import ngettext, pgettext
 from calibre_extensions.progress_indicator import set_no_activate_on_click
 from polyglot.binary import as_hex_unicode
-from polyglot.builtins import iteritems

 FILE_COPY_MIME = 'application/calibre-edit-book-files'
 TOP_ICON_SIZE = 24
@@ -298,13 +297,13 @@ class FileList(QTreeWidget, OpenWithHandler):
         self.font_name_cache = {}
         self.top_level_pixmap_cache = {
             name: QIcon.ic(icon).pixmap(TOP_ICON_SIZE, TOP_ICON_SIZE)
-            for name, icon in iteritems({
+            for name, icon in {
                 'text':'keyboard-prefs.png',
                 'styles':'lookfeel.png',
                 'fonts':'font.png',
                 'misc':'mimetypes/dir.png',
                 'images':'view-image.png',
-            })}
+            }.items()}
         self.itemActivated.connect(self.item_double_clicked)

     def possible_rename_requested(self, index, old, new):
@@ -57,7 +57,7 @@ from calibre.utils.date import utcnow
 from calibre.utils.img import image_from_path
 from calibre.utils.ipc.simple_worker import WorkerError
 from calibre.utils.localization import _
-from polyglot.builtins import as_bytes, as_unicode, iteritems
+from polyglot.builtins import as_bytes, as_unicode


 def is_float(x):
@@ -678,7 +678,7 @@ class EbookViewer(MainWindow):
                 for annot_type, annots in lib_amap.items():
                     merge_annotations(annots, amap)
             else:
-                for annot_type, annots in iteritems(calibre_book_data['annotations_map']):
+                for annot_type, annots in calibre_book_data['annotations_map'].items():
                     merge_annotations(annots, amap)

     def update_window_title(self):
@@ -15,7 +15,6 @@ from calibre.constants import filesystem_encoding
 from calibre.db.constants import COVER_FILE_NAME, DATA_DIR_NAME, METADATA_FILE_NAME, NOTES_DIR_NAME, TRASH_DIR_NAME
 from calibre.ebooks import BOOK_EXTENSIONS
 from calibre.utils.localization import _
-from polyglot.builtins import iteritems

 EBOOK_EXTENSIONS = frozenset(BOOK_EXTENSIONS)
 NORMALS = frozenset({METADATA_FILE_NAME, COVER_FILE_NAME, DATA_DIR_NAME})
@@ -241,13 +240,13 @@ class CheckLibrary:
         missing = book_formats_lc - formats_lc

         # Check: any books that aren't formats or normally there?
-        for lcfn,ccfn in iteritems(lc_map(filenames, unknowns)):
+        for lcfn,ccfn in lc_map(filenames, unknowns).items():
             if lcfn in missing: # An unknown format correctly registered
                 continue
             self.extra_files.append((title_dir, os.path.join(db_path, ccfn), book_id))

         # Check: any book formats that should be there?
-        for lcfn,ccfn in iteritems(lc_map(book_formats, missing)):
+        for lcfn,ccfn in lc_map(book_formats, missing).items():
             if lcfn in unknowns: # An unknown format correctly registered
                 continue
             self.missing_formats.append((title_dir, os.path.join(db_path, ccfn), book_id))
@@ -19,7 +19,6 @@ from calibre.utils.config import JSONConfig
 from calibre.utils.icu import capitalize
 from calibre.utils.localization import _, get_lang, get_system_locale
 from calibre.utils.resources import get_path as P
-from polyglot.builtins import iteritems

 Dictionary = namedtuple('Dictionary', 'primary_locale locales dicpath affpath builtin name id')
 LoadedDictionary = namedtuple('Dictionary', 'primary_locale locales obj builtin name id')
@@ -113,7 +112,7 @@ def best_locale_for_language(langcode):


 def preferred_dictionary(locale):
-    return {parse_lang_code(k):v for k, v in iteritems(dprefs['preferred_dictionaries'])}.get(locale, None)
+    return {parse_lang_code(k):v for k, v in dprefs['preferred_dictionaries'].items()}.get(locale, None)


 def remove_dictionary(dictionary):
@@ -121,7 +120,7 @@ def remove_dictionary(dictionary):
         raise ValueError('Cannot remove builtin dictionaries')
     base = os.path.dirname(dictionary.dicpath)
     shutil.rmtree(base)
-    dprefs['preferred_dictionaries'] = {k:v for k, v in iteritems(dprefs['preferred_dictionaries']) if v != dictionary.id}
+    dprefs['preferred_dictionaries'] = {k:v for k, v in dprefs['preferred_dictionaries'].items() if v != dictionary.id}


 def rename_dictionary(dictionary, name):
@@ -18,7 +18,6 @@ from calibre.constants import config_dir
 from calibre.utils.resources import get_path as P
 from calibre.utils.xml_parse import safe_xml_fromstring
 from calibre.utils.zipfile import ZipFile
-from polyglot.builtins import iteritems

 NS_MAP = {
     'oor': 'http://openoffice.org/2001/registry',
@@ -127,7 +126,7 @@ def _import_from_virtual_directory(read_file_func, name, dest_dir=None, prefix='
     root = safe_xml_fromstring(read_file_func('META-INF/manifest.xml'))
     xcu = XPath('//manifest:file-entry[@manifest:media-type="application/vnd.sun.star.configuration-data"]')(root)[0].get(
         '{{{}}}full-path'.format(NS_MAP['manifest']))
-    for (dic, aff), locales in iteritems(parse_xcu(read_file_func(xcu), origin='')):
+    for (dic, aff), locales in parse_xcu(read_file_func(xcu), origin='').items():
         dic, aff = dic.lstrip('/'), aff.lstrip('/')
         d = tempfile.mkdtemp(prefix=prefix, dir=dest_dir)
         locales = uniq([x for x in map(fill_country_code, locales) if parse_lang_code(x).countrycode])
@@ -16,7 +16,6 @@ from calibre.srv.utils import get_library_data
 from calibre.utils.localization import _
 from calibre.utils.monotonic import monotonic
 from calibre.utils.shared_file import share_open
-from polyglot.builtins import iteritems

 receive_data_methods = {'GET', 'POST'}
 conversion_jobs = {}
@@ -233,7 +232,7 @@ def get_conversion_options(input_fmt, output_fmt, book_id, db):
     ans['defaults'].update(defaults)
     ans['help'] = plumber.get_all_help()

-    for group_name, option_names in iteritems(OPTIONS['pipe']):
+    for group_name, option_names in OPTIONS['pipe'].items():
         merge_group(group_name, option_names)

     group_name, option_names = options_for_input_fmt(input_fmt)
@@ -8,7 +8,6 @@ from calibre import prints
 from calibre.constants import iswindows, preferred_encoding
 from calibre.utils.config import OptionParser
 from calibre.utils.localization import _, ngettext
-from polyglot.builtins import iteritems


 def create_subcommand_parser(name, usage):
@@ -365,7 +364,7 @@ def manage_users_cli(path=None, args=()):
                 prints(
                     _('{} has the following additional per-library restrictions:')
                     .format(username))
-                for k, v in iteritems(r['library_restrictions']):
+                for k, v in r['library_restrictions'].items():
                     prints(k + ':', v)
             else:
                 prints(_('{} has no additional per-library restrictions').format(username))
@@ -14,7 +14,6 @@ from calibre import as_unicode
 from calibre.constants import config_dir
 from calibre.utils.config import from_json, to_json
 from calibre.utils.localization import _
-from polyglot.builtins import iteritems


 def as_json(data):
@@ -48,7 +47,7 @@ def serialize_restriction(r):
         v = r.get(x)
         if v:
             ans[x] = list(v)
-    ans['library_restrictions'] = {l.lower(): v or '' for l, v in iteritems(r.get('library_restrictions', {}))}
+    ans['library_restrictions'] = {l.lower(): v or '' for l, v in r.get('library_restrictions', {}).items()}
     return json.dumps(ans)


@@ -23,7 +23,7 @@ from calibre.utils.shared_file import share_open
 from calibre.utils.socket_inheritance import set_socket_inherit
 from polyglot.binary import as_hex_unicode as encode_name
 from polyglot.binary import from_hex_unicode as decode_name
-from polyglot.builtins import as_unicode, iteritems
+from polyglot.builtins import as_unicode

 HTTP1 = 'HTTP/1.0'
 HTTP11 = 'HTTP/1.1'
@@ -49,7 +49,7 @@ class MultiDict(dict): # {{{
     def create_from_query_string(qs):
         ans = MultiDict()
         qs = as_unicode(qs)
-        for k, v in iteritems(parse_qs(qs, keep_blank_values=True)):
+        for k, v in parse_qs(qs, keep_blank_values=True).items():
             dict.__setitem__(ans, as_unicode(k), [as_unicode(x) for x in v])
         return ans

@@ -10,7 +10,7 @@ from collections import defaultdict
 from io import BytesIO

 from calibre.utils.resources import get_path as P
-from polyglot.builtins import as_bytes, iteritems
+from polyglot.builtins import as_bytes


 class UnsupportedFont(ValueError):
@@ -277,9 +277,9 @@ def get_all_font_names(raw, raw_is_table=False):
     records = _get_font_names(raw, raw_is_table)
     ans = {}

-    for name, num in iteritems({'family_name':1, 'subfamily_name':2, 'full_name':4,
+    for name, num in {'family_name':1, 'subfamily_name':2, 'full_name':4,
             'preferred_family_name':16, 'preferred_subfamily_name':17,
-            'wws_family_name':21, 'wws_subfamily_name':22}):
+            'wws_family_name':21, 'wws_subfamily_name':22}.items():
         try:
             ans[name] = decode_name_record(records[num])
         except (IndexError, KeyError, ValueError):
@@ -14,13 +14,12 @@ from calibre.ptempfile import TemporaryDirectory
 from calibre.utils.localization import lang_as_iso639_1
 from calibre.utils.lock import ExclusiveFile
 from calibre.utils.resources import get_path as P
-from polyglot.builtins import iteritems


 def locale_map():
     ans = getattr(locale_map, 'ans', None)
     if ans is None:
-        ans = locale_map.ans = {k.lower(): v for k, v in iteritems(json.loads(P('hyphenation/locales.json', data=True)))}
+        ans = locale_map.ans = {k.lower(): v for k, v in json.loads(P('hyphenation/locales.json', data=True)).items()}
     return ans


@@ -9,7 +9,7 @@ import unittest
 from contextlib import contextmanager

 from calibre.utils import icu
-from polyglot.builtins import cmp, iteritems
+from polyglot.builtins import cmp


 @contextmanager
@@ -66,7 +66,7 @@ class TestICU(unittest.TestCase):
         with make_collation_func('scmp', 'es', maker=icu.make_two_arg_func) as scmp:
             self.assertNotEqual(0, scmp('pena', 'peña'))

-        for k, v in iteritems({'pèché': 'peche', 'flüße':'Flusse', 'Štepánek':'ŠtepaneK'}):
+        for k, v in {'pèché': 'peche', 'flüße':'Flusse', 'Štepánek':'ŠtepaneK'}.items():
             self.ae(0, icu.primary_strcmp(k, v))

         # Test different types of collation
@@ -100,7 +100,7 @@ class TestICU(unittest.TestCase):
         self.ae((1, 1), icu.find('\U0001f431', 'x\U0001f431x'))
         self.ae((1, 1), icu.find('y', '\U0001f431y'))
         self.ae((0, 4), icu.primary_find('pena', 'peña'))
-        for k, v in iteritems({'pèché': 'peche', 'flüße':'Flusse', 'Štepánek':'ŠtepaneK'}):
+        for k, v in {'pèché': 'peche', 'flüße':'Flusse', 'Štepánek':'ŠtepaneK'}.items():
             self.ae((1, len(k)), icu.primary_find(v, ' ' + k), f'Failed to find {v} in {k}')
         self.assertTrue(icu.startswith(b'abc', b'ab'))
         self.assertTrue(icu.startswith('abc', 'abc'))
@@ -9,7 +9,6 @@ import re
 from gettext import GNUTranslations, NullTranslations

 from calibre.utils.resources import get_path as P
-from polyglot.builtins import iteritems

 _available_translations = None

@@ -494,7 +493,7 @@ def lang_map():
     translate = _
     global _lang_map
     if _lang_map is None:
-        _lang_map = {k:translate(v) for k, v in iteritems(iso639['by_3'])}
+        _lang_map = {k:translate(v) for k, v in iso639['by_3'].items()}
     return _lang_map


@@ -525,7 +524,7 @@ def langnames_to_langcodes(names):
     translate = _
     ans = {}
     names = set(names)
-    for k, v in iteritems(iso639['by_3']):
+    for k, v in iso639['by_3'].items():
         tv = translate(v)
         if tv in names:
             names.remove(tv)
@@ -16,7 +16,6 @@ from calibre.utils.localization import _
 from calibre.utils.lock import singleinstance
 from calibre.utils.winreg.lib import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, Key
 from calibre_extensions import winutil
-from polyglot.builtins import iteritems

 # See https://msdn.microsoft.com/en-us/library/windows/desktop/cc144154(v=vs.85).aspx

@@ -117,7 +116,7 @@ def register():
     prog_id_map = {ext:progid_name(data['assoc_name'], ext) for ext in ext_map}

     with Key(capabilities_path) as key:
-        for k, v in iteritems({'ApplicationDescription':'description', 'ApplicationName':'name'}):
+        for k, v in {'ApplicationDescription':'description', 'ApplicationName':'name'}.items():
             key.set(k, data[v])
         key.set('ApplicationIcon', f'{exe},0')
         key.set_default_value(r'shell\open\command', f'"{exe}" "%1"')
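Every hunk above applies the same mechanical substitution: the Python 2 compatibility helper iteritems(d) from polyglot.builtins is dropped in favour of the built-in dict.items(), which on Python 3 already returns a lazy view. A minimal sketch of the equivalence (illustrative only, not part of this commit; the dictionary below is made up for the example):

    # Python 3: dict.items() returns a view, so the iteritems() shim is redundant.
    stats = {'translated': 9, 'untranslated': 1}

    # Old, Python 2/3 compatible form:
    #     from polyglot.builtins import iteritems
    #     for key, value in iteritems(stats): ...

    # New, Python 3 only form used throughout this commit:
    for key, value in stats.items():
        print(key, value)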