itervalues (regex 3)

(set|frozenset|list|tuple|enumerate|sorted|max|min|sum)\(itervalues\(([\w\.]+)\)
\1(\2.values()
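A minimal sketch (not part of this commit) of how the pattern/replacement pair above could be applied over a source tree with Python's re module. Only the regex and the replacement come from the commit message; the script itself, the src/ root and the *.py glob are assumptions:

import pathlib
import re

# Pattern and replacement copied from the commit message above.
PATTERN = re.compile(
    r'(set|frozenset|list|tuple|enumerate|sorted|max|min|sum)\(itervalues\(([\w\.]+)\)'
)
REPLACEMENT = r'\1(\2.values()'

# Assumed layout: walk every .py file under src/ and rewrite it in place.
for path in pathlib.Path('src').rglob('*.py'):
    text = path.read_text(encoding='utf-8')
    new_text = PATTERN.sub(REPLACEMENT, text)
    if new_text != text:
        path.write_text(new_text, encoding='utf-8')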

Replace itervalues() used on simple variables/attributes in a known iterable.
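The substitution is behavior-preserving at these call sites because the result is consumed immediately by set(), list(), sum() and the like. Assuming the Python 3 shim in polyglot.builtins is essentially a thin wrapper returning iter(d.values()), both spellings produce the same value; an illustrative snippet (not from the commit):

def itervalues(d):
    # Assumed shape of the polyglot.builtins shim on Python 3.
    return iter(d.values())

stats = {'translated': 40, 'untranslated': 10}
assert sum(itervalues(stats)) == sum(stats.values()) == 50
assert set(itervalues(stats)) == set(stats.values()) == {10, 40}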
un-pogaz 2025-11-17 12:20:12 +01:00
parent 4ecf018231
commit 8ed12a2e78
28 changed files with 55 additions and 71 deletions

View File

@ -12,7 +12,7 @@ import os
import shutil
import zipfile
from polyglot.builtins import iteritems, itervalues, only_unicode_recursive
from polyglot.builtins import iteritems, only_unicode_recursive
from setup import Command, basenames, download_securely, dump_json
@ -211,7 +211,7 @@ class Resources(Command): # {{{
self.info('\tCreating user-manual-translation-stats.json')
d = {}
for lc, stats in iteritems(json.load(open(self.j(self.d(self.SRC), 'manual', 'locale', 'completed.json')))):
total = sum(itervalues(stats))
total = sum(stats.values())
d[lc] = stats['translated'] / float(total)
dump_json(d, self.j(self.RESOURCES, 'user-manual-translation-stats.json'))

View File

@ -16,7 +16,6 @@ from calibre.constants import filesystem_encoding, ismacos, iswindows
from calibre.ebooks import BOOK_EXTENSIONS
from calibre.utils.filenames import make_long_path_useable
from calibre.utils.icu import lower as icu_lower
from polyglot.builtins import itervalues
def splitext(path):
@ -163,7 +162,7 @@ def find_books_in_directory(dirpath, single_book_per_directory, compiled_rules=(
if allow_path(path, ext, compiled_rules):
formats[ext] = path
if formats_ok(formats):
yield list(itervalues(formats))
yield list(formats.values())
else:
books = defaultdict(dict)
for path in listdir_impl(dirpath, sort_by_mtime=True):
@ -173,7 +172,7 @@ def find_books_in_directory(dirpath, single_book_per_directory, compiled_rules=(
for formats in books.values():
if formats_ok(formats):
yield list(itervalues(formats))
yield list(formats.values())
def create_format_map(formats):

View File

@ -72,7 +72,7 @@ from calibre.utils.formatter_functions import compile_user_template_functions, f
from calibre.utils.icu import lower as icu_lower
from calibre.utils.icu import sort_key
from calibre.utils.resources import get_path as P
from polyglot.builtins import cmp, itervalues, reraise
from polyglot.builtins import cmp, reraise
# }}}
@ -937,7 +937,7 @@ class DB:
for k,v in self.FIELD_MAP.items():
self.field_metadata.set_field_record_index(k, v, prefer_custom=False)
base = max(itervalues(self.FIELD_MAP))
base = max(self.FIELD_MAP.values())
for label_ in sorted(self.custom_column_label_map):
data = self.custom_column_label_map[label_]

View File

@ -261,7 +261,7 @@ class Cache:
self.backend.dirty_books_with_dirtied_annotations()
self.dirtied_cache = {x:i for i, x in enumerate(self.backend.dirtied_books())}
if self.dirtied_cache:
self.dirtied_sequence = max(itervalues(self.dirtied_cache))+1
self.dirtied_sequence = max(self.dirtied_cache.values())+1
self._initialize_dynamic_categories()
@write_api
@ -1604,12 +1604,12 @@ class Cache:
new_dirtied = book_ids - already_dirtied
already_dirtied = {book_id:self.dirtied_sequence+i for i, book_id in enumerate(already_dirtied)}
if already_dirtied:
self.dirtied_sequence = max(itervalues(already_dirtied)) + 1
self.dirtied_sequence = max(already_dirtied.values()) + 1
self.dirtied_cache.update(already_dirtied)
if new_dirtied:
self.backend.dirty_books(new_dirtied)
new_dirtied = {book_id:self.dirtied_sequence+i for i, book_id in enumerate(new_dirtied)}
self.dirtied_sequence = max(itervalues(new_dirtied)) + 1
self.dirtied_sequence = max(new_dirtied.values()) + 1
self.dirtied_cache.update(new_dirtied)
@write_api
@ -1625,7 +1625,7 @@ class Cache:
new_dirtied = book_ids - set(self.dirtied_cache)
if new_dirtied:
new_dirtied = {book_id:self.dirtied_sequence+i for i, book_id in enumerate(new_dirtied)}
self.dirtied_sequence = max(itervalues(new_dirtied)) + 1
self.dirtied_sequence = max(new_dirtied.values()) + 1
self.dirtied_cache.update(new_dirtied)
@write_api

View File

@ -232,7 +232,7 @@ class ManyToOneTable(Table):
bcm[book] = item_id
def fix_link_table(self, db):
linked_item_ids = set(itervalues(self.book_col_map))
linked_item_ids = set(self.book_col_map.values())
extra_item_ids = linked_item_ids - set(self.id_map)
if extra_item_ids:
for item_id in extra_item_ids:

View File

@ -227,14 +227,14 @@ class AddRemoveTest(BaseTest):
authors = cache.fields['authors'].table
# Delete a single book, with no formats and check cleaning
self.assertIn('Unknown', set(itervalues(authors.id_map)))
self.assertIn('Unknown', set(authors.id_map.values()))
olen = len(authors.id_map)
item_id = {v:k for k, v in authors.id_map.items()}['Unknown']
cache.remove_books((3,))
for c in (cache, self.init_cache()):
table = c.fields['authors'].table
self.assertNotIn(3, c.all_book_ids())
self.assertNotIn('Unknown', set(itervalues(table.id_map)))
self.assertNotIn('Unknown', set(table.id_map.values()))
self.assertNotIn(item_id, table.asort_map)
self.assertNotIn(item_id, table.link_map)
ae(len(table.id_map), olen-1)
@ -252,7 +252,7 @@ class AddRemoveTest(BaseTest):
for c in (cache, self.init_cache()):
table = c.fields['authors'].table
self.assertNotIn(1, c.all_book_ids())
self.assertNotIn('Author Two', set(itervalues(table.id_map)))
self.assertNotIn('Author Two', set(table.id_map.values()))
self.assertNotIn(6, set(itervalues(c.fields['rating'].table.id_map)))
self.assertIn('A Series One', set(itervalues(c.fields['series'].table.id_map)))
self.assertNotIn('My Series Two', set(itervalues(c.fields['#series'].table.id_map)))

View File

@ -472,7 +472,7 @@ class ReadingTest(BaseTest):
table = cache.fields['authors'].table
table.set_sort_names({next(iter(table.id_map)): 'Fake Sort'}, cache.backend)
authors = tuple(itervalues(table.id_map))
authors = tuple(table.id_map.values())
nval = cache.author_sort_from_authors(authors)
self.assertIn('Fake Sort', nval)
@ -489,7 +489,7 @@ class ReadingTest(BaseTest):
cache.set_field('series', {3:'test series'})
cache.set_field('series_index', {3:13})
table = cache.fields['series'].table
series = tuple(itervalues(table.id_map))
series = tuple(table.id_map.values())
nvals = {s:cache.get_next_series_num_for(s) for s in series}
db = self.init_old()
self.assertEqual({s:db.get_next_series_num_for(s) for s in series}, nvals)

View File

@ -34,7 +34,6 @@ from calibre.prints import debug_print
from calibre.ptempfile import PersistentTemporaryFile, TemporaryDirectory, better_mktemp
from calibre.utils.config_base import prefs
from calibre.utils.date import parse_date
from polyglot.builtins import itervalues
EPUB_EXT = '.epub'
KEPUB_EXT = '.kepub'
@ -435,7 +434,7 @@ class KOBO(USBMS):
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
for idx in sorted(itervalues(bl_cache), reverse=True, key=lambda x: x or -1):
for idx in sorted(bl_cache.values(), reverse=True, key=lambda x: x or -1):
if idx is not None:
need_sync = True
del bl[idx]
@ -2137,7 +2136,7 @@ class KOBOTOUCH(KOBO):
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
for idx in sorted(itervalues(bl_cache), reverse=True, key=lambda x: x or -1):
for idx in sorted(bl_cache.values(), reverse=True, key=lambda x: x or -1):
if idx is not None:
if not os.path.exists(self.normalize_path(os.path.join(prefix, bl[idx].lpath))) or not bl[idx].contentID:
need_sync = True

View File

@ -25,7 +25,7 @@ from calibre.devices.mtp.filesystem_cache import FileOrFolder, convert_timestamp
from calibre.ptempfile import PersistentTemporaryDirectory, SpooledTemporaryFile
from calibre.utils.filenames import shorten_components_to
from calibre.utils.icu import lower as icu_lower
from polyglot.builtins import as_bytes, iteritems, itervalues
from polyglot.builtins import as_bytes, iteritems
BASE = importlib.import_module('calibre.devices.mtp.{}.driver'.format('windows' if iswindows else 'unix')).MTP_DEVICE
DEFAULT_THUMBNAIL_HEIGHT = 320
@ -335,7 +335,7 @@ class MTP_DEVICE(BASE):
book.path = mtp_file.mtp_id_path
# Remove books in the cache that no longer exist
for idx in sorted(itervalues(relpath_cache), reverse=True):
for idx in sorted(relpath_cache.values(), reverse=True):
del bl[idx]
need_sync = True

View File

@ -17,7 +17,7 @@ from calibre.ebooks.metadata.book.base import Metadata
from calibre.prints import debug_print
from calibre.utils.config_base import tweaks
from calibre.utils.icu import sort_key
from polyglot.builtins import cmp, itervalues
from polyglot.builtins import cmp
def none_cmp(xx, yy):
@ -307,7 +307,7 @@ class CollectionsBookList(BookList):
result = {}
for category, lpaths in collections.items():
books = sorted(itervalues(lpaths), key=cmp_to_key(none_cmp))
books = sorted(lpaths.values(), key=cmp_to_key(none_cmp))
result[category] = [x[0] for x in books]
return result

View File

@ -20,7 +20,6 @@ from calibre.devices.usbms.cli import CLI
from calibre.devices.usbms.device import Device
from calibre.ebooks.metadata.book.json_codec import JsonCodec
from calibre.prints import debug_print
from polyglot.builtins import itervalues
def safe_walk(top, topdown=True, onerror=None, followlinks=False, maxdepth=128):
@ -292,7 +291,7 @@ class USBMS(CLI, Device):
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
for idx in sorted(itervalues(bl_cache), reverse=True, key=lambda x: -1 if x is None else x):
for idx in sorted(bl_cache.values(), reverse=True, key=lambda x: -1 if x is None else x):
if idx is not None:
need_sync = True
del bl[idx]

View File

@ -15,7 +15,7 @@ from calibre.ebooks.docx.names import SVG_BLIP_URI, barename
from calibre.utils.filenames import ascii_filename
from calibre.utils.img import image_to_data, resize_to_fit
from calibre.utils.imghdr import what
from polyglot.builtins import iteritems, itervalues
from polyglot.builtins import iteritems
class LinkedImageNotFound(ValueError):
@ -178,7 +178,7 @@ class Images:
return raw, base
def unique_name(self, base):
exists = frozenset(itervalues(self.used))
exists = frozenset(self.used.values())
c = 1
name = base
while name in exists:

View File

@ -10,7 +10,6 @@ from collections import Counter, OrderedDict
from calibre.ebooks.docx.block_styles import ParagraphStyle, inherit, twips
from calibre.ebooks.docx.char_styles import RunStyle
from calibre.ebooks.docx.tables import TableStyle
from polyglot.builtins import itervalues
class PageProperties:
@ -499,7 +498,7 @@ class Styles:
prefix = ef + '\n' + prefix
ans = []
for cls, css in sorted(itervalues(self.classes), key=lambda x:x[0]):
for cls, css in sorted(self.classes.values(), key=lambda x:x[0]):
b = (f'\t{k}: {v};' for k, v in css.items())
b = '\n'.join(b)
ans.append('.{} {{\n{}\n}}\n'.format(cls, b.rstrip(';')))

View File

@ -32,7 +32,6 @@ from calibre.ebooks.docx.theme import Theme
from calibre.ebooks.docx.toc import create_toc
from calibre.ebooks.metadata.opf2 import OPFCreator
from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1
from polyglot.builtins import itervalues
NBSP = '\xa0'
@ -426,7 +425,7 @@ class Convert:
if current_bm and p in rmap:
para = rmap[p]
if 'id' not in para.attrib:
para.set('id', generate_anchor(next(iter(current_bm)), frozenset(itervalues(self.anchor_map))))
para.set('id', generate_anchor(next(iter(current_bm)), frozenset(self.anchor_map.values())))
for name in current_bm:
self.anchor_map[name] = para.get('id')
current_bm = set()
@ -482,7 +481,7 @@ class Convert:
# _GoBack is a special bookmark inserted by Word 2010 for
# the return to previous edit feature, we ignore it
old_anchor = current_anchor
self.anchor_map[anchor] = current_anchor = generate_anchor(anchor, frozenset(itervalues(self.anchor_map)))
self.anchor_map[anchor] = current_anchor = generate_anchor(anchor, frozenset(self.anchor_map.values()))
if old_anchor is not None:
# The previous anchor was not applied to any element
for a, t in tuple(self.anchor_map.items()):
@ -493,7 +492,7 @@ class Convert:
elif x.tag.endswith('}instrText') and x.text and x.text.strip().startswith('TOC '):
old_anchor = current_anchor
anchor = str(uuid.uuid4())
self.anchor_map[anchor] = current_anchor = generate_anchor('toc', frozenset(itervalues(self.anchor_map)))
self.anchor_map[anchor] = current_anchor = generate_anchor('toc', frozenset(self.anchor_map.values()))
self.toc_anchor = current_anchor
if old_anchor is not None:
# The previous anchor was not applied to any element

View File

@ -7,8 +7,6 @@ __copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
from collections import defaultdict
from operator import attrgetter
from polyglot.builtins import itervalues
LIST_STYLES = frozenset(
'disc circle square decimal decimal-leading-zero lower-roman upper-roman'
' lower-greek lower-alpha lower-latin upper-alpha upper-latin hiragana hebrew'
@ -157,7 +155,7 @@ class ListsManager:
definitions[defn] = defn
defn.num_id = len(definitions) - 1
defn.link_blocks()
self.definitions = sorted(itervalues(definitions), key=attrgetter('num_id'))
self.definitions = sorted(definitions.values(), key=attrgetter('num_id'))
def serialize(self, parent):
for defn in self.definitions:

View File

@ -21,7 +21,6 @@ from calibre.ebooks.lit.maps import HTML_MAP, OPF_MAP
from calibre.ebooks.oeb.base import urlnormalize, xpath
from calibre.ebooks.oeb.reader import OEBReader
from calibre_extensions import lzx, msdes
from polyglot.builtins import itervalues
from polyglot.urllib import unquote as urlunquote
__all__ = ['LitReader']
@ -686,7 +685,7 @@ class LitFile:
mime_type, raw = consume_sized_utf8_string(raw, zpad=True)
self.manifest[internal] = ManifestItem(
original, internal, mime_type, offset, root, state)
mlist = list(itervalues(self.manifest))
mlist = list(self.manifest.values())
# Remove any common path elements
if len(mlist) > 1:
shared = mlist[0].path

View File

@ -31,7 +31,6 @@ from calibre.utils.icu import title_case as icu_title
from calibre.utils.localization import __, is_rtl_lang
from calibre.utils.short_uuid import uuid4
from calibre.utils.xml_parse import safe_xml_fromstring
from polyglot.builtins import itervalues
from polyglot.urllib import unquote as urlunquote
XML_NS = 'http://www.w3.org/XML/1998/namespace'
@ -1461,7 +1460,7 @@ class Guide:
__iter__ = iterkeys
def values(self):
return sorted(itervalues(self.refs), key=lambda ref: ref.ORDER.get(ref.type, 10000))
return sorted(self.refs.values(), key=lambda ref: ref.ORDER.get(ref.type, 10000))
def items(self):
yield from self.refs.items()

View File

@ -13,7 +13,6 @@ from calibre import force_unicode, xml_replace_entities
from calibre.constants import filesystem_encoding
from calibre.ebooks.chardet import strip_encoding_declarations, xml_to_unicode
from calibre.utils.xml_parse import safe_html_fromstring, safe_xml_fromstring
from polyglot.builtins import itervalues
XHTML_NS = 'http://www.w3.org/1999/xhtml'
XMLNS_NS = 'http://www.w3.org/2000/xmlns/'
@ -140,7 +139,7 @@ def clean_word_doc(data, log):
def ensure_namespace_prefixes(node, nsmap):
namespace_uris = frozenset(itervalues(nsmap))
namespace_uris = frozenset(nsmap.values())
fnsmap = {k:v for k, v in node.nsmap.items() if v not in namespace_uris}
fnsmap.update(nsmap)
if fnsmap != dict(node.nsmap):

View File

@ -17,7 +17,6 @@ from calibre.ebooks.oeb.polish.cover import get_raster_cover_name
from calibre.ebooks.oeb.polish.parsing import parse_html5
from calibre.ebooks.oeb.polish.replace import remove_links_to
from calibre.ebooks.oeb.polish.utils import OEB_FONTS, actual_case_for_name, corrected_case_for_name, guess_type
from polyglot.builtins import itervalues
class BadLink(BaseError):
@ -386,7 +385,7 @@ def check_links(container):
continue
unreferenced.add(name)
manifest_names = set(itervalues(container.manifest_id_map))
manifest_names = set(container.manifest_id_map.values())
for name in container.mime_map:
if name not in manifest_names and not container.ok_to_be_unmanifested(name):
a(Unmanifested(name, unreferenced=name in unreferenced))

View File

@ -13,7 +13,6 @@ from calibre import prints
from calibre.ebooks.oeb.base import XHTML
from calibre.utils.filenames import ascii_filename
from calibre.utils.icu import lower as icu_lower
from polyglot.builtins import itervalues
props = {'font-family':None, 'font-weight':'normal', 'font-style':'normal', 'font-stretch':'normal'}
@ -77,7 +76,7 @@ def filter_by_stretch(fonts, val):
else:
candidates = expanded or condensed
distance_map = {i:abs(stretch_map[i] - val) for i in candidates}
min_dist = min(itervalues(distance_map))
min_dist = min(distance_map.values())
return [fonts[i] for i in candidates if distance_map[i] == min_dist]
@ -125,7 +124,7 @@ def filter_by_weight(fonts, val):
return [fonts[rmap[400]]]
candidates = below or above
distance_map = {i:abs(weight_map[i] - val) for i in candidates}
min_dist = min(itervalues(distance_map))
min_dist = min(distance_map.values())
return [fonts[i] for i in candidates if distance_map[i] == min_dist]
@ -197,7 +196,7 @@ def font_key(font):
def embed_all_fonts(container, stats, report):
all_font_rules = tuple(itervalues(stats.all_font_rules))
all_font_rules = tuple(stats.all_font_rules.values())
warned = set()
rules, nrules = [], {}
modified = set()

View File

@ -18,7 +18,6 @@ from calibre.ebooks.chardet import strip_encoding_declarations
from calibre.ebooks.oeb.base import css_text
from calibre.ebooks.oeb.polish.css import iter_declarations, remove_property_value
from calibre.ebooks.oeb.polish.utils import extract
from polyglot.builtins import itervalues
class LinkReplacer:
@ -188,7 +187,7 @@ def rename_files(container, file_map):
:param file_map: A mapping of old canonical name to new canonical name, for
example: :code:`{'text/chapter1.html': 'chapter1.html'}`.
'''
overlap = set(file_map).intersection(set(itervalues(file_map)))
overlap = set(file_map).intersection(set(file_map.values()))
if overlap:
raise ValueError('Circular rename detected. The files {} are both rename targets and destinations'.format(', '.join(overlap)))
for name, dest in file_map.items():
@ -197,7 +196,7 @@ def rename_files(container, file_map):
# A case change on an OS with a case insensitive file-system.
continue
raise ValueError(f'Cannot rename {name} to {dest} as {dest} already exists')
if len(tuple(itervalues(file_map))) != len(set(itervalues(file_map))):
if len(tuple(file_map.values())) != len(set(file_map.values())):
raise ValueError('Cannot rename, the set of destination files contains duplicates')
link_map = {}
for current_name, new_name in file_map.items():

View File

@ -18,7 +18,6 @@ from calibre.ebooks.oeb.polish.tests.base import BaseTest, get_simple_book, get_
from calibre.ptempfile import TemporaryDirectory, TemporaryFile
from calibre.utils.filenames import nlinks_file
from calibre.utils.resources import get_path as P
from polyglot.builtins import itervalues
def get_container(*args, **kwargs):
@ -187,13 +186,13 @@ class ContainerTests(BaseTest):
name = 'folder/added file.html'
c.add_file(name, b'xxx')
self.assertEqual('xxx', c.raw_data(name))
self.assertIn(name, set(itervalues(c.manifest_id_map)))
self.assertIn(name, set(c.manifest_id_map.values()))
self.assertIn(name, {x[0] for x in c.spine_names})
name = 'added.css'
c.add_file(name, b'xxx')
self.assertEqual('xxx', c.raw_data(name))
self.assertIn(name, set(itervalues(c.manifest_id_map)))
self.assertIn(name, set(c.manifest_id_map.values()))
self.assertNotIn(name, {x[0] for x in c.spine_names})
self.assertEqual(c.make_name_unique(name), 'added-1.css')
c.add_file('added-1.css', b'xxx')

View File

@ -14,7 +14,6 @@ from lxml import etree
from calibre.ebooks import ConversionError
from calibre.ebooks.oeb.base import TOC, XHTML, XPNSMAP, barename, xml2text
from polyglot.builtins import itervalues
def XPath(x):
@ -268,8 +267,8 @@ class DetectStructure:
return []
for document in self.oeb.spine:
previous_level1 = list(itervalues(added))[-1] if added else None
previous_level2 = list(itervalues(added2))[-1] if added2 else None
previous_level1 = list(added.values())[-1] if added else None
previous_level2 = list(added2.values())[-1] if added2 else None
level1_toc, level1_title = self.get_toc_parts_for_xpath(self.opts.level1_toc)
for elem in find_matches(level1_toc, document.data):

View File

@ -47,7 +47,7 @@ from calibre.utils.icu import lower as icu_lower
from calibre.utils.icu import upper as icu_upper
from calibre.utils.localization import ngettext
from calibre.utils.titlecase import titlecase
from polyglot.builtins import error_message, itervalues
from polyglot.builtins import error_message
Settings = namedtuple('Settings',
'remove_all remove add au aus do_aus rating pub do_series do_autonumber '
@ -467,7 +467,7 @@ class MyBlockingBusy(QDialog): # {{{
def next_series_num(bid, i):
if args.do_series_restart:
return sval + (i * args.series_increment)
next_num = _get_next_series_num_for_list(sorted(itervalues(sval)), unwrap=False)
next_num = _get_next_series_num_for_list(sorted(sval.values()), unwrap=False)
sval[bid] = next_num
return next_num

View File

@ -89,7 +89,7 @@ from calibre.utils.imghdr import identify
from calibre.utils.ipc.launch import exe_path, macos_edit_book_bundle_path
from calibre.utils.localization import ngettext
from calibre.utils.tdir_in_cache import tdir_in_cache
from polyglot.builtins import as_bytes, itervalues
from polyglot.builtins import as_bytes
_diff_dialogs = []
last_used_transform_rules = []
@ -859,7 +859,7 @@ class Boss(QObject):
self.gui.preview.current_name = newname
self.apply_container_update_to_gui()
if from_filelist:
self.gui.file_list.select_names(frozenset(itervalues(name_map)), current_name=name_map.get(from_filelist))
self.gui.file_list.select_names(frozenset(name_map.values()), current_name=name_map.get(from_filelist))
self.gui.file_list.file_list.setFocus(Qt.FocusReason.PopupFocusReason)
# }}}

View File

@ -20,7 +20,7 @@ from calibre.gui2.tweak_book.completion.utils import DataError, control, data
from calibre.utils.icu import numeric_sort_key
from calibre.utils.ipc import eintr_retry_call
from calibre.utils.matcher import Matcher
from polyglot.builtins import iteritems, itervalues
from polyglot.builtins import iteritems
Request = namedtuple('Request', 'id type data query')
@ -93,7 +93,7 @@ def complete_names(names_data, data_conn):
quote = (lambda x:x) if base.lower().endswith('.css') else prepare_string_for_xml
names = names_cache.get(names_type, names_cache[None])
nmap = {name:name_to_href(name, root, base, quote) for name in names}
items = tuple(sorted(frozenset(itervalues(nmap)), key=numeric_sort_key))
items = tuple(sorted(frozenset(nmap.values()), key=numeric_sort_key))
d = names_cache['descriptions'].get
descriptions = {href:d(name) for name, href in nmap.items()}
return items, descriptions, {}

View File

@ -12,7 +12,6 @@ from calibre.utils.fonts.sfnt import UnknownTable
from calibre.utils.fonts.sfnt.cff.constants import STANDARD_CHARSETS, cff_standard_strings
from calibre.utils.fonts.sfnt.cff.dict_data import PrivateDict, TopDict
from calibre.utils.fonts.sfnt.errors import NoGlyphs, UnsupportedFont
from polyglot.builtins import itervalues
# Useful links
# http://www.adobe.com/content/dam/Adobe/en/devnet/font/pdfs/5176.CFF.pdf
@ -195,7 +194,7 @@ class CFFTable(UnknownTable):
# reconstruct character_map for the subset font
charset_map = {code:self.cff.charset.safe_lookup(glyph_id) for code,
glyph_id in character_map.items()}
charset = set(itervalues(charset_map))
charset = set(charset_map.values())
charset.discard(None)
if not charset and character_map:
raise NoGlyphs('This font has no glyphs for the specified characters')

View File

@ -20,7 +20,7 @@ from polyglot.builtins import itervalues
def resolve_glyphs(loca, glyf, character_map, extra_glyphs):
unresolved_glyphs = set(itervalues(character_map)) | extra_glyphs
unresolved_glyphs = set(character_map.values()) | extra_glyphs
unresolved_glyphs.add(0) # We always want the .notdef glyph
resolved_glyphs = {}
@ -183,7 +183,7 @@ def subset(raw, individual_chars, ranges=(), warnings=None):
if b'kern' in sfnt:
try:
sfnt[b'kern'].restrict_to_glyphs(frozenset(itervalues(character_map)))
sfnt[b'kern'].restrict_to_glyphs(frozenset(character_map.values()))
except UnsupportedFont as e:
warn(f'kern table unsupported, ignoring: {e}')
except Exception:
@ -222,8 +222,8 @@ def print_stats(old_stats, new_stats):
prints('========= Table comparison (original vs. subset) =========')
prints('Table', ' ', f"{'Size':>10}", ' ', 'Percent', ' ', f"{'New Size':>10}", ' New Percent')
prints('='*80)
old_total = sum(itervalues(old_stats))
new_total = sum(itervalues(new_stats))
old_total = sum(old_stats.values())
new_total = sum(new_stats.values())
tables = sorted(old_stats, key=lambda x: old_stats[x],
reverse=True)
for table in tables:
@ -361,7 +361,7 @@ def all():
print('Failed!')
failed.append((font['full_name'], font['path'], str(e)))
else:
averages.append(sum(itervalues(new_stats))/sum(itervalues(old_stats)) * 100)
averages.append(sum(new_stats.values())/sum(old_stats.values()) * 100)
print('Reduced to:', f'{averages[-1]:.1f}', '%')
if unsupported:
print('\n\nUnsupported:')