diff --git a/manual/epub.py b/manual/epub.py
index f16652b56d..dd93e34817 100644
--- a/manual/epub.py
+++ b/manual/epub.py
@@ -15,6 +15,7 @@ from calibre.ebooks.oeb.polish.container import get_container, OEB_DOCS
from calibre.ebooks.oeb.polish.check.links import check_links, UnreferencedResource
from calibre.ebooks.oeb.polish.pretty import pretty_html_tree, pretty_opf
from calibre.utils.imghdr import identify
+from polyglot.builtins import iteritems
class EPUBHelpBuilder(EpubBuilder):
@@ -28,7 +29,7 @@ class EPUBHelpBuilder(EpubBuilder):
def fix_epub(self, container):
' Fix all the brokenness that sphinx\'s epub builder creates '
- for name, mt in container.mime_map.iteritems():
+ for name, mt in iteritems(container.mime_map):
if mt in OEB_DOCS:
self.workaround_ade_quirks(container, name)
pretty_html_tree(container, container.parsed(name))
@@ -49,9 +50,9 @@ class EPUBHelpBuilder(EpubBuilder):
def fix_opf(self, container):
spine_names = {n for n, l in container.spine_names}
spine = container.opf_xpath('//opf:spine')[0]
- rmap = {v:k for k, v in container.manifest_id_map.iteritems()}
+ rmap = {v:k for k, v in iteritems(container.manifest_id_map)}
# Add unreferenced text files to the spine
- for name, mt in container.mime_map.iteritems():
+ for name, mt in iteritems(container.mime_map):
if mt in OEB_DOCS and name not in spine_names:
spine_names.add(name)
container.insert_into_xml(spine, spine.makeelement(OPF('itemref'), idref=rmap[name]))
diff --git a/setup/plugins_mirror.py b/setup/plugins_mirror.py
index 0d799cb3ab..a1fccc4347 100644
--- a/setup/plugins_mirror.py
+++ b/setup/plugins_mirror.py
@@ -33,6 +33,7 @@ from email.utils import parsedate
from functools import partial
from multiprocessing.pool import ThreadPool
from xml.sax.saxutils import escape, quoteattr
+from polyglot.builtins import iteritems, itervalues
# }}}
USER_AGENT = 'calibre mirror'
@@ -292,7 +293,7 @@ def get_plugin_info(raw, check_for_qt5=False):
metadata = names[inits[0]]
else:
# Legacy plugin
- for name, val in names.iteritems():
+ for name, val in iteritems(names):
if name.endswith('plugin.py'):
metadata = val
break
@@ -331,7 +332,7 @@ def update_plugin_from_entry(plugin, entry):
def fetch_plugin(old_index, entry):
- lm_map = {plugin['thread_id']:plugin for plugin in old_index.itervalues()}
+ lm_map = {plugin['thread_id']:plugin for plugin in itervalues(old_index)}
raw = read(entry.url)
url, name = parse_plugin_zip_url(raw)
if url is None:
@@ -403,7 +404,7 @@ def fetch_plugins(old_index):
log('Failed to get plugin', entry.name, 'at', datetime.utcnow().isoformat(), 'with error:')
log(plugin)
# Move staged files
- for plugin in ans.itervalues():
+ for plugin in itervalues(ans):
if plugin['file'].startswith('staging_'):
src = plugin['file']
plugin['file'] = src.partition('_')[-1]
@@ -411,7 +412,7 @@ def fetch_plugins(old_index):
raw = bz2.compress(json.dumps(ans, sort_keys=True, indent=4, separators=(',', ': ')))
atomic_write(raw, PLUGINS)
# Cleanup any extra .zip files
- all_plugin_files = {p['file'] for p in ans.itervalues()}
+ all_plugin_files = {p['file'] for p in itervalues(ans)}
extra = set(glob.glob('*.zip')) - all_plugin_files
for x in extra:
os.unlink(x)
@@ -498,7 +499,7 @@ h1 { text-align: center }
name, count = x
return '<tr><td>%s</td><td>%s</td></tr>\n' % (escape(name), count)
- pstats = map(plugin_stats, sorted(stats.iteritems(), reverse=True, key=lambda x:x[1]))
+ pstats = map(plugin_stats, sorted(iteritems(stats), reverse=True, key=lambda x:x[1]))
stats = '''\
diff --git a/src/calibre/__init__.py b/src/calibre/__init__.py
index 5f441311e4..e17ab12edb 100644
--- a/src/calibre/__init__.py
+++ b/src/calibre/__init__.py
@@ -4,7 +4,8 @@ __copyright__ = '2008, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, os, re, time, random, warnings
-from polyglot.builtins import builtins, codepoint_to_chr, unicode_type, range
+from polyglot.builtins import (builtins, codepoint_to_chr, iteritems,
+ itervalues, unicode_type, range)
builtins.__dict__['dynamic_property'] = lambda func: func(None)
from math import floor
from functools import partial
@@ -706,7 +707,7 @@ def remove_bracketed_text(src,
counts = Counter()
buf = []
src = force_unicode(src)
- rmap = dict([(v, k) for k, v in brackets.iteritems()])
+ rmap = dict([(v, k) for k, v in iteritems(brackets)])
for char in src:
if char in brackets:
counts[char] += 1
@@ -714,7 +715,7 @@ def remove_bracketed_text(src,
idx = rmap[char]
if counts[idx] > 0:
counts[idx] -= 1
- elif sum(counts.itervalues()) < 1:
+ elif sum(itervalues(counts)) < 1:
buf.append(char)
return u''.join(buf)
diff --git a/src/calibre/customize/ui.py b/src/calibre/customize/ui.py
index 74a729c135..ae5d8e254e 100644
--- a/src/calibre/customize/ui.py
+++ b/src/calibre/customize/ui.py
@@ -23,6 +23,7 @@ from calibre.utils.config import (make_config_dir, Config, ConfigProxy,
plugin_dir, OptionParser)
from calibre.ebooks.metadata.sources.base import Source
from calibre.constants import DEBUG, numeric_version
+from polyglot.builtins import iteritems, itervalues
builtin_names = frozenset(p.name for p in builtin_plugins)
BLACKLISTED_PLUGINS = frozenset({'Marvin XD', 'iOS reader applications'})
@@ -347,7 +348,7 @@ def reread_metadata_plugins():
return (1 if plugin.plugin_path is None else 0), plugin.name
for group in (_metadata_readers, _metadata_writers):
- for plugins in group.itervalues():
+ for plugins in itervalues(group):
if len(plugins) > 1:
plugins.sort(key=key)
@@ -640,7 +641,7 @@ def patch_metadata_plugins(possibly_updated_plugins):
# Metadata source plugins dont use initialize() but that
# might change in the future, so be safe.
patches[i].initialize()
- for i, pup in patches.iteritems():
+ for i, pup in iteritems(patches):
_initialized_plugins[i] = pup
# }}}
diff --git a/src/calibre/customize/zipplugin.py b/src/calibre/customize/zipplugin.py
index 95c18da1b2..fec7842afd 100644
--- a/src/calibre/customize/zipplugin.py
+++ b/src/calibre/customize/zipplugin.py
@@ -2,7 +2,6 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
-from polyglot.builtins import map, unicode_type
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
@@ -15,7 +14,8 @@ from functools import partial
from calibre import as_unicode
from calibre.customize import (Plugin, numeric_version, platform,
InvalidPlugin, PluginNotFound)
-from polyglot.builtins import string_or_bytes
+from polyglot.builtins import (itervalues, iterkeys, map,
+ string_or_bytes, unicode_type)
# PEP 302 based plugin loading mechanism, works around the bug in zipimport in
# python 2.x that prevents importing from zip files in locations whose paths
@@ -202,7 +202,7 @@ class PluginLoader(object):
else:
m = importlib.import_module(plugin_module)
plugin_classes = []
- for obj in m.__dict__.itervalues():
+ for obj in itervalues(m.__dict__):
if isinstance(obj, type) and issubclass(obj, Plugin) and \
obj.name != 'Trivial Plugin':
plugin_classes.append(obj)
@@ -281,7 +281,7 @@ class PluginLoader(object):
# Legacy plugins
if '__init__' not in names:
- for name in list(names.iterkeys()):
+ for name in list(iterkeys(names)):
if '.' not in name and name.endswith('plugin'):
names['__init__'] = names[name]
break
diff --git a/src/calibre/db/__init__.py b/src/calibre/db/__init__.py
index 87144e4890..82ff46b5e4 100644
--- a/src/calibre/db/__init__.py
+++ b/src/calibre/db/__init__.py
@@ -10,7 +10,7 @@ __docformat__ = 'restructuredtext en'
SPOOL_SIZE = 30*1024*1024
import numbers
-from polyglot.builtins import range
+from polyglot.builtins import iteritems, range
def _get_next_series_num_for_list(series_indices, unwrap=True):
@@ -82,7 +82,7 @@ def get_data_as_dict(self, prefix=None, authors_as_string=False, ids=None, conve
'rating', 'timestamp', 'size', 'tags', 'comments', 'series',
'series_index', 'uuid', 'pubdate', 'last_modified', 'identifiers',
'languages']).union(set(fdata))
- for x, data in fdata.iteritems():
+ for x, data in iteritems(fdata):
if data['datatype'] == 'series':
FIELDS.add('%d_index'%x)
data = []
diff --git a/src/calibre/db/adding.py b/src/calibre/db/adding.py
index 44fa239ed5..febd2dda5c 100644
--- a/src/calibre/db/adding.py
+++ b/src/calibre/db/adding.py
@@ -8,7 +8,7 @@ __copyright__ = '2013, Kovid Goyal '
import os, time, re
from collections import defaultdict
-from polyglot.builtins import map, unicode_type
+from polyglot.builtins import itervalues, map, unicode_type
from contextlib import contextmanager
from functools import partial
@@ -137,7 +137,7 @@ def find_books_in_directory(dirpath, single_book_per_directory, compiled_rules=(
if allow_path(path, ext, compiled_rules):
formats[ext] = path
if formats_ok(formats):
- yield list(formats.itervalues())
+ yield list(itervalues(formats))
else:
books = defaultdict(dict)
for path in listdir_impl(dirpath, sort_by_mtime=True):
@@ -145,9 +145,9 @@ def find_books_in_directory(dirpath, single_book_per_directory, compiled_rules=(
if allow_path(path, ext, compiled_rules):
books[icu_lower(key) if isinstance(key, unicode_type) else key.lower()][ext] = path
- for formats in books.itervalues():
+ for formats in itervalues(books):
if formats_ok(formats):
- yield list(formats.itervalues())
+ yield list(itervalues(formats))
def create_format_map(formats):
diff --git a/src/calibre/db/backend.py b/src/calibre/db/backend.py
index 72cb901cb6..ba9e696ce5 100644
--- a/src/calibre/db/backend.py
+++ b/src/calibre/db/backend.py
@@ -12,7 +12,8 @@ import os, shutil, uuid, json, glob, time, hashlib, errno, sys
from functools import partial
import apsw
-from polyglot.builtins import unicode_type, reraise, string_or_bytes
+from polyglot.builtins import (iteritems, iterkeys, itervalues,
+ unicode_type, reraise, string_or_bytes)
from calibre import isbytestring, force_unicode, prints, as_unicode
from calibre.constants import (iswindows, filesystem_encoding,
@@ -222,7 +223,7 @@ def SortedConcatenate(sep=','):
def finalize(ctxt):
if len(ctxt) == 0:
return None
- return sep.join(map(ctxt.get, sorted(ctxt.iterkeys())))
+ return sep.join(map(ctxt.get, sorted(iterkeys(ctxt))))
return ({}, step, finalize)
@@ -247,7 +248,7 @@ def AumSortedConcatenate():
ctxt[ndx] = ':::'.join((author, sort, link))
def finalize(ctxt):
- keys = list(ctxt.iterkeys())
+ keys = list(iterkeys(ctxt))
l = len(keys)
if l == 0:
return None
@@ -733,7 +734,7 @@ class DB(object):
}
# Create Tag Browser categories for custom columns
- for k in sorted(self.custom_column_label_map.iterkeys()):
+ for k in sorted(iterkeys(self.custom_column_label_map)):
v = self.custom_column_label_map[k]
if v['normalized']:
is_category = True
@@ -786,10 +787,10 @@ class DB(object):
'last_modified':19, 'identifiers':20, 'languages':21,
}
- for k,v in self.FIELD_MAP.iteritems():
+ for k,v in iteritems(self.FIELD_MAP):
self.field_metadata.set_field_record_index(k, v, prefer_custom=False)
- base = max(self.FIELD_MAP.itervalues())
+ base = max(itervalues(self.FIELD_MAP))
for label_ in sorted(self.custom_column_label_map):
data = self.custom_column_label_map[label_]
@@ -1263,7 +1264,7 @@ class DB(object):
'''
with self.conn: # Use a single transaction, to ensure nothing modifies the db while we are reading
- for table in self.tables.itervalues():
+ for table in itervalues(self.tables):
try:
table.read(self)
except:
@@ -1327,7 +1328,7 @@ class DB(object):
def remove_formats(self, remove_map):
paths = []
- for book_id, removals in remove_map.iteritems():
+ for book_id, removals in iteritems(remove_map):
for fmt, fname, path in removals:
path = self.format_abspath(book_id, fmt, fname, path)
if path is not None:
@@ -1585,7 +1586,7 @@ class DB(object):
if samefile(spath, tpath):
# The format filenames may have changed while the folder
# name remains the same
- for fmt, opath in original_format_map.iteritems():
+ for fmt, opath in iteritems(original_format_map):
npath = format_map.get(fmt, None)
if npath and os.path.abspath(npath.lower()) != os.path.abspath(opath.lower()) and samefile(opath, npath):
# opath and npath are different hard links to the same file
@@ -1648,7 +1649,7 @@ class DB(object):
def remove_books(self, path_map, permanent=False):
self.executemany(
'DELETE FROM books WHERE id=?', [(x,) for x in path_map])
- paths = {os.path.join(self.library_path, x) for x in path_map.itervalues() if x}
+ paths = {os.path.join(self.library_path, x) for x in itervalues(path_map) if x}
paths = {x for x in paths if os.path.exists(x) and self.is_deletable(x)}
if permanent:
for path in paths:
@@ -1663,7 +1664,7 @@ class DB(object):
self.executemany(
'INSERT OR REPLACE INTO books_plugin_data (book, name, val) VALUES (?, ?, ?)',
[(book_id, name, json.dumps(val, default=to_json))
- for book_id, val in val_map.iteritems()])
+ for book_id, val in iteritems(val_map)])
def get_custom_book_data(self, name, book_ids, default=None):
book_ids = frozenset(book_ids)
@@ -1722,7 +1723,7 @@ class DB(object):
def set_conversion_options(self, options, fmt):
options = [(book_id, fmt.upper(), buffer(pickle_binary_string(data.encode('utf-8') if isinstance(data, unicode_type) else data)))
- for book_id, data in options.iteritems()]
+ for book_id, data in iteritems(options)]
self.executemany('INSERT OR REPLACE INTO conversion_options(book,format,data) VALUES (?,?,?)', options)
def get_top_level_move_items(self, all_paths):
diff --git a/src/calibre/db/cache.py b/src/calibre/db/cache.py
index e7f1f14aa6..f04f16edeb 100644
--- a/src/calibre/db/cache.py
+++ b/src/calibre/db/cache.py
@@ -11,7 +11,7 @@ import os, traceback, random, shutil, operator
from io import BytesIO
from collections import defaultdict, Set, MutableSet
from functools import wraps, partial
-from polyglot.builtins import unicode_type, zip, string_or_bytes
+from polyglot.builtins import iteritems, iterkeys, itervalues, unicode_type, zip, string_or_bytes
from time import time
from calibre import isbytestring, as_unicode
@@ -170,7 +170,7 @@ class Cache(object):
# Reconstruct the user categories, putting them into field_metadata
fm = self.field_metadata
fm.remove_dynamic_categories()
- for user_cat in sorted(self._pref('user_categories', {}).iterkeys(), key=sort_key):
+ for user_cat in sorted(iterkeys(self._pref('user_categories', {})), key=sort_key):
cat_name = '@' + user_cat # add the '@' to avoid name collision
while cat_name:
try:
@@ -181,7 +181,7 @@ class Cache(object):
# add grouped search term user categories
muc = frozenset(self._pref('grouped_search_make_user_categories', []))
- for cat in sorted(self._pref('grouped_search_terms', {}).iterkeys(), key=sort_key):
+ for cat in sorted(iterkeys(self._pref('grouped_search_terms', {})), key=sort_key):
if cat in muc:
# There is a chance that these can be duplicates of an existing
# user category. Print the exception and continue.
@@ -200,7 +200,7 @@ class Cache(object):
self.dirtied_cache = {x:i for i, (x,) in enumerate(
self.backend.execute('SELECT book FROM metadata_dirtied'))}
if self.dirtied_cache:
- self.dirtied_sequence = max(self.dirtied_cache.itervalues())+1
+ self.dirtied_sequence = max(itervalues(self.dirtied_cache))+1
self._initialize_dynamic_categories()
@write_api
@@ -213,7 +213,7 @@ class Cache(object):
@write_api
def clear_composite_caches(self, book_ids=None):
- for field in self.composites.itervalues():
+ for field in itervalues(self.composites):
field.clear_caches(book_ids=book_ids)
@write_api
@@ -229,7 +229,7 @@ class Cache(object):
def clear_caches(self, book_ids=None, template_cache=True, search_cache=True):
if template_cache:
self._initialize_template_cache() # Clear the formatter template cache
- for field in self.fields.itervalues():
+ for field in itervalues(self.fields):
if hasattr(field, 'clear_caches'):
field.clear_caches(book_ids=book_ids) # Clear the composite cache and ondevice caches
if book_ids:
@@ -247,7 +247,7 @@ class Cache(object):
with self.backend.conn: # Prevent other processes, such as calibredb from interrupting the reload by locking the db
self.backend.prefs.load_from_db()
self._search_api.saved_searches.load_from_db()
- for field in self.fields.itervalues():
+ for field in itervalues(self.fields):
if hasattr(field, 'table'):
field.table.read(self.backend) # Reread data from metadata.db
@@ -358,7 +358,7 @@ class Cache(object):
self.backend.read_tables()
bools_are_tristate = self.backend.prefs['bools_are_tristate']
- for field, table in self.backend.tables.iteritems():
+ for field, table in iteritems(self.backend.tables):
self.fields[field] = create_field(field, table, bools_are_tristate,
self.backend.get_template_functions)
if table.metadata['datatype'] == 'composite':
@@ -368,7 +368,7 @@ class Cache(object):
VirtualTable('ondevice'), bools_are_tristate,
self.backend.get_template_functions)
- for name, field in self.fields.iteritems():
+ for name, field in iteritems(self.fields):
if name[0] == '#' and name.endswith('_index'):
field.series_field = self.fields[name[:-len('_index')]]
self.fields[name[:-len('_index')]].index_field = field
@@ -494,7 +494,7 @@ class Cache(object):
return frozenset(self.fields[field].table.col_book_map)
try:
- return frozenset(self.fields[field].table.id_map.itervalues())
+ return frozenset(itervalues(self.fields[field].table.id_map))
except AttributeError:
raise ValueError('%s is not a many-one or many-many field' % field)
@@ -503,7 +503,7 @@ class Cache(object):
''' Return a mapping of id to usage count for all values of the specified
field, which must be a many-one or many-many field. '''
try:
- return {k:len(v) for k, v in self.fields[field].table.col_book_map.iteritems()}
+ return {k:len(v) for k, v in iteritems(self.fields[field].table.col_book_map)}
except AttributeError:
raise ValueError('%s is not a many-one or many-many field' % field)
@@ -528,13 +528,13 @@ class Cache(object):
@read_api
def get_item_id(self, field, item_name):
' Return the item id for item_name (case-insensitive) '
- rmap = {icu_lower(v) if isinstance(v, unicode_type) else v:k for k, v in self.fields[field].table.id_map.iteritems()}
+ rmap = {icu_lower(v) if isinstance(v, unicode_type) else v:k for k, v in iteritems(self.fields[field].table.id_map)}
return rmap.get(icu_lower(item_name) if isinstance(item_name, unicode_type) else item_name, None)
@read_api
def get_item_ids(self, field, item_names):
' Return the item id for item_name (case-insensitive) '
- rmap = {icu_lower(v) if isinstance(v, unicode_type) else v:k for k, v in self.fields[field].table.id_map.iteritems()}
+ rmap = {icu_lower(v) if isinstance(v, unicode_type) else v:k for k, v in iteritems(self.fields[field].table.id_map)}
return {name:rmap.get(icu_lower(name) if isinstance(name, unicode_type) else name, None) for name in item_names}
@read_api
@@ -1038,13 +1038,13 @@ class Cache(object):
new_dirtied = book_ids - already_dirtied
already_dirtied = {book_id:self.dirtied_sequence+i for i, book_id in enumerate(already_dirtied)}
if already_dirtied:
- self.dirtied_sequence = max(already_dirtied.itervalues()) + 1
+ self.dirtied_sequence = max(itervalues(already_dirtied)) + 1
self.dirtied_cache.update(already_dirtied)
if new_dirtied:
self.backend.executemany('INSERT OR IGNORE INTO metadata_dirtied (book) VALUES (?)',
((x,) for x in new_dirtied))
new_dirtied = {book_id:self.dirtied_sequence+i for i, book_id in enumerate(new_dirtied)}
- self.dirtied_sequence = max(new_dirtied.itervalues()) + 1
+ self.dirtied_sequence = max(itervalues(new_dirtied)) + 1
self.dirtied_cache.update(new_dirtied)
@write_api
@@ -1075,7 +1075,7 @@ class Cache(object):
if is_series:
bimap, simap = {}, {}
sfield = self.fields[name + '_index']
- for k, v in book_id_to_val_map.iteritems():
+ for k, v in iteritems(book_id_to_val_map):
if isinstance(v, string_or_bytes):
v, sid = get_series_values(v)
else:
@@ -1117,7 +1117,7 @@ class Cache(object):
@read_api
def get_a_dirtied_book(self):
if self.dirtied_cache:
- return random.choice(tuple(self.dirtied_cache.iterkeys()))
+ return random.choice(tuple(iterkeys(self.dirtied_cache)))
return None
@read_api
@@ -1220,7 +1220,7 @@ class Cache(object):
QPixmap, file object or bytestring. It can also be None, in which
case any existing cover is removed. '''
- for book_id, data in book_id_data_map.iteritems():
+ for book_id, data in iteritems(book_id_data_map):
try:
path = self._field_for('path', book_id).replace('/', os.sep)
except AttributeError:
@@ -1231,7 +1231,7 @@ class Cache(object):
for cc in self.cover_caches:
cc.invalidate(book_id_data_map)
return self._set_field('cover', {
- book_id:(0 if data is None else 1) for book_id, data in book_id_data_map.iteritems()})
+ book_id:(0 if data is None else 1) for book_id, data in iteritems(book_id_data_map)})
@write_api
def add_cover_cache(self, cover_cache):
@@ -1332,14 +1332,14 @@ class Cache(object):
protected_set_field('identifiers', mi_idents)
elif mi_idents:
identifiers = self._field_for('identifiers', book_id, default_value={})
- for key, val in mi_idents.iteritems():
+ for key, val in iteritems(mi_idents):
if val and val.strip(): # Don't delete an existing identifier
identifiers[icu_lower(key)] = val
protected_set_field('identifiers', identifiers)
user_mi = mi.get_all_user_metadata(make_copy=False)
fm = self.field_metadata
- for key in user_mi.iterkeys():
+ for key in iterkeys(user_mi):
if (key in fm and user_mi[key]['datatype'] == fm[key]['datatype'] and (
user_mi[key]['datatype'] != 'text' or (
user_mi[key]['is_multiple'] == fm[key]['is_multiple']))):
@@ -1433,15 +1433,15 @@ class Cache(object):
:param db_only: If True, only remove the record for the format from the db, do not delete the actual format file from the filesystem.
'''
table = self.fields['formats'].table
- formats_map = {book_id:frozenset((f or '').upper() for f in fmts) for book_id, fmts in formats_map.iteritems()}
+ formats_map = {book_id:frozenset((f or '').upper() for f in fmts) for book_id, fmts in iteritems(formats_map)}
- for book_id, fmts in formats_map.iteritems():
+ for book_id, fmts in iteritems(formats_map):
for fmt in fmts:
self.format_metadata_cache[book_id].pop(fmt, None)
if not db_only:
removes = defaultdict(set)
- for book_id, fmts in formats_map.iteritems():
+ for book_id, fmts in iteritems(formats_map):
try:
path = self._field_for('path', book_id).replace('/', os.sep)
except:
@@ -1458,7 +1458,7 @@ class Cache(object):
size_map = table.remove_formats(formats_map, self.backend)
self.fields['size'].table.update_sizes(size_map)
- self._update_last_modified(tuple(formats_map.iterkeys()))
+ self._update_last_modified(tuple(iterkeys(formats_map)))
@read_api
def get_next_series_num_for(self, series, field='series', current_indices=False):
@@ -1481,7 +1481,7 @@ class Cache(object):
index_map = {book_id:self._fast_field_for(idf, book_id, default_value=1.0) for book_id in books}
if current_indices:
return index_map
- series_indices = sorted(index_map.itervalues())
+ series_indices = sorted(itervalues(index_map))
return _get_next_series_num_for_list(tuple(series_indices), unwrap=False)
@read_api
@@ -1491,7 +1491,7 @@ class Cache(object):
string. '''
table = self.fields['authors'].table
result = []
- rmap = {key_func(v):k for k, v in table.id_map.iteritems()}
+ rmap = {key_func(v):k for k, v in iteritems(table.id_map)}
for aut in authors:
aid = rmap.get(key_func(aut), None)
result.append(author_to_author_sort(aut) if aid is None else table.asort_map[aid])
@@ -1503,10 +1503,10 @@ class Cache(object):
implementation of :meth:`has_book` in a worker process without access to the
db. '''
try:
- return {icu_lower(title) for title in self.fields['title'].table.book_col_map.itervalues()}
+ return {icu_lower(title) for title in itervalues(self.fields['title'].table.book_col_map)}
except TypeError:
# Some non-unicode titles in the db
- return {icu_lower(as_unicode(title)) for title in self.fields['title'].table.book_col_map.itervalues()}
+ return {icu_lower(as_unicode(title)) for title in itervalues(self.fields['title'].table.book_col_map)}
@read_api
def has_book(self, mi):
@@ -1518,7 +1518,7 @@ class Cache(object):
if isbytestring(title):
title = title.decode(preferred_encoding, 'replace')
q = icu_lower(title).strip()
- for title in self.fields['title'].table.book_col_map.itervalues():
+ for title in itervalues(self.fields['title'].table.book_col_map):
if q == icu_lower(title):
return True
return False
@@ -1599,7 +1599,7 @@ class Cache(object):
duplicates.append((mi, format_map))
else:
ids.append(book_id)
- for fmt, stream_or_path in format_map.iteritems():
+ for fmt, stream_or_path in iteritems(format_map):
if self.add_format(book_id, fmt, stream_or_path, dbapi=dbapi, run_hooks=run_hooks):
fmt_map[fmt.lower()] = getattr(stream_or_path, 'name', stream_or_path) or ''
run_plugins_on_postadd(dbapi or self, book_id, fmt_map)
@@ -1618,11 +1618,11 @@ class Cache(object):
path = None
path_map[book_id] = path
if iswindows:
- paths = (x.replace(os.sep, '/') for x in path_map.itervalues() if x)
+ paths = (x.replace(os.sep, '/') for x in itervalues(path_map) if x)
self.backend.windows_check_if_files_in_use(paths)
self.backend.remove_books(path_map, permanent=permanent)
- for field in self.fields.itervalues():
+ for field in itervalues(self.fields):
try:
table = field.table
except AttributeError:
@@ -1665,7 +1665,7 @@ class Cache(object):
restrict_to_book_ids = frozenset(restrict_to_book_ids)
id_map = {}
default_process_map = {}
- for old_id, new_name in item_id_to_new_name_map.iteritems():
+ for old_id, new_name in iteritems(item_id_to_new_name_map):
new_names = tuple(x.strip() for x in new_name.split(sv)) if sv else (new_name,)
# Get a list of books in the VL with the item
books_with_id = f.books_for(old_id)
@@ -1720,7 +1720,7 @@ class Cache(object):
raise ValueError('Cannot rename items for one-one fields: %s' % field)
moved_books = set()
id_map = {}
- for item_id, new_name in item_id_to_new_name_map.iteritems():
+ for item_id, new_name in iteritems(item_id_to_new_name_map):
new_names = tuple(x.strip() for x in new_name.split(sv)) if sv else (new_name,)
books, new_id = func(item_id, new_names[0], self.backend)
affected_books.update(books)
@@ -1735,7 +1735,7 @@ class Cache(object):
if affected_books:
if field == 'authors':
self._set_field('author_sort',
- {k:' & '.join(v) for k, v in self._author_sort_strings_for_books(affected_books).iteritems()})
+ {k:' & '.join(v) for k, v in iteritems(self._author_sort_strings_for_books(affected_books))})
self._update_path(affected_books, mark_as_dirtied=False)
elif change_index and hasattr(f, 'index_field') and tweaks['series_index_auto_increment'] != 'no_change':
for book_id in moved_books:
@@ -1835,7 +1835,7 @@ class Cache(object):
insensitive).
'''
- tag_map = {icu_lower(v):k for k, v in self._get_id_map('tags').iteritems()}
+ tag_map = {icu_lower(v):k for k, v in iteritems(self._get_id_map('tags'))}
tag = icu_lower(tag.strip())
mht = icu_lower(must_have_tag.strip()) if must_have_tag else None
tag_id, mht_id = tag_map.get(tag, None), tag_map.get(mht, None)
@@ -1848,7 +1848,7 @@ class Cache(object):
tagged_books = tagged_books.intersection(self._books_for_field('tags', mht_id))
if tagged_books:
if must_have_authors is not None:
- amap = {icu_lower(v):k for k, v in self._get_id_map('authors').iteritems()}
+ amap = {icu_lower(v):k for k, v in iteritems(self._get_id_map('authors'))}
books = None
for author in must_have_authors:
abooks = self._books_for_field('authors', amap.get(icu_lower(author), None))
@@ -1934,7 +1934,7 @@ class Cache(object):
db. See db.utils for an implementation. '''
at = self.fields['authors'].table
author_map = defaultdict(set)
- for aid, author in at.id_map.iteritems():
+ for aid, author in iteritems(at.id_map):
author_map[icu_lower(author)].add(aid)
return (author_map, at.col_book_map.copy(), self.fields['title'].table.book_col_map.copy(), self.fields['languages'].book_value_map.copy())
@@ -2079,12 +2079,12 @@ class Cache(object):
def virtual_libraries_for_books(self, book_ids):
libraries = self._pref('virtual_libraries', {})
ans = {book_id:[] for book_id in book_ids}
- for lib, expr in libraries.iteritems():
+ for lib, expr in iteritems(libraries):
books = self._search(expr) # We deliberately dont use book_ids as we want to use the search cache
for book in book_ids:
if book in books:
ans[book].append(lib)
- return {k:tuple(sorted(v, key=sort_key)) for k, v in ans.iteritems()}
+ return {k:tuple(sorted(v, key=sort_key)) for k, v in iteritems(ans)}
@read_api
def user_categories_for_books(self, book_ids, proxy_metadata_map=None):
@@ -2101,7 +2101,7 @@ class Cache(object):
for book_id in book_ids:
proxy_metadata = pmm.get(book_id) or self._get_proxy_metadata(book_id)
user_cat_vals = ans[book_id] = {}
- for ucat, categories in user_cats.iteritems():
+ for ucat, categories in iteritems(user_cats):
user_cat_vals[ucat] = res = []
for name, cat, ign in categories:
try:
@@ -2240,15 +2240,15 @@ def import_library(library_key, importer, library_path, progress=None, abort=Non
src.close()
cache = Cache(DB(library_path, load_user_formatter_functions=False))
cache.init()
- format_data = {int(book_id):data for book_id, data in metadata['format_data'].iteritems()}
- for i, (book_id, fmt_key_map) in enumerate(format_data.iteritems()):
+ format_data = {int(book_id):data for book_id, data in iteritems(metadata['format_data'])}
+ for i, (book_id, fmt_key_map) in enumerate(iteritems(format_data)):
if abort is not None and abort.is_set():
return
title = cache._field_for('title', book_id)
if progress is not None:
progress(title, i + 1, total)
cache._update_path((book_id,), mark_as_dirtied=False)
- for fmt, fmtkey in fmt_key_map.iteritems():
+ for fmt, fmtkey in iteritems(fmt_key_map):
if fmt == '.cover':
stream = importer.start_file(fmtkey, _('Cover for %s') % title)
path = cache._field_for('path', book_id).replace('/', os.sep)
diff --git a/src/calibre/db/categories.py b/src/calibre/db/categories.py
index 0ae1cee97f..38489a40fb 100644
--- a/src/calibre/db/categories.py
+++ b/src/calibre/db/categories.py
@@ -9,7 +9,7 @@ __docformat__ = 'restructuredtext en'
import copy
from functools import partial
-from polyglot.builtins import unicode_type, map
+from polyglot.builtins import iteritems, iterkeys, unicode_type, map
from calibre.constants import ispy3
from calibre.ebooks.metadata import author_to_author_sort
@@ -75,7 +75,7 @@ class Tag(object):
def find_categories(field_metadata):
- for category, cat in field_metadata.iteritems():
+ for category, cat in iteritems(field_metadata):
if (cat['is_category'] and cat['kind'] not in {'user', 'search'}):
yield (category, cat['is_multiple'].get('cache_to_list', None), False)
elif (cat['datatype'] == 'composite' and
@@ -215,11 +215,11 @@ def get_categories(dbcache, sort='name', book_ids=None, first_letter_sort=False)
# do the verification in the category loop much faster, at the cost of
# temporarily duplicating the categories lists.
taglist = {}
- for c, items in categories.iteritems():
+ for c, items in iteritems(categories):
taglist[c] = dict(map(lambda t:(icu_lower(t.name), t), items))
# Add the category values to the user categories
- for user_cat in sorted(user_categories.iterkeys(), key=sort_key):
+ for user_cat in sorted(iterkeys(user_categories), key=sort_key):
items = []
names_seen = {}
user_cat_is_gst = user_cat in gst
diff --git a/src/calibre/db/cli/cmd_custom_columns.py b/src/calibre/db/cli/cmd_custom_columns.py
index 0e8a0b5813..b8c54bad2c 100644
--- a/src/calibre/db/cli/cmd_custom_columns.py
+++ b/src/calibre/db/cli/cmd_custom_columns.py
@@ -7,6 +7,7 @@ from __future__ import absolute_import, division, print_function, unicode_litera
from pprint import pformat
from calibre import prints
+from polyglot.builtins import iteritems
readonly = True
version = 0 # change this if you change signature of implementation()
@@ -37,7 +38,7 @@ List available custom columns. Shows column labels and ids.
def main(opts, args, dbctx):
- for col, data in dbctx.run('custom_columns').iteritems():
+ for col, data in iteritems(dbctx.run('custom_columns')):
if opts.details:
prints(col)
print()
diff --git a/src/calibre/db/cli/cmd_list.py b/src/calibre/db/cli/cmd_list.py
index 570271c351..a6635236d4 100644
--- a/src/calibre/db/cli/cmd_list.py
+++ b/src/calibre/db/cli/cmd_list.py
@@ -13,6 +13,7 @@ from calibre import prints
from calibre.db.cli.utils import str_width
from calibre.ebooks.metadata import authors_to_string
from calibre.utils.date import isoformat
+from polyglot.builtins import iteritems
readonly = True
version = 0 # change this if you change signature of implementation()
@@ -64,7 +65,7 @@ def implementation(
continue
if field == 'isbn':
x = db.all_field_for('identifiers', book_ids, default_value={})
- data[field] = {k: v.get('isbn') or '' for k, v in x.iteritems()}
+ data[field] = {k: v.get('isbn') or '' for k, v in iteritems(x)}
continue
field = field.replace('*', '#')
metadata[field] = fm[field]
@@ -80,37 +81,37 @@ def implementation(
def stringify(data, metadata, for_machine):
- for field, m in metadata.iteritems():
+ for field, m in iteritems(metadata):
if field == 'authors':
data[field] = {
k: authors_to_string(v)
- for k, v in data[field].iteritems()
+ for k, v in iteritems(data[field])
}
else:
dt = m['datatype']
if dt == 'datetime':
data[field] = {
k: isoformat(v, as_utc=for_machine) if v else 'None'
- for k, v in data[field].iteritems()
+ for k, v in iteritems(data[field])
}
elif not for_machine:
ism = m['is_multiple']
if ism:
data[field] = {
k: ism['list_to_ui'].join(v)
- for k, v in data[field].iteritems()
+ for k, v in iteritems(data[field])
}
if field == 'formats':
data[field] = {
k: '[' + v + ']'
- for k, v in data[field].iteritems()
+ for k, v in iteritems(data[field])
}
def as_machine_data(book_ids, data, metadata):
for book_id in book_ids:
ans = {'id': book_id}
- for field, val_map in data.iteritems():
+ for field, val_map in iteritems(data):
val = val_map.get(book_id)
if val is not None:
ans[field.replace('#', '*')] = val
diff --git a/src/calibre/db/cli/cmd_saved_searches.py b/src/calibre/db/cli/cmd_saved_searches.py
index fd31051782..b06585d48e 100644
--- a/src/calibre/db/cli/cmd_saved_searches.py
+++ b/src/calibre/db/cli/cmd_saved_searches.py
@@ -9,6 +9,7 @@ version = 0 # change this if you change signature of implementation()
from calibre import prints
from calibre.srv.changes import saved_searches
+from polyglot.builtins import iteritems
def implementation(db, notify_changes, action, *args):
@@ -56,7 +57,7 @@ Syntax for removing:
def main(opts, args, dbctx):
args = args or ['list']
if args[0] == 'list':
- for name, value in dbctx.run('saved_searches', 'list').iteritems():
+ for name, value in iteritems(dbctx.run('saved_searches', 'list')):
prints(_('Name:'), name)
prints(_('Search string:'), value)
print()
diff --git a/src/calibre/db/cli/cmd_set_metadata.py b/src/calibre/db/cli/cmd_set_metadata.py
index 18a09bafb9..02ac4462de 100644
--- a/src/calibre/db/cli/cmd_set_metadata.py
+++ b/src/calibre/db/cli/cmd_set_metadata.py
@@ -11,7 +11,7 @@ from calibre.ebooks.metadata.book.base import field_from_string
from calibre.ebooks.metadata.book.serialize import read_cover
from calibre.ebooks.metadata.opf import get_metadata
from calibre.srv.changes import metadata
-from polyglot.builtins import unicode_type
+from polyglot.builtins import iteritems, unicode_type
readonly = False
version = 0 # change this if you change signature of implementation()
@@ -170,7 +170,7 @@ def main(opts, args, dbctx):
vals[field] = val
fvals = []
for field, val in sorted( # ensure series_index fields are set last
- vals.iteritems(), key=lambda k: 1 if k[0].endswith('_index') else 0):
+ iteritems(vals), key=lambda k: 1 if k[0].endswith('_index') else 0):
if field.endswith('_index'):
try:
val = float(val)
diff --git a/src/calibre/db/cli/tests.py b/src/calibre/db/cli/tests.py
index 3c5ad650b0..1f7f960e2c 100644
--- a/src/calibre/db/cli/tests.py
+++ b/src/calibre/db/cli/tests.py
@@ -13,14 +13,14 @@ import csv
import unittest
from cStringIO import StringIO
-
from calibre.db.cli.cmd_check_library import _print_check_library_results
+from polyglot.builtins import iteritems
class Checker(object):
def __init__(self, kw):
- for k, v in kw.iteritems():
+ for k, v in iteritems(kw):
setattr(self, k, v)
diff --git a/src/calibre/db/fields.py b/src/calibre/db/fields.py
index 1ecf43319c..d42ab52db9 100644
--- a/src/calibre/db/fields.py
+++ b/src/calibre/db/fields.py
@@ -2,7 +2,6 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
-# from polyglot.builtins import map
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal '
@@ -20,6 +19,7 @@ from calibre.utils.config_base import tweaks
from calibre.utils.icu import sort_key
from calibre.utils.date import UNDEFINED_DATE, clean_date_for_sort, parse_date
from calibre.utils.localization import calibre_langcode_to_name
+from polyglot.builtins import iteritems, iterkeys
def bool_sort_key(bools_are_tristate):
@@ -150,7 +150,7 @@ class Field(object):
id_map = self.table.id_map
special_sort = hasattr(self, 'category_sort_value')
- for item_id, item_book_ids in self.table.col_book_map.iteritems():
+ for item_id, item_book_ids in iteritems(self.table.col_book_map):
if book_ids is not None:
item_book_ids = item_book_ids.intersection(book_ids)
if item_book_ids:
@@ -184,7 +184,7 @@ class OneToOneField(Field):
return {item_id}
def __iter__(self):
- return self.table.book_col_map.iterkeys()
+ return iterkeys(self.table.book_col_map)
def sort_keys_for_books(self, get_metadata, lang_map):
bcmg = self.table.book_col_map.get
@@ -315,7 +315,7 @@ class CompositeField(OneToOneField):
for v in vals:
if v:
val_map[v].add(book_id)
- for val, book_ids in val_map.iteritems():
+ for val, book_ids in iteritems(val_map):
yield val, book_ids
def get_composite_categories(self, tag_class, book_rating_map, book_ids,
@@ -328,7 +328,7 @@ class CompositeField(OneToOneField):
for val in vals:
if val:
id_map[val].add(book_id)
- for item_id, item_book_ids in id_map.iteritems():
+ for item_id, item_book_ids in iteritems(id_map):
ratings = tuple(r for r in (book_rating_map.get(book_id, 0) for
book_id in item_book_ids) if r > 0)
avg = sum(ratings)/len(ratings) if ratings else 0
@@ -409,7 +409,7 @@ class OnDeviceField(OneToOneField):
val_map = defaultdict(set)
for book_id in candidates:
val_map[self.for_book(book_id, default_value=default_value)].add(book_id)
- for val, book_ids in val_map.iteritems():
+ for val, book_ids in iteritems(val_map):
yield val, book_ids
@@ -456,7 +456,7 @@ class ManyToOneField(Field):
return self.table.col_book_map.get(item_id, set())
def __iter__(self):
- return self.table.id_map.iterkeys()
+ return iterkeys(self.table.id_map)
def sort_keys_for_books(self, get_metadata, lang_map):
sk_map = LazySortMap(self._default_sort_key, self._sort_key, self.table.id_map)
@@ -466,7 +466,7 @@ class ManyToOneField(Field):
def iter_searchable_values(self, get_metadata, candidates, default_value=None):
cbm = self.table.col_book_map
empty = set()
- for item_id, val in self.table.id_map.iteritems():
+ for item_id, val in iteritems(self.table.id_map):
book_ids = cbm.get(item_id, empty).intersection(candidates)
if book_ids:
yield val, book_ids
@@ -475,7 +475,7 @@ class ManyToOneField(Field):
def book_value_map(self):
try:
return {book_id:self.table.id_map[item_id] for book_id, item_id in
- self.table.book_col_map.iteritems()}
+ iteritems(self.table.book_col_map)}
except KeyError:
raise InvalidLinkTable(self.name)
@@ -507,7 +507,7 @@ class ManyToManyField(Field):
return self.table.col_book_map.get(item_id, set())
def __iter__(self):
- return self.table.id_map.iterkeys()
+ return iterkeys(self.table.id_map)
def sort_keys_for_books(self, get_metadata, lang_map):
sk_map = LazySortMap(self._default_sort_key, self._sort_key, self.table.id_map)
@@ -524,7 +524,7 @@ class ManyToManyField(Field):
def iter_searchable_values(self, get_metadata, candidates, default_value=None):
cbm = self.table.col_book_map
empty = set()
- for item_id, val in self.table.id_map.iteritems():
+ for item_id, val in iteritems(self.table.id_map):
book_ids = cbm.get(item_id, empty).intersection(candidates)
if book_ids:
yield val, book_ids
@@ -534,14 +534,14 @@ class ManyToManyField(Field):
cbm = self.table.book_col_map
for book_id in candidates:
val_map[len(cbm.get(book_id, ()))].add(book_id)
- for count, book_ids in val_map.iteritems():
+ for count, book_ids in iteritems(val_map):
yield count, book_ids
@property
def book_value_map(self):
try:
return {book_id:tuple(self.table.id_map[item_id] for item_id in item_ids)
- for book_id, item_ids in self.table.book_col_map.iteritems()}
+ for book_id, item_ids in iteritems(self.table.book_col_map)}
except KeyError:
raise InvalidLinkTable(self.name)
@@ -561,7 +561,7 @@ class IdentifiersField(ManyToManyField):
'Sort by identifier keys'
bcmg = self.table.book_col_map.get
dv = {self._default_sort_key:None}
- return lambda book_id: tuple(sorted(bcmg(book_id, dv).iterkeys()))
+ return lambda book_id: tuple(sorted(iterkeys(bcmg(book_id, dv))))
def iter_searchable_values(self, get_metadata, candidates, default_value=()):
bcm = self.table.book_col_map
@@ -573,7 +573,7 @@ class IdentifiersField(ManyToManyField):
def get_categories(self, tag_class, book_rating_map, lang_map, book_ids=None):
ans = []
- for id_key, item_book_ids in self.table.col_book_map.iteritems():
+ for id_key, item_book_ids in iteritems(self.table.col_book_map):
if book_ids is not None:
item_book_ids = item_book_ids.intersection(book_ids)
if item_book_ids:
@@ -618,13 +618,13 @@ class FormatsField(ManyToManyField):
for val in vals:
val_map[val].add(book_id)
- for val, book_ids in val_map.iteritems():
+ for val, book_ids in iteritems(val_map):
yield val, book_ids
def get_categories(self, tag_class, book_rating_map, lang_map, book_ids=None):
ans = []
- for fmt, item_book_ids in self.table.col_book_map.iteritems():
+ for fmt, item_book_ids in iteritems(self.table.col_book_map):
if book_ids is not None:
item_book_ids = item_book_ids.intersection(book_ids)
if item_book_ids:
@@ -665,7 +665,7 @@ class SeriesField(ManyToOneField):
return ssk(ts(val, order=sso, lang=lang))
sk_map = LazySeriesSortMap(self._default_sort_key, sk, self.table.id_map)
bcmg = self.table.book_col_map.get
- lang_map = {k:v[0] if v else None for k, v in lang_map.iteritems()}
+ lang_map = {k:v[0] if v else None for k, v in iteritems(lang_map)}
def key(book_id):
lang = lang_map.get(book_id, None)
@@ -694,8 +694,8 @@ class SeriesField(ManyToOneField):
sso = tweaks['title_series_sorting']
ts = title_sort
empty = set()
- lang_map = {k:v[0] if v else None for k, v in lang_map.iteritems()}
- for item_id, val in self.table.id_map.iteritems():
+ lang_map = {k:v[0] if v else None for k, v in iteritems(lang_map)}
+ for item_id, val in iteritems(self.table.id_map):
book_ids = cbm.get(item_id, empty).intersection(candidates)
if book_ids:
lang_counts = Counter()
@@ -712,7 +712,7 @@ class TagsField(ManyToManyField):
def get_news_category(self, tag_class, book_ids=None):
news_id = None
ans = []
- for item_id, val in self.table.id_map.iteritems():
+ for item_id, val in iteritems(self.table.id_map):
if val == _('News'):
news_id = item_id
break
@@ -724,7 +724,7 @@ class TagsField(ManyToManyField):
news_books = news_books.intersection(book_ids)
if not news_books:
return ans
- for item_id, item_book_ids in self.table.col_book_map.iteritems():
+ for item_id, item_book_ids in iteritems(self.table.col_book_map):
item_book_ids = item_book_ids.intersection(news_books)
if item_book_ids:
name = self.category_formatter(self.table.id_map[item_id])
diff --git a/src/calibre/db/lazy.py b/src/calibre/db/lazy.py
index ffa71f8612..e3c5128ed4 100644
--- a/src/calibre/db/lazy.py
+++ b/src/calibre/db/lazy.py
@@ -15,7 +15,7 @@ from copy import deepcopy
from calibre.ebooks.metadata.book.base import Metadata, SIMPLE_GET, TOP_LEVEL_IDENTIFIERS, NULL_VALUES, ALL_METADATA_FIELDS
from calibre.ebooks.metadata.book.formatter import SafeFormat
from calibre.utils.date import utcnow
-from polyglot.builtins import unicode_type
+from polyglot.builtins import iterkeys, unicode_type
# Lazy format metadata retrieval {{{
'''
@@ -393,7 +393,7 @@ class ProxyMetadata(Metadata):
def all_field_keys(self):
um = ga(self, '_user_metadata')
- return frozenset(ALL_METADATA_FIELDS.union(um.iterkeys()))
+ return frozenset(ALL_METADATA_FIELDS.union(iterkeys(um)))
@property
def _proxy_metadata(self):
diff --git a/src/calibre/db/legacy.py b/src/calibre/db/legacy.py
index 60b2f083d8..3eff229a68 100644
--- a/src/calibre/db/legacy.py
+++ b/src/calibre/db/legacy.py
@@ -7,7 +7,7 @@ __license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal '
import os, traceback, types
-from polyglot.builtins import zip
+from polyglot.builtins import iteritems, zip
from calibre import force_unicode, isbytestring
from calibre.constants import preferred_encoding
@@ -171,14 +171,14 @@ class LibraryDatabase(object):
return not bool(self.new_api.fields['title'].table.book_col_map)
def get_usage_count_by_id(self, field):
- return [[k, v] for k, v in self.new_api.get_usage_count_by_id(field).iteritems()]
+ return [[k, v] for k, v in iteritems(self.new_api.get_usage_count_by_id(field))]
def field_id_map(self, field):
- return [(k, v) for k, v in self.new_api.get_id_map(field).iteritems()]
+ return [(k, v) for k, v in iteritems(self.new_api.get_id_map(field))]
def get_custom_items_with_ids(self, label=None, num=None):
try:
- return [[k, v] for k, v in self.new_api.get_id_map(self.custom_field_name(label, num)).iteritems()]
+ return [[k, v] for k, v in iteritems(self.new_api.get_id_map(self.custom_field_name(label, num)))]
except ValueError:
return []
@@ -233,7 +233,7 @@ class LibraryDatabase(object):
paths, formats, metadata = [], [], []
for mi, format_map in duplicates:
metadata.append(mi)
- for fmt, path in format_map.iteritems():
+ for fmt, path in iteritems(format_map):
formats.append(fmt)
paths.append(path)
duplicates = (paths, formats, metadata)
@@ -416,7 +416,7 @@ class LibraryDatabase(object):
ans = set()
if title:
title = icu_lower(force_unicode(title))
- for book_id, x in self.new_api.get_id_map('title').iteritems():
+ for book_id, x in iteritems(self.new_api.get_id_map('title')):
if icu_lower(x) == title:
ans.add(book_id)
if not all_matches:
@@ -521,7 +521,7 @@ class LibraryDatabase(object):
def delete_tags(self, tags):
with self.new_api.write_lock:
- tag_map = {icu_lower(v):k for k, v in self.new_api._get_id_map('tags').iteritems()}
+ tag_map = {icu_lower(v):k for k, v in iteritems(self.new_api._get_id_map('tags'))}
tag_ids = (tag_map.get(icu_lower(tag), None) for tag in tags)
tag_ids = tuple(tid for tid in tag_ids if tid is not None)
if tag_ids:
@@ -547,7 +547,7 @@ class LibraryDatabase(object):
def format_files(self, index, index_is_id=False):
book_id = index if index_is_id else self.id(index)
- return [(v, k) for k, v in self.new_api.format_files(book_id).iteritems()]
+ return [(v, k) for k, v in iteritems(self.new_api.format_files(book_id))]
def format_metadata(self, book_id, fmt, allow_cache=True, update_db=False, commit=False):
return self.new_api.format_metadata(book_id, fmt, allow_cache=allow_cache, update_db=update_db)
@@ -632,7 +632,7 @@ class LibraryDatabase(object):
def delete_item_from_multiple(self, item, label=None, num=None):
field = self.custom_field_name(label, num)
existing = self.new_api.get_id_map(field)
- rmap = {icu_lower(v):k for k, v in existing.iteritems()}
+ rmap = {icu_lower(v):k for k, v in iteritems(existing)}
item_id = rmap.get(icu_lower(item), None)
if item_id is None:
return []
@@ -854,7 +854,7 @@ for field in ('authors', 'tags', 'publisher', 'series'):
LibraryDatabase.all_formats = MT(lambda self:self.new_api.all_field_names('formats'))
LibraryDatabase.all_custom = MT(lambda self, label=None, num=None:self.new_api.all_field_names(self.custom_field_name(label, num)))
-for func, field in {'all_authors':'authors', 'all_titles':'title', 'all_tags2':'tags', 'all_series':'series', 'all_publishers':'publisher'}.iteritems():
+for func, field in iteritems({'all_authors':'authors', 'all_titles':'title', 'all_tags2':'tags', 'all_series':'series', 'all_publishers':'publisher'}):
def getter(field):
def func(self):
return self.field_id_map(field)
@@ -864,16 +864,16 @@ for func, field in {'all_authors':'authors', 'all_titles':'title', 'all_tags2':'
LibraryDatabase.all_tags = MT(lambda self: list(self.all_tag_names()))
LibraryDatabase.get_all_identifier_types = MT(lambda self: list(self.new_api.fields['identifiers'].table.all_identifier_types()))
LibraryDatabase.get_authors_with_ids = MT(
- lambda self: [[aid, adata['name'], adata['sort'], adata['link']] for aid, adata in self.new_api.author_data().iteritems()])
+ lambda self: [[aid, adata['name'], adata['sort'], adata['link']] for aid, adata in iteritems(self.new_api.author_data())])
LibraryDatabase.get_author_id = MT(
- lambda self, author: {icu_lower(v):k for k, v in self.new_api.get_id_map('authors').iteritems()}.get(icu_lower(author), None))
+ lambda self, author: {icu_lower(v):k for k, v in iteritems(self.new_api.get_id_map('authors'))}.get(icu_lower(author), None))
for field in ('tags', 'series', 'publishers', 'ratings', 'languages'):
def getter(field):
fname = field[:-1] if field in {'publishers', 'ratings'} else field
def func(self):
- return [[tid, tag] for tid, tag in self.new_api.get_id_map(fname).iteritems()]
+ return [[tid, tag] for tid, tag in iteritems(self.new_api.get_id_map(fname))]
return func
setattr(LibraryDatabase, 'get_%s_with_ids' % field, MT(getter(field)))
diff --git a/src/calibre/db/restore.py b/src/calibre/db/restore.py
index 11526cea34..7db2ad78d1 100644
--- a/src/calibre/db/restore.py
+++ b/src/calibre/db/restore.py
@@ -16,6 +16,7 @@ from calibre.db.cache import Cache
from calibre.constants import filesystem_encoding
from calibre.utils.date import utcfromtimestamp
from calibre import isbytestring, force_unicode
+from polyglot.builtins import iteritems
NON_EBOOK_EXTENSIONS = frozenset([
'jpg', 'jpeg', 'gif', 'png', 'bmp',
@@ -206,7 +207,7 @@ class Restore(Thread):
self.mismatched_dirs.append(dirpath)
alm = mi.get('author_link_map', {})
- for author, link in alm.iteritems():
+ for author, link in iteritems(alm):
existing_link, timestamp = self.authors_links.get(author, (None, None))
if existing_link is None or existing_link != link and timestamp < mi.timestamp:
self.authors_links[author] = (link, mi.timestamp)
@@ -259,7 +260,7 @@ class Restore(Thread):
self.progress_callback(book['mi'].title, i+1)
id_map = db.get_item_ids('authors', [author for author in self.authors_links])
- link_map = {aid:self.authors_links[name][0] for name, aid in id_map.iteritems() if aid is not None}
+ link_map = {aid:self.authors_links[name][0] for name, aid in iteritems(id_map) if aid is not None}
if link_map:
db.set_link_for_authors(link_map)
db.close()
diff --git a/src/calibre/db/schema_upgrades.py b/src/calibre/db/schema_upgrades.py
index d2e18ed94f..7b5dc28832 100644
--- a/src/calibre/db/schema_upgrades.py
+++ b/src/calibre/db/schema_upgrades.py
@@ -11,7 +11,7 @@ import os
from calibre import prints
from calibre.utils.date import isoformat, DEFAULT_DATE
-from polyglot.builtins import unicode_type
+from polyglot.builtins import iterkeys, itervalues, unicode_type
class SchemaUpgrade(object):
@@ -299,7 +299,7 @@ class SchemaUpgrade(object):
'''.format(tn=table_name, cn=column_name, vcn=view_column_name))
self.db.execute(script)
- for field in self.field_metadata.itervalues():
+ for field in itervalues(self.field_metadata):
if field['is_category'] and not field['is_custom'] and 'link_column' in field:
table = self.db.get(
'SELECT name FROM sqlite_master WHERE type="table" AND name=?',
@@ -375,7 +375,7 @@ class SchemaUpgrade(object):
'''.format(lt=link_table_name, table=table_name)
self.db.execute(script)
- for field in self.field_metadata.itervalues():
+ for field in itervalues(self.field_metadata):
if field['is_category'] and not field['is_custom'] and 'link_column' in field:
table = self.db.get(
'SELECT name FROM sqlite_master WHERE type="table" AND name=?',
@@ -596,7 +596,7 @@ class SchemaUpgrade(object):
custom_recipe_filename)
bdir = os.path.dirname(custom_recipes.file_path)
for id_, title, script in recipes:
- existing = frozenset(map(int, custom_recipes.iterkeys()))
+ existing = frozenset(map(int, iterkeys(custom_recipes)))
if id_ in existing:
id_ = max(existing) + 1000
id_ = str(id_)
diff --git a/src/calibre/db/search.py b/src/calibre/db/search.py
index cc70ec4a39..b4cea09c71 100644
--- a/src/calibre/db/search.py
+++ b/src/calibre/db/search.py
@@ -19,7 +19,7 @@ from calibre.utils.date import parse_date, UNDEFINED_DATE, now, dt_as_local
from calibre.utils.icu import primary_contains, sort_key
from calibre.utils.localization import lang_map, canonicalize_lang
from calibre.utils.search_query_parser import SearchQueryParser, ParseException
-from polyglot.builtins import unicode_type, string_or_bytes
+from polyglot.builtins import iteritems, iterkeys, unicode_type, string_or_bytes
CONTAINS_MATCH = 0
EQUALS_MATCH = 1
@@ -167,7 +167,7 @@ class DateSearch(object): # {{{
matches |= book_ids
return matches
- for k, relop in self.operators.iteritems():
+ for k, relop in iteritems(self.operators):
if query.startswith(k):
query = query[len(k):]
break
@@ -254,7 +254,7 @@ class NumericSearch(object): # {{{
else:
relop = lambda x,y: x is not None
else:
- for k, relop in self.operators.iteritems():
+ for k, relop in iteritems(self.operators):
if query.startswith(k):
query = query[len(k):]
break
@@ -372,7 +372,7 @@ class KeyPairSearch(object): # {{{
return found if valq == 'true' else candidates - found
for m, book_ids in field_iter():
- for key, val in m.iteritems():
+ for key, val in iteritems(m):
if (keyq and not _match(keyq, (key,), keyq_mkind,
use_primary_find_in_search=use_primary_find)):
continue
@@ -445,7 +445,7 @@ class SavedSearchQueries(object): # {{{
db._set_pref(self.opt_name, smap)
def names(self):
- return sorted(self.queries.iterkeys(), key=sort_key)
+ return sorted(iterkeys(self.queries), key=sort_key)
# }}}
@@ -632,7 +632,7 @@ class Parser(SearchQueryParser): # {{{
text_fields = set()
field_metadata = {}
- for x, fm in self.field_metadata.iteritems():
+ for x, fm in iteritems(self.field_metadata):
if x.startswith('@'):
continue
if fm['search_terms'] and x not in {'series_sort', 'id'}:
@@ -670,7 +670,7 @@ class Parser(SearchQueryParser): # {{{
q = canonicalize_lang(query)
if q is None:
lm = lang_map()
- rm = {v.lower():k for k,v in lm.iteritems()}
+ rm = {v.lower():k for k,v in iteritems(lm)}
q = rm.get(query, query)
if matchkind == CONTAINS_MATCH and q.lower() in {'true', 'false'}:
@@ -799,7 +799,7 @@ class LRUCache(object): # {{{
return self.get(key)
def __iter__(self):
- return self.item_map.iteritems()
+ return iteritems(self.item_map)
# }}}
diff --git a/src/calibre/db/tables.py b/src/calibre/db/tables.py
index 1dfd722ff0..b22b53a5b7 100644
--- a/src/calibre/db/tables.py
+++ b/src/calibre/db/tables.py
@@ -14,7 +14,7 @@ from collections import defaultdict
from calibre.constants import plugins
from calibre.utils.date import parse_date, UNDEFINED_DATE, utc_tz
from calibre.ebooks.metadata import author_to_author_sort
-from polyglot.builtins import range
+from polyglot.builtins import iteritems, itervalues, range
_c_speedup = plugins['speedup'][0].parse_date
@@ -154,10 +154,10 @@ class UUIDTable(OneToOneTable):
def read(self, db):
OneToOneTable.read(self, db)
- self.uuid_to_id_map = {v:k for k, v in self.book_col_map.iteritems()}
+ self.uuid_to_id_map = {v:k for k, v in iteritems(self.book_col_map)}
def update_uuid_cache(self, book_id_val_map):
- for book_id, uuid in book_id_val_map.iteritems():
+ for book_id, uuid in iteritems(book_id_val_map):
self.uuid_to_id_map.pop(self.book_col_map.get(book_id, None), None) # discard old uuid
self.uuid_to_id_map[uuid] = book_id
@@ -226,7 +226,7 @@ class ManyToOneTable(Table):
bcm[book] = item_id
def fix_link_table(self, db):
- linked_item_ids = {item_id for item_id in self.book_col_map.itervalues()}
+ linked_item_ids = {item_id for item_id in itervalues(self.book_col_map)}
extra_item_ids = linked_item_ids - set(self.id_map)
if extra_item_ids:
for item_id in extra_item_ids:
@@ -238,10 +238,10 @@ class ManyToOneTable(Table):
def fix_case_duplicates(self, db):
case_map = defaultdict(set)
- for item_id, val in self.id_map.iteritems():
+ for item_id, val in iteritems(self.id_map):
case_map[icu_lower(val)].add(item_id)
- for v in case_map.itervalues():
+ for v in itervalues(case_map):
if len(v) > 1:
main_id = min(v)
v.discard(main_id)
@@ -322,7 +322,7 @@ class ManyToOneTable(Table):
return affected_books
def rename_item(self, item_id, new_name, db):
- rmap = {icu_lower(v):k for k, v in self.id_map.iteritems()}
+ rmap = {icu_lower(v):k for k, v in iteritems(self.id_map)}
existing_item = rmap.get(icu_lower(new_name), None)
table, col, lcol = self.metadata['table'], self.metadata['column'], self.metadata['link_column']
affected_books = self.col_book_map.get(item_id, set())
@@ -353,9 +353,9 @@ class RatingTable(ManyToOneTable):
ManyToOneTable.read_id_maps(self, db)
# Ensure there are no records with rating=0 in the table. These should
# be represented as rating:None instead.
- bad_ids = {item_id for item_id, rating in self.id_map.iteritems() if rating == 0}
+ bad_ids = {item_id for item_id, rating in iteritems(self.id_map) if rating == 0}
if bad_ids:
- self.id_map = {item_id:rating for item_id, rating in self.id_map.iteritems() if rating != 0}
+ self.id_map = {item_id:rating for item_id, rating in iteritems(self.id_map) if rating != 0}
db.executemany('DELETE FROM {0} WHERE {1}=?'.format(self.link_table, self.metadata['link_column']),
tuple((x,) for x in bad_ids))
db.execute('DELETE FROM {0} WHERE {1}=0'.format(
@@ -382,10 +382,10 @@ class ManyToManyTable(ManyToOneTable):
cbm[item_id].add(book)
bcm[book].append(item_id)
- self.book_col_map = {k:tuple(v) for k, v in bcm.iteritems()}
+ self.book_col_map = {k:tuple(v) for k, v in iteritems(bcm)}
def fix_link_table(self, db):
- linked_item_ids = {item_id for item_ids in self.book_col_map.itervalues() for item_id in item_ids}
+ linked_item_ids = {item_id for item_ids in itervalues(self.book_col_map) for item_id in item_ids}
extra_item_ids = linked_item_ids - set(self.id_map)
if extra_item_ids:
for item_id in extra_item_ids:
@@ -461,7 +461,7 @@ class ManyToManyTable(ManyToOneTable):
return affected_books
def rename_item(self, item_id, new_name, db):
- rmap = {icu_lower(v):k for k, v in self.id_map.iteritems()}
+ rmap = {icu_lower(v):k for k, v in iteritems(self.id_map)}
existing_item = rmap.get(icu_lower(new_name), None)
table, col, lcol = self.metadata['table'], self.metadata['column'], self.metadata['link_column']
affected_books = self.col_book_map.get(item_id, set())
@@ -490,10 +490,10 @@ class ManyToManyTable(ManyToOneTable):
def fix_case_duplicates(self, db):
from calibre.db.write import uniq
case_map = defaultdict(set)
- for item_id, val in self.id_map.iteritems():
+ for item_id, val in iteritems(self.id_map):
case_map[icu_lower(val)].add(item_id)
- for v in case_map.itervalues():
+ for v in itervalues(case_map):
if len(v) > 1:
done_books = set()
main_id = min(v)
@@ -541,19 +541,19 @@ class AuthorsTable(ManyToManyTable):
lm[aid] = link
def set_sort_names(self, aus_map, db):
- aus_map = {aid:(a or '').strip() for aid, a in aus_map.iteritems()}
- aus_map = {aid:a for aid, a in aus_map.iteritems() if a != self.asort_map.get(aid, None)}
+ aus_map = {aid:(a or '').strip() for aid, a in iteritems(aus_map)}
+ aus_map = {aid:a for aid, a in iteritems(aus_map) if a != self.asort_map.get(aid, None)}
self.asort_map.update(aus_map)
db.executemany('UPDATE authors SET sort=? WHERE id=?',
- [(v, k) for k, v in aus_map.iteritems()])
+ [(v, k) for k, v in iteritems(aus_map)])
return aus_map
def set_links(self, link_map, db):
- link_map = {aid:(l or '').strip() for aid, l in link_map.iteritems()}
- link_map = {aid:l for aid, l in link_map.iteritems() if l != self.alink_map.get(aid, None)}
+ link_map = {aid:(l or '').strip() for aid, l in iteritems(link_map)}
+ link_map = {aid:l for aid, l in iteritems(link_map) if l != self.alink_map.get(aid, None)}
self.alink_map.update(link_map)
db.executemany('UPDATE authors SET link=? WHERE id=?',
- [(v, k) for k, v in link_map.iteritems()])
+ [(v, k) for k, v in iteritems(link_map)])
return link_map
def remove_books(self, book_ids, db):
@@ -602,7 +602,7 @@ class FormatsTable(ManyToManyTable):
fnm[book][fmt] = name
sm[book][fmt] = sz
- self.book_col_map = {k:tuple(sorted(v)) for k, v in bcm.iteritems()}
+ self.book_col_map = {k:tuple(sorted(v)) for k, v in iteritems(bcm)}
def remove_books(self, book_ids, db):
clean = ManyToManyTable.remove_books(self, book_ids, db)
@@ -617,21 +617,21 @@ class FormatsTable(ManyToManyTable):
(fname, book_id, fmt))
def remove_formats(self, formats_map, db):
- for book_id, fmts in formats_map.iteritems():
+ for book_id, fmts in iteritems(formats_map):
self.book_col_map[book_id] = [fmt for fmt in self.book_col_map.get(book_id, []) if fmt not in fmts]
for m in (self.fname_map, self.size_map):
- m[book_id] = {k:v for k, v in m[book_id].iteritems() if k not in fmts}
+ m[book_id] = {k:v for k, v in iteritems(m[book_id]) if k not in fmts}
for fmt in fmts:
try:
self.col_book_map[fmt].discard(book_id)
except KeyError:
pass
db.executemany('DELETE FROM data WHERE book=? AND format=?',
- [(book_id, fmt) for book_id, fmts in formats_map.iteritems() for fmt in fmts])
+ [(book_id, fmt) for book_id, fmts in iteritems(formats_map) for fmt in fmts])
def zero_max(book_id):
try:
- return max(self.size_map[book_id].itervalues())
+ return max(itervalues(self.size_map[book_id]))
except ValueError:
return 0
@@ -661,7 +661,7 @@ class FormatsTable(ManyToManyTable):
self.size_map[book_id][fmt] = size
db.execute('INSERT OR REPLACE INTO data (book,format,uncompressed_size,name) VALUES (?,?,?,?)',
(book_id, fmt, size, fname))
- return max(self.size_map[book_id].itervalues())
+ return max(itervalues(self.size_map[book_id]))
class IdentifiersTable(ManyToManyTable):
@@ -702,4 +702,4 @@ class IdentifiersTable(ManyToManyTable):
raise NotImplementedError('Cannot rename identifiers')
def all_identifier_types(self):
- return frozenset(k for k, v in self.col_book_map.iteritems() if v)
+ return frozenset(k for k, v in iteritems(self.col_book_map) if v)
diff --git a/src/calibre/db/tests/add_remove.py b/src/calibre/db/tests/add_remove.py
index 7dee5d33ec..a39a698f8b 100644
--- a/src/calibre/db/tests/add_remove.py
+++ b/src/calibre/db/tests/add_remove.py
@@ -15,6 +15,7 @@ from datetime import timedelta
from calibre.db.tests.base import BaseTest, IMG
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.date import now, UNDEFINED_DATE
+from polyglot.builtins import iteritems, itervalues
def import_test(replacement_data, replacement_fmt=None):
@@ -217,14 +218,14 @@ class AddRemoveTest(BaseTest):
authors = cache.fields['authors'].table
# Delete a single book, with no formats and check cleaning
- self.assertIn(_('Unknown'), set(authors.id_map.itervalues()))
+ self.assertIn(_('Unknown'), set(itervalues(authors.id_map)))
olen = len(authors.id_map)
- item_id = {v:k for k, v in authors.id_map.iteritems()}[_('Unknown')]
+ item_id = {v:k for k, v in iteritems(authors.id_map)}[_('Unknown')]
cache.remove_books((3,))
for c in (cache, self.init_cache()):
table = c.fields['authors'].table
self.assertNotIn(3, c.all_book_ids())
- self.assertNotIn(_('Unknown'), set(table.id_map.itervalues()))
+ self.assertNotIn(_('Unknown'), set(itervalues(table.id_map)))
self.assertNotIn(item_id, table.asort_map)
self.assertNotIn(item_id, table.alink_map)
ae(len(table.id_map), olen-1)
@@ -235,17 +236,17 @@ class AddRemoveTest(BaseTest):
authorpath = os.path.dirname(bookpath)
os.mkdir(os.path.join(authorpath, '.DS_Store'))
open(os.path.join(authorpath, 'Thumbs.db'), 'wb').close()
- item_id = {v:k for k, v in cache.fields['#series'].table.id_map.iteritems()}['My Series Two']
+ item_id = {v:k for k, v in iteritems(cache.fields['#series'].table.id_map)}['My Series Two']
cache.remove_books((1,), permanent=True)
for x in (fmtpath, bookpath, authorpath):
af(os.path.exists(x), 'The file %s exists, when it should not' % x)
for c in (cache, self.init_cache()):
table = c.fields['authors'].table
self.assertNotIn(1, c.all_book_ids())
- self.assertNotIn('Author Two', set(table.id_map.itervalues()))
- self.assertNotIn(6, set(c.fields['rating'].table.id_map.itervalues()))
- self.assertIn('A Series One', set(c.fields['series'].table.id_map.itervalues()))
- self.assertNotIn('My Series Two', set(c.fields['#series'].table.id_map.itervalues()))
+ self.assertNotIn('Author Two', set(itervalues(table.id_map)))
+ self.assertNotIn(6, set(itervalues(c.fields['rating'].table.id_map)))
+ self.assertIn('A Series One', set(itervalues(c.fields['series'].table.id_map)))
+ self.assertNotIn('My Series Two', set(itervalues(c.fields['#series'].table.id_map)))
self.assertNotIn(item_id, c.fields['#series'].table.col_book_map)
self.assertNotIn(1, c.fields['#series'].table.book_col_map)
@@ -264,7 +265,7 @@ class AddRemoveTest(BaseTest):
fmtpath = cache.format_abspath(1, 'FMT1')
bookpath = os.path.dirname(fmtpath)
authorpath = os.path.dirname(bookpath)
- item_id = {v:k for k, v in cache.fields['#series'].table.id_map.iteritems()}['My Series Two']
+ item_id = {v:k for k, v in iteritems(cache.fields['#series'].table.id_map)}['My Series Two']
cache.remove_books((1,))
delete_service().wait()
for x in (fmtpath, bookpath, authorpath):
diff --git a/src/calibre/db/tests/filesystem.py b/src/calibre/db/tests/filesystem.py
index c426f024c0..11c9fbe0d7 100644
--- a/src/calibre/db/tests/filesystem.py
+++ b/src/calibre/db/tests/filesystem.py
@@ -13,6 +13,7 @@ from io import BytesIO
from calibre.constants import iswindows
from calibre.db.tests.base import BaseTest
from calibre.ptempfile import TemporaryDirectory
+from polyglot.builtins import iterkeys
class FilesystemTest(BaseTest):
@@ -55,7 +56,7 @@ class FilesystemTest(BaseTest):
cache2 = self.init_cache(cl)
for c in (cache, cache2):
data = self.get_filesystem_data(c, 1)
- ae(set(orig_data.iterkeys()), set(data.iterkeys()))
+ ae(set(iterkeys(orig_data)), set(iterkeys(data)))
ae(orig_data, data, 'Filesystem data does not match')
ae(c.field_for('path', 1), 'Moved/Moved (1)')
ae(c.field_for('path', 3), 'Moved1/Moved1 (3)')
diff --git a/src/calibre/db/tests/legacy.py b/src/calibre/db/tests/legacy.py
index 919f817b00..9310392882 100644
--- a/src/calibre/db/tests/legacy.py
+++ b/src/calibre/db/tests/legacy.py
@@ -14,7 +14,7 @@ from operator import itemgetter
from calibre.library.field_metadata import fm_as_dict
from calibre.db.tests.base import BaseTest
-from polyglot.builtins import range
+from polyglot.builtins import iteritems, iterkeys, range
# Utils {{{
@@ -81,7 +81,7 @@ class LegacyTest(BaseTest):
# We ignore the key rec_index, since it is not stable for
# custom columns (it is created by iterating over a dict)
return {k.decode('utf-8') if isinstance(k, bytes) else k:to_unicode(v)
- for k, v in x.iteritems() if k != 'rec_index'}
+ for k, v in iteritems(x) if k != 'rec_index'}
return x
def get_props(db):
@@ -108,7 +108,7 @@ class LegacyTest(BaseTest):
'Test the get_property interface for reading data'
def get_values(db):
ans = {}
- for label, loc in db.FIELD_MAP.iteritems():
+ for label, loc in iteritems(db.FIELD_MAP):
if isinstance(label, numbers.Integral):
label = '#'+db.custom_column_num_map[label]['label']
label = type('')(label)
@@ -186,7 +186,7 @@ class LegacyTest(BaseTest):
self.assertEqual(dict(db.prefs), dict(ndb.prefs))
- for meth, args in {
+ for meth, args in iteritems({
'find_identical_books': [(Metadata('title one', ['author one']),), (Metadata('unknown'),), (Metadata('xxxx'),)],
'get_books_for_category': [('tags', newstag), ('#formats', 'FMT1')],
'get_next_series_num_for': [('A Series One',)],
@@ -251,7 +251,7 @@ class LegacyTest(BaseTest):
'book_on_device_string':[(1,), (2,), (3,)],
'books_in_series_of':[(0,), (1,), (2,)],
'books_with_same_title':[(Metadata(db.title(0)),), (Metadata(db.title(1)),), (Metadata('1234'),)],
- }.iteritems():
+ }):
fmt = lambda x: x
if meth[0] in {'!', '@'}:
fmt = {'!':dict, '@':frozenset}[meth[0]]
@@ -277,8 +277,8 @@ class LegacyTest(BaseTest):
old = db.get_data_as_dict(prefix='test-prefix')
new = ndb.get_data_as_dict(prefix='test-prefix')
for o, n in zip(old, new):
- o = {type('')(k) if isinstance(k, bytes) else k:set(v) if isinstance(v, list) else v for k, v in o.iteritems()}
- n = {k:set(v) if isinstance(v, list) else v for k, v in n.iteritems()}
+ o = {type('')(k) if isinstance(k, bytes) else k:set(v) if isinstance(v, list) else v for k, v in iteritems(o)}
+ n = {k:set(v) if isinstance(v, list) else v for k, v in iteritems(n)}
self.assertEqual(o, n)
ndb.search('title:Unknown')
@@ -316,9 +316,9 @@ class LegacyTest(BaseTest):
db = self.init_old()
cache = ndb.new_api
tmap = cache.get_id_map('tags')
- t = next(tmap.iterkeys())
+ t = next(iterkeys(tmap))
pmap = cache.get_id_map('publisher')
- p = next(pmap.iterkeys())
+ p = next(iterkeys(pmap))
run_funcs(self, db, ndb, (
('delete_tag_using_id', t),
('delete_publisher_using_id', p),
@@ -647,10 +647,10 @@ class LegacyTest(BaseTest):
ndb = self.init_legacy(self.cloned_library)
db = self.init_old(self.cloned_library)
- a = {v:k for k, v in ndb.new_api.get_id_map('authors').iteritems()}['Author One']
- t = {v:k for k, v in ndb.new_api.get_id_map('tags').iteritems()}['Tag One']
- s = {v:k for k, v in ndb.new_api.get_id_map('series').iteritems()}['A Series One']
- p = {v:k for k, v in ndb.new_api.get_id_map('publisher').iteritems()}['Publisher One']
+ a = {v:k for k, v in iteritems(ndb.new_api.get_id_map('authors'))}['Author One']
+ t = {v:k for k, v in iteritems(ndb.new_api.get_id_map('tags'))}['Tag One']
+ s = {v:k for k, v in iteritems(ndb.new_api.get_id_map('series'))}['A Series One']
+ p = {v:k for k, v in iteritems(ndb.new_api.get_id_map('publisher'))}['Publisher One']
run_funcs(self, db, ndb, (
('rename_author', a, 'Author Two'),
('rename_tag', t, 'News'),
@@ -688,11 +688,11 @@ class LegacyTest(BaseTest):
run_funcs(self, db, ndb, [(func, idx, label) for idx in range(3)])
# Test renaming/deleting
- t = {v:k for k, v in ndb.new_api.get_id_map('#tags').iteritems()}['My Tag One']
- t2 = {v:k for k, v in ndb.new_api.get_id_map('#tags').iteritems()}['My Tag Two']
- a = {v:k for k, v in ndb.new_api.get_id_map('#authors').iteritems()}['My Author Two']
- a2 = {v:k for k, v in ndb.new_api.get_id_map('#authors').iteritems()}['Custom One']
- s = {v:k for k, v in ndb.new_api.get_id_map('#series').iteritems()}['My Series One']
+ t = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#tags'))}['My Tag One']
+ t2 = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#tags'))}['My Tag Two']
+ a = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#authors'))}['My Author Two']
+ a2 = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#authors'))}['Custom One']
+ s = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#series'))}['My Series One']
run_funcs(self, db, ndb, (
('delete_custom_item_using_id', t, 'tags'),
('delete_custom_item_using_id', a, 'authors'),
diff --git a/src/calibre/db/tests/reading.py b/src/calibre/db/tests/reading.py
index 4c95644e1a..3b90b4d93d 100644
--- a/src/calibre/db/tests/reading.py
+++ b/src/calibre/db/tests/reading.py
@@ -13,7 +13,7 @@ from time import time
from calibre.utils.date import utc_tz
from calibre.db.tests.base import BaseTest
-from polyglot.builtins import range
+from polyglot.builtins import iteritems, iterkeys, itervalues, range
class ReadingTest(BaseTest):
@@ -116,8 +116,8 @@ class ReadingTest(BaseTest):
},
}
- for book_id, test in tests.iteritems():
- for field, expected_val in test.iteritems():
+ for book_id, test in iteritems(tests):
+ for field, expected_val in iteritems(test):
val = cache.field_for(field, book_id)
if isinstance(val, tuple) and 'authors' not in field and 'languages' not in field:
val, expected_val = set(val), set(expected_val)
@@ -130,7 +130,7 @@ class ReadingTest(BaseTest):
'Test sorting'
cache = self.init_cache()
ae = self.assertEqual
- for field, order in {
+ for field, order in iteritems({
'title' : [2, 1, 3],
'authors': [2, 1, 3],
'series' : [3, 1, 2],
@@ -154,7 +154,7 @@ class ReadingTest(BaseTest):
'#yesno':[2, 1, 3],
'#comments':[3, 2, 1],
'id': [1, 2, 3],
- }.iteritems():
+ }):
x = list(reversed(order))
ae(order, cache.multisort([(field, True)],
ids_to_sort=x),
@@ -222,7 +222,7 @@ class ReadingTest(BaseTest):
old_metadata = {i:old.get_metadata(
i, index_is_id=True, get_cover=True, cover_as_data=True) for i in
range(1, 4)}
- for mi in old_metadata.itervalues():
+ for mi in itervalues(old_metadata):
mi.format_metadata = dict(mi.format_metadata)
if mi.formats:
mi.formats = tuple(mi.formats)
@@ -234,7 +234,7 @@ class ReadingTest(BaseTest):
new_metadata = {i:cache.get_metadata(
i, get_cover=True, cover_as_data=True) for i in range(1, 4)}
cache = None
- for mi2, mi1 in zip(new_metadata.values(), old_metadata.values()):
+ for mi2, mi1 in zip(list(new_metadata.values()), list(old_metadata.values())):
self.compare_metadata(mi1, mi2)
# }}}
@@ -262,7 +262,7 @@ class ReadingTest(BaseTest):
old.conn.close()
old = None
cache = self.init_cache(self.library_path)
- for book_id, cdata in covers.iteritems():
+ for book_id, cdata in iteritems(covers):
self.assertEqual(cdata, cache.cover(book_id), 'Reading of cover failed')
f = cache.cover(book_id, as_file=True)
self.assertEqual(cdata, f.read() if f else f, 'Reading of cover as file failed')
@@ -325,7 +325,7 @@ class ReadingTest(BaseTest):
old = None
cache = self.init_cache(self.cloned_library)
- for query, ans in oldvals.iteritems():
+ for query, ans in iteritems(oldvals):
nr = cache.search(query, '')
self.assertEqual(ans, nr,
'Old result: %r != New result: %r for search: %s'%(
@@ -407,11 +407,11 @@ class ReadingTest(BaseTest):
lf = {i:set(old.formats(i, index_is_id=True).split(',')) if old.formats(
i, index_is_id=True) else set() for i in ids}
formats = {i:{f:old.format(i, f, index_is_id=True) for f in fmts} for
- i, fmts in lf.iteritems()}
+ i, fmts in iteritems(lf)}
old.conn.close()
old = None
cache = self.init_cache(self.library_path)
- for book_id, fmts in lf.iteritems():
+ for book_id, fmts in iteritems(lf):
self.assertEqual(fmts, set(cache.formats(book_id)),
'Set of formats is not the same')
for fmt in fmts:
@@ -439,9 +439,9 @@ class ReadingTest(BaseTest):
'Test getting the author sort for authors from the db'
cache = self.init_cache()
table = cache.fields['authors'].table
- table.set_sort_names({next(table.id_map.iterkeys()): 'Fake Sort'}, cache.backend)
+ table.set_sort_names({next(iterkeys(table.id_map)): 'Fake Sort'}, cache.backend)
- authors = tuple(table.id_map.itervalues())
+ authors = tuple(itervalues(table.id_map))
nval = cache.author_sort_from_authors(authors)
self.assertIn('Fake Sort', nval)
@@ -458,7 +458,7 @@ class ReadingTest(BaseTest):
cache.set_field('series', {3:'test series'})
cache.set_field('series_index', {3:13})
table = cache.fields['series'].table
- series = tuple(table.id_map.itervalues())
+ series = tuple(itervalues(table.id_map))
nvals = {s:cache.get_next_series_num_for(s) for s in series}
db = self.init_old()
self.assertEqual({s:db.get_next_series_num_for(s) for s in series}, nvals)
@@ -471,7 +471,7 @@ class ReadingTest(BaseTest):
from calibre.ebooks.metadata.book.base import Metadata
cache = self.init_cache()
db = self.init_old()
- for title in cache.fields['title'].table.book_col_map.itervalues():
+ for title in itervalues(cache.fields['title'].table.book_col_map):
for x in (db, cache):
self.assertTrue(x.has_book(Metadata(title)))
self.assertTrue(x.has_book(Metadata(title.upper())))
diff --git a/src/calibre/db/tests/writing.py b/src/calibre/db/tests/writing.py
index 5806b060bd..e41b30c743 100644
--- a/src/calibre/db/tests/writing.py
+++ b/src/calibre/db/tests/writing.py
@@ -14,6 +14,7 @@ from io import BytesIO
from calibre.ebooks.metadata import author_to_author_sort
from calibre.utils.date import UNDEFINED_DATE
from calibre.db.tests.base import BaseTest, IMG
+from polyglot.builtins import iteritems, itervalues
class WritingTest(BaseTest):
@@ -166,7 +167,7 @@ class WritingTest(BaseTest):
self.assertEqual(cache.set_field('#enum', {1:None}), {1})
cache2 = self.init_cache(cl)
for c in (cache, cache2):
- for i, val in {1:None, 2:'One', 3:'Three'}.iteritems():
+ for i, val in iteritems({1:None, 2:'One', 3:'Three'}):
self.assertEqual(c.field_for('#enum', i), val)
del cache2
@@ -176,9 +177,9 @@ class WritingTest(BaseTest):
self.assertEqual(cache.set_field('#rating', {1:None, 2:4, 3:8}), {1, 2, 3})
cache2 = self.init_cache(cl)
for c in (cache, cache2):
- for i, val in {1:None, 2:4, 3:2}.iteritems():
+ for i, val in iteritems({1:None, 2:4, 3:2}):
self.assertEqual(c.field_for('rating', i), val)
- for i, val in {1:None, 2:4, 3:8}.iteritems():
+ for i, val in iteritems({1:None, 2:4, 3:8}):
self.assertEqual(c.field_for('#rating', i), val)
del cache2
@@ -191,14 +192,14 @@ class WritingTest(BaseTest):
self.assertEqual(cache.set_field('#series', {2:'Series [0]'}), {2})
cache2 = self.init_cache(cl)
for c in (cache, cache2):
- for i, val in {1:'A Series One', 2:'A Series One', 3:'Series'}.iteritems():
+ for i, val in iteritems({1:'A Series One', 2:'A Series One', 3:'Series'}):
self.assertEqual(c.field_for('series', i), val)
cs_indices = {1:c.field_for('#series_index', 1), 3:c.field_for('#series_index', 3)}
for i in (1, 2, 3):
self.assertEqual(c.field_for('#series', i), 'Series')
- for i, val in {1:2, 2:1, 3:3}.iteritems():
+ for i, val in iteritems({1:2, 2:1, 3:3}):
self.assertEqual(c.field_for('series_index', i), val)
- for i, val in {1:cs_indices[1], 2:0, 3:cs_indices[3]}.iteritems():
+ for i, val in iteritems({1:cs_indices[1], 2:0, 3:cs_indices[3]}):
self.assertEqual(c.field_for('#series_index', i), val)
del cache2
@@ -461,13 +462,13 @@ class WritingTest(BaseTest):
tmap = cache.get_id_map('tags')
self.assertEqual(cache.remove_items('tags', tmap), {1, 2})
tmap = cache.get_id_map('#tags')
- t = {v:k for k, v in tmap.iteritems()}['My Tag Two']
+ t = {v:k for k, v in iteritems(tmap)}['My Tag Two']
self.assertEqual(cache.remove_items('#tags', (t,)), {1, 2})
smap = cache.get_id_map('series')
self.assertEqual(cache.remove_items('series', smap), {1, 2})
smap = cache.get_id_map('#series')
- s = {v:k for k, v in smap.iteritems()}['My Series Two']
+ s = {v:k for k, v in iteritems(smap)}['My Series Two']
self.assertEqual(cache.remove_items('#series', (s,)), {1})
for c in (cache, self.init_cache()):
@@ -507,7 +508,7 @@ class WritingTest(BaseTest):
for c in (cache, c2):
self.assertEqual(c.field_for('tags', 1), ())
self.assertEqual(c.field_for('tags', 2), ('b', 'a'))
- self.assertNotIn('c', set(c.get_id_map('tags').itervalues()))
+ self.assertNotIn('c', set(itervalues(c.get_id_map('tags'))))
self.assertEqual(c.field_for('series', 1), None)
self.assertEqual(c.field_for('series', 2), 'a')
self.assertEqual(c.field_for('series_index', 1), 1.0)
@@ -520,9 +521,9 @@ class WritingTest(BaseTest):
cl = self.cloned_library
cache = self.init_cache(cl)
# Check that renaming authors updates author sort and path
- a = {v:k for k, v in cache.get_id_map('authors').iteritems()}['Unknown']
+ a = {v:k for k, v in iteritems(cache.get_id_map('authors'))}['Unknown']
self.assertEqual(cache.rename_items('authors', {a:'New Author'})[0], {3})
- a = {v:k for k, v in cache.get_id_map('authors').iteritems()}['Author One']
+ a = {v:k for k, v in iteritems(cache.get_id_map('authors'))}['Author One']
self.assertEqual(cache.rename_items('authors', {a:'Author Two'})[0], {1, 2})
for c in (cache, self.init_cache(cl)):
self.assertEqual(c.all_field_names('authors'), {'New Author', 'Author Two'})
@@ -531,7 +532,7 @@ class WritingTest(BaseTest):
self.assertEqual(c.field_for('authors', 1), ('Author Two',))
self.assertEqual(c.field_for('author_sort', 1), 'Two, Author')
- t = {v:k for k, v in cache.get_id_map('tags').iteritems()}['Tag One']
+ t = {v:k for k, v in iteritems(cache.get_id_map('tags'))}['Tag One']
# Test case change
self.assertEqual(cache.rename_items('tags', {t:'tag one'}), ({1, 2}, {t:t}))
for c in (cache, self.init_cache(cl)):
@@ -551,14 +552,14 @@ class WritingTest(BaseTest):
self.assertEqual(set(c.field_for('tags', 1)), {'Tag Two', 'News'})
self.assertEqual(set(c.field_for('tags', 2)), {'Tag Two'})
# Test on a custom column
- t = {v:k for k, v in cache.get_id_map('#tags').iteritems()}['My Tag One']
+ t = {v:k for k, v in iteritems(cache.get_id_map('#tags'))}['My Tag One']
self.assertEqual(cache.rename_items('#tags', {t:'My Tag Two'})[0], {2})
for c in (cache, self.init_cache(cl)):
self.assertEqual(c.all_field_names('#tags'), {'My Tag Two'})
self.assertEqual(set(c.field_for('#tags', 2)), {'My Tag Two'})
# Test a Many-one field
- s = {v:k for k, v in cache.get_id_map('series').iteritems()}['A Series One']
+ s = {v:k for k, v in iteritems(cache.get_id_map('series'))}['A Series One']
# Test case change
self.assertEqual(cache.rename_items('series', {s:'a series one'}), ({1, 2}, {s:s}))
for c in (cache, self.init_cache(cl)):
@@ -574,7 +575,7 @@ class WritingTest(BaseTest):
self.assertEqual(c.field_for('series', 2), 'series')
self.assertEqual(c.field_for('series_index', 1), 2.0)
- s = {v:k for k, v in cache.get_id_map('#series').iteritems()}['My Series One']
+ s = {v:k for k, v in iteritems(cache.get_id_map('#series'))}['My Series One']
# Test custom column with rename to existing
self.assertEqual(cache.rename_items('#series', {s:'My Series Two'})[0], {2})
for c in (cache, self.init_cache(cl)):
@@ -585,7 +586,7 @@ class WritingTest(BaseTest):
# Test renaming many-many items to multiple items
cache = self.init_cache(self.cloned_library)
- t = {v:k for k, v in cache.get_id_map('tags').iteritems()}['Tag One']
+ t = {v:k for k, v in iteritems(cache.get_id_map('tags'))}['Tag One']
affected_books, id_map = cache.rename_items('tags', {t:'Something, Else, Entirely'})
self.assertEqual({1, 2}, affected_books)
tmap = cache.get_id_map('tags')
@@ -600,7 +601,7 @@ class WritingTest(BaseTest):
# Test with restriction
cache = self.init_cache()
cache.set_field('tags', {1:'a,b,c', 2:'x,y,z', 3:'a,x,z'})
- tmap = {v:k for k, v in cache.get_id_map('tags').iteritems()}
+ tmap = {v:k for k, v in iteritems(cache.get_id_map('tags'))}
self.assertEqual(cache.rename_items('tags', {tmap['a']:'r'}, restrict_to_book_ids=()), (set(), {}))
self.assertEqual(cache.rename_items('tags', {tmap['a']:'r', tmap['b']:'q'}, restrict_to_book_ids=(1,))[0], {1})
self.assertEqual(cache.rename_items('tags', {tmap['x']:'X'}, restrict_to_book_ids=(2,))[0], {2})
@@ -657,7 +658,7 @@ class WritingTest(BaseTest):
ldata = {aid:str(aid) for aid in adata}
self.assertEqual({1,2,3}, cache.set_link_for_authors(ldata))
for c in (cache, self.init_cache()):
- self.assertEqual(ldata, {aid:d['link'] for aid, d in c.author_data().iteritems()})
+ self.assertEqual(ldata, {aid:d['link'] for aid, d in iteritems(c.author_data())})
self.assertEqual({3}, cache.set_link_for_authors({aid:'xxx' if aid == max(adata) else str(aid) for aid in adata}),
'Setting the author link to the same value as before, incorrectly marked some books as dirty')
sdata = {aid:'%s, changed' % aid for aid in adata}
@@ -709,7 +710,7 @@ class WritingTest(BaseTest):
conn.execute('INSERT INTO tags (name) VALUES ("t")')
norm = conn.last_insert_rowid()
conn.execute('DELETE FROM books_tags_link')
- for book_id, vals in {1:(lid, uid), 2:(uid, mid), 3:(lid, norm)}.iteritems():
+ for book_id, vals in iteritems({1:(lid, uid), 2:(uid, mid), 3:(lid, norm)}):
conn.executemany('INSERT INTO books_tags_link (book,tag) VALUES (?,?)',
tuple((book_id, x) for x in vals))
cache.reload_from_db()
diff --git a/src/calibre/db/utils.py b/src/calibre/db/utils.py
index 5d02367248..d9d2b48893 100644
--- a/src/calibre/db/utils.py
+++ b/src/calibre/db/utils.py
@@ -9,7 +9,7 @@ __copyright__ = '2013, Kovid Goyal '
import os, errno, sys, re
from locale import localeconv
from collections import OrderedDict, namedtuple
-from polyglot.builtins import map, unicode_type, string_or_bytes
+from polyglot.builtins import iteritems, itervalues, map, unicode_type, string_or_bytes
from threading import Lock
from calibre import as_unicode, prints
@@ -208,7 +208,7 @@ class ThumbnailCache(object):
def _invalidate_sizes(self):
if self.size_changed:
size = self.thumbnail_size
- remove = (key for key, entry in self.items.iteritems() if size != entry.thumbnail_size)
+ remove = (key for key, entry in iteritems(self.items) if size != entry.thumbnail_size)
for key in remove:
self._remove(key)
self.size_changed = False
@@ -365,7 +365,7 @@ class ThumbnailCache(object):
pass
if not hasattr(self, 'total_size'):
self._load_index()
- for entry in self.items.itervalues():
+ for entry in itervalues(self.items):
self._do_delete(entry.path)
self.total_size = 0
self.items = OrderedDict()
diff --git a/src/calibre/db/view.py b/src/calibre/db/view.py
index fc773ac680..7fd51e783d 100644
--- a/src/calibre/db/view.py
+++ b/src/calibre/db/view.py
@@ -9,7 +9,8 @@ __docformat__ = 'restructuredtext en'
import weakref, operator, numbers
from functools import partial
-from polyglot.builtins import map, unicode_type, range, zip
+from polyglot.builtins import (iteritems, iterkeys, itervalues, map,
+ unicode_type, range, zip)
from calibre.ebooks.metadata import title_sort
from calibre.utils.config_base import tweaks, prefs
@@ -71,7 +72,7 @@ def format_is_multiple(x, sep=',', repl=None):
def format_identifiers(x):
if not x:
return None
- return ','.join('%s:%s'%(k, v) for k, v in x.iteritems())
+ return ','.join('%s:%s'%(k, v) for k, v in iteritems(x))
class View(object):
@@ -88,7 +89,7 @@ class View(object):
self.search_restriction_name = self.base_restriction_name = ''
self._field_getters = {}
self.column_count = len(cache.backend.FIELD_MAP)
- for col, idx in cache.backend.FIELD_MAP.iteritems():
+ for col, idx in iteritems(cache.backend.FIELD_MAP):
label, fmt = col, lambda x:x
func = {
'id': self._get_id,
@@ -373,14 +374,14 @@ class View(object):
self.marked_ids = dict.fromkeys(id_dict, u'true')
else:
# Ensure that all the items in the dict are text
- self.marked_ids = dict(zip(id_dict.iterkeys(), map(unicode_type,
- id_dict.itervalues())))
+ self.marked_ids = dict(zip(iterkeys(id_dict), map(unicode_type,
+ itervalues(id_dict))))
# This invalidates all searches in the cache even though the cache may
# be shared by multiple views. This is not ideal, but...
cmids = set(self.marked_ids)
self.cache.clear_search_caches(old_marked_ids | cmids)
if old_marked_ids != cmids:
- for funcref in self.marked_listeners.itervalues():
+ for funcref in itervalues(self.marked_listeners):
func = funcref()
if func is not None:
func(old_marked_ids, cmids)
diff --git a/src/calibre/db/write.py b/src/calibre/db/write.py
index 5670fff3f5..abc89e3353 100644
--- a/src/calibre/db/write.py
+++ b/src/calibre/db/write.py
@@ -10,7 +10,7 @@ __docformat__ = 'restructuredtext en'
import re
from functools import partial
from datetime import datetime
-from polyglot.builtins import unicode_type, zip
+from polyglot.builtins import iteritems, itervalues, unicode_type, zip
from calibre.constants import preferred_encoding
from calibre.ebooks.metadata import author_to_author_sort, title_sort
@@ -131,7 +131,7 @@ def adapt_identifiers(to_tuple, x):
if not isinstance(x, dict):
x = {k:v for k, v in (y.partition(':')[0::2] for y in to_tuple(x))}
ans = {}
- for k, v in x.iteritems():
+ for k, v in iteritems(x):
k, v = clean_identifier(k, v)
if k and v:
ans[k] = v
@@ -194,7 +194,7 @@ def get_adapter(name, metadata):
def one_one_in_books(book_id_val_map, db, field, *args):
'Set a one-one field in the books table'
if book_id_val_map:
- sequence = ((sqlite_datetime(v), k) for k, v in book_id_val_map.iteritems())
+ sequence = ((sqlite_datetime(v), k) for k, v in iteritems(book_id_val_map))
db.executemany(
'UPDATE books SET %s=? WHERE id=?'%field.metadata['column'], sequence)
field.table.book_col_map.update(book_id_val_map)
@@ -210,23 +210,23 @@ def set_title(book_id_val_map, db, field, *args):
ans = one_one_in_books(book_id_val_map, db, field, *args)
# Set the title sort field
field.title_sort_field.writer.set_books(
- {k:title_sort(v) for k, v in book_id_val_map.iteritems()}, db)
+ {k:title_sort(v) for k, v in iteritems(book_id_val_map)}, db)
return ans
def one_one_in_other(book_id_val_map, db, field, *args):
'Set a one-one field in the non-books table, like comments'
- deleted = tuple((k,) for k, v in book_id_val_map.iteritems() if v is None)
+ deleted = tuple((k,) for k, v in iteritems(book_id_val_map) if v is None)
if deleted:
db.executemany('DELETE FROM %s WHERE book=?'%field.metadata['table'],
deleted)
for book_id in deleted:
field.table.book_col_map.pop(book_id[0], None)
- updated = {k:v for k, v in book_id_val_map.iteritems() if v is not None}
+ updated = {k:v for k, v in iteritems(book_id_val_map) if v is not None}
if updated:
db.executemany('INSERT OR REPLACE INTO %s(book,%s) VALUES (?,?)'%(
field.metadata['table'], field.metadata['column']),
- ((k, sqlite_datetime(v)) for k, v in updated.iteritems()))
+ ((k, sqlite_datetime(v)) for k, v in iteritems(updated)))
field.table.book_col_map.update(updated)
return set(book_id_val_map)
@@ -234,7 +234,7 @@ def one_one_in_other(book_id_val_map, db, field, *args):
def custom_series_index(book_id_val_map, db, field, *args):
series_field = field.series_field
sequence = []
- for book_id, sidx in book_id_val_map.iteritems():
+ for book_id, sidx in iteritems(book_id_val_map):
if sidx is None:
sidx = 1.0
ids = series_field.ids_for_book(book_id)
@@ -285,12 +285,12 @@ def get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
def change_case(case_changes, dirtied, db, table, m, is_authors=False):
if is_authors:
vals = ((val.replace(',', '|'), item_id) for item_id, val in
- case_changes.iteritems())
+ iteritems(case_changes))
else:
- vals = ((val, item_id) for item_id, val in case_changes.iteritems())
+ vals = ((val, item_id) for item_id, val in iteritems(case_changes))
db.executemany(
'UPDATE %s SET %s=? WHERE id=?'%(m['table'], m['column']), vals)
- for item_id, val in case_changes.iteritems():
+ for item_id, val in iteritems(case_changes):
table.id_map[item_id] = val
dirtied.update(table.col_book_map[item_id])
if is_authors:
@@ -306,14 +306,14 @@ def many_one(book_id_val_map, db, field, allow_case_change, *args):
# Map values to db ids, including any new values
kmap = safe_lower if dt in {'text', 'series'} else lambda x:x
- rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
+ rid_map = {kmap(item):item_id for item_id, item in iteritems(table.id_map)}
if len(rid_map) != len(table.id_map):
# table has some entries that differ only in case, fix it
table.fix_case_duplicates(db)
- rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
+ rid_map = {kmap(item):item_id for item_id, item in iteritems(table.id_map)}
val_map = {None:None}
case_changes = {}
- for val in book_id_val_map.itervalues():
+ for val in itervalues(book_id_val_map):
if val is not None:
get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
case_changes, val_map)
@@ -321,17 +321,17 @@ def many_one(book_id_val_map, db, field, allow_case_change, *args):
if case_changes:
change_case(case_changes, dirtied, db, table, m)
- book_id_item_id_map = {k:val_map[v] for k, v in book_id_val_map.iteritems()}
+ book_id_item_id_map = {k:val_map[v] for k, v in iteritems(book_id_val_map)}
# Ignore those items whose value is the same as the current value
- book_id_item_id_map = {k:v for k, v in book_id_item_id_map.iteritems()
+ book_id_item_id_map = {k:v for k, v in iteritems(book_id_item_id_map)
if v != table.book_col_map.get(k, None)}
dirtied |= set(book_id_item_id_map)
# Update the book->col and col->book maps
deleted = set()
updated = {}
- for book_id, item_id in book_id_item_id_map.iteritems():
+ for book_id, item_id in iteritems(book_id_item_id_map):
old_item_id = table.book_col_map.get(book_id, None)
if old_item_id is not None:
table.col_book_map[old_item_id].discard(book_id)
@@ -355,7 +355,7 @@ def many_one(book_id_val_map, db, field, allow_case_change, *args):
)
db.executemany(sql.format(table.link_table, m['link_column']),
((book_id, book_id, item_id) for book_id, item_id in
- updated.iteritems()))
+ iteritems(updated)))
# Remove no longer used items
remove = {item_id for item_id in table.id_map if not
@@ -392,15 +392,15 @@ def many_many(book_id_val_map, db, field, allow_case_change, *args):
# Map values to db ids, including any new values
kmap = safe_lower if dt == 'text' else lambda x:x
- rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
+ rid_map = {kmap(item):item_id for item_id, item in iteritems(table.id_map)}
if len(rid_map) != len(table.id_map):
# table has some entries that differ only in case, fix it
table.fix_case_duplicates(db)
- rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
+ rid_map = {kmap(item):item_id for item_id, item in iteritems(table.id_map)}
val_map = {}
case_changes = {}
- book_id_val_map = {k:uniq(vals, kmap) for k, vals in book_id_val_map.iteritems()}
- for vals in book_id_val_map.itervalues():
+ book_id_val_map = {k:uniq(vals, kmap) for k, vals in iteritems(book_id_val_map)}
+ for vals in itervalues(book_id_val_map):
for val in vals:
get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
case_changes, val_map, is_authors=is_authors)
@@ -408,7 +408,7 @@ def many_many(book_id_val_map, db, field, allow_case_change, *args):
if case_changes:
change_case(case_changes, dirtied, db, table, m, is_authors=is_authors)
if is_authors:
- for item_id, val in case_changes.iteritems():
+ for item_id, val in iteritems(case_changes):
for book_id in table.col_book_map[item_id]:
current_sort = field.db_author_sort_for_book(book_id)
new_sort = field.author_sort_for_book(book_id)
@@ -418,17 +418,17 @@ def many_many(book_id_val_map, db, field, allow_case_change, *args):
field.author_sort_field.writer.set_books({book_id:new_sort}, db)
book_id_item_id_map = {k:tuple(val_map[v] for v in vals)
- for k, vals in book_id_val_map.iteritems()}
+ for k, vals in iteritems(book_id_val_map)}
# Ignore those items whose value is the same as the current value
- book_id_item_id_map = {k:v for k, v in book_id_item_id_map.iteritems()
+ book_id_item_id_map = {k:v for k, v in iteritems(book_id_item_id_map)
if v != table.book_col_map.get(k, None)}
dirtied |= set(book_id_item_id_map)
# Update the book->col and col->book maps
deleted = set()
updated = {}
- for book_id, item_ids in book_id_item_id_map.iteritems():
+ for book_id, item_ids in iteritems(book_id_item_id_map):
old_item_ids = table.book_col_map.get(book_id, None)
if old_item_ids:
for old_item_id in old_item_ids:
@@ -448,7 +448,7 @@ def many_many(book_id_val_map, db, field, allow_case_change, *args):
((k,) for k in deleted))
if updated:
vals = (
- (book_id, val) for book_id, vals in updated.iteritems()
+ (book_id, val) for book_id, vals in iteritems(updated)
for val in vals
)
db.executemany('DELETE FROM %s WHERE book=?'%table.link_table,
@@ -481,7 +481,7 @@ def many_many(book_id_val_map, db, field, allow_case_change, *args):
def identifiers(book_id_val_map, db, field, *args): # {{{
table = field.table
updates = set()
- for book_id, identifiers in book_id_val_map.iteritems():
+ for book_id, identifiers in iteritems(book_id_val_map):
if book_id not in table.book_col_map:
table.book_col_map[book_id] = {}
current_ids = table.book_col_map[book_id]
@@ -490,7 +490,7 @@ def identifiers(book_id_val_map, db, field, *args): # {{{
table.col_book_map.get(key, set()).discard(book_id)
current_ids.pop(key, None)
current_ids.update(identifiers)
- for key, val in identifiers.iteritems():
+ for key, val in iteritems(identifiers):
if key not in table.col_book_map:
table.col_book_map[key] = set()
table.col_book_map[key].add(book_id)
@@ -538,7 +538,7 @@ class Writer(object):
def set_books(self, book_id_val_map, db, allow_case_change=True):
book_id_val_map = {k:self.adapter(v) for k, v in
- book_id_val_map.iteritems() if self.accept_vals(v)}
+ iteritems(book_id_val_map) if self.accept_vals(v)}
if not book_id_val_map:
return set()
dirtied = self.set_books_func(book_id_val_map, db, self.field,
@@ -548,7 +548,7 @@ class Writer(object):
def set_books_for_enum(self, book_id_val_map, db, field,
allow_case_change):
allowed = set(field.metadata['display']['enum_values'])
- book_id_val_map = {k:v for k, v in book_id_val_map.iteritems() if v is
+ book_id_val_map = {k:v for k, v in iteritems(book_id_val_map) if v is
None or v in allowed}
if not book_id_val_map:
return set()
diff --git a/src/calibre/devices/kobo/driver.py b/src/calibre/devices/kobo/driver.py
index 2a33f9107f..da71d41885 100644
--- a/src/calibre/devices/kobo/driver.py
+++ b/src/calibre/devices/kobo/driver.py
@@ -32,7 +32,7 @@ from calibre import prints, fsync
from calibre.ptempfile import PersistentTemporaryFile
from calibre.constants import DEBUG
from calibre.utils.config_base import prefs
-from polyglot.builtins import unicode_type, string_or_bytes
+from polyglot.builtins import iteritems, itervalues, unicode_type, string_or_bytes
EPUB_EXT = '.epub'
KEPUB_EXT = '.kepub'
@@ -407,7 +407,7 @@ class KOBO(USBMS):
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
- for idx in sorted(bl_cache.itervalues(), reverse=True):
+ for idx in sorted(itervalues(bl_cache), reverse=True):
if idx is not None:
need_sync = True
del bl[idx]
@@ -908,13 +908,13 @@ class KOBO(USBMS):
ContentID = self.contentid_from_path(book.path, ContentType)
- if category in readstatuslist.keys():
+ if category in list(readstatuslist.keys()):
# Manage ReadStatus
self.set_readstatus(connection, ContentID, readstatuslist.get(category))
elif category == 'Shortlist' and self.dbversion >= 14:
# Manage FavouritesIndex/Shortlist
self.set_favouritesindex(connection, ContentID)
- elif category in accessibilitylist.keys():
+ elif category in list(accessibilitylist.keys()):
# Do not manage the Accessibility List
pass
else: # No collections
@@ -1964,7 +1964,7 @@ class KOBOTOUCH(KOBO):
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
- for idx in sorted(bl_cache.itervalues(), reverse=True):
+ for idx in sorted(itervalues(bl_cache), reverse=True):
if idx is not None:
if not os.path.exists(self.normalize_path(os.path.join(prefix, bl[idx].lpath))) or not bl[idx].contentID:
need_sync = True
@@ -2138,7 +2138,7 @@ class KOBOTOUCH(KOBO):
from calibre.ebooks.oeb.base import OEB_STYLES
is_dirty = False
- for cssname, mt in container.mime_map.iteritems():
+ for cssname, mt in iteritems(container.mime_map):
if mt in OEB_STYLES:
newsheet = container.parsed(cssname)
oldrules = len(newsheet.cssRules)
@@ -2447,7 +2447,7 @@ class KOBOTOUCH(KOBO):
debug_print(' Setting bookshelf on device')
self.set_bookshelf(connection, book, category)
category_added = True
- elif category in readstatuslist.keys():
+ elif category in list(readstatuslist.keys()):
debug_print("KoboTouch:update_device_database_collections - about to set_readstatus - category='%s'"%(category, ))
# Manage ReadStatus
self.set_readstatus(connection, book.contentID, readstatuslist.get(category))
@@ -2462,7 +2462,7 @@ class KOBOTOUCH(KOBO):
debug_print(' and about to set it - %s'%book.title)
self.set_favouritesindex(connection, book.contentID)
category_added = True
- elif category in accessibilitylist.keys():
+ elif category in list(accessibilitylist.keys()):
# Do not manage the Accessibility List
pass
diff --git a/src/calibre/devices/mtp/defaults.py b/src/calibre/devices/mtp/defaults.py
index ed72ddaad1..9bcd6f2909 100644
--- a/src/calibre/devices/mtp/defaults.py
+++ b/src/calibre/devices/mtp/defaults.py
@@ -10,6 +10,7 @@ __docformat__ = 'restructuredtext en'
import traceback, re
from calibre.constants import iswindows
+from polyglot.builtins import iteritems
class DeviceDefaults(object):
@@ -47,7 +48,7 @@ class DeviceDefaults(object):
for rule in self.rules:
tests = rule[0]
matches = True
- for k, v in tests.iteritems():
+ for k, v in iteritems(tests):
if k == 'vendor' and v != vid:
matches = False
break
diff --git a/src/calibre/devices/mtp/driver.py b/src/calibre/devices/mtp/driver.py
index 725f497baa..58a19b19c7 100644
--- a/src/calibre/devices/mtp/driver.py
+++ b/src/calibre/devices/mtp/driver.py
@@ -17,7 +17,7 @@ from calibre.devices.mtp.base import debug
from calibre.devices.mtp.defaults import DeviceDefaults
from calibre.ptempfile import SpooledTemporaryFile, PersistentTemporaryDirectory
from calibre.utils.filenames import shorten_components_to
-from polyglot.builtins import unicode_type, zip
+from polyglot.builtins import iteritems, itervalues, unicode_type, zip
BASE = importlib.import_module('calibre.devices.mtp.%s.driver'%(
'windows' if iswindows else 'unix')).MTP_DEVICE
@@ -276,7 +276,7 @@ class MTP_DEVICE(BASE):
book.path = mtp_file.mtp_id_path
# Remove books in the cache that no longer exist
- for idx in sorted(relpath_cache.itervalues(), reverse=True):
+ for idx in sorted(itervalues(relpath_cache), reverse=True):
del bl[idx]
need_sync = True
@@ -546,7 +546,7 @@ class MTP_DEVICE(BASE):
def get_user_blacklisted_devices(self):
bl = frozenset(self.prefs['blacklist'])
ans = {}
- for dev, x in self.prefs['history'].iteritems():
+ for dev, x in iteritems(self.prefs['history']):
name = x[0]
if dev in bl:
ans[dev] = name
diff --git a/src/calibre/devices/mtp/filesystem_cache.py b/src/calibre/devices/mtp/filesystem_cache.py
index f5a3ef690d..1d2744ba98 100644
--- a/src/calibre/devices/mtp/filesystem_cache.py
+++ b/src/calibre/devices/mtp/filesystem_cache.py
@@ -10,7 +10,7 @@ __docformat__ = 'restructuredtext en'
import weakref, sys, json
from collections import deque
from operator import attrgetter
-from polyglot.builtins import map, unicode_type
+from polyglot.builtins import itervalues, map, unicode_type
from datetime import datetime
from calibre import human_readable, prints, force_unicode
@@ -201,7 +201,7 @@ class FilesystemCache(object):
for entry in entries:
FileOrFolder(entry, self)
- for item in self.id_map.itervalues():
+ for item in itervalues(self.id_map):
try:
p = item.parent
except KeyError:
@@ -227,7 +227,7 @@ class FilesystemCache(object):
return e
def iterebooks(self, storage_id):
- for x in self.id_map.itervalues():
+ for x in itervalues(self.id_map):
if x.storage_id == storage_id and x.is_ebook:
if x.parent_id == storage_id and x.name.lower().endswith('.txt'):
continue # Ignore .txt files in the root
diff --git a/src/calibre/devices/mtp/windows/driver.py b/src/calibre/devices/mtp/windows/driver.py
index 23c445570b..f99a0309c6 100644
--- a/src/calibre/devices/mtp/windows/driver.py
+++ b/src/calibre/devices/mtp/windows/driver.py
@@ -9,7 +9,7 @@ __docformat__ = 'restructuredtext en'
import time, threading, traceback
from functools import wraps, partial
-from polyglot.builtins import unicode_type, zip
+from polyglot.builtins import iteritems, iterkeys, itervalues, unicode_type, zip
from itertools import chain
from calibre import as_unicode, prints, force_unicode
@@ -107,7 +107,7 @@ class MTP_DEVICE(MTPDeviceBase):
# Get device data for detected devices. If there is an error, we will
# try again for that device the next time this method is called.
- for dev in tuple(self.detected_devices.iterkeys()):
+ for dev in tuple(iterkeys(self.detected_devices)):
data = self.detected_devices.get(dev, None)
if data is None or data is False:
try:
@@ -130,7 +130,7 @@ class MTP_DEVICE(MTPDeviceBase):
self.currently_connected_pnp_id in self.detected_devices
else None)
- for dev, data in self.detected_devices.iteritems():
+ for dev, data in iteritems(self.detected_devices):
if dev in self.blacklisted_devices or dev in self.ejected_devices:
# Ignore blacklisted and ejected devices
continue
@@ -267,10 +267,10 @@ class MTP_DEVICE(MTPDeviceBase):
self._currently_getting_sid = unicode_type(storage_id)
id_map = self.dev.get_filesystem(storage_id, partial(
self._filesystem_callback, {}))
- for x in id_map.itervalues():
+ for x in itervalues(id_map):
x['storage_id'] = storage_id
all_storage.append(storage)
- items.append(id_map.itervalues())
+ items.append(itervalues(id_map))
self._filesystem_cache = FilesystemCache(all_storage, chain(*items))
debug('Filesystem metadata loaded in %g seconds (%d objects)'%(
time.time()-st, len(self._filesystem_cache)))
diff --git a/src/calibre/devices/scanner.py b/src/calibre/devices/scanner.py
index ca3014cda9..1eb5a24b95 100644
--- a/src/calibre/devices/scanner.py
+++ b/src/calibre/devices/scanner.py
@@ -13,7 +13,7 @@ from threading import Lock
from calibre import prints, as_unicode
from calibre.constants import (iswindows, isosx, plugins, islinux, isfreebsd,
isnetbsd)
-from polyglot.builtins import range
+from polyglot.builtins import iterkeys, range
osx_scanner = linux_scanner = freebsd_scanner = netbsd_scanner = None
@@ -77,7 +77,7 @@ class LibUSBScanner(object):
dev = USBDevice(*dev)
dev.busnum, dev.devnum = fingerprint[:2]
ans.add(dev)
- extra = set(self.libusb.cache.iterkeys()) - seen
+ extra = set(iterkeys(self.libusb.cache)) - seen
for x in extra:
self.libusb.cache.pop(x, None)
return ans
diff --git a/src/calibre/devices/usbms/device.py b/src/calibre/devices/usbms/device.py
index 401728928e..a7eb0b3567 100644
--- a/src/calibre/devices/usbms/device.py
+++ b/src/calibre/devices/usbms/device.py
@@ -23,7 +23,7 @@ from calibre.devices.errors import DeviceError
from calibre.devices.usbms.deviceconfig import DeviceConfig
from calibre.constants import iswindows, islinux, isosx, isfreebsd, plugins
from calibre.utils.filenames import ascii_filename as sanitize
-from polyglot.builtins import string_or_bytes
+from polyglot.builtins import iteritems, string_or_bytes
if isosx:
usbobserver, usbobserver_err = plugins['usbobserver']
@@ -404,7 +404,7 @@ class Device(DeviceConfig, DevicePlugin):
bsd_drives = self.osx_bsd_names()
drives = self.osx_sort_names(bsd_drives.copy())
mount_map = usbobserver.get_mounted_filesystems()
- drives = {k: mount_map.get(v) for k, v in drives.iteritems()}
+ drives = {k: mount_map.get(v) for k, v in iteritems(drives)}
if DEBUG:
print()
from pprint import pprint
diff --git a/src/calibre/devices/usbms/driver.py b/src/calibre/devices/usbms/driver.py
index 0efd62214d..c475800ab3 100644
--- a/src/calibre/devices/usbms/driver.py
+++ b/src/calibre/devices/usbms/driver.py
@@ -20,7 +20,7 @@ from calibre.devices.usbms.cli import CLI
from calibre.devices.usbms.device import Device
from calibre.devices.usbms.books import BookList, Book
from calibre.ebooks.metadata.book.json_codec import JsonCodec
-from polyglot.builtins import unicode_type, string_or_bytes
+from polyglot.builtins import itervalues, unicode_type, string_or_bytes
BASE_TIME = None
@@ -281,7 +281,7 @@ class USBMS(CLI, Device):
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
- for idx in sorted(bl_cache.itervalues(), reverse=True):
+ for idx in sorted(itervalues(bl_cache), reverse=True):
if idx is not None:
need_sync = True
del bl[idx]
diff --git a/src/calibre/devices/winusb.py b/src/calibre/devices/winusb.py
index 5b1a005328..0a7647cce0 100644
--- a/src/calibre/devices/winusb.py
+++ b/src/calibre/devices/winusb.py
@@ -15,7 +15,7 @@ from ctypes import (
)
from ctypes.wintypes import DWORD, WORD, ULONG, LPCWSTR, HWND, BOOL, LPWSTR, UINT, BYTE, HANDLE, USHORT
from pprint import pprint, pformat
-from polyglot.builtins import map
+from polyglot.builtins import iteritems, itervalues, map
from calibre import prints, as_unicode
@@ -652,13 +652,13 @@ def get_volume_information(drive_letter):
'max_component_length': max_component_length.value,
}
- for name, num in {'FILE_CASE_PRESERVED_NAMES':0x00000002, 'FILE_CASE_SENSITIVE_SEARCH':0x00000001, 'FILE_FILE_COMPRESSION':0x00000010,
+ for name, num in iteritems({'FILE_CASE_PRESERVED_NAMES':0x00000002, 'FILE_CASE_SENSITIVE_SEARCH':0x00000001, 'FILE_FILE_COMPRESSION':0x00000010,
'FILE_NAMED_STREAMS':0x00040000, 'FILE_PERSISTENT_ACLS':0x00000008, 'FILE_READ_ONLY_VOLUME':0x00080000,
'FILE_SEQUENTIAL_WRITE_ONCE':0x00100000, 'FILE_SUPPORTS_ENCRYPTION':0x00020000, 'FILE_SUPPORTS_EXTENDED_ATTRIBUTES':0x00800000,
'FILE_SUPPORTS_HARD_LINKS':0x00400000, 'FILE_SUPPORTS_OBJECT_IDS':0x00010000, 'FILE_SUPPORTS_OPEN_BY_FILE_ID':0x01000000,
'FILE_SUPPORTS_REPARSE_POINTS':0x00000080, 'FILE_SUPPORTS_SPARSE_FILES':0x00000040, 'FILE_SUPPORTS_TRANSACTIONS':0x00200000,
'FILE_SUPPORTS_USN_JOURNAL':0x02000000, 'FILE_UNICODE_ON_DISK':0x00000004, 'FILE_VOLUME_IS_COMPRESSED':0x00008000,
- 'FILE_VOLUME_QUOTAS':0x00000020}.iteritems():
+ 'FILE_VOLUME_QUOTAS':0x00000020}):
ans[name] = bool(num & flags)
return ans
@@ -809,7 +809,7 @@ def get_storage_number_map(drive_types=(DRIVE_REMOVABLE, DRIVE_FIXED), debug=Fal
' Get a mapping of drive letters to storage numbers for all drives on system (of the specified types) '
mask = GetLogicalDrives()
type_map = {letter:GetDriveType(letter + ':' + os.sep) for i, letter in enumerate(string.ascii_uppercase) if mask & (1 << i)}
- drives = (letter for letter, dt in type_map.iteritems() if dt in drive_types)
+ drives = (letter for letter, dt in iteritems(type_map) if dt in drive_types)
ans = defaultdict(list)
for letter in drives:
try:
@@ -819,7 +819,7 @@ def get_storage_number_map(drive_types=(DRIVE_REMOVABLE, DRIVE_FIXED), debug=Fal
if debug:
prints('Failed to get storage number for drive: %s with error: %s' % (letter, as_unicode(err)))
continue
- for val in ans.itervalues():
+ for val in itervalues(ans):
val.sort(key=itemgetter(0))
return dict(ans)
@@ -859,7 +859,7 @@ def get_storage_number_map_alt(debug=False):
if debug:
prints('Failed to get storage number for drive: %s with error: %s' % (name[0], as_unicode(err)))
continue
- for val in ans.itervalues():
+ for val in itervalues(ans):
val.sort(key=itemgetter(0))
return dict(ans)
diff --git a/src/calibre/ebooks/conversion/cli.py b/src/calibre/ebooks/conversion/cli.py
index a1394b592b..10e9ce6b8f 100644
--- a/src/calibre/ebooks/conversion/cli.py
+++ b/src/calibre/ebooks/conversion/cli.py
@@ -17,6 +17,7 @@ from calibre.customize.conversion import OptionRecommendation
from calibre import patheq
from calibre.ebooks.conversion import ConversionUserFeedBack
from calibre.utils.localization import localize_user_manual_link
+from polyglot.builtins import iteritems
USAGE = '%prog ' + _('''\
input_file output_file [options]
@@ -254,7 +255,7 @@ def add_pipeline_options(parser, plumber):
))
- for group, (desc, options) in groups.iteritems():
+ for group, (desc, options) in iteritems(groups):
if group:
group = OptionGroup(parser, group, desc)
parser.add_option_group(group)
diff --git a/src/calibre/ebooks/conversion/plugins/fb2_input.py b/src/calibre/ebooks/conversion/plugins/fb2_input.py
index 879836aa85..42122d3a50 100644
--- a/src/calibre/ebooks/conversion/plugins/fb2_input.py
+++ b/src/calibre/ebooks/conversion/plugins/fb2_input.py
@@ -8,7 +8,7 @@ import os, re
from calibre.customize.conversion import InputFormatPlugin, OptionRecommendation
from calibre import guess_type
-from polyglot.builtins import unicode_type
+from polyglot.builtins import iteritems, unicode_type
FB2NS = 'http://www.gribuser.ru/xml/fictionbook/2.0'
FB21NS = 'http://www.gribuser.ru/xml/fictionbook/2.1'
@@ -103,7 +103,7 @@ class FB2Input(InputFormatPlugin):
notes = {a.get('href')[1:]: a for a in result.xpath('//a[@link_note and @href]') if a.get('href').startswith('#')}
cites = {a.get('link_cite'): a for a in result.xpath('//a[@link_cite]') if not a.get('href', '')}
all_ids = {x for x in result.xpath('//*/@id')}
- for cite, a in cites.iteritems():
+ for cite, a in iteritems(cites):
note = notes.get(cite, None)
if note:
c = 1
diff --git a/src/calibre/ebooks/conversion/plugins/pdf_output.py b/src/calibre/ebooks/conversion/plugins/pdf_output.py
index b0b4ad6bae..ee20c0f1ac 100644
--- a/src/calibre/ebooks/conversion/plugins/pdf_output.py
+++ b/src/calibre/ebooks/conversion/plugins/pdf_output.py
@@ -14,7 +14,7 @@ from calibre.constants import iswindows
from calibre.customize.conversion import (OutputFormatPlugin,
OptionRecommendation)
from calibre.ptempfile import TemporaryDirectory
-from polyglot.builtins import unicode_type
+from polyglot.builtins import iteritems, unicode_type
UNITS = ['millimeter', 'centimeter', 'point', 'inch' , 'pica' , 'didot',
'cicero', 'devicepixel']
@@ -263,7 +263,7 @@ class PDFOutput(OutputFormatPlugin):
self.process_fonts()
if self.opts.pdf_use_document_margins and self.stored_page_margins:
import json
- for href, margins in self.stored_page_margins.iteritems():
+ for href, margins in iteritems(self.stored_page_margins):
item = oeb_book.manifest.hrefs.get(href)
if item is not None:
root = item.data
diff --git a/src/calibre/ebooks/conversion/plugins/rtf_input.py b/src/calibre/ebooks/conversion/plugins/rtf_input.py
index e38c76b2d5..98059a3eca 100644
--- a/src/calibre/ebooks/conversion/plugins/rtf_input.py
+++ b/src/calibre/ebooks/conversion/plugins/rtf_input.py
@@ -5,6 +5,7 @@ __copyright__ = '2008, Kovid Goyal '
import os, glob, re, textwrap
from calibre.customize.conversion import InputFormatPlugin, OptionRecommendation
+from polyglot.builtins import iteritems
border_style_map = {
'single' : 'solid',
@@ -145,7 +146,7 @@ class RTFInput(InputFormatPlugin):
def convert_images(self, imap):
self.default_img = None
- for count, val in imap.iteritems():
+ for count, val in iteritems(imap):
try:
imap[count] = self.convert_image(val)
except:
@@ -210,7 +211,7 @@ class RTFInput(InputFormatPlugin):
css += '\n'+'\n'.join(font_size_classes)
css += '\n' +'\n'.join(color_classes)
- for cls, val in border_styles.iteritems():
+ for cls, val in iteritems(border_styles):
css += '\n\n.%s {\n%s\n}'%(cls, val)
with open(u'styles.css', 'ab') as f:
diff --git a/src/calibre/ebooks/covers.py b/src/calibre/ebooks/covers.py
index 035ca9ad39..a5a1c7df2d 100644
--- a/src/calibre/ebooks/covers.py
+++ b/src/calibre/ebooks/covers.py
@@ -10,7 +10,7 @@ import re, random, unicodedata, numbers
from collections import namedtuple
from contextlib import contextmanager
from math import ceil, sqrt, cos, sin, atan2
-from polyglot.builtins import map, zip, string_or_bytes
+from polyglot.builtins import iteritems, itervalues, map, zip, string_or_bytes
from itertools import chain
from PyQt5.Qt import (
@@ -282,7 +282,7 @@ def preserve_fields(obj, fields):
try:
yield
finally:
- for f, val in mem.iteritems():
+ for f, val in iteritems(mem):
if val is null:
delattr(obj, f)
else:
@@ -324,10 +324,10 @@ def load_color_themes(prefs):
t = default_color_themes.copy()
t.update(prefs.color_themes)
disabled = frozenset(prefs.disabled_color_themes)
- ans = [theme_to_colors(v) for k, v in t.iteritems() if k not in disabled]
+ ans = [theme_to_colors(v) for k, v in iteritems(t) if k not in disabled]
if not ans:
# Ignore disabled and return only the builtin color themes
- ans = [theme_to_colors(v) for k, v in default_color_themes.iteritems()]
+ ans = [theme_to_colors(v) for k, v in iteritems(default_color_themes)]
return ans
@@ -557,14 +557,14 @@ class Blocks(Style):
def all_styles():
return set(
- x.NAME for x in globals().itervalues() if
+ x.NAME for x in itervalues(globals()) if
isinstance(x, type) and issubclass(x, Style) and x is not Style
)
def load_styles(prefs, respect_disabled=True):
disabled = frozenset(prefs.disabled_styles) if respect_disabled else ()
- ans = tuple(x for x in globals().itervalues() if
+ ans = tuple(x for x in itervalues(globals()) if
isinstance(x, type) and issubclass(x, Style) and x is not Style and x.NAME not in disabled)
if not ans and disabled:
# If all styles have been disabled, ignore the disabling and return all
diff --git a/src/calibre/ebooks/css_transform_rules.py b/src/calibre/ebooks/css_transform_rules.py
index 58fc483047..3bd8428c2d 100644
--- a/src/calibre/ebooks/css_transform_rules.py
+++ b/src/calibre/ebooks/css_transform_rules.py
@@ -13,6 +13,7 @@ from css_parser.css import Property, CSSRule
from calibre import force_unicode
from calibre.ebooks import parse_css_length
from calibre.ebooks.oeb.normalize_css import normalizers, safe_parser
+from polyglot.builtins import iteritems
def compile_pat(pat):
@@ -44,7 +45,7 @@ class StyleDeclaration(object):
yield p, None
else:
if p not in self.expanded_properties:
- self.expanded_properties[p] = [Property(k, v, p.literalpriority) for k, v in n(p.name, p.propertyValue).iteritems()]
+ self.expanded_properties[p] = [Property(k, v, p.literalpriority) for k, v in iteritems(n(p.name, p.propertyValue))]
for ep in self.expanded_properties[p]:
yield ep, p
@@ -338,7 +339,7 @@ def export_rules(serialized_rules):
lines = []
for rule in serialized_rules:
lines.extend('# ' + l for l in rule_to_text(rule).splitlines())
- lines.extend('%s: %s' % (k, v.replace('\n', ' ')) for k, v in rule.iteritems() if k in allowed_keys)
+ lines.extend('%s: %s' % (k, v.replace('\n', ' ')) for k, v in iteritems(rule) if k in allowed_keys)
lines.append('')
return '\n'.join(lines).encode('utf-8')
diff --git a/src/calibre/ebooks/docx/block_styles.py b/src/calibre/ebooks/docx/block_styles.py
index 480738fa06..05f5f54692 100644
--- a/src/calibre/ebooks/docx/block_styles.py
+++ b/src/calibre/ebooks/docx/block_styles.py
@@ -8,6 +8,7 @@ __copyright__ = '2013, Kovid Goyal '
import numbers
from collections import OrderedDict
+from polyglot.builtins import iteritems
class Inherit:
@@ -115,11 +116,11 @@ def read_border(parent, dest, XPath, get, border_edges=border_edges, name='pBdr'
for border in XPath('./w:' + name)(parent):
for edge in border_edges:
- for prop, val in read_single_border(border, edge, XPath, get).iteritems():
+ for prop, val in iteritems(read_single_border(border, edge, XPath, get)):
if val is not None:
vals[prop % edge] = val
- for key, val in vals.iteritems():
+ for key, val in iteritems(vals):
setattr(dest, key, val)
diff --git a/src/calibre/ebooks/docx/cleanup.py b/src/calibre/ebooks/docx/cleanup.py
index 30fd9d25d9..6d71805fc0 100644
--- a/src/calibre/ebooks/docx/cleanup.py
+++ b/src/calibre/ebooks/docx/cleanup.py
@@ -7,7 +7,7 @@ __license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal '
import os
-from polyglot.builtins import range
+from polyglot.builtins import iterkeys, itervalues, range
NBSP = '\xa0'
@@ -54,7 +54,7 @@ def merge_run(run):
def liftable(css):
# A is liftable if all its styling would work just as well if it is
# specified on the parent element.
- prefixes = {x.partition('-')[0] for x in css.iterkeys()}
+ prefixes = {x.partition('-')[0] for x in iterkeys(css)}
return not (prefixes - {'text', 'font', 'letter', 'color', 'background'})
@@ -134,7 +134,7 @@ def cleanup_markup(log, root, styles, dest_dir, detect_cover, XPath):
current_run = [span]
# Process dir attributes
- class_map = dict(styles.classes.itervalues())
+ class_map = dict(itervalues(styles.classes))
parents = ('p', 'div') + tuple('h%d' % i for i in range(1, 7))
for parent in root.xpath('//*[(%s)]' % ' or '.join('name()="%s"' % t for t in parents)):
# Ensure that children of rtl parents that are not rtl have an
diff --git a/src/calibre/ebooks/docx/fields.py b/src/calibre/ebooks/docx/fields.py
index 3d7e457019..038ded9cb3 100644
--- a/src/calibre/ebooks/docx/fields.py
+++ b/src/calibre/ebooks/docx/fields.py
@@ -9,6 +9,7 @@ __copyright__ = '2013, Kovid Goyal '
import re
from calibre.ebooks.docx.index import process_index, polish_index_markup
+from polyglot.builtins import iteritems
class Field(object):
@@ -222,7 +223,7 @@ class Fields(object):
def polish_markup(self, object_map):
if not self.index_fields:
return
- rmap = {v:k for k, v in object_map.iteritems()}
+ rmap = {v:k for k, v in iteritems(object_map)}
for idx, blocks in self.index_fields:
polish_index_markup(idx, [rmap[b] for b in blocks])
diff --git a/src/calibre/ebooks/docx/fonts.py b/src/calibre/ebooks/docx/fonts.py
index 0aed536cab..9532d7c078 100644
--- a/src/calibre/ebooks/docx/fonts.py
+++ b/src/calibre/ebooks/docx/fonts.py
@@ -14,7 +14,7 @@ from calibre.utils.filenames import ascii_filename
from calibre.utils.fonts.scanner import font_scanner, NoFonts
from calibre.utils.fonts.utils import panose_to_css_generic_family, is_truetype_font
from calibre.utils.icu import ord_string
-from polyglot.builtins import codepoint_to_chr, range
+from polyglot.builtins import codepoint_to_chr, iteritems, range
Embed = namedtuple('Embed', 'name key subsetted')
@@ -172,7 +172,7 @@ class Fonts(object):
d['font-weight'] = 'bold'
if 'Italic' in variant:
d['font-style'] = 'italic'
- d = ['%s: %s' % (k, v) for k, v in d.iteritems()]
+ d = ['%s: %s' % (k, v) for k, v in iteritems(d)]
d = ';\n\t'.join(d)
defs.append('@font-face {\n\t%s\n}\n' % d)
return '\n'.join(defs)
diff --git a/src/calibre/ebooks/docx/footnotes.py b/src/calibre/ebooks/docx/footnotes.py
index a078b9f57c..15f40fb092 100644
--- a/src/calibre/ebooks/docx/footnotes.py
+++ b/src/calibre/ebooks/docx/footnotes.py
@@ -7,6 +7,7 @@ __license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal '
from collections import OrderedDict
+from polyglot.builtins import iteritems
class Note(object):
@@ -57,10 +58,9 @@ class Footnotes(object):
return None, None
def __iter__(self):
- for anchor, (counter, note) in self.notes.iteritems():
+ for anchor, (counter, note) in iteritems(self.notes):
yield anchor, counter, note
@property
def has_notes(self):
return bool(self.notes)
-
diff --git a/src/calibre/ebooks/docx/images.py b/src/calibre/ebooks/docx/images.py
index a3fcfc8efb..17a4bb08f0 100644
--- a/src/calibre/ebooks/docx/images.py
+++ b/src/calibre/ebooks/docx/images.py
@@ -15,6 +15,7 @@ from calibre.ebooks.docx.names import barename
from calibre.utils.filenames import ascii_filename
from calibre.utils.img import resize_to_fit, image_to_data
from calibre.utils.imghdr import what
+from polyglot.builtins import iteritems, itervalues
class LinkedImageNotFound(ValueError):
@@ -66,7 +67,7 @@ def get_image_properties(parent, XPath, get):
def get_image_margins(elem):
ans = {}
- for w, css in {'L':'left', 'T':'top', 'R':'right', 'B':'bottom'}.iteritems():
+ for w, css in iteritems({'L':'left', 'T':'top', 'R':'right', 'B':'bottom'}):
val = elem.get('dist%s' % w, None)
if val is not None:
try:
@@ -157,7 +158,7 @@ class Images(object):
return raw, base
def unique_name(self, base):
- exists = frozenset(self.used.itervalues())
+ exists = frozenset(itervalues(self.used))
c = 1
name = base
while name in exists:
@@ -242,7 +243,7 @@ class Images(object):
ans = self.pic_to_img(pic, alt, inline, title)
if ans is not None:
if style:
- ans.set('style', '; '.join('%s: %s' % (k, v) for k, v in style.iteritems()))
+ ans.set('style', '; '.join('%s: %s' % (k, v) for k, v in iteritems(style)))
yield ans
# Now process the floats
@@ -253,7 +254,7 @@ class Images(object):
ans = self.pic_to_img(pic, alt, anchor, title)
if ans is not None:
if style:
- ans.set('style', '; '.join('%s: %s' % (k, v) for k, v in style.iteritems()))
+ ans.set('style', '; '.join('%s: %s' % (k, v) for k, v in iteritems(style)))
yield ans
def pict_to_html(self, pict, page):
@@ -275,7 +276,7 @@ class Images(object):
style['margin-left'] = '0' if align == 'left' else 'auto'
style['margin-right'] = 'auto' if align == 'left' else '0'
if style:
- hr.set('style', '; '.join(('%s:%s' % (k, v) for k, v in style.iteritems())))
+ hr.set('style', '; '.join(('%s:%s' % (k, v) for k, v in iteritems(style))))
yield hr
for imagedata in XPath('descendant::v:imagedata[@r:id]')(pict):
diff --git a/src/calibre/ebooks/docx/index.py b/src/calibre/ebooks/docx/index.py
index a4e8e0ec60..38220a1c86 100644
--- a/src/calibre/ebooks/docx/index.py
+++ b/src/calibre/ebooks/docx/index.py
@@ -11,7 +11,7 @@ from operator import itemgetter
from lxml import etree
from calibre.utils.icu import partition_by_first_letter, sort_key
-from polyglot.builtins import unicode_type
+from polyglot.builtins import iteritems, unicode_type
def get_applicable_xe_fields(index, xe_fields, XPath, expand):
@@ -103,7 +103,7 @@ def process_index(field, index, xe_fields, log, XPath, expand):
if heading_text is not None:
groups = partition_by_first_letter(xe_fields, key=itemgetter('text'))
items = []
- for key, fields in groups.iteritems():
+ for key, fields in iteritems(groups):
items.append(key), items.extend(fields)
if styles:
heading_style = styles[0]
diff --git a/src/calibre/ebooks/docx/names.py b/src/calibre/ebooks/docx/names.py
index 3db3f5e961..3238743b65 100644
--- a/src/calibre/ebooks/docx/names.py
+++ b/src/calibre/ebooks/docx/names.py
@@ -11,6 +11,7 @@ import re
from lxml.etree import XPath as X
from calibre.utils.filenames import ascii_text
+from polyglot.builtins import iteritems
# Names {{{
TRANSITIONAL_NAMES = {
@@ -32,7 +33,7 @@ TRANSITIONAL_NAMES = {
STRICT_NAMES = {
k:v.replace('http://schemas.openxmlformats.org/officeDocument/2006', 'http://purl.oclc.org/ooxml/officeDocument')
- for k, v in TRANSITIONAL_NAMES.iteritems()
+ for k, v in iteritems(TRANSITIONAL_NAMES)
}
TRANSITIONAL_NAMESPACES = {
@@ -72,7 +73,7 @@ STRICT_NAMESPACES = {
'http://schemas.openxmlformats.org/officeDocument/2006', 'http://purl.oclc.org/ooxml/officeDocument').replace(
'http://schemas.openxmlformats.org/wordprocessingml/2006', 'http://purl.oclc.org/ooxml/wordprocessingml').replace(
'http://schemas.openxmlformats.org/drawingml/2006', 'http://purl.oclc.org/ooxml/drawingml')
- for k, v in TRANSITIONAL_NAMESPACES.iteritems()
+ for k, v in iteritems(TRANSITIONAL_NAMESPACES)
}
# }}}
@@ -138,7 +139,7 @@ class DOCXNamespace(object):
return self.XPath('|'.join('descendant::%s' % a for a in args))(elem)
def makeelement(self, root, tag, append=True, **attrs):
- ans = root.makeelement(self.expand(tag), **{self.expand(k, sep='_'):v for k, v in attrs.iteritems()})
+ ans = root.makeelement(self.expand(tag), **{self.expand(k, sep='_'):v for k, v in iteritems(attrs)})
if append:
root.append(ans)
return ans
diff --git a/src/calibre/ebooks/docx/numbering.py b/src/calibre/ebooks/docx/numbering.py
index a132cf2233..caad53399d 100644
--- a/src/calibre/ebooks/docx/numbering.py
+++ b/src/calibre/ebooks/docx/numbering.py
@@ -15,6 +15,7 @@ from lxml.html.builder import OL, UL, SPAN
from calibre.ebooks.docx.block_styles import ParagraphStyle
from calibre.ebooks.docx.char_styles import RunStyle, inherit
from calibre.ebooks.metadata import roman
+from polyglot.builtins import iteritems
STYLE_MAP = {
'aiueo': 'hiragana',
@@ -168,7 +169,7 @@ class NumberingDefinition(object):
def copy(self):
ans = NumberingDefinition(self.namespace, an_id=self.abstract_numbering_definition_id)
- for l, lvl in self.levels.iteritems():
+ for l, lvl in iteritems(self.levels):
ans.levels[l] = lvl.copy()
return ans
@@ -224,7 +225,7 @@ class Numbering(object):
if alvl is None:
alvl = Level(self.namespace)
alvl.read_from_xml(lvl, override=True)
- for ilvl, so in start_overrides.iteritems():
+ for ilvl, so in iteritems(start_overrides):
try:
nd.levels[ilvl].start = start_override
except KeyError:
@@ -244,22 +245,22 @@ class Numbering(object):
self.instances[num_id] = create_instance(n, d)
numbering_links = styles.numbering_style_links
- for an_id, style_link in lazy_load.iteritems():
+ for an_id, style_link in iteritems(lazy_load):
num_id = numbering_links[style_link]
self.definitions[an_id] = self.instances[num_id].copy()
- for num_id, (an_id, n) in next_pass.iteritems():
+ for num_id, (an_id, n) in iteritems(next_pass):
d = self.definitions.get(an_id, None)
if d is not None:
self.instances[num_id] = create_instance(n, d)
- for num_id, d in self.instances.iteritems():
+ for num_id, d in iteritems(self.instances):
self.starts[num_id] = {lvl:d.levels[lvl].start for lvl in d.levels}
def get_pstyle(self, num_id, style_id):
d = self.instances.get(num_id, None)
if d is not None:
- for ilvl, lvl in d.levels.iteritems():
+ for ilvl, lvl in iteritems(d.levels):
if lvl.para_link == style_id:
return ilvl
@@ -271,7 +272,7 @@ class Numbering(object):
def update_counter(self, counter, levelnum, levels):
counter[levelnum] += 1
- for ilvl, lvl in levels.iteritems():
+ for ilvl, lvl in iteritems(levels):
restart = lvl.restart
if (restart is None and ilvl == levelnum + 1) or restart == levelnum + 1:
counter[ilvl] = lvl.start
diff --git a/src/calibre/ebooks/docx/styles.py b/src/calibre/ebooks/docx/styles.py
index 1b1847236d..56b5b9a4e3 100644
--- a/src/calibre/ebooks/docx/styles.py
+++ b/src/calibre/ebooks/docx/styles.py
@@ -12,6 +12,7 @@ from collections import OrderedDict, Counter
from calibre.ebooks.docx.block_styles import ParagraphStyle, inherit, twips
from calibre.ebooks.docx.char_styles import RunStyle
from calibre.ebooks.docx.tables import TableStyle
+from polyglot.builtins import iteritems, itervalues
class PageProperties(object):
@@ -124,7 +125,7 @@ class Styles(object):
self.default_paragraph_style = self.default_character_style = None
def __iter__(self):
- for s in self.id_map.itervalues():
+ for s in itervalues(self.id_map):
yield s
def __getitem__(self, key):
@@ -341,7 +342,7 @@ class Styles(object):
setattr(s, prop, inherit)
setattr(block_style, prop, next(iter(vals)))
- for p, runs in layers.iteritems():
+ for p, runs in iteritems(layers):
has_links = '1' in {r.get('is-link', None) for r in runs}
char_styles = [self.resolve_run(r) for r in runs]
block_style = self.resolve_paragraph(p)
@@ -421,7 +422,7 @@ class Styles(object):
ps.pageBreakBefore = True
def register(self, css, prefix):
- h = hash(frozenset(css.iteritems()))
+ h = hash(frozenset(iteritems(css)))
ans, _ = self.classes.get(h, (None, None))
if ans is None:
self.counter[prefix] += 1
@@ -430,17 +431,17 @@ class Styles(object):
return ans
def generate_classes(self):
- for bs in self.para_cache.itervalues():
+ for bs in itervalues(self.para_cache):
css = bs.css
if css:
self.register(css, 'block')
- for bs in self.run_cache.itervalues():
+ for bs in itervalues(self.run_cache):
css = bs.css
if css:
self.register(css, 'text')
def class_name(self, css):
- h = hash(frozenset(css.iteritems()))
+ h = hash(frozenset(iteritems(css)))
return self.classes.get(h, (None, None))[0]
def generate_css(self, dest_dir, docx, notes_nopb, nosupsub):
@@ -495,8 +496,8 @@ class Styles(object):
prefix = ef + '\n' + prefix
ans = []
- for (cls, css) in sorted(self.classes.itervalues(), key=lambda x:x[0]):
- b = ('\t%s: %s;' % (k, v) for k, v in css.iteritems())
+ for (cls, css) in sorted(itervalues(self.classes), key=lambda x:x[0]):
+ b = ('\t%s: %s;' % (k, v) for k, v in iteritems(css))
b = '\n'.join(b)
ans.append('.%s {\n%s\n}\n' % (cls, b.rstrip(';')))
return prefix + '\n' + '\n'.join(ans)
diff --git a/src/calibre/ebooks/docx/tables.py b/src/calibre/ebooks/docx/tables.py
index b21d723ce4..a6b5f8ef69 100644
--- a/src/calibre/ebooks/docx/tables.py
+++ b/src/calibre/ebooks/docx/tables.py
@@ -10,7 +10,7 @@ from lxml.html.builder import TABLE, TR, TD
from calibre.ebooks.docx.block_styles import inherit, read_shd as rs, read_border, binary_property, border_props, ParagraphStyle, border_to_css
from calibre.ebooks.docx.char_styles import RunStyle
-from polyglot.builtins import range
+from polyglot.builtins import iteritems, itervalues, range
# Read from XML {{{
read_shd = rs
@@ -86,7 +86,7 @@ def read_spacing(parent, dest, XPath, get):
def read_float(parent, dest, XPath, get):
ans = inherit
for x in XPath('./w:tblpPr')(parent):
- ans = {k.rpartition('}')[-1]: v for k, v in x.attrib.iteritems()}
+ ans = {k.rpartition('}')[-1]: v for k, v in iteritems(x.attrib)}
setattr(dest, 'float', ans)
@@ -618,7 +618,7 @@ class Table(object):
def __iter__(self):
for p in self.paragraphs:
yield p
- for t in self.sub_tables.itervalues():
+ for t in itervalues(self.sub_tables):
for p in t:
yield p
@@ -665,7 +665,7 @@ class Table(object):
table_style = self.table_style.css
if table_style:
table.set('class', self.styles.register(table_style, 'table'))
- for elem, style in style_map.iteritems():
+ for elem, style in iteritems(style_map):
css = style.css
if css:
elem.set('class', self.styles.register(css, elem.tag))
@@ -686,7 +686,7 @@ class Tables(object):
self.sub_tables |= set(self.tables[-1].sub_tables)
def apply_markup(self, object_map, page_map):
- rmap = {v:k for k, v in object_map.iteritems()}
+ rmap = {v:k for k, v in iteritems(object_map)}
for table in self.tables:
table.apply_markup(rmap, page_map[table.tbl])
diff --git a/src/calibre/ebooks/docx/to_html.py b/src/calibre/ebooks/docx/to_html.py
index f1301f1f93..85ab2554bc 100644
--- a/src/calibre/ebooks/docx/to_html.py
+++ b/src/calibre/ebooks/docx/to_html.py
@@ -29,6 +29,8 @@ from calibre.ebooks.docx.fields import Fields
from calibre.ebooks.docx.settings import Settings
from calibre.ebooks.metadata.opf2 import OPFCreator
from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1
+from polyglot.builtins import iteritems, itervalues
+
NBSP = '\xa0'
@@ -122,7 +124,7 @@ class Convert(object):
self.read_page_properties(doc)
self.resolve_alternate_content(doc)
self.current_rels = relationships_by_id
- for wp, page_properties in self.page_map.iteritems():
+ for wp, page_properties in iteritems(self.page_map):
self.current_page = page_properties
if wp.tag.endswith('}p'):
p = self.convert_p(wp)
@@ -162,7 +164,7 @@ class Convert(object):
self.styles.apply_contextual_spacing(paras)
self.mark_block_runs(paras)
- for p, wp in self.object_map.iteritems():
+ for p, wp in iteritems(self.object_map):
if len(p) > 0 and not p.text and len(p[0]) > 0 and not p[0].text and p[0][0].get('class', None) == 'tab':
# Paragraph uses tabs for indentation, convert to text-indent
parent = p[0]
@@ -192,7 +194,7 @@ class Convert(object):
self.tables.apply_markup(self.object_map, self.page_map)
numbered = []
- for html_obj, obj in self.object_map.iteritems():
+ for html_obj, obj in iteritems(self.object_map):
raw = obj.get('calibre_num_id', None)
if raw is not None:
lvl, num_id = raw.partition(':')[0::2]
@@ -212,7 +214,7 @@ class Convert(object):
self.log.debug('Converting styles to CSS')
self.styles.generate_classes()
- for html_obj, obj in self.object_map.iteritems():
+ for html_obj, obj in iteritems(self.object_map):
style = self.styles.resolve(obj)
if style is not None:
css = style.css
@@ -220,7 +222,7 @@ class Convert(object):
cls = self.styles.class_name(css)
if cls:
html_obj.set('class', cls)
- for html_obj, css in self.framed_map.iteritems():
+ for html_obj, css in iteritems(self.framed_map):
cls = self.styles.class_name(css)
if cls:
html_obj.set('class', cls)
@@ -407,13 +409,13 @@ class Convert(object):
doc_anchors = frozenset(self.namespace.XPath('./w:body/w:bookmarkStart[@w:name]')(doc))
if doc_anchors:
current_bm = set()
- rmap = {v:k for k, v in self.object_map.iteritems()}
+ rmap = {v:k for k, v in iteritems(self.object_map)}
for p in self.namespace.descendants(doc, 'w:p', 'w:bookmarkStart[@w:name]'):
if p.tag.endswith('}p'):
if current_bm and p in rmap:
para = rmap[p]
if 'id' not in para.attrib:
- para.set('id', generate_anchor(next(iter(current_bm)), frozenset(self.anchor_map.itervalues())))
+ para.set('id', generate_anchor(next(iter(current_bm)), frozenset(itervalues(self.anchor_map))))
for name in current_bm:
self.anchor_map[name] = para.get('id')
current_bm = set()
@@ -469,10 +471,10 @@ class Convert(object):
# _GoBack is a special bookmark inserted by Word 2010 for
# the return to previous edit feature, we ignore it
old_anchor = current_anchor
- self.anchor_map[anchor] = current_anchor = generate_anchor(anchor, frozenset(self.anchor_map.itervalues()))
+ self.anchor_map[anchor] = current_anchor = generate_anchor(anchor, frozenset(itervalues(self.anchor_map)))
if old_anchor is not None:
# The previous anchor was not applied to any element
- for a, t in tuple(self.anchor_map.iteritems()):
+ for a, t in tuple(iteritems(self.anchor_map)):
if t == old_anchor:
self.anchor_map[a] = current_anchor
elif x.tag.endswith('}hyperlink'):
@@ -480,11 +482,11 @@ class Convert(object):
elif x.tag.endswith('}instrText') and x.text and x.text.strip().startswith('TOC '):
old_anchor = current_anchor
anchor = str(uuid.uuid4())
- self.anchor_map[anchor] = current_anchor = generate_anchor('toc', frozenset(self.anchor_map.itervalues()))
+ self.anchor_map[anchor] = current_anchor = generate_anchor('toc', frozenset(itervalues(self.anchor_map)))
self.toc_anchor = current_anchor
if old_anchor is not None:
# The previous anchor was not applied to any element
- for a, t in tuple(self.anchor_map.iteritems()):
+ for a, t in tuple(iteritems(self.anchor_map)):
if t == old_anchor:
self.anchor_map[a] = current_anchor
if current_anchor is not None:
@@ -559,7 +561,7 @@ class Convert(object):
def resolve_links(self):
self.resolved_link_map = {}
- for hyperlink, spans in self.link_map.iteritems():
+ for hyperlink, spans in iteritems(self.link_map):
relationships_by_id = self.link_source_map[hyperlink]
span = spans[0]
if len(spans) > 1:
@@ -585,7 +587,7 @@ class Convert(object):
# hrefs that point nowhere give epubcheck a hernia. The element
# should be styled explicitly by Word anyway.
# span.set('href', '#')
- rmap = {v:k for k, v in self.object_map.iteritems()}
+ rmap = {v:k for k, v in iteritems(self.object_map)}
for hyperlink, runs in self.fields.hyperlink_fields:
spans = [rmap[r] for r in runs if r in rmap]
if not spans:
@@ -744,7 +746,7 @@ class Convert(object):
if not self.block_runs:
return
- rmap = {v:k for k, v in self.object_map.iteritems()}
+ rmap = {v:k for k, v in iteritems(self.object_map)}
for border_style, blocks in self.block_runs:
paras = tuple(rmap[p] for p in blocks)
for p in paras:
diff --git a/src/calibre/ebooks/docx/toc.py b/src/calibre/ebooks/docx/toc.py
index 53caff03e9..ec40071980 100644
--- a/src/calibre/ebooks/docx/toc.py
+++ b/src/calibre/ebooks/docx/toc.py
@@ -13,7 +13,7 @@ from lxml.etree import tostring
from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.oeb.polish.toc import elem_to_toc_text
-from polyglot.builtins import unicode_type, range
+from polyglot.builtins import iteritems, unicode_type, range
def from_headings(body, log, namespace):
@@ -25,7 +25,7 @@ def from_headings(body, log, namespace):
level_prev = {i+1:None for i in range(len(xpaths))}
level_prev[0] = tocroot
level_item_map = {i+1:frozenset(xp(body)) for i, xp in enumerate(xpaths)}
- item_level_map = {e:i for i, elems in level_item_map.iteritems() for e in elems}
+ item_level_map = {e:i for i, elems in iteritems(level_item_map) for e in elems}
idcount = count()
diff --git a/src/calibre/ebooks/docx/writer/container.py b/src/calibre/ebooks/docx/writer/container.py
index 4d45910bc7..e51faed3fc 100644
--- a/src/calibre/ebooks/docx/writer/container.py
+++ b/src/calibre/ebooks/docx/writer/container.py
@@ -19,6 +19,7 @@ from calibre.utils.date import utcnow
from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1
from calibre.utils.zipfile import ZipFile
from calibre.ebooks.pdf.render.common import PAPER_SIZES
+from polyglot.builtins import iteritems
def xml2str(root, pretty_print=False, with_tail=False):
@@ -55,7 +56,7 @@ def create_skeleton(opts, namespaces=None):
def w(x):
return '{%s}%s' % (namespaces['w'], x)
- dn = {k:v for k, v in namespaces.iteritems() if k in {'w', 'r', 'm', 've', 'o', 'wp', 'w10', 'wne', 'a', 'pic'}}
+ dn = {k:v for k, v in iteritems(namespaces) if k in {'w', 'r', 'm', 've', 'o', 'wp', 'w10', 'wne', 'a', 'pic'}}
E = ElementMaker(namespace=dn['w'], nsmap=dn)
doc = E.document()
body = E.body()
@@ -73,7 +74,7 @@ def create_skeleton(opts, namespaces=None):
E.docGrid(**{w('linePitch'):"360"}),
))
- dn = {k:v for k, v in namespaces.iteritems() if k in tuple('wra') + ('wp',)}
+ dn = {k:v for k, v in iteritems(namespaces) if k in tuple('wra') + ('wp',)}
E = ElementMaker(namespace=dn['w'], nsmap=dn)
styles = E.styles(
E.docDefaults(
@@ -120,12 +121,12 @@ class DocumentRelationships(object):
def __init__(self, namespace):
self.rmap = {}
self.namespace = namespace
- for typ, target in {
+ for typ, target in iteritems({
namespace.names['STYLES']: 'styles.xml',
namespace.names['NUMBERING']: 'numbering.xml',
namespace.names['WEB_SETTINGS']: 'webSettings.xml',
namespace.names['FONTS']: 'fontTable.xml',
- }.iteritems():
+ }):
self.add_relationship(target, typ)
def get_relationship_id(self, target, rtype, target_mode=None):
@@ -145,7 +146,7 @@ class DocumentRelationships(object):
namespaces = self.namespace.namespaces
E = ElementMaker(namespace=namespaces['pr'], nsmap={None:namespaces['pr']})
relationships = E.Relationships()
- for (target, rtype, target_mode), rid in self.rmap.iteritems():
+ for (target, rtype, target_mode), rid in iteritems(self.rmap):
r = E.Relationship(Id=rid, Type=rtype, Target=target)
if target_mode is not None:
r.set('TargetMode', target_mode)
@@ -172,7 +173,7 @@ class DOCX(object):
def contenttypes(self):
E = ElementMaker(namespace=self.namespace.namespaces['ct'], nsmap={None:self.namespace.namespaces['ct']})
types = E.Types()
- for partname, mt in {
+ for partname, mt in iteritems({
"/word/footnotes.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.footnotes+xml",
"/word/document.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml",
"/word/numbering.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml",
@@ -184,15 +185,15 @@ class DOCX(object):
"/word/webSettings.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.webSettings+xml",
"/docProps/core.xml": "application/vnd.openxmlformats-package.core-properties+xml",
"/docProps/app.xml": "application/vnd.openxmlformats-officedocument.extended-properties+xml",
- }.iteritems():
+ }):
types.append(E.Override(PartName=partname, ContentType=mt))
added = {'png', 'gif', 'jpeg', 'jpg', 'svg', 'xml'}
for ext in added:
types.append(E.Default(Extension=ext, ContentType=guess_type('a.'+ext)[0]))
- for ext, mt in {
+ for ext, mt in iteritems({
"rels": "application/vnd.openxmlformats-package.relationships+xml",
"odttf": "application/vnd.openxmlformats-officedocument.obfuscatedFont",
- }.iteritems():
+ }):
added.add(ext)
types.append(E.Default(Extension=ext, ContentType=mt))
for fname in self.images:
@@ -270,9 +271,9 @@ class DOCX(object):
zf.writestr('word/fontTable.xml', xml2str(self.font_table))
zf.writestr('word/_rels/document.xml.rels', self.document_relationships.serialize())
zf.writestr('word/_rels/fontTable.xml.rels', xml2str(self.embedded_fonts))
- for fname, data_getter in self.images.iteritems():
+ for fname, data_getter in iteritems(self.images):
zf.writestr(fname, data_getter())
- for fname, data in self.fonts.iteritems():
+ for fname, data in iteritems(self.fonts):
zf.writestr(fname, data)
diff --git a/src/calibre/ebooks/docx/writer/images.py b/src/calibre/ebooks/docx/writer/images.py
index 94d762a4e3..6aa6ff8bbb 100644
--- a/src/calibre/ebooks/docx/writer/images.py
+++ b/src/calibre/ebooks/docx/writer/images.py
@@ -10,7 +10,7 @@ import os
import posixpath
from collections import namedtuple
from functools import partial
-from polyglot.builtins import map
+from polyglot.builtins import iteritems, itervalues, map
from lxml import etree
@@ -131,7 +131,7 @@ class ImagesManager(object):
if fake_margins:
# DOCX does not support setting margins for inline images, so we
# fake it by using effect extents to simulate margins
- makeelement(parent, 'wp:effectExtent', **{k[-1].lower():v for k, v in get_image_margins(style).iteritems()})
+ makeelement(parent, 'wp:effectExtent', **{k[-1].lower():v for k, v in iteritems(get_image_margins(style))})
else:
makeelement(parent, 'wp:effectExtent', l='0', r='0', t='0', b='0')
if floating is not None:
@@ -175,7 +175,7 @@ class ImagesManager(object):
return fname
def serialize(self, images_map):
- for img in self.images.itervalues():
+ for img in itervalues(self.images):
images_map['word/' + img.fname] = partial(self.get_data, img.item)
def get_data(self, item):
diff --git a/src/calibre/ebooks/docx/writer/lists.py b/src/calibre/ebooks/docx/writer/lists.py
index c9b0d930b4..e3c0d6eec9 100644
--- a/src/calibre/ebooks/docx/writer/lists.py
+++ b/src/calibre/ebooks/docx/writer/lists.py
@@ -9,6 +9,8 @@ __copyright__ = '2015, Kovid Goyal '
from collections import defaultdict
from operator import attrgetter
+from polyglot.builtins import iteritems, itervalues
+
LIST_STYLES = frozenset(
'disc circle square decimal decimal-leading-zero lower-roman upper-roman'
' lower-greek lower-alpha lower-latin upper-alpha upper-latin hiragana hebrew'
@@ -62,7 +64,7 @@ class NumberingDefinition(object):
items_for_level = defaultdict(list)
container_for_level = {}
type_for_level = {}
- for ilvl, items in self.level_map.iteritems():
+ for ilvl, items in iteritems(self.level_map):
for container, list_tag, block, list_type, tag_style in items:
items_for_level[ilvl].append(list_tag)
container_for_level[ilvl] = container
@@ -76,7 +78,7 @@ class NumberingDefinition(object):
return hash(self.levels)
def link_blocks(self):
- for ilvl, items in self.level_map.iteritems():
+ for ilvl, items in iteritems(self.level_map):
for container, list_tag, block, list_type, tag_style in items:
block.numbering_id = (self.num_id + 1, ilvl)
@@ -148,16 +150,16 @@ class ListsManager(object):
ilvl = len(container_tags) - 1
l.level_map[ilvl].append((container_tags[0], list_tag, block, list_type, tag_style))
- [nd.finalize() for nd in lists.itervalues()]
+ [nd.finalize() for nd in itervalues(lists)]
definitions = {}
- for defn in lists.itervalues():
+ for defn in itervalues(lists):
try:
defn = definitions[defn]
except KeyError:
definitions[defn] = defn
defn.num_id = len(definitions) - 1
defn.link_blocks()
- self.definitions = sorted(definitions.itervalues(), key=attrgetter('num_id'))
+ self.definitions = sorted(itervalues(definitions), key=attrgetter('num_id'))
def serialize(self, parent):
for defn in self.definitions:
diff --git a/src/calibre/ebooks/docx/writer/styles.py b/src/calibre/ebooks/docx/writer/styles.py
index fd8e4cabc9..f1c918ad6b 100644
--- a/src/calibre/ebooks/docx/writer/styles.py
+++ b/src/calibre/ebooks/docx/writer/styles.py
@@ -15,7 +15,7 @@ from lxml import etree
from calibre.ebooks import parse_css_length
from calibre.ebooks.docx.writer.utils import convert_color, int_or_zero
from calibre.utils.localization import lang_as_iso639_1
-from polyglot.builtins import unicode_type
+from polyglot.builtins import iteritems, iterkeys, unicode_type
from tinycss.css21 import CSS21Parser
css_parser = CSS21Parser()
@@ -158,7 +158,7 @@ class DOCXStyle(object):
getattr(self, x) for x in self.ALL_PROPS))
def makeelement(self, parent, name, **attrs):
- return parent.makeelement(self.w(name), **{self.w(k):v for k, v in attrs.iteritems()})
+ return parent.makeelement(self.w(name), **{self.w(k):v for k, v in iteritems(attrs)})
def __hash__(self):
return self._hash
@@ -365,7 +365,7 @@ class DescendantTextStyle(object):
p = []
def add(name, **props):
- p.append((name, frozenset(props.iteritems())))
+ p.append((name, frozenset(iteritems(props))))
def vals(attr):
return getattr(parent_style, attr), getattr(child_style, attr)
@@ -562,7 +562,7 @@ class BlockStyle(DOCXStyle):
def serialize_properties(self, pPr, normal_style):
makeelement, w = self.makeelement, self.w
spacing = makeelement(pPr, 'spacing')
- for edge, attr in {'top':'before', 'bottom':'after'}.iteritems():
+ for edge, attr in iteritems({'top':'before', 'bottom':'after'}):
getter = attrgetter('css_margin_' + edge)
css_val, css_unit = parse_css_length(getter(self))
if css_unit in ('em', 'ex'):
@@ -696,7 +696,7 @@ class StylesManager(object):
counts = Counter()
smap = {}
- for (bs, rs), blocks in used_pairs.iteritems():
+ for (bs, rs), blocks in iteritems(used_pairs):
s = CombinedStyle(bs, rs, blocks, self.namespace)
smap[(bs, rs)] = s
counts[s] += sum(1 for b in blocks if not b.is_empty())
@@ -721,7 +721,7 @@ class StylesManager(object):
heading_styles.append(style)
style.id = style.name = val
style.seq = i
- self.combined_styles = sorted(counts.iterkeys(), key=attrgetter('seq'))
+ self.combined_styles = sorted(iterkeys(counts), key=attrgetter('seq'))
[ls.apply() for ls in self.combined_styles]
descendant_style_map = {}
diff --git a/src/calibre/ebooks/docx/writer/tables.py b/src/calibre/ebooks/docx/writer/tables.py
index b0ab81524a..de2d3f8382 100644
--- a/src/calibre/ebooks/docx/writer/tables.py
+++ b/src/calibre/ebooks/docx/writer/tables.py
@@ -10,7 +10,7 @@ from collections import namedtuple
from calibre.ebooks.docx.writer.utils import convert_color
from calibre.ebooks.docx.writer.styles import read_css_block_borders as rcbb, border_edges
-from polyglot.builtins import range
+from polyglot.builtins import iteritems, range
class Dummy(object):
@@ -125,7 +125,7 @@ class Cell(object):
makeelement(tcPr, 'w:shd', w_val="clear", w_color="auto", w_fill=bc)
b = makeelement(tcPr, 'w:tcBorders', append=False)
- for edge, border in self.borders.iteritems():
+ for edge, border in iteritems(self.borders):
if border is not None and border.width > 0 and border.style != 'none':
makeelement(b, 'w:' + edge, w_val=border.style, w_sz=str(border.width), w_color=border.color)
if len(b) > 0:
diff --git a/src/calibre/ebooks/epub/cfi/tests.py b/src/calibre/ebooks/epub/cfi/tests.py
index 626660bd30..f6a6e2180b 100644
--- a/src/calibre/ebooks/epub/cfi/tests.py
+++ b/src/calibre/ebooks/epub/cfi/tests.py
@@ -10,7 +10,7 @@ import unittest, numbers
from polyglot.builtins import map
from calibre.ebooks.epub.cfi.parse import parser, cfi_sort_key, decode_cfi
-from polyglot.builtins import unicode_type
+from polyglot.builtins import iteritems, unicode_type
class Tests(unittest.TestCase):
@@ -61,7 +61,7 @@ class Tests(unittest.TestCase):
if after is not None:
ta['after'] = after
if params:
- ta['params'] = {unicode_type(k):(v,) if isinstance(v, unicode_type) else v for k, v in params.iteritems()}
+ ta['params'] = {unicode_type(k):(v,) if isinstance(v, unicode_type) else v for k, v in iteritems(params)}
if ta:
step['text_assertion'] = ta
return ans
diff --git a/src/calibre/ebooks/lrf/pylrs/pylrf.py b/src/calibre/ebooks/lrf/pylrs/pylrf.py
index d8f8c42ddf..0529482017 100644
--- a/src/calibre/ebooks/lrf/pylrs/pylrf.py
+++ b/src/calibre/ebooks/lrf/pylrs/pylrf.py
@@ -10,6 +10,7 @@ import codecs
import os
from pylrfopt import tagListOptimizer
+from polyglot.builtins import iteritems
PYLRF_VERSION = "1.0"
@@ -526,7 +527,7 @@ class LrfObject(object):
# belongs somewhere, so here it is.
#
composites = {}
- for name, value in tagDict.iteritems():
+ for name, value in iteritems(tagDict):
if name == 'rubyAlignAndAdjust':
continue
if name in {
diff --git a/src/calibre/ebooks/metadata/book/base.py b/src/calibre/ebooks/metadata/book/base.py
index 00f60a7298..4bb37b6c9b 100644
--- a/src/calibre/ebooks/metadata/book/base.py
+++ b/src/calibre/ebooks/metadata/book/base.py
@@ -14,7 +14,7 @@ from calibre.ebooks.metadata.book import (SC_COPYABLE_FIELDS,
TOP_LEVEL_IDENTIFIERS, ALL_METADATA_FIELDS)
from calibre.library.field_metadata import FieldMetadata
from calibre.utils.icu import sort_key
-from polyglot.builtins import unicode_type
+from polyglot.builtins import iteritems, iterkeys, unicode_type
# Special sets used to optimize the performance of getting and setting
# attributes on Metadata objects
@@ -137,7 +137,7 @@ class Metadata(object):
return object.__getattribute__(self, field)
except AttributeError:
pass
- if field in _data['user_metadata'].iterkeys():
+ if field in iterkeys(_data['user_metadata']):
d = _data['user_metadata'][field]
val = d['#value#']
if d['datatype'] != 'composite':
@@ -180,7 +180,7 @@ class Metadata(object):
if val and val.lower() != 'und':
langs = [val]
_data['languages'] = langs
- elif field in _data['user_metadata'].iterkeys():
+ elif field in iterkeys(_data['user_metadata']):
_data['user_metadata'][field]['#value#'] = val
_data['user_metadata'][field]['#extra#'] = extra
else:
@@ -190,7 +190,7 @@ class Metadata(object):
self.__dict__[field] = val
def __iter__(self):
- return object.__getattribute__(self, '_data').iterkeys()
+ return iterkeys(object.__getattribute__(self, '_data'))
def has_key(self, key):
return key in object.__getattribute__(self, '_data')
@@ -219,7 +219,7 @@ class Metadata(object):
def get_extra(self, field, default=None):
_data = object.__getattribute__(self, '_data')
- if field in _data['user_metadata'].iterkeys():
+ if field in iterkeys(_data['user_metadata']):
try:
return _data['user_metadata'][field]['#extra#']
except:
@@ -255,7 +255,7 @@ class Metadata(object):
Set all identifiers. Note that if you previously set ISBN, calling
this method will delete it.
'''
- cleaned = {ck(k):cv(v) for k, v in identifiers.iteritems() if k and v}
+ cleaned = {ck(k):cv(v) for k, v in iteritems(identifiers) if k and v}
object.__getattribute__(self, '_data')['identifiers'] = cleaned
def set_identifier(self, typ, val):
@@ -287,14 +287,14 @@ class Metadata(object):
'''
return a list of the custom fields in this book
'''
- return object.__getattribute__(self, '_data')['user_metadata'].iterkeys()
+ return iterkeys(object.__getattribute__(self, '_data')['user_metadata'])
def all_field_keys(self):
'''
All field keys known by this instance, even if their value is None
'''
_data = object.__getattribute__(self, '_data')
- return frozenset(ALL_METADATA_FIELDS.union(_data['user_metadata'].iterkeys()))
+ return frozenset(ALL_METADATA_FIELDS.union(iterkeys(_data['user_metadata'])))
def metadata_for_field(self, key):
'''
@@ -320,7 +320,7 @@ class Metadata(object):
v = self.get(attr, None)
if v is not None:
result[attr] = v
- for attr in _data['user_metadata'].iterkeys():
+ for attr in iterkeys(_data['user_metadata']):
v = self.get(attr, None)
if v is not None:
result[attr] = v
@@ -396,7 +396,7 @@ class Metadata(object):
return
um = {}
- for key, meta in metadata.iteritems():
+ for key, meta in iteritems(metadata):
m = meta.copy()
if '#value#' not in m:
if m['datatype'] == 'text' and m['is_multiple']:
@@ -576,7 +576,7 @@ class Metadata(object):
if callable(getattr(other, 'get_identifiers', None)):
d = self.get_identifiers()
s = other.get_identifiers()
- d.update([v for v in s.iteritems() if v[1] is not None])
+ d.update([v for v in iteritems(s) if v[1] is not None])
self.set_identifiers(d)
else:
# other structure not Metadata. Copy the top-level identifiers
@@ -749,7 +749,7 @@ class Metadata(object):
fmt('Rights', unicode_type(self.rights))
if self.identifiers:
fmt('Identifiers', u', '.join(['%s:%s'%(k, v) for k, v in
- self.identifiers.iteritems()]))
+ iteritems(self.identifiers)]))
if self.comments:
fmt('Comments', self.comments)
diff --git a/src/calibre/ebooks/metadata/book/json_codec.py b/src/calibre/ebooks/metadata/book/json_codec.py
index 6d15eb5031..4a411b15a2 100644
--- a/src/calibre/ebooks/metadata/book/json_codec.py
+++ b/src/calibre/ebooks/metadata/book/json_codec.py
@@ -13,6 +13,7 @@ from calibre.ebooks.metadata.book import SERIALIZABLE_FIELDS
from calibre.constants import filesystem_encoding, preferred_encoding
from calibre.library.field_metadata import FieldMetadata
from calibre import isbytestring
+from polyglot.builtins import iteritems, itervalues
# Translate datetimes to and from strings. The string form is the datetime in
# UTC. The returned date is also UTC
@@ -149,7 +150,7 @@ class JsonCodec(object):
def encode_metadata_attr(self, book, key):
if key == 'user_metadata':
meta = book.get_all_user_metadata(make_copy=True)
- for fm in meta.itervalues():
+ for fm in itervalues(meta):
if fm['datatype'] == 'datetime':
fm['#value#'] = datetime_to_string(fm['#value#'])
encode_is_multiple(fm)
@@ -184,7 +185,7 @@ class JsonCodec(object):
def raw_to_book(self, json_book, book_class, prefix):
try:
book = book_class(prefix, json_book.get('lpath', None))
- for key,val in json_book.iteritems():
+ for key,val in iteritems(json_book):
meta = self.decode_metadata(key, val)
if key == 'user_metadata':
book.set_all_user_metadata(meta)
@@ -201,7 +202,7 @@ class JsonCodec(object):
if key == 'classifiers':
key = 'identifiers'
if key == 'user_metadata':
- for fm in value.itervalues():
+ for fm in itervalues(value):
if fm['datatype'] == 'datetime':
fm['#value#'] = string_to_datetime(fm['#value#'])
decode_is_multiple(fm)
diff --git a/src/calibre/ebooks/metadata/book/serialize.py b/src/calibre/ebooks/metadata/book/serialize.py
index e1a7189530..8c741cc70f 100644
--- a/src/calibre/ebooks/metadata/book/serialize.py
+++ b/src/calibre/ebooks/metadata/book/serialize.py
@@ -10,7 +10,7 @@ from calibre.constants import preferred_encoding
from calibre.ebooks.metadata.book import SERIALIZABLE_FIELDS
from calibre.ebooks.metadata.book.base import Metadata
from calibre.utils.imghdr import what
-from polyglot.builtins import unicode_type
+from polyglot.builtins import iteritems, unicode_type
def ensure_unicode(obj, enc=preferred_encoding):
@@ -21,7 +21,7 @@ def ensure_unicode(obj, enc=preferred_encoding):
if isinstance(obj, (list, tuple)):
return [ensure_unicode(x) for x in obj]
if isinstance(obj, dict):
- return {ensure_unicode(k): ensure_unicode(v) for k, v in obj.iteritems()}
+ return {ensure_unicode(k): ensure_unicode(v) for k, v in iteritems(obj)}
return obj
@@ -63,7 +63,7 @@ def metadata_as_dict(mi, encode_cover_data=False):
def metadata_from_dict(src):
ans = Metadata('Unknown')
- for key, value in src.iteritems():
+ for key, value in iteritems(src):
if key == 'user_metadata':
ans.set_all_user_metadata(value)
else:
diff --git a/src/calibre/ebooks/metadata/cli.py b/src/calibre/ebooks/metadata/cli.py
index 5a7643c946..f6ca8ae1bc 100644
--- a/src/calibre/ebooks/metadata/cli.py
+++ b/src/calibre/ebooks/metadata/cli.py
@@ -16,7 +16,7 @@ from calibre.ebooks.metadata import string_to_authors, authors_to_sort_string, \
from calibre.ebooks.lrf.meta import LRFMetaFile
from calibre import prints
from calibre.utils.date import parse_date
-from polyglot.builtins import unicode_type
+from polyglot.builtins import iteritems, unicode_type
USAGE=_('%prog ebook_file [options]\n') + \
_('''
@@ -150,7 +150,7 @@ def do_set_metadata(opts, mi, stream, stream_type):
if val:
orig = mi.get_identifiers()
orig.update(val)
- val = {k:v for k, v in orig.iteritems() if k and v}
+ val = {k:v for k, v in iteritems(orig) if k and v}
mi.set_identifiers(val)
if getattr(opts, 'cover', None) is not None:
diff --git a/src/calibre/ebooks/metadata/html.py b/src/calibre/ebooks/metadata/html.py
index 55c3849107..e8428d4393 100644
--- a/src/calibre/ebooks/metadata/html.py
+++ b/src/calibre/ebooks/metadata/html.py
@@ -16,6 +16,7 @@ from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.chardet import xml_to_unicode
from calibre import replace_entities, isbytestring
from calibre.utils.date import parse_date, is_date_undefined
+from polyglot.builtins import iteritems, itervalues
def get_metadata(stream):
@@ -60,7 +61,7 @@ attr_pat = r'''(?:(?P')|(?P"))(?P(?(sq)[^']+|[^"]+))(?(sq)'|")'
def parse_meta_tags(src):
rmap = {}
- for field, names in META_NAMES.iteritems():
+ for field, names in iteritems(META_NAMES):
for name in names:
rmap[name.lower()] = field
all_names = '|'.join(rmap)
@@ -89,8 +90,8 @@ def parse_meta_tags(src):
def parse_comment_tags(src):
- all_names = '|'.join(COMMENT_NAMES.itervalues())
- rmap = {v:k for k, v in COMMENT_NAMES.iteritems()}
+ all_names = '|'.join(itervalues(COMMENT_NAMES))
+ rmap = {v:k for k, v in iteritems(COMMENT_NAMES)}
ans = {}
for match in re.finditer(r'''