This commit is contained in:
Kovid Goyal 2019-03-25 13:32:21 +05:30
commit 33cea777ac
No known key found for this signature in database
GPG Key ID: 06BC317B515ACE7C
332 changed files with 1713 additions and 1532 deletions

View File

@ -15,6 +15,7 @@ from calibre.ebooks.oeb.polish.container import get_container, OEB_DOCS
from calibre.ebooks.oeb.polish.check.links import check_links, UnreferencedResource
from calibre.ebooks.oeb.polish.pretty import pretty_html_tree, pretty_opf
from calibre.utils.imghdr import identify
from polyglot.builtins import iteritems
class EPUBHelpBuilder(EpubBuilder):
@ -28,7 +29,7 @@ class EPUBHelpBuilder(EpubBuilder):
def fix_epub(self, container):
' Fix all the brokenness that sphinx\'s epub builder creates '
for name, mt in container.mime_map.iteritems():
for name, mt in iteritems(container.mime_map):
if mt in OEB_DOCS:
self.workaround_ade_quirks(container, name)
pretty_html_tree(container, container.parsed(name))
@ -49,9 +50,9 @@ class EPUBHelpBuilder(EpubBuilder):
def fix_opf(self, container):
spine_names = {n for n, l in container.spine_names}
spine = container.opf_xpath('//opf:spine')[0]
rmap = {v:k for k, v in container.manifest_id_map.iteritems()}
rmap = {v:k for k, v in iteritems(container.manifest_id_map)}
# Add unreferenced text files to the spine
for name, mt in container.mime_map.iteritems():
for name, mt in iteritems(container.mime_map):
if mt in OEB_DOCS and name not in spine_names:
spine_names.add(name)
container.insert_into_xml(spine, spine.makeelement(OPF('itemref'), idref=rmap[name]))

View File

@ -2,6 +2,8 @@
max-line-length = 160
builtins = _,dynamic_property,__,P,I,lopen,icu_lower,icu_upper,icu_title,ngettext,connect_lambda
ignore = E12,E203,E22,E231,E241,E401,E402,E731,W391,E722,E741,W504
per-file-ignores =
src/polyglot/*:F401
[yapf]
based_on_style = pep8

View File

@ -10,11 +10,13 @@ from datetime import datetime
from setup import download_securely
from polyglot.builtins import filter
is_ci = os.environ.get('CI', '').lower() == 'true'
def filter_ans(ans):
return filter(None, (x.strip() for x in ans))
return list(filter(None, (x.strip() for x in ans)))
def common_user_agents():

View File

@ -33,6 +33,7 @@ from email.utils import parsedate
from functools import partial
from multiprocessing.pool import ThreadPool
from xml.sax.saxutils import escape, quoteattr
from polyglot.builtins import iteritems, itervalues
# }}}
USER_AGENT = 'calibre mirror'
@ -292,7 +293,7 @@ def get_plugin_info(raw, check_for_qt5=False):
metadata = names[inits[0]]
else:
# Legacy plugin
for name, val in names.iteritems():
for name, val in iteritems(names):
if name.endswith('plugin.py'):
metadata = val
break
@ -331,7 +332,7 @@ def update_plugin_from_entry(plugin, entry):
def fetch_plugin(old_index, entry):
lm_map = {plugin['thread_id']:plugin for plugin in old_index.itervalues()}
lm_map = {plugin['thread_id']:plugin for plugin in itervalues(old_index)}
raw = read(entry.url)
url, name = parse_plugin_zip_url(raw)
if url is None:
@ -373,10 +374,10 @@ def parallel_fetch(old_index, entry):
def log(*args, **kwargs):
print (*args, **kwargs)
print(*args, **kwargs)
with open('log', 'a') as f:
kwargs['file'] = f
print (*args, **kwargs)
print(*args, **kwargs)
def atomic_write(raw, name):
@ -403,7 +404,7 @@ def fetch_plugins(old_index):
log('Failed to get plugin', entry.name, 'at', datetime.utcnow().isoformat(), 'with error:')
log(plugin)
# Move staged files
for plugin in ans.itervalues():
for plugin in itervalues(ans):
if plugin['file'].startswith('staging_'):
src = plugin['file']
plugin['file'] = src.partition('_')[-1]
@ -411,7 +412,7 @@ def fetch_plugins(old_index):
raw = bz2.compress(json.dumps(ans, sort_keys=True, indent=4, separators=(',', ': ')))
atomic_write(raw, PLUGINS)
# Cleanup any extra .zip files
all_plugin_files = {p['file'] for p in ans.itervalues()}
all_plugin_files = {p['file'] for p in itervalues(ans)}
extra = set(glob.glob('*.zip')) - all_plugin_files
for x in extra:
os.unlink(x)
@ -498,7 +499,7 @@ h1 { text-align: center }
name, count = x
return '<tr><td>%s</td><td>%s</td></tr>\n' % (escape(name), count)
pstats = map(plugin_stats, sorted(stats.iteritems(), reverse=True, key=lambda x:x[1]))
pstats = map(plugin_stats, sorted(iteritems(stats), reverse=True, key=lambda x:x[1]))
stats = '''\
<!DOCTYPE html>
<html>
@ -681,7 +682,7 @@ def test_parse(): # {{{
new_entries = tuple(parse_index(raw))
for i, entry in enumerate(old_entries):
if entry != new_entries[i]:
print ('The new entry: %s != %s' % (new_entries[i], entry))
print('The new entry: %s != %s' % (new_entries[i], entry))
raise SystemExit(1)
pool = ThreadPool(processes=20)
urls = [e.url for e in new_entries]
@ -698,7 +699,7 @@ def test_parse(): # {{{
break
new_url, aname = parse_plugin_zip_url(raw)
if new_url != full_url:
print ('new url (%s): %s != %s for plugin at: %s' % (aname, new_url, full_url, url))
print('new url (%s): %s != %s for plugin at: %s' % (aname, new_url, full_url, url))
raise SystemExit(1)
# }}}

View File

@ -64,7 +64,7 @@ class Coffee(Command): # {{{
for src in self.COFFEE_DIRS:
for f in glob.glob(self.j(self.SRC, __appname__, src,
'*.coffee')):
bn = os.path.basename(f).rpartition('.')[0]
bn = self.b(f).rpartition('.')[0]
arcname = src.replace('/', '.') + '.' + bn + '.js'
try:
with open(f, 'rb') as fs:
@ -270,7 +270,7 @@ class RecentUAs(Command): # {{{
from setup.browser_data import get_data
data = get_data()
with open(self.UA_PATH, 'wb') as f:
f.write(json.dumps(data, indent=2))
f.write(json.dumps(data, indent=2).encode('utf-8'))
# }}}
@ -300,7 +300,7 @@ class Resources(Command): # {{{
dest = self.j(self.RESOURCES, 'scripts.calibre_msgpack')
if self.newer(dest, self.j(self.SRC, 'calibre', 'linux.py')):
self.info('\tCreating ' + os.path.basename(dest))
self.info('\tCreating ' + self.b(dest))
with open(dest, 'wb') as f:
f.write(msgpack_dumps(scripts))
@ -325,7 +325,7 @@ class Resources(Command): # {{{
with zipfile.ZipFile(dest, 'w', zipfile.ZIP_STORED) as zf:
for n in sorted(files, key=self.b):
with open(n, 'rb') as f:
zf.writestr(os.path.basename(n), f.read())
zf.writestr(self.b(n), f.read())
dest = self.j(self.RESOURCES, 'ebook-convert-complete.calibre_msgpack')
files = []
@ -334,7 +334,7 @@ class Resources(Command): # {{{
if f.endswith('.py'):
files.append(self.j(x[0], f))
if self.newer(dest, files):
self.info('\tCreating ' + dest)
self.info('\tCreating ' + self.b(dest))
complete = {}
from calibre.ebooks.conversion.plumber import supported_input_formats
complete['input_fmts'] = set(supported_input_formats())

View File

@ -4,7 +4,8 @@ __copyright__ = '2008, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, os, re, time, random, warnings
from polyglot.builtins import builtins, codepoint_to_chr, unicode_type, range
from polyglot.builtins import (builtins, codepoint_to_chr, iteritems,
itervalues, unicode_type, range)
builtins.__dict__['dynamic_property'] = lambda func: func(None)
from math import floor
from functools import partial
@ -706,7 +707,7 @@ def remove_bracketed_text(src,
counts = Counter()
buf = []
src = force_unicode(src)
rmap = dict([(v, k) for k, v in brackets.iteritems()])
rmap = dict([(v, k) for k, v in iteritems(brackets)])
for char in src:
if char in brackets:
counts[char] += 1
@ -714,7 +715,7 @@ def remove_bracketed_text(src,
idx = rmap[char]
if counts[idx] > 0:
counts[idx] -= 1
elif sum(counts.itervalues()) < 1:
elif sum(itervalues(counts)) < 1:
buf.append(char)
return u''.join(buf)

View File

@ -212,7 +212,7 @@ class Plugin(object): # {{{
For example to load an image::
pixmap = QPixmap()
pixmap.loadFromData(self.load_resources(['images/icon.png']).itervalues().next())
pixmap.loadFromData(next(itervalues(self.load_resources(['images/icon.png']))))
icon = QIcon(pixmap)
:param names: List of paths to resources in the ZIP file using / as separator

View File

@ -23,6 +23,7 @@ from calibre.utils.config import (make_config_dir, Config, ConfigProxy,
plugin_dir, OptionParser)
from calibre.ebooks.metadata.sources.base import Source
from calibre.constants import DEBUG, numeric_version
from polyglot.builtins import iteritems, itervalues
builtin_names = frozenset(p.name for p in builtin_plugins)
BLACKLISTED_PLUGINS = frozenset({'Marvin XD', 'iOS reader applications'})
@ -195,7 +196,7 @@ def run_plugins_on_postimport(db, book_id, fmt):
try:
plugin.postimport(book_id, fmt, db)
except:
print ('Running file type plugin %s failed with traceback:'%
print('Running file type plugin %s failed with traceback:'%
plugin.name)
traceback.print_exc()
@ -210,7 +211,7 @@ def run_plugins_on_postadd(db, book_id, fmt_map):
try:
plugin.postadd(book_id, fmt_map, db)
except Exception:
print ('Running file type plugin %s failed with traceback:'%
print('Running file type plugin %s failed with traceback:'%
plugin.name)
traceback.print_exc()
@ -347,7 +348,7 @@ def reread_metadata_plugins():
return (1 if plugin.plugin_path is None else 0), plugin.name
for group in (_metadata_readers, _metadata_writers):
for plugins in group.itervalues():
for plugins in itervalues(group):
if len(plugins) > 1:
plugins.sort(key=key)
@ -640,7 +641,7 @@ def patch_metadata_plugins(possibly_updated_plugins):
# Metadata source plugins dont use initialize() but that
# might change in the future, so be safe.
patches[i].initialize()
for i, pup in patches.iteritems():
for i, pup in iteritems(patches):
_initialized_plugins[i] = pup
# }}}
@ -727,7 +728,7 @@ def initialize_plugins(perf=False):
sys.stdout, sys.stderr = ostdout, ostderr
if perf:
for x in sorted(times, key=lambda x:times[x]):
print ('%50s: %.3f'%(x, times[x]))
print('%50s: %.3f'%(x, times[x]))
_initialized_plugins.sort(cmp=lambda x,y:cmp(x.priority, y.priority), reverse=True)
reread_filetype_plugins()
reread_metadata_plugins()

View File

@ -2,7 +2,6 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
from polyglot.builtins import map, unicode_type
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
@ -15,7 +14,8 @@ from functools import partial
from calibre import as_unicode
from calibre.customize import (Plugin, numeric_version, platform,
InvalidPlugin, PluginNotFound)
from polyglot.builtins import string_or_bytes
from polyglot.builtins import (itervalues, iterkeys, map,
string_or_bytes, unicode_type)
# PEP 302 based plugin loading mechanism, works around the bug in zipimport in
# python 2.x that prevents importing from zip files in locations whose paths
@ -202,7 +202,7 @@ class PluginLoader(object):
else:
m = importlib.import_module(plugin_module)
plugin_classes = []
for obj in m.__dict__.itervalues():
for obj in itervalues(m.__dict__):
if isinstance(obj, type) and issubclass(obj, Plugin) and \
obj.name != 'Trivial Plugin':
plugin_classes.append(obj)
@ -281,7 +281,7 @@ class PluginLoader(object):
# Legacy plugins
if '__init__' not in names:
for name in list(names.iterkeys()):
for name in list(iterkeys(names)):
if '.' not in name and name.endswith('plugin'):
names['__init__'] = names[name]
break

View File

@ -10,7 +10,7 @@ __docformat__ = 'restructuredtext en'
SPOOL_SIZE = 30*1024*1024
import numbers
from polyglot.builtins import range
from polyglot.builtins import iteritems, range
def _get_next_series_num_for_list(series_indices, unwrap=True):
@ -82,7 +82,7 @@ def get_data_as_dict(self, prefix=None, authors_as_string=False, ids=None, conve
'rating', 'timestamp', 'size', 'tags', 'comments', 'series',
'series_index', 'uuid', 'pubdate', 'last_modified', 'identifiers',
'languages']).union(set(fdata))
for x, data in fdata.iteritems():
for x, data in iteritems(fdata):
if data['datatype'] == 'series':
FIELDS.add('%d_index'%x)
data = []

View File

@ -8,7 +8,7 @@ __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os, time, re
from collections import defaultdict
from polyglot.builtins import map, unicode_type
from polyglot.builtins import itervalues, map, unicode_type
from contextlib import contextmanager
from functools import partial
@ -137,7 +137,7 @@ def find_books_in_directory(dirpath, single_book_per_directory, compiled_rules=(
if allow_path(path, ext, compiled_rules):
formats[ext] = path
if formats_ok(formats):
yield list(formats.itervalues())
yield list(itervalues(formats))
else:
books = defaultdict(dict)
for path in listdir_impl(dirpath, sort_by_mtime=True):
@ -145,9 +145,9 @@ def find_books_in_directory(dirpath, single_book_per_directory, compiled_rules=(
if allow_path(path, ext, compiled_rules):
books[icu_lower(key) if isinstance(key, unicode_type) else key.lower()][ext] = path
for formats in books.itervalues():
for formats in itervalues(books):
if formats_ok(formats):
yield list(formats.itervalues())
yield list(itervalues(formats))
def create_format_map(formats):

View File

@ -12,7 +12,8 @@ import os, shutil, uuid, json, glob, time, hashlib, errno, sys
from functools import partial
import apsw
from polyglot.builtins import unicode_type, reraise, string_or_bytes
from polyglot.builtins import (iteritems, iterkeys, itervalues,
unicode_type, reraise, string_or_bytes)
from calibre import isbytestring, force_unicode, prints, as_unicode
from calibre.constants import (iswindows, filesystem_encoding,
@ -46,7 +47,7 @@ from calibre.db.tables import (OneToOneTable, ManyToOneTable, ManyToManyTable,
Differences in semantics from pysqlite:
1. execute/executemany operate in autocommit mode
2. There is no fetchone() method on cursor objects, instead use next()
2. There is no fetchone() method on cursor objects, instead use next(cursor)
3. There is no executescript
'''
@ -120,7 +121,7 @@ class DBPrefs(dict): # {{{
raw = self.to_raw(val)
with self.db.conn:
try:
dbraw = self.db.execute('SELECT id,val FROM preferences WHERE key=?', (key,)).next()
dbraw = next(self.db.execute('SELECT id,val FROM preferences WHERE key=?', (key,)))
except StopIteration:
dbraw = None
if dbraw is None or dbraw[1] != raw:
@ -222,7 +223,7 @@ def SortedConcatenate(sep=','):
def finalize(ctxt):
if len(ctxt) == 0:
return None
return sep.join(map(ctxt.get, sorted(ctxt.iterkeys())))
return sep.join(map(ctxt.get, sorted(iterkeys(ctxt))))
return ({}, step, finalize)
@ -247,7 +248,7 @@ def AumSortedConcatenate():
ctxt[ndx] = ':::'.join((author, sort, link))
def finalize(ctxt):
keys = list(ctxt.iterkeys())
keys = list(iterkeys(ctxt))
l = len(keys)
if l == 0:
return None
@ -271,7 +272,7 @@ class Connection(apsw.Connection): # {{{
self.execute('pragma cache_size=-5000')
self.execute('pragma temp_store=2')
encoding = self.execute('pragma encoding').next()[0]
encoding = next(self.execute('pragma encoding'))[0]
self.createcollation('PYNOCASE', partial(pynocase,
encoding=encoding))
@ -306,7 +307,7 @@ class Connection(apsw.Connection): # {{{
if kw.get('all', True):
return ans.fetchall()
try:
return ans.next()[0]
return next(ans)[0]
except (StopIteration, IndexError):
return None
@ -733,7 +734,7 @@ class DB(object):
}
# Create Tag Browser categories for custom columns
for k in sorted(self.custom_column_label_map.iterkeys()):
for k in sorted(iterkeys(self.custom_column_label_map)):
v = self.custom_column_label_map[k]
if v['normalized']:
is_category = True
@ -786,10 +787,10 @@ class DB(object):
'last_modified':19, 'identifiers':20, 'languages':21,
}
for k,v in self.FIELD_MAP.iteritems():
for k,v in iteritems(self.FIELD_MAP):
self.field_metadata.set_field_record_index(k, v, prefer_custom=False)
base = max(self.FIELD_MAP.itervalues())
base = max(itervalues(self.FIELD_MAP))
for label_ in sorted(self.custom_column_label_map):
data = self.custom_column_label_map[label_]
@ -875,7 +876,7 @@ class DB(object):
if kw.get('all', True):
return ans.fetchall()
try:
return ans.next()[0]
return next(ans)[0]
except (StopIteration, IndexError):
return None
@ -1263,7 +1264,7 @@ class DB(object):
'''
with self.conn: # Use a single transaction, to ensure nothing modifies the db while we are reading
for table in self.tables.itervalues():
for table in itervalues(self.tables):
try:
table.read(self)
except:
@ -1327,7 +1328,7 @@ class DB(object):
def remove_formats(self, remove_map):
paths = []
for book_id, removals in remove_map.iteritems():
for book_id, removals in iteritems(remove_map):
for fmt, fname, path in removals:
path = self.format_abspath(book_id, fmt, fname, path)
if path is not None:
@ -1585,7 +1586,7 @@ class DB(object):
if samefile(spath, tpath):
# The format filenames may have changed while the folder
# name remains the same
for fmt, opath in original_format_map.iteritems():
for fmt, opath in iteritems(original_format_map):
npath = format_map.get(fmt, None)
if npath and os.path.abspath(npath.lower()) != os.path.abspath(opath.lower()) and samefile(opath, npath):
# opath and npath are different hard links to the same file
@ -1648,7 +1649,7 @@ class DB(object):
def remove_books(self, path_map, permanent=False):
self.executemany(
'DELETE FROM books WHERE id=?', [(x,) for x in path_map])
paths = {os.path.join(self.library_path, x) for x in path_map.itervalues() if x}
paths = {os.path.join(self.library_path, x) for x in itervalues(path_map) if x}
paths = {x for x in paths if os.path.exists(x) and self.is_deletable(x)}
if permanent:
for path in paths:
@ -1663,7 +1664,7 @@ class DB(object):
self.executemany(
'INSERT OR REPLACE INTO books_plugin_data (book, name, val) VALUES (?, ?, ?)',
[(book_id, name, json.dumps(val, default=to_json))
for book_id, val in val_map.iteritems()])
for book_id, val in iteritems(val_map)])
def get_custom_book_data(self, name, book_ids, default=None):
book_ids = frozenset(book_ids)
@ -1722,7 +1723,7 @@ class DB(object):
def set_conversion_options(self, options, fmt):
options = [(book_id, fmt.upper(), buffer(pickle_binary_string(data.encode('utf-8') if isinstance(data, unicode_type) else data)))
for book_id, data in options.iteritems()]
for book_id, data in iteritems(options)]
self.executemany('INSERT OR REPLACE INTO conversion_options(book,format,data) VALUES (?,?,?)', options)
def get_top_level_move_items(self, all_paths):

View File

@ -11,7 +11,7 @@ import os, traceback, random, shutil, operator
from io import BytesIO
from collections import defaultdict, Set, MutableSet
from functools import wraps, partial
from polyglot.builtins import unicode_type, zip, string_or_bytes
from polyglot.builtins import iteritems, iterkeys, itervalues, unicode_type, zip, string_or_bytes
from time import time
from calibre import isbytestring, as_unicode
@ -170,7 +170,7 @@ class Cache(object):
# Reconstruct the user categories, putting them into field_metadata
fm = self.field_metadata
fm.remove_dynamic_categories()
for user_cat in sorted(self._pref('user_categories', {}).iterkeys(), key=sort_key):
for user_cat in sorted(iterkeys(self._pref('user_categories', {})), key=sort_key):
cat_name = '@' + user_cat # add the '@' to avoid name collision
while cat_name:
try:
@ -181,7 +181,7 @@ class Cache(object):
# add grouped search term user categories
muc = frozenset(self._pref('grouped_search_make_user_categories', []))
for cat in sorted(self._pref('grouped_search_terms', {}).iterkeys(), key=sort_key):
for cat in sorted(iterkeys(self._pref('grouped_search_terms', {})), key=sort_key):
if cat in muc:
# There is a chance that these can be duplicates of an existing
# user category. Print the exception and continue.
@ -200,7 +200,7 @@ class Cache(object):
self.dirtied_cache = {x:i for i, (x,) in enumerate(
self.backend.execute('SELECT book FROM metadata_dirtied'))}
if self.dirtied_cache:
self.dirtied_sequence = max(self.dirtied_cache.itervalues())+1
self.dirtied_sequence = max(itervalues(self.dirtied_cache))+1
self._initialize_dynamic_categories()
@write_api
@ -213,7 +213,7 @@ class Cache(object):
@write_api
def clear_composite_caches(self, book_ids=None):
for field in self.composites.itervalues():
for field in itervalues(self.composites):
field.clear_caches(book_ids=book_ids)
@write_api
@ -229,7 +229,7 @@ class Cache(object):
def clear_caches(self, book_ids=None, template_cache=True, search_cache=True):
if template_cache:
self._initialize_template_cache() # Clear the formatter template cache
for field in self.fields.itervalues():
for field in itervalues(self.fields):
if hasattr(field, 'clear_caches'):
field.clear_caches(book_ids=book_ids) # Clear the composite cache and ondevice caches
if book_ids:
@ -247,7 +247,7 @@ class Cache(object):
with self.backend.conn: # Prevent other processes, such as calibredb from interrupting the reload by locking the db
self.backend.prefs.load_from_db()
self._search_api.saved_searches.load_from_db()
for field in self.fields.itervalues():
for field in itervalues(self.fields):
if hasattr(field, 'table'):
field.table.read(self.backend) # Reread data from metadata.db
@ -358,7 +358,7 @@ class Cache(object):
self.backend.read_tables()
bools_are_tristate = self.backend.prefs['bools_are_tristate']
for field, table in self.backend.tables.iteritems():
for field, table in iteritems(self.backend.tables):
self.fields[field] = create_field(field, table, bools_are_tristate,
self.backend.get_template_functions)
if table.metadata['datatype'] == 'composite':
@ -368,7 +368,7 @@ class Cache(object):
VirtualTable('ondevice'), bools_are_tristate,
self.backend.get_template_functions)
for name, field in self.fields.iteritems():
for name, field in iteritems(self.fields):
if name[0] == '#' and name.endswith('_index'):
field.series_field = self.fields[name[:-len('_index')]]
self.fields[name[:-len('_index')]].index_field = field
@ -494,7 +494,7 @@ class Cache(object):
return frozenset(self.fields[field].table.col_book_map)
try:
return frozenset(self.fields[field].table.id_map.itervalues())
return frozenset(itervalues(self.fields[field].table.id_map))
except AttributeError:
raise ValueError('%s is not a many-one or many-many field' % field)
@ -503,7 +503,7 @@ class Cache(object):
''' Return a mapping of id to usage count for all values of the specified
field, which must be a many-one or many-many field. '''
try:
return {k:len(v) for k, v in self.fields[field].table.col_book_map.iteritems()}
return {k:len(v) for k, v in iteritems(self.fields[field].table.col_book_map)}
except AttributeError:
raise ValueError('%s is not a many-one or many-many field' % field)
@ -528,13 +528,13 @@ class Cache(object):
@read_api
def get_item_id(self, field, item_name):
' Return the item id for item_name (case-insensitive) '
rmap = {icu_lower(v) if isinstance(v, unicode_type) else v:k for k, v in self.fields[field].table.id_map.iteritems()}
rmap = {icu_lower(v) if isinstance(v, unicode_type) else v:k for k, v in iteritems(self.fields[field].table.id_map)}
return rmap.get(icu_lower(item_name) if isinstance(item_name, unicode_type) else item_name, None)
@read_api
def get_item_ids(self, field, item_names):
' Return the item id for item_name (case-insensitive) '
rmap = {icu_lower(v) if isinstance(v, unicode_type) else v:k for k, v in self.fields[field].table.id_map.iteritems()}
rmap = {icu_lower(v) if isinstance(v, unicode_type) else v:k for k, v in iteritems(self.fields[field].table.id_map)}
return {name:rmap.get(icu_lower(name) if isinstance(name, unicode_type) else name, None) for name in item_names}
@read_api
@ -1038,13 +1038,13 @@ class Cache(object):
new_dirtied = book_ids - already_dirtied
already_dirtied = {book_id:self.dirtied_sequence+i for i, book_id in enumerate(already_dirtied)}
if already_dirtied:
self.dirtied_sequence = max(already_dirtied.itervalues()) + 1
self.dirtied_sequence = max(itervalues(already_dirtied)) + 1
self.dirtied_cache.update(already_dirtied)
if new_dirtied:
self.backend.executemany('INSERT OR IGNORE INTO metadata_dirtied (book) VALUES (?)',
((x,) for x in new_dirtied))
new_dirtied = {book_id:self.dirtied_sequence+i for i, book_id in enumerate(new_dirtied)}
self.dirtied_sequence = max(new_dirtied.itervalues()) + 1
self.dirtied_sequence = max(itervalues(new_dirtied)) + 1
self.dirtied_cache.update(new_dirtied)
@write_api
@ -1075,7 +1075,7 @@ class Cache(object):
if is_series:
bimap, simap = {}, {}
sfield = self.fields[name + '_index']
for k, v in book_id_to_val_map.iteritems():
for k, v in iteritems(book_id_to_val_map):
if isinstance(v, string_or_bytes):
v, sid = get_series_values(v)
else:
@ -1117,7 +1117,7 @@ class Cache(object):
@read_api
def get_a_dirtied_book(self):
if self.dirtied_cache:
return random.choice(tuple(self.dirtied_cache.iterkeys()))
return random.choice(tuple(iterkeys(self.dirtied_cache)))
return None
@read_api
@ -1220,7 +1220,7 @@ class Cache(object):
QPixmap, file object or bytestring. It can also be None, in which
case any existing cover is removed. '''
for book_id, data in book_id_data_map.iteritems():
for book_id, data in iteritems(book_id_data_map):
try:
path = self._field_for('path', book_id).replace('/', os.sep)
except AttributeError:
@ -1231,7 +1231,7 @@ class Cache(object):
for cc in self.cover_caches:
cc.invalidate(book_id_data_map)
return self._set_field('cover', {
book_id:(0 if data is None else 1) for book_id, data in book_id_data_map.iteritems()})
book_id:(0 if data is None else 1) for book_id, data in iteritems(book_id_data_map)})
@write_api
def add_cover_cache(self, cover_cache):
@ -1332,14 +1332,14 @@ class Cache(object):
protected_set_field('identifiers', mi_idents)
elif mi_idents:
identifiers = self._field_for('identifiers', book_id, default_value={})
for key, val in mi_idents.iteritems():
for key, val in iteritems(mi_idents):
if val and val.strip(): # Don't delete an existing identifier
identifiers[icu_lower(key)] = val
protected_set_field('identifiers', identifiers)
user_mi = mi.get_all_user_metadata(make_copy=False)
fm = self.field_metadata
for key in user_mi.iterkeys():
for key in iterkeys(user_mi):
if (key in fm and user_mi[key]['datatype'] == fm[key]['datatype'] and (
user_mi[key]['datatype'] != 'text' or (
user_mi[key]['is_multiple'] == fm[key]['is_multiple']))):
@ -1433,15 +1433,15 @@ class Cache(object):
:param db_only: If True, only remove the record for the format from the db, do not delete the actual format file from the filesystem.
'''
table = self.fields['formats'].table
formats_map = {book_id:frozenset((f or '').upper() for f in fmts) for book_id, fmts in formats_map.iteritems()}
formats_map = {book_id:frozenset((f or '').upper() for f in fmts) for book_id, fmts in iteritems(formats_map)}
for book_id, fmts in formats_map.iteritems():
for book_id, fmts in iteritems(formats_map):
for fmt in fmts:
self.format_metadata_cache[book_id].pop(fmt, None)
if not db_only:
removes = defaultdict(set)
for book_id, fmts in formats_map.iteritems():
for book_id, fmts in iteritems(formats_map):
try:
path = self._field_for('path', book_id).replace('/', os.sep)
except:
@ -1458,7 +1458,7 @@ class Cache(object):
size_map = table.remove_formats(formats_map, self.backend)
self.fields['size'].table.update_sizes(size_map)
self._update_last_modified(tuple(formats_map.iterkeys()))
self._update_last_modified(tuple(iterkeys(formats_map)))
@read_api
def get_next_series_num_for(self, series, field='series', current_indices=False):
@ -1481,7 +1481,7 @@ class Cache(object):
index_map = {book_id:self._fast_field_for(idf, book_id, default_value=1.0) for book_id in books}
if current_indices:
return index_map
series_indices = sorted(index_map.itervalues())
series_indices = sorted(itervalues(index_map))
return _get_next_series_num_for_list(tuple(series_indices), unwrap=False)
@read_api
@ -1491,7 +1491,7 @@ class Cache(object):
string. '''
table = self.fields['authors'].table
result = []
rmap = {key_func(v):k for k, v in table.id_map.iteritems()}
rmap = {key_func(v):k for k, v in iteritems(table.id_map)}
for aut in authors:
aid = rmap.get(key_func(aut), None)
result.append(author_to_author_sort(aut) if aid is None else table.asort_map[aid])
@ -1503,10 +1503,10 @@ class Cache(object):
implementation of :meth:`has_book` in a worker process without access to the
db. '''
try:
return {icu_lower(title) for title in self.fields['title'].table.book_col_map.itervalues()}
return {icu_lower(title) for title in itervalues(self.fields['title'].table.book_col_map)}
except TypeError:
# Some non-unicode titles in the db
return {icu_lower(as_unicode(title)) for title in self.fields['title'].table.book_col_map.itervalues()}
return {icu_lower(as_unicode(title)) for title in itervalues(self.fields['title'].table.book_col_map)}
@read_api
def has_book(self, mi):
@ -1518,7 +1518,7 @@ class Cache(object):
if isbytestring(title):
title = title.decode(preferred_encoding, 'replace')
q = icu_lower(title).strip()
for title in self.fields['title'].table.book_col_map.itervalues():
for title in itervalues(self.fields['title'].table.book_col_map):
if q == icu_lower(title):
return True
return False
@ -1599,7 +1599,7 @@ class Cache(object):
duplicates.append((mi, format_map))
else:
ids.append(book_id)
for fmt, stream_or_path in format_map.iteritems():
for fmt, stream_or_path in iteritems(format_map):
if self.add_format(book_id, fmt, stream_or_path, dbapi=dbapi, run_hooks=run_hooks):
fmt_map[fmt.lower()] = getattr(stream_or_path, 'name', stream_or_path) or '<stream>'
run_plugins_on_postadd(dbapi or self, book_id, fmt_map)
@ -1618,11 +1618,11 @@ class Cache(object):
path = None
path_map[book_id] = path
if iswindows:
paths = (x.replace(os.sep, '/') for x in path_map.itervalues() if x)
paths = (x.replace(os.sep, '/') for x in itervalues(path_map) if x)
self.backend.windows_check_if_files_in_use(paths)
self.backend.remove_books(path_map, permanent=permanent)
for field in self.fields.itervalues():
for field in itervalues(self.fields):
try:
table = field.table
except AttributeError:
@ -1665,7 +1665,7 @@ class Cache(object):
restrict_to_book_ids = frozenset(restrict_to_book_ids)
id_map = {}
default_process_map = {}
for old_id, new_name in item_id_to_new_name_map.iteritems():
for old_id, new_name in iteritems(item_id_to_new_name_map):
new_names = tuple(x.strip() for x in new_name.split(sv)) if sv else (new_name,)
# Get a list of books in the VL with the item
books_with_id = f.books_for(old_id)
@ -1720,7 +1720,7 @@ class Cache(object):
raise ValueError('Cannot rename items for one-one fields: %s' % field)
moved_books = set()
id_map = {}
for item_id, new_name in item_id_to_new_name_map.iteritems():
for item_id, new_name in iteritems(item_id_to_new_name_map):
new_names = tuple(x.strip() for x in new_name.split(sv)) if sv else (new_name,)
books, new_id = func(item_id, new_names[0], self.backend)
affected_books.update(books)
@ -1735,7 +1735,7 @@ class Cache(object):
if affected_books:
if field == 'authors':
self._set_field('author_sort',
{k:' & '.join(v) for k, v in self._author_sort_strings_for_books(affected_books).iteritems()})
{k:' & '.join(v) for k, v in iteritems(self._author_sort_strings_for_books(affected_books))})
self._update_path(affected_books, mark_as_dirtied=False)
elif change_index and hasattr(f, 'index_field') and tweaks['series_index_auto_increment'] != 'no_change':
for book_id in moved_books:
@ -1835,7 +1835,7 @@ class Cache(object):
insensitive).
'''
tag_map = {icu_lower(v):k for k, v in self._get_id_map('tags').iteritems()}
tag_map = {icu_lower(v):k for k, v in iteritems(self._get_id_map('tags'))}
tag = icu_lower(tag.strip())
mht = icu_lower(must_have_tag.strip()) if must_have_tag else None
tag_id, mht_id = tag_map.get(tag, None), tag_map.get(mht, None)
@ -1848,7 +1848,7 @@ class Cache(object):
tagged_books = tagged_books.intersection(self._books_for_field('tags', mht_id))
if tagged_books:
if must_have_authors is not None:
amap = {icu_lower(v):k for k, v in self._get_id_map('authors').iteritems()}
amap = {icu_lower(v):k for k, v in iteritems(self._get_id_map('authors'))}
books = None
for author in must_have_authors:
abooks = self._books_for_field('authors', amap.get(icu_lower(author), None))
@ -1934,7 +1934,7 @@ class Cache(object):
db. See db.utils for an implementation. '''
at = self.fields['authors'].table
author_map = defaultdict(set)
for aid, author in at.id_map.iteritems():
for aid, author in iteritems(at.id_map):
author_map[icu_lower(author)].add(aid)
return (author_map, at.col_book_map.copy(), self.fields['title'].table.book_col_map.copy(), self.fields['languages'].book_value_map.copy())
@ -2079,12 +2079,12 @@ class Cache(object):
def virtual_libraries_for_books(self, book_ids):
libraries = self._pref('virtual_libraries', {})
ans = {book_id:[] for book_id in book_ids}
for lib, expr in libraries.iteritems():
for lib, expr in iteritems(libraries):
books = self._search(expr) # We deliberately dont use book_ids as we want to use the search cache
for book in book_ids:
if book in books:
ans[book].append(lib)
return {k:tuple(sorted(v, key=sort_key)) for k, v in ans.iteritems()}
return {k:tuple(sorted(v, key=sort_key)) for k, v in iteritems(ans)}
@read_api
def user_categories_for_books(self, book_ids, proxy_metadata_map=None):
@ -2101,7 +2101,7 @@ class Cache(object):
for book_id in book_ids:
proxy_metadata = pmm.get(book_id) or self._get_proxy_metadata(book_id)
user_cat_vals = ans[book_id] = {}
for ucat, categories in user_cats.iteritems():
for ucat, categories in iteritems(user_cats):
user_cat_vals[ucat] = res = []
for name, cat, ign in categories:
try:
@ -2240,15 +2240,15 @@ def import_library(library_key, importer, library_path, progress=None, abort=Non
src.close()
cache = Cache(DB(library_path, load_user_formatter_functions=False))
cache.init()
format_data = {int(book_id):data for book_id, data in metadata['format_data'].iteritems()}
for i, (book_id, fmt_key_map) in enumerate(format_data.iteritems()):
format_data = {int(book_id):data for book_id, data in iteritems(metadata['format_data'])}
for i, (book_id, fmt_key_map) in enumerate(iteritems(format_data)):
if abort is not None and abort.is_set():
return
title = cache._field_for('title', book_id)
if progress is not None:
progress(title, i + 1, total)
cache._update_path((book_id,), mark_as_dirtied=False)
for fmt, fmtkey in fmt_key_map.iteritems():
for fmt, fmtkey in iteritems(fmt_key_map):
if fmt == '.cover':
stream = importer.start_file(fmtkey, _('Cover for %s') % title)
path = cache._field_for('path', book_id).replace('/', os.sep)

View File

@ -9,7 +9,7 @@ __docformat__ = 'restructuredtext en'
import copy
from functools import partial
from polyglot.builtins import unicode_type, map
from polyglot.builtins import iteritems, iterkeys, unicode_type, map
from calibre.constants import ispy3
from calibre.ebooks.metadata import author_to_author_sort
@ -75,7 +75,7 @@ class Tag(object):
def find_categories(field_metadata):
for category, cat in field_metadata.iteritems():
for category, cat in iteritems(field_metadata):
if (cat['is_category'] and cat['kind'] not in {'user', 'search'}):
yield (category, cat['is_multiple'].get('cache_to_list', None), False)
elif (cat['datatype'] == 'composite' and
@ -215,11 +215,11 @@ def get_categories(dbcache, sort='name', book_ids=None, first_letter_sort=False)
# do the verification in the category loop much faster, at the cost of
# temporarily duplicating the categories lists.
taglist = {}
for c, items in categories.iteritems():
for c, items in iteritems(categories):
taglist[c] = dict(map(lambda t:(icu_lower(t.name), t), items))
# Add the category values to the user categories
for user_cat in sorted(user_categories.iterkeys(), key=sort_key):
for user_cat in sorted(iterkeys(user_categories), key=sort_key):
items = []
names_seen = {}
user_cat_is_gst = user_cat in gst

View File

@ -7,6 +7,7 @@ from __future__ import absolute_import, division, print_function, unicode_litera
from pprint import pformat
from calibre import prints
from polyglot.builtins import iteritems
readonly = True
version = 0 # change this if you change signature of implementation()
@ -37,7 +38,7 @@ List available custom columns. Shows column labels and ids.
def main(opts, args, dbctx):
for col, data in dbctx.run('custom_columns').iteritems():
for col, data in iteritems(dbctx.run('custom_columns')):
if opts.details:
prints(col)
print()

View File

@ -13,6 +13,7 @@ from calibre import prints
from calibre.db.cli.utils import str_width
from calibre.ebooks.metadata import authors_to_string
from calibre.utils.date import isoformat
from polyglot.builtins import iteritems
readonly = True
version = 0 # change this if you change signature of implementation()
@ -64,7 +65,7 @@ def implementation(
continue
if field == 'isbn':
x = db.all_field_for('identifiers', book_ids, default_value={})
data[field] = {k: v.get('isbn') or '' for k, v in x.iteritems()}
data[field] = {k: v.get('isbn') or '' for k, v in iteritems(x)}
continue
field = field.replace('*', '#')
metadata[field] = fm[field]
@ -80,37 +81,37 @@ def implementation(
def stringify(data, metadata, for_machine):
for field, m in metadata.iteritems():
for field, m in iteritems(metadata):
if field == 'authors':
data[field] = {
k: authors_to_string(v)
for k, v in data[field].iteritems()
for k, v in iteritems(data[field])
}
else:
dt = m['datatype']
if dt == 'datetime':
data[field] = {
k: isoformat(v, as_utc=for_machine) if v else 'None'
for k, v in data[field].iteritems()
for k, v in iteritems(data[field])
}
elif not for_machine:
ism = m['is_multiple']
if ism:
data[field] = {
k: ism['list_to_ui'].join(v)
for k, v in data[field].iteritems()
for k, v in iteritems(data[field])
}
if field == 'formats':
data[field] = {
k: '[' + v + ']'
for k, v in data[field].iteritems()
for k, v in iteritems(data[field])
}
def as_machine_data(book_ids, data, metadata):
for book_id in book_ids:
ans = {'id': book_id}
for field, val_map in data.iteritems():
for field, val_map in iteritems(data):
val = val_map.get(book_id)
if val is not None:
ans[field.replace('#', '*')] = val

View File

@ -9,6 +9,7 @@ version = 0 # change this if you change signature of implementation()
from calibre import prints
from calibre.srv.changes import saved_searches
from polyglot.builtins import iteritems
def implementation(db, notify_changes, action, *args):
@ -56,7 +57,7 @@ Syntax for removing:
def main(opts, args, dbctx):
args = args or ['list']
if args[0] == 'list':
for name, value in dbctx.run('saved_searches', 'list').iteritems():
for name, value in iteritems(dbctx.run('saved_searches', 'list')):
prints(_('Name:'), name)
prints(_('Search string:'), value)
print()

View File

@ -11,7 +11,7 @@ from calibre.ebooks.metadata.book.base import field_from_string
from calibre.ebooks.metadata.book.serialize import read_cover
from calibre.ebooks.metadata.opf import get_metadata
from calibre.srv.changes import metadata
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
readonly = False
version = 0 # change this if you change signature of implementation()
@ -170,7 +170,7 @@ def main(opts, args, dbctx):
vals[field] = val
fvals = []
for field, val in sorted( # ensure series_index fields are set last
vals.iteritems(), key=lambda k: 1 if k[0].endswith('_index') else 0):
iteritems(vals), key=lambda k: 1 if k[0].endswith('_index') else 0):
if field.endswith('_index'):
try:
val = float(val)

View File

@ -13,14 +13,14 @@ import csv
import unittest
from cStringIO import StringIO
from calibre.db.cli.cmd_check_library import _print_check_library_results
from polyglot.builtins import iteritems
class Checker(object):
def __init__(self, kw):
for k, v in kw.iteritems():
for k, v in iteritems(kw):
setattr(self, k, v)

View File

@ -2,7 +2,6 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
# from polyglot.builtins import map
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
@ -20,6 +19,7 @@ from calibre.utils.config_base import tweaks
from calibre.utils.icu import sort_key
from calibre.utils.date import UNDEFINED_DATE, clean_date_for_sort, parse_date
from calibre.utils.localization import calibre_langcode_to_name
from polyglot.builtins import iteritems, iterkeys
def bool_sort_key(bools_are_tristate):
@ -150,7 +150,7 @@ class Field(object):
id_map = self.table.id_map
special_sort = hasattr(self, 'category_sort_value')
for item_id, item_book_ids in self.table.col_book_map.iteritems():
for item_id, item_book_ids in iteritems(self.table.col_book_map):
if book_ids is not None:
item_book_ids = item_book_ids.intersection(book_ids)
if item_book_ids:
@ -184,7 +184,7 @@ class OneToOneField(Field):
return {item_id}
def __iter__(self):
return self.table.book_col_map.iterkeys()
return iterkeys(self.table.book_col_map)
def sort_keys_for_books(self, get_metadata, lang_map):
bcmg = self.table.book_col_map.get
@ -315,7 +315,7 @@ class CompositeField(OneToOneField):
for v in vals:
if v:
val_map[v].add(book_id)
for val, book_ids in val_map.iteritems():
for val, book_ids in iteritems(val_map):
yield val, book_ids
def get_composite_categories(self, tag_class, book_rating_map, book_ids,
@ -328,7 +328,7 @@ class CompositeField(OneToOneField):
for val in vals:
if val:
id_map[val].add(book_id)
for item_id, item_book_ids in id_map.iteritems():
for item_id, item_book_ids in iteritems(id_map):
ratings = tuple(r for r in (book_rating_map.get(book_id, 0) for
book_id in item_book_ids) if r > 0)
avg = sum(ratings)/len(ratings) if ratings else 0
@ -409,7 +409,7 @@ class OnDeviceField(OneToOneField):
val_map = defaultdict(set)
for book_id in candidates:
val_map[self.for_book(book_id, default_value=default_value)].add(book_id)
for val, book_ids in val_map.iteritems():
for val, book_ids in iteritems(val_map):
yield val, book_ids
@ -456,7 +456,7 @@ class ManyToOneField(Field):
return self.table.col_book_map.get(item_id, set())
def __iter__(self):
return self.table.id_map.iterkeys()
return iterkeys(self.table.id_map)
def sort_keys_for_books(self, get_metadata, lang_map):
sk_map = LazySortMap(self._default_sort_key, self._sort_key, self.table.id_map)
@ -466,7 +466,7 @@ class ManyToOneField(Field):
def iter_searchable_values(self, get_metadata, candidates, default_value=None):
cbm = self.table.col_book_map
empty = set()
for item_id, val in self.table.id_map.iteritems():
for item_id, val in iteritems(self.table.id_map):
book_ids = cbm.get(item_id, empty).intersection(candidates)
if book_ids:
yield val, book_ids
@ -475,7 +475,7 @@ class ManyToOneField(Field):
def book_value_map(self):
try:
return {book_id:self.table.id_map[item_id] for book_id, item_id in
self.table.book_col_map.iteritems()}
iteritems(self.table.book_col_map)}
except KeyError:
raise InvalidLinkTable(self.name)
@ -507,7 +507,7 @@ class ManyToManyField(Field):
return self.table.col_book_map.get(item_id, set())
def __iter__(self):
return self.table.id_map.iterkeys()
return iterkeys(self.table.id_map)
def sort_keys_for_books(self, get_metadata, lang_map):
sk_map = LazySortMap(self._default_sort_key, self._sort_key, self.table.id_map)
@ -524,7 +524,7 @@ class ManyToManyField(Field):
def iter_searchable_values(self, get_metadata, candidates, default_value=None):
cbm = self.table.col_book_map
empty = set()
for item_id, val in self.table.id_map.iteritems():
for item_id, val in iteritems(self.table.id_map):
book_ids = cbm.get(item_id, empty).intersection(candidates)
if book_ids:
yield val, book_ids
@ -534,14 +534,14 @@ class ManyToManyField(Field):
cbm = self.table.book_col_map
for book_id in candidates:
val_map[len(cbm.get(book_id, ()))].add(book_id)
for count, book_ids in val_map.iteritems():
for count, book_ids in iteritems(val_map):
yield count, book_ids
@property
def book_value_map(self):
try:
return {book_id:tuple(self.table.id_map[item_id] for item_id in item_ids)
for book_id, item_ids in self.table.book_col_map.iteritems()}
for book_id, item_ids in iteritems(self.table.book_col_map)}
except KeyError:
raise InvalidLinkTable(self.name)
@ -561,7 +561,7 @@ class IdentifiersField(ManyToManyField):
'Sort by identifier keys'
bcmg = self.table.book_col_map.get
dv = {self._default_sort_key:None}
return lambda book_id: tuple(sorted(bcmg(book_id, dv).iterkeys()))
return lambda book_id: tuple(sorted(iterkeys(bcmg(book_id, dv))))
def iter_searchable_values(self, get_metadata, candidates, default_value=()):
bcm = self.table.book_col_map
@ -573,7 +573,7 @@ class IdentifiersField(ManyToManyField):
def get_categories(self, tag_class, book_rating_map, lang_map, book_ids=None):
ans = []
for id_key, item_book_ids in self.table.col_book_map.iteritems():
for id_key, item_book_ids in iteritems(self.table.col_book_map):
if book_ids is not None:
item_book_ids = item_book_ids.intersection(book_ids)
if item_book_ids:
@ -618,13 +618,13 @@ class FormatsField(ManyToManyField):
for val in vals:
val_map[val].add(book_id)
for val, book_ids in val_map.iteritems():
for val, book_ids in iteritems(val_map):
yield val, book_ids
def get_categories(self, tag_class, book_rating_map, lang_map, book_ids=None):
ans = []
for fmt, item_book_ids in self.table.col_book_map.iteritems():
for fmt, item_book_ids in iteritems(self.table.col_book_map):
if book_ids is not None:
item_book_ids = item_book_ids.intersection(book_ids)
if item_book_ids:
@ -665,7 +665,7 @@ class SeriesField(ManyToOneField):
return ssk(ts(val, order=sso, lang=lang))
sk_map = LazySeriesSortMap(self._default_sort_key, sk, self.table.id_map)
bcmg = self.table.book_col_map.get
lang_map = {k:v[0] if v else None for k, v in lang_map.iteritems()}
lang_map = {k:v[0] if v else None for k, v in iteritems(lang_map)}
def key(book_id):
lang = lang_map.get(book_id, None)
@ -694,8 +694,8 @@ class SeriesField(ManyToOneField):
sso = tweaks['title_series_sorting']
ts = title_sort
empty = set()
lang_map = {k:v[0] if v else None for k, v in lang_map.iteritems()}
for item_id, val in self.table.id_map.iteritems():
lang_map = {k:v[0] if v else None for k, v in iteritems(lang_map)}
for item_id, val in iteritems(self.table.id_map):
book_ids = cbm.get(item_id, empty).intersection(candidates)
if book_ids:
lang_counts = Counter()
@ -712,7 +712,7 @@ class TagsField(ManyToManyField):
def get_news_category(self, tag_class, book_ids=None):
news_id = None
ans = []
for item_id, val in self.table.id_map.iteritems():
for item_id, val in iteritems(self.table.id_map):
if val == _('News'):
news_id = item_id
break
@ -724,7 +724,7 @@ class TagsField(ManyToManyField):
news_books = news_books.intersection(book_ids)
if not news_books:
return ans
for item_id, item_book_ids in self.table.col_book_map.iteritems():
for item_id, item_book_ids in iteritems(self.table.col_book_map):
item_book_ids = item_book_ids.intersection(news_books)
if item_book_ids:
name = self.category_formatter(self.table.id_map[item_id])

View File

@ -15,7 +15,7 @@ from copy import deepcopy
from calibre.ebooks.metadata.book.base import Metadata, SIMPLE_GET, TOP_LEVEL_IDENTIFIERS, NULL_VALUES, ALL_METADATA_FIELDS
from calibre.ebooks.metadata.book.formatter import SafeFormat
from calibre.utils.date import utcnow
from polyglot.builtins import unicode_type
from polyglot.builtins import iterkeys, unicode_type
# Lazy format metadata retrieval {{{
'''
@ -393,7 +393,7 @@ class ProxyMetadata(Metadata):
def all_field_keys(self):
um = ga(self, '_user_metadata')
return frozenset(ALL_METADATA_FIELDS.union(um.iterkeys()))
return frozenset(ALL_METADATA_FIELDS.union(iterkeys(um)))
@property
def _proxy_metadata(self):

View File

@ -7,7 +7,7 @@ __license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os, traceback, types
from polyglot.builtins import zip
from polyglot.builtins import iteritems, zip
from calibre import force_unicode, isbytestring
from calibre.constants import preferred_encoding
@ -171,14 +171,14 @@ class LibraryDatabase(object):
return not bool(self.new_api.fields['title'].table.book_col_map)
def get_usage_count_by_id(self, field):
return [[k, v] for k, v in self.new_api.get_usage_count_by_id(field).iteritems()]
return [[k, v] for k, v in iteritems(self.new_api.get_usage_count_by_id(field))]
def field_id_map(self, field):
return [(k, v) for k, v in self.new_api.get_id_map(field).iteritems()]
return [(k, v) for k, v in iteritems(self.new_api.get_id_map(field))]
def get_custom_items_with_ids(self, label=None, num=None):
try:
return [[k, v] for k, v in self.new_api.get_id_map(self.custom_field_name(label, num)).iteritems()]
return [[k, v] for k, v in iteritems(self.new_api.get_id_map(self.custom_field_name(label, num)))]
except ValueError:
return []
@ -233,7 +233,7 @@ class LibraryDatabase(object):
paths, formats, metadata = [], [], []
for mi, format_map in duplicates:
metadata.append(mi)
for fmt, path in format_map.iteritems():
for fmt, path in iteritems(format_map):
formats.append(fmt)
paths.append(path)
duplicates = (paths, formats, metadata)
@ -416,7 +416,7 @@ class LibraryDatabase(object):
ans = set()
if title:
title = icu_lower(force_unicode(title))
for book_id, x in self.new_api.get_id_map('title').iteritems():
for book_id, x in iteritems(self.new_api.get_id_map('title')):
if icu_lower(x) == title:
ans.add(book_id)
if not all_matches:
@ -521,7 +521,7 @@ class LibraryDatabase(object):
def delete_tags(self, tags):
with self.new_api.write_lock:
tag_map = {icu_lower(v):k for k, v in self.new_api._get_id_map('tags').iteritems()}
tag_map = {icu_lower(v):k for k, v in iteritems(self.new_api._get_id_map('tags'))}
tag_ids = (tag_map.get(icu_lower(tag), None) for tag in tags)
tag_ids = tuple(tid for tid in tag_ids if tid is not None)
if tag_ids:
@ -547,7 +547,7 @@ class LibraryDatabase(object):
def format_files(self, index, index_is_id=False):
book_id = index if index_is_id else self.id(index)
return [(v, k) for k, v in self.new_api.format_files(book_id).iteritems()]
return [(v, k) for k, v in iteritems(self.new_api.format_files(book_id))]
def format_metadata(self, book_id, fmt, allow_cache=True, update_db=False, commit=False):
return self.new_api.format_metadata(book_id, fmt, allow_cache=allow_cache, update_db=update_db)
@ -632,7 +632,7 @@ class LibraryDatabase(object):
def delete_item_from_multiple(self, item, label=None, num=None):
field = self.custom_field_name(label, num)
existing = self.new_api.get_id_map(field)
rmap = {icu_lower(v):k for k, v in existing.iteritems()}
rmap = {icu_lower(v):k for k, v in iteritems(existing)}
item_id = rmap.get(icu_lower(item), None)
if item_id is None:
return []
@ -854,7 +854,7 @@ for field in ('authors', 'tags', 'publisher', 'series'):
LibraryDatabase.all_formats = MT(lambda self:self.new_api.all_field_names('formats'))
LibraryDatabase.all_custom = MT(lambda self, label=None, num=None:self.new_api.all_field_names(self.custom_field_name(label, num)))
for func, field in {'all_authors':'authors', 'all_titles':'title', 'all_tags2':'tags', 'all_series':'series', 'all_publishers':'publisher'}.iteritems():
for func, field in iteritems({'all_authors':'authors', 'all_titles':'title', 'all_tags2':'tags', 'all_series':'series', 'all_publishers':'publisher'}):
def getter(field):
def func(self):
return self.field_id_map(field)
@ -864,16 +864,16 @@ for func, field in {'all_authors':'authors', 'all_titles':'title', 'all_tags2':'
LibraryDatabase.all_tags = MT(lambda self: list(self.all_tag_names()))
LibraryDatabase.get_all_identifier_types = MT(lambda self: list(self.new_api.fields['identifiers'].table.all_identifier_types()))
LibraryDatabase.get_authors_with_ids = MT(
lambda self: [[aid, adata['name'], adata['sort'], adata['link']] for aid, adata in self.new_api.author_data().iteritems()])
lambda self: [[aid, adata['name'], adata['sort'], adata['link']] for aid, adata in iteritems(self.new_api.author_data())])
LibraryDatabase.get_author_id = MT(
lambda self, author: {icu_lower(v):k for k, v in self.new_api.get_id_map('authors').iteritems()}.get(icu_lower(author), None))
lambda self, author: {icu_lower(v):k for k, v in iteritems(self.new_api.get_id_map('authors'))}.get(icu_lower(author), None))
for field in ('tags', 'series', 'publishers', 'ratings', 'languages'):
def getter(field):
fname = field[:-1] if field in {'publishers', 'ratings'} else field
def func(self):
return [[tid, tag] for tid, tag in self.new_api.get_id_map(fname).iteritems()]
return [[tid, tag] for tid, tag in iteritems(self.new_api.get_id_map(fname))]
return func
setattr(LibraryDatabase, 'get_%s_with_ids' % field, MT(getter(field)))

View File

@ -16,6 +16,7 @@ from calibre.db.cache import Cache
from calibre.constants import filesystem_encoding
from calibre.utils.date import utcfromtimestamp
from calibre import isbytestring, force_unicode
from polyglot.builtins import iteritems
NON_EBOOK_EXTENSIONS = frozenset([
'jpg', 'jpeg', 'gif', 'png', 'bmp',
@ -206,7 +207,7 @@ class Restore(Thread):
self.mismatched_dirs.append(dirpath)
alm = mi.get('author_link_map', {})
for author, link in alm.iteritems():
for author, link in iteritems(alm):
existing_link, timestamp = self.authors_links.get(author, (None, None))
if existing_link is None or existing_link != link and timestamp < mi.timestamp:
self.authors_links[author] = (link, mi.timestamp)
@ -259,7 +260,7 @@ class Restore(Thread):
self.progress_callback(book['mi'].title, i+1)
id_map = db.get_item_ids('authors', [author for author in self.authors_links])
link_map = {aid:self.authors_links[name][0] for name, aid in id_map.iteritems() if aid is not None}
link_map = {aid:self.authors_links[name][0] for name, aid in iteritems(id_map) if aid is not None}
if link_map:
db.set_link_for_authors(link_map)
db.close()

View File

@ -11,7 +11,7 @@ import os
from calibre import prints
from calibre.utils.date import isoformat, DEFAULT_DATE
from polyglot.builtins import unicode_type
from polyglot.builtins import iterkeys, itervalues, unicode_type
class SchemaUpgrade(object):
@ -24,7 +24,7 @@ class SchemaUpgrade(object):
# Upgrade database
try:
while True:
uv = self.db.execute('pragma user_version').next()[0]
uv = next(self.db.execute('pragma user_version'))[0]
meth = getattr(self, 'upgrade_version_%d'%uv, None)
if meth is None:
break
@ -299,7 +299,7 @@ class SchemaUpgrade(object):
'''.format(tn=table_name, cn=column_name, vcn=view_column_name))
self.db.execute(script)
for field in self.field_metadata.itervalues():
for field in itervalues(self.field_metadata):
if field['is_category'] and not field['is_custom'] and 'link_column' in field:
table = self.db.get(
'SELECT name FROM sqlite_master WHERE type="table" AND name=?',
@ -375,7 +375,7 @@ class SchemaUpgrade(object):
'''.format(lt=link_table_name, table=table_name)
self.db.execute(script)
for field in self.field_metadata.itervalues():
for field in itervalues(self.field_metadata):
if field['is_category'] and not field['is_custom'] and 'link_column' in field:
table = self.db.get(
'SELECT name FROM sqlite_master WHERE type="table" AND name=?',
@ -596,7 +596,7 @@ class SchemaUpgrade(object):
custom_recipe_filename)
bdir = os.path.dirname(custom_recipes.file_path)
for id_, title, script in recipes:
existing = frozenset(map(int, custom_recipes.iterkeys()))
existing = frozenset(map(int, iterkeys(custom_recipes)))
if id_ in existing:
id_ = max(existing) + 1000
id_ = str(id_)

View File

@ -19,7 +19,7 @@ from calibre.utils.date import parse_date, UNDEFINED_DATE, now, dt_as_local
from calibre.utils.icu import primary_contains, sort_key
from calibre.utils.localization import lang_map, canonicalize_lang
from calibre.utils.search_query_parser import SearchQueryParser, ParseException
from polyglot.builtins import unicode_type, string_or_bytes
from polyglot.builtins import iteritems, iterkeys, unicode_type, string_or_bytes
CONTAINS_MATCH = 0
EQUALS_MATCH = 1
@ -167,7 +167,7 @@ class DateSearch(object): # {{{
matches |= book_ids
return matches
for k, relop in self.operators.iteritems():
for k, relop in iteritems(self.operators):
if query.startswith(k):
query = query[len(k):]
break
@ -254,7 +254,7 @@ class NumericSearch(object): # {{{
else:
relop = lambda x,y: x is not None
else:
for k, relop in self.operators.iteritems():
for k, relop in iteritems(self.operators):
if query.startswith(k):
query = query[len(k):]
break
@ -372,7 +372,7 @@ class KeyPairSearch(object): # {{{
return found if valq == 'true' else candidates - found
for m, book_ids in field_iter():
for key, val in m.iteritems():
for key, val in iteritems(m):
if (keyq and not _match(keyq, (key,), keyq_mkind,
use_primary_find_in_search=use_primary_find)):
continue
@ -445,7 +445,7 @@ class SavedSearchQueries(object): # {{{
db._set_pref(self.opt_name, smap)
def names(self):
return sorted(self.queries.iterkeys(), key=sort_key)
return sorted(iterkeys(self.queries), key=sort_key)
# }}}
@ -632,7 +632,7 @@ class Parser(SearchQueryParser): # {{{
text_fields = set()
field_metadata = {}
for x, fm in self.field_metadata.iteritems():
for x, fm in iteritems(self.field_metadata):
if x.startswith('@'):
continue
if fm['search_terms'] and x not in {'series_sort', 'id'}:
@ -670,7 +670,7 @@ class Parser(SearchQueryParser): # {{{
q = canonicalize_lang(query)
if q is None:
lm = lang_map()
rm = {v.lower():k for k,v in lm.iteritems()}
rm = {v.lower():k for k,v in iteritems(lm)}
q = rm.get(query, query)
if matchkind == CONTAINS_MATCH and q.lower() in {'true', 'false'}:
@ -799,7 +799,7 @@ class LRUCache(object): # {{{
return self.get(key)
def __iter__(self):
return self.item_map.iteritems()
return iteritems(self.item_map)
# }}}

View File

@ -14,7 +14,7 @@ from collections import defaultdict
from calibre.constants import plugins
from calibre.utils.date import parse_date, UNDEFINED_DATE, utc_tz
from calibre.ebooks.metadata import author_to_author_sort
from polyglot.builtins import range
from polyglot.builtins import iteritems, itervalues, range
_c_speedup = plugins['speedup'][0].parse_date
@ -154,10 +154,10 @@ class UUIDTable(OneToOneTable):
def read(self, db):
OneToOneTable.read(self, db)
self.uuid_to_id_map = {v:k for k, v in self.book_col_map.iteritems()}
self.uuid_to_id_map = {v:k for k, v in iteritems(self.book_col_map)}
def update_uuid_cache(self, book_id_val_map):
for book_id, uuid in book_id_val_map.iteritems():
for book_id, uuid in iteritems(book_id_val_map):
self.uuid_to_id_map.pop(self.book_col_map.get(book_id, None), None) # discard old uuid
self.uuid_to_id_map[uuid] = book_id
@ -226,7 +226,7 @@ class ManyToOneTable(Table):
bcm[book] = item_id
def fix_link_table(self, db):
linked_item_ids = {item_id for item_id in self.book_col_map.itervalues()}
linked_item_ids = {item_id for item_id in itervalues(self.book_col_map)}
extra_item_ids = linked_item_ids - set(self.id_map)
if extra_item_ids:
for item_id in extra_item_ids:
@ -238,10 +238,10 @@ class ManyToOneTable(Table):
def fix_case_duplicates(self, db):
case_map = defaultdict(set)
for item_id, val in self.id_map.iteritems():
for item_id, val in iteritems(self.id_map):
case_map[icu_lower(val)].add(item_id)
for v in case_map.itervalues():
for v in itervalues(case_map):
if len(v) > 1:
main_id = min(v)
v.discard(main_id)
@ -322,7 +322,7 @@ class ManyToOneTable(Table):
return affected_books
def rename_item(self, item_id, new_name, db):
rmap = {icu_lower(v):k for k, v in self.id_map.iteritems()}
rmap = {icu_lower(v):k for k, v in iteritems(self.id_map)}
existing_item = rmap.get(icu_lower(new_name), None)
table, col, lcol = self.metadata['table'], self.metadata['column'], self.metadata['link_column']
affected_books = self.col_book_map.get(item_id, set())
@ -353,9 +353,9 @@ class RatingTable(ManyToOneTable):
ManyToOneTable.read_id_maps(self, db)
# Ensure there are no records with rating=0 in the table. These should
# be represented as rating:None instead.
bad_ids = {item_id for item_id, rating in self.id_map.iteritems() if rating == 0}
bad_ids = {item_id for item_id, rating in iteritems(self.id_map) if rating == 0}
if bad_ids:
self.id_map = {item_id:rating for item_id, rating in self.id_map.iteritems() if rating != 0}
self.id_map = {item_id:rating for item_id, rating in iteritems(self.id_map) if rating != 0}
db.executemany('DELETE FROM {0} WHERE {1}=?'.format(self.link_table, self.metadata['link_column']),
tuple((x,) for x in bad_ids))
db.execute('DELETE FROM {0} WHERE {1}=0'.format(
@ -382,10 +382,10 @@ class ManyToManyTable(ManyToOneTable):
cbm[item_id].add(book)
bcm[book].append(item_id)
self.book_col_map = {k:tuple(v) for k, v in bcm.iteritems()}
self.book_col_map = {k:tuple(v) for k, v in iteritems(bcm)}
def fix_link_table(self, db):
linked_item_ids = {item_id for item_ids in self.book_col_map.itervalues() for item_id in item_ids}
linked_item_ids = {item_id for item_ids in itervalues(self.book_col_map) for item_id in item_ids}
extra_item_ids = linked_item_ids - set(self.id_map)
if extra_item_ids:
for item_id in extra_item_ids:
@ -461,7 +461,7 @@ class ManyToManyTable(ManyToOneTable):
return affected_books
def rename_item(self, item_id, new_name, db):
rmap = {icu_lower(v):k for k, v in self.id_map.iteritems()}
rmap = {icu_lower(v):k for k, v in iteritems(self.id_map)}
existing_item = rmap.get(icu_lower(new_name), None)
table, col, lcol = self.metadata['table'], self.metadata['column'], self.metadata['link_column']
affected_books = self.col_book_map.get(item_id, set())
@ -490,10 +490,10 @@ class ManyToManyTable(ManyToOneTable):
def fix_case_duplicates(self, db):
from calibre.db.write import uniq
case_map = defaultdict(set)
for item_id, val in self.id_map.iteritems():
for item_id, val in iteritems(self.id_map):
case_map[icu_lower(val)].add(item_id)
for v in case_map.itervalues():
for v in itervalues(case_map):
if len(v) > 1:
done_books = set()
main_id = min(v)
@ -541,19 +541,19 @@ class AuthorsTable(ManyToManyTable):
lm[aid] = link
def set_sort_names(self, aus_map, db):
aus_map = {aid:(a or '').strip() for aid, a in aus_map.iteritems()}
aus_map = {aid:a for aid, a in aus_map.iteritems() if a != self.asort_map.get(aid, None)}
aus_map = {aid:(a or '').strip() for aid, a in iteritems(aus_map)}
aus_map = {aid:a for aid, a in iteritems(aus_map) if a != self.asort_map.get(aid, None)}
self.asort_map.update(aus_map)
db.executemany('UPDATE authors SET sort=? WHERE id=?',
[(v, k) for k, v in aus_map.iteritems()])
[(v, k) for k, v in iteritems(aus_map)])
return aus_map
def set_links(self, link_map, db):
link_map = {aid:(l or '').strip() for aid, l in link_map.iteritems()}
link_map = {aid:l for aid, l in link_map.iteritems() if l != self.alink_map.get(aid, None)}
link_map = {aid:(l or '').strip() for aid, l in iteritems(link_map)}
link_map = {aid:l for aid, l in iteritems(link_map) if l != self.alink_map.get(aid, None)}
self.alink_map.update(link_map)
db.executemany('UPDATE authors SET link=? WHERE id=?',
[(v, k) for k, v in link_map.iteritems()])
[(v, k) for k, v in iteritems(link_map)])
return link_map
def remove_books(self, book_ids, db):
@ -602,7 +602,7 @@ class FormatsTable(ManyToManyTable):
fnm[book][fmt] = name
sm[book][fmt] = sz
self.book_col_map = {k:tuple(sorted(v)) for k, v in bcm.iteritems()}
self.book_col_map = {k:tuple(sorted(v)) for k, v in iteritems(bcm)}
def remove_books(self, book_ids, db):
clean = ManyToManyTable.remove_books(self, book_ids, db)
@ -617,21 +617,21 @@ class FormatsTable(ManyToManyTable):
(fname, book_id, fmt))
def remove_formats(self, formats_map, db):
for book_id, fmts in formats_map.iteritems():
for book_id, fmts in iteritems(formats_map):
self.book_col_map[book_id] = [fmt for fmt in self.book_col_map.get(book_id, []) if fmt not in fmts]
for m in (self.fname_map, self.size_map):
m[book_id] = {k:v for k, v in m[book_id].iteritems() if k not in fmts}
m[book_id] = {k:v for k, v in iteritems(m[book_id]) if k not in fmts}
for fmt in fmts:
try:
self.col_book_map[fmt].discard(book_id)
except KeyError:
pass
db.executemany('DELETE FROM data WHERE book=? AND format=?',
[(book_id, fmt) for book_id, fmts in formats_map.iteritems() for fmt in fmts])
[(book_id, fmt) for book_id, fmts in iteritems(formats_map) for fmt in fmts])
def zero_max(book_id):
try:
return max(self.size_map[book_id].itervalues())
return max(itervalues(self.size_map[book_id]))
except ValueError:
return 0
@ -661,7 +661,7 @@ class FormatsTable(ManyToManyTable):
self.size_map[book_id][fmt] = size
db.execute('INSERT OR REPLACE INTO data (book,format,uncompressed_size,name) VALUES (?,?,?,?)',
(book_id, fmt, size, fname))
return max(self.size_map[book_id].itervalues())
return max(itervalues(self.size_map[book_id]))
class IdentifiersTable(ManyToManyTable):
@ -702,4 +702,4 @@ class IdentifiersTable(ManyToManyTable):
raise NotImplementedError('Cannot rename identifiers')
def all_identifier_types(self):
return frozenset(k for k, v in self.col_book_map.iteritems() if v)
return frozenset(k for k, v in iteritems(self.col_book_map) if v)

View File

@ -15,6 +15,7 @@ from datetime import timedelta
from calibre.db.tests.base import BaseTest, IMG
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.date import now, UNDEFINED_DATE
from polyglot.builtins import iteritems, itervalues
def import_test(replacement_data, replacement_fmt=None):
@ -217,14 +218,14 @@ class AddRemoveTest(BaseTest):
authors = cache.fields['authors'].table
# Delete a single book, with no formats and check cleaning
self.assertIn(_('Unknown'), set(authors.id_map.itervalues()))
self.assertIn(_('Unknown'), set(itervalues(authors.id_map)))
olen = len(authors.id_map)
item_id = {v:k for k, v in authors.id_map.iteritems()}[_('Unknown')]
item_id = {v:k for k, v in iteritems(authors.id_map)}[_('Unknown')]
cache.remove_books((3,))
for c in (cache, self.init_cache()):
table = c.fields['authors'].table
self.assertNotIn(3, c.all_book_ids())
self.assertNotIn(_('Unknown'), set(table.id_map.itervalues()))
self.assertNotIn(_('Unknown'), set(itervalues(table.id_map)))
self.assertNotIn(item_id, table.asort_map)
self.assertNotIn(item_id, table.alink_map)
ae(len(table.id_map), olen-1)
@ -235,17 +236,17 @@ class AddRemoveTest(BaseTest):
authorpath = os.path.dirname(bookpath)
os.mkdir(os.path.join(authorpath, '.DS_Store'))
open(os.path.join(authorpath, 'Thumbs.db'), 'wb').close()
item_id = {v:k for k, v in cache.fields['#series'].table.id_map.iteritems()}['My Series Two']
item_id = {v:k for k, v in iteritems(cache.fields['#series'].table.id_map)}['My Series Two']
cache.remove_books((1,), permanent=True)
for x in (fmtpath, bookpath, authorpath):
af(os.path.exists(x), 'The file %s exists, when it should not' % x)
for c in (cache, self.init_cache()):
table = c.fields['authors'].table
self.assertNotIn(1, c.all_book_ids())
self.assertNotIn('Author Two', set(table.id_map.itervalues()))
self.assertNotIn(6, set(c.fields['rating'].table.id_map.itervalues()))
self.assertIn('A Series One', set(c.fields['series'].table.id_map.itervalues()))
self.assertNotIn('My Series Two', set(c.fields['#series'].table.id_map.itervalues()))
self.assertNotIn('Author Two', set(itervalues(table.id_map)))
self.assertNotIn(6, set(itervalues(c.fields['rating'].table.id_map)))
self.assertIn('A Series One', set(itervalues(c.fields['series'].table.id_map)))
self.assertNotIn('My Series Two', set(itervalues(c.fields['#series'].table.id_map)))
self.assertNotIn(item_id, c.fields['#series'].table.col_book_map)
self.assertNotIn(1, c.fields['#series'].table.book_col_map)
@ -264,7 +265,7 @@ class AddRemoveTest(BaseTest):
fmtpath = cache.format_abspath(1, 'FMT1')
bookpath = os.path.dirname(fmtpath)
authorpath = os.path.dirname(bookpath)
item_id = {v:k for k, v in cache.fields['#series'].table.id_map.iteritems()}['My Series Two']
item_id = {v:k for k, v in iteritems(cache.fields['#series'].table.id_map)}['My Series Two']
cache.remove_books((1,))
delete_service().wait()
for x in (fmtpath, bookpath, authorpath):

View File

@ -13,6 +13,7 @@ from io import BytesIO
from calibre.constants import iswindows
from calibre.db.tests.base import BaseTest
from calibre.ptempfile import TemporaryDirectory
from polyglot.builtins import iterkeys
class FilesystemTest(BaseTest):
@ -55,7 +56,7 @@ class FilesystemTest(BaseTest):
cache2 = self.init_cache(cl)
for c in (cache, cache2):
data = self.get_filesystem_data(c, 1)
ae(set(orig_data.iterkeys()), set(data.iterkeys()))
ae(set(iterkeys(orig_data)), set(iterkeys(data)))
ae(orig_data, data, 'Filesystem data does not match')
ae(c.field_for('path', 1), 'Moved/Moved (1)')
ae(c.field_for('path', 3), 'Moved1/Moved1 (3)')

View File

@ -14,7 +14,7 @@ from operator import itemgetter
from calibre.library.field_metadata import fm_as_dict
from calibre.db.tests.base import BaseTest
from polyglot.builtins import range
from polyglot.builtins import iteritems, iterkeys, range
# Utils {{{
@ -81,7 +81,7 @@ class LegacyTest(BaseTest):
# We ignore the key rec_index, since it is not stable for
# custom columns (it is created by iterating over a dict)
return {k.decode('utf-8') if isinstance(k, bytes) else k:to_unicode(v)
for k, v in x.iteritems() if k != 'rec_index'}
for k, v in iteritems(x) if k != 'rec_index'}
return x
def get_props(db):
@ -108,7 +108,7 @@ class LegacyTest(BaseTest):
'Test the get_property interface for reading data'
def get_values(db):
ans = {}
for label, loc in db.FIELD_MAP.iteritems():
for label, loc in iteritems(db.FIELD_MAP):
if isinstance(label, numbers.Integral):
label = '#'+db.custom_column_num_map[label]['label']
label = type('')(label)
@ -186,7 +186,7 @@ class LegacyTest(BaseTest):
self.assertEqual(dict(db.prefs), dict(ndb.prefs))
for meth, args in {
for meth, args in iteritems({
'find_identical_books': [(Metadata('title one', ['author one']),), (Metadata('unknown'),), (Metadata('xxxx'),)],
'get_books_for_category': [('tags', newstag), ('#formats', 'FMT1')],
'get_next_series_num_for': [('A Series One',)],
@ -251,7 +251,7 @@ class LegacyTest(BaseTest):
'book_on_device_string':[(1,), (2,), (3,)],
'books_in_series_of':[(0,), (1,), (2,)],
'books_with_same_title':[(Metadata(db.title(0)),), (Metadata(db.title(1)),), (Metadata('1234'),)],
}.iteritems():
}):
fmt = lambda x: x
if meth[0] in {'!', '@'}:
fmt = {'!':dict, '@':frozenset}[meth[0]]
@ -277,8 +277,8 @@ class LegacyTest(BaseTest):
old = db.get_data_as_dict(prefix='test-prefix')
new = ndb.get_data_as_dict(prefix='test-prefix')
for o, n in zip(old, new):
o = {type('')(k) if isinstance(k, bytes) else k:set(v) if isinstance(v, list) else v for k, v in o.iteritems()}
n = {k:set(v) if isinstance(v, list) else v for k, v in n.iteritems()}
o = {type('')(k) if isinstance(k, bytes) else k:set(v) if isinstance(v, list) else v for k, v in iteritems(o)}
n = {k:set(v) if isinstance(v, list) else v for k, v in iteritems(n)}
self.assertEqual(o, n)
ndb.search('title:Unknown')
@ -316,9 +316,9 @@ class LegacyTest(BaseTest):
db = self.init_old()
cache = ndb.new_api
tmap = cache.get_id_map('tags')
t = next(tmap.iterkeys())
t = next(iterkeys(tmap))
pmap = cache.get_id_map('publisher')
p = next(pmap.iterkeys())
p = next(iterkeys(pmap))
run_funcs(self, db, ndb, (
('delete_tag_using_id', t),
('delete_publisher_using_id', p),
@ -647,10 +647,10 @@ class LegacyTest(BaseTest):
ndb = self.init_legacy(self.cloned_library)
db = self.init_old(self.cloned_library)
a = {v:k for k, v in ndb.new_api.get_id_map('authors').iteritems()}['Author One']
t = {v:k for k, v in ndb.new_api.get_id_map('tags').iteritems()}['Tag One']
s = {v:k for k, v in ndb.new_api.get_id_map('series').iteritems()}['A Series One']
p = {v:k for k, v in ndb.new_api.get_id_map('publisher').iteritems()}['Publisher One']
a = {v:k for k, v in iteritems(ndb.new_api.get_id_map('authors'))}['Author One']
t = {v:k for k, v in iteritems(ndb.new_api.get_id_map('tags'))}['Tag One']
s = {v:k for k, v in iteritems(ndb.new_api.get_id_map('series'))}['A Series One']
p = {v:k for k, v in iteritems(ndb.new_api.get_id_map('publisher'))}['Publisher One']
run_funcs(self, db, ndb, (
('rename_author', a, 'Author Two'),
('rename_tag', t, 'News'),
@ -688,11 +688,11 @@ class LegacyTest(BaseTest):
run_funcs(self, db, ndb, [(func, idx, label) for idx in range(3)])
# Test renaming/deleting
t = {v:k for k, v in ndb.new_api.get_id_map('#tags').iteritems()}['My Tag One']
t2 = {v:k for k, v in ndb.new_api.get_id_map('#tags').iteritems()}['My Tag Two']
a = {v:k for k, v in ndb.new_api.get_id_map('#authors').iteritems()}['My Author Two']
a2 = {v:k for k, v in ndb.new_api.get_id_map('#authors').iteritems()}['Custom One']
s = {v:k for k, v in ndb.new_api.get_id_map('#series').iteritems()}['My Series One']
t = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#tags'))}['My Tag One']
t2 = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#tags'))}['My Tag Two']
a = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#authors'))}['My Author Two']
a2 = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#authors'))}['Custom One']
s = {v:k for k, v in iteritems(ndb.new_api.get_id_map('#series'))}['My Series One']
run_funcs(self, db, ndb, (
('delete_custom_item_using_id', t, 'tags'),
('delete_custom_item_using_id', a, 'authors'),

View File

@ -13,7 +13,7 @@ from time import time
from calibre.utils.date import utc_tz
from calibre.db.tests.base import BaseTest
from polyglot.builtins import range
from polyglot.builtins import iteritems, iterkeys, itervalues, range
class ReadingTest(BaseTest):
@ -116,8 +116,8 @@ class ReadingTest(BaseTest):
},
}
for book_id, test in tests.iteritems():
for field, expected_val in test.iteritems():
for book_id, test in iteritems(tests):
for field, expected_val in iteritems(test):
val = cache.field_for(field, book_id)
if isinstance(val, tuple) and 'authors' not in field and 'languages' not in field:
val, expected_val = set(val), set(expected_val)
@ -130,7 +130,7 @@ class ReadingTest(BaseTest):
'Test sorting'
cache = self.init_cache()
ae = self.assertEqual
for field, order in {
for field, order in iteritems({
'title' : [2, 1, 3],
'authors': [2, 1, 3],
'series' : [3, 1, 2],
@ -154,7 +154,7 @@ class ReadingTest(BaseTest):
'#yesno':[2, 1, 3],
'#comments':[3, 2, 1],
'id': [1, 2, 3],
}.iteritems():
}):
x = list(reversed(order))
ae(order, cache.multisort([(field, True)],
ids_to_sort=x),
@ -222,7 +222,7 @@ class ReadingTest(BaseTest):
old_metadata = {i:old.get_metadata(
i, index_is_id=True, get_cover=True, cover_as_data=True) for i in
range(1, 4)}
for mi in old_metadata.itervalues():
for mi in itervalues(old_metadata):
mi.format_metadata = dict(mi.format_metadata)
if mi.formats:
mi.formats = tuple(mi.formats)
@ -234,7 +234,7 @@ class ReadingTest(BaseTest):
new_metadata = {i:cache.get_metadata(
i, get_cover=True, cover_as_data=True) for i in range(1, 4)}
cache = None
for mi2, mi1 in zip(new_metadata.values(), old_metadata.values()):
for mi2, mi1 in zip(list(new_metadata.values()), list(old_metadata.values())):
self.compare_metadata(mi1, mi2)
# }}}
@ -262,7 +262,7 @@ class ReadingTest(BaseTest):
old.conn.close()
old = None
cache = self.init_cache(self.library_path)
for book_id, cdata in covers.iteritems():
for book_id, cdata in iteritems(covers):
self.assertEqual(cdata, cache.cover(book_id), 'Reading of cover failed')
f = cache.cover(book_id, as_file=True)
self.assertEqual(cdata, f.read() if f else f, 'Reading of cover as file failed')
@ -325,7 +325,7 @@ class ReadingTest(BaseTest):
old = None
cache = self.init_cache(self.cloned_library)
for query, ans in oldvals.iteritems():
for query, ans in iteritems(oldvals):
nr = cache.search(query, '')
self.assertEqual(ans, nr,
'Old result: %r != New result: %r for search: %s'%(
@ -407,11 +407,11 @@ class ReadingTest(BaseTest):
lf = {i:set(old.formats(i, index_is_id=True).split(',')) if old.formats(
i, index_is_id=True) else set() for i in ids}
formats = {i:{f:old.format(i, f, index_is_id=True) for f in fmts} for
i, fmts in lf.iteritems()}
i, fmts in iteritems(lf)}
old.conn.close()
old = None
cache = self.init_cache(self.library_path)
for book_id, fmts in lf.iteritems():
for book_id, fmts in iteritems(lf):
self.assertEqual(fmts, set(cache.formats(book_id)),
'Set of formats is not the same')
for fmt in fmts:
@ -439,9 +439,9 @@ class ReadingTest(BaseTest):
'Test getting the author sort for authors from the db'
cache = self.init_cache()
table = cache.fields['authors'].table
table.set_sort_names({next(table.id_map.iterkeys()): 'Fake Sort'}, cache.backend)
table.set_sort_names({next(iterkeys(table.id_map)): 'Fake Sort'}, cache.backend)
authors = tuple(table.id_map.itervalues())
authors = tuple(itervalues(table.id_map))
nval = cache.author_sort_from_authors(authors)
self.assertIn('Fake Sort', nval)
@ -458,7 +458,7 @@ class ReadingTest(BaseTest):
cache.set_field('series', {3:'test series'})
cache.set_field('series_index', {3:13})
table = cache.fields['series'].table
series = tuple(table.id_map.itervalues())
series = tuple(itervalues(table.id_map))
nvals = {s:cache.get_next_series_num_for(s) for s in series}
db = self.init_old()
self.assertEqual({s:db.get_next_series_num_for(s) for s in series}, nvals)
@ -471,7 +471,7 @@ class ReadingTest(BaseTest):
from calibre.ebooks.metadata.book.base import Metadata
cache = self.init_cache()
db = self.init_old()
for title in cache.fields['title'].table.book_col_map.itervalues():
for title in itervalues(cache.fields['title'].table.book_col_map):
for x in (db, cache):
self.assertTrue(x.has_book(Metadata(title)))
self.assertTrue(x.has_book(Metadata(title.upper())))

View File

@ -14,6 +14,7 @@ from io import BytesIO
from calibre.ebooks.metadata import author_to_author_sort
from calibre.utils.date import UNDEFINED_DATE
from calibre.db.tests.base import BaseTest, IMG
from polyglot.builtins import iteritems, itervalues
class WritingTest(BaseTest):
@ -166,7 +167,7 @@ class WritingTest(BaseTest):
self.assertEqual(cache.set_field('#enum', {1:None}), {1})
cache2 = self.init_cache(cl)
for c in (cache, cache2):
for i, val in {1:None, 2:'One', 3:'Three'}.iteritems():
for i, val in iteritems({1:None, 2:'One', 3:'Three'}):
self.assertEqual(c.field_for('#enum', i), val)
del cache2
@ -176,9 +177,9 @@ class WritingTest(BaseTest):
self.assertEqual(cache.set_field('#rating', {1:None, 2:4, 3:8}), {1, 2, 3})
cache2 = self.init_cache(cl)
for c in (cache, cache2):
for i, val in {1:None, 2:4, 3:2}.iteritems():
for i, val in iteritems({1:None, 2:4, 3:2}):
self.assertEqual(c.field_for('rating', i), val)
for i, val in {1:None, 2:4, 3:8}.iteritems():
for i, val in iteritems({1:None, 2:4, 3:8}):
self.assertEqual(c.field_for('#rating', i), val)
del cache2
@ -191,14 +192,14 @@ class WritingTest(BaseTest):
self.assertEqual(cache.set_field('#series', {2:'Series [0]'}), {2})
cache2 = self.init_cache(cl)
for c in (cache, cache2):
for i, val in {1:'A Series One', 2:'A Series One', 3:'Series'}.iteritems():
for i, val in iteritems({1:'A Series One', 2:'A Series One', 3:'Series'}):
self.assertEqual(c.field_for('series', i), val)
cs_indices = {1:c.field_for('#series_index', 1), 3:c.field_for('#series_index', 3)}
for i in (1, 2, 3):
self.assertEqual(c.field_for('#series', i), 'Series')
for i, val in {1:2, 2:1, 3:3}.iteritems():
for i, val in iteritems({1:2, 2:1, 3:3}):
self.assertEqual(c.field_for('series_index', i), val)
for i, val in {1:cs_indices[1], 2:0, 3:cs_indices[3]}.iteritems():
for i, val in iteritems({1:cs_indices[1], 2:0, 3:cs_indices[3]}):
self.assertEqual(c.field_for('#series_index', i), val)
del cache2
@ -461,13 +462,13 @@ class WritingTest(BaseTest):
tmap = cache.get_id_map('tags')
self.assertEqual(cache.remove_items('tags', tmap), {1, 2})
tmap = cache.get_id_map('#tags')
t = {v:k for k, v in tmap.iteritems()}['My Tag Two']
t = {v:k for k, v in iteritems(tmap)}['My Tag Two']
self.assertEqual(cache.remove_items('#tags', (t,)), {1, 2})
smap = cache.get_id_map('series')
self.assertEqual(cache.remove_items('series', smap), {1, 2})
smap = cache.get_id_map('#series')
s = {v:k for k, v in smap.iteritems()}['My Series Two']
s = {v:k for k, v in iteritems(smap)}['My Series Two']
self.assertEqual(cache.remove_items('#series', (s,)), {1})
for c in (cache, self.init_cache()):
@ -507,7 +508,7 @@ class WritingTest(BaseTest):
for c in (cache, c2):
self.assertEqual(c.field_for('tags', 1), ())
self.assertEqual(c.field_for('tags', 2), ('b', 'a'))
self.assertNotIn('c', set(c.get_id_map('tags').itervalues()))
self.assertNotIn('c', set(itervalues(c.get_id_map('tags'))))
self.assertEqual(c.field_for('series', 1), None)
self.assertEqual(c.field_for('series', 2), 'a')
self.assertEqual(c.field_for('series_index', 1), 1.0)
@ -520,9 +521,9 @@ class WritingTest(BaseTest):
cl = self.cloned_library
cache = self.init_cache(cl)
# Check that renaming authors updates author sort and path
a = {v:k for k, v in cache.get_id_map('authors').iteritems()}['Unknown']
a = {v:k for k, v in iteritems(cache.get_id_map('authors'))}['Unknown']
self.assertEqual(cache.rename_items('authors', {a:'New Author'})[0], {3})
a = {v:k for k, v in cache.get_id_map('authors').iteritems()}['Author One']
a = {v:k for k, v in iteritems(cache.get_id_map('authors'))}['Author One']
self.assertEqual(cache.rename_items('authors', {a:'Author Two'})[0], {1, 2})
for c in (cache, self.init_cache(cl)):
self.assertEqual(c.all_field_names('authors'), {'New Author', 'Author Two'})
@ -531,7 +532,7 @@ class WritingTest(BaseTest):
self.assertEqual(c.field_for('authors', 1), ('Author Two',))
self.assertEqual(c.field_for('author_sort', 1), 'Two, Author')
t = {v:k for k, v in cache.get_id_map('tags').iteritems()}['Tag One']
t = {v:k for k, v in iteritems(cache.get_id_map('tags'))}['Tag One']
# Test case change
self.assertEqual(cache.rename_items('tags', {t:'tag one'}), ({1, 2}, {t:t}))
for c in (cache, self.init_cache(cl)):
@ -551,14 +552,14 @@ class WritingTest(BaseTest):
self.assertEqual(set(c.field_for('tags', 1)), {'Tag Two', 'News'})
self.assertEqual(set(c.field_for('tags', 2)), {'Tag Two'})
# Test on a custom column
t = {v:k for k, v in cache.get_id_map('#tags').iteritems()}['My Tag One']
t = {v:k for k, v in iteritems(cache.get_id_map('#tags'))}['My Tag One']
self.assertEqual(cache.rename_items('#tags', {t:'My Tag Two'})[0], {2})
for c in (cache, self.init_cache(cl)):
self.assertEqual(c.all_field_names('#tags'), {'My Tag Two'})
self.assertEqual(set(c.field_for('#tags', 2)), {'My Tag Two'})
# Test a Many-one field
s = {v:k for k, v in cache.get_id_map('series').iteritems()}['A Series One']
s = {v:k for k, v in iteritems(cache.get_id_map('series'))}['A Series One']
# Test case change
self.assertEqual(cache.rename_items('series', {s:'a series one'}), ({1, 2}, {s:s}))
for c in (cache, self.init_cache(cl)):
@ -574,7 +575,7 @@ class WritingTest(BaseTest):
self.assertEqual(c.field_for('series', 2), 'series')
self.assertEqual(c.field_for('series_index', 1), 2.0)
s = {v:k for k, v in cache.get_id_map('#series').iteritems()}['My Series One']
s = {v:k for k, v in iteritems(cache.get_id_map('#series'))}['My Series One']
# Test custom column with rename to existing
self.assertEqual(cache.rename_items('#series', {s:'My Series Two'})[0], {2})
for c in (cache, self.init_cache(cl)):
@ -585,7 +586,7 @@ class WritingTest(BaseTest):
# Test renaming many-many items to multiple items
cache = self.init_cache(self.cloned_library)
t = {v:k for k, v in cache.get_id_map('tags').iteritems()}['Tag One']
t = {v:k for k, v in iteritems(cache.get_id_map('tags'))}['Tag One']
affected_books, id_map = cache.rename_items('tags', {t:'Something, Else, Entirely'})
self.assertEqual({1, 2}, affected_books)
tmap = cache.get_id_map('tags')
@ -600,7 +601,7 @@ class WritingTest(BaseTest):
# Test with restriction
cache = self.init_cache()
cache.set_field('tags', {1:'a,b,c', 2:'x,y,z', 3:'a,x,z'})
tmap = {v:k for k, v in cache.get_id_map('tags').iteritems()}
tmap = {v:k for k, v in iteritems(cache.get_id_map('tags'))}
self.assertEqual(cache.rename_items('tags', {tmap['a']:'r'}, restrict_to_book_ids=()), (set(), {}))
self.assertEqual(cache.rename_items('tags', {tmap['a']:'r', tmap['b']:'q'}, restrict_to_book_ids=(1,))[0], {1})
self.assertEqual(cache.rename_items('tags', {tmap['x']:'X'}, restrict_to_book_ids=(2,))[0], {2})
@ -657,7 +658,7 @@ class WritingTest(BaseTest):
ldata = {aid:str(aid) for aid in adata}
self.assertEqual({1,2,3}, cache.set_link_for_authors(ldata))
for c in (cache, self.init_cache()):
self.assertEqual(ldata, {aid:d['link'] for aid, d in c.author_data().iteritems()})
self.assertEqual(ldata, {aid:d['link'] for aid, d in iteritems(c.author_data())})
self.assertEqual({3}, cache.set_link_for_authors({aid:'xxx' if aid == max(adata) else str(aid) for aid in adata}),
'Setting the author link to the same value as before, incorrectly marked some books as dirty')
sdata = {aid:'%s, changed' % aid for aid in adata}
@ -709,7 +710,7 @@ class WritingTest(BaseTest):
conn.execute('INSERT INTO tags (name) VALUES ("t")')
norm = conn.last_insert_rowid()
conn.execute('DELETE FROM books_tags_link')
for book_id, vals in {1:(lid, uid), 2:(uid, mid), 3:(lid, norm)}.iteritems():
for book_id, vals in iteritems({1:(lid, uid), 2:(uid, mid), 3:(lid, norm)}):
conn.executemany('INSERT INTO books_tags_link (book,tag) VALUES (?,?)',
tuple((book_id, x) for x in vals))
cache.reload_from_db()

View File

@ -9,7 +9,7 @@ __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os, errno, sys, re
from locale import localeconv
from collections import OrderedDict, namedtuple
from polyglot.builtins import map, unicode_type, string_or_bytes
from polyglot.builtins import iteritems, itervalues, map, unicode_type, string_or_bytes
from threading import Lock
from calibre import as_unicode, prints
@ -208,7 +208,7 @@ class ThumbnailCache(object):
def _invalidate_sizes(self):
if self.size_changed:
size = self.thumbnail_size
remove = (key for key, entry in self.items.iteritems() if size != entry.thumbnail_size)
remove = (key for key, entry in iteritems(self.items) if size != entry.thumbnail_size)
for key in remove:
self._remove(key)
self.size_changed = False
@ -365,7 +365,7 @@ class ThumbnailCache(object):
pass
if not hasattr(self, 'total_size'):
self._load_index()
for entry in self.items.itervalues():
for entry in itervalues(self.items):
self._do_delete(entry.path)
self.total_size = 0
self.items = OrderedDict()

View File

@ -9,7 +9,8 @@ __docformat__ = 'restructuredtext en'
import weakref, operator, numbers
from functools import partial
from polyglot.builtins import map, unicode_type, range, zip
from polyglot.builtins import (iteritems, iterkeys, itervalues, map,
unicode_type, range, zip)
from calibre.ebooks.metadata import title_sort
from calibre.utils.config_base import tweaks, prefs
@ -71,7 +72,7 @@ def format_is_multiple(x, sep=',', repl=None):
def format_identifiers(x):
if not x:
return None
return ','.join('%s:%s'%(k, v) for k, v in x.iteritems())
return ','.join('%s:%s'%(k, v) for k, v in iteritems(x))
class View(object):
@ -88,7 +89,7 @@ class View(object):
self.search_restriction_name = self.base_restriction_name = ''
self._field_getters = {}
self.column_count = len(cache.backend.FIELD_MAP)
for col, idx in cache.backend.FIELD_MAP.iteritems():
for col, idx in iteritems(cache.backend.FIELD_MAP):
label, fmt = col, lambda x:x
func = {
'id': self._get_id,
@ -373,14 +374,14 @@ class View(object):
self.marked_ids = dict.fromkeys(id_dict, u'true')
else:
# Ensure that all the items in the dict are text
self.marked_ids = dict(zip(id_dict.iterkeys(), map(unicode_type,
id_dict.itervalues())))
self.marked_ids = dict(zip(iterkeys(id_dict), map(unicode_type,
itervalues(id_dict))))
# This invalidates all searches in the cache even though the cache may
# be shared by multiple views. This is not ideal, but...
cmids = set(self.marked_ids)
self.cache.clear_search_caches(old_marked_ids | cmids)
if old_marked_ids != cmids:
for funcref in self.marked_listeners.itervalues():
for funcref in itervalues(self.marked_listeners):
func = funcref()
if func is not None:
func(old_marked_ids, cmids)

View File

@ -10,7 +10,7 @@ __docformat__ = 'restructuredtext en'
import re
from functools import partial
from datetime import datetime
from polyglot.builtins import unicode_type, zip
from polyglot.builtins import iteritems, itervalues, unicode_type, zip
from calibre.constants import preferred_encoding
from calibre.ebooks.metadata import author_to_author_sort, title_sort
@ -131,7 +131,7 @@ def adapt_identifiers(to_tuple, x):
if not isinstance(x, dict):
x = {k:v for k, v in (y.partition(':')[0::2] for y in to_tuple(x))}
ans = {}
for k, v in x.iteritems():
for k, v in iteritems(x):
k, v = clean_identifier(k, v)
if k and v:
ans[k] = v
@ -194,7 +194,7 @@ def get_adapter(name, metadata):
def one_one_in_books(book_id_val_map, db, field, *args):
'Set a one-one field in the books table'
if book_id_val_map:
sequence = ((sqlite_datetime(v), k) for k, v in book_id_val_map.iteritems())
sequence = ((sqlite_datetime(v), k) for k, v in iteritems(book_id_val_map))
db.executemany(
'UPDATE books SET %s=? WHERE id=?'%field.metadata['column'], sequence)
field.table.book_col_map.update(book_id_val_map)
@ -210,23 +210,23 @@ def set_title(book_id_val_map, db, field, *args):
ans = one_one_in_books(book_id_val_map, db, field, *args)
# Set the title sort field
field.title_sort_field.writer.set_books(
{k:title_sort(v) for k, v in book_id_val_map.iteritems()}, db)
{k:title_sort(v) for k, v in iteritems(book_id_val_map)}, db)
return ans
def one_one_in_other(book_id_val_map, db, field, *args):
'Set a one-one field in the non-books table, like comments'
deleted = tuple((k,) for k, v in book_id_val_map.iteritems() if v is None)
deleted = tuple((k,) for k, v in iteritems(book_id_val_map) if v is None)
if deleted:
db.executemany('DELETE FROM %s WHERE book=?'%field.metadata['table'],
deleted)
for book_id in deleted:
field.table.book_col_map.pop(book_id[0], None)
updated = {k:v for k, v in book_id_val_map.iteritems() if v is not None}
updated = {k:v for k, v in iteritems(book_id_val_map) if v is not None}
if updated:
db.executemany('INSERT OR REPLACE INTO %s(book,%s) VALUES (?,?)'%(
field.metadata['table'], field.metadata['column']),
((k, sqlite_datetime(v)) for k, v in updated.iteritems()))
((k, sqlite_datetime(v)) for k, v in iteritems(updated)))
field.table.book_col_map.update(updated)
return set(book_id_val_map)
@ -234,7 +234,7 @@ def one_one_in_other(book_id_val_map, db, field, *args):
def custom_series_index(book_id_val_map, db, field, *args):
series_field = field.series_field
sequence = []
for book_id, sidx in book_id_val_map.iteritems():
for book_id, sidx in iteritems(book_id_val_map):
if sidx is None:
sidx = 1.0
ids = series_field.ids_for_book(book_id)
@ -285,12 +285,12 @@ def get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
def change_case(case_changes, dirtied, db, table, m, is_authors=False):
if is_authors:
vals = ((val.replace(',', '|'), item_id) for item_id, val in
case_changes.iteritems())
iteritems(case_changes))
else:
vals = ((val, item_id) for item_id, val in case_changes.iteritems())
vals = ((val, item_id) for item_id, val in iteritems(case_changes))
db.executemany(
'UPDATE %s SET %s=? WHERE id=?'%(m['table'], m['column']), vals)
for item_id, val in case_changes.iteritems():
for item_id, val in iteritems(case_changes):
table.id_map[item_id] = val
dirtied.update(table.col_book_map[item_id])
if is_authors:
@ -306,14 +306,14 @@ def many_one(book_id_val_map, db, field, allow_case_change, *args):
# Map values to db ids, including any new values
kmap = safe_lower if dt in {'text', 'series'} else lambda x:x
rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
rid_map = {kmap(item):item_id for item_id, item in iteritems(table.id_map)}
if len(rid_map) != len(table.id_map):
# table has some entries that differ only in case, fix it
table.fix_case_duplicates(db)
rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
rid_map = {kmap(item):item_id for item_id, item in iteritems(table.id_map)}
val_map = {None:None}
case_changes = {}
for val in book_id_val_map.itervalues():
for val in itervalues(book_id_val_map):
if val is not None:
get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
case_changes, val_map)
@ -321,17 +321,17 @@ def many_one(book_id_val_map, db, field, allow_case_change, *args):
if case_changes:
change_case(case_changes, dirtied, db, table, m)
book_id_item_id_map = {k:val_map[v] for k, v in book_id_val_map.iteritems()}
book_id_item_id_map = {k:val_map[v] for k, v in iteritems(book_id_val_map)}
# Ignore those items whose value is the same as the current value
book_id_item_id_map = {k:v for k, v in book_id_item_id_map.iteritems()
book_id_item_id_map = {k:v for k, v in iteritems(book_id_item_id_map)
if v != table.book_col_map.get(k, None)}
dirtied |= set(book_id_item_id_map)
# Update the book->col and col->book maps
deleted = set()
updated = {}
for book_id, item_id in book_id_item_id_map.iteritems():
for book_id, item_id in iteritems(book_id_item_id_map):
old_item_id = table.book_col_map.get(book_id, None)
if old_item_id is not None:
table.col_book_map[old_item_id].discard(book_id)
@ -355,7 +355,7 @@ def many_one(book_id_val_map, db, field, allow_case_change, *args):
)
db.executemany(sql.format(table.link_table, m['link_column']),
((book_id, book_id, item_id) for book_id, item_id in
updated.iteritems()))
iteritems(updated)))
# Remove no longer used items
remove = {item_id for item_id in table.id_map if not
@ -392,15 +392,15 @@ def many_many(book_id_val_map, db, field, allow_case_change, *args):
# Map values to db ids, including any new values
kmap = safe_lower if dt == 'text' else lambda x:x
rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
rid_map = {kmap(item):item_id for item_id, item in iteritems(table.id_map)}
if len(rid_map) != len(table.id_map):
# table has some entries that differ only in case, fix it
table.fix_case_duplicates(db)
rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
rid_map = {kmap(item):item_id for item_id, item in iteritems(table.id_map)}
val_map = {}
case_changes = {}
book_id_val_map = {k:uniq(vals, kmap) for k, vals in book_id_val_map.iteritems()}
for vals in book_id_val_map.itervalues():
book_id_val_map = {k:uniq(vals, kmap) for k, vals in iteritems(book_id_val_map)}
for vals in itervalues(book_id_val_map):
for val in vals:
get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
case_changes, val_map, is_authors=is_authors)
@ -408,7 +408,7 @@ def many_many(book_id_val_map, db, field, allow_case_change, *args):
if case_changes:
change_case(case_changes, dirtied, db, table, m, is_authors=is_authors)
if is_authors:
for item_id, val in case_changes.iteritems():
for item_id, val in iteritems(case_changes):
for book_id in table.col_book_map[item_id]:
current_sort = field.db_author_sort_for_book(book_id)
new_sort = field.author_sort_for_book(book_id)
@ -418,17 +418,17 @@ def many_many(book_id_val_map, db, field, allow_case_change, *args):
field.author_sort_field.writer.set_books({book_id:new_sort}, db)
book_id_item_id_map = {k:tuple(val_map[v] for v in vals)
for k, vals in book_id_val_map.iteritems()}
for k, vals in iteritems(book_id_val_map)}
# Ignore those items whose value is the same as the current value
book_id_item_id_map = {k:v for k, v in book_id_item_id_map.iteritems()
book_id_item_id_map = {k:v for k, v in iteritems(book_id_item_id_map)
if v != table.book_col_map.get(k, None)}
dirtied |= set(book_id_item_id_map)
# Update the book->col and col->book maps
deleted = set()
updated = {}
for book_id, item_ids in book_id_item_id_map.iteritems():
for book_id, item_ids in iteritems(book_id_item_id_map):
old_item_ids = table.book_col_map.get(book_id, None)
if old_item_ids:
for old_item_id in old_item_ids:
@ -448,7 +448,7 @@ def many_many(book_id_val_map, db, field, allow_case_change, *args):
((k,) for k in deleted))
if updated:
vals = (
(book_id, val) for book_id, vals in updated.iteritems()
(book_id, val) for book_id, vals in iteritems(updated)
for val in vals
)
db.executemany('DELETE FROM %s WHERE book=?'%table.link_table,
@ -481,7 +481,7 @@ def many_many(book_id_val_map, db, field, allow_case_change, *args):
def identifiers(book_id_val_map, db, field, *args): # {{{
table = field.table
updates = set()
for book_id, identifiers in book_id_val_map.iteritems():
for book_id, identifiers in iteritems(book_id_val_map):
if book_id not in table.book_col_map:
table.book_col_map[book_id] = {}
current_ids = table.book_col_map[book_id]
@ -490,7 +490,7 @@ def identifiers(book_id_val_map, db, field, *args): # {{{
table.col_book_map.get(key, set()).discard(book_id)
current_ids.pop(key, None)
current_ids.update(identifiers)
for key, val in identifiers.iteritems():
for key, val in iteritems(identifiers):
if key not in table.col_book_map:
table.col_book_map[key] = set()
table.col_book_map[key].add(book_id)
@ -538,7 +538,7 @@ class Writer(object):
def set_books(self, book_id_val_map, db, allow_case_change=True):
book_id_val_map = {k:self.adapter(v) for k, v in
book_id_val_map.iteritems() if self.accept_vals(v)}
iteritems(book_id_val_map) if self.accept_vals(v)}
if not book_id_val_map:
return set()
dirtied = self.set_books_func(book_id_val_map, db, self.field,
@ -548,7 +548,7 @@ class Writer(object):
def set_books_for_enum(self, book_id_val_map, db, field,
allow_case_change):
allowed = set(field.metadata['display']['enum_values'])
book_id_val_map = {k:v for k, v in book_id_val_map.iteritems() if v is
book_id_val_map = {k:v for k, v in iteritems(book_id_val_map) if v is
None or v in allowed}
if not book_id_val_map:
return set()

View File

@ -248,7 +248,8 @@ def run_script(path, args):
g = globals()
g['__name__'] = '__main__'
g['__file__'] = ef
execfile(ef, g)
with open(ef, 'rb') as f:
exec(compile(f.read(), ef, 'exec'), g)
def inspect_mobi(path):
@ -346,7 +347,7 @@ def main(args=sys.argv):
elif ext in {'mobi', 'azw', 'azw3'}:
inspect_mobi(path)
else:
print ('Cannot dump unknown filetype: %s' % path)
print('Cannot dump unknown filetype: %s' % path)
elif len(args) >= 2 and os.path.exists(os.path.join(args[1], '__main__.py')):
sys.path.insert(0, args[1])
run_script(os.path.join(args[1], '__main__.py'), args[2:])

View File

@ -12,6 +12,7 @@ class Bookmark(): # {{{
A simple class fetching bookmark data
kobo-specific
'''
def __init__(self, db_connection, contentid, path, id, book_format, bookmark_extension):
self.book_format = book_format
self.bookmark_extension = bookmark_extension
@ -62,7 +63,7 @@ class Bookmark(): # {{{
kepub_chapter_data = ('{0}-%'.format(row[1]), )
cursor2.execute(kepub_chapter_query, kepub_chapter_data)
try:
kepub_chapter = cursor2.next()
kepub_chapter = next(cursor2)
chapter_title = kepub_chapter[0]
current_chapter = kepub_chapter[1]
except StopIteration:

View File

@ -32,7 +32,7 @@ from calibre import prints, fsync
from calibre.ptempfile import PersistentTemporaryFile
from calibre.constants import DEBUG
from calibre.utils.config_base import prefs
from polyglot.builtins import unicode_type, string_or_bytes
from polyglot.builtins import iteritems, itervalues, unicode_type, string_or_bytes
EPUB_EXT = '.epub'
KEPUB_EXT = '.kepub'
@ -185,7 +185,7 @@ class KOBO(USBMS):
cursor = connection.cursor()
cursor.execute('SELECT version FROM dbversion')
try:
result = cursor.next()
result = next(cursor)
dbversion = result['version']
except StopIteration:
dbversion = 0
@ -407,7 +407,7 @@ class KOBO(USBMS):
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
for idx in sorted(bl_cache.itervalues(), reverse=True):
for idx in sorted(itervalues(bl_cache), reverse=True):
if idx is not None:
need_sync = True
del bl[idx]
@ -572,7 +572,7 @@ class KOBO(USBMS):
metadata = iter(metadata)
for i, location in enumerate(locations):
self.report_progress((i+1) / float(len(locations)), _('Adding books to device metadata listing...'))
info = metadata.next()
info = next(metadata)
debug_print("KoboTouch::add_books_to_metadata - info=%s" % info)
blist = 2 if location[1] == 'cardb' else 1 if location[1] == 'carda' else 0
@ -790,7 +790,7 @@ class KOBO(USBMS):
t = (ContentID,)
cursor.execute('select DateLastRead, ReadStatus from Content where BookID is Null and ContentID = ?', t)
try:
result = cursor.next()
result = next(cursor)
datelastread = result['DateLastRead']
current_ReadStatus = result['ReadStatus']
except StopIteration:
@ -908,13 +908,13 @@ class KOBO(USBMS):
ContentID = self.contentid_from_path(book.path, ContentType)
if category in readstatuslist.keys():
if category in list(readstatuslist.keys()):
# Manage ReadStatus
self.set_readstatus(connection, ContentID, readstatuslist.get(category))
elif category == 'Shortlist' and self.dbversion >= 14:
# Manage FavouritesIndex/Shortlist
self.set_favouritesindex(connection, ContentID)
elif category in accessibilitylist.keys():
elif category in list(accessibilitylist.keys()):
# Do not manage the Accessibility List
pass
else: # No collections
@ -1020,7 +1020,7 @@ class KOBO(USBMS):
t = (ContentID,)
cursor.execute('select ImageId from Content where BookID is Null and ContentID = ?', t)
try:
result = cursor.next()
result = next(cursor)
# debug_print("ImageId: ", result[0])
ImageID = result[0]
except StopIteration:
@ -1962,7 +1962,7 @@ class KOBOTOUCH(KOBO):
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
for idx in sorted(bl_cache.itervalues(), reverse=True):
for idx in sorted(itervalues(bl_cache), reverse=True):
if idx is not None:
if not os.path.exists(self.normalize_path(os.path.join(prefix, bl[idx].lpath))) or not bl[idx].contentID:
need_sync = True
@ -2136,7 +2136,7 @@ class KOBOTOUCH(KOBO):
from calibre.ebooks.oeb.base import OEB_STYLES
is_dirty = False
for cssname, mt in container.mime_map.iteritems():
for cssname, mt in iteritems(container.mime_map):
if mt in OEB_STYLES:
newsheet = container.parsed(cssname)
oldrules = len(newsheet.cssRules)
@ -2445,7 +2445,7 @@ class KOBOTOUCH(KOBO):
debug_print(' Setting bookshelf on device')
self.set_bookshelf(connection, book, category)
category_added = True
elif category in readstatuslist.keys():
elif category in list(readstatuslist.keys()):
debug_print("KoboTouch:update_device_database_collections - about to set_readstatus - category='%s'"%(category, ))
# Manage ReadStatus
self.set_readstatus(connection, book.contentID, readstatuslist.get(category))
@ -2460,7 +2460,7 @@ class KOBOTOUCH(KOBO):
debug_print(' and about to set it - %s'%book.title)
self.set_favouritesindex(connection, book.contentID)
category_added = True
elif category in accessibilitylist.keys():
elif category in list(accessibilitylist.keys()):
# Do not manage the Accessibility List
pass
@ -2647,7 +2647,7 @@ class KOBOTOUCH(KOBO):
t = (ContentID,)
cursor.execute('select ImageId from Content where BookID is Null and ContentID = ?', t)
try:
result = cursor.next()
result = next(cursor)
ImageID = result[0]
except StopIteration:
ImageID = self.imageid_from_contentid(ContentID)
@ -2750,7 +2750,7 @@ class KOBOTOUCH(KOBO):
cursor = connection.cursor()
cursor.execute(test_query, test_values)
try:
result = cursor.next()
result = next(cursor)
except StopIteration:
result = None
@ -2858,7 +2858,7 @@ class KOBOTOUCH(KOBO):
cursor = connection.cursor()
cursor.execute(test_query, test_values)
try:
result = cursor.next()
result = next(cursor)
except StopIteration:
result = None
@ -2907,7 +2907,7 @@ class KOBOTOUCH(KOBO):
cursor = connection.cursor()
cursor.execute(test_query, test_values)
try:
result = cursor.next()
result = next(cursor)
except StopIteration:
result = None

View File

@ -10,6 +10,7 @@ __docformat__ = 'restructuredtext en'
import traceback, re
from calibre.constants import iswindows
from polyglot.builtins import iteritems
class DeviceDefaults(object):
@ -47,7 +48,7 @@ class DeviceDefaults(object):
for rule in self.rules:
tests = rule[0]
matches = True
for k, v in tests.iteritems():
for k, v in iteritems(tests):
if k == 'vendor' and v != vid:
matches = False
break

View File

@ -17,7 +17,7 @@ from calibre.devices.mtp.base import debug
from calibre.devices.mtp.defaults import DeviceDefaults
from calibre.ptempfile import SpooledTemporaryFile, PersistentTemporaryDirectory
from calibre.utils.filenames import shorten_components_to
from polyglot.builtins import unicode_type, zip
from polyglot.builtins import iteritems, itervalues, unicode_type, zip
BASE = importlib.import_module('calibre.devices.mtp.%s.driver'%(
'windows' if iswindows else 'unix')).MTP_DEVICE
@ -276,7 +276,7 @@ class MTP_DEVICE(BASE):
book.path = mtp_file.mtp_id_path
# Remove books in the cache that no longer exist
for idx in sorted(relpath_cache.itervalues(), reverse=True):
for idx in sorted(itervalues(relpath_cache), reverse=True):
del bl[idx]
need_sync = True
@ -546,7 +546,7 @@ class MTP_DEVICE(BASE):
def get_user_blacklisted_devices(self):
bl = frozenset(self.prefs['blacklist'])
ans = {}
for dev, x in self.prefs['history'].iteritems():
for dev, x in iteritems(self.prefs['history']):
name = x[0]
if dev in bl:
ans[dev] = name

View File

@ -10,7 +10,7 @@ __docformat__ = 'restructuredtext en'
import weakref, sys, json
from collections import deque
from operator import attrgetter
from polyglot.builtins import map, unicode_type
from polyglot.builtins import itervalues, map, unicode_type
from datetime import datetime
from calibre import human_readable, prints, force_unicode
@ -201,7 +201,7 @@ class FilesystemCache(object):
for entry in entries:
FileOrFolder(entry, self)
for item in self.id_map.itervalues():
for item in itervalues(self.id_map):
try:
p = item.parent
except KeyError:
@ -227,7 +227,7 @@ class FilesystemCache(object):
return e
def iterebooks(self, storage_id):
for x in self.id_map.itervalues():
for x in itervalues(self.id_map):
if x.storage_id == storage_id and x.is_ebook:
if x.parent_id == storage_id and x.name.lower().endswith('.txt'):
continue # Ignore .txt files in the root

View File

@ -9,7 +9,7 @@ __docformat__ = 'restructuredtext en'
import time, threading, traceback
from functools import wraps, partial
from polyglot.builtins import unicode_type, zip
from polyglot.builtins import iteritems, iterkeys, itervalues, unicode_type, zip
from itertools import chain
from calibre import as_unicode, prints, force_unicode
@ -107,7 +107,7 @@ class MTP_DEVICE(MTPDeviceBase):
# Get device data for detected devices. If there is an error, we will
# try again for that device the next time this method is called.
for dev in tuple(self.detected_devices.iterkeys()):
for dev in tuple(iterkeys(self.detected_devices)):
data = self.detected_devices.get(dev, None)
if data is None or data is False:
try:
@ -130,7 +130,7 @@ class MTP_DEVICE(MTPDeviceBase):
self.currently_connected_pnp_id in self.detected_devices
else None)
for dev, data in self.detected_devices.iteritems():
for dev, data in iteritems(self.detected_devices):
if dev in self.blacklisted_devices or dev in self.ejected_devices:
# Ignore blacklisted and ejected devices
continue
@ -267,10 +267,10 @@ class MTP_DEVICE(MTPDeviceBase):
self._currently_getting_sid = unicode_type(storage_id)
id_map = self.dev.get_filesystem(storage_id, partial(
self._filesystem_callback, {}))
for x in id_map.itervalues():
for x in itervalues(id_map):
x['storage_id'] = storage_id
all_storage.append(storage)
items.append(id_map.itervalues())
items.append(itervalues(id_map))
self._filesystem_cache = FilesystemCache(all_storage, chain(*items))
debug('Filesystem metadata loaded in %g seconds (%d objects)'%(
time.time()-st, len(self._filesystem_cache)))

View File

@ -705,8 +705,8 @@ class XMLCache(object):
child.text = '\n'+'\t'*(level+1)
for gc in child:
gc.tail = '\n'+'\t'*(level+1)
child.iterchildren(reversed=True).next().tail = '\n'+'\t'*level
root.iterchildren(reversed=True).next().tail = '\n'+'\t'*(level-1)
next(child.iterchildren(reversed=True)).tail = '\n'+'\t'*level
next(root.iterchildren(reversed=True)).tail = '\n'+'\t'*(level-1)
def move_playlists_to_bottom(self):
for root in self.record_roots.values():
@ -799,4 +799,3 @@ class XMLCache(object):
self.namespaces[i] = ns
# }}}

View File

@ -13,7 +13,7 @@ from threading import Lock
from calibre import prints, as_unicode
from calibre.constants import (iswindows, isosx, plugins, islinux, isfreebsd,
isnetbsd)
from polyglot.builtins import range
from polyglot.builtins import iterkeys, range
osx_scanner = linux_scanner = freebsd_scanner = netbsd_scanner = None
@ -77,7 +77,7 @@ class LibUSBScanner(object):
dev = USBDevice(*dev)
dev.busnum, dev.devnum = fingerprint[:2]
ans.add(dev)
extra = set(self.libusb.cache.iterkeys()) - seen
extra = set(iterkeys(self.libusb.cache)) - seen
for x in extra:
self.libusb.cache.pop(x, None)
return ans

View File

@ -1471,7 +1471,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
metadata = iter(metadata)
for i, infile in enumerate(files):
mdata, fname = metadata.next(), names.next()
mdata, fname = next(metadata), next(names)
lpath = self._create_upload_path(mdata, fname, create_dirs=False)
self._debug('lpath', lpath)
if not hasattr(infile, 'read'):
@ -1497,7 +1497,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
for i, location in enumerate(locations):
self.report_progress((i + 1) / float(len(locations)),
_('Adding books to device metadata listing...'))
info = metadata.next()
info = next(metadata)
lpath = location[0]
length = location[1]
lpath = self._strip_prefix(lpath)

View File

@ -23,7 +23,7 @@ from calibre.devices.errors import DeviceError
from calibre.devices.usbms.deviceconfig import DeviceConfig
from calibre.constants import iswindows, islinux, isosx, isfreebsd, plugins
from calibre.utils.filenames import ascii_filename as sanitize
from polyglot.builtins import string_or_bytes
from polyglot.builtins import iteritems, string_or_bytes
if isosx:
usbobserver, usbobserver_err = plugins['usbobserver']
@ -404,7 +404,7 @@ class Device(DeviceConfig, DevicePlugin):
bsd_drives = self.osx_bsd_names()
drives = self.osx_sort_names(bsd_drives.copy())
mount_map = usbobserver.get_mounted_filesystems()
drives = {k: mount_map.get(v) for k, v in drives.iteritems()}
drives = {k: mount_map.get(v) for k, v in iteritems(drives)}
if DEBUG:
print()
from pprint import pprint

View File

@ -20,7 +20,7 @@ from calibre.devices.usbms.cli import CLI
from calibre.devices.usbms.device import Device
from calibre.devices.usbms.books import BookList, Book
from calibre.ebooks.metadata.book.json_codec import JsonCodec
from polyglot.builtins import unicode_type, string_or_bytes
from polyglot.builtins import itervalues, unicode_type, string_or_bytes
BASE_TIME = None
@ -281,7 +281,7 @@ class USBMS(CLI, Device):
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
for idx in sorted(bl_cache.itervalues(), reverse=True):
for idx in sorted(itervalues(bl_cache), reverse=True):
if idx is not None:
need_sync = True
del bl[idx]
@ -311,7 +311,7 @@ class USBMS(CLI, Device):
metadata = iter(metadata)
for i, infile in enumerate(files):
mdata, fname = metadata.next(), names.next()
mdata, fname = next(metadata), next(names)
filepath = self.normalize_path(self.create_upload_path(path, mdata, fname))
if not hasattr(infile, 'read'):
infile = self.normalize_path(infile)
@ -350,7 +350,7 @@ class USBMS(CLI, Device):
metadata = iter(metadata)
for i, location in enumerate(locations):
self.report_progress((i+1) / float(len(locations)), _('Adding books to device metadata listing...'))
info = metadata.next()
info = next(metadata)
blist = 2 if location[1] == 'cardb' else 1 if location[1] == 'carda' else 0
# Extract the correct prefix from the pathname. To do this correctly,

View File

@ -15,7 +15,7 @@ from ctypes import (
)
from ctypes.wintypes import DWORD, WORD, ULONG, LPCWSTR, HWND, BOOL, LPWSTR, UINT, BYTE, HANDLE, USHORT
from pprint import pprint, pformat
from polyglot.builtins import map
from polyglot.builtins import iteritems, itervalues, map
from calibre import prints, as_unicode
@ -652,13 +652,13 @@ def get_volume_information(drive_letter):
'max_component_length': max_component_length.value,
}
for name, num in {'FILE_CASE_PRESERVED_NAMES':0x00000002, 'FILE_CASE_SENSITIVE_SEARCH':0x00000001, 'FILE_FILE_COMPRESSION':0x00000010,
for name, num in iteritems({'FILE_CASE_PRESERVED_NAMES':0x00000002, 'FILE_CASE_SENSITIVE_SEARCH':0x00000001, 'FILE_FILE_COMPRESSION':0x00000010,
'FILE_NAMED_STREAMS':0x00040000, 'FILE_PERSISTENT_ACLS':0x00000008, 'FILE_READ_ONLY_VOLUME':0x00080000,
'FILE_SEQUENTIAL_WRITE_ONCE':0x00100000, 'FILE_SUPPORTS_ENCRYPTION':0x00020000, 'FILE_SUPPORTS_EXTENDED_ATTRIBUTES':0x00800000,
'FILE_SUPPORTS_HARD_LINKS':0x00400000, 'FILE_SUPPORTS_OBJECT_IDS':0x00010000, 'FILE_SUPPORTS_OPEN_BY_FILE_ID':0x01000000,
'FILE_SUPPORTS_REPARSE_POINTS':0x00000080, 'FILE_SUPPORTS_SPARSE_FILES':0x00000040, 'FILE_SUPPORTS_TRANSACTIONS':0x00200000,
'FILE_SUPPORTS_USN_JOURNAL':0x02000000, 'FILE_UNICODE_ON_DISK':0x00000004, 'FILE_VOLUME_IS_COMPRESSED':0x00008000,
'FILE_VOLUME_QUOTAS':0x00000020}.iteritems():
'FILE_VOLUME_QUOTAS':0x00000020}):
ans[name] = bool(num & flags)
return ans
@ -809,7 +809,7 @@ def get_storage_number_map(drive_types=(DRIVE_REMOVABLE, DRIVE_FIXED), debug=Fal
' Get a mapping of drive letters to storage numbers for all drives on system (of the specified types) '
mask = GetLogicalDrives()
type_map = {letter:GetDriveType(letter + ':' + os.sep) for i, letter in enumerate(string.ascii_uppercase) if mask & (1 << i)}
drives = (letter for letter, dt in type_map.iteritems() if dt in drive_types)
drives = (letter for letter, dt in iteritems(type_map) if dt in drive_types)
ans = defaultdict(list)
for letter in drives:
try:
@ -819,7 +819,7 @@ def get_storage_number_map(drive_types=(DRIVE_REMOVABLE, DRIVE_FIXED), debug=Fal
if debug:
prints('Failed to get storage number for drive: %s with error: %s' % (letter, as_unicode(err)))
continue
for val in ans.itervalues():
for val in itervalues(ans):
val.sort(key=itemgetter(0))
return dict(ans)
@ -859,7 +859,7 @@ def get_storage_number_map_alt(debug=False):
if debug:
prints('Failed to get storage number for drive: %s with error: %s' % (name[0], as_unicode(err)))
continue
for val in ans.itervalues():
for val in itervalues(ans):
val.sort(key=itemgetter(0))
return dict(ans)

View File

@ -17,6 +17,7 @@ from calibre.customize.conversion import OptionRecommendation
from calibre import patheq
from calibre.ebooks.conversion import ConversionUserFeedBack
from calibre.utils.localization import localize_user_manual_link
from polyglot.builtins import iteritems
USAGE = '%prog ' + _('''\
input_file output_file [options]
@ -254,7 +255,7 @@ def add_pipeline_options(parser, plumber):
))
for group, (desc, options) in groups.iteritems():
for group, (desc, options) in iteritems(groups):
if group:
group = OptionGroup(parser, group, desc)
parser.add_option_group(group)

View File

@ -18,7 +18,7 @@ def decrypt_font_data(key, data, algorithm):
crypt_len = 1024 if is_adobe else 1040
crypt = bytearray(data[:crypt_len])
key = cycle(iter(bytearray(key)))
decrypt = bytes(bytearray(x^key.next() for x in crypt))
decrypt = bytes(bytearray(x^next(key) for x in crypt))
return decrypt + data[crypt_len:]

View File

@ -218,7 +218,7 @@ class EPUBOutput(OutputFormatPlugin):
if self.oeb.toc.count() == 0:
self.log.warn('This EPUB file has no Table of Contents. '
'Creating a default TOC')
first = iter(self.oeb.spine).next()
first = next(iter(self.oeb.spine))
self.oeb.toc.add(_('Start'), first.href)
from calibre.ebooks.oeb.base import OPF
@ -422,7 +422,7 @@ class EPUBOutput(OutputFormatPlugin):
if br.getparent() is None:
continue
try:
prior = br.itersiblings(preceding=True).next()
prior = next(br.itersiblings(preceding=True))
priortag = barename(prior.tag)
priortext = prior.tail
except:

View File

@ -8,7 +8,7 @@ import os, re
from calibre.customize.conversion import InputFormatPlugin, OptionRecommendation
from calibre import guess_type
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
FB2NS = 'http://www.gribuser.ru/xml/fictionbook/2.0'
FB21NS = 'http://www.gribuser.ru/xml/fictionbook/2.1'
@ -103,7 +103,7 @@ class FB2Input(InputFormatPlugin):
notes = {a.get('href')[1:]: a for a in result.xpath('//a[@link_note and @href]') if a.get('href').startswith('#')}
cites = {a.get('link_cite'): a for a in result.xpath('//a[@link_cite]') if not a.get('href', '')}
all_ids = {x for x in result.xpath('//*/@id')}
for cite, a in cites.iteritems():
for cite, a in iteritems(cites):
note = notes.get(cite, None)
if note:
c = 1

View File

@ -14,7 +14,7 @@ from calibre.constants import iswindows
from calibre.customize.conversion import (OutputFormatPlugin,
OptionRecommendation)
from calibre.ptempfile import TemporaryDirectory
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
UNITS = ['millimeter', 'centimeter', 'point', 'inch' , 'pica' , 'didot',
'cicero', 'devicepixel']
@ -263,7 +263,7 @@ class PDFOutput(OutputFormatPlugin):
self.process_fonts()
if self.opts.pdf_use_document_margins and self.stored_page_margins:
import json
for href, margins in self.stored_page_margins.iteritems():
for href, margins in iteritems(self.stored_page_margins):
item = oeb_book.manifest.hrefs.get(href)
if item is not None:
root = item.data

View File

@ -5,6 +5,7 @@ __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
import os, glob, re, textwrap
from calibre.customize.conversion import InputFormatPlugin, OptionRecommendation
from polyglot.builtins import iteritems
border_style_map = {
'single' : 'solid',
@ -145,7 +146,7 @@ class RTFInput(InputFormatPlugin):
def convert_images(self, imap):
self.default_img = None
for count, val in imap.iteritems():
for count, val in iteritems(imap):
try:
imap[count] = self.convert_image(val)
except:
@ -210,7 +211,7 @@ class RTFInput(InputFormatPlugin):
css += '\n'+'\n'.join(font_size_classes)
css += '\n' +'\n'.join(color_classes)
for cls, val in border_styles.iteritems():
for cls, val in iteritems(border_styles):
css += '\n\n.%s {\n%s\n}'%(cls, val)
with open(u'styles.css', 'ab') as f:

View File

@ -125,10 +125,10 @@ class SNBOutput(OutputFormatPlugin):
if oeb_book.toc.count() == 0:
log.warn('This SNB file has no Table of Contents. '
'Creating a default TOC')
first = iter(oeb_book.spine).next()
first = next(iter(oeb_book.spine))
oeb_book.toc.add(_('Start page'), first.href)
else:
first = iter(oeb_book.spine).next()
first = next(iter(oeb_book.spine))
if oeb_book.toc[0].href != first.href:
# The pages before the fist item in toc will be stored as
# "Cover Pages".

View File

@ -10,7 +10,7 @@ import re, random, unicodedata, numbers
from collections import namedtuple
from contextlib import contextmanager
from math import ceil, sqrt, cos, sin, atan2
from polyglot.builtins import map, zip, string_or_bytes
from polyglot.builtins import iteritems, itervalues, map, zip, string_or_bytes
from itertools import chain
from PyQt5.Qt import (
@ -282,7 +282,7 @@ def preserve_fields(obj, fields):
try:
yield
finally:
for f, val in mem.iteritems():
for f, val in iteritems(mem):
if val is null:
delattr(obj, f)
else:
@ -324,10 +324,10 @@ def load_color_themes(prefs):
t = default_color_themes.copy()
t.update(prefs.color_themes)
disabled = frozenset(prefs.disabled_color_themes)
ans = [theme_to_colors(v) for k, v in t.iteritems() if k not in disabled]
ans = [theme_to_colors(v) for k, v in iteritems(t) if k not in disabled]
if not ans:
# Ignore disabled and return only the builtin color themes
ans = [theme_to_colors(v) for k, v in default_color_themes.iteritems()]
ans = [theme_to_colors(v) for k, v in iteritems(default_color_themes)]
return ans
@ -557,14 +557,14 @@ class Blocks(Style):
def all_styles():
return set(
x.NAME for x in globals().itervalues() if
x.NAME for x in itervalues(globals()) if
isinstance(x, type) and issubclass(x, Style) and x is not Style
)
def load_styles(prefs, respect_disabled=True):
disabled = frozenset(prefs.disabled_styles) if respect_disabled else ()
ans = tuple(x for x in globals().itervalues() if
ans = tuple(x for x in itervalues(globals()) if
isinstance(x, type) and issubclass(x, Style) and x is not Style and x.NAME not in disabled)
if not ans and disabled:
# If all styles have been disabled, ignore the disabling and return all

View File

@ -13,6 +13,7 @@ from css_parser.css import Property, CSSRule
from calibre import force_unicode
from calibre.ebooks import parse_css_length
from calibre.ebooks.oeb.normalize_css import normalizers, safe_parser
from polyglot.builtins import iteritems
def compile_pat(pat):
@ -44,7 +45,7 @@ class StyleDeclaration(object):
yield p, None
else:
if p not in self.expanded_properties:
self.expanded_properties[p] = [Property(k, v, p.literalpriority) for k, v in n(p.name, p.propertyValue).iteritems()]
self.expanded_properties[p] = [Property(k, v, p.literalpriority) for k, v in iteritems(n(p.name, p.propertyValue))]
for ep in self.expanded_properties[p]:
yield ep, p
@ -338,7 +339,7 @@ def export_rules(serialized_rules):
lines = []
for rule in serialized_rules:
lines.extend('# ' + l for l in rule_to_text(rule).splitlines())
lines.extend('%s: %s' % (k, v.replace('\n', ' ')) for k, v in rule.iteritems() if k in allowed_keys)
lines.extend('%s: %s' % (k, v.replace('\n', ' ')) for k, v in iteritems(rule) if k in allowed_keys)
lines.append('')
return '\n'.join(lines).encode('utf-8')

View File

@ -8,6 +8,7 @@ __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import numbers
from collections import OrderedDict
from polyglot.builtins import iteritems
class Inherit:
@ -115,11 +116,11 @@ def read_border(parent, dest, XPath, get, border_edges=border_edges, name='pBdr'
for border in XPath('./w:' + name)(parent):
for edge in border_edges:
for prop, val in read_single_border(border, edge, XPath, get).iteritems():
for prop, val in iteritems(read_single_border(border, edge, XPath, get)):
if val is not None:
vals[prop % edge] = val
for key, val in vals.iteritems():
for key, val in iteritems(vals):
setattr(dest, key, val)

View File

@ -7,7 +7,7 @@ __license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os
from polyglot.builtins import range
from polyglot.builtins import iterkeys, itervalues, range
NBSP = '\xa0'
@ -54,7 +54,7 @@ def merge_run(run):
def liftable(css):
# A <span> is liftable if all its styling would work just as well if it is
# specified on the parent element.
prefixes = {x.partition('-')[0] for x in css.iterkeys()}
prefixes = {x.partition('-')[0] for x in iterkeys(css)}
return not (prefixes - {'text', 'font', 'letter', 'color', 'background'})
@ -134,7 +134,7 @@ def cleanup_markup(log, root, styles, dest_dir, detect_cover, XPath):
current_run = [span]
# Process dir attributes
class_map = dict(styles.classes.itervalues())
class_map = dict(itervalues(styles.classes))
parents = ('p', 'div') + tuple('h%d' % i for i in range(1, 7))
for parent in root.xpath('//*[(%s)]' % ' or '.join('name()="%s"' % t for t in parents)):
# Ensure that children of rtl parents that are not rtl have an

View File

@ -9,6 +9,7 @@ __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import re
from calibre.ebooks.docx.index import process_index, polish_index_markup
from polyglot.builtins import iteritems
class Field(object):
@ -38,6 +39,7 @@ class Field(object):
self.instructions = ''.join(self.buf)
del self.buf
WORD, FLAG = 0, 1
scanner = re.Scanner([
(r'\\\S{1}', lambda s, t: (t, FLAG)), # A flag of the form \x
@ -76,6 +78,7 @@ def parser(name, field_map, default_field_name=None):
return parse
parse_hyperlink = parser('hyperlink',
'l:anchor m:image-map n:target o:title t:target', 'url')
@ -222,7 +225,7 @@ class Fields(object):
def polish_markup(self, object_map):
if not self.index_fields:
return
rmap = {v:k for k, v in object_map.iteritems()}
rmap = {v:k for k, v in iteritems(object_map)}
for idx, blocks in self.index_fields:
polish_index_markup(idx, [rmap[b] for b in blocks])
@ -256,5 +259,6 @@ def test_parse_fields(return_tests=False):
return suite
unittest.TextTestRunner(verbosity=4).run(suite)
if __name__ == '__main__':
test_parse_fields()

View File

@ -14,7 +14,7 @@ from calibre.utils.filenames import ascii_filename
from calibre.utils.fonts.scanner import font_scanner, NoFonts
from calibre.utils.fonts.utils import panose_to_css_generic_family, is_truetype_font
from calibre.utils.icu import ord_string
from polyglot.builtins import codepoint_to_chr, range
from polyglot.builtins import codepoint_to_chr, iteritems, range
Embed = namedtuple('Embed', 'name key subsetted')
@ -172,7 +172,7 @@ class Fonts(object):
d['font-weight'] = 'bold'
if 'Italic' in variant:
d['font-style'] = 'italic'
d = ['%s: %s' % (k, v) for k, v in d.iteritems()]
d = ['%s: %s' % (k, v) for k, v in iteritems(d)]
d = ';\n\t'.join(d)
defs.append('@font-face {\n\t%s\n}\n' % d)
return '\n'.join(defs)

View File

@ -7,6 +7,7 @@ __license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from collections import OrderedDict
from polyglot.builtins import iteritems
class Note(object):
@ -57,10 +58,9 @@ class Footnotes(object):
return None, None
def __iter__(self):
for anchor, (counter, note) in self.notes.iteritems():
for anchor, (counter, note) in iteritems(self.notes):
yield anchor, counter, note
@property
def has_notes(self):
return bool(self.notes)

View File

@ -15,6 +15,7 @@ from calibre.ebooks.docx.names import barename
from calibre.utils.filenames import ascii_filename
from calibre.utils.img import resize_to_fit, image_to_data
from calibre.utils.imghdr import what
from polyglot.builtins import iteritems, itervalues
class LinkedImageNotFound(ValueError):
@ -66,7 +67,7 @@ def get_image_properties(parent, XPath, get):
def get_image_margins(elem):
ans = {}
for w, css in {'L':'left', 'T':'top', 'R':'right', 'B':'bottom'}.iteritems():
for w, css in iteritems({'L':'left', 'T':'top', 'R':'right', 'B':'bottom'}):
val = elem.get('dist%s' % w, None)
if val is not None:
try:
@ -157,7 +158,7 @@ class Images(object):
return raw, base
def unique_name(self, base):
exists = frozenset(self.used.itervalues())
exists = frozenset(itervalues(self.used))
c = 1
name = base
while name in exists:
@ -242,7 +243,7 @@ class Images(object):
ans = self.pic_to_img(pic, alt, inline, title)
if ans is not None:
if style:
ans.set('style', '; '.join('%s: %s' % (k, v) for k, v in style.iteritems()))
ans.set('style', '; '.join('%s: %s' % (k, v) for k, v in iteritems(style)))
yield ans
# Now process the floats
@ -253,7 +254,7 @@ class Images(object):
ans = self.pic_to_img(pic, alt, anchor, title)
if ans is not None:
if style:
ans.set('style', '; '.join('%s: %s' % (k, v) for k, v in style.iteritems()))
ans.set('style', '; '.join('%s: %s' % (k, v) for k, v in iteritems(style)))
yield ans
def pict_to_html(self, pict, page):
@ -275,7 +276,7 @@ class Images(object):
style['margin-left'] = '0' if align == 'left' else 'auto'
style['margin-right'] = 'auto' if align == 'left' else '0'
if style:
hr.set('style', '; '.join(('%s:%s' % (k, v) for k, v in style.iteritems())))
hr.set('style', '; '.join(('%s:%s' % (k, v) for k, v in iteritems(style))))
yield hr
for imagedata in XPath('descendant::v:imagedata[@r:id]')(pict):

View File

@ -11,7 +11,7 @@ from operator import itemgetter
from lxml import etree
from calibre.utils.icu import partition_by_first_letter, sort_key
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
def get_applicable_xe_fields(index, xe_fields, XPath, expand):
@ -103,7 +103,7 @@ def process_index(field, index, xe_fields, log, XPath, expand):
if heading_text is not None:
groups = partition_by_first_letter(xe_fields, key=itemgetter('text'))
items = []
for key, fields in groups.iteritems():
for key, fields in iteritems(groups):
items.append(key), items.extend(fields)
if styles:
heading_style = styles[0]

View File

@ -11,6 +11,7 @@ import re
from lxml.etree import XPath as X
from calibre.utils.filenames import ascii_text
from polyglot.builtins import iteritems
# Names {{{
TRANSITIONAL_NAMES = {
@ -32,7 +33,7 @@ TRANSITIONAL_NAMES = {
STRICT_NAMES = {
k:v.replace('http://schemas.openxmlformats.org/officeDocument/2006', 'http://purl.oclc.org/ooxml/officeDocument')
for k, v in TRANSITIONAL_NAMES.iteritems()
for k, v in iteritems(TRANSITIONAL_NAMES)
}
TRANSITIONAL_NAMESPACES = {
@ -72,7 +73,7 @@ STRICT_NAMESPACES = {
'http://schemas.openxmlformats.org/officeDocument/2006', 'http://purl.oclc.org/ooxml/officeDocument').replace(
'http://schemas.openxmlformats.org/wordprocessingml/2006', 'http://purl.oclc.org/ooxml/wordprocessingml').replace(
'http://schemas.openxmlformats.org/drawingml/2006', 'http://purl.oclc.org/ooxml/drawingml')
for k, v in TRANSITIONAL_NAMESPACES.iteritems()
for k, v in iteritems(TRANSITIONAL_NAMESPACES)
}
# }}}
@ -138,7 +139,7 @@ class DOCXNamespace(object):
return self.XPath('|'.join('descendant::%s' % a for a in args))(elem)
def makeelement(self, root, tag, append=True, **attrs):
ans = root.makeelement(self.expand(tag), **{self.expand(k, sep='_'):v for k, v in attrs.iteritems()})
ans = root.makeelement(self.expand(tag), **{self.expand(k, sep='_'):v for k, v in iteritems(attrs)})
if append:
root.append(ans)
return ans

View File

@ -15,6 +15,7 @@ from lxml.html.builder import OL, UL, SPAN
from calibre.ebooks.docx.block_styles import ParagraphStyle
from calibre.ebooks.docx.char_styles import RunStyle, inherit
from calibre.ebooks.metadata import roman
from polyglot.builtins import iteritems
STYLE_MAP = {
'aiueo': 'hiragana',
@ -36,6 +37,7 @@ def alphabet(val, lower=True):
x = string.ascii_lowercase if lower else string.ascii_uppercase
return x[(abs(val - 1)) % len(x)]
alphabet_map = {
'lower-alpha':alphabet, 'upper-alpha':partial(alphabet, lower=False),
'lower-roman':lambda x:roman(x).lower(), 'upper-roman':roman,
@ -168,7 +170,7 @@ class NumberingDefinition(object):
def copy(self):
ans = NumberingDefinition(self.namespace, an_id=self.abstract_numbering_definition_id)
for l, lvl in self.levels.iteritems():
for l, lvl in iteritems(self.levels):
ans.levels[l] = lvl.copy()
return ans
@ -224,7 +226,7 @@ class Numbering(object):
if alvl is None:
alvl = Level(self.namespace)
alvl.read_from_xml(lvl, override=True)
for ilvl, so in start_overrides.iteritems():
for ilvl, so in iteritems(start_overrides):
try:
nd.levels[ilvl].start = start_override
except KeyError:
@ -244,22 +246,22 @@ class Numbering(object):
self.instances[num_id] = create_instance(n, d)
numbering_links = styles.numbering_style_links
for an_id, style_link in lazy_load.iteritems():
for an_id, style_link in iteritems(lazy_load):
num_id = numbering_links[style_link]
self.definitions[an_id] = self.instances[num_id].copy()
for num_id, (an_id, n) in next_pass.iteritems():
for num_id, (an_id, n) in iteritems(next_pass):
d = self.definitions.get(an_id, None)
if d is not None:
self.instances[num_id] = create_instance(n, d)
for num_id, d in self.instances.iteritems():
for num_id, d in iteritems(self.instances):
self.starts[num_id] = {lvl:d.levels[lvl].start for lvl in d.levels}
def get_pstyle(self, num_id, style_id):
d = self.instances.get(num_id, None)
if d is not None:
for ilvl, lvl in d.levels.iteritems():
for ilvl, lvl in iteritems(d.levels):
if lvl.para_link == style_id:
return ilvl
@ -271,7 +273,7 @@ class Numbering(object):
def update_counter(self, counter, levelnum, levels):
counter[levelnum] += 1
for ilvl, lvl in levels.iteritems():
for ilvl, lvl in iteritems(levels):
restart = lvl.restart
if (restart is None and ilvl == levelnum + 1) or restart == levelnum + 1:
counter[ilvl] = lvl.start

View File

@ -12,6 +12,7 @@ from collections import OrderedDict, Counter
from calibre.ebooks.docx.block_styles import ParagraphStyle, inherit, twips
from calibre.ebooks.docx.char_styles import RunStyle
from calibre.ebooks.docx.tables import TableStyle
from polyglot.builtins import iteritems, itervalues
class PageProperties(object):
@ -124,7 +125,7 @@ class Styles(object):
self.default_paragraph_style = self.default_character_style = None
def __iter__(self):
for s in self.id_map.itervalues():
for s in itervalues(self.id_map):
yield s
def __getitem__(self, key):
@ -341,7 +342,7 @@ class Styles(object):
setattr(s, prop, inherit)
setattr(block_style, prop, next(iter(vals)))
for p, runs in layers.iteritems():
for p, runs in iteritems(layers):
has_links = '1' in {r.get('is-link', None) for r in runs}
char_styles = [self.resolve_run(r) for r in runs]
block_style = self.resolve_paragraph(p)
@ -421,7 +422,7 @@ class Styles(object):
ps.pageBreakBefore = True
def register(self, css, prefix):
h = hash(frozenset(css.iteritems()))
h = hash(frozenset(iteritems(css)))
ans, _ = self.classes.get(h, (None, None))
if ans is None:
self.counter[prefix] += 1
@ -430,17 +431,17 @@ class Styles(object):
return ans
def generate_classes(self):
for bs in self.para_cache.itervalues():
for bs in itervalues(self.para_cache):
css = bs.css
if css:
self.register(css, 'block')
for bs in self.run_cache.itervalues():
for bs in itervalues(self.run_cache):
css = bs.css
if css:
self.register(css, 'text')
def class_name(self, css):
h = hash(frozenset(css.iteritems()))
h = hash(frozenset(iteritems(css)))
return self.classes.get(h, (None, None))[0]
def generate_css(self, dest_dir, docx, notes_nopb, nosupsub):
@ -495,8 +496,8 @@ class Styles(object):
prefix = ef + '\n' + prefix
ans = []
for (cls, css) in sorted(self.classes.itervalues(), key=lambda x:x[0]):
b = ('\t%s: %s;' % (k, v) for k, v in css.iteritems())
for (cls, css) in sorted(itervalues(self.classes), key=lambda x:x[0]):
b = ('\t%s: %s;' % (k, v) for k, v in iteritems(css))
b = '\n'.join(b)
ans.append('.%s {\n%s\n}\n' % (cls, b.rstrip(';')))
return prefix + '\n' + '\n'.join(ans)

View File

@ -10,7 +10,7 @@ from lxml.html.builder import TABLE, TR, TD
from calibre.ebooks.docx.block_styles import inherit, read_shd as rs, read_border, binary_property, border_props, ParagraphStyle, border_to_css
from calibre.ebooks.docx.char_styles import RunStyle
from polyglot.builtins import range
from polyglot.builtins import iteritems, itervalues, range
# Read from XML {{{
read_shd = rs
@ -86,7 +86,7 @@ def read_spacing(parent, dest, XPath, get):
def read_float(parent, dest, XPath, get):
ans = inherit
for x in XPath('./w:tblpPr')(parent):
ans = {k.rpartition('}')[-1]: v for k, v in x.attrib.iteritems()}
ans = {k.rpartition('}')[-1]: v for k, v in iteritems(x.attrib)}
setattr(dest, 'float', ans)
@ -618,7 +618,7 @@ class Table(object):
def __iter__(self):
for p in self.paragraphs:
yield p
for t in self.sub_tables.itervalues():
for t in itervalues(self.sub_tables):
for p in t:
yield p
@ -665,7 +665,7 @@ class Table(object):
table_style = self.table_style.css
if table_style:
table.set('class', self.styles.register(table_style, 'table'))
for elem, style in style_map.iteritems():
for elem, style in iteritems(style_map):
css = style.css
if css:
elem.set('class', self.styles.register(css, elem.tag))
@ -686,7 +686,7 @@ class Tables(object):
self.sub_tables |= set(self.tables[-1].sub_tables)
def apply_markup(self, object_map, page_map):
rmap = {v:k for k, v in object_map.iteritems()}
rmap = {v:k for k, v in iteritems(object_map)}
for table in self.tables:
table.apply_markup(rmap, page_map[table.tbl])

View File

@ -29,6 +29,8 @@ from calibre.ebooks.docx.fields import Fields
from calibre.ebooks.docx.settings import Settings
from calibre.ebooks.metadata.opf2 import OPFCreator
from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1
from polyglot.builtins import iteritems, itervalues
NBSP = '\xa0'
@ -122,7 +124,7 @@ class Convert(object):
self.read_page_properties(doc)
self.resolve_alternate_content(doc)
self.current_rels = relationships_by_id
for wp, page_properties in self.page_map.iteritems():
for wp, page_properties in iteritems(self.page_map):
self.current_page = page_properties
if wp.tag.endswith('}p'):
p = self.convert_p(wp)
@ -162,7 +164,7 @@ class Convert(object):
self.styles.apply_contextual_spacing(paras)
self.mark_block_runs(paras)
for p, wp in self.object_map.iteritems():
for p, wp in iteritems(self.object_map):
if len(p) > 0 and not p.text and len(p[0]) > 0 and not p[0].text and p[0][0].get('class', None) == 'tab':
# Paragraph uses tabs for indentation, convert to text-indent
parent = p[0]
@ -192,7 +194,7 @@ class Convert(object):
self.tables.apply_markup(self.object_map, self.page_map)
numbered = []
for html_obj, obj in self.object_map.iteritems():
for html_obj, obj in iteritems(self.object_map):
raw = obj.get('calibre_num_id', None)
if raw is not None:
lvl, num_id = raw.partition(':')[0::2]
@ -212,7 +214,7 @@ class Convert(object):
self.log.debug('Converting styles to CSS')
self.styles.generate_classes()
for html_obj, obj in self.object_map.iteritems():
for html_obj, obj in iteritems(self.object_map):
style = self.styles.resolve(obj)
if style is not None:
css = style.css
@ -220,7 +222,7 @@ class Convert(object):
cls = self.styles.class_name(css)
if cls:
html_obj.set('class', cls)
for html_obj, css in self.framed_map.iteritems():
for html_obj, css in iteritems(self.framed_map):
cls = self.styles.class_name(css)
if cls:
html_obj.set('class', cls)
@ -407,13 +409,13 @@ class Convert(object):
doc_anchors = frozenset(self.namespace.XPath('./w:body/w:bookmarkStart[@w:name]')(doc))
if doc_anchors:
current_bm = set()
rmap = {v:k for k, v in self.object_map.iteritems()}
rmap = {v:k for k, v in iteritems(self.object_map)}
for p in self.namespace.descendants(doc, 'w:p', 'w:bookmarkStart[@w:name]'):
if p.tag.endswith('}p'):
if current_bm and p in rmap:
para = rmap[p]
if 'id' not in para.attrib:
para.set('id', generate_anchor(next(iter(current_bm)), frozenset(self.anchor_map.itervalues())))
para.set('id', generate_anchor(next(iter(current_bm)), frozenset(itervalues(self.anchor_map))))
for name in current_bm:
self.anchor_map[name] = para.get('id')
current_bm = set()
@ -469,10 +471,10 @@ class Convert(object):
# _GoBack is a special bookmark inserted by Word 2010 for
# the return to previous edit feature, we ignore it
old_anchor = current_anchor
self.anchor_map[anchor] = current_anchor = generate_anchor(anchor, frozenset(self.anchor_map.itervalues()))
self.anchor_map[anchor] = current_anchor = generate_anchor(anchor, frozenset(itervalues(self.anchor_map)))
if old_anchor is not None:
# The previous anchor was not applied to any element
for a, t in tuple(self.anchor_map.iteritems()):
for a, t in tuple(iteritems(self.anchor_map)):
if t == old_anchor:
self.anchor_map[a] = current_anchor
elif x.tag.endswith('}hyperlink'):
@ -480,11 +482,11 @@ class Convert(object):
elif x.tag.endswith('}instrText') and x.text and x.text.strip().startswith('TOC '):
old_anchor = current_anchor
anchor = str(uuid.uuid4())
self.anchor_map[anchor] = current_anchor = generate_anchor('toc', frozenset(self.anchor_map.itervalues()))
self.anchor_map[anchor] = current_anchor = generate_anchor('toc', frozenset(itervalues(self.anchor_map)))
self.toc_anchor = current_anchor
if old_anchor is not None:
# The previous anchor was not applied to any element
for a, t in tuple(self.anchor_map.iteritems()):
for a, t in tuple(iteritems(self.anchor_map)):
if t == old_anchor:
self.anchor_map[a] = current_anchor
if current_anchor is not None:
@ -559,7 +561,7 @@ class Convert(object):
def resolve_links(self):
self.resolved_link_map = {}
for hyperlink, spans in self.link_map.iteritems():
for hyperlink, spans in iteritems(self.link_map):
relationships_by_id = self.link_source_map[hyperlink]
span = spans[0]
if len(spans) > 1:
@ -585,7 +587,7 @@ class Convert(object):
# hrefs that point nowhere give epubcheck a hernia. The element
# should be styled explicitly by Word anyway.
# span.set('href', '#')
rmap = {v:k for k, v in self.object_map.iteritems()}
rmap = {v:k for k, v in iteritems(self.object_map)}
for hyperlink, runs in self.fields.hyperlink_fields:
spans = [rmap[r] for r in runs if r in rmap]
if not spans:
@ -744,7 +746,7 @@ class Convert(object):
if not self.block_runs:
return
rmap = {v:k for k, v in self.object_map.iteritems()}
rmap = {v:k for k, v in iteritems(self.object_map)}
for border_style, blocks in self.block_runs:
paras = tuple(rmap[p] for p in blocks)
for p in paras:

View File

@ -13,7 +13,7 @@ from lxml.etree import tostring
from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.oeb.polish.toc import elem_to_toc_text
from polyglot.builtins import unicode_type, range
from polyglot.builtins import iteritems, unicode_type, range
def from_headings(body, log, namespace):
@ -25,7 +25,7 @@ def from_headings(body, log, namespace):
level_prev = {i+1:None for i in range(len(xpaths))}
level_prev[0] = tocroot
level_item_map = {i+1:frozenset(xp(body)) for i, xp in enumerate(xpaths)}
item_level_map = {e:i for i, elems in level_item_map.iteritems() for e in elems}
item_level_map = {e:i for i, elems in iteritems(level_item_map) for e in elems}
idcount = count()

View File

@ -19,6 +19,7 @@ from calibre.utils.date import utcnow
from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1
from calibre.utils.zipfile import ZipFile
from calibre.ebooks.pdf.render.common import PAPER_SIZES
from polyglot.builtins import iteritems
def xml2str(root, pretty_print=False, with_tail=False):
@ -55,7 +56,7 @@ def create_skeleton(opts, namespaces=None):
def w(x):
return '{%s}%s' % (namespaces['w'], x)
dn = {k:v for k, v in namespaces.iteritems() if k in {'w', 'r', 'm', 've', 'o', 'wp', 'w10', 'wne', 'a', 'pic'}}
dn = {k:v for k, v in iteritems(namespaces) if k in {'w', 'r', 'm', 've', 'o', 'wp', 'w10', 'wne', 'a', 'pic'}}
E = ElementMaker(namespace=dn['w'], nsmap=dn)
doc = E.document()
body = E.body()
@ -73,7 +74,7 @@ def create_skeleton(opts, namespaces=None):
E.docGrid(**{w('linePitch'):"360"}),
))
dn = {k:v for k, v in namespaces.iteritems() if k in tuple('wra') + ('wp',)}
dn = {k:v for k, v in iteritems(namespaces) if k in tuple('wra') + ('wp',)}
E = ElementMaker(namespace=dn['w'], nsmap=dn)
styles = E.styles(
E.docDefaults(
@ -120,12 +121,12 @@ class DocumentRelationships(object):
def __init__(self, namespace):
self.rmap = {}
self.namespace = namespace
for typ, target in {
for typ, target in iteritems({
namespace.names['STYLES']: 'styles.xml',
namespace.names['NUMBERING']: 'numbering.xml',
namespace.names['WEB_SETTINGS']: 'webSettings.xml',
namespace.names['FONTS']: 'fontTable.xml',
}.iteritems():
}):
self.add_relationship(target, typ)
def get_relationship_id(self, target, rtype, target_mode=None):
@ -145,7 +146,7 @@ class DocumentRelationships(object):
namespaces = self.namespace.namespaces
E = ElementMaker(namespace=namespaces['pr'], nsmap={None:namespaces['pr']})
relationships = E.Relationships()
for (target, rtype, target_mode), rid in self.rmap.iteritems():
for (target, rtype, target_mode), rid in iteritems(self.rmap):
r = E.Relationship(Id=rid, Type=rtype, Target=target)
if target_mode is not None:
r.set('TargetMode', target_mode)
@ -172,7 +173,7 @@ class DOCX(object):
def contenttypes(self):
E = ElementMaker(namespace=self.namespace.namespaces['ct'], nsmap={None:self.namespace.namespaces['ct']})
types = E.Types()
for partname, mt in {
for partname, mt in iteritems({
"/word/footnotes.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.footnotes+xml",
"/word/document.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml",
"/word/numbering.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml",
@ -184,15 +185,15 @@ class DOCX(object):
"/word/webSettings.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.webSettings+xml",
"/docProps/core.xml": "application/vnd.openxmlformats-package.core-properties+xml",
"/docProps/app.xml": "application/vnd.openxmlformats-officedocument.extended-properties+xml",
}.iteritems():
}):
types.append(E.Override(PartName=partname, ContentType=mt))
added = {'png', 'gif', 'jpeg', 'jpg', 'svg', 'xml'}
for ext in added:
types.append(E.Default(Extension=ext, ContentType=guess_type('a.'+ext)[0]))
for ext, mt in {
for ext, mt in iteritems({
"rels": "application/vnd.openxmlformats-package.relationships+xml",
"odttf": "application/vnd.openxmlformats-officedocument.obfuscatedFont",
}.iteritems():
}):
added.add(ext)
types.append(E.Default(Extension=ext, ContentType=mt))
for fname in self.images:
@ -270,12 +271,12 @@ class DOCX(object):
zf.writestr('word/fontTable.xml', xml2str(self.font_table))
zf.writestr('word/_rels/document.xml.rels', self.document_relationships.serialize())
zf.writestr('word/_rels/fontTable.xml.rels', xml2str(self.embedded_fonts))
for fname, data_getter in self.images.iteritems():
for fname, data_getter in iteritems(self.images):
zf.writestr(fname, data_getter())
for fname, data in self.fonts.iteritems():
for fname, data in iteritems(self.fonts):
zf.writestr(fname, data)
if __name__ == '__main__':
d = DOCX(None, None)
print (d.websettings)
print(d.websettings)

View File

@ -10,7 +10,7 @@ import os
import posixpath
from collections import namedtuple
from functools import partial
from polyglot.builtins import map
from polyglot.builtins import iteritems, itervalues, map
from lxml import etree
@ -131,7 +131,7 @@ class ImagesManager(object):
if fake_margins:
# DOCX does not support setting margins for inline images, so we
# fake it by using effect extents to simulate margins
makeelement(parent, 'wp:effectExtent', **{k[-1].lower():v for k, v in get_image_margins(style).iteritems()})
makeelement(parent, 'wp:effectExtent', **{k[-1].lower():v for k, v in iteritems(get_image_margins(style))})
else:
makeelement(parent, 'wp:effectExtent', l='0', r='0', t='0', b='0')
if floating is not None:
@ -175,7 +175,7 @@ class ImagesManager(object):
return fname
def serialize(self, images_map):
for img in self.images.itervalues():
for img in itervalues(self.images):
images_map['word/' + img.fname] = partial(self.get_data, img.item)
def get_data(self, item):

View File

@ -9,6 +9,8 @@ __copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
from collections import defaultdict
from operator import attrgetter
from polyglot.builtins import iteritems, itervalues
LIST_STYLES = frozenset(
'disc circle square decimal decimal-leading-zero lower-roman upper-roman'
' lower-greek lower-alpha lower-latin upper-alpha upper-latin hiragana hebrew'
@ -62,7 +64,7 @@ class NumberingDefinition(object):
items_for_level = defaultdict(list)
container_for_level = {}
type_for_level = {}
for ilvl, items in self.level_map.iteritems():
for ilvl, items in iteritems(self.level_map):
for container, list_tag, block, list_type, tag_style in items:
items_for_level[ilvl].append(list_tag)
container_for_level[ilvl] = container
@ -76,7 +78,7 @@ class NumberingDefinition(object):
return hash(self.levels)
def link_blocks(self):
for ilvl, items in self.level_map.iteritems():
for ilvl, items in iteritems(self.level_map):
for container, list_tag, block, list_type, tag_style in items:
block.numbering_id = (self.num_id + 1, ilvl)
@ -148,16 +150,16 @@ class ListsManager(object):
ilvl = len(container_tags) - 1
l.level_map[ilvl].append((container_tags[0], list_tag, block, list_type, tag_style))
[nd.finalize() for nd in lists.itervalues()]
[nd.finalize() for nd in itervalues(lists)]
definitions = {}
for defn in lists.itervalues():
for defn in itervalues(lists):
try:
defn = definitions[defn]
except KeyError:
definitions[defn] = defn
defn.num_id = len(definitions) - 1
defn.link_blocks()
self.definitions = sorted(definitions.itervalues(), key=attrgetter('num_id'))
self.definitions = sorted(itervalues(definitions), key=attrgetter('num_id'))
def serialize(self, parent):
for defn in self.definitions:

View File

@ -15,7 +15,7 @@ from lxml import etree
from calibre.ebooks import parse_css_length
from calibre.ebooks.docx.writer.utils import convert_color, int_or_zero
from calibre.utils.localization import lang_as_iso639_1
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, iterkeys, unicode_type
from tinycss.css21 import CSS21Parser
css_parser = CSS21Parser()
@ -158,7 +158,7 @@ class DOCXStyle(object):
getattr(self, x) for x in self.ALL_PROPS))
def makeelement(self, parent, name, **attrs):
return parent.makeelement(self.w(name), **{self.w(k):v for k, v in attrs.iteritems()})
return parent.makeelement(self.w(name), **{self.w(k):v for k, v in iteritems(attrs)})
def __hash__(self):
return self._hash
@ -365,7 +365,7 @@ class DescendantTextStyle(object):
p = []
def add(name, **props):
p.append((name, frozenset(props.iteritems())))
p.append((name, frozenset(iteritems(props))))
def vals(attr):
return getattr(parent_style, attr), getattr(child_style, attr)
@ -562,7 +562,7 @@ class BlockStyle(DOCXStyle):
def serialize_properties(self, pPr, normal_style):
makeelement, w = self.makeelement, self.w
spacing = makeelement(pPr, 'spacing')
for edge, attr in {'top':'before', 'bottom':'after'}.iteritems():
for edge, attr in iteritems({'top':'before', 'bottom':'after'}):
getter = attrgetter('css_margin_' + edge)
css_val, css_unit = parse_css_length(getter(self))
if css_unit in ('em', 'ex'):
@ -696,7 +696,7 @@ class StylesManager(object):
counts = Counter()
smap = {}
for (bs, rs), blocks in used_pairs.iteritems():
for (bs, rs), blocks in iteritems(used_pairs):
s = CombinedStyle(bs, rs, blocks, self.namespace)
smap[(bs, rs)] = s
counts[s] += sum(1 for b in blocks if not b.is_empty())
@ -721,7 +721,7 @@ class StylesManager(object):
heading_styles.append(style)
style.id = style.name = val
style.seq = i
self.combined_styles = sorted(counts.iterkeys(), key=attrgetter('seq'))
self.combined_styles = sorted(iterkeys(counts), key=attrgetter('seq'))
[ls.apply() for ls in self.combined_styles]
descendant_style_map = {}

View File

@ -10,7 +10,7 @@ from collections import namedtuple
from calibre.ebooks.docx.writer.utils import convert_color
from calibre.ebooks.docx.writer.styles import read_css_block_borders as rcbb, border_edges
from polyglot.builtins import range
from polyglot.builtins import iteritems, range
class Dummy(object):
@ -125,7 +125,7 @@ class Cell(object):
makeelement(tcPr, 'w:shd', w_val="clear", w_color="auto", w_fill=bc)
b = makeelement(tcPr, 'w:tcBorders', append=False)
for edge, border in self.borders.iteritems():
for edge, border in iteritems(self.borders):
if border is not None and border.width > 0 and border.style != 'none':
makeelement(b, 'w:' + edge, w_val=border.style, w_sz=str(border.width), w_color=border.color)
if len(b) > 0:

View File

@ -10,7 +10,7 @@ import unittest, numbers
from polyglot.builtins import map
from calibre.ebooks.epub.cfi.parse import parser, cfi_sort_key, decode_cfi
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
class Tests(unittest.TestCase):
@ -61,7 +61,7 @@ class Tests(unittest.TestCase):
if after is not None:
ta['after'] = after
if params:
ta['params'] = {unicode_type(k):(v,) if isinstance(v, unicode_type) else v for k, v in params.iteritems()}
ta['params'] = {unicode_type(k):(v,) if isinstance(v, unicode_type) else v for k, v in iteritems(params)}
if ta:
step['text_assertion'] = ta
return ans

View File

@ -32,7 +32,7 @@ def filter_name(name):
def build_name_for(expr):
if not expr:
counter = count(1)
return lambda elem: str(counter.next())
return lambda elem: str(next(counter))
selector = XPath(expr, namespaces=NSMAP)
def name_for(elem):
@ -55,7 +55,7 @@ def add_page_map(opfpath, opts):
name = name_for(elem)
id = elem.get('id', None)
if id is None:
id = elem.attrib['id'] = idgen.next()
id = elem.attrib['id'] = next(idgen)
href = '#'.join((item.href, id))
oeb.pages.add(name, href)
writer = None # DirWriter(version='2.0', page_map=True)

View File

@ -349,7 +349,7 @@ class Table(object):
nc = self.rows[r].cell_iterator()
try:
while True:
cell = nc.next()
cell = next(nc)
cellmatrix[r][rowpos[r]] = cell
rowpos[r] += cell.colspan
for k in range(1, cell.rowspan):

View File

@ -10,6 +10,7 @@ import codecs
import os
from pylrfopt import tagListOptimizer
from polyglot.builtins import iteritems
PYLRF_VERSION = "1.0"
@ -526,7 +527,7 @@ class LrfObject(object):
# belongs somewhere, so here it is.
#
composites = {}
for name, value in tagDict.iteritems():
for name, value in iteritems(tagDict):
if name == 'rubyAlignAndAdjust':
continue
if name in {
@ -651,7 +652,7 @@ class LrfWriter(object):
return self.sourceEncoding
def toUnicode(self, string):
if type(string) is str:
if isinstance(string, str):
string = string.decode(self.sourceEncoding)
return string

View File

@ -14,7 +14,7 @@ from calibre.ebooks.metadata.book import (SC_COPYABLE_FIELDS,
TOP_LEVEL_IDENTIFIERS, ALL_METADATA_FIELDS)
from calibre.library.field_metadata import FieldMetadata
from calibre.utils.icu import sort_key
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, iterkeys, unicode_type
# Special sets used to optimize the performance of getting and setting
# attributes on Metadata objects
@ -137,7 +137,7 @@ class Metadata(object):
return object.__getattribute__(self, field)
except AttributeError:
pass
if field in _data['user_metadata'].iterkeys():
if field in iterkeys(_data['user_metadata']):
d = _data['user_metadata'][field]
val = d['#value#']
if d['datatype'] != 'composite':
@ -180,7 +180,7 @@ class Metadata(object):
if val and val.lower() != 'und':
langs = [val]
_data['languages'] = langs
elif field in _data['user_metadata'].iterkeys():
elif field in iterkeys(_data['user_metadata']):
_data['user_metadata'][field]['#value#'] = val
_data['user_metadata'][field]['#extra#'] = extra
else:
@ -190,7 +190,7 @@ class Metadata(object):
self.__dict__[field] = val
def __iter__(self):
return object.__getattribute__(self, '_data').iterkeys()
return iterkeys(object.__getattribute__(self, '_data'))
def has_key(self, key):
return key in object.__getattribute__(self, '_data')
@ -219,7 +219,7 @@ class Metadata(object):
def get_extra(self, field, default=None):
_data = object.__getattribute__(self, '_data')
if field in _data['user_metadata'].iterkeys():
if field in iterkeys(_data['user_metadata']):
try:
return _data['user_metadata'][field]['#extra#']
except:
@ -255,7 +255,7 @@ class Metadata(object):
Set all identifiers. Note that if you previously set ISBN, calling
this method will delete it.
'''
cleaned = {ck(k):cv(v) for k, v in identifiers.iteritems() if k and v}
cleaned = {ck(k):cv(v) for k, v in iteritems(identifiers) if k and v}
object.__getattribute__(self, '_data')['identifiers'] = cleaned
def set_identifier(self, typ, val):
@ -287,14 +287,14 @@ class Metadata(object):
'''
return a list of the custom fields in this book
'''
return object.__getattribute__(self, '_data')['user_metadata'].iterkeys()
return iterkeys(object.__getattribute__(self, '_data')['user_metadata'])
def all_field_keys(self):
'''
All field keys known by this instance, even if their value is None
'''
_data = object.__getattribute__(self, '_data')
return frozenset(ALL_METADATA_FIELDS.union(_data['user_metadata'].iterkeys()))
return frozenset(ALL_METADATA_FIELDS.union(iterkeys(_data['user_metadata'])))
def metadata_for_field(self, key):
'''
@ -320,7 +320,7 @@ class Metadata(object):
v = self.get(attr, None)
if v is not None:
result[attr] = v
for attr in _data['user_metadata'].iterkeys():
for attr in iterkeys(_data['user_metadata']):
v = self.get(attr, None)
if v is not None:
result[attr] = v
@ -396,7 +396,7 @@ class Metadata(object):
return
um = {}
for key, meta in metadata.iteritems():
for key, meta in iteritems(metadata):
m = meta.copy()
if '#value#' not in m:
if m['datatype'] == 'text' and m['is_multiple']:
@ -576,7 +576,7 @@ class Metadata(object):
if callable(getattr(other, 'get_identifiers', None)):
d = self.get_identifiers()
s = other.get_identifiers()
d.update([v for v in s.iteritems() if v[1] is not None])
d.update([v for v in iteritems(s) if v[1] is not None])
self.set_identifiers(d)
else:
# other structure not Metadata. Copy the top-level identifiers
@ -749,7 +749,7 @@ class Metadata(object):
fmt('Rights', unicode_type(self.rights))
if self.identifiers:
fmt('Identifiers', u', '.join(['%s:%s'%(k, v) for k, v in
self.identifiers.iteritems()]))
iteritems(self.identifiers)]))
if self.comments:
fmt('Comments', self.comments)

View File

@ -13,6 +13,7 @@ from calibre.ebooks.metadata.book import SERIALIZABLE_FIELDS
from calibre.constants import filesystem_encoding, preferred_encoding
from calibre.library.field_metadata import FieldMetadata
from calibre import isbytestring
from polyglot.builtins import iteritems, itervalues
# Translate datetimes to and from strings. The string form is the datetime in
# UTC. The returned date is also UTC
@ -149,7 +150,7 @@ class JsonCodec(object):
def encode_metadata_attr(self, book, key):
if key == 'user_metadata':
meta = book.get_all_user_metadata(make_copy=True)
for fm in meta.itervalues():
for fm in itervalues(meta):
if fm['datatype'] == 'datetime':
fm['#value#'] = datetime_to_string(fm['#value#'])
encode_is_multiple(fm)
@ -184,7 +185,7 @@ class JsonCodec(object):
def raw_to_book(self, json_book, book_class, prefix):
try:
book = book_class(prefix, json_book.get('lpath', None))
for key,val in json_book.iteritems():
for key,val in iteritems(json_book):
meta = self.decode_metadata(key, val)
if key == 'user_metadata':
book.set_all_user_metadata(meta)
@ -201,7 +202,7 @@ class JsonCodec(object):
if key == 'classifiers':
key = 'identifiers'
if key == 'user_metadata':
for fm in value.itervalues():
for fm in itervalues(value):
if fm['datatype'] == 'datetime':
fm['#value#'] = string_to_datetime(fm['#value#'])
decode_is_multiple(fm)

View File

@ -10,7 +10,7 @@ from calibre.constants import preferred_encoding
from calibre.ebooks.metadata.book import SERIALIZABLE_FIELDS
from calibre.ebooks.metadata.book.base import Metadata
from calibre.utils.imghdr import what
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
def ensure_unicode(obj, enc=preferred_encoding):
@ -21,7 +21,7 @@ def ensure_unicode(obj, enc=preferred_encoding):
if isinstance(obj, (list, tuple)):
return [ensure_unicode(x) for x in obj]
if isinstance(obj, dict):
return {ensure_unicode(k): ensure_unicode(v) for k, v in obj.iteritems()}
return {ensure_unicode(k): ensure_unicode(v) for k, v in iteritems(obj)}
return obj
@ -63,7 +63,7 @@ def metadata_as_dict(mi, encode_cover_data=False):
def metadata_from_dict(src):
ans = Metadata('Unknown')
for key, value in src.iteritems():
for key, value in iteritems(src):
if key == 'user_metadata':
ans.set_all_user_metadata(value)
else:

View File

@ -16,7 +16,7 @@ from calibre.ebooks.metadata import string_to_authors, authors_to_sort_string, \
from calibre.ebooks.lrf.meta import LRFMetaFile
from calibre import prints
from calibre.utils.date import parse_date
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
USAGE=_('%prog ebook_file [options]\n') + \
_('''
@ -150,7 +150,7 @@ def do_set_metadata(opts, mi, stream, stream_type):
if val:
orig = mi.get_identifiers()
orig.update(val)
val = {k:v for k, v in orig.iteritems() if k and v}
val = {k:v for k, v in iteritems(orig) if k and v}
mi.set_identifiers(val)
if getattr(opts, 'cover', None) is not None:

View File

@ -16,6 +16,7 @@ from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.chardet import xml_to_unicode
from calibre import replace_entities, isbytestring
from calibre.utils.date import parse_date, is_date_undefined
from polyglot.builtins import iteritems, itervalues
def get_metadata(stream):
@ -60,16 +61,16 @@ attr_pat = r'''(?:(?P<sq>')|(?P<dq>"))(?P<content>(?(sq)[^']+|[^"]+))(?(sq)'|")'
def parse_meta_tags(src):
rmap = {}
for field, names in META_NAMES.iteritems():
for field, names in iteritems(META_NAMES):
for name in names:
rmap[name.lower()] = field
all_names = '|'.join(rmap)
ans = {}
npat = r'''name\s*=\s*['"]{0,1}(?P<name>%s)['"]{0,1}''' % all_names
cpat = 'content\s*=\s*%s' % attr_pat
cpat = r'content\s*=\s*%s' % attr_pat
for pat in (
'<meta\s+%s\s+%s' % (npat, cpat),
'<meta\s+%s\s+%s' % (cpat, npat),
r'<meta\s+%s\s+%s' % (npat, cpat),
r'<meta\s+%s\s+%s' % (cpat, npat),
):
for match in re.finditer(pat, src, flags=re.IGNORECASE):
x = match.group('name').lower()
@ -89,8 +90,8 @@ def parse_meta_tags(src):
def parse_comment_tags(src):
all_names = '|'.join(COMMENT_NAMES.itervalues())
rmap = {v:k for k, v in COMMENT_NAMES.iteritems()}
all_names = '|'.join(itervalues(COMMENT_NAMES))
rmap = {v:k for k, v in iteritems(COMMENT_NAMES)}
ans = {}
for match in re.finditer(r'''<!--\s*(?P<name>%s)\s*=\s*%s''' % (all_names, attr_pat), src):
field = rmap[match.group('name')]

View File

@ -11,6 +11,7 @@ from calibre.ebooks.metadata.opf2 import OPF, pretty_print
from calibre.ebooks.metadata.opf3 import apply_metadata, read_metadata
from calibre.ebooks.metadata.utils import parse_opf, normalize_languages, create_manifest_item, parse_opf_version
from calibre.ebooks.metadata import MetaInformation
from polyglot.builtins import iteritems
class DummyFile(object):
@ -61,7 +62,7 @@ def set_metadata_opf2(root, cover_prefix, mi, opf_version,
else:
orig = opf.get_identifiers()
orig.update(mi.get_identifiers())
opf.set_identifiers({k:v for k, v in orig.iteritems() if k and v})
opf.set_identifiers({k:v for k, v in iteritems(orig) if k and v})
if update_timestamp and mi.timestamp is not None:
opf.timestamp = mi.timestamp
raster_cover = opf.raster_cover

View File

@ -23,7 +23,7 @@ from calibre.utils.localization import get_lang, canonicalize_lang
from calibre import prints, guess_type
from calibre.utils.cleantext import clean_ascii_chars, clean_xml_chars
from calibre.utils.config import tweaks
from polyglot.builtins import unicode_type, range
from polyglot.builtins import iteritems, unicode_type, range
from polyglot.urllib import unquote, urlparse
pretty_print_opf = False
@ -977,7 +977,7 @@ class OPF(object): # {{{
'descendant::*[local-name() = "identifier" and text()]')(
self.metadata):
found_scheme = False
for attr, val in x.attrib.iteritems():
for attr, val in iteritems(x.attrib):
if attr.endswith('scheme'):
typ = icu_lower(val)
val = etree.tostring(x, with_tail=False, encoding=unicode_type,
@ -1010,7 +1010,7 @@ class OPF(object): # {{{
self.metadata):
xid = x.get('id', None)
is_package_identifier = uuid_id is not None and uuid_id == xid
typ = {val for attr, val in x.attrib.iteritems() if attr.endswith('scheme')}
typ = {val for attr, val in iteritems(x.attrib) if attr.endswith('scheme')}
if is_package_identifier:
typ = tuple(typ)
if typ and typ[0].lower() in identifiers:
@ -1019,7 +1019,7 @@ class OPF(object): # {{{
if typ and not (typ & {'calibre', 'uuid'}):
x.getparent().remove(x)
for typ, val in identifiers.iteritems():
for typ, val in iteritems(identifiers):
attrib = {'{%s}scheme'%self.NAMESPACES['opf']: typ.upper()}
self.set_text(self.create_metadata_element(
'identifier', attrib=attrib), unicode_type(val))
@ -1155,7 +1155,7 @@ class OPF(object): # {{{
def page_progression_direction(self):
spine = self.XPath('descendant::*[re:match(name(), "spine", "i")][1]')(self.root)
if spine:
for k, v in spine[0].attrib.iteritems():
for k, v in iteritems(spine[0].attrib):
if k == 'page-progression-direction' or k.endswith('}page-progression-direction'):
return v
@ -1525,7 +1525,7 @@ class OPFCreator(Metadata):
a(DC_ELEM('description', self.comments))
if self.publisher:
a(DC_ELEM('publisher', self.publisher))
for key, val in self.get_identifiers().iteritems():
for key, val in iteritems(self.get_identifiers()):
a(DC_ELEM('identifier', val, opf_attrs={'scheme':icu_upper(key)}))
if self.rights:
a(DC_ELEM('rights', self.rights))
@ -1651,7 +1651,7 @@ def metadata_to_opf(mi, as_string=True, default_lang=None):
try:
elem = metadata.makeelement(tag, attrib=attrib)
except ValueError:
elem = metadata.makeelement(tag, attrib={k:clean_xml_chars(v) for k, v in attrib.iteritems()})
elem = metadata.makeelement(tag, attrib={k:clean_xml_chars(v) for k, v in iteritems(attrib)})
elem.tail = '\n'+(' '*8)
if text:
try:
@ -1672,7 +1672,7 @@ def metadata_to_opf(mi, as_string=True, default_lang=None):
factory(DC('description'), clean_ascii_chars(mi.comments))
if mi.publisher:
factory(DC('publisher'), mi.publisher)
for key, val in mi.get_identifiers().iteritems():
for key, val in iteritems(mi.get_identifiers()):
factory(DC('identifier'), val, scheme=icu_upper(key))
if mi.rights:
factory(DC('rights'), mi.rights)

View File

@ -8,7 +8,7 @@ import json
import re
from collections import defaultdict, namedtuple
from functools import wraps
from polyglot.builtins import map
from polyglot.builtins import iteritems, map
from lxml import etree
@ -190,9 +190,9 @@ def ensure_prefix(root, prefixes, prefix, value=None):
if prefixes is None:
prefixes = read_prefixes(root)
prefixes[prefix] = value or reserved_prefixes[prefix]
prefixes = {k:v for k, v in prefixes.iteritems() if reserved_prefixes.get(k) != v}
prefixes = {k:v for k, v in iteritems(prefixes) if reserved_prefixes.get(k) != v}
if prefixes:
root.set('prefix', ' '.join('%s: %s' % (k, v) for k, v in prefixes.iteritems()))
root.set('prefix', ' '.join('%s: %s' % (k, v) for k, v in iteritems(prefixes)))
else:
root.attrib.pop('prefix', None)
@ -299,7 +299,7 @@ def set_identifiers(root, prefixes, refines, new_identifiers, force_identifiers=
remove_element(ident, refines)
continue
metadata = XPath('./opf:metadata')(root)[0]
for scheme, val in new_identifiers.iteritems():
for scheme, val in iteritems(new_identifiers):
ident = metadata.makeelement(DC('identifier'))
ident.text = '%s:%s' % (scheme, val)
if package_identifier is None:
@ -854,7 +854,7 @@ set_author_link_map = dict_writer('author_link_map')
def deserialize_user_metadata(val):
val = json.loads(val, object_hook=from_json)
ans = {}
for name, fm in val.iteritems():
for name, fm in iteritems(val):
decode_is_multiple(fm)
ans[name] = fm
return ans
@ -969,7 +969,7 @@ def read_metadata(root, ver=None, return_extra_data=False):
prefixes, refines = read_prefixes(root), read_refines(root)
identifiers = read_identifiers(root, prefixes, refines)
ids = {}
for key, vals in identifiers.iteritems():
for key, vals in iteritems(identifiers):
if key == 'calibre':
ans.application_id = vals[0]
elif key == 'uuid':
@ -1007,7 +1007,7 @@ def read_metadata(root, ver=None, return_extra_data=False):
ans.series, ans.series_index = s, si
ans.author_link_map = read_author_link_map(root, prefixes, refines) or ans.author_link_map
ans.user_categories = read_user_categories(root, prefixes, refines) or ans.user_categories
for name, fm in (read_user_metadata(root, prefixes, refines) or {}).iteritems():
for name, fm in iteritems((read_user_metadata(root, prefixes, refines) or {})):
ans.set_user_metadata(name, fm)
if return_extra_data:
ans = ans, ver, read_raster_cover(root, prefixes, refines), first_spine_item(root, prefixes, refines)

View File

@ -13,6 +13,7 @@ from calibre.ebooks.metadata.opf3 import (
set_refines, set_user_metadata3
)
from calibre.ebooks.metadata.utils import parse_opf, pretty_print_opf
from polyglot.builtins import itervalues
class Data(object):
@ -140,7 +141,7 @@ def upgrade_series(root, data):
def upgrade_custom(root, data):
m = read_user_metadata2(root, remove_tags=True)
if m:
for fm in m.itervalues():
for fm in itervalues(m):
encode_is_multiple(fm)
set_user_metadata3(root, data.prefixes, data.refines, m)

View File

@ -12,7 +12,7 @@ from calibre.ptempfile import TemporaryDirectory
from calibre.ebooks.metadata import (
MetaInformation, string_to_authors, check_isbn, check_doi)
from calibre.utils.ipc.simple_worker import fork_job, WorkerError
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, unicode_type
def get_tools():
@ -153,9 +153,9 @@ def get_metadata(stream, cover=True):
# Look for recognizable identifiers in the info dict, if they were not
# found in the XMP metadata
for scheme, check_func in {'doi':check_doi, 'isbn':check_isbn}.iteritems():
for scheme, check_func in iteritems({'doi':check_doi, 'isbn':check_isbn}):
if scheme not in mi.get_identifiers():
for k, v in info.iteritems():
for k, v in iteritems(info):
if k != 'xmp_metadata':
val = check_func(v)
if val:

View File

@ -4,6 +4,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from polyglot.builtins import iteritems
from polyglot.urllib import quote_plus
AUTHOR_SEARCHES = {
@ -54,7 +55,7 @@ def qquote(val):
def url_for(template, data):
return template.format(**{k: qquote(v) for k, v in data.iteritems()})
return template.format(**{k: qquote(v) for k, v in iteritems(data)})
def url_for_author_search(key, **kw):

View File

@ -14,6 +14,7 @@ from calibre.customize import Plugin
from calibre.ebooks.metadata import check_isbn
from calibre.ebooks.metadata.author_mapper import cap_author_token
from calibre.utils.localization import canonicalize_lang, get_lang
from polyglot.builtins import iteritems
def create_log(ostream=None):
@ -65,7 +66,7 @@ class InternalMetadataCompareKeyGen(object):
def __init__(self, mi, source_plugin, title, authors, identifiers):
same_identifier = 2
idents = mi.get_identifiers()
for k, v in identifiers.iteritems():
for k, v in iteritems(identifiers):
if idents.get(k) == v:
same_identifier = 1
break
@ -280,7 +281,7 @@ class Source(Plugin):
def get_related_isbns(self, id_):
with self.cache_lock:
for isbn, q in self._isbn_to_identifier_cache.iteritems():
for isbn, q in iteritems(self._isbn_to_identifier_cache):
if q == id_:
yield isbn

View File

@ -27,7 +27,7 @@ from calibre.utils.html2text import html2text
from calibre.utils.icu import lower
from calibre.utils.date import UNDEFINED_DATE
from calibre.utils.formatter import EvalFormatter
from polyglot.builtins import unicode_type
from polyglot.builtins import iteritems, iterkeys, itervalues, unicode_type
# Download worker {{{
@ -99,7 +99,7 @@ class ISBNMerge(object):
def isbn_in_pool(self, isbn):
if isbn:
for isbns, pool in self.pools.iteritems():
for isbns, pool in iteritems(self.pools):
if isbn in isbns:
return pool
return None
@ -147,7 +147,7 @@ class ISBNMerge(object):
def finalize(self):
has_isbn_result = False
for results in self.pools.itervalues():
for results in itervalues(self.pools):
if results:
has_isbn_result = True
break
@ -192,7 +192,7 @@ class ISBNMerge(object):
if len(groups) != len(self.results):
self.results = []
for rgroup in groups.itervalues():
for rgroup in itervalues(groups):
rel = [r.average_source_relevance for r in rgroup]
if len(rgroup) > 1:
result = self.merge(rgroup, None, do_asr=False)
@ -206,7 +206,7 @@ class ISBNMerge(object):
groups, empty = {}, []
for result in self.results:
key = set()
for typ, val in result.identifiers.iteritems():
for typ, val in iteritems(result.identifiers):
if typ and val:
key.add((typ, val))
if key:
@ -227,7 +227,7 @@ class ISBNMerge(object):
if len(groups) != len(self.results):
self.results = []
for rgroup in groups.itervalues():
for rgroup in itervalues(groups):
rel = [r.average_source_relevance for r in rgroup]
if len(rgroup) > 1:
result = self.merge(rgroup, None, do_asr=False)
@ -244,7 +244,7 @@ class ISBNMerge(object):
def merge_isbn_results(self):
self.results = []
sources = set()
for min_year, results in self.pools.itervalues():
for min_year, results in itervalues(self.pools):
if results:
for r in results:
sources.add(r.identify_plugin)
@ -362,7 +362,7 @@ class ISBNMerge(object):
def merge_identify_results(result_map, log):
isbn_merge = ISBNMerge(log)
for plugin, results in result_map.iteritems():
for plugin, results in iteritems(result_map):
for result in results:
isbn_merge.add_result(result)
@ -439,12 +439,12 @@ def identify(log, abort, # {{{
pass
sort_kwargs = dict(kwargs)
for k in list(sort_kwargs.iterkeys()):
for k in list(iterkeys(sort_kwargs)):
if k not in ('title', 'authors', 'identifiers'):
sort_kwargs.pop(k)
longest, lp = -1, ''
for plugin, presults in results.iteritems():
for plugin, presults in iteritems(results):
presults.sort(key=plugin.identify_results_keygen(**sort_kwargs))
# Throw away lower priority results from the same source that have exactly the same
@ -542,7 +542,7 @@ def identify(log, abort, # {{{
def urls_from_identifiers(identifiers): # {{{
identifiers = {k.lower():v for k, v in identifiers.iteritems()}
identifiers = {k.lower():v for k, v in iteritems(identifiers)}
ans = []
keys_left = set(identifiers)
@ -553,7 +553,7 @@ def urls_from_identifiers(identifiers): # {{{
rules = msprefs['id_link_rules']
if rules:
formatter = EvalFormatter()
for k, val in identifiers.iteritems():
for k, val in iteritems(identifiers):
val = val.replace('|', ',')
vals = {'id':quote(val if isinstance(val, bytes) else val.encode('utf-8')).decode('ascii')}
items = rules.get(k) or ()
@ -592,7 +592,7 @@ def urls_from_identifiers(identifiers): # {{{
add(issn, 'issn', issn,
'https://www.worldcat.org/issn/'+issn)
q = {'http', 'https', 'file'}
for k, url in identifiers.iteritems():
for k, url in iteritems(identifiers):
if url and re.match(r'ur[il]\d*$', k) is not None:
url = url[:8].replace('|', ':') + url[8:].replace('|', ',')
if url.partition(':')[0].lower() in q:

View File

@ -17,6 +17,7 @@ from calibre.constants import DEBUG, numeric_version
from calibre.ebooks.metadata.sources.base import Source
from calibre.utils.config import JSONConfig
from calibre.utils.https import get_https_resource_securely
from polyglot.builtins import iteritems, itervalues
cache = JSONConfig('metadata-sources-cache.json')
@ -38,7 +39,7 @@ def load_plugin(src):
src = src.encode('utf-8')
ns = {}
exec(src, ns)
for x in ns.itervalues():
for x in itervalues(ns):
if isinstance(x, type) and issubclass(x, Source) and x is not Source:
return x
@ -76,7 +77,7 @@ def patch_search_engines(src):
def patch_plugins():
from calibre.customize.ui import patch_metadata_plugins
patches = {}
for name, val in cache.iteritems():
for name, val in iteritems(cache):
if name == 'hashes':
continue
if name == 'search_engines':
@ -94,7 +95,7 @@ def update_needed():
'https://code.calibre-ebook.com/metadata-sources/hashes.json')
hashes = bz2.decompress(hashes)
hashes = json.loads(hashes)
for k, v in hashes.iteritems():
for k, v in iteritems(hashes):
if current_hashes.get(k) != v:
needed[k] = v
remove = set(current_hashes) - set(hashes)
@ -132,7 +133,7 @@ def main(report_error=prints, report_action=prints):
cache.touch()
return
updated = {}
for name, expected_hash in needed.iteritems():
for name, expected_hash in iteritems(needed):
report_action('Updating metadata source {}...'.format(name))
try:
update_plugin(name, updated, expected_hash)

View File

@ -18,6 +18,7 @@ from calibre.ebooks.metadata.sources.update import patch_plugins
from calibre.utils.date import as_utc
from calibre.utils.logging import GUILog
from polyglot.queue import Empty, Queue
from polyglot.builtins import iteritems
def merge_result(oldmi, newmi, ensure_fields=None):
@ -54,7 +55,7 @@ def main(do_identify, covers, metadata, ensure_fields, tdir):
log = GUILog()
patch_plugins()
for book_id, mi in metadata.iteritems():
for book_id, mi in iteritems(metadata):
mi = OPF(BytesIO(mi), basedir=tdir,
populate_spine=False).to_book_metadata()
title, authors, identifiers = mi.title, mi.authors, mi.identifiers

View File

@ -19,7 +19,7 @@ from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.opf2 import dump_dict
from calibre.utils.date import parse_date, isoformat, now
from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1
from polyglot.builtins import string_or_bytes
from polyglot.builtins import iteritems, string_or_bytes
_xml_declaration = re.compile(r'<\?xml[^<>]+encoding\s*=\s*[\'"](.*?)[\'"][^<>]*>', re.IGNORECASE)
@ -323,7 +323,7 @@ def metadata_from_xmp_packet(raw_bytes):
identifiers[scheme] = val
# Check Dublin Core for recognizable identifier types
for scheme, check_func in {'doi':check_doi, 'isbn':check_isbn}.iteritems():
for scheme, check_func in iteritems({'doi':check_doi, 'isbn':check_isbn}):
if scheme not in identifiers:
val = check_func(first_simple('//dc:identifier', root))
if val:
@ -407,7 +407,7 @@ def create_identifiers(xmp, identifiers):
xmp.append(xmpid)
bag = xmpid.makeelement(expand('rdf:Bag'))
xmpid.append(bag)
for scheme, value in identifiers.iteritems():
for scheme, value in iteritems(identifiers):
li = bag.makeelement(expand('rdf:li'))
li.set(expand('rdf:parseType'), 'Resource')
bag.append(li)
@ -443,7 +443,7 @@ def create_user_metadata(calibre, all_user_metadata):
calibre.append(s)
bag = s.makeelement(expand('rdf:Bag'))
s.append(bag)
for name, fm in all_user_metadata.iteritems():
for name, fm in iteritems(all_user_metadata):
try:
fm = copy.copy(fm)
encode_is_multiple(fm)
@ -473,12 +473,12 @@ def metadata_to_xmp_packet(mi):
dc = rdf.makeelement(expand('rdf:Description'), nsmap=nsmap('dc'))
dc.set(expand('rdf:about'), '')
rdf.append(dc)
for prop, tag in {'title':'dc:title', 'comments':'dc:description'}.iteritems():
for prop, tag in iteritems({'title':'dc:title', 'comments':'dc:description'}):
val = mi.get(prop) or ''
create_alt_property(dc, tag, val)
for prop, (tag, ordered) in {
for prop, (tag, ordered) in iteritems({
'authors':('dc:creator', True), 'tags':('dc:subject', False), 'publisher':('dc:publisher', False),
}.iteritems():
}):
val = mi.get(prop) or ()
if isinstance(val, string_or_bytes):
val = [val]
@ -502,9 +502,9 @@ def metadata_to_xmp_packet(mi):
identifiers = mi.get_identifiers()
if identifiers:
create_identifiers(xmp, identifiers)
for scheme, val in identifiers.iteritems():
for scheme, val in iteritems(identifiers):
if scheme in {'isbn', 'doi'}:
for prefix, parent in extra_ids.iteritems():
for prefix, parent in iteritems(extra_ids):
ie = parent.makeelement(expand('%s:%s'%(prefix, scheme)))
ie.text = val
parent.append(ie)
@ -552,7 +552,7 @@ def find_used_namespaces(elem):
def find_preferred_prefix(namespace, elems):
for elem in elems:
ans = {v:k for k, v in elem.nsmap.iteritems()}.get(namespace, None)
ans = {v:k for k, v in iteritems(elem.nsmap)}.get(namespace, None)
if ans is not None:
return ans
return find_preferred_prefix(namespace, elem.iterchildren(etree.Element))
@ -564,7 +564,7 @@ def find_nsmap(elems):
used_namespaces |= find_used_namespaces(elem)
ans = {}
used_namespaces -= {NS_MAP['xml'], NS_MAP['x'], None, NS_MAP['rdf']}
rmap = {v:k for k, v in NS_MAP.iteritems()}
rmap = {v:k for k, v in iteritems(NS_MAP)}
i = 0
for ns in used_namespaces:
if ns in rmap:

View File

@ -14,7 +14,7 @@ from calibre.ebooks.mobi.reader.headers import NULL_INDEX
from calibre.ebooks.mobi.langcodes import main_language, sub_language
from calibre.ebooks.mobi.debug import format_bytes
from calibre.ebooks.mobi.utils import get_trailing_data
from polyglot.builtins import range
from polyglot.builtins import iteritems, range
# PalmDB {{{
@ -597,9 +597,9 @@ class TextRecord(object): # {{{
self.trailing_data['uncrossable_breaks'] = self.trailing_data.pop(2)
self.trailing_data['raw_bytes'] = raw_trailing_bytes
for typ, val in self.trailing_data.iteritems():
for typ, val in iteritems(self.trailing_data):
if isinstance(typ, numbers.Integral):
print ('Record %d has unknown trailing data of type: %d : %r'%
print('Record %d has unknown trailing data of type: %d : %r'%
(idx, typ, val))
self.idx = idx
@ -609,7 +609,7 @@ class TextRecord(object): # {{{
with open(os.path.join(folder, name+'.txt'), 'wb') as f:
f.write(self.raw)
with open(os.path.join(folder, name+'.trailing_data'), 'wb') as f:
for k, v in self.trailing_data.iteritems():
for k, v in iteritems(self.trailing_data):
raw = '%s : %r\n\n'%(k, v)
f.write(raw.encode('utf-8'))

View File

@ -15,7 +15,7 @@ from calibre.ebooks.mobi.reader.headers import NULL_INDEX
from calibre.ebooks.mobi.reader.index import (CNCX, parse_indx_header,
parse_tagx_section, parse_index_record, INDEX_HEADER_FIELDS)
from calibre.ebooks.mobi.reader.ncx import (tag_fieldname_map, default_entry)
from polyglot.builtins import range
from polyglot.builtins import iteritems, iterkeys, range
File = namedtuple('File',
'file_number name divtbl_count start_position length')
@ -110,13 +110,13 @@ class Index(object):
if self.cncx:
a('*'*10 + ' CNCX ' + '*'*10)
for offset, val in self.cncx.iteritems():
for offset, val in iteritems(self.cncx):
a('%10s: %s'%(offset, val))
ans.extend(['', ''])
if self.table is not None:
a('*'*10 + ' %d Index Entries '%len(self.table) + '*'*10)
for k, v in self.table.iteritems():
for k, v in iteritems(self.table):
a('%s: %r'%(k, v))
if self.records:
@ -140,11 +140,11 @@ class SKELIndex(Index):
self.records = []
if self.table is not None:
for i, text in enumerate(self.table.iterkeys()):
for i, text in enumerate(iterkeys(self.table)):
tag_map = self.table[text]
if set(tag_map.iterkeys()) != {1, 6}:
if set(iterkeys(tag_map)) != {1, 6}:
raise ValueError('SKEL Index has unknown tags: %s'%
(set(tag_map.iterkeys())-{1,6}))
(set(iterkeys(tag_map))-{1,6}))
self.records.append(File(
i, # file_number
text, # name
@ -161,11 +161,11 @@ class SECTIndex(Index):
self.records = []
if self.table is not None:
for i, text in enumerate(self.table.iterkeys()):
for i, text in enumerate(iterkeys(self.table)):
tag_map = self.table[text]
if set(tag_map.iterkeys()) != {2, 3, 4, 6}:
if set(iterkeys(tag_map)) != {2, 3, 4, 6}:
raise ValueError('Chunk Index has unknown tags: %s'%
(set(tag_map.iterkeys())-{2, 3, 4, 6}))
(set(iterkeys(tag_map))-{2, 3, 4, 6}))
toc_text = self.cncx[tag_map[2][0]]
self.records.append(Elem(
@ -186,9 +186,9 @@ class GuideIndex(Index):
self.records = []
if self.table is not None:
for i, text in enumerate(self.table.iterkeys()):
for i, text in enumerate(iterkeys(self.table)):
tag_map = self.table[text]
if set(tag_map.iterkeys()) not in ({1, 6}, {1, 2, 3}):
if set(iterkeys(tag_map)) not in ({1, 6}, {1, 2, 3}):
raise ValueError('Guide Index has unknown tags: %s'%
tag_map)
@ -211,13 +211,13 @@ class NCXIndex(Index):
NCXEntry = namedtuple('NCXEntry', 'index start length depth parent '
'first_child last_child title pos_fid kind')
for num, x in enumerate(self.table.iteritems()):
for num, x in enumerate(iteritems(self.table)):
text, tag_map = x
entry = e = default_entry.copy()
entry['name'] = text
entry['num'] = num
for tag in tag_fieldname_map.iterkeys():
for tag in iterkeys(tag_fieldname_map):
fieldname, i = tag_fieldname_map[tag]
if tag in tag_map:
fieldvalue = tag_map[tag][i]
@ -226,9 +226,9 @@ class NCXIndex(Index):
# offset
fieldvalue = tuple(tag_map[tag])
entry[fieldname] = fieldvalue
for which, name in {3:'text', 5:'kind', 70:'description',
for which, name in iteritems({3:'text', 5:'kind', 70:'description',
71:'author', 72:'image_caption',
73:'image_attribution'}.iteritems():
73:'image_attribution'}):
if tag == which:
entry[name] = self.cncx.get(fieldvalue,
default_entry[name])

View File

@ -12,7 +12,7 @@ from collections import OrderedDict, namedtuple
from calibre.ebooks.mobi.utils import (decint, count_set_bits,
decode_string)
from polyglot.builtins import range
from polyglot.builtins import iteritems, range
TagX = namedtuple('TagX', 'tag num_of_values bitmask eof')
PTagX = namedtuple('PTagX', 'tag value_count value_bytes num_of_values')
@ -105,7 +105,7 @@ class CNCX(object): # {{{
except:
byts = raw[pos:]
r = format_bytes(byts)
print ('CNCX entry at offset %d has unknown format %s'%(
print('CNCX entry at offset %d has unknown format %s'%(
pos+record_offset, r))
self.records[pos+record_offset] = r
pos = len(raw)
@ -123,7 +123,7 @@ class CNCX(object): # {{{
__nonzero__ = __bool__
def iteritems(self):
return self.records.iteritems()
return iteritems(self.records)
# }}}
@ -216,7 +216,7 @@ def parse_index_record(table, data, control_byte_count, tags, codec,
header = parse_indx_header(data)
idxt_pos = header['start']
if data[idxt_pos:idxt_pos+4] != b'IDXT':
print ('WARNING: Invalid INDX record')
print('WARNING: Invalid INDX record')
entry_count = header['count']
# loop through to build up the IDXT position starts

View File

@ -23,7 +23,7 @@ from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.mobi.reader.headers import BookHeader
from calibre.utils.img import save_cover_data_to
from calibre.utils.imghdr import what
from polyglot.builtins import unicode_type, range
from polyglot.builtins import iteritems, unicode_type, range
class TopazError(ValueError):
@ -500,7 +500,7 @@ class MobiReader(object):
try:
float(sz)
except ValueError:
if sz in size_map.keys():
if sz in list(size_map.keys()):
attrib['size'] = size_map[sz]
elif tag.tag == 'img':
recindex = None
@ -894,7 +894,7 @@ class MobiReader(object):
def test_mbp_regex():
for raw, m in {
for raw, m in iteritems({
'<mbp:pagebreak></mbp:pagebreak>':'',
'<mbp:pagebreak xxx></mbp:pagebreak>yyy':' xxxyyy',
'<mbp:pagebreak> </mbp:pagebreak>':'',
@ -905,7 +905,7 @@ def test_mbp_regex():
'</mbp:pagebreak>':'',
'</mbp:pagebreak sdf>':' sdf',
'</mbp:pagebreak><mbp:pagebreak></mbp:pagebreak>xxx':'xxx',
}.iteritems():
}):
ans = MobiReader.PAGE_BREAK_PAT.sub(r'\1', raw)
if ans != m:
raise Exception('%r != %r for %r'%(ans, m, raw))

View File

@ -24,7 +24,7 @@ from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.mobi.utils import read_font_record
from calibre.ebooks.oeb.parse_utils import parse_html
from calibre.ebooks.oeb.base import XPath, XHTML, xml2text
from polyglot.builtins import range, zip
from polyglot.builtins import iterkeys, range, zip
from polyglot.urllib import urldefrag
Part = namedtuple('Part',
@ -134,7 +134,7 @@ class Mobi8Reader(object):
File = namedtuple('File',
'file_number name divtbl_count start_position length')
for i, text in enumerate(table.iterkeys()):
for i, text in enumerate(iterkeys(table)):
tag_map = table[text]
self.files.append(File(i, text, tag_map[1][0],
tag_map[6][0], tag_map[6][1]))
@ -143,7 +143,7 @@ class Mobi8Reader(object):
if self.header.dividx != NULL_INDEX:
table, cncx = read_index(self.kf8_sections, self.header.dividx,
self.header.codec)
for i, text in enumerate(table.iterkeys()):
for i, text in enumerate(iterkeys(table)):
tag_map = table[text]
toc_text = cncx[tag_map[2][0]]
self.elems.append(Elem(int(text), toc_text, tag_map[3][0],
@ -156,14 +156,14 @@ class Mobi8Reader(object):
Item = namedtuple('Item',
'type title pos_fid')
for i, ref_type in enumerate(table.iterkeys()):
for i, ref_type in enumerate(iterkeys(table)):
tag_map = table[ref_type]
# ref_type, ref_title, div/frag number
title = cncx[tag_map[1][0]]
fileno = None
if 3 in tag_map.keys():
if 3 in list(tag_map.keys()):
fileno = tag_map[3][0]
if 6 in tag_map.keys():
if 6 in list(tag_map.keys()):
fileno = tag_map[6]
self.guide.append(Item(ref_type.decode(self.header.codec),
title, fileno))

View File

@ -13,6 +13,7 @@ from calibre import replace_entities
from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.mobi.reader.headers import NULL_INDEX
from calibre.ebooks.mobi.reader.index import read_index
from polyglot.builtins import iteritems, iterkeys
tag_fieldname_map = {
1: ['pos',0],
@ -56,13 +57,13 @@ def read_ncx(sections, index, codec):
if index != NULL_INDEX:
table, cncx = read_index(sections, index, codec)
for num, x in enumerate(table.iteritems()):
for num, x in enumerate(iteritems(table)):
text, tag_map = x
entry = default_entry.copy()
entry['name'] = text
entry['num'] = num
for tag in tag_fieldname_map.iterkeys():
for tag in iterkeys(tag_fieldname_map):
fieldname, i = tag_fieldname_map[tag]
if tag in tag_map:
fieldvalue = tag_map[tag][i]
@ -71,9 +72,9 @@ def read_ncx(sections, index, codec):
# offset
fieldvalue = tuple(tag_map[tag])
entry[fieldname] = fieldvalue
for which, name in {3:'text', 5:'kind', 70:'description',
for which, name in iteritems({3:'text', 5:'kind', 70:'description',
71:'author', 72:'image_caption',
73:'image_attribution'}.iteritems():
73:'image_attribution'}):
if tag == which:
entry[name] = cncx.get(fieldvalue,
default_entry[name])
@ -100,4 +101,3 @@ def build_toc(index_entries):
item.play_order = i
return ans

Some files were not shown because too many files have changed in this diff Show More