Fix various uses of iteritems missed by dict_fixes

Kovid Goyal 2019-03-25 15:29:08 +05:30
parent b0d6aec8ed
commit 0fcad5f21f
GPG Key ID: 06BC317B515ACE7C
45 changed files with 334 additions and 214 deletions
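The hunks below follow two replacement patterns. Python-2-only scripts and news recipes simply call dict.items() instead of dict.iteritems(), while modules that must keep running on both Python 2 and 3 route the call through calibre's polyglot.builtins.iteritems helper (and the TOC viewer's own iteritems() method is renamed to iter_items() so it no longer shadows the dict API). A minimal sketch of the two patterns; the helper body here is an assumption modelled on how iteritems is used in this diff, not a copy of calibre's actual polyglot.builtins implementation:

import sys

ispy3 = sys.version_info.major >= 3

if ispy3:
    def iteritems(d):
        # dict.items() is already a lazy view object on Python 3
        return iter(d.items())
else:
    def iteritems(d):
        # the non-copying iterator on Python 2
        return d.iteritems()

sources = {'calibre': 'calibre.svg', 'ebook-edit': 'tweak.svg'}

# Pattern 1: call sites that only need to work on Python 3 use .items()
for name, src in sources.items():
    print(name, src)

# Pattern 2: shared code stays 2/3 compatible via the helper
reverse = {v: k for k, v in iteritems(sources)}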

View File

@@ -17,7 +17,7 @@ sources = {'calibre':j(imgsrc, 'calibre.svg'), 'ebook-edit':j(imgsrc, 'tweak.svg
if sys.argv[-1] == 'only-logo':
sources = {'calibre':sources['calibre']}
-for name, src in sources.iteritems():
+for name, src in sources.items():
iconset = name + '.iconset'
if os.path.exists(iconset):
shutil.rmtree(iconset)
@@ -44,4 +44,3 @@ for name, src in sources.iteritems():
subprocess.check_call(['optipng', '-o7', '-strip', 'all', name])
finally:
os.chdir('..')

View File

@@ -17,7 +17,7 @@ sources = {'library':j(imgsrc, 'calibre.svg'), 'ebook-edit':j(imgsrc, 'tweak.svg
if sys.argv[-1] == 'only-logo':
sources = {'library':sources['library']}
-for name, src in sources.iteritems():
+for name, src in sources.items():
os.mkdir('ico_temp')
try:
names = []

View File

@@ -78,7 +78,7 @@ class DemoTool(Tool):
# Iterate over all style declarations in the book, this means css
# stylesheets, <style> tags and style="" attributes
-for name, media_type in container.mime_map.iteritems():
+for name, media_type in container.mime_map.items():
if media_type in OEB_STYLES:
# A stylesheet. Parsed stylesheets are css_parser CSSStylesheet
# objects.

View File

@@ -17,6 +17,7 @@ from PyQt5.Qt import QDialog, QVBoxLayout, QPushButton, QMessageBox, QLabel
from calibre_plugins.interface_demo.config import prefs
class DemoDialog(QDialog):
def __init__(self, gui, icon, do_user_config):
@@ -97,7 +98,7 @@ class DemoDialog(QDialog):
''' View the most recently added book '''
most_recent = most_recent_id = None
db = self.db.new_api
-for book_id, timestamp in db.all_field_for('timestamp', db.all_book_ids()).iteritems():
+for book_id, timestamp in db.all_field_for('timestamp', db.all_book_ids()).items():
if most_recent is None or timestamp > most_recent:
most_recent = timestamp
most_recent_id = book_id
@@ -152,4 +153,3 @@ class DemoDialog(QDialog):
self.do_user_config(parent=self)
# Apply the changes
self.label.setText(prefs['hello_world_msg'])

View File

@@ -112,7 +112,7 @@ class BrandEins(BasicNewsRecipe):
{'title': title, 'url': url, 'description': desc})
self.log('Found article:', title, 'at', url)
-return [(st, articles) for st, articles in feeds.iteritems() if articles]
+return [(st, articles) for st, articles in feeds.items() if articles]
def get_cover_url(self):
# the index does not contain a usable cover, but the 'Welt in

View File

@@ -76,7 +76,7 @@ class Chronicle(BasicNewsRecipe):
if section_title not in feeds:
feeds[section_title] = []
feeds[section_title] += articles
-ans = [(key, val) for key, val in feeds.iteritems()]
+ans = [(key, val) for key, val in feeds.items()]
return ans
def preprocess_html(self, soup):

View File

@@ -259,7 +259,7 @@ class Economist(BasicNewsRecipe):
feeds[section_title] = []
feeds[section_title] += articles
-ans = [(key, val) for key, val in feeds.iteritems()]
+ans = [(key, val) for key, val in feeds.items()]
return ans
def eco_find_image_tables(self, soup):

View File

@@ -259,7 +259,7 @@ class Economist(BasicNewsRecipe):
feeds[section_title] = []
feeds[section_title] += articles
-ans = [(key, val) for key, val in feeds.iteritems()]
+ans = [(key, val) for key, val in feeds.items()]
return ans
def eco_find_image_tables(self, soup):

View File

@@ -78,4 +78,4 @@ class EconomicAndPoliticalWeekly(BasicNewsRecipe):
sections[current_section].append(
{'title': title, 'url': url, 'description': desc})
-return [(t, articles) for t, articles in sections.iteritems() if articles]
+return [(t, articles) for t, articles in sections.items() if articles]

View File

@@ -36,7 +36,7 @@ class AdvancedUserRecipe1322322819(BasicNewsRecipe):
s = s.group(1)
replacements = {"0B": ".", "0C": "/",
"0H": ",", "0I": "_", "0D": "?", "0F": "="}
-for (a, b) in replacements.iteritems():
+for (a, b) in replacements.items():
s = string.replace(s, a, b)
s = string.replace(s, "0A", "0")
return "http://" + s

View File

@@ -58,11 +58,11 @@ class HistoryToday(BasicNewsRecipe):
feeds = OrderedDict()
section_title = ''
-for section in div.findAll('div', attrs={'id': re.compile("block\-views\-contents.*")}):
+for section in div.findAll('div', attrs={'id': re.compile(r"block\-views\-contents.*")}):
section_title = self.tag_to_string(
section.find('h2', attrs={'class': 'title'}))
sectionbody = section.find('div', attrs={'class': 'view-content'})
-for article in sectionbody.findAll('div', attrs={'class': re.compile("views\-row.*")}):
+for article in sectionbody.findAll('div', attrs={'class': re.compile(r"views\-row.*")}):
articles = []
subarticle = []
subarticle = article.findAll('div')
@@ -84,7 +84,7 @@ class HistoryToday(BasicNewsRecipe):
feeds[section_title] = []
feeds[section_title] += articles
-ans = [(key, val) for key, val in feeds.iteritems()]
+ans = [(key, val) for key, val in feeds.items()]
return ans
def cleanup(self):
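Alongside the iteritems() conversion, hunks like the one above also turn regular-expression literals into raw strings (re.compile("views\-row.*") becomes re.compile(r"views\-row.*")). A small illustration of why, assuming CPython 3.6 or newer, where unrecognised escapes in ordinary string literals are deprecated (the exact warning category varies by Python version):

import re
import warnings

# Compiling a literal that contains the unrecognised escape "\-" triggers a
# DeprecationWarning (SyntaxWarning on newer interpreters); compile() on a
# source string makes it observable here.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    compile("pat = 'views\\-row.*'", '<demo>', 'exec')
print([str(w.message) for w in caught])

# The raw-string form passes the backslash through unchanged; to the regex
# engine \- just matches a literal hyphen, so behaviour is identical.
print(re.compile(r'views\-row.*').match('views-row first').group(0))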

View File

@@ -126,7 +126,7 @@ class LentaRURecipe(BasicNewsRecipe):
extractElements = {'div': [{'id': 'readers-block'}]}
# Remove all elements that were not extracted before
-for tag, attrs in extractElements.iteritems():
+for tag, attrs in extractElements.items():
for attr in attrs:
garbage = soup.findAll(tag, attr)
if garbage:

View File

@@ -65,6 +65,6 @@ class Liberation(BasicNewsRecipe):
encoding = {'0B': '.', '0C': '/', '0A': '0', '0F': '=', '0G': '&',
'0D': '?', '0E': '-', '0N': '.com', '0L': 'http://', '0S':
'www.', '0I': '_'}
-for k, v in encoding.iteritems():
+for k, v in encoding.items():
url = url.replace(k, v)
return url.partition('?')[0]

View File

@@ -48,7 +48,7 @@ class AdvancedUserRecipe1279258912(BasicNewsRecipe):
encoding = {'0B': '.', '0C': '/', '0A': '0', '0F': '=', '0G': '&',
'0D': '?', '0E': '-', '0N': '.com', '0L': 'http:',
'0S': '//', '0H': ','}
-for k, v in encoding.iteritems():
+for k, v in encoding.items():
link = link.replace(k, v)
ans = link
elif link:

View File

@@ -72,7 +72,7 @@ class PhilosophyNow(BasicNewsRecipe):
if section_title not in feeds:
feeds[section_title] = []
feeds[section_title] += articles
-ans = [(key, val) for key, val in feeds.iteritems()]
+ans = [(key, val) for key, val in feeds.items()]
return ans
def cleanup(self):

View File

@@ -117,7 +117,7 @@ class Pocket(BasicNewsRecipe):
self.user_error(
"Only {0} articles retrieved, minimum_articles not reached".format(len(pocket_feed)))
-for pocket_article in pocket_feed.iteritems():
+for pocket_article in pocket_feed.items():
self.articles.append({
'item_id': pocket_article[0],
'title': pocket_article[1]['resolved_title'],
@@ -163,7 +163,7 @@ class Pocket(BasicNewsRecipe):
for img in article['images']:
imgdiv = '<div id="RIL_IMG_{0}" class="RIL_IMG"></div>'.format(
article['images'][img]['image_id'])
-imgtag = '<img src="{0}" \>'.format(
+imgtag = r'<img src="{0}" \>'.format(
article['images'][img]['src'])
tmpbody = tmpbody.replace(imgdiv, imgtag)

View File

@@ -39,7 +39,7 @@ class Smithsonian(BasicNewsRecipe):
# Find date
date = re.sub(
-'.*\:\W*', "", self.tag_to_string(div.find('h1')).strip())
+r'.*\:\W*', "", self.tag_to_string(div.find('h1')).strip())
self.timefmt = u' [%s]' % date
# Find cover
@@ -66,5 +66,5 @@ class Smithsonian(BasicNewsRecipe):
self.log('\t', url)
articles.append(
{'title': title, 'url': url, 'description': desc})
-ans = [(key, val) for key, val in feeds.iteritems()]
+ans = [(key, val) for key, val in feeds.items()]
return ans

View File

@@ -1,9 +1,8 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
-from __future__ import (unicode_literals, division, absolute_import,
-print_function)
+from __future__ import (unicode_literals, division, absolute_import, print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
@@ -13,6 +12,7 @@ from argparse import ArgumentParser, FileType
from subprocess import check_call
from collections import OrderedDict
class ReadFileWithProgressReporting(file): # {{{
def __init__(self, path, mode='rb'):
@@ -34,26 +34,32 @@ class ReadFileWithProgressReporting(file): # {{{
def report_progress(self, size):
sys.stdout.write(b'\x1b[s')
sys.stdout.write(b'\x1b[K')
-frac = float(self.tell())/self._total
-mb_pos = self.tell()/float(1024**2)
-mb_tot = self._total/float(1024**2)
-kb_pos = self.tell()/1024.0
-kb_rate = kb_pos/(time.time()-self.start_time)
+frac = float(self.tell()) / self._total
+mb_pos = self.tell() / float(1024**2)
+mb_tot = self._total / float(1024**2)
+kb_pos = self.tell() / 1024.0
+kb_rate = kb_pos / (time.time() - self.start_time)
bit_rate = kb_rate * 1024
-eta = int((self._total - self.tell())/bit_rate) + 1
+eta = int((self._total - self.tell()) / bit_rate) + 1
eta_m, eta_s = eta / 60, eta % 60
sys.stdout.write(
-' %.1f%% %.1f/%.1fMB %.1f KB/sec %d minutes, %d seconds left'%(
-frac*100, mb_pos, mb_tot, kb_rate, eta_m, eta_s))
+' %.1f%% %.1f/%.1fMB %.1f KB/sec %d minutes, %d seconds left' %
+(frac * 100, mb_pos, mb_tot, kb_rate, eta_m, eta_s)
+)
sys.stdout.write(b'\x1b[u')
if self.tell() >= self._total:
sys.stdout.write('\n')
t = int(time.time() - self.start_time) + 1
-print ('Upload took %d minutes and %d seconds at %.1f KB/sec' % (
-t/60, t%60, kb_rate))
+print(
+'Upload took %d minutes and %d seconds at %.1f KB/sec' %
+(t / 60, t % 60, kb_rate)
+)
sys.stdout.flush()
# }}}
class Base(object): # {{{
def __init__(self):
@@ -69,13 +75,15 @@ class Base(object): # {{{
sys.stdout.flush()
def warn(self, *args, **kwargs):
-print('\n'+'_'*20, 'WARNING','_'*20)
+print('\n' + '_' * 20, 'WARNING', '_' * 20)
print(*args, **kwargs)
-print('_'*50)
+print('_' * 50)
sys.stdout.flush()
# }}}
class SourceForge(Base): # {{{
# Note that you should manually ssh once to username,project@frs.sourceforge.net
@@ -83,7 +91,7 @@ class SourceForge(Base): # {{{
def __init__(self, files, project, version, username, replace=False):
self.username, self.project, self.version = username, project, version
-self.base = '/home/frs/project/c/ca/'+project
+self.base = '/home/frs/project/c/ca/' + project
self.rdir = self.base + '/' + version
self.files = files
@@ -93,27 +101,32 @@ class SourceForge(Base): # {{{
self.info('Uploading', x)
for i in range(5):
try:
-check_call(['rsync', '-h', '-z', '--progress', '-e', 'ssh -x', x,
-'%s,%s@frs.sourceforge.net:%s'%(self.username, self.project,
-self.rdir+'/')])
+check_call([
+'rsync', '-h', '-z', '--progress', '-e', 'ssh -x', x,
+'%s,%s@frs.sourceforge.net:%s' %
+(self.username, self.project, self.rdir + '/')
+])
except KeyboardInterrupt:
raise SystemExit(1)
except:
-print ('\nUpload failed, trying again in 30 seconds')
+print('\nUpload failed, trying again in 30 seconds')
time.sleep(30)
else:
break
-print ('Uploaded in', int(time.time() - start), 'seconds\n\n')
+print('Uploaded in', int(time.time() - start), 'seconds\n\n')
# }}}
class GitHub(Base): # {{{
API = 'https://api.github.com/'
def __init__(self, files, reponame, version, username, password, replace=False):
self.files, self.reponame, self.version, self.username, self.password, self.replace = (
-files, reponame, version, username, password, replace)
+files, reponame, version, username, password, replace
+)
self.current_tag_name = 'v' + self.version
import requests
self.requests = s = requests.Session()
@@ -126,12 +139,17 @@ class GitHub(Base): # {{{
release = self.create_release(releases)
upload_url = release['upload_url'].partition('{')[0]
existing_assets = self.existing_assets(release['id'])
-for path, desc in self.files.iteritems():
+for path, desc in self.files.items():
self.info('')
-url = self.API + 'repos/%s/%s/releases/assets/{}' % (self.username, self.reponame)
+url = self.API + 'repos/%s/%s/releases/assets/{}' % (
+self.username, self.reponame
+)
fname = os.path.basename(path)
if fname in existing_assets:
-self.info('Deleting %s from GitHub with id: %s' % (fname, existing_assets[fname]))
+self.info(
+'Deleting %s from GitHub with id: %s' %
+(fname, existing_assets[fname])
+)
r = self.requests.delete(url.format(existing_assets[fname]))
if r.status_code != 204:
self.fail(r, 'Failed to delete %s from GitHub' % fname)
@@ -139,36 +157,61 @@ class GitHub(Base): # {{{
if r.status_code != 201:
self.fail(r, 'Failed to upload file: %s' % fname)
try:
-r = self.requests.patch(url.format(r.json()['id']),
-data=json.dumps({'name':fname, 'label':desc}))
+r = self.requests.patch(
+url.format(r.json()['id']),
+data=json.dumps({
+'name': fname,
+'label': desc
+})
+)
except Exception:
time.sleep(15)
-r = self.requests.patch(url.format(r.json()['id']),
-data=json.dumps({'name':fname, 'label':desc}))
+r = self.requests.patch(
+url.format(r.json()['id']),
+data=json.dumps({
+'name': fname,
+'label': desc
+})
+)
if r.status_code != 200:
self.fail(r, 'Failed to set label for %s' % fname)
def clean_older_releases(self, releases):
for release in releases:
-if release.get('assets', None) and release['tag_name'] != self.current_tag_name:
-self.info('\nDeleting old released installers from: %s' % release['tag_name'])
+if release.get('assets',
+None) and release['tag_name'] != self.current_tag_name:
+self.info(
+'\nDeleting old released installers from: %s' %
+release['tag_name']
+)
for asset in release['assets']:
-r = self.requests.delete(self.API + 'repos/%s/%s/releases/assets/%s' % (self.username, self.reponame, asset['id']))
+r = self.requests.delete(
+self.API + 'repos/%s/%s/releases/assets/%s' %
+(self.username, self.reponame, asset['id'])
+)
if r.status_code != 204:
-self.fail(r, 'Failed to delete obsolete asset: %s for release: %s' % (
-asset['name'], release['tag_name']))
+self.fail(
+r, 'Failed to delete obsolete asset: %s for release: %s'
+% (asset['name'], release['tag_name'])
+)
def do_upload(self, url, path, desc, fname):
mime_type = mimetypes.guess_type(fname)[0]
self.info('Uploading to GitHub: %s (%s)' % (fname, mime_type))
with ReadFileWithProgressReporting(path) as f:
return self.requests.post(
-url, headers={'Content-Type': mime_type, 'Content-Length':str(f._total)}, params={'name':fname},
-data=f)
+url,
+headers={
+'Content-Type': mime_type,
+'Content-Length': str(f._total)
+},
+params={'name': fname},
+data=f
+)
def fail(self, r, msg):
-print (msg, ' Status Code: %s' % r.status_code, file=sys.stderr)
-print ("JSON from response:", file=sys.stderr)
+print(msg, ' Status Code: %s' % r.status_code, file=sys.stderr)
+print("JSON from response:", file=sys.stderr)
pprint(dict(r.json()), stream=sys.stderr)
raise SystemExit(1)
@@ -177,11 +220,13 @@ class GitHub(Base): # {{{
return error_code == 'already_exists'
def existing_assets(self, release_id):
-url = self.API + 'repos/%s/%s/releases/%s/assets' % (self.username, self.reponame, release_id)
+url = self.API + 'repos/%s/%s/releases/%s/assets' % (
+self.username, self.reponame, release_id
+)
r = self.requests.get(url)
if r.status_code != 200:
self.fail('Failed to get assets for release')
-return {asset['name']:asset['id'] for asset in r.json()}
+return {asset['name']: asset['id'] for asset in r.json()}
def releases(self):
url = self.API + 'repos/%s/%s/releases' % (self.username, self.reponame)
@@ -197,19 +242,25 @@ class GitHub(Base): # {{{
if release['tag_name'] == self.current_tag_name:
return release
url = self.API + 'repos/%s/%s/releases' % (self.username, self.reponame)
-r = self.requests.post(url, data=json.dumps({
-'tag_name': self.current_tag_name,
-'target_commitish': 'master',
-'name': 'version %s' % self.version,
-'body': 'Release version %s' % self.version,
-'draft': False, 'prerelease':False
-}))
+r = self.requests.post(
+url,
+data=json.dumps({
+'tag_name': self.current_tag_name,
+'target_commitish': 'master',
+'name': 'version %s' % self.version,
+'body': 'Release version %s' % self.version,
+'draft': False,
+'prerelease': False
+})
+)
if r.status_code != 201:
self.fail(r, 'Failed to create release for version: %s' % self.version)
return r.json()
# }}}
def generate_index(): # {{{
os.chdir('/srv/download')
releases = set()
@@ -238,21 +289,36 @@ def generate_index(): # {{{
'''
body = []
for series in rmap:
-body.append('<li><a href="{0}.html" title="Releases in the {0}.x series">{0}.x</a>\xa0\xa0\xa0<span style="font-size:smaller">[{1} releases]</span></li>'.format( # noqa
-'.'.join(map(type(''), series)), len(rmap[series])))
+body.append(
+'<li><a href="{0}.html" title="Releases in the {0}.x series">{0}.x</a>\xa0\xa0\xa0<span style="font-size:smaller">[{1} releases]</span></li>'
+.format( # noqa
+'.'.join(map(type(''), series)), len(rmap[series])
+)
+)
body = '<ul>{0}</ul>'.format(' '.join(body))
-index = template.format(title='Previous calibre releases', style=style, msg='Choose a series of calibre releases', body=body)
+index = template.format(
+title='Previous calibre releases',
+style=style,
+msg='Choose a series of calibre releases',
+body=body
+)
with open('index.html', 'wb') as f:
f.write(index.encode('utf-8'))
-for series, releases in rmap.iteritems():
+for series, releases in rmap.items():
sname = '.'.join(map(type(''), series))
body = [
-'<li><a href="{0}/" title="Release {0}">{0}</a></li>'.format('.'.join(map(type(''), r)))
-for r in releases]
+'<li><a href="{0}/" title="Release {0}">{0}</a></li>'.format(
+'.'.join(map(type(''), r))
+) for r in releases
+]
body = '<ul class="release-list">{0}</ul>'.format(' '.join(body))
-index = template.format(title='Previous calibre releases (%s.x)' % sname, style=style,
-msg='Choose a calibre release', body=body)
+index = template.format(
+title='Previous calibre releases (%s.x)' % sname,
+style=style,
+msg='Choose a calibre release',
+body=body
+)
with open('%s.html' % sname, 'wb') as f:
f.write(index.encode('utf-8'))
@@ -264,41 +330,69 @@ def generate_index(): # {{{
files = os.listdir('.')
windows = [x for x in files if x.endswith('.msi')]
if windows:
-windows = ['<li><a href="{0}" title="{1}">{1}</a></li>'.format(
-x, 'Windows 64-bit Installer' if '64bit' in x else 'Windows 32-bit Installer')
-for x in windows]
-body.append('<dt>Windows</dt><dd><ul>{0}</ul></dd>'.format(' '.join(windows)))
+windows = [
+'<li><a href="{0}" title="{1}">{1}</a></li>'.format(
+x, 'Windows 64-bit Installer'
+if '64bit' in x else 'Windows 32-bit Installer'
+) for x in windows
+]
+body.append(
+'<dt>Windows</dt><dd><ul>{0}</ul></dd>'.format(
+' '.join(windows)
+)
+)
portable = [x for x in files if '-portable-' in x]
if portable:
-body.append('<dt>Calibre Portable</dt><dd><a href="{0}" title="{1}">{1}</a></dd>'.format(
-portable[0], 'Calibre Portable Installer'))
+body.append(
+'<dt>Calibre Portable</dt><dd><a href="{0}" title="{1}">{1}</a></dd>'
+.format(portable[0], 'Calibre Portable Installer')
+)
osx = [x for x in files if x.endswith('.dmg')]
if osx:
-body.append('<dt>Apple Mac</dt><dd><a href="{0}" title="{1}">{1}</a></dd>'.format(
-osx[0], 'OS X Disk Image (.dmg)'))
-linux = [x for x in files if x.endswith('.txz') or x.endswith('tar.bz2')]
+body.append(
+'<dt>Apple Mac</dt><dd><a href="{0}" title="{1}">{1}</a></dd>'
+.format(osx[0], 'OS X Disk Image (.dmg)')
+)
+linux = [
+x for x in files if x.endswith('.txz') or x.endswith('tar.bz2')
+]
if linux:
-linux = ['<li><a href="{0}" title="{1}">{1}</a></li>'.format(
-x, 'Linux 64-bit binary' if 'x86_64' in x else 'Linux 32-bit binary')
-for x in linux]
-body.append('<dt>Linux</dt><dd><ul>{0}</ul></dd>'.format(' '.join(linux)))
+linux = [
+'<li><a href="{0}" title="{1}">{1}</a></li>'.format(
+x, 'Linux 64-bit binary'
+if 'x86_64' in x else 'Linux 32-bit binary'
+) for x in linux
+]
+body.append(
+'<dt>Linux</dt><dd><ul>{0}</ul></dd>'.format(
+' '.join(linux)
+)
+)
source = [x for x in files if x.endswith('.xz') or x.endswith('.gz')]
if source:
-body.append('<dt>Source Code</dt><dd><a href="{0}" title="{1}">{1}</a></dd>'.format(
-source[0], 'Source code (all platforms)'))
+body.append(
+'<dt>Source Code</dt><dd><a href="{0}" title="{1}">{1}</a></dd>'
+.format(source[0], 'Source code (all platforms)')
+)
body = '<dl>{0}</dl>'.format(''.join(body))
-index = template.format(title='calibre release (%s)' % rname, style=style,
-msg='', body=body)
+index = template.format(
+title='calibre release (%s)' % rname,
+style=style,
+msg='',
+body=body
+)
with open('index.html', 'wb') as f:
f.write(index.encode('utf-8'))
finally:
os.chdir('..')
# }}}
SERVER_BASE = '/srv/download/'
def upload_to_servers(files, version): # {{{
base = SERVER_BASE
dest = os.path.join(base, version)
@@ -312,7 +406,7 @@ def upload_to_servers(files, version): # {{{
finally:
os.chdir(cwd)
-# for server, rdir in {'files':'/srv/download/'}.iteritems():
+# for server, rdir in {'files':'/srv/download/'}.items():
# print('Uploading to server:', server)
# server = '%s.calibre-ebook.com' % server
# # Copy the generated index files
@@ -337,56 +431,72 @@ def upload_to_servers(files, version): # {{{
# break
# print ('Uploaded in', int(time.time() - start), 'seconds\n\n')
# #
# }}}
# CLI {{{
def cli_parser():
-epilog='Copyright Kovid Goyal 2012'
+epilog = 'Copyright Kovid Goyal 2012'
p = ArgumentParser(
description='Upload project files to a hosting service automatically',
epilog=epilog
)
a = p.add_argument
-a('appname', help='The name of the application, all files to'
-' upload should begin with this name')
-a('version', help='The version of the application, all files to'
-' upload should contain this version')
-a('file_map', type=FileType('rb'),
-help='A file containing a mapping of files to be uploaded to '
-'descriptions of the files. The descriptions will be visible '
-'to users trying to get the file from the hosting service. '
-'The format of the file is filename: description, with one per '
-'line. filename can be a path to the file relative to the current '
-'directory.')
-a('--replace', action='store_true', default=False,
-help='If specified, existing files are replaced, otherwise '
-'they are skipped.')
+a(
+'appname',
+help='The name of the application, all files to'
+' upload should begin with this name'
+)
+a(
+'version',
+help='The version of the application, all files to'
+' upload should contain this version'
+)
+a(
+'file_map',
+type=FileType('rb'),
+help='A file containing a mapping of files to be uploaded to '
+'descriptions of the files. The descriptions will be visible '
+'to users trying to get the file from the hosting service. '
+'The format of the file is filename: description, with one per '
+'line. filename can be a path to the file relative to the current '
+'directory.'
+)
+a(
+'--replace',
+action='store_true',
+default=False,
+help='If specified, existing files are replaced, otherwise '
+'they are skipped.'
+)
-subparsers = p.add_subparsers(help='Where to upload to', dest='service',
-title='Service', description='Hosting service to upload to')
-sf = subparsers.add_parser('sourceforge', help='Upload to sourceforge',
-epilog=epilog)
-gh = subparsers.add_parser('github', help='Upload to GitHub',
-epilog=epilog)
+subparsers = p.add_subparsers(
+help='Where to upload to',
+dest='service',
+title='Service',
+description='Hosting service to upload to'
+)
+sf = subparsers.add_parser(
+'sourceforge', help='Upload to sourceforge', epilog=epilog
+)
+gh = subparsers.add_parser('github', help='Upload to GitHub', epilog=epilog)
subparsers.add_parser('calibre', help='Upload to calibre file servers')
a = sf.add_argument
-a('project',
-help='The name of the project on sourceforge we are uploading to')
-a('username',
-help='Sourceforge username')
+a('project', help='The name of the project on sourceforge we are uploading to')
+a('username', help='Sourceforge username')
a = gh.add_argument
-a('project',
-help='The name of the repository on GitHub we are uploading to')
-a('username',
-help='Username to log into your GitHub account')
-a('password',
-help='Password to log into your GitHub account')
+a('project', help='The name of the repository on GitHub we are uploading to')
+a('username', help='Username to log into your GitHub account')
+a('password', help='Password to log into your GitHub account')
return p
def main(args=None):
cli = cli_parser()
args = cli.parse_args(args)
@@ -399,20 +509,28 @@ def main(args=None):
files[fname] = desc
ofiles = OrderedDict()
-for x in sorted(files, key=lambda x:os.stat(x).st_size, reverse=True):
+for x in sorted(files, key=lambda x: os.stat(x).st_size, reverse=True):
ofiles[x] = files[x]
if args.service == 'sourceforge':
-sf = SourceForge(ofiles, args.project, args.version, args.username,
-replace=args.replace)
+sf = SourceForge(
+ofiles, args.project, args.version, args.username, replace=args.replace
+)
sf()
elif args.service == 'github':
-gh = GitHub(ofiles, args.project, args.version, args.username, args.password,
-replace=args.replace)
+gh = GitHub(
+ofiles,
+args.project,
+args.version,
+args.username,
+args.password,
+replace=args.replace
+)
gh()
elif args.service == 'calibre':
upload_to_servers(ofiles, args.version)
if __name__ == '__main__':
main()
# }}}

View File

@@ -37,7 +37,7 @@ from calibre.utils.filenames import ascii_filename as sanitize, shorten_componen
from calibre.utils.mdns import (publish as publish_zeroconf, unpublish as
unpublish_zeroconf, get_all_ips)
from calibre.utils.socket_inheritance import set_socket_inherit
-from polyglot.builtins import unicode_type
+from polyglot.builtins import unicode_type, iteritems
from polyglot import queue
@@ -266,7 +266,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
'SET_CALIBRE_DEVICE_NAME': 2,
'TOTAL_SPACE' : 4,
}
-reverse_opcodes = dict([(v, k) for k,v in opcodes.iteritems()])
+reverse_opcodes = {v: k for k, v in iteritems(opcodes)}
MESSAGE_PASSWORD_ERROR = 1
MESSAGE_UPDATE_NEEDED = 2
@@ -397,7 +397,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
try:
if isinstance(a, dict):
printable = {}
-for k,v in a.iteritems():
+for k,v in iteritems(a):
if isinstance(v, (bytes, unicode_type)) and len(v) > 50:
printable[k] = 'too long'
else:
@@ -540,7 +540,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
# codec to first convert it to a string dict
def _json_encode(self, op, arg):
res = {}
-for k,v in arg.iteritems():
+for k,v in iteritems(arg):
if isinstance(v, (Book, Metadata)):
res[k] = self.json_codec.encode_book_metadata(v)
series = v.get('series', None)
@@ -835,7 +835,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
prefix = os.path.join(cache_dir(),
'wireless_device_' + self.device_uuid + '_metadata_cache')
with lopen(prefix + '.tmp', mode='wb') as fd:
-for key,book in self.device_book_cache.iteritems():
+for key,book in iteritems(self.device_book_cache):
if (now_ - book['last_used']).days > self.PURGE_CACHE_ENTRIES_DAYS:
purged += 1
continue
@@ -1392,7 +1392,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
coldict = {}
if colattrs:
collections = booklists[0].get_collections(colattrs)
-for k,v in collections.iteritems():
+for k,v in iteritems(collections):
lpaths = []
for book in v:
lpaths.append(book.lpath)

View File

@@ -422,7 +422,7 @@ class Metadata(object):
m = dict(metadata)
# Copying the elements should not be necessary. The objects referenced
# in the dict should not change. Of course, they can be replaced.
-# for k,v in metadata.iteritems():
+# for k,v in iteritems(metadata):
# m[k] = copy.copy(v)
if '#value#' not in m:
if m['datatype'] == 'text' and m['is_multiple']:

View File

@@ -293,7 +293,7 @@ class Worker(Thread): # Get details {{{
'chs': ('Chinese', u'中文', u'简体中文'),
}
self.lang_map = {}
-for code, names in lm.iteritems():
+for code, names in lm.items():
for name in names:
self.lang_map[name] = code
@@ -313,7 +313,7 @@ class Worker(Thread): # Get details {{{
if not self.months:
return raw
ans = raw.lower()
-for i, vals in self.months.iteritems():
+for i, vals in self.months.items():
for x in vals:
ans = ans.replace(x, self.english_months[i])
ans = ans.replace(' de ', ' ')
@@ -746,7 +746,7 @@ class Worker(Thread): # Get details {{{
mwidth = 0
try:
url = None
-for iurl, (width, height) in idata.iteritems():
+for iurl, (width, height) in idata.items():
if width > mwidth:
mwidth = width
url = iurl
@@ -949,7 +949,7 @@ class Amazon(Source):
self.touched_fields = frozenset(tf)
def get_domain_and_asin(self, identifiers, extra_domains=()):
-for key, val in identifiers.iteritems():
+for key, val in identifiers.items():
key = key.lower()
if key in ('amazon', 'asin'):
return 'com', val
@@ -1118,7 +1118,7 @@ class Amazon(Source):
encode_to = 'latin1'
encoded_q = dict([(x.encode(encode_to, 'ignore'), y.encode(encode_to,
'ignore')) for x, y in
-q.iteritems()])
+q.items()])
url = 'https://www.amazon.%s/s/?' % self.get_website_domain(
domain) + urlencode(encoded_q)
return url, domain

View File

@@ -41,7 +41,7 @@ def browser():
def encode_query(**query):
-q = {k.encode('utf-8'): v.encode('utf-8') for k, v in query.iteritems()}
+q = {k.encode('utf-8'): v.encode('utf-8') for k, v in query.items()}
return urlencode(q).decode('utf-8')

View File

@@ -20,7 +20,7 @@ from calibre.ebooks.mobi.utils import (decode_hex_number, decint,
from calibre.utils.imghdr import what
from calibre.ebooks.mobi.debug import format_bytes
from calibre.ebooks.mobi.debug.headers import TextRecord
-from polyglot.builtins import unicode_type, range
+from polyglot.builtins import unicode_type, range, iteritems
class TagX(object): # {{{
@@ -306,7 +306,7 @@ class IndexEntry(object): # {{{
except ValueError:
self.index = ident
self.tags = [Tag(tag_type, vals, cncx) for tag_type, vals in
-entry.iteritems()]
+iteritems(entry)]
@property
def label(self):
@@ -402,7 +402,7 @@ class IndexRecord(object): # {{{
self.indices = []
-for ident, entry in table.iteritems():
+for ident, entry in iteritems(table):
self.indices.append(IndexEntry(ident, entry, cncx))
def get_parent(self, index):
@@ -473,7 +473,7 @@ class CNCX(object): # {{{
def __str__(self):
ans = ['*'*20 + ' cncx (%d strings) '%len(self.records)+ '*'*20]
-for k, v in self.records.iteritems():
+for k, v in iteritems(self.records):
ans.append('%10d : %s'%(k, v))
return '\n'.join(ans)
@@ -571,18 +571,18 @@ class TBSIndexing(object): # {{{
def __str__(self):
ans = ['*'*20 + ' TBS Indexing (%d records) '%len(self.record_indices)+ '*'*20]
-for r, dat in self.record_indices.iteritems():
+for r, dat in iteritems(self.record_indices):
ans += self.dump_record(r, dat)[-1]
return '\n'.join(ans)
def dump(self, bdir):
types = defaultdict(list)
-for r, dat in self.record_indices.iteritems():
+for r, dat in iteritems(self.record_indices):
tbs_type, strings = self.dump_record(r, dat)
if tbs_type == 0:
continue
types[tbs_type] += strings
-for typ, strings in types.iteritems():
+for typ, strings in iteritems(types):
with open(os.path.join(bdir, 'tbs_type_%d.txt'%typ), 'wb') as f:
f.write('\n'.join(strings))
@@ -609,7 +609,7 @@ class TBSIndexing(object): # {{{
return bytes('0'*(4-len(ans)) + ans)
def repr_extra(x):
-return str({bin4(k):v for k, v in extra.iteritems()})
+return str({bin4(k):v for k, v in iteritems(extra)})
tbs_type = 0
is_periodical = self.doc_type in (257, 258, 259)

View File

@@ -19,7 +19,7 @@ from calibre.ebooks.mobi.utils import read_font_record, decode_tbs, RECORD_SIZE
from calibre.ebooks.mobi.debug import format_bytes
from calibre.ebooks.mobi.reader.headers import NULL_INDEX
from calibre.utils.imghdr import what
-from polyglot.builtins import zip
+from polyglot.builtins import zip, iteritems
class FDST(object):
@@ -274,7 +274,7 @@ class MOBIFile(object):
break
flag_sz = 4
tbs_bytes = tbs_bytes[consumed:]
-extra = {bin(k):v for k, v in extra.iteritems()}
+extra = {bin(k):v for k, v in iteritems(extra)}
sequences.append((val, extra))
for j, seq in enumerate(sequences):
desc.append('Sequence #%d: %r %r'%(j, seq[0], seq[1]))

View File

@@ -8,7 +8,7 @@ __copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import hashlib, numbers
-from polyglot.builtins import map
+from polyglot.builtins import map, iteritems
from PyQt5.Qt import QBuffer, QByteArray, QImage, Qt, QColor, qRgba, QPainter
@@ -119,24 +119,24 @@ class Page(Stream):
r = Dictionary()
if self.opacities:
extgs = Dictionary()
-for opref, name in self.opacities.iteritems():
+for opref, name in iteritems(self.opacities):
extgs[name] = opref
r['ExtGState'] = extgs
if self.fonts:
fonts = Dictionary()
-for ref, name in self.fonts.iteritems():
+for ref, name in iteritems(self.fonts):
fonts[name] = ref
r['Font'] = fonts
if self.xobjects:
xobjects = Dictionary()
-for ref, name in self.xobjects.iteritems():
+for ref, name in iteritems(self.xobjects):
xobjects[name] = ref
r['XObject'] = xobjects
if self.patterns:
r['ColorSpace'] = Dictionary({'PCSp':Array(
[Name('Pattern'), Name('DeviceRGB')])})
patterns = Dictionary()
-for ref, name in self.patterns.iteritems():
+for ref, name in iteritems(self.patterns):
patterns[name] = ref
r['Pattern'] = patterns
if r:

View File

@@ -15,7 +15,7 @@ from pprint import pformat
from gi.repository import Gtk, Gdk, GdkX11 # noqa
-from polyglot.builtins import unicode_type
+from polyglot.builtins import unicode_type, iteritems
UI_INFO = """
<ui>
@@ -208,7 +208,7 @@ def convert(v):
if isinstance(v, list):
return [convert(val) for val in v]
if isinstance(v, dict):
-return {convert(k):convert(val) for k, val in v.iteritems()}
+return {convert(k):convert(val) for k, val in iteritems(v)}
if isinstance(v, dbus.Boolean):
return bool(v)
if isinstance(v, (dbus.UInt32, dbus.UInt16)):
@@ -233,10 +233,10 @@ class MyApplication(Gtk.Application):
conn = xcb.Connection()
atoms = conn.core.ListProperties(win_id).reply().atoms
atom_names = {atom:conn.core.GetAtomNameUnchecked(atom) for atom in atoms}
-atom_names = {k:bytes(a.reply().name.buf()) for k, a in atom_names.iteritems()}
-property_names = {name:atom for atom, name in atom_names.iteritems() if
+atom_names = {k:bytes(a.reply().name.buf()) for k, a in iteritems(atom_names)}
+property_names = {name:atom for atom, name in iteritems(atom_names) if
name.startswith('_GTK') or name.startswith('_UNITY') or name.startswith('_GNOME')}
-replies = {name:conn.core.GetProperty(False, win_id, atom, xcb.xproto.GetPropertyType.Any, 0, 2 ** 32 - 1) for name, atom in property_names.iteritems()}
+replies = {name:conn.core.GetProperty(False, win_id, atom, xcb.xproto.GetPropertyType.Any, 0, 2 ** 32 - 1) for name, atom in iteritems(property_names)}
type_atom_cache = {}
@@ -256,7 +256,7 @@ class MyApplication(Gtk.Application):
property_reply.value.buf()))
return None
-props = {name:get_property_value(r.reply()) for name, r in replies.iteritems()}
+props = {name:get_property_value(r.reply()) for name, r in iteritems(replies)}
ans = ['\nX Window properties:']
for name in sorted(props):
ans.append('%s: %r' % (name, props[name]))
@@ -277,7 +277,7 @@ class MyApplication(Gtk.Application):
print ('Subscription group:', item[0])
print ('Menu number:', item[1])
for menu_item in item[2]:
-menu_item = {unicode_type(k):convert(v) for k, v in menu_item.iteritems()}
+menu_item = {unicode_type(k):convert(v) for k, v in iteritems(menu_item)}
if ':submenu' in menu_item:
groups.add(menu_item[':submenu'][0])
if ':section' in menu_item:

View File

@@ -12,7 +12,7 @@ import dbus
from PyQt5.Qt import QSize, QImage, Qt, QKeySequence, QBuffer, QByteArray
-from polyglot.builtins import unicode_type
+from polyglot.builtins import unicode_type, iteritems
def log(*args, **kw):
@@ -110,7 +110,7 @@ def key_sequence_to_dbus_shortcut(qks):
if key == -1 or key == Qt.Key_unknown:
continue
items = []
-for mod, name in {Qt.META:'Super', Qt.CTRL:'Control', Qt.ALT:'Alt', Qt.SHIFT:'Shift'}.iteritems():
+for mod, name in iteritems({Qt.META:'Super', Qt.CTRL:'Control', Qt.ALT:'Alt', Qt.SHIFT:'Shift'}):
if key & mod == mod:
items.append(name)
key &= int(~(Qt.ShiftModifier | Qt.ControlModifier | Qt.AltModifier | Qt.MetaModifier | Qt.KeypadModifier))
@@ -146,7 +146,7 @@ def set_X_window_properties(win_id, **properties):
conn = xcb.connect()
atoms = {name:conn.core.InternAtom(False, len(name), name) for name in properties}
utf8_string_atom = None
-for name, val in properties.iteritems():
+for name, val in iteritems(properties):
atom = atoms[name].reply().atom
type_atom = xcb.xproto.Atom.STRING
if isinstance(val, unicode_type):

View File

@@ -17,6 +17,7 @@ from calibre.constants import numeric_version, DEBUG
from calibre.gui2.store import StorePlugin
from calibre.utils.config import JSONConfig
from polyglot.urllib import urlencode
+from polyglot.builtins import iteritems
class VersionMismatch(ValueError):
@@ -28,7 +29,7 @@ class VersionMismatch(ValueError):
def download_updates(ver_map={}, server='https://code.calibre-ebook.com'):
from calibre.utils.https import get_https_resource_securely
-data = {k:type(u'')(v) for k, v in ver_map.iteritems()}
+data = {k:type(u'')(v) for k, v in iteritems(ver_map)}
data['ver'] = '1'
url = '%s/stores?%s'%(server, urlencode(data))
# We use a timeout here to ensure the non-daemonic update thread does not
@@ -58,7 +59,7 @@ class Stores(OrderedDict):
self.version_map = {}
self.cached_version_map = {}
self.name_rmap = {}
-for key, val in self.iteritems():
+for key, val in iteritems(self):
prefix, name = val.__module__.rpartition('.')[0::2]
if prefix == 'calibre.gui2.store.stores' and name.endswith('_plugin'):
module = sys.modules[val.__module__]
@@ -74,7 +75,7 @@ class Stores(OrderedDict):
# Load plugins from on disk cache
remove = set()
pat = re.compile(r'^store_version\s*=\s*(\d+)', re.M)
-for name, src in self.cache_file.iteritems():
+for name, src in iteritems(self.cache_file):
try:
key = self.name_rmap[name]
except KeyError:
@@ -123,7 +124,7 @@ class Stores(OrderedDict):
def download_updates(self):
ver_map = {name:max(ver, self.cached_version_map.get(name, -1))
-for name, ver in self.version_map.iteritems()}
+for name, ver in iteritems(self.version_map)}
try:
updates = download_updates(ver_map)
except:
@@ -159,7 +160,7 @@ class Stores(OrderedDict):
if replacements:
with self.cache_file:
-for name, src in replacements.iteritems():
+for name, src in iteritems(replacements):
self.cache_file[name] = src
def replace_plugin(self, ver, name, obj, source):

View File

@@ -43,7 +43,7 @@ def search_amazon(query, max_results=10, timeout=60,
if isinstance(x, type('')):
x = x.encode('utf-8')
return x
-uquery = {asbytes(k):asbytes(v) for k, v in uquery.iteritems()}
+uquery = {asbytes(k):asbytes(v) for k, v in uquery.items()}
url = base_url + '?' + urllib.urlencode(uquery).decode('ascii')
br = browser(user_agent=get_user_agent())

View File

@@ -43,7 +43,7 @@ def search_amazon(query, max_results=10, timeout=60,
if isinstance(x, type('')):
x = x.encode('utf-8')
return x
-uquery = {asbytes(k):asbytes(v) for k, v in uquery.iteritems()}
+uquery = {asbytes(k):asbytes(v) for k, v in uquery.items()}
url = base_url + '?' + urllib.urlencode(uquery).decode('ascii')
br = browser(user_agent=get_user_agent())

View File

@@ -45,7 +45,7 @@ def search_amazon(query, max_results=10, timeout=60,
if isinstance(x, type('')):
x = x.encode('utf-8')
return x
-uquery = {asbytes(k):asbytes(v) for k, v in uquery.iteritems()}
+uquery = {asbytes(k):asbytes(v) for k, v in uquery.items()}
url = base_url + '?' + urllib.urlencode(uquery).decode('ascii')
br = browser(user_agent=get_user_agent())

View File

@@ -45,7 +45,7 @@ def search_amazon(query, max_results=10, timeout=60,
if isinstance(x, type('')):
x = x.encode('utf-8')
return x
-uquery = {asbytes(k):asbytes(v) for k, v in uquery.iteritems()}
+uquery = {asbytes(k):asbytes(v) for k, v in uquery.items()}
url = base_url + '?' + urllib.urlencode(uquery).decode('ascii')
br = browser(user_agent=get_user_agent())

View File

@@ -45,7 +45,7 @@ def search_amazon(query, max_results=10, timeout=60,
if isinstance(x, type('')):
x = x.encode('utf-8')
return x
-uquery = {asbytes(k):asbytes(v) for k, v in uquery.iteritems()}
+uquery = {asbytes(k):asbytes(v) for k, v in uquery.items()}
url = base_url + '?' + urllib.urlencode(uquery).decode('ascii')
br = browser(user_agent=get_user_agent())

View File

@@ -43,7 +43,7 @@ def search_amazon(query, max_results=10, timeout=60,
if isinstance(x, type('')):
x = x.encode('utf-8')
return x
-uquery = {asbytes(k):asbytes(v) for k, v in uquery.iteritems()}
+uquery = {asbytes(k):asbytes(v) for k, v in uquery.items()}
url = base_url + '?' + urllib.urlencode(uquery).decode('ascii')
br = browser(user_agent=get_user_agent())

View File

@@ -45,7 +45,7 @@ def search_amazon(query, max_results=10, timeout=60,
if isinstance(x, type('')):
x = x.encode('utf-8')
return x
-uquery = {asbytes(k):asbytes(v) for k, v in uquery.iteritems()}
+uquery = {asbytes(k):asbytes(v) for k, v in uquery.items()}
url = base_url + '?' + urllib.urlencode(uquery).decode('ascii')
br = browser(user_agent=get_user_agent())

View File

@@ -43,7 +43,7 @@ def search_amazon(query, max_results=10, timeout=60,
if isinstance(x, type('')):
x = x.encode('utf-8')
return x
-uquery = {asbytes(k):asbytes(v) for k, v in uquery.iteritems()}
+uquery = {asbytes(k):asbytes(v) for k, v in uquery.items()}
url = base_url + '?' + urllib.urlencode(uquery).decode('ascii')
br = browser(user_agent=get_user_agent())

View File

@@ -43,7 +43,7 @@ def search_amazon(query, max_results=10, timeout=60,
if isinstance(x, type('')):
x = x.encode('utf-8')
return x
-uquery = {asbytes(k):asbytes(v) for k, v in uquery.iteritems()}
+uquery = {asbytes(k):asbytes(v) for k, v in uquery.items()}
url = base_url + '?' + urllib.urlencode(uquery).decode('ascii')
br = browser(user_agent=get_user_agent())

View File

@ -181,13 +181,13 @@ class TOCViewer(QWidget):
menu.addAction(self.refresh_action) menu.addAction(self.refresh_action)
menu.exec_(self.view.mapToGlobal(pos)) menu.exec_(self.view.mapToGlobal(pos))
def iteritems(self, parent=None): def iter_items(self, parent=None):
if parent is None: if parent is None:
parent = self.invisibleRootItem() parent = self.invisibleRootItem()
for i in range(parent.childCount()): for i in range(parent.childCount()):
child = parent.child(i) child = parent.child(i)
yield child yield child
for gc in self.iteritems(parent=child): for gc in self.iter_items(parent=child):
yield gc yield gc
def emit_navigate(self, *args): def emit_navigate(self, *args):

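This rename is not a Python 2/3 fix at all: the method never iterated a dict, it walks the QTreeWidget recursively, and calling it iter_items presumably avoids confusion with dict.iteritems. A toy sketch of the same depth-first walk, with a made-up Node class standing in for QTreeWidgetItem:

class Node:
    def __init__(self, name, children=()):
        self.name, self.children = name, list(children)

def iter_items(parent):
    # Yield every descendant of parent, depth first.
    for child in parent.children:
        yield child
        for gc in iter_items(child):
            yield gc

root = Node('root', [Node('a', [Node('a1')]), Node('b')])
print([n.name for n in iter_items(root)])  # ['a', 'a1', 'b']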
View File

@ -42,7 +42,7 @@ from calibre.utils.icu import capitalize, collation_order, sort_key
from calibre.utils.img import scale_image from calibre.utils.img import scale_image
from calibre.utils.localization import get_lang, lang_as_iso639_1 from calibre.utils.localization import get_lang, lang_as_iso639_1
from calibre.utils.zipfile import ZipFile from calibre.utils.zipfile import ZipFile
from polyglot.builtins import unicode_type from polyglot.builtins import unicode_type, iteritems
NBSP = u'\u00a0' NBSP = u'\u00a0'
@ -2675,7 +2675,7 @@ class CatalogBuilder(object):
title_str=title_str, title_str=title_str,
xmlns=XHTML_NS, xmlns=XHTML_NS,
) )
for k, v in args.iteritems(): for k, v in iteritems(args):
if isbytestring(v): if isbytestring(v):
args[k] = v.decode('utf-8') args[k] = v.decode('utf-8')
generated_html = P('catalog/template.xhtml', generated_html = P('catalog/template.xhtml',
@ -4617,9 +4617,9 @@ class CatalogBuilder(object):
templates = {} templates = {}
ef = P('catalog/section_list_templates.py') ef = P('catalog/section_list_templates.py')
with open(ef, 'rb') as f: with open(ef, 'rb') as f:
exec(compile(f.read(), ef, 'exec'), templates) exec(compile(f.read(), ef, 'exec'), templates)
for name, template in templates.iteritems(): for name, template in iteritems(templates):
if name.startswith('by_') and name.endswith('_template'): if name.startswith('by_') and name.endswith('_template'):
setattr(self, name, force_unicode(template, 'utf-8')) setattr(self, name, force_unicode(template, 'utf-8'))

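Here the catalog builder switches to the iteritems() helper from polyglot.builtins, which hides the Python 2/3 difference behind one function. A plausible sketch of what that helper does (the real implementation in polyglot.builtins may differ in detail):

import sys

ispy3 = sys.version_info.major >= 3

if ispy3:
    def iteritems(d):
        return iter(d.items())
else:
    def iteritems(d):
        return d.iteritems()

args = {'title_str': 'My Library', 'xmlns': 'http://www.w3.org/1999/xhtml'}
for k, v in iteritems(args):
    print(k, v)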
View File

@ -15,7 +15,7 @@ from operator import itemgetter
from binascii import hexlify, unhexlify from binascii import hexlify, unhexlify
from calibre import prints from calibre import prints
from calibre.constants import iswindows from calibre.constants import iswindows, ispy3
from calibre.srv.errors import HTTPNotFound from calibre.srv.errors import HTTPNotFound
from calibre.utils.config_base import tweaks from calibre.utils.config_base import tweaks
from calibre.utils.localization import get_translator from calibre.utils.localization import get_translator
@ -57,7 +57,8 @@ class MultiDict(dict): # {{{
self[key] = val self[key] = val
def items(self, duplicates=True): def items(self, duplicates=True):
for k, v in dict.iteritems(self): f = dict.items if ispy3 else dict.iteritems
for k, v in f(self):
if duplicates: if duplicates:
for x in v: for x in v:
yield k, x yield k, x

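MultiDict stores every value assigned to a key in a list, and its items() has to read the underlying dict storage rather than recurse into itself, so the fix binds the unbound dict method up front and keeps a single code path for both interpreters. A sketch under that assumption; the duplicates=False branch is not visible in the hunk and is guessed here:

import sys
ispy3 = sys.version_info.major >= 3

class MultiDict(dict):
    def items(self, duplicates=True):
        # dict.items on Python 3, dict.iteritems on Python 2; calling the
        # unbound method bypasses this overridden generator.
        f = dict.items if ispy3 else dict.iteritems
        for k, v in f(self):
            if duplicates:
                for x in v:
                    yield k, x
            else:
                yield k, v[-1]  # assumed: the most recent value wins

md = MultiDict()
md['a'] = ['1', '2']          # the real class builds these lists in __setitem__
print(list(md.items()))       # [('a', '1'), ('a', '2')]
print(list(md.items(False)))  # [('a', '2')]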
View File

@ -13,6 +13,7 @@ from calibre.constants import config_dir, iswindows, filesystem_encoding
from calibre.utils.config_base import prefs, StringConfig, create_global_prefs from calibre.utils.config_base import prefs, StringConfig, create_global_prefs
from calibre.utils.config import JSONConfig from calibre.utils.config import JSONConfig
from calibre.utils.filenames import samefile from calibre.utils.filenames import samefile
from polyglot.builtins import iteritems
# Export {{{ # Export {{{
@ -175,11 +176,11 @@ def export(destdir, library_paths=None, dbmap=None, progress1=None, progress2=No
if library_paths is None: if library_paths is None:
library_paths = all_known_libraries() library_paths = all_known_libraries()
dbmap = dbmap or {} dbmap = dbmap or {}
dbmap = {os.path.normcase(os.path.abspath(k)):v for k, v in dbmap.iteritems()} dbmap = {os.path.normcase(os.path.abspath(k)):v for k, v in iteritems(dbmap)}
exporter = Exporter(destdir) exporter = Exporter(destdir)
exporter.metadata['libraries'] = libraries = {} exporter.metadata['libraries'] = libraries = {}
total = len(library_paths) + 1 total = len(library_paths) + 1
for i, (lpath, count) in enumerate(library_paths.iteritems()): for i, (lpath, count) in enumerate(iteritems(library_paths)):
if abort is not None and abort.is_set(): if abort is not None and abort.is_set():
return return
if progress1 is not None: if progress1 is not None:
@ -264,7 +265,7 @@ class Importer(object):
raise ValueError('The last part of this exported data set is missing') raise ValueError('The last part of this exported data set is missing')
if len(nums) != nums[-1]: if len(nums) != nums[-1]:
raise ValueError('There are some parts of the exported data set missing') raise ValueError('There are some parts of the exported data set missing')
self.part_map = {num:path for num, (path, is_last) in part_map.iteritems()} self.part_map = {num:path for num, (path, is_last) in iteritems(part_map)}
msf = struct.calcsize(Exporter.MDATA_SZ_FMT) msf = struct.calcsize(Exporter.MDATA_SZ_FMT)
offset = tail_size + msf offset = tail_size + msf
with self.part(nums[-1]) as f: with self.part(nums[-1]) as f:
@ -323,7 +324,7 @@ def import_data(importer, library_path_map, config_location=None, progress1=None
config_location = os.path.abspath(os.path.realpath(config_location)) config_location = os.path.abspath(os.path.realpath(config_location))
total = len(library_path_map) + 1 total = len(library_path_map) + 1
library_usage_stats = Counter() library_usage_stats = Counter()
for i, (library_key, dest) in enumerate(library_path_map.iteritems()): for i, (library_key, dest) in enumerate(iteritems(library_path_map)):
if abort is not None and abort.is_set(): if abort is not None and abort.is_set():
return return
if progress1 is not None: if progress1 is not None:
@ -394,7 +395,7 @@ def run_exporter(export_dir=None, args=None):
os.makedirs(export_dir) os.makedirs(export_dir)
if os.listdir(export_dir): if os.listdir(export_dir):
raise SystemExit('%s is not empty' % export_dir) raise SystemExit('%s is not empty' % export_dir)
all_libraries = {os.path.normcase(os.path.abspath(path)):lus for path, lus in all_known_libraries().iteritems()} all_libraries = {os.path.normcase(os.path.abspath(path)):lus for path, lus in iteritems(all_known_libraries())}
if 'all' in args[1:]: if 'all' in args[1:]:
libraries = set(all_libraries) libraries = set(all_libraries)
else: else:
@ -416,7 +417,7 @@ def run_exporter(export_dir=None, args=None):
if os.listdir(export_dir): if os.listdir(export_dir):
raise SystemExit('%s is not empty' % export_dir) raise SystemExit('%s is not empty' % export_dir)
library_paths = {} library_paths = {}
for lpath, lus in all_known_libraries().iteritems(): for lpath, lus in iteritems(all_known_libraries()):
if raw_input('Export the library %s [y/n]: ' % lpath).strip().lower() == b'y': if raw_input('Export the library %s [y/n]: ' % lpath).strip().lower() == b'y':
library_paths[lpath] = lus library_paths[lpath] = lus
if library_paths: if library_paths:

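The export path normalises every library path before using it as a dict key, so the same library given with a different spelling (relative vs absolute, different case on Windows) maps to one entry. A small sketch of that normalisation, with a hypothetical norm() helper for the expression the code inlines and a made-up mapping value:

import os

def norm(p):
    return os.path.normcase(os.path.abspath(p))

dbmap = {'./Library': 'metadata-backup.json'}
dbmap = {norm(k): v for k, v in dbmap.items()}
print(dbmap)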
View File

@ -11,7 +11,7 @@ from struct import unpack_from, calcsize
from collections import OrderedDict, namedtuple from collections import OrderedDict, namedtuple
from calibre.utils.fonts.sfnt.errors import UnsupportedFont from calibre.utils.fonts.sfnt.errors import UnsupportedFont
from polyglot.builtins import range from polyglot.builtins import range, iteritems
class Unpackable(object): class Unpackable(object):
@ -82,7 +82,7 @@ class ListTable(OrderedDict):
def dump(self, prefix=''): def dump(self, prefix=''):
print (prefix, self.__class__.__name__, sep='') print (prefix, self.__class__.__name__, sep='')
prefix += ' ' prefix += ' '
for tag, child in self.iteritems(): for tag, child in iteritems(self):
print (prefix, tag, sep='') print (prefix, tag, sep='')
child.dump(prefix=prefix+' ') child.dump(prefix=prefix+' ')

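ListTable is an OrderedDict whose entries map a tag to a nested table, so dump() only needs to iterate its own items to walk the whole tree. A minimal sketch with made-up tags; the real tables hold parsed OpenType data rather than further ListTables all the way down:

from collections import OrderedDict

class ListTable(OrderedDict):
    def dump(self, prefix=''):
        # Print this table's name, then each tag and its child, indented.
        print(prefix + self.__class__.__name__)
        prefix += ' '
        for tag, child in self.items():
            print(prefix + tag)
            child.dump(prefix=prefix + ' ')

root = ListTable(GSUB=ListTable(latn=ListTable()))
root.dump()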
View File

@ -15,7 +15,7 @@ from functools import partial
from calibre.utils.icu import safe_chr, ord_string from calibre.utils.icu import safe_chr, ord_string
from calibre.utils.fonts.sfnt.container import Sfnt from calibre.utils.fonts.sfnt.container import Sfnt
from calibre.utils.fonts.sfnt.errors import UnsupportedFont, NoGlyphs from calibre.utils.fonts.sfnt.errors import UnsupportedFont, NoGlyphs
from polyglot.builtins import unicode_type, range from polyglot.builtins import unicode_type, range, iteritems
# TrueType outlines {{{ # TrueType outlines {{{
@ -37,7 +37,7 @@ def resolve_glyphs(loca, glyf, character_map, extra_glyphs):
if gid not in resolved_glyphs: if gid not in resolved_glyphs:
unresolved_glyphs.add(gid) unresolved_glyphs.add(gid)
return OrderedDict(sorted(resolved_glyphs.iteritems(), key=itemgetter(0))) return OrderedDict(sorted(iteritems(resolved_glyphs), key=itemgetter(0)))
def subset_truetype(sfnt, character_map, extra_glyphs): def subset_truetype(sfnt, character_map, extra_glyphs):
@ -56,7 +56,7 @@ def subset_truetype(sfnt, character_map, extra_glyphs):
'set, subsetting it is pointless') 'set, subsetting it is pointless')
# Keep only character codes that have resolved glyphs # Keep only character codes that have resolved glyphs
for code, glyph_id in tuple(character_map.iteritems()): for code, glyph_id in tuple(iteritems(character_map)):
if glyph_id not in resolved_glyphs: if glyph_id not in resolved_glyphs:
del character_map[code] del character_map[code]
@ -360,7 +360,7 @@ def all():
print() print()
if warnings: if warnings:
print ('\n\nWarnings:') print ('\n\nWarnings:')
for name, w in warnings.iteritems(): for name, w in iteritems(warnings):
if w: if w:
print (name) print (name)
print('', '\n\t'.join(w), sep='\t') print('', '\n\t'.join(w), sep='\t')

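The tuple() wrapper in the character_map loop is what makes the deletion safe: removing keys from a dict while iterating a live view raises RuntimeError on Python 3 (and blows up under iteritems() on Python 2 as well), so the items are snapshotted first. A small self-contained example of the same pattern, with made-up glyph data:

character_map = {0x41: 1, 0x42: 2, 0x43: 99}   # code point -> glyph id
resolved_glyphs = {1: b'glyf-a', 2: b'glyf-b'}

# Snapshot the items before mutating the dict inside the loop.
for code, glyph_id in tuple(character_map.items()):
    if glyph_id not in resolved_glyphs:
        del character_map[code]

print(character_map)  # {65: 1, 66: 2}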
View File

@ -201,7 +201,7 @@ class JSError(Exception):
if fn: if fn:
msg = type('')(fn) + ':' + msg msg = type('')(fn) + ':' + msg
Exception.__init__(self, msg) Exception.__init__(self, msg)
for k, v in e.iteritems(): for k, v in e.items():
if k != 'message': if k != 'message':
setattr(self, k, v) setattr(self, k, v)
else: else:
@ -230,7 +230,7 @@ contexts = {}
def create_context(base_dirs, *args): def create_context(base_dirs, *args):
data = to_python(args[0]) if args else {} data = to_python(args[0]) if args else {}
ctx = Context(base_dirs=base_dirs) ctx = Context(base_dirs=base_dirs)
for k, val in data.iteritems(): for k, val in data.items():
setattr(ctx.g, k, val) setattr(ctx.g, k, val)
key = id(ctx) key = id(ctx)
contexts[key] = ctx contexts[key] = ctx
@ -280,7 +280,7 @@ class Context(object):
if (!String.prototype.trim) { if (!String.prototype.trim) {
(function() { (function() {
// Make sure we trim BOM and NBSP // Make sure we trim BOM and NBSP
var rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g; var rtrim = /^[\\s\uFEFF\xA0]+|[\\s\uFEFF\xA0]+$/g;
String.prototype.trim = function() { String.prototype.trim = function() {
return this.replace(rtrim, ''); return this.replace(rtrim, '');
}; };
@ -289,7 +289,7 @@ class Context(object):
if (!String.prototype.trimLeft) { if (!String.prototype.trimLeft) {
(function() { (function() {
// Make sure we trim BOM and NBSP // Make sure we trim BOM and NBSP
var rtrim = /^[\s\uFEFF\xA0]+/g; var rtrim = /^[\\s\uFEFF\xA0]+/g;
String.prototype.trimLeft = function() { String.prototype.trimLeft = function() {
return this.replace(rtrim, ''); return this.replace(rtrim, '');
}; };
@ -298,7 +298,7 @@ class Context(object):
if (!String.prototype.trimRight) { if (!String.prototype.trimRight) {
(function() { (function() {
// Make sure we trim BOM and NBSP // Make sure we trim BOM and NBSP
var rtrim = /[\s\uFEFF\xA0]+$/g; var rtrim = /[\\s\uFEFF\xA0]+$/g;
String.prototype.trimRight = function() { String.prototype.trimRight = function() {
return this.replace(rtrim, ''); return this.replace(rtrim, '');
}; };

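The regex changes in this hunk are about Python string escapes, not the regex itself: '\s' is not a recognised Python escape, so it passes through unchanged but triggers a DeprecationWarning on Python 3.6+, while '\uFEFF' and '\xA0' are valid escapes that must stay single so they expand to the BOM and NBSP characters the JavaScript shim needs. Doubling only the '\s' keeps both interpreters quiet, as this one-line illustration shows:

# The engine embedding this source sees the regex token \s unchanged;
# the BOM and NBSP have already been expanded by Python.
js_shim = 'var rtrim = /^[\\s\uFEFF\xA0]+|[\\s\uFEFF\xA0]+$/g;'
print(js_shim)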
View File

@ -881,7 +881,7 @@ dl.notes dd:last-of-type { page-break-after: avoid }
css_styles = {} css_styles = {}
for name in self.stylestack: for name in self.stylestack:
styles = self.styledict.get(name) styles = self.styledict.get(name)
css2 = tuple(self.cs.convert_styles(styles).iteritems()) css2 = tuple(self.cs.convert_styles(styles).items())
if css2 in css_styles: if css2 in css_styles:
css_styles[css2].append(name) css_styles[css2].append(name)
else: else:
@ -902,7 +902,7 @@ dl.notes dd:last-of-type { page-break-after: avoid }
if k not in ignore: if k not in ignore:
yield k, v yield k, v
for css2, names in css_styles.iteritems(): for css2, names in css_styles.items():
self.writeout("%s {\n" % ', '.join(names)) self.writeout("%s {\n" % ', '.join(names))
for style, val in filter_margins(css2): for style, val in filter_margins(css2):
self.writeout("\t%s: %s;\n" % (style, val)) self.writeout("\t%s: %s;\n" % (style, val))
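The tuple of converted declarations acts as a deduplication key: dicts are not hashable, a tuple of their items is, so styles that convert to the same CSS end up under one key and their names are grouped into a single selector list. A sketch of the grouping with made-up styles; convert_styles, the margin filtering and the writeout formatting of the real code are omitted:

styles_by_name = {
    'P1': {'margin-top': '0pt', 'font-size': '12pt'},
    'P2': {'margin-top': '0pt', 'font-size': '12pt'},
    'H1': {'font-size': '18pt'},
}
css_styles = {}
for name, styles in styles_by_name.items():
    key = tuple(sorted(styles.items()))  # sorted for a stable key in this sketch
    css_styles.setdefault(key, []).append(name)

for props, names in css_styles.items():
    print('%s { %s }' % (', '.join(names), '; '.join('%s: %s' % p for p in props)))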