Fix various warnings from running updated pyflakes on the entire codebase

Kovid Goyal 2014-04-13 09:51:00 +05:30
parent d425bcceca
commit e49227ee40
33 changed files with 66 additions and 64 deletions
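
Most of the hunks below silence the same pyflakes warning, "list comprehension redefines 'x' from line N": in Python 2 a comprehension variable leaks into the enclosing scope, so reusing an existing name both triggers the warning and rebinds the outer variable. The remaining hunks drop assignments whose names are never used. A minimal sketch of the shadowing pattern and of the rename fix applied throughout (hypothetical names, not code from this commit):

    feeds = {'news': [], 'sport': []}
    f = open('log.txt', 'w')  # 'f' is already bound in this scope

    # pyflakes: "list comprehension redefines 'f'"; in Python 2 the
    # comprehension variable also leaks and rebinds the outer 'f'
    empty = [f for f in feeds if not feeds[f]]

    # the fix used throughout this commit: a distinct loop-variable name
    empty = [fl for fl in feeds if not feeds[fl]]
    f.write('%d empty feeds\n' % len(empty))  # 'f' is still the file
    f.close()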

View File

@@ -124,5 +124,5 @@ class CIO_Magazine(BasicNewsRecipe):
 dict(title=title, url=url, date=pubdate,
 description=description,
 content=''))
-feeds = [(key, articles[key]) for key in feeds if articles.has_key(key)]
+feeds = [(k, articles[k]) for k in feeds if articles.has_key(k)]
 return feeds

View File

@@ -114,5 +114,5 @@ class ElDiplo_Recipe(BasicNewsRecipe):
 articles[section].append(dict(title=title,author=auth,url=url,date=None,description=description,content=''))
 #ans = self.sort_index_by(ans, {'The Front Page':-1, 'Dining In, Dining Out':1, 'Obituaries':2})
-ans = [(section, articles[section]) for section in ans if articles.has_key(section)]
+ans = [(s, articles[s]) for s in ans if articles.has_key(s)]
 return ans

View File

@@ -80,7 +80,7 @@ class ModorosBlogHu(BasicNewsRecipe):
 for h in cur_items:
 f.write(h+'\n')
-remove = [f for f in feeds if len(f) == 0 and
+remove = [fl for fl in feeds if len(fl) == 0 and
 self.remove_empty_feeds]
 for f in remove:
 feeds.remove(f)

View File

@@ -100,7 +100,7 @@ class OfficeSpaceBlogHu(BasicNewsRecipe):
 for h in cur_items:
 f.write(h+'\n')
-remove = [f for f in feeds if len(f) == 0 and
+remove = [fl for fl in feeds if len(fl) == 0 and
 self.remove_empty_feeds]
 for f in remove:
 feeds.remove(f)

View File

@@ -46,5 +46,5 @@ class NYTimes(BasicNewsRecipe):
 if feed not in articles:
 articles[feed] = []
 articles[feed].append(dict(title=title, url=url, date=pubdate,description=description,content=''))
-ans = [(key, articles[key]) for key in articles.keys()]
+ans = [(keyl, articles[keyl]) for keyl in articles.keys()]
 return ans

View File

@@ -216,5 +216,5 @@ class CanWestPaper(BasicNewsRecipe):
 articles[key] = []
 articles[key].append(dict(title=title,url=url,date=pubdate,description=description,author=author,content=''))
-ans = [(key, articles[key]) for key in ans if articles.has_key(key)]
+ans = [(keyl, articles[keyl]) for keyl in ans if articles.has_key(keyl)]
 return ans

View File

@@ -216,5 +216,5 @@ class CanWestPaper(BasicNewsRecipe):
 articles[key] = []
 articles[key].append(dict(title=title,url=url,date=pubdate,description=description,author=author,content=''))
-ans = [(key, articles[key]) for key in ans if articles.has_key(key)]
+ans = [(k, articles[k]) for k in ans if articles.has_key(k)]
 return ans

View File

@@ -102,5 +102,5 @@ class CanWestPaper(BasicNewsRecipe):
 articles[key] = []
 articles[key].append(dict(title=title,url=url,date=pubdate,description=description,author=author,content=''))
-ans = [(key, articles[key]) for key in ans if articles.has_key(key)]
+ans = [(keyl, articles[keyl]) for keyl in ans if articles.has_key(keyl)]
 return ans

View File

@@ -217,5 +217,5 @@ class CanWestPaper(BasicNewsRecipe):
 articles[key] = []
 articles[key].append(dict(title=title,url=url,date=pubdate,description=description,author=author,content=''))
-ans = [(key, articles[key]) for key in ans if articles.has_key(key)]
+ans = [(keyl, articles[keyl]) for keyl in ans if articles.has_key(keyl)]
 return ans

View File

@@ -211,7 +211,7 @@ class AddRemoveTest(BaseTest):
 'Test removal of books'
 cl = self.cloned_library
 cache = self.init_cache()
-af, ae, at = self.assertFalse, self.assertEqual, self.assertTrue
+af, ae = self.assertFalse, self.assertEqual
 authors = cache.fields['authors'].table
 # Delete a single book, with no formats and check cleaning

View File

@@ -285,7 +285,7 @@ class LegacyTest(BaseTest):
 ndb = self.init_legacy()
 db = self.init_old()
 all_ids = ndb.new_api.all_book_ids()
-op1, op2 = {'xx':'yy'}, {'yy':'zz'}
+op1 = {'xx':'yy'}
 for x in (
 ('has_conversion_options', all_ids),
 ('conversion_options', 1, 'PIPE'),

View File

@@ -493,7 +493,7 @@ class ReadingTest(BaseTest):
 cache = self.init_cache()
 cache._search_api.cache = c = TestCache()
-ae, at = self.assertEqual, self.assertTrue
+ae = self.assertEqual
 def test(hit, result, *args):
 c.cc

View File

@@ -337,7 +337,7 @@ class WritingTest(BaseTest):
 'Test the automatic backup of changed metadata'
 cl = self.cloned_library
 cache = self.init_cache(cl)
-ae, af, sf, ff = self.assertEqual, self.assertFalse, cache.set_field, cache.field_for
+ae, af, sf = self.assertEqual, self.assertFalse, cache.set_field
 # First empty dirtied
 cache.dump_metadata()
 af(cache.dirtied_cache)

View File

@@ -720,7 +720,7 @@ class KOBO(USBMS):
 return book
 def get_device_paths(self):
-paths, prefixes = {}, {}
+paths = {}
 for prefix, path, source_id in [
 ('main', 'metadata.calibre', 0),
 ('card_a', 'metadata.calibre', 1),

View File

@@ -185,6 +185,7 @@ def cleanup_markup(log, root, styles, dest_dir, detect_cover):
 width, height, fmt = identify(path)
 except:
 width, height, fmt = 0, 0, None
+del fmt
 try:
 is_cover = 0.8 <= height/width <= 1.8 and height*width >= 160000
 except ZeroDivisionError:
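
The added del is the idiom for an intentionally unused name from tuple unpacking: only width and height are needed below, and deleting fmt removes pyflakes' assigned-but-never-used warning without changing behaviour. A small self-contained illustration of the idiom (hypothetical helper, not from the commit):

    def image_area(triple):
        width, height, fmt = triple  # e.g. a (width, height, format) result
        del fmt  # unpacked but unused; del silences the pyflakes warning
        return width * height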

View File

@@ -126,8 +126,8 @@ def mi_to_html(mi, field_list=None, default_author_link=None, use_roman_numbers=
 ans.append((field, row % (name, u', '.join(fmts))))
 elif field == 'identifiers':
 urls = urls_from_identifiers(mi.identifiers)
-links = [u'<a href="%s" title="%s:%s">%s</a>' % (a(url), a(id_typ), a(id_val), p(name))
-for name, id_typ, id_val, url in urls]
+links = [u'<a href="%s" title="%s:%s">%s</a>' % (a(url), a(id_typ), a(id_val), p(namel))
+for namel, id_typ, id_val, url in urls]
 links = u', '.join(links)
 if links:
 ans.append((field, row % (_('Ids')+':', links)))
@@ -189,8 +189,8 @@ def mi_to_html(mi, field_list=None, default_author_link=None, use_roman_numbers=
 dt = 'text'
 return 'datatype_%s'%dt
-ans = [u'<tr id="%s" class="%s">%s</tr>'%(field.replace('#', '_'),
-classname(field), html) for field, html in ans]
+ans = [u'<tr id="%s" class="%s">%s</tr>'%(fieldl.replace('#', '_'),
+classname(fieldl), html) for fieldl, html in ans]
 # print '\n'.join(ans)
 return u'<table class="fields">%s</table>'%(u'\n'.join(ans)), comment_fields

View File

@@ -23,13 +23,15 @@ def get_metadata(stream):
 while 1:
 data = stream.read(1)
 if data == '\x00':
-if not skip: return result
+if not skip:
+return result
 skip -= 1
 result, data = '', ''
 result += data
 stream.read(38) # skip past some uninteresting headers
-_, category, title, author = cString(), cString(), cString(1), cString(2)
+cString()
+category, title, author = cString(), cString(1), cString(2)
 if title:
 mi.title = title

View File

@@ -299,8 +299,8 @@ class Edelweiss(Source):
 if not entries:
 return
-workers = [Worker(sku, url, i, result_queue, br.clone_browser(), timeout, log, self)
-for i, (url, sku) in enumerate(entries[:5])]
+workers = [Worker(skul, url, i, result_queue, br.clone_browser(), timeout, log, self)
+for i, (url, skul) in enumerate(entries[:5])]
 for w in workers:
 w.start()

View File

@@ -120,8 +120,8 @@ def embed_all_fonts(container, stats, report):
 # Write out CSS
 rules = [';\n\t'.join('%s: %s' % (
-k, '"%s"' % v if k == 'font-family' else v) for k, v in rule.iteritems() if (k in props and props[k] != v and v != '400') or k == 'src')
-for rule in rules]
+k, '"%s"' % v if k == 'font-family' else v) for k, v in rulel.iteritems() if (k in props and props[k] != v and v != '400') or k == 'src')
+for rulel in rules]
 css = '\n\n'.join(['@font-face {\n\t%s\n}' % r for r in rules])
 item = container.generate_item('fonts.css', id_prefix='font_embed')
 name = container.href_to_name(item.get('href'), container.opf_name)

View File

@@ -231,7 +231,7 @@ class AutoAdder(QObject):
 paths.extend(p)
 formats.extend(f)
 metadata.extend(mis)
-dups = [(mi, mi.cover, [p]) for mi, p in zip(metadata, paths)]
+dups = [(mic, mic.cover, [p]) for mic, p in zip(metadata, paths)]
 d = DuplicatesQuestion(m.db, dups, parent=gui)
 dups = tuple(d.duplicates)
 if dups:

View File

@@ -78,7 +78,7 @@ class PluginWidget(QWidget, Ui_Form):
 # Dictionary currently activated fields
 if len(self.db_fields.selectedItems()):
-opts_dict = {'fields':[unicode(item.text()) for item in self.db_fields.selectedItems()]}
+opts_dict = {'fields':[unicode(i.text()) for i in self.db_fields.selectedItems()]}
 else:
 opts_dict = {'fields':['all']}

View File

@@ -56,6 +56,6 @@ class PluginWidget(QWidget, Ui_Form):
 # Return a dictionary with current options for this widget
 if len(self.db_fields.selectedItems()):
-return {'fields':[unicode(item.text()) for item in self.db_fields.selectedItems()]}
+return {'fields':[unicode(i.text()) for i in self.db_fields.selectedItems()]}
 else:
 return {'fields':['all']}

View File

@@ -200,11 +200,11 @@ def dnd_get_image(md, image_exts=IMAGE_EXTENSIONS):
 md.urls()]
 purls = [urlparse(u) for u in urls]
 # First look for a local file
-images = [u2p(x) for x in purls if x.scheme in ('', 'file')]
-images = [x for x in images if
-posixpath.splitext(urllib.unquote(x))[1][1:].lower() in
+images = [u2p(xu) for xu in purls if xu.scheme in ('', 'file')]
+images = [xi for xi in images if
+posixpath.splitext(urllib.unquote(xi))[1][1:].lower() in
 image_exts]
-images = [x for x in images if os.path.exists(x)]
+images = [xi for xi in images if os.path.exists(xi)]
 p = QPixmap()
 for path in images:
 try:
@@ -223,8 +223,8 @@ def dnd_get_image(md, image_exts=IMAGE_EXTENSIONS):
 if rurl and fname:
 return rurl, fname
 # Look through all remaining URLs
-remote_urls = [x for x in purls if x.scheme in ('http', 'https',
-'ftp') and posixpath.splitext(x.path)[1][1:].lower() in image_exts]
+remote_urls = [xu for xu in purls if xu.scheme in ('http', 'https',
+'ftp') and posixpath.splitext(xu.path)[1][1:].lower() in image_exts]
 if remote_urls:
 rurl = remote_urls[0]
 fname = posixpath.basename(urllib.unquote(rurl.path))

View File

@@ -763,7 +763,7 @@ class FormatsManager(QWidget):
 db.add_format(id_, ext, spool, notify=False,
 index_is_id=True)
 dbfmts = db.formats(id_, index_is_id=True)
-db_extensions = set([f.lower() for f in (dbfmts.split(',') if dbfmts
+db_extensions = set([fl.lower() for fl in (dbfmts.split(',') if dbfmts
 else [])])
 extensions = new_extensions.union(old_extensions)
 for ext in db_extensions:
@@ -1310,7 +1310,7 @@ class IdentifiersEdit(QLineEdit): # {{{
 if v is not None:
 val[k] = v
 ids = sorted(val.iteritems(), key=keygen)
-txt = ', '.join(['%s:%s'%(k.lower(), v) for k, v in ids])
+txt = ', '.join(['%s:%s'%(k.lower(), vl) for k, vl in ids])
 # Use clear + insert instead of setText so that undo works
 self.clear()
 self.insert(txt.strip())

View File

@@ -148,7 +148,7 @@ class Matches(QAbstractItemModel):
 def setData(self, index, data, role):
 if not index.isValid():
 return False
-row, col = index.row(), index.column()
+col = index.column()
 if col == 0:
 if data.toBool():
 enable_plugin(self.get_plugin(index))
@@ -225,7 +225,7 @@ class SearchFilter(SearchQueryParser):
 elif query.startswith('~'):
 matchkind = REGEXP_MATCH
 query = query[1:]
-if matchkind != REGEXP_MATCH: ### leave case in regexps because it can be significant e.g. \S \W \D
+if matchkind != REGEXP_MATCH: # leave case in regexps because it can be significant e.g. \S \W \D
 query = query.lower()
 if location not in self.USABLE_LOCATIONS:
@@ -265,9 +265,9 @@
 if locvalue in ('affiliate', 'drm', 'enabled'):
 continue
 try:
-### Can't separate authors because comma is used for name sep and author sep
-### Exact match might not get what you want. For that reason, turn author
-### exactmatch searches into contains searches.
+# Can't separate authors because comma is used for name sep and author sep
+# Exact match might not get what you want. For that reason, turn author
+# exactmatch searches into contains searches.
 if locvalue == 'name' and matchkind == EQUALS_MATCH:
 m = CONTAINS_MATCH
 else:

View File

@@ -115,7 +115,7 @@ def get_decoded_raw(name):
 enc = force_encoding(raw, verbose=True)
 try:
 raw = raw.decode(enc)
-except ValueError:
+except (LookupError, ValueError):
 pass
 return raw, syntax
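
The widened except matters because str.decode raises LookupError, not ValueError, when the detected encoding name is unknown to Python; only undecodable bytes raise UnicodeDecodeError, which is a ValueError subclass. A minimal illustration (assumed example, not from the commit):

    raw = b'caf\xc3\xa9'
    for enc in ('utf-8', 'latin-1', 'no-such-codec'):
        try:
            text = raw.decode(enc)
        except (LookupError, ValueError):
            # unknown codec name -> LookupError
            # undecodable bytes  -> UnicodeDecodeError, a ValueError subclass
            text = None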

View File

@@ -636,7 +636,7 @@ class DiffSplit(QSplitter): # {{{
 for x, val in v.line_number_map.iteritems():
 dict.__setitem__(lnm, mapnum(x), val)
 v.line_number_map = lnm
-v.changes = [(mapnum(t), mapnum(b), kind) for t, b, kind in v.changes]
+v.changes = [(mapnum(t), mapnum(b), k) for t, b, k in v.changes]
 v.headers = [(mapnum(x), name) for x, name in v.headers]
 v.images = OrderedDict((mapnum(x), v) for x, v in v.images.iteritems())
 v.viewport().update()
@@ -826,7 +826,6 @@
 self.replace_helper(alo, best_i, blo, best_j)
 # do intraline marking on the synch pair
-aelt, belt = a[best_i], b[best_j]
 if eqi is None:
 self.do_replace(best_i, best_i+1, best_j, best_j+1)
 else:

View File

@@ -680,7 +680,7 @@ class FileList(QTreeWidget):
 l.addWidget(bb)
 if d.exec_() == d.Accepted:
 tprefs['remove_existing_links_when_linking_sheets'] = r.isChecked()
-sheets = [unicode(s.item(i).text()) for i in xrange(s.count()) if s.item(i).checkState() == Qt.Checked]
+sheets = [unicode(s.item(il).text()) for il in xrange(s.count()) if s.item(il).checkState() == Qt.Checked]
 if sheets:
 self.link_stylesheets_requested.emit(names, sheets, r.isChecked())

View File

@@ -282,8 +282,8 @@ class BrowseServer(object):
 ans = ans.replace('{Search}', _('Search'))
 opts = ['<option %svalue="%s">%s</option>' % (
 'selected="selected" ' if k==sort else '',
-xml(k), xml(n), ) for k, n in
-sorted(sort_opts, key=lambda x: sort_key(operator.itemgetter(1)(x))) if k and n]
+xml(k), xml(nl), ) for k, nl in
+sorted(sort_opts, key=lambda x: sort_key(operator.itemgetter(1)(x))) if k and nl]
 ans = ans.replace('{sort_select_options}', ('\n'+' '*20).join(opts))
 lp = self.db.library_path
 if isbytestring(lp):

View File

@@ -764,8 +764,8 @@ class DNSOutgoing(object):
 self.finished = 1
 for question in self.questions:
 self.writeQuestion(question)
-for answer, time in self.answers:
-self.writeRecord(answer, time)
+for answer, atime in self.answers:
+self.writeRecord(answer, atime)
 for authority in self.authorities:
 self.writeRecord(authority, 0)
 for additional in self.additionals:
@@ -873,9 +873,9 @@ class Engine(threading.Thread):
 rr, wr, er = select.select(rs, [], [], self.timeout)
 if globals()['_GLOBAL_DONE']:
 continue
-for socket in rr:
+for sock in rr:
 try:
-self.readers[socket].handle_read()
+self.readers[sock].handle_read()
 except:
 if DEBUG:
 traceback.print_exc()
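
The zeroconf renames fix a different flavour of shadowing: time and socket are the names of modules imported in that file, so using them as loop variables makes pyflakes report the import as redefined and hides the module inside the loop body. A sketch of the hazard (hypothetical code, not the module's own):

    import socket

    def drain(readers, ready):
        # 'for socket in ready' would shadow the socket module here and
        # trigger pyflakes' redefinition warning; a distinct name avoids both
        for sock in ready:
            readers[sock].handle_read()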

View File

@@ -1811,7 +1811,7 @@ Enter SQL statements terminated with a ";"
 identity=lambda x:x
 for i in range(ncols):
 if len(datas[i])>1:
-raise self.Error("Column #%d \"%s\" has ambiguous data format - %s" % (i+1, header[i], ", ".join([d.__name__ for d in datas[i]])))
+raise self.Error("Column #%d \"%s\" has ambiguous data format - %s" % (i+1, header[i], ", ".join([dl.__name__ for dl in datas[i]])))
 if datas[i]:
 datas[i]=datas[i][0]
 else:

View File

@@ -192,7 +192,7 @@ def _get_line(img, dw, tokens, line_width):
 line, rest = tokens, []
 while True:
 m = img.font_metrics(dw, ' '.join(line))
-width, height = m.text_width, m.text_height
+width = m.text_width
 if width < line_width:
 return line, rest
 rest = line[-1:] + rest

View File

@@ -1601,7 +1601,7 @@ class BasicNewsRecipe(Recipe):
 parsed_feeds.append(feed)
 self.log.exception(msg)
-remove = [f for f in parsed_feeds if len(f) == 0 and
+remove = [fl for fl in parsed_feeds if len(fl) == 0 and
 self.remove_empty_feeds]
 for f in remove:
 parsed_feeds.remove(f)