commit 4f545af415 (parent a623717d96)
Author: Eli Schwartz
Date:   2019-03-24 00:54:22 -04:00
GPG Key ID: CEB167EFB5722BD6 (no known key found for this signature in database)

47 changed files with 137 additions and 65 deletions

View File

@@ -374,10 +374,10 @@ def parallel_fetch(old_index, entry):
 
 def log(*args, **kwargs):
-    print (*args, **kwargs)
+    print(*args, **kwargs)
     with open('log', 'a') as f:
         kwargs['file'] = f
-        print (*args, **kwargs)
+        print(*args, **kwargs)
 
 
 def atomic_write(raw, name):
@@ -682,7 +682,7 @@ def test_parse(): # {{{
     new_entries = tuple(parse_index(raw))
     for i, entry in enumerate(old_entries):
         if entry != new_entries[i]:
-            print ('The new entry: %s != %s' % (new_entries[i], entry))
+            print('The new entry: %s != %s' % (new_entries[i], entry))
             raise SystemExit(1)
     pool = ThreadPool(processes=20)
     urls = [e.url for e in new_entries]
@@ -699,7 +699,7 @@ def test_parse(): # {{{
                 break
         new_url, aname = parse_plugin_zip_url(raw)
         if new_url != full_url:
-            print ('new url (%s): %s != %s for plugin at: %s' % (aname, new_url, full_url, url))
+            print('new url (%s): %s != %s for plugin at: %s' % (aname, new_url, full_url, url))
             raise SystemExit(1)
 # }}}
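
Most hunks in this commit make the same mechanical change as above: dropping the space between `print` and its argument list. A minimal sketch, not taken from calibre, of why the two spellings behave identically on Python 3 yet are still worth normalising, both for style checkers and for Python 2 compatibility:

```python
from __future__ import print_function  # makes print a function on Python 2 as well

# With print as a function these two calls are identical; the first merely
# trips pycodestyle's E211 check ("whitespace before '('").
print ('a', 'b')
print('a', 'b')

# Without the __future__ import, Python 2 would treat the first line as the
# print *statement* applied to a tuple and output ('a', 'b') instead of: a b
```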

View File

@@ -196,7 +196,7 @@ def run_plugins_on_postimport(db, book_id, fmt):
         try:
             plugin.postimport(book_id, fmt, db)
         except:
-            print ('Running file type plugin %s failed with traceback:'%
+            print('Running file type plugin %s failed with traceback:'%
                     plugin.name)
             traceback.print_exc()
@@ -211,7 +211,7 @@ def run_plugins_on_postadd(db, book_id, fmt_map):
         try:
             plugin.postadd(book_id, fmt_map, db)
         except Exception:
-            print ('Running file type plugin %s failed with traceback:'%
+            print('Running file type plugin %s failed with traceback:'%
                     plugin.name)
             traceback.print_exc()
@@ -728,7 +728,7 @@ def initialize_plugins(perf=False):
     sys.stdout, sys.stderr = ostdout, ostderr
     if perf:
         for x in sorted(times, key=lambda x:times[x]):
-            print ('%50s: %.3f'%(x, times[x]))
+            print('%50s: %.3f'%(x, times[x]))
     _initialized_plugins.sort(cmp=lambda x,y:cmp(x.priority, y.priority), reverse=True)
     reread_filetype_plugins()
     reread_metadata_plugins()

View File

@@ -347,7 +347,7 @@ def main(args=sys.argv):
         elif ext in {'mobi', 'azw', 'azw3'}:
             inspect_mobi(path)
         else:
-            print ('Cannot dump unknown filetype: %s' % path)
+            print('Cannot dump unknown filetype: %s' % path)
     elif len(args) >= 2 and os.path.exists(os.path.join(args[1], '__main__.py')):
         sys.path.insert(0, args[1])
         run_script(os.path.join(args[1], '__main__.py'), args[2:])

View File

@@ -12,6 +12,7 @@ class Bookmark(): # {{{
     A simple class fetching bookmark data
     kobo-specific
     '''
+
     def __init__(self, db_connection, contentid, path, id, book_format, bookmark_extension):
         self.book_format = book_format
         self.bookmark_extension = bookmark_extension
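
Several hunks below, like this one, add nothing but blank lines; they satisfy PEP 8's blank-line rules as enforced by pycodestyle (E301: one blank line between methods, E302: two blank lines before a top-level def or class, E305: two blank lines after one). A minimal sketch of the expected spacing, with made-up names:

```python
class Bookmark:

    def __init__(self, pos):
        self.pos = pos

    def reset(self):               # E301: one blank line between methods
        self.pos = 0


BOOKMARK_EXTENSIONS = ('.annot',)  # E305: two blank lines after the class body


def read_bookmark(path):           # E302: two blank lines before a top-level def
    return Bookmark(0)
```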

View File

@@ -39,6 +39,7 @@ class Field(object):
         self.instructions = ''.join(self.buf)
         del self.buf
 
+
 WORD, FLAG = 0, 1
 scanner = re.Scanner([
     (r'\\\S{1}', lambda s, t: (t, FLAG)), # A flag of the form \x
@@ -77,6 +78,7 @@ def parser(name, field_map, default_field_name=None):
     return parse
 
+
 parse_hyperlink = parser('hyperlink',
     'l:anchor m:image-map n:target o:title t:target', 'url')
@@ -257,5 +259,6 @@ def test_parse_fields(return_tests=False):
         return suite
     unittest.TextTestRunner(verbosity=4).run(suite)
 
+
 if __name__ == '__main__':
     test_parse_fields()

View File

@@ -37,6 +37,7 @@ def alphabet(val, lower=True):
     x = string.ascii_lowercase if lower else string.ascii_uppercase
     return x[(abs(val - 1)) % len(x)]
 
+
 alphabet_map = {
     'lower-alpha':alphabet, 'upper-alpha':partial(alphabet, lower=False),
     'lower-roman':lambda x:roman(x).lower(), 'upper-roman':roman,

View File

@@ -279,4 +279,4 @@ class DOCX(object):
 
 if __name__ == '__main__':
     d = DOCX(None, None)
-    print (d.websettings)
+    print(d.websettings)

View File

@@ -652,7 +652,7 @@ class LrfWriter(object):
         return self.sourceEncoding
 
     def toUnicode(self, string):
-        if type(string) is str:
+        if isinstance(string, str):
             string = string.decode(self.sourceEncoding)
         return string
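
The `type(x) is ...` to `isinstance(x, ...)` rewrite above recurs throughout this commit. A minimal sketch, not taken from calibre, of the behavioural difference: an identity check on `type()` rejects subclasses, while `isinstance` accepts them:

```python
class MyStr(str):
    pass

s = MyStr('hello')

print(type(s) is str)      # False: the identity check matches str and nothing else
print(isinstance(s, str))  # True: isinstance also accepts subclasses of str
```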

View File

@@ -67,10 +67,10 @@ def parse_meta_tags(src):
     all_names = '|'.join(rmap)
     ans = {}
     npat = r'''name\s*=\s*['"]{0,1}(?P<name>%s)['"]{0,1}''' % all_names
-    cpat = 'content\s*=\s*%s' % attr_pat
+    cpat = r'content\s*=\s*%s' % attr_pat
     for pat in (
-            '<meta\s+%s\s+%s' % (npat, cpat),
-            '<meta\s+%s\s+%s' % (cpat, npat),
+            r'<meta\s+%s\s+%s' % (npat, cpat),
+            r'<meta\s+%s\s+%s' % (cpat, npat),
             ):
         for match in re.finditer(pat, src, flags=re.IGNORECASE):
             x = match.group('name').lower()
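
For context on the `r''` prefixes added above: starting with Python 3.6, an unrecognised escape such as `\s` inside a plain string literal raises a DeprecationWarning (planned to become an error), so regex patterns are written as raw strings and the backslash reaches the regex engine untouched. A minimal sketch, not taken from calibre:

```python
import re

# A raw string hands the backslash straight to the regex engine, so the pattern
# does not depend on how the Python compiler treats unknown escape sequences.
cpat = r'content\s*=\s*(\S+)'
print(re.search(cpat, 'content = 42').group(1))  # -> 42
```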

View File

@@ -599,7 +599,7 @@ class TextRecord(object): # {{{
         for typ, val in iteritems(self.trailing_data):
             if isinstance(typ, numbers.Integral):
-                print ('Record %d has unknown trailing data of type: %d : %r'%
+                print('Record %d has unknown trailing data of type: %d : %r'%
                         (idx, typ, val))
 
         self.idx = idx

View File

@@ -105,7 +105,7 @@ class CNCX(object): # {{{
                 except:
                     byts = raw[pos:]
                     r = format_bytes(byts)
-                    print ('CNCX entry at offset %d has unknown format %s'%(
+                    print('CNCX entry at offset %d has unknown format %s'%(
                         pos+record_offset, r))
                     self.records[pos+record_offset] = r
                     pos = len(raw)
@@ -216,7 +216,7 @@ def parse_index_record(table, data, control_byte_count, tags, codec,
     header = parse_indx_header(data)
     idxt_pos = header['start']
     if data[idxt_pos:idxt_pos+4] != b'IDXT':
-        print ('WARNING: Invalid INDX record')
+        print('WARNING: Invalid INDX record')
     entry_count = header['count']
 
     # loop through to build up the IDXT position starts

View File

@@ -1530,7 +1530,7 @@ def test_roundtrip():
         ebook3 = get_container(p.name)
         diff = ebook3.compare_to(ebook2)
         if diff is not None:
-            print (diff)
+            print(diff)
 
 
 if __name__ == '__main__':

View File

@@ -65,5 +65,6 @@ def import_book_as_epub(srcpath, destpath, log=default_log):
             for name in c.name_path_map:
                 zf.writestr(name, c.raw_data(name, decode=False))
 
+
 if __name__ == '__main__':
     import_book_as_epub(sys.argv[-2], sys.argv[-1])

View File

@@ -89,6 +89,7 @@ def get_split_book(fmt='epub'):
         os.remove(x)
     return ans
 
+
 devnull = DevNull()

View File

@@ -220,4 +220,4 @@ def timing():
             f(raw)
             timings.append(monotonic() - st)
         avg = sum(timings)/len(timings)
-        print ('Average time for %s: %.2g' % (name, avg))
+        print('Average time for %s: %.2g' % (name, avg))

View File

@@ -121,7 +121,7 @@ def create_markdown_object(extensions):
             if hasattr(module, 'makeExtension'):
                 return module.makeExtension(**configs)
             for name, x in vars(module).items():
-                if type(x) is type and issubclass(x, Extension) and x is not Extension:
+                if isinstance(x, type) and issubclass(x, Extension) and x is not Extension:
                     return x(**configs)
             raise ImportError('No extension class in {}'.format(ext_name))
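
The `isinstance(x, type)` form above is not only stylistic: an identity check like `type(x) is type` misses classes built with a custom metaclass (for example ABCs), which are still perfectly good classes to pass to `issubclass`. A minimal sketch, not taken from calibre or python-markdown:

```python
from abc import ABCMeta

class Plain:
    pass

class Abstract(metaclass=ABCMeta):
    pass

print(type(Plain) is type)           # True
print(type(Abstract) is type)        # False: type(Abstract) is ABCMeta
print(isinstance(Abstract, type))    # True: ABCMeta is a subclass of type
print(issubclass(Abstract, object))  # both are ordinary classes as far as issubclass is concerned
```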

View File

@@ -506,4 +506,4 @@ class EmailMixin(object): # {{{
 if __name__ == '__main__':
     from PyQt5.Qt import QApplication
     app = QApplication([])  # noqa
-    print (select_recipients())
+    print(select_recipients())

View File

@@ -957,7 +957,7 @@ class GridView(QListView):
                 # rendered, but this is better than a deadlock
                 join_with_timeout(self.delegate.render_queue)
             except RuntimeError:
-                print ('Cover rendering thread is stuck!')
+                print('Cover rendering thread is stuck!')
             finally:
                 self.ignore_render_requests.clear()
         else:

View File

@@ -46,7 +46,7 @@ class CoverCache(dict):
     def _pop(self, book_id):
         val = self.items.pop(book_id, None)
-        if type(val) is QPixmap and current_thread() is not self.gui_thread:
+        if isinstance(val, QPixmap) and current_thread() is not self.gui_thread:
             self.pixmap_staging.append(val)
 
     def __getitem__(self, key):
@@ -55,7 +55,7 @@ class CoverCache(dict):
             self.clear_staging()
             ans = self.items.pop(key, False)  # pop() so that item is moved to the top
             if ans is not False:
-                if type(ans) is QImage:
+                if isinstance(ans, QImage):
                     # Convert to QPixmap, since rendering QPixmap is much
                     # faster
                     ans = QPixmap.fromImage(ans)
@@ -73,7 +73,7 @@ class CoverCache(dict):
     def clear(self):
         with self.lock:
             if current_thread() is not self.gui_thread:
-                pixmaps = (x for x in itervalues(self.items) if type(x) is QPixmap)
+                pixmaps = (x for x in itervalues(self.items) if isinstance(x, QPixmap))
                 self.pixmap_staging.extend(pixmaps)
             self.items.clear()

View File

@@ -1130,8 +1130,8 @@ if __name__ == '__main__':
         kind, col, r = d.rule
         print('Column to be colored:', col)
-        print ('Template:')
-        print (r.template)
+        print('Template:')
+        print(r.template)
     else:
         d = EditRules()
         d.resize(QSize(800, 600))

View File

@@ -68,6 +68,7 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
         from calibre.utils.config import config_dir
         open_local_file(config_dir)
 
+
 if __name__ == '__main__':
     from PyQt5.Qt import QApplication
     app = QApplication([])

View File

@@ -374,7 +374,7 @@ class TagsModel(QAbstractItemModel): # {{{
     def rebuild_node_tree(self, state_map={}):
         if self._build_in_progress:
-            print ('Tag browser build already in progress')
+            print('Tag browser build already in progress')
             traceback.print_stack()
             return
         # traceback.print_stack()
@@ -474,7 +474,7 @@ class TagsModel(QAbstractItemModel): # {{{
         intermediate_nodes = {}
 
         if data is None:
-            print ('_create_node_tree: no data!')
+            print('_create_node_tree: no data!')
             traceback.print_stack()
             return
@@ -1011,7 +1011,7 @@ class TagsModel(QAbstractItemModel): # {{{
                 if not isinstance(order, dict):
                     raise TypeError()
             except:
-                print ('Tweak tag_browser_category_order is not valid. Ignored')
+                print('Tweak tag_browser_category_order is not valid. Ignored')
                 order = {'*': 100}
             defvalue = order.get('*', 100)
             self.row_map = sorted(self.categories, key=lambda x: order.get(x, defvalue))
@@ -1031,7 +1031,7 @@ class TagsModel(QAbstractItemModel): # {{{
         Here to trap usages of refresh in the old architecture. Can eventually
         be removed.
         '''
-        print ('TagsModel: refresh called!')
+        print('TagsModel: refresh called!')
         traceback.print_stack()
         return False

View File

@@ -139,7 +139,7 @@ def string_diff(left, right, left_syntax=None, right_syntax=None, left_name='lef
 
 def file_diff(left, right):
     (raw1, syntax1), (raw2, syntax2) = map(get_decoded_raw, (left, right))
-    if type(raw1) is not type(raw2):
+    if not isinstance(raw1, type(raw2)):
         raw1, raw2 = open(left, 'rb').read(), open(right, 'rb').read()
     cache = Cache()
     cache.set_left(left, raw1), cache.set_right(right, raw2)

View File

@@ -269,6 +269,7 @@ def in_string(state, text, i, formats, user_data):
         state.parse = (NORMAL if state.blocks < 1 else IN_CONTENT)
         return [(pos - i + len(q), formats['string'])]
 
+
 state_map = {
     NORMAL:normal,
     IN_COMMENT_NORMAL: comment,

View File

@@ -32,6 +32,7 @@ def default_theme():
         _default_theme = 'wombat-dark' if isdark else 'pyte-light'
     return _default_theme
 
+
 # The solarized themes {{{
 SLDX = {'base03':'1c1c1c', 'base02':'262626', 'base01':'585858', 'base00':'626262', 'base0':'808080', 'base1':'8a8a8a', 'base2':'e4e4e4', 'base3':'ffffd7', 'yellow':'af8700', 'orange':'d75f00', 'red':'d70000', 'magenta':'af005f', 'violet':'5f5faf', 'blue':'0087ff', 'cyan':'00afaf', 'green':'5f8700'} # noqa
 SLD = {'base03':'002b36', 'base02':'073642', 'base01':'586e75', 'base00':'657b83', 'base0':'839496', 'base1':'93a1a1', 'base2':'eee8d5', 'base3':'fdf6e3', 'yellow':'b58900', 'orange':'cb4b16', 'red':'dc322f', 'magenta':'d33682', 'violet':'6c71c4', 'blue':'268bd2', 'cyan':'2aa198', 'green':'859900'} # noqa

View File

@@ -1338,7 +1338,7 @@ class ReportsWidget(QWidget):
             self.stack.widget(i)(data)
             if DEBUG:
                 category = self.reports.item(i).data(Qt.DisplayRole)
-                print ('Widget time for %12s: %.2fs seconds' % (category, time.time() - st))
+                print('Widget time for %12s: %.2fs seconds' % (category, time.time() - st))
 
     def save(self):
         save_state('splitter-state', bytearray(self.splitter.saveState()))
@@ -1440,7 +1440,7 @@ class Reports(Dialog):
         data, timing = data
         if DEBUG:
             for x, t in sorted(iteritems(timing), key=itemgetter(1)):
-                print ('Time for %6s data: %.3f seconds' % (x, t))
+                print('Time for %6s data: %.3f seconds' % (x, t))
         self.reports(data)
 
     def accept(self):

View File

@@ -569,8 +569,7 @@ class SearchesModel(QAbstractListModel):
     def dropMimeData(self, data, action, row, column, parent):
         if parent.isValid() or action != Qt.MoveAction or not data.hasFormat('x-calibre/searches-rows') or not self.filtered_searches:
             return False
-        rows = map(int, bytes(bytearray(data.data('x-calibre/searches-rows'))).decode('ascii').split(','))
-        rows.sort()
+        rows = sorted(map(int, bytes(bytearray(data.data('x-calibre/searches-rows'))).decode('ascii').split(',')))
         moved_searches = [self.searches[self.filtered_searches[r]] for r in rows]
         moved_searches_q = {id(s) for s in moved_searches}
         insert_at = max(0, min(row, len(self.filtered_searches)))
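
The dropped `rows.sort()` above is the real Python 3 fix in this hunk: `map()` now returns a lazy iterator with no `sort()` method, so the list-then-sort idiom breaks, while `sorted()` accepts any iterable on both Python 2 and 3. A minimal sketch with made-up data:

```python
data = '3,1,2'

rows = map(int, data.split(','))
print(hasattr(rows, 'sort'))   # False on Python 3: map() yields an iterator, not a list

rows = sorted(map(int, data.split(',')))  # works the same on Python 2 and 3
print(rows)                    # [1, 2, 3]
```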

View File

@@ -50,7 +50,7 @@ class JavaScriptLoader(object):
             compile_coffeescript
         except:
             self._dynamic_coffeescript = False
-            print ('WARNING: Failed to load serve_coffee, not compiling '
+            print('WARNING: Failed to load serve_coffee, not compiling '
                 'coffeescript dynamically.')
 
         self._cache = {}

View File

@@ -405,17 +405,17 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
             '(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size',
             ('rating', 'ratings', 'rating', 'ratings.rating'),
             ('tags', 'tags', 'tag', 'group_concat(name)'),
             '(SELECT text FROM comments WHERE book=books.id) comments',
             ('series', 'series', 'series', 'name'),
             ('publisher', 'publishers', 'publisher', 'name'),
             'series_index',
             'sort',
             'author_sort',
             '(SELECT group_concat(format) FROM data WHERE data.book=books.id) formats',
             'path',
             'pubdate',
             'uuid',
             'has_cover',
             ('au_map', 'authors', 'author',
                 'aum_sortconcat(link.id, authors.name, authors.sort, authors.link)'),
             'last_modified',

View File

@@ -652,7 +652,7 @@ class PostInstall:
             print('\n'+'_'*20, 'WARNING','_'*20)
             prints(*args, **kwargs)
             print('_'*50)
-            print ('\n')
+            print('\n')
             self.warnings.append((args, kwargs))
             sys.stdout.flush()

View File

@@ -144,5 +144,6 @@ def import_from_oxt(source_path, name, dest_dir=None, prefix='dic-'):
             num += 1
     return num
 
+
 if __name__ == '__main__':
     import_from_libreoffice_source_tree(sys.argv[-1])

View File

@@ -388,6 +388,7 @@ def split_name(name):
         return l[1:], r
     return None, l
 
+
 boolean_attributes = frozenset('allowfullscreen,async,autofocus,autoplay,checked,compact,controls,declare,default,defaultchecked,defaultmuted,defaultselected,defer,disabled,enabled,formnovalidate,hidden,indeterminate,inert,ismap,itemscope,loop,multiple,muted,nohref,noresize,noshade,novalidate,nowrap,open,pauseonexit,readonly,required,reversed,scoped,seamless,selected,sortable,truespeed,typemustmatch,visible'.split(',')) # noqa
 
 EPUB_TYPE_MAP = {k:'doc-' + k for k in (

View File

@@ -142,6 +142,7 @@ class dbus_property(object):
     def setter(self, fset):
         return self._copy(fset=fset)
 
+
 _logger = logging.getLogger('dbus.service')

View File

@@ -208,8 +208,7 @@ class CmapTable(UnknownTable):
         if self.bmp_table is None:
             raise UnsupportedFont('This font has no Windows BMP cmap subtable.'
                     ' Most likely a special purpose font.')
-        chars = list(set(chars))
-        chars.sort()
+        chars = sorted(set(chars))
         ans = OrderedDict()
         for i, glyph_id in enumerate(self.bmp_table.get_glyph_ids(chars)):
             if glyph_id > 0:
@@ -230,8 +229,7 @@ class CmapTable(UnknownTable):
     def set_character_map(self, cmap):
         self.version, self.num_tables = 0, 1
         fmt = b'>7H'
-        codes = list(iterkeys(cmap))
-        codes.sort()
+        codes = sorted(iterkeys(cmap))
         if not codes:
             start_code = [0xffff]

View File

@@ -167,6 +167,7 @@ def test_roundtrip(ff=None):
             raise ValueError('Roundtripping failed, size different (%d vs. %d)'%
                     (len(data), len(rd)))
 
+
 if __name__ == '__main__':
     import sys
     test_roundtrip(sys.argv[-1])

View File

@@ -132,6 +132,7 @@ class ReverseChainSingleSubstitution(UnknownLookupSubTable):
         gid_index_map = self.coverage.coverage_indices(glyph_ids)
         return {self.substitutes[i] for i in itervalues(gid_index_map)}
 
+
 subtable_map = {
     1: SingleSubstitution,
     2: MultipleSubstitution,

View File

@@ -489,12 +489,12 @@ def test():
 
 def main():
     import sys, os
     for f in sys.argv[1:]:
-        print (os.path.basename(f))
+        print(os.path.basename(f))
         raw = open(f, 'rb').read()
-        print (get_font_names(raw))
+        print(get_font_names(raw))
         characs = get_font_characteristics(raw)
-        print (characs)
-        print (panose_to_css_generic_family(characs[5]))
+        print(characs)
+        print(panose_to_css_generic_family(characs[5]))
         verify_checksums(raw)
         remove_embed_restriction(raw)

View File

@@ -150,7 +150,7 @@ def load_winfonts():
 def test_ttf_reading():
     for f in sys.argv[1:]:
         raw = open(f).read()
-        print (os.path.basename(f))
+        print(os.path.basename(f))
         get_font_characteristics(raw)
         print()
@@ -166,15 +166,16 @@ def test():
     else:
         w = load_winfonts()
-        print (w.w)
+        print(w.w)
         families = w.font_families()
-        print (families)
+        print(families)
         for family in families:
             prints(family + ':')
             for font, data in iteritems(w.fonts_for_family(family)):
                 prints(' ', font, data[0], data[1], len(data[2]))
-            print ()
+            print()
 
+
 if __name__ == '__main__':
     test()

View File

@@ -399,7 +399,7 @@ def run_main(func):
 
 def test_write():
-    print ('Printing to stdout in worker')
+    print('Printing to stdout in worker')
@@ -408,8 +408,8 @@ def test():
         while not p.results.empty():
             r = p.results.get()
             if not ignore_fail and r.is_terminal_failure:
-                print (r.result.err)
-                print (r.result.traceback)
+                print(r.result.err)
+                print(r.result.traceback)
                 raise SystemExit(1)
             ans[r.id] = r.result
         return ans
@@ -487,4 +487,4 @@ def test():
         p(i, 'import time;\ndef x(i):\n time.sleep(10000)', 'x', i)
     p.shutdown(), p.join()
-    print ('Tests all passed!')
+    print('Tests all passed!')

View File

@@ -189,7 +189,7 @@ def load_po(path):
     try:
         make(path, buf)
     except Exception:
-        print (('Failed to compile translations file: %s, ignoring') % path)
+        print(('Failed to compile translations file: %s, ignoring') % path)
         buf = None
     else:
         buf = io.BytesIO(buf.getvalue())

View File

@@ -316,5 +316,6 @@ class LocalZipFile(object):
             shutil.copyfileobj(temp, zipstream)
             zipstream.flush()
 
+
 if __name__ == '__main__':
     extractall(sys.argv[-1])

View File

@@ -98,7 +98,7 @@ def _compile_coffeescript(name):
         cs, errors = compile_coffeescript(f.read(), src)
         if errors:
             for line in errors:
-                print (line)
+                print(line)
             raise Exception('Failed to compile coffeescript'
                     ': %s'%src)
         return cs

View File

@@ -39,4 +39,6 @@ def points_for_word(w):
     ans = plugins['unicode_names'][0].codepoints_for_word(w.encode('utf-8')) | html_entities().get(w, set())
     points_for_word.cache[w] = ans
     return ans
+
+
 points_for_word.cache = {} # noqa

View File

@@ -93,7 +93,7 @@ def serialize_builtin_recipes():
         try:
             recipe_class = compile_recipe(stream.read())
         except:
-            print ('Failed to compile: %s'%f)
+            print('Failed to compile: %s'%f)
             raise
         if recipe_class is not None:
             recipe_mapping['builtin:'+rid] = recipe_class

View File

@@ -27,6 +27,7 @@ xpath_cache = OrderedDict()
 # Test that the string is not empty and does not contain whitespace
 is_non_whitespace = re.compile(r'^[^ \t\r\n\f]+$').match
 
+
 def get_parsed_selector(raw):
     try:
         return parse_cache[raw]
@@ -36,6 +37,7 @@ def get_parsed_selector(raw):
         parse_cache.pop(next(iter(parse_cache)))
     return ans
 
+
 def get_compiled_xpath(expr):
     try:
         return xpath_cache[expr]
@@ -45,12 +47,16 @@ def get_compiled_xpath(expr):
         xpath_cache.pop(next(iter(xpath_cache)))
     return ans
 
+
 class AlwaysIn(object):
 
     def __contains__(self, x):
         return True
+
+
 always_in = AlwaysIn()
 
+
 def trace_wrapper(func):
     @wraps(func)
     def trace(*args, **kwargs):
@@ -59,6 +65,7 @@ def trace_wrapper(func):
         return func(*args, **kwargs)
     return trace
 
+
 def normalize_language_tag(tag):
     """Return a list of normalized combinations for a `BCP 47` language tag.
@@ -80,9 +87,11 @@ def normalize_language_tag(tag):
             taglist.add('-'.join(base_tag + tags))
     return taglist
 
+
 INAPPROPRIATE_PSEUDO_CLASSES = frozenset([
     'active', 'after', 'disabled', 'visited', 'link', 'before', 'focus', 'first-letter', 'enabled', 'first-line', 'hover', 'checked', 'target'])
 
+
 class Select(object):
 
     '''
@@ -325,6 +334,7 @@ class Select(object):
 
 # Combinators {{{
+
 def select_combinedselector(cache, combined):
     """Translate a combined selector."""
     combinator = cache.combinator_mapping[combined.combinator]
@@ -334,6 +344,7 @@ def select_combinedselector(cache, combined):
     for item in cache.dispatch_map[combinator](cache, cache.iterparsedselector(combined.selector), right):
         yield item
 
+
 def select_descendant(cache, left, right):
     """right is a child, grand-child or further descendant of left"""
     right = always_in if right is None else frozenset(right)
@@ -342,6 +353,7 @@ def select_descendant(cache, left, right):
         if descendant in right:
             yield descendant
 
+
 def select_child(cache, left, right):
     """right is an immediate child of left"""
     right = always_in if right is None else frozenset(right)
@@ -350,6 +362,7 @@ def select_child(cache, left, right):
         if child in right:
             yield child
 
+
 def select_direct_adjacent(cache, left, right):
     """right is a sibling immediately after left"""
     right = always_in if right is None else frozenset(right)
@@ -359,6 +372,7 @@ def select_direct_adjacent(cache, left, right):
             yield sibling
             break
 
+
 def select_indirect_adjacent(cache, left, right):
     """right is a sibling after left, immediately or not"""
     right = always_in if right is None else frozenset(right)
@@ -368,6 +382,7 @@ def select_indirect_adjacent(cache, left, right):
             yield sibling
 # }}}
 
+
 def select_element(cache, selector):
     """A type or universal selector."""
     element = selector.element
@@ -378,6 +393,7 @@ def select_element(cache, selector):
     for elem in cache.element_map[ascii_lower(element)]:
         yield elem
 
+
 def select_hash(cache, selector):
     'An id selector'
     items = cache.id_map[ascii_lower(selector.id)]
@@ -386,6 +402,7 @@ def select_hash(cache, selector):
         if elem in items:
             yield elem
 
+
 def select_class(cache, selector):
     'A class selector'
     items = cache.class_map[ascii_lower(selector.class_name)]
@@ -394,6 +411,7 @@ def select_class(cache, selector):
         if elem in items:
             yield elem
 
+
 def select_negation(cache, selector):
     'Implement :not()'
     exclude = frozenset(cache.iterparsedselector(selector.subselector))
@@ -403,6 +421,7 @@ def select_negation(cache, selector):
 
 # Attribute selectors {{{
+
 def select_attrib(cache, selector):
     operator = cache.attribute_operator_mapping[selector.operator]
     items = frozenset(cache.dispatch_map[operator](cache, ascii_lower(selector.attrib), selector.value))
@@ -410,20 +429,24 @@ def select_attrib(cache, selector):
         if item in items:
             yield item
 
+
 def select_exists(cache, attrib, value=None):
     for elem_set in itervalues(cache.attrib_map[attrib]):
         for elem in elem_set:
             yield elem
 
+
 def select_equals(cache, attrib, value):
     for elem in cache.attrib_map[attrib][value]:
         yield elem
 
+
 def select_includes(cache, attrib, value):
     if is_non_whitespace(value):
         for elem in cache.attrib_space_map[attrib][value]:
             yield elem
 
+
 def select_dashmatch(cache, attrib, value):
     if value:
         for val, elem_set in iteritems(cache.attrib_map[attrib]):
@@ -431,6 +454,7 @@ def select_dashmatch(cache, attrib, value):
             for elem in elem_set:
                 yield elem
 
+
 def select_prefixmatch(cache, attrib, value):
     if value:
         for val, elem_set in iteritems(cache.attrib_map[attrib]):
@@ -438,6 +462,7 @@ def select_prefixmatch(cache, attrib, value):
             for elem in elem_set:
                 yield elem
 
+
 def select_suffixmatch(cache, attrib, value):
     if value:
         for val, elem_set in iteritems(cache.attrib_map[attrib]):
@@ -445,6 +470,7 @@ def select_suffixmatch(cache, attrib, value):
             for elem in elem_set:
                 yield elem
 
+
 def select_substringmatch(cache, attrib, value):
     if value:
         for val, elem_set in iteritems(cache.attrib_map[attrib]):
@@ -456,6 +482,7 @@ def select_substringmatch(cache, attrib, value):
 
 # Function selectors {{{
+
 def select_function(cache, function):
     """Select with a functional pseudo-class."""
     fname = function.name.replace('-', '_')
@@ -474,6 +501,7 @@ def select_function(cache, function):
         if func(cache, function, item):
             yield item
 
+
 def select_lang(cache, function):
     ' Implement :lang() '
     if function.argument_types() not in (['STRING'], ['IDENT']):
@@ -487,6 +515,7 @@ def select_lang(cache, function):
                 for elem in elem_set:
                     yield elem
 
+
 def select_nth_child(cache, function, elem):
     ' Implement :nth-child() '
     a, b = function.parsed_arguments
@@ -499,6 +528,7 @@ def select_nth_child(cache, function, elem):
     n = (num - b) / a
     return n.is_integer() and n > -1
 
+
 def select_nth_last_child(cache, function, elem):
     ' Implement :nth-last-child() '
     a, b = function.parsed_arguments
@@ -511,6 +541,7 @@ def select_nth_last_child(cache, function, elem):
     n = (num - b) / a
     return n.is_integer() and n > -1
 
+
 def select_nth_of_type(cache, function, elem):
     ' Implement :nth-of-type() '
     a, b = function.parsed_arguments
@@ -523,6 +554,7 @@ def select_nth_of_type(cache, function, elem):
     n = (num - b) / a
     return n.is_integer() and n > -1
 
+
 def select_nth_last_of_type(cache, function, elem):
     ' Implement :nth-last-of-type() '
     a, b = function.parsed_arguments
@@ -539,6 +571,7 @@ def select_nth_last_of_type(cache, function, elem):
 
 # Pseudo elements {{{
+
 def select_pseudo(cache, pseudo):
     try:
         func = cache.dispatch_map[pseudo.ident.replace('-', '_')]
@@ -565,50 +598,71 @@ def select_pseudo(cache, pseudo):
         if func(cache, item):
             yield item
 
+
 def select_first_child(cache, elem):
     try:
         return cache.sibling_count(elem) == 0
     except ValueError:
         return False
+
+
 select_first_child.is_pseudo = True
 
+
 def select_last_child(cache, elem):
     try:
         return cache.sibling_count(elem, before=False) == 0
     except ValueError:
         return False
+
+
 select_last_child.is_pseudo = True
 
+
 def select_only_child(cache, elem):
     try:
         return cache.all_sibling_count(elem) == 0
     except ValueError:
         return False
+

+
select_only_child.is_pseudo = True
 
+
 def select_first_of_type(cache, elem):
     try:
         return cache.sibling_count(elem, same_type=True) == 0
     except ValueError:
         return False
+
+
 select_first_of_type.is_pseudo = True
 
+
 def select_last_of_type(cache, elem):
     try:
         return cache.sibling_count(elem, before=False, same_type=True) == 0
     except ValueError:
         return False
+
+
 select_last_of_type.is_pseudo = True
 
+
 def select_only_of_type(cache, elem):
     try:
         return cache.all_sibling_count(elem, same_type=True) == 0
     except ValueError:
         return False
+
+
 select_only_of_type.is_pseudo = True
 
+
 def select_empty(cache, elem):
     return cache.is_empty(elem)
+
+
 select_empty.is_pseudo = True
 
 # }}}

View File

@@ -47,6 +47,7 @@ def make_parser(*features, **kwargs):
         parser_class = CSS21Parser
     return parser_class(**kwargs)
 
+
 def make_full_parser(**kwargs):
     ''' A parser that parses all supported CSS 3 modules in addition to CSS 2.1 '''
     features = tuple(iterkeys(PARSER_MODULES))

View File

@@ -11,6 +11,7 @@ from tinycss.tests import BaseTest
 from polyglot.builtins import iteritems
 
+
 class TestFonts3(BaseTest):

     def test_font_face(self):