Mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-09 03:04:10 -04:00)

py3: more work on universal __future__s

This commit is contained in:
parent 824f909627
commit 1f794c4cd2
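Every file touched below is normalized to the same __future__ header line. As a quick, illustrative sketch (not taken from the diff itself), this is what that line changes when the modules still run under Python 2:

    # Illustrative sketch only -- this snippet is not part of the commit.
    from __future__ import absolute_import, division, print_function, unicode_literals

    print(7 / 2)      # 3.5 even on Python 2, because of the division future
    print(type(''))   # unicode on Python 2, str on Python 3, thanks to unicode_literals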
@@ -1,3 +1,5 @@
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
"""

@@ -1,3 +1,5 @@
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
"""
@@ -1,99 +1,101 @@
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'

import re

NAME_MAP = {
    u'aliceblue': u'#F0F8FF',
    u'antiquewhite': u'#FAEBD7',
    u'aqua': u'#00FFFF',
    u'aquamarine': u'#7FFFD4',
    u'azure': u'#F0FFFF',
    u'beige': u'#F5F5DC',
    u'bisque': u'#FFE4C4',
    u'black': u'#000000',
    u'blanchedalmond': u'#FFEBCD',
    u'blue': u'#0000FF',
    u'brown': u'#A52A2A',
    u'burlywood': u'#DEB887',
    u'cadetblue': u'#5F9EA0',
    u'chartreuse': u'#7FFF00',
    u'chocolate': u'#D2691E',
    u'coral': u'#FF7F50',
    u'crimson': u'#DC143C',
    u'cyan': u'#00FFFF',
    u'darkblue': u'#00008B',
    u'darkgoldenrod': u'#B8860B',
    u'darkgreen': u'#006400',
    u'darkkhaki': u'#BDB76B',
    u'darkmagenta': u'#8B008B',
    u'darkolivegreen': u'#556B2F',
    u'darkorange': u'#FF8C00',
    u'darkorchid': u'#9932CC',
    u'darkred': u'#8B0000',
    u'darksalmon': u'#E9967A',
    u'darkslateblue': u'#483D8B',
    u'darkslategrey': u'#2F4F4F',
    u'darkviolet': u'#9400D3',
    u'deeppink': u'#FF1493',
    u'dodgerblue': u'#1E90FF',
    u'firebrick': u'#B22222',
    u'floralwhite': u'#FFFAF0',
    u'forestgreen': u'#228B22',
    u'fuchsia': u'#FF00FF',
    u'gainsboro': u'#DCDCDC',
    u'ghostwhite': u'#F8F8FF',
    u'gold': u'#FFD700',
    u'goldenrod': u'#DAA520',
    u'indianred ': u'#CD5C5C',
    u'indigo ': u'#4B0082',
    u'khaki': u'#F0E68C',
    u'lavenderblush': u'#FFF0F5',
    u'lawngreen': u'#7CFC00',
    u'lightblue': u'#ADD8E6',
    u'lightcoral': u'#F08080',
    u'lightgoldenrodyellow': u'#FAFAD2',
    u'lightgray': u'#D3D3D3',
    u'lightgrey': u'#D3D3D3',
    u'lightskyblue': u'#87CEFA',
    u'lightslategrey': u'#778899',
    u'lightsteelblue': u'#B0C4DE',
    u'lime': u'#87CEFA',
    u'linen': u'#FAF0E6',
    u'magenta': u'#FF00FF',
    u'maroon': u'#800000',
    u'mediumaquamarine': u'#66CDAA',
    u'mediumblue': u'#0000CD',
    u'mediumorchid': u'#BA55D3',
    u'mediumpurple': u'#9370D8',
    u'mediumseagreen': u'#3CB371',
    u'mediumslateblue': u'#7B68EE',
    u'midnightblue': u'#191970',
    u'moccasin': u'#FFE4B5',
    u'navajowhite': u'#FFDEAD',
    u'navy': u'#000080',
    u'oldlace': u'#FDF5E6',
    u'olive': u'#808000',
    u'orange': u'#FFA500',
    u'orangered': u'#FF4500',
    u'orchid': u'#DA70D6',
    u'paleturquoise': u'#AFEEEE',
    u'papayawhip': u'#FFEFD5',
    u'peachpuff': u'#FFDAB9',
    u'powderblue': u'#B0E0E6',
    u'rosybrown': u'#BC8F8F',
    u'royalblue': u'#4169E1',
    u'saddlebrown': u'#8B4513',
    u'sandybrown': u'#8B4513',
    u'seashell': u'#FFF5EE',
    u'sienna': u'#A0522D',
    u'silver': u'#C0C0C0',
    u'skyblue': u'#87CEEB',
    u'slategrey': u'#708090',
    u'snow': u'#FFFAFA',
    u'springgreen': u'#00FF7F',
    u'violet': u'#EE82EE',
    u'yellowgreen': u'#9ACD32'
    'aliceblue': '#F0F8FF',
    'antiquewhite': '#FAEBD7',
    'aqua': '#00FFFF',
    'aquamarine': '#7FFFD4',
    'azure': '#F0FFFF',
    'beige': '#F5F5DC',
    'bisque': '#FFE4C4',
    'black': '#000000',
    'blanchedalmond': '#FFEBCD',
    'blue': '#0000FF',
    'brown': '#A52A2A',
    'burlywood': '#DEB887',
    'cadetblue': '#5F9EA0',
    'chartreuse': '#7FFF00',
    'chocolate': '#D2691E',
    'coral': '#FF7F50',
    'crimson': '#DC143C',
    'cyan': '#00FFFF',
    'darkblue': '#00008B',
    'darkgoldenrod': '#B8860B',
    'darkgreen': '#006400',
    'darkkhaki': '#BDB76B',
    'darkmagenta': '#8B008B',
    'darkolivegreen': '#556B2F',
    'darkorange': '#FF8C00',
    'darkorchid': '#9932CC',
    'darkred': '#8B0000',
    'darksalmon': '#E9967A',
    'darkslateblue': '#483D8B',
    'darkslategrey': '#2F4F4F',
    'darkviolet': '#9400D3',
    'deeppink': '#FF1493',
    'dodgerblue': '#1E90FF',
    'firebrick': '#B22222',
    'floralwhite': '#FFFAF0',
    'forestgreen': '#228B22',
    'fuchsia': '#FF00FF',
    'gainsboro': '#DCDCDC',
    'ghostwhite': '#F8F8FF',
    'gold': '#FFD700',
    'goldenrod': '#DAA520',
    'indianred ': '#CD5C5C',
    'indigo ': '#4B0082',
    'khaki': '#F0E68C',
    'lavenderblush': '#FFF0F5',
    'lawngreen': '#7CFC00',
    'lightblue': '#ADD8E6',
    'lightcoral': '#F08080',
    'lightgoldenrodyellow': '#FAFAD2',
    'lightgray': '#D3D3D3',
    'lightgrey': '#D3D3D3',
    'lightskyblue': '#87CEFA',
    'lightslategrey': '#778899',
    'lightsteelblue': '#B0C4DE',
    'lime': '#87CEFA',
    'linen': '#FAF0E6',
    'magenta': '#FF00FF',
    'maroon': '#800000',
    'mediumaquamarine': '#66CDAA',
    'mediumblue': '#0000CD',
    'mediumorchid': '#BA55D3',
    'mediumpurple': '#9370D8',
    'mediumseagreen': '#3CB371',
    'mediumslateblue': '#7B68EE',
    'midnightblue': '#191970',
    'moccasin': '#FFE4B5',
    'navajowhite': '#FFDEAD',
    'navy': '#000080',
    'oldlace': '#FDF5E6',
    'olive': '#808000',
    'orange': '#FFA500',
    'orangered': '#FF4500',
    'orchid': '#DA70D6',
    'paleturquoise': '#AFEEEE',
    'papayawhip': '#FFEFD5',
    'peachpuff': '#FFDAB9',
    'powderblue': '#B0E0E6',
    'rosybrown': '#BC8F8F',
    'royalblue': '#4169E1',
    'saddlebrown': '#8B4513',
    'sandybrown': '#8B4513',
    'seashell': '#FFF5EE',
    'sienna': '#A0522D',
    'silver': '#C0C0C0',
    'skyblue': '#87CEEB',
    'slategrey': '#708090',
    'snow': '#FFFAFA',
    'springgreen': '#00FF7F',
    'violet': '#EE82EE',
    'yellowgreen': '#9ACD32'
}

hex_pat = re.compile(r'#(\d{2})(\d{2})(\d{2})')
@@ -1,4 +1,5 @@
from __future__ import print_function
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
"""

@@ -106,7 +107,7 @@ class HTMLConverter(object):
re.IGNORECASE), lambda m: '<br />'),

# Replace entities
(re.compile(u'&(\\S+?);'), partial(entity_to_unicode,
(re.compile(r'&(\S+?);'), partial(entity_to_unicode,
exceptions=['lt', 'gt', 'amp', 'quot'])),
# Remove comments from within style tags as they can mess up BeatifulSoup
(re.compile(r'(<style.*?</style>)', re.IGNORECASE|re.DOTALL),

@@ -397,7 +398,7 @@ class HTMLConverter(object):
def parse_css(self, style):
"""
Parse the contents of a <style> tag or .css file.
@param style: C{str(style)} should be the CSS to parse.
@param style: C{unicode_type(style)} should be the CSS to parse.
@return: A dictionary with one entry per selector where the key is the
selector name and the value is a dictionary of properties
"""

@@ -789,7 +790,7 @@ class HTMLConverter(object):
src = src.lstrip()
f = src[0]
next = 1
if f in ("'", '"', u'\u201c', u'\u2018', u'\u201d', u'\u2019'):
if f in ("'", '"', '\u201c', '\u2018', '\u201d', '\u2019'):
if len(src) >= 2:
next = 2
f = src[:2]

@@ -805,14 +806,14 @@ class HTMLConverter(object):

def append_text(src):
fp, key, variant = self.font_properties(css)
for x, y in [(u'\xad', ''), (u'\xa0', ' '), (u'\ufb00', 'ff'), (u'\ufb01', 'fi'), (u'\ufb02', 'fl'), (u'\ufb03', 'ffi'), (u'\ufb04', 'ffl')]:
for x, y in [('\xad', ''), ('\xa0', ' '), ('\ufb00', 'ff'), ('\ufb01', 'fi'), ('\ufb02', 'fl'), ('\ufb03', 'ffi'), ('\ufb04', 'ffl')]:
src = src.replace(x, y)

valigner = lambda x: x
if 'vertical-align' in css:
valign = css['vertical-align']
if valign in ('sup', 'super', 'sub'):
fp['fontsize'] = int(int(fp['fontsize']) * 5 / 3.0)
fp['fontsize'] = int(fp['fontsize']) * 5 // 3
valigner = Sub if valign == 'sub' else Sup
normal_font_size = int(fp['fontsize'])

@@ -864,12 +865,12 @@ class HTMLConverter(object):

if collapse_whitespace:
src = re.sub(r'\s{1,}', ' ', src)
if self.stripped_space and len(src) == len(src.lstrip(u' \n\r\t')):
if self.stripped_space and len(src) == len(src.lstrip(' \n\r\t')):
src = self.stripped_space + src
src, orig = src.rstrip(u' \n\r\t'), src
src, orig = src.rstrip(' \n\r\t'), src
self.stripped_space = orig[len(src):]
if len(self.previous_text) != len(self.previous_text.rstrip(u' \n\r\t')):
src = src.lstrip(u' \n\r\t')
if len(self.previous_text) != len(self.previous_text.rstrip(' \n\r\t')):
src = src.lstrip(' \n\r\t')
if len(src):
self.previous_text = src
append_text(src)

@@ -971,8 +972,8 @@ class HTMLConverter(object):
xsize=width, ysize=height)
line_height = (int(self.current_block.textStyle.attrs['baselineskip']) +
int(self.current_block.textStyle.attrs['linespace']))//10
line_height *= self.profile.dpi/72.
lines = int(ceil(float(height)/line_height))
line_height *= self.profile.dpi/72
lines = int(ceil(height/line_height))
dc = DropCaps(lines)
dc.append(Plot(im, xsize=ceil(width*factor), ysize=ceil(height*factor)))
self.current_para.append(dc)

@@ -1011,10 +1012,10 @@ class HTMLConverter(object):

self.process_alignment(tag_css)

if max(width, height) <= min(pwidth, pheight)/5.:
if max(width, height) <= min(pwidth, pheight)/5:
self.current_para.append(Plot(im, xsize=ceil(width*factor),
ysize=ceil(height*factor)))
elif height <= int(floor((2/3.)*pheight)):
elif height <= int(floor((2/3)*pheight)):
pb = self.current_block
self.end_current_para()
self.process_alignment(tag_css)

@@ -1032,7 +1033,7 @@ class HTMLConverter(object):
self.current_page.contents[0:1] = []
self.current_page.append(Canvas(width=pwidth,
height=height))
left = int(floor((pwidth - width)/2.))
left = int(floor((pwidth - width)/2))
self.current_page.contents[-1].put_object(
ImageBlock(self.images[path], xsize=width,
ysize=height, x1=width, y1=height,

@@ -1083,7 +1084,7 @@ class HTMLConverter(object):

s1, s2 = get('margin'), get('padding')

bl = str(self.current_block.blockStyle.attrs['blockwidth'])+'px'
bl = unicode_type(self.current_block.blockStyle.attrs['blockwidth'])+'px'

def set(default, one, two):
fval = None

@@ -1113,7 +1114,7 @@ class HTMLConverter(object):
val /= 2.
ans['sidemargin'] = int(val)
if 2*int(ans['sidemargin']) >= factor*int(self.current_block.blockStyle.attrs['blockwidth']):
ans['sidemargin'] = int((factor*int(self.current_block.blockStyle.attrs['blockwidth']))/2.)
ans['sidemargin'] = (factor*int(self.current_block.blockStyle.attrs['blockwidth'])) // 2

for prop in ('topskip', 'footskip', 'sidemargin'):
if isinstance(ans[prop], string_or_bytes):

@@ -1212,7 +1213,7 @@ class HTMLConverter(object):
ans = 120
if ans is not None:
ans += int(self.font_delta * 20)
ans = str(ans)
ans = unicode_type(ans)
return ans

family, weight, style, variant = 'serif', 'normal', 'normal', None

@@ -1268,7 +1269,7 @@ class HTMLConverter(object):

fs = int(t['fontsize'])
if fs > 120:
t['wordspace'] = int(fs/4.)
t['wordspace'] = fs // 4
t['baselineskip'] = fs + 20
return t, key, variant

@@ -1290,27 +1291,27 @@ class HTMLConverter(object):
unit = float(m.group(1))
if m.group(2) == '%':
normal = self.unit_convert(base_length)
result = (unit/100.0) * normal
result = (unit/100) * normal
elif m.group(2) == 'px':
result = unit
elif m.group(2) == 'in':
result = unit * dpi
elif m.group(2) == 'pt':
result = unit * dpi/72.
result = unit * dpi/72
elif m.group(2) == 'dpt':
result = unit * dpi/720.
result = unit * dpi/720
elif m.group(2) == 'em':
normal = self.unit_convert(base_length)
result = unit * normal
elif m.group(2) == 'pc':
result = unit * (dpi/72.) * 12
result = unit * (dpi/72) * 12
elif m.group(2) == 'mm':
result = unit * 0.04 * (dpi)
elif m.group(2) == 'cm':
result = unit * 0.4 * (dpi)
if result is not None:
if pts:
result = int(round(result * (720./dpi)))
result = int(round(result * (720/dpi)))
else:
result = int(round(result))
return result

@@ -1318,7 +1319,7 @@ class HTMLConverter(object):
def text_properties(self, tag_css):
indent = self.book.defaultTextStyle.attrs['parindent']
if 'text-indent' in tag_css:
bl = str(self.current_block.blockStyle.attrs['blockwidth'])+'px'
bl = unicode_type(self.current_block.blockStyle.attrs['blockwidth'])+'px'
if 'em' in tag_css['text-indent']:
bl = '10pt'
indent = self.unit_convert(unicode_type(tag_css['text-indent']), pts=True, base_length=bl)

@@ -1349,12 +1350,12 @@ class HTMLConverter(object):
''' Ensure padding and text-indent properties are respected '''
text_properties = self.text_properties(tag_css)
block_properties = self.block_properties(tag_css)
indent = (float(text_properties['parindent'])/10) * (self.profile.dpi/72.)
indent = (float(text_properties['parindent'])//10) * (self.profile.dpi/72)
margin = float(block_properties['sidemargin'])
# Since we're flattening the block structure, we need to ensure that text
# doesn't go off the left edge of the screen
if indent < 0 and margin + indent < 0:
text_properties['parindent'] = int(-margin * (72./self.profile.dpi) * 10)
text_properties['parindent'] = int(-margin * (72/self.profile.dpi) * 10)

align = self.get_alignment(tag_css)

@@ -1515,7 +1516,7 @@ class HTMLConverter(object):
elif not urlparse(tag['src'])[0]:
self.log.warn('Could not find image: '+tag['src'])
else:
self.log.debug("Failed to process: %s"%str(tag))
self.log.debug("Failed to process: %s"%unicode_type(tag))
elif tagname in ['style', 'link']:
ncss, npcss = {}, {}
if tagname == 'style':

@@ -1609,7 +1610,7 @@ class HTMLConverter(object):
in_ol = parent.name.lower() == 'ol'
break
parent = parent.parent
prepend = str(self.list_counter)+'. ' if in_ol else u'\u2022' + ' '
prepend = unicode_type(self.list_counter)+'. ' if in_ol else '\u2022' + ' '
self.current_para.append(Span(prepend))
self.process_children(tag, tag_css, tag_pseudo_css)
if in_ol:

@@ -1652,7 +1653,7 @@ class HTMLConverter(object):

if (self.anchor_ids and tag.has_attr('id')) or (self.book_designer and tag.get('class') in ('title', ['title'])):
if not tag.has_attr('id'):
tag['id'] = __appname__+'_id_'+str(self.id_counter)
tag['id'] = __appname__+'_id_'+unicode_type(self.id_counter)
self.id_counter += 1

tkey = self.target_prefix+tag['id']

@@ -1781,7 +1782,7 @@ class HTMLConverter(object):
else:
if xpos > 65535:
xpos = 65535
canvases[-1].put_object(block, xpos + int(delta/2.), ypos)
canvases[-1].put_object(block, xpos + delta//2, ypos)

for canvas in canvases:
self.current_page.append(canvas)

@@ -1819,7 +1820,7 @@ def process_file(path, options, logger):
options.profile.screen_height - options.profile.fudge
width, height = im.size
if width < pwidth:
corrf = float(pwidth)/width
corrf = pwidth/width
width, height = pwidth, int(corrf*height)

scaled, width, height = fit_image(width, height, pwidth, pheight)
@@ -1,4 +1,5 @@
from __future__ import print_function
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
import math, sys, re, numbers

@@ -125,7 +126,7 @@ class Cell(object):

def pts_to_pixels(self, pts):
pts = int(pts)
return ceil((float(self.conv.profile.dpi)/72.)*(pts/10.))
return ceil((float(self.conv.profile.dpi)/72)*(pts/10))

def minimum_width(self):
return max([self.minimum_tb_width(tb) for tb in self.text_blocks])

@@ -328,15 +329,15 @@ class Table(object):
min_widths = [self.minimum_width(i)+10 for i in range(cols)]
for i in range(len(widths)):
wp = self.width_percent(i)
if wp >= 0.:
widths[i] = max(min_widths[i], ceil((wp/100.) * (maxwidth - (cols-1)*self.colpad)))
if wp >= 0:
widths[i] = max(min_widths[i], ceil((wp/100) * (maxwidth - (cols-1)*self.colpad)))

itercount = 0

while sum(widths) > maxwidth-((len(widths)-1)*self.colpad) and itercount < 100:
for i in range(cols):
widths[i] = ceil((95./100.)*widths[i]) if \
ceil((95./100.)*widths[i]) >= min_widths[i] else widths[i]
widths[i] = ceil((95/100)*widths[i]) if \
ceil((95/100)*widths[i]) >= min_widths[i] else widths[i]
itercount += 1

return [i+self.colpad for i in widths]
@@ -1,4 +1,6 @@
#!/usr/bin/env python2
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'

@@ -73,7 +75,7 @@ def render_table(soup, table, css, base_dir, width, height, dpi, factor=1.0):
style = ''
for key, val in css.items():
style += key + ':%s;'%val
html = u'''\
html = '''\
<html>
<head>
%s
@@ -1,6 +1,6 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'

@@ -12,7 +12,7 @@ from copy import deepcopy, copy
from lxml import etree

from calibre import guess_type
from polyglot.builtins import as_bytes, map
from polyglot.builtins import as_bytes, map, unicode_type


class Canvas(etree.XSLTExtension):

@@ -70,9 +70,9 @@ class Canvas(etree.XSLTExtension):
height = self.styles.to_num(block.get("ysize", None))
img = div.makeelement('img')
if width is not None:
img.set('width', str(int(width)))
img.set('width', unicode_type(int(width)))
if height is not None:
img.set('height', str(int(height)))
img.set('height', unicode_type(int(height)))
ref = block.get('refstream', None)
if ref is not None:
imstr = self.doc.xpath('//ImageStream[@objid="%s"]'%ref)

@@ -263,13 +263,13 @@ class TextBlock(etree.XSLTExtension):
a.set('href', self.char_button_map[oid])
self.process_container(child, a)
elif child.tag == 'Plot':
xsize = self.styles.to_num(child.get('xsize', None), 166./720)
ysize = self.styles.to_num(child.get('ysize', None), 166./720)
xsize = self.styles.to_num(child.get('xsize', None), 166/720)
ysize = self.styles.to_num(child.get('ysize', None), 166/720)
img = self.root.makeelement('img')
if xsize is not None:
img.set('width', str(int(xsize)))
img.set('width', unicode_type(int(xsize)))
if ysize is not None:
img.set('height', str(int(ysize)))
img.set('height', unicode_type(int(ysize)))
ro = child.get('refobj', None)
if ro in self.plot_map:
img.set('src', self.plot_map[ro])

@@ -320,8 +320,7 @@ class Styles(etree.XSLTExtension):

def px_to_pt(self, px):
try:
px = float(px)
return px * 72./166.
return px * 72/166
except:
return None
@@ -1,6 +1,5 @@
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
''''''
@@ -1,4 +1,5 @@
from __future__ import print_function
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''

@@ -21,7 +22,7 @@ from calibre.ebooks.lrf.pylrs.pylrs import (
TextStyle
)
from calibre.utils.config import OptionParser
from polyglot.builtins import string_or_bytes
from polyglot.builtins import string_or_bytes, unicode_type


class LrsParser(object):

@@ -54,7 +55,7 @@ class LrsParser(object):
for key, val in tag.attrs:
if key in exclude:
continue
result[str(key)] = val
result[unicode_type(key)] = val
return result

def text_tag_to_element(self, tag):
@@ -7,7 +7,7 @@ import struct, array, zlib, io, collections, re
from calibre.ebooks.lrf import LRFParseError, PRS500_PROFILE
from calibre import entity_to_unicode, prepare_string_for_xml
from calibre.ebooks.lrf.tags import Tag
from polyglot.builtins import is_py3, unicode_type, string_or_bytes
from polyglot.builtins import is_py3, unicode_type

ruby_tags = {
0xF575: ['rubyAlignAndAdjust', 'W'],

@@ -83,7 +83,7 @@ class LRFObject(object):
if h[1] != '' and h[0] != '':
setattr(self, h[0], val)
else:
raise LRFParseError("Unknown tag in %s: %s" % (self.__class__.__name__, str(tag)))
raise LRFParseError("Unknown tag in %s: %s" % (self.__class__.__name__, unicode_type(tag)))

def __iter__(self):
for i in range(0):

@@ -121,7 +121,7 @@ class LRFContentObject(LRFObject):
def handle_tag(self, tag):
if tag.id in self.tag_map:
action = self.tag_map[tag.id]
if isinstance(action, string_or_bytes):
if isinstance(action, unicode_type):
func, args = action, ()
else:
func, args = action[0], (action[1],)

@@ -377,9 +377,9 @@ class Page(LRFStream):
0xF5D6: 'sound_stop',
}

def __init__(self, bytes, objects):
def __init__(self, byts, objects):
self.in_blockspace = False
LRFContentObject.__init__(self, bytes, objects)
LRFContentObject.__init__(self, byts, objects)

def link(self, tag):
self.close_blockspace()

@@ -524,7 +524,7 @@ class TextCSS(object):

fs = getattr(obj, 'fontsize', None)
if fs is not None:
ans += item('font-size: %fpt;'%(int(fs)/10.))
ans += item('font-size: %fpt;'%(int(fs)/10))
fw = getattr(obj, 'fontweight', None)
if fw is not None:
ans += item('font-weight: %s;'%('bold' if int(fw) >= 700 else 'normal'))

@@ -546,10 +546,10 @@ class TextCSS(object):
ans += item('text-align: %s;'%al)
lh = getattr(obj, 'linespace', None)
if lh is not None:
ans += item('text-align: %fpt;'%(int(lh)/10.))
ans += item('text-align: %fpt;'%(int(lh)/10))
pi = getattr(obj, 'parindent', None)
if pi is not None:
ans += item('text-indent: %fpt;'%(int(pi)/10.))
ans += item('text-indent: %fpt;'%(int(pi)/10))

return ans

@@ -880,7 +880,7 @@ class Text(LRFStream):
self.add_text(stream.read(tag.word))
elif tag.id in self.__class__.text_tags: # A Text tag
action = self.__class__.text_tags[tag.id]
if isinstance(action, string_or_bytes):
if isinstance(action, unicode_type):
getattr(self, action)(tag, stream)
else:
getattr(self, action[0])(tag, action[1])

@@ -904,7 +904,7 @@ class Text(LRFStream):
s = ''
open_containers = collections.deque()
for c in self.content:
if isinstance(c, string_or_bytes):
if isinstance(c, unicode_type):
s += prepare_string_for_xml(c).replace('\0', '')
elif c is None:
if open_containers:

@@ -930,7 +930,7 @@ class Text(LRFStream):
open_containers = collections.deque()
in_p = False
for c in self.content:
if isinstance(c, string_or_bytes):
if isinstance(c, unicode_type):
s += c
elif c is None:
if c.name == 'P':
@@ -1,3 +1,5 @@
from __future__ import absolute_import, division, print_function, unicode_literals

"""
This package contains code to generate ebooks in the SONY LRS/F format. It was
originally developed by Mike Higgins and has been extended and modified by Kovid
@@ -1,3 +1,5 @@
from __future__ import absolute_import, division, print_function, unicode_literals

""" elements.py -- replacements and helpers for ElementTree """

from polyglot.builtins import unicode_type, string_or_bytes

@@ -14,7 +16,7 @@ class ElementWriter(object):
self.outputEncodingName = outputEncodingName

def _encodeCdata(self, rawText):
if type(rawText) is str:
if isinstance(rawText, bytes):
rawText = rawText.decode(self.sourceEncoding)

text = rawText.replace("&", "&amp;")

@@ -23,20 +25,20 @@ class ElementWriter(object):
return text

def _writeAttribute(self, f, name, value):
f.write(u' %s="' % unicode_type(name))
f.write(' %s="' % unicode_type(name))
if not isinstance(value, string_or_bytes):
value = unicode_type(value)
value = self._encodeCdata(value)
value = value.replace('"', '&quot;')
f.write(value)
f.write(u'"')
f.write('"')

def _writeText(self, f, rawText):
text = self._encodeCdata(rawText)
f.write(text)

def _write(self, f, e):
f.write(u'<' + unicode_type(e.tag))
f.write('<' + unicode_type(e.tag))

attributes = e.items()
attributes.sort()

@@ -44,7 +46,7 @@ class ElementWriter(object):
self._writeAttribute(f, name, value)

if e.text is not None or len(e) > 0:
f.write(u'>')
f.write('>')

if e.text:
self._writeText(f, e.text)

@@ -52,11 +54,11 @@ class ElementWriter(object):
for e2 in e:
self._write(f, e2)

f.write(u'</%s>' % e.tag)
f.write('</%s>' % e.tag)
else:
if self.spaceBeforeClose:
f.write(' ')
f.write(u'/>')
f.write('/>')

if e.tail is not None:
self._writeText(f, e.tail)

@@ -67,10 +69,10 @@ class ElementWriter(object):
buffer = []
x.write = buffer.append
self.write(x)
return u''.join(buffer)
return ''.join(buffer)

def write(self, f):
if self.header:
f.write(u'<?xml version="1.0" encoding="%s"?>\n' % self.outputEncodingName)
f.write('<?xml version="1.0" encoding="%s"?>\n' % self.outputEncodingName)

self._write(f, self.e)
@@ -1,4 +1,6 @@
#!/usr/bin/env python2
from __future__ import absolute_import, division, print_function, unicode_literals

"""
pylrf.py -- very low level interface to create lrf files. See pylrs for
higher level interface that can use this module to render books to lrf.

@@ -10,7 +12,7 @@ import codecs
import os

from .pylrfopt import tagListOptimizer
from polyglot.builtins import iteritems, string_or_bytes
from polyglot.builtins import iteritems, string_or_bytes, unicode_type

PYLRF_VERSION = "1.0"

@@ -83,7 +85,7 @@ def writeWord(f, word):
if int(word) > 65535:
raise LrfError('Cannot encode a number greater than 65535 in a word.')
if int(word) < 0:
raise LrfError('Cannot encode a number < 0 in a word: '+str(word))
raise LrfError('Cannot encode a number < 0 in a word: '+unicode_type(word))
f.write(struct.pack("<H", int(word)))

@@ -510,7 +512,7 @@ class LrfObject(object):
raise LrfError("object name %s not recognized" % name)

def __str__(self):
return 'LRFObject: ' + self.name + ", " + str(self.objId)
return 'LRFObject: ' + self.name + ", " + unicode_type(self.objId)

def appendLrfTag(self, tag):
self.tags.append(tag)
@@ -1,3 +1,5 @@
from __future__ import absolute_import, division, print_function, unicode_literals

def _optimize(tagList, tagName, conversion):
# copy the tag of interest plus any text
newTagList = []

@@ -39,5 +41,3 @@ def tagListOptimizer(tagList):
_optimize(tagList, "fontsize", int)
_optimize(tagList, "fontweight", int)
return oldSize - len(tagList)
@@ -1,4 +1,5 @@
from __future__ import print_function
from __future__ import absolute_import, division, print_function, unicode_literals

# Copyright (c) 2007 Mike Higgins (Falstaff)
# Modifications from the original:
# Copyright (C) 2007 Kovid Goyal <kovid@kovidgoyal.net>

@@ -108,10 +109,9 @@ def appendTextElements(e, contentsList, se):
""" A helper function to convert text streams into the proper elements. """

def uconcat(text, newText, se):
if type(newText) != type(text):
if type(text) is str:
if isinstance(text, bytes):
text = text.decode(se)
else:
if isinstance(newText, bytes):
newText = newText.decode(se)

return text + newText

@@ -228,8 +228,8 @@ class LrsAttributes(object):
if name not in self.attrs and name not in alsoAllow:
raise LrsError("%s does not support setting %s" %
(self.__class__.__name__, name))
if type(value) is int:
value = str(value)
if isinstance(value, int):
value = unicode_type(value)
self.attrs[name] = value

@@ -333,13 +333,13 @@ class LrsObject(object):
def lrsObjectElement(self, name, objlabel="objlabel", labelName=None,
labelDecorate=True, **settings):
element = Element(name)
element.attrib["objid"] = str(self.objId)
element.attrib["objid"] = unicode_type(self.objId)
if labelName is None:
labelName = name
if labelDecorate:
label = "%s.%d" % (labelName, self.objId)
else:
label = str(self.objId)
label = unicode_type(self.objId)
element.attrib[objlabel] = label
element.attrib.update(settings)
return element

@@ -565,7 +565,7 @@ class Book(Delegator):
factor = base_font_size / old_base_font_size

def rescale(old):
return str(int(int(old) * factor))
return unicode_type(int(int(old) * factor))

text_blocks = list(main.get_all(lambda x: isinstance(x, TextBlock)))
for tb in text_blocks:

@@ -696,7 +696,7 @@ class TableOfContents(object):
def addTocEntry(self, tocLabel, textBlock):
if not isinstance(textBlock, (Canvas, TextBlock, ImageBlock, RuledLine)):
raise LrsError("TOC destination must be a Canvas, TextBlock, ImageBlock or RuledLine"+
" not a " + str(type(textBlock)))
" not a " + unicode_type(type(textBlock)))

if textBlock.parent is None:
raise LrsError("TOC text block must be already appended to a page")

@@ -746,8 +746,8 @@ class TocLabel(object):

def toElement(self, se):
return ElementWithText("TocLabel", self.label,
refobj=str(self.textBlock.objId),
refpage=str(self.textBlock.parent.objId))
refobj=unicode_type(self.textBlock.objId),
refpage=unicode_type(self.textBlock.parent.objId))


class BookInfo(object):

@@ -808,7 +808,7 @@ class DocInfo(object):
self.thumbnail = None
self.language = "en"
self.creator = None
self.creationdate = str(isoformat(date.today()))
self.creationdate = unicode_type(isoformat(date.today()))
self.producer = "%s v%s"%(__appname__, __version__)
self.numberofpages = "0"

@@ -832,7 +832,7 @@ class DocInfo(object):
docInfo.append(ElementWithText("Creator", self.creator))
docInfo.append(ElementWithText("CreationDate", self.creationdate))
docInfo.append(ElementWithText("Producer", self.producer))
docInfo.append(ElementWithText("SumPage", str(self.numberofpages)))
docInfo.append(ElementWithText("SumPage", unicode_type(self.numberofpages)))
return docInfo

@@ -1094,7 +1094,7 @@ class LrsStyle(LrsObject, LrsAttributes, LrsContainer):
self.elementName = elementName
self.objectsAppended = False
# self.label = "%s.%d" % (elementName, self.objId)
# self.label = str(self.objId)
# self.label = unicode_type(self.objId)
# self.parent = None

def update(self, settings):

@@ -1104,11 +1104,11 @@ class LrsStyle(LrsObject, LrsAttributes, LrsContainer):
self.attrs[name] = value

def getLabel(self):
return str(self.objId)
return unicode_type(self.objId)

def toElement(self, se):
element = Element(self.elementName, stylelabel=self.getLabel(),
objid=str(self.objId))
objid=unicode_type(self.objId))
element.attrib.update(self.attrs)
return element

@@ -1236,14 +1236,14 @@ class PageStyle(LrsStyle):
del settings[evenbase]
if evenObj.parent is None:
parent.append(evenObj)
settings[evenbase + "id"] = str(evenObj.objId)
settings[evenbase + "id"] = unicode_type(evenObj.objId)

if oddbase in settings:
oddObj = settings[oddbase]
del settings[oddbase]
if oddObj.parent is None:
parent.append(oddObj)
settings[oddbase + "id"] = str(oddObj.objId)
settings[oddbase + "id"] = unicode_type(oddObj.objId)

def appendReferencedObjects(self, parent):
if self.objectsAppended:

@@ -1580,7 +1580,7 @@ class DropCaps(LrsTextTag):
return self.text is None or not self.text.strip()

def toElement(self, se):
elem = Element('DrawChar', line=str(self.line))
elem = Element('DrawChar', line=unicode_type(self.line))
appendTextElements(elem, self.contents, se)
return elem

@@ -1656,7 +1656,7 @@ class JumpTo(LrsContainer):
self.textBlock = textBlock

def toElement(self, se):
return Element("JumpTo", refpage=str(self.textBlock.parent.objId), refobj=str(self.textBlock.objId))
return Element("JumpTo", refpage=unicode_type(self.textBlock.parent.objId), refobj=unicode_type(self.textBlock.objId))


class Plot(LrsSimpleChar1, LrsContainer):

@@ -1688,8 +1688,8 @@ class Plot(LrsSimpleChar1, LrsContainer):
parent.append(self.obj)

def toElement(self, se):
elem = Element('Plot', xsize=str(self.xsize), ysize=str(self.ysize),
refobj=str(self.obj.objId))
elem = Element('Plot', xsize=unicode_type(self.xsize), ysize=unicode_type(self.ysize),
refobj=unicode_type(self.obj.objId))
if self.adjustment:
elem.set('adjustment', self.adjustment)
return elem

@@ -1771,7 +1771,7 @@ class Space(LrsSimpleChar1, LrsContainer):
if self.xsize == 0:
return

return Element("Space", xsize=str(self.xsize))
return Element("Space", xsize=unicode_type(self.xsize))

def toLrfContainer(self, lrfWriter, container):
if self.xsize != 0:

@@ -1858,7 +1858,7 @@ class Span(LrsSimpleChar1, LrsContainer):
def toElement(self, se):
element = Element('Span')
for (key, value) in self.attrs.items():
element.set(key, str(value))
element.set(key, unicode_type(value))

appendTextElements(element, self.contents, se)
return element

@@ -1871,9 +1871,9 @@ class EmpLine(LrsTextTag, LrsSimpleChar1):
def __init__(self, text=None, emplineposition='before', emplinetype='solid'):
LrsTextTag.__init__(self, text, [LrsSimpleChar1])
if emplineposition not in self.__class__.emplinepositions:
raise LrsError('emplineposition for an EmpLine must be one of: '+str(self.__class__.emplinepositions))
raise LrsError('emplineposition for an EmpLine must be one of: '+unicode_type(self.__class__.emplinepositions))
if emplinetype not in self.__class__.emplinetypes:
raise LrsError('emplinetype for an EmpLine must be one of: '+str(self.__class__.emplinetypes))
raise LrsError('emplinetype for an EmpLine must be one of: '+unicode_type(self.__class__.emplinetypes))

self.emplinetype = emplinetype
self.emplineposition = emplineposition

@@ -1933,9 +1933,9 @@ class BlockSpace(LrsContainer):
element = Element("BlockSpace")

if self.xspace != 0:
element.attrib["xspace"] = str(self.xspace)
element.attrib["xspace"] = unicode_type(self.xspace)
if self.yspace != 0:
element.attrib["yspace"] = str(self.yspace)
element.attrib["yspace"] = unicode_type(self.yspace)

return element

@@ -1979,7 +1979,7 @@ class CharButton(LrsSimpleChar1, LrsContainer):
container.appendLrfTag(LrfTag("CharButtonEnd"))

def toElement(self, se):
cb = Element("CharButton", refobj=str(self.button.objId))
cb = Element("CharButton", refobj=unicode_type(self.button.objId))
appendTextElements(cb, self.contents, se)
return cb

@@ -2081,8 +2081,8 @@ class JumpButton(LrsObject, LrsContainer):
b = self.lrsObjectElement("Button")
pb = SubElement(b, "PushButton")
SubElement(pb, "JumpTo",
refpage=str(self.textBlock.parent.objId),
refobj=str(self.textBlock.objId))
refpage=unicode_type(self.textBlock.parent.objId),
refobj=unicode_type(self.textBlock.objId))
return b

@@ -2230,8 +2230,8 @@ class PutObj(LrsContainer):
self.content.objId)))

def toElement(self, se):
el = Element("PutObj", x1=str(self.x1), y1=str(self.y1),
refobj=str(self.content.objId))
el = Element("PutObj", x1=unicode_type(self.x1), y1=unicode_type(self.y1),
refobj=unicode_type(self.content.objId))
return el

@@ -2314,9 +2314,9 @@ class Image(LrsObject, LrsContainer, LrsAttributes):

def toElement(self, se):
element = self.lrsObjectElement("Image", **self.attrs)
element.set("refstream", str(self.refstream.objId))
element.set("refstream", unicode_type(self.refstream.objId))
for name in ["x0", "y0", "x1", "y1", "xsize", "ysize"]:
element.set(name, str(getattr(self, name)))
element.set(name, unicode_type(getattr(self, name)))
return element

def toLrf(self, lrfWriter):

@@ -2397,9 +2397,9 @@ class ImageBlock(LrsObject, LrsContainer, LrsAttributes):

def toElement(self, se):
element = self.lrsObjectElement("ImageBlock", **self.attrs)
element.set("refstream", str(self.refstream.objId))
element.set("refstream", unicode_type(self.refstream.objId))
for name in ["x0", "y0", "x1", "y1", "xsize", "ysize"]:
element.set(name, str(getattr(self, name)))
element.set(name, unicode_type(getattr(self, name)))
element.text = self.alttext
return element
@@ -1,3 +1,5 @@
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
''''''

@@ -5,7 +7,7 @@ __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
import struct

from calibre.ebooks.lrf import LRFParseError
from polyglot.builtins import unicode_type, string_or_bytes
from polyglot.builtins import unicode_type


class Tag(object):

@@ -196,7 +198,7 @@ class Tag(object):
self.id = 0xF500 + tag_id[0]

size, self.name = self.__class__.tags[tag_id[0]]
if isinstance(size, string_or_bytes):
if isinstance(size, unicode_type):
parser = getattr(self, size + '_parser')
self.contents = parser(stream)
else:
@@ -1,5 +1,7 @@
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'

@@ -26,9 +28,9 @@ except:
def string_to_authors(raw):
if not raw:
return []
raw = raw.replace('&&', u'\uffff')
raw = raw.replace('&&', '\uffff')
raw = _author_pat.sub('&', raw)
authors = [a.strip().replace(u'\uffff', '&') for a in raw.split('&')]
authors = [a.strip().replace('\uffff', '&') for a in raw.split('&')]
return [a for a in authors if a]

@@ -41,7 +43,7 @@ def authors_to_string(authors):

def remove_bracketed_text(src, brackets=None):
if brackets is None:
brackets = {u'(': u')', u'[': u']', u'{': u'}'}
brackets = {'(': ')', '[': ']', '{': '}'}
from collections import Counter
counts = Counter()
buf = []

@@ -56,12 +58,12 @@ def remove_bracketed_text(src, brackets=None):
counts[idx] -= 1
elif sum(itervalues(counts)) < 1:
buf.append(char)
return u''.join(buf)
return ''.join(buf)


def author_to_author_sort(author, method=None):
if not author:
return u''
return ''
sauthor = remove_bracketed_text(author).strip()
tokens = sauthor.split()
if len(tokens) < 2:

@@ -72,13 +74,13 @@ def author_to_author_sort(author, method=None):
ltoks = frozenset(x.lower() for x in tokens)
copy_words = frozenset(x.lower() for x in tweaks['author_name_copywords'])
if ltoks.intersection(copy_words):
method = u'copy'
method = 'copy'

if method == u'copy':
if method == 'copy':
return author

prefixes = {force_unicode(y).lower() for y in tweaks['author_name_prefixes']}
prefixes |= {y+u'.' for y in prefixes}
prefixes |= {y+'.' for y in prefixes}
while True:
if not tokens:
return author

@@ -89,9 +91,9 @@ def author_to_author_sort(author, method=None):
break

suffixes = {force_unicode(y).lower() for y in tweaks['author_name_suffixes']}
suffixes |= {y+u'.' for y in suffixes}
suffixes |= {y+'.' for y in suffixes}

suffix = u''
suffix = ''
while True:
if not tokens:
return author

@@ -103,7 +105,7 @@ def author_to_author_sort(author, method=None):
break
suffix = suffix.strip()

if method == u'comma' and u',' in u''.join(tokens):
if method == 'comma' and ',' in ''.join(tokens):
return author

atokens = tokens[-1:] + tokens[:-1]

@@ -111,10 +113,10 @@ def author_to_author_sort(author, method=None):
if suffix:
atokens.append(suffix)

if method != u'nocomma' and num_toks > 1:
atokens[0] += u','
if method != 'nocomma' and num_toks > 1:
atokens[0] += ','

return u' '.join(atokens)
return ' '.join(atokens)


def authors_to_sort_string(authors):

@@ -154,7 +156,7 @@ def get_title_sort_pat(lang=None):
return ans


_ignore_starts = u'\'"'+u''.join(codepoint_to_chr(x) for x in
_ignore_starts = '\'"'+''.join(codepoint_to_chr(x) for x in
list(range(0x2018, 0x201e))+[0x2032, 0x2033])


@@ -187,7 +189,7 @@ coding = list(zip(

def roman(num):
if num <= 0 or num >= 4000 or int(num) != num:
return str(num)
return unicode_type(num)
result = []
for d, r in coding:
while num >= d:

@@ -202,7 +204,7 @@ def fmt_sidx(i, fmt='%.2f', use_roman=False):
try:
i = float(i)
except TypeError:
return str(i)
return unicode_type(i)
if int(i) == float(i):
return roman(int(i)) if use_roman else '%d'%int(i)
return fmt%i

@@ -312,7 +314,7 @@ class ResourceCollection(object):
return '[%s]'%', '.join(resources)

def __repr__(self):
return str(self)
return unicode_type(self)

def append(self, resource):
if not isinstance(resource, Resource):

@@ -374,7 +376,7 @@ def check_isbn13(isbn):
check = 10 - (sum(products)%10)
if check == 10:
check = 0
if str(check) == isbn[12]:
if unicode_type(check) == isbn[12]:
return isbn
except Exception:
pass

@@ -430,12 +432,9 @@ def check_doi(doi):
return None


def rating_to_stars(value, allow_half_stars=False, star=u'★', half=u'½'):
def rating_to_stars(value, allow_half_stars=False, star='★', half='½'):
r = max(0, min(int(value or 0), 10))
if allow_half_stars:
ans = star * (r // 2)
if r % 2:
if allow_half_stars and r % 2:
ans += half
else:
ans = star * int(r/2.0)
return ans
@@ -1,6 +1,6 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'

@@ -10,7 +10,7 @@ import os
from contextlib import closing

from calibre.customize import FileTypePlugin
from polyglot.builtins import filter
from polyglot.builtins import filter, unicode_type


def is_comic(list_of_names):

@@ -142,7 +142,7 @@ def get_comic_book_info(d, mi, series_index='volume'):
from datetime import date
try:
dt = date(puby, 6 if pubm is None else pubm, 15)
dt = parse_only_date(str(dt))
dt = parse_only_date(unicode_type(dt))
mi.pubdate = dt
except:
pass
@@ -1,5 +1,6 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
@@ -1,5 +1,6 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'

@@ -24,7 +25,7 @@ SIMPLE_SET = frozenset(SIMPLE_GET - {'identifiers'})

def human_readable(size, precision=2):
""" Convert a size in bytes into megabytes """
return ('%.'+str(precision)+'f'+ 'MB') % ((size/(1024.*1024.)),)
return ('%.'+unicode_type(precision)+'f'+ 'MB') % (size/(1024*1024),)


NULL_VALUES = {

@@ -607,13 +608,13 @@ class Metadata(object):
return authors_to_string(self.authors)

def format_tags(self):
return u', '.join([unicode_type(t) for t in sorted(self.tags, key=sort_key)])
return ', '.join([unicode_type(t) for t in sorted(self.tags, key=sort_key)])

def format_rating(self, v=None, divide_by=1.0):
def format_rating(self, v=None, divide_by=1):
if v is None:
if self.rating is not None:
return unicode_type(self.rating/divide_by)
return u'None'
return 'None'
return unicode_type(v/divide_by)

def format_field(self, key, series_with_index=True):

@@ -662,7 +663,7 @@ class Metadata(object):
elif datatype == 'bool':
res = _('Yes') if res else _('No')
elif datatype == 'rating':
res = u'%.2g'%(res/2.0)
res = '%.2g'%(res/2)
elif datatype in ['int', 'float']:
try:
fmt = cmeta['display'].get('number_format', None)

@@ -702,7 +703,7 @@ class Metadata(object):
elif datatype == 'datetime':
res = format_date(res, fmeta['display'].get('date_format','dd MMM yyyy'))
elif datatype == 'rating':
res = u'%.2g'%(res/2.0)
res = '%.2g'%(res/2)
elif key == 'size':
res = human_readable(res)
return (name, unicode_type(res), orig_res, fmeta)

@@ -719,7 +720,7 @@ class Metadata(object):
ans = []

def fmt(x, y):
ans.append(u'%-20s: %s'%(unicode_type(x), unicode_type(y)))
ans.append('%-20s: %s'%(unicode_type(x), unicode_type(y)))

fmt('Title', self.title)
if self.title_sort:

@@ -733,14 +734,14 @@ class Metadata(object):
if getattr(self, 'book_producer', False):
fmt('Book Producer', self.book_producer)
if self.tags:
fmt('Tags', u', '.join([unicode_type(t) for t in self.tags]))
fmt('Tags', ', '.join([unicode_type(t) for t in self.tags]))
if self.series:
fmt('Series', self.series + ' #%s'%self.format_series_index())
if not self.is_null('languages'):
fmt('Languages', ', '.join(self.languages))
if self.rating is not None:
fmt('Rating', (u'%.2g'%(float(self.rating)/2.0)) if self.rating
else u'')
fmt('Rating', ('%.2g'%(float(self.rating)/2)) if self.rating
else '')
if self.timestamp is not None:
fmt('Timestamp', isoformat(self.timestamp))
if self.pubdate is not None:

@@ -748,7 +749,7 @@ class Metadata(object):
if self.rights is not None:
fmt('Rights', unicode_type(self.rights))
if self.identifiers:
fmt('Identifiers', u', '.join(['%s:%s'%(k, v) for k, v in
fmt('Identifiers', ', '.join(['%s:%s'%(k, v) for k, v in
iteritems(self.identifiers)]))
if self.comments:
fmt('Comments', self.comments)

@@ -758,7 +759,7 @@ class Metadata(object):
if val:
(name, val) = self.format_field(key)
fmt(name, unicode_type(val))
return u'\n'.join(ans)
return '\n'.join(ans)

def to_html(self):
'''

@@ -772,10 +773,10 @@ class Metadata(object):
ans += [(_('Producer'), unicode_type(self.book_producer))]
ans += [(_('Comments'), unicode_type(self.comments))]
ans += [('ISBN', unicode_type(self.isbn))]
ans += [(_('Tags'), u', '.join([unicode_type(t) for t in self.tags]))]
ans += [(_('Tags'), ', '.join([unicode_type(t) for t in self.tags]))]
if self.series:
ans += [(_('Series'), unicode_type(self.series) + ' #%s'%self.format_series_index())]
ans += [(_('Languages'), u', '.join(self.languages))]
ans += [(_('Languages'), ', '.join(self.languages))]
if self.timestamp is not None:
ans += [(_('Timestamp'), unicode_type(isoformat(self.timestamp, as_utc=False, sep=' ')))]
if self.pubdate is not None:

@@ -788,8 +789,8 @@ class Metadata(object):
(name, val) = self.format_field(key)
ans += [(name, val)]
for i, x in enumerate(ans):
ans[i] = u'<tr><td><b>%s</b></td><td>%s</td></tr>'%x
return u'<table>%s</table>'%u'\n'.join(ans)
ans[i] = '<tr><td><b>%s</b></td><td>%s</td></tr>'%x
return '<table>%s</table>'%'\n'.join(ans)

if ispy3:
__str__ = __unicode__representation__
@@ -1,9 +1,10 @@
from __future__ import absolute_import, division, print_function, unicode_literals

'''
Created on 4 Jun 2010

@author: charles
'''
from __future__ import print_function

import json, traceback
from datetime import datetime, time
@@ -1,4 +1,5 @@
from __future__ import with_statement
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
@@ -1,5 +1,5 @@
#!/usr/bin/env python2
from __future__ import print_function, with_statement
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
@@ -1,5 +1,7 @@
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2011, Roman Mukhin <ramses_ru at hotmail.com>, '\
'2008, Anatoly Shipitsin <norguhtar at gmail.com>'
@@ -1,7 +1,7 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import absolute_import, division, print_function, unicode_literals

from __future__ import print_function
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

@@ -20,7 +20,7 @@ from polyglot.urllib import parse_qs, quote_plus
URL = \
"http://ww2.kdl.org/libcat/WhatsNext.asp?AuthorLastName={0}&AuthorFirstName=&SeriesName=&BookTitle={1}&CategoryID=0&cmdSearch=Search&Search=1&grouping="

_ignore_starts = u'\'"'+u''.join(codepoint_to_chr(x) for x in list(range(0x2018, 0x201e))+[0x2032, 0x2033])
_ignore_starts = '\'"'+''.join(codepoint_to_chr(x) for x in list(range(0x2018, 0x201e))+[0x2032, 0x2033])


def get_series(title, authors, timeout=60):
@@ -1,3 +1,5 @@
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''
@ -1,4 +1,5 @@
from __future__ import with_statement
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'

@ -10,7 +11,7 @@ from calibre.ebooks.metadata.opf2 import OPF
from calibre import isbytestring
from calibre.customize.ui import get_file_type_metadata, set_file_type_metadata
from calibre.ebooks.metadata import MetaInformation, string_to_authors
from polyglot.builtins import getcwd
from polyglot.builtins import getcwd, unicode_type

_METADATA_PRIORITIES = [
'html', 'htm', 'xhtml', 'xhtm',

@ -230,7 +231,7 @@ def forked_read_metadata(path, tdir):
f.seek(0, 2)
sz = f.tell()
with lopen(os.path.join(tdir, 'size.txt'), 'wb') as s:
s.write(str(sz).encode('ascii'))
s.write(unicode_type(sz).encode('ascii'))
f.seek(0)
mi = get_metadata(f, fmt)
if mi.cover_data and mi.cover_data[1]:
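The str -> unicode_type swap in forked_read_metadata keeps the intermediate value as text on both interpreters before it is explicitly encoded for the size file. A sketch of what the polyglot.builtins alias presumably boils down to (an assumption for illustration, not the shipped source):

    import sys

    if sys.version_info.major >= 3:
        unicode_type = str
    else:
        unicode_type = unicode  # noqa: F821 -- py2-only name

    sz = 123456
    # Text first, bytes only at the I/O boundary; identical result on py2 and py3.
    payload = unicode_type(sz).encode('ascii')
    assert payload == b'123456'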
@ -2,8 +2,7 @@
Retrieve and modify in-place Mobipocket book metadata.
'''

from __future__ import with_statement
from __future__ import print_function
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal kovid@kovidgoyal.net and ' \

@ -398,11 +397,11 @@ class MetadataUpdater(object):
self.original_exth_records.get(501, None) == 'EBOK' and
not added_501 and not share_not_sync):
from uuid import uuid4
update_exth_record((113, str(uuid4())))
update_exth_record((113, unicode_type(uuid4())))
# Add a 112 record with actual UUID
if getattr(mi, 'uuid', None):
update_exth_record((112,
(u"calibre:%s" % mi.uuid).encode(self.codec, 'replace')))
("calibre:%s" % mi.uuid).encode(self.codec, 'replace')))
if 503 in self.original_exth_records:
update_exth_record((503, mi.title.encode(self.codec, 'replace')))
@ -1,5 +1,4 @@
#!/usr/bin/python2
# -*- coding: utf-8 -*-
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
#
# Copyright (C) 2006 Søren Roug, European Environment Agency

@ -19,7 +18,7 @@
#
# Contributor(s):
#
from __future__ import division
from __future__ import absolute_import, division, print_function, unicode_literals

import zipfile, re, io
import xml.sax.saxutils
@ -37,22 +36,22 @@ from polyglot.builtins import string_or_bytes
whitespace = re.compile(r'\s+')

fields = {
'title': (DCNS,u'title'),
'description': (DCNS,u'description'),
'subject': (DCNS,u'subject'),
'creator': (DCNS,u'creator'),
'date': (DCNS,u'date'),
'language': (DCNS,u'language'),
'generator': (METANS,u'generator'),
'initial-creator': (METANS,u'initial-creator'),
'keyword': (METANS,u'keyword'),
'editing-duration': (METANS,u'editing-duration'),
'editing-cycles': (METANS,u'editing-cycles'),
'printed-by': (METANS,u'printed-by'),
'print-date': (METANS,u'print-date'),
'creation-date': (METANS,u'creation-date'),
'user-defined': (METANS,u'user-defined'),
# 'template': (METANS,u'template'),
'title': (DCNS, 'title'),
'description': (DCNS, 'description'),
'subject': (DCNS, 'subject'),
'creator': (DCNS, 'creator'),
'date': (DCNS, 'date'),
'language': (DCNS, 'language'),
'generator': (METANS, 'generator'),
'initial-creator': (METANS, 'initial-creator'),
'keyword': (METANS, 'keyword'),
'editing-duration': (METANS, 'editing-duration'),
'editing-cycles': (METANS, 'editing-cycles'),
'printed-by': (METANS, 'printed-by'),
'print-date': (METANS, 'print-date'),
'creation-date': (METANS, 'creation-date'),
'user-defined': (METANS, 'user-defined'),
# 'template': (METANS, 'template'),
}
@ -107,8 +106,8 @@ class odfmetaparser(xml.sax.saxutils.XMLGenerator):
# location and not at the end
# if name == (METANS,u'template'):
# self._data = [attrs.get((XLINKNS,u'title'),'')]
if name == (METANS,u'user-defined'):
field = attrs.get((METANS,u'name'))
if name == (METANS, 'user-defined'):
field = attrs.get((METANS, 'name'))
if field in self.deletefields:
self.output.dowrite = False
elif field in self.yieldfields:
@ -120,15 +119,15 @@ class odfmetaparser(xml.sax.saxutils.XMLGenerator):

def endElementNS(self, name, qname):
field = name
if name == (METANS,u'user-defined'):
if name == (METANS, 'user-defined'):
field = self._tag
if name == (OFFICENS,u'meta'):
if name == (OFFICENS, 'meta'):
for k,v in self.addfields.items():
if len(v) > 0:
if isinstance(k, string_or_bytes):
xml.sax.saxutils.XMLGenerator.startElementNS(self,(METANS,u'user-defined'),None,{(METANS,u'name'):k})
xml.sax.saxutils.XMLGenerator.startElementNS(self,(METANS, 'user-defined'),None,{(METANS, 'name'):k})
xml.sax.saxutils.XMLGenerator.characters(self, v)
xml.sax.saxutils.XMLGenerator.endElementNS(self, (METANS,u'user-defined'),None)
xml.sax.saxutils.XMLGenerator.endElementNS(self, (METANS, 'user-defined'),None)
else:
xml.sax.saxutils.XMLGenerator.startElementNS(self, k, None, {})
xml.sax.saxutils.XMLGenerator.characters(self, v)

@ -140,7 +139,7 @@ class odfmetaparser(xml.sax.saxutils.XMLGenerator):
self.seenfields[texttag] = self.data()
# OpenOffice has the habit to capitalize custom properties, so we add a
# lowercase version for easy access
if texttag[:4].lower() == u'opf.':
if texttag[:4].lower() == 'opf.':
self.seenfields[texttag.lower()] = self.data()

if field in self.deletefields:
@ -245,7 +244,7 @@ def read_cover(stream, zin, mi, opfmeta, extract_cover):
except Exception:
continue
imgnum += 1
if opfmeta and frm.getAttribute('name').lower() == u'opf.cover':
if opfmeta and frm.getAttribute('name').lower() == 'opf.cover':
cover_href = i_href
cover_data = (fmt, raw)
cover_frame = frm.getAttribute('name') # could have upper case
@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals

'''
Read meta information from pdb files.

@ -59,4 +60,3 @@ def set_metadata(stream, mi):

stream.seek(0)
stream.write('%s\x00' % re.sub('[^-A-Za-z0-9 ]+', '_', mi.title).ljust(31, '\x00')[:31].encode('ascii', 'replace'))
@ -1,4 +1,5 @@
from __future__ import with_statement
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''Read meta information from PDF files'''

@ -47,9 +48,9 @@ def read_info(outputdir, get_cover):
return None

for line in info_raw.splitlines():
if u':' not in line:
if ':' not in line:
continue
field, val = line.partition(u':')[::2]
field, val = line.partition(':')[::2]
val = val.strip()
if field and val:
ans[field] = val.strip()
@ -1,4 +1,6 @@
#!/usr/bin/env python2
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'

@ -34,5 +36,3 @@ def get_metadata(stream):
mi.timestamp = None
return mi
raise ValueError('No ebook found in RAR archive')
@ -1,4 +1,5 @@
from __future__ import print_function
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2008, Ashish Kulkarni <kulkarni.ashish@gmail.com>'
'''Read meta information from RB files'''

@ -19,7 +20,7 @@ def get_metadata(stream):
stream.seek(0)
try:
if not stream.read(14) == MAGIC:
print(u'Couldn\'t read RB header from file', file=sys.stderr)
print('Couldn\'t read RB header from file', file=sys.stderr)
return mi
stream.read(10)
@ -34,7 +35,7 @@ def get_metadata(stream):
if flag == 2:
break
else:
print(u'Couldn\'t find INFO from RB file', file=sys.stderr)
print('Couldn\'t find INFO from RB file', file=sys.stderr)
return mi

stream.seek(offset)

@ -48,7 +49,7 @@ def get_metadata(stream):
elif key.strip() == 'AUTHOR':
mi.authors = string_to_authors(value)
except Exception as err:
msg = u'Couldn\'t read metadata from rb: %s with error %s'%(mi.title, unicode_type(err))
msg = 'Couldn\'t read metadata from rb: %s with error %s'%(mi.title, unicode_type(err))
prints(msg, file=sys.stderr)
raise
return mi
@ -1,6 +1,6 @@
'''Read meta information from SNB files'''

from __future__ import with_statement
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2010, Li Fanxi <lifanxi@freemindworld.com>'
@ -1,9 +1,7 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
@ -208,18 +208,18 @@ class Worker(Thread): # Get details {{{
12: ['diciembre'],
},
'jp': {
1: [u'1月'],
2: [u'2月'],
3: [u'3月'],
4: [u'4月'],
5: [u'5月'],
6: [u'6月'],
7: [u'7月'],
8: [u'8月'],
9: [u'9月'],
10: [u'10月'],
11: [u'11月'],
12: [u'12月'],
1: ['1月'],
2: ['2月'],
3: ['3月'],
4: ['4月'],
5: ['5月'],
6: ['6月'],
7: ['7月'],
8: ['8月'],
9: ['9月'],
10: ['10月'],
11: ['11月'],
12: ['12月'],
},
'nl': {
1: ['januari'], 2: ['februari'], 3: ['maart'], 5: ['mei'], 6: ['juni'], 7: ['juli'], 8: ['augustus'], 10: ['oktober'],
@ -294,10 +294,10 @@ class Worker(Thread): # Get details {{{
'ita': ('Italian', 'Italiano'),
'deu': ('German', 'Deutsch'),
'spa': ('Spanish', 'Espa\xf1ol', 'Espaniol'),
'jpn': ('Japanese', u'日本語'),
'jpn': ('Japanese', '日本語'),
'por': ('Portuguese', 'Português'),
'nld': ('Dutch', 'Nederlands',),
'chs': ('Chinese', u'中文', u'简体中文'),
'chs': ('Chinese', '中文', '简体中文'),
}
self.lang_map = {}
for code, names in lm.items():
@ -1134,9 +1134,9 @@ class Amazon(Source):

# magic parameter to enable Japanese Shift_JIS encoding.
if domain == 'jp':
q['__mk_ja_JP'] = u'カタカナ'
q['__mk_ja_JP'] = 'カタカナ'
if domain == 'nl':
q['__mk_nl_NL'] = u'ÅMÅŽÕÑ'
q['__mk_nl_NL'] = 'ÅMÅŽÕÑ'
if 'field-keywords' not in q:
q['field-keywords'] = ''
for f in 'field-isbn field-title field-author'.split():
@ -1604,18 +1604,18 @@ def manual_tests(domain, **kw): # {{{
all_tests['jp'] = [ # {{{
( # Adult filtering test
{'identifiers': {'isbn': '4799500066'}},
[title_test(u'Bitch Trap'), ]
[title_test('Bitch Trap'), ]
),

( # isbn -> title, authors
{'identifiers': {'isbn': '9784101302720'}},
[title_test(u'精霊の守り人',
exact=True), authors_test([u'上橋 菜穂子'])
[title_test('精霊の守り人',
exact=True), authors_test(['上橋 菜穂子'])
]
),
( # title, authors -> isbn (will use Shift_JIS encoding in query.)
{'title': u'考えない練習',
'authors': [u'小池 龍之介']},
{'title': '考えない練習',
'authors': ['小池 龍之介']},
[isbn_test('9784093881067'), ]
),
] # }}}
@ -220,12 +220,10 @@ class GoogleBooks(Source):
if author_tokens:
q += ('+' if q else '') + build_term('author', author_tokens)

if isinstance(q, type(u'')):
q = q.encode('utf-8')
if not q:
return None
return BASE_URL + urlencode({
'q': q,
'q': q.encode('utf-8'),
'max-results': 20,
'start-index': 1,
'min-viewability': 'none',
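In the GoogleBooks hunk above, the early isinstance(q, type(u''))/encode step disappears and the query is encoded once, at the urlencode call, so q stays text everywhere else in the function. A small illustration of why that is safe (shown with the py3 stdlib urlencode; calibre goes through its polyglot.urllib wrapper, and the BASE_URL value here is only a stand-in):

    from urllib.parse import urlencode

    BASE_URL = 'https://books.google.com/books/feeds/volumes?'  # illustrative value

    q = 'intitle:考えない練習'
    # urlencode percent-quotes bytes values directly, so encoding at the call site
    # produces the same query string as encoding q up front.
    url = BASE_URL + urlencode({
        'q': q.encode('utf-8'),
        'max-results': 20,
        'start-index': 1,
    })
    print(url)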
@ -1,5 +1,6 @@
#!/usr/bin/env python2
from __future__ import print_function
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid at kovidgoyal.net>'
@ -69,7 +70,7 @@ class TOC(list):
def __str__(self):
lines = ['TOC: %s#%s %s'%(self.href, self.fragment, self.text)]
for child in self:
c = str(child).splitlines()
c = unicode_type(child).splitlines()
for l in c:
lines.append('\t'+l)
return '\n'.join(lines)
@ -245,8 +246,8 @@ class TOC(list):
def render(self, stream, uid):
root = E.ncx(
E.head(
E.meta(name='dtb:uid', content=str(uid)),
E.meta(name='dtb:depth', content=str(self.depth())),
E.meta(name='dtb:uid', content=unicode_type(uid)),
E.meta(name='dtb:depth', content=unicode_type(self.depth())),
E.meta(name='dtb:generator', content='%s (%s)'%(__appname__,
__version__)),
E.meta(name='dtb:totalPageCount', content='0'),

@ -271,7 +272,7 @@ class TOC(list):
E.content(src=unicode_type(np.href)+(('#' + unicode_type(np.fragment))
if np.fragment else '')),
id=item_id,
playOrder=str(np.play_order)
playOrder=unicode_type(np.play_order)
)
au = getattr(np, 'author', None)
if au:
@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
@ -1,7 +1,7 @@
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import absolute_import, division, print_function, unicode_literals

from __future__ import print_function
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
@ -1,4 +1,5 @@
from __future__ import with_statement
from __future__ import absolute_import, division, print_function, unicode_literals

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'