Mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-07 10:14:46 -04:00)

py3: More unicode porting

This commit is contained in:
parent fab8c8f2d4
commit c2e36408d3
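The recurring pattern in this commit is to replace bare str() calls with the unicode_type and native_string_type aliases from calibre's polyglot.builtins compatibility layer, and to add unicode_literals future imports so that string literals are text on Python 2 as well. As a rough sketch of what such aliases look like (an illustration of the idiom only, not calibre's actual module), assuming just that unicode_type names the text type and native_string_type names the running interpreter's own str:

    # Illustrative py2/py3 string-type aliases; the names follow the imports
    # used in the hunks below, the implementation shown here is an assumption.
    import sys

    if sys.version_info.major >= 3:
        unicode_type = str          # the text type on Python 3
        native_string_type = str    # str is already the native string type
    else:
        unicode_type = unicode      # noqa: F821 -- the text type on Python 2
        native_string_type = str    # the bytes-based Python 2 str

    # The porting rule applied below is roughly:
    #   str(x) -> unicode_type(x)        where text is wanted
    #   str(x) -> native_string_type(x)  where the platform str is wanted,
    #                                    e.g. __str__/__repr__ on Python 2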
@@ -5,7 +5,7 @@ __copyright__ = '2008, Kovid Goyal <kovid@kovidgoyal.net>'
 __docformat__ = 'restructuredtext en'
 
 import sys, os, re, time, random, warnings
-from polyglot.builtins import codepoint_to_chr, unicode_type, range, hasenv
+from polyglot.builtins import codepoint_to_chr, unicode_type, range, hasenv, native_string_type
 from math import floor
 from functools import partial
 
@@ -183,7 +183,7 @@ def prints(*args, **kwargs):
 arg = repr(arg)
 if not isinstance(arg, bytes):
 try:
-arg = str(arg)
+arg = native_string_type(arg)
 except ValueError:
 arg = unicode_type(arg)
 if isinstance(arg, unicode_type):
@@ -334,7 +334,7 @@ def get_parsed_proxy(typ='http', debug=True):
 traceback.print_exc()
 else:
 if debug:
-prints('Using http proxy', str(ans))
+prints('Using http proxy', unicode_type(ans))
 return ans
 
 
@@ -623,10 +623,10 @@ def as_unicode(obj, enc=preferred_encoding):
 if not isbytestring(obj):
 try:
 obj = unicode_type(obj)
-except:
+except Exception:
 try:
-obj = str(obj)
-except:
+obj = native_string_type(obj)
+except Exception:
 obj = repr(obj)
 return force_unicode(obj, enc=enc)
 
@@ -645,7 +645,7 @@ def human_readable(size, sep=' '):
 if size < (1 << ((i + 1) * 10)):
 divisor, suffix = (1 << (i * 10)), candidate
 break
-size = str(float(size)/divisor)
+size = unicode_type(float(size)/divisor)
 if size.find(".") > -1:
 size = size[:size.find(".")+2]
 if size.endswith('.0'):
@@ -302,7 +302,7 @@ class Connection(apsw.Connection): # {{{
 self.createscalarfunction('title_sort', title_sort, 1)
 self.createscalarfunction('author_to_author_sort',
 _author_to_author_sort, 1)
-self.createscalarfunction('uuid4', lambda: str(uuid.uuid4()),
+self.createscalarfunction('uuid4', lambda: unicode_type(uuid.uuid4()),
 0)
 
 # Dummy functions for dynamically created filters
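For context, createscalarfunction is the apsw API for exposing a Python callable to SQLite as a SQL function. A minimal standalone sketch of the uuid4() registration touched above (not calibre's Connection subclass; str stands in for unicode_type, which is str on Python 3):

    import uuid
    import apsw

    # Register a Python callable as the zero-argument SQL function uuid4()
    conn = apsw.Connection(':memory:')
    conn.createscalarfunction('uuid4', lambda: str(uuid.uuid4()), 0)
    print(next(conn.cursor().execute('SELECT uuid4()'))[0])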
@@ -517,11 +517,11 @@ class DB(object):
 from calibre.library.coloring import migrate_old_rule
 old_rules = []
 for i in range(1, 6):
-col = self.prefs.get('column_color_name_'+str(i), None)
-templ = self.prefs.get('column_color_template_'+str(i), None)
+col = self.prefs.get('column_color_name_%d' % i, None)
+templ = self.prefs.get('column_color_template_%d' % i, None)
 if col and templ:
 try:
-del self.prefs['column_color_name_'+str(i)]
+del self.prefs['column_color_name_%d' % i]
 rules = migrate_old_rule(self.field_metadata, templ)
 for templ in rules:
 old_rules.append((col, templ))
@@ -827,7 +827,7 @@ class DB(object):
 # account for the series index column. Field_metadata knows that
 # the series index is one larger than the series. If you change
 # it here, be sure to change it there as well.
-self.FIELD_MAP[str(data['num'])+'_index'] = base = base+1
+self.FIELD_MAP[unicode_type(data['num'])+'_index'] = base = base+1
 self.field_metadata.set_field_record_index(label_+'_index', base,
 prefer_custom=True)
 
@@ -1256,7 +1256,7 @@ class DB(object):
 if getattr(self, '_library_id_', None) is None:
 ans = self.conn.get('SELECT uuid FROM library_id', all=False)
 if ans is None:
-ans = str(uuid.uuid4())
+ans = unicode_type(uuid.uuid4())
 self.library_id = ans
 else:
 self._library_id_ = ans
@@ -9,7 +9,7 @@ __docformat__ = 'restructuredtext en'
 
 import copy
 from functools import partial
-from polyglot.builtins import iteritems, unicode_type, map
+from polyglot.builtins import iteritems, unicode_type, map, native_string_type
 
 from calibre.constants import ispy3
 from calibre.ebooks.metadata import author_to_author_sort
@@ -59,7 +59,7 @@ class Tag(object):
 return self.string_representation
 
 def __repr__(self):
-return str(self)
+return native_string_type(self)
 
 __calibre_serializable__ = True
 
@@ -13,7 +13,7 @@ from calibre import prints
 from calibre.db.cli.utils import str_width
 from calibre.ebooks.metadata import authors_to_string
 from calibre.utils.date import isoformat
-from polyglot.builtins import iteritems
+from polyglot.builtins import iteritems, unicode_type
 
 readonly = True
 version = 0 # change this if you change signature of implementation()
@@ -309,7 +309,7 @@ List the books available in the calibre database.
 def main(opts, args, dbctx):
 afields = set(FIELDS) | {'id'}
 if opts.fields.strip():
-fields = [str(f.strip().lower()) for f in opts.fields.split(',')]
+fields = [unicode_type(f.strip().lower()) for f in opts.fields.split(',')]
 else:
 fields = []
 
@@ -15,7 +15,7 @@ from copy import deepcopy
 from calibre.ebooks.metadata.book.base import Metadata, SIMPLE_GET, TOP_LEVEL_IDENTIFIERS, NULL_VALUES, ALL_METADATA_FIELDS
 from calibre.ebooks.metadata.book.formatter import SafeFormat
 from calibre.utils.date import utcnow
-from polyglot.builtins import unicode_type
+from polyglot.builtins import unicode_type, native_string_type
 
 # Lazy format metadata retrieval {{{
 '''
@@ -39,7 +39,7 @@ class MutableBase(object):
 
 @resolved
 def __str__(self):
-return str(self._values)
+return native_string_type(self._values)
 
 @resolved
 def __repr__(self):
@@ -1,6 +1,8 @@
 #!/usr/bin/env python2
 # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
 
+from __future__ import unicode_literals
+
 __license__ = 'GPL v3'
 __copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
 __docformat__ = 'restructuredtext en'
@@ -18,10 +20,10 @@ from calibre.utils.date import utcfromtimestamp
 from calibre import isbytestring, force_unicode
 from polyglot.builtins import iteritems, filter
 
-NON_EBOOK_EXTENSIONS = frozenset([
+NON_EBOOK_EXTENSIONS = frozenset((
 'jpg', 'jpeg', 'gif', 'png', 'bmp',
 'opf', 'swp', 'swo'
-])
+))
 
 
 class Restorer(Cache):
@@ -124,7 +126,7 @@ class Restore(Thread):
 self.create_cc_metadata()
 self.restore_books()
 if self.successes == 0 and len(self.dirs) > 0:
-raise Exception(('Something bad happened'))
+raise Exception('Something bad happened')
 self.replace_db()
 except:
 self.tb = traceback.format_exc()
@@ -599,7 +599,7 @@ class SchemaUpgrade(object):
 existing = frozenset(map(int, custom_recipes))
 if id_ in existing:
 id_ = max(existing) + 1000
-id_ = str(id_)
+id_ = unicode_type(id_)
 fname = custom_recipe_filename(id_, title)
 custom_recipes[id_] = (title, fname)
 if isinstance(script, unicode_type):
@@ -10,7 +10,7 @@ __docformat__ = 'restructuredtext en'
 import unittest, os, shutil, tempfile, atexit, gc, time
 from functools import partial
 from io import BytesIO
-from polyglot.builtins import map
+from polyglot.builtins import map, unicode_type
 
 rmtree = partial(shutil.rmtree, ignore_errors=True)
 
@@ -83,7 +83,7 @@ class BaseTest(unittest.TestCase):
 atexit.register(rmtree, self.clone_dir)
 self.clone_count = 0
 self.clone_count += 1
-dest = os.path.join(self.clone_dir, str(self.clone_count))
+dest = os.path.join(self.clone_dir, unicode_type(self.clone_count))
 shutil.copytree(library_path, dest)
 return dest
 
@@ -114,5 +114,3 @@ class BaseTest(unittest.TestCase):
 attr1, attr2 = mi1.get_extra(attr), mi2.get_extra(attr)
 self.assertEqual(attr1, attr2,
 '%s {#extra} not the same: %r != %r'%(attr, attr1, attr2))
-
-
@@ -14,7 +14,7 @@ from io import BytesIO
 from calibre.ebooks.metadata import author_to_author_sort, title_sort
 from calibre.utils.date import UNDEFINED_DATE
 from calibre.db.tests.base import BaseTest, IMG
-from polyglot.builtins import iteritems, itervalues
+from polyglot.builtins import iteritems, itervalues, unicode_type
 
 
 class WritingTest(BaseTest):
@@ -655,11 +655,11 @@ class WritingTest(BaseTest):
 def test_set_author_data(self): # {{{
 cache = self.init_cache()
 adata = cache.author_data()
-ldata = {aid:str(aid) for aid in adata}
+ldata = {aid:unicode_type(aid) for aid in adata}
 self.assertEqual({1,2,3}, cache.set_link_for_authors(ldata))
 for c in (cache, self.init_cache()):
 self.assertEqual(ldata, {aid:d['link'] for aid, d in iteritems(c.author_data())})
-self.assertEqual({3}, cache.set_link_for_authors({aid:'xxx' if aid == max(adata) else str(aid) for aid in adata}),
+self.assertEqual({3}, cache.set_link_for_authors({aid:'xxx' if aid == max(adata) else unicode_type(aid) for aid in adata}),
 'Setting the author link to the same value as before, incorrectly marked some books as dirty')
 sdata = {aid:'%s, changed' % aid for aid in adata}
 self.assertEqual({1,2,3}, cache.set_sort_for_authors(sdata))
@@ -1,5 +1,5 @@
 #!/usr/bin/env python2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
 
 __license__ = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
@@ -1,5 +1,5 @@
 #!/usr/bin/env python2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
 __license__ = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
 __docformat__ = 'restructuredtext en'
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 __license__ = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
 
@@ -13,6 +15,8 @@ def get_download_filename_from_response(response):
 purl = urlparse(response.geturl())
 last_part_name = unquote(purl.path.split('/')[-1])
 disposition = response.info().get('Content-disposition', '')
+if isinstance(disposition, bytes):
+disposition = disposition.decode('utf-8', 'replace')
 for p in disposition.split(';'):
 if 'filename' in p:
 if '*=' in disposition:
@@ -1,2 +1,3 @@
+from __future__ import unicode_literals
 __license__ = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
@@ -1,5 +1,5 @@
 #!/usr/bin/env python2
-from __future__ import print_function, with_statement
+from __future__ import print_function, with_statement, unicode_literals
 
 __license__ = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
@@ -293,7 +293,7 @@ class RecursiveFetcher(object):
 return data
 
 def start_fetch(self, url):
-soup = BeautifulSoup(u'<a href="'+url+'" />')
+soup = BeautifulSoup('<a href="'+url+'" />')
 res = self.process_links(soup, url, 0, into_dir='')
 self.log.debug(url, 'saved to', res)
 return res
@@ -346,7 +346,7 @@ class RecursiveFetcher(object):
 except Exception:
 self.log.exception('Could not fetch stylesheet ', iurl)
 continue
-stylepath = os.path.join(diskpath, 'style'+str(c)+'.css')
+stylepath = os.path.join(diskpath, 'style'+unicode_type(c)+'.css')
 with self.stylemap_lock:
 self.stylemap[iurl] = stylepath
 with open(stylepath, 'wb') as x:
@@ -354,7 +354,7 @@ class RecursiveFetcher(object):
 tag['href'] = stylepath
 else:
 for ns in tag.findAll(text=True):
-src = str(ns)
+src = unicode_type(ns)
 m = self.__class__.CSS_IMPORT_PATTERN.search(src)
 if m:
 iurl = m.group(1)
@@ -370,7 +370,7 @@ class RecursiveFetcher(object):
 self.log.exception('Could not fetch stylesheet ', iurl)
 continue
 c += 1
-stylepath = os.path.join(diskpath, 'style'+str(c)+'.css')
+stylepath = os.path.join(diskpath, 'style'+unicode_type(c)+'.css')
 with self.stylemap_lock:
 self.stylemap[iurl] = stylepath
 with open(stylepath, 'wb') as x:
@@ -404,14 +404,14 @@ class RecursiveFetcher(object):
 continue
 try:
 data = self.fetch_url(iurl)
-if data == 'GIF89a\x01':
+if data == b'GIF89a\x01':
 # Skip empty GIF files as PIL errors on them anyway
 continue
 except Exception:
 self.log.exception('Could not fetch image ', iurl)
 continue
 c += 1
-fname = ascii_filename('img'+str(c))
+fname = ascii_filename('img'+unicode_type(c))
 data = self.preprocess_image_ext(data, iurl) if self.preprocess_image_ext is not None else data
 if data is None:
 continue
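The added b prefix in the GIF check above matters because on Python 3 comparing bytes with a str is always False (it does not raise), so without it the empty-GIF test would silently never match the raw bytes returned by fetch_url. A minimal illustration:

    data = b'GIF89a\x01'
    print(data == 'GIF89a\x01')   # False on Python 3: bytes never equal str
    print(data == b'GIF89a\x01')  # True: bytes compared with bytes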
@@ -507,7 +507,7 @@ class RecursiveFetcher(object):
 continue
 if self.files > self.max_files:
 return res
-linkdir = 'link'+str(c) if into_dir else ''
+linkdir = 'link'+unicode_type(c) if into_dir else ''
 linkdiskpath = os.path.join(diskpath, linkdir)
 if not os.path.exists(linkdiskpath):
 os.mkdir(linkdiskpath)