Fix #2981 ([Errno 11] Resource temporarily unavailable error after conversion). Miscellaneous minor fixes.

This commit is contained in:
Kovid Goyal 2009-08-10 18:33:21 -06:00
commit 09e7550353
7 changed files with 35 additions and 48 deletions

View File

@ -29,7 +29,7 @@ class CLI(object):
def get_file(self, path, outfile, end_session=True):
path = self.munge_path(path)
with open(path, 'rb') as src:
shutil.copyfileobj(src, outfile, 10*1024*1024)
shutil.copyfileobj(src, outfile)
def put_file(self, infile, path, replace_file=False, end_session=True):
path = self.munge_path(path)
@ -44,10 +44,8 @@ class CLI(object):
d = os.path.dirname(path)
if not os.path.exists(d):
os.makedirs(d)
dest = open(path, 'wb')
shutil.copyfileobj(infile, dest, 10*1024*1024)
dest.flush()
dest.close()
with open(path, 'wb') as dest:
shutil.copyfileobj(infile, dest)
if close:
infile.close()

View File

@ -38,6 +38,9 @@ class LRFOptions(object):
self.use_metadata_cover = True
self.output = output
self.ignore_tables = opts.linearize_tables
if opts.disable_font_rescaling:
self.base_font_size = 0
else:
self.base_font_size = opts.base_font_size
self.blank_after_para = opts.insert_blank_line
self.use_spine = True

View File

@ -4,12 +4,13 @@ __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
Interface to isbndb.com. My key HLLXQX2A.
'''
import sys, re, socket
from urllib import urlopen, quote
import sys, re
from urllib import quote
from calibre.utils.config import OptionParser
from calibre.ebooks.metadata import MetaInformation
from calibre.ebooks.BeautifulSoup import BeautifulStoneSoup
from calibre import browser
BASE_URL = 'http://isbndb.com/api/books.xml?access_key=%(key)s&page_number=1&results=subjects,authors,texts&'
@ -20,12 +21,10 @@ def fetch_metadata(url, max=100, timeout=5.):
books = []
page_number = 1
total_results = sys.maxint
timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
br = browser()
while len(books) < total_results and max > 0:
try:
raw = urlopen(url).read()
raw = br.open(url, timeout=timeout).read()
except Exception, err:
raise ISBNDBError('Could not fetch ISBNDB metadata. Error: '+str(err))
soup = BeautifulStoneSoup(raw,
@ -40,8 +39,6 @@ def fetch_metadata(url, max=100, timeout=5.):
books.extend(book_list.findAll('bookdata'))
max -= 1
return books
finally:
socket.setdefaulttimeout(timeout)
class ISBNDBMetadata(MetaInformation):

View File

@ -38,17 +38,16 @@ def cover_from_isbn(isbn, timeout=5., username=None, password=None):
global browser
if browser is None:
browser = _browser()
_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
src = None
try:
return browser.open(OPENLIBRARY%isbn).read(), 'jpg'
return browser.open(OPENLIBRARY%isbn, timeout=timeout).read(), 'jpg'
except:
pass # Cover not found
if username and password:
login(username, password, force=False)
try:
src = browser.open('http://www.librarything.com/isbn/'+isbn).read().decode('utf-8', 'replace')
src = browser.open('http://www.librarything.com/isbn/'+isbn,
timeout=timeout).read().decode('utf-8', 'replace')
except Exception, err:
if isinstance(getattr(err, 'args', [None])[0], socket.timeout):
err = LibraryThingError(_('LibraryThing.com timed out. Try again later.'))
@ -66,8 +65,6 @@ def cover_from_isbn(isbn, timeout=5., username=None, password=None):
url = re.sub(r'_S[XY]\d+', '', url['src'])
cover_data = browser.open(url).read()
return cover_data, url.rpartition('.')[-1]
finally:
socket.setdefaulttimeout(_timeout)
def option_parser():
parser = OptionParser(usage=\

View File

@ -84,5 +84,5 @@ class MainWindow(QMainWindow):
msg = '<b>%s</b>:'%type.__name__ + unicode(str(value), 'utf8', 'replace')
error_dialog(self, _('ERROR: Unhandled exception'), msg, det_msg=fe,
show=True)
except:
except BaseException:
pass

View File

@ -192,8 +192,6 @@ class Server(Thread):
if len(self.pool) + len(self.workers) < self.pool_size:
try:
self.pool.append(self.launch_worker())
except CriticalError:
raise
except Exception:
pass

View File

@ -105,9 +105,8 @@ class RecursiveFetcher(object):
if not os.path.exists(self.base_dir):
os.makedirs(self.base_dir)
self.log = log
self.default_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(options.timeout)
self.verbose = options.verbose
self.timeout = options.timeout
self.encoding = options.encoding
self.browser = options.browser if hasattr(options, 'browser') else browser()
self.max_recursions = options.max_recursions
@ -194,7 +193,7 @@ class RecursiveFetcher(object):
url = urlparse.urlunparse(purl)
with self.browser_lock:
try:
with closing(self.browser.open(url)) as f:
with closing(self.browser.open(url, timeout=self.timeout)) as f:
data = response(f.read()+f.read())
data.newurl = f.geturl()
except urllib2.URLError, err:
@ -204,7 +203,7 @@ class RecursiveFetcher(object):
getattr(getattr(err, 'args', [None])[0], 'errno', None) == -2: # Connection reset by peer or Name or service not known
self.log.debug('Temporary error, retrying in 1 second')
time.sleep(1)
with closing(self.browser.open(url)) as f:
with closing(self.browser.open(url, timeout=self.timeout)) as f:
data = response(f.read()+f.read())
data.newurl = f.geturl()
else:
@ -450,11 +449,6 @@ class RecursiveFetcher(object):
print
return res
def __del__(self):
dt = getattr(self, 'default_timeout', None)
if dt is not None:
socket.setdefaulttimeout(dt)
def option_parser(usage=_('%prog URL\n\nWhere URL is for example http://google.com')):
parser = OptionParser(usage=usage)
parser.add_option('-d', '--base-dir',