mirror of https://github.com/kovidgoyal/calibre.git
pep8

This commit is contained in:
parent 65877258a0
commit af642adeb9
@@ -27,6 +27,7 @@ class FetchError(Exception):
     pass
 
+
 class closing(object):
 
     'Context to automatically close something at the end of a block.'
 
     def __init__(self, thing):
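The only change in this hunk is the added blank line before class closing(object): pycodestyle's E302 wants two blank lines around top-level definitions, with a single blank line between methods inside a class. A minimal sketch of the rule; everything below __init__ is assumed for illustration and is not part of the diff:

class FetchError(Exception):
    pass


class closing(object):  # two blank lines above a top-level definition (E302)

    'Context to automatically close something at the end of a block.'

    def __init__(self, thing):
        self.thing = thing

    def __enter__(self):  # one blank line between methods is sufficient
        return self.thing

    def __exit__(self, *exc_info):
        self.thing.close()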
@@ -203,7 +204,6 @@ class RecursiveFetcher(object):
             tag.extract()
         return self.preprocess_html_ext(soup)
 
-
     def fetch_url(self, url):
         data = None
         self.log.debug('Fetching', url)
@@ -246,8 +246,8 @@ class RecursiveFetcher(object):
                 data = response(f.read()+f.read())
                 data.newurl = f.geturl()
         except urllib2.URLError as err:
-            if hasattr(err, 'code') and responses.has_key(err.code):
-                raise FetchError, responses[err.code]
+            if hasattr(err, 'code') and err.code in responses:
+                raise FetchError(responses[err.code])
             if getattr(err, 'reason', [0])[0] == 104 or \
                 getattr(getattr(err, 'args', [None])[0], 'errno', None) in (-2,
                         -3): # Connection reset by peer or Name or service not known
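These are the only non-whitespace code changes in the commit: dict.has_key() (pycodestyle W601) becomes an in test, and the comma-style raise (W602) becomes a normal constructor call; both new spellings are also the only ones Python 3 accepts. A small self-contained sketch; the responses mapping here is a stand-in for the HTTP status table the real code uses, and FetchError mirrors the class shown earlier in the diff:

responses = {404: 'Not Found', 504: 'Gateway Timeout'}


class FetchError(Exception):
    pass


def check_status(code):
    # Old style, removed in Python 3:
    #   if responses.has_key(code): raise FetchError, responses[code]
    if code in responses:                      # membership test instead of has_key()
        raise FetchError(responses[code])      # raise by calling the exception class


try:
    check_status(404)
except FetchError as err:
    print(err)  # prints: Not Found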
@@ -262,7 +262,6 @@ class RecursiveFetcher(object):
         self.last_fetch_at = time.time()
         return data
 
-
     def start_fetch(self, url):
         soup = BeautifulSoup(u'<a href="'+url+'" />')
         self.log.debug('Downloading')
@@ -593,9 +592,16 @@ def option_parser(usage=_('%prog URL\n\nWhere URL is for example http://google.c
     parser.add_option('--encoding', default=None,
                       help=_('The character encoding for the websites you are trying to download. The default is to try and guess the encoding.'))
     parser.add_option('--match-regexp', default=[], action='append', dest='match_regexps',
-                      help=_('Only links that match this regular expression will be followed. This option can be specified multiple times, in which case as long as a link matches any one regexp, it will be followed. By default all links are followed.'))
+                      help=_('Only links that match this regular expression will be followed. '
+                          'This option can be specified multiple times, in which case as long '
+                          'as a link matches any one regexp, it will be followed. By default all '
+                          'links are followed.'))
     parser.add_option('--filter-regexp', default=[], action='append', dest='filter_regexps',
-                      help=_('Any link that matches this regular expression will be ignored. This option can be specified multiple times, in which case as long as any regexp matches a link, it will be ignored. By default, no links are ignored. If both filter regexp and match regexp are specified, then filter regexp is applied first.'))
+                      help=_('Any link that matches this regular expression will be ignored.'
+                          ' This option can be specified multiple times, in which case as'
+                          ' long as any regexp matches a link, it will be ignored. By'
+                          ' default, no links are ignored. If both filter regexp and match'
+                          ' regexp are specified, then filter regexp is applied first.'))
     parser.add_option('--dont-download-stylesheets', action='store_true', default=False,
                       help=_('Do not download CSS stylesheets.'), dest='no_stylesheets')
     parser.add_option('--verbose', help=_('Show detailed output information. Useful for debugging'),
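Only the two long help= strings change in this hunk: each is split across several source lines using Python's implicit concatenation of adjacent string literals, so the text shown to the user is unchanged while every source line stays within the pep8 length limit. A minimal sketch with a bare OptionParser; the real code builds its parser inside option_parser() and wraps the strings in _() for translation:

from optparse import OptionParser

parser = OptionParser()
# Adjacent string literals are joined at compile time, so this help text is
# identical to the original single long line.
parser.add_option('--match-regexp', default=[], action='append', dest='match_regexps',
                  help=('Only links that match this regular expression will be followed. '
                        'This option can be specified multiple times, in which case as long '
                        'as a link matches any one regexp, it will be followed. By default all '
                        'links are followed.'))

opts, args = parser.parse_args(['--match-regexp', r'example\.com'])
print(opts.match_regexps)  # ['example\\.com']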