# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
#    <div>foo</div><div>bar</div>
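#    but stripped in, for example, <div><p>foo</p></div>, where all of the
#    remaining content is nested underneath the single outer div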
@@ -804,7 +851,8 @@ class _FeedParserMixin:
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
- if depth == 0: break
+ if depth == 0:
+ break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
@@ -812,13 +860,14 @@ class _FeedParserMixin:
# Ensure each piece is a str for Python 3
for (i, v) in enumerate(pieces):
- if not isinstance(v, basestring):
+ if not isinstance(v, unicode):
pieces[i] = v.decode('utf-8')
- output = ''.join(pieces)
+ output = u''.join(pieces)
if stripWhitespace:
output = output.strip()
- if not expectingText: return output
+ if not expectingText:
+ return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
@@ -841,8 +890,11 @@ class _FeedParserMixin:
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
- if self.lookslikehtml(output):
- self.contentparams['type']='text/html'
+ # some feed formats require consumers to guess
+ # whether the content is html or plain text
+ if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain':
+ if self.lookslikehtml(output):
+ self.contentparams['type'] = u'text/html'
# remove temporary cruft from contentparams
try:
@@ -854,16 +906,16 @@ class _FeedParserMixin:
except KeyError:
pass
- is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types
+ is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
- output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))
+ output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html'))
# parse microformats
# (must do this before sanitizing because some microformats
# rely on elements that we sanitize)
- if is_htmlish and element in ['content', 'description', 'summary']:
+ if PARSE_MICROFORMATS and is_htmlish and element in ['content', 'description', 'summary']:
mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
if mfresults:
for tag in mfresults.get('tags', []):
@@ -879,31 +931,28 @@ class _FeedParserMixin:
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
- output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html'))
+ output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html'))
- if self.encoding and type(output) != type(u''):
- try:
- output = unicode(output, self.encoding)
- except:
- pass
+ if self.encoding and not isinstance(output, unicode):
+ output = output.decode(self.encoding, 'ignore')
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
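# for example, u'café' serialized as utf-8 but mistakenly decoded as
# iso-8859-1 shows up as u'cafÃ©'; encoding it back to iso-8859-1 and
# decoding it as utf-8 recovers the original u'café'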
- if self.encoding in ('utf-8', 'utf-8_INVALID_PYTHON_3') and type(output) == type(u''):
+ if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode):
try:
- output = unicode(output.encode('iso-8859-1'), 'utf-8')
- except:
+ output = output.encode('iso-8859-1').decode('utf-8')
+ except (UnicodeEncodeError, UnicodeDecodeError):
pass
# map win-1252 extensions to the proper code points
- if type(output) == type(u''):
- output = u''.join([c in _cp1252.keys() and _cp1252[c] or c for c in output])
+ if isinstance(output, unicode):
+ output = output.translate(_cp1252)
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
- if element == 'title' and self.hasTitle:
+ if element == 'title' and -1 < self.title_depth <= self.depth:
return output
# store output in appropriate place(s)
@@ -925,7 +974,10 @@ class _FeedParserMixin:
else:
if element == 'description':
element = 'summary'
- self.entries[-1][element] = output
+ old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
+ if old_value_depth is None or self.depth <= old_value_depth:
+ self.property_depth_map[self.entries[-1]][element] = self.depth
+ self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
@@ -948,7 +1000,8 @@ class _FeedParserMixin:
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
- if self.lang: self.lang=self.lang.replace('_','-')
+ if self.lang:
+ self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
@@ -966,21 +1019,20 @@ class _FeedParserMixin:
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
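# e.g. u'<p>one &amp; two</p>' qualifies (a close tag built from acceptable
# elements plus a known entity), while u'1 < 2 & 3' does not and is left
# as plain text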
- def lookslikehtml(self, s):
- if self.version.startswith('atom'): return
- if self.contentparams.get('type','text/html') != 'text/plain': return
-
- # must have a close tag or a entity reference to qualify
- if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)): return
+ @staticmethod
+ def lookslikehtml(s):
+ # must have a close tag or an entity reference to qualify
+ if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)):
+ return
# all tags must be in a restricted subset of valid HTML tags
if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
- re.findall(r'</?(\w+)',s)): return
+ re.findall(r'</?(\w+)',s)):
+ return
# all entities must have been defined as valid HTML entities
- from htmlentitydefs import entitydefs
- if filter(lambda e: e not in entitydefs.keys(),
- re.findall(r'&(\w+);',s)): return
+ if filter(lambda e: e not in entitydefs.keys(), re.findall(r'&(\w+);', s)):
+ return
return 1
@@ -999,11 +1051,11 @@ class _FeedParserMixin:
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
- if self.contentparams['type'].startswith('text/'):
+ if self.contentparams['type'].startswith(u'text/'):
return 0
- if self.contentparams['type'].endswith('+xml'):
+ if self.contentparams['type'].endswith(u'+xml'):
return 0
- if self.contentparams['type'].endswith('/xml'):
+ if self.contentparams['type'].endswith(u'/xml'):
return 0
return 1
@@ -1029,53 +1081,49 @@ class _FeedParserMixin:
context.setdefault(key, value)
def _start_rss(self, attrsD):
- versionmap = {'0.91': 'rss091u',
- '0.92': 'rss092',
- '0.93': 'rss093',
- '0.94': 'rss094'}
+ versionmap = {'0.91': u'rss091u',
+ '0.92': u'rss092',
+ '0.93': u'rss093',
+ '0.94': u'rss094'}
#If we're here then this is an RSS feed.
#If we don't have a version or have a version that starts with something
#other than RSS then there's been a mistake. Correct it.
- if not self.version or not self.version.startswith('rss'):
+ if not self.version or not self.version.startswith(u'rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
- self.version = 'rss20'
+ self.version = u'rss20'
else:
- self.version = 'rss'
-
- def _start_dlhottitles(self, attrsD):
- self.version = 'hotrss'
+ self.version = u'rss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
- _start_feedinfo = _start_channel
def _cdf_common(self, attrsD):
- if attrsD.has_key('lastmod'):
+ if 'lastmod' in attrsD:
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
- if attrsD.has_key('href'):
+ if 'href' in attrsD:
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
- versionmap = {'0.1': 'atom01',
- '0.2': 'atom02',
- '0.3': 'atom03'}
+ versionmap = {'0.1': u'atom01',
+ '0.2': u'atom02',
+ '0.3': u'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
- self.version = 'atom'
+ self.version = u'atom'
def _end_channel(self):
self.infeed = 0
@@ -1086,7 +1134,7 @@ class _FeedParserMixin:
if not self.inentry:
context.setdefault('image', FeedParserDict())
self.inimage = 1
- self.hasTitle = 0
+ self.title_depth = -1
self.push('image', 0)
def _end_image(self):
@@ -1097,7 +1145,7 @@ class _FeedParserMixin:
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
- self.hasTitle = 0
+ self.title_depth = -1
self.push('textinput', 0)
_start_textInput = _start_textinput
@@ -1182,7 +1230,7 @@ class _FeedParserMixin:
value = self.pop('width')
try:
value = int(value)
- except:
+ except ValueError:
value = 0
if self.inimage:
context = self._getContext()
@@ -1195,7 +1243,7 @@ class _FeedParserMixin:
value = self.pop('height')
try:
value = int(value)
- except:
+ except ValueError:
value = 0
if self.inimage:
context = self._getContext()
@@ -1232,7 +1280,7 @@ class _FeedParserMixin:
def _getContext(self):
if self.insource:
context = self.sourcedata
- elif self.inimage and self.feeddata.has_key('image'):
+ elif self.inimage and 'image' in self.feeddata:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
@@ -1262,26 +1310,27 @@ class _FeedParserMixin:
name = detail.get('name')
email = detail.get('email')
if name and email:
- context[key] = '%s (%s)' % (name, email)
+ context[key] = u'%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
- if not author: return
- emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
+ if not author:
+ return
+ emailmatch = re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
- author = author.replace(email, '')
- author = author.replace('()', '')
- author = author.replace('<>', '')
- author = author.replace('&lt;&gt;', '')
+ author = author.replace(email, u'')
+ author = author.replace(u'()', u'')
+ author = author.replace(u'<>', u'')
+ author = author.replace(u'&lt;&gt;', u'')
author = author.strip()
- if author and (author[0] == '('):
+ if author and (author[0] == u'('):
author = author[1:]
- if author and (author[-1] == ')'):
+ if author and (author[-1] == u')'):
author = author[:-1]
author = author.strip()
if author or email:
@@ -1292,7 +1341,7 @@ class _FeedParserMixin:
context['%s_detail' % key]['email'] = email
def _start_subtitle(self, attrsD):
- self.pushContent('subtitle', attrsD, 'text/plain', 1)
+ self.pushContent('subtitle', attrsD, u'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
@@ -1302,7 +1351,7 @@ class _FeedParserMixin:
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
- self.pushContent('rights', attrsD, 'text/plain', 1)
+ self.pushContent('rights', attrsD, u'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
@@ -1316,14 +1365,13 @@ class _FeedParserMixin:
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
- self.hasTitle = 0
+ self.title_depth = -1
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
- _start_product = _start_item
def _end_item(self):
self.pop('item')
@@ -1351,18 +1399,19 @@ class _FeedParserMixin:
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
+ _start_pubdate = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value), overwrite=True)
_end_dcterms_issued = _end_published
_end_issued = _end_published
+ _end_pubdate = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
- _start_pubdate = _start_updated
_start_dc_date = _start_updated
_start_lastbuilddate = _start_updated
@@ -1372,7 +1421,6 @@ class _FeedParserMixin:
self._save('updated_parsed', parsed_value, overwrite=True)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
- _end_pubdate = _end_updated
_end_dc_date = _end_updated
_end_lastbuilddate = _end_updated
@@ -1395,8 +1443,9 @@ class _FeedParserMixin:
context = self._getContext()
value = self._getAttribute(attrsD, 'rdf:resource')
attrsD = FeedParserDict()
- attrsD['rel']='license'
- if value: attrsD['href']=value
+ attrsD['rel'] = u'license'
+ if value:
+ attrsD['href']=value
context.setdefault('links', []).append(attrsD)
def _start_creativecommons_license(self, attrsD):
@@ -1407,8 +1456,9 @@ class _FeedParserMixin:
value = self.pop('license')
context = self._getContext()
attrsD = FeedParserDict()
- attrsD['rel']='license'
- if value: attrsD['href']=value
+ attrsD['rel'] = u'license'
+ if value:
+ attrsD['href'] = value
context.setdefault('links', []).append(attrsD)
del context['license']
_end_creativeCommons_license = _end_creativecommons_license
@@ -1423,13 +1473,13 @@ class _FeedParserMixin:
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
- if (not term) and (not scheme) and (not label): return
+ if (not term) and (not scheme) and (not label):
+ return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(value)
def _start_category(self, attrsD):
- if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
@@ -1439,20 +1489,22 @@ class _FeedParserMixin:
_start_keywords = _start_category
def _start_media_category(self, attrsD):
- attrsD.setdefault('scheme', 'http://search.yahoo.com/mrss/category_schema')
+ attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema')
self._start_category(attrsD)
def _end_itunes_keywords(self):
- for term in self.pop('itunes_keywords').split():
- self._addTag(term, 'http://www.itunes.com/', None)
+ for term in self.pop('itunes_keywords').split(','):
+ if term.strip():
+ self._addTag(term.strip(), u'http://www.itunes.com/', None)
def _start_itunes_category(self, attrsD):
- self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
+ self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
- if not value: return
+ if not value:
+ return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
@@ -1468,74 +1520,75 @@ class _FeedParserMixin:
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
- attrsD.setdefault('rel', 'alternate')
- if attrsD['rel'] == 'self':
- attrsD.setdefault('type', 'application/atom+xml')
+ attrsD.setdefault('rel', u'alternate')
+ if attrsD['rel'] == u'self':
+ attrsD.setdefault('type', u'application/atom+xml')
else:
- attrsD.setdefault('type', 'text/html')
+ attrsD.setdefault('type', u'text/html')
context = self._getContext()
attrsD = self._itsAnHrefDamnIt(attrsD)
- if attrsD.has_key('href'):
+ if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
expectingText = self.infeed or self.inentry or self.insource
context.setdefault('links', [])
if not (self.inentry and self.inimage):
context['links'].append(FeedParserDict(attrsD))
- if attrsD.has_key('href'):
+ if 'href' in attrsD:
expectingText = 0
- if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
+ if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
- _start_producturl = _start_link
def _end_link(self):
value = self.pop('link')
- context = self._getContext()
- _end_producturl = _end_link
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
+ _start_id = _start_guid
def _end_guid(self):
value = self.pop('id')
- self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
+ self._save('guidislink', self.guidislink and 'link' not in self._getContext())
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
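# e.g. <guid>http://example.com/post/1</guid> with no isPermaLink attribute
# also populates the entry's link when no <link> element was given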
self._save('link', value)
+ _end_id = _end_guid
def _start_title(self, attrsD):
- if self.svgOK: return self.unknown_starttag('title', attrsD.items())
- self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
+ if self.svgOK:
+ return self.unknown_starttag('title', attrsD.items())
+ self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
- if self.svgOK: return
+ if self.svgOK:
+ return
value = self.popContent('title')
- if not value: return
- context = self._getContext()
- self.hasTitle = 1
+ if not value:
+ return
+ self.title_depth = self.depth
_end_dc_title = _end_title
def _end_media_title(self):
- hasTitle = self.hasTitle
+ title_depth = self.title_depth
self._end_title()
- self.hasTitle = hasTitle
+ self.title_depth = title_depth
def _start_description(self, attrsD):
context = self._getContext()
- if context.has_key('summary'):
+ if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
- self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
+ self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource)
_start_dc_description = _start_description
def _start_abstract(self, attrsD):
- self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
+ self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
@@ -1547,7 +1600,7 @@ class _FeedParserMixin:
_end_dc_description = _end_description
def _start_info(self, attrsD):
- self.pushContent('info', attrsD, 'text/plain', 1)
+ self.pushContent('info', attrsD, u'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
@@ -1557,7 +1610,7 @@ class _FeedParserMixin:
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
- if attrsD.has_key('href'):
+ if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
@@ -1565,7 +1618,7 @@ class _FeedParserMixin:
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
- if context.has_key('generator_detail'):
+ if 'generator_detail' in context:
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
@@ -1585,12 +1638,12 @@ class _FeedParserMixin:
def _start_summary(self, attrsD):
context = self._getContext()
- if context.has_key('summary'):
+ if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
- self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
+ self.pushContent(self._summaryKey, attrsD, u'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
@@ -1604,45 +1657,42 @@ class _FeedParserMixin:
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
- attrsD['rel']='enclosure'
+ attrsD['rel'] = u'enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
def _start_source(self, attrsD):
if 'url' in attrsD:
- # This means that we're processing a source element from an RSS 2.0 feed
- self.sourcedata['href'] = attrsD[u'url']
+ # This means that we're processing a source element from an RSS 2.0 feed
+ self.sourcedata['href'] = attrsD[u'url']
self.push('source', 1)
self.insource = 1
- self.hasTitle = 0
+ self.title_depth = -1
def _end_source(self):
self.insource = 0
value = self.pop('source')
if value:
- self.sourcedata['title'] = value
+ self.sourcedata['title'] = value
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
- self.pushContent('content', attrsD, 'text/plain', 1)
+ self.pushContent('content', attrsD, u'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
- def _start_prodlink(self, attrsD):
- self.pushContent('content', attrsD, 'text/html', 1)
-
def _start_body(self, attrsD):
- self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
+ self.pushContent('content', attrsD, u'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
- self.pushContent('content', attrsD, 'text/html', 1)
+ self.pushContent('content', attrsD, u'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
- copyToSummary = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
+ copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types)
value = self.popContent('content')
if copyToSummary:
self._save('summary', value)
@@ -1651,12 +1701,13 @@ class _FeedParserMixin:
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
- _end_prodlink = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
if attrsD.get('href'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
+ elif attrsD.get('url'):
+ self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
@@ -1685,7 +1736,7 @@ class _FeedParserMixin:
url = self.pop('url')
context = self._getContext()
if url != None and len(url.strip()) != 0:
- if not context['media_thumbnail'][-1].has_key('url'):
+ if 'url' not in context['media_thumbnail'][-1]:
context['media_thumbnail'][-1]['url'] = url
def _start_media_player(self, attrsD):
@@ -1711,7 +1762,6 @@ class _FeedParserMixin:
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
- if _debug: sys.stderr.write('trying StrictFeedParser\n')
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
@@ -1719,24 +1769,28 @@ if _XML_AVAILABLE:
self.decls = {}
def startPrefixMapping(self, prefix, uri):
+ if not uri:
+ return
+ # Jython uses '' instead of None; standardize on None
+ prefix = prefix or None
self.trackNamespace(prefix, uri)
- if uri == 'http://www.w3.org/1999/xlink':
- self.decls['xmlns:'+prefix] = uri
+ if prefix and uri == 'http://www.w3.org/1999/xlink':
+ self.decls['xmlns:' + prefix] = uri
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
- if lowernamespace.find('backend.userland.com/rss') <> -1:
+ if lowernamespace.find(u'backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
- namespace = 'http://backend.userland.com/rss'
+ namespace = u'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
- if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
- raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
+ if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
+ raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
@@ -1756,12 +1810,11 @@ if _XML_AVAILABLE:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
- if name and value == namespace:
- localname = name + ':' + localname
- break
- if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
+ if name and value == namespace:
+ localname = name + ':' + localname
+ break
- for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
+ for (namespace, attrlocalname), attrvalue in attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
@@ -1786,9 +1839,9 @@ if _XML_AVAILABLE:
localname = prefix + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
- if name and value == namespace:
- localname = name + ':' + localname
- break
+ if name and value == namespace:
+ localname = name + ':' + localname
+ break
localname = str(localname).lower()
self.unknown_endtag(localname)
@@ -1796,6 +1849,9 @@ if _XML_AVAILABLE:
self.bozo = 1
self.exc = exc
+ # drv_libxml2 calls warning() in some cases
+ warning = error
+
def fatalError(self, exc):
self.error(exc)
raise exc
@@ -1803,16 +1859,15 @@ if _XML_AVAILABLE:
class _BaseHTMLProcessor(sgmllib.SGMLParser):
special = re.compile('''[<>'"]''')
bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
- elements_no_end_tag = [
+ elements_no_end_tag = set([
'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
'source', 'track', 'wbr'
- ]
+ ])
def __init__(self, encoding, _type):
self.encoding = encoding
self._type = _type
- if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
sgmllib.SGMLParser.__init__(self)
def reset(self):
@@ -1826,8 +1881,21 @@ class _BaseHTMLProcessor(sgmllib.SGMLParser):
else:
return '<' + tag + '></' + tag + '>'
+ # By declaring these methods and overriding their compiled code
+ # with the code from sgmllib, the original code will execute in
+ # feedparser's scope instead of sgmllib's. This means that the
+ # `tagfind` and `charref` regular expressions will be found as
+ # they're declared above, not as they're declared in sgmllib.
+ def goahead(self, i):
+ pass
+ goahead.func_code = sgmllib.SGMLParser.goahead.func_code
+
+ def __parse_starttag(self, i):
+ pass
+ __parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code
+
def parse_starttag(self,i):
- j=sgmllib.SGMLParser.parse_starttag(self, i)
+ j = self.__parse_starttag(i)
if self._type == 'application/xhtml+xml':
if j>2 and self.rawdata[j-2:j]=='/>':
self.unknown_endtag(self.lasttag)
@@ -1835,7 +1903,6 @@ class _BaseHTMLProcessor(sgmllib.SGMLParser):
def feed(self, data):
data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
- #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
data = data.replace('&#39;', "'")
data = data.replace('&#34;', '"')
@@ -1843,15 +1910,16 @@ class _BaseHTMLProcessor(sgmllib.SGMLParser):
bytes
if bytes is str:
raise NameError
- self.encoding = self.encoding + '_INVALID_PYTHON_3'
+ self.encoding = self.encoding + u'_INVALID_PYTHON_3'
except NameError:
- if self.encoding and type(data) == type(u''):
+ if self.encoding and isinstance(data, unicode):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
sgmllib.SGMLParser.close(self)
def normalize_attrs(self, attrs):
- if not attrs: return attrs
+ if not attrs:
+ return attrs
# utility method to be called by descendants
attrs = dict([(k.lower(), v) for k, v in attrs]).items()
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
@@ -1862,7 +1930,6 @@ class _BaseHTMLProcessor(sgmllib.SGMLParser):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
- if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
uattrs = []
strattrs=''
if attrs:
@@ -1870,11 +1937,8 @@ class _BaseHTMLProcessor(sgmllib.SGMLParser):
value=value.replace('&gt;','>').replace('&lt;','<').replace('&quot;','"')
value = self.bare_ampersand.sub("&amp;", value)
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
- if type(value) != type(u''):
- try:
- value = unicode(value, self.encoding)
- except:
- value = unicode(value, 'iso-8859-1')
+ if not isinstance(value, unicode):
+ value = value.decode(self.encoding, 'ignore')
try:
# Currently, in Python 3 the key is already a str, and cannot be decoded again
uattrs.append((unicode(key, self.encoding), value))
@@ -1883,64 +1947,64 @@ class _BaseHTMLProcessor(sgmllib.SGMLParser):
strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
if self.encoding:
try:
- strattrs=strattrs.encode(self.encoding)
- except:
+ strattrs = strattrs.encode(self.encoding)
+ except (UnicodeEncodeError, LookupError):
pass
if tag in self.elements_no_end_tag:
- self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
+ self.pieces.append('<%s%s />' % (tag, strattrs))
else:
- self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
+ self.pieces.append('<%s%s>' % (tag, strattrs))
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
- self.pieces.append("</%(tag)s>" % locals())
+ self.pieces.append("</%s>" % tag)
def handle_charref(self, ref):
# called for each character reference, e.g. for '&#160;', ref will be '160'
# Reconstruct the original character reference.
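# (numeric references in the Windows-1252 range are rewritten to their
# Unicode equivalents, e.g. '&#150;' becomes '&#x2013;', an en dash)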
+ ref = ref.lower()
if ref.startswith('x'):
- value = unichr(int(ref[1:],16))
+ value = int(ref[1:], 16)
else:
- value = unichr(int(ref))
+ value = int(ref)
- if value in _cp1252.keys():
+ if value in _cp1252:
self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
else:
- self.pieces.append('&#%(ref)s;' % locals())
+ self.pieces.append('&#%s;' % ref)
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '&copy;', ref will be 'copy'
# Reconstruct the original entity reference.
- if name2codepoint.has_key(ref):
- self.pieces.append('&%(ref)s;' % locals())
+ if ref in name2codepoint or ref == 'apos':
+ self.pieces.append('&%s;' % ref)
else:
- self.pieces.append('&%(ref)s' % locals())
+ self.pieces.append('&%s' % ref)
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
- if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_data, text=%s\n' % text)
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!--insert Python code here-->
# Reconstruct the original comment.
- self.pieces.append('<!--%(text)s-->' % locals())
+ self.pieces.append('<!--%s-->' % text)
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
- self.pieces.append('<?%(text)s>' % locals())
+ self.pieces.append('<?%s>' % text)
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
#     "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
- self.pieces.append('<!%(text)s>' % locals())
+ self.pieces.append('<!%s>' % text)
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
@@ -1998,7 +2062,7 @@ class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
data = data.replace('&#x22;', '&quot;')
data = data.replace('&#39;', '&apos;')
data = data.replace('&#x27;', '&apos;')
- if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
+ if not self.contentparams.get('type', u'xml').endswith(u'xml'):
data = data.replace('&lt;', '<')
data = data.replace('&gt;', '>')
data = data.replace('&amp;', '&')
@@ -2016,14 +2080,14 @@ class _MicroformatsParser:
NODE = 4
EMAIL = 5
- known_xfn_relationships = ['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me']
- known_binary_extensions = ['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv']
+ known_xfn_relationships = set(['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me'])
+ known_binary_extensions = set(['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv'])
def __init__(self, data, baseuri, encoding):
self.document = BeautifulSoup.BeautifulSoup(data)
self.baseuri = baseuri
self.encoding = encoding
- if type(data) == type(u''):
+ if isinstance(data, unicode):
data = data.encode(encoding)
self.tags = []
self.enclosures = []
@@ -2031,7 +2095,7 @@ class _MicroformatsParser:
self.vcard = None
def vcardEscape(self, s):
- if type(s) in (type(''), type(u'')):
+ if isinstance(s, basestring):
s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
return s
@@ -2095,12 +2159,18 @@ class _MicroformatsParser:
arResults.append(node)
bFound = (len(arResults) != 0)
if not bFound:
- if bAllowMultiple: return []
- elif iPropertyType == self.STRING: return ''
- elif iPropertyType == self.DATE: return None
- elif iPropertyType == self.URI: return ''
- elif iPropertyType == self.NODE: return None
- else: return None
+ if bAllowMultiple:
+ return []
+ elif iPropertyType == self.STRING:
+ return ''
+ elif iPropertyType == self.DATE:
+ return None
+ elif iPropertyType == self.URI:
+ return ''
+ elif iPropertyType == self.NODE:
+ return None
+ else:
+ return None
arValues = []
for elmResult in arResults:
sValue = None
@@ -2120,9 +2190,12 @@ class _MicroformatsParser:
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (iPropertyType == self.URI):
- if sNodeName == 'a': sValue = elmResult.get('href')
- elif sNodeName == 'img': sValue = elmResult.get('src')
- elif sNodeName == 'object': sValue = elmResult.get('data')
+ if sNodeName == 'a':
+ sValue = elmResult.get('href')
+ elif sNodeName == 'img':
+ sValue = elmResult.get('src')
+ elif sNodeName == 'object':
+ sValue = elmResult.get('data')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'img'):
@@ -2136,7 +2209,8 @@ class _MicroformatsParser:
sValue = sValue.replace('\r', '\n')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
- if not sValue: continue
+ if not sValue:
+ continue
if iPropertyType == self.DATE:
sValue = _parse_date_iso8601(sValue)
if bAllowMultiple:
@@ -2358,20 +2432,29 @@ class _MicroformatsParser:
if arLines:
arLines = [u'BEGIN:vCard',u'VERSION:3.0'] + arLines + [u'END:vCard']
+ # XXX - this is super ugly; properly fix this with issue 148
+ for i, s in enumerate(arLines):
+ if not isinstance(s, unicode):
+ arLines[i] = s.decode('utf-8', 'ignore')
sVCards += u'\n'.join(arLines) + u'\n'
return sVCards.strip()
def isProbablyDownloadable(self, elm):
attrsD = elm.attrMap
- if not attrsD.has_key('href'): return 0
+ if 'href' not in attrsD:
+ return 0
linktype = attrsD.get('type', '').strip()
if linktype.startswith('audio/') or \
linktype.startswith('video/') or \
(linktype.startswith('application/') and not linktype.endswith('xml')):
return 1
- path = urlparse.urlparse(attrsD['href'])[2]
- if path.find('.') == -1: return 0
+ try:
+ path = urlparse.urlparse(attrsD['href'])[2]
+ except ValueError:
+ return 0
+ if path.find('.') == -1:
+ return 0
fileext = path.split('.').pop().lower()
return fileext in self.known_binary_extensions
@@ -2379,13 +2462,18 @@ class _MicroformatsParser:
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
href = elm.get('href')
- if not href: continue
+ if not href:
+ continue
urlscheme, domain, path, params, query, fragment = \
urlparse.urlparse(_urljoin(self.baseuri, href))
segments = path.split('/')
tag = segments.pop()
if not tag:
- tag = segments.pop()
+ if segments:
+ tag = segments.pop()
+ else:
+ # there are no tags
+ continue
tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
if not tagscheme.endswith('/'):
tagscheme += '/'
@@ -2395,7 +2483,8 @@ class _MicroformatsParser:
all = lambda x: 1
enclosure_match = re.compile(r'\benclosure\b')
for elm in self.document(all, {'href': re.compile(r'.+')}):
- if not enclosure_match.search(elm.get('rel', '')) and not self.isProbablyDownloadable(elm): continue
+ if not enclosure_match.search(elm.get('rel', u'')) and not self.isProbablyDownloadable(elm):
+ continue
if elm.attrMap not in self.enclosures:
self.enclosures.append(elm.attrMap)
if elm.string and not elm.get('title'):
@@ -2404,17 +2493,14 @@ class _MicroformatsParser:
def findXFN(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
- rels = elm.get('rel', '').split()
- xfn_rels = []
- for rel in rels:
- if rel in self.known_xfn_relationships:
- xfn_rels.append(rel)
+ rels = elm.get('rel', u'').split()
+ xfn_rels = [r for r in rels if r in self.known_xfn_relationships]
if xfn_rels:
self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
def _parseMicroformats(htmlSource, baseURI, encoding):
- if not BeautifulSoup: return
- if _debug: sys.stderr.write('entering _parseMicroformats\n')
+ if not BeautifulSoup:
+ return
try:
p = _MicroformatsParser(htmlSource, baseURI, encoding)
except UnicodeEncodeError:
@@ -2428,7 +2514,7 @@ def _parseMicroformats(htmlSource, baseURI, encoding):
return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard}
class _RelativeURIResolver(_BaseHTMLProcessor):
- relative_uris = [('a', 'href'),
+ relative_uris = set([('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('blockquote', 'cite'),
@@ -2452,26 +2538,22 @@ class _RelativeURIResolver(_BaseHTMLProcessor):
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
- ('script', 'src')]
+ ('script', 'src'),
+ ('video', 'poster')])
def __init__(self, baseuri, encoding, _type):
_BaseHTMLProcessor.__init__(self, encoding, _type)
self.baseuri = baseuri
def resolveURI(self, uri):
- return _makeSafeAbsoluteURI(_urljoin(self.baseuri, uri.strip()))
+ return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
def unknown_starttag(self, tag, attrs):
- if _debug:
- sys.stderr.write('tag: [%s] with attributes: [%s]\n' % (tag, str(attrs)))
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
- if _debug:
- sys.stderr.write('entering _resolveRelativeURIs\n')
-
p = _RelativeURIResolver(baseURI, encoding, _type)
p.feed(htmlSource)
return p.output()
@@ -2479,21 +2561,30 @@ def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
def _makeSafeAbsoluteURI(base, rel=None):
# bail if ACCEPTABLE_URI_SCHEMES is empty
if not ACCEPTABLE_URI_SCHEMES:
- return _urljoin(base, rel or u'')
+ try:
+ return _urljoin(base, rel or u'')
+ except ValueError:
+ return u''
if not base:
return rel or u''
if not rel:
- scheme = urlparse.urlparse(base)[0]
+ try:
+ scheme = urlparse.urlparse(base)[0]
+ except ValueError:
+ return u''
if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
return base
return u''
- uri = _urljoin(base, rel)
+ try:
+ uri = _urljoin(base, rel)
+ except ValueError:
+ return u''
if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
return u''
return uri
class _HTMLSanitizer(_BaseHTMLProcessor):
- acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
+ acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
@@ -2505,9 +2596,9 @@ class _HTMLSanitizer(_BaseHTMLProcessor):
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
- 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript']
+ 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'])
- acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
+ acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
@@ -2522,17 +2613,17 @@ class _HTMLSanitizer(_BaseHTMLProcessor):
'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
- 'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max',
- 'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows',
- 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src',
- 'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template',
- 'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign',
- 'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap',
- 'xml:lang']
+ 'poster', 'pqg', 'preload', 'prompt', 'radiogroup', 'readonly', 'rel',
+ 'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing',
+ 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span',
+ 'src', 'start', 'step', 'summary', 'suppress', 'tabindex', 'target',
+ 'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
+ 'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
+ 'width', 'wrap', 'xml:lang'])
- unacceptable_elements_with_end_tag = ['script', 'applet', 'style']
+ unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])
- acceptable_css_properties = ['azimuth', 'background-color',
+ acceptable_css_properties = set(['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
@@ -2542,26 +2633,26 @@ class _HTMLSanitizer(_BaseHTMLProcessor):
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
- 'white-space', 'width']
+ 'white-space', 'width'])
# survey of common keywords found in feeds
- acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
+ acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
- 'transparent', 'underline', 'white', 'yellow']
+ 'transparent', 'underline', 'white', 'yellow'])
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
'\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
- mathml_elements = ['annotation', 'annotation-xml', 'maction', 'math',
+ mathml_elements = set(['annotation', 'annotation-xml', 'maction', 'math',
'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
- 'munderover', 'none', 'semantics']
+ 'munderover', 'none', 'semantics'])
- mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
+ mathml_attributes = set(['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
@@ -2569,18 +2660,18 @@ class _HTMLSanitizer(_BaseHTMLProcessor):
'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign',
'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href',
- 'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink']
+ 'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'])
# svgtiny - foreignObject + linearGradient + radialGradient + stop
- svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
+ svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
- 'svg', 'switch', 'text', 'title', 'tspan', 'use']
+ 'svg', 'switch', 'text', 'title', 'tspan', 'use'])
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
- svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
+ svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
@@ -2606,14 +2697,14 @@ class _HTMLSanitizer(_BaseHTMLProcessor):
'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
- 'y2', 'zoomAndPan']
+ 'y2', 'zoomAndPan'])
svg_attr_map = None
svg_elem_map = None
- acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
+ acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
- 'stroke-opacity']
+ 'stroke-opacity'])
def reset(self):
_BaseHTMLProcessor.reset(self)
@@ -2680,7 +2771,8 @@ class _HTMLSanitizer(_BaseHTMLProcessor):
clean_attrs.append((key,value))
elif key=='style':
clean_value = self.sanitize_style(value)
- if clean_value: clean_attrs.append((key,clean_value))
+ if clean_value:
+ clean_attrs.append((key,clean_value))
_BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
def unknown_endtag(self, tag):
@@ -2688,10 +2780,12 @@ class _HTMLSanitizer(_BaseHTMLProcessor):
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
- if tag == 'math' and self.mathmlOK: self.mathmlOK -= 1
+ if tag == 'math' and self.mathmlOK:
+ self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag,tag)
- if tag == 'svg' and self.svgOK: self.svgOK -= 1
+ if tag == 'svg' and self.svgOK:
+ self.svgOK -= 1
else:
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
@@ -2711,24 +2805,27 @@ class _HTMLSanitizer(_BaseHTMLProcessor):
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
# gauntlet
- if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
+ if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
+ return ''
# This replaced a regexp that used re.match and was prone to pathological back-tracking.
- if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip(): return ''
+ if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
+ return ''
clean = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
- if not value: continue
- if prop.lower() in self.acceptable_css_properties:
- clean.append(prop + ': ' + value + ';')
- elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
- for keyword in value.split():
- if not keyword in self.acceptable_css_keywords and \
- not self.valid_css_values.match(keyword):
- break
- else:
- clean.append(prop + ': ' + value + ';')
- elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
- clean.append(prop + ': ' + value + ';')
+ if not value:
+ continue
+ if prop.lower() in self.acceptable_css_properties:
+ clean.append(prop + ': ' + value + ';')
+ elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
+ for keyword in value.split():
+ if not keyword in self.acceptable_css_keywords and \
+ not self.valid_css_values.match(keyword):
+ break
+ else:
+ clean.append(prop + ': ' + value + ';')
+ elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
+ clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
@@ -2770,7 +2867,7 @@ def _sanitizeHTML(htmlSource, encoding, _type):
except:
pass
if _tidy:
- utf8 = type(data) == type(u'')
+ utf8 = isinstance(data, unicode)
if utf8:
data = data.encode('utf-8')
data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
@@ -2787,39 +2884,29 @@ def _sanitizeHTML(htmlSource, encoding, _type):
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
- if ((code / 100) == 3) and (code != 304):
- return self.http_error_302(req, fp, code, msg, headers)
- infourl = urllib.addinfourl(fp, headers, req.get_full_url())
- infourl.status = code
- return infourl
+ # The default implementation just raises HTTPError.
+ # Forget that.
+ fp.status = code
+ return fp
- def http_error_302(self, req, fp, code, msg, headers):
- if headers.dict.has_key('location'):
- infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
- else:
- infourl = urllib.addinfourl(fp, headers, req.get_full_url())
- if not hasattr(infourl, 'status'):
- infourl.status = code
- return infourl
-
- def http_error_301(self, req, fp, code, msg, headers):
- if headers.dict.has_key('location'):
- infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
- else:
- infourl = urllib.addinfourl(fp, headers, req.get_full_url())
- if not hasattr(infourl, 'status'):
- infourl.status = code
- return infourl
-
- http_error_300 = http_error_302
- http_error_303 = http_error_302
- http_error_307 = http_error_302
+ def http_error_301(self, req, fp, code, msg, hdrs):
+ result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp,
+ code, msg, hdrs)
+ result.status = code
+ result.newurl = result.geturl()
+ return result
+ # The default implementations in urllib2.HTTPRedirectHandler
+ # are identical, so hardcoding a http_error_301 call above
+ # won't affect anything
+ http_error_300 = http_error_301
+ http_error_302 = http_error_301
+ http_error_303 = http_error_301
+ http_error_307 = http_error_301
def http_error_401(self, req, fp, code, msg, headers):
# Check if
# - server requires digest auth, AND
# - we tried (unsuccessfully) with basic auth, AND
- # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
# If all conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
@@ -2827,17 +2914,16 @@ class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urlparse.urlparse(req.get_full_url())[1]
- try:
- assert sys.version.split()[0] >= '2.3.3'
- assert base64 != None
- user, passw = _base64decode(req.headers['Authorization'].split(' ')[1]).split(':')
- realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
- self.add_password(realm, host, user, passw)
- retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
- self.reset_retry_count()
- return retry
- except:
+ if base64 is None or 'Authorization' not in req.headers \
+ or 'WWW-Authenticate' not in headers:
return self.http_error_default(req, fp, code, msg, headers)
+ auth = _base64decode(req.headers['Authorization'].split(' ')[1])
+ user, passw = auth.split(':')
+ realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
+ self.add_password(realm, host, user, passw)
+ retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
+ self.reset_retry_count()
+ return retry
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
"""URL, filename, or string --> stream
@@ -2874,10 +2960,8 @@ def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, h
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
- if url_file_stream_or_string == '-':
- return sys.stdin
-
- if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
+ if isinstance(url_file_stream_or_string, basestring) \
+ and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
# Deal with the feed URI scheme
if url_file_stream_or_string.startswith('feed:http'):
url_file_stream_or_string = url_file_stream_or_string[5:]
@@ -2885,9 +2969,9 @@ def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, h
url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
if not agent:
agent = USER_AGENT
- # test for inline user:password for basic auth
+ # Test for inline user:password credentials for HTTP basic auth
auth = None
- if base64:
+ if base64 and not url_file_stream_or_string.startswith('ftp:'):
urltype, rest = urllib.splittype(url_file_stream_or_string)
realhost, rest = urllib.splithost(rest)
if realhost:
@@ -2897,17 +2981,12 @@ def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, h
auth = base64.standard_b64encode(user_passwd).strip()
# iri support
- try:
- if isinstance(url_file_stream_or_string,unicode):
- url_file_stream_or_string = url_file_stream_or_string.encode('idna').decode('utf-8')
- else:
- url_file_stream_or_string = url_file_stream_or_string.decode('utf-8').encode('idna').decode('utf-8')
- except:
- pass
+ if isinstance(url_file_stream_or_string, unicode):
+ url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)
# try to open with urllib2 (to use optional headers)
request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
- opener = apply(urllib2.build_opener, tuple(handlers + [_FeedURLHandler()]))
+ opener = urllib2.build_opener(*tuple(handlers + [_FeedURLHandler()]))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
@@ -2917,18 +2996,51 @@ def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, h
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string, 'rb')
- except:
+ except (IOError, UnicodeEncodeError, TypeError):
+ # if url_file_stream_or_string is a unicode object that
+ # cannot be converted to the encoding returned by
+ # sys.getfilesystemencoding(), a UnicodeEncodeError
+ # will be thrown
+ # If url_file_stream_or_string is a string that contains NULL
+ # (such as an XML document encoded in UTF-32), TypeError will
+ # be thrown.
pass
# treat url_file_stream_or_string as string
- return _StringIO(str(url_file_stream_or_string))
+ if isinstance(url_file_stream_or_string, unicode):
+ return _StringIO(url_file_stream_or_string.encode('utf-8'))
+ return _StringIO(url_file_stream_or_string)
+
+def _convert_to_idn(url):
+ """Convert a URL to IDN notation"""
+ # this function should only be called with a unicode string
+ # strategy: if the host cannot be encoded in ascii, then
+ # it'll be necessary to encode it in idn form
+ parts = list(urlparse.urlsplit(url))
+ try:
+ parts[1].encode('ascii')
+ except UnicodeEncodeError:
+ # the url needs to be converted to idn notation
+ host = parts[1].rsplit(':', 1)
+ newhost = []
+ port = u''
+ if len(host) == 2:
+ port = host.pop()
+ for h in host[0].split('.'):
+ newhost.append(h.encode('idna').decode('utf-8'))
+ parts[1] = '.'.join(newhost)
+ if port:
+ parts[1] += ':' + port
+ return urlparse.urlunsplit(parts)
+ else:
+ return url
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
request = urllib2.Request(url)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
- if type(modified) == type(''):
+ if isinstance(modified, basestring):
modified = _parse_date(modified)
elif isinstance(modified, datetime.datetime):
modified = modified.utctimetuple()
@@ -3008,9 +3120,12 @@ def _parse_date_iso8601(dateString):
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
- if m: break
- if not m: return
- if m.span() == (0, 0): return
+ if m:
+ break
+ if not m:
+ return
+ if m.span() == (0, 0):
+ return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
@@ -3048,7 +3163,7 @@ def _parse_date_iso8601(dateString):
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
- if 'century' in params.keys():
+ if 'century' in params:
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
@@ -3095,19 +3210,20 @@ _korean_nate_date_re = \
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
- if not m: return
+ if not m:
+ return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
- if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
- if not m: return
+ if not m:
+ return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
@@ -3119,24 +3235,9 @@ def _parse_date_nate(dateString):
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
- if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
-_mssql_date_re = \
- re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
-def _parse_date_mssql(dateString):
- '''Parse a string according to the MS SQL date format'''
- m = _mssql_date_re.match(dateString)
- if not m: return
- w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
- {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
- 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
- 'zonediff': '+09:00'}
- if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
- return _parse_date_w3dtf(w3dtfdate)
-registerDateHandler(_parse_date_mssql)
-
# Unicode strings for Greek date strings
_greek_months = \
{ \
@@ -3178,17 +3279,14 @@ _greek_date_format_re = \
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
- if not m: return
- try:
- wday = _greek_wdays[m.group(1)]
- month = _greek_months[m.group(3)]
- except:
+ if not m:
return
+ wday = _greek_wdays[m.group(1)]
+ month = _greek_months[m.group(3)]
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
- if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
@@ -3215,22 +3313,19 @@ _hungarian_date_format_re = \
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
- if not m: return
- try:
- month = _hungarian_months[m.group(2)]
- day = m.group(3)
- if len(day) == 1:
- day = '0' + day
- hour = m.group(4)
- if len(hour) == 1:
- hour = '0' + hour
- except:
- return
+ if not m or m.group(2) not in _hungarian_months:
+ return None
+ month = _hungarian_months[m.group(2)]
+ day = m.group(3)
+ if len(day) == 1:
+ day = '0' + day
+ hour = m.group(4)
+ if len(hour) == 1:
+ hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
- if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
@@ -3238,6 +3333,9 @@ registerDateHandler(_parse_date_hungarian)
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
+# Modified to also support MSSQL-style datetimes as defined at:
+# http://msdn.microsoft.com/en-us/library/ms186724.aspx
+# (which basically means allowing a space as a date/time/timezone separator)
def _parse_date_w3dtf(dateString):
def __extract_date(m):
year = int(m.group('year'))
@@ -3263,7 +3361,7 @@ def _parse_date_w3dtf(dateString):
day = 31
elif jday < julian:
if day + diff < 28:
- day = day + diff
+ day = day + diff
else:
month = month + 1
return year, month, day
@@ -3319,321 +3417,461 @@ def _parse_date_w3dtf(dateString):
__date_re = ('(?P<year>\d\d\d\d)'
'(?:(?P<dsep>-|)'
'(?:(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?'
'|(?P<julian>\d\d\d)))?')
- __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
- __tzd_rx = re.compile(__tzd_re)
+ __tzd_re = ' ?(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)?'
__time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
'(?:(?P=tsep)(?P<seconds>\d\d)(?:[.,]\d+)?)?'
+ __tzd_re)
- __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
+ __datetime_re = '%s(?:[T ]%s)?' % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
m = __datetime_rx.match(dateString)
- if (m is None) or (m.group() != dateString): return
+ if (m is None) or (m.group() != dateString):
+ return
gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
- if gmt[0] == 0: return
+ if gmt[0] == 0:
+ return
return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
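The relaxed separators mean timestamps copied straight out of MS SQL now parse; a hedged sketch, assuming the patched module is importable as feedparser and using invented timestamps:

    import feedparser

    # Classic W3C-DTF: 'T' separator and an explicit timezone designator
    print(feedparser._parse_date_w3dtf('2003-12-31T10:14:55Z'))
    # MSSQL-style: a space between date and time, optional fractional seconds
    print(feedparser._parse_date_w3dtf('2004-07-08 23:56:58.454'))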
-def _parse_date_rfc822(dateString):
- '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
- data = dateString.split()
- if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
- del data[0]
- if len(data) == 4:
- s = data[3]
- i = s.find('+')
- if i > 0:
- data[3:] = [s[:i], s[i+1:]]
- else:
- data.append('')
- dateString = " ".join(data)
- # Account for the Etc/GMT timezone by stripping 'Etc/'
- elif len(data) == 5 and data[4].lower().startswith('etc/'):
- data[4] = data[4][4:]
- dateString = " ".join(data)
- if len(data) < 5:
- dateString += ' 00:00:00 GMT'
+# Define the strings used by the RFC822 datetime parser
+_rfc822_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
+ 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
+_rfc822_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
+
+# Only the first three letters of the month name matter
+_rfc822_month = "(?P%s)(?:[a-z]*,?)" % ('|'.join(_rfc822_months))
+# The year may be 2 or 4 digits; capture the century if it exists
+_rfc822_year = "(?P(?:\d{2})?\d{2})"
+_rfc822_day = "(?P *\d{1,2})"
+_rfc822_date = "%s %s %s" % (_rfc822_day, _rfc822_month, _rfc822_year)
+
+_rfc822_hour = "(?P\d{2}):(?P\d{2})(?::(?P\d{2}))?"
+_rfc822_tz = "(?Put|gmt(?:[+-]\d{2}:\d{2})?|[aecmp][sd]?t|[zamny]|[+-]\d{4})"
+_rfc822_tznames = {
+ 'ut': 0, 'gmt': 0, 'z': 0,
+ 'adt': -3, 'ast': -4, 'at': -4,
+ 'edt': -4, 'est': -5, 'et': -5,
+ 'cdt': -5, 'cst': -6, 'ct': -6,
+ 'mdt': -6, 'mst': -7, 'mt': -7,
+ 'pdt': -7, 'pst': -8, 'pt': -8,
+ 'a': -1, 'n': 1,
+ 'm': -12, 'y': 12,
+ }
+# The timezone may be prefixed by 'Etc/'
+_rfc822_time = "%s (?:etc/)?%s" % (_rfc822_hour, _rfc822_tz)
+
+_rfc822_dayname = "(?P%s)" % ('|'.join(_rfc822_daynames))
+_rfc822_match = re.compile(
+ "(?:%s, )?%s(?: %s)?" % (_rfc822_dayname, _rfc822_date, _rfc822_time)
+).match
+
+def _parse_date_group_rfc822(m):
+ # Calculate a date and timestamp
+ for k in ('year', 'day', 'hour', 'minute', 'second'):
+ m[k] = int(m[k])
+ m['month'] = _rfc822_months.index(m['month']) + 1
+ # If the year is 2 digits, assume everything in the 90's is the 1990's
+ if m['year'] < 100:
+ m['year'] += (1900, 2000)[m['year'] < 90]
+ stamp = datetime.datetime(*[m[i] for i in
+ ('year', 'month', 'day', 'hour', 'minute', 'second')])
+
+ # Use the timezone information to calculate the difference between
+ # the given date and timestamp and Universal Coordinated Time
+ tzhour = 0
+ tzmin = 0
+ if m['tz'] and m['tz'].startswith('gmt'):
+ # Handle GMT and GMT+hh:mm timezone syntax (the trailing
+ # timezone info will be handled by the next `if` block)
+ m['tz'] = ''.join(m['tz'][3:].split(':')) or 'gmt'
+ if not m['tz']:
+ pass
+ elif m['tz'].startswith('+'):
+ tzhour = int(m['tz'][1:3])
+ tzmin = int(m['tz'][3:])
+ elif m['tz'].startswith('-'):
+ tzhour = int(m['tz'][1:3]) * -1
+ tzmin = int(m['tz'][3:]) * -1
+ else:
+ tzhour = _rfc822_tznames[m['tz']]
+ delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
+
+ # Return the date and timestamp in UTC
+ return (stamp - delta).utctimetuple()
+
+def _parse_date_rfc822(dt):
+ """Parse RFC 822 dates and times, with one minor
+ difference: years may be 4DIGIT or 2DIGIT.
+ http://tools.ietf.org/html/rfc822#section-5"""
+ try:
+ m = _rfc822_match(dt.lower()).groupdict(0)
+ except AttributeError:
+ return None
+
+ return _parse_date_group_rfc822(m)
+registerDateHandler(_parse_date_rfc822)
+
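A short sketch of the values the regex-based parser is meant to normalize (sample dates invented, module assumed importable as feedparser):

    import feedparser

    # Standard RFC 822 with a named timezone
    print(feedparser._parse_date_rfc822('Thu, 01 Jan 2004 19:48:21 EST'))
    # Two-digit years are allowed: 90-99 map to 199x, everything else to 20xx
    print(feedparser._parse_date_rfc822('31 Dec 99 23:59 GMT'))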
+def _parse_date_rfc822_grubby(dt):
+ """Parse date format similar to RFC 822, but
+ the comma after the dayname is optional and
+ month/day are inverted"""
+ _rfc822_date_grubby = "%s %s %s" % (_rfc822_month, _rfc822_day, _rfc822_year)
+ _rfc822_match_grubby = re.compile(
+ "(?:%s[,]? )?%s(?: %s)?" % (_rfc822_dayname, _rfc822_date_grubby, _rfc822_time)
+ ).match
+
+ try:
+ m = _rfc822_match_grubby(dt.lower()).groupdict(0)
+ except AttributeError:
+ return None
+
+ return _parse_date_group_rfc822(m)
+registerDateHandler(_parse_date_rfc822_grubby)
+
+def _parse_date_asctime(dt):
+ """Parse asctime-style dates"""
+ dayname, month, day, remainder = dt.split(None, 3)
+ # Convert month and day into zero-padded integers
+ month = '%02i ' % (_rfc822_months.index(month.lower()) + 1)
+ day = '%02i ' % (int(day),)
+ dt = month + day + remainder
+ return time.strptime(dt, '%m %d %H:%M:%S %Y')[:-1] + (0, )
+registerDateHandler(_parse_date_asctime)
+
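The asctime handler just rewrites the value into the month/day layout that time.strptime expects; for instance (an invented timestamp, module assumed importable as feedparser):

    import feedparser

    print(feedparser._parse_date_asctime('Sun Jan  4 16:29:06 2004'))
    # equivalent to time.strptime('01 04 16:29:06 2004', '%m %d %H:%M:%S %Y')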
+def _parse_date_perforce(aDateString):
+ """parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
+ # Fri, 2006/09/15 08:19:53 EDT
+ _my_date_pattern = re.compile( \
+ r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
+
+ m = _my_date_pattern.search(aDateString)
+ if m is None:
+ return None
+ dow, year, month, day, hour, minute, second, tz = m.groups()
+ months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+ dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
-# rfc822.py defines several time zones, but we define some extra ones.
-# 'ET' is equivalent to 'EST', etc.
-_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
-rfc822._timezones.update(_additional_timezones)
-registerDateHandler(_parse_date_rfc822)
-
-def _parse_date_perforce(aDateString):
- """parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
- # Fri, 2006/09/15 08:19:53 EDT
- _my_date_pattern = re.compile( \
- r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
-
- dow, year, month, day, hour, minute, second, tz = \
- _my_date_pattern.search(aDateString).groups()
- months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
- dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
- tm = rfc822.parsedate_tz(dateString)
- if tm:
- return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
+ if not dateString:
+ return None
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
- if not date9tuple: continue
- if len(date9tuple) != 9:
- if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
- raise ValueError
- map(int, date9tuple)
- return date9tuple
- except Exception as e:
- if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
- pass
+ except (KeyError, OverflowError, ValueError):
+ continue
+ if not date9tuple:
+ continue
+ if len(date9tuple) != 9:
+ continue
+ return date9tuple
return None
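Because _parse_date simply walks the registered handlers in order, applications can still plug in extra formats through registerDateHandler; a minimal sketch with a hypothetical dotted date format:

    import time
    import feedparser

    def _parse_date_dotted(date_string):
        '''Parse dates like 04.01.2004 16:29:06 (a hypothetical feed quirk)'''
        try:
            return time.strptime(date_string, '%d.%m.%Y %H:%M:%S')
        except ValueError:
            return None

    feedparser.registerDateHandler(_parse_date_dotted)
    print(feedparser._parse_date('04.01.2004 16:29:06'))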
-def _getCharacterEncoding(http_headers, xml_data):
- '''Get the character encoding of the XML document
+# Each marker represents some of the characters of the opening XML
+# processing instruction ('<?xm') in the specified encoding.
+EBCDIC_MARKER = _l2bytes([0x4C, 0x6F, 0xA7, 0x94])
+UTF16BE_MARKER = _l2bytes([0x00, 0x3C, 0x00, 0x3F])
+UTF16LE_MARKER = _l2bytes([0x3C, 0x00, 0x3F, 0x00])
+UTF32BE_MARKER = _l2bytes([0x00, 0x00, 0x00, 0x3C])
+UTF32LE_MARKER = _l2bytes([0x3C, 0x00, 0x00, 0x00])
+
+ZERO_BYTES = _l2bytes([0x00, 0x00])
+
+# Match the opening XML declaration.
+# Example: <?xml version="1.0" encoding="utf-8"?>
+RE_XML_DECLARATION = re.compile('^<\?xml[^>]*?>')
+
+# Capture the value of the XML processing instruction's encoding attribute.
+# Example: <?xml version="1.0" encoding="utf-8"?>
+RE_XML_PI_ENCODING = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>'))
+
+def convert_to_utf8(http_headers, data):
+ '''Detect and convert the character encoding to UTF-8.
http_headers is a dictionary
- xml_data is a raw string (not Unicode)
+ data is a raw string (not Unicode)'''
- This is so much trickier than it sounds, it's not even funny.
- According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
- is application/xml, application/*+xml,
- application/xml-external-parsed-entity, or application/xml-dtd,
- the encoding given in the charset parameter of the HTTP Content-Type
- takes precedence over the encoding given in the XML prefix within the
- document, and defaults to 'utf-8' if neither are specified. But, if
- the HTTP Content-Type is text/xml, text/*+xml, or
- text/xml-external-parsed-entity, the encoding given in the XML prefix
- within the document is ALWAYS IGNORED and only the encoding given in
- the charset parameter of the HTTP Content-Type header should be
- respected, and it defaults to 'us-ascii' if not specified.
+ # This is so much trickier than it sounds, it's not even funny.
+ # According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
+ # is application/xml, application/*+xml,
+ # application/xml-external-parsed-entity, or application/xml-dtd,
+ # the encoding given in the charset parameter of the HTTP Content-Type
+ # takes precedence over the encoding given in the XML prefix within the
+ # document, and defaults to 'utf-8' if neither are specified. But, if
+ # the HTTP Content-Type is text/xml, text/*+xml, or
+ # text/xml-external-parsed-entity, the encoding given in the XML prefix
+ # within the document is ALWAYS IGNORED and only the encoding given in
+ # the charset parameter of the HTTP Content-Type header should be
+ # respected, and it defaults to 'us-ascii' if not specified.
- Furthermore, discussion on the atom-syntax mailing list with the
- author of RFC 3023 leads me to the conclusion that any document
- served with a Content-Type of text/* and no charset parameter
- must be treated as us-ascii. (We now do this.) And also that it
- must always be flagged as non-well-formed. (We now do this too.)
+ # Furthermore, discussion on the atom-syntax mailing list with the
+ # author of RFC 3023 leads me to the conclusion that any document
+ # served with a Content-Type of text/* and no charset parameter
+ # must be treated as us-ascii. (We now do this.) And also that it
+ # must always be flagged as non-well-formed. (We now do this too.)
- If Content-Type is unspecified (input was local file or non-HTTP source)
- or unrecognized (server just got it totally wrong), then go by the
- encoding given in the XML prefix of the document and default to
- 'iso-8859-1' as per the HTTP specification (RFC 2616).
+ # If Content-Type is unspecified (input was local file or non-HTTP source)
+ # or unrecognized (server just got it totally wrong), then go by the
+ # encoding given in the XML prefix of the document and default to
+ # 'iso-8859-1' as per the HTTP specification (RFC 2616).
- Then, assuming we didn't find a character encoding in the HTTP headers
- (and the HTTP Content-type allowed us to look in the body), we need
- to sniff the first few bytes of the XML data and try to determine
- whether the encoding is ASCII-compatible. Section F of the XML
- specification shows the way here:
- http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
-
- If the sniffed encoding is not ASCII-compatible, we need to make it
- ASCII compatible so that we can sniff further into the XML declaration
- to find the encoding attribute, which will tell us the true encoding.
-
- Of course, none of this guarantees that we will be able to parse the
- feed in the declared character encoding (assuming it was declared
- correctly, which many are not). CJKCodecs and iconv_codec help a lot;
- you should definitely install them if you can.
- http://cjkpython.i18n.org/
- '''
-
- def _parseHTTPContentType(content_type):
- '''takes HTTP Content-Type header and returns (content type, charset)
-
- If no charset is specified, returns (content type, '')
- If no content type is specified, returns ('', '')
- Both return parameters are guaranteed to be lowercase strings
- '''
- content_type = content_type or ''
- content_type, params = cgi.parse_header(content_type)
- return content_type, params.get('charset', '').replace("'", '')
-
- sniffed_xml_encoding = ''
- xml_encoding = ''
- true_encoding = ''
- http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type', http_headers.get('Content-type')))
- # Must sniff for non-ASCII-compatible character encodings before
- # searching for XML declaration. This heuristic is defined in
- # section F of the XML specification:
+ # Then, assuming we didn't find a character encoding in the HTTP headers
+ # (and the HTTP Content-type allowed us to look in the body), we need
+ # to sniff the first few bytes of the XML data and try to determine
+ # whether the encoding is ASCII-compatible. Section F of the XML
+ # specification shows the way here:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
+
+ # If the sniffed encoding is not ASCII-compatible, we need to make it
+ # ASCII compatible so that we can sniff further into the XML declaration
+ # to find the encoding attribute, which will tell us the true encoding.
+
+ # Of course, none of this guarantees that we will be able to parse the
+ # feed in the declared character encoding (assuming it was declared
+ # correctly, which many are not). iconv_codec can help a lot;
+ # you should definitely install it if you can.
+ # http://cjkpython.i18n.org/
+
+ bom_encoding = u''
+ xml_encoding = u''
+ rfc3023_encoding = u''
+
+ # Look at the first few bytes of the document to guess what
+ # its encoding may be. We only need to decode enough of the
+ # document that we can use an ASCII-compatible regular
+ # expression to search for an XML encoding declaration.
+ # The heuristic follows the XML specification, section F:
+ # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
+ # Check for BOMs first.
+ if data[:4] == codecs.BOM_UTF32_BE:
+ bom_encoding = u'utf-32be'
+ data = data[4:]
+ elif data[:4] == codecs.BOM_UTF32_LE:
+ bom_encoding = u'utf-32le'
+ data = data[4:]
+ elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
+ bom_encoding = u'utf-16be'
+ data = data[2:]
+ elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
+ bom_encoding = u'utf-16le'
+ data = data[2:]
+ elif data[:3] == codecs.BOM_UTF8:
+ bom_encoding = u'utf-8'
+ data = data[3:]
+ # Check for the characters '<?xm' in several encodings.
+ elif data[:4] == EBCDIC_MARKER:
+ bom_encoding = u'cp037'
+ elif data[:4] == UTF16BE_MARKER:
+ bom_encoding = u'utf-16be'
+ elif data[:4] == UTF16LE_MARKER:
+ bom_encoding = u'utf-16le'
+ elif data[:4] == UTF32BE_MARKER:
+ bom_encoding = u'utf-32be'
+ elif data[:4] == UTF32LE_MARKER:
+ bom_encoding = u'utf-32le'
- elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xfe, 0xff])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])):
- # UTF-16BE with BOM
- sniffed_xml_encoding = 'utf-16be'
- xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
- elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x3f, 0x00]):
- # UTF-16LE
- sniffed_xml_encoding = 'utf-16le'
- xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
- elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xff, 0xfe])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])):
- # UTF-16LE with BOM
- sniffed_xml_encoding = 'utf-16le'
- xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
- elif xml_data[:4] == _l2bytes([0x00, 0x00, 0x00, 0x3c]):
- # UTF-32BE
- sniffed_xml_encoding = 'utf-32be'
- xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
- elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x00, 0x00]):
- # UTF-32LE
- sniffed_xml_encoding = 'utf-32le'
- xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
- elif xml_data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]):
- # UTF-32BE with BOM
- sniffed_xml_encoding = 'utf-32be'
- xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
- elif xml_data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]):
- # UTF-32LE with BOM
- sniffed_xml_encoding = 'utf-32le'
- xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
- elif xml_data[:3] == _l2bytes([0xef, 0xbb, 0xbf]):
- # UTF-8 with BOM
- sniffed_xml_encoding = 'utf-8'
- xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
- else:
- # ASCII-compatible
- pass
- xml_encoding_match = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')).match(xml_data)
- except:
+ if bom_encoding:
+ tempdata = data.decode(bom_encoding).encode('utf-8')
+ except (UnicodeDecodeError, LookupError):
+ # feedparser recognizes UTF-32 encodings that aren't
+ # available in Python 2.4 and 2.5, so it's possible to
+ # encounter a LookupError during decoding.
xml_encoding_match = None
+ else:
+ xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)
+
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
- if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
- xml_encoding = sniffed_xml_encoding
+ # Normalize the xml_encoding if necessary.
+ if bom_encoding and (xml_encoding in (
+ u'u16', u'utf-16', u'utf16', u'utf_16',
+ u'u32', u'utf-32', u'utf32', u'utf_32',
+ u'iso-10646-ucs-2', u'iso-10646-ucs-4',
+ u'csucs4', u'csunicode', u'ucs-2', u'ucs-4'
+ )):
+ xml_encoding = bom_encoding
+
+ # Find the HTTP Content-Type and, hopefully, a character
+ # encoding provided by the server. The Content-Type is used
+ # to choose the "correct" encoding among the BOM encoding,
+ # XML declaration encoding, and HTTP encoding, following the
+ # heuristic defined in RFC 3023.
+ http_content_type = http_headers.get('content-type') or ''
+ http_content_type, params = cgi.parse_header(http_content_type)
+ http_encoding = params.get('charset', '').replace("'", "")
+ if not isinstance(http_encoding, unicode):
+ http_encoding = http_encoding.decode('utf-8', 'ignore')
+
acceptable_content_type = 0
- application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
- text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
+ application_content_types = (u'application/xml', u'application/xml-dtd',
+ u'application/xml-external-parsed-entity')
+ text_content_types = (u'text/xml', u'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
- (http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
+ (http_content_type.startswith(u'application/') and
+ http_content_type.endswith(u'+xml')):
acceptable_content_type = 1
- true_encoding = http_encoding or xml_encoding or 'utf-8'
+ rfc3023_encoding = http_encoding or xml_encoding or u'utf-8'
elif (http_content_type in text_content_types) or \
- (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
+ (http_content_type.startswith(u'text/') and
+ http_content_type.endswith(u'+xml')):
acceptable_content_type = 1
- true_encoding = http_encoding or 'us-ascii'
- elif http_content_type.startswith('text/'):
- true_encoding = http_encoding or 'us-ascii'
- elif http_headers and (not (http_headers.has_key('content-type') or http_headers.has_key('Content-type'))):
- true_encoding = xml_encoding or 'iso-8859-1'
+ rfc3023_encoding = http_encoding or u'us-ascii'
+ elif http_content_type.startswith(u'text/'):
+ rfc3023_encoding = http_encoding or u'us-ascii'
+ elif http_headers and 'content-type' not in http_headers:
+ rfc3023_encoding = xml_encoding or u'iso-8859-1'
else:
- true_encoding = xml_encoding or 'utf-8'
- # some feeds claim to be gb2312 but are actually gb18030.
- # apparently MSIE and Firefox both do the following switch:
- if true_encoding.lower() == 'gb2312':
- true_encoding = 'gb18030'
- return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
+ rfc3023_encoding = xml_encoding or u'utf-8'
+ # gb18030 is a superset of gb2312, so always replace gb2312
+ # with gb18030 for greater compatibility.
+ if rfc3023_encoding.lower() == u'gb2312':
+ rfc3023_encoding = u'gb18030'
+ if xml_encoding.lower() == u'gb2312':
+ xml_encoding = u'gb18030'
-def _toUTF8(data, encoding):
- '''Changes an XML data stream on the fly to specify a new encoding
+ # there are four encodings to keep track of:
+ # - http_encoding is the encoding declared in the Content-Type HTTP header
+ # - xml_encoding is the encoding declared in the <?xml declaration
+ # - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
+ # - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
+ error = None
- data is a raw string (not Unicode)
- encoding is a string recognized by encodings.aliases
- '''
- # strip Byte Order Mark (if present)
- if (len(data) >= 4) and (data[:2] == _l2bytes([0xfe, 0xff])) and (data[2:4] != _l2bytes([0x00, 0x00])):
- if _debug:
- sys.stderr.write('stripping BOM\n')
- if encoding != 'utf-16be':
- sys.stderr.write('trying utf-16be instead\n')
- encoding = 'utf-16be'
- data = data[2:]
- elif (len(data) >= 4) and (data[:2] == _l2bytes([0xff, 0xfe])) and (data[2:4] != _l2bytes([0x00, 0x00])):
- if _debug:
- sys.stderr.write('stripping BOM\n')
- if encoding != 'utf-16le':
- sys.stderr.write('trying utf-16le instead\n')
- encoding = 'utf-16le'
- data = data[2:]
- elif data[:3] == _l2bytes([0xef, 0xbb, 0xbf]):
- if _debug:
- sys.stderr.write('stripping BOM\n')
- if encoding != 'utf-8':
- sys.stderr.write('trying utf-8 instead\n')
- encoding = 'utf-8'
- data = data[3:]
- elif data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]):
- if _debug:
- sys.stderr.write('stripping BOM\n')
- if encoding != 'utf-32be':
- sys.stderr.write('trying utf-32be instead\n')
- encoding = 'utf-32be'
- data = data[4:]
- elif data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]):
- if _debug:
- sys.stderr.write('stripping BOM\n')
- if encoding != 'utf-32le':
- sys.stderr.write('trying utf-32le instead\n')
- encoding = 'utf-32le'
- data = data[4:]
- newdata = unicode(data, encoding)
- if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
- declmatch = re.compile('^<\?xml[^>]*?>')
- newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
- if declmatch.search(newdata):
- newdata = declmatch.sub(newdecl, newdata)
- else:
- newdata = newdecl + u'\n' + newdata
- return newdata.encode('utf-8')
+ if http_headers and (not acceptable_content_type):
+ if 'content-type' in http_headers:
+ msg = '%s is not an XML media type' % http_headers['content-type']
+ else:
+ msg = 'no Content-type specified'
+ error = NonXMLContentType(msg)
-def _stripDoctype(data):
- '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
+ # determine character encoding
+ known_encoding = 0
+ chardet_encoding = None
+ tried_encodings = []
+ if chardet:
+ chardet_encoding = unicode(chardet.detect(data)['encoding'] or '', 'ascii', 'ignore')
+ # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
+ for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
+ chardet_encoding, u'utf-8', u'windows-1252', u'iso-8859-2'):
+ if not proposed_encoding:
+ continue
+ if proposed_encoding in tried_encodings:
+ continue
+ tried_encodings.append(proposed_encoding)
+ try:
+ data = data.decode(proposed_encoding)
+ except (UnicodeDecodeError, LookupError):
+ pass
+ else:
+ known_encoding = 1
+ # Update the encoding in the opening XML processing instruction.
+ new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
+ if RE_XML_DECLARATION.search(data):
+ data = RE_XML_DECLARATION.sub(new_declaration, data)
+ else:
+ data = new_declaration + u'\n' + data
+ data = data.encode('utf-8')
+ break
+ # if still no luck, give up
+ if not known_encoding:
+ error = CharacterEncodingUnknown(
+ 'document encoding unknown, I tried ' +
+ '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
+ (rfc3023_encoding, xml_encoding))
+ rfc3023_encoding = u''
+ elif proposed_encoding != rfc3023_encoding:
+ error = CharacterEncodingOverride(
+ 'document declared as %s, but parsed as %s' %
+ (rfc3023_encoding, proposed_encoding))
+ rfc3023_encoding = proposed_encoding
+
+ return data, rfc3023_encoding, error
+
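The practical effect of the precedence rules can be seen by calling the function directly; a hedged sketch, assuming the patched module is importable as feedparser and using an invented document (note the Latin-1 byte in the title):

    import feedparser

    doc = ('<?xml version="1.0" encoding="iso-8859-1"?>'
           '<rss version="2.0"><channel><title>caf\xe9</title></channel></rss>')

    # application/xml: HTTP charset wins, then the XML declaration, then utf-8
    data, enc, err = feedparser.convert_to_utf8({'content-type': 'application/xml'}, doc)
    print(enc)    # iso-8859-1, taken from the XML declaration

    # text/xml with no charset: RFC 3023 forces us-ascii, which fails on the
    # Latin-1 byte, so the declared encoding is used and the override reported
    data, enc, err = feedparser.convert_to_utf8({'content-type': 'text/xml'}, doc)
    print(enc)    # iso-8859-1
    print(err)    # a CharacterEncodingOverride instance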
+# Match XML entity declarations.
+# Example: <!ENTITY copyright "(C)">
+RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
+
+# Match XML DOCTYPE declarations.
+# Example: <!DOCTYPE feed [ ]>
+RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
+
+# Match safe entity declarations.
+# This will allow hexadecimal character references through,
+# as well as text, but not arbitrary nested entities.
+# Example: cubed "³"
+# Example: copyright "(C)"
+# Forbidden: explode1 "&explode2;&explode2;"
+RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
+
+def replace_doctype(data):
+ '''Strips and replaces the DOCTYPE, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
- stripped_data is the same XML document, minus the DOCTYPE
+ stripped_data is the same XML document with a replaced DOCTYPE
'''
+
+ # Divide the document into two groups by finding the location
+ # of the first element that doesn't begin with '<?' or '<!'.
start = re.search(_s2bytes('<\w'), data)
start = start and start.start() or -1
head, data = data[:start+1], data[start+1:]
- entity_pattern = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
- entity_results=entity_pattern.findall(head)
- head = entity_pattern.sub(_s2bytes(''), head)
- doctype_pattern = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
- doctype_results = doctype_pattern.findall(head)
+ # Save and then remove all of the ENTITY declarations.
+ entity_results = RE_ENTITY_PATTERN.findall(head)
+ head = RE_ENTITY_PATTERN.sub(_s2bytes(''), head)
+
+ # Find the DOCTYPE declaration and check the feed type.
+ doctype_results = RE_DOCTYPE_PATTERN.findall(head)
doctype = doctype_results and doctype_results[0] or _s2bytes('')
- if doctype.lower().count(_s2bytes('netscape')):
- version = 'rss091n'
+ if _s2bytes('netscape') in doctype.lower():
+ version = u'rss091n'
else:
version = None
- # only allow in 'safe' inline entity definitions
- replacement=_s2bytes('')
- if len(doctype_results)==1 and entity_results:
- safe_pattern=re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
- safe_entities=filter(lambda e: safe_pattern.match(e),entity_results)
- if safe_entities:
- replacement=_s2bytes('<!DOCTYPE feed [\n <!ENTITY ') + _s2bytes('>\n <!ENTITY ').join(safe_entities) + _s2bytes('>\n]>')
- data = doctype_pattern.sub(replacement, head) + data
+ # Re-insert the safe ENTITY declarations if a DOCTYPE was found.
+ replacement = _s2bytes('')
+ if len(doctype_results) == 1 and entity_results:
+ match_safe_entities = lambda e: RE_SAFE_ENTITY_PATTERN.match(e)
+ safe_entities = filter(match_safe_entities, entity_results)
+ if safe_entities:
+ replacement = _s2bytes('<!DOCTYPE feed [\n<!ENTITY ') + _s2bytes('>\n<!ENTITY ').join(safe_entities) + _s2bytes('>\n]>')
+ data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data
- return version, data, dict(replacement and [(k.decode('utf-8'), v.decode('utf-8')) for k, v in safe_pattern.findall(replacement)])
+ # Precompute the safe entities for the loose parser.
+ safe_entities = dict((k.decode('utf-8'), v.decode('utf-8'))
+ for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement))
+ return version, data, safe_entities
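A short usage sketch of the new helper (feed snippet invented, module assumed importable as feedparser, Python 2 byte strings):

    import feedparser

    doc = ('<?xml version="1.0"?>\n'
           '<!DOCTYPE rss [\n'
           '  <!ENTITY copyright "(C) 2004">\n'
           ']>\n'
           '<rss version="2.0"><channel><title>&copyright;</title></channel></rss>')

    version, doc, entities = feedparser.replace_doctype(doc)
    print(version)    # None -- only the Netscape RSS 0.91 DOCTYPE sets 'rss091n'
    print(entities)   # {u'copyright': u'(C) 2004'}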
-def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[], request_headers={}, response_headers={}):
+def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
'''Parse a feed from a URL, file, stream, or string.
request_headers, if given, is a dict from http header name to value to add
to the request; this overrides internally generated values.
'''
+
+ if handlers is None:
+ handlers = []
+ if request_headers is None:
+ request_headers = {}
+ if response_headers is None:
+ response_headers = {}
+
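Switching the defaults to None sidesteps Python's shared-mutable-default behaviour; a tiny illustration of the trap being avoided (toy function, not feedparser code):

    def append_to(item, bucket=[]):
        # the default list is created once and shared by every call
        bucket.append(item)
        return bucket

    print(append_to(1))   # [1]
    print(append_to(2))   # [1, 2] -- state leaks between unrelated calls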
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
- if _XML_AVAILABLE:
- result['bozo'] = 0
+ result['bozo'] = 0
if not isinstance(handlers, list):
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
data = f.read()
- except Exception as e:
+ except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = None
@@ -3647,148 +3885,88 @@ def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, refer
elif response_headers:
result['headers'] = copy.deepcopy(response_headers)
+ # lowercase all of the HTTP headers for comparisons per RFC 2616
+ if 'headers' in result:
+ http_headers = dict((k.lower(), v) for k, v in result['headers'].items())
+ else:
+ http_headers = {}
+
# if feed is gzip-compressed, decompress it
- if f and data and 'headers' in result:
- if gzip and result['headers'].get('content-encoding') == 'gzip':
+ if f and data and http_headers:
+ if gzip and 'gzip' in http_headers.get('content-encoding', ''):
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
- except Exception as e:
- # Some feeds claim to be gzipped but they're not, so
- # we get garbage. Ideally, we should re-request the
- # feed without the 'Accept-encoding: gzip' header,
- # but we don't.
+ except (IOError, struct.error), e:
+ # IOError can occur if the gzip header is bad.
+ # struct.error can occur if the data is damaged.
result['bozo'] = 1
result['bozo_exception'] = e
- data = ''
- elif zlib and result['headers'].get('content-encoding') == 'deflate':
+ if isinstance(e, struct.error):
+ # A gzip header was found but the data is corrupt.
+ # Ideally, we should re-request the feed without the
+ # 'Accept-encoding: gzip' header, but we don't.
+ data = None
+ elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
try:
- data = zlib.decompress(data, -zlib.MAX_WBITS)
- except Exception as e:
- result['bozo'] = 1
- result['bozo_exception'] = e
- data = ''
+ data = zlib.decompress(data)
+ except zlib.error, e:
+ try:
+ # The data may have no headers and no checksum.
+ data = zlib.decompress(data, -15)
+ except zlib.error, e:
+ result['bozo'] = 1
+ result['bozo_exception'] = e
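The two-step deflate handling reflects what servers actually send: some emit a zlib-wrapped stream, others raw deflate with no header or checksum; a small self-contained sketch of the difference:

    import zlib

    payload = '<rss version="2.0"></rss>'
    wrapped = zlib.compress(payload)        # zlib header + deflate data + adler32
    raw = wrapped[2:-4]                     # strip header and checksum -> raw deflate

    print(zlib.decompress(wrapped))         # succeeds on the first attempt
    # zlib.decompress(raw) raises zlib.error, so fall back to a raw window:
    print(zlib.decompress(raw, -15))        # -15: no zlib header expected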
# save HTTP headers
- if 'headers' in result:
- if 'etag' in result['headers'] or 'ETag' in result['headers']:
- etag = result['headers'].get('etag', result['headers'].get('ETag'))
+ if http_headers:
+ if 'etag' in http_headers:
+ etag = http_headers.get('etag', u'')
+ if not isinstance(etag, unicode):
+ etag = etag.decode('utf-8', 'ignore')
if etag:
result['etag'] = etag
- if 'last-modified' in result['headers'] or 'Last-Modified' in result['headers']:
- modified = result['headers'].get('last-modified', result['headers'].get('Last-Modified'))
+ if 'last-modified' in http_headers:
+ modified = http_headers.get('last-modified', u'')
if modified:
- result['modified'] = _parse_date(modified)
+ result['modified'] = modified
+ result['modified_parsed'] = _parse_date(modified)
if hasattr(f, 'url'):
- result['href'] = f.url
+ if not isinstance(f.url, unicode):
+ result['href'] = f.url.decode('utf-8', 'ignore')
+ else:
+ result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'close'):
f.close()
- # there are four encodings to keep track of:
- # - http_encoding is the encoding declared in the Content-Type HTTP header
- # - xml_encoding is the encoding declared in the <?xml declaration