MOBI Input: Fix parsing of some old MOBI files that contain unclosed <p> tags leading to a hanging EPUB conversion

This commit is contained in:
Kovid Goyal 2009-04-25 08:25:39 -07:00
parent c779f7af15
commit 0140831b53
3 changed files with 25 additions and 20 deletions

View File

@@ -292,6 +292,11 @@ class MobiReader(object):
if self.verbose:
print 'Parsing HTML...'
root = html.fromstring(self.processed_html)
if root.xpath('descendant::p/descendant::p'):
from lxml.html import soupparser
self.log.warning('Markup contains unclosed <p> tags, parsing using',
'BeautifulSoup')
root = soupparser.fromstring(self.processed_html)
self.upshift_markup(root)
guides = root.xpath('//guide')
guide = guides[0] if guides else None

View File

@@ -72,7 +72,7 @@ class ZAOBAO(BasicNewsRecipe):
return soup
def parse_feeds(self):
self.log_debug(_('ZAOBAO overrided parse_feeds()'))
self.log_debug('ZAOBAO overrided parse_feeds()')
parsed_feeds = BasicNewsRecipe.parse_feeds(self)
for id, obj in enumerate(self.INDEXES):
@@ -89,7 +89,7 @@ class ZAOBAO(BasicNewsRecipe):
a_title = self.tag_to_string(a)
date = ''
description = ''
self.log_debug(_('adding %s at %s')%(a_title,a_url))
self.log_debug('adding %s at %s'%(a_title,a_url))
articles.append({
'title':a_title,
'date':date,
@@ -100,23 +100,23 @@ class ZAOBAO(BasicNewsRecipe):
pfeeds = feeds_from_index([(title, articles)], oldest_article=self.oldest_article,
max_articles_per_feed=self.max_articles_per_feed)
self.log_debug(_('adding %s to feed')%(title))
self.log_debug('adding %s to feed'%(title))
for feed in pfeeds:
self.log_debug(_('adding feed: %s')%(feed.title))
self.log_debug('adding feed: %s'%(feed.title))
feed.description = self.DESC_SENSE
parsed_feeds.append(feed)
for a, article in enumerate(feed):
self.log_debug(_('added article %s from %s')%(article.title, article.url))
self.log_debug(_('added feed %s')%(feed.title))
self.log_debug('added article %s from %s'%(article.title, article.url))
self.log_debug('added feed %s'%(feed.title))
for i, feed in enumerate(parsed_feeds):
# workaround a strange problem: Sometimes the xml encoding is not applied correctly by parse()
weired_encoding_detected = False
if not isinstance(feed.description, unicode) and self.encoding and feed.description:
self.log_debug(_('Feed %s is not encoded correctly, manually replace it')%(feed.title))
self.log_debug('Feed %s is not encoded correctly, manually replace it'%(feed.title))
feed.description = feed.description.decode(self.encoding, 'replace')
elif feed.description.find(self.DESC_SENSE) == -1 and self.encoding and feed.description:
self.log_debug(_('Feed %s is weired encoded, manually redo all')%(feed.title))
self.log_debug('Feed %s is strangely encoded, manually redo all'%(feed.title))
feed.description = feed.description.encode('cp1252', 'replace').decode(self.encoding, 'replace')
weired_encoding_detected = True
@@ -138,7 +138,7 @@ class ZAOBAO(BasicNewsRecipe):
article.text_summary = article.text_summary.encode('cp1252', 'replace').decode(self.encoding, 'replace')
if article.title == "Untitled article":
self.log_debug(_('Removing empty article %s from %s')%(article.title, article.url))
self.log_debug('Removing empty article %s from %s'%(article.title, article.url))
# remove the article
feed.articles[a:a+1] = []
return parsed_feeds