From 6b421643705fb3e575bdda1225171485ba01965a Mon Sep 17 00:00:00 2001
From: ldolse
Date: Sun, 30 Jan 2011 18:11:15 +0800
Subject: [PATCH 01/14] adjusted margins for scene break heuristics
---
src/calibre/ebooks/conversion/utils.py | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/src/calibre/ebooks/conversion/utils.py b/src/calibre/ebooks/conversion/utils.py
index 5beefb5bd9..a115e584b6 100644
--- a/src/calibre/ebooks/conversion/utils.py
+++ b/src/calibre/ebooks/conversion/utils.py
@@ -26,7 +26,7 @@ class HeuristicProcessor(object):
self.blanks_deleted = False
self.blanks_between_paragraphs = False
self.linereg = re.compile('(?<=)', re.IGNORECASE|re.DOTALL)
- self.blankreg = re.compile(r'\s*(?P]*>)\s*(?P
)', re.IGNORECASE)
+ self.blankreg = re.compile(r'\s*(?P]*>)\s*(?P
)', re.IGNORECASE)
self.anyblank = re.compile(r'\s*(?P]*>)\s*(?P
)', re.IGNORECASE)
self.multi_blank = re.compile(r'(\s*]*>\s*
){2,}(?!\s*]*>\s*
){1,}(?=\s*)(\s*]*>\s*
){1,}', re.IGNORECASE)
- def markup_spacers(match):
+ def markup_whitespaces(match):
blanks = match.group(0)
- blanks = self.blankreg.sub('\n
', blanks)
+ blanks = self.blankreg.sub('\n
', blanks)
return blanks
- html = blanks_before_headings.sub(markup_spacers, html)
- html = blanks_after_headings.sub(markup_spacers, html)
+ html = blanks_before_headings.sub(markup_whitespaces, html)
+ html = blanks_after_headings.sub(markup_whitespaces, html)
if self.html_preprocess_sections > self.min_chapters:
- html = re.sub('(?si)^.*?(?=
', html)
+ html = self.multi_blank.sub('\n
', html)
else:
- html = self.blankreg.sub('\n
', html)
+ html = self.blankreg.sub('\n
', html)
return html
@@ -489,6 +489,7 @@ class HeuristicProcessor(object):
if getattr(self.extra_opts, 'markup_chapter_headings', False):
html = self.markup_chapters(html, self.totalwords, self.blanks_between_paragraphs)
+ self.dump(html, 'after_chapter_markup')
if getattr(self.extra_opts, 'italicize_common_cases', False):
html = self.markup_italicis(html)
@@ -498,7 +499,7 @@ class HeuristicProcessor(object):
if self.blanks_between_paragraphs and getattr(self.extra_opts, 'delete_blank_paragraphs', False):
self.log.debug("deleting blank lines")
self.blanks_deleted = True
- html = self.multi_blank.sub('\n
', html)
+ html = self.multi_blank.sub('\n
', html)
html = self.blankreg.sub('', html)
# Determine line ending type
@@ -553,7 +554,7 @@ class HeuristicProcessor(object):
html = self.detect_blank_formatting(html)
html = self.detect_soft_breaks(html)
# Center separator lines
- html = re.sub(u'<(?Pp|div)[^>]*>\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(?P([*#•=✦]+\s*)+)\s*((?P=inner3)>)?\s*((?P=inner2)>)?\s*((?P=inner1)>)?\s*(?P=outer)>', '' + '\g' + '
', html)
+ html = re.sub(u'<(?Pp|div)[^>]*>\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(?P([*#•=✦]+\s*)+)\s*((?P=inner3)>)?\s*((?P=inner2)>)?\s*((?P=inner1)>)?\s*(?P=outer)>', '' + '\g' + '
', html)
#html = re.sub(']*>\s*
', '
', html)
if self.deleted_nbsps:
From e8153d5e6900df625125900c6bab539533acc502 Mon Sep 17 00:00:00 2001
From: ldolse
Date: Mon, 31 Jan 2011 01:36:08 +0800
Subject: [PATCH 02/14] merge multiple blank paragraphs
---
src/calibre/ebooks/conversion/utils.py | 44 ++++++++++++++++++++------
src/calibre/ebooks/txt/txtml.py | 2 ++
2 files changed, 37 insertions(+), 9 deletions(-)
diff --git a/src/calibre/ebooks/conversion/utils.py b/src/calibre/ebooks/conversion/utils.py
index a115e584b6..b37cd4b869 100644
--- a/src/calibre/ebooks/conversion/utils.py
+++ b/src/calibre/ebooks/conversion/utils.py
@@ -29,6 +29,7 @@ class HeuristicProcessor(object):
self.blankreg = re.compile(r'\s*(?P]*>)\s*(?P
)', re.IGNORECASE)
self.anyblank = re.compile(r'\s*(?P]*>)\s*(?P
)', re.IGNORECASE)
self.multi_blank = re.compile(r'(\s*]*>\s*
){2,}(?!\s*]*>\s*){2,}', re.IGNORECASE)
def is_pdftohtml(self, src):
return '' in src[:1000]
@@ -418,14 +419,32 @@ class HeuristicProcessor(object):
if getattr(self.extra_opts, option, False):
return True
return False
+
+ def merge_blanks(self, html, blanks_count=None):
+ single_blank = re.compile(r'(\s*]*>\s*
)', re.IGNORECASE)
+ base_em = .5 # Baseline is 1.5em per blank line, 1st line is .5 em css and 1em for the nbsp
+ em_per_line = 1.5 # Add another 1.5 em for each additional blank
+
+ def merge_matches(match):
+ to_merge = match.group(0)
+ lines = float(len(single_blank.findall(to_merge))) - 1.
+ em = base_em + (em_per_line * lines)
+ if to_merge.find('whitespace'):
+ newline = self.any_multi_blank.sub('\n
', match.group(0))
+ else:
+ newline = self.any_multi_blank.sub('\n
', match.group(0))
+ return newline
+
+ html = self.any_multi_blank.sub(merge_matches, html)
+ return html
- def detect_blank_formatting(self, html):
+ def detect_whitespace(self, html):
blanks_before_headings = re.compile(r'(\s*]*>\s*
){1,}(?=\s*)(\s*]*>\s*
){1,}', re.IGNORECASE)
def markup_whitespaces(match):
blanks = match.group(0)
- blanks = self.blankreg.sub('\n
', blanks)
+ blanks = self.blankreg.sub('\n
', blanks)
return blanks
html = blanks_before_headings.sub(markup_whitespaces, html)
html = blanks_after_headings.sub(markup_whitespaces, html)
@@ -435,9 +454,9 @@ class HeuristicProcessor(object):
def detect_soft_breaks(self, html):
if not self.blanks_deleted and self.blanks_between_paragraphs:
- html = self.multi_blank.sub('\n
', html)
+ html = self.multi_blank.sub('\n
', html)
else:
- html = self.blankreg.sub('\n
', html)
+ html = self.blankreg.sub('\n
', html)
return html
@@ -499,7 +518,7 @@ class HeuristicProcessor(object):
if self.blanks_between_paragraphs and getattr(self.extra_opts, 'delete_blank_paragraphs', False):
self.log.debug("deleting blank lines")
self.blanks_deleted = True
- html = self.multi_blank.sub('\n
', html)
+ html = self.multi_blank.sub('\n
', html)
html = self.blankreg.sub('', html)
# Determine line ending type
@@ -550,14 +569,21 @@ class HeuristicProcessor(object):
doubleheading = re.compile(r'(?P]*>.+?\s*(<(?!h\d)[^>]*>\s*)*)[^>]*>.+?)', re.IGNORECASE)
html = doubleheading.sub('\g'+'\n'+'
', html)
+ # If scene break formatting is enabled, find all blank paragraphs that definitely aren't scenebreaks,
+ # style it with the 'whitespace' class. All remaining blank lines are styled as softbreaks.
+ # Multiple sequential blank paragraphs are merged with appropriate margins
+ # If non-blank scene breaks exist they are center aligned and styled with appropriate margins.
if getattr(self.extra_opts, 'format_scene_breaks', False):
- html = self.detect_blank_formatting(html)
+ html = self.detect_whitespace(html)
html = self.detect_soft_breaks(html)
- # Center separator lines
- html = re.sub(u'<(?Pp|div)[^>]*>\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(?P([*#•=✦]+\s*)+)\s*((?P=inner3)>)?\s*((?P=inner2)>)?\s*((?P=inner1)>)?\s*(?P=outer)>', '' + '\g' + '
', html)
+ blanks_count = len(self.any_multi_blank.findall(html))
+ if blanks_count >= 1:
+ html = self.merge_blanks(html, blanks_count)
+ # Center separator lines, use a bit larger margin in this case
+ html = re.sub(u'<(?Pp|div)[^>]*>\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(?P([*#•=✦]+\s*)+)\s*((?P=inner3)>)?\s*((?P=inner2)>)?\s*((?P=inner1)>)?\s*(?P=outer)>', '' + '\g' + '
', html)
#html = re.sub(']*>\s*
', '
', html)
if self.deleted_nbsps:
- # put back non-breaking spaces in empty paragraphs to preserve original formatting
+ # put back non-breaking spaces in empty paragraphs so they render correctly
html = self.anyblank.sub('\n'+r'\g'+u'\u00a0'+r'\g', html)
return html
diff --git a/src/calibre/ebooks/txt/txtml.py b/src/calibre/ebooks/txt/txtml.py
index 00992a8612..bf33e5540a 100644
--- a/src/calibre/ebooks/txt/txtml.py
+++ b/src/calibre/ebooks/txt/txtml.py
@@ -222,6 +222,8 @@ class TXTMLizer(object):
# Scene breaks.
if tag == 'hr':
text.append('\n\n* * *\n\n')
+ elif style['margin-top']:
+ text.append('\n\n' + '\n' * round(style['margin-top']))
# Process tags that contain text.
if hasattr(elem, 'text') and elem.text:
From 31c277880e6fce5b2d99e8fdfdede943804b6917 Mon Sep 17 00:00:00 2001
From: ldolse
Date: Mon, 31 Jan 2011 09:39:28 +0800
Subject: [PATCH 03/14] scene break detection to detect any repeating non-word
character
---
src/calibre/ebooks/conversion/utils.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/calibre/ebooks/conversion/utils.py b/src/calibre/ebooks/conversion/utils.py
index b37cd4b869..d0dc81405b 100644
--- a/src/calibre/ebooks/conversion/utils.py
+++ b/src/calibre/ebooks/conversion/utils.py
@@ -580,10 +580,10 @@ class HeuristicProcessor(object):
if blanks_count >= 1:
html = self.merge_blanks(html, blanks_count)
# Center separator lines, use a bit larger margin in this case
- html = re.sub(u'<(?Pp|div)[^>]*>\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(?P([*#•=✦]+\s*)+)\s*((?P=inner3)>)?\s*((?P=inner2)>)?\s*((?P=inner1)>)?\s*(?P=outer)>', '' + '\g' + '
', html)
+ html = re.sub(u'<(?Pp|div)[^>]*>\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(?![\w\'\"])(?P((?P(?!\s)\W)\s*(?P=breakchar)?)+)\s*((?P=inner3)>)?\s*((?P=inner2)>)?\s*((?P=inner1)>)?\s*(?P=outer)>', '' + '\g' + '
', html)
#html = re.sub(']*>\s*
', '
', html)
if self.deleted_nbsps:
# put back non-breaking spaces in empty paragraphs so they render correctly
html = self.anyblank.sub('\n'+r'\g'+u'\u00a0'+r'\g', html)
- return html
+ return html
\ No newline at end of file
From a96c73480d6a014e0b446c5003d773c8c48bb022 Mon Sep 17 00:00:00 2001
From: ldolse
Date: Mon, 31 Jan 2011 16:19:47 +0800
Subject: [PATCH 04/14] fixed overmatching/substitution issue in italicize
function
---
src/calibre/ebooks/conversion/utils.py | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/src/calibre/ebooks/conversion/utils.py b/src/calibre/ebooks/conversion/utils.py
index d0dc81405b..74afbe7a42 100644
--- a/src/calibre/ebooks/conversion/utils.py
+++ b/src/calibre/ebooks/conversion/utils.py
@@ -159,7 +159,7 @@ class HeuristicProcessor(object):
]
for word in ITALICIZE_WORDS:
- html = re.sub(r'(?<=\s|>)' + word + r'(?=\s|<)', '%s' % word, html)
+ html = re.sub(r'(?<=\s|>)' + re.escape(word) + r'(?=\s|<)', '%s' % word, html)
for pat in ITALICIZE_STYLE_PATS:
html = re.sub(pat, lambda mo: '%s' % mo.group('words'), html)
@@ -375,8 +375,8 @@ class HeuristicProcessor(object):
html = re.sub(ur'\s*\s*', ' ', html)
# Delete microsoft 'smart' tags
html = re.sub('(?i)?st1:\w+>', '', html)
- # Delete self closing paragraph tags
- html = re.sub('', '', html)
+ # Re-open self closing paragraph tags
+ html = re.sub('/]*/>', '
', html)
# Get rid of empty span, bold, font, em, & italics tags
html = re.sub(r"\s*]*>\s*(]*>\s*){0,2}\s*\s*", " ", html)
html = re.sub(r"\s*<(font|[ibu]|em|strong)[^>]*>\s*(<(font|[ibu]|em|strong)[^>]*>\s*(font|[ibu]|em|strong)>\s*){0,2}\s*(font|[ibu]|em|strong)>", " ", html)
@@ -463,7 +463,6 @@ class HeuristicProcessor(object):
def __call__(self, html):
self.log.debug("********* Heuristic processing HTML *********")
-
# Count the words in the document to estimate how many chapters to look for and whether
# other types of processing are attempted
try:
@@ -477,7 +476,7 @@ class HeuristicProcessor(object):
# Arrange line feeds and tags so the line_length and no_markup functions work correctly
html = self.arrange_htm_line_endings(html)
-
+ self.dump(html, 'after_arrange_line_endings')
if self.cleanup_required():
###### Check Markup ######
#
@@ -580,7 +579,9 @@ class HeuristicProcessor(object):
if blanks_count >= 1:
html = self.merge_blanks(html, blanks_count)
# Center separator lines, use a bit larger margin in this case
- html = re.sub(u'<(?Pp|div)[^>]*>\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(?![\w\'\"])(?P((?P(?!\s)\W)\s*(?P=breakchar)?)+)\s*((?P=inner3)>)?\s*((?P=inner2)>)?\s*((?P=inner1)>)?\s*(?P=outer)>', '' + '\g' + '
', html)
+ scene_break = re.compile(r'<(?Pp|div)[^>]*>\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(?![\w\'\"])(?P((?P((?!\s)\W))\s*(?P=break_char)?)+)\s*((?P=inner3)>)?\s*((?P=inner2)>)?\s*((?P=inner1)>)?\s*(?P=outer)>', re.IGNORECASE|re.UNICODE)
+ print "found "+str(len(scene_break.findall(html)))+" scene breaks"
+ html = scene_break.sub('' + '\g' + '
', html)
#html = re.sub(']*>\s*
', '
', html)
if self.deleted_nbsps:
From 5596f506a7a511eea83f3dad86e93ac87fb9f757 Mon Sep 17 00:00:00 2001
From: ldolse
Date: Tue, 1 Feb 2011 01:51:22 +0800
Subject: [PATCH 05/14] improved scene break/whitespace formatting
---
src/calibre/ebooks/conversion/utils.py | 60 +++++++++++++++++++-------
1 file changed, 44 insertions(+), 16 deletions(-)
diff --git a/src/calibre/ebooks/conversion/utils.py b/src/calibre/ebooks/conversion/utils.py
index 74afbe7a42..77086efd97 100644
--- a/src/calibre/ebooks/conversion/utils.py
+++ b/src/calibre/ebooks/conversion/utils.py
@@ -30,6 +30,9 @@ class HeuristicProcessor(object):
self.anyblank = re.compile(r'\s*(?P]*>)\s*(?P
)', re.IGNORECASE)
self.multi_blank = re.compile(r'(\s*]*>\s*
){2,}(?!\s*]*>\s*){2,}', re.IGNORECASE)
+ self.line_open = "<(?Pp|div)[^>]*>\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*"
+ self.line_close = "((?P=inner3)>)?\s*((?P=inner2)>)?\s*((?P=inner1)>)?\s*(?P=outer)>"
+ self.single_blank = re.compile(r'(\s*]*>\s*
)', re.IGNORECASE)
def is_pdftohtml(self, src):
return '' in src[:1000]
@@ -188,19 +191,17 @@ class HeuristicProcessor(object):
# Build the Regular Expressions in pieces
init_lookahead = "(?=<(p|div))"
- chapter_line_open = "<(?Pp|div)[^>]*>\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*"
+ chapter_line_open = self.line_open
title_line_open = "<(?Pp|div)[^>]*>\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*"
chapter_header_open = r"(?P"
title_header_open = r"(?P"
chapter_header_close = ")\s*"
title_header_close = ")"
- chapter_line_close = "((?P=inner3)>)?\s*((?P=inner2)>)?\s*((?P=inner1)>)?\s*(?P=outer)>"
+ chapter_line_close = self.line_close
title_line_close = "((?P=inner6)>)?\s*((?P=inner5)>)?\s*((?P=inner4)>)?\s*(?P=outer2)>"
is_pdftohtml = self.is_pdftohtml(html)
if is_pdftohtml:
- chapter_line_open = "<(?Pp)[^>]*>(\s*<[ibu][^>]*>)?\s*"
- chapter_line_close = "\s*([ibu][^>]*>\s*)?(?P=outer)>"
title_line_open = "<(?Pp)[^>]*>\s*"
title_line_close = "\s*(?P=outer2)>"
@@ -382,6 +383,8 @@ class HeuristicProcessor(object):
html = re.sub(r"\s*<(font|[ibu]|em|strong)[^>]*>\s*(<(font|[ibu]|em|strong)[^>]*>\s*(font|[ibu]|em|strong)>\s*){0,2}\s*(font|[ibu]|em|strong)>", " ", html)
html = re.sub(r"\s*]*>\s*(]>\s*){0,2}\s*\s*", " ", html)
html = re.sub(r"\s*<(font|[ibu]|em|strong)[^>]*>\s*(<(font|[ibu]|em|strong)[^>]*>\s*(font|[ibu]|em|strong)>\s*){0,2}\s*(font|[ibu]|em|strong)>", " ", html)
+ # Empty heading tags
+ html = re.sub(r'(?i)\s*', '', html)
self.deleted_nbsps = True
return html
@@ -421,13 +424,12 @@ class HeuristicProcessor(object):
return False
def merge_blanks(self, html, blanks_count=None):
- single_blank = re.compile(r'(\s*]*>\s*
)', re.IGNORECASE)
base_em = .5 # Baseline is 1.5em per blank line, 1st line is .5 em css and 1em for the nbsp
em_per_line = 1.5 # Add another 1.5 em for each additional blank
def merge_matches(match):
to_merge = match.group(0)
- lines = float(len(single_blank.findall(to_merge))) - 1.
+ lines = float(len(self.single_blank.findall(to_merge))) - 1.
em = base_em + (em_per_line * lines)
if to_merge.find('whitespace'):
newline = self.any_multi_blank.sub('\n
', match.group(0))
@@ -439,17 +441,37 @@ class HeuristicProcessor(object):
return html
def detect_whitespace(self, html):
- blanks_before_headings = re.compile(r'(\s*]*>\s*
){1,}(?=\s*)(\s*]*>\s*
){1,}', re.IGNORECASE)
+ blanks_around_headings = re.compile(r'(?P(]*>\s*
\s*){1,}\s*)?(?P\d+)[^>]*>.*?)(?P\s*(]*>\s*
\s*){1,})?', re.IGNORECASE)
+ blanks_n_nopunct = re.compile(r'(?P(]*>\s*
\s*){1,}\s*)?]*>\s*(<(span|[ibu]|em|strong|font)[^>]*>\s*)*.{1,100}?[^\W]((span|[ibu]|em|strong|font)>\s*)*
(?P\s*(]*>\s*
\s*){1,})?', re.IGNORECASE)
+ def merge_header_whitespace(match):
+ initblanks = match.group('initparas')
+ endblanks = match.group('initparas')
+ heading = match.group('heading')
+ top_margin = ''
+ bottom_margin = ''
+ if initblanks is not None:
+ top_margin = 'margin=top:'+str(len(self.single_blank.findall(initblanks)))+'em;'
+ if endblanks is not None:
+ bottom_margin = 'margin=top:'+str(len(self.single_blank.findall(initblanks)))+'em;'
+
+ if initblanks == None and endblanks == None:
+ return heading
+ else:
+ heading = re.sub('(?i)\d+)[^>]*>', ''+' style="'+top_margin+bottom_margin+'">', heading)
+ return heading
+
+ html = blanks_around_headings.sub(merge_header_whitespace, html)
+
def markup_whitespaces(match):
- blanks = match.group(0)
- blanks = self.blankreg.sub('\n
', blanks)
- return blanks
- html = blanks_before_headings.sub(markup_whitespaces, html)
- html = blanks_after_headings.sub(markup_whitespaces, html)
+ blanks = match.group(0)
+ blanks = self.blankreg.sub('\n
', blanks)
+ return blanks
+
+ html = blanks_n_nopunct.sub(markup_whitespaces, html)
if self.html_preprocess_sections > self.min_chapters:
html = re.sub('(?si)^.*?(?=, change to empty paragraphs
#html = re.sub('
]*>', u'\u00a0
', html)
@@ -558,7 +585,7 @@ class HeuristicProcessor(object):
if self.html_preprocess_sections < self.min_chapters and getattr(self.extra_opts, 'markup_chapter_headings', False):
self.log.debug("Looking for more split points based on punctuation,"
" currently have " + unicode(self.html_preprocess_sections))
- chapdetect3 = re.compile(r'<(?P(p|div)[^>]*)>\s*(?P(]*>)?\s*(?!([*#•]+\s*)+)(<[ibu][^>]*>){0,2}\s*(]*>)?\s*(<[ibu][^>]*>){0,2}\s*(]*>)?\s*.?(?=[a-z#\-*\s]+<)([a-z#-*]+\s*){1,5}\s*\s*()?([ibu]>){0,2}\s*()?\s*([ibu]>){0,2}\s*()?\s*(p|div)>)', re.IGNORECASE)
+ chapdetect3 = re.compile(r'<(?P(p|div)[^>]*)>\s*(?P(]*>)?\s*(?!([\W]+\s*)+)(<[ibu][^>]*>){0,2}\s*(]*>)?\s*(<[ibu][^>]*>){0,2}\s*(]*>)?\s*.?(?=[a-z#\-*\s]+<)([a-z#-*]+\s*){1,5}\s*\s*()?([ibu]>){0,2}\s*()?\s*([ibu]>){0,2}\s*()?\s*(p|div)>)', re.IGNORECASE)
html = chapdetect3.sub(self.chapter_break, html)
if getattr(self.extra_opts, 'renumber_headings', False):
@@ -579,9 +606,10 @@ class HeuristicProcessor(object):
if blanks_count >= 1:
html = self.merge_blanks(html, blanks_count)
# Center separator lines, use a bit larger margin in this case
- scene_break = re.compile(r'<(?Pp|div)[^>]*>\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(?![\w\'\"])(?P((?P((?!\s)\W))\s*(?P=break_char)?)+)\s*((?P=inner3)>)?\s*((?P=inner2)>)?\s*((?P=inner1)>)?\s*(?P=outer)>', re.IGNORECASE|re.UNICODE)
+ scene_break_regex = self.line_open+'(?![\w\'\"])(?P((?P((?!\s)\W))\s*(?P=break_char)?)+)\s*'+self.line_close
+ scene_break = re.compile(r'%s' % scene_break_regex, re.IGNORECASE|re.UNICODE)
print "found "+str(len(scene_break.findall(html)))+" scene breaks"
- html = scene_break.sub('' + '\g' + '
', html)
+ html = scene_break.sub('' + '\g' + '
', html)
#html = re.sub(']*>\s*
', '
', html)
if self.deleted_nbsps:
From c47bacb016eabdd6870d2a3409b1d2f2ba29f8eb Mon Sep 17 00:00:00 2001
From: Kovid Goyal
Date: Mon, 31 Jan 2011 17:12:22 -0700
Subject: [PATCH 06/14] Update 20 Minutos
---
resources/recipes/20_minutos.recipe | 27 ++++++++++++++++++---------
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/resources/recipes/20_minutos.recipe b/resources/recipes/20_minutos.recipe
index cb3002a76c..106c0dcffa 100644
--- a/resources/recipes/20_minutos.recipe
+++ b/resources/recipes/20_minutos.recipe
@@ -1,25 +1,25 @@
-# -*- coding: utf-8
__license__ = 'GPL v3'
__author__ = 'Luis Hernandez'
__copyright__ = 'Luis Hernandez'
-description = 'Periódico gratuito en español - v0.8 - 27 Jan 2011'
+__version__ = 'v0.85'
+__date__ = '31 January 2011'
'''
www.20minutos.es
'''
-
+import re
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1294946868(BasicNewsRecipe):
- title = u'20 Minutos'
+ title = u'20 Minutos new'
publisher = u'Grupo 20 Minutos'
- __author__ = 'Luis Hernández'
- description = 'Periódico gratuito en español'
+ __author__ = 'Luis Hernandez'
+ description = 'Free spanish newspaper'
cover_url = 'http://estaticos.20minutos.es/mmedia/especiales/corporativo/css/img/logotipos_grupo20minutos.gif'
- oldest_article = 5
+ oldest_article = 2
max_articles_per_feed = 100
remove_javascript = True
@@ -29,6 +29,7 @@ class AdvancedUserRecipe1294946868(BasicNewsRecipe):
encoding = 'ISO-8859-1'
language = 'es'
timefmt = '[%a, %d %b, %Y]'
+ remove_empty_feeds = True
keep_only_tags = [
dict(name='div', attrs={'id':['content','vinetas',]})
@@ -43,13 +44,21 @@ class AdvancedUserRecipe1294946868(BasicNewsRecipe):
remove_tags = [
dict(name='ol', attrs={'class':['navigation',]})
,dict(name='span', attrs={'class':['action']})
- ,dict(name='div', attrs={'class':['twitter comments-list hidden','related-news','col','photo-gallery','calendario','article-comment','postto estirar','otras_vinetas estirar','kment','user-actions']})
+ ,dict(name='div', attrs={'class':['twitter comments-list hidden','related-news','col','photo-gallery','photo-gallery side-art-block','calendario','article-comment','postto estirar','otras_vinetas estirar','kment','user-actions']})
,dict(name='div', attrs={'id':['twitter-destacados','eco-tabs','inner','vineta_calendario','vinetistas clearfix','otras_vinetas estirar','MIN1','main','SUP1','INT']})
,dict(name='ul', attrs={'class':['article-user-actions','stripped-list']})
,dict(name='ul', attrs={'id':['site-links']})
,dict(name='li', attrs={'class':['puntuacion','enviar','compartir']})
]
+ extra_css = """
+ p{text-align: justify; font-size: 100%}
+ body{ text-align: left; font-size:100% }
+ h3{font-family: sans-serif; font-size:150%; font-weight:bold; text-align: justify; }
+ """
+
+ preprocess_regexps = [(re.compile(r'', re.DOTALL), lambda m: '')]
+
feeds = [
(u'Portada' , u'http://www.20minutos.es/rss/')
,(u'Nacional' , u'http://www.20minutos.es/rss/nacional/')
@@ -65,6 +74,6 @@ class AdvancedUserRecipe1294946868(BasicNewsRecipe):
,(u'Empleo' , u'http://www.20minutos.es/rss/empleo/')
,(u'Cine' , u'http://www.20minutos.es/rss/cine/')
,(u'Musica' , u'http://www.20minutos.es/rss/musica/')
- ,(u'Vinetas' , u'http://www.20minutos.es/rss/vinetas/')
+ ,(u'Vinetas' , u'http://www.20minutos.es/rss/vinetas/')
,(u'Comunidad20' , u'http://www.20minutos.es/rss/zona20/')
]
From 527f11e32e5d42b2cdda5a3439189dad9dc154ff Mon Sep 17 00:00:00 2001
From: Kovid Goyal
Date: Mon, 31 Jan 2011 17:15:02 -0700
Subject: [PATCH 07/14] Cinco Dias by Luis Hernandez
---
resources/images/news/latimes.png | Bin 0 -> 358 bytes
resources/recipes/cinco_dias.recipe | 71 ++++++++++++++++++++++++++++
2 files changed, 71 insertions(+)
create mode 100644 resources/images/news/latimes.png
create mode 100644 resources/recipes/cinco_dias.recipe
diff --git a/resources/images/news/latimes.png b/resources/images/news/latimes.png
new file mode 100644
index 0000000000000000000000000000000000000000..62bb4d0b8a2586c4884c4ccbac5b481bff096309
GIT binary patch
literal 358
zcmeAS@N?(olHy`uVBq!ia0vp^0wB!60wlNoGJgf6n3BBRT^JZv^(q?yd7K3vk;OpT
z1B~5HX4`=T%L*LRfizezL(H+Yhk=~Qo-U3d8t11@4CHM#5OL)XZfNmb!6K6*q{XV9
z!gat=b%N#%84>n$d4~)!g@@msFMYq~`s~|WmREDNqm`6p1$K&ee13VRF63-l_VwSa
z99j7WD{t=D8onTOa)<9t5pMp*UpJnJuc(w`W)}Wn!KTJ$ahu`&(}j~4JlHxrRq)D-
zH7WbWLsmYMf6?2*WJZb$#?VG66YWwZt`|BqgyV)hf9t6-Y4{85kPs
z8W`#t8-^GfS{WKxnVRbwm|GbbFwC#DN70a*pOTqYiK4;C%FxKlzyhM7ER_8(Py>Uf
LtDnm{r-UW|G&pfi
literal 0
HcmV?d00001
diff --git a/resources/recipes/cinco_dias.recipe b/resources/recipes/cinco_dias.recipe
new file mode 100644
index 0000000000..40241aff5c
--- /dev/null
+++ b/resources/recipes/cinco_dias.recipe
@@ -0,0 +1,71 @@
+__license__ = 'GPL v3'
+__author__ = 'Luis Hernandez'
+__copyright__ = 'Luis Hernandez'
+__version__ = 'v1.2'
+__date__ = '31 January 2011'
+
+'''
+http://www.cincodias.com/
+'''
+
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class AdvancedUserRecipe1294946868(BasicNewsRecipe):
+
+ title = u'Cinco Dias'
+ publisher = u'Grupo Prisa'
+
+ __author__ = 'Luis Hernandez'
+ description = 'spanish web about money and bussiness, free edition'
+
+ cover_url = 'http://www.prisa.com/images/logos/logo_cinco_dias.gif'
+ oldest_article = 2
+ max_articles_per_feed = 100
+
+ remove_javascript = True
+ no_stylesheets = True
+ use_embedded_content = False
+
+ language = 'es'
+ remove_empty_feeds = True
+ encoding = 'ISO-8859-1'
+ timefmt = '[%a, %d %b, %Y]'
+
+ keep_only_tags = [
+ dict(name='div', attrs={'class':['cab_articulo cab_noticia','pos_3','txt_noticia','mod_despiece']})
+ ,dict(name='p', attrs={'class':['cintillo']})
+ ]
+
+ remove_tags_before = dict(name='div' , attrs={'class':['publi_h']})
+ remove_tags_after = dict(name='div' , attrs={'class':['tab_util util_estadisticas']})
+
+ remove_tags = [
+ dict(name='div', attrs={'class':['util-1','util-2','util-3','inner estirar','inner1','inner2','inner3','cont','tab_util util_estadisticas','tab_util util_enviar','mod_list_inf','mod_similares','mod_divisas','mod_sectores','mod_termometro','mod post','mod_img','mod_txt','nivel estirar','barra estirar','info_brujula btnBrujula','utilidad_brujula estirar']})
+ ,dict(name='li', attrs={'class':['lnk-fcbook','lnk-retweet','lnk-meneame','desplegable','comentarios','list-options','estirar']})
+ ,dict(name='ul', attrs={'class':['lista-izquierda','list-options','estirar']})
+ ,dict(name='p', attrs={'class':['autor']})
+ ]
+
+ extra_css = """
+ p{text-align: justify; font-size: 100%}
+ body{ text-align: left; font-size:100% }
+ h1{font-family: sans-serif; font-size:150%; font-weight:bold; text-align: justify; }
+ h3{font-family: sans-serif; font-size:100%; font-style: italic; text-align: justify; }
+ """
+
+ feeds = [
+ (u'Ultima Hora' , u'http://www.cincodias.com/rss/feed.html?feedId=17029')
+ ,(u'Empresas' , u'http://www.cincodias.com/rss/feed.html?feedId=19')
+ ,(u'Mercados' , u'http://www.cincodias.com/rss/feed.html?feedId=20')
+ ,(u'Economia' , u'http://www.cincodias.com/rss/feed.html?feedId=21')
+ ,(u'Tecnorama' , u'http://www.cincodias.com/rss/feed.html?feedId=17230')
+ ,(u'Tecnologia' , u'http://www.cincodias.com/rss/feed.html?feedId=17106')
+ ,(u'Finanzas Personales' , u'http://www.cincodias.com/rss/feed.html?feedId=22')
+ ,(u'Fiscalidad' , u'http://www.cincodias.com/rss/feed.html?feedId=17107')
+ ,(u'Vivienda' , u'http://www.cincodias.com/rss/feed.html?feedId=17108')
+ ,(u'Tendencias' , u'http://www.cincodias.com/rss/feed.html?feedId=17109')
+ ,(u'Empleo' , u'http://www.cincodias.com/rss/feed.html?feedId=17110')
+ ,(u'IBEX 35' , u'http://www.cincodias.com/rss/feed.html?feedId=17125')
+ ,(u'Sectores' , u'http://www.cincodias.com/rss/feed.html?feedId=17126')
+ ,(u'Opinion' , u'http://www.cincodias.com/rss/feed.html?feedId=17105')
+ ]
From 360fd374c77d5d3f13c1b98b341a37a809154b72 Mon Sep 17 00:00:00 2001
From: Kovid Goyal
Date: Mon, 31 Jan 2011 18:01:13 -0700
Subject: [PATCH 08/14] ...
---
src/calibre/manual/faq.rst | 2 ++
1 file changed, 2 insertions(+)
diff --git a/src/calibre/manual/faq.rst b/src/calibre/manual/faq.rst
index 849ded82c9..59f6a9b88d 100644
--- a/src/calibre/manual/faq.rst
+++ b/src/calibre/manual/faq.rst
@@ -391,6 +391,8 @@ Take your pick:
* A tribute to the SONY Librie which was the first e-ink based e-book reader
* My wife chose it ;-)
+|app| is pronounced as cal-i-ber *not* ca-libre. If you're wondering, |app| is the British/commonwealth spelling for caliber. Being Indian, that's the natural spelling for me.
+
Why does |app| show only some of my fonts on OS X?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|app| embeds fonts in ebook files it creates. E-book files support embedding only TrueType (.ttf) fonts. Most fonts on OS X systems are in .dfont format, thus they cannot be embedded. |app| shows only TrueType fonts found on your system. You can obtain many TrueType fonts on the web. Simply download the .ttf files and add them to the Library/Fonts directory in your home directory.
From 5d4c7388629914e40c122f17a0106a363de3f810 Mon Sep 17 00:00:00 2001
From: Kovid Goyal
Date: Mon, 31 Jan 2011 18:58:04 -0700
Subject: [PATCH 09/14] Fix #8672 (Converted format disappears while adding a
new format)
---
src/calibre/gui2/dialogs/metadata_single.py | 8 ++++++--
src/calibre/gui2/metadata/basic_widgets.py | 9 ++++++---
2 files changed, 12 insertions(+), 5 deletions(-)
diff --git a/src/calibre/gui2/dialogs/metadata_single.py b/src/calibre/gui2/dialogs/metadata_single.py
index 7a8e4ea8d0..fa20658c12 100644
--- a/src/calibre/gui2/dialogs/metadata_single.py
+++ b/src/calibre/gui2/dialogs/metadata_single.py
@@ -429,10 +429,12 @@ class MetadataSingleDialog(ResizableDialog, Ui_MetadataSingleDialog):
old_extensions.add(ext)
for ext in new_extensions:
self.db.add_format(self.row, ext, open(paths[ext], 'rb'), notify=False)
- db_extensions = set([f.lower() for f in self.db.formats(self.row).split(',')])
+ dbfmts = self.db.formats(self.row)
+ db_extensions = set([f.lower() for f in (dbfmts.split(',') if dbfmts
+ else [])])
extensions = new_extensions.union(old_extensions)
for ext in db_extensions:
- if ext not in extensions:
+ if ext not in extensions and ext in self.original_formats:
self.db.remove_format(self.row, ext, notify=False)
def show_format(self, item, *args):
@@ -576,6 +578,7 @@ class MetadataSingleDialog(ResizableDialog, Ui_MetadataSingleDialog):
self.orig_date = qt_to_dt(self.date.date())
exts = self.db.formats(row)
+ self.original_formats = []
if exts:
exts = exts.split(',')
for ext in exts:
@@ -586,6 +589,7 @@ class MetadataSingleDialog(ResizableDialog, Ui_MetadataSingleDialog):
if size is None:
continue
Format(self.formats, ext, size, timestamp=timestamp)
+ self.original_formats.append(ext.lower())
self.initialize_combos()
diff --git a/src/calibre/gui2/metadata/basic_widgets.py b/src/calibre/gui2/metadata/basic_widgets.py
index 590a8be3bb..d3fa5958ab 100644
--- a/src/calibre/gui2/metadata/basic_widgets.py
+++ b/src/calibre/gui2/metadata/basic_widgets.py
@@ -472,6 +472,7 @@ class FormatsManager(QWidget): # {{{
def initialize(self, db, id_):
self.changed = False
exts = db.formats(id_, index_is_id=True)
+ self.original_val = set([])
if exts:
exts = exts.split(',')
for ext in exts:
@@ -482,6 +483,7 @@ class FormatsManager(QWidget): # {{{
if size is None:
continue
Format(self.formats, ext, size, timestamp=timestamp)
+ self.original_val.add(ext.lower())
def commit(self, db, id_):
if not self.changed:
@@ -500,11 +502,12 @@ class FormatsManager(QWidget): # {{{
for ext in new_extensions:
db.add_format(id_, ext, open(paths[ext], 'rb'), notify=False,
index_is_id=True)
- db_extensions = set([f.lower() for f in db.formats(id_,
- index_is_id=True).split(',')])
+ dbfmts = db.formats(id_, index_is_id=True)
+ db_extensions = set([f.lower() for f in (dbfmts.split(',') if dbfmts
+ else [])])
extensions = new_extensions.union(old_extensions)
for ext in db_extensions:
- if ext not in extensions:
+ if ext not in extensions and ext in self.original_val:
db.remove_format(id_, ext, notify=False, index_is_id=True)
self.changed = False
From d2ba1812bb0b0d9c95acd6c0e22287ce47502bc9 Mon Sep 17 00:00:00 2001
From: Kovid Goyal
Date: Mon, 31 Jan 2011 20:09:26 -0700
Subject: [PATCH 10/14] Initial import of new metadata download framework
---
src/calibre/ebooks/metadata/sources/base.py | 61 +++++
src/calibre/ebooks/metadata/sources/google.py | 215 ++++++++++++++++++
2 files changed, 276 insertions(+)
create mode 100644 src/calibre/ebooks/metadata/sources/base.py
create mode 100644 src/calibre/ebooks/metadata/sources/google.py
diff --git a/src/calibre/ebooks/metadata/sources/base.py b/src/calibre/ebooks/metadata/sources/base.py
new file mode 100644
index 0000000000..89ad8a7956
--- /dev/null
+++ b/src/calibre/ebooks/metadata/sources/base.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
+
+__license__ = 'GPL v3'
+__copyright__ = '2011, Kovid Goyal '
+__docformat__ = 'restructuredtext en'
+
+import re
+
+from calibre.customize import Plugin
+
+class Source(Plugin):
+
+ type = _('Metadata source')
+ author = 'Kovid Goyal'
+
+ supported_platforms = ['windows', 'osx', 'linux']
+
+ result_of_identify_is_complete = True
+
+ def get_author_tokens(self, authors):
+ '''Take a list of authors and return a list of tokens useful for
+ an AND search query'''
+ # Leave ' in there for Irish names
+ pat = re.compile(r'[-,:;+!@#$%^&*(){}.`~"\s\[\]/]')
+ for au in authors:
+ for tok in au.split():
+ yield pat.sub('', tok)
+
+ def split_jobs(self, jobs, num):
+ 'Split a list of jobs into at most num groups, as evenly as possible'
+ groups = [[] for i in range(num)]
+ jobs = list(jobs)
+ while jobs:
+ for gr in groups:
+ try:
+ job = jobs.pop()
+ except IndexError:
+ break
+ gr.append(job)
+ return [g for g in groups if g]
+
+ def identify(self, log, result_queue, abort, title=None, authors=None, identifiers={}):
+ '''
+ Identify a book by its title/author/isbn/etc.
+
+ :param log: A log object, use it to output debugging information/errors
+ :param result_queue: A result Queue, results should be put into it.
+ Each result is a Metadata object
+ :param abort: If abort.is_set() returns True, abort further processing
+ and return as soon as possible
+ :param title: The title of the book, can be None
+ :param authors: A list of authors of the book, can be None
+ :param identifiers: A dictionary of other identifiers, most commonly
+ {'isbn':'1234...'}
+ :return: None if no errors occurred, otherwise a unicode representation
+ of the error suitable for showing to the user
+
+ '''
+ return None
+
diff --git a/src/calibre/ebooks/metadata/sources/google.py b/src/calibre/ebooks/metadata/sources/google.py
new file mode 100644
index 0000000000..1a3bf6d516
--- /dev/null
+++ b/src/calibre/ebooks/metadata/sources/google.py
@@ -0,0 +1,215 @@
+#!/usr/bin/env python
+# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
+
+__license__ = 'GPL v3'
+__copyright__ = '2011, Kovid Goyal '
+__docformat__ = 'restructuredtext en'
+
+import time
+from urllib import urlencode
+from functools import partial
+from threading import Thread
+
+from lxml import etree
+
+from calibre.ebooks.metadata.sources import Source
+from calibre.ebooks.metadata.book.base import Metadata
+from calibre.utils.date import parse_date, utcnow
+from calibre import browser, as_unicode
+
+NAMESPACES = {
+ 'openSearch':'http://a9.com/-/spec/opensearchrss/1.0/',
+ 'atom' : 'http://www.w3.org/2005/Atom',
+ 'dc': 'http://purl.org/dc/terms'
+ }
+XPath = partial(etree.XPath, namespaces=NAMESPACES)
+
+total_results = XPath('//openSearch:totalResults')
+start_index = XPath('//openSearch:startIndex')
+items_per_page = XPath('//openSearch:itemsPerPage')
+entry = XPath('//atom:entry')
+entry_id = XPath('descendant::atom:id')
+creator = XPath('descendant::dc:creator')
+identifier = XPath('descendant::dc:identifier')
+title = XPath('descendant::dc:title')
+date = XPath('descendant::dc:date')
+publisher = XPath('descendant::dc:publisher')
+subject = XPath('descendant::dc:subject')
+description = XPath('descendant::dc:description')
+language = XPath('descendant::dc:language')
+
+
+
+def to_metadata(browser, log, entry_):
+
+ def get_text(extra, x):
+ try:
+ ans = x(extra)
+ if ans:
+ ans = ans[0].text
+ if ans and ans.strip():
+ return ans.strip()
+ except:
+ log.exception('Programming error:')
+ return None
+
+
+ id_url = entry_id(entry_)[0].text
+ title_ = ': '.join([x.text for x in title(entry_)]).strip()
+ authors = [x.text.strip() for x in creator(entry_) if x.text]
+ if not authors:
+ authors = [_('Unknown')]
+ if not id_url or not title_:
+ # Silently discard this entry
+ return None
+
+ mi = Metadata(title_, authors)
+ try:
+ raw = browser.open(id_url).read()
+ feed = etree.fromstring(raw)
+ extra = entry(feed)[0]
+ except:
+ log.exception('Failed to get additional details for', mi.title)
+ return mi
+
+ mi.comments = get_text(extra, description)
+ #mi.language = get_text(extra, language)
+ mi.publisher = get_text(extra, publisher)
+
+ # Author sort
+ for x in creator(extra):
+ for key, val in x.attrib.items():
+ if key.endswith('file-as') and val and val.strip():
+ mi.author_sort = val
+ break
+ # ISBN
+ isbns = []
+ for x in identifier(extra):
+ t = str(x.text).strip()
+ if t[:5].upper() in ('ISBN:', 'LCCN:', 'OCLC:'):
+ if t[:5].upper() == 'ISBN:':
+ isbns.append(t[5:])
+ if isbns:
+ mi.isbn = sorted(isbns, key=len)[-1]
+
+ # Tags
+ try:
+ btags = [x.text for x in subject(extra) if x.text]
+ tags = []
+ for t in btags:
+ tags.extend([y.strip() for y in t.split('/')])
+ tags = list(sorted(list(set(tags))))
+ except:
+ log.exception('Failed to parse tags:')
+ tags = []
+ if tags:
+ mi.tags = [x.replace(',', ';') for x in tags]
+
+ # pubdate
+ pubdate = get_text(extra, date)
+ if pubdate:
+ try:
+ default = utcnow().replace(day=15)
+ mi.pubdate = parse_date(pubdate, assume_utc=True, default=default)
+ except:
+ log.exception('Failed to parse pubdate')
+
+
+ return mi
+
+class Worker(Thread):
+
+ def __init__(self, log, entries, abort, result_queue):
+ self.browser, self.log, self.entries = browser(), log, entries
+ self.abort, self.result_queue = abort, result_queue
+ Thread.__init__(self)
+ self.daemon = True
+
+ def run(self):
+ for i in self.entries:
+ try:
+ ans = to_metadata(self.browser, self.log, i)
+ if ans is not None:
+ self.result_queue.put(ans)
+ except:
+ self.log.exception(
+ 'Failed to get metadata for identify entry:',
+ etree.tostring(i))
+ if self.abort.is_set():
+ break
+
+
+class GoogleBooks(Source):
+
+ name = 'Google Books'
+
+ def create_query(self, log, title=None, authors=None, identifiers={},
+ start_index=1):
+ BASE_URL = 'http://books.google.com/books/feeds/volumes?'
+ isbn = identifiers.get('isbn', None)
+ q = ''
+ if isbn is not None:
+ q += 'isbn:'+isbn
+ elif title or authors:
+ def build_term(prefix, parts):
+ return ' '.join('in'+prefix + ':' + x for x in parts)
+ if title is not None:
+ q += build_term('title', title.split())
+ if authors:
+ q += ('+' if q else '')+build_term('author',
+ self.get_author_tokens(authors))
+
+ if isinstance(q, unicode):
+ q = q.encode('utf-8')
+ if not q:
+ return None
+ return BASE_URL+urlencode({
+ 'q':q,
+ 'max-results':20,
+ 'start-index':start_index,
+ 'min-viewability':'none',
+ })
+
+
+ def identify(self, log, result_queue, abort, title=None, authors=None, identifiers={}):
+ query = self.create_query(log, title=title, authors=authors,
+ identifiers=identifiers)
+ try:
+ raw = browser().open_novisit(query).read()
+ except Exception, e:
+ log.exception('Failed to make identify query: %r'%query)
+ return as_unicode(e)
+
+ try:
+ parser = etree.XMLParser(recover=True, no_network=True)
+ feed = etree.fromstring(raw, parser=parser)
+ entries = entry(feed)
+ except Exception, e:
+ log.exception('Failed to parse identify results')
+ return as_unicode(e)
+
+
+ groups = self.split_jobs(entries, 5) # At most 5 threads
+ if not groups:
+ return
+ workers = [Worker(log, entries, abort, result_queue) for entries in
+ groups]
+
+ if abort.is_set():
+ return
+
+ for worker in workers: worker.start()
+
+ has_alive_worker = True
+ while has_alive_worker and not abort.is_set():
+ has_alive_worker = False
+ for worker in workers:
+ if worker.is_alive():
+ has_alive_worker = True
+ time.sleep(0.1)
+
+ return None
+
+
+
+
From 971e3150f9aaf86f7b253d6d88534e5e0256dc57 Mon Sep 17 00:00:00 2001
From: ldolse
Date: Tue, 1 Feb 2011 13:17:58 +0800
Subject: [PATCH 11/14] ...
---
src/calibre/ebooks/conversion/utils.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/calibre/ebooks/conversion/utils.py b/src/calibre/ebooks/conversion/utils.py
index 77086efd97..1263372ce3 100644
--- a/src/calibre/ebooks/conversion/utils.py
+++ b/src/calibre/ebooks/conversion/utils.py
@@ -451,21 +451,21 @@ class HeuristicProcessor(object):
top_margin = ''
bottom_margin = ''
if initblanks is not None:
- top_margin = 'margin=top:'+str(len(self.single_blank.findall(initblanks)))+'em;'
+ top_margin = 'margin-top:'+str(len(self.single_blank.findall(initblanks)))+'em;'
if endblanks is not None:
- bottom_margin = 'margin=top:'+str(len(self.single_blank.findall(initblanks)))+'em;'
+ bottom_margin = 'margin-bottom:'+str(len(self.single_blank.findall(endblanks)))+'em;'
if initblanks == None and endblanks == None:
return heading
else:
- heading = re.sub('(?i)\d+)[^>]*>', ''+' style="'+top_margin+bottom_margin+'">', heading)
+ heading = re.sub('(?i)\d+)[^>]*>', '\n\n'+' style="'+top_margin+bottom_margin+'">', heading)
return heading
html = blanks_around_headings.sub(merge_header_whitespace, html)
def markup_whitespaces(match):
blanks = match.group(0)
- blanks = self.blankreg.sub('\n
', blanks)
+ blanks = self.blankreg.sub('\n
', blanks)
return blanks
html = blanks_n_nopunct.sub(markup_whitespaces, html)
From d75e17e6b44e8ae688ade08bd30ae552ab0c48c3 Mon Sep 17 00:00:00 2001
From: ldolse
Date: Tue, 1 Feb 2011 18:07:37 +0800
Subject: [PATCH 12/14] added scene break replacement logic
---
src/calibre/ebooks/conversion/cli.py | 2 +-
src/calibre/ebooks/conversion/plumber.py | 4 +++
src/calibre/ebooks/conversion/utils.py | 33 ++++++++++++++++++++----
3 files changed, 33 insertions(+), 6 deletions(-)
diff --git a/src/calibre/ebooks/conversion/cli.py b/src/calibre/ebooks/conversion/cli.py
index 33ae61f16a..278d599378 100644
--- a/src/calibre/ebooks/conversion/cli.py
+++ b/src/calibre/ebooks/conversion/cli.py
@@ -143,7 +143,7 @@ def add_pipeline_options(parser, plumber):
' patterns. Disabled by default. Use %s to enable. '
' Individual actions can be disabled with the %s options.')
% ('--enable-heuristics', '--disable-*'),
- ['enable_heuristics'] + HEURISTIC_OPTIONS
+ ['enable_heuristics', 'replace_scene_breaks'] + HEURISTIC_OPTIONS
),
'SEARCH AND REPLACE' : (
diff --git a/src/calibre/ebooks/conversion/plumber.py b/src/calibre/ebooks/conversion/plumber.py
index 5807ba5f8f..59d7a0ed2a 100644
--- a/src/calibre/ebooks/conversion/plumber.py
+++ b/src/calibre/ebooks/conversion/plumber.py
@@ -530,6 +530,10 @@ OptionRecommendation(name='format_scene_breaks',
help=_('Left aligned scene break markers are center aligned. '
'Replace soft scene breaks that use multiple blank lines with'
'horizontal rules.')),
+
+OptionRecommendation(name='replace_scene_breaks',
+ recommended_value=None, level=OptionRecommendation.LOW,
+ help=_('Replace scene breaks with the specified text.')),
OptionRecommendation(name='dehyphenate',
recommended_value=True, level=OptionRecommendation.LOW,
diff --git a/src/calibre/ebooks/conversion/utils.py b/src/calibre/ebooks/conversion/utils.py
index 1263372ce3..cf305f1022 100644
--- a/src/calibre/ebooks/conversion/utils.py
+++ b/src/calibre/ebooks/conversion/utils.py
@@ -33,6 +33,7 @@ class HeuristicProcessor(object):
self.line_open = "<(?Pp|div)[^>]*>\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*(<(?Pfont|span|[ibu])[^>]*>)?\s*"
self.line_close = "((?P=inner3)>)?\s*((?P=inner2)>)?\s*((?P=inner1)>)?\s*(?P=outer)>"
self.single_blank = re.compile(r'(\s*]*>\s*
)', re.IGNORECASE)
+ self.scene_break_open = ''
def is_pdftohtml(self, src):
return '' in src[:1000]
@@ -481,6 +482,22 @@ class HeuristicProcessor(object):
html = self.blankreg.sub('\n
', html)
return html
+ def markup_user_break(self, replacement_break):
+ hr_open = ''
+ if re.findall('(<|>)', replacement_break):
+ if re.match('^
'
+ elif re.match('^
'
+ else:
+ replacement_break = html2text(replacement_break)
+ replacement_break = re.sub('\s', ' ', replacement_break)
+ scene_break = self.scene_break_open+replacement_break+''
+ else:
+ replacement_break = re.sub('\s', ' ', replacement_break)
+ scene_break = self.scene_break_open+replacement_break+''
+
+ return scene_break
def __call__(self, html):
@@ -498,7 +515,7 @@ class HeuristicProcessor(object):
# Arrange line feeds and tags so the line_length and no_markup functions work correctly
html = self.arrange_htm_line_endings(html)
- self.dump(html, 'after_arrange_line_endings')
+ #self.dump(html, 'after_arrange_line_endings')
if self.cleanup_required():
###### Check Markup ######
#
@@ -534,7 +551,7 @@ class HeuristicProcessor(object):
if getattr(self.extra_opts, 'markup_chapter_headings', False):
html = self.markup_chapters(html, self.totalwords, self.blanks_between_paragraphs)
- self.dump(html, 'after_chapter_markup')
+ #self.dump(html, 'after_chapter_markup')
if getattr(self.extra_opts, 'italicize_common_cases', False):
html = self.markup_italicis(html)
@@ -608,9 +625,15 @@ class HeuristicProcessor(object):
# Center separator lines, use a bit larger margin in this case
scene_break_regex = self.line_open+'(?![\w\'\"])(?P((?P((?!\s)\W))\s*(?P=break_char)?)+)\s*'+self.line_close
scene_break = re.compile(r'%s' % scene_break_regex, re.IGNORECASE|re.UNICODE)
- print "found "+str(len(scene_break.findall(html)))+" scene breaks"
- html = scene_break.sub('' + '\g' + '
', html)
- #html = re.sub(']*>\s*
', '
', html)
+ replacement_break = getattr(self.extra_opts, 'replace_scene_breaks', None)
+ if replacement_break is not None:
+ replacement_break = self.markup_user_break(replacement_break)
+ if len(scene_break.findall(html)) >= 1:
+ html = scene_break.sub(replacement_break, html)
+ else:
+ html = re.sub(']*>\s*
', replacement_break, html)
+ else:
+ html = scene_break.sub(self.scene_break_open+'\g'+'', html)
if self.deleted_nbsps:
# put back non-breaking spaces in empty paragraphs so they render correctly
From 48f202c7fd875bb4ccabeed6d7078e56607da142 Mon Sep 17 00:00:00 2001
From: ldolse
Date: Tue, 1 Feb 2011 21:21:36 +0800
Subject: [PATCH 13/14] allow user applied styles to
tags, updated
comments/docs
---
src/calibre/ebooks/conversion/utils.py | 19 +++++++++++++++++--
src/calibre/manual/conversion.rst | 13 +++++++++----
2 files changed, 26 insertions(+), 6 deletions(-)
diff --git a/src/calibre/ebooks/conversion/utils.py b/src/calibre/ebooks/conversion/utils.py
index cf305f1022..21c6063f63 100644
--- a/src/calibre/ebooks/conversion/utils.py
+++ b/src/calibre/ebooks/conversion/utils.py
@@ -483,10 +483,23 @@ class HeuristicProcessor(object):
return html
def markup_user_break(self, replacement_break):
+ '''
+ Takes a string the user supplies and wraps it in markup that will be centered with
+ appropriate margins. <hr> and <p> tags are allowed. If the user specifies
+ a style with width attributes in the <hr> tag then the appropriate margins are
+ applied to wrapping divs. This is because many ebook devices don't support margin:auto
+ All other html is converted to text.
+ '''
hr_open = ''
if re.findall('(<|>)', replacement_break):
if re.match('^
'
+ if replacement_break.find('width') != -1:
+ width = int(re.sub('.*?width(:|=)(?P<width>\d+).*', '\g<width>', replacement_break))
+ divpercent = (100 - width) / 2
+ hr_open = re.sub('45', str(divpercent), hr_open)
+ scene_break = hr_open+replacement_break+''
+ else:
+ scene_break = hr_open+'
'
elif re.match('^
'
else:
@@ -622,9 +635,11 @@ class HeuristicProcessor(object):
blanks_count = len(self.any_multi_blank.findall(html))
if blanks_count >= 1:
html = self.merge_blanks(html, blanks_count)
- # Center separator lines, use a bit larger margin in this case
scene_break_regex = self.line_open+'(?![\w\'\"])(?P((?P((?!\s)\W))\s*(?P=break_char)?)+)\s*'+self.line_close
scene_break = re.compile(r'%s' % scene_break_regex, re.IGNORECASE|re.UNICODE)
+ # If the user has enabled scene break replacement, then either softbreaks
+ # or 'hard' scene breaks are replaced, depending on which is in use
+ # Otherwise separator lines are centered, use a bit larger margin in this case
replacement_break = getattr(self.extra_opts, 'replace_scene_breaks', None)
if replacement_break is not None:
replacement_break = self.markup_user_break(replacement_break)
diff --git a/src/calibre/manual/conversion.rst b/src/calibre/manual/conversion.rst
index 7f3ff21fe0..ecd8609ecc 100644
--- a/src/calibre/manual/conversion.rst
+++ b/src/calibre/manual/conversion.rst
@@ -311,10 +311,15 @@ remove all non-breaking-space entities, or may include false positive matches re
:guilabel:`Ensure scene breaks are consistently formatted`
With this option |app| will attempt to detect common scene-break markers and ensure that they are center aligned.
- It also attempts to detect scene breaks defined by white space and replace them with a horizontal rule 15% of the
- page width. Some readers may find this desirable as these 'soft' scene breaks often become page breaks on readers, and
- thus become difficult to distinguish.
+ 'Soft' scene break markers, i.e. scene breaks only defined by extra white space, are styled to ensure that they
+ will not be displayed in conjunction with page breaks.
+:guilabel:`Replace scene breaks`
+ If this option is configured then |app| will replace scene break markers it finds with the replacement text specified by the
+ user. In general you should avoid using html tags, |app| will discard any tags and use pre-defined markup.
+ <hr> tags, i.e. horizontal rules, are an exception. These can optionally be specified with styles, if you choose to add your own
+ style be sure to include the 'width' setting, otherwise the style information will be discarded.
+
:guilabel:`Remove unnecessary hyphens`
|app| will analyze all hyphenated content in the document when this option is enabled. The document itself is used
as a dictionary for analysis. This allows |app| to accurately remove hyphens for any words in the document in any language,
@@ -628,7 +633,7 @@ between 0 and 1. The default is 0.45, just under the median line length. Lower t
text in the unwrapping. Increase to include less. You can adjust this value in the conversion settings under :guilabel:`PDF Input`.
Also, they often have headers and footers as part of the document that will become included with the text.
-Use the options to remove headers and footers to mitigate this issue. If the headers and footers are not
+Use the Search and Replace panel to remove headers and footers to mitigate this issue. If the headers and footers are not
removed from the text it can throw off the paragraph unwrapping. To learn how to use the header and footer removal options, read
:ref:`regexptutorial`.
From 72fe944b95bd3a6066b43b77a5b0ba9abb1685e8 Mon Sep 17 00:00:00 2001
From: ldolse
Date: Tue, 1 Feb 2011 22:05:54 +0800
Subject: [PATCH 14/14] ...
---
src/calibre/gui2/convert/single.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/src/calibre/gui2/convert/single.py b/src/calibre/gui2/convert/single.py
index 6540383229..59fcbb65ad 100644
--- a/src/calibre/gui2/convert/single.py
+++ b/src/calibre/gui2/convert/single.py
@@ -258,7 +258,6 @@ class Config(ResizableDialog, Ui_Dialog):
if not w.pre_commit_check():
return
x = w.commit(save_defaults=False)
- print x
recs.update(x)
self.opf_file, self.cover_file = self.mw.opf_file, self.mw.cover_file
self._recommendations = recs