-    <p>Some text with an <abbr title="Abbreviation">ABBR</abbr> and a <abbr title="Abbreviation Reference">REF</abbr>. Ignore REFERENCE and ref.</p>
+Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/) and
+ [Seemant Kulleen](http://www.kulleen.org/)
-Copyright 2007-2008
-* [Waylan Limberg](http://achinghead.com/)
-* [Seemant Kulleen](http://www.kulleen.org/)
-
+All changes Copyright 2008-2014 The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
 '''
@@ -28,20 +21,21 @@
 from __future__ import unicode_literals
 from . import Extension
 from ..preprocessors import Preprocessor
 from ..inlinepatterns import Pattern
-from ..util import etree
+from ..util import etree, AtomicString
 import re
 # Global Vars
 ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)')
-<p class="admonition-title">Note</p>
-This is the first line inside the box
-Did you know?
-Another line here.
-            # e.g.: `!!! note` will render `<p class="admonition-title">Note</p>`
+            # e.g.: `!!! note` will render
+            # `<p class="admonition-title">Note</p>`
            title = klass.capitalize()
        elif title == '':
            # an explicit blank title should not be rendered
@@ -114,5 +92,5 @@ class AdmonitionProcessor(BlockProcessor):
        return klass, title
-def makeExtension(configs={}):
-    return AdmonitionExtension(configs=configs)
+def makeExtension(*args, **kwargs):
+    return AdmonitionExtension(*args, **kwargs)
diff --git a/src/calibre/ebooks/markdown/extensions/attr_list.py b/src/calibre/ebooks/markdown/extensions/attr_list.py
index c98aa850a6..683bdf831c 100644
--- a/src/calibre/ebooks/markdown/extensions/attr_list.py
+++ b/src/calibre/ebooks/markdown/extensions/attr_list.py
@@ -2,19 +2,18 @@
 Attribute List Extension for Python-Markdown
 ============================================
-Adds attribute list syntax. Inspired by
+Adds attribute list syntax. Inspired by
 [maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s
 feature of the same name.
-Copyright 2011 [Waylan Limberg](http://achinghead.com/).
+See
-            return '<pre class="%s"><code%s>%s</code></pre>\n'% \
-                        (self.css_class, class_str, txt)
+                class_str = ' class="%s"' % ' '.join(classes)
+            return '<pre class="%s"><code%s>%s</code></pre>\n' % \
+                   (self.css_class, class_str, txt)
- def _getLang(self):
+ def _parseHeader(self):
"""
Determines language of a code block from shebang line and whether said
line should be removed or left in place. If the sheband line contains a
path (even a single /) then it is assumed to be a real shebang line and
left alone. However, if no path is given (e.i.: #!python or :::python)
- then it is assumed to be a mock shebang for language identifitation of a
- code fragment and removed from the code block prior to processing for
+        then it is assumed to be a mock shebang for language identification of
+ a code fragment and removed from the code block prior to processing for
code highlighting. When a mock shebang (e.i: #!python) is found, line
numbering is turned on. When colons are found in place of a shebang
(e.i.: :::python), line numbering is left in the current state - off
by default.
+ Also parses optional list of highlight lines, like:
+
+ :::python hl_lines="1 3"
"""
import re
- #split text into lines
+ # split text into lines
lines = self.src.split("\n")
- #pull first line to examine
+ # pull first line to examine
fl = lines.pop(0)
c = re.compile(r'''
-            (?:(?:^::+)|(?P<shebang>^[#]!))
-<p>A paragraph before a fenced code block:</p>
-<pre><code>Fenced code block
-</code></pre>
+See
-<p>A paragraph before a fenced code block:</p>
-<pre><code>Fenced code block
-</code></pre>
-Include tilde's in a code block and wrap with blank lines:
-
- >>> text = '''
- ... ~~~~~~~~
- ...
- ... ~~~~
- ... ~~~~~~~~'''
- >>> print markdown.markdown(text, extensions=['fenced_code'])
-<pre><code>
- ~~~~
-</code></pre>
-
-Language tags:
-
- >>> text = '''
- ... ~~~~{.python}
- ... # Some python code
- ... ~~~~'''
- >>> print markdown.markdown(text, extensions=['fenced_code'])
-<pre><code class="python"># Some python code
-</code></pre>
-
-Optionally backticks instead of tildes as per how github's code block markdown is identified:
-
- >>> text = '''
- ... `````
- ... # Arbitrary code
- ... ~~~~~ # these tildes will not close the block
- ... `````'''
- >>> print markdown.markdown(text, extensions=['fenced_code'])
-<pre><code># Arbitrary code
- ~~~~~ # these tildes will not close the block
-</code></pre>
-
-Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
-
-Project website:
-FENCED_BLOCK_RE = re.compile( \
-    r'(?P<fence>^(?:~{3,}|`{3,}))[ ]*(\{?\.?(?P<lang>[a-zA-Z0-9_+-]*)\}?)?[ ]*\n(?P<code>.*?)(?<=\n)(?P=fence)[ ]*$',
-    re.MULTILINE|re.DOTALL
-    )
-CODE_WRAP = '<pre><code%s>%s</code></pre>'
-LANG_TAG = ' class="%s"'
class FencedCodeExtension(Extension):
@@ -95,11 +30,21 @@ class FencedCodeExtension(Extension):
md.registerExtension(self)
md.preprocessors.add('fenced_code_block',
- FencedBlockPreprocessor(md),
- ">normalize_whitespace")
+ FencedBlockPreprocessor(md),
+ ">normalize_whitespace")
class FencedBlockPreprocessor(Preprocessor):
+ FENCED_BLOCK_RE = re.compile(r'''
+(?P<fence>^(?:~{3,}|`{3,}))[ ]*                          # Opening ``` or ~~~
+(\{?\.?(?P<lang>[a-zA-Z0-9_+-]*))?[ ]*                   # Optional {, and lang
+# Optional highlight lines, single- or double-quote-delimited
+(hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?[ ]*
+}?[ ]*\n                                                 # Optional closing }
+(?P<code>.*?)(?<=\n)
+(?P=fence)[ ]*$''', re.MULTILINE | re.DOTALL | re.VERBOSE)
+    CODE_WRAP = '<pre><code%s>%s</code></pre>'
+ LANG_TAG = ' class="%s"'
def __init__(self, md):
super(FencedBlockPreprocessor, self).__init__(md)
@@ -121,29 +66,35 @@ class FencedBlockPreprocessor(Preprocessor):
text = "\n".join(lines)
while 1:
- m = FENCED_BLOCK_RE.search(text)
+ m = self.FENCED_BLOCK_RE.search(text)
if m:
lang = ''
if m.group('lang'):
- lang = LANG_TAG % m.group('lang')
+ lang = self.LANG_TAG % m.group('lang')
# If config is not empty, then the codehighlite extension
- # is enabled, so we call it to highlite the code
+ # is enabled, so we call it to highlight the code
if self.codehilite_conf:
- highliter = CodeHilite(m.group('code'),
- linenums=self.codehilite_conf['linenums'][0],
- guess_lang=self.codehilite_conf['guess_lang'][0],
- css_class=self.codehilite_conf['css_class'][0],
- style=self.codehilite_conf['pygments_style'][0],
- lang=(m.group('lang') or None),
- noclasses=self.codehilite_conf['noclasses'][0])
+ highliter = CodeHilite(
+ m.group('code'),
+ linenums=self.codehilite_conf['linenums'][0],
+ guess_lang=self.codehilite_conf['guess_lang'][0],
+ css_class=self.codehilite_conf['css_class'][0],
+ style=self.codehilite_conf['pygments_style'][0],
+ lang=(m.group('lang') or None),
+ noclasses=self.codehilite_conf['noclasses'][0],
+ hl_lines=parse_hl_lines(m.group('hl_lines'))
+ )
code = highliter.hilite()
else:
- code = CODE_WRAP % (lang, self._escape(m.group('code')))
+ code = self.CODE_WRAP % (lang,
+ self._escape(m.group('code')))
placeholder = self.markdown.htmlStash.store(code, safe=True)
- text = '%s\n%s\n%s'% (text[:m.start()], placeholder, text[m.end():])
+ text = '%s\n%s\n%s' % (text[:m.start()],
+ placeholder,
+ text[m.end():])
else:
break
return text.split("\n")
@@ -157,5 +108,5 @@ class FencedBlockPreprocessor(Preprocessor):
return txt
-def makeExtension(configs=None):
- return FencedCodeExtension(configs=configs)
+def makeExtension(*args, **kwargs):
+ return FencedCodeExtension(*args, **kwargs)
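The hl_lines plumbing added above is easiest to see from the calling side. A minimal sketch, assuming the patched package is importable as `markdown` and Pygments is available for codehilite (the sample text is invented):

    import markdown

    text = '\n'.join([
        '```python hl_lines="1 3"',
        'a = 1',
        'b = 2',
        'c = 3',
        '```',
    ])
    # fenced_code passes lang and hl_lines through to codehilite, which with
    # Pygments should wrap lines 1 and 3 in <span class="hll"> markers.
    html = markdown.markdown(text, extensions=['fenced_code', 'codehilite'])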
diff --git a/src/calibre/ebooks/markdown/extensions/footnotes.py b/src/calibre/ebooks/markdown/extensions/footnotes.py
index 65ed597a7b..b52815f3c8 100644
--- a/src/calibre/ebooks/markdown/extensions/footnotes.py
+++ b/src/calibre/ebooks/markdown/extensions/footnotes.py
@@ -1,25 +1,15 @@
"""
-========================= FOOTNOTES =================================
+Footnotes Extension for Python-Markdown
+=======================================
-This section adds footnote handling to markdown. It can be used as
-an example for extending python-markdown with relatively complex
-functionality. While in this case the extension is included inside
-the module itself, it could just as easily be added from outside the
-module. Not that all markdown classes above are ignorant about
-footnotes. All footnote functionality is provided separately and
-then added to the markdown instance at the run time.
+Adds footnote handling to Python-Markdown.
-Footnote functionality is attached by calling extendMarkdown()
-method of FootnoteExtension. The method also registers the
-extension to allow it's state to be reset by a call to reset()
-method.
+See
+for documentation.
-Example:
- Footnotes[^1] have a label[^label] and a definition[^!DEF].
+Copyright The Python Markdown Project
- [^1]: This is a footnote
- [^label]: A footnote on "label"
- [^!DEF]: The footnote for definition
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
@@ -35,29 +25,31 @@ from ..odict import OrderedDict
import re
FN_BACKLINK_TEXT = "zz1337820767766393qq"
-NBSP_PLACEHOLDER = "qq3936677670287331zz"
+NBSP_PLACEHOLDER = "qq3936677670287331zz"
DEF_RE = re.compile(r'[ ]{0,3}\[\^([^\]]*)\]:\s*(.*)')
TABBED_RE = re.compile(r'((\t)|( ))(.*)')
+
class FootnoteExtension(Extension):
""" Footnote Extension. """
- def __init__ (self, configs):
+ def __init__(self, *args, **kwargs):
""" Setup configs. """
- self.config = {'PLACE_MARKER':
- ["///Footnotes Go Here///",
- "The text string that marks where the footnotes go"],
- 'UNIQUE_IDS':
- [False,
- "Avoid name collisions across "
- "multiple calls to reset()."],
- "BACKLINK_TEXT":
- ["↩",
- "The text string that links from the footnote to the reader's place."]
- }
- for key, value in configs:
- self.config[key][0] = value
+ self.config = {
+ 'PLACE_MARKER':
+ ["///Footnotes Go Here///",
+ "The text string that marks where the footnotes go"],
+ 'UNIQUE_IDS':
+ [False,
+ "Avoid name collisions across "
+ "multiple calls to reset()."],
+ "BACKLINK_TEXT":
+ ["↩",
+ "The text string that links from the footnote "
+ "to the reader's place."]
+ }
+ super(FootnoteExtension, self).__init__(*args, **kwargs)
# In multiple invocations, emit links that don't get tangled.
self.unique_prefix = 0
@@ -69,27 +61,28 @@ class FootnoteExtension(Extension):
md.registerExtension(self)
self.parser = md.parser
self.md = md
- self.sep = ':'
- if self.md.output_format in ['html5', 'xhtml5']:
- self.sep = '-'
# Insert a preprocessor before ReferencePreprocessor
- md.preprocessors.add("footnote", FootnotePreprocessor(self),
- "amp_substitute")
+ md.postprocessors.add(
+ "footnote", FootnotePostprocessor(self), ">amp_substitute"
+ )
def reset(self):
- """ Clear the footnotes on reset, and prepare for a distinct document. """
+ """ Clear footnotes on reset, and prepare for distinct document. """
self.footnotes = OrderedDict()
self.unique_prefix += 1
@@ -103,9 +96,11 @@ class FootnoteExtension(Extension):
if child.tail:
if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
return child, element, False
- finder(child)
+ child_res = finder(child)
+ if child_res is not None:
+ return child_res
return None
-
+
res = finder(root)
return res
@@ -113,19 +108,25 @@ class FootnoteExtension(Extension):
""" Store a footnote for later retrieval. """
self.footnotes[id] = text
+ def get_separator(self):
+ if self.md.output_format in ['html5', 'xhtml5']:
+ return '-'
+ return ':'
+
def makeFootnoteId(self, id):
""" Return footnote link id. """
if self.getConfig("UNIQUE_IDS"):
- return 'fn%s%d-%s' % (self.sep, self.unique_prefix, id)
+ return 'fn%s%d-%s' % (self.get_separator(), self.unique_prefix, id)
else:
- return 'fn%s%s' % (self.sep, id)
+ return 'fn%s%s' % (self.get_separator(), id)
def makeFootnoteRefId(self, id):
""" Return footnote back-link id. """
if self.getConfig("UNIQUE_IDS"):
- return 'fnref%s%d-%s' % (self.sep, self.unique_prefix, id)
+ return 'fnref%s%d-%s' % (self.get_separator(),
+ self.unique_prefix, id)
else:
- return 'fnref%s%s' % (self.sep, id)
+ return 'fnref%s%s' % (self.get_separator(), id)
def makeFootnotesDiv(self, root):
""" Return div of footnotes as et Element. """
@@ -145,10 +146,13 @@ class FootnoteExtension(Extension):
backlink = etree.Element("a")
backlink.set("href", "#" + self.makeFootnoteRefId(id))
if self.md.output_format not in ['html5', 'xhtml5']:
- backlink.set("rev", "footnote") # Invalid in HTML5
+ backlink.set("rev", "footnote") # Invalid in HTML5
backlink.set("class", "footnote-backref")
- backlink.set("title", "Jump back to footnote %d in the text" % \
- (self.footnotes.index(id)+1))
+ backlink.set(
+ "title",
+ "Jump back to footnote %d in the text" %
+ (self.footnotes.index(id)+1)
+ )
backlink.text = FN_BACKLINK_TEXT
if li.getchildren():
@@ -165,7 +169,7 @@ class FootnoteExtension(Extension):
class FootnotePreprocessor(Preprocessor):
""" Find all footnote references and store for later use. """
- def __init__ (self, footnotes):
+ def __init__(self, footnotes):
self.footnotes = footnotes
def run(self, lines):
@@ -186,7 +190,7 @@ class FootnotePreprocessor(Preprocessor):
if m:
fn, _i = self.detectTabbed(lines[i+1:])
fn.insert(0, m.group(2))
- i += _i-1 # skip past footnote
+ i += _i-1 # skip past footnote
self.footnotes.setFootnote(m.group(1), "\n".join(fn))
else:
newlines.append(lines[i])
@@ -207,16 +211,16 @@ class FootnotePreprocessor(Preprocessor):
"""
items = []
- blank_line = False # have we encountered a blank line yet?
- i = 0 # to keep track of where we are
+ blank_line = False # have we encountered a blank line yet?
+ i = 0 # to keep track of where we are
def detab(line):
match = TABBED_RE.match(line)
if match:
- return match.group(4)
+ return match.group(4)
for line in lines:
- if line.strip(): # Non-blank line
+ if line.strip(): # Non-blank line
detabbed_line = detab(line)
if detabbed_line:
items.append(detabbed_line)
@@ -230,23 +234,24 @@ class FootnotePreprocessor(Preprocessor):
else:
return items, i+1
- else: # Blank line: _maybe_ we are done.
+ else: # Blank line: _maybe_ we are done.
blank_line = True
- i += 1 # advance
+ i += 1 # advance
# Find the next non-blank line
for j in range(i, len(lines)):
if lines[j].strip():
- next_line = lines[j]; break
+ next_line = lines[j]
+ break
else:
- break # There is no more text; we are done.
+ break # There is no more text; we are done.
# Check if the next non-blank line is tabbed
- if detab(next_line): # Yes, more work to do.
+ if detab(next_line): # Yes, more work to do.
items.append("")
continue
else:
- break # No, we are done.
+ break # No, we are done.
else:
i += 1
@@ -268,7 +273,7 @@ class FootnotePattern(Pattern):
sup.set('id', self.footnotes.makeFootnoteRefId(id))
a.set('href', '#' + self.footnotes.makeFootnoteId(id))
if self.footnotes.md.output_format not in ['html5', 'xhtml5']:
- a.set('rel', 'footnote') # invalid in HTML5
+ a.set('rel', 'footnote') # invalid in HTML5
a.set('class', 'footnote-ref')
a.text = text_type(self.footnotes.footnotes.index(id) + 1)
return sup
@@ -279,12 +284,12 @@ class FootnotePattern(Pattern):
class FootnoteTreeprocessor(Treeprocessor):
""" Build and append footnote div to end of document. """
- def __init__ (self, footnotes):
+ def __init__(self, footnotes):
self.footnotes = footnotes
def run(self, root):
footnotesDiv = self.footnotes.makeFootnotesDiv(root)
- if footnotesDiv:
+ if footnotesDiv is not None:
result = self.footnotes.findFootnotesPlaceholder(root)
if result:
child, parent, isText = result
@@ -298,16 +303,19 @@ class FootnoteTreeprocessor(Treeprocessor):
else:
root.append(footnotesDiv)
+
class FootnotePostprocessor(Postprocessor):
""" Replace placeholders with html entities. """
def __init__(self, footnotes):
self.footnotes = footnotes
def run(self, text):
- text = text.replace(FN_BACKLINK_TEXT, self.footnotes.getConfig("BACKLINK_TEXT"))
+ text = text.replace(
+ FN_BACKLINK_TEXT, self.footnotes.getConfig("BACKLINK_TEXT")
+ )
return text.replace(NBSP_PLACEHOLDER, " ")
-def makeExtension(configs=[]):
- """ Return an instance of the FootnoteExtension """
- return FootnoteExtension(configs=configs)
+def makeExtension(*args, **kwargs):
+ """ Return an instance of the FootnoteExtension """
+ return FootnoteExtension(*args, **kwargs)
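The deleted doctest at the top of this file boiled down to the following usage; a sketch, assuming the patched package is importable as `markdown`:

    import markdown

    text = (
        'Footnotes[^1] have a label[^label] and a definition[^!DEF].\n\n'
        '[^1]: This is a footnote\n'
        '[^label]: A footnote on "label"\n'
        '[^!DEF]: The footnote for definition\n'
    )
    html = markdown.markdown(text, extensions=['footnotes'])
    # html should contain numbered <sup> references plus a trailing
    # <div class="footnote"> holding the footnote bodies and back-links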
diff --git a/src/calibre/ebooks/markdown/extensions/headerid.py b/src/calibre/ebooks/markdown/extensions/headerid.py
index 7681b8d499..2cb20b97ab 100644
--- a/src/calibre/ebooks/markdown/extensions/headerid.py
+++ b/src/calibre/ebooks/markdown/extensions/headerid.py
@@ -4,73 +4,14 @@ HeaderID Extension for Python-Markdown
Auto-generate id attributes for HTML headers.
-Basic usage:
+See
+for documentation.
- >>> import markdown
- >>> text = "# Some Header #"
- >>> md = markdown.markdown(text, ['headerid'])
- >>> print md
- <h1 id="some-header">Some Header</h1>
+Original code Copyright 2007-2011 [Waylan Limberg](http://achinghead.com/).
-All header IDs are unique:
+All changes Copyright 2011-2014 The Python Markdown Project
- >>> text = '''
- ... #Header
- ... #Header
- ... #Header'''
- >>> md = markdown.markdown(text, ['headerid'])
- >>> print md
- <h1 id="header">Header</h1>
- <h1 id="header_1">Header</h1>
- <h1 id="header_2">Header</h1>
-
-To fit within a html template's hierarchy, set the header base level:
-
- >>> text = '''
- ... #Some Header
- ... ## Next Level'''
- >>> md = markdown.markdown(text, ['headerid(level=3)'])
- >>> print md
- <h3 id="some-header">Some Header</h3>
- <h4 id="next-level">Next Level</h4>
-
-Works with inline markup.
-
- >>> text = '#Some *Header* with [markup](http://example.com).'
- >>> md = markdown.markdown(text, ['headerid'])
- >>> print md
- Some Header with markup.
-
-Turn off auto generated IDs:
-
- >>> text = '''
- ... # Some Header
- ... # Another Header'''
- >>> md = markdown.markdown(text, ['headerid(forceid=False)'])
- >>> print md
- <h1>Some Header</h1>
- <h1>Another Header</h1>
-
-Use with MetaData extension:
-
- >>> text = '''header_level: 2
- ... header_forceid: Off
- ...
- ... # A Header'''
- >>> md = markdown.markdown(text, ['headerid', 'meta'])
- >>> print md
- <h2>A Header</h2>
-
-Copyright 2007-2011 [Waylan Limberg](http://achinghead.com/).
-
-Project website:
-Contact: markdown@freewisdom.org
-
-License: BSD (see ../docs/LICENSE for details)
-
-Dependencies:
-* [Python 2.3+](http://python.org)
-* [Markdown 2.0+](http://packages.python.org/Markdown/)
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
@@ -78,47 +19,9 @@ from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
-import re
-import logging
-import unicodedata
-
-logger = logging.getLogger('MARKDOWN')
-
-IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
-
-
-def slugify(value, separator):
- """ Slugify a string, to make it URL friendly. """
- value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
- value = re.sub('[^\w\s-]', '', value.decode('ascii')).strip().lower()
- return re.sub('[%s\s]+' % separator, separator, value)
-
-
-def unique(id, ids):
- """ Ensure id is unique in set of ids. Append '_1', '_2'... if not """
- while id in ids or not id:
- m = IDCOUNT_RE.match(id)
- if m:
- id = '%s_%d'% (m.group(1), int(m.group(2))+1)
- else:
- id = '%s_%d'% (id, 1)
- ids.add(id)
- return id
-
-
-def itertext(elem):
- """ Loop through all children and return text only.
-
- Reimplements method of same name added to ElementTree in Python 2.7
-
- """
- if elem.text:
- yield elem.text
- for e in elem:
- for s in itertext(e):
- yield s
- if e.tail:
- yield e.tail
+from ..util import parseBoolValue
+from .toc import slugify, unique, stashedHTML2text
+import warnings
class HeaderIdTreeprocessor(Treeprocessor):
@@ -130,13 +33,14 @@ class HeaderIdTreeprocessor(Treeprocessor):
start_level, force_id = self._get_meta()
slugify = self.config['slugify']
sep = self.config['separator']
- for elem in doc.getiterator():
+ for elem in doc:
if elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
if force_id:
if "id" in elem.attrib:
id = elem.get('id')
else:
- id = slugify(''.join(itertext(elem)), sep)
+ id = stashedHTML2text(''.join(elem.itertext()), self.md)
+ id = slugify(id, sep)
elem.set('id', unique(id, self.IDs))
if start_level:
level = int(elem.tag[-1]) + start_level
@@ -144,40 +48,34 @@ class HeaderIdTreeprocessor(Treeprocessor):
level = 6
elem.tag = 'h%d' % level
-
def _get_meta(self):
""" Return meta data suported by this ext as a tuple """
level = int(self.config['level']) - 1
- force = self._str2bool(self.config['forceid'])
+ force = parseBoolValue(self.config['forceid'])
if hasattr(self.md, 'Meta'):
if 'header_level' in self.md.Meta:
level = int(self.md.Meta['header_level'][0]) - 1
- if 'header_forceid' in self.md.Meta:
- force = self._str2bool(self.md.Meta['header_forceid'][0])
+ if 'header_forceid' in self.md.Meta:
+ force = parseBoolValue(self.md.Meta['header_forceid'][0])
return level, force
- def _str2bool(self, s, default=False):
- """ Convert a string to a booleen value. """
- s = str(s)
- if s.lower() in ['0', 'f', 'false', 'off', 'no', 'n']:
- return False
- elif s.lower() in ['1', 't', 'true', 'on', 'yes', 'y']:
- return True
- return default
-
class HeaderIdExtension(Extension):
- def __init__(self, configs):
+ def __init__(self, *args, **kwargs):
# set defaults
self.config = {
- 'level' : ['1', 'Base level for headers.'],
- 'forceid' : ['True', 'Force all headers to have an id.'],
- 'separator' : ['-', 'Word separator.'],
- 'slugify' : [slugify, 'Callable to generate anchors'],
- }
+ 'level': ['1', 'Base level for headers.'],
+ 'forceid': ['True', 'Force all headers to have an id.'],
+ 'separator': ['-', 'Word separator.'],
+ 'slugify': [slugify, 'Callable to generate anchors']
+ }
- for key, value in configs:
- self.setConfig(key, value)
+ super(HeaderIdExtension, self).__init__(*args, **kwargs)
+
+ warnings.warn(
+ 'The HeaderId Extension is pending deprecation. Use the TOC Extension instead.',
+ PendingDeprecationWarning
+ )
def extendMarkdown(self, md, md_globals):
md.registerExtension(self)
@@ -195,5 +93,5 @@ class HeaderIdExtension(Extension):
self.processor.IDs = set()
-def makeExtension(configs=None):
- return HeaderIdExtension(configs=configs)
+def makeExtension(*args, **kwargs):
+ return HeaderIdExtension(*args, **kwargs)
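For reference, the basic behaviour the removed doctests documented is unchanged; a sketch, assuming the patched package is importable as `markdown`:

    import markdown

    html = markdown.markdown('# Some Header #', extensions=['headerid'])
    # roughly <h1 id="some-header">Some Header</h1>; ids are slugified and
    # de-duplicated by the slugify()/unique() helpers now shared with toc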
diff --git a/src/calibre/ebooks/markdown/extensions/meta.py b/src/calibre/ebooks/markdown/extensions/meta.py
index aaff4365a8..711235ef4a 100644
--- a/src/calibre/ebooks/markdown/extensions/meta.py
+++ b/src/calibre/ebooks/markdown/extensions/meta.py
@@ -4,38 +4,14 @@ Meta Data Extension for Python-Markdown
This extension adds Meta Data handling to markdown.
-Basic Usage:
+See
+for documentation.
- >>> import markdown
- >>> text = '''Title: A Test Doc.
- ... Author: Waylan Limberg
- ... John Doe
- ... Blank_Data:
- ...
- ... The body. This is paragraph one.
- ... '''
- >>> md = markdown.Markdown(['meta'])
- >>> print md.convert(text)
- <p>The body. This is paragraph one.</p>
- >>> print md.Meta
- {u'blank_data': [u''], u'author': [u'Waylan Limberg', u'John Doe'], u'title': [u'A Test Doc.']}
+Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
-Make sure text without Meta Data still works (markdown < 1.6b returns a <p>).
+All changes Copyright 2008-2014 The Python Markdown Project
- >>> text = ' Some Code - not extra lines of meta data.'
- >>> md = markdown.Markdown(['meta'])
- >>> print md.convert(text)
- <pre><code>Some Code - not extra lines of meta data.
- </code></pre>
- >>> md.Meta
- {}
-
-Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
-
-Project website:
-Contact: markdown@freewisdom.org
-
-License: BSD (see ../LICENSE.md for details)
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
@@ -44,18 +20,25 @@ from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
import re
+import logging
+
+log = logging.getLogger('MARKDOWN')
# Global Vars
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
+BEGIN_RE = re.compile(r'^-{3}(\s.*)?')
+END_RE = re.compile(r'^(-{3}|\.{3})(\s.*)?')
+
class MetaExtension (Extension):
""" Meta-Data extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add MetaPreprocessor to Markdown instance. """
-
- md.preprocessors.add("meta", MetaPreprocessor(md), "_begin")
+ md.preprocessors.add("meta",
+ MetaPreprocessor(md),
+ ">normalize_whitespace")
class MetaPreprocessor(Preprocessor):
@@ -65,11 +48,13 @@ class MetaPreprocessor(Preprocessor):
""" Parse Meta-Data and store in Markdown.Meta. """
meta = {}
key = None
- while 1:
+ if lines and BEGIN_RE.match(lines[0]):
+ lines.pop(0)
+ while lines:
line = lines.pop(0)
- if line.strip() == '':
- break # blank line - done
m1 = META_RE.match(line)
+ if line.strip() == '' or END_RE.match(line):
+ break # blank line or end of YAML header - done
if m1:
key = m1.group('key').lower().strip()
value = m1.group('value').strip()
@@ -84,10 +69,10 @@ class MetaPreprocessor(Preprocessor):
meta[key].append(m2.group('value').strip())
else:
lines.insert(0, line)
- break # no meta data - done
+ break # no meta data - done
self.markdown.Meta = meta
return lines
-
-def makeExtension(configs={}):
- return MetaExtension(configs=configs)
+
+def makeExtension(*args, **kwargs):
+ return MetaExtension(*args, **kwargs)
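With the new BEGIN_RE/END_RE handling, the meta block may optionally be fenced YAML-style with --- delimiters. A sketch, assuming the patched package is importable as `markdown` (the sample document is invented):

    import markdown

    md = markdown.Markdown(extensions=['meta'])
    text = (
        '---\n'
        'Title: A Test Doc.\n'
        'Author: Waylan Limberg\n'
        '    John Doe\n'
        '---\n'
        '\n'
        'The body. This is paragraph one.\n'
    )
    body = md.convert(text)
    # md.Meta is roughly {'title': ['A Test Doc.'],
    #                     'author': ['Waylan Limberg', 'John Doe']}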
diff --git a/src/calibre/ebooks/markdown/extensions/nl2br.py b/src/calibre/ebooks/markdown/extensions/nl2br.py
index da4b339958..8acd60c2e1 100644
--- a/src/calibre/ebooks/markdown/extensions/nl2br.py
+++ b/src/calibre/ebooks/markdown/extensions/nl2br.py
@@ -5,18 +5,14 @@ NL2BR Extension
A Python-Markdown extension to treat newlines as hard breaks; like
GitHub-flavored Markdown does.
-Usage:
+See
+for documentation.
- >>> import markdown
- >>> print markdown.markdown('line 1\\nline 2', extensions=['nl2br'])
- <p>line 1<br />
- line 2</p>
+Original code Copyright 2011 [Brian Neal](http://deathofagremmie.com/)
-Copyright 2011 [Brian Neal](http://deathofagremmie.com/)
+All changes Copyright 2011-2014 The Python Markdown Project
-Dependencies:
-* [Python 2.4+](http://python.org)
-* [Markdown 2.1+](http://packages.python.org/Markdown/)
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
@@ -27,6 +23,7 @@ from ..inlinepatterns import SubstituteTagPattern
BR_RE = r'\n'
+
class Nl2BrExtension(Extension):
def extendMarkdown(self, md, md_globals):
@@ -34,5 +31,5 @@ class Nl2BrExtension(Extension):
md.inlinePatterns.add('nl', br_tag, '_end')
-def makeExtension(configs=None):
- return Nl2BrExtension(configs)
+def makeExtension(*args, **kwargs):
+ return Nl2BrExtension(*args, **kwargs)
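The removed doctest reduces to the following sketch (assuming the patched package imports as `markdown`):

    import markdown

    html = markdown.markdown('line 1\nline 2', extensions=['nl2br'])
    # roughly '<p>line 1<br />\nline 2</p>' - every newline becomes a hard break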
diff --git a/src/calibre/ebooks/markdown/extensions/sane_lists.py b/src/calibre/ebooks/markdown/extensions/sane_lists.py
index 23e9a7f4a6..828ae7ab34 100644
--- a/src/calibre/ebooks/markdown/extensions/sane_lists.py
+++ b/src/calibre/ebooks/markdown/extensions/sane_lists.py
@@ -2,19 +2,16 @@
Sane List Extension for Python-Markdown
=======================================
-Modify the behavior of Lists in Python-Markdown t act in a sane manor.
+Modify the behavior of Lists in Python-Markdown to act in a sane manner.
-In standard Markdown sytex, the following would constitute a single
-ordered list. However, with this extension, the output would include
-two lists, the first an ordered list and the second and unordered list.
+See
+for documentation.
- 1. ordered
- 2. list
+Original code Copyright 2011 [Waylan Limberg](http://achinghead.com)
- * unordered
- * list
+All changes Copyright 2011-2014 The Python Markdown Project
-Copyright 2011 - [Waylan Limberg](http://achinghead.com)
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
@@ -26,16 +23,24 @@ import re
class SaneOListProcessor(OListProcessor):
-
- CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.))[ ]+(.*)')
+
SIBLING_TAGS = ['ol']
+ def __init__(self, parser):
+ super(SaneOListProcessor, self).__init__(parser)
+ self.CHILD_RE = re.compile(r'^[ ]{0,%d}((\d+\.))[ ]+(.*)' %
+ (self.tab_length - 1))
+
class SaneUListProcessor(UListProcessor):
-
- CHILD_RE = re.compile(r'^[ ]{0,3}(([*+-]))[ ]+(.*)')
+
SIBLING_TAGS = ['ul']
+ def __init__(self, parser):
+ super(SaneUListProcessor, self).__init__(parser)
+ self.CHILD_RE = re.compile(r'^[ ]{0,%d}(([*+-]))[ ]+(.*)' %
+ (self.tab_length - 1))
+
class SaneListExtension(Extension):
""" Add sane lists to Markdown. """
@@ -46,6 +51,5 @@ class SaneListExtension(Extension):
md.parser.blockprocessors['ulist'] = SaneUListProcessor(md.parser)
-def makeExtension(configs={}):
- return SaneListExtension(configs=configs)
-
+def makeExtension(*args, **kwargs):
+ return SaneListExtension(*args, **kwargs)
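The behaviour described by the deleted docstring example, as a sketch (assuming the patched package imports as `markdown`):

    import markdown

    text = (
        '1. ordered\n'
        '2. list\n'
        '\n'
        '* unordered\n'
        '* list\n'
    )
    html = markdown.markdown(text, extensions=['sane_lists'])
    # with sane_lists this produces an <ol> followed by a separate <ul>;
    # standard Markdown would treat all four items as one ordered list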
diff --git a/src/calibre/ebooks/markdown/extensions/smart_strong.py b/src/calibre/ebooks/markdown/extensions/smart_strong.py
index 4818cf9ea8..58570bb55e 100644
--- a/src/calibre/ebooks/markdown/extensions/smart_strong.py
+++ b/src/calibre/ebooks/markdown/extensions/smart_strong.py
@@ -4,21 +4,14 @@ Smart_Strong Extension for Python-Markdown
This extention adds smarter handling of double underscores within words.
-Simple Usage:
+See
+for documentation.
- >>> import markdown
- >>> print markdown.markdown('Text with double__underscore__words.',
- ... extensions=['smart_strong'])
- <p>Text with double__underscore__words.</p>
- >>> print markdown.markdown('__Strong__ still works.',
- ... extensions=['smart_strong'])
- <p><strong>Strong</strong> still works.</p>
- >>> print markdown.markdown('__this__works__too__.',
- ... extensions=['smart_strong'])
- <p><strong>this__works__too</strong>.</p>
+Original code Copyright 2011 [Waylan Limberg](http://achinghead.com)
-Copyright 2011
-[Waylan Limberg](http://achinghead.com)
+All changes Copyright 2011-2014 The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
'''
@@ -30,13 +23,19 @@ from ..inlinepatterns import SimpleTagPattern
SMART_STRONG_RE = r'(?<!\w)(_{2})(?!_)(.+?)(?<!_)\2(?!\w)'
-        md.inlinePatterns.add('strong2', SimpleTagPattern(SMART_STRONG_RE, 'strong'), '>emphasis2')
+ md.inlinePatterns.add(
+ 'strong2',
+ SimpleTagPattern(SMART_STRONG_RE, 'strong'),
+ '>emphasis2'
+ )
-def makeExtension(configs={}):
- return SmartEmphasisExtension(configs=dict(configs))
+
+def makeExtension(*args, **kwargs):
+ return SmartEmphasisExtension(*args, **kwargs)
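A sketch of the removed doctests (assuming the patched package imports as `markdown`):

    import markdown

    markdown.markdown('Text with double__underscore__words.',
                      extensions=['smart_strong'])
    # double underscores inside words are left alone
    markdown.markdown('__Strong__ still works.', extensions=['smart_strong'])
    # whole-word double underscores still produce <strong>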
diff --git a/src/calibre/ebooks/markdown/extensions/tables.py b/src/calibre/ebooks/markdown/extensions/tables.py
index ad52ec11c7..494aaeb3e4 100644
--- a/src/calibre/ebooks/markdown/extensions/tables.py
+++ b/src/calibre/ebooks/markdown/extensions/tables.py
@@ -4,29 +4,32 @@ Tables Extension for Python-Markdown
Added parsing of tables to Python-Markdown.
-A simple example:
+See
+for documentation.
- First Header | Second Header
- ------------- | -------------
- Content Cell | Content Cell
- Content Cell | Content Cell
+Original code Copyright 2009 [Waylan Limberg](http://achinghead.com)
+
+All changes Copyright 2008-2014 The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
-Copyright 2009 - [Waylan Limberg](http://achinghead.com)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..blockprocessors import BlockProcessor
+from ..inlinepatterns import BacktickPattern, BACKTICK_RE
from ..util import etree
+
class TableProcessor(BlockProcessor):
""" Process Tables. """
def test(self, parent, block):
rows = block.split('\n')
- return (len(rows) > 2 and '|' in rows[0] and
- '|' in rows[1] and '-' in rows[1] and
+ return (len(rows) > 1 and '|' in rows[0] and
+ '|' in rows[1] and '-' in rows[1] and
rows[1].strip()[0] in ['|', ':', '-'])
def run(self, parent, blocks):
@@ -34,7 +37,7 @@ class TableProcessor(BlockProcessor):
block = blocks.pop(0).split('\n')
header = block[0].strip()
seperator = block[1].strip()
- rows = block[2:]
+ rows = [] if len(block) < 3 else block[2:]
# Get format type (bordered by pipes or not)
border = False
if header.startswith('|'):
@@ -65,13 +68,17 @@ class TableProcessor(BlockProcessor):
if parent.tag == 'thead':
tag = 'th'
cells = self._split_row(row, border)
- # We use align here rather than cells to ensure every row
+ # We use align here rather than cells to ensure every row
# contains the same number of columns.
for i, a in enumerate(align):
c = etree.SubElement(tr, tag)
try:
- c.text = cells[i].strip()
- except IndexError:
+ if isinstance(cells[i], str) or isinstance(cells[i], unicode):
+ c.text = cells[i].strip()
+ else:
+ # we've already inserted a code element
+ c.append(cells[i])
+ except IndexError: # pragma: no cover
c.text = ""
if a:
c.set('align', a)
@@ -83,7 +90,49 @@ class TableProcessor(BlockProcessor):
row = row[1:]
if row.endswith('|'):
row = row[:-1]
- return row.split('|')
+ return self._split(row, '|')
+
+ def _split(self, row, marker):
+ """ split a row of text with some code into a list of cells. """
+ if self._row_has_unpaired_backticks(row):
+ # fallback on old behaviour
+ return row.split(marker)
+ # modify the backtick pattern to only match at the beginning of the search string
+ backtick_pattern = BacktickPattern('^' + BACKTICK_RE)
+ elements = []
+ current = ''
+ i = 0
+ while i < len(row):
+ letter = row[i]
+ if letter == marker:
+ if current != '' or len(elements) == 0:
+ # Don't append empty string unless it is the first element
+ # The border is already removed when we get the row, then the line is strip()'d
+ # If the first element is a marker, then we have an empty first cell
+ elements.append(current)
+ current = ''
+ else:
+ match = backtick_pattern.getCompiledRegExp().match(row[i:])
+ if not match:
+ current += letter
+ else:
+ groups = match.groups()
+                delim = groups[1]  # the code block delimiter (i.e. 1 or more backticks)
+ row_contents = groups[2] # the text contained inside the code block
+ i += match.start(4) # jump pointer to the beginning of the rest of the text (group #4)
+                element = delim + row_contents + delim  # reinsert backticks
+ current += element
+ i += 1
+ elements.append(current)
+ return elements
+
+ def _row_has_unpaired_backticks(self, row):
+ count_total_backtick = row.count('`')
+ count_escaped_backtick = row.count('\`')
+ count_backtick = count_total_backtick - count_escaped_backtick
+ # odd number of backticks,
+ # we won't be able to build correct code blocks
+ return count_backtick & 1
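The point of _split() above is that a pipe inside an inline code span no longer splits the row into extra cells. A sketch with an invented table, assuming the patched package imports as `markdown`:

    import markdown

    text = (
        'Code    | Description\n'
        '------- | -----------\n'
        '`a | b` | pipe kept inside the code span\n'
    )
    html = markdown.markdown(text, extensions=['tables'])
    # the first body cell should come through as a single <code>a | b</code>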
class TableExtension(Extension):
@@ -91,10 +140,10 @@ class TableExtension(Extension):
def extendMarkdown(self, md, md_globals):
""" Add an instance of TableProcessor to BlockParser. """
- md.parser.blockprocessors.add('table',
+ md.parser.blockprocessors.add('table',
TableProcessor(md.parser),
                                       '<hashheader')
+for documentation.
-Dependencies:
-* [Markdown 2.1+](http://packages.python.org/Markdown/)
+Original code Copyright 2008 [Jack Miller](http://codezen.org)
+
+All changes Copyright 2008-2014 The Python Markdown Project
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
@@ -13,99 +17,192 @@ from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
-from ..util import etree
-from .headerid import slugify, unique, itertext
+from ..util import etree, parseBoolValue, AMP_SUBSTITUTE, HTML_PLACEHOLDER_RE, string_type
import re
+import unicodedata
-def order_toc_list(toc_list):
+def slugify(value, separator):
+ """ Slugify a string, to make it URL friendly. """
+ value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
+ value = re.sub('[^\w\s-]', '', value.decode('ascii')).strip().lower()
+ return re.sub('[%s\s]+' % separator, separator, value)
+
+
+IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
+
+
+def unique(id, ids):
+ """ Ensure id is unique in set of ids. Append '_1', '_2'... if not """
+ while id in ids or not id:
+ m = IDCOUNT_RE.match(id)
+ if m:
+ id = '%s_%d' % (m.group(1), int(m.group(2))+1)
+ else:
+ id = '%s_%d' % (id, 1)
+ ids.add(id)
+ return id
+
+
+def stashedHTML2text(text, md):
+ """ Extract raw HTML from stash, reduce to plain text and swap with placeholder. """
+ def _html_sub(m):
+ """ Substitute raw html with plain text. """
+ try:
+ raw, safe = md.htmlStash.rawHtmlBlocks[int(m.group(1))]
+ except (IndexError, TypeError): # pragma: no cover
+ return m.group(0)
+ if md.safeMode and not safe: # pragma: no cover
+ return ''
+        # Strip out tags and entities - leaving text
+ return re.sub(r'(<[^>]+>)|(&[\#a-zA-Z0-9]+;)', '', raw)
+
+ return HTML_PLACEHOLDER_RE.sub(_html_sub, text)
+
+
+def nest_toc_tokens(toc_list):
"""Given an unsorted list with errors and skips, return a nested one.
[{'level': 1}, {'level': 2}]
=>
[{'level': 1, 'children': [{'level': 2, 'children': []}]}]
-
+
A wrong list is also converted:
[{'level': 2}, {'level': 1}]
=>
[{'level': 2, 'children': []}, {'level': 1, 'children': []}]
"""
-
- def build_correct(remaining_list, prev_elements=[{'level': 1000}]):
-
- if not remaining_list:
- return [], []
-
- current = remaining_list.pop(0)
- if not 'children' in current.keys():
- current['children'] = []
-
- if not prev_elements:
- # This happens for instance with [8, 1, 1], ie. when some
- # header level is outside a scope. We treat it as a
- # top-level
- next_elements, children = build_correct(remaining_list, [current])
- current['children'].append(children)
- return [current] + next_elements, []
-
- prev_element = prev_elements.pop()
- children = []
- next_elements = []
- # Is current part of the child list or next list?
- if current['level'] > prev_element['level']:
- #print "%d is a child of %d" % (current['level'], prev_element['level'])
- prev_elements.append(prev_element)
- prev_elements.append(current)
- prev_element['children'].append(current)
- next_elements2, children2 = build_correct(remaining_list, prev_elements)
- children += children2
- next_elements += next_elements2
- else:
- #print "%d is ancestor of %d" % (current['level'], prev_element['level'])
- if not prev_elements:
- #print "No previous elements, so appending to the next set"
- next_elements.append(current)
- prev_elements = [current]
- next_elements2, children2 = build_correct(remaining_list, prev_elements)
- current['children'].extend(children2)
+
+ ordered_list = []
+ if len(toc_list):
+ # Initialize everything by processing the first entry
+ last = toc_list.pop(0)
+ last['children'] = []
+ levels = [last['level']]
+ ordered_list.append(last)
+ parents = []
+
+ # Walk the rest nesting the entries properly
+ while toc_list:
+ t = toc_list.pop(0)
+ current_level = t['level']
+ t['children'] = []
+
+ # Reduce depth if current level < last item's level
+ if current_level < levels[-1]:
+ # Pop last level since we know we are less than it
+ levels.pop()
+
+ # Pop parents and levels we are less than or equal to
+ to_pop = 0
+ for p in reversed(parents):
+ if current_level <= p['level']:
+ to_pop += 1
+ else: # pragma: no cover
+ break
+ if to_pop:
+ levels = levels[:-to_pop]
+ parents = parents[:-to_pop]
+
+ # Note current level as last
+ levels.append(current_level)
+
+ # Level is the same, so append to
+ # the current parent (if available)
+ if current_level == levels[-1]:
+ (parents[-1]['children'] if parents
+ else ordered_list).append(t)
+
+ # Current level is > last item's level,
+ # So make last item a parent and append current as child
else:
- #print "Previous elements, comparing to those first"
- remaining_list.insert(0, current)
- next_elements2, children2 = build_correct(remaining_list, prev_elements)
- children.extend(children2)
- next_elements += next_elements2
-
- return next_elements, children
-
- ordered_list, __ = build_correct(toc_list)
+ last['children'].append(t)
+ parents.append(last)
+ levels.append(current_level)
+ last = t
+
return ordered_list
class TocTreeprocessor(Treeprocessor):
-
- # Iterator wrapper to get parent and child all at once
+ def __init__(self, md, config):
+ super(TocTreeprocessor, self).__init__(md)
+
+ self.marker = config["marker"]
+ self.title = config["title"]
+ self.base_level = int(config["baselevel"]) - 1
+ self.slugify = config["slugify"]
+ self.sep = config["separator"]
+ self.use_anchors = parseBoolValue(config["anchorlink"])
+ self.use_permalinks = parseBoolValue(config["permalink"], False)
+ if self.use_permalinks is None:
+ self.use_permalinks = config["permalink"]
+
+ self.header_rgx = re.compile("[Hh][123456]")
+
def iterparent(self, root):
- for parent in root.getiterator():
+ ''' Iterator wrapper to get parent and child all at once. '''
+ for parent in root.iter():
for child in parent:
yield parent, child
-
- def add_anchor(self, c, elem_id): #@ReservedAssignment
- if self.use_anchors:
- anchor = etree.Element("a")
- anchor.text = c.text
- anchor.attrib["href"] = "#" + elem_id
- anchor.attrib["class"] = "toclink"
- c.text = ""
- for elem in c.getchildren():
- anchor.append(elem)
- c.remove(elem)
- c.append(anchor)
-
- def build_toc_etree(self, div, toc_list):
+
+ def replace_marker(self, root, elem):
+ ''' Replace marker with elem. '''
+ for (p, c) in self.iterparent(root):
+ text = ''.join(c.itertext()).strip()
+ if not text:
+ continue
+
+ # To keep the output from screwing up the
+            # validation by putting a <div> inside of a <p>
+            # we actually replace the <p> in its entirety.
+ # We do not allow the marker inside a header as that
+            # would cause an endless loop of placing a new TOC
+ # inside previously generated TOC.
+ if c.text and c.text.strip() == self.marker and \
+ not self.header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
+ for i in range(len(p)):
+ if p[i] == c:
+ p[i] = elem
+ break
+
+ def set_level(self, elem):
+ ''' Adjust header level according to base level. '''
+ level = int(elem.tag[-1]) + self.base_level
+ if level > 6:
+ level = 6
+ elem.tag = 'h%d' % level
+
+ def add_anchor(self, c, elem_id): # @ReservedAssignment
+ anchor = etree.Element("a")
+ anchor.text = c.text
+ anchor.attrib["href"] = "#" + elem_id
+ anchor.attrib["class"] = "toclink"
+ c.text = ""
+ for elem in c:
+ anchor.append(elem)
+ c.remove(elem)
+ c.append(anchor)
+
+ def add_permalink(self, c, elem_id):
+ permalink = etree.Element("a")
+ permalink.text = ("%spara;" % AMP_SUBSTITUTE
+ if self.use_permalinks is True
+ else self.use_permalinks)
+ permalink.attrib["href"] = "#" + elem_id
+ permalink.attrib["class"] = "headerlink"
+ permalink.attrib["title"] = "Permanent link"
+ c.append(permalink)
+
+ def build_toc_div(self, toc_list):
+ """ Return a string div given a toc list. """
+ div = etree.Element("div")
+ div.attrib["class"] = "toc"
+
# Add title to the div
- if self.config["title"]:
+ if self.title:
header = etree.SubElement(div, "span")
header.attrib["class"] = "toctitle"
- header.text = self.config["title"]
+ header.text = self.title
def build_etree_ul(toc_list, parent):
ul = etree.SubElement(parent, "ul")
@@ -118,104 +215,95 @@ class TocTreeprocessor(Treeprocessor):
if item['children']:
build_etree_ul(item['children'], li)
return ul
-
- return build_etree_ul(toc_list, div)
-
- def run(self, doc):
- div = etree.Element("div")
- div.attrib["class"] = "toc"
- header_rgx = re.compile("[Hh][123456]")
-
- self.use_anchors = self.config["anchorlink"] in [1, '1', True, 'True', 'true']
-
+ build_etree_ul(toc_list, div)
+ prettify = self.markdown.treeprocessors.get('prettify')
+ if prettify:
+ prettify.run(div)
+ return div
+
+ def run(self, doc):
# Get a list of id attributes
used_ids = set()
- for c in doc.getiterator():
- if "id" in c.attrib:
- used_ids.add(c.attrib["id"])
+ for el in doc.iter():
+ if "id" in el.attrib:
+ used_ids.add(el.attrib["id"])
- toc_list = []
- marker_found = False
- for (p, c) in self.iterparent(doc):
- text = ''.join(itertext(c)).strip()
- if not text:
- continue
+ toc_tokens = []
+ for el in doc.iter():
+ if isinstance(el.tag, string_type) and self.header_rgx.match(el.tag):
+ self.set_level(el)
+ text = ''.join(el.itertext()).strip()
- # To keep the output from screwing up the
-            # validation by putting a <div> inside of a <p>
-            # we actually replace the <p> in its entirety.
- # We do not allow the marker inside a header as that
- # would causes an enless loop of placing a new TOC
- # inside previously generated TOC.
- if c.text and c.text.strip() == self.config["marker"] and \
- not header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
- for i in range(len(p)):
- if p[i] == c:
- p[i] = div
- break
- marker_found = True
-
- if header_rgx.match(c.tag):
-
- # Do not override pre-existing ids
- if not "id" in c.attrib:
- elem_id = unique(self.config["slugify"](text, '-'), used_ids)
- c.attrib["id"] = elem_id
- else:
- elem_id = c.attrib["id"]
+ # Do not override pre-existing ids
+ if "id" not in el.attrib:
+ innertext = stashedHTML2text(text, self.markdown)
+ el.attrib["id"] = unique(self.slugify(innertext, self.sep), used_ids)
- tag_level = int(c.tag[-1])
-
- toc_list.append({'level': tag_level,
- 'id': elem_id,
- 'name': text})
-
- self.add_anchor(c, elem_id)
-
- toc_list_nested = order_toc_list(toc_list)
- self.build_toc_etree(div, toc_list_nested)
- prettify = self.markdown.treeprocessors.get('prettify')
- if prettify: prettify.run(div)
- if not marker_found:
- # serialize and attach to markdown instance.
- toc = self.markdown.serializer(div)
- for pp in self.markdown.postprocessors.values():
- toc = pp.run(toc)
- self.markdown.toc = toc
+ toc_tokens.append({
+ 'level': int(el.tag[-1]),
+ 'id': el.attrib["id"],
+ 'name': text
+ })
+
+ if self.use_anchors:
+ self.add_anchor(el, el.attrib["id"])
+ if self.use_permalinks:
+ self.add_permalink(el, el.attrib["id"])
+
+ div = self.build_toc_div(nest_toc_tokens(toc_tokens))
+ if self.marker:
+ self.replace_marker(doc, div)
+
+ # serialize and attach to markdown instance.
+ toc = self.markdown.serializer(div)
+ for pp in self.markdown.postprocessors.values():
+ toc = pp.run(toc)
+ self.markdown.toc = toc
class TocExtension(Extension):
-
- TreeProcessorClass = TocTreeprocessor
-
- def __init__(self, configs=[]):
- self.config = { "marker" : ["[TOC]",
- "Text to find and replace with Table of Contents -"
- "Defaults to \"[TOC]\""],
- "slugify" : [slugify,
- "Function to generate anchors based on header text-"
- "Defaults to the headerid ext's slugify function."],
- "title" : [None,
- "Title to insert into TOC
-                            "Title to insert into TOC <div> - "
- "anchorlink" : [0,
- "1 if header should be a self link"
- "Defaults to 0"]}
- for key, value in configs:
- self.setConfig(key, value)
+ TreeProcessorClass = TocTreeprocessor
+
+ def __init__(self, *args, **kwargs):
+ self.config = {
+ "marker": ['[TOC]',
+ 'Text to find and replace with Table of Contents - '
+ 'Set to an empty string to disable. Defaults to "[TOC]"'],
+ "title": ["",
+ "Title to insert into TOC - "
+ "Defaults to an empty string"],
+ "anchorlink": [False,
+ "True if header should be a self link - "
+ "Defaults to False"],
+ "permalink": [0,
+ "True or link text if a Sphinx-style permalink should "
+ "be added - Defaults to False"],
+ "baselevel": ['1', 'Base level for headers.'],
+ "slugify": [slugify,
+ "Function to generate anchors based on header text - "
+ "Defaults to the headerid ext's slugify function."],
+ 'separator': ['-', 'Word separator. Defaults to "-".']
+ }
+
+ super(TocExtension, self).__init__(*args, **kwargs)
def extendMarkdown(self, md, md_globals):
- tocext = self.TreeProcessorClass(md)
- tocext.config = self.getConfigs()
+ md.registerExtension(self)
+ self.md = md
+ self.reset()
+ tocext = self.TreeProcessorClass(md, self.getConfigs())
# Headerid ext is set to '>prettify'. With this set to '_end',
- # it should always come after headerid ext (and honor ids assinged
- # by the header id extension) if both are used. Same goes for
+    # it should always come after headerid ext (and honor ids assigned
+ # by the header id extension) if both are used. Same goes for
# attr_list extension. This must come last because we don't want
# to redefine ids after toc is created. But we do want toc prettified.
md.treeprocessors.add("toc", tocext, "_end")
+ def reset(self):
+ self.md.toc = ''
-def makeExtension(configs={}):
- return TocExtension(configs=configs)
+
+def makeExtension(*args, **kwargs):
+ return TocExtension(*args, **kwargs)
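With the rewrite above, md.toc is populated whether or not a marker is present. A sketch, assuming the patched package imports as `markdown`:

    import markdown

    md = markdown.Markdown(extensions=['toc'])
    html = md.convert('[TOC]\n\n# Section 1\n\n## Sub-section\n\n# Section 2')
    # the [TOC] paragraph is replaced by a <div class="toc"> in html, and the
    # same rendered list is also available afterwards as the string md.toc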
diff --git a/src/calibre/ebooks/markdown/extensions/wikilinks.py b/src/calibre/ebooks/markdown/extensions/wikilinks.py
index 877890b8ab..94e1b67948 100644
--- a/src/calibre/ebooks/markdown/extensions/wikilinks.py
+++ b/src/calibre/ebooks/markdown/extensions/wikilinks.py
@@ -2,78 +2,17 @@
WikiLinks Extension for Python-Markdown
======================================
-Converts [[WikiLinks]] to relative links. Requires Python-Markdown 2.0+
+Converts [[WikiLinks]] to relative links.
-Basic usage:
+See
+for documentation.
- >>> import markdown
- >>> text = "Some text with a [[WikiLink]]."
- >>> html = markdown.markdown(text, ['wikilinks'])
- >>> print html
- <p>Some text with a <a class="wikilink" href="/WikiLink/">WikiLink</a>.</p>
+Original code Copyright [Waylan Limberg](http://achinghead.com/).
-Whitespace behavior:
+All changes Copyright The Python Markdown Project
- >>> print markdown.markdown('[[ foo bar_baz ]]', ['wikilinks'])
-
- >>> print markdown.markdown('foo [[ ]] bar', ['wikilinks'])
- foo bar
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
-To define custom settings the simple way:
-
- >>> print markdown.markdown(text,
- ... ['wikilinks(base_url=/wiki/,end_url=.html,html_class=foo)']
- ... )
- Some text with a WikiLink.
-
-Custom settings the complex way:
-
- >>> md = markdown.Markdown(
- ... extensions = ['wikilinks'],
- ... extension_configs = {'wikilinks': [
- ... ('base_url', 'http://example.com/'),
- ... ('end_url', '.html'),
- ... ('html_class', '') ]},
- ... safe_mode = True)
- >>> print md.convert(text)
- Some text with a WikiLink.
-
-Use MetaData with mdx_meta.py (Note the blank html_class in MetaData):
-
- >>> text = """wiki_base_url: http://example.com/
- ... wiki_end_url: .html
- ... wiki_html_class:
- ...
- ... Some text with a [[WikiLink]]."""
- >>> md = markdown.Markdown(extensions=['meta', 'wikilinks'])
- >>> print md.convert(text)
- Some text with a WikiLink.
-
-MetaData should not carry over to next document:
-
- >>> print md.convert("No [[MetaData]] here.")
- No MetaData here.
-
-Define a custom URL builder:
-
- >>> def my_url_builder(label, base, end):
- ... return '/bar/'
- >>> md = markdown.Markdown(extensions=['wikilinks'],
- ... extension_configs={'wikilinks' : [('build_url', my_url_builder)]})
- >>> print md.convert('[[foo]]')
-
-
-From the command line:
-
- python markdown.py -x wikilinks(base_url=http://example.com/,end_url=.html,html_class=foo) src.txt
-
-By [Waylan Limberg](http://achinghead.com/).
-
-License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
-
-Dependencies:
-* [Python 2.3+](http://python.org)
-* [Markdown 2.0+](http://packages.python.org/Markdown/)
'''
from __future__ import absolute_import
@@ -83,29 +22,28 @@ from ..inlinepatterns import Pattern
from ..util import etree
import re
+
def build_url(label, base, end):
""" Build a url from the label, a base, and an end. """
clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
- return '%s%s%s'% (base, clean_label, end)
+ return '%s%s%s' % (base, clean_label, end)
class WikiLinkExtension(Extension):
- def __init__(self, configs):
- # set extension defaults
+
+ def __init__(self, *args, **kwargs):
self.config = {
- 'base_url' : ['/', 'String to append to beginning or URL.'],
- 'end_url' : ['/', 'String to append to end of URL.'],
- 'html_class' : ['wikilink', 'CSS hook. Leave blank for none.'],
- 'build_url' : [build_url, 'Callable formats URL from label.'],
+ 'base_url': ['/', 'String to append to beginning or URL.'],
+ 'end_url': ['/', 'String to append to end of URL.'],
+ 'html_class': ['wikilink', 'CSS hook. Leave blank for none.'],
+ 'build_url': [build_url, 'Callable formats URL from label.'],
}
-
- # Override defaults with user settings
- for key, value in configs :
- self.setConfig(key, value)
-
+
+ super(WikiLinkExtension, self).__init__(*args, **kwargs)
+
def extendMarkdown(self, md, md_globals):
self.md = md
-
+
# append to end of inline patterns
WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]'
wikilinkPattern = WikiLinks(WIKILINK_RE, self.getConfigs())
@@ -117,14 +55,14 @@ class WikiLinks(Pattern):
def __init__(self, pattern, config):
super(WikiLinks, self).__init__(pattern)
self.config = config
-
+
def handleMatch(self, m):
if m.group(2).strip():
base_url, end_url, html_class = self._getMeta()
label = m.group(2).strip()
url = self.config['build_url'](label, base_url, end_url)
a = etree.Element('a')
- a.text = label
+ a.text = label
a.set('href', url)
if html_class:
a.set('class', html_class)
@@ -145,7 +83,7 @@ class WikiLinks(Pattern):
if 'wiki_html_class' in self.md.Meta:
html_class = self.md.Meta['wiki_html_class'][0]
return base_url, end_url, html_class
-
-def makeExtension(configs=None) :
- return WikiLinkExtension(configs=configs)
+
+def makeExtension(*args, **kwargs):
+ return WikiLinkExtension(*args, **kwargs)
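A sketch of the behaviour from the removed doctests, plus the new keyword-argument configuration style (assuming the patched package imports as `markdown`):

    import markdown
    from markdown.extensions.wikilinks import WikiLinkExtension

    markdown.markdown('Some text with a [[WikiLink]].', extensions=['wikilinks'])
    # roughly: ...<a class="wikilink" href="/WikiLink/">WikiLink</a>...

    markdown.markdown('A [[WikiLink]].',
                      extensions=[WikiLinkExtension(base_url='/wiki/', end_url='.html')])
    # links now point at /wiki/WikiLink.html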
diff --git a/src/calibre/ebooks/markdown/inlinepatterns.py b/src/calibre/ebooks/markdown/inlinepatterns.py
index de957ef480..95d358d715 100644
--- a/src/calibre/ebooks/markdown/inlinepatterns.py
+++ b/src/calibre/ebooks/markdown/inlinepatterns.py
@@ -46,13 +46,13 @@ from __future__ import unicode_literals
from . import util
from . import odict
import re
-try:
+try: # pragma: no cover
from urllib.parse import urlparse, urlunparse
-except ImportError:
+except ImportError: # pragma: no cover
from urlparse import urlparse, urlunparse
-try:
+try: # pragma: no cover
from html import entities
-except ImportError:
+except ImportError: # pragma: no cover
import htmlentitydefs as entities
@@ -64,10 +64,12 @@ def build_inlinepatterns(md_instance, **kwargs):
inlinePatterns["reference"] = ReferencePattern(REFERENCE_RE, md_instance)
inlinePatterns["link"] = LinkPattern(LINK_RE, md_instance)
inlinePatterns["image_link"] = ImagePattern(IMAGE_LINK_RE, md_instance)
- inlinePatterns["image_reference"] = \
- ImageReferencePattern(IMAGE_REFERENCE_RE, md_instance)
- inlinePatterns["short_reference"] = \
- ReferencePattern(SHORT_REF_RE, md_instance)
+ inlinePatterns["image_reference"] = ImageReferencePattern(
+ IMAGE_REFERENCE_RE, md_instance
+ )
+ inlinePatterns["short_reference"] = ReferencePattern(
+ SHORT_REF_RE, md_instance
+ )
inlinePatterns["autolink"] = AutolinkPattern(AUTOLINK_RE, md_instance)
inlinePatterns["automail"] = AutomailPattern(AUTOMAIL_RE, md_instance)
inlinePatterns["linebreak"] = SubstituteTagPattern(LINE_BREAK_RE, 'br')
@@ -75,7 +77,8 @@ def build_inlinepatterns(md_instance, **kwargs):
inlinePatterns["html"] = HtmlPattern(HTML_RE, md_instance)
inlinePatterns["entity"] = HtmlPattern(ENTITY_RE, md_instance)
inlinePatterns["not_strong"] = SimpleTextPattern(NOT_STRONG_RE)
- inlinePatterns["strong_em"] = DoubleTagPattern(STRONG_EM_RE, 'strong,em')
+ inlinePatterns["em_strong"] = DoubleTagPattern(EM_STRONG_RE, 'strong,em')
+ inlinePatterns["strong_em"] = DoubleTagPattern(STRONG_EM_RE, 'em,strong')
inlinePatterns["strong"] = SimpleTagPattern(STRONG_RE, 'strong')
inlinePatterns["emphasis"] = SimpleTagPattern(EMPHASIS_RE, 'em')
if md_instance.smart_emphasis:
@@ -90,46 +93,84 @@ The actual regular expressions for patterns
"""
NOBRACKET = r'[^\]\[]*'
-BRK = ( r'\[('
- + (NOBRACKET + r'(\[')*6
- + (NOBRACKET+ r'\])*')*6
- + NOBRACKET + r')\]' )
+BRK = (
+ r'\[(' +
+ (NOBRACKET + r'(\[')*6 +
+ (NOBRACKET + r'\])*')*6 +
+ NOBRACKET + r')\]'
+)
NOIMG = r'(?<!\!)'
-LINK_RE = NOIMG + BRK + \
-r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12\s*)?\)'''
+# `e=f()` or ``e=f("`")``
+BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)'
+
+# [text](url) or [text](<url>) or [text](url "title")
+LINK_RE = NOIMG + BRK + \
+ r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12\s*)?\)'''
-IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^\)]*))\)'
#  or 
-REFERENCE_RE = NOIMG + BRK+ r'\s?\[([^\]]*)\]' # [Google][3]
-SHORT_REF_RE = NOIMG + r'\[([^\]]+)\]' # [Google]
-IMAGE_REFERENCE_RE = r'\!' + BRK + '\s?\[([^\]]*)\]' # ![alt text][2]
-NOT_STRONG_RE = r'((^| )(\*|_)( |$))' # stand-alone * or _
-AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^>]*)>' #
-AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' #
+IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^")]+"[^"]*"|[^\)]*))\)'
-HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)' # <...>
-ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # &
-LINE_BREAK_RE = r' \n' # two spaces at end of line
+# [Google][3]
+REFERENCE_RE = NOIMG + BRK + r'\s?\[([^\]]*)\]'
+
+# [Google]
+SHORT_REF_RE = NOIMG + r'\[([^\]]+)\]'
+
+# ![alt text][2]
+IMAGE_REFERENCE_RE = r'\!' + BRK + '\s?\[([^\]]*)\]'
+
+# stand-alone * or _
+NOT_STRONG_RE = r'((^| )(\*|_)( |$))'
+
+#
+AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^>]*)>'
+
+#
+AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>'
+
+# <...>
+HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)'
+
+# &
+ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)'
+
+# two spaces at end of line
+LINE_BREAK_RE = r' \n'
def dequote(string):
"""Remove quotes from around a string."""
- if ( ( string.startswith('"') and string.endswith('"'))
- or (string.startswith("'") and string.endswith("'")) ):
+ if ((string.startswith('"') and string.endswith('"')) or
+ (string.startswith("'") and string.endswith("'"))):
return string[1:-1]
else:
return string
-ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
+
+ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
+
def handleAttributes(text, parent):
"""Set values of an element based on attribute definitions ({@id=123})."""
@@ -143,6 +184,7 @@ The pattern classes
-----------------------------------------------------------------------------
"""
+
class Pattern(object):
"""Base class that inline patterns subclass. """
@@ -156,7 +198,7 @@ class Pattern(object):
"""
self.pattern = pattern
- self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern,
+ self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern,
re.DOTALL | re.UNICODE)
# Api for Markdown to pass safe_mode into instance
@@ -178,7 +220,7 @@ class Pattern(object):
* m: A re match object containing a match of the pattern.
"""
- pass
+ pass # pragma: no cover
def type(self):
""" Return class name, to define pattern type """
@@ -188,9 +230,10 @@ class Pattern(object):
""" Return unescaped text given text with an inline placeholder. """
try:
stash = self.markdown.treeprocessors['inline'].stashed_nodes
- except KeyError:
+ except KeyError: # pragma: no cover
return text
- def itertext(el):
+
+ def itertext(el): # pragma: no cover
' Reimplement Element.itertext for older python versions '
tag = el.tag
if not isinstance(tag, util.string_type) and tag is not None:
@@ -202,6 +245,7 @@ class Pattern(object):
yield s
if e.tail:
yield e.tail
+
def get_stash(m):
id = m.group(1)
if id in stash:
@@ -210,17 +254,14 @@ class Pattern(object):
return value
else:
# An etree Element - return text content only
- return ''.join(itertext(value))
+ return ''.join(itertext(value))
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
class SimpleTextPattern(Pattern):
""" Return a simple text of group(2) of a Pattern. """
def handleMatch(self, m):
- text = m.group(2)
- if text == util.INLINE_PLACEHOLDER_PREFIX:
- return None
- return text
+ return m.group(2)
class EscapePattern(Pattern):
@@ -231,7 +272,7 @@ class EscapePattern(Pattern):
if char in self.markdown.ESCAPED_CHARS:
return '%s%s%s' % (util.STX, ord(char), util.ETX)
else:
- return '\\%s' % char
+ return None
class SimpleTagPattern(Pattern):
@@ -240,7 +281,7 @@ class SimpleTagPattern(Pattern):
of a Pattern.
"""
- def __init__ (self, pattern, tag):
+ def __init__(self, pattern, tag):
Pattern.__init__(self, pattern)
self.tag = tag
@@ -252,13 +293,13 @@ class SimpleTagPattern(Pattern):
class SubstituteTagPattern(SimpleTagPattern):
""" Return an element of type `tag` with no children. """
- def handleMatch (self, m):
+ def handleMatch(self, m):
return util.etree.Element(self.tag)
class BacktickPattern(Pattern):
""" Return a `` element containing the matching text. """
- def __init__ (self, pattern):
+ def __init__(self, pattern):
Pattern.__init__(self, pattern)
self.tag = "code"
@@ -279,12 +320,14 @@ class DoubleTagPattern(SimpleTagPattern):
el1 = util.etree.Element(tag1)
el2 = util.etree.SubElement(el1, tag2)
el2.text = m.group(3)
+ if len(m.groups()) == 5:
+ el2.tail = m.group(4)
return el1
class HtmlPattern(Pattern):
""" Store raw inline html and return a placeholder. """
- def handleMatch (self, m):
+ def handleMatch(self, m):
rawhtml = self.unescape(m.group(2))
place_holder = self.markdown.htmlStash.store(rawhtml)
return place_holder
@@ -293,8 +336,9 @@ class HtmlPattern(Pattern):
""" Return unescaped text given text with an inline placeholder. """
try:
stash = self.markdown.treeprocessors['inline'].stashed_nodes
- except KeyError:
+ except KeyError: # pragma: no cover
return text
+
def get_stash(m):
id = m.group(1)
value = stash.get(id)
@@ -303,7 +347,7 @@ class HtmlPattern(Pattern):
return self.markdown.serializer(value)
except:
return '\%s' % value
-
+
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
@@ -323,7 +367,7 @@ class LinkPattern(Pattern):
el.set("href", "")
if title:
- title = dequote(self.unescape(title))
+ title = dequote(self.unescape(title))
el.set("title", title)
return el
@@ -344,35 +388,36 @@ class LinkPattern(Pattern):
`username:password@host:port`.
"""
- url = url.replace(' ', '%20')
if not self.markdown.safeMode:
# Return immediately bypassing parsing.
return url
-
+
try:
scheme, netloc, path, params, query, fragment = url = urlparse(url)
- except ValueError:
+ except ValueError: # pragma: no cover
# Bad url - so bad it couldn't be parsed.
return ''
-
+
locless_schemes = ['', 'mailto', 'news']
allowed_schemes = locless_schemes + ['http', 'https', 'ftp', 'ftps']
if scheme not in allowed_schemes:
# Not a known (allowed) scheme. Not safe.
return ''
-
- if netloc == '' and scheme not in locless_schemes:
+
+ if netloc == '' and scheme not in locless_schemes: # pragma: no cover
# This should not happen. Treat as suspect.
return ''
for part in url[2:]:
if ":" in part:
- # A colon in "path", "parameters", "query" or "fragment" is suspect.
+ # A colon in "path", "parameters", "query"
+ # or "fragment" is suspect.
return ''
# Url passes all tests. Return url as-is.
return urlunparse(url)
+
class ImagePattern(LinkPattern):
""" Return a img element from the given match. """
def handleMatch(self, m):
@@ -396,6 +441,7 @@ class ImagePattern(LinkPattern):
el.set('alt', self.unescape(truealt))
return el
+
class ReferencePattern(LinkPattern):
""" Match to a stored reference and return link element. """
@@ -413,7 +459,7 @@ class ReferencePattern(LinkPattern):
# Clean up linebreaks in id
id = self.NEWLINE_CLEANUP_RE.sub(' ', id)
- if not id in self.markdown.references: # ignore undefined refs
+ if id not in self.markdown.references: # ignore undefined refs
return None
href, title = self.markdown.references[id]
@@ -454,6 +500,7 @@ class AutolinkPattern(Pattern):
el.text = util.AtomicString(m.group(2))
return el
+
class AutomailPattern(Pattern):
"""
Return a mailto link Element given an automail link (`<foo@bar.com>`).
@@ -480,4 +527,3 @@ class AutomailPattern(Pattern):
ord(letter) for letter in mailto])
el.set('href', mailto)
return el
-
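For reference, a minimal standalone sketch (not library code) of how the `"^(.*?)%s(.*?)$"` wrapper compiled in `Pattern.__init__` above shifts group numbers, using the AUTOLINK_RE constant from this file; group(2) is what `AutolinkPattern.handleMatch` turns into the link text:

    import re

    AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^>]*)>'
    wrapped = re.compile("^(.*?)%s(.*?)$" % AUTOLINK_RE, re.DOTALL | re.UNICODE)

    m = wrapped.match('see <http://example.com> for details')
    print(m.group(1))   # 'see '                 text before the match
    print(m.group(2))   # 'http://example.com'   the pattern's own first group
    print(m.group(3))   # ' for details'         text after the match
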
diff --git a/src/calibre/ebooks/markdown/odict.py b/src/calibre/ebooks/markdown/odict.py
index 8089ece21a..584ad7c173 100644
--- a/src/calibre/ebooks/markdown/odict.py
+++ b/src/calibre/ebooks/markdown/odict.py
@@ -1,18 +1,13 @@
from __future__ import unicode_literals
from __future__ import absolute_import
from . import util
-
from copy import deepcopy
-def iteritems_compat(d):
- """Return an iterator over the (key, value) pairs of a dictionary.
- Copied from `six` module."""
- return iter(getattr(d, _iteritems)())
class OrderedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
-
+
Copied from Django's SortedDict with some modifications.
"""
@@ -87,11 +82,11 @@ class OrderedDict(dict):
for key in self.keyOrder:
yield self[key]
- if util.PY3:
+ if util.PY3: # pragma: no cover
items = _iteritems
keys = _iterkeys
values = _itervalues
- else:
+ else: # pragma: no cover
iteritems = _iteritems
iterkeys = _iterkeys
itervalues = _itervalues
@@ -106,8 +101,8 @@ class OrderedDict(dict):
return [self[k] for k in self.keyOrder]
def update(self, dict_):
- for k, v in iteritems_compat(dict_):
- self[k] = v
+ for k in dict_:
+ self[k] = dict_[k]
def setdefault(self, key, default):
if key not in self:
@@ -138,7 +133,9 @@ class OrderedDict(dict):
Replaces the normal dict.__repr__ with a version that returns the keys
in their Ordered order.
"""
- return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in iteritems_compat(self)])
+ return '{%s}' % ', '.join(
+ ['%r: %r' % (k, v) for k, v in self._iteritems()]
+ )
def clear(self):
super(OrderedDict, self).clear()
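The rewritten `update()` above relies on plain iteration over a mapping yielding its keys on both Python 2 and 3, so the `iteritems_compat()` shim can go. A hypothetical miniature class (not the library's OrderedDict) showing the same approach while preserving insertion order:

    class TinyOrderedDict(dict):
        """Hypothetical miniature, for illustration only."""

        def __init__(self):
            super(TinyOrderedDict, self).__init__()
            self.keyOrder = []

        def __setitem__(self, key, value):
            if key not in self:
                self.keyOrder.append(key)
            super(TinyOrderedDict, self).__setitem__(key, value)

        def update(self, dict_):
            # Same strategy as the patched update(): iterate keys directly.
            for k in dict_:
                self[k] = dict_[k]

    d = TinyOrderedDict()
    d['em'] = 1
    d.update({'strong': 2})
    print(d.keyOrder)        # ['em', 'strong']
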
diff --git a/src/calibre/ebooks/markdown/postprocessors.py b/src/calibre/ebooks/markdown/postprocessors.py
index 5f3f032c15..2d4dcb589e 100644
--- a/src/calibre/ebooks/markdown/postprocessors.py
+++ b/src/calibre/ebooks/markdown/postprocessors.py
@@ -42,7 +42,7 @@ class Postprocessor(util.Processor):
(possibly modified) string.
"""
- pass
+ pass # pragma: no cover
class RawHtmlPostprocessor(Postprocessor):
@@ -51,7 +51,7 @@ class RawHtmlPostprocessor(Postprocessor):
def run(self, text):
""" Iterate over html stash and restore "safe" html. """
for i in range(self.markdown.htmlStash.html_counter):
- html, safe = self.markdown.htmlStash.rawHtmlBlocks[i]
+ html, safe = self.markdown.htmlStash.rawHtmlBlocks[i]
if self.markdown.safeMode and not safe:
if str(self.markdown.safeMode).lower() == 'escape':
html = self.escape(html)
@@ -59,12 +59,16 @@ class RawHtmlPostprocessor(Postprocessor):
html = ''
else:
html = self.markdown.html_replacement_text
- if self.isblocklevel(html) and (safe or not self.markdown.safeMode):
- text = text.replace("<p>%s</p>" %
- (self.markdown.htmlStash.get_placeholder(i)),
- html + "\n")
- text = text.replace(self.markdown.htmlStash.get_placeholder(i),
- html)
+ if (self.isblocklevel(html) and
+ (safe or not self.markdown.safeMode)):
+ text = text.replace(
+ "%s
" %
+ (self.markdown.htmlStash.get_placeholder(i)),
+ html + "\n"
+ )
+ text = text.replace(
+ self.markdown.htmlStash.get_placeholder(i), html
+ )
return text
def escape(self, html):
@@ -88,7 +92,7 @@ class AndSubstitutePostprocessor(Postprocessor):
""" Restore valid entities """
def run(self, text):
- text = text.replace(util.AMP_SUBSTITUTE, "&amp;")
+ text = text.replace(util.AMP_SUBSTITUTE, "&amp;")
return text
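For reference, a standalone sketch (assumed values, not library code) of the two replacements `RawHtmlPostprocessor.run()` performs: a block-level stash entry also has the `<p>` wrapper around its placeholder stripped, while the second replace catches inline occurrences. The placeholder shape matches HTML_PLACEHOLDER in util.py:

    STX, ETX = '\u0002', '\u0003'
    placeholder = STX + "wzxhzdk:0" + ETX
    stashed = '<div>raw block</div>'

    text = '<p>%s</p>\n<p>before %s after</p>\n' % (placeholder, placeholder)
    text = text.replace("<p>%s</p>" % placeholder, stashed + "\n")   # block-level path
    text = text.replace(placeholder, stashed)                        # inline occurrences
    print(text)
    # <div>raw block</div>
    #
    # <p>before <div>raw block</div> after</p>
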
diff --git a/src/calibre/ebooks/markdown/preprocessors.py b/src/calibre/ebooks/markdown/preprocessors.py
index 72b2ed6f35..7ea4fcf9f5 100644
--- a/src/calibre/ebooks/markdown/preprocessors.py
+++ b/src/calibre/ebooks/markdown/preprocessors.py
@@ -3,7 +3,7 @@ PRE-PROCESSORS
=============================================================================
Preprocessors work on source text before we start doing anything too
-complicated.
+complicated.
"""
from __future__ import absolute_import
@@ -41,7 +41,7 @@ class Preprocessor(util.Processor):
the (possibly modified) list of lines.
"""
- pass
+ pass # pragma: no cover
class NormalizeWhitespace(Preprocessor):
@@ -61,13 +61,14 @@ class HtmlBlockPreprocessor(Preprocessor):
right_tag_patterns = ["%s>", "%s>"]
attrs_pattern = r"""
- \s+(?P[^>"'/= ]+)=(?P['"])(?P.*?)(?P=q) # attr="value"
- | # OR
- \s+(?P[^>"'/= ]+)=(?P[^> ]+) # attr=value
- | # OR
- \s+(?P[^>"'/= ]+) # attr
+ \s+(?P[^>"'/= ]+)=(?P['"])(?P.*?)(?P=q) # attr="value"
+ | # OR
+ \s+(?P[^>"'/= ]+)=(?P[^> ]+) # attr=value
+ | # OR
+ \s+(?P[^>"'/= ]+) # attr
"""
- left_tag_pattern = r'^\<(?P[^> ]+)(?P(%s)*)\s*\/?\>?' % attrs_pattern
+ left_tag_pattern = r'^\<(?P[^> ]+)(?P(%s)*)\s*\/?\>?' % \
+ attrs_pattern
attrs_re = re.compile(attrs_pattern, re.VERBOSE)
left_tag_re = re.compile(left_tag_pattern, re.VERBOSE)
markdown_in_raw = False
@@ -87,7 +88,9 @@ class HtmlBlockPreprocessor(Preprocessor):
attrs[ma.group('attr').strip()] = ""
elif ma.group('attr1'):
if ma.group('value1'):
- attrs[ma.group('attr1').strip()] = ma.group('value1')
+ attrs[ma.group('attr1').strip()] = ma.group(
+ 'value1'
+ )
else:
attrs[ma.group('attr1').strip()] = ""
elif ma.group('attr2'):
@@ -102,7 +105,7 @@ class HtmlBlockPreprocessor(Preprocessor):
i = block.find(rtag, start_index)
if i == -1:
return -1
- j = block.find(ltag, start_index)
+ j = block.find(ltag, start_index)
# if no ltag, or rtag found before another ltag, return index
if (j > i or j == -1):
return i + len(rtag)
@@ -111,27 +114,28 @@ class HtmlBlockPreprocessor(Preprocessor):
j = block.find('>', j)
start_index = self._recursive_tagfind(ltag, rtag, j + 1, block)
if start_index == -1:
- # HTML potentially malformed- ltag has no corresponding
+ # HTML potentially malformed- ltag has no corresponding
# rtag
return -1
def _get_right_tag(self, left_tag, left_index, block):
for p in self.right_tag_patterns:
tag = p % left_tag
- i = self._recursive_tagfind("<%s" % left_tag, tag, left_index, block)
+ i = self._recursive_tagfind(
+ "<%s" % left_tag, tag, left_index, block
+ )
if i > 2:
return tag.lstrip("<").rstrip(">"), i
return block.rstrip()[-left_index:-1].lower(), len(block)
-
+
def _equal_tags(self, left_tag, right_tag):
- if left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
+ if left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
return True
if ("/" + left_tag) == right_tag:
return True
if (right_tag == "--" and left_tag == "--"):
return True
- elif left_tag == right_tag[1:] \
- and right_tag[0] == "/":
+ elif left_tag == right_tag[1:] and right_tag[0] == "/":
return True
else:
return False
@@ -139,6 +143,49 @@ class HtmlBlockPreprocessor(Preprocessor):
def _is_oneliner(self, tag):
return (tag in ['hr', 'hr/'])
+ def _stringindex_to_listindex(self, stringindex, items):
+ """
+ Same effect as concatenating the strings in items,
+ finding the character to which stringindex refers in that string,
+ and returning the index of the item in which that character resides.
+ """
+ items.append('dummy')
+ i, count = 0, 0
+ while count <= stringindex:
+ count += len(items[i])
+ i += 1
+ return i - 1
+
+ def _nested_markdown_in_html(self, items):
+ """Find and process html child elements of the given element block."""
+ for i, item in enumerate(items):
+ if self.left_tag_re.match(item):
+ left_tag, left_index, attrs = \
+ self._get_left_tag(''.join(items[i:]))
+ right_tag, data_index = self._get_right_tag(
+ left_tag, left_index, ''.join(items[i:]))
+ right_listindex = \
+ self._stringindex_to_listindex(data_index, items[i:]) + i
+ if 'markdown' in attrs.keys():
+ items[i] = items[i][left_index:] # remove opening tag
+ placeholder = self.markdown.htmlStash.store_tag(
+ left_tag, attrs, i + 1, right_listindex + 1)
+ items.insert(i, placeholder)
+ if len(items) - right_listindex <= 1: # last nest, no tail
+ right_listindex -= 1
+ items[right_listindex] = items[right_listindex][
+ :-len(right_tag) - 2] # remove closing tag
+ else: # raw html
+ if len(items) - right_listindex <= 1: # last element
+ right_listindex -= 1
+ if right_listindex <= i:
+ right_listindex = i + 1
+ placeholder = self.markdown.htmlStash.store('\n\n'.join(
+ items[i:right_listindex]))
+ del items[i:right_listindex]
+ items.insert(i, placeholder)
+ return items
+
def run(self, lines):
text = "\n".join(lines)
new_blocks = []
@@ -146,7 +193,7 @@ class HtmlBlockPreprocessor(Preprocessor):
items = []
left_tag = ''
right_tag = ''
- in_tag = False # flag
+ in_tag = False # flag
while text:
block = text[0]
@@ -160,24 +207,21 @@ class HtmlBlockPreprocessor(Preprocessor):
if not in_tag:
if block.startswith("<") and len(block.strip()) > 1:
- if block[1] == "!":
+ if block[1:4] == "!--":
# is a comment block
- left_tag, left_index, attrs = "--", 2, {}
+ left_tag, left_index, attrs = "--", 2, {}
else:
left_tag, left_index, attrs = self._get_left_tag(block)
- right_tag, data_index = self._get_right_tag(left_tag,
+ right_tag, data_index = self._get_right_tag(left_tag,
left_index,
block)
# keep checking conditions below and maybe just append
-
- if data_index < len(block) \
- and (util.isBlockLevel(left_tag)
- or left_tag == '--'):
+
+ if data_index < len(block) and (util.isBlockLevel(left_tag) or left_tag == '--'):
text.insert(0, block[data_index:])
block = block[:data_index]
- if not (util.isBlockLevel(left_tag) \
- or block[1] in ["!", "?", "@", "%"]):
+ if not (util.isBlockLevel(left_tag) or block[1] in ["!", "?", "@", "%"]):
new_blocks.append(block)
continue
@@ -186,35 +230,30 @@ class HtmlBlockPreprocessor(Preprocessor):
continue
if block.rstrip().endswith(">") \
- and self._equal_tags(left_tag, right_tag):
+ and self._equal_tags(left_tag, right_tag):
if self.markdown_in_raw and 'markdown' in attrs.keys():
- start = re.sub(r'\smarkdown(=[\'"]?[^> ]*[\'"]?)?',
- '', block[:left_index])
- end = block[-len(right_tag)-2:]
- block = block[left_index:-len(right_tag)-2]
- new_blocks.append(
- self.markdown.htmlStash.store(start))
- new_blocks.append(block)
- new_blocks.append(
- self.markdown.htmlStash.store(end))
+ block = block[left_index:-len(right_tag) - 2]
+ new_blocks.append(self.markdown.htmlStash.
+ store_tag(left_tag, attrs, 0, 2))
+ new_blocks.extend([block])
else:
new_blocks.append(
self.markdown.htmlStash.store(block.strip()))
continue
- else:
+ else:
# if is block level tag and is not complete
-
- if util.isBlockLevel(left_tag) or left_tag == "--" \
- and not block.rstrip().endswith(">"):
+ if (not self._equal_tags(left_tag, right_tag)) and \
+ (util.isBlockLevel(left_tag) or left_tag == "--"):
items.append(block.strip())
in_tag = True
else:
new_blocks.append(
- self.markdown.htmlStash.store(block.strip()))
-
+ self.markdown.htmlStash.store(block.strip())
+ )
continue
- new_blocks.append(block)
+ else:
+ new_blocks.append(block)
else:
items.append(block)
@@ -223,7 +262,7 @@ class HtmlBlockPreprocessor(Preprocessor):
if self._equal_tags(left_tag, right_tag):
# if find closing tag
-
+
if data_index < len(block):
# we have more text after right_tag
items[-1] = block[:data_index]
@@ -231,16 +270,21 @@ class HtmlBlockPreprocessor(Preprocessor):
in_tag = False
if self.markdown_in_raw and 'markdown' in attrs.keys():
- start = re.sub(r'\smarkdown(=[\'"]?[^> ]*[\'"]?)?',
- '', items[0][:left_index])
items[0] = items[0][left_index:]
- end = items[-1][-len(right_tag)-2:]
- items[-1] = items[-1][:-len(right_tag)-2]
- new_blocks.append(
- self.markdown.htmlStash.store(start))
- new_blocks.extend(items)
- new_blocks.append(
- self.markdown.htmlStash.store(end))
+ items[-1] = items[-1][:-len(right_tag) - 2]
+ if items[len(items) - 1]: # not a newline/empty string
+ right_index = len(items) + 3
+ else:
+ right_index = len(items) + 2
+ new_blocks.append(self.markdown.htmlStash.store_tag(
+ left_tag, attrs, 0, right_index))
+ placeholderslen = len(self.markdown.htmlStash.tag_data)
+ new_blocks.extend(
+ self._nested_markdown_in_html(items))
+ nests = len(self.markdown.htmlStash.tag_data) - \
+ placeholderslen
+ self.markdown.htmlStash.tag_data[-1 - nests][
+ 'right_index'] += nests - 2
else:
new_blocks.append(
self.markdown.htmlStash.store('\n\n'.join(items)))
@@ -248,21 +292,23 @@ class HtmlBlockPreprocessor(Preprocessor):
if items:
if self.markdown_in_raw and 'markdown' in attrs.keys():
- start = re.sub(r'\smarkdown(=[\'"]?[^> ]*[\'"]?)?',
- '', items[0][:left_index])
items[0] = items[0][left_index:]
- end = items[-1][-len(right_tag)-2:]
- items[-1] = items[-1][:-len(right_tag)-2]
+ items[-1] = items[-1][:-len(right_tag) - 2]
+ if items[len(items) - 1]: # not a newline/empty string
+ right_index = len(items) + 3
+ else:
+ right_index = len(items) + 2
new_blocks.append(
- self.markdown.htmlStash.store(start))
- new_blocks.extend(items)
- if end.strip():
- new_blocks.append(
- self.markdown.htmlStash.store(end))
+ self.markdown.htmlStash.store_tag(
+ left_tag, attrs, 0, right_index))
+ placeholderslen = len(self.markdown.htmlStash.tag_data)
+ new_blocks.extend(self._nested_markdown_in_html(items))
+ nests = len(self.markdown.htmlStash.tag_data) - placeholderslen
+ self.markdown.htmlStash.tag_data[-1 - nests][
+ 'right_index'] += nests - 2
else:
new_blocks.append(
self.markdown.htmlStash.store('\n\n'.join(items)))
- #new_blocks.append(self.markdown.htmlStash.store('\n\n'.join(items)))
new_blocks.append('\n')
new_text = "\n\n".join(new_blocks)
@@ -273,11 +319,13 @@ class ReferencePreprocessor(Preprocessor):
""" Remove reference definitions from text and store for later use. """
TITLE = r'[ ]*(\"(.*)\"|\'(.*)\'|\((.*)\))[ ]*'
- RE = re.compile(r'^[ ]{0,3}\[([^\]]*)\]:\s*([^ ]*)[ ]*(%s)?$' % TITLE, re.DOTALL)
+ RE = re.compile(
+ r'^[ ]{0,3}\[([^\]]*)\]:\s*([^ ]*)[ ]*(%s)?$' % TITLE, re.DOTALL
+ )
TITLE_RE = re.compile(r'^%s$' % TITLE)
- def run (self, lines):
- new_text = [];
+ def run(self, lines):
+ new_text = []
while lines:
line = lines.pop(0)
m = self.RE.match(line)
@@ -295,4 +343,4 @@ class ReferencePreprocessor(Preprocessor):
else:
new_text.append(line)
- return new_text #+ "\n"
+ return new_text # + "\n"
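The `_stringindex_to_listindex()` helper added above maps an index into the concatenation of `items` back to the position of the item containing that character. A standalone copy of its arithmetic (the method itself appends the sentinel to `items` in place):

    def stringindex_to_listindex(stringindex, items):
        items = list(items) + ['dummy']
        i, count = 0, 0
        while count <= stringindex:
            count += len(items[i])
            i += 1
        return i - 1

    items = ['<div>', 'hello', '</div>']
    # ''.join(items)[7] is the 'l' of 'hello', which lives in items[1].
    print(stringindex_to_listindex(7, items))   # 1
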
diff --git a/src/calibre/ebooks/markdown/serializers.py b/src/calibre/ebooks/markdown/serializers.py
index b19d61c93d..1e8d9dd288 100644
--- a/src/calibre/ebooks/markdown/serializers.py
+++ b/src/calibre/ebooks/markdown/serializers.py
@@ -42,9 +42,9 @@ from __future__ import unicode_literals
from . import util
ElementTree = util.etree.ElementTree
QName = util.etree.QName
-if hasattr(util.etree, 'test_comment'):
+if hasattr(util.etree, 'test_comment'): # pragma: no cover
Comment = util.etree.test_comment
-else:
+else: # pragma: no cover
Comment = util.etree.Comment
PI = util.etree.PI
ProcessingInstruction = util.etree.ProcessingInstruction
@@ -56,7 +56,7 @@ HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
try:
HTML_EMPTY = set(HTML_EMPTY)
-except NameError:
+except NameError: # pragma: no cover
pass
_namespace_map = {
@@ -73,17 +73,19 @@ _namespace_map = {
}
-def _raise_serialization_error(text):
+def _raise_serialization_error(text): # pragma: no cover
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
+
def _encode(text, encoding):
try:
return text.encode(encoding, "xmlcharrefreplace")
- except (TypeError, AttributeError):
+ except (TypeError, AttributeError): # pragma: no cover
_raise_serialization_error(text)
+
def _escape_cdata(text):
# escape character data
try:
@@ -97,7 +99,7 @@ def _escape_cdata(text):
if ">" in text:
text = text.replace(">", ">")
return text
- except (TypeError, AttributeError):
+ except (TypeError, AttributeError): # pragma: no cover
_raise_serialization_error(text)
@@ -115,9 +117,10 @@ def _escape_attrib(text):
if "\n" in text:
text = text.replace("\n", "
")
return text
- except (TypeError, AttributeError):
+ except (TypeError, AttributeError): # pragma: no cover
_raise_serialization_error(text)
+
def _escape_attrib_html(text):
# escape attribute value
try:
@@ -130,7 +133,7 @@ def _escape_attrib_html(text):
if "\"" in text:
text = text.replace("\"", """)
return text
- except (TypeError, AttributeError):
+ except (TypeError, AttributeError): # pragma: no cover
_raise_serialization_error(text)
@@ -152,7 +155,7 @@ def _serialize_html(write, elem, qnames, namespaces, format):
write("<" + tag)
items = elem.items()
if items or namespaces:
- items.sort() # lexical order
+ items = sorted(items) # lexical order
for k, v in items:
if isinstance(k, QName):
k = k.text
@@ -167,28 +170,28 @@ def _serialize_html(write, elem, qnames, namespaces, format):
write(" %s=\"%s\"" % (qnames[k], v))
if namespaces:
items = namespaces.items()
- items.sort(key=lambda x: x[1]) # sort on prefix
+ items.sort(key=lambda x: x[1]) # sort on prefix
for v, k in items:
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (k, _escape_attrib(v)))
- if format == "xhtml" and tag in HTML_EMPTY:
+ if format == "xhtml" and tag.lower() in HTML_EMPTY:
write(" />")
else:
write(">")
- tag = tag.lower()
if text:
- if tag == "script" or tag == "style":
+ if tag.lower() in ["script", "style"]:
write(text)
else:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None, format)
- if tag not in HTML_EMPTY:
+ if tag.lower() not in HTML_EMPTY:
write("" + tag + ">")
if elem.tail:
write(_escape_cdata(elem.tail))
+
def _write_html(root,
encoding=None,
default_namespace=None,
@@ -233,7 +236,7 @@ def _namespaces(elem, default_namespace=None):
if prefix:
qnames[qname] = "%s:%s" % (prefix, tag)
else:
- qnames[qname] = tag # default element
+ qnames[qname] = tag # default element
else:
if default_namespace:
raise ValueError(
@@ -241,14 +244,14 @@ def _namespaces(elem, default_namespace=None):
"default_namespace option"
)
qnames[qname] = qname
- except TypeError:
+ except TypeError: # pragma: no cover
_raise_serialization_error(qname)
# populate qname and namespaces table
try:
iterate = elem.iter
except AttributeError:
- iterate = elem.getiterator # cET compatibility
+ iterate = elem.getiterator # cET compatibility
for elem in iterate():
tag = elem.tag
if isinstance(tag, QName) and tag.text not in qnames:
@@ -270,8 +273,10 @@ def _namespaces(elem, default_namespace=None):
add_qname(text.text)
return qnames, namespaces
+
def to_html_string(element):
return _write_html(ElementTree(element).getroot(), format="html")
+
def to_xhtml_string(element):
return _write_html(ElementTree(element).getroot(), format="xhtml")
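The serializer changes above lower-case the tag before consulting HTML_EMPTY, so elements created with upper-case tags no longer get spurious closing tags. A tiny sketch of that comparison (HTML_EMPTY abridged here):

    HTML_EMPTY = {"area", "base", "basefont", "br", "col", "frame", "hr",
                  "img", "input", "link", "meta", "param"}

    for tag in ("br", "BR", "p"):
        closing = "" if tag.lower() in HTML_EMPTY else "</%s>" % tag   # patched check
        print("<%s>%s" % (tag, closing))
    # <br>
    # <BR>
    # <p></p>
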
diff --git a/src/calibre/ebooks/markdown/treeprocessors.py b/src/calibre/ebooks/markdown/treeprocessors.py
index e6d3dc9381..d06f192885 100644
--- a/src/calibre/ebooks/markdown/treeprocessors.py
+++ b/src/calibre/ebooks/markdown/treeprocessors.py
@@ -34,11 +34,11 @@ class Treeprocessor(util.Processor):
def run(self, root):
"""
Subclasses of Treeprocessor should implement a `run` method, which
- takes a root ElementTree. This method can return another ElementTree
- object, and the existing root ElementTree will be replaced, or it can
+ takes a root ElementTree. This method can return another ElementTree
+ object, and the existing root ElementTree will be replaced, or it can
modify the current tree and return None.
"""
- pass
+ pass # pragma: no cover
class InlineProcessor(Treeprocessor):
@@ -53,6 +53,7 @@ class InlineProcessor(Treeprocessor):
+ len(self.__placeholder_suffix)
self.__placeholder_re = util.INLINE_PLACEHOLDER_RE
self.markdown = md
+ self.inlinePatterns = md.inlinePatterns
def __makePlaceholder(self, type):
""" Generate a placeholder """
@@ -70,7 +71,7 @@ class InlineProcessor(Treeprocessor):
* index: index, from which we start search
Returns: placeholder id and string index, after the found placeholder.
-
+
"""
m = self.__placeholder_re.search(data, index)
if m:
@@ -99,9 +100,9 @@ class InlineProcessor(Treeprocessor):
"""
if not isinstance(data, util.AtomicString):
startIndex = 0
- while patternIndex < len(self.markdown.inlinePatterns):
+ while patternIndex < len(self.inlinePatterns):
data, matched, startIndex = self.__applyPattern(
- self.markdown.inlinePatterns.value_for_index(patternIndex),
+ self.inlinePatterns.value_for_index(patternIndex),
data, patternIndex, startIndex)
if not matched:
patternIndex += 1
@@ -128,11 +129,10 @@ class InlineProcessor(Treeprocessor):
text = subnode.tail
subnode.tail = None
- childResult = self.__processPlaceholders(text, subnode)
+ childResult = self.__processPlaceholders(text, subnode, isText)
if not isText and node is not subnode:
- pos = node.getchildren().index(subnode)
- node.remove(subnode)
+ pos = list(node).index(subnode) + 1
else:
pos = 0
@@ -140,7 +140,7 @@ class InlineProcessor(Treeprocessor):
for newChild in childResult:
node.insert(pos, newChild)
- def __processPlaceholders(self, data, parent):
+ def __processPlaceholders(self, data, parent, isText=True):
"""
Process string with placeholders and generate ElementTree tree.
@@ -150,7 +150,7 @@ class InlineProcessor(Treeprocessor):
* parent: Element, which contains processing inline data
Returns: list with ElementTree elements with applied inline patterns.
-
+
"""
def linkText(text):
if text:
@@ -159,6 +159,11 @@ class InlineProcessor(Treeprocessor):
result[-1].tail += text
else:
result[-1].tail = text
+ elif not isText:
+ if parent.tail:
+ parent.tail += text
+ else:
+ parent.tail = text
else:
if parent.text:
parent.text += text
@@ -178,15 +183,17 @@ class InlineProcessor(Treeprocessor):
text = data[strartIndex:index]
linkText(text)
- if not isString(node): # it's Element
- for child in [node] + node.getchildren():
+ if not isString(node): # it's Element
+ for child in [node] + list(node):
if child.tail:
if child.tail.strip():
- self.__processElementText(node, child,False)
+ self.__processElementText(
+ node, child, False
+ )
if child.text:
if child.text.strip():
self.__processElementText(child, child)
- else: # it's just a string
+ else: # it's just a string
linkText(node)
strartIndex = phEndIndex
continue
@@ -194,7 +201,7 @@ class InlineProcessor(Treeprocessor):
strartIndex = phEndIndex
result.append(node)
- else: # wrong placeholder
+ else: # wrong placeholder
end = index + len(self.__placeholder_prefix)
linkText(data[strartIndex:end])
strartIndex = end
@@ -237,14 +244,16 @@ class InlineProcessor(Treeprocessor):
if not isString(node):
if not isinstance(node.text, util.AtomicString):
# We need to process current node too
- for child in [node] + node.getchildren():
+ for child in [node] + list(node):
if not isString(node):
- if child.text:
- child.text = self.__handleInline(child.text,
- patternIndex + 1)
+ if child.text:
+ child.text = self.__handleInline(
+ child.text, patternIndex + 1
+ )
if child.tail:
- child.tail = self.__handleInline(child.tail,
- patternIndex)
+ child.tail = self.__handleInline(
+ child.tail, patternIndex
+ )
placeholder = self.__stashNode(node, pattern.type())
@@ -257,8 +266,8 @@ class InlineProcessor(Treeprocessor):
Iterate over ElementTree, find elements with inline tag, apply inline
patterns and append newly created Elements to tree. If you don't
- want to process your data with inline patterns, instead of normal string,
- use subclass AtomicString:
+ want to process your data with inline patterns, instead of normal
+ string, use subclass AtomicString:
node.text = markdown.AtomicString("This will not be processed.")
@@ -276,47 +285,49 @@ class InlineProcessor(Treeprocessor):
while stack:
currElement = stack.pop()
insertQueue = []
- for child in currElement.getchildren():
- if child.text and not isinstance(child.text, util.AtomicString):
+ for child in currElement:
+ if child.text and not isinstance(
+ child.text, util.AtomicString
+ ):
text = child.text
child.text = None
- lst = self.__processPlaceholders(self.__handleInline(
- text), child)
+ lst = self.__processPlaceholders(
+ self.__handleInline(text), child
+ )
stack += lst
insertQueue.append((child, lst))
if child.tail:
tail = self.__handleInline(child.tail)
dumby = util.etree.Element('d')
- tailResult = self.__processPlaceholders(tail, dumby)
- if dumby.text:
- child.tail = dumby.text
- else:
- child.tail = None
- pos = currElement.getchildren().index(child) + 1
+ child.tail = None
+ tailResult = self.__processPlaceholders(tail, dumby, False)
+ if dumby.tail:
+ child.tail = dumby.tail
+ pos = list(currElement).index(child) + 1
tailResult.reverse()
for newChild in tailResult:
currElement.insert(pos, newChild)
- if child.getchildren():
+ if len(child):
stack.append(child)
for element, lst in insertQueue:
if self.markdown.enable_attributes:
if element.text and isString(element.text):
- element.text = \
- inlinepatterns.handleAttributes(element.text,
- element)
+ element.text = inlinepatterns.handleAttributes(
+ element.text, element
+ )
i = 0
for newChild in lst:
if self.markdown.enable_attributes:
# Processing attributes
if newChild.tail and isString(newChild.tail):
- newChild.tail = \
- inlinepatterns.handleAttributes(newChild.tail,
- element)
+ newChild.tail = inlinepatterns.handleAttributes(
+ newChild.tail, element
+ )
if newChild.text and isString(newChild.text):
- newChild.text = \
- inlinepatterns.handleAttributes(newChild.text,
- newChild)
+ newChild.text = inlinepatterns.handleAttributes(
+ newChild.text, newChild
+ )
element.insert(i, newChild)
i += 1
return tree
@@ -357,4 +368,4 @@ class PrettifyTreeprocessor(Treeprocessor):
pres = root.getiterator('pre')
for pre in pres:
if len(pre) and pre[0].tag == 'code':
- pre[0].text = pre[0].text.rstrip() + '\n'
+ pre[0].text = util.AtomicString(pre[0].text.rstrip() + '\n')
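Several hunks above replace `node.getchildren()` with `list(node)` and `len(node)`. A minimal check (not from the patch) that the replacements give the same child list, count, and insertion position:

    import xml.etree.ElementTree as etree

    p = etree.Element('p')
    em = etree.SubElement(p, 'em')
    etree.SubElement(p, 'strong')

    print([child.tag for child in list(p)])   # ['em', 'strong']  (was p.getchildren())
    print(len(p))                             # 2                 (was len(p.getchildren()))
    print(list(p).index(em) + 1)              # 2: position after <em>, as used in the patch
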
diff --git a/src/calibre/ebooks/markdown/util.py b/src/calibre/ebooks/markdown/util.py
index f07da1e111..d3d48f0999 100644
--- a/src/calibre/ebooks/markdown/util.py
+++ b/src/calibre/ebooks/markdown/util.py
@@ -10,14 +10,14 @@ Python 3 Stuff
"""
PY3 = sys.version_info[0] == 3
-if PY3:
+if PY3: # pragma: no cover
string_type = str
text_type = str
int2str = chr
-else:
- string_type = basestring
- text_type = unicode
- int2str = unichr
+else: # pragma: no cover
+ string_type = basestring # noqa
+ text_type = unicode # noqa
+ int2str = unichr # noqa
"""
@@ -25,12 +25,16 @@ Constants you might want to modify
-----------------------------------------------------------------------------
"""
-BLOCK_LEVEL_ELEMENTS = re.compile("^(p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
- "|script|noscript|form|fieldset|iframe|math"
- "|hr|hr/|style|li|dt|dd|thead|tbody"
- "|tr|th|td|section|footer|header|group|figure"
- "|figcaption|aside|article|canvas|output"
- "|progress|video)$", re.IGNORECASE)
+
+BLOCK_LEVEL_ELEMENTS = re.compile(
+ "^(p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
+ "|script|noscript|form|fieldset|iframe|math"
+ "|hr|hr/|style|li|dt|dd|thead|tbody"
+ "|tr|th|td|section|footer|header|group|figure"
+ "|figcaption|aside|article|canvas|output"
+ "|progress|video|nav)$",
+ re.IGNORECASE
+)
# Placeholders
STX = '\u0002' # Use STX ("Start of text") for start-of-placeholder
ETX = '\u0003' # Use ETX ("End of text") for end-of-placeholder
@@ -38,30 +42,36 @@ INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
INLINE_PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]+)')
AMP_SUBSTITUTE = STX+"amp"+ETX
+HTML_PLACEHOLDER = STX + "wzxhzdk:%s" + ETX
+HTML_PLACEHOLDER_RE = re.compile(HTML_PLACEHOLDER % r'([0-9]+)')
+TAG_PLACEHOLDER = STX + "hzzhzkh:%s" + ETX
+
"""
Constants you probably do not need to change
-----------------------------------------------------------------------------
"""
-RTL_BIDI_RANGES = ( ('\u0590', '\u07FF'),
- # Hebrew (0590-05FF), Arabic (0600-06FF),
- # Syriac (0700-074F), Arabic supplement (0750-077F),
- # Thaana (0780-07BF), Nko (07C0-07FF).
- ('\u2D30', '\u2D7F'), # Tifinagh
- )
+RTL_BIDI_RANGES = (
+ ('\u0590', '\u07FF'),
+ # Hebrew (0590-05FF), Arabic (0600-06FF),
+ # Syriac (0700-074F), Arabic supplement (0750-077F),
+ # Thaana (0780-07BF), Nko (07C0-07FF).
+ ('\u2D30', '\u2D7F') # Tifinagh
+)
# Extensions should use "markdown.util.etree" instead of "etree" (or do `from
# markdown.util import etree`). Do not import it by yourself.
-try: # Is the C implemenation of ElementTree available?
+try: # pragma: no cover
+ # Is the C implementation of ElementTree available?
import xml.etree.cElementTree as etree
from xml.etree.ElementTree import Comment
# Serializers (including ours) test with non-c Comment
etree.test_comment = Comment
if etree.VERSION < "1.0.5":
raise RuntimeError("cElementTree version 1.0.5 or higher is required.")
-except (ImportError, RuntimeError):
+except (ImportError, RuntimeError): # pragma: no cover
# Use the Python implementation of ElementTree?
import xml.etree.ElementTree as etree
if etree.VERSION < "1.1":
@@ -81,11 +91,32 @@ def isBlockLevel(tag):
# Some ElementTree tags are not strings, so return False.
return False
+
+def parseBoolValue(value, fail_on_errors=True, preserve_none=False):
+ """Parses a string representing bool value. If parsing was successful,
+ returns True or False. If preserve_none=True, returns True, False,
+ or None. If parsing was not successful, raises ValueError, or, if
+ fail_on_errors=False, returns None."""
+ if not isinstance(value, string_type):
+ if preserve_none and value is None:
+ return value
+ return bool(value)
+ elif preserve_none and value.lower() == 'none':
+ return None
+ elif value.lower() in ('true', 'yes', 'y', 'on', '1'):
+ return True
+ elif value.lower() in ('false', 'no', 'n', 'off', '0', 'none'):
+ return False
+ elif fail_on_errors:
+ raise ValueError('Cannot parse bool value: %r' % value)
+
+
"""
MISC AUXILIARY CLASSES
=============================================================================
"""
+
class AtomicString(text_type):
"""A string which should not be further processed."""
pass
@@ -103,10 +134,12 @@ class HtmlStash(object):
in the beginning and replace with place-holders.
"""
- def __init__ (self):
+ def __init__(self):
""" Create a HtmlStash. """
- self.html_counter = 0 # for counting inline html segments
- self.rawHtmlBlocks=[]
+ self.html_counter = 0 # for counting inline html segments
+ self.rawHtmlBlocks = []
+ self.tag_counter = 0
+ self.tag_data = [] # list of dictionaries in the order tags appear
def store(self, html, safe=False):
"""
@@ -132,5 +165,13 @@ class HtmlStash(object):
self.rawHtmlBlocks = []
def get_placeholder(self, key):
- return "%swzxhzdk:%d%s" % (STX, key, ETX)
+ return HTML_PLACEHOLDER % key
+ def store_tag(self, tag, attrs, left_index, right_index):
+ """Store tag data and return a placeholder."""
+ self.tag_data.append({'tag': tag, 'attrs': attrs,
+ 'left_index': left_index,
+ 'right_index': right_index})
+ placeholder = TAG_PLACEHOLDER % str(self.tag_counter)
+ self.tag_counter += 1 # equal to the tag's index in self.tag_data
+ return placeholder
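A few illustrative calls against the `parseBoolValue()` helper added above (assuming the patched `calibre.ebooks.markdown.util` module is importable); the comments give the expected results:

    from calibre.ebooks.markdown.util import parseBoolValue

    print(parseBoolValue('yes'))                          # True
    print(parseBoolValue('off'))                          # False
    print(parseBoolValue('None', preserve_none=True))     # None
    print(parseBoolValue(0))                              # False (non-strings fall back to bool())
    print(parseBoolValue('maybe', fail_on_errors=False))  # None instead of raising ValueError
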
diff --git a/src/calibre/ebooks/txt/processor.py b/src/calibre/ebooks/txt/processor.py
index ba41378deb..a397dfaa03 100644
--- a/src/calibre/ebooks/txt/processor.py
+++ b/src/calibre/ebooks/txt/processor.py
@@ -98,8 +98,8 @@ def convert_basic(txt, title='', epub_split_size_kb=0):
def convert_markdown(txt, title='', extensions=('footnotes', 'tables', 'toc')):
from calibre.ebooks.conversion.plugins.txt_input import MD_EXTENSIONS
from calibre.ebooks.markdown import Markdown
- extensions = [x.lower() for x in extensions if x.lower() in MD_EXTENSIONS]
- md = Markdown(extensions=extensions, safe_mode=False)
+ extensions = ['calibre.ebooks.markdown.extensions.' + x.lower() for x in extensions if x.lower() in MD_EXTENSIONS]
+ md = Markdown(extensions=extensions)
return HTML_TEMPLATE % (title, md.convert(txt))
def convert_textile(txt, title=''):
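The `convert_markdown()` change above passes extensions to Markdown as dotted module paths instead of short names. A sketch of the resulting mapping (`MD_EXTENSIONS` shown here is an assumed subset of the real list in txt_input.py):

    MD_EXTENSIONS = ('footnotes', 'tables', 'toc')

    requested = ('Footnotes', 'tables', 'toc')
    extensions = ['calibre.ebooks.markdown.extensions.' + x.lower()
                  for x in requested if x.lower() in MD_EXTENSIONS]
    print(extensions)
    # ['calibre.ebooks.markdown.extensions.footnotes',
    #  'calibre.ebooks.markdown.extensions.tables',
    #  'calibre.ebooks.markdown.extensions.toc']
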
diff --git a/src/calibre/test_build.py b/src/calibre/test_build.py
index 9c68a91f41..3fce5203f7 100644
--- a/src/calibre/test_build.py
+++ b/src/calibre/test_build.py
@@ -233,6 +233,11 @@ def test_terminal():
del readline
print ('readline and curses OK!')
+def test_markdown():
+ from calibre.ebooks.markdown import Markdown
+ Markdown(extensions=['extra'])
+ print('Markdown OK!')
+
def test():
if iswindows:
test_dlls()
@@ -256,6 +261,7 @@ def test():
test_netifaces()
test_psutil()
test_podofo()
+ test_markdown()
if iswindows:
test_wpd()
test_winutil()
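The new `test_markdown()` build test only instantiates `Markdown(extensions=['extra'])`. An equivalent manual smoke test (assumed to run inside a calibre environment) that also exercises a conversion; the comment shows the expected output:

    from calibre.ebooks.markdown import Markdown

    md = Markdown(extensions=['extra'])
    print(md.convert('*hello* **world**'))
    # <p><em>hello</em> <strong>world</strong></p>
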