- # `<p class="admonition-title">Note</p>`
- title = klass.capitalize()
- elif title == '':
- # an explicit blank title should not be rendered
- # e.g.: `!!! warning ""` will *not* render `p` with a title
- title = None
- return klass, title
-
-
-def makeExtension(*args, **kwargs):
- return AdmonitionExtension(*args, **kwargs)
diff --git a/src/calibre/ebooks/markdown/extensions/attr_list.py b/src/calibre/ebooks/markdown/extensions/attr_list.py
deleted file mode 100644
index 683bdf831c..0000000000
--- a/src/calibre/ebooks/markdown/extensions/attr_list.py
+++ /dev/null
@@ -1,177 +0,0 @@
-"""
-Attribute List Extension for Python-Markdown
-============================================
-
-Adds attribute list syntax. Inspired by
-[maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s
-feature of the same name.
-
-See
- return '<pre class="%s"><code%s>%s\n</code></pre>\n' % \
- (self.css_class, class_str, txt)
-
- def _parseHeader(self):
- """
- Determines language of a code block from shebang line and whether said
- line should be removed or left in place. If the shebang line contains a
- path (even a single /) then it is assumed to be a real shebang line and
- left alone. However, if no path is given (i.e.: #!python or :::python)
- then it is assumed to be a mock shebang for language identification of
- a code fragment and removed from the code block prior to processing for
- code highlighting. When a mock shebang (i.e.: #!python) is found, line
- numbering is turned on. When colons are found in place of a shebang
- (i.e.: :::python), line numbering is left in the current state - off
- by default.
-
- Also parses optional list of highlight lines, like:
-
- :::python hl_lines="1 3"
- """
-
- import re
-
- # split text into lines
- lines = self.src.split("\n")
- # pull first line to examine
- fl = lines.pop(0)
-
- c = re.compile(r'''
- (?:(?:^::+)|(?P<shebang>[#]!))
-(?P<code>.*?)(?<=\n)
-(?P=fence)[ ]*$''', re.MULTILINE | re.DOTALL | re.VERBOSE)
- CODE_WRAP = '<pre><code%s>%s</code></pre>'
- LANG_TAG = ' class="%s"'
-
- def __init__(self, md):
- super(FencedBlockPreprocessor, self).__init__(md)
-
- self.checked_for_codehilite = False
- self.codehilite_conf = {}
-
- def run(self, lines):
- """ Match and store Fenced Code Blocks in the HtmlStash. """
-
- # Check for code hilite extension
- if not self.checked_for_codehilite:
- for ext in self.markdown.registeredExtensions:
- if isinstance(ext, CodeHiliteExtension):
- self.codehilite_conf = ext.config
- break
-
- self.checked_for_codehilite = True
-
- text = "\n".join(lines)
- while 1:
- m = self.FENCED_BLOCK_RE.search(text)
- if m:
- lang = ''
- if m.group('lang'):
- lang = self.LANG_TAG % m.group('lang')
-
- # If config is not empty, then the codehilite extension
- # is enabled, so we call it to highlight the code
- if self.codehilite_conf:
- highliter = CodeHilite(
- m.group('code'),
- linenums=self.codehilite_conf['linenums'][0],
- guess_lang=self.codehilite_conf['guess_lang'][0],
- css_class=self.codehilite_conf['css_class'][0],
- style=self.codehilite_conf['pygments_style'][0],
- lang=(m.group('lang') or None),
- noclasses=self.codehilite_conf['noclasses'][0],
- hl_lines=parse_hl_lines(m.group('hl_lines'))
- )
-
- code = highliter.hilite()
- else:
- code = self.CODE_WRAP % (lang,
- self._escape(m.group('code')))
-
- placeholder = self.markdown.htmlStash.store(code, safe=True)
- text = '%s\n%s\n%s' % (text[:m.start()],
- placeholder,
- text[m.end():])
- else:
- break
- return text.split("\n")
-
- def _escape(self, txt):
- """ basic html escaping """
- txt = txt.replace('&', '&amp;')
- txt = txt.replace('<', '&lt;')
- txt = txt.replace('>', '&gt;')
- txt = txt.replace('"', '&quot;')
- return txt
-
-
-def makeExtension(*args, **kwargs):
- return FencedCodeExtension(*args, **kwargs)
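
For orientation (not part of the deleted file): the preprocessor above is normally reached through the public conversion API. A minimal sketch, assuming the standalone markdown package, which registers the same 'fenced_code' extension name as this bundled copy:

    import markdown

    SRC = "~~~python\nprint('fenced')\n~~~\n"

    # run() swaps the fenced block for an htmlStash placeholder; the raw_html
    # postprocessor restores the rendered <pre><code> at the end of conversion.
    print(markdown.markdown(SRC, extensions=['fenced_code']))
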
diff --git a/src/calibre/ebooks/markdown/extensions/footnotes.py b/src/calibre/ebooks/markdown/extensions/footnotes.py
deleted file mode 100644
index 362aa20b64..0000000000
--- a/src/calibre/ebooks/markdown/extensions/footnotes.py
+++ /dev/null
@@ -1,321 +0,0 @@
-"""
-Footnotes Extension for Python-Markdown
-=======================================
-
-Adds footnote handling to Python-Markdown.
-
-See
-for documentation.
-
-Copyright The Python Markdown Project
-
-License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
-
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import Extension
-from ..preprocessors import Preprocessor
-from ..inlinepatterns import Pattern
-from ..treeprocessors import Treeprocessor
-from ..postprocessors import Postprocessor
-from ..util import etree, text_type
-from ..odict import OrderedDict
-import re
-
-FN_BACKLINK_TEXT = "zz1337820767766393qq"
-NBSP_PLACEHOLDER = "qq3936677670287331zz"
-DEF_RE = re.compile(r'[ ]{0,3}\[\^([^\]]*)\]:\s*(.*)')
-TABBED_RE = re.compile(r'((\t)|( ))(.*)')
-
-
-class FootnoteExtension(Extension):
- """ Footnote Extension. """
-
- def __init__(self, *args, **kwargs):
- """ Setup configs. """
-
- self.config = {
- 'PLACE_MARKER':
- ["///Footnotes Go Here///",
- "The text string that marks where the footnotes go"],
- 'UNIQUE_IDS':
- [False,
- "Avoid name collisions across "
- "multiple calls to reset()."],
- "BACKLINK_TEXT":
- ["↩",
- "The text string that links from the footnote "
- "to the reader's place."]
- }
- super(FootnoteExtension, self).__init__(*args, **kwargs)
-
- # In multiple invocations, emit links that don't get tangled.
- self.unique_prefix = 0
-
- self.reset()
-
- def extendMarkdown(self, md, md_globals):
- """ Add pieces to Markdown. """
- md.registerExtension(self)
- self.parser = md.parser
- self.md = md
- # Insert a preprocessor before ReferencePreprocessor
- md.preprocessors.add(
- "footnote", FootnotePreprocessor(self), "amp_substitute"
- )
-
- def reset(self):
- """ Clear footnotes on reset, and prepare for distinct document. """
- self.footnotes = OrderedDict()
- self.unique_prefix += 1
-
- def findFootnotesPlaceholder(self, root):
- """ Return ElementTree Element that contains Footnote placeholder. """
- def finder(element):
- for child in element:
- if child.text:
- if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
- return child, element, True
- if child.tail:
- if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
- return child, element, False
- child_res = finder(child)
- if child_res is not None:
- return child_res
- return None
-
- res = finder(root)
- return res
-
- def setFootnote(self, id, text):
- """ Store a footnote for later retrieval. """
- self.footnotes[id] = text
-
- def get_separator(self):
- if self.md.output_format in ['html5', 'xhtml5']:
- return '-'
- return ':'
-
- def makeFootnoteId(self, id):
- """ Return footnote link id. """
- if self.getConfig("UNIQUE_IDS"):
- return 'fn%s%d-%s' % (self.get_separator(), self.unique_prefix, id)
- else:
- return 'fn%s%s' % (self.get_separator(), id)
-
- def makeFootnoteRefId(self, id):
- """ Return footnote back-link id. """
- if self.getConfig("UNIQUE_IDS"):
- return 'fnref%s%d-%s' % (self.get_separator(),
- self.unique_prefix, id)
- else:
- return 'fnref%s%s' % (self.get_separator(), id)
-
- def makeFootnotesDiv(self, root):
- """ Return div of footnotes as et Element. """
-
- if not list(self.footnotes.keys()):
- return None
-
- div = etree.Element("div")
- div.set('class', 'footnote')
- etree.SubElement(div, "hr")
- ol = etree.SubElement(div, "ol")
-
- for id in self.footnotes.keys():
- li = etree.SubElement(ol, "li")
- li.set("id", self.makeFootnoteId(id))
- self.parser.parseChunk(li, self.footnotes[id])
- backlink = etree.Element("a")
- backlink.set("href", "#" + self.makeFootnoteRefId(id))
- if self.md.output_format not in ['html5', 'xhtml5']:
- backlink.set("rev", "footnote") # Invalid in HTML5
- backlink.set("class", "footnote-backref")
- backlink.set(
- "title",
- "Jump back to footnote %d in the text" %
- (self.footnotes.index(id)+1)
- )
- backlink.text = FN_BACKLINK_TEXT
-
- if li.getchildren():
- node = li[-1]
- if node.tag == "p":
- node.text = node.text + NBSP_PLACEHOLDER
- node.append(backlink)
- else:
- p = etree.SubElement(li, "p")
- p.append(backlink)
- return div
-
-
-class FootnotePreprocessor(Preprocessor):
- """ Find all footnote references and store for later use. """
-
- def __init__(self, footnotes):
- self.footnotes = footnotes
-
- def run(self, lines):
- """
- Loop through lines and find, set, and remove footnote definitions.
-
- Keywords:
-
- * lines: A list of lines of text
-
- Return: A list of lines of text with footnote definitions removed.
-
- """
- newlines = []
- i = 0
- while True:
- m = DEF_RE.match(lines[i])
- if m:
- fn, _i = self.detectTabbed(lines[i+1:])
- fn.insert(0, m.group(2))
- i += _i-1 # skip past footnote
- self.footnotes.setFootnote(m.group(1), "\n".join(fn))
- else:
- newlines.append(lines[i])
- if len(lines) > i+1:
- i += 1
- else:
- break
- return newlines
-
- def detectTabbed(self, lines):
- """ Find indented text and remove indent before further processing.
-
- Keyword arguments:
-
- * lines: an array of strings
-
- Returns: a list of post processed items and the index of last line.
-
- """
- items = []
- blank_line = False # have we encountered a blank line yet?
- i = 0 # to keep track of where we are
-
- def detab(line):
- match = TABBED_RE.match(line)
- if match:
- return match.group(4)
-
- for line in lines:
- if line.strip(): # Non-blank line
- detabbed_line = detab(line)
- if detabbed_line:
- items.append(detabbed_line)
- i += 1
- continue
- elif not blank_line and not DEF_RE.match(line):
- # not tabbed but still part of first par.
- items.append(line)
- i += 1
- continue
- else:
- return items, i+1
-
- else: # Blank line: _maybe_ we are done.
- blank_line = True
- i += 1 # advance
-
- # Find the next non-blank line
- for j in range(i, len(lines)):
- if lines[j].strip():
- next_line = lines[j]
- break
- else:
- break # There is no more text; we are done.
-
- # Check if the next non-blank line is tabbed
- if detab(next_line): # Yes, more work to do.
- items.append("")
- continue
- else:
- break # No, we are done.
- else:
- i += 1
-
- return items, i
-
-
-class FootnotePattern(Pattern):
- """ InlinePattern for footnote markers in a document's body text. """
-
- def __init__(self, pattern, footnotes):
- super(FootnotePattern, self).__init__(pattern)
- self.footnotes = footnotes
-
- def handleMatch(self, m):
- id = m.group(2)
- if id in self.footnotes.footnotes.keys():
- sup = etree.Element("sup")
- a = etree.SubElement(sup, "a")
- sup.set('id', self.footnotes.makeFootnoteRefId(id))
- a.set('href', '#' + self.footnotes.makeFootnoteId(id))
- if self.footnotes.md.output_format not in ['html5', 'xhtml5']:
- a.set('rel', 'footnote') # invalid in HTML5
- a.set('class', 'footnote-ref')
- a.text = text_type(self.footnotes.footnotes.index(id) + 1)
- return sup
- else:
- return None
-
-
-class FootnoteTreeprocessor(Treeprocessor):
- """ Build and append footnote div to end of document. """
-
- def __init__(self, footnotes):
- self.footnotes = footnotes
-
- def run(self, root):
- footnotesDiv = self.footnotes.makeFootnotesDiv(root)
- if footnotesDiv is not None:
- result = self.footnotes.findFootnotesPlaceholder(root)
- if result:
- child, parent, isText = result
- ind = parent.getchildren().index(child)
- if isText:
- parent.remove(child)
- parent.insert(ind, footnotesDiv)
- else:
- parent.insert(ind + 1, footnotesDiv)
- child.tail = None
- else:
- root.append(footnotesDiv)
-
-
-class FootnotePostprocessor(Postprocessor):
- """ Replace placeholders with html entities. """
- def __init__(self, footnotes):
- self.footnotes = footnotes
-
- def run(self, text):
- text = text.replace(
- FN_BACKLINK_TEXT, self.footnotes.getConfig("BACKLINK_TEXT")
- )
- return text.replace(NBSP_PLACEHOLDER, "&#160;")
-
-
-def makeExtension(*args, **kwargs):
- """ Return an instance of the FootnoteExtension """
- return FootnoteExtension(*args, **kwargs)
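
A usage sketch, not part of the deleted file, showing the syntax the classes above cooperate on; it assumes the standalone markdown package, which exposes the same 'footnotes' extension name:

    import markdown

    SRC = (
        "Body text with a marker.[^note]\n"
        "\n"
        "[^note]: The definition; indented continuation lines\n"
        "    are gathered by detectTabbed().\n"
    )

    # FootnotePreprocessor strips the definition, FootnotePattern renders the
    # marker as a <sup><a> pair, and FootnoteTreeprocessor appends the footnote div.
    print(markdown.markdown(SRC, extensions=['footnotes']))
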
diff --git a/src/calibre/ebooks/markdown/extensions/headerid.py b/src/calibre/ebooks/markdown/extensions/headerid.py
deleted file mode 100644
index 2cb20b97ab..0000000000
--- a/src/calibre/ebooks/markdown/extensions/headerid.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""
-HeaderID Extension for Python-Markdown
-======================================
-
-Auto-generate id attributes for HTML headers.
-
-See
-for documentation.
-
-Original code Copyright 2007-2011 [Waylan Limberg](http://achinghead.com/).
-
-All changes Copyright 2011-2014 The Python Markdown Project
-
-License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
-
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import Extension
-from ..treeprocessors import Treeprocessor
-from ..util import parseBoolValue
-from .toc import slugify, unique, stashedHTML2text
-import warnings
-
-
-class HeaderIdTreeprocessor(Treeprocessor):
- """ Assign IDs to headers. """
-
- IDs = set()
-
- def run(self, doc):
- start_level, force_id = self._get_meta()
- slugify = self.config['slugify']
- sep = self.config['separator']
- for elem in doc:
- if elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
- if force_id:
- if "id" in elem.attrib:
- id = elem.get('id')
- else:
- id = stashedHTML2text(''.join(elem.itertext()), self.md)
- id = slugify(id, sep)
- elem.set('id', unique(id, self.IDs))
- if start_level:
- level = int(elem.tag[-1]) + start_level
- if level > 6:
- level = 6
- elem.tag = 'h%d' % level
-
- def _get_meta(self):
- """ Return meta data supported by this ext as a tuple """
- level = int(self.config['level']) - 1
- force = parseBoolValue(self.config['forceid'])
- if hasattr(self.md, 'Meta'):
- if 'header_level' in self.md.Meta:
- level = int(self.md.Meta['header_level'][0]) - 1
- if 'header_forceid' in self.md.Meta:
- force = parseBoolValue(self.md.Meta['header_forceid'][0])
- return level, force
-
-
-class HeaderIdExtension(Extension):
- def __init__(self, *args, **kwargs):
- # set defaults
- self.config = {
- 'level': ['1', 'Base level for headers.'],
- 'forceid': ['True', 'Force all headers to have an id.'],
- 'separator': ['-', 'Word separator.'],
- 'slugify': [slugify, 'Callable to generate anchors']
- }
-
- super(HeaderIdExtension, self).__init__(*args, **kwargs)
-
- warnings.warn(
- 'The HeaderId Extension is pending deprecation. Use the TOC Extension instead.',
- PendingDeprecationWarning
- )
-
- def extendMarkdown(self, md, md_globals):
- md.registerExtension(self)
- self.processor = HeaderIdTreeprocessor()
- self.processor.md = md
- self.processor.config = self.getConfigs()
- if 'attr_list' in md.treeprocessors.keys():
- # insert after attr_list treeprocessor
- md.treeprocessors.add('headerid', self.processor, '>attr_list')
- else:
- # insert after 'prettify' treeprocessor.
- md.treeprocessors.add('headerid', self.processor, '>prettify')
-
- def reset(self):
- self.processor.IDs = set()
-
-
-def makeExtension(*args, **kwargs):
- return HeaderIdExtension(*args, **kwargs)
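
Most of the interesting work is delegated to slugify() and unique(), imported above from the toc extension (which appears later in this diff). A small illustration of how they interact, assuming the standalone markdown package where they live under markdown.extensions.toc:

    from markdown.extensions.toc import slugify, unique

    ids = set()
    # The first header gets the plain slug; a duplicate gets a numeric suffix.
    print(unique(slugify('Header IDs & Anchors', '-'), ids))  # header-ids-anchors
    print(unique(slugify('Header IDs & Anchors', '-'), ids))  # header-ids-anchors_1
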
diff --git a/src/calibre/ebooks/markdown/extensions/meta.py b/src/calibre/ebooks/markdown/extensions/meta.py
deleted file mode 100644
index 711235ef4a..0000000000
--- a/src/calibre/ebooks/markdown/extensions/meta.py
+++ /dev/null
@@ -1,78 +0,0 @@
-"""
-Meta Data Extension for Python-Markdown
-=======================================
-
-This extension adds Meta Data handling to markdown.
-
-See
-for documentation.
-
-Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
-
-All changes Copyright 2008-2014 The Python Markdown Project
-
-License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
-
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import Extension
-from ..preprocessors import Preprocessor
-import re
-import logging
-
-log = logging.getLogger('MARKDOWN')
-
-# Global Vars
-META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
-META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
-BEGIN_RE = re.compile(r'^-{3}(\s.*)?')
-END_RE = re.compile(r'^(-{3}|\.{3})(\s.*)?')
-
-
-class MetaExtension (Extension):
- """ Meta-Data extension for Python-Markdown. """
-
- def extendMarkdown(self, md, md_globals):
- """ Add MetaPreprocessor to Markdown instance. """
- md.preprocessors.add("meta",
- MetaPreprocessor(md),
- ">normalize_whitespace")
-
-
-class MetaPreprocessor(Preprocessor):
- """ Get Meta-Data. """
-
- def run(self, lines):
- """ Parse Meta-Data and store in Markdown.Meta. """
- meta = {}
- key = None
- if lines and BEGIN_RE.match(lines[0]):
- lines.pop(0)
- while lines:
- line = lines.pop(0)
- m1 = META_RE.match(line)
- if line.strip() == '' or END_RE.match(line):
- break # blank line or end of YAML header - done
- if m1:
- key = m1.group('key').lower().strip()
- value = m1.group('value').strip()
- try:
- meta[key].append(value)
- except KeyError:
- meta[key] = [value]
- else:
- m2 = META_MORE_RE.match(line)
- if m2 and key:
- # Add another line to existing key
- meta[key].append(m2.group('value').strip())
- else:
- lines.insert(0, line)
- break # no meta data - done
- self.markdown.Meta = meta
- return lines
-
-
-def makeExtension(*args, **kwargs):
- return MetaExtension(*args, **kwargs)
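
A usage sketch, not from the deleted file: after conversion the parsed header is left on the instance as .Meta, the dict of lowercased keys to lists of values that the preprocessor above builds. Assumes the standalone markdown package:

    import markdown

    SRC = (
        "Title: A Document\n"
        "Authors: First Author\n"
        "         Second Author\n"
        "\n"
        "Body starts after the first blank line.\n"
    )

    md = markdown.Markdown(extensions=['meta'])
    md.convert(SRC)
    print(md.Meta)  # roughly {'title': ['A Document'], 'authors': ['First Author', 'Second Author']}
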
diff --git a/src/calibre/ebooks/markdown/extensions/nl2br.py b/src/calibre/ebooks/markdown/extensions/nl2br.py
deleted file mode 100644
index 8acd60c2e1..0000000000
--- a/src/calibre/ebooks/markdown/extensions/nl2br.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""
-NL2BR Extension
-===============
-
-A Python-Markdown extension to treat newlines as hard breaks; like
-GitHub-flavored Markdown does.
-
-See
-for documentation.
-
-Original code Copyright 2011 [Brian Neal](http://deathofagremmie.com/)
-
-All changes Copyright 2011-2014 The Python Markdown Project
-
-License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
-
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import Extension
-from ..inlinepatterns import SubstituteTagPattern
-
-BR_RE = r'\n'
-
-
-class Nl2BrExtension(Extension):
-
- def extendMarkdown(self, md, md_globals):
- br_tag = SubstituteTagPattern(BR_RE, 'br')
- md.inlinePatterns.add('nl', br_tag, '_end')
-
-
-def makeExtension(*args, **kwargs):
- return Nl2BrExtension(*args, **kwargs)
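
A one-line illustration, assuming the standalone markdown package (the exact serialization of the br tag varies between versions):

    import markdown

    # With nl2br a single newline becomes a hard break, as in GitHub-flavored Markdown.
    print(markdown.markdown("line one\nline two", extensions=['nl2br']))
    # e.g. <p>line one<br />\nline two</p>
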
diff --git a/src/calibre/ebooks/markdown/extensions/sane_lists.py b/src/calibre/ebooks/markdown/extensions/sane_lists.py
deleted file mode 100644
index 828ae7ab34..0000000000
--- a/src/calibre/ebooks/markdown/extensions/sane_lists.py
+++ /dev/null
@@ -1,55 +0,0 @@
-"""
-Sane List Extension for Python-Markdown
-=======================================
-
-Modify the behavior of Lists in Python-Markdown to act in a sane manner.
-
-See
-for documentation.
-
-Original code Copyright 2011 [Waylan Limberg](http://achinghead.com)
-
-All changes Copyright 2011-2014 The Python Markdown Project
-
-License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
-
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import Extension
-from ..blockprocessors import OListProcessor, UListProcessor
-import re
-
-
-class SaneOListProcessor(OListProcessor):
-
- SIBLING_TAGS = ['ol']
-
- def __init__(self, parser):
- super(SaneOListProcessor, self).__init__(parser)
- self.CHILD_RE = re.compile(r'^[ ]{0,%d}((\d+\.))[ ]+(.*)' %
- (self.tab_length - 1))
-
-
-class SaneUListProcessor(UListProcessor):
-
- SIBLING_TAGS = ['ul']
-
- def __init__(self, parser):
- super(SaneUListProcessor, self).__init__(parser)
- self.CHILD_RE = re.compile(r'^[ ]{0,%d}(([*+-]))[ ]+(.*)' %
- (self.tab_length - 1))
-
-
-class SaneListExtension(Extension):
- """ Add sane lists to Markdown. """
-
- def extendMarkdown(self, md, md_globals):
- """ Override existing Processors. """
- md.parser.blockprocessors['olist'] = SaneOListProcessor(md.parser)
- md.parser.blockprocessors['ulist'] = SaneUListProcessor(md.parser)
-
-
-def makeExtension(*args, **kwargs):
- return SaneListExtension(*args, **kwargs)
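
A sketch of the behavioural difference, assuming the standalone markdown package; the comments describe the tendency rather than verbatim output, since the HTML differs between versions:

    import markdown

    SRC = (
        "1. ordered one\n"
        "2. ordered two\n"
        "\n"
        "* unordered one\n"
        "* unordered two\n"
    )

    # With the stock rules the second list can continue the first (mixed
    # SIBLING_TAGS); with sane_lists the change of marker starts a separate list.
    print(markdown.markdown(SRC))
    print(markdown.markdown(SRC, extensions=['sane_lists']))
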
diff --git a/src/calibre/ebooks/markdown/extensions/smart_strong.py b/src/calibre/ebooks/markdown/extensions/smart_strong.py
deleted file mode 100644
index 58570bb55e..0000000000
--- a/src/calibre/ebooks/markdown/extensions/smart_strong.py
+++ /dev/null
@@ -1,41 +0,0 @@
-'''
-Smart_Strong Extension for Python-Markdown
-==========================================
-
-This extension adds smarter handling of double underscores within words.
-
-See
-for documentation.
-
-Original code Copyright 2011 [Waylan Limberg](http://achinghead.com)
-
-All changes Copyright 2011-2014 The Python Markdown Project
-
-License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
-
-'''
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import Extension
-from ..inlinepatterns import SimpleTagPattern
-
-SMART_STRONG_RE = r'(?<!\w)\*{2}(?!\*)(.+?)(?<!\*)\*{2}(?!\w)'
- '>emphasis2'
- )
-
-
-def makeExtension(*args, **kwargs):
- return SmartEmphasisExtension(*args, **kwargs)
diff --git a/src/calibre/ebooks/markdown/extensions/tables.py b/src/calibre/ebooks/markdown/extensions/tables.py
deleted file mode 100644
index 494aaeb3e4..0000000000
--- a/src/calibre/ebooks/markdown/extensions/tables.py
+++ /dev/null
@@ -1,149 +0,0 @@
-"""
-Tables Extension for Python-Markdown
-====================================
-
-Added parsing of tables to Python-Markdown.
-
-See
-for documentation.
-
-Original code Copyright 2009 [Waylan Limberg](http://achinghead.com)
-
-All changes Copyright 2008-2014 The Python Markdown Project
-
-License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
-
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import Extension
-from ..blockprocessors import BlockProcessor
-from ..inlinepatterns import BacktickPattern, BACKTICK_RE
-from ..util import etree
-
-
-class TableProcessor(BlockProcessor):
- """ Process Tables. """
-
- def test(self, parent, block):
- rows = block.split('\n')
- return (len(rows) > 1 and '|' in rows[0] and
- '|' in rows[1] and '-' in rows[1] and
- rows[1].strip()[0] in ['|', ':', '-'])
-
- def run(self, parent, blocks):
- """ Parse a table block and build table. """
- block = blocks.pop(0).split('\n')
- header = block[0].strip()
- seperator = block[1].strip()
- rows = [] if len(block) < 3 else block[2:]
- # Get format type (bordered by pipes or not)
- border = False
- if header.startswith('|'):
- border = True
- # Get alignment of columns
- align = []
- for c in self._split_row(seperator, border):
- if c.startswith(':') and c.endswith(':'):
- align.append('center')
- elif c.startswith(':'):
- align.append('left')
- elif c.endswith(':'):
- align.append('right')
- else:
- align.append(None)
- # Build table
- table = etree.SubElement(parent, 'table')
- thead = etree.SubElement(table, 'thead')
- self._build_row(header, thead, align, border)
- tbody = etree.SubElement(table, 'tbody')
- for row in rows:
- self._build_row(row.strip(), tbody, align, border)
-
- def _build_row(self, row, parent, align, border):
- """ Given a row of text, build table cells. """
- tr = etree.SubElement(parent, 'tr')
- tag = 'td'
- if parent.tag == 'thead':
- tag = 'th'
- cells = self._split_row(row, border)
- # We use align here rather than cells to ensure every row
- # contains the same number of columns.
- for i, a in enumerate(align):
- c = etree.SubElement(tr, tag)
- try:
- if isinstance(cells[i], str) or isinstance(cells[i], unicode):
- c.text = cells[i].strip()
- else:
- # we've already inserted a code element
- c.append(cells[i])
- except IndexError: # pragma: no cover
- c.text = ""
- if a:
- c.set('align', a)
-
- def _split_row(self, row, border):
- """ split a row of text into list of cells. """
- if border:
- if row.startswith('|'):
- row = row[1:]
- if row.endswith('|'):
- row = row[:-1]
- return self._split(row, '|')
-
- def _split(self, row, marker):
- """ split a row of text with some code into a list of cells. """
- if self._row_has_unpaired_backticks(row):
- # fallback on old behaviour
- return row.split(marker)
- # modify the backtick pattern to only match at the beginning of the search string
- backtick_pattern = BacktickPattern('^' + BACKTICK_RE)
- elements = []
- current = ''
- i = 0
- while i < len(row):
- letter = row[i]
- if letter == marker:
- if current != '' or len(elements) == 0:
- # Don't append empty string unless it is the first element
- # The border is already removed when we get the row, then the line is strip()'d
- # If the first element is a marker, then we have an empty first cell
- elements.append(current)
- current = ''
- else:
- match = backtick_pattern.getCompiledRegExp().match(row[i:])
- if not match:
- current += letter
- else:
- groups = match.groups()
- delim = groups[1] # the code block delimiter (i.e. 1 or more backticks)
- row_contents = groups[2] # the text contained inside the code block
- i += match.start(4) # jump pointer to the beginning of the rest of the text (group #4)
- element = delim + row_contents + delim # reinsert backticks
- current += element
- i += 1
- elements.append(current)
- return elements
-
- def _row_has_unpaired_backticks(self, row):
- count_total_backtick = row.count('`')
- count_escaped_backtick = row.count('\`')
- count_backtick = count_total_backtick - count_escaped_backtick
- # odd number of backticks,
- # we won't be able to build correct code blocks
- return count_backtick & 1
-
-
-class TableExtension(Extension):
- """ Add tables to Markdown. """
-
- def extendMarkdown(self, md, md_globals):
- """ Add an instance of TableProcessor to BlockParser. """
- md.parser.blockprocessors.add('table',
- TableProcessor(md.parser),
- '<hashheader')
-for documentation.
-
-Original code Copyright 2008 [Jack Miller](http://codezen.org)
-
-All changes Copyright 2008-2014 The Python Markdown Project
-
-License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
-
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import Extension
-from ..treeprocessors import Treeprocessor
-from ..util import etree, parseBoolValue, AMP_SUBSTITUTE, HTML_PLACEHOLDER_RE, string_type
-import re
-import unicodedata
-
-
-def slugify(value, separator):
- """ Slugify a string, to make it URL friendly. """
- value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
- value = re.sub('[^\w\s-]', '', value.decode('ascii')).strip().lower()
- return re.sub('[%s\s]+' % separator, separator, value)
-
-
-IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
-
-
-def unique(id, ids):
- """ Ensure id is unique in set of ids. Append '_1', '_2'... if not """
- while id in ids or not id:
- m = IDCOUNT_RE.match(id)
- if m:
- id = '%s_%d' % (m.group(1), int(m.group(2))+1)
- else:
- id = '%s_%d' % (id, 1)
- ids.add(id)
- return id
-
-
-def stashedHTML2text(text, md):
- """ Extract raw HTML from stash, reduce to plain text and swap with placeholder. """
- def _html_sub(m):
- """ Substitute raw html with plain text. """
- try:
- raw, safe = md.htmlStash.rawHtmlBlocks[int(m.group(1))]
- except (IndexError, TypeError): # pragma: no cover
- return m.group(0)
- if md.safeMode and not safe: # pragma: no cover
- return ''
- # Strip out tags and entities - leaving text
- return re.sub(r'(<[^>]+>)|(&[\#a-zA-Z0-9]+;)', '', raw)
-
- return HTML_PLACEHOLDER_RE.sub(_html_sub, text)
-
-
-def nest_toc_tokens(toc_list):
- """Given an unsorted list with errors and skips, return a nested one.
- [{'level': 1}, {'level': 2}]
- =>
- [{'level': 1, 'children': [{'level': 2, 'children': []}]}]
-
- A wrong list is also converted:
- [{'level': 2}, {'level': 1}]
- =>
- [{'level': 2, 'children': []}, {'level': 1, 'children': []}]
- """
-
- ordered_list = []
- if len(toc_list):
- # Initialize everything by processing the first entry
- last = toc_list.pop(0)
- last['children'] = []
- levels = [last['level']]
- ordered_list.append(last)
- parents = []
-
- # Walk the rest nesting the entries properly
- while toc_list:
- t = toc_list.pop(0)
- current_level = t['level']
- t['children'] = []
-
- # Reduce depth if current level < last item's level
- if current_level < levels[-1]:
- # Pop last level since we know we are less than it
- levels.pop()
-
- # Pop parents and levels we are less than or equal to
- to_pop = 0
- for p in reversed(parents):
- if current_level <= p['level']:
- to_pop += 1
- else: # pragma: no cover
- break
- if to_pop:
- levels = levels[:-to_pop]
- parents = parents[:-to_pop]
-
- # Note current level as last
- levels.append(current_level)
-
- # Level is the same, so append to
- # the current parent (if available)
- if current_level == levels[-1]:
- (parents[-1]['children'] if parents
- else ordered_list).append(t)
-
- # Current level is > last item's level,
- # So make last item a parent and append current as child
- else:
- last['children'].append(t)
- parents.append(last)
- levels.append(current_level)
- last = t
-
- return ordered_list
-
-
-class TocTreeprocessor(Treeprocessor):
- def __init__(self, md, config):
- super(TocTreeprocessor, self).__init__(md)
-
- self.marker = config["marker"]
- self.title = config["title"]
- self.base_level = int(config["baselevel"]) - 1
- self.slugify = config["slugify"]
- self.sep = config["separator"]
- self.use_anchors = parseBoolValue(config["anchorlink"])
- self.use_permalinks = parseBoolValue(config["permalink"], False)
- if self.use_permalinks is None:
- self.use_permalinks = config["permalink"]
-
- self.header_rgx = re.compile("[Hh][123456]")
-
- def iterparent(self, root):
- ''' Iterator wrapper to get parent and child all at once. '''
- for parent in root.iter():
- for child in parent:
- yield parent, child
-
- def replace_marker(self, root, elem):
- ''' Replace marker with elem. '''
- for (p, c) in self.iterparent(root):
- text = ''.join(c.itertext()).strip()
- if not text:
- continue
-
- # To keep the output from screwing up the
- # validation by putting a <div> inside of a <p>
- # we actually replace the <p> in its entirety.
- # We do not allow the marker inside a header as that
- # would cause an endless loop of placing a new TOC
- # inside a previously generated TOC.
- if c.text and c.text.strip() == self.marker and \
- not self.header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
- for i in range(len(p)):
- if p[i] == c:
- p[i] = elem
- break
-
- def set_level(self, elem):
- ''' Adjust header level according to base level. '''
- level = int(elem.tag[-1]) + self.base_level
- if level > 6:
- level = 6
- elem.tag = 'h%d' % level
-
- def add_anchor(self, c, elem_id): # @ReservedAssignment
- anchor = etree.Element("a")
- anchor.text = c.text
- anchor.attrib["href"] = "#" + elem_id
- anchor.attrib["class"] = "toclink"
- c.text = ""
- for elem in c:
- anchor.append(elem)
- c.remove(elem)
- c.append(anchor)
-
- def add_permalink(self, c, elem_id):
- permalink = etree.Element("a")
- permalink.text = ("%spara;" % AMP_SUBSTITUTE
- if self.use_permalinks is True
- else self.use_permalinks)
- permalink.attrib["href"] = "#" + elem_id
- permalink.attrib["class"] = "headerlink"
- permalink.attrib["title"] = "Permanent link"
- c.append(permalink)
-
- def build_toc_div(self, toc_list):
- """ Return a string div given a toc list. """
- div = etree.Element("div")
- div.attrib["class"] = "toc"
-
- # Add title to the div
- if self.title:
- header = etree.SubElement(div, "span")
- header.attrib["class"] = "toctitle"
- header.text = self.title
-
- def build_etree_ul(toc_list, parent):
- ul = etree.SubElement(parent, "ul")
- for item in toc_list:
- # List item link, to be inserted into the toc div
- li = etree.SubElement(ul, "li")
- link = etree.SubElement(li, "a")
- link.text = item.get('name', '')
- link.attrib["href"] = '#' + item.get('id', '')
- if item['children']:
- build_etree_ul(item['children'], li)
- return ul
-
- build_etree_ul(toc_list, div)
- prettify = self.markdown.treeprocessors.get('prettify')
- if prettify:
- prettify.run(div)
- return div
-
- def run(self, doc):
- # Get a list of id attributes
- used_ids = set()
- for el in doc.iter():
- if "id" in el.attrib:
- used_ids.add(el.attrib["id"])
-
- toc_tokens = []
- for el in doc.iter():
- if isinstance(el.tag, string_type) and self.header_rgx.match(el.tag):
- self.set_level(el)
- text = ''.join(el.itertext()).strip()
-
- # Do not override pre-existing ids
- if "id" not in el.attrib:
- innertext = stashedHTML2text(text, self.markdown)
- el.attrib["id"] = unique(self.slugify(innertext, self.sep), used_ids)
-
- toc_tokens.append({
- 'level': int(el.tag[-1]),
- 'id': el.attrib["id"],
- 'name': text
- })
-
- if self.use_anchors:
- self.add_anchor(el, el.attrib["id"])
- if self.use_permalinks:
- self.add_permalink(el, el.attrib["id"])
-
- div = self.build_toc_div(nest_toc_tokens(toc_tokens))
- if self.marker:
- self.replace_marker(doc, div)
-
- # serialize and attach to markdown instance.
- toc = self.markdown.serializer(div)
- for pp in self.markdown.postprocessors.values():
- toc = pp.run(toc)
- self.markdown.toc = toc
-
-
-class TocExtension(Extension):
-
- TreeProcessorClass = TocTreeprocessor
-
- def __init__(self, *args, **kwargs):
- self.config = {
- "marker": ['[TOC]',
- 'Text to find and replace with Table of Contents - '
- 'Set to an empty string to disable. Defaults to "[TOC]"'],
- "title": ["",
- "Title to insert into TOC <div> - "
- "Defaults to an empty string"],
- "anchorlink": [False,
- "True if header should be a self link - "
- "Defaults to False"],
- "permalink": [0,
- "True or link text if a Sphinx-style permalink should "
- "be added - Defaults to False"],
- "baselevel": ['1', 'Base level for headers.'],
- "slugify": [slugify,
- "Function to generate anchors based on header text - "
- "Defaults to the headerid ext's slugify function."],
- 'separator': ['-', 'Word separator. Defaults to "-".']
- }
-
- super(TocExtension, self).__init__(*args, **kwargs)
-
- def extendMarkdown(self, md, md_globals):
- md.registerExtension(self)
- self.md = md
- self.reset()
- tocext = self.TreeProcessorClass(md, self.getConfigs())
- # Headerid ext is set to '>prettify'. With this set to '_end',
- # it should always come after headerid ext (and honor ids assigned
- # by the header id extension) if both are used. Same goes for
- # attr_list extension. This must come last because we don't want
- # to redefine ids after toc is created. But we do want toc prettified.
- md.treeprocessors.add("toc", tocext, "_end")
-
- def reset(self):
- self.md.toc = ''
-
-
-def makeExtension(*args, **kwargs):
- return TocExtension(*args, **kwargs)
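
A usage sketch, not part of the deleted file, assuming the standalone markdown package:

    import markdown

    SRC = "[TOC]\n\n# Section One\n\n## Subsection\n\n# Section Two\n"

    md = markdown.Markdown(extensions=['toc'])
    html = md.convert(SRC)
    # The [TOC] marker is replaced in the output, and the same serialized
    # <div class="toc"> is kept on the instance for templates to reuse.
    print(md.toc)
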
diff --git a/src/calibre/ebooks/markdown/extensions/wikilinks.py b/src/calibre/ebooks/markdown/extensions/wikilinks.py
deleted file mode 100644
index 94e1b67948..0000000000
--- a/src/calibre/ebooks/markdown/extensions/wikilinks.py
+++ /dev/null
@@ -1,89 +0,0 @@
-'''
-WikiLinks Extension for Python-Markdown
-======================================
-
-Converts [[WikiLinks]] to relative links.
-
-See
-for documentation.
-
-Original code Copyright [Waylan Limberg](http://achinghead.com/).
-
-All changes Copyright The Python Markdown Project
-
-License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
-
-'''
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import Extension
-from ..inlinepatterns import Pattern
-from ..util import etree
-import re
-
-
-def build_url(label, base, end):
- """ Build a url from the label, a base, and an end. """
- clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
- return '%s%s%s' % (base, clean_label, end)
-
-
-class WikiLinkExtension(Extension):
-
- def __init__(self, *args, **kwargs):
- self.config = {
- 'base_url': ['/', 'String to append to beginning of URL.'],
- 'end_url': ['/', 'String to append to end of URL.'],
- 'html_class': ['wikilink', 'CSS hook. Leave blank for none.'],
- 'build_url': [build_url, 'Callable formats URL from label.'],
- }
-
- super(WikiLinkExtension, self).__init__(*args, **kwargs)
-
- def extendMarkdown(self, md, md_globals):
- self.md = md
-
- # append to end of inline patterns
- WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]'
- wikilinkPattern = WikiLinks(WIKILINK_RE, self.getConfigs())
- wikilinkPattern.md = md
- md.inlinePatterns.add('wikilink', wikilinkPattern, "<not_strong")
-and _then_ try to replace inline html, we would end up with a mess.
-So, we apply the expressions in the following order:
-
-* escape and backticks have to go before everything else, so
- that we can preempt any markdown patterns by escaping them.
-
-* then we handle auto-links (must be done before inline html)
-
-* then we handle inline HTML. At this point we will simply
- replace all inline HTML strings with a placeholder and add
- the actual HTML to a hash.
-
-* then inline images (must be done before links)
-
-* then bracketed links, first regular then reference-style
-
-* finally we apply strong and emphasis
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import util
-from . import odict
-import re
-try: # pragma: no cover
- from urllib.parse import urlparse, urlunparse
-except ImportError: # pragma: no cover
- from urlparse import urlparse, urlunparse
-try: # pragma: no cover
- from html import entities
-except ImportError: # pragma: no cover
- import htmlentitydefs as entities
-
-
-def build_inlinepatterns(md_instance, **kwargs):
- """ Build the default set of inline patterns for Markdown. """
- inlinePatterns = odict.OrderedDict()
- inlinePatterns["backtick"] = BacktickPattern(BACKTICK_RE)
- inlinePatterns["escape"] = EscapePattern(ESCAPE_RE, md_instance)
- inlinePatterns["reference"] = ReferencePattern(REFERENCE_RE, md_instance)
- inlinePatterns["link"] = LinkPattern(LINK_RE, md_instance)
- inlinePatterns["image_link"] = ImagePattern(IMAGE_LINK_RE, md_instance)
- inlinePatterns["image_reference"] = ImageReferencePattern(
- IMAGE_REFERENCE_RE, md_instance
- )
- inlinePatterns["short_reference"] = ReferencePattern(
- SHORT_REF_RE, md_instance
- )
- inlinePatterns["autolink"] = AutolinkPattern(AUTOLINK_RE, md_instance)
- inlinePatterns["automail"] = AutomailPattern(AUTOMAIL_RE, md_instance)
- inlinePatterns["linebreak"] = SubstituteTagPattern(LINE_BREAK_RE, 'br')
- if md_instance.safeMode != 'escape':
- inlinePatterns["html"] = HtmlPattern(HTML_RE, md_instance)
- inlinePatterns["entity"] = HtmlPattern(ENTITY_RE, md_instance)
- inlinePatterns["not_strong"] = SimpleTextPattern(NOT_STRONG_RE)
- inlinePatterns["em_strong"] = DoubleTagPattern(EM_STRONG_RE, 'strong,em')
- inlinePatterns["strong_em"] = DoubleTagPattern(STRONG_EM_RE, 'em,strong')
- inlinePatterns["strong"] = SimpleTagPattern(STRONG_RE, 'strong')
- inlinePatterns["emphasis"] = SimpleTagPattern(EMPHASIS_RE, 'em')
- if md_instance.smart_emphasis:
- inlinePatterns["emphasis2"] = SimpleTagPattern(SMART_EMPHASIS_RE, 'em')
- else:
- inlinePatterns["emphasis2"] = SimpleTagPattern(EMPHASIS_2_RE, 'em')
- return inlinePatterns
-
-"""
-The actual regular expressions for patterns
------------------------------------------------------------------------------
-"""
-
-NOBRACKET = r'[^\]\[]*'
-BRK = (
- r'\[(' +
- (NOBRACKET + r'(\[')*6 +
- (NOBRACKET + r'\])*')*6 +
- NOBRACKET + r')\]'
-)
-NOIMG = r'(?<!\!)'
-
-# [text](url) or [text](<url>) or [text](url "title")
-LINK_RE = NOIMG + BRK + \
- r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12\s*)?\)'''
-
-#  or 
-IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^")]+"[^"]*"|[^\)]*))\)'
-
-# [Google][3]
-REFERENCE_RE = NOIMG + BRK + r'\s?\[([^\]]*)\]'
-
-# [Google]
-SHORT_REF_RE = NOIMG + r'\[([^\]]+)\]'
-
-# ![alt text][2]
-IMAGE_REFERENCE_RE = r'\!' + BRK + '\s?\[([^\]]*)\]'
-
-# stand-alone * or _
-NOT_STRONG_RE = r'((^| )(\*|_)( |$))'
-
-#
-AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^>]*)>'
-
-#
-AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>'
-
-# <...>
-HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)'
-
-# &
-ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)'
-
-# two spaces at end of line
-LINE_BREAK_RE = r'  \n'
-
-
-def dequote(string):
- """Remove quotes from around a string."""
- if ((string.startswith('"') and string.endswith('"')) or
- (string.startswith("'") and string.endswith("'"))):
- return string[1:-1]
- else:
- return string
-
-
-ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
-
-
-def handleAttributes(text, parent):
- """Set values of an element based on attribute definitions ({@id=123})."""
- def attributeCallback(match):
- parent.set(match.group(1), match.group(2).replace('\n', ' '))
- return ATTR_RE.sub(attributeCallback, text)
-
-
-"""
-The pattern classes
------------------------------------------------------------------------------
-"""
-
-
-class Pattern(object):
- """Base class that inline patterns subclass. """
-
- def __init__(self, pattern, markdown_instance=None):
- """
- Create an instance of an inline pattern.
-
- Keyword arguments:
-
- * pattern: A regular expression that matches a pattern
-
- """
- self.pattern = pattern
- self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern,
- re.DOTALL | re.UNICODE)
-
- # Api for Markdown to pass safe_mode into instance
- self.safe_mode = False
- if markdown_instance:
- self.markdown = markdown_instance
-
- def getCompiledRegExp(self):
- """ Return a compiled regular expression. """
- return self.compiled_re
-
- def handleMatch(self, m):
- """Return a ElementTree element from the given match.
-
- Subclasses should override this method.
-
- Keyword arguments:
-
- * m: A re match object containing a match of the pattern.
-
- """
- pass # pragma: no cover
-
- def type(self):
- """ Return class name, to define pattern type """
- return self.__class__.__name__
-
- def unescape(self, text):
- """ Return unescaped text given text with an inline placeholder. """
- try:
- stash = self.markdown.treeprocessors['inline'].stashed_nodes
- except KeyError: # pragma: no cover
- return text
-
- def itertext(el): # pragma: no cover
- ' Reimplement Element.itertext for older python versions '
- tag = el.tag
- if not isinstance(tag, util.string_type) and tag is not None:
- return
- if el.text:
- yield el.text
- for e in el:
- for s in itertext(e):
- yield s
- if e.tail:
- yield e.tail
-
- def get_stash(m):
- id = m.group(1)
- if id in stash:
- value = stash.get(id)
- if isinstance(value, util.string_type):
- return value
- else:
- # An etree Element - return text content only
- return ''.join(itertext(value))
- return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
-
-
-class SimpleTextPattern(Pattern):
- """ Return a simple text of group(2) of a Pattern. """
- def handleMatch(self, m):
- return m.group(2)
-
-
-class EscapePattern(Pattern):
- """ Return an escaped character. """
-
- def handleMatch(self, m):
- char = m.group(2)
- if char in self.markdown.ESCAPED_CHARS:
- return '%s%s%s' % (util.STX, ord(char), util.ETX)
- else:
- return None
-
-
-class SimpleTagPattern(Pattern):
- """
- Return element of type `tag` with a text attribute of group(3)
- of a Pattern.
-
- """
- def __init__(self, pattern, tag):
- Pattern.__init__(self, pattern)
- self.tag = tag
-
- def handleMatch(self, m):
- el = util.etree.Element(self.tag)
- el.text = m.group(3)
- return el
-
-
-class SubstituteTagPattern(SimpleTagPattern):
- """ Return an element of type `tag` with no children. """
- def handleMatch(self, m):
- return util.etree.Element(self.tag)
-
-
-class BacktickPattern(Pattern):
- """ Return a `<code>` element containing the matching text. """
- def __init__(self, pattern):
- Pattern.__init__(self, pattern)
- self.tag = "code"
-
- def handleMatch(self, m):
- el = util.etree.Element(self.tag)
- el.text = util.AtomicString(m.group(3).strip())
- return el
-
-
-class DoubleTagPattern(SimpleTagPattern):
- """Return a ElementTree element nested in tag2 nested in tag1.
-
- Useful for strong emphasis etc.
-
- """
- def handleMatch(self, m):
- tag1, tag2 = self.tag.split(",")
- el1 = util.etree.Element(tag1)
- el2 = util.etree.SubElement(el1, tag2)
- el2.text = m.group(3)
- if len(m.groups()) == 5:
- el2.tail = m.group(4)
- return el1
-
-
-class HtmlPattern(Pattern):
- """ Store raw inline html and return a placeholder. """
- def handleMatch(self, m):
- rawhtml = self.unescape(m.group(2))
- place_holder = self.markdown.htmlStash.store(rawhtml)
- return place_holder
-
- def unescape(self, text):
- """ Return unescaped text given text with an inline placeholder. """
- try:
- stash = self.markdown.treeprocessors['inline'].stashed_nodes
- except KeyError: # pragma: no cover
- return text
-
- def get_stash(m):
- id = m.group(1)
- value = stash.get(id)
- if value is not None:
- try:
- return self.markdown.serializer(value)
- except:
- return '\%s' % value
-
- return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
-
-
-class LinkPattern(Pattern):
- """ Return a link element from the given match. """
- def handleMatch(self, m):
- el = util.etree.Element("a")
- el.text = m.group(2)
- title = m.group(13)
- href = m.group(9)
-
- if href:
- if href[0] == "<":
- href = href[1:-1]
- el.set("href", self.sanitize_url(self.unescape(href.strip())))
- else:
- el.set("href", "")
-
- if title:
- title = dequote(self.unescape(title))
- el.set("title", title)
- return el
-
- def sanitize_url(self, url):
- """
- Sanitize a url against xss attacks in "safe_mode".
-
- Rather than specifically blacklisting `javascript:alert("XSS")` and all
- its aliases (see ), we whitelist known
- safe url formats. Most urls contain a network location, however some
- are known not to (i.e.: mailto links). Script urls do not contain a
- location. Additionally, for `javascript:...`, the scheme would be
- "javascript" but some aliases will appear to `urlparse()` to have no
- scheme. On top of that relative links (i.e.: "foo/bar.html") have no
- scheme. Therefore we must check "path", "parameters", "query" and
- "fragment" for any literal colons. We don't check "scheme" for colons
- because it *should* never have any and "netloc" must allow the form:
- `username:password@host:port`.
-
- """
- if not self.markdown.safeMode:
- # Return immediately, bypassing parsing.
- return url
-
- try:
- scheme, netloc, path, params, query, fragment = url = urlparse(url)
- except ValueError: # pragma: no cover
- # Bad url - so bad it couldn't be parsed.
- return ''
-
- locless_schemes = ['', 'mailto', 'news']
- allowed_schemes = locless_schemes + ['http', 'https', 'ftp', 'ftps']
- if scheme not in allowed_schemes:
- # Not a known (allowed) scheme. Not safe.
- return ''
-
- if netloc == '' and scheme not in locless_schemes: # pragma: no cover
- # This should not happen. Treat as suspect.
- return ''
-
- for part in url[2:]:
- if ":" in part:
- # A colon in "path", "parameters", "query"
- # or "fragment" is suspect.
- return ''
-
- # Url passes all tests. Return url as-is.
- return urlunparse(url)
-
-
-class ImagePattern(LinkPattern):
- """ Return a img element from the given match. """
- def handleMatch(self, m):
- el = util.etree.Element("img")
- src_parts = m.group(9).split()
- if src_parts:
- src = src_parts[0]
- if src[0] == "<" and src[-1] == ">":
- src = src[1:-1]
- el.set('src', self.sanitize_url(self.unescape(src)))
- else:
- el.set('src', "")
- if len(src_parts) > 1:
- el.set('title', dequote(self.unescape(" ".join(src_parts[1:]))))
-
- if self.markdown.enable_attributes:
- truealt = handleAttributes(m.group(2), el)
- else:
- truealt = m.group(2)
-
- el.set('alt', self.unescape(truealt))
- return el
-
-
-class ReferencePattern(LinkPattern):
- """ Match to a stored reference and return link element. """
-
- NEWLINE_CLEANUP_RE = re.compile(r'[ ]?\n', re.MULTILINE)
-
- def handleMatch(self, m):
- try:
- id = m.group(9).lower()
- except IndexError:
- id = None
- if not id:
- # if we got something like "[Google][]" or "[Goggle]"
- # we'll use "google" as the id
- id = m.group(2).lower()
-
- # Clean up linebreaks in id
- id = self.NEWLINE_CLEANUP_RE.sub(' ', id)
- if id not in self.markdown.references: # ignore undefined refs
- return None
- href, title = self.markdown.references[id]
-
- text = m.group(2)
- return self.makeTag(href, title, text)
-
- def makeTag(self, href, title, text):
- el = util.etree.Element('a')
-
- el.set('href', self.sanitize_url(href))
- if title:
- el.set('title', title)
-
- el.text = text
- return el
-
-
-class ImageReferencePattern(ReferencePattern):
- """ Match to a stored reference and return img element. """
- def makeTag(self, href, title, text):
- el = util.etree.Element("img")
- el.set("src", self.sanitize_url(href))
- if title:
- el.set("title", title)
-
- if self.markdown.enable_attributes:
- text = handleAttributes(text, el)
-
- el.set("alt", self.unescape(text))
- return el
-
-
-class AutolinkPattern(Pattern):
- """ Return a link Element given an autolink. """
- def handleMatch(self, m):
- el = util.etree.Element("a")
- el.set('href', self.unescape(m.group(2)))
- el.text = util.AtomicString(m.group(2))
- return el
-
-
-class AutomailPattern(Pattern):
- """
- Return a mailto link Element given an automail link.
- """
- def handleMatch(self, m):
- el = util.etree.Element('a')
- email = self.unescape(m.group(2))
- if email.startswith("mailto:"):
- email = email[len("mailto:"):]
-
- def codepoint2name(code):
- """Return entity definition by code, or the code if not defined."""
- entity = entities.codepoint2name.get(code)
- if entity:
- return "%s%s;" % (util.AMP_SUBSTITUTE, entity)
- else:
- return "%s#%d;" % (util.AMP_SUBSTITUTE, code)
-
- letters = [codepoint2name(ord(letter)) for letter in email]
- el.text = util.AtomicString(''.join(letters))
-
- mailto = "mailto:" + email
- mailto = "".join([util.AMP_SUBSTITUTE + '#%d;' %
- ord(letter) for letter in mailto])
- el.set('href', mailto)
- return el
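
To make the wrapping done in Pattern.__init__ concrete: the compiled expression is "^(.*?)" + pattern + "(.*?)$", so a pattern's own groups shift by one, which is why SimpleTagPattern reads group(3) and SimpleTextPattern reads group(2). A standalone check with plain re (EMPHASIS_RE here is an assumed stand-in; only its group layout matters):

    import re

    EMPHASIS_RE = r'(\*)([^\*]+)\2'
    wrapped = re.compile("^(.*?)%s(.*?)$" % EMPHASIS_RE, re.DOTALL | re.UNICODE)

    m = wrapped.match("some *emphasised* text")
    # group(1) and the last group are the surrounding text added by the wrapper.
    print(m.group(3))  # 'emphasised' -- what SimpleTagPattern would use as el.text
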
diff --git a/src/calibre/ebooks/markdown/odict.py b/src/calibre/ebooks/markdown/odict.py
deleted file mode 100644
index 584ad7c173..0000000000
--- a/src/calibre/ebooks/markdown/odict.py
+++ /dev/null
@@ -1,191 +0,0 @@
-from __future__ import unicode_literals
-from __future__ import absolute_import
-from . import util
-from copy import deepcopy
-
-
-class OrderedDict(dict):
- """
- A dictionary that keeps its keys in the order in which they're inserted.
-
- Copied from Django's SortedDict with some modifications.
-
- """
- def __new__(cls, *args, **kwargs):
- instance = super(OrderedDict, cls).__new__(cls, *args, **kwargs)
- instance.keyOrder = []
- return instance
-
- def __init__(self, data=None):
- if data is None or isinstance(data, dict):
- data = data or []
- super(OrderedDict, self).__init__(data)
- self.keyOrder = list(data) if data else []
- else:
- super(OrderedDict, self).__init__()
- super_set = super(OrderedDict, self).__setitem__
- for key, value in data:
- # Take the ordering from first key
- if key not in self:
- self.keyOrder.append(key)
- # But override with last value in data (dict() does this)
- super_set(key, value)
-
- def __deepcopy__(self, memo):
- return self.__class__([(key, deepcopy(value, memo))
- for key, value in self.items()])
-
- def __copy__(self):
- # The Python's default copy implementation will alter the state
- # of self. The reason for this seems complex but is likely related to
- # subclassing dict.
- return self.copy()
-
- def __setitem__(self, key, value):
- if key not in self:
- self.keyOrder.append(key)
- super(OrderedDict, self).__setitem__(key, value)
-
- def __delitem__(self, key):
- super(OrderedDict, self).__delitem__(key)
- self.keyOrder.remove(key)
-
- def __iter__(self):
- return iter(self.keyOrder)
-
- def __reversed__(self):
- return reversed(self.keyOrder)
-
- def pop(self, k, *args):
- result = super(OrderedDict, self).pop(k, *args)
- try:
- self.keyOrder.remove(k)
- except ValueError:
- # Key wasn't in the dictionary in the first place. No problem.
- pass
- return result
-
- def popitem(self):
- result = super(OrderedDict, self).popitem()
- self.keyOrder.remove(result[0])
- return result
-
- def _iteritems(self):
- for key in self.keyOrder:
- yield key, self[key]
-
- def _iterkeys(self):
- for key in self.keyOrder:
- yield key
-
- def _itervalues(self):
- for key in self.keyOrder:
- yield self[key]
-
- if util.PY3: # pragma: no cover
- items = _iteritems
- keys = _iterkeys
- values = _itervalues
- else: # pragma: no cover
- iteritems = _iteritems
- iterkeys = _iterkeys
- itervalues = _itervalues
-
- def items(self):
- return [(k, self[k]) for k in self.keyOrder]
-
- def keys(self):
- return self.keyOrder[:]
-
- def values(self):
- return [self[k] for k in self.keyOrder]
-
- def update(self, dict_):
- for k in dict_:
- self[k] = dict_[k]
-
- def setdefault(self, key, default):
- if key not in self:
- self.keyOrder.append(key)
- return super(OrderedDict, self).setdefault(key, default)
-
- def value_for_index(self, index):
- """Returns the value of the item at the given zero-based index."""
- return self[self.keyOrder[index]]
-
- def insert(self, index, key, value):
- """Inserts the key, value pair before the item with the given index."""
- if key in self.keyOrder:
- n = self.keyOrder.index(key)
- del self.keyOrder[n]
- if n < index:
- index -= 1
- self.keyOrder.insert(index, key)
- super(OrderedDict, self).__setitem__(key, value)
-
- def copy(self):
- """Returns a copy of this object."""
- # This way of initializing the copy means it works for subclasses, too.
- return self.__class__(self)
-
- def __repr__(self):
- """
- Replaces the normal dict.__repr__ with a version that returns the keys
- in their Ordered order.
- """
- return '{%s}' % ', '.join(
- ['%r: %r' % (k, v) for k, v in self._iteritems()]
- )
-
- def clear(self):
- super(OrderedDict, self).clear()
- self.keyOrder = []
-
- def index(self, key):
- """ Return the index of a given key. """
- try:
- return self.keyOrder.index(key)
- except ValueError:
- raise ValueError("Element '%s' was not found in OrderedDict" % key)
-
- def index_for_location(self, location):
- """ Return index or None for a given location. """
- if location == '_begin':
- i = 0
- elif location == '_end':
- i = None
- elif location.startswith('<') or location.startswith('>'):
- i = self.index(location[1:])
- if location.startswith('>'):
- if i >= len(self):
- # last item
- i = None
- else:
- i += 1
- else:
- raise ValueError('Not a valid location: "%s". Location key '
- 'must start with a ">" or "<".' % location)
- return i
-
- def add(self, key, value, location):
- """ Insert by key location. """
- i = self.index_for_location(location)
- if i is not None:
- self.insert(i, key, value)
- else:
- self.__setitem__(key, value)
-
- def link(self, key, location):
- """ Change location of an existing item. """
- n = self.keyOrder.index(key)
- del self.keyOrder[n]
- try:
- i = self.index_for_location(location)
- if i is not None:
- self.keyOrder.insert(i, key)
- else:
- self.keyOrder.append(key)
- except Exception as e:
- # restore to prevent data loss and reraise
- self.keyOrder.insert(n, key)
- raise e
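
The location strings accepted by index_for_location() and add() are easiest to see in action. A sketch assuming markdown 2.x, where this class is importable as markdown.odict.OrderedDict (otherwise paste the class above into a local module):

    from markdown.odict import OrderedDict  # markdown 2.x only

    od = OrderedDict()
    od['escape'] = 1
    od['strong'] = 2
    od.add('backtick', 3, '_begin')     # insert at the front
    od.add('emphasis', 4, '>strong')    # insert just after 'strong'
    od.add('not_strong', 5, '<strong')  # insert just before 'strong'
    print(od.keys())  # ['backtick', 'escape', 'not_strong', 'strong', 'emphasis']
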
diff --git a/src/calibre/ebooks/markdown/postprocessors.py b/src/calibre/ebooks/markdown/postprocessors.py
deleted file mode 100644
index 2d4dcb589e..0000000000
--- a/src/calibre/ebooks/markdown/postprocessors.py
+++ /dev/null
@@ -1,108 +0,0 @@
-"""
-POST-PROCESSORS
-=============================================================================
-
-Markdown also allows post-processors, which are similar to preprocessors in
-that they need to implement a "run" method. However, they are run after core
-processing.
-
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import util
-from . import odict
-import re
-
-
-def build_postprocessors(md_instance, **kwargs):
- """ Build the default postprocessors for Markdown. """
- postprocessors = odict.OrderedDict()
- postprocessors["raw_html"] = RawHtmlPostprocessor(md_instance)
- postprocessors["amp_substitute"] = AndSubstitutePostprocessor()
- postprocessors["unescape"] = UnescapePostprocessor()
- return postprocessors
-
-
-class Postprocessor(util.Processor):
- """
- Postprocessors are run after the ElementTree is converted back into text.
-
- Each Postprocessor implements a "run" method that takes a pointer to a
- text string, modifies it as necessary and returns a text string.
-
- Postprocessors must extend markdown.Postprocessor.
-
- """
-
- def run(self, text):
- """
- Subclasses of Postprocessor should implement a `run` method, which
- takes the html document as a single text string and returns a
- (possibly modified) string.
-
- """
- pass # pragma: no cover
-
-
-class RawHtmlPostprocessor(Postprocessor):
- """ Restore raw html to the document. """
-
- def run(self, text):
- """ Iterate over html stash and restore "safe" html. """
- for i in range(self.markdown.htmlStash.html_counter):
- html, safe = self.markdown.htmlStash.rawHtmlBlocks[i]
- if self.markdown.safeMode and not safe:
- if str(self.markdown.safeMode).lower() == 'escape':
- html = self.escape(html)
- elif str(self.markdown.safeMode).lower() == 'remove':
- html = ''
- else:
- html = self.markdown.html_replacement_text
- if (self.isblocklevel(html) and
- (safe or not self.markdown.safeMode)):
- text = text.replace(
- "<p>%s</p>" %
- (self.markdown.htmlStash.get_placeholder(i)),
- html + "\n"
- )
- text = text.replace(
- self.markdown.htmlStash.get_placeholder(i), html
- )
- return text
-
- def escape(self, html):
- """ Basic html escaping """
-        html = html.replace('&', '&amp;')
-        html = html.replace('<', '&lt;')
-        html = html.replace('>', '&gt;')
-        return html.replace('"', '&quot;')
-
- def isblocklevel(self, html):
- m = re.match(r'^\<\/?([^ >]+)', html)
- if m:
- if m.group(1)[0] in ('!', '?', '@', '%'):
- # Comment, php etc...
- return True
- return util.isBlockLevel(m.group(1))
- return False
-
-
-class AndSubstitutePostprocessor(Postprocessor):
- """ Restore valid entities """
-
- def run(self, text):
- text = text.replace(util.AMP_SUBSTITUTE, "&")
- return text
-
-
-class UnescapePostprocessor(Postprocessor):
- """ Restore escaped chars """
-
- RE = re.compile('%s(\d+)%s' % (util.STX, util.ETX))
-
- def unescape(self, m):
- return util.int2str(int(m.group(1)))
-
- def run(self, text):
- return self.RE.sub(self.unescape, text)
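
Code that previously imported this vendored module is expected to keep working against the external python-markdown 2.x package that the DeVendor hook (see the startup.py hunk at the end of this patch) redirects to. A hedged sketch of a custom postprocessor registered through that external API; the extension, class and key names here are made up for illustration:

    import markdown
    from markdown.extensions import Extension
    from markdown.postprocessors import Postprocessor

    class TrailingNewlinePostprocessor(Postprocessor):
        def run(self, text):
            # Normalize the serialized HTML to end with exactly one newline.
            return text.rstrip('\n') + '\n'

    class TrailingNewlineExtension(Extension):
        def extendMarkdown(self, md, md_globals):
            # '_end' uses the same location semantics as the odict sketch above.
            md.postprocessors.add('trailing_newline',
                                  TrailingNewlinePostprocessor(md), '_end')

    print(markdown.markdown('*hello*', extensions=[TrailingNewlineExtension()]))
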
diff --git a/src/calibre/ebooks/markdown/preprocessors.py b/src/calibre/ebooks/markdown/preprocessors.py
deleted file mode 100644
index 7ea4fcf9f5..0000000000
--- a/src/calibre/ebooks/markdown/preprocessors.py
+++ /dev/null
@@ -1,346 +0,0 @@
-"""
-PRE-PROCESSORS
-=============================================================================
-
-Preprocessors work on source text before we start doing anything too
-complicated.
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import util
-from . import odict
-import re
-
-
-def build_preprocessors(md_instance, **kwargs):
- """ Build the default set of preprocessors used by Markdown. """
- preprocessors = odict.OrderedDict()
- preprocessors['normalize_whitespace'] = NormalizeWhitespace(md_instance)
- if md_instance.safeMode != 'escape':
- preprocessors["html_block"] = HtmlBlockPreprocessor(md_instance)
- preprocessors["reference"] = ReferencePreprocessor(md_instance)
- return preprocessors
-
-
-class Preprocessor(util.Processor):
- """
- Preprocessors are run after the text is broken into lines.
-
- Each preprocessor implements a "run" method that takes a pointer to a
- list of lines of the document, modifies it as necessary and returns
- either the same pointer or a pointer to a new list.
-
- Preprocessors must extend markdown.Preprocessor.
-
- """
- def run(self, lines):
- """
- Each subclass of Preprocessor should override the `run` method, which
- takes the document as a list of strings split by newlines and returns
- the (possibly modified) list of lines.
-
- """
- pass # pragma: no cover
-
-
-class NormalizeWhitespace(Preprocessor):
- """ Normalize whitespace for consistant parsing. """
-
- def run(self, lines):
- source = '\n'.join(lines)
- source = source.replace(util.STX, "").replace(util.ETX, "")
- source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
- source = source.expandtabs(self.markdown.tab_length)
- source = re.sub(r'(?<=\n) +\n', '\n', source)
- return source.split('\n')
-
-
-class HtmlBlockPreprocessor(Preprocessor):
- """Remove html blocks from the text and store them for later retrieval."""
-
-    right_tag_patterns = ["</%s>", "%s>"]
- attrs_pattern = r"""
- \s+(?P[^>"'/= ]+)=(?P['"])(?P.*?)(?P=q) # attr="value"
- | # OR
- \s+(?P[^>"'/= ]+)=(?P[^> ]+) # attr=value
- | # OR
- \s+(?P[^>"'/= ]+) # attr
- """
- left_tag_pattern = r'^\<(?P[^> ]+)(?P(%s)*)\s*\/?\>?' % \
- attrs_pattern
- attrs_re = re.compile(attrs_pattern, re.VERBOSE)
- left_tag_re = re.compile(left_tag_pattern, re.VERBOSE)
- markdown_in_raw = False
-
- def _get_left_tag(self, block):
- m = self.left_tag_re.match(block)
- if m:
- tag = m.group('tag')
- raw_attrs = m.group('attrs')
- attrs = {}
- if raw_attrs:
- for ma in self.attrs_re.finditer(raw_attrs):
- if ma.group('attr'):
- if ma.group('value'):
- attrs[ma.group('attr').strip()] = ma.group('value')
- else:
- attrs[ma.group('attr').strip()] = ""
- elif ma.group('attr1'):
- if ma.group('value1'):
- attrs[ma.group('attr1').strip()] = ma.group(
- 'value1'
- )
- else:
- attrs[ma.group('attr1').strip()] = ""
- elif ma.group('attr2'):
- attrs[ma.group('attr2').strip()] = ""
- return tag, len(m.group(0)), attrs
- else:
- tag = block[1:].split(">", 1)[0].lower()
- return tag, len(tag)+2, {}
-
- def _recursive_tagfind(self, ltag, rtag, start_index, block):
- while 1:
- i = block.find(rtag, start_index)
- if i == -1:
- return -1
- j = block.find(ltag, start_index)
- # if no ltag, or rtag found before another ltag, return index
- if (j > i or j == -1):
- return i + len(rtag)
- # another ltag found before rtag, use end of ltag as starting
- # point and search again
- j = block.find('>', j)
- start_index = self._recursive_tagfind(ltag, rtag, j + 1, block)
- if start_index == -1:
- # HTML potentially malformed- ltag has no corresponding
- # rtag
- return -1
-
- def _get_right_tag(self, left_tag, left_index, block):
- for p in self.right_tag_patterns:
- tag = p % left_tag
- i = self._recursive_tagfind(
- "<%s" % left_tag, tag, left_index, block
- )
- if i > 2:
- return tag.lstrip("<").rstrip(">"), i
- return block.rstrip()[-left_index:-1].lower(), len(block)
-
- def _equal_tags(self, left_tag, right_tag):
- if left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
- return True
- if ("/" + left_tag) == right_tag:
- return True
- if (right_tag == "--" and left_tag == "--"):
- return True
- elif left_tag == right_tag[1:] and right_tag[0] == "/":
- return True
- else:
- return False
-
- def _is_oneliner(self, tag):
- return (tag in ['hr', 'hr/'])
-
- def _stringindex_to_listindex(self, stringindex, items):
- """
- Same effect as concatenating the strings in items,
- finding the character to which stringindex refers in that string,
- and returning the index of the item in which that character resides.
- """
- items.append('dummy')
- i, count = 0, 0
- while count <= stringindex:
- count += len(items[i])
- i += 1
- return i - 1
-
- def _nested_markdown_in_html(self, items):
- """Find and process html child elements of the given element block."""
- for i, item in enumerate(items):
- if self.left_tag_re.match(item):
- left_tag, left_index, attrs = \
- self._get_left_tag(''.join(items[i:]))
- right_tag, data_index = self._get_right_tag(
- left_tag, left_index, ''.join(items[i:]))
- right_listindex = \
- self._stringindex_to_listindex(data_index, items[i:]) + i
- if 'markdown' in attrs.keys():
- items[i] = items[i][left_index:] # remove opening tag
- placeholder = self.markdown.htmlStash.store_tag(
- left_tag, attrs, i + 1, right_listindex + 1)
- items.insert(i, placeholder)
- if len(items) - right_listindex <= 1: # last nest, no tail
- right_listindex -= 1
- items[right_listindex] = items[right_listindex][
- :-len(right_tag) - 2] # remove closing tag
- else: # raw html
- if len(items) - right_listindex <= 1: # last element
- right_listindex -= 1
- if right_listindex <= i:
- right_listindex = i + 1
- placeholder = self.markdown.htmlStash.store('\n\n'.join(
- items[i:right_listindex]))
- del items[i:right_listindex]
- items.insert(i, placeholder)
- return items
-
- def run(self, lines):
- text = "\n".join(lines)
- new_blocks = []
- text = text.rsplit("\n\n")
- items = []
- left_tag = ''
- right_tag = ''
- in_tag = False # flag
-
- while text:
- block = text[0]
- if block.startswith("\n"):
- block = block[1:]
- text = text[1:]
-
- if block.startswith("\n"):
- block = block[1:]
-
- if not in_tag:
- if block.startswith("<") and len(block.strip()) > 1:
-
- if block[1:4] == "!--":
- # is a comment block
- left_tag, left_index, attrs = "--", 2, {}
- else:
- left_tag, left_index, attrs = self._get_left_tag(block)
- right_tag, data_index = self._get_right_tag(left_tag,
- left_index,
- block)
- # keep checking conditions below and maybe just append
-
- if data_index < len(block) and (util.isBlockLevel(left_tag) or left_tag == '--'):
- text.insert(0, block[data_index:])
- block = block[:data_index]
-
- if not (util.isBlockLevel(left_tag) or block[1] in ["!", "?", "@", "%"]):
- new_blocks.append(block)
- continue
-
- if self._is_oneliner(left_tag):
- new_blocks.append(block.strip())
- continue
-
- if block.rstrip().endswith(">") \
- and self._equal_tags(left_tag, right_tag):
- if self.markdown_in_raw and 'markdown' in attrs.keys():
- block = block[left_index:-len(right_tag) - 2]
- new_blocks.append(self.markdown.htmlStash.
- store_tag(left_tag, attrs, 0, 2))
- new_blocks.extend([block])
- else:
- new_blocks.append(
- self.markdown.htmlStash.store(block.strip()))
- continue
- else:
- # if is block level tag and is not complete
- if (not self._equal_tags(left_tag, right_tag)) and \
- (util.isBlockLevel(left_tag) or left_tag == "--"):
- items.append(block.strip())
- in_tag = True
- else:
- new_blocks.append(
- self.markdown.htmlStash.store(block.strip())
- )
- continue
-
- else:
- new_blocks.append(block)
-
- else:
- items.append(block)
-
- right_tag, data_index = self._get_right_tag(left_tag, 0, block)
-
- if self._equal_tags(left_tag, right_tag):
- # if find closing tag
-
- if data_index < len(block):
- # we have more text after right_tag
- items[-1] = block[:data_index]
- text.insert(0, block[data_index:])
-
- in_tag = False
- if self.markdown_in_raw and 'markdown' in attrs.keys():
- items[0] = items[0][left_index:]
- items[-1] = items[-1][:-len(right_tag) - 2]
- if items[len(items) - 1]: # not a newline/empty string
- right_index = len(items) + 3
- else:
- right_index = len(items) + 2
- new_blocks.append(self.markdown.htmlStash.store_tag(
- left_tag, attrs, 0, right_index))
- placeholderslen = len(self.markdown.htmlStash.tag_data)
- new_blocks.extend(
- self._nested_markdown_in_html(items))
- nests = len(self.markdown.htmlStash.tag_data) - \
- placeholderslen
- self.markdown.htmlStash.tag_data[-1 - nests][
- 'right_index'] += nests - 2
- else:
- new_blocks.append(
- self.markdown.htmlStash.store('\n\n'.join(items)))
- items = []
-
- if items:
- if self.markdown_in_raw and 'markdown' in attrs.keys():
- items[0] = items[0][left_index:]
- items[-1] = items[-1][:-len(right_tag) - 2]
- if items[len(items) - 1]: # not a newline/empty string
- right_index = len(items) + 3
- else:
- right_index = len(items) + 2
- new_blocks.append(
- self.markdown.htmlStash.store_tag(
- left_tag, attrs, 0, right_index))
- placeholderslen = len(self.markdown.htmlStash.tag_data)
- new_blocks.extend(self._nested_markdown_in_html(items))
- nests = len(self.markdown.htmlStash.tag_data) - placeholderslen
- self.markdown.htmlStash.tag_data[-1 - nests][
- 'right_index'] += nests - 2
- else:
- new_blocks.append(
- self.markdown.htmlStash.store('\n\n'.join(items)))
- new_blocks.append('\n')
-
- new_text = "\n\n".join(new_blocks)
- return new_text.split("\n")
-
-
-class ReferencePreprocessor(Preprocessor):
- """ Remove reference definitions from text and store for later use. """
-
- TITLE = r'[ ]*(\"(.*)\"|\'(.*)\'|\((.*)\))[ ]*'
- RE = re.compile(
- r'^[ ]{0,3}\[([^\]]*)\]:\s*([^ ]*)[ ]*(%s)?$' % TITLE, re.DOTALL
- )
- TITLE_RE = re.compile(r'^%s$' % TITLE)
-
- def run(self, lines):
- new_text = []
- while lines:
- line = lines.pop(0)
- m = self.RE.match(line)
- if m:
- id = m.group(1).strip().lower()
- link = m.group(2).lstrip('<').rstrip('>')
- t = m.group(5) or m.group(6) or m.group(7)
- if not t:
- # Check next line for title
- tm = self.TITLE_RE.match(lines[0])
- if tm:
- lines.pop(0)
- t = tm.group(2) or tm.group(3) or tm.group(4)
- self.markdown.references[id] = (link, t)
- else:
- new_text.append(line)
-
- return new_text # + "\n"
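
A preprocessor written against the external package follows the same run(lines) contract as the module deleted above. A small hedged sketch; the '//' comment syntax it strips is hypothetical, chosen only to show the list-in/list-out shape:

    import markdown
    from markdown.extensions import Extension
    from markdown.preprocessors import Preprocessor

    class StripSlashCommentsPreprocessor(Preprocessor):
        def run(self, lines):
            # Drop source lines starting with '//' before any block parsing.
            return [line for line in lines if not line.startswith('//')]

    class StripSlashCommentsExtension(Extension):
        def extendMarkdown(self, md, md_globals):
            # Run after reference definitions have been collected.
            md.preprocessors.add('strip_slash_comments',
                                 StripSlashCommentsPreprocessor(md), '>reference')

    print(markdown.markdown('// hidden\n\n*shown*',
                            extensions=[StripSlashCommentsExtension()]))
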
diff --git a/src/calibre/ebooks/markdown/serializers.py b/src/calibre/ebooks/markdown/serializers.py
deleted file mode 100644
index 1e8d9dd288..0000000000
--- a/src/calibre/ebooks/markdown/serializers.py
+++ /dev/null
@@ -1,282 +0,0 @@
-# markdown/serializers.py
-#
-# Add x/html serialization to Elementree
-# Taken from ElementTree 1.3 preview with slight modifications
-#
-# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved.
-#
-# fredrik@pythonware.com
-# http://www.pythonware.com
-#
-# --------------------------------------------------------------------
-# The ElementTree toolkit is
-#
-# Copyright (c) 1999-2007 by Fredrik Lundh
-#
-# By obtaining, using, and/or copying this software and/or its
-# associated documentation, you agree that you have read, understood,
-# and will comply with the following terms and conditions:
-#
-# Permission to use, copy, modify, and distribute this software and
-# its associated documentation for any purpose and without fee is
-# hereby granted, provided that the above copyright notice appears in
-# all copies, and that both that copyright notice and this permission
-# notice appear in supporting documentation, and that the name of
-# Secret Labs AB or the author not be used in advertising or publicity
-# pertaining to distribution of the software without specific, written
-# prior permission.
-#
-# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
-# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
-# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
-# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
-# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
-# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
-# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
-# OF THIS SOFTWARE.
-# --------------------------------------------------------------------
-
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import util
-ElementTree = util.etree.ElementTree
-QName = util.etree.QName
-if hasattr(util.etree, 'test_comment'): # pragma: no cover
- Comment = util.etree.test_comment
-else: # pragma: no cover
- Comment = util.etree.Comment
-PI = util.etree.PI
-ProcessingInstruction = util.etree.ProcessingInstruction
-
-__all__ = ['to_html_string', 'to_xhtml_string']
-
-HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
- "img", "input", "isindex", "link", "meta" "param")
-
-try:
- HTML_EMPTY = set(HTML_EMPTY)
-except NameError: # pragma: no cover
- pass
-
-_namespace_map = {
- # "well-known" namespace prefixes
- "http://www.w3.org/XML/1998/namespace": "xml",
- "http://www.w3.org/1999/xhtml": "html",
- "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
- "http://schemas.xmlsoap.org/wsdl/": "wsdl",
- # xml schema
- "http://www.w3.org/2001/XMLSchema": "xs",
- "http://www.w3.org/2001/XMLSchema-instance": "xsi",
-    # dublin core
- "http://purl.org/dc/elements/1.1/": "dc",
-}
-
-
-def _raise_serialization_error(text): # pragma: no cover
- raise TypeError(
- "cannot serialize %r (type %s)" % (text, type(text).__name__)
- )
-
-
-def _encode(text, encoding):
- try:
- return text.encode(encoding, "xmlcharrefreplace")
- except (TypeError, AttributeError): # pragma: no cover
- _raise_serialization_error(text)
-
-
-def _escape_cdata(text):
- # escape character data
- try:
- # it's worth avoiding do-nothing calls for strings that are
- # shorter than 500 character, or so. assume that's, by far,
- # the most common case in most applications.
- if "&" in text:
- text = text.replace("&", "&")
- if "<" in text:
- text = text.replace("<", "<")
- if ">" in text:
- text = text.replace(">", ">")
- return text
- except (TypeError, AttributeError): # pragma: no cover
- _raise_serialization_error(text)
-
-
-def _escape_attrib(text):
- # escape attribute value
- try:
- if "&" in text:
- text = text.replace("&", "&")
- if "<" in text:
- text = text.replace("<", "<")
- if ">" in text:
- text = text.replace(">", ">")
- if "\"" in text:
- text = text.replace("\"", """)
- if "\n" in text:
- text = text.replace("\n", "
")
- return text
- except (TypeError, AttributeError): # pragma: no cover
- _raise_serialization_error(text)
-
-
-def _escape_attrib_html(text):
- # escape attribute value
- try:
- if "&" in text:
- text = text.replace("&", "&")
- if "<" in text:
- text = text.replace("<", "<")
- if ">" in text:
- text = text.replace(">", ">")
- if "\"" in text:
- text = text.replace("\"", """)
- return text
- except (TypeError, AttributeError): # pragma: no cover
- _raise_serialization_error(text)
-
-
-def _serialize_html(write, elem, qnames, namespaces, format):
- tag = elem.tag
- text = elem.text
- if tag is Comment:
- write("" % _escape_cdata(text))
- elif tag is ProcessingInstruction:
- write("%s?>" % _escape_cdata(text))
- else:
- tag = qnames[tag]
- if tag is None:
- if text:
- write(_escape_cdata(text))
- for e in elem:
- _serialize_html(write, e, qnames, None, format)
- else:
- write("<" + tag)
- items = elem.items()
- if items or namespaces:
- items = sorted(items) # lexical order
- for k, v in items:
- if isinstance(k, QName):
- k = k.text
- if isinstance(v, QName):
- v = qnames[v.text]
- else:
- v = _escape_attrib_html(v)
- if qnames[k] == v and format == 'html':
- # handle boolean attributes
- write(" %s" % v)
- else:
- write(" %s=\"%s\"" % (qnames[k], v))
- if namespaces:
- items = namespaces.items()
- items.sort(key=lambda x: x[1]) # sort on prefix
- for v, k in items:
- if k:
- k = ":" + k
- write(" xmlns%s=\"%s\"" % (k, _escape_attrib(v)))
- if format == "xhtml" and tag.lower() in HTML_EMPTY:
- write(" />")
- else:
- write(">")
- if text:
- if tag.lower() in ["script", "style"]:
- write(text)
- else:
- write(_escape_cdata(text))
- for e in elem:
- _serialize_html(write, e, qnames, None, format)
- if tag.lower() not in HTML_EMPTY:
- write("" + tag + ">")
- if elem.tail:
- write(_escape_cdata(elem.tail))
-
-
-def _write_html(root,
- encoding=None,
- default_namespace=None,
- format="html"):
- assert root is not None
- data = []
- write = data.append
- qnames, namespaces = _namespaces(root, default_namespace)
- _serialize_html(write, root, qnames, namespaces, format)
- if encoding is None:
- return "".join(data)
- else:
-        return _encode("".join(data), encoding)
-
-
-# --------------------------------------------------------------------
-# serialization support
-
-def _namespaces(elem, default_namespace=None):
- # identify namespaces used in this tree
-
- # maps qnames to *encoded* prefix:local names
- qnames = {None: None}
-
- # maps uri:s to prefixes
- namespaces = {}
- if default_namespace:
- namespaces[default_namespace] = ""
-
- def add_qname(qname):
- # calculate serialized qname representation
- try:
- if qname[:1] == "{":
- uri, tag = qname[1:].split("}", 1)
- prefix = namespaces.get(uri)
- if prefix is None:
- prefix = _namespace_map.get(uri)
- if prefix is None:
- prefix = "ns%d" % len(namespaces)
- if prefix != "xml":
- namespaces[uri] = prefix
- if prefix:
- qnames[qname] = "%s:%s" % (prefix, tag)
- else:
- qnames[qname] = tag # default element
- else:
- if default_namespace:
- raise ValueError(
- "cannot use non-qualified names with "
- "default_namespace option"
- )
- qnames[qname] = qname
- except TypeError: # pragma: no cover
- _raise_serialization_error(qname)
-
- # populate qname and namespaces table
- try:
- iterate = elem.iter
- except AttributeError:
- iterate = elem.getiterator # cET compatibility
- for elem in iterate():
- tag = elem.tag
- if isinstance(tag, QName) and tag.text not in qnames:
- add_qname(tag.text)
- elif isinstance(tag, util.string_type):
- if tag not in qnames:
- add_qname(tag)
- elif tag is not None and tag is not Comment and tag is not PI:
- _raise_serialization_error(tag)
- for key, value in elem.items():
- if isinstance(key, QName):
- key = key.text
- if key not in qnames:
- add_qname(key)
- if isinstance(value, QName) and value.text not in qnames:
- add_qname(value.text)
- text = elem.text
- if isinstance(text, QName) and text.text not in qnames:
- add_qname(text.text)
- return qnames, namespaces
-
-
-def to_html_string(element):
- return _write_html(ElementTree(element).getroot(), format="html")
-
-
-def to_xhtml_string(element):
- return _write_html(ElementTree(element).getroot(), format="xhtml")
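
The practical difference between the two serializers above is how empty elements are closed. A short sketch against the external package's matching module (the output in the comments is approximate and assumes markdown.serializers exposes the same functions):

    from xml.etree.ElementTree import Element, SubElement
    from markdown.serializers import to_html_string, to_xhtml_string

    div = Element('div')
    SubElement(div, 'br')
    SubElement(div, 'img', {'src': 'cover.png'})

    print(to_html_string(div))   # roughly: <div><br><img src="cover.png"></div>
    print(to_xhtml_string(div))  # roughly: <div><br /><img src="cover.png" /></div>
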
diff --git a/src/calibre/ebooks/markdown/treeprocessors.py b/src/calibre/ebooks/markdown/treeprocessors.py
deleted file mode 100644
index d06f192885..0000000000
--- a/src/calibre/ebooks/markdown/treeprocessors.py
+++ /dev/null
@@ -1,371 +0,0 @@
-from __future__ import unicode_literals
-from __future__ import absolute_import
-from . import util
-from . import odict
-from . import inlinepatterns
-
-
-def build_treeprocessors(md_instance, **kwargs):
- """ Build the default treeprocessors for Markdown. """
- treeprocessors = odict.OrderedDict()
- treeprocessors["inline"] = InlineProcessor(md_instance)
- treeprocessors["prettify"] = PrettifyTreeprocessor(md_instance)
- return treeprocessors
-
-
-def isString(s):
- """ Check if it's string """
- if not isinstance(s, util.AtomicString):
- return isinstance(s, util.string_type)
- return False
-
-
-class Treeprocessor(util.Processor):
- """
- Treeprocessors are run on the ElementTree object before serialization.
-
- Each Treeprocessor implements a "run" method that takes a pointer to an
- ElementTree, modifies it as necessary and returns an ElementTree
- object.
-
- Treeprocessors must extend markdown.Treeprocessor.
-
- """
- def run(self, root):
- """
- Subclasses of Treeprocessor should implement a `run` method, which
- takes a root ElementTree. This method can return another ElementTree
- object, and the existing root ElementTree will be replaced, or it can
- modify the current tree and return None.
- """
- pass # pragma: no cover
-
-
-class InlineProcessor(Treeprocessor):
- """
- A Treeprocessor that traverses a tree, applying inline patterns.
- """
-
- def __init__(self, md):
- self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX
- self.__placeholder_suffix = util.ETX
- self.__placeholder_length = 4 + len(self.__placeholder_prefix) \
- + len(self.__placeholder_suffix)
- self.__placeholder_re = util.INLINE_PLACEHOLDER_RE
- self.markdown = md
- self.inlinePatterns = md.inlinePatterns
-
- def __makePlaceholder(self, type):
- """ Generate a placeholder """
- id = "%04d" % len(self.stashed_nodes)
- hash = util.INLINE_PLACEHOLDER % id
- return hash, id
-
- def __findPlaceholder(self, data, index):
- """
- Extract id from data string, start from index
-
- Keyword arguments:
-
- * data: string
- * index: index, from which we start search
-
- Returns: placeholder id and string index, after the found placeholder.
-
- """
- m = self.__placeholder_re.search(data, index)
- if m:
- return m.group(1), m.end()
- else:
- return None, index + 1
-
- def __stashNode(self, node, type):
- """ Add node to stash """
- placeholder, id = self.__makePlaceholder(type)
- self.stashed_nodes[id] = node
- return placeholder
-
- def __handleInline(self, data, patternIndex=0):
- """
- Process string with inline patterns and replace it
- with placeholders
-
- Keyword arguments:
-
- * data: A line of Markdown text
- * patternIndex: The index of the inlinePattern to start with
-
- Returns: String with placeholders.
-
- """
- if not isinstance(data, util.AtomicString):
- startIndex = 0
- while patternIndex < len(self.inlinePatterns):
- data, matched, startIndex = self.__applyPattern(
- self.inlinePatterns.value_for_index(patternIndex),
- data, patternIndex, startIndex)
- if not matched:
- patternIndex += 1
- return data
-
- def __processElementText(self, node, subnode, isText=True):
- """
- Process placeholders in Element.text or Element.tail
- of Elements popped from self.stashed_nodes.
-
-        Keyword arguments:
-
- * node: parent node
- * subnode: processing node
- * isText: bool variable, True - it's text, False - it's tail
-
- Returns: None
-
- """
- if isText:
- text = subnode.text
- subnode.text = None
- else:
- text = subnode.tail
- subnode.tail = None
-
- childResult = self.__processPlaceholders(text, subnode, isText)
-
- if not isText and node is not subnode:
- pos = list(node).index(subnode) + 1
- else:
- pos = 0
-
- childResult.reverse()
- for newChild in childResult:
- node.insert(pos, newChild)
-
- def __processPlaceholders(self, data, parent, isText=True):
- """
- Process string with placeholders and generate ElementTree tree.
-
- Keyword arguments:
-
- * data: string with placeholders instead of ElementTree elements.
- * parent: Element, which contains processing inline data
-
- Returns: list with ElementTree elements with applied inline patterns.
-
- """
- def linkText(text):
- if text:
- if result:
- if result[-1].tail:
- result[-1].tail += text
- else:
- result[-1].tail = text
- elif not isText:
- if parent.tail:
- parent.tail += text
- else:
- parent.tail = text
- else:
- if parent.text:
- parent.text += text
- else:
- parent.text = text
- result = []
- strartIndex = 0
- while data:
- index = data.find(self.__placeholder_prefix, strartIndex)
- if index != -1:
- id, phEndIndex = self.__findPlaceholder(data, index)
-
- if id in self.stashed_nodes:
- node = self.stashed_nodes.get(id)
-
- if index > 0:
- text = data[strartIndex:index]
- linkText(text)
-
- if not isString(node): # it's Element
- for child in [node] + list(node):
- if child.tail:
- if child.tail.strip():
- self.__processElementText(
- node, child, False
- )
- if child.text:
- if child.text.strip():
- self.__processElementText(child, child)
- else: # it's just a string
- linkText(node)
- strartIndex = phEndIndex
- continue
-
- strartIndex = phEndIndex
- result.append(node)
-
- else: # wrong placeholder
- end = index + len(self.__placeholder_prefix)
- linkText(data[strartIndex:end])
- strartIndex = end
- else:
- text = data[strartIndex:]
- if isinstance(data, util.AtomicString):
-                    # We don't want to lose the AtomicString
- text = util.AtomicString(text)
- linkText(text)
- data = ""
-
- return result
-
- def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
- """
- Check if the line fits the pattern, create the necessary
- elements, add it to stashed_nodes.
-
- Keyword arguments:
-
- * data: the text to be processed
- * pattern: the pattern to be checked
- * patternIndex: index of current pattern
- * startIndex: string index, from which we start searching
-
- Returns: String with placeholders instead of ElementTree elements.
-
- """
- match = pattern.getCompiledRegExp().match(data[startIndex:])
- leftData = data[:startIndex]
-
- if not match:
- return data, False, 0
-
- node = pattern.handleMatch(match)
-
- if node is None:
- return data, True, len(leftData)+match.span(len(match.groups()))[0]
-
- if not isString(node):
- if not isinstance(node.text, util.AtomicString):
- # We need to process current node too
- for child in [node] + list(node):
- if not isString(node):
- if child.text:
- child.text = self.__handleInline(
- child.text, patternIndex + 1
- )
- if child.tail:
- child.tail = self.__handleInline(
- child.tail, patternIndex
- )
-
- placeholder = self.__stashNode(node, pattern.type())
-
- return "%s%s%s%s" % (leftData,
- match.group(1),
- placeholder, match.groups()[-1]), True, 0
-
- def run(self, tree):
- """Apply inline patterns to a parsed Markdown tree.
-
- Iterate over ElementTree, find elements with inline tag, apply inline
-        patterns and append newly created Elements to the tree. If you don't
-        want your data processed by inline patterns, use the AtomicString
-        subclass instead of a normal string:
-
- node.text = markdown.AtomicString("This will not be processed.")
-
- Arguments:
-
- * tree: ElementTree object, representing Markdown tree.
-
- Returns: ElementTree object with applied inline patterns.
-
- """
- self.stashed_nodes = {}
-
- stack = [tree]
-
- while stack:
- currElement = stack.pop()
- insertQueue = []
- for child in currElement:
- if child.text and not isinstance(
- child.text, util.AtomicString
- ):
- text = child.text
- child.text = None
- lst = self.__processPlaceholders(
- self.__handleInline(text), child
- )
- stack += lst
- insertQueue.append((child, lst))
- if child.tail:
- tail = self.__handleInline(child.tail)
- dumby = util.etree.Element('d')
- child.tail = None
- tailResult = self.__processPlaceholders(tail, dumby, False)
- if dumby.tail:
- child.tail = dumby.tail
- pos = list(currElement).index(child) + 1
- tailResult.reverse()
- for newChild in tailResult:
- currElement.insert(pos, newChild)
- if len(child):
- stack.append(child)
-
- for element, lst in insertQueue:
- if self.markdown.enable_attributes:
- if element.text and isString(element.text):
- element.text = inlinepatterns.handleAttributes(
- element.text, element
- )
- i = 0
- for newChild in lst:
- if self.markdown.enable_attributes:
- # Processing attributes
- if newChild.tail and isString(newChild.tail):
- newChild.tail = inlinepatterns.handleAttributes(
- newChild.tail, element
- )
- if newChild.text and isString(newChild.text):
- newChild.text = inlinepatterns.handleAttributes(
- newChild.text, newChild
- )
- element.insert(i, newChild)
- i += 1
- return tree
-
-
-class PrettifyTreeprocessor(Treeprocessor):
- """ Add linebreaks to the html document. """
-
- def _prettifyETree(self, elem):
- """ Recursively add linebreaks to ElementTree children. """
-
- i = "\n"
- if util.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']:
- if (not elem.text or not elem.text.strip()) \
- and len(elem) and util.isBlockLevel(elem[0].tag):
- elem.text = i
- for e in elem:
- if util.isBlockLevel(e.tag):
- self._prettifyETree(e)
- if not elem.tail or not elem.tail.strip():
- elem.tail = i
- if not elem.tail or not elem.tail.strip():
- elem.tail = i
-
- def run(self, root):
- """ Add linebreaks to ElementTree root object. """
-
- self._prettifyETree(root)
-        # Do <br />'s separately as they are often in the middle of
- # inline content and missed by _prettifyETree.
- brs = root.getiterator('br')
- for br in brs:
- if not br.tail or not br.tail.strip():
- br.tail = '\n'
- else:
- br.tail = '\n%s' % br.tail
- # Clean up extra empty lines at end of code blocks.
- pres = root.getiterator('pre')
- for pre in pres:
- if len(pre) and pre[0].tag == 'code':
- pre[0].text = util.AtomicString(pre[0].text.rstrip() + '\n')
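
A treeprocessor against the external package sees the same ElementTree-based contract as the module deleted above. A hedged sketch that adds rel="nofollow" to generated links; the extension and key names are illustrative:

    import markdown
    from markdown.extensions import Extension
    from markdown.treeprocessors import Treeprocessor

    class NofollowTreeprocessor(Treeprocessor):
        def run(self, root):
            # Walk the parsed tree and tag every anchor; returning None
            # keeps the modified tree.
            for a in root.iter('a'):
                a.set('rel', 'nofollow')

    class NofollowExtension(Extension):
        def extendMarkdown(self, md, md_globals):
            # Run after inline patterns have created the <a> elements.
            md.treeprocessors.add('nofollow', NofollowTreeprocessor(md), '>inline')

    print(markdown.markdown('[calibre](https://calibre-ebook.com)',
                            extensions=[NofollowExtension()]))
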
diff --git a/src/calibre/ebooks/markdown/util.py b/src/calibre/ebooks/markdown/util.py
deleted file mode 100644
index d3d48f0999..0000000000
--- a/src/calibre/ebooks/markdown/util.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals
-import re
-import sys
-
-
-"""
-Python 3 Stuff
-=============================================================================
-"""
-PY3 = sys.version_info[0] == 3
-
-if PY3: # pragma: no cover
- string_type = str
- text_type = str
- int2str = chr
-else: # pragma: no cover
- string_type = basestring # noqa
- text_type = unicode # noqa
- int2str = unichr # noqa
-
-
-"""
-Constants you might want to modify
------------------------------------------------------------------------------
-"""
-
-
-BLOCK_LEVEL_ELEMENTS = re.compile(
- "^(p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
- "|script|noscript|form|fieldset|iframe|math"
- "|hr|hr/|style|li|dt|dd|thead|tbody"
- "|tr|th|td|section|footer|header|group|figure"
- "|figcaption|aside|article|canvas|output"
- "|progress|video|nav)$",
- re.IGNORECASE
-)
-# Placeholders
-STX = '\u0002' # Use STX ("Start of text") for start-of-placeholder
-ETX = '\u0003' # Use ETX ("End of text") for end-of-placeholder
-INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
-INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
-INLINE_PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]+)')
-AMP_SUBSTITUTE = STX+"amp"+ETX
-HTML_PLACEHOLDER = STX + "wzxhzdk:%s" + ETX
-HTML_PLACEHOLDER_RE = re.compile(HTML_PLACEHOLDER % r'([0-9]+)')
-TAG_PLACEHOLDER = STX + "hzzhzkh:%s" + ETX
-
-
-"""
-Constants you probably do not need to change
------------------------------------------------------------------------------
-"""
-
-RTL_BIDI_RANGES = (
- ('\u0590', '\u07FF'),
- # Hebrew (0590-05FF), Arabic (0600-06FF),
- # Syriac (0700-074F), Arabic supplement (0750-077F),
- # Thaana (0780-07BF), Nko (07C0-07FF).
- ('\u2D30', '\u2D7F') # Tifinagh
-)
-
-# Extensions should use "markdown.util.etree" instead of "etree" (or do `from
-# markdown.util import etree`). Do not import it by yourself.
-
-try: # pragma: no cover
- # Is the C implementation of ElementTree available?
- import xml.etree.cElementTree as etree
- from xml.etree.ElementTree import Comment
- # Serializers (including ours) test with non-c Comment
- etree.test_comment = Comment
- if etree.VERSION < "1.0.5":
- raise RuntimeError("cElementTree version 1.0.5 or higher is required.")
-except (ImportError, RuntimeError): # pragma: no cover
- # Use the Python implementation of ElementTree?
- import xml.etree.ElementTree as etree
- if etree.VERSION < "1.1":
- raise RuntimeError("ElementTree version 1.1 or higher is required")
-
-
-"""
-AUXILIARY GLOBAL FUNCTIONS
-=============================================================================
-"""
-
-
-def isBlockLevel(tag):
- """Check if the tag is a block level HTML tag."""
- if isinstance(tag, string_type):
- return BLOCK_LEVEL_ELEMENTS.match(tag)
- # Some ElementTree tags are not strings, so return False.
- return False
-
-
-def parseBoolValue(value, fail_on_errors=True, preserve_none=False):
- """Parses a string representing bool value. If parsing was successful,
- returns True or False. If preserve_none=True, returns True, False,
- or None. If parsing was not successful, raises ValueError, or, if
- fail_on_errors=False, returns None."""
- if not isinstance(value, string_type):
- if preserve_none and value is None:
- return value
- return bool(value)
- elif preserve_none and value.lower() == 'none':
- return None
- elif value.lower() in ('true', 'yes', 'y', 'on', '1'):
- return True
- elif value.lower() in ('false', 'no', 'n', 'off', '0', 'none'):
- return False
- elif fail_on_errors:
- raise ValueError('Cannot parse bool value: %r' % value)
-
-
-"""
-MISC AUXILIARY CLASSES
-=============================================================================
-"""
-
-
-class AtomicString(text_type):
- """A string which should not be further processed."""
- pass
-
-
-class Processor(object):
- def __init__(self, markdown_instance=None):
- if markdown_instance:
- self.markdown = markdown_instance
-
-
-class HtmlStash(object):
- """
- This class is used for stashing HTML objects that we extract
- in the beginning and replace with place-holders.
- """
-
- def __init__(self):
- """ Create a HtmlStash. """
- self.html_counter = 0 # for counting inline html segments
- self.rawHtmlBlocks = []
- self.tag_counter = 0
- self.tag_data = [] # list of dictionaries in the order tags appear
-
- def store(self, html, safe=False):
- """
- Saves an HTML segment for later reinsertion. Returns a
- placeholder string that needs to be inserted into the
- document.
-
- Keyword arguments:
-
- * html: an html segment
- * safe: label an html segment as safe for safemode
-
- Returns : a placeholder string
-
- """
- self.rawHtmlBlocks.append((html, safe))
- placeholder = self.get_placeholder(self.html_counter)
- self.html_counter += 1
- return placeholder
-
- def reset(self):
- self.html_counter = 0
- self.rawHtmlBlocks = []
-
- def get_placeholder(self, key):
- return HTML_PLACEHOLDER % key
-
- def store_tag(self, tag, attrs, left_index, right_index):
- """Store tag data and return a placeholder."""
- self.tag_data.append({'tag': tag, 'attrs': attrs,
- 'left_index': left_index,
- 'right_index': right_index})
- placeholder = TAG_PLACEHOLDER % str(self.tag_counter)
- self.tag_counter += 1 # equal to the tag's index in self.tag_data
- return placeholder
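
As a quick illustration of parseBoolValue() above (assuming the external package's markdown.util behaves the same way as this deleted copy):

    from markdown.util import parseBoolValue

    assert parseBoolValue('yes') is True
    assert parseBoolValue('off') is False
    assert parseBoolValue('none', preserve_none=True) is None     # explicit None
    assert parseBoolValue('maybe', fail_on_errors=False) is None  # unparseable, no error
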
diff --git a/src/calibre/startup.py b/src/calibre/startup.py
index b40f9fac74..b1be5025f7 100644
--- a/src/calibre/startup.py
+++ b/src/calibre/startup.py
@@ -41,13 +41,14 @@ if not _run_once:
class DeVendor(object):
def find_module(self, fullname, path=None):
- if fullname == 'calibre.web.feeds.feedparser':
+ if fullname == 'calibre.web.feeds.feedparser' or fullname.startswith('calibre.ebooks.markdown'):
return self
def load_module(self, fullname):
+ from importlib import import_module
if fullname == 'calibre.web.feeds.feedparser':
- from importlib import import_module
return import_module('feedparser')
+ return import_module(fullname[len('calibre.ebooks.'):])
sys.meta_path.insert(0, DeVendor())
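
The net effect of the extended DeVendor hook is that any import of the old vendored path resolves to the external python-markdown package by stripping the 'calibre.ebooks.' prefix. A rough standalone illustration of just that mapping (it assumes python-markdown is installed; the helper name is hypothetical):

    from importlib import import_module

    def devendor(fullname):
        # 'calibre.ebooks.markdown.postprocessors' -> 'markdown.postprocessors'
        return import_module(fullname[len('calibre.ebooks.'):])

    md = devendor('calibre.ebooks.markdown')
    pp = devendor('calibre.ebooks.markdown.postprocessors')
    print(md.__name__, pp.__name__)  # markdown markdown.postprocessors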