Update bundled markdown to version 2.6.3

Kovid Goyal 2015-11-03 08:06:45 +05:30
parent 72aa133cc6
commit 700c77ddcd
31 changed files with 1649 additions and 1262 deletions


@ -10,7 +10,7 @@ called from the command line.
import markdown
html = markdown.markdown(your_text_string)
- See <http://packages.python.org/Markdown/> for more
+ See <https://pythonhosted.org/Markdown/> for more
information and instructions on how to extend the functionality of
Python Markdown. Read that before you try modifying this file.
@ -32,11 +32,12 @@ License: BSD (see LICENSE for details).
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import unicode_literals from __future__ import unicode_literals
from .__version__ import version, version_info from .__version__ import version, version_info # noqa
import re
import codecs import codecs
import sys import sys
import logging import logging
import warnings
import importlib
from . import util from . import util
from .preprocessors import build_preprocessors from .preprocessors import build_preprocessors
from .blockprocessors import build_block_parser from .blockprocessors import build_block_parser
@ -48,6 +49,7 @@ from .serializers import to_html_string, to_xhtml_string
__all__ = ['Markdown', 'markdown', 'markdownFromFile'] __all__ = ['Markdown', 'markdown', 'markdownFromFile']
logger = logging.getLogger('MARKDOWN') logger = logging.getLogger('MARKDOWN')
@ -57,24 +59,24 @@ class Markdown(object):
doc_tag = "div" # Element used to wrap document - later removed doc_tag = "div" # Element used to wrap document - later removed
option_defaults = { option_defaults = {
'html_replacement_text' : '[HTML_REMOVED]', 'html_replacement_text': '[HTML_REMOVED]',
'tab_length' : 4, 'tab_length': 4,
'enable_attributes' : True, 'enable_attributes': True,
'smart_emphasis' : True, 'smart_emphasis': True,
'lazy_ol' : True, 'lazy_ol': True,
} }
output_formats = { output_formats = {
'html' : to_html_string, 'html': to_html_string,
'html4' : to_html_string, 'html4': to_html_string,
'html5' : to_html_string, 'html5': to_html_string,
'xhtml' : to_xhtml_string, 'xhtml': to_xhtml_string,
'xhtml1': to_xhtml_string, 'xhtml1': to_xhtml_string,
'xhtml5': to_xhtml_string, 'xhtml5': to_xhtml_string,
} }
ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']', ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']',
'(', ')', '>', '#', '+', '-', '.', '!'] '(', ')', '>', '#', '+', '-', '.', '!']
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
""" """
@ -86,36 +88,42 @@ class Markdown(object):
If they are of type string, the module mdx_name.py will be loaded.
If they are a subclass of markdown.Extension, they will be used
as-is.
- * extension_configs: Configuration settingis for extensions.
+ * extension_configs: Configuration settings for extensions.
* output_format: Format of output. Supported formats are:
* "xhtml1": Outputs XHTML 1.x. Default.
* "xhtml5": Outputs XHTML style tags of HTML 5
- * "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1).
+ * "xhtml": Outputs latest supported version of XHTML
+   (currently XHTML 1.1).
* "html4": Outputs HTML 4
* "html5": Outputs HTML style tags of HTML 5
- * "html": Outputs latest supported version of HTML (currently HTML 4).
+ * "html": Outputs latest supported version of HTML
+   (currently HTML 4).
Note that it is suggested that the more specific formats ("xhtml1"
and "html4") be used as "xhtml" or "html" may change in the future
if it makes sense at that time.
- * safe_mode: Disallow raw html. One of "remove", "replace" or "escape".
- * html_replacement_text: Text used when safe_mode is set to "replace".
+ * safe_mode: Deprecated! Disallow raw html. One of "remove", "replace"
+   or "escape".
+ * html_replacement_text: Deprecated! Text used when safe_mode is set
+   to "replace".
* tab_length: Length of tabs in the source. Default: 4
* enable_attributes: Enable the conversion of attributes. Default: True
- * smart_emphasis: Treat `_connected_words_` intelegently Default: True
+ * smart_emphasis: Treat `_connected_words_` intelligently Default: True
* lazy_ol: Ignore number of first item of ordered lists. Default: True
"""
# For backward compatibility, loop through old positional args # For backward compatibility, loop through old positional args
pos = ['extensions', 'extension_configs', 'safe_mode', 'output_format'] pos = ['extensions', 'extension_configs', 'safe_mode', 'output_format']
c = 0 for c, arg in enumerate(args):
for arg in args:
if pos[c] not in kwargs: if pos[c] not in kwargs:
kwargs[pos[c]] = arg kwargs[pos[c]] = arg
c += 1 if c+1 == len(pos): # pragma: no cover
if c == len(pos):
# ignore any additional args # ignore any additional args
break break
if len(args):
warnings.warn('Positional arguments are deprecated in Markdown. '
'Use keyword arguments only.',
DeprecationWarning)
# Loop through kwargs and assign defaults # Loop through kwargs and assign defaults
for option, default in self.option_defaults.items(): for option, default in self.option_defaults.items():
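
The hunk above documents the constructor's keyword options and deprecates positional arguments. A minimal sketch of the keyword-only calling style it points to, assuming calibre's bundled import path; the option values are just the documented defaults:

import calibre.ebooks.markdown as markdown

md = markdown.Markdown(
    output_format='xhtml1',   # default; 'html4', 'html5', 'xhtml5', ... also accepted
    tab_length=4,
    smart_emphasis=True,
    lazy_ol=True,
)
html = md.convert('Some *Markdown* text.')
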
@ -126,6 +134,19 @@ class Markdown(object):
# Disable attributes in safeMode when not explicitly set # Disable attributes in safeMode when not explicitly set
self.enable_attributes = False self.enable_attributes = False
if 'safe_mode' in kwargs:
warnings.warn('"safe_mode" is deprecated in Python-Markdown. '
'Use an HTML sanitizer (like '
'Bleach http://bleach.readthedocs.org/) '
'if you are parsing untrusted markdown text. '
'See the 2.6 release notes for more info',
DeprecationWarning)
if 'html_replacement_text' in kwargs:
warnings.warn('The "html_replacement_text" keyword is '
'deprecated along with "safe_mode".',
DeprecationWarning)
self.registeredExtensions = [] self.registeredExtensions = []
self.docType = "" self.docType = ""
self.stripTopLevelTags = True self.stripTopLevelTags = True
@ -134,9 +155,9 @@ class Markdown(object):
self.references = {} self.references = {}
self.htmlStash = util.HtmlStash() self.htmlStash = util.HtmlStash()
self.set_output_format(kwargs.get('output_format', 'xhtml1'))
self.registerExtensions(extensions=kwargs.get('extensions', []), self.registerExtensions(extensions=kwargs.get('extensions', []),
configs=kwargs.get('extension_configs', {})) configs=kwargs.get('extension_configs', {}))
self.set_output_format(kwargs.get('output_format', 'xhtml1'))
self.reset() self.reset()
def build_parser(self): def build_parser(self):
@ -161,9 +182,13 @@ class Markdown(object):
""" """
for ext in extensions: for ext in extensions:
if isinstance(ext, util.string_type): if isinstance(ext, util.string_type):
ext = self.build_extension(ext, configs.get(ext, [])) ext = self.build_extension(ext, configs.get(ext, {}))
if isinstance(ext, Extension): if isinstance(ext, Extension):
ext.extendMarkdown(self, globals()) ext.extendMarkdown(self, globals())
logger.debug(
'Successfully loaded extension "%s.%s".'
% (ext.__class__.__module__, ext.__class__.__name__)
)
elif ext is not None: elif ext is not None:
raise TypeError( raise TypeError(
'Extension "%s.%s" must be of type: "markdown.Extension"' 'Extension "%s.%s" must be of type: "markdown.Extension"'
@ -171,7 +196,7 @@ class Markdown(object):
return self return self
def build_extension(self, ext_name, configs = []): def build_extension(self, ext_name, configs):
"""Build extension by name, then return the module. """Build extension by name, then return the module.
The extension name may contain arguments as part of the string in the The extension name may contain arguments as part of the string in the
@ -179,44 +204,102 @@ class Markdown(object):
""" """
# Parse extensions config params (ignore the order)
configs = dict(configs) configs = dict(configs)
pos = ext_name.find("(") # find the first "("
# Parse extensions config params (ignore the order)
pos = ext_name.find("(") # find the first "("
if pos > 0: if pos > 0:
ext_args = ext_name[pos+1:-1] ext_args = ext_name[pos+1:-1]
ext_name = ext_name[:pos] ext_name = ext_name[:pos]
pairs = [x.split("=") for x in ext_args.split(",")] pairs = [x.split("=") for x in ext_args.split(",")]
configs.update([(x.strip(), y.strip()) for (x, y) in pairs]) configs.update([(x.strip(), y.strip()) for (x, y) in pairs])
warnings.warn('Setting configs in the Named Extension string is '
'deprecated. It is recommended that you '
'pass an instance of the extension class to '
'Markdown or use the "extension_configs" keyword. '
'The current behavior will raise an error in version 2.7. '
'See the Release Notes for Python-Markdown version '
'2.6 for more info.', DeprecationWarning)
# Setup the module name # Get class name (if provided): `path.to.module:ClassName`
module_name = ext_name ext_name, class_name = ext_name.split(':', 1) \
if ':' in ext_name else (ext_name, '')
if '.' not in ext_name: if '.' not in ext_name:
module_name = '.'.join(['calibre.ebooks.markdown.extensions', ext_name]) ext_name = 'markdown.extensions.' + ext_name
if ext_name.startswith('markdown.'):
ext_name = 'calibre.ebooks.' + ext_name
# Try loading the extension first from one place, then another # Try loading the extension first from one place, then another
try: # New style (markdown.extensons.<extension>) try:
module = __import__(module_name, {}, {}, [module_name.rpartition('.')[0]]) # Assume string uses dot syntax (`path.to.some.module`)
module = importlib.import_module(ext_name)
logger.debug(
'Successfuly imported extension module "%s".' % ext_name
)
# For backward compat (until deprecation)
# check that this is an extension.
if ('.' not in ext_name and not (hasattr(module, 'makeExtension') or
(class_name and hasattr(module, class_name)))):
# We have a name conflict
# eg: extensions=['tables'] and PyTables is installed
raise ImportError
except ImportError: except ImportError:
module_name_old_style = '_'.join(['mdx', ext_name]) # Preppend `markdown.extensions.` to name
try: # Old style (mdx_<extension>) module_name = '.'.join(['markdown.extensions', ext_name])
module = __import__(module_name_old_style) try:
except ImportError as e: module = importlib.import_module(module_name)
message = "Failed loading extension '%s' from '%s' or '%s'" \ logger.debug(
% (ext_name, module_name, module_name_old_style) 'Successfuly imported extension module "%s".' %
module_name
)
warnings.warn('Using short names for Markdown\'s builtin '
'extensions is deprecated. Use the '
'full path to the extension with Python\'s dot '
'notation (eg: "%s" instead of "%s"). The '
'current behavior will raise an error in version '
'2.7. See the Release Notes for '
'Python-Markdown version 2.6 for more info.' %
(module_name, ext_name),
DeprecationWarning)
except ImportError:
# Preppend `mdx_` to name
module_name_old_style = '_'.join(['mdx', ext_name])
try:
module = importlib.import_module(module_name_old_style)
logger.debug(
'Successfuly imported extension module "%s".' %
module_name_old_style)
warnings.warn('Markdown\'s behavior of prepending "mdx_" '
'to an extension name is deprecated. '
'Use the full path to the '
'extension with Python\'s dot notation '
'(eg: "%s" instead of "%s"). The current '
'behavior will raise an error in version 2.7. '
'See the Release Notes for Python-Markdown '
'version 2.6 for more info.' %
(module_name_old_style, ext_name),
DeprecationWarning)
except ImportError as e:
message = "Failed loading extension '%s' from '%s', '%s' " \
"or '%s'" % (ext_name, ext_name, module_name,
module_name_old_style)
e.args = (message,) + e.args[1:]
raise
if class_name:
# Load given class name from module.
return getattr(module, class_name)(**configs)
else:
# Expect makeExtension() function to return a class.
try:
return module.makeExtension(**configs)
except AttributeError as e:
message = e.args[0]
message = "Failed to initiate extension " \
"'%s': %s" % (ext_name, message)
e.args = (message,) + e.args[1:] e.args = (message,) + e.args[1:]
raise raise
# If the module is loaded successfully, we expect it to define a
# function called makeExtension()
try:
return module.makeExtension(configs.items())
except AttributeError as e:
message = e.args[0]
message = "Failed to initiate extension " \
"'%s': %s" % (ext_name, message)
e.args = (message,) + e.args[1:]
raise
def registerExtension(self, extension): def registerExtension(self, extension):
""" This gets called by the extension """ """ This gets called by the extension """
self.registeredExtensions.append(extension) self.registeredExtensions.append(extension)
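
The rewritten build_extension() above resolves dotted module paths, supports an optional ':ClassName' suffix, and deprecates both short names and configs embedded in the extension string. A hedged sketch of the two loading styles it favours; the toc extension and its 'title' setting are assumptions for illustration, not part of this commit:

import calibre.ebooks.markdown as markdown
from calibre.ebooks.markdown.extensions.toc import TocExtension

text = '# Heading\n\nBody text.'

# Preferred: pass an already-configured Extension instance.
html = markdown.markdown(text, extensions=[TocExtension(title='Contents')])

# Also accepted: a dotted path, with settings supplied separately through
# 'extension_configs' (keys must match the string given in 'extensions').
html = markdown.markdown(
    text,
    extensions=['calibre.ebooks.markdown.extensions.toc'],
    extension_configs={
        'calibre.ebooks.markdown.extensions.toc': {'title': 'Contents'},
    },
)
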
@ -244,8 +327,8 @@ class Markdown(object):
valid_formats = list(self.output_formats.keys()) valid_formats = list(self.output_formats.keys())
valid_formats.sort() valid_formats.sort()
message = 'Invalid Output Format: "%s". Use one of %s.' \ message = 'Invalid Output Format: "%s". Use one of %s.' \
% (self.output_format, % (self.output_format,
'"' + '", "'.join(valid_formats) + '"') '"' + '", "'.join(valid_formats) + '"')
e.args = (message,) + e.args[1:] e.args = (message,) + e.args[1:]
raise raise
return self return self
@ -294,23 +377,25 @@ class Markdown(object):
# Run the tree-processors # Run the tree-processors
for treeprocessor in self.treeprocessors.values(): for treeprocessor in self.treeprocessors.values():
newRoot = treeprocessor.run(root) newRoot = treeprocessor.run(root)
if newRoot: if newRoot is not None:
root = newRoot root = newRoot
# Serialize _properly_. Strip top-level tags. # Serialize _properly_. Strip top-level tags.
output = self.serializer(root) output = self.serializer(root)
if self.stripTopLevelTags: if self.stripTopLevelTags:
try: try:
start = output.index('<%s>'%self.doc_tag)+len(self.doc_tag)+2 start = output.index(
end = output.rindex('</%s>'%self.doc_tag) '<%s>' % self.doc_tag) + len(self.doc_tag) + 2
end = output.rindex('</%s>' % self.doc_tag)
output = output[start:end].strip() output = output[start:end].strip()
except ValueError: except ValueError: # pragma: no cover
if output.strip().endswith('<%s />'%self.doc_tag): if output.strip().endswith('<%s />' % self.doc_tag):
# We have an empty document # We have an empty document
output = '' output = ''
else: else:
# We have a serious problem # We have a serious problem
raise ValueError('Markdown failed to strip top-level tags. Document=%r' % output.strip()) raise ValueError('Markdown failed to strip top-level '
'tags. Document=%r' % output.strip())
# Run the text post-processors # Run the text post-processors
for pp in self.postprocessors.values(): for pp in self.postprocessors.values():
@ -319,7 +404,7 @@ class Markdown(object):
return output.strip() return output.strip()
def convertFile(self, input=None, output=None, encoding=None): def convertFile(self, input=None, output=None, encoding=None):
"""Converts a markdown file and returns the HTML as a unicode string. """Converts a Markdown file and returns the HTML as a Unicode string.
Decodes the file using the provided encoding (defaults to utf-8), Decodes the file using the provided encoding (defaults to utf-8),
passes the file content to markdown, and outputs the html to either passes the file content to markdown, and outputs the html to either
@ -327,9 +412,9 @@ class Markdown(object):
encoding as the source file. The 'xmlcharrefreplace' error handler is encoding as the source file. The 'xmlcharrefreplace' error handler is
used when encoding the output. used when encoding the output.
**Note:** This is the only place that decoding and encoding of unicode **Note:** This is the only place that decoding and encoding of Unicode
takes place in Python-Markdown. (All other code is unicode-in / takes place in Python-Markdown. (All other code is Unicode-in /
unicode-out.) Unicode-out.)
Keyword arguments: Keyword arguments:
@ -354,7 +439,7 @@ class Markdown(object):
if not isinstance(text, util.text_type): if not isinstance(text, util.text_type):
text = text.decode(encoding) text = text.decode(encoding)
text = text.lstrip('\ufeff') # remove the byte-order mark text = text.lstrip('\ufeff') # remove the byte-order mark
# Convert # Convert
html = self.convert(text) html = self.convert(text)
@ -373,7 +458,7 @@ class Markdown(object):
output_file.write(html) output_file.write(html)
# Don't close here. User may want to write more. # Don't close here. User may want to write more.
else: else:
# Encode manually and write bytes to stdout. # Encode manually and write bytes to stdout.
html = html.encode(encoding, "xmlcharrefreplace") html = html.encode(encoding, "xmlcharrefreplace")
try: try:
# Write bytes directly to buffer (Python 3). # Write bytes directly to buffer (Python 3).
@ -393,8 +478,9 @@ Those are the two functions we really mean to export: markdown() and
markdownFromFile(). markdownFromFile().
""" """
def markdown(text, *args, **kwargs): def markdown(text, *args, **kwargs):
"""Convert a markdown string to HTML and return HTML as a unicode string. """Convert a Markdown string to HTML and return HTML as a Unicode string.
This is a shortcut function for `Markdown` class to cover the most This is a shortcut function for `Markdown` class to cover the most
basic use case. It initializes an instance of Markdown, loads the basic use case. It initializes an instance of Markdown, loads the
@ -435,9 +521,13 @@ def markdownFromFile(*args, **kwargs):
c += 1 c += 1
if c == len(pos): if c == len(pos):
break break
if len(args):
warnings.warn('Positional arguments are depreacted in '
'Markdown and will raise an error in version 2.7. '
'Use keyword arguments only.',
DeprecationWarning)
md = Markdown(**kwargs) md = Markdown(**kwargs)
md.convertFile(kwargs.get('input', None), md.convertFile(kwargs.get('input', None),
kwargs.get('output', None), kwargs.get('output', None),
kwargs.get('encoding', None)) kwargs.get('encoding', None))
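
markdownFromFile() now carries the same positional-argument deprecation; a small keyword-only usage sketch (file names are invented, the keyword names mirror the code above):

import calibre.ebooks.markdown as markdown

markdown.markdownFromFile(
    input='chapter.txt',      # hypothetical input file
    output='chapter.html',    # hypothetical output file
    encoding='utf-8',
    output_format='html5',
)
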


@ -4,23 +4,30 @@ COMMAND-LINE SPECIFIC STUFF
""" """
import calibre.ebooks.markdown as markdown
import sys import sys
import optparse import optparse
import codecs
import warnings
import calibre.ebooks.markdown as markdown
try:
import yaml
except ImportError: # pragma: no cover
import json as yaml
import logging import logging
from logging import DEBUG, INFO, CRITICAL from logging import DEBUG, WARNING, CRITICAL
logger = logging.getLogger('MARKDOWN') logger = logging.getLogger('MARKDOWN')
def parse_options():
def parse_options(args=None, values=None):
"""
Define and parse `optparse` options for command-line usage.
"""
usage = """%prog [options] [INPUTFILE]
(STDIN is assumed if no INPUTFILE is given)"""
desc = "A Python implementation of John Gruber's Markdown. " \
- "http://packages.python.org/Markdown/"
+ "https://pythonhosted.org/Markdown/"
ver = "%%prog %s" % markdown.version
parser = optparse.OptionParser(usage=usage, description=desc, version=ver)
@ -29,28 +36,40 @@ def parse_options():
metavar="OUTPUT_FILE") metavar="OUTPUT_FILE")
parser.add_option("-e", "--encoding", dest="encoding", parser.add_option("-e", "--encoding", dest="encoding",
help="Encoding for input and output files.",) help="Encoding for input and output files.",)
parser.add_option("-q", "--quiet", default = CRITICAL,
action="store_const", const=CRITICAL+10, dest="verbose",
help="Suppress all warnings.")
parser.add_option("-v", "--verbose",
action="store_const", const=INFO, dest="verbose",
help="Print all warnings.")
parser.add_option("-s", "--safe", dest="safe", default=False, parser.add_option("-s", "--safe", dest="safe", default=False,
metavar="SAFE_MODE", metavar="SAFE_MODE",
help="'replace', 'remove' or 'escape' HTML tags in input") help="Deprecated! 'replace', 'remove' or 'escape' HTML "
"tags in input")
parser.add_option("-o", "--output_format", dest="output_format", parser.add_option("-o", "--output_format", dest="output_format",
default='xhtml1', metavar="OUTPUT_FORMAT", default='xhtml1', metavar="OUTPUT_FORMAT",
help="'xhtml1' (default), 'html4' or 'html5'.") help="'xhtml1' (default), 'html4' or 'html5'.")
parser.add_option("--noisy",
action="store_const", const=DEBUG, dest="verbose",
help="Print debug messages.")
parser.add_option("-x", "--extension", action="append", dest="extensions",
help = "Load extension EXTENSION.", metavar="EXTENSION")
parser.add_option("-n", "--no_lazy_ol", dest="lazy_ol", parser.add_option("-n", "--no_lazy_ol", dest="lazy_ol",
action='store_false', default=True, action='store_false', default=True,
help="Observe number of first item of ordered lists.") help="Observe number of first item of ordered lists.")
parser.add_option("-x", "--extension", action="append", dest="extensions",
help="Load extension EXTENSION.", metavar="EXTENSION")
parser.add_option("-c", "--extension_configs",
dest="configfile", default=None,
help="Read extension configurations from CONFIG_FILE. "
"CONFIG_FILE must be of JSON or YAML format. YAML"
"format requires that a python YAML library be "
"installed. The parsed JSON or YAML must result in a "
"python dictionary which would be accepted by the "
"'extension_configs' keyword on the markdown.Markdown "
"class. The extensions must also be loaded with the "
"`--extension` option.",
metavar="CONFIG_FILE")
parser.add_option("-q", "--quiet", default=CRITICAL,
action="store_const", const=CRITICAL+10, dest="verbose",
help="Suppress all warnings.")
parser.add_option("-v", "--verbose",
action="store_const", const=WARNING, dest="verbose",
help="Print all warnings.")
parser.add_option("--noisy",
action="store_const", const=DEBUG, dest="verbose",
help="Print debug messages.")
(options, args) = parser.parse_args() (options, args) = parser.parse_args(args, values)
if len(args) == 0: if len(args) == 0:
input_file = None input_file = None
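
The new -c/--extension_configs option above reads a JSON or YAML file whose parsed value must be the same mapping accepted by the 'extension_configs' keyword. A hypothetical sketch; only the -x and -c flags come from the code above, the wikilinks extension and its settings are illustrative:

import json

config = {
    'markdown.extensions.wikilinks': {
        'base_url': '/wiki/',
        'end_url': '.html',
    }
}
with open('md_config.json', 'w') as fp:
    json.dump(config, fp)

# Shell usage would then look something like:
#   python -m markdown -x markdown.extensions.wikilinks -c md_config.json input.txt
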
@ -60,28 +79,58 @@ def parse_options():
if not options.extensions: if not options.extensions:
options.extensions = [] options.extensions = []
return {'input': input_file, extension_configs = {}
'output': options.filename, if options.configfile:
'safe_mode': options.safe, with codecs.open(
'extensions': options.extensions, options.configfile, mode="r", encoding=options.encoding
'encoding': options.encoding, ) as fp:
'output_format': options.output_format, try:
'lazy_ol': options.lazy_ol}, options.verbose extension_configs = yaml.load(fp)
except Exception as e:
message = "Failed parsing extension config file: %s" % \
options.configfile
e.args = (message,) + e.args[1:]
raise
def run(): opts = {
'input': input_file,
'output': options.filename,
'extensions': options.extensions,
'extension_configs': extension_configs,
'encoding': options.encoding,
'output_format': options.output_format,
'lazy_ol': options.lazy_ol
}
if options.safe:
# Avoid deprecation warning if user didn't set option
opts['safe_mode'] = options.safe
return opts, options.verbose
def run(): # pragma: no cover
"""Run Markdown from the command line.""" """Run Markdown from the command line."""
# Parse options and adjust logging level if necessary # Parse options and adjust logging level if necessary
options, logging_level = parse_options() options, logging_level = parse_options()
if not options: sys.exit(2) if not options:
sys.exit(2)
logger.setLevel(logging_level) logger.setLevel(logging_level)
logger.addHandler(logging.StreamHandler()) console_handler = logging.StreamHandler()
logger.addHandler(console_handler)
if logging_level <= WARNING:
# Ensure deprecation warnings get displayed
warnings.filterwarnings('default')
logging.captureWarnings(True)
warn_logger = logging.getLogger('py.warnings')
warn_logger.addHandler(console_handler)
# Run # Run
markdown.markdownFromFile(**options) markdown.markdownFromFile(**options)
if __name__ == '__main__':
if __name__ == '__main__': # pragma: no cover
# Support running module as a commandline command. # Support running module as a commandline command.
# Python 2.5 & 2.6 do: `python -m markdown.__main__ [options] [args]`.
# Python 2.7 & 3.x do: `python -m markdown [options] [args]`. # Python 2.7 & 3.x do: `python -m markdown [options] [args]`.
run() run()


@ -1,11 +1,12 @@
# #
# markdown/__version__.py # markdown/__version__.py
# #
# version_info should conform to PEP 386 # version_info should conform to PEP 386
# (major, minor, micro, alpha/beta/rc/final, #) # (major, minor, micro, alpha/beta/rc/final, #)
# (1, 1, 2, 'alpha', 0) => "1.1.2.dev" # (1, 1, 2, 'alpha', 0) => "1.1.2.dev"
# (1, 2, 0, 'beta', 2) => "1.2b2" # (1, 2, 0, 'beta', 2) => "1.2b2"
version_info = (2, 3, 1, 'final', 0) version_info = (2, 6, 3, 'final', 0)
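
Per the mapping sketched in the comments above, the bumped 'final' tuple serializes to a plain release string; a quick check, assuming calibre's bundled import path:

from calibre.ebooks.markdown import version, version_info

print(version_info)  # (2, 6, 3, 'final', 0)
print(version)       # '2.6.3'
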
def _get_version(): def _get_version():
" Returns a PEP 386-compliant version number from version_info. " " Returns a PEP 386-compliant version number from version_info. "


@ -3,16 +3,17 @@ from __future__ import absolute_import
from . import util
from . import odict
class State(list):
""" Track the current and nested state of the parser.
This utility class is used to track the state of the BlockParser and
support multiple levels if nesting. It's just a simple API wrapped around
a list. Each time a state is set, that state is appended to the end of the
list. Each time a state is reset, that state is removed from the end of
the list.
Therefore, each time a state is set for a nested block, that state must be
reset when we back out of that level of nesting or the state could be
corrupted.
@ -36,9 +37,10 @@ class State(list):
else:
return False
class BlockParser:
""" Parse Markdown blocks into an ElementTree object.
A wrapper class that stitches the various BlockProcessors together,
looping through them and creating an ElementTree object.
"""
@ -49,12 +51,12 @@ class BlockParser:
self.markdown = markdown self.markdown = markdown
def parseDocument(self, lines): def parseDocument(self, lines):
""" Parse a markdown document into an ElementTree. """ Parse a markdown document into an ElementTree.
Given a list of lines, an ElementTree object (not just a parent Element) Given a list of lines, an ElementTree object (not just a parent
is created and the root element is passed to the parser as the parent. Element) is created and the root element is passed to the parser
The ElementTree object is returned. as the parent. The ElementTree object is returned.
This should only be called on an entire document, not pieces. This should only be called on an entire document, not pieces.
""" """
@ -64,29 +66,30 @@ class BlockParser:
return util.etree.ElementTree(self.root)
def parseChunk(self, parent, text):
""" Parse a chunk of markdown text and attach to given etree node.
While the ``text`` argument is generally assumed to contain multiple
blocks which will be split on blank lines, it could contain only one
block. Generally, this method would be called by extensions when
block parsing is required.
The ``parent`` etree Element passed in is altered in place.
Nothing is returned.
"""
self.parseBlocks(parent, text.split('\n\n'))
def parseBlocks(self, parent, blocks): def parseBlocks(self, parent, blocks):
""" Process blocks of markdown text and attach to given etree node. """ Process blocks of markdown text and attach to given etree node.
Given a list of ``blocks``, each blockprocessor is stepped through Given a list of ``blocks``, each blockprocessor is stepped through
until there are no blocks left. While an extension could potentially until there are no blocks left. While an extension could potentially
call this method directly, it's generally expected to be used internally. call this method directly, it's generally expected to be used
internally.
This is a public method as an extension may need to add/alter additional This is a public method as an extension may need to add/alter
BlockProcessors which call this method to recursively parse a nested additional BlockProcessors which call this method to recursively
block. parse a nested block.
""" """
while blocks: while blocks:
@ -95,5 +98,3 @@ class BlockParser:
if processor.run(parent, blocks) is not False: if processor.run(parent, blocks) is not False:
# run returns True or None # run returns True or None
break break


@ -2,9 +2,9 @@
CORE MARKDOWN BLOCKPARSER CORE MARKDOWN BLOCKPARSER
=========================================================================== ===========================================================================
This parser handles basic parsing of Markdown blocks. It doesn't concern itself This parser handles basic parsing of Markdown blocks. It doesn't concern
with inline elements such as **bold** or *italics*, but rather just catches itself with inline elements such as **bold** or *italics*, but rather just
blocks, lists, quotes, etc. catches blocks, lists, quotes, etc.
The BlockParser is made up of a bunch of BlockProssors, each handling a The BlockParser is made up of a bunch of BlockProssors, each handling a
different type of block. Extensions may add/replace/remove BlockProcessors different type of block. Extensions may add/replace/remove BlockProcessors
@ -19,7 +19,7 @@ import re
from . import util from . import util
from .blockparser import BlockParser from .blockparser import BlockParser
logger = logging.getLogger('MARKDOWN') logger = logging.getLogger('MARKDOWN')
def build_block_parser(md_instance, **kwargs): def build_block_parser(md_instance, **kwargs):
@ -38,9 +38,9 @@ def build_block_parser(md_instance, **kwargs):
return parser return parser
- class BlockProcessor:
+ class BlockProcessor(object):
""" Base class for block processors.
Each subclass will provide the methods below to work with the source and
tree. Each processor will need to define it's own ``test`` and ``run``
methods. The ``test`` method should return True or False, to indicate
@ -82,32 +82,32 @@ class BlockProcessor:
return '\n'.join(lines) return '\n'.join(lines)
def test(self, parent, block): def test(self, parent, block):
""" Test for block type. Must be overridden by subclasses. """ Test for block type. Must be overridden by subclasses.
As the parser loops through processors, it will call the ``test`` method As the parser loops through processors, it will call the ``test``
on each to determine if the given block of text is of that type. This method on each to determine if the given block of text is of that
method must return a boolean ``True`` or ``False``. The actual method of type. This method must return a boolean ``True`` or ``False``. The
testing is left to the needs of that particular block type. It could actual method of testing is left to the needs of that particular
be as simple as ``block.startswith(some_string)`` or a complex regular block type. It could be as simple as ``block.startswith(some_string)``
expression. As the block type may be different depending on the parent or a complex regular expression. As the block type may be different
of the block (i.e. inside a list), the parent etree element is also depending on the parent of the block (i.e. inside a list), the parent
provided and may be used as part of the test. etree element is also provided and may be used as part of the test.
Keywords: Keywords:
* ``parent``: A etree element which will be the parent of the block. * ``parent``: A etree element which will be the parent of the block.
* ``block``: A block of text from the source which has been split at * ``block``: A block of text from the source which has been split at
blank lines. blank lines.
""" """
pass pass # pragma: no cover
def run(self, parent, blocks): def run(self, parent, blocks):
""" Run processor. Must be overridden by subclasses. """ Run processor. Must be overridden by subclasses.
When the parser determines the appropriate type of a block, the parser When the parser determines the appropriate type of a block, the parser
will call the corresponding processor's ``run`` method. This method will call the corresponding processor's ``run`` method. This method
should parse the individual lines of the block and append them to should parse the individual lines of the block and append them to
the etree. the etree.
Note that both the ``parent`` and ``etree`` keywords are pointers Note that both the ``parent`` and ``etree`` keywords are pointers
to instances of the objects which should be edited in place. Each to instances of the objects which should be edited in place. Each
@ -123,12 +123,12 @@ class BlockProcessor:
* ``parent``: A etree element which is the parent of the current block. * ``parent``: A etree element which is the parent of the current block.
* ``blocks``: A list of all remaining blocks of the document. * ``blocks``: A list of all remaining blocks of the document.
""" """
pass pass # pragma: no cover
class ListIndentProcessor(BlockProcessor):
""" Process children of list items.
Example:
* a list item
process this part
@ -141,17 +141,15 @@ class ListIndentProcessor(BlockProcessor):
LIST_TYPES = ['ul', 'ol'] LIST_TYPES = ['ul', 'ol']
def __init__(self, *args): def __init__(self, *args):
BlockProcessor.__init__(self, *args) super(ListIndentProcessor, self).__init__(*args)
self.INDENT_RE = re.compile(r'^(([ ]{%s})+)'% self.tab_length) self.INDENT_RE = re.compile(r'^(([ ]{%s})+)' % self.tab_length)
def test(self, parent, block): def test(self, parent, block):
return block.startswith(' '*self.tab_length) and \ return block.startswith(' '*self.tab_length) and \
not self.parser.state.isstate('detabbed') and \ not self.parser.state.isstate('detabbed') and \
(parent.tag in self.ITEM_TYPES or \ (parent.tag in self.ITEM_TYPES or
(len(parent) and parent[-1] and \ (len(parent) and parent[-1] is not None and
(parent[-1].tag in self.LIST_TYPES) (parent[-1].tag in self.LIST_TYPES)))
)
)
def run(self, parent, blocks): def run(self, parent, blocks):
block = blocks.pop(0) block = blocks.pop(0)
@ -162,7 +160,7 @@ class ListIndentProcessor(BlockProcessor):
if parent.tag in self.ITEM_TYPES: if parent.tag in self.ITEM_TYPES:
# It's possible that this parent has a 'ul' or 'ol' child list # It's possible that this parent has a 'ul' or 'ol' child list
# with a member. If that is the case, then that should be the # with a member. If that is the case, then that should be the
# parent. This is intended to catch the edge case of an indented # parent. This is intended to catch the edge case of an indented
# list whose first member was parsed previous to this point # list whose first member was parsed previous to this point
# see OListProcessor # see OListProcessor
if len(parent) and parent[-1].tag in self.LIST_TYPES: if len(parent) and parent[-1].tag in self.LIST_TYPES:
@ -193,7 +191,7 @@ class ListIndentProcessor(BlockProcessor):
""" Create a new li and parse the block with it as the parent. """ """ Create a new li and parse the block with it as the parent. """
li = util.etree.SubElement(parent, 'li') li = util.etree.SubElement(parent, 'li')
self.parser.parseBlocks(li, [block]) self.parser.parseBlocks(li, [block])
def get_level(self, parent, block): def get_level(self, parent, block):
""" Get level of indent based on list level. """ """ Get level of indent based on list level. """
# Get indent level # Get indent level
@ -211,7 +209,8 @@ class ListIndentProcessor(BlockProcessor):
# Step through children of tree to find matching indent level. # Step through children of tree to find matching indent level.
while indent_level > level: while indent_level > level:
child = self.lastChild(parent) child = self.lastChild(parent)
if child and (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES): if (child is not None and
(child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES)):
if child.tag in self.LIST_TYPES: if child.tag in self.LIST_TYPES:
level += 1 level += 1
parent = child parent = child
@ -227,19 +226,21 @@ class CodeBlockProcessor(BlockProcessor):
def test(self, parent, block): def test(self, parent, block):
return block.startswith(' '*self.tab_length) return block.startswith(' '*self.tab_length)
def run(self, parent, blocks): def run(self, parent, blocks):
sibling = self.lastChild(parent) sibling = self.lastChild(parent)
block = blocks.pop(0) block = blocks.pop(0)
theRest = '' theRest = ''
if sibling and sibling.tag == "pre" and len(sibling) \ if (sibling is not None and sibling.tag == "pre" and
and sibling[0].tag == "code": len(sibling) and sibling[0].tag == "code"):
# The previous block was a code block. As blank lines do not start # The previous block was a code block. As blank lines do not start
# new code blocks, append this block to the previous, adding back # new code blocks, append this block to the previous, adding back
# linebreaks removed from the split into a list. # linebreaks removed from the split into a list.
code = sibling[0] code = sibling[0]
block, theRest = self.detab(block) block, theRest = self.detab(block)
code.text = util.AtomicString('%s\n%s\n' % (code.text, block.rstrip())) code.text = util.AtomicString(
'%s\n%s\n' % (code.text, block.rstrip())
)
else: else:
# This is a new codeblock. Create the elements and insert text. # This is a new codeblock. Create the elements and insert text.
pre = util.etree.SubElement(parent, 'pre') pre = util.etree.SubElement(parent, 'pre')
@ -247,7 +248,7 @@ class CodeBlockProcessor(BlockProcessor):
block, theRest = self.detab(block) block, theRest = self.detab(block)
code.text = util.AtomicString('%s\n' % block.rstrip()) code.text = util.AtomicString('%s\n' % block.rstrip())
if theRest: if theRest:
# This block contained unindented line(s) after the first indented # This block contained unindented line(s) after the first indented
# line. Insert these lines as the first block of the master blocks # line. Insert these lines as the first block of the master blocks
# list for future processing. # list for future processing.
blocks.insert(0, theRest) blocks.insert(0, theRest)
@ -264,14 +265,15 @@ class BlockQuoteProcessor(BlockProcessor):
block = blocks.pop(0) block = blocks.pop(0)
m = self.RE.search(block) m = self.RE.search(block)
if m: if m:
before = block[:m.start()] # Lines before blockquote before = block[:m.start()] # Lines before blockquote
# Pass lines before blockquote in recursively for parsing forst. # Pass lines before blockquote in recursively for parsing forst.
self.parser.parseBlocks(parent, [before]) self.parser.parseBlocks(parent, [before])
# Remove ``> `` from begining of each line. # Remove ``> `` from begining of each line.
block = '\n'.join([self.clean(line) for line in block = '\n'.join(
block[m.start():].split('\n')]) [self.clean(line) for line in block[m.start():].split('\n')]
)
sibling = self.lastChild(parent) sibling = self.lastChild(parent)
if sibling and sibling.tag == "blockquote": if sibling is not None and sibling.tag == "blockquote":
# Previous block was a blockquote so set that as this blocks parent # Previous block was a blockquote so set that as this blocks parent
quote = sibling quote = sibling
else: else:
@ -293,24 +295,30 @@ class BlockQuoteProcessor(BlockProcessor):
else: else:
return line return line
class OListProcessor(BlockProcessor): class OListProcessor(BlockProcessor):
""" Process ordered list blocks. """ """ Process ordered list blocks. """
TAG = 'ol' TAG = 'ol'
# Detect an item (``1. item``). ``group(1)`` contains contents of item.
RE = re.compile(r'^[ ]{0,3}\d+\.[ ]+(.*)')
# Detect items on secondary lines. they can be of either list type.
CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.)|[*+-])[ ]+(.*)')
# Detect indented (nested) items of either type
INDENT_RE = re.compile(r'^[ ]{4,7}((\d+\.)|[*+-])[ ]+.*')
# The integer (python string) with which the lists starts (default=1) # The integer (python string) with which the lists starts (default=1)
# Eg: If list is intialized as) # Eg: If list is intialized as)
# 3. Item # 3. Item
# The ol tag will get starts="3" attribute # The ol tag will get starts="3" attribute
STARTSWITH = '1' STARTSWITH = '1'
# List of allowed sibling tags. # List of allowed sibling tags.
SIBLING_TAGS = ['ol', 'ul'] SIBLING_TAGS = ['ol', 'ul']
def __init__(self, parser):
super(OListProcessor, self).__init__(parser)
# Detect an item (``1. item``). ``group(1)`` contains contents of item.
self.RE = re.compile(r'^[ ]{0,%d}\d+\.[ ]+(.*)' % (self.tab_length - 1))
# Detect items on secondary lines. they can be of either list type.
self.CHILD_RE = re.compile(r'^[ ]{0,%d}((\d+\.)|[*+-])[ ]+(.*)' %
(self.tab_length - 1))
# Detect indented (nested) items of either type
self.INDENT_RE = re.compile(r'^[ ]{%d,%d}((\d+\.)|[*+-])[ ]+.*' %
(self.tab_length, self.tab_length * 2 - 1))
def test(self, parent, block): def test(self, parent, block):
return bool(self.RE.match(block)) return bool(self.RE.match(block))
@ -319,15 +327,15 @@ class OListProcessor(BlockProcessor):
items = self.get_items(blocks.pop(0)) items = self.get_items(blocks.pop(0))
sibling = self.lastChild(parent) sibling = self.lastChild(parent)
if sibling and sibling.tag in self.SIBLING_TAGS: if sibling is not None and sibling.tag in self.SIBLING_TAGS:
# Previous block was a list item, so set that as parent # Previous block was a list item, so set that as parent
lst = sibling lst = sibling
# make sure previous item is in a p- if the item has text, then it # make sure previous item is in a p- if the item has text,
# it isn't in a p # then it isn't in a p
if lst[-1].text: if lst[-1].text:
# since it's possible there are other children for this sibling, # since it's possible there are other children for this
# we can't just SubElement the p, we need to insert it as the # sibling, we can't just SubElement the p, we need to
# first item # insert it as the first item.
p = util.etree.Element('p') p = util.etree.Element('p')
p.text = lst[-1].text p.text = lst[-1].text
lst[-1].text = '' lst[-1].text = ''
@ -347,7 +355,7 @@ class OListProcessor(BlockProcessor):
self.parser.parseBlocks(li, [firstitem]) self.parser.parseBlocks(li, [firstitem])
self.parser.state.reset() self.parser.state.reset()
elif parent.tag in ['ol', 'ul']: elif parent.tag in ['ol', 'ul']:
# this catches the edge case of a multi-item indented list whose # this catches the edge case of a multi-item indented list whose
# first item is in a blank parent-list item: # first item is in a blank parent-list item:
# * * subitem1 # * * subitem1
# * subitem2 # * subitem2
@ -357,7 +365,7 @@ class OListProcessor(BlockProcessor):
# This is a new list so create parent with appropriate tag. # This is a new list so create parent with appropriate tag.
lst = util.etree.SubElement(parent, self.TAG) lst = util.etree.SubElement(parent, self.TAG)
# Check if a custom start integer is set # Check if a custom start integer is set
if not self.parser.markdown.lazy_ol and self.STARTSWITH !='1': if not self.parser.markdown.lazy_ol and self.STARTSWITH != '1':
lst.attrib['start'] = self.STARTSWITH lst.attrib['start'] = self.STARTSWITH
self.parser.state.set('list') self.parser.state.set('list')
@ -381,7 +389,7 @@ class OListProcessor(BlockProcessor):
if m: if m:
# This is a new list item # This is a new list item
# Check first item for the start index # Check first item for the start index
if not items and self.TAG=='ol': if not items and self.TAG == 'ol':
# Detect the integer value of first list item # Detect the integer value of first list item
INTEGER_RE = re.compile('(\d+)') INTEGER_RE = re.compile('(\d+)')
self.STARTSWITH = INTEGER_RE.match(m.group(1)).group() self.STARTSWITH = INTEGER_RE.match(m.group(1)).group()
@ -404,7 +412,11 @@ class UListProcessor(OListProcessor):
""" Process unordered list blocks. """ """ Process unordered list blocks. """
TAG = 'ul' TAG = 'ul'
RE = re.compile(r'^[ ]{0,3}[*+-][ ]+(.*)')
def __init__(self, parser):
super(UListProcessor, self).__init__(parser)
# Detect an item (``1. item``). ``group(1)`` contains contents of item.
self.RE = re.compile(r'^[ ]{0,%d}[*+-][ ]+(.*)' % (self.tab_length - 1))
class HashHeaderProcessor(BlockProcessor): class HashHeaderProcessor(BlockProcessor):
@ -420,8 +432,8 @@ class HashHeaderProcessor(BlockProcessor):
block = blocks.pop(0) block = blocks.pop(0)
m = self.RE.search(block) m = self.RE.search(block)
if m: if m:
before = block[:m.start()] # All lines before header before = block[:m.start()] # All lines before header
after = block[m.end():] # All lines after header after = block[m.end():] # All lines after header
if before: if before:
# As the header was not the first line of the block and the # As the header was not the first line of the block and the
# lines before the header must be parsed first, # lines before the header must be parsed first,
@ -433,7 +445,7 @@ class HashHeaderProcessor(BlockProcessor):
if after: if after:
# Insert remaining lines as first block for future parsing. # Insert remaining lines as first block for future parsing.
blocks.insert(0, after) blocks.insert(0, after)
else: else: # pragma: no cover
# This should never happen, but just in case... # This should never happen, but just in case...
logger.warn("We've got a problem header: %r" % block) logger.warn("We've got a problem header: %r" % block)
@ -495,7 +507,6 @@ class HRProcessor(BlockProcessor):
blocks.insert(0, postlines) blocks.insert(0, postlines)
class EmptyBlockProcessor(BlockProcessor): class EmptyBlockProcessor(BlockProcessor):
""" Process blocks that are empty or start with an empty line. """ """ Process blocks that are empty or start with an empty line. """
@ -515,9 +526,12 @@ class EmptyBlockProcessor(BlockProcessor):
# Add remaining lines to master blocks for later. # Add remaining lines to master blocks for later.
blocks.insert(0, theRest) blocks.insert(0, theRest)
sibling = self.lastChild(parent) sibling = self.lastChild(parent)
if sibling and sibling.tag == 'pre' and len(sibling) and sibling[0].tag == 'code': if (sibling is not None and sibling.tag == 'pre' and
len(sibling) and sibling[0].tag == 'code'):
# Last block is a codeblock. Append to preserve whitespace. # Last block is a codeblock. Append to preserve whitespace.
sibling[0].text = util.AtomicString('%s%s' % (sibling[0].text, filler)) sibling[0].text = util.AtomicString(
'%s%s' % (sibling[0].text, filler)
)
class ParagraphProcessor(BlockProcessor): class ParagraphProcessor(BlockProcessor):
@ -533,7 +547,7 @@ class ParagraphProcessor(BlockProcessor):
if self.parser.state.isstate('list'): if self.parser.state.isstate('list'):
# The parent is a tight-list. # The parent is a tight-list.
# #
# Check for any children. This will likely only happen in a # Check for any children. This will likely only happen in a
# tight-list when a header isn't followed by a blank line. # tight-list when a header isn't followed by a blank line.
# For example: # For example:
# #
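
The test()/run() contract spelled out in the BlockProcessor docstrings above is easiest to see in a tiny subclass; the class name, '%%%' marker and output tag below are invented for illustration:

from calibre.ebooks.markdown import util
from calibre.ebooks.markdown.blockprocessors import BlockProcessor


class BoxBlockProcessor(BlockProcessor):
    """ Wrap any block starting with '%%%' in a <div class="box">. """

    def test(self, parent, block):
        # Called by the parser loop; must return True/False for this block.
        return block.startswith('%%%')

    def run(self, parent, blocks):
        block = blocks.pop(0)
        div = util.etree.SubElement(parent, 'div')
        div.set('class', 'box')
        # Recursively parse the remainder of the block as normal Markdown.
        self.parser.parseChunk(div, block[3:].lstrip())

# An extension would register it with something like:
#   md.parser.blockprocessors.add('box', BoxBlockProcessor(md.parser), '_begin')
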


@ -4,17 +4,51 @@ Extensions
""" """
from __future__ import unicode_literals from __future__ import unicode_literals
from ..util import parseBoolValue
import warnings
class Extension(object): class Extension(object):
""" Base class for extensions to subclass. """ """ Base class for extensions to subclass. """
def __init__(self, configs = {}):
"""Create an instance of an Extention.
Keyword arguments: # Default config -- to be overriden by a subclass
# Must be of the following format:
# {
# 'key': ['value', 'description']
# }
# Note that Extension.setConfig will raise a KeyError
# if a default is not set here.
config = {}
* configs: A dict of configuration setting used by an Extension. def __init__(self, *args, **kwargs):
""" """ Initiate Extension and set up configs. """
self.config = configs
# check for configs arg for backward compat.
# (there only ever used to be one so we use arg[0])
if len(args):
if args[0] is not None:
self.setConfigs(args[0])
warnings.warn('Extension classes accepting positional args is '
'pending Deprecation. Each setting should be '
'passed into the Class as a keyword. Positional '
'args are deprecated and will raise '
'an error in version 2.7. See the Release Notes for '
'Python-Markdown version 2.6 for more info.',
DeprecationWarning)
# check for configs kwarg for backward compat.
if 'configs' in kwargs.keys():
if kwargs['configs'] is not None:
self.setConfigs(kwargs.pop('configs', {}))
warnings.warn('Extension classes accepting a dict on the single '
'keyword "config" is pending Deprecation. Each '
'setting should be passed into the Class as a '
'keyword directly. The "config" keyword is '
'deprecated and raise an error in '
'version 2.7. See the Release Notes for '
'Python-Markdown version 2.6 for more info.',
DeprecationWarning)
# finally, use kwargs
self.setConfigs(kwargs)
def getConfig(self, key, default=''): def getConfig(self, key, default=''):
""" Return a setting for the given key or an empty string. """ """ Return a setting for the given key or an empty string. """
@ -33,8 +67,20 @@ class Extension(object):
def setConfig(self, key, value): def setConfig(self, key, value):
""" Set a config setting for `key` with the given `value`. """ """ Set a config setting for `key` with the given `value`. """
if isinstance(self.config[key][0], bool):
value = parseBoolValue(value)
if self.config[key][0] is None:
value = parseBoolValue(value, preserve_none=True)
self.config[key][0] = value self.config[key][0] = value
def setConfigs(self, items):
""" Set multiple config settings given a dict or list of tuples. """
if hasattr(items, 'items'):
# it's a dict
items = items.items()
for key, value in items:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals): def extendMarkdown(self, md, md_globals):
""" """
Add the various proccesors and patterns to the Markdown Instance. Add the various proccesors and patterns to the Markdown Instance.
@ -48,6 +94,7 @@ class Extension(object):
* md_globals: Global variables in the markdown module namespace. * md_globals: Global variables in the markdown module namespace.
""" """
raise NotImplementedError('Extension "%s.%s" must define an "extendMarkdown"' \ raise NotImplementedError(
'method.' % (self.__class__.__module__, self.__class__.__name__)) 'Extension "%s.%s" must define an "extendMarkdown"'
'method.' % (self.__class__.__module__, self.__class__.__name__)
)
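
The keyword-based __init__ and the documented config layout above suggest the following shape for a subclass; every name here is invented for illustration, only the base-class API comes from the code above:

from calibre.ebooks.markdown.extensions import Extension
from calibre.ebooks.markdown.preprocessors import Preprocessor


class ShoutingPreprocessor(Preprocessor):
    def run(self, lines):
        return [line.upper() for line in lines]


class ShoutingExtension(Extension):
    # Defaults must be declared here, or setConfig() raises a KeyError.
    config = {
        'enabled': [True, 'Upper-case every source line'],
    }

    def extendMarkdown(self, md, md_globals):
        if self.getConfig('enabled'):
            md.preprocessors.add('shouting', ShoutingPreprocessor(md), '_begin')


def makeExtension(*args, **kwargs):
    # Settings now arrive as keywords, e.g. ShoutingExtension(enabled=False).
    return ShoutingExtension(*args, **kwargs)
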


@ -4,22 +4,15 @@ Abbreviation Extension for Python-Markdown
This extension adds abbreviation handling to Python-Markdown. This extension adds abbreviation handling to Python-Markdown.
Simple Usage: See <https://pythonhosted.org/Markdown/extensions/abbreviations.html>
for documentation.
>>> import markdown Oringinal code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/) and
>>> text = """ [Seemant Kulleen](http://www.kulleen.org/)
... Some text with an ABBR and a REF. Ignore REFERENCE and ref.
...
... *[ABBR]: Abbreviation
... *[REF]: Abbreviation Reference
... """
>>> print markdown.markdown(text, ['abbr'])
<p>Some text with an <abbr title="Abbreviation">ABBR</abbr> and a <abbr title="Abbreviation Reference">REF</abbr>. Ignore REFERENCE and ref.</p>
Copyright 2007-2008 All changes Copyright 2008-2014 The Python Markdown Project
* [Waylan Limberg](http://achinghead.com/)
* [Seemant Kulleen](http://www.kulleen.org/) License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
''' '''
@ -28,20 +21,21 @@ from __future__ import unicode_literals
from . import Extension from . import Extension
from ..preprocessors import Preprocessor from ..preprocessors import Preprocessor
from ..inlinepatterns import Pattern from ..inlinepatterns import Pattern
from ..util import etree from ..util import etree, AtomicString
import re import re
# Global Vars # Global Vars
ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)') ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)')
class AbbrExtension(Extension): class AbbrExtension(Extension):
""" Abbreviation Extension for Python-Markdown. """ """ Abbreviation Extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals): def extendMarkdown(self, md, md_globals):
""" Insert AbbrPreprocessor before ReferencePreprocessor. """ """ Insert AbbrPreprocessor before ReferencePreprocessor. """
md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference') md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference')
class AbbrPreprocessor(Preprocessor): class AbbrPreprocessor(Preprocessor):
""" Abbreviation Preprocessor - parse text for abbr references. """ """ Abbreviation Preprocessor - parse text for abbr references. """
@ -49,7 +43,7 @@ class AbbrPreprocessor(Preprocessor):
''' '''
Find and remove all Abbreviation references from the text. Find and remove all Abbreviation references from the text.
Each reference is set as a new AbbrPattern in the markdown instance. Each reference is set as a new AbbrPattern in the markdown instance.
''' '''
new_text = [] new_text = []
for line in lines: for line in lines:
@ -57,19 +51,19 @@ class AbbrPreprocessor(Preprocessor):
if m: if m:
abbr = m.group('abbr').strip() abbr = m.group('abbr').strip()
title = m.group('title').strip() title = m.group('title').strip()
self.markdown.inlinePatterns['abbr-%s'%abbr] = \ self.markdown.inlinePatterns['abbr-%s' % abbr] = \
AbbrPattern(self._generate_pattern(abbr), title) AbbrPattern(self._generate_pattern(abbr), title)
else: else:
new_text.append(line) new_text.append(line)
return new_text return new_text
def _generate_pattern(self, text): def _generate_pattern(self, text):
''' '''
Given a string, returns an regex pattern to match that string. Given a string, returns an regex pattern to match that string.
'HTML' -> r'(?P<abbr>[H][T][M][L])' 'HTML' -> r'(?P<abbr>[H][T][M][L])'
Note: we force each char as a literal match (in brackets) as we don't Note: we force each char as a literal match (in brackets) as we don't
know what they will be beforehand. know what they will be beforehand.
''' '''
@ -88,9 +82,10 @@ class AbbrPattern(Pattern):
def handleMatch(self, m): def handleMatch(self, m):
abbr = etree.Element('abbr') abbr = etree.Element('abbr')
abbr.text = m.group('abbr') abbr.text = AtomicString(m.group('abbr'))
abbr.set('title', self.title) abbr.set('title', self.title)
return abbr return abbr
def makeExtension(configs=None):
return AbbrExtension(configs=configs) def makeExtension(*args, **kwargs):
return AbbrExtension(*args, **kwargs)
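
The doctest dropped from the module docstring above demonstrated the extension in use; an equivalent hedged sketch (expected output abridged from that removed doctest):

import calibre.ebooks.markdown as markdown

text = (
    "Some text with an ABBR.\n"
    "\n"
    "*[ABBR]: Abbreviation\n"
)
# The short name 'abbr' still works but now warns; the full bundled path is
# the form the deprecation messages above recommend.
html = markdown.markdown(
    text, extensions=['calibre.ebooks.markdown.extensions.abbr']
)
# -> <p>Some text with an <abbr title="Abbreviation">ABBR</abbr>.</p>
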


@ -4,39 +4,16 @@ Admonition extension for Python-Markdown
Adds rST-style admonitions. Inspired by [rST][] feature with the same name. Adds rST-style admonitions. Inspired by [rST][] feature with the same name.
The syntax is (followed by an indented block with the contents): [rST]: http://docutils.sourceforge.net/docs/ref/rst/directives.html#specific-admonitions # noqa
!!! [type] [optional explicit title]
Where `type` is used as a CSS class name of the div. If not present, `title` See <https://pythonhosted.org/Markdown/extensions/admonition.html>
defaults to the capitalized `type`, so "note" -> "Note". for documentation.
rST suggests the following `types`, but you're free to use whatever you want: Original code Copyright [Tiago Serafim](http://www.tiagoserafim.com/).
attention, caution, danger, error, hint, important, note, tip, warning
All changes Copyright The Python Markdown Project
A simple example: License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
!!! note
This is the first line inside the box.
Outputs:
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>This is the first line inside the box</p>
</div>
You can also specify the title and CSS class of the admonition:
!!! custom "Did you know?"
Another line here.
Outputs:
<div class="admonition custom">
<p class="admonition-title">Did you know?</p>
<p>Another line here.</p>
</div>
[rST]: http://docutils.sourceforge.net/docs/ref/rst/directives.html#specific-admonitions
By [Tiago Serafim](http://www.tiagoserafim.com/).
""" """
@ -69,8 +46,8 @@ class AdmonitionProcessor(BlockProcessor):
def test(self, parent, block): def test(self, parent, block):
sibling = self.lastChild(parent) sibling = self.lastChild(parent)
return self.RE.search(block) or \ return self.RE.search(block) or \
(block.startswith(' ' * self.tab_length) and sibling and \ (block.startswith(' ' * self.tab_length) and sibling is not None and
sibling.get('class', '').find(self.CLASSNAME) != -1) sibling.get('class', '').find(self.CLASSNAME) != -1)
def run(self, parent, blocks): def run(self, parent, blocks):
sibling = self.lastChild(parent) sibling = self.lastChild(parent)
@ -105,7 +82,8 @@ class AdmonitionProcessor(BlockProcessor):
klass, title = match.group(1).lower(), match.group(2) klass, title = match.group(1).lower(), match.group(2)
if title is None: if title is None:
# no title was provided, use the capitalized classname as title # no title was provided, use the capitalized classname as title
# e.g.: `!!! note` will render `<p class="admonition-title">Note</p>` # e.g.: `!!! note` will render
# `<p class="admonition-title">Note</p>`
title = klass.capitalize() title = klass.capitalize()
elif title == '': elif title == '':
# an explicit blank title should not be rendered # an explicit blank title should not be rendered
@ -114,5 +92,5 @@ class AdmonitionProcessor(BlockProcessor):
return klass, title return klass, title
def makeExtension(configs={}): def makeExtension(*args, **kwargs):
return AdmonitionExtension(configs=configs) return AdmonitionExtension(*args, **kwargs)
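A short usage sketch of the admonition syntax handled by AdmonitionProcessor; the expected output shape follows the docstring that this hunk removes (sample text is illustrative):

import markdown

text = '''!!! note "Did you know?"
    Admonition bodies are indented under the !!! line.'''

print(markdown.markdown(text, extensions=['markdown.extensions.admonition']))
# <div class="admonition note">
# <p class="admonition-title">Did you know?</p>
# <p>Admonition bodies are indented under the !!! line.</p>
# </div>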
View File
@ -2,19 +2,18 @@
Attribute List Extension for Python-Markdown Attribute List Extension for Python-Markdown
============================================ ============================================
Adds attribute list syntax. Inspired by Adds attribute list syntax. Inspired by
[maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s [maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s
feature of the same name. feature of the same name.
Copyright 2011 [Waylan Limberg](http://achinghead.com/). See <https://pythonhosted.org/Markdown/extensions/attr_list.html>
for documentation.
Contact: markdown@freewisdom.org Original code Copyright 2011 [Waylan Limberg](http://achinghead.com/).
License: BSD (see ../LICENSE.md for details) All changes Copyright 2011-2014 The Python Markdown Project
Dependencies: License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
* [Python 2.4+](http://python.org)
* [Markdown 2.1+](http://packages.python.org/Markdown/)
""" """
@ -27,21 +26,25 @@ import re
try: try:
Scanner = re.Scanner Scanner = re.Scanner
except AttributeError: except AttributeError: # pragma: no cover
# must be on Python 2.4 # must be on Python 2.4
from sre import Scanner from sre import Scanner
def _handle_double_quote(s, t): def _handle_double_quote(s, t):
k, v = t.split('=') k, v = t.split('=')
return k, v.strip('"') return k, v.strip('"')
def _handle_single_quote(s, t): def _handle_single_quote(s, t):
k, v = t.split('=') k, v = t.split('=')
return k, v.strip("'") return k, v.strip("'")
def _handle_key_value(s, t):
def _handle_key_value(s, t):
return t.split('=') return t.split('=')
def _handle_word(s, t): def _handle_word(s, t):
if t.startswith('.'): if t.startswith('.'):
return '.', t[1:] return '.', t[1:]
@ -52,27 +55,31 @@ def _handle_word(s, t):
_scanner = Scanner([ _scanner = Scanner([
(r'[^ ]+=".*?"', _handle_double_quote), (r'[^ ]+=".*?"', _handle_double_quote),
(r"[^ ]+='.*?'", _handle_single_quote), (r"[^ ]+='.*?'", _handle_single_quote),
(r'[^ ]+=[^ ]*', _handle_key_value), (r'[^ ]+=[^ =]+', _handle_key_value),
(r'[^ ]+', _handle_word), (r'[^ =]+', _handle_word),
(r' ', None) (r' ', None)
]) ])
def get_attrs(str): def get_attrs(str):
""" Parse attribute list and return a list of attribute tuples. """ """ Parse attribute list and return a list of attribute tuples. """
return _scanner.scan(str)[0] return _scanner.scan(str)[0]
def isheader(elem): def isheader(elem):
return elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6'] return elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
class AttrListTreeprocessor(Treeprocessor): class AttrListTreeprocessor(Treeprocessor):
BASE_RE = r'\{\:?([^\}]*)\}' BASE_RE = r'\{\:?([^\}]*)\}'
HEADER_RE = re.compile(r'[ ]*%s[ ]*$' % BASE_RE) HEADER_RE = re.compile(r'[ ]+%s[ ]*$' % BASE_RE)
BLOCK_RE = re.compile(r'\n[ ]*%s[ ]*$' % BASE_RE) BLOCK_RE = re.compile(r'\n[ ]*%s[ ]*$' % BASE_RE)
INLINE_RE = re.compile(r'^%s' % BASE_RE) INLINE_RE = re.compile(r'^%s' % BASE_RE)
NAME_RE = re.compile(r'[^A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d' NAME_RE = re.compile(r'[^A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff'
r'\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef' r'\u0370-\u037d\u037f-\u1fff\u200c-\u200d'
r'\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd' r'\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff'
r'\uf900-\ufdcf\ufdf0-\ufffd'
r'\:\-\.0-9\u00b7\u0300-\u036f\u203f-\u2040]+') r'\:\-\.0-9\u00b7\u0300-\u036f\u203f-\u2040]+')
def run(self, doc): def run(self, doc):
@ -80,10 +87,36 @@ class AttrListTreeprocessor(Treeprocessor):
if isBlockLevel(elem.tag): if isBlockLevel(elem.tag):
# Block level: check for attrs on last line of text # Block level: check for attrs on last line of text
RE = self.BLOCK_RE RE = self.BLOCK_RE
if isheader(elem): if isheader(elem) or elem.tag == 'dt':
# header: check for attrs at end of line # header or def-term: check for attrs at end of line
RE = self.HEADER_RE RE = self.HEADER_RE
if len(elem) and elem[-1].tail: if len(elem) and elem.tag == 'li':
# special case list items. children may include a ul or ol.
pos = None
# find the ul or ol position
for i, child in enumerate(elem):
if child.tag in ['ul', 'ol']:
pos = i
break
if pos is None and elem[-1].tail:
# use tail of last child. no ul or ol.
m = RE.search(elem[-1].tail)
if m:
self.assign_attrs(elem, m.group(1))
elem[-1].tail = elem[-1].tail[:m.start()]
elif pos is not None and pos > 0 and elem[pos-1].tail:
# use tail of last child before ul or ol
m = RE.search(elem[pos-1].tail)
if m:
self.assign_attrs(elem, m.group(1))
elem[pos-1].tail = elem[pos-1].tail[:m.start()]
elif elem.text:
# use text. ul is first child.
m = RE.search(elem.text)
if m:
self.assign_attrs(elem, m.group(1))
elem.text = elem.text[:m.start()]
elif len(elem) and elem[-1].tail:
# has children. Get from tail of last child # has children. Get from tail of last child
m = RE.search(elem[-1].tail) m = RE.search(elem[-1].tail)
if m: if m:
@ -95,6 +128,8 @@ class AttrListTreeprocessor(Treeprocessor):
elif elem.text: elif elem.text:
# no children. Get from text. # no children. Get from text.
m = RE.search(elem.text) m = RE.search(elem.text)
if not m and elem.tag == 'td':
m = re.search(self.BASE_RE, elem.text)
if m: if m:
self.assign_attrs(elem, m.group(1)) self.assign_attrs(elem, m.group(1))
elem.text = elem.text[:m.start()] elem.text = elem.text[:m.start()]
@ -133,8 +168,10 @@ class AttrListTreeprocessor(Treeprocessor):
class AttrListExtension(Extension): class AttrListExtension(Extension):
def extendMarkdown(self, md, md_globals): def extendMarkdown(self, md, md_globals):
md.treeprocessors.add('attr_list', AttrListTreeprocessor(md), '>prettify') md.treeprocessors.add(
'attr_list', AttrListTreeprocessor(md), '>prettify'
)
def makeExtension(configs={}): def makeExtension(*args, **kwargs):
return AttrListExtension(configs=configs) return AttrListExtension(*args, **kwargs)
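A small sketch of the attr_list syntax parsed by the scanner and HEADER_RE above; note that headers now require at least one space before the brace ([ ]+ instead of [ ]*). The sample header is illustrative:

import markdown

text = "# A Header {: #intro .lead}"
print(markdown.markdown(text, extensions=['markdown.extensions.attr_list']))
# -> an <h1> carrying id="intro" and class="lead"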
View File
@ -4,17 +4,14 @@ CodeHilite Extension for Python-Markdown
Adds code/syntax highlighting to standard Python-Markdown code blocks. Adds code/syntax highlighting to standard Python-Markdown code blocks.
Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/). See <https://pythonhosted.org/Markdown/extensions/code_hilite.html>
for documentation.
Project website: <http://packages.python.org/Markdown/extensions/code_hilite.html> Original code Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
Contact: markdown@freewisdom.org
License: BSD (see ../LICENSE.md for details) All changes Copyright 2008-2014 The Python Markdown Project
Dependencies: License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
* [Python 2.3+](http://python.org/)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
* [Pygments](http://pygments.org/)
""" """
@ -22,19 +19,35 @@ from __future__ import absolute_import
from __future__ import unicode_literals from __future__ import unicode_literals
from . import Extension from . import Extension
from ..treeprocessors import Treeprocessor from ..treeprocessors import Treeprocessor
import warnings
try: try:
from pygments import highlight from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer, TextLexer from pygments.lexers import get_lexer_by_name, guess_lexer
from pygments.formatters import HtmlFormatter from pygments.formatters import get_formatter_by_name
pygments = True pygments = True
except ImportError: except ImportError:
pygments = False pygments = False
def parse_hl_lines(expr):
"""Support our syntax for emphasizing certain lines of code.
expr should be like '1 2' to emphasize lines 1 and 2 of a code block.
Returns a list of ints, the line numbers to emphasize.
"""
if not expr:
return []
try:
return list(map(int, expr.split()))
except ValueError:
return []
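A quick sketch of how the new parse_hl_lines helper behaves; the return values follow directly from the code above:

from markdown.extensions.codehilite import parse_hl_lines

parse_hl_lines('1 3')   # -> [1, 3]
parse_hl_lines(None)    # -> []   (no expression given)
parse_hl_lines('a b')   # -> []   (non-numeric input means no emphasis)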
# ------------------ The Main CodeHilite Class ---------------------- # ------------------ The Main CodeHilite Class ----------------------
class CodeHilite(object): class CodeHilite(object):
""" """
Determine language of source code, and pass it into the pygments highlighter. Determine language of source code, and pass it into pygments highlighter.
Basic Usage: Basic Usage:
>>> code = CodeHilite(src = 'some text') >>> code = CodeHilite(src = 'some text')
@ -42,24 +55,27 @@ class CodeHilite(object):
* src: Source string or any object with a .readline attribute. * src: Source string or any object with a .readline attribute.
* linenums: (Boolean) Set line numbering to 'on' (True), 'off' (False) or 'auto'(None). * linenums: (Boolean) Set line numbering to 'on' (True),
Set to 'auto' by default. 'off' (False) or 'auto'(None). Set to 'auto' by default.
* guess_lang: (Boolean) Turn language auto-detection 'on' or 'off' (on by default). * guess_lang: (Boolean) Turn language auto-detection
'on' or 'off' (on by default).
* css_class: Set class name of wrapper div ('codehilite' by default). * css_class: Set class name of wrapper div ('codehilite' by default).
* hl_lines: (List of integers) Lines to emphasize, 1-indexed.
Low Level Usage: Low Level Usage:
>>> code = CodeHilite() >>> code = CodeHilite()
>>> code.src = 'some text' # String or anything with a .readline attr. >>> code.src = 'some text' # String or anything with a .readline attr.
>>> code.linenos = True # True or False; Turns line numbering on or off. >>> code.linenos = True # Turns line numbering on or off.
>>> html = code.hilite() >>> html = code.hilite()
""" """
def __init__(self, src=None, linenums=None, guess_lang=True, def __init__(self, src=None, linenums=None, guess_lang=True,
css_class="codehilite", lang=None, style='default', css_class="codehilite", lang=None, style='default',
noclasses=False, tab_length=4): noclasses=False, tab_length=4, hl_lines=None, use_pygments=True):
self.src = src self.src = src
self.lang = lang self.lang = lang
self.linenums = linenums self.linenums = linenums
@ -68,6 +84,8 @@ class CodeHilite(object):
self.style = style self.style = style
self.noclasses = noclasses self.noclasses = noclasses
self.tab_length = tab_length self.tab_length = tab_length
self.hl_lines = hl_lines or []
self.use_pygments = use_pygments
def hilite(self): def hilite(self):
""" """
@ -83,9 +101,9 @@ class CodeHilite(object):
self.src = self.src.strip('\n') self.src = self.src.strip('\n')
if self.lang is None: if self.lang is None:
self._getLang() self._parseHeader()
if pygments: if pygments and self.use_pygments:
try: try:
lexer = get_lexer_by_name(self.lang) lexer = get_lexer_by_name(self.lang)
except ValueError: except ValueError:
@ -93,13 +111,15 @@ class CodeHilite(object):
if self.guess_lang: if self.guess_lang:
lexer = guess_lexer(self.src) lexer = guess_lexer(self.src)
else: else:
lexer = TextLexer() lexer = get_lexer_by_name('text')
except ValueError: except ValueError:
lexer = TextLexer() lexer = get_lexer_by_name('text')
formatter = HtmlFormatter(linenos=self.linenums, formatter = get_formatter_by_name('html',
cssclass=self.css_class, linenos=self.linenums,
style=self.style, cssclass=self.css_class,
noclasses=self.noclasses) style=self.style,
noclasses=self.noclasses,
hl_lines=self.hl_lines)
return highlight(self.src, lexer, formatter) return highlight(self.src, lexer, formatter)
else: else:
# just escape and build markup usable by JS highlighting libs # just escape and build markup usable by JS highlighting libs
@ -114,36 +134,42 @@ class CodeHilite(object):
classes.append('linenums') classes.append('linenums')
class_str = '' class_str = ''
if classes: if classes:
class_str = ' class="%s"' % ' '.join(classes) class_str = ' class="%s"' % ' '.join(classes)
return '<pre class="%s"><code%s>%s</code></pre>\n'% \ return '<pre class="%s"><code%s>%s</code></pre>\n' % \
(self.css_class, class_str, txt) (self.css_class, class_str, txt)
def _getLang(self): def _parseHeader(self):
""" """
Determines language of a code block from shebang line and whether said Determines language of a code block from shebang line and whether said
line should be removed or left in place. If the shebang line contains a line should be removed or left in place. If the shebang line contains a
path (even a single /) then it is assumed to be a real shebang line and path (even a single /) then it is assumed to be a real shebang line and
left alone. However, if no path is given (e.g.: #!python or :::python) left alone. However, if no path is given (e.g.: #!python or :::python)
then it is assumed to be a mock shebang for language identification of a then it is assumed to be a mock shebang for language identification of
code fragment and removed from the code block prior to processing for a code fragment and removed from the code block prior to processing for
code highlighting. When a mock shebang (e.g.: #!python) is found, line code highlighting. When a mock shebang (e.g.: #!python) is found, line
numbering is turned on. When colons are found in place of a shebang numbering is turned on. When colons are found in place of a shebang
(e.g.: :::python), line numbering is left in the current state - off (e.g.: :::python), line numbering is left in the current state - off
by default. by default.
Also parses optional list of highlight lines, like:
:::python hl_lines="1 3"
""" """
import re import re
#split text into lines # split text into lines
lines = self.src.split("\n") lines = self.src.split("\n")
#pull first line to examine # pull first line to examine
fl = lines.pop(0) fl = lines.pop(0)
c = re.compile(r''' c = re.compile(r'''
(?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons. (?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons
(?P<path>(?:/\w+)*[/ ])? # Zero or 1 path (?P<path>(?:/\w+)*[/ ])? # Zero or 1 path
(?P<lang>[\w+-]*) # The language (?P<lang>[\w+-]*) # The language
\s* # Arbitrary whitespace
# Optional highlight lines, single- or double-quote-delimited
(hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?
''', re.VERBOSE) ''', re.VERBOSE)
# search first line for shebang # search first line for shebang
m = c.search(fl) m = c.search(fl)
@ -159,6 +185,8 @@ class CodeHilite(object):
if self.linenums is None and m.group('shebang'): if self.linenums is None and m.group('shebang'):
# Overridable and Shebang exists - use line numbers # Overridable and Shebang exists - use line numbers
self.linenums = True self.linenums = True
self.hl_lines = parse_hl_lines(m.group('hl_lines'))
else: else:
# No match # No match
lines.insert(0, fl) lines.insert(0, fl)
@ -166,24 +194,27 @@ class CodeHilite(object):
self.src = "\n".join(lines).strip("\n") self.src = "\n".join(lines).strip("\n")
# ------------------ The Markdown Extension ------------------------------- # ------------------ The Markdown Extension -------------------------------
class HiliteTreeprocessor(Treeprocessor): class HiliteTreeprocessor(Treeprocessor):
""" Hilight source code in code blocks. """ """ Hilight source code in code blocks. """
def run(self, root): def run(self, root):
""" Find code blocks and store in htmlStash. """ """ Find code blocks and store in htmlStash. """
blocks = root.getiterator('pre') blocks = root.iter('pre')
for block in blocks: for block in blocks:
children = block.getchildren() if len(block) == 1 and block[0].tag == 'code':
if len(children) == 1 and children[0].tag == 'code': code = CodeHilite(
code = CodeHilite(children[0].text, block[0].text,
linenums=self.config['linenums'], linenums=self.config['linenums'],
guess_lang=self.config['guess_lang'], guess_lang=self.config['guess_lang'],
css_class=self.config['css_class'], css_class=self.config['css_class'],
style=self.config['pygments_style'], style=self.config['pygments_style'],
noclasses=self.config['noclasses'], noclasses=self.config['noclasses'],
tab_length=self.markdown.tab_length) tab_length=self.markdown.tab_length,
use_pygments=self.config['use_pygments']
)
placeholder = self.markdown.htmlStash.store(code.hilite(), placeholder = self.markdown.htmlStash.store(code.hilite(),
safe=True) safe=True)
# Clear codeblock in etree instance # Clear codeblock in etree instance
@ -197,34 +228,29 @@ class HiliteTreeprocessor(Treeprocessor):
class CodeHiliteExtension(Extension): class CodeHiliteExtension(Extension):
""" Add source code hilighting to markdown codeblocks. """ """ Add source code hilighting to markdown codeblocks. """
def __init__(self, configs): def __init__(self, *args, **kwargs):
# define default configs # define default configs
self.config = { self.config = {
'linenums': [None, "Use lines numbers. True=yes, False=no, None=auto"], 'linenums': [None,
'force_linenos' : [False, "Depreciated! Use 'linenums' instead. Force line numbers - Default: False"], "Use lines numbers. True=yes, False=no, None=auto"],
'guess_lang' : [True, "Automatic language detection - Default: True"], 'guess_lang': [True,
'css_class' : ["codehilite", "Automatic language detection - Default: True"],
"Set class name for wrapper <div> - Default: codehilite"], 'css_class': ["codehilite",
'pygments_style' : ['default', 'Pygments HTML Formatter Style (Colorscheme) - Default: default'], "Set class name for wrapper <div> - "
'noclasses': [False, 'Use inline styles instead of CSS classes - Default false'] "Default: codehilite"],
'pygments_style': ['default',
'Pygments HTML Formatter Style '
'(Colorscheme) - Default: default'],
'noclasses': [False,
'Use inline styles instead of CSS classes - '
'Default false'],
'use_pygments': [True,
'Use Pygments to Highlight code blocks. '
'Disable if using a JavaScript library. '
'Default: True']
} }
# Override defaults with user settings super(CodeHiliteExtension, self).__init__(*args, **kwargs)
for key, value in configs:
# convert strings to booleans
if value == 'True': value = True
if value == 'False': value = False
if value == 'None': value = None
if key == 'force_linenos':
warnings.warn('The "force_linenos" config setting'
' to the CodeHilite extension is deprecrecated.'
' Use "linenums" instead.', PendingDeprecationWarning)
if value:
# Carry 'force_linenos' over to new 'linenos'.
self.setConfig('linenums', True)
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals): def extendMarkdown(self, md, md_globals):
""" Add HilitePostprocessor to Markdown instance. """ """ Add HilitePostprocessor to Markdown instance. """
@ -235,6 +261,5 @@ class CodeHiliteExtension(Extension):
md.registerExtension(self) md.registerExtension(self)
def makeExtension(configs={}): def makeExtension(*args, **kwargs):
return CodeHiliteExtension(configs=configs) return CodeHiliteExtension(*args, **kwargs)
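With the config handling now delegated to the base Extension via super(), options can be passed as keywords; a minimal sketch, assuming markdown 2.6 is installed (input text is illustrative):

import markdown
from markdown.extensions.codehilite import CodeHiliteExtension

html = markdown.markdown(
    "    #!python\n    print('hi')",
    extensions=[CodeHiliteExtension(linenums=False, guess_lang=False)]
)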
View File
@ -2,19 +2,16 @@
Definition List Extension for Python-Markdown Definition List Extension for Python-Markdown
============================================= =============================================
Added parsing of Definition Lists to Python-Markdown. Adds parsing of Definition Lists to Python-Markdown.
A simple example: See <https://pythonhosted.org/Markdown/extensions/definition_lists.html>
for documentation.
Apple Original code Copyright 2008 [Waylan Limberg](http://achinghead.com)
: Pomaceous fruit of plants of the genus Malus in
the family Rosaceae.
: An american computer company.
Orange All changes Copyright 2008-2014 The Python Markdown Project
: The fruit of an evergreen tree of the genus Citrus.
Copyright 2008 - [Waylan Limberg](http://achinghead.com) License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
""" """
@ -39,7 +36,8 @@ class DefListProcessor(BlockProcessor):
raw_block = blocks.pop(0) raw_block = blocks.pop(0)
m = self.RE.search(raw_block) m = self.RE.search(raw_block)
terms = [l.strip() for l in raw_block[:m.start()].split('\n') if l.strip()] terms = [l.strip() for l in
raw_block[:m.start()].split('\n') if l.strip()]
block = raw_block[m.end():] block = raw_block[m.end():]
no_indent = self.NO_INDENT_RE.match(block) no_indent = self.NO_INDENT_RE.match(block)
if no_indent: if no_indent:
@ -52,7 +50,7 @@ class DefListProcessor(BlockProcessor):
d = m.group(2) d = m.group(2)
sibling = self.lastChild(parent) sibling = self.lastChild(parent)
if not terms and sibling is None: if not terms and sibling is None:
# This is not a definition item. Most likely a paragraph that # This is not a definition item. Most likely a paragraph that
# starts with a colon at the beginning of a document or list. # starts with a colon at the beginning of a document or list.
blocks.insert(0, raw_block) blocks.insert(0, raw_block)
return False return False
@ -66,10 +64,10 @@ class DefListProcessor(BlockProcessor):
else: else:
state = 'list' state = 'list'
if sibling and sibling.tag == 'dl': if sibling is not None and sibling.tag == 'dl':
# This is another item on an existing list # This is another item on an existing list
dl = sibling dl = sibling
if len(dl) and dl[-1].tag == 'dd' and len(dl[-1]): if not terms and len(dl) and dl[-1].tag == 'dd' and len(dl[-1]):
state = 'looselist' state = 'looselist'
else: else:
# This is a new list # This is a new list
@ -87,6 +85,7 @@ class DefListProcessor(BlockProcessor):
if theRest: if theRest:
blocks.insert(0, theRest) blocks.insert(0, theRest)
class DefListIndentProcessor(ListIndentProcessor): class DefListIndentProcessor(ListIndentProcessor):
""" Process indented children of definition list items. """ """ Process indented children of definition list items. """
@ -97,7 +96,6 @@ class DefListIndentProcessor(ListIndentProcessor):
""" Create a new dd and parse the block with it as the parent. """ """ Create a new dd and parse the block with it as the parent. """
dd = etree.SubElement(parent, 'dd') dd = etree.SubElement(parent, 'dd')
self.parser.parseBlocks(dd, [block]) self.parser.parseBlocks(dd, [block])
class DefListExtension(Extension): class DefListExtension(Extension):
@ -108,11 +106,10 @@ class DefListExtension(Extension):
md.parser.blockprocessors.add('defindent', md.parser.blockprocessors.add('defindent',
DefListIndentProcessor(md.parser), DefListIndentProcessor(md.parser),
'>indent') '>indent')
md.parser.blockprocessors.add('deflist', md.parser.blockprocessors.add('deflist',
DefListProcessor(md.parser), DefListProcessor(md.parser),
'>ulist') '>ulist')
def makeExtension(configs={}): def makeExtension(*args, **kwargs):
return DefListExtension(configs=configs) return DefListExtension(*args, **kwargs)
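A usage sketch of the definition-list syntax, reusing the Apple/Orange example from the docstring this hunk removes:

import markdown

text = """Apple
:   Pomaceous fruit of plants of the genus Malus in
    the family Rosaceae.

Orange
:   The fruit of an evergreen tree of the genus Citrus."""

print(markdown.markdown(text, extensions=['markdown.extensions.def_list']))
# -> a <dl> with <dt>Apple</dt>/<dt>Orange</dt> and their <dd> definitions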
View File
@ -6,49 +6,127 @@ A compilation of various Python-Markdown extensions that imitates
[PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/). [PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/).
Note that each of the individual extensions still needs to be available Note that each of the individual extensions still needs to be available
on your PYTHONPATH. This extension simply wraps them all up as a on your PYTHONPATH. This extension simply wraps them all up as a
convenience so that only one extension needs to be listed when convenience so that only one extension needs to be listed when
initiating Markdown. See the documentation for each individual initiating Markdown. See the documentation for each individual
extension for specifics about that extension. extension for specifics about that extension.
In the event that one or more of the supported extensions are not There may be additional extensions that are distributed with
available for import, Markdown will issue a warning and simply continue
without that extension.
There may be additional extensions that are distributed with
Python-Markdown that are not included here in Extra. Those extensions Python-Markdown that are not included here in Extra. Those extensions
are not part of PHP Markdown Extra, and therefore, not part of are not part of PHP Markdown Extra, and therefore, not part of
Python-Markdown Extra. If you really would like Extra to include Python-Markdown Extra. If you really would like Extra to include
additional extensions, we suggest creating your own clone of Extra additional extensions, we suggest creating your own clone of Extra
under a different name. You could also edit the `extensions` global under a different name. You could also edit the `extensions` global
variable defined below, but be aware that such changes may be lost variable defined below, but be aware that such changes may be lost
when you upgrade to any future version of Python-Markdown. when you upgrade to any future version of Python-Markdown.
See <https://pythonhosted.org/Markdown/extensions/extra.html>
for documentation.
Copyright The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
""" """
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import unicode_literals from __future__ import unicode_literals
from . import Extension from . import Extension
from ..blockprocessors import BlockProcessor
from .. import util
import re
extensions = [
'markdown.extensions.smart_strong',
'markdown.extensions.fenced_code',
'markdown.extensions.footnotes',
'markdown.extensions.attr_list',
'markdown.extensions.def_list',
'markdown.extensions.tables',
'markdown.extensions.abbr'
]
extensions = ['smart_strong',
'fenced_code',
'footnotes',
'attr_list',
'def_list',
'tables',
'abbr',
]
class ExtraExtension(Extension): class ExtraExtension(Extension):
""" Add various extensions to Markdown class.""" """ Add various extensions to Markdown class."""
def __init__(self, *args, **kwargs):
""" config is a dumb holder which gets passed to actual ext later. """
self.config = kwargs.pop('configs', {})
self.config.update(kwargs)
def extendMarkdown(self, md, md_globals): def extendMarkdown(self, md, md_globals):
""" Register extension instances. """ """ Register extension instances. """
md.registerExtensions(extensions, self.config) md.registerExtensions(extensions, self.config)
if not md.safeMode: if not md.safeMode:
# Turn on processing of markdown text within raw html # Turn on processing of markdown text within raw html
md.preprocessors['html_block'].markdown_in_raw = True md.preprocessors['html_block'].markdown_in_raw = True
md.parser.blockprocessors.add('markdown_block',
MarkdownInHtmlProcessor(md.parser),
'_begin')
md.parser.blockprocessors.tag_counter = -1
md.parser.blockprocessors.contain_span_tags = re.compile(
r'^(p|h[1-6]|li|dd|dt|td|th|legend|address)$', re.IGNORECASE)
def makeExtension(configs={}):
return ExtraExtension(configs=dict(configs)) def makeExtension(*args, **kwargs):
return ExtraExtension(*args, **kwargs)
class MarkdownInHtmlProcessor(BlockProcessor):
"""Process Markdown Inside HTML Blocks."""
def test(self, parent, block):
return block == util.TAG_PLACEHOLDER % \
str(self.parser.blockprocessors.tag_counter + 1)
def _process_nests(self, element, block):
"""Process the element's child elements in self.run."""
# Build list of indexes of each nest within the parent element.
nest_index = [] # a list of tuples: (left index, right index)
i = self.parser.blockprocessors.tag_counter + 1
while len(self._tag_data) > i and self._tag_data[i]['left_index']:
left_child_index = self._tag_data[i]['left_index']
right_child_index = self._tag_data[i]['right_index']
nest_index.append((left_child_index - 1, right_child_index))
i += 1
# Create each nest subelement.
for i, (left_index, right_index) in enumerate(nest_index[:-1]):
self.run(element, block[left_index:right_index],
block[right_index:nest_index[i + 1][0]], True)
self.run(element, block[nest_index[-1][0]:nest_index[-1][1]], # last
block[nest_index[-1][1]:], True) # nest
def run(self, parent, blocks, tail=None, nest=False):
self._tag_data = self.parser.markdown.htmlStash.tag_data
self.parser.blockprocessors.tag_counter += 1
tag = self._tag_data[self.parser.blockprocessors.tag_counter]
# Create Element
markdown_value = tag['attrs'].pop('markdown')
element = util.etree.SubElement(parent, tag['tag'], tag['attrs'])
# Slice Off Block
if nest:
self.parser.parseBlocks(parent, tail) # Process Tail
block = blocks[1:]
else: # includes nests since a third level of nesting isn't supported
block = blocks[tag['left_index'] + 1: tag['right_index']]
del blocks[:tag['right_index']]
# Process Text
if (self.parser.blockprocessors.contain_span_tags.match( # Span Mode
tag['tag']) and markdown_value != 'block') or \
markdown_value == 'span':
element.text = '\n'.join(block)
else: # Block Mode
i = self.parser.blockprocessors.tag_counter + 1
if len(self._tag_data) > i and self._tag_data[i]['left_index']:
first_subelement_index = self._tag_data[i]['left_index'] - 1
self.parser.parseBlocks(
element, block[:first_subelement_index])
if not nest:
block = self._process_nests(element, block)
else:
self.parser.parseBlocks(element, block)
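A sketch of the markdown-in-raw-HTML handling that MarkdownInHtmlProcessor enables: the markdown attribute is popped from the tag's attrs and the contents are parsed as Markdown (block or span mode depending on the tag and the attribute value). The sample text is illustrative:

import markdown

text = '''<div markdown="1">
This **bold** text is processed even though it sits inside raw HTML.
</div>'''

print(markdown.markdown(text, extensions=['markdown.extensions.extra']))
# -> <div><p>This <strong>bold</strong> text ...</p></div>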
View File
@ -4,89 +4,24 @@ Fenced Code Extension for Python Markdown
This extension adds Fenced Code Blocks to Python-Markdown. This extension adds Fenced Code Blocks to Python-Markdown.
>>> import markdown See <https://pythonhosted.org/Markdown/extensions/fenced_code_blocks.html>
>>> text = ''' for documentation.
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> print html
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Works with safe_mode also (we check this because we are using the HtmlStash): Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
>>> print markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace')
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Include tilde's in a code block and wrap with blank lines: All changes Copyright 2008-2014 The Python Markdown Project
>>> text = '''
... ~~~~~~~~
...
... ~~~~
... ~~~~~~~~'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code>
~~~~
</code></pre>
Language tags:
>>> text = '''
... ~~~~{.python}
... # Some python code
... ~~~~'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code class="python"># Some python code
</code></pre>
Optionally backticks instead of tildes as per how github's code block markdown is identified:
>>> text = '''
... `````
... # Arbitrary code
... ~~~~~ # these tildes will not close the block
... `````'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code># Arbitrary code
~~~~~ # these tildes will not close the block
</code></pre>
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/fenced_code_blocks.html>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.4+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
* [Pygments (optional)](http://pygments.org)
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
""" """
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import unicode_literals from __future__ import unicode_literals
from . import Extension from . import Extension
from ..preprocessors import Preprocessor from ..preprocessors import Preprocessor
from .codehilite import CodeHilite, CodeHiliteExtension from .codehilite import CodeHilite, CodeHiliteExtension, parse_hl_lines
import re import re
# Global vars
FENCED_BLOCK_RE = re.compile( \
r'(?P<fence>^(?:~{3,}|`{3,}))[ ]*(\{?\.?(?P<lang>[a-zA-Z0-9_+-]*)\}?)?[ ]*\n(?P<code>.*?)(?<=\n)(?P=fence)[ ]*$',
re.MULTILINE|re.DOTALL
)
CODE_WRAP = '<pre><code%s>%s</code></pre>'
LANG_TAG = ' class="%s"'
class FencedCodeExtension(Extension): class FencedCodeExtension(Extension):
@ -95,11 +30,21 @@ class FencedCodeExtension(Extension):
md.registerExtension(self) md.registerExtension(self)
md.preprocessors.add('fenced_code_block', md.preprocessors.add('fenced_code_block',
FencedBlockPreprocessor(md), FencedBlockPreprocessor(md),
">normalize_whitespace") ">normalize_whitespace")
class FencedBlockPreprocessor(Preprocessor): class FencedBlockPreprocessor(Preprocessor):
FENCED_BLOCK_RE = re.compile(r'''
(?P<fence>^(?:~{3,}|`{3,}))[ ]* # Opening ``` or ~~~
(\{?\.?(?P<lang>[a-zA-Z0-9_+-]*))?[ ]* # Optional {, and lang
# Optional highlight lines, single- or double-quote-delimited
(hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?[ ]*
}?[ ]*\n # Optional closing }
(?P<code>.*?)(?<=\n)
(?P=fence)[ ]*$''', re.MULTILINE | re.DOTALL | re.VERBOSE)
CODE_WRAP = '<pre><code%s>%s</code></pre>'
LANG_TAG = ' class="%s"'
def __init__(self, md): def __init__(self, md):
super(FencedBlockPreprocessor, self).__init__(md) super(FencedBlockPreprocessor, self).__init__(md)
@ -121,29 +66,35 @@ class FencedBlockPreprocessor(Preprocessor):
text = "\n".join(lines) text = "\n".join(lines)
while 1: while 1:
m = FENCED_BLOCK_RE.search(text) m = self.FENCED_BLOCK_RE.search(text)
if m: if m:
lang = '' lang = ''
if m.group('lang'): if m.group('lang'):
lang = LANG_TAG % m.group('lang') lang = self.LANG_TAG % m.group('lang')
# If config is not empty, then the codehighlite extension # If config is not empty, then the codehighlite extension
# is enabled, so we call it to highlite the code # is enabled, so we call it to highlight the code
if self.codehilite_conf: if self.codehilite_conf:
highliter = CodeHilite(m.group('code'), highliter = CodeHilite(
linenums=self.codehilite_conf['linenums'][0], m.group('code'),
guess_lang=self.codehilite_conf['guess_lang'][0], linenums=self.codehilite_conf['linenums'][0],
css_class=self.codehilite_conf['css_class'][0], guess_lang=self.codehilite_conf['guess_lang'][0],
style=self.codehilite_conf['pygments_style'][0], css_class=self.codehilite_conf['css_class'][0],
lang=(m.group('lang') or None), style=self.codehilite_conf['pygments_style'][0],
noclasses=self.codehilite_conf['noclasses'][0]) lang=(m.group('lang') or None),
noclasses=self.codehilite_conf['noclasses'][0],
hl_lines=parse_hl_lines(m.group('hl_lines'))
)
code = highliter.hilite() code = highliter.hilite()
else: else:
code = CODE_WRAP % (lang, self._escape(m.group('code'))) code = self.CODE_WRAP % (lang,
self._escape(m.group('code')))
placeholder = self.markdown.htmlStash.store(code, safe=True) placeholder = self.markdown.htmlStash.store(code, safe=True)
text = '%s\n%s\n%s'% (text[:m.start()], placeholder, text[m.end():]) text = '%s\n%s\n%s' % (text[:m.start()],
placeholder,
text[m.end():])
else: else:
break break
return text.split("\n") return text.split("\n")
@ -157,5 +108,5 @@ class FencedBlockPreprocessor(Preprocessor):
return txt return txt
def makeExtension(configs=None): def makeExtension(*args, **kwargs):
return FencedCodeExtension(configs=configs) return FencedCodeExtension(*args, **kwargs)
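A sketch of a fenced block using the hl_lines group added to FENCED_BLOCK_RE; the highlighting itself only happens when codehilite is enabled as well (otherwise the block is just escaped and wrapped). The sample text is illustrative:

import markdown

text = '''```python hl_lines="2"
a = 1
b = 2
c = 3
```'''

print(markdown.markdown(text, extensions=['markdown.extensions.codehilite',
                                          'markdown.extensions.fenced_code']))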
View File
@ -1,25 +1,15 @@
""" """
========================= FOOTNOTES ================================= Footnotes Extension for Python-Markdown
=======================================
This section adds footnote handling to markdown. It can be used as Adds footnote handling to Python-Markdown.
an example for extending python-markdown with relatively complex
functionality. While in this case the extension is included inside
the module itself, it could just as easily be added from outside the
module. Note that all markdown classes above are ignorant about
footnotes. All footnote functionality is provided separately and
then added to the markdown instance at the run time.
Footnote functionality is attached by calling extendMarkdown() See <https://pythonhosted.org/Markdown/extensions/footnotes.html>
method of FootnoteExtension. The method also registers the for documentation.
extension to allow its state to be reset by a call to reset() for documentation.
method.
Example: Copyright The Python Markdown Project
Footnotes[^1] have a label[^label] and a definition[^!DEF].
[^1]: This is a footnote License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
[^label]: A footnote on "label"
[^!DEF]: The footnote for definition
""" """
@ -35,29 +25,31 @@ from ..odict import OrderedDict
import re import re
FN_BACKLINK_TEXT = "zz1337820767766393qq" FN_BACKLINK_TEXT = "zz1337820767766393qq"
NBSP_PLACEHOLDER = "qq3936677670287331zz" NBSP_PLACEHOLDER = "qq3936677670287331zz"
DEF_RE = re.compile(r'[ ]{0,3}\[\^([^\]]*)\]:\s*(.*)') DEF_RE = re.compile(r'[ ]{0,3}\[\^([^\]]*)\]:\s*(.*)')
TABBED_RE = re.compile(r'((\t)|( ))(.*)') TABBED_RE = re.compile(r'((\t)|( ))(.*)')
class FootnoteExtension(Extension): class FootnoteExtension(Extension):
""" Footnote Extension. """ """ Footnote Extension. """
def __init__ (self, configs): def __init__(self, *args, **kwargs):
""" Setup configs. """ """ Setup configs. """
self.config = {'PLACE_MARKER':
["///Footnotes Go Here///",
"The text string that marks where the footnotes go"],
'UNIQUE_IDS':
[False,
"Avoid name collisions across "
"multiple calls to reset()."],
"BACKLINK_TEXT":
["&#8617;",
"The text string that links from the footnote to the reader's place."]
}
for key, value in configs: self.config = {
self.config[key][0] = value 'PLACE_MARKER':
["///Footnotes Go Here///",
"The text string that marks where the footnotes go"],
'UNIQUE_IDS':
[False,
"Avoid name collisions across "
"multiple calls to reset()."],
"BACKLINK_TEXT":
["&#8617;",
"The text string that links from the footnote "
"to the reader's place."]
}
super(FootnoteExtension, self).__init__(*args, **kwargs)
# In multiple invocations, emit links that don't get tangled. # In multiple invocations, emit links that don't get tangled.
self.unique_prefix = 0 self.unique_prefix = 0
@ -69,27 +61,28 @@ class FootnoteExtension(Extension):
md.registerExtension(self) md.registerExtension(self)
self.parser = md.parser self.parser = md.parser
self.md = md self.md = md
self.sep = ':'
if self.md.output_format in ['html5', 'xhtml5']:
self.sep = '-'
# Insert a preprocessor before ReferencePreprocessor # Insert a preprocessor before ReferencePreprocessor
md.preprocessors.add("footnote", FootnotePreprocessor(self), md.preprocessors.add(
"<reference") "footnote", FootnotePreprocessor(self), "<reference"
)
# Insert an inline pattern before ImageReferencePattern # Insert an inline pattern before ImageReferencePattern
FOOTNOTE_RE = r'\[\^([^\]]*)\]' # blah blah [^1] blah FOOTNOTE_RE = r'\[\^([^\]]*)\]' # blah blah [^1] blah
md.inlinePatterns.add("footnote", FootnotePattern(FOOTNOTE_RE, self), md.inlinePatterns.add(
"<reference") "footnote", FootnotePattern(FOOTNOTE_RE, self), "<reference"
)
# Insert a tree-processor that would actually add the footnote div # Insert a tree-processor that would actually add the footnote div
# This must be before all other treeprocessors (i.e., inline and # This must be before all other treeprocessors (i.e., inline and
# codehilite) so they can run on the contents of the div. # codehilite) so they can run on the contents of the div.
md.treeprocessors.add("footnote", FootnoteTreeprocessor(self), md.treeprocessors.add(
"_begin") "footnote", FootnoteTreeprocessor(self), "_begin"
)
# Insert a postprocessor after amp_substitute processor # Insert a postprocessor after amp_substitute processor
md.postprocessors.add("footnote", FootnotePostprocessor(self), md.postprocessors.add(
">amp_substitute") "footnote", FootnotePostprocessor(self), ">amp_substitute"
)
def reset(self): def reset(self):
""" Clear the footnotes on reset, and prepare for a distinct document. """ """ Clear footnotes on reset, and prepare for distinct document. """
self.footnotes = OrderedDict() self.footnotes = OrderedDict()
self.unique_prefix += 1 self.unique_prefix += 1
@ -103,9 +96,11 @@ class FootnoteExtension(Extension):
if child.tail: if child.tail:
if child.tail.find(self.getConfig("PLACE_MARKER")) > -1: if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
return child, element, False return child, element, False
finder(child) child_res = finder(child)
if child_res is not None:
return child_res
return None return None
res = finder(root) res = finder(root)
return res return res
@ -113,19 +108,25 @@ class FootnoteExtension(Extension):
""" Store a footnote for later retrieval. """ """ Store a footnote for later retrieval. """
self.footnotes[id] = text self.footnotes[id] = text
def get_separator(self):
if self.md.output_format in ['html5', 'xhtml5']:
return '-'
return ':'
def makeFootnoteId(self, id): def makeFootnoteId(self, id):
""" Return footnote link id. """ """ Return footnote link id. """
if self.getConfig("UNIQUE_IDS"): if self.getConfig("UNIQUE_IDS"):
return 'fn%s%d-%s' % (self.sep, self.unique_prefix, id) return 'fn%s%d-%s' % (self.get_separator(), self.unique_prefix, id)
else: else:
return 'fn%s%s' % (self.sep, id) return 'fn%s%s' % (self.get_separator(), id)
def makeFootnoteRefId(self, id): def makeFootnoteRefId(self, id):
""" Return footnote back-link id. """ """ Return footnote back-link id. """
if self.getConfig("UNIQUE_IDS"): if self.getConfig("UNIQUE_IDS"):
return 'fnref%s%d-%s' % (self.sep, self.unique_prefix, id) return 'fnref%s%d-%s' % (self.get_separator(),
self.unique_prefix, id)
else: else:
return 'fnref%s%s' % (self.sep, id) return 'fnref%s%s' % (self.get_separator(), id)
def makeFootnotesDiv(self, root): def makeFootnotesDiv(self, root):
""" Return div of footnotes as et Element. """ """ Return div of footnotes as et Element. """
@ -145,10 +146,13 @@ class FootnoteExtension(Extension):
backlink = etree.Element("a") backlink = etree.Element("a")
backlink.set("href", "#" + self.makeFootnoteRefId(id)) backlink.set("href", "#" + self.makeFootnoteRefId(id))
if self.md.output_format not in ['html5', 'xhtml5']: if self.md.output_format not in ['html5', 'xhtml5']:
backlink.set("rev", "footnote") # Invalid in HTML5 backlink.set("rev", "footnote") # Invalid in HTML5
backlink.set("class", "footnote-backref") backlink.set("class", "footnote-backref")
backlink.set("title", "Jump back to footnote %d in the text" % \ backlink.set(
(self.footnotes.index(id)+1)) "title",
"Jump back to footnote %d in the text" %
(self.footnotes.index(id)+1)
)
backlink.text = FN_BACKLINK_TEXT backlink.text = FN_BACKLINK_TEXT
if li.getchildren(): if li.getchildren():
@ -165,7 +169,7 @@ class FootnoteExtension(Extension):
class FootnotePreprocessor(Preprocessor): class FootnotePreprocessor(Preprocessor):
""" Find all footnote references and store for later use. """ """ Find all footnote references and store for later use. """
def __init__ (self, footnotes): def __init__(self, footnotes):
self.footnotes = footnotes self.footnotes = footnotes
def run(self, lines): def run(self, lines):
@ -186,7 +190,7 @@ class FootnotePreprocessor(Preprocessor):
if m: if m:
fn, _i = self.detectTabbed(lines[i+1:]) fn, _i = self.detectTabbed(lines[i+1:])
fn.insert(0, m.group(2)) fn.insert(0, m.group(2))
i += _i-1 # skip past footnote i += _i-1 # skip past footnote
self.footnotes.setFootnote(m.group(1), "\n".join(fn)) self.footnotes.setFootnote(m.group(1), "\n".join(fn))
else: else:
newlines.append(lines[i]) newlines.append(lines[i])
@ -207,16 +211,16 @@ class FootnotePreprocessor(Preprocessor):
""" """
items = [] items = []
blank_line = False # have we encountered a blank line yet? blank_line = False # have we encountered a blank line yet?
i = 0 # to keep track of where we are i = 0 # to keep track of where we are
def detab(line): def detab(line):
match = TABBED_RE.match(line) match = TABBED_RE.match(line)
if match: if match:
return match.group(4) return match.group(4)
for line in lines: for line in lines:
if line.strip(): # Non-blank line if line.strip(): # Non-blank line
detabbed_line = detab(line) detabbed_line = detab(line)
if detabbed_line: if detabbed_line:
items.append(detabbed_line) items.append(detabbed_line)
@ -230,23 +234,24 @@ class FootnotePreprocessor(Preprocessor):
else: else:
return items, i+1 return items, i+1
else: # Blank line: _maybe_ we are done. else: # Blank line: _maybe_ we are done.
blank_line = True blank_line = True
i += 1 # advance i += 1 # advance
# Find the next non-blank line # Find the next non-blank line
for j in range(i, len(lines)): for j in range(i, len(lines)):
if lines[j].strip(): if lines[j].strip():
next_line = lines[j]; break next_line = lines[j]
break
else: else:
break # There is no more text; we are done. break # There is no more text; we are done.
# Check if the next non-blank line is tabbed # Check if the next non-blank line is tabbed
if detab(next_line): # Yes, more work to do. if detab(next_line): # Yes, more work to do.
items.append("") items.append("")
continue continue
else: else:
break # No, we are done. break # No, we are done.
else: else:
i += 1 i += 1
@ -268,7 +273,7 @@ class FootnotePattern(Pattern):
sup.set('id', self.footnotes.makeFootnoteRefId(id)) sup.set('id', self.footnotes.makeFootnoteRefId(id))
a.set('href', '#' + self.footnotes.makeFootnoteId(id)) a.set('href', '#' + self.footnotes.makeFootnoteId(id))
if self.footnotes.md.output_format not in ['html5', 'xhtml5']: if self.footnotes.md.output_format not in ['html5', 'xhtml5']:
a.set('rel', 'footnote') # invalid in HTML5 a.set('rel', 'footnote') # invalid in HTML5
a.set('class', 'footnote-ref') a.set('class', 'footnote-ref')
a.text = text_type(self.footnotes.footnotes.index(id) + 1) a.text = text_type(self.footnotes.footnotes.index(id) + 1)
return sup return sup
@ -279,12 +284,12 @@ class FootnotePattern(Pattern):
class FootnoteTreeprocessor(Treeprocessor): class FootnoteTreeprocessor(Treeprocessor):
""" Build and append footnote div to end of document. """ """ Build and append footnote div to end of document. """
def __init__ (self, footnotes): def __init__(self, footnotes):
self.footnotes = footnotes self.footnotes = footnotes
def run(self, root): def run(self, root):
footnotesDiv = self.footnotes.makeFootnotesDiv(root) footnotesDiv = self.footnotes.makeFootnotesDiv(root)
if footnotesDiv: if footnotesDiv is not None:
result = self.footnotes.findFootnotesPlaceholder(root) result = self.footnotes.findFootnotesPlaceholder(root)
if result: if result:
child, parent, isText = result child, parent, isText = result
@ -298,16 +303,19 @@ class FootnoteTreeprocessor(Treeprocessor):
else: else:
root.append(footnotesDiv) root.append(footnotesDiv)
class FootnotePostprocessor(Postprocessor): class FootnotePostprocessor(Postprocessor):
""" Replace placeholders with html entities. """ """ Replace placeholders with html entities. """
def __init__(self, footnotes): def __init__(self, footnotes):
self.footnotes = footnotes self.footnotes = footnotes
def run(self, text): def run(self, text):
text = text.replace(FN_BACKLINK_TEXT, self.footnotes.getConfig("BACKLINK_TEXT")) text = text.replace(
FN_BACKLINK_TEXT, self.footnotes.getConfig("BACKLINK_TEXT")
)
return text.replace(NBSP_PLACEHOLDER, "&#160;") return text.replace(NBSP_PLACEHOLDER, "&#160;")
def makeExtension(configs=[]):
""" Return an instance of the FootnoteExtension """
return FootnoteExtension(configs=configs)
def makeExtension(*args, **kwargs):
""" Return an instance of the FootnoteExtension """
return FootnoteExtension(*args, **kwargs)
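A usage sketch combining the footnote syntax from the removed docstring with the new keyword-style configuration (sample text is illustrative):

import markdown
from markdown.extensions.footnotes import FootnoteExtension

text = """Footnotes[^1] have a label[^label].

[^1]: This is a footnote.
[^label]: A footnote on "label"."""

html = markdown.markdown(text, extensions=[FootnoteExtension(UNIQUE_IDS=True)])
# With UNIQUE_IDS on, ids get a per-document numeric prefix, e.g. fn:1-1
# (or fn-1-1 when the output format is html5/xhtml5).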
View File
@ -4,73 +4,14 @@ HeaderID Extension for Python-Markdown
Auto-generate id attributes for HTML headers. Auto-generate id attributes for HTML headers.
Basic usage: See <https://pythonhosted.org/Markdown/extensions/header_id.html>
for documentation.
>>> import markdown Original code Copyright 2007-2011 [Waylan Limberg](http://achinghead.com/).
>>> text = "# Some Header #"
>>> md = markdown.markdown(text, ['headerid'])
>>> print md
<h1 id="some-header">Some Header</h1>
All header IDs are unique: All changes Copyright 2011-2014 The Python Markdown Project
>>> text = ''' License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
... #Header
... #Header
... #Header'''
>>> md = markdown.markdown(text, ['headerid'])
>>> print md
<h1 id="header">Header</h1>
<h1 id="header_1">Header</h1>
<h1 id="header_2">Header</h1>
To fit within a html template's hierarchy, set the header base level:
>>> text = '''
... #Some Header
... ## Next Level'''
>>> md = markdown.markdown(text, ['headerid(level=3)'])
>>> print md
<h3 id="some-header">Some Header</h3>
<h4 id="next-level">Next Level</h4>
Works with inline markup.
>>> text = '#Some *Header* with [markup](http://example.com).'
>>> md = markdown.markdown(text, ['headerid'])
>>> print md
<h1 id="some-header-with-markup">Some <em>Header</em> with <a href="http://example.com">markup</a>.</h1>
Turn off auto generated IDs:
>>> text = '''
... # Some Header
... # Another Header'''
>>> md = markdown.markdown(text, ['headerid(forceid=False)'])
>>> print md
<h1>Some Header</h1>
<h1>Another Header</h1>
Use with MetaData extension:
>>> text = '''header_level: 2
... header_forceid: Off
...
... # A Header'''
>>> md = markdown.markdown(text, ['headerid', 'meta'])
>>> print md
<h2>A Header</h2>
Copyright 2007-2011 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/header_id.html>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
""" """
@ -78,47 +19,9 @@ from __future__ import absolute_import
from __future__ import unicode_literals from __future__ import unicode_literals
from . import Extension from . import Extension
from ..treeprocessors import Treeprocessor from ..treeprocessors import Treeprocessor
import re from ..util import parseBoolValue
import logging from .toc import slugify, unique, stashedHTML2text
import unicodedata import warnings
logger = logging.getLogger('MARKDOWN')
IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
def slugify(value, separator):
""" Slugify a string, to make it URL friendly. """
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = re.sub('[^\w\s-]', '', value.decode('ascii')).strip().lower()
return re.sub('[%s\s]+' % separator, separator, value)
def unique(id, ids):
""" Ensure id is unique in set of ids. Append '_1', '_2'... if not """
while id in ids or not id:
m = IDCOUNT_RE.match(id)
if m:
id = '%s_%d'% (m.group(1), int(m.group(2))+1)
else:
id = '%s_%d'% (id, 1)
ids.add(id)
return id
def itertext(elem):
""" Loop through all children and return text only.
Reimplements method of same name added to ElementTree in Python 2.7
"""
if elem.text:
yield elem.text
for e in elem:
for s in itertext(e):
yield s
if e.tail:
yield e.tail
class HeaderIdTreeprocessor(Treeprocessor): class HeaderIdTreeprocessor(Treeprocessor):
@ -130,13 +33,14 @@ class HeaderIdTreeprocessor(Treeprocessor):
start_level, force_id = self._get_meta() start_level, force_id = self._get_meta()
slugify = self.config['slugify'] slugify = self.config['slugify']
sep = self.config['separator'] sep = self.config['separator']
for elem in doc.getiterator(): for elem in doc:
if elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']: if elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
if force_id: if force_id:
if "id" in elem.attrib: if "id" in elem.attrib:
id = elem.get('id') id = elem.get('id')
else: else:
id = slugify(''.join(itertext(elem)), sep) id = stashedHTML2text(''.join(elem.itertext()), self.md)
id = slugify(id, sep)
elem.set('id', unique(id, self.IDs)) elem.set('id', unique(id, self.IDs))
if start_level: if start_level:
level = int(elem.tag[-1]) + start_level level = int(elem.tag[-1]) + start_level
@ -144,40 +48,34 @@ class HeaderIdTreeprocessor(Treeprocessor):
level = 6 level = 6
elem.tag = 'h%d' % level elem.tag = 'h%d' % level
def _get_meta(self): def _get_meta(self):
""" Return meta data suported by this ext as a tuple """ """ Return meta data suported by this ext as a tuple """
level = int(self.config['level']) - 1 level = int(self.config['level']) - 1
force = self._str2bool(self.config['forceid']) force = parseBoolValue(self.config['forceid'])
if hasattr(self.md, 'Meta'): if hasattr(self.md, 'Meta'):
if 'header_level' in self.md.Meta: if 'header_level' in self.md.Meta:
level = int(self.md.Meta['header_level'][0]) - 1 level = int(self.md.Meta['header_level'][0]) - 1
if 'header_forceid' in self.md.Meta: if 'header_forceid' in self.md.Meta:
force = self._str2bool(self.md.Meta['header_forceid'][0]) force = parseBoolValue(self.md.Meta['header_forceid'][0])
return level, force return level, force
def _str2bool(self, s, default=False):
""" Convert a string to a booleen value. """
s = str(s)
if s.lower() in ['0', 'f', 'false', 'off', 'no', 'n']:
return False
elif s.lower() in ['1', 't', 'true', 'on', 'yes', 'y']:
return True
return default
class HeaderIdExtension(Extension): class HeaderIdExtension(Extension):
def __init__(self, configs): def __init__(self, *args, **kwargs):
# set defaults # set defaults
self.config = { self.config = {
'level' : ['1', 'Base level for headers.'], 'level': ['1', 'Base level for headers.'],
'forceid' : ['True', 'Force all headers to have an id.'], 'forceid': ['True', 'Force all headers to have an id.'],
'separator' : ['-', 'Word separator.'], 'separator': ['-', 'Word separator.'],
'slugify' : [slugify, 'Callable to generate anchors'], 'slugify': [slugify, 'Callable to generate anchors']
} }
for key, value in configs: super(HeaderIdExtension, self).__init__(*args, **kwargs)
self.setConfig(key, value)
warnings.warn(
'The HeaderId Extension is pending deprecation. Use the TOC Extension instead.',
PendingDeprecationWarning
)
def extendMarkdown(self, md, md_globals): def extendMarkdown(self, md, md_globals):
md.registerExtension(self) md.registerExtension(self)
@ -195,5 +93,5 @@ class HeaderIdExtension(Extension):
self.processor.IDs = set() self.processor.IDs = set()
def makeExtension(configs=None): def makeExtension(*args, **kwargs):
return HeaderIdExtension(configs=configs) return HeaderIdExtension(*args, **kwargs)
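The slugify/unique pair that headerid now imports from the toc extension behaves like the removed local helpers; a small sketch:

from markdown.extensions.toc import slugify, unique

ids = set()
unique(slugify('Some Header', '-'), ids)   # -> 'some-header'
unique(slugify('Some Header', '-'), ids)   # -> 'some-header_1' (collision)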
View File
@ -4,38 +4,14 @@ Meta Data Extension for Python-Markdown
This extension adds Meta Data handling to markdown. This extension adds Meta Data handling to markdown.
Basic Usage: See <https://pythonhosted.org/Markdown/extensions/meta_data.html>
for documentation.
>>> import markdown Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
>>> text = '''Title: A Test Doc.
... Author: Waylan Limberg
... John Doe
... Blank_Data:
...
... The body. This is paragraph one.
... '''
>>> md = markdown.Markdown(['meta'])
>>> print md.convert(text)
<p>The body. This is paragraph one.</p>
>>> print md.Meta
{u'blank_data': [u''], u'author': [u'Waylan Limberg', u'John Doe'], u'title': [u'A Test Doc.']}
Make sure text without Meta Data still works (markdown < 1.6b returns a <p>). All changes Copyright 2008-2014 The Python Markdown Project
>>> text = ' Some Code - not extra lines of meta data.' License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
>>> md = markdown.Markdown(['meta'])
>>> print md.convert(text)
<pre><code>Some Code - not extra lines of meta data.
</code></pre>
>>> md.Meta
{}
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
Project website: <http://packages.python.org/Markdown/meta_data.html>
Contact: markdown@freewisdom.org
License: BSD (see ../LICENSE.md for details)
""" """
@ -44,18 +20,25 @@ from __future__ import unicode_literals
from . import Extension from . import Extension
from ..preprocessors import Preprocessor from ..preprocessors import Preprocessor
import re import re
import logging
log = logging.getLogger('MARKDOWN')
# Global Vars # Global Vars
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)') META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)') META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
BEGIN_RE = re.compile(r'^-{3}(\s.*)?')
END_RE = re.compile(r'^(-{3}|\.{3})(\s.*)?')
class MetaExtension (Extension): class MetaExtension (Extension):
""" Meta-Data extension for Python-Markdown. """ """ Meta-Data extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals): def extendMarkdown(self, md, md_globals):
""" Add MetaPreprocessor to Markdown instance. """ """ Add MetaPreprocessor to Markdown instance. """
md.preprocessors.add("meta",
md.preprocessors.add("meta", MetaPreprocessor(md), "_begin") MetaPreprocessor(md),
">normalize_whitespace")
class MetaPreprocessor(Preprocessor): class MetaPreprocessor(Preprocessor):
@ -65,11 +48,13 @@ class MetaPreprocessor(Preprocessor):
""" Parse Meta-Data and store in Markdown.Meta. """ """ Parse Meta-Data and store in Markdown.Meta. """
meta = {} meta = {}
key = None key = None
while 1: if lines and BEGIN_RE.match(lines[0]):
lines.pop(0)
while lines:
line = lines.pop(0) line = lines.pop(0)
if line.strip() == '':
break # blank line - done
m1 = META_RE.match(line) m1 = META_RE.match(line)
if line.strip() == '' or END_RE.match(line):
break # blank line or end of YAML header - done
if m1: if m1:
key = m1.group('key').lower().strip() key = m1.group('key').lower().strip()
value = m1.group('value').strip() value = m1.group('value').strip()
@ -84,10 +69,10 @@ class MetaPreprocessor(Preprocessor):
meta[key].append(m2.group('value').strip()) meta[key].append(m2.group('value').strip())
else: else:
lines.insert(0, line) lines.insert(0, line)
break # no meta data - done break # no meta data - done
self.markdown.Meta = meta self.markdown.Meta = meta
return lines return lines
def makeExtension(configs={}):
return MetaExtension(configs=configs) def makeExtension(*args, **kwargs):
return MetaExtension(*args, **kwargs)
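For reference, a short usage sketch of the rewritten preprocessor, including the optional YAML-style `---`/`...` delimiters it now recognizes. The import path is an assumption (the bundled copy may sit under another package):

```python
import markdown

text = """---
Title: A Test Doc
Author: Waylan Limberg
        John Doe
...

The body. This is paragraph one.
"""

md = markdown.Markdown(extensions=['markdown.extensions.meta'])
html = md.convert(text)
# Keys are lowercased; lines indented by four or more spaces continue
# the previous key.
print(md.Meta['title'])   # ['A Test Doc']
print(md.Meta['author'])  # ['Waylan Limberg', 'John Doe']
```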
@ -5,18 +5,14 @@ NL2BR Extension
A Python-Markdown extension to treat newlines as hard breaks; like A Python-Markdown extension to treat newlines as hard breaks; like
GitHub-flavored Markdown does. GitHub-flavored Markdown does.
Usage: See <https://pythonhosted.org/Markdown/extensions/nl2br.html>
for documentation.
>>> import markdown Original code Copyright 2011 [Brian Neal](http://deathofagremmie.com/)
>>> print markdown.markdown('line 1\\nline 2', extensions=['nl2br'])
<p>line 1<br />
line 2</p>
Copyright 2011 [Brian Neal](http://deathofagremmie.com/) All changes Copyright 2011-2014 The Python Markdown Project
Dependencies: License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
* [Python 2.4+](http://python.org)
* [Markdown 2.1+](http://packages.python.org/Markdown/)
""" """
@ -27,6 +23,7 @@ from ..inlinepatterns import SubstituteTagPattern
BR_RE = r'\n' BR_RE = r'\n'
class Nl2BrExtension(Extension): class Nl2BrExtension(Extension):
def extendMarkdown(self, md, md_globals): def extendMarkdown(self, md, md_globals):
@ -34,5 +31,5 @@ class Nl2BrExtension(Extension):
md.inlinePatterns.add('nl', br_tag, '_end') md.inlinePatterns.add('nl', br_tag, '_end')
def makeExtension(configs=None): def makeExtension(*args, **kwargs):
return Nl2BrExtension(configs) return Nl2BrExtension(*args, **kwargs)
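A one-line usage sketch mirroring the doctest that was dropped from the docstring (import path assumed):

```python
import markdown

print(markdown.markdown('line 1\nline 2', extensions=['markdown.extensions.nl2br']))
# <p>line 1<br />
# line 2</p>
```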
@ -2,19 +2,16 @@
Sane List Extension for Python-Markdown Sane List Extension for Python-Markdown
======================================= =======================================
Modify the behavior of Lists in Python-Markdown t act in a sane manner. Modify the behavior of Lists in Python-Markdown to act in a sane manner.
In standard Markdown sytex, the following would constitute a single See <https://pythonhosted.org/Markdown/extensions/sane_lists.html>
ordered list. However, with this extension, the output would include for documentation.
two lists, the first an ordered list and the second and unordered list.
1. ordered Original code Copyright 2011 [Waylan Limberg](http://achinghead.com)
2. list
* unordered All changes Copyright 2011-2014 The Python Markdown Project
* list
Copyright 2011 - [Waylan Limberg](http://achinghead.com) License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
""" """
@ -26,16 +23,24 @@ import re
class SaneOListProcessor(OListProcessor): class SaneOListProcessor(OListProcessor):
CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.))[ ]+(.*)')
SIBLING_TAGS = ['ol'] SIBLING_TAGS = ['ol']
def __init__(self, parser):
super(SaneOListProcessor, self).__init__(parser)
self.CHILD_RE = re.compile(r'^[ ]{0,%d}((\d+\.))[ ]+(.*)' %
(self.tab_length - 1))
class SaneUListProcessor(UListProcessor): class SaneUListProcessor(UListProcessor):
CHILD_RE = re.compile(r'^[ ]{0,3}(([*+-]))[ ]+(.*)')
SIBLING_TAGS = ['ul'] SIBLING_TAGS = ['ul']
def __init__(self, parser):
super(SaneUListProcessor, self).__init__(parser)
self.CHILD_RE = re.compile(r'^[ ]{0,%d}(([*+-]))[ ]+(.*)' %
(self.tab_length - 1))
class SaneListExtension(Extension): class SaneListExtension(Extension):
""" Add sane lists to Markdown. """ """ Add sane lists to Markdown. """
@ -46,6 +51,5 @@ class SaneListExtension(Extension):
md.parser.blockprocessors['ulist'] = SaneUListProcessor(md.parser) md.parser.blockprocessors['ulist'] = SaneUListProcessor(md.parser)
def makeExtension(configs={}): def makeExtension(*args, **kwargs):
return SaneListExtension(configs=configs) return SaneListExtension(*args, **kwargs)
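The behaviour the removed docstring described, as a runnable sketch (import path assumed): a list is no longer continued by a following list that uses a different marker.

```python
import markdown

text = """\
1. ordered
2. list

* unordered
* list"""

# With sane_lists this renders as two lists: an <ol> followed by a <ul>.
print(markdown.markdown(text, extensions=['markdown.extensions.sane_lists']))
```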
@ -4,21 +4,14 @@ Smart_Strong Extension for Python-Markdown
This extension adds smarter handling of double underscores within words. This extension adds smarter handling of double underscores within words.
Simple Usage: See <https://pythonhosted.org/Markdown/extensions/smart_strong.html>
for documentation.
>>> import markdown Original code Copyright 2011 [Waylan Limberg](http://achinghead.com)
>>> print markdown.markdown('Text with double__underscore__words.',
... extensions=['smart_strong'])
<p>Text with double__underscore__words.</p>
>>> print markdown.markdown('__Strong__ still works.',
... extensions=['smart_strong'])
<p><strong>Strong</strong> still works.</p>
>>> print markdown.markdown('__this__works__too__.',
... extensions=['smart_strong'])
<p><strong>this__works__too</strong>.</p>
Copyright 2011 All changes Copyright 2011-2014 The Python Markdown Project
[Waylan Limberg](http://achinghead.com)
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
''' '''
@ -30,13 +23,19 @@ from ..inlinepatterns import SimpleTagPattern
SMART_STRONG_RE = r'(?<!\w)(_{2})(?!_)(.+?)(?<!_)\2(?!\w)' SMART_STRONG_RE = r'(?<!\w)(_{2})(?!_)(.+?)(?<!_)\2(?!\w)'
STRONG_RE = r'(\*{2})(.+?)\2' STRONG_RE = r'(\*{2})(.+?)\2'
class SmartEmphasisExtension(Extension): class SmartEmphasisExtension(Extension):
""" Add smart_emphasis extension to Markdown class.""" """ Add smart_emphasis extension to Markdown class."""
def extendMarkdown(self, md, md_globals): def extendMarkdown(self, md, md_globals):
""" Modify inline patterns. """ """ Modify inline patterns. """
md.inlinePatterns['strong'] = SimpleTagPattern(STRONG_RE, 'strong') md.inlinePatterns['strong'] = SimpleTagPattern(STRONG_RE, 'strong')
md.inlinePatterns.add('strong2', SimpleTagPattern(SMART_STRONG_RE, 'strong'), '>emphasis2') md.inlinePatterns.add(
'strong2',
SimpleTagPattern(SMART_STRONG_RE, 'strong'),
'>emphasis2'
)
def makeExtension(configs={}):
return SmartEmphasisExtension(configs=dict(configs)) def makeExtension(*args, **kwargs):
return SmartEmphasisExtension(*args, **kwargs)
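The dropped doctests reduce to the following behaviour; a quick sketch assuming the package imports as `markdown`:

```python
import markdown

ext = ['markdown.extensions.smart_strong']
print(markdown.markdown('Text with double__underscore__words.', extensions=ext))
# <p>Text with double__underscore__words.</p>
print(markdown.markdown('__Strong__ still works.', extensions=ext))
# <p><strong>Strong</strong> still works.</p>
```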
@ -4,29 +4,32 @@ Tables Extension for Python-Markdown
Added parsing of tables to Python-Markdown. Added parsing of tables to Python-Markdown.
A simple example: See <https://pythonhosted.org/Markdown/extensions/tables.html>
for documentation.
First Header | Second Header Original code Copyright 2009 [Waylan Limberg](http://achinghead.com)
------------- | -------------
Content Cell | Content Cell All changes Copyright 2008-2014 The Python Markdown Project
Content Cell | Content Cell
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
Copyright 2009 - [Waylan Limberg](http://achinghead.com)
""" """
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import unicode_literals from __future__ import unicode_literals
from . import Extension from . import Extension
from ..blockprocessors import BlockProcessor from ..blockprocessors import BlockProcessor
from ..inlinepatterns import BacktickPattern, BACKTICK_RE
from ..util import etree from ..util import etree
class TableProcessor(BlockProcessor): class TableProcessor(BlockProcessor):
""" Process Tables. """ """ Process Tables. """
def test(self, parent, block): def test(self, parent, block):
rows = block.split('\n') rows = block.split('\n')
return (len(rows) > 2 and '|' in rows[0] and return (len(rows) > 1 and '|' in rows[0] and
'|' in rows[1] and '-' in rows[1] and '|' in rows[1] and '-' in rows[1] and
rows[1].strip()[0] in ['|', ':', '-']) rows[1].strip()[0] in ['|', ':', '-'])
def run(self, parent, blocks): def run(self, parent, blocks):
@ -34,7 +37,7 @@ class TableProcessor(BlockProcessor):
block = blocks.pop(0).split('\n') block = blocks.pop(0).split('\n')
header = block[0].strip() header = block[0].strip()
seperator = block[1].strip() seperator = block[1].strip()
rows = block[2:] rows = [] if len(block) < 3 else block[2:]
# Get format type (bordered by pipes or not) # Get format type (bordered by pipes or not)
border = False border = False
if header.startswith('|'): if header.startswith('|'):
@ -65,13 +68,17 @@ class TableProcessor(BlockProcessor):
if parent.tag == 'thead': if parent.tag == 'thead':
tag = 'th' tag = 'th'
cells = self._split_row(row, border) cells = self._split_row(row, border)
# We use align here rather than cells to ensure every row # We use align here rather than cells to ensure every row
# contains the same number of columns. # contains the same number of columns.
for i, a in enumerate(align): for i, a in enumerate(align):
c = etree.SubElement(tr, tag) c = etree.SubElement(tr, tag)
try: try:
c.text = cells[i].strip() if isinstance(cells[i], str) or isinstance(cells[i], unicode):
except IndexError: c.text = cells[i].strip()
else:
# we've already inserted a code element
c.append(cells[i])
except IndexError: # pragma: no cover
c.text = "" c.text = ""
if a: if a:
c.set('align', a) c.set('align', a)
@ -83,7 +90,49 @@ class TableProcessor(BlockProcessor):
row = row[1:] row = row[1:]
if row.endswith('|'): if row.endswith('|'):
row = row[:-1] row = row[:-1]
return row.split('|') return self._split(row, '|')
def _split(self, row, marker):
""" split a row of text with some code into a list of cells. """
if self._row_has_unpaired_backticks(row):
# fallback on old behaviour
return row.split(marker)
# modify the backtick pattern to only match at the beginning of the search string
backtick_pattern = BacktickPattern('^' + BACKTICK_RE)
elements = []
current = ''
i = 0
while i < len(row):
letter = row[i]
if letter == marker:
if current != '' or len(elements) == 0:
# Don't append empty string unless it is the first element
# The border is already removed when we get the row, then the line is strip()'d
# If the first element is a marker, then we have an empty first cell
elements.append(current)
current = ''
else:
match = backtick_pattern.getCompiledRegExp().match(row[i:])
if not match:
current += letter
else:
groups = match.groups()
delim = groups[1] # the code block delimiter (i.e. 1 or more backticks)
row_contents = groups[2] # the text contained inside the code block
i += match.start(4) # jump pointer to the beginning of the rest of the text (group #4)
element = delim + row_contents + delim # reinsert backticks
current += element
i += 1
elements.append(current)
return elements
def _row_has_unpaired_backticks(self, row):
count_total_backtick = row.count('`')
count_escaped_backtick = row.count('\`')
count_backtick = count_total_backtick - count_escaped_backtick
# odd number of backticks,
# we won't be able to build correct code blocks
return count_backtick & 1
class TableExtension(Extension): class TableExtension(Extension):
@ -91,10 +140,10 @@ class TableExtension(Extension):
def extendMarkdown(self, md, md_globals): def extendMarkdown(self, md, md_globals):
""" Add an instance of TableProcessor to BlockParser. """ """ Add an instance of TableProcessor to BlockParser. """
md.parser.blockprocessors.add('table', md.parser.blockprocessors.add('table',
TableProcessor(md.parser), TableProcessor(md.parser),
'<hashheader') '<hashheader')
def makeExtension(configs={}): def makeExtension(*args, **kwargs):
return TableExtension(configs=configs) return TableExtension(*args, **kwargs)
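The new `_split()` treats backtick code spans as atomic while scanning for `|`, so a pipe inside inline code no longer splits the cell. A small sketch (import path assumed, output elided):

```python
import markdown

text = """\
Example            | Meaning
------------------ | ------------------------------------
`'a|b'.split('|')` | the pipes stay inside the code span"""

# The | characters inside the backticks are not treated as column
# separators, so the data row still has exactly two cells.
print(markdown.markdown(text, extensions=['markdown.extensions.tables']))
```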
@ -1,11 +1,15 @@
""" """
Table of Contents Extension for Python-Markdown Table of Contents Extension for Python-Markdown
* * * ===============================================
(c) 2008 [Jack Miller](http://codezen.org) See <https://pythonhosted.org/Markdown/extensions/toc.html>
for documentation.
Dependencies: Original code Copyright 2008 [Jack Miller](http://codezen.org)
* [Markdown 2.1+](http://packages.python.org/Markdown/)
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
""" """
@ -13,99 +17,192 @@ from __future__ import absolute_import
from __future__ import unicode_literals from __future__ import unicode_literals
from . import Extension from . import Extension
from ..treeprocessors import Treeprocessor from ..treeprocessors import Treeprocessor
from ..util import etree from ..util import etree, parseBoolValue, AMP_SUBSTITUTE, HTML_PLACEHOLDER_RE, string_type
from .headerid import slugify, unique, itertext
import re import re
import unicodedata
def order_toc_list(toc_list): def slugify(value, separator):
""" Slugify a string, to make it URL friendly. """
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = re.sub('[^\w\s-]', '', value.decode('ascii')).strip().lower()
return re.sub('[%s\s]+' % separator, separator, value)
IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
def unique(id, ids):
""" Ensure id is unique in set of ids. Append '_1', '_2'... if not """
while id in ids or not id:
m = IDCOUNT_RE.match(id)
if m:
id = '%s_%d' % (m.group(1), int(m.group(2))+1)
else:
id = '%s_%d' % (id, 1)
ids.add(id)
return id
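These helpers were folded in from the headerid extension and can be exercised directly; a small sketch, assuming the module is importable as `markdown.extensions.toc`:

```python
from markdown.extensions.toc import slugify, unique

ids = set()
print(slugify('Overview & Getting Started', '-'))  # overview-getting-started
print(unique('overview-getting-started', ids))     # overview-getting-started
print(unique('overview-getting-started', ids))     # overview-getting-started_1
```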
def stashedHTML2text(text, md):
""" Extract raw HTML from stash, reduce to plain text and swap with placeholder. """
def _html_sub(m):
""" Substitute raw html with plain text. """
try:
raw, safe = md.htmlStash.rawHtmlBlocks[int(m.group(1))]
except (IndexError, TypeError): # pragma: no cover
return m.group(0)
if md.safeMode and not safe: # pragma: no cover
return ''
# Strip out tags and entities - leaving text
return re.sub(r'(<[^>]+>)|(&[\#a-zA-Z0-9]+;)', '', raw)
return HTML_PLACEHOLDER_RE.sub(_html_sub, text)
def nest_toc_tokens(toc_list):
"""Given an unsorted list with errors and skips, return a nested one. """Given an unsorted list with errors and skips, return a nested one.
[{'level': 1}, {'level': 2}] [{'level': 1}, {'level': 2}]
=> =>
[{'level': 1, 'children': [{'level': 2, 'children': []}]}] [{'level': 1, 'children': [{'level': 2, 'children': []}]}]
A wrong list is also converted: A wrong list is also converted:
[{'level': 2}, {'level': 1}] [{'level': 2}, {'level': 1}]
=> =>
[{'level': 2, 'children': []}, {'level': 1, 'children': []}] [{'level': 2, 'children': []}, {'level': 1, 'children': []}]
""" """
def build_correct(remaining_list, prev_elements=[{'level': 1000}]): ordered_list = []
if len(toc_list):
if not remaining_list: # Initialize everything by processing the first entry
return [], [] last = toc_list.pop(0)
last['children'] = []
current = remaining_list.pop(0) levels = [last['level']]
if not 'children' in current.keys(): ordered_list.append(last)
current['children'] = [] parents = []
if not prev_elements: # Walk the rest nesting the entries properly
# This happens for instance with [8, 1, 1], ie. when some while toc_list:
# header level is outside a scope. We treat it as a t = toc_list.pop(0)
# top-level current_level = t['level']
next_elements, children = build_correct(remaining_list, [current]) t['children'] = []
current['children'].append(children)
return [current] + next_elements, [] # Reduce depth if current level < last item's level
if current_level < levels[-1]:
prev_element = prev_elements.pop() # Pop last level since we know we are less than it
children = [] levels.pop()
next_elements = []
# Is current part of the child list or next list? # Pop parents and levels we are less than or equal to
if current['level'] > prev_element['level']: to_pop = 0
#print "%d is a child of %d" % (current['level'], prev_element['level']) for p in reversed(parents):
prev_elements.append(prev_element) if current_level <= p['level']:
prev_elements.append(current) to_pop += 1
prev_element['children'].append(current) else: # pragma: no cover
next_elements2, children2 = build_correct(remaining_list, prev_elements) break
children += children2 if to_pop:
next_elements += next_elements2 levels = levels[:-to_pop]
else: parents = parents[:-to_pop]
#print "%d is ancestor of %d" % (current['level'], prev_element['level'])
if not prev_elements: # Note current level as last
#print "No previous elements, so appending to the next set" levels.append(current_level)
next_elements.append(current)
prev_elements = [current] # Level is the same, so append to
next_elements2, children2 = build_correct(remaining_list, prev_elements) # the current parent (if available)
current['children'].extend(children2) if current_level == levels[-1]:
(parents[-1]['children'] if parents
else ordered_list).append(t)
# Current level is > last item's level,
# So make last item a parent and append current as child
else: else:
#print "Previous elements, comparing to those first" last['children'].append(t)
remaining_list.insert(0, current) parents.append(last)
next_elements2, children2 = build_correct(remaining_list, prev_elements) levels.append(current_level)
children.extend(children2) last = t
next_elements += next_elements2
return next_elements, children
ordered_list, __ = build_correct(toc_list)
return ordered_list return ordered_list
class TocTreeprocessor(Treeprocessor): class TocTreeprocessor(Treeprocessor):
def __init__(self, md, config):
# Iterator wrapper to get parent and child all at once super(TocTreeprocessor, self).__init__(md)
self.marker = config["marker"]
self.title = config["title"]
self.base_level = int(config["baselevel"]) - 1
self.slugify = config["slugify"]
self.sep = config["separator"]
self.use_anchors = parseBoolValue(config["anchorlink"])
self.use_permalinks = parseBoolValue(config["permalink"], False)
if self.use_permalinks is None:
self.use_permalinks = config["permalink"]
self.header_rgx = re.compile("[Hh][123456]")
def iterparent(self, root): def iterparent(self, root):
for parent in root.getiterator(): ''' Iterator wrapper to get parent and child all at once. '''
for parent in root.iter():
for child in parent: for child in parent:
yield parent, child yield parent, child
def add_anchor(self, c, elem_id): #@ReservedAssignment def replace_marker(self, root, elem):
if self.use_anchors: ''' Replace marker with elem. '''
anchor = etree.Element("a") for (p, c) in self.iterparent(root):
anchor.text = c.text text = ''.join(c.itertext()).strip()
anchor.attrib["href"] = "#" + elem_id if not text:
anchor.attrib["class"] = "toclink" continue
c.text = ""
for elem in c.getchildren(): # To keep the output from screwing up the
anchor.append(elem) # validation by putting a <div> inside of a <p>
c.remove(elem) # we actually replace the <p> in its entirety.
c.append(anchor) # We do not allow the marker inside a header as that
# would cause an endless loop of placing a new TOC
def build_toc_etree(self, div, toc_list): # inside previously generated TOC.
if c.text and c.text.strip() == self.marker and \
not self.header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
for i in range(len(p)):
if p[i] == c:
p[i] = elem
break
def set_level(self, elem):
''' Adjust header level according to base level. '''
level = int(elem.tag[-1]) + self.base_level
if level > 6:
level = 6
elem.tag = 'h%d' % level
def add_anchor(self, c, elem_id): # @ReservedAssignment
anchor = etree.Element("a")
anchor.text = c.text
anchor.attrib["href"] = "#" + elem_id
anchor.attrib["class"] = "toclink"
c.text = ""
for elem in c:
anchor.append(elem)
c.remove(elem)
c.append(anchor)
def add_permalink(self, c, elem_id):
permalink = etree.Element("a")
permalink.text = ("%spara;" % AMP_SUBSTITUTE
if self.use_permalinks is True
else self.use_permalinks)
permalink.attrib["href"] = "#" + elem_id
permalink.attrib["class"] = "headerlink"
permalink.attrib["title"] = "Permanent link"
c.append(permalink)
def build_toc_div(self, toc_list):
""" Return a string div given a toc list. """
div = etree.Element("div")
div.attrib["class"] = "toc"
# Add title to the div # Add title to the div
if self.config["title"]: if self.title:
header = etree.SubElement(div, "span") header = etree.SubElement(div, "span")
header.attrib["class"] = "toctitle" header.attrib["class"] = "toctitle"
header.text = self.config["title"] header.text = self.title
def build_etree_ul(toc_list, parent): def build_etree_ul(toc_list, parent):
ul = etree.SubElement(parent, "ul") ul = etree.SubElement(parent, "ul")
@ -118,104 +215,95 @@ class TocTreeprocessor(Treeprocessor):
if item['children']: if item['children']:
build_etree_ul(item['children'], li) build_etree_ul(item['children'], li)
return ul return ul
return build_etree_ul(toc_list, div)
def run(self, doc):
div = etree.Element("div") build_etree_ul(toc_list, div)
div.attrib["class"] = "toc" prettify = self.markdown.treeprocessors.get('prettify')
header_rgx = re.compile("[Hh][123456]") if prettify:
prettify.run(div)
self.use_anchors = self.config["anchorlink"] in [1, '1', True, 'True', 'true'] return div
def run(self, doc):
# Get a list of id attributes # Get a list of id attributes
used_ids = set() used_ids = set()
for c in doc.getiterator(): for el in doc.iter():
if "id" in c.attrib: if "id" in el.attrib:
used_ids.add(c.attrib["id"]) used_ids.add(el.attrib["id"])
toc_list = [] toc_tokens = []
marker_found = False for el in doc.iter():
for (p, c) in self.iterparent(doc): if isinstance(el.tag, string_type) and self.header_rgx.match(el.tag):
text = ''.join(itertext(c)).strip() self.set_level(el)
if not text: text = ''.join(el.itertext()).strip()
continue
# To keep the output from screwing up the # Do not override pre-existing ids
# validation by putting a <div> inside of a <p> if "id" not in el.attrib:
# we actually replace the <p> in its entirety. innertext = stashedHTML2text(text, self.markdown)
# We do not allow the marker inside a header as that el.attrib["id"] = unique(self.slugify(innertext, self.sep), used_ids)
# would causes an enless loop of placing a new TOC
# inside previously generated TOC.
if c.text and c.text.strip() == self.config["marker"] and \
not header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
for i in range(len(p)):
if p[i] == c:
p[i] = div
break
marker_found = True
if header_rgx.match(c.tag):
# Do not override pre-existing ids
if not "id" in c.attrib:
elem_id = unique(self.config["slugify"](text, '-'), used_ids)
c.attrib["id"] = elem_id
else:
elem_id = c.attrib["id"]
tag_level = int(c.tag[-1]) toc_tokens.append({
'level': int(el.tag[-1]),
toc_list.append({'level': tag_level, 'id': el.attrib["id"],
'id': elem_id, 'name': text
'name': text}) })
self.add_anchor(c, elem_id) if self.use_anchors:
self.add_anchor(el, el.attrib["id"])
toc_list_nested = order_toc_list(toc_list) if self.use_permalinks:
self.build_toc_etree(div, toc_list_nested) self.add_permalink(el, el.attrib["id"])
prettify = self.markdown.treeprocessors.get('prettify')
if prettify: prettify.run(div) div = self.build_toc_div(nest_toc_tokens(toc_tokens))
if not marker_found: if self.marker:
# serialize and attach to markdown instance. self.replace_marker(doc, div)
toc = self.markdown.serializer(div)
for pp in self.markdown.postprocessors.values(): # serialize and attach to markdown instance.
toc = pp.run(toc) toc = self.markdown.serializer(div)
self.markdown.toc = toc for pp in self.markdown.postprocessors.values():
toc = pp.run(toc)
self.markdown.toc = toc
class TocExtension(Extension): class TocExtension(Extension):
TreeProcessorClass = TocTreeprocessor
def __init__(self, configs=[]):
self.config = { "marker" : ["[TOC]",
"Text to find and replace with Table of Contents -"
"Defaults to \"[TOC]\""],
"slugify" : [slugify,
"Function to generate anchors based on header text-"
"Defaults to the headerid ext's slugify function."],
"title" : [None,
"Title to insert into TOC <div> - "
"Defaults to None"],
"anchorlink" : [0,
"1 if header should be a self link"
"Defaults to 0"]}
for key, value in configs: TreeProcessorClass = TocTreeprocessor
self.setConfig(key, value)
def __init__(self, *args, **kwargs):
self.config = {
"marker": ['[TOC]',
'Text to find and replace with Table of Contents - '
'Set to an empty string to disable. Defaults to "[TOC]"'],
"title": ["",
"Title to insert into TOC <div> - "
"Defaults to an empty string"],
"anchorlink": [False,
"True if header should be a self link - "
"Defaults to False"],
"permalink": [0,
"True or link text if a Sphinx-style permalink should "
"be added - Defaults to False"],
"baselevel": ['1', 'Base level for headers.'],
"slugify": [slugify,
"Function to generate anchors based on header text - "
"Defaults to the headerid ext's slugify function."],
'separator': ['-', 'Word separator. Defaults to "-".']
}
super(TocExtension, self).__init__(*args, **kwargs)
def extendMarkdown(self, md, md_globals): def extendMarkdown(self, md, md_globals):
tocext = self.TreeProcessorClass(md) md.registerExtension(self)
tocext.config = self.getConfigs() self.md = md
self.reset()
tocext = self.TreeProcessorClass(md, self.getConfigs())
# Headerid ext is set to '>prettify'. With this set to '_end', # Headerid ext is set to '>prettify'. With this set to '_end',
# it should always come after headerid ext (and honor ids assigned # by the header id extension) if both are used. Same goes for
# by the header id extension) if both are used. Same goes for # by the header id extension) if both are used. Same goes for
# attr_list extension. This must come last because we don't want # attr_list extension. This must come last because we don't want
# to redefine ids after toc is created. But we do want toc prettified. # to redefine ids after toc is created. But we do want toc prettified.
md.treeprocessors.add("toc", tocext, "_end") md.treeprocessors.add("toc", tocext, "_end")
def reset(self):
self.md.toc = ''
def makeExtension(configs={}):
return TocExtension(configs=configs) def makeExtension(*args, **kwargs):
return TocExtension(*args, **kwargs)
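Putting the new configuration options together, a usage sketch (import path assumed, serialized TOC abbreviated):

```python
import markdown
from markdown.extensions.toc import TocExtension

md = markdown.Markdown(extensions=[TocExtension(baselevel=2,
                                                permalink=True,
                                                title='Contents')])
body = md.convert('# Intro\n\nText.\n\n## Details\n\nMore text.')
# The rendered TOC is attached to the instance whether or not the
# [TOC] marker appears in the source.
print(md.toc)  # <div class="toc"><span class="toctitle">Contents</span>...</div>
```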
@ -2,78 +2,17 @@
WikiLinks Extension for Python-Markdown WikiLinks Extension for Python-Markdown
====================================== ======================================
Converts [[WikiLinks]] to relative links. Requires Python-Markdown 2.0+ Converts [[WikiLinks]] to relative links.
Basic usage: See <https://pythonhosted.org/Markdown/extensions/wikilinks.html>
for documentation.
>>> import markdown Original code Copyright [Waylan Limberg](http://achinghead.com/).
>>> text = "Some text with a [[WikiLink]]."
>>> html = markdown.markdown(text, ['wikilinks'])
>>> print html
<p>Some text with a <a class="wikilink" href="/WikiLink/">WikiLink</a>.</p>
Whitespace behavior: All changes Copyright The Python Markdown Project
>>> print markdown.markdown('[[ foo bar_baz ]]', ['wikilinks']) License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
<p><a class="wikilink" href="/foo_bar_baz/">foo bar_baz</a></p>
>>> print markdown.markdown('foo [[ ]] bar', ['wikilinks'])
<p>foo bar</p>
To define custom settings the simple way:
>>> print markdown.markdown(text,
... ['wikilinks(base_url=/wiki/,end_url=.html,html_class=foo)']
... )
<p>Some text with a <a class="foo" href="/wiki/WikiLink.html">WikiLink</a>.</p>
Custom settings the complex way:
>>> md = markdown.Markdown(
... extensions = ['wikilinks'],
... extension_configs = {'wikilinks': [
... ('base_url', 'http://example.com/'),
... ('end_url', '.html'),
... ('html_class', '') ]},
... safe_mode = True)
>>> print md.convert(text)
<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>
Use MetaData with mdx_meta.py (Note the blank html_class in MetaData):
>>> text = """wiki_base_url: http://example.com/
... wiki_end_url: .html
... wiki_html_class:
...
... Some text with a [[WikiLink]]."""
>>> md = markdown.Markdown(extensions=['meta', 'wikilinks'])
>>> print md.convert(text)
<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>
MetaData should not carry over to next document:
>>> print md.convert("No [[MetaData]] here.")
<p>No <a class="wikilink" href="/MetaData/">MetaData</a> here.</p>
Define a custom URL builder:
>>> def my_url_builder(label, base, end):
... return '/bar/'
>>> md = markdown.Markdown(extensions=['wikilinks'],
... extension_configs={'wikilinks' : [('build_url', my_url_builder)]})
>>> print md.convert('[[foo]]')
<p><a class="wikilink" href="/bar/">foo</a></p>
From the command line:
python markdown.py -x wikilinks(base_url=http://example.com/,end_url=.html,html_class=foo) src.txt
By [Waylan Limberg](http://achinghead.com/).
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
''' '''
from __future__ import absolute_import from __future__ import absolute_import
@ -83,29 +22,28 @@ from ..inlinepatterns import Pattern
from ..util import etree from ..util import etree
import re import re
def build_url(label, base, end): def build_url(label, base, end):
""" Build a url from the label, a base, and an end. """ """ Build a url from the label, a base, and an end. """
clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label) clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
return '%s%s%s'% (base, clean_label, end) return '%s%s%s' % (base, clean_label, end)
class WikiLinkExtension(Extension): class WikiLinkExtension(Extension):
def __init__(self, configs):
# set extension defaults def __init__(self, *args, **kwargs):
self.config = { self.config = {
'base_url' : ['/', 'String to append to beginning or URL.'], 'base_url': ['/', 'String to append to beginning or URL.'],
'end_url' : ['/', 'String to append to end of URL.'], 'end_url': ['/', 'String to append to end of URL.'],
'html_class' : ['wikilink', 'CSS hook. Leave blank for none.'], 'html_class': ['wikilink', 'CSS hook. Leave blank for none.'],
'build_url' : [build_url, 'Callable formats URL from label.'], 'build_url': [build_url, 'Callable formats URL from label.'],
} }
# Override defaults with user settings super(WikiLinkExtension, self).__init__(*args, **kwargs)
for key, value in configs :
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals): def extendMarkdown(self, md, md_globals):
self.md = md self.md = md
# append to end of inline patterns # append to end of inline patterns
WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]' WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]'
wikilinkPattern = WikiLinks(WIKILINK_RE, self.getConfigs()) wikilinkPattern = WikiLinks(WIKILINK_RE, self.getConfigs())
@ -117,14 +55,14 @@ class WikiLinks(Pattern):
def __init__(self, pattern, config): def __init__(self, pattern, config):
super(WikiLinks, self).__init__(pattern) super(WikiLinks, self).__init__(pattern)
self.config = config self.config = config
def handleMatch(self, m): def handleMatch(self, m):
if m.group(2).strip(): if m.group(2).strip():
base_url, end_url, html_class = self._getMeta() base_url, end_url, html_class = self._getMeta()
label = m.group(2).strip() label = m.group(2).strip()
url = self.config['build_url'](label, base_url, end_url) url = self.config['build_url'](label, base_url, end_url)
a = etree.Element('a') a = etree.Element('a')
a.text = label a.text = label
a.set('href', url) a.set('href', url)
if html_class: if html_class:
a.set('class', html_class) a.set('class', html_class)
@ -145,7 +83,7 @@ class WikiLinks(Pattern):
if 'wiki_html_class' in self.md.Meta: if 'wiki_html_class' in self.md.Meta:
html_class = self.md.Meta['wiki_html_class'][0] html_class = self.md.Meta['wiki_html_class'][0]
return base_url, end_url, html_class return base_url, end_url, html_class
def makeExtension(configs=None) :
return WikiLinkExtension(configs=configs) def makeExtension(*args, **kwargs):
return WikiLinkExtension(*args, **kwargs)
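The removed doctests reduce to something like this (import path assumed; attribute order follows the serializer):

```python
import markdown
from markdown.extensions.wikilinks import WikiLinkExtension

html = markdown.markdown(
    'Some text with a [[WikiLink]].',
    extensions=[WikiLinkExtension(base_url='/wiki/', end_url='.html')]
)
print(html)
# <p>Some text with a <a class="wikilink" href="/wiki/WikiLink.html">WikiLink</a>.</p>
```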
@ -46,13 +46,13 @@ from __future__ import unicode_literals
from . import util from . import util
from . import odict from . import odict
import re import re
try: try: # pragma: no cover
from urllib.parse import urlparse, urlunparse from urllib.parse import urlparse, urlunparse
except ImportError: except ImportError: # pragma: no cover
from urlparse import urlparse, urlunparse from urlparse import urlparse, urlunparse
try: try: # pragma: no cover
from html import entities from html import entities
except ImportError: except ImportError: # pragma: no cover
import htmlentitydefs as entities import htmlentitydefs as entities
@ -64,10 +64,12 @@ def build_inlinepatterns(md_instance, **kwargs):
inlinePatterns["reference"] = ReferencePattern(REFERENCE_RE, md_instance) inlinePatterns["reference"] = ReferencePattern(REFERENCE_RE, md_instance)
inlinePatterns["link"] = LinkPattern(LINK_RE, md_instance) inlinePatterns["link"] = LinkPattern(LINK_RE, md_instance)
inlinePatterns["image_link"] = ImagePattern(IMAGE_LINK_RE, md_instance) inlinePatterns["image_link"] = ImagePattern(IMAGE_LINK_RE, md_instance)
inlinePatterns["image_reference"] = \ inlinePatterns["image_reference"] = ImageReferencePattern(
ImageReferencePattern(IMAGE_REFERENCE_RE, md_instance) IMAGE_REFERENCE_RE, md_instance
inlinePatterns["short_reference"] = \ )
ReferencePattern(SHORT_REF_RE, md_instance) inlinePatterns["short_reference"] = ReferencePattern(
SHORT_REF_RE, md_instance
)
inlinePatterns["autolink"] = AutolinkPattern(AUTOLINK_RE, md_instance) inlinePatterns["autolink"] = AutolinkPattern(AUTOLINK_RE, md_instance)
inlinePatterns["automail"] = AutomailPattern(AUTOMAIL_RE, md_instance) inlinePatterns["automail"] = AutomailPattern(AUTOMAIL_RE, md_instance)
inlinePatterns["linebreak"] = SubstituteTagPattern(LINE_BREAK_RE, 'br') inlinePatterns["linebreak"] = SubstituteTagPattern(LINE_BREAK_RE, 'br')
@ -75,7 +77,8 @@ def build_inlinepatterns(md_instance, **kwargs):
inlinePatterns["html"] = HtmlPattern(HTML_RE, md_instance) inlinePatterns["html"] = HtmlPattern(HTML_RE, md_instance)
inlinePatterns["entity"] = HtmlPattern(ENTITY_RE, md_instance) inlinePatterns["entity"] = HtmlPattern(ENTITY_RE, md_instance)
inlinePatterns["not_strong"] = SimpleTextPattern(NOT_STRONG_RE) inlinePatterns["not_strong"] = SimpleTextPattern(NOT_STRONG_RE)
inlinePatterns["strong_em"] = DoubleTagPattern(STRONG_EM_RE, 'strong,em') inlinePatterns["em_strong"] = DoubleTagPattern(EM_STRONG_RE, 'strong,em')
inlinePatterns["strong_em"] = DoubleTagPattern(STRONG_EM_RE, 'em,strong')
inlinePatterns["strong"] = SimpleTagPattern(STRONG_RE, 'strong') inlinePatterns["strong"] = SimpleTagPattern(STRONG_RE, 'strong')
inlinePatterns["emphasis"] = SimpleTagPattern(EMPHASIS_RE, 'em') inlinePatterns["emphasis"] = SimpleTagPattern(EMPHASIS_RE, 'em')
if md_instance.smart_emphasis: if md_instance.smart_emphasis:
@ -90,46 +93,84 @@ The actual regular expressions for patterns
""" """
NOBRACKET = r'[^\]\[]*' NOBRACKET = r'[^\]\[]*'
BRK = ( r'\[(' BRK = (
+ (NOBRACKET + r'(\[')*6 r'\[(' +
+ (NOBRACKET+ r'\])*')*6 (NOBRACKET + r'(\[')*6 +
+ NOBRACKET + r')\]' ) (NOBRACKET + r'\])*')*6 +
NOBRACKET + r')\]'
)
NOIMG = r'(?<!\!)' NOIMG = r'(?<!\!)'
BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)' # `e=f()` or ``e=f("`")`` # `e=f()` or ``e=f("`")``
ESCAPE_RE = r'\\(.)' # \< BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)'
EMPHASIS_RE = r'(\*)([^\*]+)\2' # *emphasis*
STRONG_RE = r'(\*{2}|_{2})(.+?)\2' # **strong** # \<
STRONG_EM_RE = r'(\*{3}|_{3})(.+?)\2' # ***strong*** ESCAPE_RE = r'\\(.)'
SMART_EMPHASIS_RE = r'(?<!\w)(_)(?!_)(.+?)(?<!_)\2(?!\w)' # _smart_emphasis_
EMPHASIS_2_RE = r'(_)(.+?)\2' # _emphasis_ # *emphasis*
LINK_RE = NOIMG + BRK + \ EMPHASIS_RE = r'(\*)([^\*]+)\2'
r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12\s*)?\)'''
# **strong**
STRONG_RE = r'(\*{2}|_{2})(.+?)\2'
# ***strongem*** or ***em*strong**
EM_STRONG_RE = r'(\*|_)\2{2}(.+?)\2(.*?)\2{2}'
# ***strong**em*
STRONG_EM_RE = r'(\*|_)\2{2}(.+?)\2{2}(.*?)\2'
# _smart_emphasis_
SMART_EMPHASIS_RE = r'(?<!\w)(_)(?!_)(.+?)(?<!_)\2(?!\w)'
# _emphasis_
EMPHASIS_2_RE = r'(_)(.+?)\2'
# [text](url) or [text](<url>) or [text](url "title") # [text](url) or [text](<url>) or [text](url "title")
LINK_RE = NOIMG + BRK + \
r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12\s*)?\)'''
IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^\)]*))\)'
# ![alttxt](http://x.com/) or ![alttxt](<http://x.com/>) # ![alttxt](http://x.com/) or ![alttxt](<http://x.com/>)
REFERENCE_RE = NOIMG + BRK+ r'\s?\[([^\]]*)\]' # [Google][3] IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^")]+"[^"]*"|[^\)]*))\)'
SHORT_REF_RE = NOIMG + r'\[([^\]]+)\]' # [Google]
IMAGE_REFERENCE_RE = r'\!' + BRK + '\s?\[([^\]]*)\]' # ![alt text][2]
NOT_STRONG_RE = r'((^| )(\*|_)( |$))' # stand-alone * or _
AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^>]*)>' # <http://www.123.com>
AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' # <me@example.com>
HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)' # <...> # [Google][3]
ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # &amp; REFERENCE_RE = NOIMG + BRK + r'\s?\[([^\]]*)\]'
LINE_BREAK_RE = r' \n' # two spaces at end of line
# [Google]
SHORT_REF_RE = NOIMG + r'\[([^\]]+)\]'
# ![alt text][2]
IMAGE_REFERENCE_RE = r'\!' + BRK + '\s?\[([^\]]*)\]'
# stand-alone * or _
NOT_STRONG_RE = r'((^| )(\*|_)( |$))'
# <http://www.123.com>
AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^>]*)>'
# <me@example.com>
AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>'
# <...>
HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)'
# &amp;
ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)'
# two spaces at end of line
LINE_BREAK_RE = r' \n'
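The practical effect of splitting the old pattern into `EM_STRONG_RE` and `STRONG_EM_RE`, together with the new tail handling in `DoubleTagPattern`, is roughly the following (the expected output is my reading of the new rules, not taken from the test suite):

```python
import markdown

print(markdown.markdown('***strong and em***'))
# <p><strong><em>strong and em</em></strong></p>

print(markdown.markdown('***strong** then em*'))
# <p><em><strong>strong</strong> then em</em></p>
```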
def dequote(string): def dequote(string):
"""Remove quotes from around a string.""" """Remove quotes from around a string."""
if ( ( string.startswith('"') and string.endswith('"')) if ((string.startswith('"') and string.endswith('"')) or
or (string.startswith("'") and string.endswith("'")) ): (string.startswith("'") and string.endswith("'"))):
return string[1:-1] return string[1:-1]
else: else:
return string return string
ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
def handleAttributes(text, parent): def handleAttributes(text, parent):
"""Set values of an element based on attribute definitions ({@id=123}).""" """Set values of an element based on attribute definitions ({@id=123})."""
@ -143,6 +184,7 @@ The pattern classes
----------------------------------------------------------------------------- -----------------------------------------------------------------------------
""" """
class Pattern(object): class Pattern(object):
"""Base class that inline patterns subclass. """ """Base class that inline patterns subclass. """
@ -156,7 +198,7 @@ class Pattern(object):
""" """
self.pattern = pattern self.pattern = pattern
self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern, self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern,
re.DOTALL | re.UNICODE) re.DOTALL | re.UNICODE)
# Api for Markdown to pass safe_mode into instance # Api for Markdown to pass safe_mode into instance
@ -178,7 +220,7 @@ class Pattern(object):
* m: A re match object containing a match of the pattern. * m: A re match object containing a match of the pattern.
""" """
pass pass # pragma: no cover
def type(self): def type(self):
""" Return class name, to define pattern type """ """ Return class name, to define pattern type """
@ -188,9 +230,10 @@ class Pattern(object):
""" Return unescaped text given text with an inline placeholder. """ """ Return unescaped text given text with an inline placeholder. """
try: try:
stash = self.markdown.treeprocessors['inline'].stashed_nodes stash = self.markdown.treeprocessors['inline'].stashed_nodes
except KeyError: except KeyError: # pragma: no cover
return text return text
def itertext(el):
def itertext(el): # pragma: no cover
' Reimplement Element.itertext for older python versions ' ' Reimplement Element.itertext for older python versions '
tag = el.tag tag = el.tag
if not isinstance(tag, util.string_type) and tag is not None: if not isinstance(tag, util.string_type) and tag is not None:
@ -202,6 +245,7 @@ class Pattern(object):
yield s yield s
if e.tail: if e.tail:
yield e.tail yield e.tail
def get_stash(m): def get_stash(m):
id = m.group(1) id = m.group(1)
if id in stash: if id in stash:
@ -210,17 +254,14 @@ class Pattern(object):
return value return value
else: else:
# An etree Element - return text content only # An etree Element - return text content only
return ''.join(itertext(value)) return ''.join(itertext(value))
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text) return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
class SimpleTextPattern(Pattern): class SimpleTextPattern(Pattern):
""" Return a simple text of group(2) of a Pattern. """ """ Return a simple text of group(2) of a Pattern. """
def handleMatch(self, m): def handleMatch(self, m):
text = m.group(2) return m.group(2)
if text == util.INLINE_PLACEHOLDER_PREFIX:
return None
return text
class EscapePattern(Pattern): class EscapePattern(Pattern):
@ -231,7 +272,7 @@ class EscapePattern(Pattern):
if char in self.markdown.ESCAPED_CHARS: if char in self.markdown.ESCAPED_CHARS:
return '%s%s%s' % (util.STX, ord(char), util.ETX) return '%s%s%s' % (util.STX, ord(char), util.ETX)
else: else:
return '\\%s' % char return None
class SimpleTagPattern(Pattern): class SimpleTagPattern(Pattern):
@ -240,7 +281,7 @@ class SimpleTagPattern(Pattern):
of a Pattern. of a Pattern.
""" """
def __init__ (self, pattern, tag): def __init__(self, pattern, tag):
Pattern.__init__(self, pattern) Pattern.__init__(self, pattern)
self.tag = tag self.tag = tag
@ -252,13 +293,13 @@ class SimpleTagPattern(Pattern):
class SubstituteTagPattern(SimpleTagPattern): class SubstituteTagPattern(SimpleTagPattern):
""" Return an element of type `tag` with no children. """ """ Return an element of type `tag` with no children. """
def handleMatch (self, m): def handleMatch(self, m):
return util.etree.Element(self.tag) return util.etree.Element(self.tag)
class BacktickPattern(Pattern): class BacktickPattern(Pattern):
""" Return a `<code>` element containing the matching text. """ """ Return a `<code>` element containing the matching text. """
def __init__ (self, pattern): def __init__(self, pattern):
Pattern.__init__(self, pattern) Pattern.__init__(self, pattern)
self.tag = "code" self.tag = "code"
@ -279,12 +320,14 @@ class DoubleTagPattern(SimpleTagPattern):
el1 = util.etree.Element(tag1) el1 = util.etree.Element(tag1)
el2 = util.etree.SubElement(el1, tag2) el2 = util.etree.SubElement(el1, tag2)
el2.text = m.group(3) el2.text = m.group(3)
if len(m.groups()) == 5:
el2.tail = m.group(4)
return el1 return el1
class HtmlPattern(Pattern): class HtmlPattern(Pattern):
""" Store raw inline html and return a placeholder. """ """ Store raw inline html and return a placeholder. """
def handleMatch (self, m): def handleMatch(self, m):
rawhtml = self.unescape(m.group(2)) rawhtml = self.unescape(m.group(2))
place_holder = self.markdown.htmlStash.store(rawhtml) place_holder = self.markdown.htmlStash.store(rawhtml)
return place_holder return place_holder
@ -293,8 +336,9 @@ class HtmlPattern(Pattern):
""" Return unescaped text given text with an inline placeholder. """ """ Return unescaped text given text with an inline placeholder. """
try: try:
stash = self.markdown.treeprocessors['inline'].stashed_nodes stash = self.markdown.treeprocessors['inline'].stashed_nodes
except KeyError: except KeyError: # pragma: no cover
return text return text
def get_stash(m): def get_stash(m):
id = m.group(1) id = m.group(1)
value = stash.get(id) value = stash.get(id)
@ -303,7 +347,7 @@ class HtmlPattern(Pattern):
return self.markdown.serializer(value) return self.markdown.serializer(value)
except: except:
return '\%s' % value return '\%s' % value
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text) return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
@ -323,7 +367,7 @@ class LinkPattern(Pattern):
el.set("href", "") el.set("href", "")
if title: if title:
title = dequote(self.unescape(title)) title = dequote(self.unescape(title))
el.set("title", title) el.set("title", title)
return el return el
@ -344,35 +388,36 @@ class LinkPattern(Pattern):
`username:password@host:port`. `username:password@host:port`.
""" """
url = url.replace(' ', '%20')
if not self.markdown.safeMode: if not self.markdown.safeMode:
# Return immediately bypassing parsing. # Return immediately bypassing parsing.
return url return url
try: try:
scheme, netloc, path, params, query, fragment = url = urlparse(url) scheme, netloc, path, params, query, fragment = url = urlparse(url)
except ValueError: except ValueError: # pragma: no cover
# Bad url - so bad it couldn't be parsed. # Bad url - so bad it couldn't be parsed.
return '' return ''
locless_schemes = ['', 'mailto', 'news'] locless_schemes = ['', 'mailto', 'news']
allowed_schemes = locless_schemes + ['http', 'https', 'ftp', 'ftps'] allowed_schemes = locless_schemes + ['http', 'https', 'ftp', 'ftps']
if scheme not in allowed_schemes: if scheme not in allowed_schemes:
# Not a known (allowed) scheme. Not safe. # Not a known (allowed) scheme. Not safe.
return '' return ''
if netloc == '' and scheme not in locless_schemes: if netloc == '' and scheme not in locless_schemes: # pragma: no cover
# This should not happen. Treat as suspect. # This should not happen. Treat as suspect.
return '' return ''
for part in url[2:]: for part in url[2:]:
if ":" in part: if ":" in part:
# A colon in "path", "parameters", "query" or "fragment" is suspect. # A colon in "path", "parameters", "query"
# or "fragment" is suspect.
return '' return ''
# Url passes all tests. Return url as-is. # Url passes all tests. Return url as-is.
return urlunparse(url) return urlunparse(url)
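`safe_mode` is deprecated upstream in 2.6 but the whitelist above still backs it; a quick sketch of the effect (expected output is approximate):

```python
import markdown

# javascript: is not in allowed_schemes, so the href is emptied while
# the link text survives. Emits a DeprecationWarning on 2.6.
print(markdown.markdown('[click me](javascript:alert(1))', safe_mode='escape'))
# <p><a href="">click me</a></p>
```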
class ImagePattern(LinkPattern): class ImagePattern(LinkPattern):
""" Return a img element from the given match. """ """ Return a img element from the given match. """
def handleMatch(self, m): def handleMatch(self, m):
@ -396,6 +441,7 @@ class ImagePattern(LinkPattern):
el.set('alt', self.unescape(truealt)) el.set('alt', self.unescape(truealt))
return el return el
class ReferencePattern(LinkPattern): class ReferencePattern(LinkPattern):
""" Match to a stored reference and return link element. """ """ Match to a stored reference and return link element. """
@ -413,7 +459,7 @@ class ReferencePattern(LinkPattern):
# Clean up linebreaks in id # Clean up linebreaks in id
id = self.NEWLINE_CLEANUP_RE.sub(' ', id) id = self.NEWLINE_CLEANUP_RE.sub(' ', id)
if not id in self.markdown.references: # ignore undefined refs if id not in self.markdown.references: # ignore undefined refs
return None return None
href, title = self.markdown.references[id] href, title = self.markdown.references[id]
@ -454,6 +500,7 @@ class AutolinkPattern(Pattern):
el.text = util.AtomicString(m.group(2)) el.text = util.AtomicString(m.group(2))
return el return el
class AutomailPattern(Pattern): class AutomailPattern(Pattern):
""" """
Return a mailto link Element given an automail link (`<foo@example.com>`). Return a mailto link Element given an automail link (`<foo@example.com>`).
@ -480,4 +527,3 @@ class AutomailPattern(Pattern):
ord(letter) for letter in mailto]) ord(letter) for letter in mailto])
el.set('href', mailto) el.set('href', mailto)
return el return el
@ -1,18 +1,13 @@
from __future__ import unicode_literals from __future__ import unicode_literals
from __future__ import absolute_import from __future__ import absolute_import
from . import util from . import util
from copy import deepcopy from copy import deepcopy
def iteritems_compat(d):
"""Return an iterator over the (key, value) pairs of a dictionary.
Copied from `six` module."""
return iter(getattr(d, _iteritems)())
class OrderedDict(dict): class OrderedDict(dict):
""" """
A dictionary that keeps its keys in the order in which they're inserted. A dictionary that keeps its keys in the order in which they're inserted.
Copied from Django's SortedDict with some modifications. Copied from Django's SortedDict with some modifications.
""" """
@ -87,11 +82,11 @@ class OrderedDict(dict):
for key in self.keyOrder: for key in self.keyOrder:
yield self[key] yield self[key]
if util.PY3: if util.PY3: # pragma: no cover
items = _iteritems items = _iteritems
keys = _iterkeys keys = _iterkeys
values = _itervalues values = _itervalues
else: else: # pragma: no cover
iteritems = _iteritems iteritems = _iteritems
iterkeys = _iterkeys iterkeys = _iterkeys
itervalues = _itervalues itervalues = _itervalues
@ -106,8 +101,8 @@ class OrderedDict(dict):
return [self[k] for k in self.keyOrder] return [self[k] for k in self.keyOrder]
def update(self, dict_): def update(self, dict_):
for k, v in iteritems_compat(dict_): for k in dict_:
self[k] = v self[k] = dict_[k]
def setdefault(self, key, default): def setdefault(self, key, default):
if key not in self: if key not in self:
@ -138,7 +133,9 @@ class OrderedDict(dict):
Replaces the normal dict.__repr__ with a version that returns the keys Replaces the normal dict.__repr__ with a version that returns the keys
in their Ordered order. in their Ordered order.
""" """
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in iteritems_compat(self)]) return '{%s}' % ', '.join(
['%r: %r' % (k, v) for k, v in self._iteritems()]
)
def clear(self): def clear(self):
super(OrderedDict, self).clear() super(OrderedDict, self).clear()
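This ordered dict is what gives the `'>name'`, `'<name'`, `'_begin'` and `'_end'` placement strings used throughout this commit their meaning; a tiny sketch (module path assumed):

```python
from markdown.odict import OrderedDict

d = OrderedDict()
d['first'] = 1
d['last'] = 3
d.add('middle', 2, '>first')  # insert directly after 'first'
print(d)  # {'first': 1, 'middle': 2, 'last': 3}
```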
@ -42,7 +42,7 @@ class Postprocessor(util.Processor):
(possibly modified) string. (possibly modified) string.
""" """
pass pass # pragma: no cover
class RawHtmlPostprocessor(Postprocessor): class RawHtmlPostprocessor(Postprocessor):
@ -51,7 +51,7 @@ class RawHtmlPostprocessor(Postprocessor):
def run(self, text): def run(self, text):
""" Iterate over html stash and restore "safe" html. """ """ Iterate over html stash and restore "safe" html. """
for i in range(self.markdown.htmlStash.html_counter): for i in range(self.markdown.htmlStash.html_counter):
html, safe = self.markdown.htmlStash.rawHtmlBlocks[i] html, safe = self.markdown.htmlStash.rawHtmlBlocks[i]
if self.markdown.safeMode and not safe: if self.markdown.safeMode and not safe:
if str(self.markdown.safeMode).lower() == 'escape': if str(self.markdown.safeMode).lower() == 'escape':
html = self.escape(html) html = self.escape(html)
@ -59,12 +59,16 @@ class RawHtmlPostprocessor(Postprocessor):
html = '' html = ''
else: else:
html = self.markdown.html_replacement_text html = self.markdown.html_replacement_text
if self.isblocklevel(html) and (safe or not self.markdown.safeMode): if (self.isblocklevel(html) and
text = text.replace("<p>%s</p>" % (safe or not self.markdown.safeMode)):
(self.markdown.htmlStash.get_placeholder(i)), text = text.replace(
html + "\n") "<p>%s</p>" %
text = text.replace(self.markdown.htmlStash.get_placeholder(i), (self.markdown.htmlStash.get_placeholder(i)),
html) html + "\n"
)
text = text.replace(
self.markdown.htmlStash.get_placeholder(i), html
)
return text return text
def escape(self, html): def escape(self, html):
@ -88,7 +92,7 @@ class AndSubstitutePostprocessor(Postprocessor):
""" Restore valid entities """ """ Restore valid entities """
def run(self, text): def run(self, text):
text = text.replace(util.AMP_SUBSTITUTE, "&") text = text.replace(util.AMP_SUBSTITUTE, "&")
return text return text
@ -3,7 +3,7 @@ PRE-PROCESSORS
============================================================================= =============================================================================
Preprocessors work on source text before we start doing anything too Preprocessors work on source text before we start doing anything too
complicated. complicated.
""" """
from __future__ import absolute_import from __future__ import absolute_import
@ -41,7 +41,7 @@ class Preprocessor(util.Processor):
the (possibly modified) list of lines. the (possibly modified) list of lines.
""" """
pass pass # pragma: no cover
class NormalizeWhitespace(Preprocessor): class NormalizeWhitespace(Preprocessor):
@ -61,13 +61,14 @@ class HtmlBlockPreprocessor(Preprocessor):
right_tag_patterns = ["</%s>", "%s>"] right_tag_patterns = ["</%s>", "%s>"]
attrs_pattern = r""" attrs_pattern = r"""
\s+(?P<attr>[^>"'/= ]+)=(?P<q>['"])(?P<value>.*?)(?P=q) # attr="value" \s+(?P<attr>[^>"'/= ]+)=(?P<q>['"])(?P<value>.*?)(?P=q) # attr="value"
| # OR | # OR
\s+(?P<attr1>[^>"'/= ]+)=(?P<value1>[^> ]+) # attr=value \s+(?P<attr1>[^>"'/= ]+)=(?P<value1>[^> ]+) # attr=value
| # OR | # OR
\s+(?P<attr2>[^>"'/= ]+) # attr \s+(?P<attr2>[^>"'/= ]+) # attr
""" """
left_tag_pattern = r'^\<(?P<tag>[^> ]+)(?P<attrs>(%s)*)\s*\/?\>?' % attrs_pattern left_tag_pattern = r'^\<(?P<tag>[^> ]+)(?P<attrs>(%s)*)\s*\/?\>?' % \
attrs_pattern
attrs_re = re.compile(attrs_pattern, re.VERBOSE) attrs_re = re.compile(attrs_pattern, re.VERBOSE)
left_tag_re = re.compile(left_tag_pattern, re.VERBOSE) left_tag_re = re.compile(left_tag_pattern, re.VERBOSE)
markdown_in_raw = False markdown_in_raw = False
@ -87,7 +88,9 @@ class HtmlBlockPreprocessor(Preprocessor):
attrs[ma.group('attr').strip()] = "" attrs[ma.group('attr').strip()] = ""
elif ma.group('attr1'): elif ma.group('attr1'):
if ma.group('value1'): if ma.group('value1'):
attrs[ma.group('attr1').strip()] = ma.group('value1') attrs[ma.group('attr1').strip()] = ma.group(
'value1'
)
else: else:
attrs[ma.group('attr1').strip()] = "" attrs[ma.group('attr1').strip()] = ""
elif ma.group('attr2'): elif ma.group('attr2'):
@ -102,7 +105,7 @@ class HtmlBlockPreprocessor(Preprocessor):
i = block.find(rtag, start_index) i = block.find(rtag, start_index)
if i == -1: if i == -1:
return -1 return -1
j = block.find(ltag, start_index) j = block.find(ltag, start_index)
# if no ltag, or rtag found before another ltag, return index # if no ltag, or rtag found before another ltag, return index
if (j > i or j == -1): if (j > i or j == -1):
return i + len(rtag) return i + len(rtag)
@ -111,27 +114,28 @@ class HtmlBlockPreprocessor(Preprocessor):
j = block.find('>', j) j = block.find('>', j)
start_index = self._recursive_tagfind(ltag, rtag, j + 1, block) start_index = self._recursive_tagfind(ltag, rtag, j + 1, block)
if start_index == -1: if start_index == -1:
# HTML potentially malformed- ltag has no corresponding # HTML potentially malformed- ltag has no corresponding
# rtag # rtag
return -1 return -1
def _get_right_tag(self, left_tag, left_index, block): def _get_right_tag(self, left_tag, left_index, block):
for p in self.right_tag_patterns: for p in self.right_tag_patterns:
tag = p % left_tag tag = p % left_tag
i = self._recursive_tagfind("<%s" % left_tag, tag, left_index, block) i = self._recursive_tagfind(
"<%s" % left_tag, tag, left_index, block
)
if i > 2: if i > 2:
return tag.lstrip("<").rstrip(">"), i return tag.lstrip("<").rstrip(">"), i
return block.rstrip()[-left_index:-1].lower(), len(block) return block.rstrip()[-left_index:-1].lower(), len(block)
def _equal_tags(self, left_tag, right_tag): def _equal_tags(self, left_tag, right_tag):
if left_tag[0] in ['?', '@', '%']: # handle PHP, etc. if left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
return True return True
if ("/" + left_tag) == right_tag: if ("/" + left_tag) == right_tag:
return True return True
if (right_tag == "--" and left_tag == "--"): if (right_tag == "--" and left_tag == "--"):
return True return True
elif left_tag == right_tag[1:] \ elif left_tag == right_tag[1:] and right_tag[0] == "/":
and right_tag[0] == "/":
return True return True
else: else:
return False return False
@ -139,6 +143,49 @@ class HtmlBlockPreprocessor(Preprocessor):
def _is_oneliner(self, tag): def _is_oneliner(self, tag):
return (tag in ['hr', 'hr/']) return (tag in ['hr', 'hr/'])
def _stringindex_to_listindex(self, stringindex, items):
"""
Same effect as concatenating the strings in items,
finding the character to which stringindex refers in that string,
and returning the index of the item in which that character resides.
"""
items.append('dummy')
i, count = 0, 0
while count <= stringindex:
count += len(items[i])
i += 1
return i - 1
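A minimal standalone sketch of the offset-to-index mapping this helper performs, with made-up input (unlike the method above, the sketch copies the list rather than appending the sentinel in place):

    # Sketch of _stringindex_to_listindex with hypothetical data.
    def stringindex_to_listindex(stringindex, items):
        items = items + ['dummy']      # sentinel so the loop can run past the last item
        i, count = 0, 0
        while count <= stringindex:    # accumulate lengths until the offset is covered
            count += len(items[i])
            i += 1
        return i - 1                   # index of the item holding that character

    chunks = ['<div markdown="1">', 'Some *text*', '</div>']
    # offset 20 of ''.join(chunks) lands inside 'Some *text*', i.e. chunks[1]
    assert stringindex_to_listindex(20, chunks) == 1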
def _nested_markdown_in_html(self, items):
"""Find and process html child elements of the given element block."""
for i, item in enumerate(items):
if self.left_tag_re.match(item):
left_tag, left_index, attrs = \
self._get_left_tag(''.join(items[i:]))
right_tag, data_index = self._get_right_tag(
left_tag, left_index, ''.join(items[i:]))
right_listindex = \
self._stringindex_to_listindex(data_index, items[i:]) + i
if 'markdown' in attrs.keys():
items[i] = items[i][left_index:] # remove opening tag
placeholder = self.markdown.htmlStash.store_tag(
left_tag, attrs, i + 1, right_listindex + 1)
items.insert(i, placeholder)
if len(items) - right_listindex <= 1: # last nest, no tail
right_listindex -= 1
items[right_listindex] = items[right_listindex][
:-len(right_tag) - 2] # remove closing tag
else: # raw html
if len(items) - right_listindex <= 1: # last element
right_listindex -= 1
if right_listindex <= i:
right_listindex = i + 1
placeholder = self.markdown.htmlStash.store('\n\n'.join(
items[i:right_listindex]))
del items[i:right_listindex]
items.insert(i, placeholder)
return items
def run(self, lines): def run(self, lines):
text = "\n".join(lines) text = "\n".join(lines)
new_blocks = [] new_blocks = []
@ -146,7 +193,7 @@ class HtmlBlockPreprocessor(Preprocessor):
items = [] items = []
left_tag = '' left_tag = ''
right_tag = '' right_tag = ''
in_tag = False # flag in_tag = False # flag
while text: while text:
block = text[0] block = text[0]
@ -160,24 +207,21 @@ class HtmlBlockPreprocessor(Preprocessor):
if not in_tag: if not in_tag:
if block.startswith("<") and len(block.strip()) > 1: if block.startswith("<") and len(block.strip()) > 1:
if block[1] == "!": if block[1:4] == "!--":
# is a comment block # is a comment block
left_tag, left_index, attrs = "--", 2, {} left_tag, left_index, attrs = "--", 2, {}
else: else:
left_tag, left_index, attrs = self._get_left_tag(block) left_tag, left_index, attrs = self._get_left_tag(block)
right_tag, data_index = self._get_right_tag(left_tag, right_tag, data_index = self._get_right_tag(left_tag,
left_index, left_index,
block) block)
# keep checking conditions below and maybe just append # keep checking conditions below and maybe just append
if data_index < len(block) \ if data_index < len(block) and (util.isBlockLevel(left_tag) or left_tag == '--'):
and (util.isBlockLevel(left_tag)
or left_tag == '--'):
text.insert(0, block[data_index:]) text.insert(0, block[data_index:])
block = block[:data_index] block = block[:data_index]
if not (util.isBlockLevel(left_tag) \ if not (util.isBlockLevel(left_tag) or block[1] in ["!", "?", "@", "%"]):
or block[1] in ["!", "?", "@", "%"]):
new_blocks.append(block) new_blocks.append(block)
continue continue
@ -186,35 +230,30 @@ class HtmlBlockPreprocessor(Preprocessor):
continue continue
if block.rstrip().endswith(">") \ if block.rstrip().endswith(">") \
and self._equal_tags(left_tag, right_tag): and self._equal_tags(left_tag, right_tag):
if self.markdown_in_raw and 'markdown' in attrs.keys(): if self.markdown_in_raw and 'markdown' in attrs.keys():
start = re.sub(r'\smarkdown(=[\'"]?[^> ]*[\'"]?)?', block = block[left_index:-len(right_tag) - 2]
'', block[:left_index]) new_blocks.append(self.markdown.htmlStash.
end = block[-len(right_tag)-2:] store_tag(left_tag, attrs, 0, 2))
block = block[left_index:-len(right_tag)-2] new_blocks.extend([block])
new_blocks.append(
self.markdown.htmlStash.store(start))
new_blocks.append(block)
new_blocks.append(
self.markdown.htmlStash.store(end))
else: else:
new_blocks.append( new_blocks.append(
self.markdown.htmlStash.store(block.strip())) self.markdown.htmlStash.store(block.strip()))
continue continue
else: else:
# if is block level tag and is not complete # if is block level tag and is not complete
if (not self._equal_tags(left_tag, right_tag)) and \
if util.isBlockLevel(left_tag) or left_tag == "--" \ (util.isBlockLevel(left_tag) or left_tag == "--"):
and not block.rstrip().endswith(">"):
items.append(block.strip()) items.append(block.strip())
in_tag = True in_tag = True
else: else:
new_blocks.append( new_blocks.append(
self.markdown.htmlStash.store(block.strip())) self.markdown.htmlStash.store(block.strip())
)
continue continue
new_blocks.append(block) else:
new_blocks.append(block)
else: else:
items.append(block) items.append(block)
@ -223,7 +262,7 @@ class HtmlBlockPreprocessor(Preprocessor):
if self._equal_tags(left_tag, right_tag): if self._equal_tags(left_tag, right_tag):
# if find closing tag # if find closing tag
if data_index < len(block): if data_index < len(block):
# we have more text after right_tag # we have more text after right_tag
items[-1] = block[:data_index] items[-1] = block[:data_index]
@ -231,16 +270,21 @@ class HtmlBlockPreprocessor(Preprocessor):
in_tag = False in_tag = False
if self.markdown_in_raw and 'markdown' in attrs.keys(): if self.markdown_in_raw and 'markdown' in attrs.keys():
start = re.sub(r'\smarkdown(=[\'"]?[^> ]*[\'"]?)?',
'', items[0][:left_index])
items[0] = items[0][left_index:] items[0] = items[0][left_index:]
end = items[-1][-len(right_tag)-2:] items[-1] = items[-1][:-len(right_tag) - 2]
items[-1] = items[-1][:-len(right_tag)-2] if items[len(items) - 1]: # not a newline/empty string
new_blocks.append( right_index = len(items) + 3
self.markdown.htmlStash.store(start)) else:
new_blocks.extend(items) right_index = len(items) + 2
new_blocks.append( new_blocks.append(self.markdown.htmlStash.store_tag(
self.markdown.htmlStash.store(end)) left_tag, attrs, 0, right_index))
placeholderslen = len(self.markdown.htmlStash.tag_data)
new_blocks.extend(
self._nested_markdown_in_html(items))
nests = len(self.markdown.htmlStash.tag_data) - \
placeholderslen
self.markdown.htmlStash.tag_data[-1 - nests][
'right_index'] += nests - 2
else: else:
new_blocks.append( new_blocks.append(
self.markdown.htmlStash.store('\n\n'.join(items))) self.markdown.htmlStash.store('\n\n'.join(items)))
@ -248,21 +292,23 @@ class HtmlBlockPreprocessor(Preprocessor):
if items: if items:
if self.markdown_in_raw and 'markdown' in attrs.keys(): if self.markdown_in_raw and 'markdown' in attrs.keys():
start = re.sub(r'\smarkdown(=[\'"]?[^> ]*[\'"]?)?',
'', items[0][:left_index])
items[0] = items[0][left_index:] items[0] = items[0][left_index:]
end = items[-1][-len(right_tag)-2:] items[-1] = items[-1][:-len(right_tag) - 2]
items[-1] = items[-1][:-len(right_tag)-2] if items[len(items) - 1]: # not a newline/empty string
right_index = len(items) + 3
else:
right_index = len(items) + 2
new_blocks.append( new_blocks.append(
self.markdown.htmlStash.store(start)) self.markdown.htmlStash.store_tag(
new_blocks.extend(items) left_tag, attrs, 0, right_index))
if end.strip(): placeholderslen = len(self.markdown.htmlStash.tag_data)
new_blocks.append( new_blocks.extend(self._nested_markdown_in_html(items))
self.markdown.htmlStash.store(end)) nests = len(self.markdown.htmlStash.tag_data) - placeholderslen
self.markdown.htmlStash.tag_data[-1 - nests][
'right_index'] += nests - 2
else: else:
new_blocks.append( new_blocks.append(
self.markdown.htmlStash.store('\n\n'.join(items))) self.markdown.htmlStash.store('\n\n'.join(items)))
#new_blocks.append(self.markdown.htmlStash.store('\n\n'.join(items)))
new_blocks.append('\n') new_blocks.append('\n')
new_text = "\n\n".join(new_blocks) new_text = "\n\n".join(new_blocks)
@ -273,11 +319,13 @@ class ReferencePreprocessor(Preprocessor):
""" Remove reference definitions from text and store for later use. """ """ Remove reference definitions from text and store for later use. """
TITLE = r'[ ]*(\"(.*)\"|\'(.*)\'|\((.*)\))[ ]*' TITLE = r'[ ]*(\"(.*)\"|\'(.*)\'|\((.*)\))[ ]*'
RE = re.compile(r'^[ ]{0,3}\[([^\]]*)\]:\s*([^ ]*)[ ]*(%s)?$' % TITLE, re.DOTALL) RE = re.compile(
r'^[ ]{0,3}\[([^\]]*)\]:\s*([^ ]*)[ ]*(%s)?$' % TITLE, re.DOTALL
)
TITLE_RE = re.compile(r'^%s$' % TITLE) TITLE_RE = re.compile(r'^%s$' % TITLE)
def run (self, lines): def run(self, lines):
new_text = []; new_text = []
while lines: while lines:
line = lines.pop(0) line = lines.pop(0)
m = self.RE.match(line) m = self.RE.match(line)
@ -295,4 +343,4 @@ class ReferencePreprocessor(Preprocessor):
else: else:
new_text.append(line) new_text.append(line)
return new_text #+ "\n" return new_text # + "\n"

View File

@ -42,9 +42,9 @@ from __future__ import unicode_literals
from . import util from . import util
ElementTree = util.etree.ElementTree ElementTree = util.etree.ElementTree
QName = util.etree.QName QName = util.etree.QName
if hasattr(util.etree, 'test_comment'): if hasattr(util.etree, 'test_comment'): # pragma: no cover
Comment = util.etree.test_comment Comment = util.etree.test_comment
else: else: # pragma: no cover
Comment = util.etree.Comment Comment = util.etree.Comment
PI = util.etree.PI PI = util.etree.PI
ProcessingInstruction = util.etree.ProcessingInstruction ProcessingInstruction = util.etree.ProcessingInstruction
@ -56,7 +56,7 @@ HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
try: try:
HTML_EMPTY = set(HTML_EMPTY) HTML_EMPTY = set(HTML_EMPTY)
except NameError: except NameError: # pragma: no cover
pass pass
_namespace_map = { _namespace_map = {
@ -73,17 +73,19 @@ _namespace_map = {
} }
def _raise_serialization_error(text): def _raise_serialization_error(text): # pragma: no cover
raise TypeError( raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__) "cannot serialize %r (type %s)" % (text, type(text).__name__)
) )
def _encode(text, encoding): def _encode(text, encoding):
try: try:
return text.encode(encoding, "xmlcharrefreplace") return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError): except (TypeError, AttributeError): # pragma: no cover
_raise_serialization_error(text) _raise_serialization_error(text)
def _escape_cdata(text): def _escape_cdata(text):
# escape character data # escape character data
try: try:
@ -97,7 +99,7 @@ def _escape_cdata(text):
if ">" in text: if ">" in text:
text = text.replace(">", "&gt;") text = text.replace(">", "&gt;")
return text return text
except (TypeError, AttributeError): except (TypeError, AttributeError): # pragma: no cover
_raise_serialization_error(text) _raise_serialization_error(text)
@ -115,9 +117,10 @@ def _escape_attrib(text):
if "\n" in text: if "\n" in text:
text = text.replace("\n", "&#10;") text = text.replace("\n", "&#10;")
return text return text
except (TypeError, AttributeError): except (TypeError, AttributeError): # pragma: no cover
_raise_serialization_error(text) _raise_serialization_error(text)
def _escape_attrib_html(text): def _escape_attrib_html(text):
# escape attribute value # escape attribute value
try: try:
@ -130,7 +133,7 @@ def _escape_attrib_html(text):
if "\"" in text: if "\"" in text:
text = text.replace("\"", "&quot;") text = text.replace("\"", "&quot;")
return text return text
except (TypeError, AttributeError): except (TypeError, AttributeError): # pragma: no cover
_raise_serialization_error(text) _raise_serialization_error(text)
@ -152,7 +155,7 @@ def _serialize_html(write, elem, qnames, namespaces, format):
write("<" + tag) write("<" + tag)
items = elem.items() items = elem.items()
if items or namespaces: if items or namespaces:
items.sort() # lexical order items = sorted(items) # lexical order
for k, v in items: for k, v in items:
if isinstance(k, QName): if isinstance(k, QName):
k = k.text k = k.text
@ -167,28 +170,28 @@ def _serialize_html(write, elem, qnames, namespaces, format):
write(" %s=\"%s\"" % (qnames[k], v)) write(" %s=\"%s\"" % (qnames[k], v))
if namespaces: if namespaces:
items = namespaces.items() items = namespaces.items()
items.sort(key=lambda x: x[1]) # sort on prefix items.sort(key=lambda x: x[1]) # sort on prefix
for v, k in items: for v, k in items:
if k: if k:
k = ":" + k k = ":" + k
write(" xmlns%s=\"%s\"" % (k, _escape_attrib(v))) write(" xmlns%s=\"%s\"" % (k, _escape_attrib(v)))
if format == "xhtml" and tag in HTML_EMPTY: if format == "xhtml" and tag.lower() in HTML_EMPTY:
write(" />") write(" />")
else: else:
write(">") write(">")
tag = tag.lower()
if text: if text:
if tag == "script" or tag == "style": if tag.lower() in ["script", "style"]:
write(text) write(text)
else: else:
write(_escape_cdata(text)) write(_escape_cdata(text))
for e in elem: for e in elem:
_serialize_html(write, e, qnames, None, format) _serialize_html(write, e, qnames, None, format)
if tag not in HTML_EMPTY: if tag.lower() not in HTML_EMPTY:
write("</" + tag + ">") write("</" + tag + ">")
if elem.tail: if elem.tail:
write(_escape_cdata(elem.tail)) write(_escape_cdata(elem.tail))
def _write_html(root, def _write_html(root,
encoding=None, encoding=None,
default_namespace=None, default_namespace=None,
@ -233,7 +236,7 @@ def _namespaces(elem, default_namespace=None):
if prefix: if prefix:
qnames[qname] = "%s:%s" % (prefix, tag) qnames[qname] = "%s:%s" % (prefix, tag)
else: else:
qnames[qname] = tag # default element qnames[qname] = tag # default element
else: else:
if default_namespace: if default_namespace:
raise ValueError( raise ValueError(
@ -241,14 +244,14 @@ def _namespaces(elem, default_namespace=None):
"default_namespace option" "default_namespace option"
) )
qnames[qname] = qname qnames[qname] = qname
except TypeError: except TypeError: # pragma: no cover
_raise_serialization_error(qname) _raise_serialization_error(qname)
# populate qname and namespaces table # populate qname and namespaces table
try: try:
iterate = elem.iter iterate = elem.iter
except AttributeError: except AttributeError:
iterate = elem.getiterator # cET compatibility iterate = elem.getiterator # cET compatibility
for elem in iterate(): for elem in iterate():
tag = elem.tag tag = elem.tag
if isinstance(tag, QName) and tag.text not in qnames: if isinstance(tag, QName) and tag.text not in qnames:
@ -270,8 +273,10 @@ def _namespaces(elem, default_namespace=None):
add_qname(text.text) add_qname(text.text)
return qnames, namespaces return qnames, namespaces
def to_html_string(element): def to_html_string(element):
return _write_html(ElementTree(element).getroot(), format="html") return _write_html(ElementTree(element).getroot(), format="html")
def to_xhtml_string(element): def to_xhtml_string(element):
return _write_html(ElementTree(element).getroot(), format="xhtml") return _write_html(ElementTree(element).getroot(), format="xhtml")
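A rough illustration of the html versus xhtml treatment of empty elements handled above, assuming the bundled calibre.ebooks.markdown module paths (the element name and expected output are illustrative only):

    # Sketch: serializing an empty element with both output formats.
    from calibre.ebooks.markdown.util import etree
    from calibre.ebooks.markdown.serializers import to_html_string, to_xhtml_string

    root = etree.Element('p')
    etree.SubElement(root, 'BR')      # upper-case on purpose: now matched via tag.lower()
    print(to_html_string(root))       # expected roughly: <p><BR></p>
    print(to_xhtml_string(root))      # expected roughly: <p><BR /></p>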

View File

@ -34,11 +34,11 @@ class Treeprocessor(util.Processor):
def run(self, root): def run(self, root):
""" """
Subclasses of Treeprocessor should implement a `run` method, which Subclasses of Treeprocessor should implement a `run` method, which
takes a root ElementTree. This method can return another ElementTree takes a root ElementTree. This method can return another ElementTree
object, and the existing root ElementTree will be replaced, or it can object, and the existing root ElementTree will be replaced, or it can
modify the current tree and return None. modify the current tree and return None.
""" """
pass pass # pragma: no cover
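A minimal subclass sketch of the run() contract described in that docstring; the class names and the table-annotating behaviour are hypothetical, and the registration assumes the markdown 2.6 odict add() API:

    # Hypothetical Treeprocessor: modifies the tree in place and returns None.
    from calibre.ebooks.markdown.extensions import Extension
    from calibre.ebooks.markdown.treeprocessors import Treeprocessor

    class TableClassTreeprocessor(Treeprocessor):
        def run(self, root):
            for table in root.iter('table'):
                table.set('class', 'styled')    # annotate every <table> element
            # returning None keeps the (modified) existing root

    class TableClassExtension(Extension):
        def extendMarkdown(self, md, md_globals):
            md.treeprocessors.add('tableclass', TableClassTreeprocessor(md), '_end')

    # usage: Markdown(extensions=[TableClassExtension()]).convert(text)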
class InlineProcessor(Treeprocessor): class InlineProcessor(Treeprocessor):
@ -53,6 +53,7 @@ class InlineProcessor(Treeprocessor):
+ len(self.__placeholder_suffix) + len(self.__placeholder_suffix)
self.__placeholder_re = util.INLINE_PLACEHOLDER_RE self.__placeholder_re = util.INLINE_PLACEHOLDER_RE
self.markdown = md self.markdown = md
self.inlinePatterns = md.inlinePatterns
def __makePlaceholder(self, type): def __makePlaceholder(self, type):
""" Generate a placeholder """ """ Generate a placeholder """
@ -70,7 +71,7 @@ class InlineProcessor(Treeprocessor):
* index: index, from which we start search * index: index, from which we start search
Returns: placeholder id and string index, after the found placeholder. Returns: placeholder id and string index, after the found placeholder.
""" """
m = self.__placeholder_re.search(data, index) m = self.__placeholder_re.search(data, index)
if m: if m:
@ -99,9 +100,9 @@ class InlineProcessor(Treeprocessor):
""" """
if not isinstance(data, util.AtomicString): if not isinstance(data, util.AtomicString):
startIndex = 0 startIndex = 0
while patternIndex < len(self.markdown.inlinePatterns): while patternIndex < len(self.inlinePatterns):
data, matched, startIndex = self.__applyPattern( data, matched, startIndex = self.__applyPattern(
self.markdown.inlinePatterns.value_for_index(patternIndex), self.inlinePatterns.value_for_index(patternIndex),
data, patternIndex, startIndex) data, patternIndex, startIndex)
if not matched: if not matched:
patternIndex += 1 patternIndex += 1
@ -128,11 +129,10 @@ class InlineProcessor(Treeprocessor):
text = subnode.tail text = subnode.tail
subnode.tail = None subnode.tail = None
childResult = self.__processPlaceholders(text, subnode) childResult = self.__processPlaceholders(text, subnode, isText)
if not isText and node is not subnode: if not isText and node is not subnode:
pos = node.getchildren().index(subnode) pos = list(node).index(subnode) + 1
node.remove(subnode)
else: else:
pos = 0 pos = 0
@ -140,7 +140,7 @@ class InlineProcessor(Treeprocessor):
for newChild in childResult: for newChild in childResult:
node.insert(pos, newChild) node.insert(pos, newChild)
def __processPlaceholders(self, data, parent): def __processPlaceholders(self, data, parent, isText=True):
""" """
Process string with placeholders and generate ElementTree tree. Process string with placeholders and generate ElementTree tree.
@ -150,7 +150,7 @@ class InlineProcessor(Treeprocessor):
* parent: Element, which contains processing inline data * parent: Element, which contains processing inline data
Returns: list with ElementTree elements with applied inline patterns. Returns: list with ElementTree elements with applied inline patterns.
""" """
def linkText(text): def linkText(text):
if text: if text:
@ -159,6 +159,11 @@ class InlineProcessor(Treeprocessor):
result[-1].tail += text result[-1].tail += text
else: else:
result[-1].tail = text result[-1].tail = text
elif not isText:
if parent.tail:
parent.tail += text
else:
parent.tail = text
else: else:
if parent.text: if parent.text:
parent.text += text parent.text += text
@ -178,15 +183,17 @@ class InlineProcessor(Treeprocessor):
text = data[strartIndex:index] text = data[strartIndex:index]
linkText(text) linkText(text)
if not isString(node): # it's Element if not isString(node): # it's Element
for child in [node] + node.getchildren(): for child in [node] + list(node):
if child.tail: if child.tail:
if child.tail.strip(): if child.tail.strip():
self.__processElementText(node, child,False) self.__processElementText(
node, child, False
)
if child.text: if child.text:
if child.text.strip(): if child.text.strip():
self.__processElementText(child, child) self.__processElementText(child, child)
else: # it's just a string else: # it's just a string
linkText(node) linkText(node)
strartIndex = phEndIndex strartIndex = phEndIndex
continue continue
@ -194,7 +201,7 @@ class InlineProcessor(Treeprocessor):
strartIndex = phEndIndex strartIndex = phEndIndex
result.append(node) result.append(node)
else: # wrong placeholder else: # wrong placeholder
end = index + len(self.__placeholder_prefix) end = index + len(self.__placeholder_prefix)
linkText(data[strartIndex:end]) linkText(data[strartIndex:end])
strartIndex = end strartIndex = end
@ -237,14 +244,16 @@ class InlineProcessor(Treeprocessor):
if not isString(node): if not isString(node):
if not isinstance(node.text, util.AtomicString): if not isinstance(node.text, util.AtomicString):
# We need to process current node too # We need to process current node too
for child in [node] + node.getchildren(): for child in [node] + list(node):
if not isString(node): if not isString(node):
if child.text: if child.text:
child.text = self.__handleInline(child.text, child.text = self.__handleInline(
patternIndex + 1) child.text, patternIndex + 1
)
if child.tail: if child.tail:
child.tail = self.__handleInline(child.tail, child.tail = self.__handleInline(
patternIndex) child.tail, patternIndex
)
placeholder = self.__stashNode(node, pattern.type()) placeholder = self.__stashNode(node, pattern.type())
@ -257,8 +266,8 @@ class InlineProcessor(Treeprocessor):
Iterate over ElementTree, find elements with inline tag, apply inline Iterate over ElementTree, find elements with inline tag, apply inline
patterns and append newly created Elements to tree. If you don't patterns and append newly created Elements to tree. If you don't
want to process your data with inline paterns, instead of normal string, want to process your data with inline paterns, instead of normal
use subclass AtomicString: string, use subclass AtomicString:
node.text = markdown.AtomicString("This will not be processed.") node.text = markdown.AtomicString("This will not be processed.")
@ -276,47 +285,49 @@ class InlineProcessor(Treeprocessor):
while stack: while stack:
currElement = stack.pop() currElement = stack.pop()
insertQueue = [] insertQueue = []
for child in currElement.getchildren(): for child in currElement:
if child.text and not isinstance(child.text, util.AtomicString): if child.text and not isinstance(
child.text, util.AtomicString
):
text = child.text text = child.text
child.text = None child.text = None
lst = self.__processPlaceholders(self.__handleInline( lst = self.__processPlaceholders(
text), child) self.__handleInline(text), child
)
stack += lst stack += lst
insertQueue.append((child, lst)) insertQueue.append((child, lst))
if child.tail: if child.tail:
tail = self.__handleInline(child.tail) tail = self.__handleInline(child.tail)
dumby = util.etree.Element('d') dumby = util.etree.Element('d')
tailResult = self.__processPlaceholders(tail, dumby) child.tail = None
if dumby.text: tailResult = self.__processPlaceholders(tail, dumby, False)
child.tail = dumby.text if dumby.tail:
else: child.tail = dumby.tail
child.tail = None pos = list(currElement).index(child) + 1
pos = currElement.getchildren().index(child) + 1
tailResult.reverse() tailResult.reverse()
for newChild in tailResult: for newChild in tailResult:
currElement.insert(pos, newChild) currElement.insert(pos, newChild)
if child.getchildren(): if len(child):
stack.append(child) stack.append(child)
for element, lst in insertQueue: for element, lst in insertQueue:
if self.markdown.enable_attributes: if self.markdown.enable_attributes:
if element.text and isString(element.text): if element.text and isString(element.text):
element.text = \ element.text = inlinepatterns.handleAttributes(
inlinepatterns.handleAttributes(element.text, element.text, element
element) )
i = 0 i = 0
for newChild in lst: for newChild in lst:
if self.markdown.enable_attributes: if self.markdown.enable_attributes:
# Processing attributes # Processing attributes
if newChild.tail and isString(newChild.tail): if newChild.tail and isString(newChild.tail):
newChild.tail = \ newChild.tail = inlinepatterns.handleAttributes(
inlinepatterns.handleAttributes(newChild.tail, newChild.tail, element
element) )
if newChild.text and isString(newChild.text): if newChild.text and isString(newChild.text):
newChild.text = \ newChild.text = inlinepatterns.handleAttributes(
inlinepatterns.handleAttributes(newChild.text, newChild.text, newChild
newChild) )
element.insert(i, newChild) element.insert(i, newChild)
i += 1 i += 1
return tree return tree
@ -357,4 +368,4 @@ class PrettifyTreeprocessor(Treeprocessor):
pres = root.getiterator('pre') pres = root.getiterator('pre')
for pre in pres: for pre in pres:
if len(pre) and pre[0].tag == 'code': if len(pre) and pre[0].tag == 'code':
pre[0].text = pre[0].text.rstrip() + '\n' pre[0].text = util.AtomicString(pre[0].text.rstrip() + '\n')

View File

@ -10,14 +10,14 @@ Python 3 Stuff
""" """
PY3 = sys.version_info[0] == 3 PY3 = sys.version_info[0] == 3
if PY3: if PY3: # pragma: no cover
string_type = str string_type = str
text_type = str text_type = str
int2str = chr int2str = chr
else: else: # pragma: no cover
string_type = basestring string_type = basestring # noqa
text_type = unicode text_type = unicode # noqa
int2str = unichr int2str = unichr # noqa
""" """
@ -25,12 +25,16 @@ Constants you might want to modify
----------------------------------------------------------------------------- -----------------------------------------------------------------------------
""" """
BLOCK_LEVEL_ELEMENTS = re.compile("^(p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
"|script|noscript|form|fieldset|iframe|math" BLOCK_LEVEL_ELEMENTS = re.compile(
"|hr|hr/|style|li|dt|dd|thead|tbody" "^(p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
"|tr|th|td|section|footer|header|group|figure" "|script|noscript|form|fieldset|iframe|math"
"|figcaption|aside|article|canvas|output" "|hr|hr/|style|li|dt|dd|thead|tbody"
"|progress|video)$", re.IGNORECASE) "|tr|th|td|section|footer|header|group|figure"
"|figcaption|aside|article|canvas|output"
"|progress|video|nav)$",
re.IGNORECASE
)
# Placeholders # Placeholders
STX = '\u0002' # Use STX ("Start of text") for start-of-placeholder STX = '\u0002' # Use STX ("Start of text") for start-of-placeholder
ETX = '\u0003' # Use ETX ("End of text") for end-of-placeholder ETX = '\u0003' # Use ETX ("End of text") for end-of-placeholder
@ -38,30 +42,36 @@ INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
INLINE_PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]+)') INLINE_PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]+)')
AMP_SUBSTITUTE = STX+"amp"+ETX AMP_SUBSTITUTE = STX+"amp"+ETX
HTML_PLACEHOLDER = STX + "wzxhzdk:%s" + ETX
HTML_PLACEHOLDER_RE = re.compile(HTML_PLACEHOLDER % r'([0-9]+)')
TAG_PLACEHOLDER = STX + "hzzhzkh:%s" + ETX
""" """
Constants you probably do not need to change Constants you probably do not need to change
----------------------------------------------------------------------------- -----------------------------------------------------------------------------
""" """
RTL_BIDI_RANGES = ( ('\u0590', '\u07FF'), RTL_BIDI_RANGES = (
# Hebrew (0590-05FF), Arabic (0600-06FF), ('\u0590', '\u07FF'),
# Syriac (0700-074F), Arabic supplement (0750-077F), # Hebrew (0590-05FF), Arabic (0600-06FF),
# Thaana (0780-07BF), Nko (07C0-07FF). # Syriac (0700-074F), Arabic supplement (0750-077F),
('\u2D30', '\u2D7F'), # Tifinagh # Thaana (0780-07BF), Nko (07C0-07FF).
) ('\u2D30', '\u2D7F') # Tifinagh
)
# Extensions should use "markdown.util.etree" instead of "etree" (or do `from # Extensions should use "markdown.util.etree" instead of "etree" (or do `from
# markdown.util import etree`). Do not import it by yourself. # markdown.util import etree`). Do not import it by yourself.
try: # Is the C implemenation of ElementTree available? try: # pragma: no cover
# Is the C implementation of ElementTree available?
import xml.etree.cElementTree as etree import xml.etree.cElementTree as etree
from xml.etree.ElementTree import Comment from xml.etree.ElementTree import Comment
# Serializers (including ours) test with non-c Comment # Serializers (including ours) test with non-c Comment
etree.test_comment = Comment etree.test_comment = Comment
if etree.VERSION < "1.0.5": if etree.VERSION < "1.0.5":
raise RuntimeError("cElementTree version 1.0.5 or higher is required.") raise RuntimeError("cElementTree version 1.0.5 or higher is required.")
except (ImportError, RuntimeError): except (ImportError, RuntimeError): # pragma: no cover
# Use the Python implementation of ElementTree? # Use the Python implementation of ElementTree?
import xml.etree.ElementTree as etree import xml.etree.ElementTree as etree
if etree.VERSION < "1.1": if etree.VERSION < "1.1":
@ -81,11 +91,32 @@ def isBlockLevel(tag):
# Some ElementTree tags are not strings, so return False. # Some ElementTree tags are not strings, so return False.
return False return False
def parseBoolValue(value, fail_on_errors=True, preserve_none=False):
"""Parses a string representing bool value. If parsing was successful,
returns True or False. If preserve_none=True, returns True, False,
or None. If parsing was not successful, raises ValueError, or, if
fail_on_errors=False, returns None."""
if not isinstance(value, string_type):
if preserve_none and value is None:
return value
return bool(value)
elif preserve_none and value.lower() == 'none':
return None
elif value.lower() in ('true', 'yes', 'y', 'on', '1'):
return True
elif value.lower() in ('false', 'no', 'n', 'off', '0', 'none'):
return False
elif fail_on_errors:
raise ValueError('Cannot parse bool value: %r' % value)
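A few illustrative calls, assuming the function is used directly from the bundled util module:

    # Illustrative calls to parseBoolValue.
    from calibre.ebooks.markdown.util import parseBoolValue

    parseBoolValue('yes')                           # True
    parseBoolValue('OFF')                           # False
    parseBoolValue('none', preserve_none=True)      # None
    parseBoolValue(0)                               # False; non-strings go through bool()
    parseBoolValue('maybe', fail_on_errors=False)   # None instead of raising ValueError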
""" """
MISC AUXILIARY CLASSES MISC AUXILIARY CLASSES
============================================================================= =============================================================================
""" """
class AtomicString(text_type): class AtomicString(text_type):
"""A string which should not be further processed.""" """A string which should not be further processed."""
pass pass
@ -103,10 +134,12 @@ class HtmlStash(object):
in the beginning and replace with place-holders. in the beginning and replace with place-holders.
""" """
def __init__ (self): def __init__(self):
""" Create a HtmlStash. """ """ Create a HtmlStash. """
self.html_counter = 0 # for counting inline html segments self.html_counter = 0 # for counting inline html segments
self.rawHtmlBlocks=[] self.rawHtmlBlocks = []
self.tag_counter = 0
self.tag_data = [] # list of dictionaries in the order tags appear
def store(self, html, safe=False): def store(self, html, safe=False):
""" """
@ -132,5 +165,13 @@ class HtmlStash(object):
self.rawHtmlBlocks = [] self.rawHtmlBlocks = []
def get_placeholder(self, key): def get_placeholder(self, key):
return "%swzxhzdk:%d%s" % (STX, key, ETX) return HTML_PLACEHOLDER % key
def store_tag(self, tag, attrs, left_index, right_index):
"""Store tag data and return a placeholder."""
self.tag_data.append({'tag': tag, 'attrs': attrs,
'left_index': left_index,
'right_index': right_index})
placeholder = TAG_PLACEHOLDER % str(self.tag_counter)
self.tag_counter += 1 # equal to the tag's index in self.tag_data
return placeholder
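A hypothetical standalone use of the stash (normally it is driven by the preprocessors), showing the two placeholder formats defined above:

    # Sketch of HtmlStash.store versus the new store_tag.
    from calibre.ebooks.markdown.util import HtmlStash

    stash = HtmlStash()
    p1 = stash.store('<script>alert(1)</script>')         # '\x02wzxhzdk:0\x03'
    p2 = stash.store_tag('div', {'markdown': '1'}, 0, 2)  # '\x02hzzhzkh:0\x03'
    assert p1 == stash.get_placeholder(0)
    assert stash.tag_data[0]['tag'] == 'div'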

View File

@ -98,8 +98,8 @@ def convert_basic(txt, title='', epub_split_size_kb=0):
def convert_markdown(txt, title='', extensions=('footnotes', 'tables', 'toc')): def convert_markdown(txt, title='', extensions=('footnotes', 'tables', 'toc')):
from calibre.ebooks.conversion.plugins.txt_input import MD_EXTENSIONS from calibre.ebooks.conversion.plugins.txt_input import MD_EXTENSIONS
from calibre.ebooks.markdown import Markdown from calibre.ebooks.markdown import Markdown
extensions = [x.lower() for x in extensions if x.lower() in MD_EXTENSIONS] extensions = ['calibre.ebooks.markdown.extensions.' + x.lower() for x in extensions if x.lower() in MD_EXTENSIONS]
md = Markdown(extensions=extensions, safe_mode=False) md = Markdown(extensions=extensions)
return HTML_TEMPLATE % (title, md.convert(txt)) return HTML_TEMPLATE % (title, md.convert(txt))
def convert_textile(txt, title=''): def convert_textile(txt, title=''):
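The changed call above relies on markdown 2.6 resolving extensions by their full dotted module path; a rough sketch of the same lookup with a hypothetical extension list:

    # Sketch: extensions are now named by their full bundled module path.
    from calibre.ebooks.markdown import Markdown

    names = ('footnotes', 'toc')   # hypothetical subset of MD_EXTENSIONS
    extensions = ['calibre.ebooks.markdown.extensions.' + x for x in names]
    md = Markdown(extensions=extensions)
    html = md.convert('# Title\n\nSome *text*.')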

View File

@ -233,6 +233,11 @@ def test_terminal():
del readline del readline
print ('readline and curses OK!') print ('readline and curses OK!')
def test_markdown():
from calibre.ebooks.markdown import Markdown
Markdown(extensions=['extra'])
print('Markdown OK!')
def test(): def test():
if iswindows: if iswindows:
test_dlls() test_dlls()
@ -256,6 +261,7 @@ def test():
test_netifaces() test_netifaces()
test_psutil() test_psutil()
test_podofo() test_podofo()
test_markdown()
if iswindows: if iswindows:
test_wpd() test_wpd()
test_winutil() test_winutil()