From 6fc606bfa6f77ebd0eebf5855a8456ac49bada50 Mon Sep 17 00:00:00 2001 From: John Schember Date: Sat, 3 Sep 2011 15:00:35 -0400 Subject: [PATCH] Update Python Markdown to version 2.0 --- src/calibre/ebooks/markdown/__init__.py | 7 - src/calibre/ebooks/markdown/blockparser.py | 95 + .../ebooks/markdown/blockprocessors.py | 460 ++++ src/calibre/ebooks/markdown/commandline.py | 96 + src/calibre/ebooks/markdown/etree_loader.py | 33 + .../ebooks/markdown/extensions/__init__.py | 0 .../ebooks/markdown/extensions/abbr.py | 96 + .../ebooks/markdown/extensions/codehilite.py | 224 ++ .../ebooks/markdown/extensions/def_list.py | 105 + .../ebooks/markdown/extensions/extra.py | 49 + .../ebooks/markdown/extensions/fenced_code.py | 118 + .../ebooks/markdown/extensions/footnotes.py | 294 +++ .../ebooks/markdown/extensions/headerid.py | 195 ++ .../ebooks/markdown/extensions/meta.py | 91 + src/calibre/ebooks/markdown/extensions/rss.py | 114 + .../ebooks/markdown/extensions/tables.py | 97 + src/calibre/ebooks/markdown/extensions/toc.py | 140 ++ .../ebooks/markdown/extensions/wikilinks.py | 155 ++ src/calibre/ebooks/markdown/html4.py | 274 ++ src/calibre/ebooks/markdown/inlinepatterns.py | 371 +++ src/calibre/ebooks/markdown/markdown.py | 2237 ++++------------- src/calibre/ebooks/markdown/mdx_footnotes.py | 257 -- src/calibre/ebooks/markdown/mdx_tables.py | 65 - src/calibre/ebooks/markdown/mdx_toc.py | 170 -- src/calibre/ebooks/markdown/odict.py | 162 ++ src/calibre/ebooks/markdown/postprocessors.py | 77 + src/calibre/ebooks/markdown/preprocessors.py | 214 ++ src/calibre/ebooks/markdown/treeprocessors.py | 329 +++ src/calibre/ebooks/txt/processor.py | 6 +- 29 files changed, 4285 insertions(+), 2246 deletions(-) create mode 100644 src/calibre/ebooks/markdown/blockparser.py create mode 100644 src/calibre/ebooks/markdown/blockprocessors.py create mode 100644 src/calibre/ebooks/markdown/commandline.py create mode 100644 src/calibre/ebooks/markdown/etree_loader.py create mode 100644 src/calibre/ebooks/markdown/extensions/__init__.py create mode 100644 src/calibre/ebooks/markdown/extensions/abbr.py create mode 100644 src/calibre/ebooks/markdown/extensions/codehilite.py create mode 100644 src/calibre/ebooks/markdown/extensions/def_list.py create mode 100644 src/calibre/ebooks/markdown/extensions/extra.py create mode 100644 src/calibre/ebooks/markdown/extensions/fenced_code.py create mode 100644 src/calibre/ebooks/markdown/extensions/footnotes.py create mode 100644 src/calibre/ebooks/markdown/extensions/headerid.py create mode 100644 src/calibre/ebooks/markdown/extensions/meta.py create mode 100644 src/calibre/ebooks/markdown/extensions/rss.py create mode 100644 src/calibre/ebooks/markdown/extensions/tables.py create mode 100644 src/calibre/ebooks/markdown/extensions/toc.py create mode 100644 src/calibre/ebooks/markdown/extensions/wikilinks.py create mode 100644 src/calibre/ebooks/markdown/html4.py create mode 100644 src/calibre/ebooks/markdown/inlinepatterns.py delete mode 100644 src/calibre/ebooks/markdown/mdx_footnotes.py delete mode 100644 src/calibre/ebooks/markdown/mdx_tables.py delete mode 100644 src/calibre/ebooks/markdown/mdx_toc.py create mode 100644 src/calibre/ebooks/markdown/odict.py create mode 100644 src/calibre/ebooks/markdown/postprocessors.py create mode 100644 src/calibre/ebooks/markdown/preprocessors.py create mode 100644 src/calibre/ebooks/markdown/treeprocessors.py diff --git a/src/calibre/ebooks/markdown/__init__.py b/src/calibre/ebooks/markdown/__init__.py index 2676e91934..e69de29bb2 100644 
--- a/src/calibre/ebooks/markdown/__init__.py +++ b/src/calibre/ebooks/markdown/__init__.py @@ -1,7 +0,0 @@ -''' Package defines lightweight markup language for processing of txt files''' -# Initialize extensions -from calibre.ebooks.markdown import mdx_footnotes -from calibre.ebooks.markdown import mdx_tables -from calibre.ebooks.markdown import mdx_toc - -mdx_footnotes, mdx_tables, mdx_toc diff --git a/src/calibre/ebooks/markdown/blockparser.py b/src/calibre/ebooks/markdown/blockparser.py new file mode 100644 index 0000000000..e18b338487 --- /dev/null +++ b/src/calibre/ebooks/markdown/blockparser.py @@ -0,0 +1,95 @@ + +import markdown + +class State(list): + """ Track the current and nested state of the parser. + + This utility class is used to track the state of the BlockParser and + support multiple levels if nesting. It's just a simple API wrapped around + a list. Each time a state is set, that state is appended to the end of the + list. Each time a state is reset, that state is removed from the end of + the list. + + Therefore, each time a state is set for a nested block, that state must be + reset when we back out of that level of nesting or the state could be + corrupted. + + While all the methods of a list object are available, only the three + defined below need be used. + + """ + + def set(self, state): + """ Set a new state. """ + self.append(state) + + def reset(self): + """ Step back one step in nested state. """ + self.pop() + + def isstate(self, state): + """ Test that top (current) level is of given state. """ + if len(self): + return self[-1] == state + else: + return False + +class BlockParser: + """ Parse Markdown blocks into an ElementTree object. + + A wrapper class that stitches the various BlockProcessors together, + looping through them and creating an ElementTree object. + """ + + def __init__(self): + self.blockprocessors = markdown.odict.OrderedDict() + self.state = State() + + def parseDocument(self, lines): + """ Parse a markdown document into an ElementTree. + + Given a list of lines, an ElementTree object (not just a parent Element) + is created and the root element is passed to the parser as the parent. + The ElementTree object is returned. + + This should only be called on an entire document, not pieces. + + """ + # Create a ElementTree from the lines + self.root = markdown.etree.Element(markdown.DOC_TAG) + self.parseChunk(self.root, '\n'.join(lines)) + return markdown.etree.ElementTree(self.root) + + def parseChunk(self, parent, text): + """ Parse a chunk of markdown text and attach to given etree node. + + While the ``text`` argument is generally assumed to contain multiple + blocks which will be split on blank lines, it could contain only one + block. Generally, this method would be called by extensions when + block parsing is required. + + The ``parent`` etree Element passed in is altered in place. + Nothing is returned. + + """ + self.parseBlocks(parent, text.split('\n\n')) + + def parseBlocks(self, parent, blocks): + """ Process blocks of markdown text and attach to given etree node. + + Given a list of ``blocks``, each blockprocessor is stepped through + until there are no blocks left. While an extension could potentially + call this method directly, it's generally expected to be used internally. + + This is a public method as an extension may need to add/alter additional + BlockProcessors which call this method to recursively parse a nested + block. 
+ + """ + while blocks: + for processor in self.blockprocessors.values(): + if processor.test(parent, blocks[0]): + processor.run(parent, blocks) + break + + diff --git a/src/calibre/ebooks/markdown/blockprocessors.py b/src/calibre/ebooks/markdown/blockprocessors.py new file mode 100644 index 0000000000..79f4db93bc --- /dev/null +++ b/src/calibre/ebooks/markdown/blockprocessors.py @@ -0,0 +1,460 @@ +""" +CORE MARKDOWN BLOCKPARSER +============================================================================= + +This parser handles basic parsing of Markdown blocks. It doesn't concern itself +with inline elements such as **bold** or *italics*, but rather just catches +blocks, lists, quotes, etc. + +The BlockParser is made up of a bunch of BlockProssors, each handling a +different type of block. Extensions may add/replace/remove BlockProcessors +as they need to alter how markdown blocks are parsed. + +""" + +import re +import markdown + +class BlockProcessor: + """ Base class for block processors. + + Each subclass will provide the methods below to work with the source and + tree. Each processor will need to define it's own ``test`` and ``run`` + methods. The ``test`` method should return True or False, to indicate + whether the current block should be processed by this processor. If the + test passes, the parser will call the processors ``run`` method. + + """ + + def __init__(self, parser=None): + self.parser = parser + + def lastChild(self, parent): + """ Return the last child of an etree element. """ + if len(parent): + return parent[-1] + else: + return None + + def detab(self, text): + """ Remove a tab from the front of each line of the given text. """ + newtext = [] + lines = text.split('\n') + for line in lines: + if line.startswith(' '*markdown.TAB_LENGTH): + newtext.append(line[markdown.TAB_LENGTH:]) + elif not line.strip(): + newtext.append('') + else: + break + return '\n'.join(newtext), '\n'.join(lines[len(newtext):]) + + def looseDetab(self, text, level=1): + """ Remove a tab from front of lines but allowing dedented lines. """ + lines = text.split('\n') + for i in range(len(lines)): + if lines[i].startswith(' '*markdown.TAB_LENGTH*level): + lines[i] = lines[i][markdown.TAB_LENGTH*level:] + return '\n'.join(lines) + + def test(self, parent, block): + """ Test for block type. Must be overridden by subclasses. + + As the parser loops through processors, it will call the ``test`` method + on each to determine if the given block of text is of that type. This + method must return a boolean ``True`` or ``False``. The actual method of + testing is left to the needs of that particular block type. It could + be as simple as ``block.startswith(some_string)`` or a complex regular + expression. As the block type may be different depending on the parent + of the block (i.e. inside a list), the parent etree element is also + provided and may be used as part of the test. + + Keywords: + + * ``parent``: A etree element which will be the parent of the block. + * ``block``: A block of text from the source which has been split at + blank lines. + """ + pass + + def run(self, parent, blocks): + """ Run processor. Must be overridden by subclasses. + + When the parser determines the appropriate type of a block, the parser + will call the corresponding processor's ``run`` method. This method + should parse the individual lines of the block and append them to + the etree. + + Note that both the ``parent`` and ``etree`` keywords are pointers + to instances of the objects which should be edited in place. 
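To make the test()/run() contract described above concrete, here is a small illustrative processor that is not part of the patch. The NoteProcessor and NoteExtension names, the '!!! ' marker and the '<paragraph' position are assumptions for the sketch; the registration call mirrors what the def_list extension later in this patch does with md.parser.blockprocessors.

    import calibre.ebooks.markdown.markdown as markdown

    class NoteProcessor(markdown.blockprocessors.BlockProcessor):
        """ Wrap any block starting with '!!! ' in a <div class="note">. """

        def test(self, parent, block):
            # Claim the block only when its first line carries the marker.
            return block.startswith('!!! ')

        def run(self, parent, blocks):
            block = blocks.pop(0)
            div = markdown.etree.SubElement(parent, 'div')
            div.set('class', 'note')
            # Record the nested state while the body is re-parsed, then back
            # out, following the set()/reset() discipline of the State class.
            self.parser.state.set('note')
            self.parser.parseChunk(div, block[len('!!! '):])
            self.parser.state.reset()

    class NoteExtension(markdown.Extension):
        def extendMarkdown(self, md, md_globals):
            # 'paragraph' is assumed to be the key under which the core
            # registers its ParagraphProcessor, as in upstream 2.0.
            md.parser.blockprocessors.add('note', NoteProcessor(md.parser),
                                          '<paragraph')

Assuming the core keeps upstream 2.0's behaviour of accepting extension instances, this could then be enabled with markdown.Markdown(extensions=[NoteExtension()]).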
Each + processor must make changes to the existing objects as there is no + mechanism to return new/different objects to replace them. + + This means that this method should be adding SubElements or adding text + to the parent, and should remove (``pop``) or add (``insert``) items to + the list of blocks. + + Keywords: + + * ``parent``: A etree element which is the parent of the current block. + * ``blocks``: A list of all remaining blocks of the document. + """ + pass + + +class ListIndentProcessor(BlockProcessor): + """ Process children of list items. + + Example: + * a list item + process this part + + or this part + + """ + + INDENT_RE = re.compile(r'^(([ ]{%s})+)'% markdown.TAB_LENGTH) + ITEM_TYPES = ['li'] + LIST_TYPES = ['ul', 'ol'] + + def test(self, parent, block): + return block.startswith(' '*markdown.TAB_LENGTH) and \ + not self.parser.state.isstate('detabbed') and \ + (parent.tag in self.ITEM_TYPES or \ + (len(parent) and parent[-1] and \ + (parent[-1].tag in self.LIST_TYPES) + ) + ) + + def run(self, parent, blocks): + block = blocks.pop(0) + level, sibling = self.get_level(parent, block) + block = self.looseDetab(block, level) + + self.parser.state.set('detabbed') + if parent.tag in self.ITEM_TYPES: + # The parent is already a li. Just parse the child block. + self.parser.parseBlocks(parent, [block]) + elif sibling.tag in self.ITEM_TYPES: + # The sibling is a li. Use it as parent. + self.parser.parseBlocks(sibling, [block]) + elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES: + # The parent is a list (``ol`` or ``ul``) which has children. + # Assume the last child li is the parent of this block. + if sibling[-1].text: + # If the parent li has text, that text needs to be moved to a p + block = '%s\n\n%s' % (sibling[-1].text, block) + sibling[-1].text = '' + self.parser.parseChunk(sibling[-1], block) + else: + self.create_item(sibling, block) + self.parser.state.reset() + + def create_item(self, parent, block): + """ Create a new li and parse the block with it as the parent. """ + li = markdown.etree.SubElement(parent, 'li') + self.parser.parseBlocks(li, [block]) + + def get_level(self, parent, block): + """ Get level of indent based on list level. """ + # Get indent level + m = self.INDENT_RE.match(block) + if m: + indent_level = len(m.group(1))/markdown.TAB_LENGTH + else: + indent_level = 0 + if self.parser.state.isstate('list'): + # We're in a tightlist - so we already are at correct parent. + level = 1 + else: + # We're in a looselist - so we need to find parent. + level = 0 + # Step through children of tree to find matching indent level. + while indent_level > level: + child = self.lastChild(parent) + if child and (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES): + if child.tag in self.LIST_TYPES: + level += 1 + parent = child + else: + # No more child levels. If we're short of indent_level, + # we have a code block. So we stop here. + break + return level, parent + + +class CodeBlockProcessor(BlockProcessor): + """ Process code blocks. """ + + def test(self, parent, block): + return block.startswith(' '*markdown.TAB_LENGTH) + + def run(self, parent, blocks): + sibling = self.lastChild(parent) + block = blocks.pop(0) + theRest = '' + if sibling and sibling.tag == "pre" and len(sibling) \ + and sibling[0].tag == "code": + # The previous block was a code block. As blank lines do not start + # new code blocks, append this block to the previous, adding back + # linebreaks removed from the split into a list. 
+ code = sibling[0] + block, theRest = self.detab(block) + code.text = markdown.AtomicString('%s\n%s\n' % (code.text, block.rstrip())) + else: + # This is a new codeblock. Create the elements and insert text. + pre = markdown.etree.SubElement(parent, 'pre') + code = markdown.etree.SubElement(pre, 'code') + block, theRest = self.detab(block) + code.text = markdown.AtomicString('%s\n' % block.rstrip()) + if theRest: + # This block contained unindented line(s) after the first indented + # line. Insert these lines as the first block of the master blocks + # list for future processing. + blocks.insert(0, theRest) + + +class BlockQuoteProcessor(BlockProcessor): + + RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)') + + def test(self, parent, block): + return bool(self.RE.search(block)) + + def run(self, parent, blocks): + block = blocks.pop(0) + m = self.RE.search(block) + if m: + before = block[:m.start()] # Lines before blockquote + # Pass lines before blockquote in recursively for parsing forst. + self.parser.parseBlocks(parent, [before]) + # Remove ``> `` from begining of each line. + block = '\n'.join([self.clean(line) for line in + block[m.start():].split('\n')]) + sibling = self.lastChild(parent) + if sibling and sibling.tag == "blockquote": + # Previous block was a blockquote so set that as this blocks parent + quote = sibling + else: + # This is a new blockquote. Create a new parent element. + quote = markdown.etree.SubElement(parent, 'blockquote') + # Recursively parse block with blockquote as parent. + self.parser.parseChunk(quote, block) + + def clean(self, line): + """ Remove ``>`` from beginning of a line. """ + m = self.RE.match(line) + if line.strip() == ">": + return "" + elif m: + return m.group(2) + else: + return line + +class OListProcessor(BlockProcessor): + """ Process ordered list blocks. """ + + TAG = 'ol' + # Detect an item (``1. item``). ``group(1)`` contains contents of item. + RE = re.compile(r'^[ ]{0,3}\d+\.[ ](.*)') + # Detect items on secondary lines. they can be of either list type. + CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.)|[*+-])[ ](.*)') + # Detect indented (nested) items of either type + INDENT_RE = re.compile(r'^[ ]{4,7}((\d+\.)|[*+-])[ ].*') + + def test(self, parent, block): + return bool(self.RE.match(block)) + + def run(self, parent, blocks): + # Check fr multiple items in one block. + items = self.get_items(blocks.pop(0)) + sibling = self.lastChild(parent) + if sibling and sibling.tag in ['ol', 'ul']: + # Previous block was a list item, so set that as parent + lst = sibling + # make sure previous item is in a p. + if len(lst) and lst[-1].text and not len(lst[-1]): + p = markdown.etree.SubElement(lst[-1], 'p') + p.text = lst[-1].text + lst[-1].text = '' + # parse first block differently as it gets wrapped in a p. + li = markdown.etree.SubElement(lst, 'li') + self.parser.state.set('looselist') + firstitem = items.pop(0) + self.parser.parseBlocks(li, [firstitem]) + self.parser.state.reset() + else: + # This is a new list so create parent with appropriate tag. + lst = markdown.etree.SubElement(parent, self.TAG) + self.parser.state.set('list') + # Loop through items in block, recursively parsing each with the + # appropriate parent. + for item in items: + if item.startswith(' '*markdown.TAB_LENGTH): + # Item is indented. Parse with last item as parent + self.parser.parseBlocks(lst[-1], [item]) + else: + # New item. 
Create li and parse with it as parent + li = markdown.etree.SubElement(lst, 'li') + self.parser.parseBlocks(li, [item]) + self.parser.state.reset() + + def get_items(self, block): + """ Break a block into list items. """ + items = [] + for line in block.split('\n'): + m = self.CHILD_RE.match(line) + if m: + # This is a new item. Append + items.append(m.group(3)) + elif self.INDENT_RE.match(line): + # This is an indented (possibly nested) item. + if items[-1].startswith(' '*markdown.TAB_LENGTH): + # Previous item was indented. Append to that item. + items[-1] = '%s\n%s' % (items[-1], line) + else: + items.append(line) + else: + # This is another line of previous item. Append to that item. + items[-1] = '%s\n%s' % (items[-1], line) + return items + + +class UListProcessor(OListProcessor): + """ Process unordered list blocks. """ + + TAG = 'ul' + RE = re.compile(r'^[ ]{0,3}[*+-][ ](.*)') + + +class HashHeaderProcessor(BlockProcessor): + """ Process Hash Headers. """ + + # Detect a header at start of any line in block + RE = re.compile(r'(^|\n)(?P#{1,6})(?P
.*?)#*(\n|$)') + + def test(self, parent, block): + return bool(self.RE.search(block)) + + def run(self, parent, blocks): + block = blocks.pop(0) + m = self.RE.search(block) + if m: + before = block[:m.start()] # All lines before header + after = block[m.end():] # All lines after header + if before: + # As the header was not the first line of the block and the + # lines before the header must be parsed first, + # recursively parse this lines as a block. + self.parser.parseBlocks(parent, [before]) + # Create header using named groups from RE + h = markdown.etree.SubElement(parent, 'h%d' % len(m.group('level'))) + h.text = m.group('header').strip() + if after: + # Insert remaining lines as first block for future parsing. + blocks.insert(0, after) + else: + # This should never happen, but just in case... + message(CRITICAL, "We've got a problem header!") + + +class SetextHeaderProcessor(BlockProcessor): + """ Process Setext-style Headers. """ + + # Detect Setext-style header. Must be first 2 lines of block. + RE = re.compile(r'^.*?\n[=-]{3,}', re.MULTILINE) + + def test(self, parent, block): + return bool(self.RE.match(block)) + + def run(self, parent, blocks): + lines = blocks.pop(0).split('\n') + # Determine level. ``=`` is 1 and ``-`` is 2. + if lines[1].startswith('='): + level = 1 + else: + level = 2 + h = markdown.etree.SubElement(parent, 'h%d' % level) + h.text = lines[0].strip() + if len(lines) > 2: + # Block contains additional lines. Add to master blocks for later. + blocks.insert(0, '\n'.join(lines[2:])) + + +class HRProcessor(BlockProcessor): + """ Process Horizontal Rules. """ + + RE = r'[ ]{0,3}(?P[*_-])[ ]?((?P=ch)[ ]?){2,}[ ]*' + # Detect hr on any line of a block. + SEARCH_RE = re.compile(r'(^|\n)%s(\n|$)' % RE) + # Match a hr on a single line of text. + MATCH_RE = re.compile(r'^%s$' % RE) + + def test(self, parent, block): + return bool(self.SEARCH_RE.search(block)) + + def run(self, parent, blocks): + lines = blocks.pop(0).split('\n') + prelines = [] + # Check for lines in block before hr. + for line in lines: + m = self.MATCH_RE.match(line) + if m: + break + else: + prelines.append(line) + if len(prelines): + # Recursively parse lines before hr so they get parsed first. + self.parser.parseBlocks(parent, ['\n'.join(prelines)]) + # create hr + hr = markdown.etree.SubElement(parent, 'hr') + # check for lines in block after hr. + lines = lines[len(prelines)+1:] + if len(lines): + # Add lines after hr to master blocks for later parsing. + blocks.insert(0, '\n'.join(lines)) + + +class EmptyBlockProcessor(BlockProcessor): + """ Process blocks and start with an empty line. """ + + # Detect a block that only contains whitespace + # or only whitespace on the first line. + RE = re.compile(r'^\s*\n') + + def test(self, parent, block): + return bool(self.RE.match(block)) + + def run(self, parent, blocks): + block = blocks.pop(0) + m = self.RE.match(block) + if m: + # Add remaining line to master blocks for later. + blocks.insert(0, block[m.end():]) + sibling = self.lastChild(parent) + if sibling and sibling.tag == 'pre' and sibling[0] and \ + sibling[0].tag == 'code': + # Last block is a codeblock. Append to preserve whitespace. + sibling[0].text = markdown.AtomicString('%s/n/n/n' % sibling[0].text ) + + +class ParagraphProcessor(BlockProcessor): + """ Process Paragraph blocks. """ + + def test(self, parent, block): + return True + + def run(self, parent, blocks): + block = blocks.pop(0) + if block.strip(): + # Not a blank block. Add to parent, otherwise throw it away. 
+ if self.parser.state.isstate('list'): + # The parent is a tight-list. Append to parent.text + if parent.text: + parent.text = '%s\n%s' % (parent.text, block) + else: + parent.text = block.lstrip() + else: + # Create a regular paragraph + p = markdown.etree.SubElement(parent, 'p') + p.text = block.lstrip() diff --git a/src/calibre/ebooks/markdown/commandline.py b/src/calibre/ebooks/markdown/commandline.py new file mode 100644 index 0000000000..1eedc6dbb1 --- /dev/null +++ b/src/calibre/ebooks/markdown/commandline.py @@ -0,0 +1,96 @@ +""" +COMMAND-LINE SPECIFIC STUFF +============================================================================= + +The rest of the code is specifically for handling the case where Python +Markdown is called from the command line. +""" + +import markdown +import sys +import logging +from logging import DEBUG, INFO, WARN, ERROR, CRITICAL + +EXECUTABLE_NAME_FOR_USAGE = "python markdown.py" +""" The name used in the usage statement displayed for python versions < 2.3. +(With python 2.3 and higher the usage statement is generated by optparse +and uses the actual name of the executable called.) """ + +OPTPARSE_WARNING = """ +Python 2.3 or higher required for advanced command line options. +For lower versions of Python use: + + %s INPUT_FILE > OUTPUT_FILE + +""" % EXECUTABLE_NAME_FOR_USAGE + +def parse_options(): + """ + Define and parse `optparse` options for command-line usage. + """ + + try: + optparse = __import__("optparse") + except: + if len(sys.argv) == 2: + return {'input': sys.argv[1], + 'output': None, + 'safe': False, + 'extensions': [], + 'encoding': None }, CRITICAL + else: + print OPTPARSE_WARNING + return None, None + + parser = optparse.OptionParser(usage="%prog INPUTFILE [options]") + parser.add_option("-f", "--file", dest="filename", default=sys.stdout, + help="write output to OUTPUT_FILE", + metavar="OUTPUT_FILE") + parser.add_option("-e", "--encoding", dest="encoding", + help="encoding for input and output files",) + parser.add_option("-q", "--quiet", default = CRITICAL, + action="store_const", const=CRITICAL+10, dest="verbose", + help="suppress all messages") + parser.add_option("-v", "--verbose", + action="store_const", const=INFO, dest="verbose", + help="print info messages") + parser.add_option("-s", "--safe", dest="safe", default=False, + metavar="SAFE_MODE", + help="safe mode ('replace', 'remove' or 'escape' user's HTML tag)") + parser.add_option("-o", "--output_format", dest="output_format", + default='xhtml1', metavar="OUTPUT_FORMAT", + help="Format of output. 
One of 'xhtml1' (default) or 'html4'.") + parser.add_option("--noisy", + action="store_const", const=DEBUG, dest="verbose", + help="print debug messages") + parser.add_option("-x", "--extension", action="append", dest="extensions", + help = "load extension EXTENSION", metavar="EXTENSION") + + (options, args) = parser.parse_args() + + if not len(args) == 1: + parser.print_help() + return None, None + else: + input_file = args[0] + + if not options.extensions: + options.extensions = [] + + return {'input': input_file, + 'output': options.filename, + 'safe_mode': options.safe, + 'extensions': options.extensions, + 'encoding': options.encoding, + 'output_format': options.output_format}, options.verbose + +def run(): + """Run Markdown from the command line.""" + + # Parse options and adjust logging level if necessary + options, logging_level = parse_options() + if not options: sys.exit(0) + if logging_level: logging.getLogger('MARKDOWN').setLevel(logging_level) + + # Run + markdown.markdownFromFile(**options) diff --git a/src/calibre/ebooks/markdown/etree_loader.py b/src/calibre/ebooks/markdown/etree_loader.py new file mode 100644 index 0000000000..e2599b2cb9 --- /dev/null +++ b/src/calibre/ebooks/markdown/etree_loader.py @@ -0,0 +1,33 @@ + +from markdown import message, CRITICAL +import sys + +## Import +def importETree(): + """Import the best implementation of ElementTree, return a module object.""" + etree_in_c = None + try: # Is it Python 2.5+ with C implemenation of ElementTree installed? + import xml.etree.cElementTree as etree_in_c + except ImportError: + try: # Is it Python 2.5+ with Python implementation of ElementTree? + import xml.etree.ElementTree as etree + except ImportError: + try: # An earlier version of Python with cElementTree installed? + import cElementTree as etree_in_c + except ImportError: + try: # An earlier version of Python with Python ElementTree? + import elementtree.ElementTree as etree + except ImportError: + message(CRITICAL, "Failed to import ElementTree") + sys.exit(1) + if etree_in_c and etree_in_c.VERSION < "1.0": + message(CRITICAL, "For cElementTree version 1.0 or higher is required.") + sys.exit(1) + elif etree_in_c : + return etree_in_c + elif etree.VERSION < "1.1": + message(CRITICAL, "For ElementTree version 1.1 or higher is required") + sys.exit(1) + else : + return etree + diff --git a/src/calibre/ebooks/markdown/extensions/__init__.py b/src/calibre/ebooks/markdown/extensions/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/calibre/ebooks/markdown/extensions/abbr.py b/src/calibre/ebooks/markdown/extensions/abbr.py new file mode 100644 index 0000000000..bcc727b47c --- /dev/null +++ b/src/calibre/ebooks/markdown/extensions/abbr.py @@ -0,0 +1,96 @@ +''' +Abbreviation Extension for Python-Markdown +========================================== + +This extension adds abbreviation handling to Python-Markdown. + +Simple Usage: + + >>> import markdown + >>> text = """ + ... Some text with an ABBR and a REF. Ignore REFERENCE and ref. + ... + ... *[ABBR]: Abbreviation + ... *[REF]: Abbreviation Reference + ... """ + >>> markdown.markdown(text, ['abbr']) + u'

<p>Some text with an <abbr title="Abbreviation">ABBR</abbr> and a <abbr title="Abbreviation Reference">REF</abbr>. Ignore REFERENCE and ref.</p>
' + +Copyright 2007-2008 +* [Waylan Limberg](http://achinghead.com/) +* [Seemant Kulleen](http://www.kulleen.org/) + + +''' + +import re +import calibre.ebooks.markdown.markdown as markdown +from calibre.ebooks.markdown.markdown import etree + +# Global Vars +ABBR_REF_RE = re.compile(r'[*]\[(?P[^\]]*)\][ ]?:\s*(?P.*)') + +class AbbrExtension(markdown.Extension): + """ Abbreviation Extension for Python-Markdown. """ + + def extendMarkdown(self, md, md_globals): + """ Insert AbbrPreprocessor before ReferencePreprocessor. """ + md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference') + + +class AbbrPreprocessor(markdown.preprocessors.Preprocessor): + """ Abbreviation Preprocessor - parse text for abbr references. """ + + def run(self, lines): + ''' + Find and remove all Abbreviation references from the text. + Each reference is set as a new AbbrPattern in the markdown instance. + + ''' + new_text = [] + for line in lines: + m = ABBR_REF_RE.match(line) + if m: + abbr = m.group('abbr').strip() + title = m.group('title').strip() + self.markdown.inlinePatterns['abbr-%s'%abbr] = \ + AbbrPattern(self._generate_pattern(abbr), title) + else: + new_text.append(line) + return new_text + + def _generate_pattern(self, text): + ''' + Given a string, returns an regex pattern to match that string. + + 'HTML' -> r'(?P<abbr>[H][T][M][L])' + + Note: we force each char as a literal match (in brackets) as we don't + know what they will be beforehand. + + ''' + chars = list(text) + for i in range(len(chars)): + chars[i] = r'[%s]' % chars[i] + return r'(?P<abbr>\b%s\b)' % (r''.join(chars)) + + +class AbbrPattern(markdown.inlinepatterns.Pattern): + """ Abbreviation inline pattern. """ + + def __init__(self, pattern, title): + markdown.inlinepatterns.Pattern.__init__(self, pattern) + self.title = title + + def handleMatch(self, m): + abbr = etree.Element('abbr') + abbr.text = m.group('abbr') + abbr.set('title', self.title) + return abbr + +def makeExtension(configs=None): + return AbbrExtension(configs=configs) + +if __name__ == "__main__": + import doctest + doctest.testmod() diff --git a/src/calibre/ebooks/markdown/extensions/codehilite.py b/src/calibre/ebooks/markdown/extensions/codehilite.py new file mode 100644 index 0000000000..42649ec252 --- /dev/null +++ b/src/calibre/ebooks/markdown/extensions/codehilite.py @@ -0,0 +1,224 @@ +#!/usr/bin/python + +""" +CodeHilite Extension for Python-Markdown +======================================== + +Adds code/syntax highlighting to standard Python-Markdown code blocks. + +Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/). + +Project website: <http://www.freewisdom.org/project/python-markdown/CodeHilite> +Contact: markdown@freewisdom.org + +License: BSD (see ../docs/LICENSE for details) + +Dependencies: +* [Python 2.3+](http://python.org/) +* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/) +* [Pygments](http://pygments.org/) + +""" + +import calibre.ebooks.markdown.markdown as markdown + +# --------------- CONSTANTS YOU MIGHT WANT TO MODIFY ----------------- + +try: + TAB_LENGTH = markdown.TAB_LENGTH +except AttributeError: + TAB_LENGTH = 4 + + +# ------------------ The Main CodeHilite Class ---------------------- +class CodeHilite: + """ + Determine language of source code, and pass it into the pygments hilighter. + + Basic Usage: + >>> code = CodeHilite(src = 'some text') + >>> html = code.hilite() + + * src: Source string or any object with a .readline attribute. 
+ + * linenos: (Boolen) Turn line numbering 'on' or 'off' (off by default). + + * css_class: Set class name of wrapper div ('codehilite' by default). + + Low Level Usage: + >>> code = CodeHilite() + >>> code.src = 'some text' # String or anything with a .readline attr. + >>> code.linenos = True # True or False; Turns line numbering on or of. + >>> html = code.hilite() + + """ + + def __init__(self, src=None, linenos=False, css_class="codehilite"): + self.src = src + self.lang = None + self.linenos = linenos + self.css_class = css_class + + def hilite(self): + """ + Pass code to the [Pygments](http://pygments.pocoo.org/) highliter with + optional line numbers. The output should then be styled with css to + your liking. No styles are applied by default - only styling hooks + (i.e.: <span class="k">). + + returns : A string of html. + + """ + + self.src = self.src.strip('\n') + + self._getLang() + + try: + from pygments import highlight + from pygments.lexers import get_lexer_by_name, guess_lexer, \ + TextLexer + from pygments.formatters import HtmlFormatter + except ImportError: + # just escape and pass through + txt = self._escape(self.src) + if self.linenos: + txt = self._number(txt) + else : + txt = '<div class="%s"><pre>%s</pre></div>\n'% \ + (self.css_class, txt) + return txt + else: + try: + lexer = get_lexer_by_name(self.lang) + except ValueError: + try: + lexer = guess_lexer(self.src) + except ValueError: + lexer = TextLexer() + formatter = HtmlFormatter(linenos=self.linenos, + cssclass=self.css_class) + return highlight(self.src, lexer, formatter) + + def _escape(self, txt): + """ basic html escaping """ + txt = txt.replace('&', '&') + txt = txt.replace('<', '<') + txt = txt.replace('>', '>') + txt = txt.replace('"', '"') + return txt + + def _number(self, txt): + """ Use <ol> for line numbering """ + # Fix Whitespace + txt = txt.replace('\t', ' '*TAB_LENGTH) + txt = txt.replace(" "*4, "    ") + txt = txt.replace(" "*3, "   ") + txt = txt.replace(" "*2, "  ") + + # Add line numbers + lines = txt.splitlines() + txt = '<div class="codehilite"><pre><ol>\n' + for line in lines: + txt += '\t<li>%s</li>\n'% line + txt += '</ol></pre></div>\n' + return txt + + + def _getLang(self): + """ + Determines language of a code block from shebang lines and whether said + line should be removed or left in place. If the sheband line contains a + path (even a single /) then it is assumed to be a real shebang lines and + left alone. However, if no path is given (e.i.: #!python or :::python) + then it is assumed to be a mock shebang for language identifitation of a + code fragment and removed from the code block prior to processing for + code highlighting. When a mock shebang (e.i: #!python) is found, line + numbering is turned on. When colons are found in place of a shebang + (e.i.: :::python), line numbering is left in the current state - off + by default. + + """ + + import re + + #split text into lines + lines = self.src.split("\n") + #pull first line to examine + fl = lines.pop(0) + + c = re.compile(r''' + (?:(?:::+)|(?P<shebang>[#]!)) # Shebang or 2 or more colons. + (?P<path>(?:/\w+)*[/ ])? 
# Zero or 1 path + (?P<lang>[\w+-]*) # The language + ''', re.VERBOSE) + # search first line for shebang + m = c.search(fl) + if m: + # we have a match + try: + self.lang = m.group('lang').lower() + except IndexError: + self.lang = None + if m.group('path'): + # path exists - restore first line + lines.insert(0, fl) + if m.group('shebang'): + # shebang exists - use line numbers + self.linenos = True + else: + # No match + lines.insert(0, fl) + + self.src = "\n".join(lines).strip("\n") + + + +# ------------------ The Markdown Extension ------------------------------- +class HiliteTreeprocessor(markdown.treeprocessors.Treeprocessor): + """ Hilight source code in code blocks. """ + + def run(self, root): + """ Find code blocks and store in htmlStash. """ + blocks = root.getiterator('pre') + for block in blocks: + children = block.getchildren() + if len(children) == 1 and children[0].tag == 'code': + code = CodeHilite(children[0].text, + linenos=self.config['force_linenos'][0], + css_class=self.config['css_class'][0]) + placeholder = self.markdown.htmlStash.store(code.hilite(), + safe=True) + # Clear codeblock in etree instance + block.clear() + # Change to p element which will later + # be removed when inserting raw html + block.tag = 'p' + block.text = placeholder + + +class CodeHiliteExtension(markdown.Extension): + """ Add source code hilighting to markdown codeblocks. """ + + def __init__(self, configs): + # define default configs + self.config = { + 'force_linenos' : [False, "Force line numbers - Default: False"], + 'css_class' : ["codehilite", + "Set class name for wrapper <div> - Default: codehilite"], + } + + # Override defaults with user settings + for key, value in configs: + self.setConfig(key, value) + + def extendMarkdown(self, md, md_globals): + """ Add HilitePostprocessor to Markdown instance. """ + hiliter = HiliteTreeprocessor(md) + hiliter.config = self.config + md.treeprocessors.add("hilite", hiliter, "_begin") + + +def makeExtension(configs={}): + return CodeHiliteExtension(configs=configs) + diff --git a/src/calibre/ebooks/markdown/extensions/def_list.py b/src/calibre/ebooks/markdown/extensions/def_list.py new file mode 100644 index 0000000000..5bc1eb56b0 --- /dev/null +++ b/src/calibre/ebooks/markdown/extensions/def_list.py @@ -0,0 +1,105 @@ +#!/usr/bin/env Python +""" +Definition List Extension for Python-Markdown +============================================= + +Added parsing of Definition Lists to Python-Markdown. + +A simple example: + + Apple + : Pomaceous fruit of plants of the genus Malus in + the family Rosaceae. + : An american computer company. + + Orange + : The fruit of an evergreen tree of the genus Citrus. + +Copyright 2008 - [Waylan Limberg](http://achinghead.com) + +""" + +import re +import calibre.ebooks.markdown.markdown as markdown +from calibre.ebooks.markdown.markdown import etree + + +class DefListProcessor(markdown.blockprocessors.BlockProcessor): + """ Process Definition Lists. 
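As a usage sketch that is not part of the patch, the CodeHilite extension above can be handed explicit settings through the list-of-tuples configs form its __init__ iterates over. The sample text, and the assumption that the vendored core keeps upstream 2.0's support for passing extension instances to Markdown(), are the only pieces not taken from the patch; without Pygments installed the output falls back to the escaped pass-through produced by hilite().

    import calibre.ebooks.markdown.markdown as markdown
    from calibre.ebooks.markdown.extensions.codehilite import CodeHiliteExtension

    # configs is consumed as (key, value) pairs by CodeHiliteExtension.__init__.
    hilite = CodeHiliteExtension(configs=[('force_linenos', True),
                                          ('css_class', 'highlight')])
    md = markdown.Markdown(extensions=[hilite])
    # An indented code block with a mock shebang (#!python) lets _getLang()
    # pick the Python lexer before highlighting.
    html = md.convert("    #!python\n    print 'hello'")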
""" + + RE = re.compile(r'(^|\n)[ ]{0,3}:[ ]{1,3}(.*?)(\n|$)') + + def test(self, parent, block): + return bool(self.RE.search(block)) + + def run(self, parent, blocks): + block = blocks.pop(0) + m = self.RE.search(block) + terms = [l.strip() for l in block[:m.start()].split('\n') if l.strip()] + d, theRest = self.detab(block[m.end():]) + if d: + d = '%s\n%s' % (m.group(2), d) + else: + d = m.group(2) + #import ipdb; ipdb.set_trace() + sibling = self.lastChild(parent) + if not terms and sibling.tag == 'p': + # The previous paragraph contains the terms + state = 'looselist' + terms = sibling.text.split('\n') + parent.remove(sibling) + # Aquire new sibling + sibling = self.lastChild(parent) + else: + state = 'list' + + if sibling and sibling.tag == 'dl': + # This is another item on an existing list + dl = sibling + if len(dl) and dl[-1].tag == 'dd' and len(dl[-1]): + state = 'looselist' + else: + # This is a new list + dl = etree.SubElement(parent, 'dl') + # Add terms + for term in terms: + dt = etree.SubElement(dl, 'dt') + dt.text = term + # Add definition + self.parser.state.set(state) + dd = etree.SubElement(dl, 'dd') + self.parser.parseBlocks(dd, [d]) + self.parser.state.reset() + + if theRest: + blocks.insert(0, theRest) + +class DefListIndentProcessor(markdown.blockprocessors.ListIndentProcessor): + """ Process indented children of definition list items. """ + + ITEM_TYPES = ['dd'] + LIST_TYPES = ['dl'] + + def create_item(parent, block): + """ Create a new dd and parse the block with it as the parent. """ + dd = markdown.etree.SubElement(parent, 'dd') + self.parser.parseBlocks(dd, [block]) + + + +class DefListExtension(markdown.Extension): + """ Add definition lists to Markdown. """ + + def extendMarkdown(self, md, md_globals): + """ Add an instance of DefListProcessor to BlockParser. """ + md.parser.blockprocessors.add('defindent', + DefListIndentProcessor(md.parser), + '>indent') + md.parser.blockprocessors.add('deflist', + DefListProcessor(md.parser), + '>ulist') + + +def makeExtension(configs={}): + return DefListExtension(configs=configs) + diff --git a/src/calibre/ebooks/markdown/extensions/extra.py b/src/calibre/ebooks/markdown/extensions/extra.py new file mode 100644 index 0000000000..830c9455e1 --- /dev/null +++ b/src/calibre/ebooks/markdown/extensions/extra.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +""" +Python-Markdown Extra Extension +=============================== + +A compilation of various Python-Markdown extensions that imitates +[PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/). + +Note that each of the individual extensions still need to be available +on your PYTHONPATH. This extension simply wraps them all up as a +convenience so that only one extension needs to be listed when +initiating Markdown. See the documentation for each individual +extension for specifics about that extension. + +In the event that one or more of the supported extensions are not +available for import, Markdown will issue a warning and simply continue +without that extension. + +There may be additional extensions that are distributed with +Python-Markdown that are not included here in Extra. Those extensions +are not part of PHP Markdown Extra, and therefore, not part of +Python-Markdown Extra. If you really would like Extra to include +additional extensions, we suggest creating your own clone of Extra +under a differant name. 
You could also edit the `extensions` global +variable defined below, but be aware that such changes may be lost +when you upgrade to any future version of Python-Markdown. + +""" + +import calibre.ebooks.markdown.markdown as markdown + +extensions = ['fenced_code', + 'footnotes', + 'headerid', + 'def_list', + 'tables', + 'abbr', + ] + + +class ExtraExtension(markdown.Extension): + """ Add various extensions to Markdown class.""" + + def extendMarkdown(self, md, md_globals): + """ Register extension instances. """ + md.registerExtensions(extensions, self.config) + +def makeExtension(configs={}): + return ExtraExtension(configs=dict(configs)) diff --git a/src/calibre/ebooks/markdown/extensions/fenced_code.py b/src/calibre/ebooks/markdown/extensions/fenced_code.py new file mode 100644 index 0000000000..6e12c23319 --- /dev/null +++ b/src/calibre/ebooks/markdown/extensions/fenced_code.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python + +""" +Fenced Code Extension for Python Markdown +========================================= + +This extension adds Fenced Code Blocks to Python-Markdown. + + >>> import markdown + >>> text = ''' + ... A paragraph before a fenced code block: + ... + ... ~~~ + ... Fenced code block + ... ~~~ + ... ''' + >>> html = markdown.markdown(text, extensions=['fenced_code']) + >>> html + u'<p>A paragraph before a fenced code block:</p>\\n<pre><code>Fenced code block\\n</code></pre>' + +Works with safe_mode also (we check this because we are using the HtmlStash): + + >>> markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace') + u'<p>A paragraph before a fenced code block:</p>\\n<pre><code>Fenced code block\\n</code></pre>' + +Include tilde's in a code block and wrap with blank lines: + + >>> text = ''' + ... ~~~~~~~~ + ... + ... ~~~~ + ... + ... ~~~~~~~~''' + >>> markdown.markdown(text, extensions=['fenced_code']) + u'<pre><code>\\n~~~~\\n\\n</code></pre>' + +Multiple blocks and language tags: + + >>> text = ''' + ... ~~~~{.python} + ... block one + ... ~~~~ + ... + ... ~~~~.html + ... <p>block two</p> + ... ~~~~''' + >>> markdown.markdown(text, extensions=['fenced_code']) + u'<pre><code class="python">block one\\n</code></pre>\\n\\n<pre><code class="html"><p>block two</p>\\n</code></pre>' + +Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/). + +Project website: <http://www.freewisdom.org/project/python-markdown/Fenced__Code__Blocks> +Contact: markdown@freewisdom.org + +License: BSD (see ../docs/LICENSE for details) + +Dependencies: +* [Python 2.3+](http://python.org) +* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/) + +""" + +import re +import calibre.ebooks.markdown.markdown as markdown + +# Global vars +FENCED_BLOCK_RE = re.compile( \ + r'(?P<fence>^~{3,})[ ]*(\{?\.(?P<lang>[a-zA-Z0-9_-]*)\}?)?[ ]*\n(?P<code>.*?)(?P=fence)[ ]*$', + re.MULTILINE|re.DOTALL + ) +CODE_WRAP = '<pre><code%s>%s</code></pre>' +LANG_TAG = ' class="%s"' + + +class FencedCodeExtension(markdown.Extension): + + def extendMarkdown(self, md, md_globals): + """ Add FencedBlockPreprocessor to the Markdown instance. """ + + md.preprocessors.add('fenced_code_block', + FencedBlockPreprocessor(md), + "_begin") + + +class FencedBlockPreprocessor(markdown.preprocessors.Preprocessor): + + def run(self, lines): + """ Match and store Fenced Code Blocks in the HtmlStash. 
""" + text = "\n".join(lines) + while 1: + m = FENCED_BLOCK_RE.search(text) + if m: + lang = '' + if m.group('lang'): + lang = LANG_TAG % m.group('lang') + code = CODE_WRAP % (lang, self._escape(m.group('code'))) + placeholder = self.markdown.htmlStash.store(code, safe=True) + text = '%s\n%s\n%s'% (text[:m.start()], placeholder, text[m.end():]) + else: + break + return text.split("\n") + + def _escape(self, txt): + """ basic html escaping """ + txt = txt.replace('&', '&') + txt = txt.replace('<', '<') + txt = txt.replace('>', '>') + txt = txt.replace('"', '"') + return txt + + +def makeExtension(configs=None): + return FencedCodeExtension() + + +if __name__ == "__main__": + import doctest + doctest.testmod() diff --git a/src/calibre/ebooks/markdown/extensions/footnotes.py b/src/calibre/ebooks/markdown/extensions/footnotes.py new file mode 100644 index 0000000000..729b49b2e6 --- /dev/null +++ b/src/calibre/ebooks/markdown/extensions/footnotes.py @@ -0,0 +1,294 @@ +""" +========================= FOOTNOTES ================================= + +This section adds footnote handling to markdown. It can be used as +an example for extending python-markdown with relatively complex +functionality. While in this case the extension is included inside +the module itself, it could just as easily be added from outside the +module. Not that all markdown classes above are ignorant about +footnotes. All footnote functionality is provided separately and +then added to the markdown instance at the run time. + +Footnote functionality is attached by calling extendMarkdown() +method of FootnoteExtension. The method also registers the +extension to allow it's state to be reset by a call to reset() +method. + +Example: + Footnotes[^1] have a label[^label] and a definition[^!DEF]. + + [^1]: This is a footnote + [^label]: A footnote on "label" + [^!DEF]: The footnote for definition + +""" + +import re +import calibre.ebooks.markdown.markdown as markdown +from calibre.ebooks.markdown.markdown import etree + +FN_BACKLINK_TEXT = "zz1337820767766393qq" +NBSP_PLACEHOLDER = "qq3936677670287331zz" +DEF_RE = re.compile(r'(\ ?\ ?\ ?)\[\^([^\]]*)\]:\s*(.*)') +TABBED_RE = re.compile(r'((\t)|( ))(.*)') + +class FootnoteExtension(markdown.Extension): + """ Footnote Extension. """ + + def __init__ (self, configs): + """ Setup configs. """ + self.config = {'PLACE_MARKER': + ["///Footnotes Go Here///", + "The text string that marks where the footnotes go"]} + + for key, value in configs: + self.config[key][0] = value + + self.reset() + + def extendMarkdown(self, md, md_globals): + """ Add pieces to Markdown. """ + md.registerExtension(self) + self.parser = md.parser + # Insert a preprocessor before ReferencePreprocessor + md.preprocessors.add("footnote", FootnotePreprocessor(self), + "<reference") + # Insert an inline pattern before ImageReferencePattern + FOOTNOTE_RE = r'\[\^([^\]]*)\]' # blah blah [^1] blah + md.inlinePatterns.add("footnote", FootnotePattern(FOOTNOTE_RE, self), + "<reference") + # Insert a tree-processor that would actually add the footnote div + # This must be before the inline treeprocessor so inline patterns + # run on the contents of the div. + md.treeprocessors.add("footnote", FootnoteTreeprocessor(self), + "<inline") + # Insert a postprocessor after amp_substitute oricessor + md.postprocessors.add("footnote", FootnotePostprocessor(self), + ">amp_substitute") + + def reset(self): + """ Clear the footnotes on reset. 
""" + self.footnotes = markdown.odict.OrderedDict() + + def findFootnotesPlaceholder(self, root): + """ Return ElementTree Element that contains Footnote placeholder. """ + def finder(element): + for child in element: + if child.text: + if child.text.find(self.getConfig("PLACE_MARKER")) > -1: + return child, True + if child.tail: + if child.tail.find(self.getConfig("PLACE_MARKER")) > -1: + return (child, element), False + finder(child) + return None + + res = finder(root) + return res + + def setFootnote(self, id, text): + """ Store a footnote for later retrieval. """ + self.footnotes[id] = text + + def makeFootnoteId(self, id): + """ Return footnote link id. """ + return 'fn:%s' % id + + def makeFootnoteRefId(self, id): + """ Return footnote back-link id. """ + return 'fnref:%s' % id + + def makeFootnotesDiv(self, root): + """ Return div of footnotes as et Element. """ + + if not self.footnotes.keys(): + return None + + div = etree.Element("div") + div.set('class', 'footnote') + hr = etree.SubElement(div, "hr") + ol = etree.SubElement(div, "ol") + + for id in self.footnotes.keys(): + li = etree.SubElement(ol, "li") + li.set("id", self.makeFootnoteId(id)) + self.parser.parseChunk(li, self.footnotes[id]) + backlink = etree.Element("a") + backlink.set("href", "#" + self.makeFootnoteRefId(id)) + backlink.set("rev", "footnote") + backlink.set("title", "Jump back to footnote %d in the text" % \ + (self.footnotes.index(id)+1)) + backlink.text = FN_BACKLINK_TEXT + + if li.getchildren(): + node = li[-1] + if node.tag == "p": + node.text = node.text + NBSP_PLACEHOLDER + node.append(backlink) + else: + p = etree.SubElement(li, "p") + p.append(backlink) + return div + + +class FootnotePreprocessor(markdown.preprocessors.Preprocessor): + """ Find all footnote references and store for later use. """ + + def __init__ (self, footnotes): + self.footnotes = footnotes + + def run(self, lines): + lines = self._handleFootnoteDefinitions(lines) + text = "\n".join(lines) + return text.split("\n") + + def _handleFootnoteDefinitions(self, lines): + """ + Recursively find all footnote definitions in lines. + + Keywords: + + * lines: A list of lines of text + + Return: A list of lines with footnote definitions removed. + + """ + i, id, footnote = self._findFootnoteDefinition(lines) + + if id : + plain = lines[:i] + detabbed, theRest = self.detectTabbed(lines[i+1:]) + self.footnotes.setFootnote(id, + footnote + "\n" + + "\n".join(detabbed)) + more_plain = self._handleFootnoteDefinitions(theRest) + return plain + [""] + more_plain + else : + return lines + + def _findFootnoteDefinition(self, lines): + """ + Find the parts of a footnote definition. + + Keywords: + + * lines: A list of lines of text. + + Return: A three item tuple containing the index of the first line of a + footnote definition, the id of the definition and the body of the + definition. + + """ + counter = 0 + for line in lines: + m = DEF_RE.match(line) + if m: + return counter, m.group(2), m.group(3) + counter += 1 + return counter, None, None + + def detectTabbed(self, lines): + """ Find indented text and remove indent before further proccesing. 
+ + Keyword arguments: + + * lines: an array of strings + + Returns: a list of post processed items and the unused + remainder of the original list + + """ + items = [] + item = -1 + i = 0 # to keep track of where we are + + def detab(line): + match = TABBED_RE.match(line) + if match: + return match.group(4) + + for line in lines: + if line.strip(): # Non-blank line + line = detab(line) + if line: + items.append(line) + i += 1 + continue + else: + return items, lines[i:] + + else: # Blank line: _maybe_ we are done. + i += 1 # advance + + # Find the next non-blank line + for j in range(i, len(lines)): + if lines[j].strip(): + next_line = lines[j]; break + else: + break # There is no more text; we are done. + + # Check if the next non-blank line is tabbed + if detab(next_line): # Yes, more work to do. + items.append("") + continue + else: + break # No, we are done. + else: + i += 1 + + return items, lines[i:] + + +class FootnotePattern(markdown.inlinepatterns.Pattern): + """ InlinePattern for footnote markers in a document's body text. """ + + def __init__(self, pattern, footnotes): + markdown.inlinepatterns.Pattern.__init__(self, pattern) + self.footnotes = footnotes + + def handleMatch(self, m): + sup = etree.Element("sup") + a = etree.SubElement(sup, "a") + id = m.group(2) + sup.set('id', self.footnotes.makeFootnoteRefId(id)) + a.set('href', '#' + self.footnotes.makeFootnoteId(id)) + a.set('rel', 'footnote') + a.text = str(self.footnotes.footnotes.index(id) + 1) + return sup + + +class FootnoteTreeprocessor(markdown.treeprocessors.Treeprocessor): + """ Build and append footnote div to end of document. """ + + def __init__ (self, footnotes): + self.footnotes = footnotes + + def run(self, root): + footnotesDiv = self.footnotes.makeFootnotesDiv(root) + if footnotesDiv: + result = self.footnotes.findFootnotesPlaceholder(root) + if result: + node, isText = result + if isText: + node.text = None + node.getchildren().insert(0, footnotesDiv) + else: + child, element = node + ind = element.getchildren().find(child) + element.getchildren().insert(ind + 1, footnotesDiv) + child.tail = None + fnPlaceholder.parent.replaceChild(fnPlaceholder, footnotesDiv) + else: + root.append(footnotesDiv) + +class FootnotePostprocessor(markdown.postprocessors.Postprocessor): + """ Replace placeholders with html entities. """ + + def run(self, text): + text = text.replace(FN_BACKLINK_TEXT, "↩") + return text.replace(NBSP_PLACEHOLDER, " ") + +def makeExtension(configs=[]): + """ Return an instance of the FootnoteExtension """ + return FootnoteExtension(configs=configs) + diff --git a/src/calibre/ebooks/markdown/extensions/headerid.py b/src/calibre/ebooks/markdown/extensions/headerid.py new file mode 100644 index 0000000000..7433673736 --- /dev/null +++ b/src/calibre/ebooks/markdown/extensions/headerid.py @@ -0,0 +1,195 @@ +#!/usr/bin/python + +""" +HeaderID Extension for Python-Markdown +====================================== + +Adds ability to set HTML IDs for headers. + +Basic usage: + + >>> import markdown + >>> text = "# Some Header # {#some_id}" + >>> md = markdown.markdown(text, ['headerid']) + >>> md + u'<h1 id="some_id">Some Header</h1>' + +All header IDs are unique: + + >>> text = ''' + ... #Header + ... #Another Header {#header} + ... 
#Third Header {#header}''' + >>> md = markdown.markdown(text, ['headerid']) + >>> md + u'<h1 id="header">Header</h1>\\n<h1 id="header_1">Another Header</h1>\\n<h1 id="header_2">Third Header</h1>' + +To fit within a html template's hierarchy, set the header base level: + + >>> text = ''' + ... #Some Header + ... ## Next Level''' + >>> md = markdown.markdown(text, ['headerid(level=3)']) + >>> md + u'<h3 id="some_header">Some Header</h3>\\n<h4 id="next_level">Next Level</h4>' + +Turn off auto generated IDs: + + >>> text = ''' + ... # Some Header + ... # Header with ID # { #foo }''' + >>> md = markdown.markdown(text, ['headerid(forceid=False)']) + >>> md + u'<h1>Some Header</h1>\\n<h1 id="foo">Header with ID</h1>' + +Use with MetaData extension: + + >>> text = '''header_level: 2 + ... header_forceid: Off + ... + ... # A Header''' + >>> md = markdown.markdown(text, ['headerid', 'meta']) + >>> md + u'<h2>A Header</h2>' + +Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/). + +Project website: <http://www.freewisdom.org/project/python-markdown/HeaderId> +Contact: markdown@freewisdom.org + +License: BSD (see ../docs/LICENSE for details) + +Dependencies: +* [Python 2.3+](http://python.org) +* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/) + +""" + +import calibre.ebooks.markdown.markdown as markdown +from calibre.ebooks.markdown.markdown import etree +import re +from string import ascii_lowercase, digits, punctuation + +ID_CHARS = ascii_lowercase + digits + '-_' +IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$') + + +class HeaderIdProcessor(markdown.blockprocessors.BlockProcessor): + """ Replacement BlockProcessor for Header IDs. """ + + # Detect a header at start of any line in block + RE = re.compile(r"""(^|\n) + (?P<level>\#{1,6}) # group('level') = string of hashes + (?P<header>.*?) # group('header') = Header text + \#* # optional closing hashes + (?:[ \t]*\{[ \t]*\#(?P<id>[-_:a-zA-Z0-9]+)[ \t]*\})? + (\n|$) # ^^ group('id') = id attribute + """, + re.VERBOSE) + + IDs = [] + + def test(self, parent, block): + return bool(self.RE.search(block)) + + def run(self, parent, blocks): + block = blocks.pop(0) + m = self.RE.search(block) + if m: + before = block[:m.start()] # All lines before header + after = block[m.end():] # All lines after header + if before: + # As the header was not the first line of the block and the + # lines before the header must be parsed first, + # recursively parse this lines as a block. + self.parser.parseBlocks(parent, [before]) + # Create header using named groups from RE + start_level, force_id = self._get_meta() + level = len(m.group('level')) + start_level + if level > 6: + level = 6 + h = markdown.etree.SubElement(parent, 'h%d' % level) + h.text = m.group('header').strip() + if m.group('id'): + h.set('id', self._unique_id(m.group('id'))) + elif force_id: + h.set('id', self._create_id(m.group('header').strip())) + if after: + # Insert remaining lines as first block for future parsing. + blocks.insert(0, after) + else: + # This should never happen, but just in case... 
+ message(CRITICAL, "We've got a problem header!") + + def _get_meta(self): + """ Return meta data suported by this ext as a tuple """ + level = int(self.config['level'][0]) - 1 + force = self._str2bool(self.config['forceid'][0]) + if hasattr(self.md, 'Meta'): + if self.md.Meta.has_key('header_level'): + level = int(self.md.Meta['header_level'][0]) - 1 + if self.md.Meta.has_key('header_forceid'): + force = self._str2bool(self.md.Meta['header_forceid'][0]) + return level, force + + def _str2bool(self, s, default=False): + """ Convert a string to a booleen value. """ + s = str(s) + if s.lower() in ['0', 'f', 'false', 'off', 'no', 'n']: + return False + elif s.lower() in ['1', 't', 'true', 'on', 'yes', 'y']: + return True + return default + + def _unique_id(self, id): + """ Ensure ID is unique. Append '_1', '_2'... if not """ + while id in self.IDs: + m = IDCOUNT_RE.match(id) + if m: + id = '%s_%d'% (m.group(1), int(m.group(2))+1) + else: + id = '%s_%d'% (id, 1) + self.IDs.append(id) + return id + + def _create_id(self, header): + """ Return ID from Header text. """ + h = '' + for c in header.lower().replace(' ', '_'): + if c in ID_CHARS: + h += c + elif c not in punctuation: + h += '+' + return self._unique_id(h) + + +class HeaderIdExtension (markdown.Extension): + def __init__(self, configs): + # set defaults + self.config = { + 'level' : ['1', 'Base level for headers.'], + 'forceid' : ['True', 'Force all headers to have an id.'] + } + + for key, value in configs: + self.setConfig(key, value) + + def extendMarkdown(self, md, md_globals): + md.registerExtension(self) + self.processor = HeaderIdProcessor(md.parser) + self.processor.md = md + self.processor.config = self.config + # Replace existing hasheader in place. + md.parser.blockprocessors['hashheader'] = self.processor + + def reset(self): + self.processor.IDs = [] + + +def makeExtension(configs=None): + return HeaderIdExtension(configs=configs) + +if __name__ == "__main__": + import doctest + doctest.testmod() + diff --git a/src/calibre/ebooks/markdown/extensions/meta.py b/src/calibre/ebooks/markdown/extensions/meta.py new file mode 100644 index 0000000000..b33eaa0f71 --- /dev/null +++ b/src/calibre/ebooks/markdown/extensions/meta.py @@ -0,0 +1,91 @@ +#!usr/bin/python + +""" +Meta Data Extension for Python-Markdown +======================================= + +This extension adds Meta Data handling to markdown. + +Basic Usage: + + >>> import markdown + >>> text = '''Title: A Test Doc. + ... Author: Waylan Limberg + ... John Doe + ... Blank_Data: + ... + ... The body. This is paragraph one. + ... ''' + >>> md = markdown.Markdown(['meta']) + >>> md.convert(text) + u'<p>The body. This is paragraph one.</p>' + >>> md.Meta + {u'blank_data': [u''], u'author': [u'Waylan Limberg', u'John Doe'], u'title': [u'A Test Doc.']} + +Make sure text without Meta Data still works (markdown < 1.6b returns a <p>). + + >>> text = ' Some Code - not extra lines of meta data.' + >>> md = markdown.Markdown(['meta']) + >>> md.convert(text) + u'<pre><code>Some Code - not extra lines of meta data.\\n</code></pre>' + >>> md.Meta + {} + +Copyright 2007-2008 [Waylan Limberg](http://achinghead.com). 
+ +Project website: <http://www.freewisdom.org/project/python-markdown/Meta-Data> +Contact: markdown@freewisdom.org + +License: BSD (see ../docs/LICENSE for details) + +""" + +import re +import calibre.ebooks.markdown.markdown as markdown + +# Global Vars +META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)') +META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)') + +class MetaExtension (markdown.Extension): + """ Meta-Data extension for Python-Markdown. """ + + def extendMarkdown(self, md, md_globals): + """ Add MetaPreprocessor to Markdown instance. """ + + md.preprocessors.add("meta", MetaPreprocessor(md), "_begin") + + +class MetaPreprocessor(markdown.preprocessors.Preprocessor): + """ Get Meta-Data. """ + + def run(self, lines): + """ Parse Meta-Data and store in Markdown.Meta. """ + meta = {} + key = None + while 1: + line = lines.pop(0) + if line.strip() == '': + break # blank line - done + m1 = META_RE.match(line) + if m1: + key = m1.group('key').lower().strip() + meta[key] = [m1.group('value').strip()] + else: + m2 = META_MORE_RE.match(line) + if m2 and key: + # Add another line to existing key + meta[key].append(m2.group('value').strip()) + else: + lines.insert(0, line) + break # no meta data - done + self.markdown.Meta = meta + return lines + + +def makeExtension(configs={}): + return MetaExtension(configs=configs) + +if __name__ == "__main__": + import doctest + doctest.testmod() diff --git a/src/calibre/ebooks/markdown/extensions/rss.py b/src/calibre/ebooks/markdown/extensions/rss.py new file mode 100644 index 0000000000..466c502da0 --- /dev/null +++ b/src/calibre/ebooks/markdown/extensions/rss.py @@ -0,0 +1,114 @@ +import calibre.ebooks.markdown.markdown as markdown +from calibre.ebooks.markdown.markdown import etree + +DEFAULT_URL = "http://www.freewisdom.org/projects/python-markdown/" +DEFAULT_CREATOR = "Yuri Takhteyev" +DEFAULT_TITLE = "Markdown in Python" +GENERATOR = "http://www.freewisdom.org/projects/python-markdown/markdown2rss" + +month_map = { "Jan" : "01", + "Feb" : "02", + "March" : "03", + "April" : "04", + "May" : "05", + "June" : "06", + "July" : "07", + "August" : "08", + "September" : "09", + "October" : "10", + "November" : "11", + "December" : "12" } + +def get_time(heading): + + heading = heading.split("-")[0] + heading = heading.strip().replace(",", " ").replace(".", " ") + + month, date, year = heading.split() + month = month_map[month] + + return rdftime(" ".join((month, date, year, "12:00:00 AM"))) + +def rdftime(time): + + time = time.replace(":", " ") + time = time.replace("/", " ") + time = time.split() + return "%s-%s-%sT%s:%s:%s-08:00" % (time[0], time[1], time[2], + time[3], time[4], time[5]) + + +def get_date(text): + return "date" + +class RssExtension (markdown.Extension): + + def extendMarkdown(self, md, md_globals): + + self.config = { 'URL' : [DEFAULT_URL, "Main URL"], + 'CREATOR' : [DEFAULT_CREATOR, "Feed creator's name"], + 'TITLE' : [DEFAULT_TITLE, "Feed title"] } + + md.xml_mode = True + + # Insert a tree-processor that would actually add the title tag + treeprocessor = RssTreeProcessor(md) + treeprocessor.ext = self + md.treeprocessors['rss'] = treeprocessor + md.stripTopLevelTags = 0 + md.docType = '<?xml version="1.0" encoding="utf-8"?>\n' + +class RssTreeProcessor(markdown.treeprocessors.Treeprocessor): + + def run (self, root): + + rss = etree.Element("rss") + rss.set("version", "2.0") + + channel = etree.SubElement(rss, "channel") + + for tag, text in (("title", self.ext.getConfig("TITLE")), + ("link", 
self.ext.getConfig("URL")), + ("description", None)): + + element = etree.SubElement(channel, tag) + element.text = text + + for child in root: + + if child.tag in ["h1", "h2", "h3", "h4", "h5"]: + + heading = child.text.strip() + item = etree.SubElement(channel, "item") + link = etree.SubElement(item, "link") + link.text = self.ext.getConfig("URL") + title = etree.SubElement(item, "title") + title.text = heading + + guid = ''.join([x for x in heading if x.isalnum()]) + guidElem = etree.SubElement(item, "guid") + guidElem.text = guid + guidElem.set("isPermaLink", "false") + + elif child.tag in ["p"]: + try: + description = etree.SubElement(item, "description") + except UnboundLocalError: + # Item not defined - moving on + pass + else: + if len(child): + content = "\n".join([etree.tostring(node) + for node in child]) + else: + content = child.text + pholder = self.markdown.htmlStash.store( + "<![CDATA[ %s]]>" % content) + description.text = pholder + + return rss + + +def makeExtension(configs): + + return RssExtension(configs) diff --git a/src/calibre/ebooks/markdown/extensions/tables.py b/src/calibre/ebooks/markdown/extensions/tables.py new file mode 100644 index 0000000000..f47ec1cc0e --- /dev/null +++ b/src/calibre/ebooks/markdown/extensions/tables.py @@ -0,0 +1,97 @@ +#!/usr/bin/env Python +""" +Tables Extension for Python-Markdown +==================================== + +Added parsing of tables to Python-Markdown. + +A simple example: + + First Header | Second Header + ------------- | ------------- + Content Cell | Content Cell + Content Cell | Content Cell + +Copyright 2009 - [Waylan Limberg](http://achinghead.com) +""" +import calibre.ebooks.markdown.markdown as markdown +from calibre.ebooks.markdown.markdown import etree + + +class TableProcessor(markdown.blockprocessors.BlockProcessor): + """ Process Tables. """ + + def test(self, parent, block): + rows = block.split('\n') + return (len(rows) > 2 and '|' in rows[0] and + '|' in rows[1] and '-' in rows[1] and + rows[1][0] in ['|', ':', '-']) + + def run(self, parent, blocks): + """ Parse a table block and build table. """ + block = blocks.pop(0).split('\n') + header = block[:2] + rows = block[2:] + # Get format type (bordered by pipes or not) + border = False + if header[0].startswith('|'): + border = True + # Get alignment of columns + align = [] + for c in self._split_row(header[1], border): + if c.startswith(':') and c.endswith(':'): + align.append('center') + elif c.startswith(':'): + align.append('left') + elif c.endswith(':'): + align.append('right') + else: + align.append(None) + # Build table + table = etree.SubElement(parent, 'table') + thead = etree.SubElement(table, 'thead') + self._build_row(header[0], thead, align, border) + tbody = etree.SubElement(table, 'tbody') + for row in rows: + self._build_row(row, tbody, align, border) + + def _build_row(self, row, parent, align, border): + """ Given a row of text, build table cells. """ + tr = etree.SubElement(parent, 'tr') + tag = 'td' + if parent.tag == 'thead': + tag = 'th' + cells = self._split_row(row, border) + # We use align here rather than cells to ensure every row + # contains the same number of columns. + for i, a in enumerate(align): + c = etree.SubElement(tr, tag) + try: + c.text = cells[i].strip() + except IndexError: + c.text = "" + if a: + c.set('align', a) + + def _split_row(self, row, border): + """ split a row of text into list of cells. 
""" + if border: + if row.startswith('|'): + row = row[1:] + if row.endswith('|'): + row = row[:-1] + return row.split('|') + + +class TableExtension(markdown.Extension): + """ Add tables to Markdown. """ + + def extendMarkdown(self, md, md_globals): + """ Add an instance of TableProcessor to BlockParser. """ + md.parser.blockprocessors.add('table', + TableProcessor(md.parser), + '<hashheader') + + +def makeExtension(configs={}): + return TableExtension(configs=configs) diff --git a/src/calibre/ebooks/markdown/extensions/toc.py b/src/calibre/ebooks/markdown/extensions/toc.py new file mode 100644 index 0000000000..efa5516624 --- /dev/null +++ b/src/calibre/ebooks/markdown/extensions/toc.py @@ -0,0 +1,140 @@ +""" +Table of Contents Extension for Python-Markdown +* * * + +(c) 2008 [Jack Miller](http://codezen.org) + +Dependencies: +* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/) + +""" +import calibre.ebooks.markdown.markdown as markdown +from calibre.ebooks.markdown.markdown import etree +import re + +class TocTreeprocessor(markdown.treeprocessors.Treeprocessor): + # Iterator wrapper to get parent and child all at once + def iterparent(self, root): + for parent in root.getiterator(): + for child in parent: + yield parent, child + + def run(self, doc): + div = etree.Element("div") + div.attrib["class"] = "toc" + last_li = None + + # Add title to the div + if self.config["title"][0]: + header = etree.SubElement(div, "span") + header.attrib["class"] = "toctitle" + header.text = self.config["title"][0] + + level = 0 + list_stack=[div] + header_rgx = re.compile("[Hh][123456]") + + # Get a list of id attributes + used_ids = [] + for c in doc.getiterator(): + if "id" in c.attrib: + used_ids.append(c.attrib["id"]) + + for (p, c) in self.iterparent(doc): + if not c.text: + continue + + # To keep the output from screwing up the + # validation by putting a <div> inside of a <p> + # we actually replace the <p> in its entirety. + # We do not allow the marker inside a header as that + # would causes an enless loop of placing a new TOC + # inside previously generated TOC. + + if c.text.find(self.config["marker"][0]) > -1 and not header_rgx.match(c.tag): + for i in range(len(p)): + if p[i] == c: + p[i] = div + break + + if header_rgx.match(c.tag): + tag_level = int(c.tag[-1]) + + # Regardless of how many levels we jumped + # only one list should be created, since + # empty lists containing lists are illegal. 
+ + if tag_level < level: + list_stack.pop() + level = tag_level + + if tag_level > level: + newlist = etree.Element("ul") + if last_li: + last_li.append(newlist) + else: + list_stack[-1].append(newlist) + list_stack.append(newlist) + level = tag_level + + # Do not override pre-existing ids + if not "id" in c.attrib: + id = self.config["slugify"][0](c.text) + if id in used_ids: + ctr = 1 + while "%s_%d" % (id, ctr) in used_ids: + ctr += 1 + id = "%s_%d" % (id, ctr) + used_ids.append(id) + c.attrib["id"] = id + else: + id = c.attrib["id"] + + # List item link, to be inserted into the toc div + last_li = etree.Element("li") + link = etree.SubElement(last_li, "a") + link.text = c.text + link.attrib["href"] = '#' + id + + if int(self.config["anchorlink"][0]): + anchor = etree.SubElement(c, "a") + anchor.text = c.text + anchor.attrib["href"] = "#" + id + anchor.attrib["class"] = "toclink" + c.text = "" + + list_stack[-1].append(last_li) + +class TocExtension(markdown.Extension): + def __init__(self, configs): + self.config = { "marker" : ["[TOC]", + "Text to find and replace with Table of Contents -" + "Defaults to \"[TOC]\""], + "slugify" : [self.slugify, + "Function to generate anchors based on header text-" + "Defaults to a built in slugify function."], + "title" : [None, + "Title to insert into TOC <div> - " + "Defaults to None"], + "anchorlink" : [0, + "1 if header should be a self link" + "Defaults to 0"]} + + for key, value in configs: + self.setConfig(key, value) + + # This is exactly the same as Django's slugify + def slugify(self, value): + """ Slugify a string, to make it URL friendly. """ + import unicodedata + value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore') + value = unicode(re.sub('[^\w\s-]', '', value).strip().lower()) + return re.sub('[-\s]+','-',value) + + def extendMarkdown(self, md, md_globals): + tocext = TocTreeprocessor(md) + tocext.config = self.config + md.treeprocessors.add("toc", tocext, "_begin") + +def makeExtension(configs={}): + return TocExtension(configs=configs) diff --git a/src/calibre/ebooks/markdown/extensions/wikilinks.py b/src/calibre/ebooks/markdown/extensions/wikilinks.py new file mode 100644 index 0000000000..ddb7b5f0d6 --- /dev/null +++ b/src/calibre/ebooks/markdown/extensions/wikilinks.py @@ -0,0 +1,155 @@ +#!/usr/bin/env python + +''' +WikiLinks Extension for Python-Markdown +====================================== + +Converts [[WikiLinks]] to relative links. Requires Python-Markdown 2.0+ + +Basic usage: + + >>> import markdown + >>> text = "Some text with a [[WikiLink]]." + >>> html = markdown.markdown(text, ['wikilinks']) + >>> html + u'<p>Some text with a <a class="wikilink" href="/WikiLink/">WikiLink</a>.</p>' + +Whitespace behavior: + + >>> markdown.markdown('[[ foo bar_baz ]]', ['wikilinks']) + u'<p><a class="wikilink" href="/foo_bar_baz/">foo bar_baz</a></p>' + >>> markdown.markdown('foo [[ ]] bar', ['wikilinks']) + u'<p>foo bar</p>' + +To define custom settings the simple way: + + >>> markdown.markdown(text, + ... ['wikilinks(base_url=/wiki/,end_url=.html,html_class=foo)'] + ... ) + u'<p>Some text with a <a class="foo" href="/wiki/WikiLink.html">WikiLink</a>.</p>' + +Custom settings the complex way: + + >>> md = markdown.Markdown( + ... extensions = ['wikilinks'], + ... extension_configs = {'wikilinks': [ + ... ('base_url', 'http://example.com/'), + ... ('end_url', '.html'), + ... ('html_class', '') ]}, + ... 
safe_mode = True) + >>> md.convert(text) + u'<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>' + +Use MetaData with mdx_meta.py (Note the blank html_class in MetaData): + + >>> text = """wiki_base_url: http://example.com/ + ... wiki_end_url: .html + ... wiki_html_class: + ... + ... Some text with a [[WikiLink]].""" + >>> md = markdown.Markdown(extensions=['meta', 'wikilinks']) + >>> md.convert(text) + u'<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>' + +MetaData should not carry over to next document: + + >>> md.convert("No [[MetaData]] here.") + u'<p>No <a class="wikilink" href="/MetaData/">MetaData</a> here.</p>' + +Define a custom URL builder: + + >>> def my_url_builder(label, base, end): + ... return '/bar/' + >>> md = markdown.Markdown(extensions=['wikilinks'], + ... extension_configs={'wikilinks' : [('build_url', my_url_builder)]}) + >>> md.convert('[[foo]]') + u'<p><a class="wikilink" href="/bar/">foo</a></p>' + +From the command line: + + python markdown.py -x wikilinks(base_url=http://example.com/,end_url=.html,html_class=foo) src.txt + +By [Waylan Limberg](http://achinghead.com/). + +License: [BSD](http://www.opensource.org/licenses/bsd-license.php) + +Dependencies: +* [Python 2.3+](http://python.org) +* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/) +''' + +import calibre.ebooks.markdown.markdown as markdown +import re + +def build_url(label, base, end): + """ Build a url from the label, a base, and an end. """ + clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label) + return '%s%s%s'% (base, clean_label, end) + + +class WikiLinkExtension(markdown.Extension): + def __init__(self, configs): + # set extension defaults + self.config = { + 'base_url' : ['/', 'String to append to beginning or URL.'], + 'end_url' : ['/', 'String to append to end of URL.'], + 'html_class' : ['wikilink', 'CSS hook. Leave blank for none.'], + 'build_url' : [build_url, 'Callable formats URL from label.'], + } + + # Override defaults with user settings + for key, value in configs : + self.setConfig(key, value) + + def extendMarkdown(self, md, md_globals): + self.md = md + + # append to end of inline patterns + WIKILINK_RE = r'\[\[([A-Za-z0-9_ -]+)\]\]' + wikilinkPattern = WikiLinks(WIKILINK_RE, self.config) + wikilinkPattern.md = md + md.inlinePatterns.add('wikilink', wikilinkPattern, "<not_strong") + + +class WikiLinks(markdown.inlinepatterns.Pattern): + def __init__(self, pattern, config): + markdown.inlinepatterns.Pattern.__init__(self, pattern) + self.config = config + + def handleMatch(self, m): + if m.group(2).strip(): + base_url, end_url, html_class = self._getMeta() + label = m.group(2).strip() + url = self.config['build_url'][0](label, base_url, end_url) + a = markdown.etree.Element('a') + a.text = label + a.set('href', url) + if html_class: + a.set('class', html_class) + else: + a = '' + return a + + def _getMeta(self): + """ Return meta data or config data. 
""" + base_url = self.config['base_url'][0] + end_url = self.config['end_url'][0] + html_class = self.config['html_class'][0] + if hasattr(self.md, 'Meta'): + if self.md.Meta.has_key('wiki_base_url'): + base_url = self.md.Meta['wiki_base_url'][0] + if self.md.Meta.has_key('wiki_end_url'): + end_url = self.md.Meta['wiki_end_url'][0] + if self.md.Meta.has_key('wiki_html_class'): + html_class = self.md.Meta['wiki_html_class'][0] + return base_url, end_url, html_class + + +def makeExtension(configs=None) : + return WikiLinkExtension(configs=configs) + + +if __name__ == "__main__": + import doctest + doctest.testmod() + diff --git a/src/calibre/ebooks/markdown/html4.py b/src/calibre/ebooks/markdown/html4.py new file mode 100644 index 0000000000..08f241d57a --- /dev/null +++ b/src/calibre/ebooks/markdown/html4.py @@ -0,0 +1,274 @@ +# markdown/html4.py +# +# Add html4 serialization to older versions of Elementree +# Taken from ElementTree 1.3 preview with slight modifications +# +# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved. +# +# fredrik@pythonware.com +# http://www.pythonware.com +# +# -------------------------------------------------------------------- +# The ElementTree toolkit is +# +# Copyright (c) 1999-2007 by Fredrik Lundh +# +# By obtaining, using, and/or copying this software and/or its +# associated documentation, you agree that you have read, understood, +# and will comply with the following terms and conditions: +# +# Permission to use, copy, modify, and distribute this software and +# its associated documentation for any purpose and without fee is +# hereby granted, provided that the above copyright notice appears in +# all copies, and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Secret Labs AB or the author not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD +# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- +# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR +# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. 
+# -------------------------------------------------------------------- + + +import markdown +ElementTree = markdown.etree.ElementTree +QName = markdown.etree.QName +Comment = markdown.etree.Comment +PI = markdown.etree.PI +ProcessingInstruction = markdown.etree.ProcessingInstruction + +HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr", + "img", "input", "isindex", "link", "meta" "param") + +try: + HTML_EMPTY = set(HTML_EMPTY) +except NameError: + pass + +_namespace_map = { + # "well-known" namespace prefixes + "http://www.w3.org/XML/1998/namespace": "xml", + "http://www.w3.org/1999/xhtml": "html", + "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf", + "http://schemas.xmlsoap.org/wsdl/": "wsdl", + # xml schema + "http://www.w3.org/2001/XMLSchema": "xs", + "http://www.w3.org/2001/XMLSchema-instance": "xsi", + # dublic core + "http://purl.org/dc/elements/1.1/": "dc", +} + + +def _raise_serialization_error(text): + raise TypeError( + "cannot serialize %r (type %s)" % (text, type(text).__name__) + ) + +def _encode(text, encoding): + try: + return text.encode(encoding, "xmlcharrefreplace") + except (TypeError, AttributeError): + _raise_serialization_error(text) + +def _escape_cdata(text, encoding): + # escape character data + try: + # it's worth avoiding do-nothing calls for strings that are + # shorter than 500 character, or so. assume that's, by far, + # the most common case in most applications. + if "&" in text: + text = text.replace("&", "&") + if "<" in text: + text = text.replace("<", "<") + if ">" in text: + text = text.replace(">", ">") + return text.encode(encoding, "xmlcharrefreplace") + except (TypeError, AttributeError): + _raise_serialization_error(text) + + +def _escape_attrib(text, encoding): + # escape attribute value + try: + if "&" in text: + text = text.replace("&", "&") + if "<" in text: + text = text.replace("<", "<") + if ">" in text: + text = text.replace(">", ">") + if "\"" in text: + text = text.replace("\"", """) + if "\n" in text: + text = text.replace("\n", " ") + return text.encode(encoding, "xmlcharrefreplace") + except (TypeError, AttributeError): + _raise_serialization_error(text) + +def _escape_attrib_html(text, encoding): + # escape attribute value + try: + if "&" in text: + text = text.replace("&", "&") + if ">" in text: + text = text.replace(">", ">") + if "\"" in text: + text = text.replace("\"", """) + return text.encode(encoding, "xmlcharrefreplace") + except (TypeError, AttributeError): + _raise_serialization_error(text) + + +def _serialize_html(write, elem, encoding, qnames, namespaces): + tag = elem.tag + text = elem.text + if tag is Comment: + write("<!--%s-->" % _escape_cdata(text, encoding)) + elif tag is ProcessingInstruction: + write("<?%s?>" % _escape_cdata(text, encoding)) + else: + tag = qnames[tag] + if tag is None: + if text: + write(_escape_cdata(text, encoding)) + for e in elem: + _serialize_html(write, e, encoding, qnames, None) + else: + write("<" + tag) + items = elem.items() + if items or namespaces: + items.sort() # lexical order + for k, v in items: + if isinstance(k, QName): + k = k.text + if isinstance(v, QName): + v = qnames[v.text] + else: + v = _escape_attrib_html(v, encoding) + # FIXME: handle boolean attributes + write(" %s=\"%s\"" % (qnames[k], v)) + if namespaces: + items = namespaces.items() + items.sort(key=lambda x: x[1]) # sort on prefix + for v, k in items: + if k: + k = ":" + k + write(" xmlns%s=\"%s\"" % ( + k.encode(encoding), + _escape_attrib(v, encoding) + )) + write(">") + tag = tag.lower() + 
if text: + if tag == "script" or tag == "style": + write(_encode(text, encoding)) + else: + write(_escape_cdata(text, encoding)) + for e in elem: + _serialize_html(write, e, encoding, qnames, None) + if tag not in HTML_EMPTY: + write("</" + tag + ">") + if elem.tail: + write(_escape_cdata(elem.tail, encoding)) + +def write_html(root, f, + # keyword arguments + encoding="us-ascii", + default_namespace=None): + assert root is not None + if not hasattr(f, "write"): + f = open(f, "wb") + write = f.write + if not encoding: + encoding = "us-ascii" + qnames, namespaces = _namespaces( + root, encoding, default_namespace + ) + _serialize_html( + write, root, encoding, qnames, namespaces + ) + +# -------------------------------------------------------------------- +# serialization support + +def _namespaces(elem, encoding, default_namespace=None): + # identify namespaces used in this tree + + # maps qnames to *encoded* prefix:local names + qnames = {None: None} + + # maps uri:s to prefixes + namespaces = {} + if default_namespace: + namespaces[default_namespace] = "" + + def encode(text): + return text.encode(encoding) + + def add_qname(qname): + # calculate serialized qname representation + try: + if qname[:1] == "{": + uri, tag = qname[1:].split("}", 1) + prefix = namespaces.get(uri) + if prefix is None: + prefix = _namespace_map.get(uri) + if prefix is None: + prefix = "ns%d" % len(namespaces) + if prefix != "xml": + namespaces[uri] = prefix + if prefix: + qnames[qname] = encode("%s:%s" % (prefix, tag)) + else: + qnames[qname] = encode(tag) # default element + else: + if default_namespace: + # FIXME: can this be handled in XML 1.0? + raise ValueError( + "cannot use non-qualified names with " + "default_namespace option" + ) + qnames[qname] = encode(qname) + except TypeError: + _raise_serialization_error(qname) + + # populate qname and namespaces table + try: + iterate = elem.iter + except AttributeError: + iterate = elem.getiterator # cET compatibility + for elem in iterate(): + tag = elem.tag + if isinstance(tag, QName) and tag.text not in qnames: + add_qname(tag.text) + elif isinstance(tag, basestring): + if tag not in qnames: + add_qname(tag) + elif tag is not None and tag is not Comment and tag is not PI: + _raise_serialization_error(tag) + for key, value in elem.items(): + if isinstance(key, QName): + key = key.text + if key not in qnames: + add_qname(key) + if isinstance(value, QName) and value.text not in qnames: + add_qname(value.text) + text = elem.text + if isinstance(text, QName) and text.text not in qnames: + add_qname(text.text) + return qnames, namespaces + +def to_html_string(element, encoding=None): + class dummy: + pass + data = [] + file = dummy() + file.write = data.append + write_html(ElementTree(element).getroot(),file,encoding) + return "".join(data) diff --git a/src/calibre/ebooks/markdown/inlinepatterns.py b/src/calibre/ebooks/markdown/inlinepatterns.py new file mode 100644 index 0000000000..89fa3b2ef4 --- /dev/null +++ b/src/calibre/ebooks/markdown/inlinepatterns.py @@ -0,0 +1,371 @@ +""" +INLINE PATTERNS +============================================================================= + +Inline patterns such as *emphasis* are handled by means of auxiliary +objects, one per pattern. Pattern objects must be instances of classes +that extend markdown.Pattern. 
Each pattern object uses a single regular +expression and needs support the following methods: + + pattern.getCompiledRegExp() # returns a regular expression + + pattern.handleMatch(m) # takes a match object and returns + # an ElementTree element or just plain text + +All of python markdown's built-in patterns subclass from Pattern, +but you can add additional patterns that don't. + +Also note that all the regular expressions used by inline must +capture the whole block. For this reason, they all start with +'^(.*)' and end with '(.*)!'. In case with built-in expression +Pattern takes care of adding the "^(.*)" and "(.*)!". + +Finally, the order in which regular expressions are applied is very +important - e.g. if we first replace http://.../ links with <a> tags +and _then_ try to replace inline html, we would end up with a mess. +So, we apply the expressions in the following order: + +* escape and backticks have to go before everything else, so + that we can preempt any markdown patterns by escaping them. + +* then we handle auto-links (must be done before inline html) + +* then we handle inline HTML. At this point we will simply + replace all inline HTML strings with a placeholder and add + the actual HTML to a hash. + +* then inline images (must be done before links) + +* then bracketed links, first regular then reference-style + +* finally we apply strong and emphasis +""" + +import markdown +import re +from urlparse import urlparse, urlunparse +import sys +if sys.version >= "3.0": + from html import entities as htmlentitydefs +else: + import htmlentitydefs + +""" +The actual regular expressions for patterns +----------------------------------------------------------------------------- +""" + +NOBRACKET = r'[^\]\[]*' +BRK = ( r'\[(' + + (NOBRACKET + r'(\[')*6 + + (NOBRACKET+ r'\])*')*6 + + NOBRACKET + r')\]' ) +NOIMG = r'(?<!\!)' + +BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)' # `e=f()` or ``e=f("`")`` +ESCAPE_RE = r'\\(.)' # \< +EMPHASIS_RE = r'(\*)([^\*]*)\2' # *emphasis* +STRONG_RE = r'(\*{2}|_{2})(.*?)\2' # **strong** +STRONG_EM_RE = r'(\*{3}|_{3})(.*?)\2' # ***strong*** + +if markdown.SMART_EMPHASIS: + EMPHASIS_2_RE = r'(?<!\S)(_)(\S.*?)\2' # _emphasis_ +else: + EMPHASIS_2_RE = r'(_)(.*?)\2' # _emphasis_ + +LINK_RE = NOIMG + BRK + \ +r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*)\12)?\)''' +# [text](url) or [text](<url>) + +IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^\)]*))\)' +# ![alttxt](http://x.com/) or ![alttxt](<http://x.com/>) +REFERENCE_RE = NOIMG + BRK+ r'\s*\[([^\]]*)\]' # [Google][3] +IMAGE_REFERENCE_RE = r'\!' 
+ BRK + '\s*\[([^\]]*)\]' # ![alt text][2] +NOT_STRONG_RE = r'( \* )' # stand-alone * or _ +AUTOLINK_RE = r'<((?:f|ht)tps?://[^>]*)>' # <http://www.123.com> +AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' # <me@example.com> + +HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)' # <...> +ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # & +LINE_BREAK_RE = r' \n' # two spaces at end of line +LINE_BREAK_2_RE = r' $' # two spaces at end of text + + +def dequote(string): + """Remove quotes from around a string.""" + if ( ( string.startswith('"') and string.endswith('"')) + or (string.startswith("'") and string.endswith("'")) ): + return string[1:-1] + else: + return string + +ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123} + +def handleAttributes(text, parent): + """Set values of an element based on attribute definitions ({@id=123}).""" + def attributeCallback(match): + parent.set(match.group(1), match.group(2).replace('\n', ' ')) + return ATTR_RE.sub(attributeCallback, text) + + +""" +The pattern classes +----------------------------------------------------------------------------- +""" + +class Pattern: + """Base class that inline patterns subclass. """ + + def __init__ (self, pattern, markdown_instance=None): + """ + Create an instant of an inline pattern. + + Keyword arguments: + + * pattern: A regular expression that matches a pattern + + """ + self.pattern = pattern + self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern, re.DOTALL) + + # Api for Markdown to pass safe_mode into instance + self.safe_mode = False + if markdown_instance: + self.markdown = markdown_instance + + def getCompiledRegExp (self): + """ Return a compiled regular expression. """ + return self.compiled_re + + def handleMatch(self, m): + """Return a ElementTree element from the given match. + + Subclasses should override this method. + + Keyword arguments: + + * m: A re match object containing a match of the pattern. + + """ + pass + + def type(self): + """ Return class name, to define pattern type """ + return self.__class__.__name__ + +BasePattern = Pattern # for backward compatibility + +class SimpleTextPattern (Pattern): + """ Return a simple text of group(2) of a Pattern. """ + def handleMatch(self, m): + text = m.group(2) + if text == markdown.INLINE_PLACEHOLDER_PREFIX: + return None + return text + +class SimpleTagPattern (Pattern): + """ + Return element of type `tag` with a text attribute of group(3) + of a Pattern. + + """ + def __init__ (self, pattern, tag): + Pattern.__init__(self, pattern) + self.tag = tag + + def handleMatch(self, m): + el = markdown.etree.Element(self.tag) + el.text = m.group(3) + return el + + +class SubstituteTagPattern (SimpleTagPattern): + """ Return a eLement of type `tag` with no children. """ + def handleMatch (self, m): + return markdown.etree.Element(self.tag) + + +class BacktickPattern (Pattern): + """ Return a `<code>` element containing the matching text. """ + def __init__ (self, pattern): + Pattern.__init__(self, pattern) + self.tag = "code" + + def handleMatch(self, m): + el = markdown.etree.Element(self.tag) + el.text = markdown.AtomicString(m.group(3).strip()) + return el + + +class DoubleTagPattern (SimpleTagPattern): + """Return a ElementTree element nested in tag2 nested in tag1. + + Useful for strong emphasis etc. 
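+    For example, with tag='strong,em' a match such as ***text*** becomes
+    <strong><em>text</em></strong>.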
+ + """ + def handleMatch(self, m): + tag1, tag2 = self.tag.split(",") + el1 = markdown.etree.Element(tag1) + el2 = markdown.etree.SubElement(el1, tag2) + el2.text = m.group(3) + return el1 + + +class HtmlPattern (Pattern): + """ Store raw inline html and return a placeholder. """ + def handleMatch (self, m): + rawhtml = m.group(2) + inline = True + place_holder = self.markdown.htmlStash.store(rawhtml) + return place_holder + + +class LinkPattern (Pattern): + """ Return a link element from the given match. """ + def handleMatch(self, m): + el = markdown.etree.Element("a") + el.text = m.group(2) + title = m.group(11) + href = m.group(9) + + if href: + if href[0] == "<": + href = href[1:-1] + el.set("href", self.sanitize_url(href.strip())) + else: + el.set("href", "") + + if title: + title = dequote(title) #.replace('"', """) + el.set("title", title) + return el + + def sanitize_url(self, url): + """ + Sanitize a url against xss attacks in "safe_mode". + + Rather than specifically blacklisting `javascript:alert("XSS")` and all + its aliases (see <http://ha.ckers.org/xss.html>), we whitelist known + safe url formats. Most urls contain a network location, however some + are known not to (i.e.: mailto links). Script urls do not contain a + location. Additionally, for `javascript:...`, the scheme would be + "javascript" but some aliases will appear to `urlparse()` to have no + scheme. On top of that relative links (i.e.: "foo/bar.html") have no + scheme. Therefore we must check "path", "parameters", "query" and + "fragment" for any literal colons. We don't check "scheme" for colons + because it *should* never have any and "netloc" must allow the form: + `username:password@host:port`. + + """ + locless_schemes = ['', 'mailto', 'news'] + scheme, netloc, path, params, query, fragment = url = urlparse(url) + safe_url = False + if netloc != '' or scheme in locless_schemes: + safe_url = True + + for part in url[2:]: + if ":" in part: + safe_url = False + + if self.markdown.safeMode and not safe_url: + return '' + else: + return urlunparse(url) + +class ImagePattern(LinkPattern): + """ Return a img element from the given match. """ + def handleMatch(self, m): + el = markdown.etree.Element("img") + src_parts = m.group(9).split() + if src_parts: + src = src_parts[0] + if src[0] == "<" and src[-1] == ">": + src = src[1:-1] + el.set('src', self.sanitize_url(src)) + else: + el.set('src', "") + if len(src_parts) > 1: + el.set('title', dequote(" ".join(src_parts[1:]))) + + if markdown.ENABLE_ATTRIBUTES: + truealt = handleAttributes(m.group(2), el) + else: + truealt = m.group(2) + + el.set('alt', truealt) + return el + +class ReferencePattern(LinkPattern): + """ Match to a stored reference and return link element. """ + def handleMatch(self, m): + if m.group(9): + id = m.group(9).lower() + else: + # if we got something like "[Google][]" + # we'll use "google" as the id + id = m.group(2).lower() + + if not id in self.markdown.references: # ignore undefined refs + return None + href, title = self.markdown.references[id] + + text = m.group(2) + return self.makeTag(href, title, text) + + def makeTag(self, href, title, text): + el = markdown.etree.Element('a') + + el.set('href', self.sanitize_url(href)) + if title: + el.set('title', title) + + el.text = text + return el + + +class ImageReferencePattern (ReferencePattern): + """ Match to a stored reference and return img element. 
""" + def makeTag(self, href, title, text): + el = markdown.etree.Element("img") + el.set("src", self.sanitize_url(href)) + if title: + el.set("title", title) + el.set("alt", text) + return el + + +class AutolinkPattern (Pattern): + """ Return a link Element given an autolink (`<http://example/com>`). """ + def handleMatch(self, m): + el = markdown.etree.Element("a") + el.set('href', m.group(2)) + el.text = markdown.AtomicString(m.group(2)) + return el + +class AutomailPattern (Pattern): + """ + Return a mailto link Element given an automail link (`<foo@example.com>`). + """ + def handleMatch(self, m): + el = markdown.etree.Element('a') + email = m.group(2) + if email.startswith("mailto:"): + email = email[len("mailto:"):] + + def codepoint2name(code): + """Return entity definition by code, or the code if not defined.""" + entity = htmlentitydefs.codepoint2name.get(code) + if entity: + return "%s%s;" % (markdown.AMP_SUBSTITUTE, entity) + else: + return "%s#%d;" % (markdown.AMP_SUBSTITUTE, code) + + letters = [codepoint2name(ord(letter)) for letter in email] + el.text = markdown.AtomicString(''.join(letters)) + + mailto = "mailto:" + email + mailto = "".join([markdown.AMP_SUBSTITUTE + '#%d;' % + ord(letter) for letter in mailto]) + el.set('href', mailto) + return el + diff --git a/src/calibre/ebooks/markdown/markdown.py b/src/calibre/ebooks/markdown/markdown.py index 677047878a..8d7e83aca3 100644 --- a/src/calibre/ebooks/markdown/markdown.py +++ b/src/calibre/ebooks/markdown/markdown.py @@ -1,1840 +1,607 @@ -#!/usr/bin/env python - -version = "1.7" -version_info = (1,7,0,"rc-2") -__revision__ = "$Rev: 72 $" - """ -Python-Markdown +Python Markdown =============== -Converts Markdown to HTML. Basic usage as a module: +Python Markdown converts Markdown to HTML and can be used as a library or +called from the command line. + +## Basic usage as a module: import markdown md = Markdown() html = md.convert(your_text_string) -See http://www.freewisdom.org/projects/python-markdown/ for more -information and instructions on how to extend the functionality of the -script. (You might want to read that before you try modifying this -file.) +## Basic use from the command line: + + python markdown.py source.txt > destination.html + +Run "python markdown.py --help" to see more options. + +## Extensions + +See <http://www.freewisdom.org/projects/python-markdown/> for more +information and instructions on how to extend the functionality of +Python Markdown. Read that before you try modifying this file. + +## Authors and License Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and -maintained by [Yuri Takhteyev](http://www.freewisdom.org) and [Waylan -Limberg](http://achinghead.com/). +maintained by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan +Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com). -Contact: yuri [at] freewisdom.org - waylan [at] gmail.com +Contact: markdown@freewisdom.org + +Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later) +Copyright 200? Django Software Foundation (OrderedDict implementation) +Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +Copyright 2004 Manfred Stienstra (the original version) + +License: BSD (see docs/LICENSE for details). 
+""" +from calibre.ebooks.markdown.commandline import parse_options + +version = "2.0" +version_info = (2,0,0, "Final") + +import re +import codecs +import sys +import warnings +import logging +from logging import DEBUG, INFO, WARN, ERROR, CRITICAL -License: GPL 2 (http://www.gnu.org/copyleft/gpl.html) or BSD +""" +CONSTANTS +============================================================================= """ +""" +Constants you might want to modify +----------------------------------------------------------------------------- +""" -import re, sys, codecs - -from logging import getLogger, StreamHandler, Formatter, \ - DEBUG, INFO, WARN, CRITICAL - - -MESSAGE_THRESHOLD = CRITICAL - - -# Configure debug message logger (the hard way - to support python 2.3) -logger = getLogger('MARKDOWN') -logger.setLevel(DEBUG) # This is restricted by handlers later -console_hndlr = StreamHandler() -formatter = Formatter('%(name)s-%(levelname)s: "%(message)s"') -console_hndlr.setFormatter(formatter) -console_hndlr.setLevel(MESSAGE_THRESHOLD) -logger.addHandler(console_hndlr) - - -def message(level, text): - ''' A wrapper method for logging debug messages. ''' - logger.log(level, text) - - -# --------------- CONSTANTS YOU MIGHT WANT TO MODIFY ----------------- - -TAB_LENGTH = 4 # expand tabs to this many spaces -ENABLE_ATTRIBUTES = True # @id = xyz -> <... id="xyz"> -SMART_EMPHASIS = 1 # this_or_that does not become this<i>or</i>that +# default logging level for command-line use +COMMAND_LINE_LOGGING_LEVEL = CRITICAL +TAB_LENGTH = 4 # expand tabs to this many spaces +ENABLE_ATTRIBUTES = True # @id = xyz -> <... id="xyz"> +SMART_EMPHASIS = True # this_or_that does not become this<i>or</i>that +DEFAULT_OUTPUT_FORMAT = 'xhtml1' # xhtml or html4 output HTML_REMOVED_TEXT = "[HTML_REMOVED]" # text used instead of HTML in safe mode +BLOCK_LEVEL_ELEMENTS = re.compile("p|div|h[1-6]|blockquote|pre|table|dl|ol|ul" + "|script|noscript|form|fieldset|iframe|math" + "|ins|del|hr|hr/|style|li|dt|dd|thead|tbody" + "|tr|th|td") +DOC_TAG = "div" # Element used to wrap document - later removed -RTL_BIDI_RANGES = ( (u'\u0590', u'\u07FF'), - # from Hebrew to Nko (includes Arabic, Syriac and Thaana) - (u'\u2D30', u'\u2D7F'), - # Tifinagh - ) +# Placeholders +STX = u'\u0002' # Use STX ("Start of text") for start-of-placeholder +ETX = u'\u0003' # Use ETX ("End of text") for end-of-placeholder +INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:" +INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX +AMP_SUBSTITUTE = STX+"amp"+ETX -# Unicode Reference Table: -# 0590-05FF - Hebrew -# 0600-06FF - Arabic -# 0700-074F - Syriac -# 0750-077F - Arabic Supplement -# 0780-07BF - Thaana -# 07C0-07FF - Nko -BOMS = { 'utf-8': (codecs.BOM_UTF8, ), - 'utf-16': (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE), - #'utf-32': (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE) - } - -def removeBOM(text, encoding): - convert = isinstance(text, unicode) - for bom in BOMS[encoding]: - bom = convert and bom.decode(encoding) or bom - if text.startswith(bom): - return text.lstrip(bom) - return text - -# The following constant specifies the name used in the usage -# statement displayed for python versions lower than 2.3. (With -# python2.3 and higher the usage statement is generated by optparse -# and uses the actual name of the executable called.) 
+""" +Constants you probably do not need to change +----------------------------------------------------------------------------- +""" EXECUTABLE_NAME_FOR_USAGE = "python markdown.py" +RTL_BIDI_RANGES = ( (u'\u0590', u'\u07FF'), + # Hebrew (0590-05FF), Arabic (0600-06FF), + # Syriac (0700-074F), Arabic supplement (0750-077F), + # Thaana (0780-07BF), Nko (07C0-07FF). + (u'\u2D30', u'\u2D7F'), # Tifinagh + ) -# --------------- CONSTANTS YOU _SHOULD NOT_ HAVE TO CHANGE ---------- - -# a template for html placeholders -HTML_PLACEHOLDER_PREFIX = "qaodmasdkwaspemas" -HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%dajkqlsmdqpakldnzsdfls" - -BLOCK_LEVEL_ELEMENTS = ['p', 'div', 'blockquote', 'pre', 'table', - 'dl', 'ol', 'ul', 'script', 'noscript', - 'form', 'fieldset', 'iframe', 'math', 'ins', - 'del', 'hr', 'hr/', 'style'] - -def isBlockLevel (tag): - return ( (tag in BLOCK_LEVEL_ELEMENTS) or - (tag[0] == 'h' and tag[1] in "0123456789") ) """ -====================================================================== -========================== NANODOM =================================== -====================================================================== - -The three classes below implement some of the most basic DOM -methods. I use this instead of minidom because I need a simpler -functionality and do not want to require additional libraries. - -Importantly, NanoDom does not do normalization, which is what we -want. It also adds extra white space when converting DOM to string +AUXILIARY GLOBAL FUNCTIONS +============================================================================= """ -ENTITY_NORMALIZATION_EXPRESSIONS = [ (re.compile("&"), "&"), - (re.compile("<"), "<"), - (re.compile(">"), ">")] - -ENTITY_NORMALIZATION_EXPRESSIONS_SOFT = [ (re.compile("&(?!\#)"), "&"), - (re.compile("<"), "<"), - (re.compile(">"), ">"), - (re.compile("\""), """)] - - -def getBidiType(text): - - if not text: return None - - ch = text[0] - - if not isinstance(ch, unicode) or not ch.isalpha(): - return None +def message(level, text): + """ A wrapper method for logging debug messages. """ + logger = logging.getLogger('MARKDOWN') + if logger.handlers: + # The logger is configured + logger.log(level, text) + if level > WARN: + sys.exit(0) + elif level > WARN: + raise MarkdownException, text else: + warnings.warn(text, MarkdownWarning) - for min, max in RTL_BIDI_RANGES: - if ( ch >= min and ch <= max ): - return "rtl" - else: - return "ltr" +def isBlockLevel(tag): + """Check if the tag is a block level HTML tag.""" + return BLOCK_LEVEL_ELEMENTS.match(tag) -class Document: +""" +MISC AUXILIARY CLASSES +============================================================================= +""" - def __init__ (self): - self.bidi = "ltr" +class AtomicString(unicode): + """A string which should not be further processed.""" + pass - def appendChild(self, child): - self.documentElement = child - child.isDocumentElement = True - child.parent = self - self.entities = {} - def setBidi(self, bidi): - if bidi: - self.bidi = bidi +class MarkdownException(Exception): + """ A Markdown Exception. 
""" + pass - def createElement(self, tag, textNode=None): - el = Element(tag) - el.doc = self - if textNode: - el.appendChild(self.createTextNode(textNode)) - return el - def createTextNode(self, text): - node = TextNode(text) - node.doc = self - return node - - def createEntityReference(self, entity): - if entity not in self.entities: - self.entities[entity] = EntityReference(entity) - return self.entities[entity] - - def createCDATA(self, text): - node = CDATA(text) - node.doc = self - return node - - def toxml (self): - return self.documentElement.toxml() - - def normalizeEntities(self, text, avoidDoubleNormalizing=False): - - if avoidDoubleNormalizing: - regexps = ENTITY_NORMALIZATION_EXPRESSIONS_SOFT - else: - regexps = ENTITY_NORMALIZATION_EXPRESSIONS - - for regexp, substitution in regexps: - text = regexp.sub(substitution, text) - return text - - def find(self, test): - return self.documentElement.find(test) - - def unlink(self): - self.documentElement.unlink() - self.documentElement = None - - -class CDATA: - - type = "cdata" - - def __init__ (self, text): - self.text = text - - def handleAttributes(self): - pass - - def toxml (self): - return "<![CDATA[" + self.text + "]]>" - -class Element: - - type = "element" - - def __init__ (self, tag): - - self.nodeName = tag - self.attributes = [] - self.attribute_values = {} - self.childNodes = [] - self.bidi = None - self.isDocumentElement = False - - def setBidi(self, bidi): - - if bidi: - - if not self.bidi or self.isDocumentElement: - # Once the bidi is set don't change it (except for doc element) - self.bidi = bidi - self.parent.setBidi(bidi) - - - def unlink(self): - for child in self.childNodes: - if child.type == "element": - child.unlink() - self.childNodes = None - - def setAttribute(self, attr, value): - if not attr in self.attributes: - self.attributes.append(attr) - - self.attribute_values[attr] = value - - def insertChild(self, position, child): - self.childNodes.insert(position, child) - child.parent = self - - def removeChild(self, child): - self.childNodes.remove(child) - - def replaceChild(self, oldChild, newChild): - position = self.childNodes.index(oldChild) - self.removeChild(oldChild) - self.insertChild(position, newChild) - - def appendChild(self, child): - self.childNodes.append(child) - child.parent = self - - def handleAttributes(self): - pass - - def find(self, test, depth=0): - """ Returns a list of descendants that pass the test function """ - matched_nodes = [] - for child in self.childNodes: - if test(child): - matched_nodes.append(child) - if child.type == "element": - matched_nodes += child.find(test, depth+1) - return matched_nodes - - def toxml(self): - if ENABLE_ATTRIBUTES: - for child in self.childNodes: - child.handleAttributes() - - buffer = "" - if self.nodeName in ['h1', 'h2', 'h3', 'h4']: - buffer += "\n" - elif self.nodeName in ['li']: - buffer += "\n " - - # Process children FIRST, then do the attributes - - childBuffer = "" - - if self.childNodes or self.nodeName in ['blockquote']: - childBuffer += ">" - for child in self.childNodes: - childBuffer += child.toxml() - if self.nodeName == 'p': - childBuffer += "\n" - elif self.nodeName == 'li': - childBuffer += "\n " - childBuffer += "</%s>" % self.nodeName - else: - childBuffer += "/>" - - - - buffer += "<" + self.nodeName - - if self.nodeName in ['p', 'li', 'ul', 'ol', - 'h1', 'h2', 'h3', 'h4', 'h5', 'h6']: - - if not self.attribute_values.has_key("dir"): - if self.bidi: - bidi = self.bidi - else: - bidi = self.doc.bidi - - if bidi=="rtl": - 
self.setAttribute("dir", "rtl") - - for attr in self.attributes: - value = self.attribute_values[attr] - value = self.doc.normalizeEntities(value, - avoidDoubleNormalizing=True) - buffer += ' %s="%s"' % (attr, value) - - - # Now let's actually append the children - - buffer += childBuffer - - if self.nodeName in ['p', 'br ', 'li', 'ul', 'ol', - 'h1', 'h2', 'h3', 'h4'] : - buffer += "\n" - - return buffer - - -class TextNode: - - type = "text" - attrRegExp = re.compile(r'\{@([^\}]*)=([^\}]*)}') # {@id=123} - - def __init__ (self, text): - self.value = text - - def attributeCallback(self, match): - - self.parent.setAttribute(match.group(1), match.group(2)) - - def handleAttributes(self): - self.value = self.attrRegExp.sub(self.attributeCallback, self.value) - - def toxml(self): - - text = self.value - - self.parent.setBidi(getBidiType(text)) - - if not text.startswith(HTML_PLACEHOLDER_PREFIX): - if self.parent.nodeName == "p": - text = text.replace("\n", "\n ") - elif (self.parent.nodeName == "li" - and self.parent.childNodes[0]==self): - text = "\n " + text.replace("\n", "\n ") - text = self.doc.normalizeEntities(text) - return text - - -class EntityReference: - - type = "entity_ref" - - def __init__(self, entity): - self.entity = entity - - def handleAttributes(self): - pass - - def toxml(self): - return "&" + self.entity + ";" +class MarkdownWarning(Warning): + """ A Markdown Warning. """ + pass """ -====================================================================== -========================== PRE-PROCESSORS ============================ -====================================================================== +OVERALL DESIGN +============================================================================= -Preprocessors munge source text before we start doing anything too -complicated. +Markdown processing takes place in four steps: -There are two types of preprocessors: TextPreprocessor and Preprocessor. +1. A bunch of "preprocessors" munge the input text. +2. BlockParser() parses the high-level structural elements of the + pre-processed text into an ElementTree. +3. A bunch of "treeprocessors" are run against the ElementTree. One such + treeprocessor runs InlinePatterns against the ElementTree, detecting inline + markup. +4. Some post-processors are run against the text after the ElementTree has + been serialized into text. +5. The output is written to a string. + +Those steps are put together by the Markdown() class. """ - -class TextPreprocessor: - ''' - TextPreprocessors are run before the text is broken into lines. - - Each TextPreprocessor implements a "run" method that takes a pointer to a - text string of the document, modifies it as necessary and returns - either the same pointer or a pointer to a new string. - - TextPreprocessors must extend markdown.TextPreprocessor. - ''' - - def run(self, text): - pass - - -class Preprocessor: - ''' - Preprocessors are run after the text is broken into lines. - - Each preprocessor implements a "run" method that takes a pointer to a - list of lines of the document, modifies it as necessary and returns - either the same pointer or a pointer to a new list. - - Preprocessors must extend markdown.Preprocessor. 
- ''' - - def run(self, lines): - pass - - -class HtmlBlockPreprocessor(TextPreprocessor): - """Removes html blocks from the source text and stores it.""" - - def _get_left_tag(self, block): - return block[1:].replace(">", " ", 1).split()[0].lower() - - - def _get_right_tag(self, left_tag, block): - return block.rstrip()[-len(left_tag)-2:-1].lower() - - def _equal_tags(self, left_tag, right_tag): - - if left_tag == 'div' or left_tag[0] in ['?', '@', '%']: # handle PHP, etc. - return True - if ("/" + left_tag) == right_tag: - return True - if (right_tag == "--" and left_tag == "--"): - return True - elif left_tag == right_tag[1:] \ - and right_tag[0] != "<": - return True - else: - return False - - def _is_oneliner(self, tag): - return (tag in ['hr', 'hr/']) - - - def run(self, text): - - new_blocks = [] - text = text.split("\n\n") - - items = [] - left_tag = '' - right_tag = '' - in_tag = False # flag - - for block in text: - if block.startswith("\n"): - block = block[1:] - - if not in_tag: - - if block.startswith("<"): - - left_tag = self._get_left_tag(block) - right_tag = self._get_right_tag(left_tag, block) - - if not (isBlockLevel(left_tag) \ - or block[1] in ["!", "?", "@", "%"]): - new_blocks.append(block) - continue - - if self._is_oneliner(left_tag): - new_blocks.append(block.strip()) - continue - - if block[1] == "!": - # is a comment block - left_tag = "--" - right_tag = self._get_right_tag(left_tag, block) - # keep checking conditions below and maybe just append - - if block.rstrip().endswith(">") \ - and self._equal_tags(left_tag, right_tag): - new_blocks.append( - self.stash.store(block.strip())) - continue - else: #if not block[1] == "!": - # if is block level tag and is not complete - items.append(block.strip()) - in_tag = True - continue - - new_blocks.append(block) - - else: - items.append(block.strip()) - - right_tag = self._get_right_tag(left_tag, block) - - if self._equal_tags(left_tag, right_tag): - # if find closing tag - in_tag = False - new_blocks.append( - self.stash.store('\n\n'.join(items))) - items = [] - - if items: - new_blocks.append(self.stash.store('\n\n'.join(items))) - new_blocks.append('\n') - - return "\n\n".join(new_blocks) - -HTML_BLOCK_PREPROCESSOR = HtmlBlockPreprocessor() - - -class HeaderPreprocessor(Preprocessor): - - """ - Replaces underlined headers with hashed headers to avoid - the nead for lookahead later. 
- """ - - def run (self, lines): - - i = -1 - while i+1 < len(lines): - i = i+1 - if not lines[i].strip(): - continue - - if lines[i].startswith("#"): - lines.insert(i+1, "\n") - - if (i+1 <= len(lines) - and lines[i+1] - and lines[i+1][0] in ['-', '=']): - - underline = lines[i+1].strip() - - if underline == "="*len(underline): - lines[i] = "# " + lines[i].strip() - lines[i+1] = "" - elif underline == "-"*len(underline): - lines[i] = "## " + lines[i].strip() - lines[i+1] = "" - - return lines - -HEADER_PREPROCESSOR = HeaderPreprocessor() - - -class LinePreprocessor(Preprocessor): - """Deals with HR lines (needs to be done before processing lists)""" - - blockquote_re = re.compile(r'^(> )+') - - def run (self, lines): - for i in range(len(lines)): - prefix = '' - m = self.blockquote_re.search(lines[i]) - if m : prefix = m.group(0) - if self._isLine(lines[i][len(prefix):]): - lines[i] = prefix + self.stash.store("<hr />", safe=True) - return lines - - def _isLine(self, block): - """Determines if a block should be replaced with an <HR>""" - if block.startswith(" "): return 0 # a code block - text = "".join([x for x in block if not x.isspace()]) - if len(text) <= 2: - return 0 - for pattern in ['isline1', 'isline2', 'isline3']: - m = RE.regExp[pattern].match(text) - if (m and m.group(1)): - return 1 - else: - return 0 - -LINE_PREPROCESSOR = LinePreprocessor() - - -class ReferencePreprocessor(Preprocessor): - ''' - Removes reference definitions from the text and stores them for later use. - ''' - - def run (self, lines): - - new_text = []; - for line in lines: - m = RE.regExp['reference-def'].match(line) - if m: - id = m.group(2).strip().lower() - t = m.group(4).strip() # potential title - if not t: - self.references[id] = (m.group(3), t) - elif (len(t) >= 2 - and (t[0] == t[-1] == "\"" - or t[0] == t[-1] == "\'" - or (t[0] == "(" and t[-1] == ")") ) ): - self.references[id] = (m.group(3), t[1:-1]) - else: - new_text.append(line) - else: - new_text.append(line) - - return new_text #+ "\n" - -REFERENCE_PREPROCESSOR = ReferencePreprocessor() - -""" -====================================================================== -========================== INLINE PATTERNS =========================== -====================================================================== - -Inline patterns such as *emphasis* are handled by means of auxiliary -objects, one per pattern. Pattern objects must be instances of classes -that extend markdown.Pattern. Each pattern object uses a single regular -expression and needs support the following methods: - - pattern.getCompiledRegExp() - returns a regular expression - - pattern.handleMatch(m, doc) - takes a match object and returns - a NanoDom node (as a part of the provided - doc) or None - -All of python markdown's built-in patterns subclass from Patter, -but you can add additional patterns that don't. - -Also note that all the regular expressions used by inline must -capture the whole block. For this reason, they all start with -'^(.*)' and end with '(.*)!'. In case with built-in expression -Pattern takes care of adding the "^(.*)" and "(.*)!". - -Finally, the order in which regular expressions are applied is very -important - e.g. if we first replace http://.../ links with <a> tags -and _then_ try to replace inline html, we would end up with a mess. -So, we apply the expressions in the following order: - - * escape and backticks have to go before everything else, so - that we can preempt any markdown patterns by escaping them. 
- - * then we handle auto-links (must be done before inline html) - - * then we handle inline HTML. At this point we will simply - replace all inline HTML strings with a placeholder and add - the actual HTML to a hash. - - * then inline images (must be done before links) - - * then bracketed links, first regular then reference-style - - * finally we apply strong and emphasis -""" - -NOBRACKET = r'[^\]\[]*' -BRK = ( r'\[(' - + (NOBRACKET + r'(\[')*6 - + (NOBRACKET+ r'\])*')*6 - + NOBRACKET + r')\]' ) -NOIMG = r'(?<!\!)' - -BACKTICK_RE = r'\`([^\`]*)\`' # `e= m*c^2` -DOUBLE_BACKTICK_RE = r'\`\`(.*)\`\`' # ``e=f("`")`` -ESCAPE_RE = r'\\(.)' # \< -EMPHASIS_RE = r'\*([^\*]*)\*' # *emphasis* -STRONG_RE = r'\*\*(.*)\*\*' # **strong** -STRONG_EM_RE = r'\*\*\*([^_]*)\*\*\*' # ***strong*** - -if SMART_EMPHASIS: - EMPHASIS_2_RE = r'(?<!\S)_(\S[^_]*)_' # _emphasis_ -else: - EMPHASIS_2_RE = r'_([^_]*)_' # _emphasis_ - -STRONG_2_RE = r'__([^_]*)__' # __strong__ -STRONG_EM_2_RE = r'___([^_]*)___' # ___strong___ - -LINK_RE = NOIMG + BRK + r'\s*\(([^\)]*)\)' # [text](url) -LINK_ANGLED_RE = NOIMG + BRK + r'\s*\(<([^\)]*)>\)' # [text](<url>) -IMAGE_LINK_RE = r'\!' + BRK + r'\s*\(([^\)]*)\)' # ![alttxt](http://x.com/) -REFERENCE_RE = NOIMG + BRK+ r'\s*\[([^\]]*)\]' # [Google][3] -IMAGE_REFERENCE_RE = r'\!' + BRK + '\s*\[([^\]]*)\]' # ![alt text][2] -NOT_STRONG_RE = r'( \* )' # stand-alone * or _ -AUTOLINK_RE = r'<(http://[^>]*)>' # <http://www.123.com> -AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' # <me@example.com> -#HTML_RE = r'(\<[^\>]*\>)' # <...> -HTML_RE = r'(\<[a-zA-Z/][^\>]*\>)' # <...> -ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # & -LINE_BREAK_RE = r' \n' # two spaces at end of line -LINE_BREAK_2_RE = r' $' # two spaces at end of text - -class Pattern: - - def __init__ (self, pattern): - self.pattern = pattern - self.compiled_re = re.compile("^(.*)%s(.*)$" % pattern, re.DOTALL) - - def getCompiledRegExp (self): - return self.compiled_re - -BasePattern = Pattern # for backward compatibility - -class SimpleTextPattern (Pattern): - - def handleMatch(self, m, doc): - return doc.createTextNode(m.group(2)) - -class SimpleTagPattern (Pattern): - - def __init__ (self, pattern, tag): - Pattern.__init__(self, pattern) - self.tag = tag - - def handleMatch(self, m, doc): - el = doc.createElement(self.tag) - el.appendChild(doc.createTextNode(m.group(2))) - return el - -class SubstituteTagPattern (SimpleTagPattern): - - def handleMatch (self, m, doc): - return doc.createElement(self.tag) - -class BacktickPattern (Pattern): - - def __init__ (self, pattern): - Pattern.__init__(self, pattern) - self.tag = "code" - - def handleMatch(self, m, doc): - el = doc.createElement(self.tag) - text = m.group(2).strip() - #text = text.replace("&", "&") - el.appendChild(doc.createTextNode(text)) - return el - - -class DoubleTagPattern (SimpleTagPattern): - - def handleMatch(self, m, doc): - tag1, tag2 = self.tag.split(",") - el1 = doc.createElement(tag1) - el2 = doc.createElement(tag2) - el1.appendChild(el2) - el2.appendChild(doc.createTextNode(m.group(2))) - return el1 - - -class HtmlPattern (Pattern): - - def handleMatch (self, m, doc): - rawhtml = m.group(2) - place_holder = self.stash.store(rawhtml) - return doc.createTextNode(place_holder) - - -class LinkPattern (Pattern): - - def handleMatch(self, m, doc): - el = doc.createElement('a') - el.appendChild(doc.createTextNode(m.group(2))) - parts = m.group(9).split('"') - # We should now have [], [href], or [href, title] - if parts: - el.setAttribute('href', parts[0].strip()) - else: - 
el.setAttribute('href', "") - if len(parts) > 1: - # we also got a title - title = '"' + '"'.join(parts[1:]).strip() - title = dequote(title) #.replace('"', """) - el.setAttribute('title', title) - return el - - -class ImagePattern (Pattern): - - def handleMatch(self, m, doc): - el = doc.createElement('img') - src_parts = m.group(9).split() - if src_parts: - el.setAttribute('src', src_parts[0]) - else: - el.setAttribute('src', "") - if len(src_parts) > 1: - el.setAttribute('title', dequote(" ".join(src_parts[1:]))) - if ENABLE_ATTRIBUTES: - text = doc.createTextNode(m.group(2)) - el.appendChild(text) - text.handleAttributes() - truealt = text.value - el.childNodes.remove(text) - else: - truealt = m.group(2) - el.setAttribute('alt', truealt) - return el - -class ReferencePattern (Pattern): - - def handleMatch(self, m, doc): - - if m.group(9): - id = m.group(9).lower() - else: - # if we got something like "[Google][]" - # we'll use "google" as the id - id = m.group(2).lower() - - if not self.references.has_key(id): # ignore undefined refs - return None - href, title = self.references[id] - text = m.group(2) - return self.makeTag(href, title, text, doc) - - def makeTag(self, href, title, text, doc): - el = doc.createElement('a') - el.setAttribute('href', href) - if title: - el.setAttribute('title', title) - el.appendChild(doc.createTextNode(text)) - return el - - -class ImageReferencePattern (ReferencePattern): - - def makeTag(self, href, title, text, doc): - el = doc.createElement('img') - el.setAttribute('src', href) - if title: - el.setAttribute('title', title) - el.setAttribute('alt', text) - return el - - -class AutolinkPattern (Pattern): - - def handleMatch(self, m, doc): - el = doc.createElement('a') - el.setAttribute('href', m.group(2)) - el.appendChild(doc.createTextNode(m.group(2))) - return el - -class AutomailPattern (Pattern): - - def handleMatch(self, m, doc): - el = doc.createElement('a') - email = m.group(2) - if email.startswith("mailto:"): - email = email[len("mailto:"):] - for letter in email: - entity = doc.createEntityReference("#%d" % ord(letter)) - el.appendChild(entity) - mailto = "mailto:" + email - mailto = "".join(['&#%d;' % ord(letter) for letter in mailto]) - el.setAttribute('href', mailto) - return el - -ESCAPE_PATTERN = SimpleTextPattern(ESCAPE_RE) -NOT_STRONG_PATTERN = SimpleTextPattern(NOT_STRONG_RE) - -BACKTICK_PATTERN = BacktickPattern(BACKTICK_RE) -DOUBLE_BACKTICK_PATTERN = BacktickPattern(DOUBLE_BACKTICK_RE) -STRONG_PATTERN = SimpleTagPattern(STRONG_RE, 'strong') -STRONG_PATTERN_2 = SimpleTagPattern(STRONG_2_RE, 'strong') -EMPHASIS_PATTERN = SimpleTagPattern(EMPHASIS_RE, 'em') -EMPHASIS_PATTERN_2 = SimpleTagPattern(EMPHASIS_2_RE, 'em') - -STRONG_EM_PATTERN = DoubleTagPattern(STRONG_EM_RE, 'strong,em') -STRONG_EM_PATTERN_2 = DoubleTagPattern(STRONG_EM_2_RE, 'strong,em') - -LINE_BREAK_PATTERN = SubstituteTagPattern(LINE_BREAK_RE, 'br ') -LINE_BREAK_PATTERN_2 = SubstituteTagPattern(LINE_BREAK_2_RE, 'br ') - -LINK_PATTERN = LinkPattern(LINK_RE) -LINK_ANGLED_PATTERN = LinkPattern(LINK_ANGLED_RE) -IMAGE_LINK_PATTERN = ImagePattern(IMAGE_LINK_RE) -IMAGE_REFERENCE_PATTERN = ImageReferencePattern(IMAGE_REFERENCE_RE) -REFERENCE_PATTERN = ReferencePattern(REFERENCE_RE) - -HTML_PATTERN = HtmlPattern(HTML_RE) -ENTITY_PATTERN = HtmlPattern(ENTITY_RE) - -AUTOLINK_PATTERN = AutolinkPattern(AUTOLINK_RE) -AUTOMAIL_PATTERN = AutomailPattern(AUTOMAIL_RE) - - -""" -====================================================================== -========================== 
POST-PROCESSORS =========================== -====================================================================== - -Markdown also allows post-processors, which are similar to -preprocessors in that they need to implement a "run" method. However, -they are run after core processing. - -There are two types of post-processors: Postprocessor and TextPostprocessor -""" - - -class Postprocessor: - ''' - Postprocessors are run before the dom it converted back into text. - - Each Postprocessor implements a "run" method that takes a pointer to a - NanoDom document, modifies it as necessary and returns a NanoDom - document. - - Postprocessors must extend markdown.Postprocessor. - - There are currently no standard post-processors, but the footnote - extension uses one. - ''' - - def run(self, dom): - pass - - - -class TextPostprocessor: - ''' - TextPostprocessors are run after the dom it converted back into text. - - Each TextPostprocessor implements a "run" method that takes a pointer to a - text string, modifies it as necessary and returns a text string. - - TextPostprocessors must extend markdown.TextPostprocessor. - ''' - - def run(self, text): - pass - - -class RawHtmlTextPostprocessor(TextPostprocessor): - - def __init__(self): - pass - - def run(self, text): - for i in range(self.stash.html_counter): - html, safe = self.stash.rawHtmlBlocks[i] - if self.safeMode and not safe: - if str(self.safeMode).lower() == 'escape': - html = self.escape(html) - elif str(self.safeMode).lower() == 'remove': - html = '' - else: - html = HTML_REMOVED_TEXT - - text = text.replace("<p>%s\n</p>" % (HTML_PLACEHOLDER % i), - html + "\n") - text = text.replace(HTML_PLACEHOLDER % i, html) - return text - - def escape(self, html): - ''' Basic html escaping ''' - html = html.replace('&', '&') - html = html.replace('<', '<') - html = html.replace('>', '>') - return html.replace('"', '"') - -RAWHTMLTEXTPOSTPROCESSOR = RawHtmlTextPostprocessor() - -""" -====================================================================== -========================== MISC AUXILIARY CLASSES ==================== -====================================================================== -""" - -class HtmlStash: - """This class is used for stashing HTML objects that we extract - in the beginning and replace with place-holders.""" - - def __init__ (self): - self.html_counter = 0 # for counting inline html segments - self.rawHtmlBlocks=[] - - def store(self, html, safe=False): - """Saves an HTML segment for later reinsertion. Returns a - placeholder string that needs to be inserted into the - document. - - @param html: an html segment - @param safe: label an html segment as safe for safemode - @param inline: label a segmant as inline html - @returns : a placeholder string """ - self.rawHtmlBlocks.append((html, safe)) - placeholder = HTML_PLACEHOLDER % self.html_counter - self.html_counter += 1 - return placeholder - - -class BlockGuru: - - def _findHead(self, lines, fn, allowBlank=0): - - """Functional magic to help determine boundaries of indented - blocks. 
- - @param lines: an array of strings - @param fn: a function that returns a substring of a string - if the string matches the necessary criteria - @param allowBlank: specifies whether it's ok to have blank - lines between matching functions - @returns: a list of post processes items and the unused - remainder of the original list""" - - items = [] - - i = 0 # to keep track of where we are - - for line in lines: - - if not line.strip() and not allowBlank: - return items, lines[i:] - - if not line.strip() and allowBlank: - # If we see a blank line, this _might_ be the end - i += 1 - - # Find the next non-blank line - for j in range(i, len(lines)): - if lines[j].strip(): - next = lines[j] - break - else: - # There is no more text => this is the end - break - - # Check if the next non-blank line is still a part of the list - - part = fn(next) - - if part: - items.append("") - continue - else: - break # found end of the list - - part = fn(line) - - if part: - items.append(part) - i += 1 - continue - else: - return items, lines[i:] - else: - i += 1 - - return items, lines[i:] - - - def detabbed_fn(self, line): - """ An auxiliary method to be passed to _findHead """ - m = RE.regExp['tabbed'].match(line) - if m: - return m.group(4) - else: - return None - - - def detectTabbed(self, lines): - - return self._findHead(lines, self.detabbed_fn, - allowBlank = 1) - - -def print_error(string): - """Print an error string to stderr""" - sys.stderr.write(string +'\n') - - -def dequote(string): - """ Removes quotes from around a string """ - if ( ( string.startswith('"') and string.endswith('"')) - or (string.startswith("'") and string.endswith("'")) ): - return string[1:-1] - else: - return string - -""" -====================================================================== -========================== CORE MARKDOWN ============================= -====================================================================== - -This stuff is ugly, so if you are thinking of extending the syntax, -see first if you can do it via pre-processors, post-processors, -inline patterns or a combination of the three. -""" - -class CorePatterns: - """This class is scheduled for removal as part of a refactoring - effort.""" - - patterns = { - 'header': r'(#*)([^#]*)(#*)', # # A title - 'reference-def': r'(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)', - # [Google]: http://www.google.com/ - 'containsline': r'([-]*)$|^([=]*)', # -----, =====, etc. - 'ol': r'[ ]{0,3}[\d]*\.\s+(.*)', # 1. text - 'ul': r'[ ]{0,3}[*+-]\s+(.*)', # "* text" - 'isline1': r'(\**)', # *** - 'isline2': r'(\-*)', # --- - 'isline3': r'(\_*)', # ___ - 'tabbed': r'((\t)|( ))(.*)', # an indented line - 'quoted': r'> ?(.*)', # a quoted block ("> ...") - } - - def __init__ (self): - - self.regExp = {} - for key in self.patterns.keys(): - self.regExp[key] = re.compile("^%s$" % self.patterns[key], - re.DOTALL) - - self.regExp['containsline'] = re.compile(r'^([-]*)$|^([=]*)$', re.M) - -RE = CorePatterns() +import preprocessors +import blockprocessors +import treeprocessors +import inlinepatterns +import postprocessors +import blockparser +import etree_loader +import odict + +# Extensions should use "markdown.etree" instead of "etree" (or do `from +# markdown import etree`). Do not import it by yourself. 
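The comment above deserves a concrete illustration: etree_loader picks the best available ElementTree implementation at import time, so extension code should build elements through markdown.etree instead of importing xml.etree or cElementTree on its own. A minimal sketch, assuming the bundled module is importable as plain `markdown` (in calibre it lives at calibre.ebooks.markdown.markdown); the make_note helper is hypothetical and not part of this patch:

    import markdown

    def make_note(text):
        # markdown.etree is whichever implementation importETree() selected
        el = markdown.etree.Element("div")
        el.set("class", "note")
        el.text = text
        return el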
+ +etree = etree_loader.importETree() + +# Adds the ability to output html4 +import html4 class Markdown: - """ Markdown formatter class for creating an html document from - Markdown text """ + """Convert Markdown to HTML.""" - - def __init__(self, source=None, # depreciated + def __init__(self, extensions=[], - extension_configs=None, - safe_mode = False): - """Creates a new Markdown instance. + extension_configs={}, + safe_mode = False, + output_format=DEFAULT_OUTPUT_FORMAT): + """ + Creates a new Markdown instance. - @param source: The text in Markdown format. Depreciated! - @param extensions: A list if extensions. - @param extension-configs: Configuration setting for extensions. - @param safe_mode: Disallow raw html. """ + Keyword arguments: - self.source = source - if source is not None: - message(WARN, "The `source` arg of Markdown.__init__() is depreciated and will be removed in the future. Use `instance.convert(source)` instead.") + * extensions: A list of extensions. + If they are of type string, the module mdx_name.py will be loaded. + If they are a subclass of markdown.Extension, they will be used + as-is. + * extension-configs: Configuration setting for extensions. + * safe_mode: Disallow raw html. One of "remove", "replace" or "escape". + * output_format: Format of output. Supported formats are: + * "xhtml1": Outputs XHTML 1.x. Default. + * "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1). + * "html4": Outputs HTML 4 + * "html": Outputs latest supported version of HTML (currently HTML 4). + Note that it is suggested that the more specific formats ("xhtml1" + and "html4") be used as "xhtml" or "html" may change in the future + if it makes sense at that time. + + """ + self.safeMode = safe_mode - self.blockGuru = BlockGuru() self.registeredExtensions = [] - self.stripTopLevelTags = 1 self.docType = "" + self.stripTopLevelTags = True - self.textPreprocessors = [HTML_BLOCK_PREPROCESSOR] + # Preprocessors + self.preprocessors = odict.OrderedDict() + self.preprocessors["html_block"] = \ + preprocessors.HtmlBlockPreprocessor(self) + self.preprocessors["reference"] = \ + preprocessors.ReferencePreprocessor(self) + # footnote preprocessor will be inserted with "<reference" - self.preprocessors = [HEADER_PREPROCESSOR, - LINE_PREPROCESSOR, - # A footnote preprocessor will - # get inserted here - REFERENCE_PREPROCESSOR] + # Block processors - ran by the parser + self.parser = blockparser.BlockParser() + self.parser.blockprocessors['empty'] = \ + blockprocessors.EmptyBlockProcessor(self.parser) + self.parser.blockprocessors['indent'] = \ + blockprocessors.ListIndentProcessor(self.parser) + self.parser.blockprocessors['code'] = \ + blockprocessors.CodeBlockProcessor(self.parser) + self.parser.blockprocessors['hashheader'] = \ + blockprocessors.HashHeaderProcessor(self.parser) + self.parser.blockprocessors['setextheader'] = \ + blockprocessors.SetextHeaderProcessor(self.parser) + self.parser.blockprocessors['hr'] = \ + blockprocessors.HRProcessor(self.parser) + self.parser.blockprocessors['olist'] = \ + blockprocessors.OListProcessor(self.parser) + self.parser.blockprocessors['ulist'] = \ + blockprocessors.UListProcessor(self.parser) + self.parser.blockprocessors['quote'] = \ + blockprocessors.BlockQuoteProcessor(self.parser) + self.parser.blockprocessors['paragraph'] = \ + blockprocessors.ParagraphProcessor(self.parser) - self.postprocessors = [] # a footnote postprocessor will get - # inserted later + #self.prePatterns = [] - self.textPostprocessors = [# a footnote 
postprocessor will get - # inserted here - RAWHTMLTEXTPOSTPROCESSOR] - - self.prePatterns = [] + # Inline patterns - Run on the tree + self.inlinePatterns = odict.OrderedDict() + self.inlinePatterns["backtick"] = \ + inlinepatterns.BacktickPattern(inlinepatterns.BACKTICK_RE) + self.inlinePatterns["escape"] = \ + inlinepatterns.SimpleTextPattern(inlinepatterns.ESCAPE_RE) + self.inlinePatterns["reference"] = \ + inlinepatterns.ReferencePattern(inlinepatterns.REFERENCE_RE, self) + self.inlinePatterns["link"] = \ + inlinepatterns.LinkPattern(inlinepatterns.LINK_RE, self) + self.inlinePatterns["image_link"] = \ + inlinepatterns.ImagePattern(inlinepatterns.IMAGE_LINK_RE, self) + self.inlinePatterns["image_reference"] = \ + inlinepatterns.ImageReferencePattern(inlinepatterns.IMAGE_REFERENCE_RE, self) + self.inlinePatterns["autolink"] = \ + inlinepatterns.AutolinkPattern(inlinepatterns.AUTOLINK_RE, self) + self.inlinePatterns["automail"] = \ + inlinepatterns.AutomailPattern(inlinepatterns.AUTOMAIL_RE, self) + self.inlinePatterns["linebreak2"] = \ + inlinepatterns.SubstituteTagPattern(inlinepatterns.LINE_BREAK_2_RE, 'br') + self.inlinePatterns["linebreak"] = \ + inlinepatterns.SubstituteTagPattern(inlinepatterns.LINE_BREAK_RE, 'br') + self.inlinePatterns["html"] = \ + inlinepatterns.HtmlPattern(inlinepatterns.HTML_RE, self) + self.inlinePatterns["entity"] = \ + inlinepatterns.HtmlPattern(inlinepatterns.ENTITY_RE, self) + self.inlinePatterns["not_strong"] = \ + inlinepatterns.SimpleTextPattern(inlinepatterns.NOT_STRONG_RE) + self.inlinePatterns["strong_em"] = \ + inlinepatterns.DoubleTagPattern(inlinepatterns.STRONG_EM_RE, 'strong,em') + self.inlinePatterns["strong"] = \ + inlinepatterns.SimpleTagPattern(inlinepatterns.STRONG_RE, 'strong') + self.inlinePatterns["emphasis"] = \ + inlinepatterns.SimpleTagPattern(inlinepatterns.EMPHASIS_RE, 'em') + self.inlinePatterns["emphasis2"] = \ + inlinepatterns.SimpleTagPattern(inlinepatterns.EMPHASIS_2_RE, 'em') + # The order of the handlers matters!!! - self.inlinePatterns = [DOUBLE_BACKTICK_PATTERN, - BACKTICK_PATTERN, - ESCAPE_PATTERN, - REFERENCE_PATTERN, - LINK_ANGLED_PATTERN, - LINK_PATTERN, - IMAGE_LINK_PATTERN, - IMAGE_REFERENCE_PATTERN, - AUTOLINK_PATTERN, - AUTOMAIL_PATTERN, - #LINE_BREAK_PATTERN_2, Removed by Kovid as causes problems with mdx_tables - LINE_BREAK_PATTERN, - HTML_PATTERN, - ENTITY_PATTERN, - NOT_STRONG_PATTERN, - STRONG_EM_PATTERN, - STRONG_EM_PATTERN_2, - STRONG_PATTERN, - STRONG_PATTERN_2, - EMPHASIS_PATTERN, - EMPHASIS_PATTERN_2 - # The order of the handlers matters!!! - ] + # Tree processors - run once we have a basic parse. + self.treeprocessors = odict.OrderedDict() + self.treeprocessors["inline"] = treeprocessors.InlineProcessor(self) + self.treeprocessors["prettify"] = \ + treeprocessors.PrettifyTreeprocessor(self) + # Postprocessors - finishing touches. 
+ self.postprocessors = odict.OrderedDict() + self.postprocessors["raw_html"] = \ + postprocessors.RawHtmlPostprocessor(self) + self.postprocessors["amp_substitute"] = \ + postprocessors.AndSubstitutePostprocessor() + # footnote postprocessor will be inserted with ">amp_substitute" + + # Map format keys to serializers + self.output_formats = { + 'html' : html4.to_html_string, + 'html4' : html4.to_html_string, + 'xhtml' : etree.tostring, + 'xhtml1': etree.tostring, + } + + self.references = {} + self.htmlStash = preprocessors.HtmlStash() self.registerExtensions(extensions = extensions, configs = extension_configs) - + self.set_output_format(output_format) self.reset() - def registerExtensions(self, extensions, configs): + """ + Register extensions with this instance of Markdown. - if not configs: - configs = {} + Keyword aurguments: + * extensions: A list of extensions, which can either + be strings or objects. See the docstring on Markdown. + * configs: A dictionary mapping module names to config options. + + """ for ext in extensions: - - extension_module_name = "calibre.ebooks.markdown.mdx_" + ext - + if isinstance(ext, basestring): + ext = load_extension(ext, configs.get(ext, [])) try: - module = sys.modules[extension_module_name] - - except: - message(CRITICAL, - "couldn't load extension %s (looking for %s module)" - % (ext, extension_module_name) ) - else: - - if configs.has_key(ext): - configs_for_ext = configs[ext] - else: - configs_for_ext = [] - extension = module.makeExtension(configs_for_ext) - extension.extendMarkdown(self, globals()) - - - + ext.extendMarkdown(self, globals()) + except AttributeError: + message(ERROR, "Incorrect type! Extension '%s' is " + "neither a string or an Extension." %(repr(ext))) + def registerExtension(self, extension): """ This gets called by the extension """ self.registeredExtensions.append(extension) def reset(self): - """Resets all state variables so that we can start - with a new text.""" - self.references={} - self.htmlStash = HtmlStash() - - HTML_BLOCK_PREPROCESSOR.stash = self.htmlStash - LINE_PREPROCESSOR.stash = self.htmlStash - REFERENCE_PREPROCESSOR.references = self.references - HTML_PATTERN.stash = self.htmlStash - ENTITY_PATTERN.stash = self.htmlStash - REFERENCE_PATTERN.references = self.references - IMAGE_REFERENCE_PATTERN.references = self.references - RAWHTMLTEXTPOSTPROCESSOR.stash = self.htmlStash - RAWHTMLTEXTPOSTPROCESSOR.safeMode = self.safeMode + """ + Resets all state variables so that we can start with a new text. + """ + self.htmlStash.reset() + self.references.clear() for extension in self.registeredExtensions: extension.reset() + def set_output_format(self, format): + """ Set the output format for the class instance. """ + try: + self.serializer = self.output_formats[format.lower()] + except KeyError: + message(CRITICAL, 'Invalid Output Format: "%s". Use one of %s.' \ + % (format, self.output_formats.keys())) - def _transform(self): - """Transforms the Markdown text into a XHTML body document + def convert(self, source): + """ + Convert markdown to serialized XHTML or HTML. - @returns: A NanoDom Document """ + Keyword arguments: - # Setup the document + * source: Source text as a Unicode string. 
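Because every processing step is now kept in a name-keyed odict.OrderedDict, an extension can splice its own step in relative to an existing one instead of counting list positions. A minimal sketch using the index() and insert() helpers that odict.py defines later in this patch; NotePreprocessor is a made-up placeholder (the real footnotes extension targets the same slot with the "<reference" location form noted in the comments above), and the sketch assumes Preprocessor's constructor accepts the Markdown instance, as the registrations above pass `self`:

    from calibre.ebooks.markdown import markdown, preprocessors

    class NotePreprocessor(preprocessors.Preprocessor):
        """Hypothetical no-op preprocessor, only here to show registration."""
        def run(self, lines):
            return lines

    md = markdown.Markdown()
    # Drop the new step in just before the built-in "reference" preprocessor.
    md.preprocessors.insert(md.preprocessors.index("reference"),
                            "note", NotePreprocessor(md))
    html = md.convert(u"Some *text*")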
- self.doc = Document() - self.top_element = self.doc.createElement("span") - self.top_element.appendChild(self.doc.createTextNode('\n')) - self.top_element.setAttribute('class', 'markdown') - self.doc.appendChild(self.top_element) - - # Fixup the source text - text = self.source - text = text.replace("\r\n", "\n").replace("\r", "\n") - text += "\n\n" - text = text.expandtabs(TAB_LENGTH) - - # Split into lines and run the preprocessors that will work with - # self.lines - - self.lines = text.split("\n") - - # Run the pre-processors on the lines - for prep in self.preprocessors : - self.lines = prep.run(self.lines) - - # Create a NanoDom tree from the lines and attach it to Document - - - buffer = [] - for line in self.lines: - if line.startswith("#"): - self._processSection(self.top_element, buffer) - buffer = [line] - else: - buffer.append(line) - self._processSection(self.top_element, buffer) - - #self._processSection(self.top_element, self.lines) - - # Not sure why I put this in but let's leave it for now. - self.top_element.appendChild(self.doc.createTextNode('\n')) - - # Run the post-processors - for postprocessor in self.postprocessors: - postprocessor.run(self.doc) - - return self.doc - - - def _processSection(self, parent_elem, lines, - inList = 0, looseList = 0): - - """Process a section of a source document, looking for high - level structural elements like lists, block quotes, code - segments, html blocks, etc. Some those then get stripped - of their high level markup (e.g. get unindented) and the - lower-level markup is processed recursively. - - @param parent_elem: A NanoDom element to which the content - will be added - @param lines: a list of lines - @param inList: a level - @returns: None""" - - # Loop through lines until none left. - while lines: - - # Check if this section starts with a list, a blockquote or - # a code block - - processFn = { 'ul': self._processUList, - 'ol': self._processOList, - 'quoted': self._processQuote, - 'tabbed': self._processCodeBlock} - - for regexp in ['ul', 'ol', 'quoted', 'tabbed']: - m = RE.regExp[regexp].match(lines[0]) - if m: - processFn[regexp](parent_elem, lines, inList) - return - - # We are NOT looking at one of the high-level structures like - # lists or blockquotes. So, it's just a regular paragraph - # (though perhaps nested inside a list or something else). If - # we are NOT inside a list, we just need to look for a blank - # line to find the end of the block. If we ARE inside a - # list, however, we need to consider that a sublist does not - # need to be separated by a blank line. Rather, the following - # markup is legal: - # - # * The top level list item - # - # Another paragraph of the list. This is where we are now. - # * Underneath we might have a sublist. 
- # - - if inList: - - start, lines = self._linesUntil(lines, (lambda line: - RE.regExp['ul'].match(line) - or RE.regExp['ol'].match(line) - or not line.strip())) - - self._processSection(parent_elem, start, - inList - 1, looseList = looseList) - inList = inList-1 - - else: # Ok, so it's just a simple block - - paragraph, lines = self._linesUntil(lines, lambda line: - not line.strip()) - - if len(paragraph) and paragraph[0].startswith('#'): - self._processHeader(parent_elem, paragraph) - - elif paragraph: - self._processParagraph(parent_elem, paragraph, - inList, looseList) - - if lines and not lines[0].strip(): - lines = lines[1:] # skip the first (blank) line - - - def _processHeader(self, parent_elem, paragraph): - m = RE.regExp['header'].match(paragraph[0]) - if m: - level = len(m.group(1)) - h = self.doc.createElement("h%d" % level) - parent_elem.appendChild(h) - for item in self._handleInline(m.group(2).strip()): - h.appendChild(item) - else: - message(CRITICAL, "We've got a problem header!") - - - def _processParagraph(self, parent_elem, paragraph, inList, looseList): - list = self._handleInline("\n".join(paragraph)) - - if ( parent_elem.nodeName == 'li' - and not (looseList or parent_elem.childNodes)): - - # If this is the first paragraph inside "li", don't - # put <p> around it - append the paragraph bits directly - # onto parent_elem - el = parent_elem - else: - # Otherwise make a "p" element - el = self.doc.createElement("p") - parent_elem.appendChild(el) - - for item in list: - el.appendChild(item) - - - def _processUList(self, parent_elem, lines, inList): - self._processList(parent_elem, lines, inList, - listexpr='ul', tag = 'ul') - - def _processOList(self, parent_elem, lines, inList): - self._processList(parent_elem, lines, inList, - listexpr='ol', tag = 'ol') - - - def _processList(self, parent_elem, lines, inList, listexpr, tag): - """Given a list of document lines starting with a list item, - finds the end of the list, breaks it up, and recursively - processes each list item and the remainder of the text file. 
- - @param parent_elem: A dom element to which the content will be added - @param lines: a list of lines - @param inList: a level - @returns: None""" - - ul = self.doc.createElement(tag) # ul might actually be '<ol>' - parent_elem.appendChild(ul) - - looseList = 0 - - # Make a list of list items - items = [] - item = -1 - - i = 0 # a counter to keep track of where we are - - for line in lines: - - loose = 0 - if not line.strip(): - # If we see a blank line, this _might_ be the end of the list - i += 1 - loose = 1 - - # Find the next non-blank line - for j in range(i, len(lines)): - if lines[j].strip(): - next = lines[j] - break - else: - # There is no more text => end of the list - break - - # Check if the next non-blank line is still a part of the list - if ( RE.regExp['ul'].match(next) or - RE.regExp['ol'].match(next) or - RE.regExp['tabbed'].match(next) ): - # get rid of any white space in the line - items[item].append(line.strip()) - looseList = loose or looseList - continue - else: - break # found end of the list - - # Now we need to detect list items (at the current level) - # while also detabing child elements if necessary - - for expr in ['ul', 'ol', 'tabbed']: - - m = RE.regExp[expr].match(line) - if m: - if expr in ['ul', 'ol']: # We are looking at a new item - #if m.group(1) : - # Removed the check to allow for a blank line - # at the beginning of the list item - items.append([m.group(1)]) - item += 1 - elif expr == 'tabbed': # This line needs to be detabbed - items[item].append(m.group(4)) #after the 'tab' - - i += 1 - break - else: - items[item].append(line) # Just regular continuation - i += 1 # added on 2006.02.25 - else: - i += 1 - - # Add the dom elements - for item in items: - li = self.doc.createElement("li") - ul.appendChild(li) - - self._processSection(li, item, inList + 1, looseList = looseList) - - # Process the remaining part of the section - - self._processSection(parent_elem, lines[i:], inList) - - - def _linesUntil(self, lines, condition): - """ A utility function to break a list of lines upon the - first line that satisfied a condition. The condition - argument should be a predicate function. - """ - - i = -1 - for line in lines: - i += 1 - if condition(line): break - else: - i += 1 - return lines[:i], lines[i:] - - def _processQuote(self, parent_elem, lines, inList): - """Given a list of document lines starting with a quote finds - the end of the quote, unindents it and recursively - processes the body of the quote and the remainder of the - text file. 
- - @param parent_elem: DOM element to which the content will be added - @param lines: a list of lines - @param inList: a level - @returns: None """ - - dequoted = [] - i = 0 - blank_line = False # allow one blank line between paragraphs - for line in lines: - m = RE.regExp['quoted'].match(line) - if m: - dequoted.append(m.group(1)) - i += 1 - blank_line = False - elif not blank_line and line.strip() != '': - dequoted.append(line) - i += 1 - elif not blank_line and line.strip() == '': - dequoted.append(line) - i += 1 - blank_line = True - else: - break - - blockquote = self.doc.createElement('blockquote') - parent_elem.appendChild(blockquote) - - self._processSection(blockquote, dequoted, inList) - self._processSection(parent_elem, lines[i:], inList) - - - - - def _processCodeBlock(self, parent_elem, lines, inList): - """Given a list of document lines starting with a code block - finds the end of the block, puts it into the dom verbatim - wrapped in ("<pre><code>") and recursively processes the - the remainder of the text file. - - @param parent_elem: DOM element to which the content will be added - @param lines: a list of lines - @param inList: a level - @returns: None""" - - detabbed, theRest = self.blockGuru.detectTabbed(lines) - - pre = self.doc.createElement('pre') - code = self.doc.createElement('code') - parent_elem.appendChild(pre) - pre.appendChild(code) - text = "\n".join(detabbed).rstrip()+"\n" - #text = text.replace("&", "&") - code.appendChild(self.doc.createTextNode(text)) - self._processSection(parent_elem, theRest, inList) - - - - def _handleInline (self, line, patternIndex=0): - """Transform a Markdown line with inline elements to an XHTML - fragment. - - This function uses auxiliary objects called inline patterns. - See notes on inline patterns above. - - @param line: A line of Markdown text - @param patternIndex: The index of the inlinePattern to start with - @return: A list of NanoDom nodes """ - - - parts = [line] - - while patternIndex < len(self.inlinePatterns): - - i = 0 - - while i < len(parts): - - x = parts[i] - - if isinstance(x, (str, unicode)): - result = self._applyPattern(x, \ - self.inlinePatterns[patternIndex], \ - patternIndex) - - if result: - i -= 1 - parts.remove(x) - for y in result: - parts.insert(i+1,y) - - i += 1 - patternIndex += 1 - - for i in range(len(parts)): - x = parts[i] - if isinstance(x, (str, unicode)): - parts[i] = self.doc.createTextNode(x) - - return parts - - - def _applyPattern(self, line, pattern, patternIndex): - - """ Given a pattern name, this function checks if the line - fits the pattern, creates the necessary elements, and returns - back a list consisting of NanoDom elements and/or strings. - - @param line: the text to be processed - @param pattern: the pattern to be checked - - @returns: the appropriate newly created NanoDom element if the - pattern matches, None otherwise. """ - # match the line to pattern's pre-compiled reg exp. - # if no match, move on. 
- - - - m = pattern.getCompiledRegExp().match(line) - if not m: - return None - - # if we got a match let the pattern make us a NanoDom node - # if it doesn't, move on - node = pattern.handleMatch(m, self.doc) - - # check if any of this nodes have children that need processing - - if isinstance(node, Element): - - if not node.nodeName in ["code", "pre"]: - for child in node.childNodes: - if isinstance(child, TextNode): - - result = self._handleInline(child.value, patternIndex+1) - - if result: - - if result == [child]: - continue - - result.reverse() - #to make insertion easier - - position = node.childNodes.index(child) - - node.removeChild(child) - - for item in result: - - if isinstance(item, (str, unicode)): - if len(item) > 0: - node.insertChild(position, - self.doc.createTextNode(item)) - else: - node.insertChild(position, item) - - - - - if node: - # Those are in the reverse order! - return ( m.groups()[-1], # the string to the left - node, # the new node - m.group(1)) # the string to the right of the match - - else: - return None - - def convert (self, source = None): - """Return the document in XHTML format. - - @returns: A serialized XHTML body.""" - - if source is not None: #Allow blank string - self.source = source - - if not self.source: - return u"" - + # Fixup the source text + if not source.strip(): + return u"" # a blank unicode string try: - self.source = unicode(self.source) + source = unicode(source) except UnicodeDecodeError: - message(CRITICAL, 'UnicodeDecodeError: Markdown only accepts unicode or ascii input.') + message(CRITICAL, 'UnicodeDecodeError: Markdown only accepts unicode or ascii input.') return u"" - for pp in self.textPreprocessors: - self.source = pp.run(self.source) + source = source.replace(STX, "").replace(ETX, "") + source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n" + source = re.sub(r'\n\s+\n', '\n\n', source) + source = source.expandtabs(TAB_LENGTH) - doc = self._transform() - xml = doc.toxml() + # Split into lines and run the line preprocessors. + self.lines = source.split("\n") + for prep in self.preprocessors.values(): + self.lines = prep.run(self.lines) + # Parse the high-level elements. + root = self.parser.parseDocument(self.lines).getroot() - # Return everything but the top level tag + # Run the tree-processors + for treeprocessor in self.treeprocessors.values(): + newRoot = treeprocessor.run(root) + if newRoot: + root = newRoot + # Serialize _properly_. Strip top-level tags. + output, length = codecs.utf_8_decode(self.serializer(root, encoding="utf8")) if self.stripTopLevelTags: - xml = xml.strip()[23:-7] + "\n" + start = output.index('<%s>'%DOC_TAG)+len(DOC_TAG)+2 + end = output.rindex('</%s>'%DOC_TAG) + output = output[start:end].strip() - for pp in self.textPostprocessors: - xml = pp.run(xml) + # Run the text post-processors + for pp in self.postprocessors.values(): + output = pp.run(output) - return (self.docType + xml).strip() + return output.strip() + def convertFile(self, input=None, output=None, encoding=None): + """Converts a markdown file and returns the HTML as a unicode string. - def __str__(self): - ''' Report info about instance. Markdown always returns unicode. ''' - if self.source is None: - status = 'in which no source text has been assinged.' + Decodes the file using the provided encoding (defaults to utf-8), + passes the file content to markdown, and outputs the html to either + the provided stream or the file with provided name, using the same + encoding as the source file. 
+ + **Note:** This is the only place that decoding and encoding of unicode + takes place in Python-Markdown. (All other code is unicode-in / + unicode-out.) + + Keyword arguments: + + * input: Name of source text file. + * output: Name of output file. Writes to stdout if `None`. + * encoding: Encoding of input and output files. Defaults to utf-8. + + """ + + encoding = encoding or "utf-8" + + # Read the source + input_file = codecs.open(input, mode="r", encoding=encoding) + text = input_file.read() + input_file.close() + text = text.lstrip(u'\ufeff') # remove the byte-order mark + + # Convert + html = self.convert(text) + + # Write to file or stdout + if isinstance(output, (str, unicode)): + output_file = codecs.open(output, "w", encoding=encoding) + output_file.write(html) + output_file.close() else: - status = 'which contains %d chars and %d line(s) of source.'%\ - (len(self.source), self.source.count('\n')+1) - return 'An instance of "%s" %s'% (self.__class__, status) - - __unicode__ = convert # markdown should always return a unicode string + output.write(html.encode(encoding)) - - - -# ==================================================================== - -def markdownFromFile(input = None, - output = None, - extensions = [], - encoding = None, - message_threshold = CRITICAL, - safe = False): - - global console_hndlr - console_hndlr.setLevel(message_threshold) - - message(DEBUG, "input file: %s" % input) - - if not encoding: - encoding = "utf-8" - - input_file = codecs.open(input, mode="r", encoding=encoding) - text = input_file.read() - input_file.close() - - text = removeBOM(text, encoding) - - new_text = markdown(text, extensions, safe_mode = safe) - - if output: - output_file = codecs.open(output, "w", encoding=encoding) - output_file.write(new_text) - output_file.close() - - else: - sys.stdout.write(new_text.encode(encoding)) - -def markdown(text, - extensions = [], - safe_mode = False): - - message(DEBUG, "in markdown.markdown(), received text:\n%s" % text) - - extension_names = [] - extension_configs = {} - - for ext in extensions: - pos = ext.find("(") - if pos == -1: - extension_names.append(ext) - else: - name = ext[:pos] - extension_names.append(name) - pairs = [x.split("=") for x in ext[pos+1:-1].split(",")] - configs = [(x.strip(), y.strip()) for (x, y) in pairs] - extension_configs[name] = configs - - md = Markdown(extensions=extension_names, - extension_configs=extension_configs, - safe_mode = safe_mode) - - return md.convert(text) - +""" +Extensions +----------------------------------------------------------------------------- +""" class Extension: - + """ Base class for extensions to subclass. """ def __init__(self, configs = {}): + """Create an instance of an Extention. + + Keyword arguments: + + * configs: A dict of configuration setting used by an Extension. + """ self.config = configs def getConfig(self, key): - if self.config.has_key(key): + """ Return a setting for the given key or an empty string. """ + if key in self.config: return self.config[key][0] else: return "" def getConfigInfo(self): + """ Return all config settings as a list of tuples. """ return [(key, self.config[key][1]) for key in self.config.keys()] def setConfig(self, key, value): + """ Set a config setting for `key` with the given `value`. """ self.config[key][0] = value + def extendMarkdown(self, md, md_globals): + """ + Add the various proccesors and patterns to the Markdown Instance. + + This method must be overriden by every extension. + + Keyword arguments: + + * md: The Markdown instance. 
+ + * md_globals: Global variables in the markdown module namespace. + + """ + pass + + +def load_extension(ext_name, configs = []): + """Load extension by name, then return the module. + + The extension name may contain arguments as part of the string in the + following format: "extname(key1=value1,key2=value2)" + + """ + + # Parse extensions config params (ignore the order) + configs = dict(configs) + pos = ext_name.find("(") # find the first "(" + if pos > 0: + ext_args = ext_name[pos+1:-1] + ext_name = ext_name[:pos] + pairs = [x.split("=") for x in ext_args.split(",")] + configs.update([(x.strip(), y.strip()) for (x, y) in pairs]) + + # Setup the module names + ext_module = 'calibre.ebooks.markdown.extensions' + module_name_new_style = '.'.join([ext_module, ext_name]) + module_name_old_style = '_'.join(['mdx', ext_name]) + + # Try loading the extention first from one place, then another + try: # New style (markdown.extensons.<extension>) + module = __import__(module_name_new_style, {}, {}, [ext_module]) + except ImportError: + try: # Old style (mdx.<extension>) + module = __import__(module_name_old_style) + except ImportError: + message(WARN, "Failed loading extension '%s' from '%s' or '%s'" + % (ext_name, module_name_new_style, module_name_old_style)) + # Return None so we don't try to initiate none-existant extension + return None + + # If the module is loaded successfully, we expect it to define a + # function called makeExtension() + try: + return module.makeExtension(configs.items()) + except AttributeError: + message(CRITICAL, "Failed to initiate extension '%s'" % ext_name) + + +def load_extensions(ext_names): + """Loads multiple extensions""" + extensions = [] + for ext_name in ext_names: + extension = load_extension(ext_name) + if extension: + extensions.append(extension) + return extensions + + +""" +EXPORTED FUNCTIONS +============================================================================= + +Those are the two functions we really mean to export: markdown() and +markdownFromFile(). +""" + +def markdown(text, + extensions = [], + safe_mode = False, + output_format = DEFAULT_OUTPUT_FORMAT): + """Convert a markdown string to HTML and return HTML as a unicode string. + + This is a shortcut function for `Markdown` class to cover the most + basic use case. It initializes an instance of Markdown, loads the + necessary extensions and runs the parser on the given text. + + Keyword arguments: + + * text: Markdown formatted text as Unicode or ASCII string. + * extensions: A list of extensions or extension names (may contain config args). + * safe_mode: Disallow raw html. One of "remove", "replace" or "escape". + * output_format: Format of output. Supported formats are: + * "xhtml1": Outputs XHTML 1.x. Default. + * "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1). + * "html4": Outputs HTML 4 + * "html": Outputs latest supported version of HTML (currently HTML 4). + Note that it is suggested that the more specific formats ("xhtml1" + and "html4") be used as "xhtml" or "html" may change in the future + if it makes sense at that time. + + Returns: An HTML document as a string. 
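Taken together, load_extension()'s "extname(key1=value1,key2=value2)" string format and this markdown() shortcut give the usual one-call entry point. A usage sketch; the wikilinks arguments are only an illustration of the config-in-the-name syntax, not a claim about that extension's defaults:

    from calibre.ebooks.markdown import markdown

    html = markdown.markdown(u"Link to a [[WikiPage]] here.",
                             extensions=["wikilinks(base_url=/wiki/)"],
                             safe_mode="escape",
                             output_format="html4")

As the docstring notes, preferring the explicit "xhtml1" or "html4" names keeps output stable if the generic "xhtml"/"html" aliases are remapped in a later release.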
+ + """ + md = Markdown(extensions=load_extensions(extensions), + safe_mode=safe_mode, + output_format=output_format) + return md.convert(text) + + +def markdownFromFile(input = None, + output = None, + extensions = [], + encoding = None, + safe_mode = False, + output_format = DEFAULT_OUTPUT_FORMAT): + """Read markdown code from a file and write it to a file or a stream.""" + md = Markdown(extensions=load_extensions(extensions), + safe_mode=safe_mode, + output_format=output_format) + md.convertFile(input, output, encoding) + OPTPARSE_WARNING = """ Python 2.3 or higher required for advanced command line options. @@ -1853,19 +620,11 @@ def parse_options(): metavar="OUTPUT_FILE") parser.add_option("-e", "--encoding", dest="encoding", help="encoding for input and output files",) - parser.add_option("-q", "--quiet", default = CRITICAL, - action="store_const", const=60, dest="verbose", - help="suppress all messages") - parser.add_option("-v", "--verbose", - action="store_const", const=INFO, dest="verbose", - help="print info messages") - parser.add_option("-s", "--safe", dest="safe", default=False, - metavar="SAFE_MODE", + parser.add_option("-s", "--safe", dest="safe_mode", default=False, help="same mode ('replace', 'remove' or 'escape' user's HTML tag)") - - parser.add_option("--noisy", - action="store_const", const=DEBUG, dest="verbose", - help="print debug messages") + parser.add_option("-m", "--output_format", default=DEFAULT_OUTPUT_FORMAT, + dest="output_format", + help="output formats ('xhtml1', 'xhtml', 'html4' or html)") parser.add_option("-x", "--extension", action="append", dest="extensions", help = "load extension EXTENSION", metavar="EXTENSION") @@ -1882,33 +641,21 @@ def parse_options(): return {'input': input_file, 'output': options.filename, - 'message_threshold': options.verbose, - 'safe': options.safe, + 'safe_mode': options.safe_mode, 'extensions': options.extensions, - 'encoding': options.encoding } + 'encoding': options.encoding, + 'output_format': options.output_format } def main(): options = parse_options() - #if os.access(inFile, os.R_OK): - if not options: sys.exit(0) markdownFromFile(**options) + if __name__ == '__main__': sys.exit(main()) - """ Run Markdown from the command line. """ - - - - - - - - - - - + ''' Run Markdown from the command line. ''' diff --git a/src/calibre/ebooks/markdown/mdx_footnotes.py b/src/calibre/ebooks/markdown/mdx_footnotes.py deleted file mode 100644 index de79ed49a1..0000000000 --- a/src/calibre/ebooks/markdown/mdx_footnotes.py +++ /dev/null @@ -1,257 +0,0 @@ -""" -========================= FOOTNOTES ================================= - -This section adds footnote handling to markdown. It can be used as -an example for extending python-markdown with relatively complex -functionality. While in this case the extension is included inside -the module itself, it could just as easily be added from outside the -module. Not that all markdown classes above are ignorant about -footnotes. All footnote functionality is provided separately and -then added to the markdown instance at the run time. - -Footnote functionality is attached by calling extendMarkdown() -method of FootnoteExtension. The method also registers the -extension to allow it's state to be reset by a call to reset() -method. 
-""" - -FN_BACKLINK_TEXT = "zz1337820767766393qq" - - -import re, markdown, random - -class FootnoteExtension (markdown.Extension): - - DEF_RE = re.compile(r'(\ ?\ ?\ ?)\[\^([^\]]*)\]:\s*(.*)') - SHORT_USE_RE = re.compile(r'\[\^([^\]]*)\]', re.M) # [^a] - - def __init__ (self, configs) : - - self.config = {'PLACE_MARKER' : - ["///Footnotes Go Here///", - "The text string that marks where the footnotes go"]} - - for key, value in configs : - self.config[key][0] = value - - self.reset() - - def extendMarkdown(self, md, md_globals) : - - self.md = md - - # Stateless extensions do not need to be registered - md.registerExtension(self) - - # Insert a preprocessor before ReferencePreprocessor - index = md.preprocessors.index(md_globals['REFERENCE_PREPROCESSOR']) - preprocessor = FootnotePreprocessor(self) - preprocessor.md = md - md.preprocessors.insert(index, preprocessor) - - # Insert an inline pattern before ImageReferencePattern - FOOTNOTE_RE = r'\[\^([^\]]*)\]' # blah blah [^1] blah - index = md.inlinePatterns.index(md_globals['IMAGE_REFERENCE_PATTERN']) - md.inlinePatterns.insert(index, FootnotePattern(FOOTNOTE_RE, self)) - - # Insert a post-processor that would actually add the footnote div - postprocessor = FootnotePostprocessor(self) - postprocessor.extension = self - - md.postprocessors.append(postprocessor) - - textPostprocessor = FootnoteTextPostprocessor(self) - - md.textPostprocessors.append(textPostprocessor) - - - def reset(self) : - # May be called by Markdown is state reset is desired - - self.footnote_suffix = "-" + str(int(random.random()*1000000000)) - self.used_footnotes={} - self.footnotes = {} - - def findFootnotesPlaceholder(self, doc) : - def findFootnotePlaceholderFn(node=None, indent=0): - if node.type == 'text': - if node.value.find(self.getConfig("PLACE_MARKER")) > -1 : - return True - - fn_div_list = doc.find(findFootnotePlaceholderFn) - if fn_div_list : - return fn_div_list[0] - - - def setFootnote(self, id, text) : - self.footnotes[id] = text - - def makeFootnoteId(self, num) : - return 'fn%d%s' % (num, self.footnote_suffix) - - def makeFootnoteRefId(self, num) : - return 'fnr%d%s' % (num, self.footnote_suffix) - - def makeFootnotesDiv (self, doc) : - """Creates the div with class='footnote' and populates it with - the text of the footnotes. 
- - @returns: the footnote div as a dom element """ - - if not self.footnotes.keys() : - return None - - div = doc.createElement("div") - div.setAttribute('class', 'footnote') - hr = doc.createElement("hr") - div.appendChild(hr) - ol = doc.createElement("ol") - div.appendChild(ol) - - footnotes = [(self.used_footnotes[id], id) - for id in self.footnotes.keys()] - footnotes.sort() - - for i, id in footnotes : - li = doc.createElement('li') - li.setAttribute('id', self.makeFootnoteId(i)) - - self.md._processSection(li, self.footnotes[id].split("\n"), looseList=1) - - #li.appendChild(doc.createTextNode(self.footnotes[id])) - - backlink = doc.createElement('a') - backlink.setAttribute('href', '#' + self.makeFootnoteRefId(i)) - backlink.setAttribute('class', 'footnoteBackLink') - backlink.setAttribute('title', - 'Jump back to footnote %d in the text' % 1) - backlink.appendChild(doc.createTextNode(FN_BACKLINK_TEXT)) - - if li.childNodes : - node = li.childNodes[-1] - if node.type == "text" : - li.appendChild(backlink) - elif node.nodeName == "p": - node.appendChild(backlink) - else: - p = doc.createElement('p') - p.appendChild(backlink) - li.appendChild(p) - - ol.appendChild(li) - - return div - - -class FootnotePreprocessor : - - def __init__ (self, footnotes) : - self.footnotes = footnotes - - def run(self, lines) : - - self.blockGuru = markdown.BlockGuru() - lines = self._handleFootnoteDefinitions (lines) - - # Make a hash of all footnote marks in the text so that we - # know in what order they are supposed to appear. (This - # function call doesn't really substitute anything - it's just - # a way to get a callback for each occurence. - - text = "\n".join(lines) - self.footnotes.SHORT_USE_RE.sub(self.recordFootnoteUse, text) - - return text.split("\n") - - - def recordFootnoteUse(self, match) : - - id = match.group(1) - id = id.strip() - nextNum = len(self.footnotes.used_footnotes.keys()) + 1 - self.footnotes.used_footnotes[id] = nextNum - - - def _handleFootnoteDefinitions(self, lines) : - """Recursively finds all footnote definitions in the lines. - - @param lines: a list of lines of text - @returns: a string representing the text with footnote - definitions removed """ - - i, id, footnote = self._findFootnoteDefinition(lines) - - if id : - - plain = lines[:i] - - detabbed, theRest = self.blockGuru.detectTabbed(lines[i+1:]) - - self.footnotes.setFootnote(id, - footnote + "\n" - + "\n".join(detabbed)) - - more_plain = self._handleFootnoteDefinitions(theRest) - return plain + [""] + more_plain - - else : - return lines - - def _findFootnoteDefinition(self, lines) : - """Finds the first line of a footnote definition. 
- - @param lines: a list of lines of text - @returns: the index of the line containing a footnote definition """ - - counter = 0 - for line in lines : - m = self.footnotes.DEF_RE.match(line) - if m : - return counter, m.group(2), m.group(3) - counter += 1 - return counter, None, None - - -class FootnotePattern (markdown.Pattern) : - - def __init__ (self, pattern, footnotes) : - - markdown.Pattern.__init__(self, pattern) - self.footnotes = footnotes - - def handleMatch(self, m, doc) : - sup = doc.createElement('sup') - a = doc.createElement('a') - sup.appendChild(a) - id = m.group(2) - num = self.footnotes.used_footnotes[id] - sup.setAttribute('id', self.footnotes.makeFootnoteRefId(num)) - a.setAttribute('href', '#' + self.footnotes.makeFootnoteId(num)) - a.appendChild(doc.createTextNode(str(num))) - return sup - -class FootnotePostprocessor (markdown.Postprocessor): - - def __init__ (self, footnotes) : - self.footnotes = footnotes - - def run(self, doc) : - footnotesDiv = self.footnotes.makeFootnotesDiv(doc) - if footnotesDiv : - fnPlaceholder = self.extension.findFootnotesPlaceholder(doc) - if fnPlaceholder : - fnPlaceholder.parent.replaceChild(fnPlaceholder, footnotesDiv) - else : - doc.documentElement.appendChild(footnotesDiv) - -class FootnoteTextPostprocessor (markdown.Postprocessor): - - def __init__ (self, footnotes) : - self.footnotes = footnotes - - def run(self, text) : - return text.replace(FN_BACKLINK_TEXT, "↩") - -def makeExtension(configs=None) : - return FootnoteExtension(configs=configs) - diff --git a/src/calibre/ebooks/markdown/mdx_tables.py b/src/calibre/ebooks/markdown/mdx_tables.py deleted file mode 100644 index c5c84a4adf..0000000000 --- a/src/calibre/ebooks/markdown/mdx_tables.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python - -""" -Table extension for Python-Markdown -""" - -import markdown - - -class TablePattern(markdown.Pattern) : - def __init__ (self, md): - markdown.Pattern.__init__(self, r'^\|([^\n]*)\|(\n|$)') - self.md = md - - def handleMatch(self, m, doc) : - # a single line represents a row - tr = doc.createElement('tr') - tr.appendChild(doc.createTextNode('\n')) - # chunks between pipes represent cells - for t in m.group(2).split('|'): - if len(t) >= 2 and t.startswith('*') and t.endswith('*'): - # if a cell is bounded by asterisks, it is a <th> - td = doc.createElement('th') - t = t[1:-1] - else: - # otherwise it is a <td> - td = doc.createElement('td') - # apply inline patterns on chunks - for n in self.md._handleInline(t): - if(type(n) == unicode): - td.appendChild(doc.createTextNode(n)) - else: - td.appendChild(n) - tr.appendChild(td) - # very long lines are evil - tr.appendChild(doc.createTextNode('\n')) - return tr - - -class TablePostprocessor: - def run(self, doc): - # markdown wrapped our <tr>s in a <p>, we fix that here - def test_for_p(element): - return element.type == 'element' and element.nodeName == 'p' - # replace "p > tr" with "table > tr" - for element in doc.find(test_for_p): - for node in element.childNodes: - if(node.type == 'text' and node.value.strip() == ''): - # skip leading whitespace - continue - if (node.type == 'element' and node.nodeName == 'tr'): - element.nodeName = 'table' - break - - -class TableExtension(markdown.Extension): - def extendMarkdown(self, md, md_globals): - md.inlinePatterns.insert(0, TablePattern(md)) - md.postprocessors.append(TablePostprocessor()) - - -def makeExtension(configs): - return TableExtension(configs) - - diff --git a/src/calibre/ebooks/markdown/mdx_toc.py 
b/src/calibre/ebooks/markdown/mdx_toc.py deleted file mode 100644 index 322b820a4e..0000000000 --- a/src/calibre/ebooks/markdown/mdx_toc.py +++ /dev/null @@ -1,170 +0,0 @@ -## To access this file as plain text go to -## http://freewisdom.org/projects/python-markdown/mdx_toc.raw_content - -""" -Chris Clark - clach04 -at- sf.net - -My markdown extensions for adding: - Table of Contents (aka toc) -""" - -import re -import markdown - -DEFAULT_TITLE = None - -def extract_alphanumeric(in_str=None): - """take alpha-numeric (7bit ascii) and return as a string - """ - # I'm sure this is really inefficient and - # could be done with a lambda/map() - #x.strip(). title().replace(' ', "") - out_str=[] - for x in in_str: - x = icu_title(x) - if x.isalnum(): out_str.append(x) - return ''.join(out_str) - -class TitlePostprocessor (markdown.Postprocessor): - - def __init__ (self, extension) : - self.extension = extension - - def run(self, doc) : - titleElement = self.extension.createTitle(doc) - if titleElement : - doc.documentElement.insertChild(0, titleElement) - - -class TocExtension (markdown.Extension): - """Markdown extension: generate a Table Of Contents (aka toc) - toc is returned in a div tag with class='toc' - toc is either: - appended to end of document - OR - replaces first string occurence of "///Table of Contents Goes Here///" - """ - - def __init__ (self, configs={}) : - #maybe add these as parameters to the class init? - self.TOC_INCLUDE_MARKER = "///Table of Contents///" - self.TOC_TITLE = "Table Of Contents" - self.auto_toc_heading_type=2 - self.toc_heading_type=3 - self.configs = configs - - def extendMarkdown(self, md, md_globals) : - # Just insert in the end - md.postprocessors.append(TocPostprocessor(self)) - # Stateless extensions do not need to be registered, so we don't - # register. - - def findTocPlaceholder(self, doc) : - def findTocPlaceholderFn(node=None, indent=0): - if node.type == 'text': - if node.value.find(self.TOC_INCLUDE_MARKER) > -1 : - return True - - toc_div_list = doc.find(findTocPlaceholderFn) - if toc_div_list : - return toc_div_list[0] - - - def createTocDiv(self, doc) : - """ - Creates Table Of Contents based on headers. 
- - @returns: toc as a single as a dom element - in a <div> tag with class='toc' - """ - - # Find headers - headers_compiled_re = re.compile("h[123456]", re.IGNORECASE) - def findHeadersFn(element=None): - if element.type=='element': - if headers_compiled_re.match(element.nodeName): - return True - - headers_doc_list = doc.find(findHeadersFn) - - # Insert anchor tags into dom - generated_anchor_id=0 - headers_list=[] - min_header_size_found = 6 - for element in headers_doc_list: - heading_title = element.childNodes[0].value - if heading_title.strip() !="": - heading_type = int(element.nodeName[-1:]) - if heading_type == self.auto_toc_heading_type: - min_header_size_found=min(min_header_size_found, - heading_type) - - html_anchor_name= (extract_alphanumeric(heading_title) - +'__MD_autoTOC_%d' % (generated_anchor_id)) - - # insert anchor tag inside header tags - html_anchor = doc.createElement("a") - html_anchor.setAttribute('name', html_anchor_name) - element.appendChild(html_anchor) - - headers_list.append( (heading_type, heading_title, - html_anchor_name) ) - generated_anchor_id = generated_anchor_id + 1 - - # create dom for toc - if headers_list != []: - # Create list - toc_doc_list = doc.createElement("ul") - for (heading_type, heading_title, html_anchor_name) in headers_list: - if heading_type == self.auto_toc_heading_type: - toc_doc_entry = doc.createElement("li") - toc_doc_link = doc.createElement("a") - toc_doc_link.setAttribute('href', '#'+html_anchor_name) - toc_doc_text = doc.createTextNode(heading_title) - toc_doc_link.appendChild(toc_doc_text) - toc_doc_entry.appendChild(toc_doc_link) - toc_doc_list.appendChild(toc_doc_entry) - - - # Put list into div - div = doc.createElement("div") - div.setAttribute('class', 'toc') - if self.TOC_TITLE: - toc_header = doc.createElement("h%d"%(self.toc_heading_type) ) - toc_header_text = doc.createTextNode(self.TOC_TITLE) - toc_header.appendChild(toc_header_text) - div.appendChild(toc_header) - div.appendChild(toc_doc_list) - #hr = doc.createElement("hr") - #div.appendChild(hr) - - return div - - -class TocPostprocessor (markdown.Postprocessor): - - def __init__ (self, toc) : - self.toc = toc - - def run(self, doc): - tocPlaceholder = self.toc.findTocPlaceholder(doc) - - if self.toc.configs.get("disable_toc", False): - if tocPlaceholder: - tocPlaceholder.parent.replaceChild(tocPlaceholder, "") - else: - - tocDiv = self.toc.createTocDiv(doc) - - if tocDiv: - if tocPlaceholder : - # Replace "magic" pattern with toc - tocPlaceholder.parent.replaceChild(tocPlaceholder, tocDiv) - else : - # Dump at the end of the DOM - # Probably want to use CSS to position div - doc.documentElement.appendChild(tocDiv) - - -def makeExtension(configs={}): - return TocExtension(configs=configs) diff --git a/src/calibre/ebooks/markdown/odict.py b/src/calibre/ebooks/markdown/odict.py new file mode 100644 index 0000000000..bf3ef07182 --- /dev/null +++ b/src/calibre/ebooks/markdown/odict.py @@ -0,0 +1,162 @@ +class OrderedDict(dict): + """ + A dictionary that keeps its keys in the order in which they're inserted. + + Copied from Django's SortedDict with some modifications. 
+ + """ + def __new__(cls, *args, **kwargs): + instance = super(OrderedDict, cls).__new__(cls, *args, **kwargs) + instance.keyOrder = [] + return instance + + def __init__(self, data=None): + if data is None: + data = {} + super(OrderedDict, self).__init__(data) + if isinstance(data, dict): + self.keyOrder = data.keys() + else: + self.keyOrder = [] + for key, value in data: + if key not in self.keyOrder: + self.keyOrder.append(key) + + def __deepcopy__(self, memo): + from copy import deepcopy + return self.__class__([(key, deepcopy(value, memo)) + for key, value in self.iteritems()]) + + def __setitem__(self, key, value): + super(OrderedDict, self).__setitem__(key, value) + if key not in self.keyOrder: + self.keyOrder.append(key) + + def __delitem__(self, key): + super(OrderedDict, self).__delitem__(key) + self.keyOrder.remove(key) + + def __iter__(self): + for k in self.keyOrder: + yield k + + def pop(self, k, *args): + result = super(OrderedDict, self).pop(k, *args) + try: + self.keyOrder.remove(k) + except ValueError: + # Key wasn't in the dictionary in the first place. No problem. + pass + return result + + def popitem(self): + result = super(OrderedDict, self).popitem() + self.keyOrder.remove(result[0]) + return result + + def items(self): + return zip(self.keyOrder, self.values()) + + def iteritems(self): + for key in self.keyOrder: + yield key, super(OrderedDict, self).__getitem__(key) + + def keys(self): + return self.keyOrder[:] + + def iterkeys(self): + return iter(self.keyOrder) + + def values(self): + return [super(OrderedDict, self).__getitem__(k) for k in self.keyOrder] + + def itervalues(self): + for key in self.keyOrder: + yield super(OrderedDict, self).__getitem__(key) + + def update(self, dict_): + for k, v in dict_.items(): + self.__setitem__(k, v) + + def setdefault(self, key, default): + if key not in self.keyOrder: + self.keyOrder.append(key) + return super(OrderedDict, self).setdefault(key, default) + + def value_for_index(self, index): + """Return the value of the item at the given zero-based index.""" + return self[self.keyOrder[index]] + + def insert(self, index, key, value): + """Insert the key, value pair before the item with the given index.""" + if key in self.keyOrder: + n = self.keyOrder.index(key) + del self.keyOrder[n] + if n < index: + index -= 1 + self.keyOrder.insert(index, key) + super(OrderedDict, self).__setitem__(key, value) + + def copy(self): + """Return a copy of this object.""" + # This way of initializing the copy means it works for subclasses, too. + obj = self.__class__(self) + obj.keyOrder = self.keyOrder[:] + return obj + + def __repr__(self): + """ + Replace the normal dict.__repr__ with a version that returns the keys + in their sorted order. + """ + return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()]) + + def clear(self): + super(OrderedDict, self).clear() + self.keyOrder = [] + + def index(self, key): + """ Return the index of a given key. """ + return self.keyOrder.index(key) + + def index_for_location(self, location): + """ Return index or None for a given location. """ + if location == '_begin': + i = 0 + elif location == '_end': + i = None + elif location.startswith('<') or location.startswith('>'): + i = self.index(location[1:]) + if location.startswith('>'): + if i >= len(self): + # last item + i = None + else: + i += 1 + else: + raise ValueError('Not a valid location: "%s". Location key ' + 'must start with a ">" or "<".' 
                                 % location)
+        return i
+
+    def add(self, key, value, location):
+        """ Insert by key location. """
+        i = self.index_for_location(location)
+        if i is not None:
+            self.insert(i, key, value)
+        else:
+            self.__setitem__(key, value)
+
+    def link(self, key, location):
+        """ Change location of an existing item. """
+        n = self.keyOrder.index(key)
+        del self.keyOrder[n]
+        i = self.index_for_location(location)
+        try:
+            if i is not None:
+                self.keyOrder.insert(i, key)
+            else:
+                self.keyOrder.append(key)
+        except Exception:
+            # restore to prevent data loss and reraise
+            self.keyOrder.insert(n, key)
+            raise
diff --git a/src/calibre/ebooks/markdown/postprocessors.py b/src/calibre/ebooks/markdown/postprocessors.py
new file mode 100644
index 0000000000..80227bb909
--- /dev/null
+++ b/src/calibre/ebooks/markdown/postprocessors.py
@@ -0,0 +1,77 @@
+"""
+POST-PROCESSORS
+=============================================================================
+
+Markdown also allows post-processors, which are similar to preprocessors in
+that they need to implement a "run" method. However, they are run after core
+processing.
+
+"""
+
+
+import markdown
+
+class Processor:
+    def __init__(self, markdown_instance=None):
+        if markdown_instance:
+            self.markdown = markdown_instance
+
+class Postprocessor(Processor):
+    """
+    Postprocessors are run after the ElementTree is converted back into text.
+
+    Each Postprocessor implements a "run" method that takes a pointer to a
+    text string, modifies it as necessary and returns a text string.
+
+    Postprocessors must extend markdown.Postprocessor.
+
+    """
+
+    def run(self, text):
+        """
+        Subclasses of Postprocessor should implement a `run` method, which
+        takes the html document as a single text string and returns a
+        (possibly modified) string.
+
+        """
+        pass
+
+
+class RawHtmlPostprocessor(Postprocessor):
+    """ Restore raw html to the document. """
+
+    def run(self, text):
+        """ Iterate over html stash and restore "safe" html. """
+        for i in range(self.markdown.htmlStash.html_counter):
+            html, safe = self.markdown.htmlStash.rawHtmlBlocks[i]
+            if self.markdown.safeMode and not safe:
+                if str(self.markdown.safeMode).lower() == 'escape':
+                    html = self.escape(html)
+                elif str(self.markdown.safeMode).lower() == 'remove':
+                    html = ''
+                else:
+                    html = markdown.HTML_REMOVED_TEXT
+            if safe or not self.markdown.safeMode:
+                text = text.replace("<p>%s</p>" %
+                            (markdown.preprocessors.HTML_PLACEHOLDER % i),
+                            html + "\n")
+            text = text.replace(markdown.preprocessors.HTML_PLACEHOLDER % i,
+                                html)
+        return text
+
+    def escape(self, html):
+        """ Basic html escaping """
+        html = html.replace('&', '&amp;')
+        html = html.replace('<', '&lt;')
+        html = html.replace('>', '&gt;')
+        return html.replace('"', '&quot;')
+
+
+class AndSubstitutePostprocessor(Postprocessor):
+    """ Restore valid entities """
+    def __init__(self):
+        pass
+
+    def run(self, text):
+        text = text.replace(markdown.AMP_SUBSTITUTE, "&")
+        return text
diff --git a/src/calibre/ebooks/markdown/preprocessors.py b/src/calibre/ebooks/markdown/preprocessors.py
new file mode 100644
index 0000000000..712a1e8755
--- /dev/null
+++ b/src/calibre/ebooks/markdown/preprocessors.py
@@ -0,0 +1,214 @@
+
+"""
+PRE-PROCESSORS
+=============================================================================
+
+Preprocessors work on source text before we start doing anything too
+complicated.
+""" + +import re +import markdown + +HTML_PLACEHOLDER_PREFIX = markdown.STX+"wzxhzdk:" +HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%d" + markdown.ETX + +class Processor: + def __init__(self, markdown_instance=None): + if markdown_instance: + self.markdown = markdown_instance + +class Preprocessor (Processor): + """ + Preprocessors are run after the text is broken into lines. + + Each preprocessor implements a "run" method that takes a pointer to a + list of lines of the document, modifies it as necessary and returns + either the same pointer or a pointer to a new list. + + Preprocessors must extend markdown.Preprocessor. + + """ + def run(self, lines): + """ + Each subclass of Preprocessor should override the `run` method, which + takes the document as a list of strings split by newlines and returns + the (possibly modified) list of lines. + + """ + pass + +class HtmlStash: + """ + This class is used for stashing HTML objects that we extract + in the beginning and replace with place-holders. + """ + + def __init__ (self): + """ Create a HtmlStash. """ + self.html_counter = 0 # for counting inline html segments + self.rawHtmlBlocks=[] + + def store(self, html, safe=False): + """ + Saves an HTML segment for later reinsertion. Returns a + placeholder string that needs to be inserted into the + document. + + Keyword arguments: + + * html: an html segment + * safe: label an html segment as safe for safemode + + Returns : a placeholder string + + """ + self.rawHtmlBlocks.append((html, safe)) + placeholder = HTML_PLACEHOLDER % self.html_counter + self.html_counter += 1 + return placeholder + + def reset(self): + self.html_counter = 0 + self.rawHtmlBlocks = [] + + +class HtmlBlockPreprocessor(Preprocessor): + """Remove html blocks from the text and store them for later retrieval.""" + + right_tag_patterns = ["</%s>", "%s>"] + + def _get_left_tag(self, block): + return block[1:].replace(">", " ", 1).split()[0].lower() + + def _get_right_tag(self, left_tag, block): + for p in self.right_tag_patterns: + tag = p % left_tag + i = block.rfind(tag) + if i > 2: + return tag.lstrip("<").rstrip(">"), i + len(p)-2 + len(left_tag) + return block.rstrip()[-len(left_tag)-2:-1].lower(), len(block) + + def _equal_tags(self, left_tag, right_tag): + if left_tag == 'div' or left_tag[0] in ['?', '@', '%']: # handle PHP, etc. 
+ return True + if ("/" + left_tag) == right_tag: + return True + if (right_tag == "--" and left_tag == "--"): + return True + elif left_tag == right_tag[1:] \ + and right_tag[0] != "<": + return True + else: + return False + + def _is_oneliner(self, tag): + return (tag in ['hr', 'hr/']) + + def run(self, lines): + text = "\n".join(lines) + new_blocks = [] + text = text.split("\n\n") + items = [] + left_tag = '' + right_tag = '' + in_tag = False # flag + + while text: + block = text[0] + if block.startswith("\n"): + block = block[1:] + text = text[1:] + + if block.startswith("\n"): + block = block[1:] + + if not in_tag: + if block.startswith("<"): + left_tag = self._get_left_tag(block) + right_tag, data_index = self._get_right_tag(left_tag, block) + + if data_index < len(block): + text.insert(0, block[data_index:]) + block = block[:data_index] + + if not (markdown.isBlockLevel(left_tag) \ + or block[1] in ["!", "?", "@", "%"]): + new_blocks.append(block) + continue + + if self._is_oneliner(left_tag): + new_blocks.append(block.strip()) + continue + + if block[1] == "!": + # is a comment block + left_tag = "--" + right_tag, data_index = self._get_right_tag(left_tag, block) + # keep checking conditions below and maybe just append + + if block.rstrip().endswith(">") \ + and self._equal_tags(left_tag, right_tag): + new_blocks.append( + self.markdown.htmlStash.store(block.strip())) + continue + else: #if not block[1] == "!": + # if is block level tag and is not complete + + if markdown.isBlockLevel(left_tag) or left_tag == "--" \ + and not block.rstrip().endswith(">"): + items.append(block.strip()) + in_tag = True + else: + new_blocks.append( + self.markdown.htmlStash.store(block.strip())) + + continue + + new_blocks.append(block) + + else: + items.append(block.strip()) + + right_tag, data_index = self._get_right_tag(left_tag, block) + + if self._equal_tags(left_tag, right_tag): + # if find closing tag + in_tag = False + new_blocks.append( + self.markdown.htmlStash.store('\n\n'.join(items))) + items = [] + + if items: + new_blocks.append(self.markdown.htmlStash.store('\n\n'.join(items))) + new_blocks.append('\n') + + new_text = "\n\n".join(new_blocks) + return new_text.split("\n") + + +class ReferencePreprocessor(Preprocessor): + """ Remove reference definitions from text and store for later use. """ + + RE = re.compile(r'^(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)$', re.DOTALL) + + def run (self, lines): + new_text = []; + for line in lines: + m = self.RE.match(line) + if m: + id = m.group(2).strip().lower() + t = m.group(4).strip() # potential title + if not t: + self.markdown.references[id] = (m.group(3), t) + elif (len(t) >= 2 + and (t[0] == t[-1] == "\"" + or t[0] == t[-1] == "\'" + or (t[0] == "(" and t[-1] == ")") ) ): + self.markdown.references[id] = (m.group(3), t[1:-1]) + else: + new_text.append(line) + else: + new_text.append(line) + + return new_text #+ "\n" diff --git a/src/calibre/ebooks/markdown/treeprocessors.py b/src/calibre/ebooks/markdown/treeprocessors.py new file mode 100644 index 0000000000..1dc612a95e --- /dev/null +++ b/src/calibre/ebooks/markdown/treeprocessors.py @@ -0,0 +1,329 @@ +import markdown +import re + +def isString(s): + """ Check if it's string """ + return isinstance(s, unicode) or isinstance(s, str) + +class Processor: + def __init__(self, markdown_instance=None): + if markdown_instance: + self.markdown = markdown_instance + +class Treeprocessor(Processor): + """ + Treeprocessors are run on the ElementTree object before serialization. 
+ + Each Treeprocessor implements a "run" method that takes a pointer to an + ElementTree, modifies it as necessary and returns an ElementTree + object. + + Treeprocessors must extend markdown.Treeprocessor. + + """ + def run(self, root): + """ + Subclasses of Treeprocessor should implement a `run` method, which + takes a root ElementTree. This method can return another ElementTree + object, and the existing root ElementTree will be replaced, or it can + modify the current tree and return None. + """ + pass + + +class InlineProcessor(Treeprocessor): + """ + A Treeprocessor that traverses a tree, applying inline patterns. + """ + + def __init__ (self, md): + self.__placeholder_prefix = markdown.INLINE_PLACEHOLDER_PREFIX + self.__placeholder_suffix = markdown.ETX + self.__placeholder_length = 4 + len(self.__placeholder_prefix) \ + + len(self.__placeholder_suffix) + self.__placeholder_re = re.compile(markdown.INLINE_PLACEHOLDER % r'([0-9]{4})') + self.markdown = md + + def __makePlaceholder(self, type): + """ Generate a placeholder """ + id = "%04d" % len(self.stashed_nodes) + hash = markdown.INLINE_PLACEHOLDER % id + return hash, id + + def __findPlaceholder(self, data, index): + """ + Extract id from data string, start from index + + Keyword arguments: + + * data: string + * index: index, from which we start search + + Returns: placeholder id and string index, after the found placeholder. + """ + + m = self.__placeholder_re.search(data, index) + if m: + return m.group(1), m.end() + else: + return None, index + 1 + + def __stashNode(self, node, type): + """ Add node to stash """ + placeholder, id = self.__makePlaceholder(type) + self.stashed_nodes[id] = node + return placeholder + + def __handleInline(self, data, patternIndex=0): + """ + Process string with inline patterns and replace it + with placeholders + + Keyword arguments: + + * data: A line of Markdown text + * patternIndex: The index of the inlinePattern to start with + + Returns: String with placeholders. + + """ + if not isinstance(data, markdown.AtomicString): + startIndex = 0 + while patternIndex < len(self.markdown.inlinePatterns): + data, matched, startIndex = self.__applyPattern( + self.markdown.inlinePatterns.value_for_index(patternIndex), + data, patternIndex, startIndex) + if not matched: + patternIndex += 1 + return data + + def __processElementText(self, node, subnode, isText=True): + """ + Process placeholders in Element.text or Element.tail + of Elements popped from self.stashed_nodes. + + Keywords arguments: + + * node: parent node + * subnode: processing node + * isText: bool variable, True - it's text, False - it's tail + + Returns: None + + """ + if isText: + text = subnode.text + subnode.text = None + else: + text = subnode.tail + subnode.tail = None + + childResult = self.__processPlaceholders(text, subnode) + + if not isText and node is not subnode: + pos = node.getchildren().index(subnode) + node.remove(subnode) + else: + pos = 0 + + childResult.reverse() + for newChild in childResult: + node.insert(pos, newChild) + + def __processPlaceholders(self, data, parent): + """ + Process string with placeholders and generate ElementTree tree. + + Keyword arguments: + + * data: string with placeholders instead of ElementTree elements. + * parent: Element, which contains processing inline data + + Returns: list with ElementTree elements with applied inline patterns. 
+ """ + def linkText(text): + if text: + if result: + if result[-1].tail: + result[-1].tail += text + else: + result[-1].tail = text + else: + if parent.text: + parent.text += text + else: + parent.text = text + + result = [] + strartIndex = 0 + while data: + index = data.find(self.__placeholder_prefix, strartIndex) + if index != -1: + id, phEndIndex = self.__findPlaceholder(data, index) + + if id in self.stashed_nodes: + node = self.stashed_nodes.get(id) + + if index > 0: + text = data[strartIndex:index] + linkText(text) + + if not isString(node): # it's Element + for child in [node] + node.getchildren(): + if child.tail: + if child.tail.strip(): + self.__processElementText(node, child, False) + if child.text: + if child.text.strip(): + self.__processElementText(child, child) + else: # it's just a string + linkText(node) + strartIndex = phEndIndex + continue + + strartIndex = phEndIndex + result.append(node) + + else: # wrong placeholder + end = index + len(prefix) + linkText(data[strartIndex:end]) + strartIndex = end + else: + text = data[strartIndex:] + linkText(text) + data = "" + + return result + + def __applyPattern(self, pattern, data, patternIndex, startIndex=0): + """ + Check if the line fits the pattern, create the necessary + elements, add it to stashed_nodes. + + Keyword arguments: + + * data: the text to be processed + * pattern: the pattern to be checked + * patternIndex: index of current pattern + * startIndex: string index, from which we starting search + + Returns: String with placeholders instead of ElementTree elements. + + """ + match = pattern.getCompiledRegExp().match(data[startIndex:]) + leftData = data[:startIndex] + + if not match: + return data, False, 0 + + node = pattern.handleMatch(match) + + if node is None: + return data, True, len(leftData) + match.span(len(match.groups()))[0] + + if not isString(node): + if not isinstance(node.text, markdown.AtomicString): + # We need to process current node too + for child in [node] + node.getchildren(): + if not isString(node): + if child.text: + child.text = self.__handleInline(child.text, + patternIndex + 1) + if child.tail: + child.tail = self.__handleInline(child.tail, + patternIndex) + + placeholder = self.__stashNode(node, pattern.type()) + + return "%s%s%s%s" % (leftData, + match.group(1), + placeholder, match.groups()[-1]), True, 0 + + def run(self, tree): + """Apply inline patterns to a parsed Markdown tree. + + Iterate over ElementTree, find elements with inline tag, apply inline + patterns and append newly created Elements to tree. If you don't + want process your data with inline paterns, instead of normal string, + use subclass AtomicString: + + node.text = markdown.AtomicString("data won't be processed with inline patterns") + + Arguments: + + * markdownTree: ElementTree object, representing Markdown tree. + + Returns: ElementTree object with applied inline patterns. 
+
+        """
+        self.stashed_nodes = {}
+
+        stack = [tree]
+
+        while stack:
+            currElement = stack.pop()
+            insertQueue = []
+            for child in currElement.getchildren():
+                if child.text and not isinstance(child.text, markdown.AtomicString):
+                    text = child.text
+                    child.text = None
+                    lst = self.__processPlaceholders(self.__handleInline(
+                                                     text), child)
+                    stack += lst
+                    insertQueue.append((child, lst))
+
+                if child.getchildren():
+                    stack.append(child)
+
+            for element, lst in insertQueue:
+                if element.text:
+                    element.text = \
+                        markdown.inlinepatterns.handleAttributes(element.text,
+                                                                 element)
+                i = 0
+                for newChild in lst:
+                    # Processing attributes
+                    if newChild.tail:
+                        newChild.tail = \
+                            markdown.inlinepatterns.handleAttributes(newChild.tail,
+                                                                     element)
+                    if newChild.text:
+                        newChild.text = \
+                            markdown.inlinepatterns.handleAttributes(newChild.text,
+                                                                     newChild)
+                    element.insert(i, newChild)
+                    i += 1
+        return tree
+
+
+class PrettifyTreeprocessor(Treeprocessor):
+    """ Add linebreaks to the html document. """
+
+    def _prettifyETree(self, elem):
+        """ Recursively add linebreaks to ElementTree children. """
+
+        i = "\n"
+        if markdown.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']:
+            if (not elem.text or not elem.text.strip()) \
+                    and len(elem) and markdown.isBlockLevel(elem[0].tag):
+                elem.text = i
+            for e in elem:
+                if markdown.isBlockLevel(e.tag):
+                    self._prettifyETree(e)
+            if not elem.tail or not elem.tail.strip():
+                elem.tail = i
+        if not elem.tail or not elem.tail.strip():
+            elem.tail = i
+
+    def run(self, root):
+        """ Add linebreaks to ElementTree root object. """
+
+        self._prettifyETree(root)
+        # Do <br />'s separately as they are often in the middle of
+        # inline content and missed by _prettifyETree.
+        brs = root.getiterator('br')
+        for br in brs:
+            if not br.tail or not br.tail.strip():
+                br.tail = '\n'
+            else:
+                br.tail = '\n%s' % br.tail
diff --git a/src/calibre/ebooks/txt/processor.py b/src/calibre/ebooks/txt/processor.py
index 54369190de..4037ee1be7 100644
--- a/src/calibre/ebooks/txt/processor.py
+++ b/src/calibre/ebooks/txt/processor.py
@@ -96,9 +96,11 @@ def convert_basic(txt, title='', epub_split_size_kb=0):
 
 def convert_markdown(txt, title='', disable_toc=False):
     from calibre.ebooks.markdown import markdown
+    extensions=['footnotes', 'tables']
+    if not disable_toc:
+        extensions.append('toc')
     md = markdown.Markdown(
-            extensions=['footnotes', 'tables', 'toc'],
-            extension_configs={"toc": {"disable_toc": disable_toc}},
+            extensions,
             safe_mode=False)
     return HTML_TEMPLATE % (title, md.convert(txt))
 
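
The txt/processor.py hunk above builds the extension list up front: with the bundled 2.0 extensions the table of contents is simply not loaded when disable_toc is requested, instead of being passed the old "disable_toc" config. A minimal sketch of the resulting call paths (the sample text and title are illustrative only):

from calibre.ebooks.txt.processor import convert_markdown

txt = '# Heading\n\nSome *markdown* text.\n'
html_plain = convert_markdown(txt, title='Example', disable_toc=True)  # footnotes + tables only
html_toc = convert_markdown(txt, title='Example')                      # footnotes + tables + toc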
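The new odict.OrderedDict is the ordered registry the rewritten core keeps its processors and patterns in; items are positioned with the '_begin'/'_end'/'<key'/'>key' location strings resolved by index_for_location(). A small self-contained sketch of that location syntax:

from calibre.ebooks.markdown.odict import OrderedDict

d = OrderedDict()
d['start'] = 1
d['end'] = 3
d.add('middle', 2, '>start')    # insert immediately after 'start'
d.add('first', 0, '_begin')     # insert at the very beginning
d.add('last', 4, '_end')        # plain append
d.link('middle', '<end')        # move an existing key to just before 'end'
print(d.keys())                 # ['first', 'start', 'middle', 'end', 'last']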
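preprocessors.py replaces the old monolithic pre-processing with small Preprocessor classes plus an HtmlStash that parks raw HTML behind STX/ETX placeholders until the RawHtmlPostprocessor restores it. A rough sketch of a custom preprocessor under this API; the class name, the '!!!' syntax and the 'note' registry key are invented for the example, and the import follows the path txt/processor.py uses, assuming the bundled module exposes its preprocessors submodule as the stash code in this patch expects:

from calibre.ebooks.markdown import markdown

class AdmonitionPreprocessor(markdown.preprocessors.Preprocessor):
    """Turn lines starting with '!!!' into stashed raw HTML asides."""
    def run(self, lines):
        out = []
        for line in lines:
            if line.startswith('!!!'):
                html = '<div class="note">%s</div>' % line[3:].strip()
                # store() returns a placeholder string; the RawHtmlPostprocessor
                # swaps the real HTML back in after serialization
                out.append(self.markdown.htmlStash.store(html, safe=True))
            else:
                out.append(line)
        return out

md = markdown.Markdown()
md.preprocessors.add('note', AdmonitionPreprocessor(md), '_end')
print(md.convert('!!! Remember to back up your library.'))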
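The ReferencePreprocessor strips link definitions out of the source and files them in md.references for the reference inline patterns to use later; a quick way to see that at work (the URL is just an example):

from calibre.ebooks.markdown import markdown

md = markdown.Markdown()
html = md.convert('[calibre]: http://calibre-ebook.com "Calibre"\n\nSee [calibre].')
# The definition line is gone from the output, and the mapping is kept as
# md.references['calibre'] == ('http://calibre-ebook.com', 'Calibre')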
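Postprocessors receive the serialized HTML string last; a custom one only has to subclass Postprocessor and return the modified text. A hedged sketch (the class and the 'stamp' key are invented for the example, and the expected output is approximate):

from calibre.ebooks.markdown import markdown

class StampPostprocessor(markdown.postprocessors.Postprocessor):
    """Append a marker comment to the finished HTML string."""
    def run(self, text):
        return text + '\n<!-- converted by calibre -->'

md = markdown.Markdown()
md.postprocessors.add('stamp', StampPostprocessor(), '_end')
print(md.convert('*hello*'))
# roughly:
# <p><em>hello</em></p>
# <!-- converted by calibre -->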
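treeprocessors.py is where the ElementTree is reworked between block parsing and serialization; the InlineProcessor above is itself just the treeprocessor that applies the inline patterns. A sketch of a custom Treeprocessor registered at the end of the chain (the class name and the 'nofollow' key are illustrative):

from calibre.ebooks.markdown import markdown

class NofollowTreeprocessor(markdown.treeprocessors.Treeprocessor):
    """Walk the finished tree and tag absolute links."""
    def run(self, root):
        for a in root.getiterator('a'):
            if a.get('href', '').startswith('http'):
                a.set('rel', 'nofollow')
        # returning None keeps the tree that was modified in place

md = markdown.Markdown()
md.treeprocessors.add('nofollow', NofollowTreeprocessor(md), '_end')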