commit 23093f6b94
Sync to trunk.
@@ -77,32 +77,23 @@ class Economist(BasicNewsRecipe):
                 continue
             self.log('Found section: %s'%section_title)
             articles = []
-            for h5 in section.findAll('h5'):
-                article_title = self.tag_to_string(h5).strip()
-                if not article_title:
-                    continue
-                data = h5.findNextSibling(attrs={'class':'article'})
-                if data is None: continue
-                a = data.find('a', href=True)
-                if a is None: continue
-                url = a['href']
-                if url.startswith('/'): url = 'http://www.economist.com'+url
-                url += '/print'
-                article_title += ': %s'%self.tag_to_string(a).strip()
-                articles.append({'title':article_title, 'url':url,
-                    'description':'', 'date':''})
-            if not articles:
-                # We have last or first section
-                for art in section.findAll(attrs={'class':'article'}):
-                    a = art.find('a', href=True)
-                    if a is not None:
-                        url = a['href']
-                        if url.startswith('/'): url = 'http://www.economist.com'+url
-                        url += '/print'
-                        title = self.tag_to_string(a)
-                        if title:
-                            articles.append({'title':title, 'url':url,
-                                'description':'', 'date':''})
+            subsection = ''
+            for node in section.findAll(attrs={'class':'article'}):
+                subsec = node.findPreviousSibling('h5')
+                if subsec is not None:
+                    subsection = self.tag_to_string(subsec)
+                prefix = (subsection+': ') if subsection else ''
+                a = node.find('a', href=True)
+                if a is not None:
+                    url = a['href']
+                    if url.startswith('/'): url = 'http://www.economist.com'+url
+                    url += '/print'
+                    title = self.tag_to_string(a)
+                    if title:
+                        title = prefix + title
+                        self.log('\tFound article:', title)
+                        articles.append({'title':title, 'url':url,
+                            'description':'', 'date':''})
 
             if articles:
                 if section_title not in feeds:
@@ -69,32 +69,23 @@ class Economist(BasicNewsRecipe):
                 continue
             self.log('Found section: %s'%section_title)
             articles = []
-            for h5 in section.findAll('h5'):
-                article_title = self.tag_to_string(h5).strip()
-                if not article_title:
-                    continue
-                data = h5.findNextSibling(attrs={'class':'article'})
-                if data is None: continue
-                a = data.find('a', href=True)
-                if a is None: continue
-                url = a['href']
-                if url.startswith('/'): url = 'http://www.economist.com'+url
-                url += '/print'
-                article_title += ': %s'%self.tag_to_string(a).strip()
-                articles.append({'title':article_title, 'url':url,
-                    'description':'', 'date':''})
-            if not articles:
-                # We have last or first section
-                for art in section.findAll(attrs={'class':'article'}):
-                    a = art.find('a', href=True)
-                    if a is not None:
-                        url = a['href']
-                        if url.startswith('/'): url = 'http://www.economist.com'+url
-                        url += '/print'
-                        title = self.tag_to_string(a)
-                        if title:
-                            articles.append({'title':title, 'url':url,
-                                'description':'', 'date':''})
+            subsection = ''
+            for node in section.findAll(attrs={'class':'article'}):
+                subsec = node.findPreviousSibling('h5')
+                if subsec is not None:
+                    subsection = self.tag_to_string(subsec)
+                prefix = (subsection+': ') if subsection else ''
+                a = node.find('a', href=True)
+                if a is not None:
+                    url = a['href']
+                    if url.startswith('/'): url = 'http://www.economist.com'+url
+                    url += '/print'
+                    title = self.tag_to_string(a)
+                    if title:
+                        title = prefix + title
+                        self.log('\tFound article:', title)
+                        articles.append({'title':title, 'url':url,
+                            'description':'', 'date':''})
 
             if articles:
                 if section_title not in feeds:
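Both recipe hunks above replace the old h5-driven walk with a single pass over every node of class 'article', attributing each one to the closest preceding h5 subsection heading. A minimal, hypothetical sketch of that traversal outside calibre (using bs4 instead of the recipe's bundled BeautifulSoup helpers, against invented markup):

# Hedged sketch only: the markup is invented and bs4 stands in for the
# recipe's own BeautifulSoup / tag_to_string helpers.
from bs4 import BeautifulSoup

html = '''
<div class="section"><h4>Briefing</h4>
  <h5>Asia</h5>
  <div class="article"><a href="/node/1">First story</a></div>
  <div class="article"><a href="/node/2">Second story</a></div>
</div>'''

soup = BeautifulSoup(html, 'html.parser')
subsection = ''
for node in soup.find_all(attrs={'class': 'article'}):
    subsec = node.find_previous_sibling('h5')
    if subsec is not None:
        subsection = subsec.get_text().strip()
    a = node.find('a', href=True)
    if a is not None:
        title = (subsection + ': ' if subsection else '') + a.get_text().strip()
        print(title, '->', a['href'])   # e.g. "Asia: First story -> /node/1"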
@@ -63,7 +63,7 @@ authors_completer_append_separator = False
 # end of an author name. The case of the suffix is ignored and trailing
 # periods are automatically handled.
 # The author name copy words are a set of words which if they occur in an
-# author name cause the automatically geenrated author sort string to be
+# author name cause the automatically generated author sort string to be
 # identical to the author name. This means that the sort for a string like Acme
 # Inc. will be Acme Inc. instead of Inc., Acme
 author_sort_copy_method = 'comma'
@@ -63,10 +63,10 @@ class Check(Command):
             for f in x[-1]:
                 y = self.j(x[0], f)
                 mtime = os.stat(y).st_mtime
-                if f.endswith('.py') and f not in ('ptempfile.py', 'feedparser.py',
-                        'pyparsing.py', 'markdown.py') and \
-                        'genshi' not in y and cache.get(y, 0) != mtime and \
-                        'prs500/driver.py' not in y:
+                if (f.endswith('.py') and f not in ('ptempfile.py', 'feedparser.py',
+                        'pyparsing.py', 'markdown.py') and
+                        'genshi' not in y and cache.get(y, 0) != mtime and
+                        'prs500/driver.py' not in y):
                     yield y, mtime
 
         for x in os.walk(self.j(self.d(self.SRC), 'recipes')):
@@ -25,7 +25,8 @@ from calibre.utils.config import to_json, from_json, prefs, tweaks
 from calibre.utils.date import utcfromtimestamp, parse_date
 from calibre.utils.filenames import is_case_sensitive
 from calibre.db.tables import (OneToOneTable, ManyToOneTable, ManyToManyTable,
-        SizeTable, FormatsTable, AuthorsTable, IdentifiersTable, CompositeTable)
+        SizeTable, FormatsTable, AuthorsTable, IdentifiersTable,
+        CompositeTable, LanguagesTable)
 # }}}
 
 '''
@@ -604,11 +605,12 @@ class DB(object):
         for col in ('series', 'publisher', 'rating'):
             tables[col] = ManyToOneTable(col, self.field_metadata[col].copy())
 
-        for col in ('authors', 'tags', 'formats', 'identifiers'):
+        for col in ('authors', 'tags', 'formats', 'identifiers', 'languages'):
             cls = {
                     'authors':AuthorsTable,
                     'formats':FormatsTable,
                     'identifiers':IdentifiersTable,
+                    'languages':LanguagesTable,
                   }.get(col, ManyToManyTable)
             tables[col] = cls(col, self.field_metadata[col].copy())
 
@@ -13,7 +13,8 @@ from functools import wraps, partial
 
 from calibre.db.locking import create_locks, RecordLock
 from calibre.db.fields import create_field
-from calibre.ebooks.book.base import Metadata
+from calibre.db.tables import VirtualTable
+from calibre.ebooks.metadata.book.base import Metadata
 from calibre.utils.date import now
 
 def api(f):
@@ -189,7 +190,8 @@ class Cache(object):
             if table.metadata['datatype'] == 'composite':
                 self.composites.add(field)
 
-        self.fields['ondevice'] = create_field('ondevice', None)
+        self.fields['ondevice'] = create_field('ondevice',
+                VirtualTable('ondevice'))
 
     @read_api
     def field_for(self, name, book_id, default_value=None):
@@ -345,8 +347,9 @@ class Cache(object):
                 as_path=as_path)
 
     @read_api
-    def multisort(self, fields):
-        all_book_ids = frozenset(self._all_book_ids())
+    def multisort(self, fields, ids_to_sort=None):
+        all_book_ids = frozenset(self._all_book_ids() if ids_to_sort is None
+                else ids_to_sort)
         get_metadata = partial(self._get_metadata, get_user_categories=False)
 
         sort_keys = tuple(self.fields[field[0]].sort_keys_for_books(get_metadata,
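The new ids_to_sort parameter lets callers sort a subset of books instead of the whole library. A hedged usage sketch (cache stands for an already initialized calibre.db.cache.Cache instance, the (field, ascending) tuples follow the existing multisort convention, and the ids are invented):

# Old behaviour, unchanged: with ids_to_sort omitted every book is sorted.
all_ids_sorted = cache.multisort([('timestamp', False)])

# New behaviour added here: only the given ids are considered, sorted first
# by series (ascending), then by title (descending).
subset_sorted = cache.multisort([('series', True), ('title', False)],
                                ids_to_sort={3, 7, 42})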
@@ -51,9 +51,13 @@ class Field(object):
 
     def __iter__(self):
         '''
-        Iterate over the ids for all values in this field
+        Iterate over the ids for all values in this field.
+
+        WARNING: Some fields such as composite fields and virtual
+        fields like ondevice do not have ids for their values, in such
+        cases this is an empty iterator.
         '''
-        raise NotImplementedError()
+        return iter(())
 
     def sort_keys_for_books(self, get_metadata, all_book_ids):
         '''
@@ -78,9 +82,6 @@ class OneToOneField(Field):
     def __iter__(self):
         return self.table.book_col_map.iterkeys()
 
-    def iter_book_ids(self):
-        return self.table.book_col_map.iterkeys()
-
     def sort_keys_for_books(self, get_metadata, all_book_ids):
         return {id_ : self._sort_key(self.book_col_map.get(id_, '')) for id_ in
                 all_book_ids}
@@ -154,9 +155,6 @@ class OnDeviceField(OneToOneField):
     def __iter__(self):
         return iter(())
 
-    def iter_book_ids(self):
-        return iter(())
-
     def sort_keys_for_books(self, get_metadata, all_book_ids):
         return {id_ : self.for_book(id_) for id_ in
                 all_book_ids}
@@ -13,6 +13,7 @@ from dateutil.tz import tzoffset
 
 from calibre.constants import plugins
 from calibre.utils.date import parse_date, local_tz, UNDEFINED_DATE
+from calibre.utils.localization import lang_map
 from calibre.ebooks.metadata import author_to_author_sort
 
 _c_speedup = plugins['speedup'][0]
@@ -54,6 +55,19 @@ class Table(object):
         self.link_table = (link_table if link_table else
                 'books_%s_link'%self.metadata['table'])
 
+class VirtualTable(Table):
+
+    '''
+    A dummy table used for fields that only exist in memory like ondevice
+    '''
+
+    def __init__(self, name, table_type=ONE_ONE, datatype='text'):
+        metadata = {'datatype':datatype, 'table':name}
+        self.table_type = table_type
+        Table.__init__(self, name, metadata)
+
+
+
 class OneToOneTable(Table):
 
     '''
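VirtualTable carries just enough metadata for fields that never touch the database; the ondevice wiring in the cache.py hunk earlier in this commit is its only caller here. A small hedged sketch of what constructing one provides (the attribute access is inferred from the Table code shown above, not spelled out in this hunk):

from calibre.db.tables import VirtualTable

vt = VirtualTable('ondevice')
# Inferred from Table.__init__ and the metadata dict built above:
print(vt.metadata['table'])     # 'ondevice'
print(vt.metadata['datatype'])  # 'text' (the default)

# cache.py in this same commit then does, while building its fields:
#     self.fields['ondevice'] = create_field('ondevice', VirtualTable('ondevice'))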
@@ -210,3 +224,9 @@ class IdentifiersTable(ManyToManyTable):
         for key in tuple(self.col_book_map.iterkeys()):
             self.col_book_map[key] = tuple(self.col_book_map[key])
 
+class LanguagesTable(ManyToManyTable):
+
+    def read_id_maps(self, db):
+        ManyToManyTable.read_id_maps(self, db)
+        lm = lang_map()
+        self.lang_name_map = {x:lm.get(x, x) for x in self.id_map.itervalues()}
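LanguagesTable adds one step on top of ManyToManyTable: it translates the stored language codes into display names via lang_map(), letting unknown codes pass through unchanged. A self-contained sketch of just that mapping step (the dictionary stands in for calibre's lang_map() and the codes are invented):

lm = {'eng': 'English', 'deu': 'German'}   # stand-in for lang_map()
stored_codes = ['eng', 'deu', 'tlh']       # hypothetical id_map values

# Same comprehension as LanguagesTable.read_id_maps(): unknown codes fall
# back to themselves.
lang_name_map = {x: lm.get(x, x) for x in stored_codes}
print(lang_name_map)   # {'eng': 'English', 'deu': 'German', 'tlh': 'tlh'}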
@@ -81,6 +81,7 @@ class ANDROID(USBMS):
 
             # LG
             0x1004 : {
                     0x61c5 : [0x100, 0x226, 0x9999],
                     0x61cc : [0x100],
+                    0x61ce : [0x100],
                     0x618e : [0x226, 0x9999, 0x100]
@@ -3,10 +3,10 @@ CORE MARKDOWN BLOCKPARSER
 =============================================================================
 
 This parser handles basic parsing of Markdown blocks. It doesn't concern itself
-with inline elements such as **bold** or *italics*, but rather just catches 
+with inline elements such as **bold** or *italics*, but rather just catches
 blocks, lists, quotes, etc.
 
-The BlockParser is made up of a bunch of BlockProssors, each handling a 
+The BlockParser is made up of a bunch of BlockProssors, each handling a
 different type of block. Extensions may add/replace/remove BlockProcessors
 as they need to alter how markdown blocks are parsed.
 
@@ -16,8 +16,8 @@ import re
 import markdown
 
 class BlockProcessor:
-    """ Base class for block processors. 
-    
+    """ Base class for block processors.
+
     Each subclass will provide the methods below to work with the source and
     tree. Each processor will need to define it's own ``test`` and ``run``
     methods. The ``test`` method should return True or False, to indicate
@@ -58,32 +58,32 @@ class BlockProcessor:
         return '\n'.join(lines)
 
     def test(self, parent, block):
-        """ Test for block type. Must be overridden by subclasses. 
-        
+        """ Test for block type. Must be overridden by subclasses.
+
         As the parser loops through processors, it will call the ``test`` method
         on each to determine if the given block of text is of that type. This
        method must return a boolean ``True`` or ``False``. The actual method of
-        testing is left to the needs of that particular block type. It could 
+        testing is left to the needs of that particular block type. It could
         be as simple as ``block.startswith(some_string)`` or a complex regular
         expression. As the block type may be different depending on the parent
-        of the block (i.e. inside a list), the parent etree element is also 
+        of the block (i.e. inside a list), the parent etree element is also
         provided and may be used as part of the test.
 
         Keywords:
-        
+
        * ``parent``: A etree element which will be the parent of the block.
-        * ``block``: A block of text from the source which has been split at 
+        * ``block``: A block of text from the source which has been split at
           blank lines.
         """
         pass
 
     def run(self, parent, blocks):
-        """ Run processor. Must be overridden by subclasses. 
-        
+        """ Run processor. Must be overridden by subclasses.
+
         When the parser determines the appropriate type of a block, the parser
         will call the corresponding processor's ``run`` method. This method
         should parse the individual lines of the block and append them to
-        the etree. 
+        the etree.
 
         Note that both the ``parent`` and ``etree`` keywords are pointers
         to instances of the objects which should be edited in place. Each
@@ -103,8 +103,8 @@ class BlockProcessor:
 
 
 class ListIndentProcessor(BlockProcessor):
-    """ Process children of list items. 
-    
+    """ Process children of list items.
+
     Example:
         * a list item
           process this part
@ -154,7 +154,7 @@ class ListIndentProcessor(BlockProcessor):
|
||||
""" Create a new li and parse the block with it as the parent. """
|
||||
li = markdown.etree.SubElement(parent, 'li')
|
||||
self.parser.parseBlocks(li, [block])
|
||||
|
||||
|
||||
def get_level(self, parent, block):
|
||||
""" Get level of indent based on list level. """
|
||||
# Get indent level
|
||||
@ -188,7 +188,7 @@ class CodeBlockProcessor(BlockProcessor):
|
||||
|
||||
def test(self, parent, block):
|
||||
return block.startswith(' '*markdown.TAB_LENGTH)
|
||||
|
||||
|
||||
def run(self, parent, blocks):
|
||||
sibling = self.lastChild(parent)
|
||||
block = blocks.pop(0)
|
||||
@ -208,7 +208,7 @@ class CodeBlockProcessor(BlockProcessor):
|
||||
block, theRest = self.detab(block)
|
||||
code.text = markdown.AtomicString('%s\n' % block.rstrip())
|
||||
if theRest:
|
||||
# This block contained unindented line(s) after the first indented
|
||||
# This block contained unindented line(s) after the first indented
|
||||
# line. Insert these lines as the first block of the master blocks
|
||||
# list for future processing.
|
||||
blocks.insert(0, theRest)
|
||||
@ -229,7 +229,7 @@ class BlockQuoteProcessor(BlockProcessor):
|
||||
# Pass lines before blockquote in recursively for parsing forst.
|
||||
self.parser.parseBlocks(parent, [before])
|
||||
# Remove ``> `` from begining of each line.
|
||||
block = '\n'.join([self.clean(line) for line in
|
||||
block = '\n'.join([self.clean(line) for line in
|
||||
block[m.start():].split('\n')])
|
||||
sibling = self.lastChild(parent)
|
||||
if sibling and sibling.tag == "blockquote":
|
||||
@ -355,7 +355,7 @@ class HashHeaderProcessor(BlockProcessor):
|
||||
blocks.insert(0, after)
|
||||
else:
|
||||
# This should never happen, but just in case...
|
||||
message(CRITICAL, "We've got a problem header!")
|
||||
print("We've got a problem header!")
|
||||
|
||||
|
||||
class SetextHeaderProcessor(BlockProcessor):
|
||||
@ -407,7 +407,7 @@ class HRProcessor(BlockProcessor):
|
||||
# Recursively parse lines before hr so they get parsed first.
|
||||
self.parser.parseBlocks(parent, ['\n'.join(prelines)])
|
||||
# create hr
|
||||
hr = markdown.etree.SubElement(parent, 'hr')
|
||||
markdown.etree.SubElement(parent, 'hr')
|
||||
# check for lines in block after hr.
|
||||
lines = lines[len(prelines)+1:]
|
||||
if len(lines):
|
||||
@ -418,7 +418,7 @@ class HRProcessor(BlockProcessor):
|
||||
class EmptyBlockProcessor(BlockProcessor):
|
||||
""" Process blocks and start with an empty line. """
|
||||
|
||||
# Detect a block that only contains whitespace
|
||||
# Detect a block that only contains whitespace
|
||||
# or only whitespace on the first line.
|
||||
RE = re.compile(r'^\s*\n')
|
||||
|
||||
@@ -9,7 +9,7 @@ Markdown is called from the command line.
 import markdown
 import sys
 import logging
-from logging import DEBUG, INFO, WARN, ERROR, CRITICAL
+from logging import DEBUG, INFO, CRITICAL
 
 EXECUTABLE_NAME_FOR_USAGE = "python markdown.py"
 """ The name used in the usage statement displayed for python versions < 2.3.
@@ -57,7 +57,7 @@ def parse_options():
     parser.add_option("-s", "--safe", dest="safe", default=False,
                       metavar="SAFE_MODE",
                       help="safe mode ('replace', 'remove' or 'escape' user's HTML tag)")
-    parser.add_option("-o", "--output_format", dest="output_format", 
+    parser.add_option("-o", "--output_format", dest="output_format",
                       default='xhtml1', metavar="OUTPUT_FORMAT",
                       help="Format of output. One of 'xhtml1' (default) or 'html4'.")
    parser.add_option("--noisy",
@@ -8,9 +8,11 @@ def importETree():
     etree_in_c = None
     try: # Is it Python 2.5+ with C implemenation of ElementTree installed?
         import xml.etree.cElementTree as etree_in_c
+        etree_in_c
     except ImportError:
         try: # Is it Python 2.5+ with Python implementation of ElementTree?
             import xml.etree.ElementTree as etree
+            etree
         except ImportError:
             try: # An earlier version of Python with cElementTree installed?
                 import cElementTree as etree_in_c
@@ -8,7 +8,7 @@ Added parsing of Definition Lists to Python-Markdown.
 A simple example:
 
     Apple
-    : Pomaceous fruit of plants of the genus Malus in 
+    : Pomaceous fruit of plants of the genus Malus in
       the family Rosaceae.
     : An american computer company.
 
@@ -80,11 +80,11 @@ class DefListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
     ITEM_TYPES = ['dd']
     LIST_TYPES = ['dl']
 
-    def create_item(parent, block):
+    def create_item(self, parent, block):
         """ Create a new dd and parse the block with it as the parent. """
         dd = markdown.etree.SubElement(parent, 'dd')
         self.parser.parseBlocks(dd, [block])
 
 
 
 
 class DefListExtension(markdown.Extension):
@@ -95,7 +95,7 @@ class DefListExtension(markdown.Extension):
         md.parser.blockprocessors.add('defindent',
                                       DefListIndentProcessor(md.parser),
                                       '>indent')
-        md.parser.blockprocessors.add('deflist', 
+        md.parser.blockprocessors.add('deflist',
                                       DefListProcessor(md.parser),
                                       '>ulist')
 
@@ -43,7 +43,7 @@ class FootnoteExtension(markdown.Extension):
 
         for key, value in configs:
             self.config[key][0] = value
-            
+
         self.reset()
 
     def extendMarkdown(self, md, md_globals):
@@ -82,7 +82,7 @@ class FootnoteExtension(markdown.Extension):
                         return (child, element), False
                 finder(child)
             return None
-                
+
         res = finder(root)
         return res
 
@@ -106,7 +106,7 @@ class FootnoteExtension(markdown.Extension):
 
         div = etree.Element("div")
         div.set('class', 'footnote')
-        hr = etree.SubElement(div, "hr")
+        etree.SubElement(div, "hr")
         ol = etree.SubElement(div, "ol")
 
         for id in self.footnotes.keys():
@@ -149,9 +149,9 @@ class FootnotePreprocessor(markdown.preprocessors.Preprocessor):
         Keywords:
 
         * lines: A list of lines of text
-        
+
         Return: A list of lines with footnote definitions removed.
-        
+
         """
         i, id, footnote = self._findFootnoteDefinition(lines)
 
@@ -175,9 +175,9 @@ class FootnotePreprocessor(markdown.preprocessors.Preprocessor):
         * lines: A list of lines of text.
 
         Return: A three item tuple containing the index of the first line of a
-        footnote definition, the id of the definition and the body of the 
+        footnote definition, the id of the definition and the body of the
         definition.
 
         """
         counter = 0
         for line in lines:
@@ -199,7 +199,6 @@ class FootnotePreprocessor(markdown.preprocessors.Preprocessor):
 
         """
         items = []
         item = -1
-        i = 0 # to keep track of where we are
 
         def detab(line):
@@ -277,7 +276,6 @@ class FootnoteTreeprocessor(markdown.treeprocessors.Treeprocessor):
                 ind = element.getchildren().find(child)
                 element.getchildren().insert(ind + 1, footnotesDiv)
                 child.tail = None
-                fnPlaceholder.parent.replaceChild(fnPlaceholder, footnotesDiv)
             else:
                 root.append(footnotesDiv)
 
@@ -57,7 +57,7 @@ Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
 Project website: <http://www.freewisdom.org/project/python-markdown/HeaderId>
 Contact: markdown@freewisdom.org
 
-License: BSD (see ../docs/LICENSE for details) 
+License: BSD (see ../docs/LICENSE for details)
 
 Dependencies:
 * [Python 2.3+](http://python.org)
@@ -66,7 +66,6 @@ Dependencies:
 """
 
 import calibre.ebooks.markdown.markdown as markdown
-from calibre.ebooks.markdown.markdown import etree
 import re
 from string import ascii_lowercase, digits, punctuation
 
@@ -106,7 +105,7 @@ class HeaderIdProcessor(markdown.blockprocessors.BlockProcessor):
             # Create header using named groups from RE
             start_level, force_id = self._get_meta()
             level = len(m.group('level')) + start_level
-            if level > 6: 
+            if level > 6:
                 level = 6
             h = markdown.etree.SubElement(parent, 'h%d' % level)
             h.text = m.group('header').strip()
@@ -119,7 +118,7 @@ class HeaderIdProcessor(markdown.blockprocessors.BlockProcessor):
                 blocks.insert(0, after)
         else:
             # This should never happen, but just in case...
-            message(CRITICAL, "We've got a problem header!")
+            print ("We've got a problem header!")
 
     def _get_meta(self):
         """ Return meta data suported by this ext as a tuple """
@@ -128,7 +127,7 @@ class HeaderIdProcessor(markdown.blockprocessors.BlockProcessor):
         if hasattr(self.md, 'Meta'):
             if self.md.Meta.has_key('header_level'):
                 level = int(self.md.Meta['header_level'][0]) - 1
-            if self.md.Meta.has_key('header_forceid'): 
+            if self.md.Meta.has_key('header_forceid'):
                 force = self._str2bool(self.md.Meta['header_forceid'][0])
         return level, force
 
@@ -47,6 +47,7 @@ from urlparse import urlparse, urlunparse
 import sys
 if sys.version >= "3.0":
     from html import entities as htmlentitydefs
+    htmlentitydefs
 else:
     import htmlentitydefs
 
@@ -215,7 +216,6 @@ class HtmlPattern (Pattern):
     """ Store raw inline html and return a placeholder. """
     def handleMatch (self, m):
         rawhtml = m.group(2)
-        inline = True
         place_holder = self.markdown.htmlStash.store(rawhtml)
         return place_holder
 
@@ -1,7 +1,7 @@
 class OrderedDict(dict):
     """
     A dictionary that keeps its keys in the order in which they're inserted.
-    
+
     Copied from Django's SortedDict with some modifications.
 
     """
@@ -156,7 +156,7 @@ class OrderedDict(dict):
                 self.keyOrder.insert(i, key)
             else:
                 self.keyOrder.append(key)
-        except Error:
+        except Exception as e:
             # restore to prevent data loss and reraise
             self.keyOrder.insert(n, key)
-            raise Error
+            raise e
@@ -24,8 +24,8 @@ class Treeprocessor(Processor):
     def run(self, root):
         """
         Subclasses of Treeprocessor should implement a `run` method, which
-        takes a root ElementTree. This method can return another ElementTree 
-        object, and the existing root ElementTree will be replaced, or it can 
+        takes a root ElementTree. This method can return another ElementTree
+        object, and the existing root ElementTree will be replaced, or it can
         modify the current tree and return None.
         """
         pass
@@ -185,7 +185,7 @@ class InlineProcessor(Treeprocessor):
                     result.append(node)
 
                 else: # wrong placeholder
-                    end = index + len(prefix)
+                    end = index + len(self.__placeholder_prefix)
                     linkText(data[strartIndex:end])
                     strartIndex = end
             else:
@@ -278,7 +278,7 @@ class InlineProcessor(Treeprocessor):
         for element, lst in insertQueue:
             if element.text:
                 element.text = \
-                    markdown.inlinepatterns.handleAttributes(element.text, 
+                    markdown.inlinepatterns.handleAttributes(element.text,
                                                              element)
             i = 0
             for newChild in lst:
@@ -220,12 +220,11 @@ class InterfaceAction(QObject):
         ac.setStatusTip(description)
         ac.setWhatsThis(description)
 
-        ac.calibre_shortcut_unique_name = None
+        ac.calibre_shortcut_unique_name = unique_name
         if shortcut is not False:
             self.gui.keyboard.register_shortcut(unique_name,
                     shortcut_name, default_keys=keys,
                     action=ac, description=description, group=self.action_spec[0])
-            ac.calibre_shortcut_unique_name = unique_name
         if triggered is not None:
             ac.triggered.connect(triggered)
         return ac
@@ -125,11 +125,14 @@ class Manager(QObject): # {{{
         #pprint.pprint(self.keys_map)
 
     def replace_action(self, unique_name, new_action):
+        '''
+        Replace the action associated with a shortcut.
+        Once you're done calling replace_action() for all shortcuts you want
+        replaced, call finalize() to have the shortcuts assigned to the replaced
+        actions.
+        '''
         sc = self.shortcuts[unique_name]
         ac = sc['action']
         if ac is not None:
             new_action.setShortcuts(ac.shortcuts())
             ac.setShortcuts([])
         sc['action'] = new_action
 
 # }}}
 
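The expanded docstring spells out the intended calling sequence. A hedged sketch of how calling code might use it (the manager, actions and unique names are assumptions; finalize() is taken from the docstring above, not from this hunk):

# Re-bind existing shortcuts onto freshly created QActions, then commit.
for unique_name, new_action in (('View', view_action),
                                ('Save To Disk', save_action)):
    manager.replace_action(unique_name, new_action)

# Per the docstring, the shortcuts are only assigned to the replacement
# actions once finalize() is called.
manager.finalize()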
@@ -83,6 +83,10 @@ def category_url(prefix, cid):
 
 def icon_url(prefix, name):
     return absurl(prefix, '/browse/icon/'+name)
 
+def books_in_url(prefix, category, cid):
+    return absurl(prefix, '/ajax/books_in/%s/%s'%(
+        encode_name(category), encode_name(cid)))
+
 # }}}
 
 class AjaxServer(object):
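books_in_url() simply composes the /ajax/books_in/ endpoint path from a category and an item id, which the category and browse code below now reuses. Illustrative call only (the prefix and values are invented, and encode_name() is assumed to make both path pieces URL-safe):

# Joins the server prefix with '/ajax/books_in/<category>/<item id>', with
# both pieces passed through encode_name() first.
url = books_in_url('/calibre', 'tags', '23')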
@@ -114,7 +118,7 @@ class AjaxServer(object):
 
 
     # Get book metadata {{{
-    def ajax_book_to_json(self, book_id):
+    def ajax_book_to_json(self, book_id, get_category_urls=True):
         mi = self.db.get_metadata(book_id, index_is_id=True)
         try:
             mi.rating = mi.rating/2.
@@ -151,18 +155,46 @@ class AjaxServer(object):
         data['other_formats'] = {fmt: absurl(self.opts.url_prefix, u'/get/%s/%d'%(fmt, book_id)) for fmt
                 in other_fmts}
 
+        if get_category_urls:
+            category_urls = data['category_urls'] = {}
+            ccache = self.categories_cache()
+            for key in mi.all_field_keys():
+                fm = mi.metadata_for_field(key)
+                if (fm and fm['is_category'] and not fm['is_csp'] and
+                        key != 'formats' and fm['datatype'] not in ['rating']):
+                    categories = mi.get(key)
+                    if isinstance(categories, basestring):
+                        categories = [categories]
+                    if categories is None:
+                        categories = []
+                    dbtags = {}
+                    for category in categories:
+                        for tag in ccache.get(key, []):
+                            if tag.original_name == category:
+                                dbtags[category] = books_in_url(self.opts.url_prefix,
+                                        tag.category if tag.category else key,
+                                        tag.original_name if tag.id is None else
+                                        unicode(tag.id))
+                                break
+                    category_urls[key] = dbtags
+
         return data, mi.last_modified
 
     @Endpoint(set_last_modified=False)
-    def ajax_book(self, book_id):
+    def ajax_book(self, book_id, category_urls='true'):
         '''
         Return the metadata of the book as a JSON dictionary.
+
+        If category_urls == 'true' the returned dictionary also contains a
+        mapping of category names to URLs that return the list of books in the
+        given category.
         '''
         cherrypy.response.timeout = 3600
 
         try:
             book_id = int(book_id)
-            data, last_modified = self.ajax_book_to_json(book_id)
+            data, last_modified = self.ajax_book_to_json(book_id,
+                    get_category_urls=category_urls.lower()=='true')
         except:
             raise cherrypy.HTTPError(404, 'No book with id: %r'%book_id)
 
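Per the new docstring, /ajax/book now includes per-category URLs unless the client opts out. A hedged sketch of how a client might exercise the query parameter (host, port and book id are invented; the server side is the code above):

import json
from urllib.request import urlopen   # client-side sketch, Python 3

# Default: category_urls=true, so the response carries a 'category_urls'
# mapping of category names to /ajax/books_in/... URLs.
with urlopen('http://localhost:8080/ajax/book/42') as f:
    data = json.loads(f.read().decode('utf-8'))
print(sorted(data.get('category_urls', {})))

# Opting out restores the smaller pre-change payload.
with urlopen('http://localhost:8080/ajax/book/42?category_urls=false') as f:
    data = json.loads(f.read().decode('utf-8'))
print('category_urls' in data)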
@@ -172,7 +204,7 @@ class AjaxServer(object):
         return data
 
     @Endpoint(set_last_modified=False)
-    def ajax_books(self, ids=None):
+    def ajax_books(self, ids=None, category_urls='true'):
         '''
         Return the metadata for a list of books specified as a comma separated
         list of ids. The metadata is returned as a dictionary mapping ids to
@@ -192,9 +224,11 @@ class AjaxServer(object):
                     ' of integers')
         ans = {}
         lm = None
+        gcu = category_urls.lower()=='true'
         for book_id in ids:
             try:
-                data, last_modified = self.ajax_book_to_json(book_id)
+                data, last_modified = self.ajax_book_to_json(book_id,
+                        get_category_urls=gcu)
             except:
                 ans[book_id] = None
             else:
@@ -431,9 +465,9 @@ class AjaxServer(object):
                 'name':item_names.get(x, x.original_name),
                 'average_rating': x.avg_rating,
                 'count': x.count,
-                'url': absurl(self.opts.url_prefix, '/ajax/books_in/%s/%s'%(
-                    encode_name(x.category if x.category else toplevel),
-                    encode_name(x.original_name if x.id is None else unicode(x.id)))),
+                'url': books_in_url(self.opts.url_prefix,
+                    x.category if x.category else toplevel,
+                    x.original_name if x.id is None else unicode(x.id)),
                 'has_children': x.original_name in children,
                 } for x in items]
 
@@ -49,6 +49,8 @@ class DispatchController(object): # {{{
         elif self.prefix:
             self.dispatcher.connect(name+'prefix_extra', self.prefix, self,
                     **kwargs)
+            self.dispatcher.connect(name+'prefix_extra_trailing',
+                    self.prefix+'/', self, **kwargs)
         self.dispatcher.connect(name, route, self, **kwargs)
         self.funcs.append(expose(func))
 
@@ -695,8 +695,8 @@ class BrowseServer(object):
             for tag in dbtags:
                 tval = ('<a title="Browse books by {3}: {0}"'
                         ' href="{1}" class="details_category_link">{2}</a>')
-                href='/browse/matches/%s/%s' % \
-                        (quote(tag.category), quote(str(tag.id)))
+                href='%s/browse/matches/%s/%s' % \
+                        (self.opts.url_prefix, quote(tag.category), quote(str(tag.id)))
                 vals.append(tval.format(xml(tag.name, True),
                     xml(href, True),
                     xml(val if len(dbtags) == 1 else tag.name),