mirror of
https://github.com/kovidgoyal/calibre.git
synced 2025-07-09 03:04:10 -04:00
GwR apple driver wip
This commit is contained in:
commit
def237d5bb
@ -123,7 +123,7 @@ function fetch_library_books(start, num, timeout, sort, order, search) {
|
|||||||
|
|
||||||
current_library_request = $.ajax({
|
current_library_request = $.ajax({
|
||||||
type: "GET",
|
type: "GET",
|
||||||
url: "library",
|
url: "xml",
|
||||||
data: data,
|
data: data,
|
||||||
cache: false,
|
cache: false,
|
||||||
timeout: timeout, //milliseconds
|
timeout: timeout, //milliseconds
|
||||||
|
@ -35,7 +35,7 @@ bool_custom_columns_are_tristate = 'yes'
|
|||||||
|
|
||||||
|
|
||||||
# Provide a set of columns to be sorted on when calibre starts
|
# Provide a set of columns to be sorted on when calibre starts
|
||||||
# The argument is None of saved sort history is to be used
|
# The argument is None if saved sort history is to be used
|
||||||
# otherwise it is a list of column,order pairs. Column is the
|
# otherwise it is a list of column,order pairs. Column is the
|
||||||
# lookup/search name, found using the tooltip for the column
|
# lookup/search name, found using the tooltip for the column
|
||||||
# Order is 0 for ascending, 1 for descending
|
# Order is 0 for ascending, 1 for descending
|
||||||
|
BIN
resources/images/devices/ipad.png
Normal file
BIN
resources/images/devices/ipad.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 17 KiB |
BIN
resources/images/news/american_thinker.png
Normal file
BIN
resources/images/news/american_thinker.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 705 B |
43
resources/recipes/american_thinker.recipe
Normal file
43
resources/recipes/american_thinker.recipe
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
__license__ = 'GPL v3'
|
||||||
|
__copyright__ = '2010, Walt Anthony <workshop.northpole at gmail.com>'
|
||||||
|
'''
|
||||||
|
www.americanthinker.com
|
||||||
|
'''
|
||||||
|
from calibre.web.feeds.news import BasicNewsRecipe
|
||||||
|
|
||||||
|
class AmericanThinker(BasicNewsRecipe):
|
||||||
|
title = u'American Thinker'
|
||||||
|
description = "American Thinker is a daily internet publication devoted to the thoughtful exploration of issues of importance to Americans."
|
||||||
|
__author__ = 'Walt Anthony'
|
||||||
|
publisher = 'Thomas Lifson'
|
||||||
|
category = 'news, politics, USA'
|
||||||
|
oldest_article = 7 #days
|
||||||
|
max_articles_per_feed = 50
|
||||||
|
summary_length = 150
|
||||||
|
language = 'en'
|
||||||
|
|
||||||
|
remove_javascript = True
|
||||||
|
no_stylesheets = True
|
||||||
|
|
||||||
|
|
||||||
|
conversion_options = {
|
||||||
|
'comment' : description
|
||||||
|
, 'tags' : category
|
||||||
|
, 'publisher' : publisher
|
||||||
|
, 'language' : language
|
||||||
|
, 'linearize_tables' : True
|
||||||
|
}
|
||||||
|
|
||||||
|
remove_tags = [
|
||||||
|
dict(name=['table', 'iframe', 'embed', 'object'])
|
||||||
|
]
|
||||||
|
|
||||||
|
remove_tags_after = dict(name='div', attrs={'class':'article_body'})
|
||||||
|
|
||||||
|
|
||||||
|
feeds = [(u'http://feeds.feedburner.com/americanthinker'),
|
||||||
|
(u'http://feeds.feedburner.com/AmericanThinkerBlog')
|
||||||
|
]
|
||||||
|
|
||||||
|
def print_version(self, url):
|
||||||
|
return 'http://www.americanthinker.com/printpage/?url=' + url
|
@ -50,6 +50,7 @@ class Newsweek(BasicNewsRecipe):
|
|||||||
'articlecontent','photoBox', 'article columnist first']}, ]
|
'articlecontent','photoBox', 'article columnist first']}, ]
|
||||||
recursions = 1
|
recursions = 1
|
||||||
match_regexps = [r'http://www.newsweek.com/id/\S+/page/\d+']
|
match_regexps = [r'http://www.newsweek.com/id/\S+/page/\d+']
|
||||||
|
preprocess_regexps = [(re.compile(r'<!--.*?-->', re.DOTALL), lambda m: '')]
|
||||||
|
|
||||||
def find_title(self, section):
|
def find_title(self, section):
|
||||||
d = {'scope':'Scope', 'thetake':'The Take', 'features':'Features',
|
d = {'scope':'Scope', 'thetake':'The Take', 'features':'Features',
|
||||||
|
@ -451,6 +451,20 @@ def prepare_string_for_xml(raw, attribute=False):
|
|||||||
def isbytestring(obj):
|
def isbytestring(obj):
|
||||||
return isinstance(obj, (str, bytes))
|
return isinstance(obj, (str, bytes))
|
||||||
|
|
||||||
|
def human_readable(size):
|
||||||
|
""" Convert a size in bytes into a human readable form """
|
||||||
|
divisor, suffix = 1, "B"
|
||||||
|
for i, candidate in enumerate(('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB')):
|
||||||
|
if size < 1024**(i+1):
|
||||||
|
divisor, suffix = 1024**(i), candidate
|
||||||
|
break
|
||||||
|
size = str(float(size)/divisor)
|
||||||
|
if size.find(".") > -1:
|
||||||
|
size = size[:size.find(".")+2]
|
||||||
|
if size.endswith('.0'):
|
||||||
|
size = size[:-2]
|
||||||
|
return size + " " + suffix
|
||||||
|
|
||||||
if isosx:
|
if isosx:
|
||||||
import glob, shutil
|
import glob, shutil
|
||||||
fdir = os.path.expanduser('~/.fonts')
|
fdir = os.path.expanduser('~/.fonts')
|
||||||
|
@ -449,7 +449,7 @@ from calibre.devices.eslick.driver import ESLICK
|
|||||||
from calibre.devices.nuut2.driver import NUUT2
|
from calibre.devices.nuut2.driver import NUUT2
|
||||||
from calibre.devices.iriver.driver import IRIVER_STORY
|
from calibre.devices.iriver.driver import IRIVER_STORY
|
||||||
from calibre.devices.binatone.driver import README
|
from calibre.devices.binatone.driver import README
|
||||||
from calibre.devices.hanvon.driver import N516, EB511, ALEX, AZBOOKA
|
from calibre.devices.hanvon.driver import N516, EB511, ALEX, AZBOOKA, THEBOOK
|
||||||
from calibre.devices.edge.driver import EDGE
|
from calibre.devices.edge.driver import EDGE
|
||||||
from calibre.devices.teclast.driver import TECLAST_K3, NEWSMY, IPAPYRUS
|
from calibre.devices.teclast.driver import TECLAST_K3, NEWSMY, IPAPYRUS
|
||||||
from calibre.devices.sne.driver import SNE
|
from calibre.devices.sne.driver import SNE
|
||||||
@ -529,6 +529,7 @@ plugins += [
|
|||||||
EB600,
|
EB600,
|
||||||
README,
|
README,
|
||||||
N516,
|
N516,
|
||||||
|
THEBOOK,
|
||||||
EB511,
|
EB511,
|
||||||
ELONEX,
|
ELONEX,
|
||||||
TECLAST_K3,
|
TECLAST_K3,
|
||||||
|
@ -21,14 +21,20 @@ if iswindows:
|
|||||||
print "running in Windows"
|
print "running in Windows"
|
||||||
import win32com.client
|
import win32com.client
|
||||||
|
|
||||||
|
class UserInteractionRequired(Exception):
|
||||||
|
print "UserInteractionRequired() exception"
|
||||||
|
#pass
|
||||||
|
|
||||||
class ITUNES(DevicePlugin):
|
class ITUNES(DevicePlugin):
|
||||||
name = 'Apple device interface'
|
name = 'Apple device interface'
|
||||||
gui_name = 'Apple device'
|
gui_name = 'Apple device'
|
||||||
icon = I('devices/iPad.png')
|
icon = I('devices/ipad.png')
|
||||||
description = _('Communicate with iBooks through iTunes.')
|
description = _('Communicate with iBooks through iTunes.')
|
||||||
supported_platforms = ['windows','osx']
|
supported_platforms = ['windows','osx']
|
||||||
author = 'GRiker'
|
author = 'GRiker'
|
||||||
|
|
||||||
|
OPEN_FEEDBACK_MESSAGE = _('Apple device detected, launching iTunes')
|
||||||
|
|
||||||
FORMATS = ['epub']
|
FORMATS = ['epub']
|
||||||
|
|
||||||
VENDOR_ID = [0x05ac]
|
VENDOR_ID = [0x05ac]
|
||||||
@ -39,8 +45,10 @@ class ITUNES(DevicePlugin):
|
|||||||
BCD = [0x01]
|
BCD = [0x01]
|
||||||
|
|
||||||
# Properties
|
# Properties
|
||||||
cached_paths = {}
|
cached_books = {}
|
||||||
|
ejected = False
|
||||||
iTunes= None
|
iTunes= None
|
||||||
|
path_template = 'iTunes/%s - %s.epub'
|
||||||
sources = None
|
sources = None
|
||||||
verbose = True
|
verbose = True
|
||||||
|
|
||||||
@ -68,53 +76,69 @@ class ITUNES(DevicePlugin):
|
|||||||
in main memory of device. If a card is specified and no
|
in main memory of device. If a card is specified and no
|
||||||
books are on the card return empty list.
|
books are on the card return empty list.
|
||||||
@return: A BookList.
|
@return: A BookList.
|
||||||
|
|
||||||
|
Implementation notes:
|
||||||
|
iTunes does not sync purchased books, they are only on the device. They are visible, but
|
||||||
|
they are not backed up to iTunes. Since calibre can't manage them, don't show them in the
|
||||||
|
list of device books.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
print "ITUNES:books(oncard=%s)" % oncard
|
print "ITUNES:books(oncard=%s)" % oncard
|
||||||
|
|
||||||
if not oncard:
|
if not oncard:
|
||||||
# Fetch a list of books from iPod device connected to iTunes
|
# Fetch a list of books from iPod device connected to iTunes
|
||||||
if isosx:
|
if isosx:
|
||||||
'''
|
|
||||||
print "self.sources: %s" % self.sources
|
# Fetch Library|Books
|
||||||
print "self.sources['library']: %s" % self.sources['library']
|
|
||||||
lib = self.iTunes.sources['library']
|
lib = self.iTunes.sources['library']
|
||||||
|
|
||||||
if 'Books' in lib.playlists.name():
|
if 'Books' in lib.playlists.name():
|
||||||
booklist = BookList()
|
lib_books = lib.playlists['Books'].file_tracks()
|
||||||
it_books = lib.playlists['Books'].file_tracks()
|
library_books = {}
|
||||||
for it_book in it_books:
|
for book in lib_books:
|
||||||
this_book = Book(it_book.name(), it_book.artist())
|
path = self.path_template % (book.name(), book.artist())
|
||||||
this_book.datetime = parse_date(str(it_book.date_added())).timetuple()
|
library_books[path] = book
|
||||||
this_book.db_id = None
|
|
||||||
this_book.device_collections = []
|
|
||||||
this_book.path = 'iTunes/Books/%s.epub' % it_book.name()
|
|
||||||
this_book.size = it_book.size()
|
|
||||||
this_book.thumbnail = None
|
|
||||||
booklist.add_book(this_book, False)
|
|
||||||
return booklist
|
|
||||||
|
|
||||||
else:
|
# Fetch iPod|Books, ignore iPod|Purchased books
|
||||||
return []
|
|
||||||
'''
|
|
||||||
if 'iPod' in self.sources:
|
if 'iPod' in self.sources:
|
||||||
device = self.sources['iPod']
|
device = self.sources['iPod']
|
||||||
|
if 'Purchased' in self.iTunes.sources[device].playlists.name():
|
||||||
|
purchased_book_ids = [pb.database_ID() for pb in self.iTunes.sources[device].playlists['Purchased'].file_tracks()]
|
||||||
|
else:
|
||||||
|
purchased_books_ids = []
|
||||||
|
|
||||||
if 'Books' in self.iTunes.sources[device].playlists.name():
|
if 'Books' in self.iTunes.sources[device].playlists.name():
|
||||||
booklist = BookList()
|
booklist = BookList()
|
||||||
cached_paths = {}
|
cached_books = {}
|
||||||
books = self.iTunes.sources[device].playlists['Books'].file_tracks()
|
device_books = self.iTunes.sources[device].playlists['Books'].file_tracks()
|
||||||
for book in books:
|
for book in device_books:
|
||||||
|
if book.database_ID() in purchased_book_ids:
|
||||||
|
if self.verbose:
|
||||||
|
print " skipping purchased book '%s'" % book.name()
|
||||||
|
continue
|
||||||
this_book = Book(book.name(), book.artist())
|
this_book = Book(book.name(), book.artist())
|
||||||
this_book.datetime = parse_date(str(book.date_added())).timetuple()
|
this_book.datetime = parse_date(str(book.date_added())).timetuple()
|
||||||
this_book.db_id = None
|
this_book.db_id = None
|
||||||
this_book.device_collections = []
|
this_book.device_collections = []
|
||||||
this_book.path = 'iTunes/%s - %s.epub' % (book.name(), book.artist())
|
this_book.path = self.path_template % (book.name(), book.artist())
|
||||||
this_book.size = book.size()
|
this_book.size = book.size()
|
||||||
this_book.thumbnail = None
|
this_book.thumbnail = None
|
||||||
booklist.add_book(this_book, False)
|
cached_books[this_book.path] = { 'title':book.name(),
|
||||||
cached_paths[this_book.path] = { 'title':book.name(),
|
|
||||||
'author':book.artist(),
|
'author':book.artist(),
|
||||||
'book':book}
|
'lib_book':library_books[this_book.path] if this_book.path in library_books else None,
|
||||||
self.cached_paths = cached_paths
|
'dev_book':book,
|
||||||
print self.cached_paths
|
'bl_index':len(booklist)
|
||||||
|
}
|
||||||
|
booklist.add_book(this_book, False)
|
||||||
|
|
||||||
|
if self.verbose:
|
||||||
|
print
|
||||||
|
print "%-40.40s %-12.12s" % ('Device Books','In Library')
|
||||||
|
print "%-40.40s %-12.12s" % ('------------','----------')
|
||||||
|
|
||||||
|
for cp in cached_books.keys():
|
||||||
|
print "%-40.40s %6.6s" % (cached_books[cp]['title'], 'yes' if cached_books[cp]['lib_book'] else ' no')
|
||||||
|
print
|
||||||
|
self.cached_books = cached_books
|
||||||
return booklist
|
return booklist
|
||||||
else:
|
else:
|
||||||
# No books installed on this device
|
# No books installed on this device
|
||||||
@ -139,6 +163,10 @@ class ITUNES(DevicePlugin):
|
|||||||
'''
|
'''
|
||||||
# print "ITUNES:can_handle()"
|
# print "ITUNES:can_handle()"
|
||||||
if isosx:
|
if isosx:
|
||||||
|
if self.ejected:
|
||||||
|
print "ITUNES:can_handle(): device detected, but ejected from iTunes"
|
||||||
|
return False
|
||||||
|
|
||||||
if self.iTunes:
|
if self.iTunes:
|
||||||
# Check for connected book-capable device
|
# Check for connected book-capable device
|
||||||
names = [s.name() for s in self.iTunes.sources()]
|
names = [s.name() for s in self.iTunes.sources()]
|
||||||
@ -196,32 +224,19 @@ class ITUNES(DevicePlugin):
|
|||||||
def delete_books(self, paths, end_session=True):
|
def delete_books(self, paths, end_session=True):
|
||||||
'''
|
'''
|
||||||
Delete books at paths on device.
|
Delete books at paths on device.
|
||||||
Since we're deleting through iTunes, we'll use the cached handle to the book
|
iTunes doesn't let us directly delete a book on the device.
|
||||||
|
Delete the path(s) from the library, then update iPad
|
||||||
|
|
||||||
'''
|
'''
|
||||||
for path in paths:
|
for path in paths:
|
||||||
title = self.cached_paths[path]['title']
|
title = self.cached_books[path]['title']
|
||||||
author = self.cached_paths[path]['author']
|
author = self.cached_books[path]['author']
|
||||||
book = self.cached_paths[path]['book']
|
dev_book = self.cached_books[path]['dev_book']
|
||||||
print "ITUNES.delete_books(): Searching for '%s - %s'" % (title,author)
|
lib_book = self.cached_books[path]['lib_book']
|
||||||
if True:
|
|
||||||
results = self.iTunes.playlists['library'].file_tracks[
|
|
||||||
(appscript.its.name == title).AND
|
|
||||||
(appscript.its.artist == author).AND
|
|
||||||
(appscript.its.kind == 'Book')].get()
|
|
||||||
if len(results) == 1:
|
|
||||||
book_to_delete = results[0]
|
|
||||||
print "book_to_delete: %s" % book_to_delete
|
|
||||||
if self.verbose:
|
if self.verbose:
|
||||||
print "ITUNES:delete_books(): Deleting '%s - %s'" % (title, author)
|
print "ITUNES:delete_books(): Deleting '%s' from iTunes library" % (path)
|
||||||
self.iTunes.delete(results[0])
|
self.iTunes.delete(lib_book)
|
||||||
elif len(results) > 1:
|
self._update_device()
|
||||||
print "ITUNES.delete_books(): More than one book matches '%s - %s'" % (title, author)
|
|
||||||
else:
|
|
||||||
print "ITUNES.delete_books(): No book '%s - %s' found in iTunes" % (title, author)
|
|
||||||
else:
|
|
||||||
if self.verbose:
|
|
||||||
print "ITUNES:delete_books(): Deleting '%s - %s'" % (title, author)
|
|
||||||
self.iTunes.delete(book)
|
|
||||||
|
|
||||||
def eject(self):
|
def eject(self):
|
||||||
'''
|
'''
|
||||||
@ -284,6 +299,8 @@ class ITUNES(DevicePlugin):
|
|||||||
# Launch iTunes if not already running
|
# Launch iTunes if not already running
|
||||||
if self.verbose:
|
if self.verbose:
|
||||||
print "ITUNES:open(): Instantiating iTunes"
|
print "ITUNES:open(): Instantiating iTunes"
|
||||||
|
|
||||||
|
# Instantiate iTunes
|
||||||
running_apps = appscript.app('System Events')
|
running_apps = appscript.app('System Events')
|
||||||
if not 'iTunes' in running_apps.processes.name():
|
if not 'iTunes' in running_apps.processes.name():
|
||||||
if self.verbose:
|
if self.verbose:
|
||||||
@ -297,13 +314,26 @@ class ITUNES(DevicePlugin):
|
|||||||
if self.verbose:
|
if self.verbose:
|
||||||
print " %s - %s (already running)" % (self.iTunes.name(), self.iTunes.version())
|
print " %s - %s (already running)" % (self.iTunes.name(), self.iTunes.version())
|
||||||
|
|
||||||
|
# Init the iTunes source list
|
||||||
|
names = [s.name() for s in self.iTunes.sources()]
|
||||||
|
kinds = [str(s.kind()).rpartition('.')[2] for s in self.iTunes.sources()]
|
||||||
|
self.sources = sources = dict(zip(kinds,names))
|
||||||
|
|
||||||
|
# If we're running, but 'iPod' is not a listed source, device was
|
||||||
|
# previously ejected but not physically removed. can_handle() needs to know this
|
||||||
|
if not 'iPod' in self.sources:
|
||||||
|
self.ejected = True
|
||||||
|
|
||||||
|
else:
|
||||||
|
print "ITUNES:open(): check for presync here ..."
|
||||||
|
|
||||||
def post_yank_cleanup(self):
|
def post_yank_cleanup(self):
|
||||||
'''
|
'''
|
||||||
Called if the user yanks the device without ejecting it first.
|
Called if the user yanks the device without ejecting it first.
|
||||||
'''
|
'''
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
def remove_books_from_metadata(cls, paths, booklists):
|
def remove_books_from_metadata(self, paths, booklists):
|
||||||
'''
|
'''
|
||||||
Remove books from the metadata list. This function must not communicate
|
Remove books from the metadata list. This function must not communicate
|
||||||
with the device.
|
with the device.
|
||||||
@ -312,7 +342,14 @@ class ITUNES(DevicePlugin):
|
|||||||
(L{books}(oncard=None), L{books}(oncard='carda'),
|
(L{books}(oncard=None), L{books}(oncard='carda'),
|
||||||
L{books}(oncard='cardb')).
|
L{books}(oncard='cardb')).
|
||||||
'''
|
'''
|
||||||
print "ITUNES.remove_books_from_metadata(): need to implement"
|
print "ITUNES.remove_books_from_metadata():"
|
||||||
|
for path in paths:
|
||||||
|
print " Removing '%s' from calibre booklist, index: %d" % (path, self.cached_books[path]['bl_index'])
|
||||||
|
booklists[0].pop(self.cached_books[path]['bl_index'])
|
||||||
|
|
||||||
|
print " Removing '%s' from self.cached_books" % path
|
||||||
|
self.cached_books.pop(path)
|
||||||
|
|
||||||
|
|
||||||
def reset(self, key='-1', log_packets=False, report_progress=None,
|
def reset(self, key='-1', log_packets=False, report_progress=None,
|
||||||
detected_device=None) :
|
detected_device=None) :
|
||||||
@ -407,6 +444,15 @@ class ITUNES(DevicePlugin):
|
|||||||
'''
|
'''
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
# Private methods
|
||||||
|
def _update_device(self):
|
||||||
|
'''
|
||||||
|
This probably needs a job spinner
|
||||||
|
'''
|
||||||
|
if self.verbose:
|
||||||
|
print "ITUNES:_update_device(): Syncing device with iTunes Library"
|
||||||
|
self.iTunes.update()
|
||||||
|
|
||||||
class BookList(list):
|
class BookList(list):
|
||||||
'''
|
'''
|
||||||
A list of books. Each Book object must have the fields:
|
A list of books. Each Book object must have the fields:
|
||||||
|
@ -24,7 +24,7 @@ class N516(USBMS):
|
|||||||
|
|
||||||
VENDOR_ID = [0x0525]
|
VENDOR_ID = [0x0525]
|
||||||
PRODUCT_ID = [0xa4a5]
|
PRODUCT_ID = [0xa4a5]
|
||||||
BCD = [0x323, 0x326, 0x399]
|
BCD = [0x323, 0x326]
|
||||||
|
|
||||||
VENDOR_NAME = 'INGENIC'
|
VENDOR_NAME = 'INGENIC'
|
||||||
WINDOWS_MAIN_MEM = '_FILE-STOR_GADGE'
|
WINDOWS_MAIN_MEM = '_FILE-STOR_GADGE'
|
||||||
@ -34,6 +34,16 @@ class N516(USBMS):
|
|||||||
EBOOK_DIR_MAIN = 'e_book'
|
EBOOK_DIR_MAIN = 'e_book'
|
||||||
SUPPORTS_SUB_DIRS = True
|
SUPPORTS_SUB_DIRS = True
|
||||||
|
|
||||||
|
class THEBOOK(N516):
|
||||||
|
name = 'The Book driver'
|
||||||
|
gui_name = 'The Book'
|
||||||
|
description = _('Communicate with The Book reader.')
|
||||||
|
author = 'Kovid Goyal'
|
||||||
|
|
||||||
|
BCD = [0x399]
|
||||||
|
MAIN_MEMORY_VOLUME_LABEL = 'The Book Main Memory'
|
||||||
|
EBOOK_DIR_MAIN = 'My books'
|
||||||
|
|
||||||
class ALEX(N516):
|
class ALEX(N516):
|
||||||
|
|
||||||
name = 'Alex driver'
|
name = 'Alex driver'
|
||||||
|
@ -17,7 +17,7 @@ class PALMPRE(USBMS):
|
|||||||
supported_platforms = ['windows', 'osx', 'linux']
|
supported_platforms = ['windows', 'osx', 'linux']
|
||||||
|
|
||||||
# Ordered list of supported formats
|
# Ordered list of supported formats
|
||||||
FORMATS = ['mobi', 'prc', 'pdb', 'txt']
|
FORMATS = ['epub', 'mobi', 'prc', 'pdb', 'txt']
|
||||||
|
|
||||||
VENDOR_ID = [0x0830]
|
VENDOR_ID = [0x0830]
|
||||||
PRODUCT_ID = [0x8004, 0x8002, 0x0101]
|
PRODUCT_ID = [0x8004, 0x8002, 0x0101]
|
||||||
|
@ -88,17 +88,28 @@ CALIBRE_METADATA_FIELDS = frozenset([
|
|||||||
]
|
]
|
||||||
)
|
)
|
||||||
|
|
||||||
|
CALIBRE_RESERVED_LABELS = frozenset([
|
||||||
|
'search', # reserved for saved searches
|
||||||
|
'date',
|
||||||
|
'all',
|
||||||
|
'ondevice',
|
||||||
|
'inlibrary',
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
RESERVED_METADATA_FIELDS = SOCIAL_METADATA_FIELDS.union(
|
RESERVED_METADATA_FIELDS = SOCIAL_METADATA_FIELDS.union(
|
||||||
PUBLICATION_METADATA_FIELDS).union(
|
PUBLICATION_METADATA_FIELDS).union(
|
||||||
BOOK_STRUCTURE_FIELDS).union(
|
BOOK_STRUCTURE_FIELDS).union(
|
||||||
USER_METADATA_FIELDS).union(
|
USER_METADATA_FIELDS).union(
|
||||||
DEVICE_METADATA_FIELDS).union(
|
DEVICE_METADATA_FIELDS).union(
|
||||||
CALIBRE_METADATA_FIELDS)
|
CALIBRE_METADATA_FIELDS).union(
|
||||||
|
CALIBRE_RESERVED_LABELS)
|
||||||
|
|
||||||
assert len(RESERVED_METADATA_FIELDS) == sum(map(len, (
|
assert len(RESERVED_METADATA_FIELDS) == sum(map(len, (
|
||||||
SOCIAL_METADATA_FIELDS, PUBLICATION_METADATA_FIELDS,
|
SOCIAL_METADATA_FIELDS, PUBLICATION_METADATA_FIELDS,
|
||||||
BOOK_STRUCTURE_FIELDS, USER_METADATA_FIELDS,
|
BOOK_STRUCTURE_FIELDS, USER_METADATA_FIELDS,
|
||||||
DEVICE_METADATA_FIELDS, CALIBRE_METADATA_FIELDS,
|
DEVICE_METADATA_FIELDS, CALIBRE_METADATA_FIELDS,
|
||||||
|
CALIBRE_RESERVED_LABELS
|
||||||
)))
|
)))
|
||||||
|
|
||||||
SERIALIZABLE_FIELDS = SOCIAL_METADATA_FIELDS.union(
|
SERIALIZABLE_FIELDS = SOCIAL_METADATA_FIELDS.union(
|
||||||
|
@ -1,538 +0,0 @@
|
|||||||
__license__ = 'GPL v3'
|
|
||||||
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
|
|
||||||
'''Read/Write metadata from Open Packaging Format (.opf) files.'''
|
|
||||||
|
|
||||||
import re, os
|
|
||||||
import uuid
|
|
||||||
from urllib import unquote, quote
|
|
||||||
|
|
||||||
from calibre.constants import __appname__, __version__
|
|
||||||
from calibre.ebooks.metadata import MetaInformation, string_to_authors
|
|
||||||
from calibre.ebooks.BeautifulSoup import BeautifulStoneSoup, BeautifulSoup
|
|
||||||
from calibre.ebooks.lrf import entity_to_unicode
|
|
||||||
from calibre.ebooks.metadata import Resource, ResourceCollection
|
|
||||||
from calibre.ebooks.metadata.toc import TOC
|
|
||||||
|
|
||||||
class OPFSoup(BeautifulStoneSoup):
|
|
||||||
|
|
||||||
def __init__(self, raw):
|
|
||||||
BeautifulStoneSoup.__init__(self, raw,
|
|
||||||
convertEntities=BeautifulSoup.HTML_ENTITIES,
|
|
||||||
selfClosingTags=['item', 'itemref', 'reference'])
|
|
||||||
|
|
||||||
class ManifestItem(Resource):
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def from_opf_manifest_item(item, basedir):
|
|
||||||
if item.has_key('href'):
|
|
||||||
href = item['href']
|
|
||||||
if unquote(href) == href:
|
|
||||||
try:
|
|
||||||
href = quote(href)
|
|
||||||
except KeyError:
|
|
||||||
pass
|
|
||||||
res = ManifestItem(href, basedir=basedir, is_path=False)
|
|
||||||
mt = item.get('media-type', '').strip()
|
|
||||||
if mt:
|
|
||||||
res.mime_type = mt
|
|
||||||
return res
|
|
||||||
|
|
||||||
@dynamic_property
|
|
||||||
def media_type(self):
|
|
||||||
def fget(self):
|
|
||||||
return self.mime_type
|
|
||||||
def fset(self, val):
|
|
||||||
self.mime_type = val
|
|
||||||
return property(fget=fget, fset=fset)
|
|
||||||
|
|
||||||
|
|
||||||
def __unicode__(self):
|
|
||||||
return u'<item id="%s" href="%s" media-type="%s" />'%(self.id, self.href(), self.media_type)
|
|
||||||
|
|
||||||
def __str__(self):
|
|
||||||
return unicode(self).encode('utf-8')
|
|
||||||
|
|
||||||
def __repr__(self):
|
|
||||||
return unicode(self)
|
|
||||||
|
|
||||||
|
|
||||||
def __getitem__(self, index):
|
|
||||||
if index == 0:
|
|
||||||
return self.href()
|
|
||||||
if index == 1:
|
|
||||||
return self.media_type
|
|
||||||
raise IndexError('%d out of bounds.'%index)
|
|
||||||
|
|
||||||
|
|
||||||
class Manifest(ResourceCollection):
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def from_opf_manifest_element(manifest, dir):
|
|
||||||
m = Manifest()
|
|
||||||
for item in manifest.findAll(re.compile('item')):
|
|
||||||
try:
|
|
||||||
m.append(ManifestItem.from_opf_manifest_item(item, dir))
|
|
||||||
id = item.get('id', '')
|
|
||||||
if not id:
|
|
||||||
id = 'id%d'%m.next_id
|
|
||||||
m[-1].id = id
|
|
||||||
m.next_id += 1
|
|
||||||
except ValueError:
|
|
||||||
continue
|
|
||||||
return m
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def from_paths(entries):
|
|
||||||
'''
|
|
||||||
`entries`: List of (path, mime-type) If mime-type is None it is autodetected
|
|
||||||
'''
|
|
||||||
m = Manifest()
|
|
||||||
for path, mt in entries:
|
|
||||||
mi = ManifestItem(path, is_path=True)
|
|
||||||
if mt:
|
|
||||||
mi.mime_type = mt
|
|
||||||
mi.id = 'id%d'%m.next_id
|
|
||||||
m.next_id += 1
|
|
||||||
m.append(mi)
|
|
||||||
return m
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
ResourceCollection.__init__(self)
|
|
||||||
self.next_id = 1
|
|
||||||
|
|
||||||
|
|
||||||
def item(self, id):
|
|
||||||
for i in self:
|
|
||||||
if i.id == id:
|
|
||||||
return i
|
|
||||||
|
|
||||||
def id_for_path(self, path):
|
|
||||||
path = os.path.normpath(os.path.abspath(path))
|
|
||||||
for i in self:
|
|
||||||
if i.path and os.path.normpath(i.path) == path:
|
|
||||||
return i.id
|
|
||||||
|
|
||||||
def path_for_id(self, id):
|
|
||||||
for i in self:
|
|
||||||
if i.id == id:
|
|
||||||
return i.path
|
|
||||||
|
|
||||||
class Spine(ResourceCollection):
|
|
||||||
|
|
||||||
class Item(Resource):
|
|
||||||
|
|
||||||
def __init__(self, idfunc, *args, **kwargs):
|
|
||||||
Resource.__init__(self, *args, **kwargs)
|
|
||||||
self.is_linear = True
|
|
||||||
self.id = idfunc(self.path)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def from_opf_spine_element(spine, manifest):
|
|
||||||
s = Spine(manifest)
|
|
||||||
for itemref in spine.findAll(re.compile('itemref')):
|
|
||||||
if itemref.has_key('idref'):
|
|
||||||
r = Spine.Item(s.manifest.id_for_path,
|
|
||||||
s.manifest.path_for_id(itemref['idref']), is_path=True)
|
|
||||||
r.is_linear = itemref.get('linear', 'yes') == 'yes'
|
|
||||||
s.append(r)
|
|
||||||
return s
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def from_paths(paths, manifest):
|
|
||||||
s = Spine(manifest)
|
|
||||||
for path in paths:
|
|
||||||
try:
|
|
||||||
s.append(Spine.Item(s.manifest.id_for_path, path, is_path=True))
|
|
||||||
except:
|
|
||||||
continue
|
|
||||||
return s
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def __init__(self, manifest):
|
|
||||||
ResourceCollection.__init__(self)
|
|
||||||
self.manifest = manifest
|
|
||||||
|
|
||||||
|
|
||||||
def linear_items(self):
|
|
||||||
for r in self:
|
|
||||||
if r.is_linear:
|
|
||||||
yield r.path
|
|
||||||
|
|
||||||
def nonlinear_items(self):
|
|
||||||
for r in self:
|
|
||||||
if not r.is_linear:
|
|
||||||
yield r.path
|
|
||||||
|
|
||||||
def items(self):
|
|
||||||
for i in self:
|
|
||||||
yield i.path
|
|
||||||
|
|
||||||
|
|
||||||
class Guide(ResourceCollection):
|
|
||||||
|
|
||||||
class Reference(Resource):
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def from_opf_resource_item(ref, basedir):
|
|
||||||
title, href, type = ref.get('title', ''), ref['href'], ref['type']
|
|
||||||
res = Guide.Reference(href, basedir, is_path=False)
|
|
||||||
res.title = title
|
|
||||||
res.type = type
|
|
||||||
return res
|
|
||||||
|
|
||||||
def __repr__(self):
|
|
||||||
ans = '<reference type="%s" href="%s" '%(self.type, self.href())
|
|
||||||
if self.title:
|
|
||||||
ans += 'title="%s" '%self.title
|
|
||||||
return ans + '/>'
|
|
||||||
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def from_opf_guide(guide_elem, base_dir=os.getcwdu()):
|
|
||||||
coll = Guide()
|
|
||||||
for ref in guide_elem.findAll('reference'):
|
|
||||||
try:
|
|
||||||
ref = Guide.Reference.from_opf_resource_item(ref, base_dir)
|
|
||||||
coll.append(ref)
|
|
||||||
except:
|
|
||||||
continue
|
|
||||||
return coll
|
|
||||||
|
|
||||||
def set_cover(self, path):
|
|
||||||
map(self.remove, [i for i in self if 'cover' in i.type.lower()])
|
|
||||||
for type in ('cover', 'other.ms-coverimage-standard', 'other.ms-coverimage'):
|
|
||||||
self.append(Guide.Reference(path, is_path=True))
|
|
||||||
self[-1].type = type
|
|
||||||
self[-1].title = ''
|
|
||||||
|
|
||||||
|
|
||||||
class standard_field(object):
|
|
||||||
|
|
||||||
def __init__(self, name):
|
|
||||||
self.name = name
|
|
||||||
|
|
||||||
def __get__(self, obj, typ=None):
|
|
||||||
return getattr(obj, 'get_'+self.name)()
|
|
||||||
|
|
||||||
|
|
||||||
class OPF(MetaInformation):
|
|
||||||
|
|
||||||
MIMETYPE = 'application/oebps-package+xml'
|
|
||||||
ENTITY_PATTERN = re.compile(r'&(\S+?);')
|
|
||||||
|
|
||||||
uid = standard_field('uid')
|
|
||||||
application_id = standard_field('application_id')
|
|
||||||
title = standard_field('title')
|
|
||||||
authors = standard_field('authors')
|
|
||||||
language = standard_field('language')
|
|
||||||
title_sort = standard_field('title_sort')
|
|
||||||
author_sort = standard_field('author_sort')
|
|
||||||
comments = standard_field('comments')
|
|
||||||
category = standard_field('category')
|
|
||||||
publisher = standard_field('publisher')
|
|
||||||
isbn = standard_field('isbn')
|
|
||||||
cover = standard_field('cover')
|
|
||||||
series = standard_field('series')
|
|
||||||
series_index = standard_field('series_index')
|
|
||||||
rating = standard_field('rating')
|
|
||||||
tags = standard_field('tags')
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
raise NotImplementedError('Abstract base class')
|
|
||||||
|
|
||||||
@dynamic_property
|
|
||||||
def package(self):
|
|
||||||
def fget(self):
|
|
||||||
return self.soup.find(re.compile('package'))
|
|
||||||
return property(fget=fget)
|
|
||||||
|
|
||||||
@dynamic_property
|
|
||||||
def metadata(self):
|
|
||||||
def fget(self):
|
|
||||||
return self.package.find(re.compile('metadata'))
|
|
||||||
return property(fget=fget)
|
|
||||||
|
|
||||||
|
|
||||||
def get_title(self):
|
|
||||||
title = self.metadata.find('dc:title')
|
|
||||||
if title and title.string:
|
|
||||||
return self.ENTITY_PATTERN.sub(entity_to_unicode, title.string).strip()
|
|
||||||
return self.default_title.strip()
|
|
||||||
|
|
||||||
def get_authors(self):
|
|
||||||
creators = self.metadata.findAll('dc:creator')
|
|
||||||
for elem in creators:
|
|
||||||
role = elem.get('role')
|
|
||||||
if not role:
|
|
||||||
role = elem.get('opf:role')
|
|
||||||
if not role:
|
|
||||||
role = 'aut'
|
|
||||||
if role == 'aut' and elem.string:
|
|
||||||
raw = self.ENTITY_PATTERN.sub(entity_to_unicode, elem.string)
|
|
||||||
return string_to_authors(raw)
|
|
||||||
return []
|
|
||||||
|
|
||||||
def get_author_sort(self):
|
|
||||||
creators = self.metadata.findAll('dc:creator')
|
|
||||||
for elem in creators:
|
|
||||||
role = elem.get('role')
|
|
||||||
if not role:
|
|
||||||
role = elem.get('opf:role')
|
|
||||||
if role == 'aut':
|
|
||||||
fa = elem.get('file-as')
|
|
||||||
return self.ENTITY_PATTERN.sub(entity_to_unicode, fa).strip() if fa else None
|
|
||||||
return None
|
|
||||||
|
|
||||||
def get_title_sort(self):
|
|
||||||
title = self.package.find('dc:title')
|
|
||||||
if title:
|
|
||||||
if title.has_key('file-as'):
|
|
||||||
return title['file-as'].strip()
|
|
||||||
return None
|
|
||||||
|
|
||||||
def get_comments(self):
|
|
||||||
comments = self.soup.find('dc:description')
|
|
||||||
if comments and comments.string:
|
|
||||||
return self.ENTITY_PATTERN.sub(entity_to_unicode, comments.string).strip()
|
|
||||||
return None
|
|
||||||
|
|
||||||
def get_uid(self):
    """Return the <package> unique-identifier attribute, or None."""
    # Tag.get returns None when the attribute is missing, matching the
    # implicit None of the explicit has_key check it replaces.
    return self.package.get('unique-identifier')
|
|
||||||
def get_category(self):
    """Return the book category from <dc:type>, entity-decoded and
    stripped, or None when absent or empty."""
    node = self.soup.find('dc:type')
    if not (node and node.string):
        return None
    return self.ENTITY_PATTERN.sub(entity_to_unicode, node.string).strip()
|
|
||||||
def get_publisher(self):
    """Return the publisher from <dc:publisher>, entity-decoded and
    stripped, or None when absent or empty."""
    node = self.soup.find('dc:publisher')
    if not (node and node.string):
        return None
    return self.ENTITY_PATTERN.sub(entity_to_unicode, node.string).strip()
|
|
||||||
def get_isbn(self):
    """Return the ISBN from the first <dc:identifier> whose scheme
    (plain or namespaced ``opf:scheme``) is 'isbn', or None."""
    for ident in self.metadata.findAll('dc:identifier'):
        # Fall back to the namespaced attribute when the plain one is
        # missing or empty (matches the original falsy check)
        scheme = ident.get('scheme') or ident.get('opf:scheme')
        if scheme is not None and scheme.lower() == 'isbn' and ident.string:
            return str(ident.string).strip()
    return None
|
|
||||||
def get_language(self):
    """Return the language from <dc:language> (all contained text,
    joined and stripped), or the localized 'Unknown'."""
    node = self.metadata.find('dc:language')
    if node:
        return ''.join(node.findAll(text=True)).strip()
    return _('Unknown')
|
|
||||||
def get_application_id(self):
    """Return the application id from a <dc:identifier> whose scheme
    is 'libprs500' or 'calibre', or None."""
    for ident in self.metadata.findAll('dc:identifier'):
        # Note: unlike get_isbn this only falls back to opf:scheme
        # when the plain attribute is missing entirely (is None),
        # preserving the original semantics.
        scheme = ident.get('scheme', None)
        if scheme is None:
            scheme = ident.get('opf:scheme', None)
        if scheme in ('libprs500', 'calibre'):
            return str(ident.string).strip()
    return None
|
|
||||||
def get_cover(self):
    """Return the cover image path from the <guide> references, trying
    reference types in preference order, or None if there is no
    usable cover reference."""
    guide = getattr(self, 'guide', [])
    if not guide:
        # self.guide may exist but be None/empty
        guide = []
    # Only guide references whose type mentions 'cover' are candidates
    cover_refs = [r for r in guide if 'cover' in r.type.lower()]
    for wanted in ('cover', 'other.ms-coverimage-standard', 'other.ms-coverimage'):
        for ref in cover_refs:
            if ref.type.lower() == wanted and ref.path:
                return ref.path
|
|
||||||
def possible_cover_prefixes(self):
    """Return candidate cover file-name prefixes derived from the
    <dc:identifier> values (typically ISBNs with hyphens removed).

    Fixes a crash in the original: an identifier element with no text
    yielded item.string of None and ``None.replace`` raised
    AttributeError; such identifiers are now skipped. The scheme
    attribute was collected but never used, so it is no longer read.
    """
    prefixes = []
    for item in self.metadata.findAll('dc:identifier'):
        val = item.string
        # Skip empty identifier elements instead of crashing
        if val:
            prefixes.append(val.replace('-', ''))
    return prefixes
|
|
||||||
def get_series(self):
    """Return the series name from the <series> element, or None."""
    node = self.metadata.find('series')
    if node is None:
        return None
    return str(node.string).strip()
|
|
||||||
def get_series_index(self):
    """Return the series index as a float, or None when the
    <series-index> element is absent, empty or malformed."""
    node = self.metadata.find('series-index')
    if node and node.string:
        try:
            return float(str(node.string).strip())
        except ValueError:
            # Narrowed from a bare except: float() raises ValueError on
            # malformed text, and py2 str() failures raise UnicodeError
            # which is a ValueError subclass. The bare except also hid
            # unrelated errors like KeyboardInterrupt.
            return None
    return None
|
|
||||||
def get_rating(self):
    """Return the rating as an int, or None when the <rating> element
    is absent, empty or malformed."""
    node = self.metadata.find('rating')
    if node and node.string:
        try:
            return int(str(node.string).strip())
        except ValueError:
            # Narrowed from a bare except: int() raises ValueError on
            # malformed text (py2 str() failures raise UnicodeError,
            # a ValueError subclass). The bare except also hid
            # unrelated errors like KeyboardInterrupt.
            return None
    return None
|
|
||||||
def get_tags(self):
    """Return the list of subject tags (stripped unicode strings) from
    all <dc:subject> elements with text."""
    tags = []
    for node in self.soup.findAll('dc:subject'):
        if node.string:
            tags.append(unicode(node.string).strip())
    return tags
|
|
||||||
|
|
||||||
class OPFReader(OPF):
    """Parse an OPF package document, given either a file path or an
    open stream, populating the manifest, spine, TOC and guide."""

    def __init__(self, stream, dir=os.getcwdu()):
        # NOTE(review): the default for `dir` is evaluated once at class
        # definition time (the cwd at import), not per call — confirm
        # this is intended.
        manage = False
        if not hasattr(stream, 'read'):
            # A path was passed: open it ourselves and remember to
            # close it once the document has been read
            manage = True
            dir = os.path.dirname(stream)
            stream = open(stream, 'rb')
        # Fall back to the stream's file name when the OPF has no title
        self.default_title = stream.name if hasattr(stream, 'name') else 'Unknown'
        if hasattr(stream, 'seek'):
            stream.seek(0)
        self.soup = OPFSoup(stream.read())
        if manage:
            stream.close()
        self.manifest = Manifest()
        # Tag names are matched with regexes so namespace-prefixed
        # elements are also found
        m = self.soup.find(re.compile('manifest'))
        if m is not None:
            self.manifest = Manifest.from_opf_manifest_element(m, dir)
        self.spine = None
        spine = self.soup.find(re.compile('spine'))
        if spine is not None:
            # The spine resolves its idrefs against the manifest built above
            self.spine = Spine.from_opf_spine_element(spine, self.manifest)

        self.toc = TOC(base_path=dir)
        self.toc.read_from_opf(self)
        guide = self.soup.find(re.compile('guide'))
        if guide is not None:
            self.guide = Guide.from_opf_guide(guide, dir)
        self.base_dir = dir
        # (data, extension) pair; filled in elsewhere when a cover is loaded
        self.cover_data = (None, None)
|
|
||||||
class OPFCreator(MetaInformation):
    """Serialize a MetaInformation object as an OPF package document,
    optionally writing an accompanying NCX table of contents."""

    def __init__(self, base_path, *args, **kwargs):
        '''
        Initialize.
        @param base_path: An absolute path to the directory in which this OPF file
        will eventually be. This is used by the L{create_manifest} method
        to convert paths to files into relative paths.
        '''
        MetaInformation.__init__(self, *args, **kwargs)
        self.base_path = os.path.abspath(base_path)
        if self.application_id is None:
            # Guarantee every generated OPF has a unique application id
            self.application_id = str(uuid.uuid4())
        if not isinstance(self.toc, TOC):
            # Discard anything that is not a real TOC object
            self.toc = None
        if not self.authors:
            self.authors = [_('Unknown')]
        if self.guide is None:
            self.guide = Guide()
        if self.cover:
            self.guide.set_cover(self.cover)

    def create_manifest(self, entries):
        '''
        Create <manifest>

        `entries`: List of (path, mime-type) If mime-type is None it is autodetected
        '''
        # Resolve relative paths against base_path before building the
        # manifest, so every manifest item has an absolute path
        entries = map(lambda x: x if os.path.isabs(x[0]) else
                      (os.path.abspath(os.path.join(self.base_path, x[0])), x[1]),
                      entries)
        self.manifest = Manifest.from_paths(entries)
        self.manifest.set_basedir(self.base_path)

    def create_manifest_from_files_in(self, files_and_dirs):
        # Build the manifest from a mix of files and directories,
        # recursing into directories; mime-types are autodetected
        # (entries carry None as the type)
        entries = []

        def dodir(dir):
            # Collect every regular file under `dir`, recursively
            for spec in os.walk(dir):
                root, files = spec[0], spec[-1]
                for name in files:
                    path = os.path.join(root, name)
                    if os.path.isfile(path):
                        entries.append((path, None))

        for i in files_and_dirs:
            if os.path.isdir(i):
                dodir(i)
            else:
                entries.append((i, None))

        self.create_manifest(entries)

    def create_spine(self, entries):
        '''
        Create the <spine> element. Must first call :method:`create_manifest`.

        `entries`: List of paths
        '''
        # Resolve relative paths against base_path, mirroring create_manifest
        entries = map(lambda x: x if os.path.isabs(x) else
                      os.path.abspath(os.path.join(self.base_path, x)), entries)
        self.spine = Spine.from_paths(entries, self.manifest)

    def set_toc(self, toc):
        '''
        Set the toc. You must call :method:`create_spine` before calling this
        method.

        :param toc: A :class:`TOC` object
        '''
        self.toc = toc

    def create_guide(self, guide_element):
        # Build the <guide> from an existing parsed OPF guide element
        self.guide = Guide.from_opf_guide(guide_element, self.base_path)
        self.guide.set_basedir(self.base_path)

    def render(self, opf_stream, ncx_stream=None, ncx_manifest_entry=None):
        # Write the OPF document to opf_stream. If ncx_stream is given
        # and a TOC exists, the NCX is written as well.
        # ncx_manifest_entry (a path) replaces/creates the manifest item
        # with id 'ncx'.
        from calibre.utils.genshi.template import MarkupTemplate
        opf_template = open(P('templates/opf.xml'), 'rb').read()
        template = MarkupTemplate(opf_template)
        if self.manifest:
            self.manifest.set_basedir(self.base_path)
            if ncx_manifest_entry is not None:
                if not os.path.isabs(ncx_manifest_entry):
                    ncx_manifest_entry = os.path.join(self.base_path, ncx_manifest_entry)
                # Remove any pre-existing NCX manifest item before
                # appending the new one
                remove = [i for i in self.manifest if i.id == 'ncx']
                for item in remove:
                    self.manifest.remove(item)
                self.manifest.append(ManifestItem(ncx_manifest_entry, self.base_path))
                self.manifest[-1].id = 'ncx'
                self.manifest[-1].mime_type = 'application/x-dtbncx+xml'
        if not self.guide:
            self.guide = Guide()
        if self.cover:
            cover = self.cover
            if not os.path.isabs(cover):
                cover = os.path.abspath(os.path.join(self.base_path, cover))
            self.guide.set_cover(cover)
        self.guide.set_basedir(self.base_path)

        opf = template.generate(__appname__=__appname__, mi=self, __version__=__version__).render('xml')
        if not opf.startswith('<?xml '):
            # The template may omit the XML declaration; ensure one is present
            opf = '<?xml version="1.0" encoding="UTF-8"?>\n'+opf
        opf_stream.write(opf)
        opf_stream.flush()
        toc = getattr(self, 'toc', None)
        if toc is not None and ncx_stream is not None:
            toc.render(ncx_stream, self.application_id)
            ncx_stream.flush()
|
@ -97,8 +97,6 @@ def _config():
|
|||||||
help=_('Overwrite author and title with new metadata'))
|
help=_('Overwrite author and title with new metadata'))
|
||||||
c.add_opt('enforce_cpu_limit', default=True,
|
c.add_opt('enforce_cpu_limit', default=True,
|
||||||
help=_('Limit max simultaneous jobs to number of CPUs'))
|
help=_('Limit max simultaneous jobs to number of CPUs'))
|
||||||
c.add_opt('user_categories', default={},
|
|
||||||
help=_('User-created tag browser categories'))
|
|
||||||
|
|
||||||
return ConfigProxy(c)
|
return ConfigProxy(c)
|
||||||
|
|
||||||
@ -229,19 +227,6 @@ def info_dialog(parent, title, msg, det_msg='', show=False):
|
|||||||
return d
|
return d
|
||||||
|
|
||||||
|
|
||||||
def human_readable(size):
|
|
||||||
""" Convert a size in bytes into a human readable form """
|
|
||||||
divisor, suffix = 1, "B"
|
|
||||||
for i, candidate in enumerate(('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB')):
|
|
||||||
if size < 1024**(i+1):
|
|
||||||
divisor, suffix = 1024**(i), candidate
|
|
||||||
break
|
|
||||||
size = str(float(size)/divisor)
|
|
||||||
if size.find(".") > -1:
|
|
||||||
size = size[:size.find(".")+2]
|
|
||||||
if size.endswith('.0'):
|
|
||||||
size = size[:-2]
|
|
||||||
return size + " " + suffix
|
|
||||||
|
|
||||||
class Dispatcher(QObject):
|
class Dispatcher(QObject):
|
||||||
'''Convenience class to ensure that a function call always happens in the
|
'''Convenience class to ensure that a function call always happens in the
|
||||||
|
@ -7,7 +7,7 @@ from PyQt4.QtCore import SIGNAL, Qt
|
|||||||
from PyQt4.QtGui import QDialog, QIcon, QListWidgetItem
|
from PyQt4.QtGui import QDialog, QIcon, QListWidgetItem
|
||||||
|
|
||||||
from calibre.gui2.dialogs.tag_categories_ui import Ui_TagCategories
|
from calibre.gui2.dialogs.tag_categories_ui import Ui_TagCategories
|
||||||
from calibre.gui2 import config
|
from calibre.utils.config import prefs
|
||||||
from calibre.gui2.dialogs.confirm_delete import confirm
|
from calibre.gui2.dialogs.confirm_delete import confirm
|
||||||
from calibre.constants import islinux
|
from calibre.constants import islinux
|
||||||
|
|
||||||
@ -22,7 +22,7 @@ class Item:
|
|||||||
return 'name=%s, label=%s, index=%s, exists='%(self.name, self.label, self.index, self.exists)
|
return 'name=%s, label=%s, index=%s, exists='%(self.name, self.label, self.index, self.exists)
|
||||||
|
|
||||||
class TagCategories(QDialog, Ui_TagCategories):
|
class TagCategories(QDialog, Ui_TagCategories):
|
||||||
category_labels_orig = ['', 'author', 'series', 'publisher', 'tag']
|
category_labels_orig = ['', 'authors', 'series', 'publishers', 'tags']
|
||||||
|
|
||||||
def __init__(self, window, db, index=None):
|
def __init__(self, window, db, index=None):
|
||||||
QDialog.__init__(self, window)
|
QDialog.__init__(self, window)
|
||||||
@ -64,7 +64,7 @@ class TagCategories(QDialog, Ui_TagCategories):
|
|||||||
self.all_items.append(t)
|
self.all_items.append(t)
|
||||||
self.all_items_dict[label+':'+n] = t
|
self.all_items_dict[label+':'+n] = t
|
||||||
|
|
||||||
self.categories = dict.copy(config['user_categories'])
|
self.categories = dict.copy(prefs['user_categories'])
|
||||||
if self.categories is None:
|
if self.categories is None:
|
||||||
self.categories = {}
|
self.categories = {}
|
||||||
for cat in self.categories:
|
for cat in self.categories:
|
||||||
@ -181,7 +181,7 @@ class TagCategories(QDialog, Ui_TagCategories):
|
|||||||
|
|
||||||
def accept(self):
|
def accept(self):
|
||||||
self.save_category()
|
self.save_category()
|
||||||
config['user_categories'] = self.categories
|
prefs['user_categories'] = self.categories
|
||||||
QDialog.accept(self)
|
QDialog.accept(self)
|
||||||
|
|
||||||
def save_category(self):
|
def save_category(self):
|
||||||
|
@ -650,7 +650,7 @@
|
|||||||
<normaloff>:/images/merge_books.svg</normaloff>:/images/merge_books.svg</iconset>
|
<normaloff>:/images/merge_books.svg</normaloff>:/images/merge_books.svg</iconset>
|
||||||
</property>
|
</property>
|
||||||
<property name="text">
|
<property name="text">
|
||||||
<string>Merge books</string>
|
<string>Merge book records</string>
|
||||||
</property>
|
</property>
|
||||||
<property name="shortcut">
|
<property name="shortcut">
|
||||||
<string>M</string>
|
<string>M</string>
|
||||||
|
@ -126,7 +126,7 @@ class TagTreeItem(object): # {{{
|
|||||||
TAG = 1
|
TAG = 1
|
||||||
ROOT = 2
|
ROOT = 2
|
||||||
|
|
||||||
def __init__(self, data=None, category_icon=None, icon_map=None, parent=None):
|
def __init__(self, data=None, category_icon=None, icon_map=None, parent=None, tooltip=None):
|
||||||
self.parent = parent
|
self.parent = parent
|
||||||
self.children = []
|
self.children = []
|
||||||
if self.parent is not None:
|
if self.parent is not None:
|
||||||
@ -144,6 +144,7 @@ class TagTreeItem(object): # {{{
|
|||||||
elif self.type == self.TAG:
|
elif self.type == self.TAG:
|
||||||
icon_map[0] = data.icon
|
icon_map[0] = data.icon
|
||||||
self.tag, self.icon_state_map = data, list(map(QVariant, icon_map))
|
self.tag, self.icon_state_map = data, list(map(QVariant, icon_map))
|
||||||
|
self.tooltip = tooltip
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
if self.type == self.ROOT:
|
if self.type == self.ROOT:
|
||||||
@ -175,6 +176,8 @@ class TagTreeItem(object): # {{{
|
|||||||
return self.icon
|
return self.icon
|
||||||
if role == Qt.FontRole:
|
if role == Qt.FontRole:
|
||||||
return self.bold_font
|
return self.bold_font
|
||||||
|
if role == Qt.ToolTipRole and self.tooltip is not None:
|
||||||
|
return QVariant(self.tooltip)
|
||||||
return NONE
|
return NONE
|
||||||
|
|
||||||
def tag_data(self, role):
|
def tag_data(self, role):
|
||||||
@ -197,33 +200,32 @@ class TagTreeItem(object): # {{{
|
|||||||
|
|
||||||
class TagsModel(QAbstractItemModel): # {{{
|
class TagsModel(QAbstractItemModel): # {{{
|
||||||
|
|
||||||
categories_orig = [_('Authors'), _('Series'), _('Formats'), _('Publishers'),
|
|
||||||
_('Ratings'), _('News'), _('Tags')]
|
|
||||||
row_map_orig = ['author', 'series', 'format', 'publisher', 'rating',
|
|
||||||
'news', 'tag']
|
|
||||||
tags_categories_start= 7
|
|
||||||
search_keys=['search', _('Searches')]
|
|
||||||
|
|
||||||
def __init__(self, db, parent=None):
|
def __init__(self, db, parent=None):
|
||||||
QAbstractItemModel.__init__(self, parent)
|
QAbstractItemModel.__init__(self, parent)
|
||||||
self.cat_icon_map_orig = list(map(QIcon, [I('user_profile.svg'),
|
|
||||||
I('series.svg'), I('book.svg'), I('publisher.png'), I('star.png'),
|
# must do this here because 'QPixmap: Must construct a QApplication
|
||||||
I('news.svg'), I('tags.svg')]))
|
# before a QPaintDevice'
|
||||||
|
self.category_icon_map = {'authors': QIcon(I('user_profile.svg')),
|
||||||
|
'series': QIcon(I('series.svg')),
|
||||||
|
'formats':QIcon(I('book.svg')),
|
||||||
|
'publishers': QIcon(I('publisher.png')),
|
||||||
|
'ratings':QIcon(I('star.png')),
|
||||||
|
'news':QIcon(I('news.svg')),
|
||||||
|
'tags':QIcon(I('tags.svg')),
|
||||||
|
'*custom':QIcon(I('column.svg')),
|
||||||
|
'*user':QIcon(I('drawer.svg')),
|
||||||
|
'search':QIcon(I('search.svg'))}
|
||||||
self.icon_state_map = [None, QIcon(I('plus.svg')), QIcon(I('minus.svg'))]
|
self.icon_state_map = [None, QIcon(I('plus.svg')), QIcon(I('minus.svg'))]
|
||||||
self.custcol_icon = QIcon(I('column.svg'))
|
|
||||||
self.search_icon = QIcon(I('search.svg'))
|
|
||||||
self.usercat_icon = QIcon(I('drawer.svg'))
|
|
||||||
self.label_to_icon_map = dict(map(None, self.row_map_orig, self.cat_icon_map_orig))
|
|
||||||
self.label_to_icon_map['*custom'] = self.custcol_icon
|
|
||||||
self.db = db
|
self.db = db
|
||||||
self.search_restriction = ''
|
self.search_restriction = ''
|
||||||
self.user_categories = {}
|
|
||||||
self.ignore_next_search = 0
|
self.ignore_next_search = 0
|
||||||
data = self.get_node_tree(config['sort_by_popularity'])
|
data = self.get_node_tree(config['sort_by_popularity'])
|
||||||
self.root_item = TagTreeItem()
|
self.root_item = TagTreeItem()
|
||||||
for i, r in enumerate(self.row_map):
|
for i, r in enumerate(self.row_map):
|
||||||
c = TagTreeItem(parent=self.root_item,
|
c = TagTreeItem(parent=self.root_item,
|
||||||
data=self.categories[i], category_icon=self.cat_icon_map[i])
|
data=self.categories[i],
|
||||||
|
category_icon=self.category_icon_map[r],
|
||||||
|
tooltip=_('The lookup/search name is "{0}"').format(r))
|
||||||
for tag in data[r]:
|
for tag in data[r]:
|
||||||
TagTreeItem(parent=c, data=tag, icon_map=self.icon_state_map)
|
TagTreeItem(parent=c, data=tag, icon_map=self.icon_state_map)
|
||||||
|
|
||||||
@ -233,74 +235,21 @@ class TagsModel(QAbstractItemModel): # {{{
|
|||||||
def get_node_tree(self, sort):
|
def get_node_tree(self, sort):
|
||||||
self.row_map = []
|
self.row_map = []
|
||||||
self.categories = []
|
self.categories = []
|
||||||
# strip the icons after the 'standard' categories. We will put them back later
|
|
||||||
if self.tags_categories_start < len(self.row_map_orig):
|
|
||||||
self.cat_icon_map = self.cat_icon_map_orig[:self.tags_categories_start-len(self.row_map_orig)]
|
|
||||||
else:
|
|
||||||
self.cat_icon_map = self.cat_icon_map_orig[:]
|
|
||||||
|
|
||||||
self.user_categories = dict.copy(config['user_categories'])
|
|
||||||
column_map = config['column_map']
|
|
||||||
|
|
||||||
for i in range(0, self.tags_categories_start): # First the standard categories
|
|
||||||
self.row_map.append(self.row_map_orig[i])
|
|
||||||
self.categories.append(self.categories_orig[i])
|
|
||||||
if len(self.search_restriction):
|
if len(self.search_restriction):
|
||||||
data = self.db.get_categories(sort_on_count=sort, icon_map=self.label_to_icon_map,
|
data = self.db.get_categories(sort_on_count=sort, icon_map=self.category_icon_map,
|
||||||
ids=self.db.search(self.search_restriction, return_matches=True))
|
ids=self.db.search(self.search_restriction, return_matches=True))
|
||||||
else:
|
else:
|
||||||
data = self.db.get_categories(sort_on_count=sort, icon_map=self.label_to_icon_map)
|
data = self.db.get_categories(sort_on_count=sort, icon_map=self.category_icon_map)
|
||||||
|
|
||||||
for c in data: # now the custom columns
|
tb_categories = self.db.get_tag_browser_categories()
|
||||||
if c not in self.row_map_orig and c in column_map:
|
for category in tb_categories.iterkeys():
|
||||||
self.row_map.append(c)
|
if category in data: # They should always be there, but ...
|
||||||
self.categories.append(self.db.custom_column_label_map[c]['name'])
|
self.row_map.append(category)
|
||||||
self.cat_icon_map.append(self.custcol_icon)
|
self.categories.append(tb_categories[category]['name'])
|
||||||
|
|
||||||
# Now the rest of the normal tag categories
|
|
||||||
for i in range(self.tags_categories_start, len(self.row_map_orig)):
|
|
||||||
self.row_map.append(self.row_map_orig[i])
|
|
||||||
self.categories.append(self.categories_orig[i])
|
|
||||||
self.cat_icon_map.append(self.cat_icon_map_orig[i])
|
|
||||||
|
|
||||||
# Clean up the author's tags, getting rid of the '|' characters
|
|
||||||
if data['author'] is not None:
|
|
||||||
for t in data['author']:
|
|
||||||
t.name = t.name.replace('|', ',')
|
|
||||||
|
|
||||||
# Now do the user-defined categories. There is a time/space tradeoff here.
|
|
||||||
# By converting the tags into a map, we can do the verification in the category
|
|
||||||
# loop much faster, at the cost of duplicating the categories lists.
|
|
||||||
taglist = {}
|
|
||||||
for c in self.row_map:
|
|
||||||
taglist[c] = dict(map(lambda t:(t.name, t), data[c]))
|
|
||||||
|
|
||||||
for c in self.user_categories:
|
|
||||||
l = []
|
|
||||||
for (name,label,ign) in self.user_categories[c]:
|
|
||||||
if label in taglist and name in taglist[label]: # use same node as the complete category
|
|
||||||
l.append(taglist[label][name])
|
|
||||||
# else: do nothing, to eliminate nodes that have zero counts
|
|
||||||
if config['sort_by_popularity']:
|
|
||||||
data[c+'*'] = sorted(l, cmp=(lambda x, y: cmp(x.count, y.count)))
|
|
||||||
else:
|
|
||||||
data[c+'*'] = sorted(l, cmp=(lambda x, y: cmp(x.name.lower(), y.name.lower())))
|
|
||||||
self.row_map.append(c+'*')
|
|
||||||
self.categories.append(c)
|
|
||||||
self.cat_icon_map.append(self.usercat_icon)
|
|
||||||
|
|
||||||
data['search'] = self.get_search_nodes(self.search_icon) # Add the search category
|
|
||||||
self.row_map.append(self.search_keys[0])
|
|
||||||
self.categories.append(self.search_keys[1])
|
|
||||||
self.cat_icon_map.append(self.search_icon)
|
|
||||||
return data
|
return data
|
||||||
|
|
||||||
def get_search_nodes(self, icon):
|
|
||||||
l = []
|
|
||||||
for i in saved_searches.names():
|
|
||||||
l.append(Tag(i, tooltip=saved_searches.lookup(i), icon=icon))
|
|
||||||
return l
|
|
||||||
|
|
||||||
def refresh(self):
|
def refresh(self):
|
||||||
data = self.get_node_tree(config['sort_by_popularity']) # get category data
|
data = self.get_node_tree(config['sort_by_popularity']) # get category data
|
||||||
for i, r in enumerate(self.row_map):
|
for i, r in enumerate(self.row_map):
|
||||||
|
@ -183,7 +183,7 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI):
|
|||||||
_('Error communicating with device'), ' ')
|
_('Error communicating with device'), ' ')
|
||||||
self.device_error_dialog.setModal(Qt.NonModal)
|
self.device_error_dialog.setModal(Qt.NonModal)
|
||||||
self.tb_wrapper = textwrap.TextWrapper(width=40)
|
self.tb_wrapper = textwrap.TextWrapper(width=40)
|
||||||
self.device_connected = False
|
self.device_connected = None
|
||||||
self.viewers = collections.deque()
|
self.viewers = collections.deque()
|
||||||
self.content_server = None
|
self.content_server = None
|
||||||
self.system_tray_icon = SystemTrayIcon(QIcon(I('library.png')), self)
|
self.system_tray_icon = SystemTrayIcon(QIcon(I('library.png')), self)
|
||||||
@ -675,6 +675,15 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI):
|
|||||||
self._sync_menu.fetch_annotations.connect(self.fetch_annotations)
|
self._sync_menu.fetch_annotations.connect(self.fetch_annotations)
|
||||||
self._sync_menu.connect_to_folder.connect(self.connect_to_folder)
|
self._sync_menu.connect_to_folder.connect(self.connect_to_folder)
|
||||||
self._sync_menu.disconnect_from_folder.connect(self.disconnect_from_folder)
|
self._sync_menu.disconnect_from_folder.connect(self.disconnect_from_folder)
|
||||||
|
if self.device_connected:
|
||||||
|
self._sync_menu.connect_to_folder_action.setEnabled(False)
|
||||||
|
if self.device_connected == 'folder':
|
||||||
|
self._sync_menu.disconnect_from_folder_action.setEnabled(True)
|
||||||
|
else:
|
||||||
|
self._sync_menu.disconnect_from_folder_action.setEnabled(False)
|
||||||
|
else:
|
||||||
|
self._sync_menu.connect_to_folder_action.setEnabled(True)
|
||||||
|
self._sync_menu.disconnect_from_folder_action.setEnabled(False)
|
||||||
|
|
||||||
def add_spare_server(self, *args):
|
def add_spare_server(self, *args):
|
||||||
self.spare_servers.append(Server(limit=int(config['worker_limit']/2.0)))
|
self.spare_servers.append(Server(limit=int(config['worker_limit']/2.0)))
|
||||||
@ -944,7 +953,7 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI):
|
|||||||
self.status_bar.showMessage(_('Device: ')+\
|
self.status_bar.showMessage(_('Device: ')+\
|
||||||
self.device_manager.device.__class__.get_gui_name()+\
|
self.device_manager.device.__class__.get_gui_name()+\
|
||||||
_(' detected.'), 3000)
|
_(' detected.'), 3000)
|
||||||
self.device_connected = True
|
self.device_connected = 'device' if not is_folder_device else 'folder'
|
||||||
self._sync_menu.enable_device_actions(True,
|
self._sync_menu.enable_device_actions(True,
|
||||||
self.device_manager.device.card_prefix(),
|
self.device_manager.device.card_prefix(),
|
||||||
self.device_manager.device)
|
self.device_manager.device)
|
||||||
@ -955,7 +964,7 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI):
|
|||||||
self._sync_menu.connect_to_folder_action.setEnabled(True)
|
self._sync_menu.connect_to_folder_action.setEnabled(True)
|
||||||
self._sync_menu.disconnect_from_folder_action.setEnabled(False)
|
self._sync_menu.disconnect_from_folder_action.setEnabled(False)
|
||||||
self.save_device_view_settings()
|
self.save_device_view_settings()
|
||||||
self.device_connected = False
|
self.device_connected = None
|
||||||
self._sync_menu.enable_device_actions(False)
|
self._sync_menu.enable_device_actions(False)
|
||||||
self.location_view.model().update_devices()
|
self.location_view.model().update_devices()
|
||||||
self.vanity.setText(self.vanity_template%\
|
self.vanity.setText(self.vanity_template%\
|
||||||
|
@ -13,11 +13,10 @@ from PyQt4.Qt import QListView, QIcon, QFont, QLabel, QListWidget, \
|
|||||||
QAbstractButton, QPainter, QLineEdit, QComboBox, \
|
QAbstractButton, QPainter, QLineEdit, QComboBox, \
|
||||||
QMenu, QStringListModel, QCompleter, QStringList
|
QMenu, QStringListModel, QCompleter, QStringList
|
||||||
|
|
||||||
from calibre.gui2 import human_readable, NONE, \
|
from calibre.gui2 import NONE, error_dialog, pixmap_to_data, dynamic
|
||||||
error_dialog, pixmap_to_data, dynamic
|
|
||||||
|
|
||||||
from calibre.gui2.filename_pattern_ui import Ui_Form
|
from calibre.gui2.filename_pattern_ui import Ui_Form
|
||||||
from calibre import fit_image
|
from calibre import fit_image, human_readable
|
||||||
from calibre.utils.fonts import fontconfig
|
from calibre.utils.fonts import fontconfig
|
||||||
from calibre.ebooks import BOOK_EXTENSIONS
|
from calibre.ebooks import BOOK_EXTENSIONS
|
||||||
from calibre.ebooks.metadata.meta import metadata_from_filename
|
from calibre.ebooks.metadata.meta import metadata_from_filename
|
||||||
|
@ -100,6 +100,13 @@ class Booq(Device):
|
|||||||
output_format = 'EPUB'
|
output_format = 'EPUB'
|
||||||
id = 'booq'
|
id = 'booq'
|
||||||
|
|
||||||
|
class TheBook(Device):
|
||||||
|
name = 'The Book'
|
||||||
|
manufacturer = 'Augen'
|
||||||
|
output_profile = 'prs505'
|
||||||
|
output_format = 'EPUB'
|
||||||
|
id = 'thebook'
|
||||||
|
|
||||||
class Avant(Booq):
|
class Avant(Booq):
|
||||||
name = 'Booq Avant'
|
name = 'Booq Avant'
|
||||||
|
|
||||||
|
@ -626,20 +626,24 @@ class ResultCache(SearchQueryParser):
|
|||||||
self._map.sort(cmp=fcmp, reverse=not ascending)
|
self._map.sort(cmp=fcmp, reverse=not ascending)
|
||||||
self._map_filtered = [id for id in self._map if id in self._map_filtered]
|
self._map_filtered = [id for id in self._map if id in self._map_filtered]
|
||||||
|
|
||||||
def search(self, query, return_matches=False):
|
def search(self, query, return_matches=False,
|
||||||
|
ignore_search_restriction=False):
|
||||||
if not query or not query.strip():
|
if not query or not query.strip():
|
||||||
|
q = ''
|
||||||
|
if not ignore_search_restriction:
|
||||||
q = self.search_restriction
|
q = self.search_restriction
|
||||||
else:
|
elif not ignore_search_restriction:
|
||||||
q = '%s (%s)' % (self.search_restriction, query)
|
q = u'%s (%s)' % (self.search_restriction, query)
|
||||||
if not q:
|
if not q:
|
||||||
if return_matches:
|
if return_matches:
|
||||||
return list(self.map) # when return_matches, do not update the maps!
|
return list(self._map) # when return_matches, do not update the maps!
|
||||||
self._map_filtered = list(self._map)
|
self._map_filtered = list(self._map)
|
||||||
return []
|
return []
|
||||||
matches = sorted(self.parse(q))
|
matches = sorted(self.parse(q))
|
||||||
|
ans = [id for id in self._map if id in matches]
|
||||||
if return_matches:
|
if return_matches:
|
||||||
return [id for id in self._map if id in matches]
|
return ans
|
||||||
self._map_filtered = [id for id in self._map if id in matches]
|
self._map_filtered = ans
|
||||||
return []
|
return []
|
||||||
|
|
||||||
def set_search_restriction(self, s):
|
def set_search_restriction(self, s):
|
||||||
|
@ -9,99 +9,18 @@ Command line interface to the calibre database.
|
|||||||
|
|
||||||
import sys, os, cStringIO
|
import sys, os, cStringIO
|
||||||
from textwrap import TextWrapper
|
from textwrap import TextWrapper
|
||||||
from urllib import quote
|
|
||||||
|
|
||||||
from calibre import terminal_controller, preferred_encoding, prints
|
from calibre import terminal_controller, preferred_encoding, prints
|
||||||
from calibre.utils.config import OptionParser, prefs
|
from calibre.utils.config import OptionParser, prefs
|
||||||
from calibre.ebooks.metadata.meta import get_metadata
|
from calibre.ebooks.metadata.meta import get_metadata
|
||||||
from calibre.library.database2 import LibraryDatabase2
|
from calibre.library.database2 import LibraryDatabase2
|
||||||
from calibre.ebooks.metadata.opf2 import OPFCreator, OPF
|
from calibre.ebooks.metadata.opf2 import OPFCreator, OPF
|
||||||
from calibre.utils.genshi.template import MarkupTemplate
|
|
||||||
from calibre.utils.date import isoformat
|
from calibre.utils.date import isoformat
|
||||||
|
|
||||||
FIELDS = set(['title', 'authors', 'author_sort', 'publisher', 'rating',
|
FIELDS = set(['title', 'authors', 'author_sort', 'publisher', 'rating',
|
||||||
'timestamp', 'size', 'tags', 'comments', 'series', 'series_index',
|
'timestamp', 'size', 'tags', 'comments', 'series', 'series_index',
|
||||||
'formats', 'isbn', 'uuid', 'pubdate', 'cover'])
|
'formats', 'isbn', 'uuid', 'pubdate', 'cover'])
|
||||||
|
|
||||||
XML_TEMPLATE = '''\
|
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<calibredb xmlns:py="http://genshi.edgewall.org/">
|
|
||||||
<py:for each="record in data">
|
|
||||||
<record>
|
|
||||||
<id>${record['id']}</id>
|
|
||||||
<uuid>${record['uuid']}</uuid>
|
|
||||||
<title>${record['title']}</title>
|
|
||||||
<authors sort="${record['author_sort']}">
|
|
||||||
<py:for each="author in record['authors']">
|
|
||||||
<author>$author</author>
|
|
||||||
</py:for>
|
|
||||||
</authors>
|
|
||||||
<publisher>${record['publisher']}</publisher>
|
|
||||||
<rating>${record['rating']}</rating>
|
|
||||||
<date>${record['timestamp'].isoformat()}</date>
|
|
||||||
<pubdate>${record['pubdate'].isoformat()}</pubdate>
|
|
||||||
<size>${record['size']}</size>
|
|
||||||
<tags py:if="record['tags']">
|
|
||||||
<py:for each="tag in record['tags']">
|
|
||||||
<tag>$tag</tag>
|
|
||||||
</py:for>
|
|
||||||
</tags>
|
|
||||||
<comments>${record['comments']}</comments>
|
|
||||||
<series py:if="record['series']" index="${record['series_index']}">${record['series']}</series>
|
|
||||||
<isbn>${record['isbn']}</isbn>
|
|
||||||
<cover py:if="record['cover']">${record['cover'].replace(os.sep, '/')}</cover>
|
|
||||||
<formats py:if="record['formats']">
|
|
||||||
<py:for each="path in record['formats']">
|
|
||||||
<format>${path.replace(os.sep, '/')}</format>
|
|
||||||
</py:for>
|
|
||||||
</formats>
|
|
||||||
</record>
|
|
||||||
</py:for>
|
|
||||||
</calibredb>
|
|
||||||
'''
|
|
||||||
|
|
||||||
STANZA_TEMPLATE='''\
|
|
||||||
<?xml version="1.0" encoding="utf-8"?>
|
|
||||||
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:py="http://genshi.edgewall.org/">
|
|
||||||
<title>calibre Library</title>
|
|
||||||
<author>
|
|
||||||
<name>calibre</name>
|
|
||||||
<uri>http://calibre-ebook.com</uri>
|
|
||||||
</author>
|
|
||||||
<id>$id</id>
|
|
||||||
<updated>${updated.isoformat()}</updated>
|
|
||||||
<subtitle>
|
|
||||||
${subtitle}
|
|
||||||
</subtitle>
|
|
||||||
<py:for each="record in data">
|
|
||||||
<entry>
|
|
||||||
<title>${record['title']}</title>
|
|
||||||
<id>urn:calibre:${record['uuid']}</id>
|
|
||||||
<author><name>${record['author_sort']}</name></author>
|
|
||||||
<updated>${record['timestamp'].isoformat()}</updated>
|
|
||||||
<link type="application/epub+zip" href="${quote(record['fmt_epub'].replace(sep, '/'))}"/>
|
|
||||||
<link py:if="record['cover']" rel="x-stanza-cover-image" type="image/png" href="${quote(record['cover'].replace(sep, '/'))}"/>
|
|
||||||
<link py:if="record['cover']" rel="x-stanza-cover-image-thumbnail" type="image/png" href="${quote(record['cover'].replace(sep, '/'))}"/>
|
|
||||||
<content type="xhtml">
|
|
||||||
<div xmlns="http://www.w3.org/1999/xhtml">
|
|
||||||
<py:for each="f in ('authors', 'publisher', 'rating', 'tags', 'series', 'isbn')">
|
|
||||||
<py:if test="record[f]">
|
|
||||||
${f.capitalize()}:${unicode(', '.join(record[f]) if f=='tags' else record[f])}
|
|
||||||
<py:if test="f =='series'"># ${str(record['series_index'])}</py:if>
|
|
||||||
<br/>
|
|
||||||
</py:if>
|
|
||||||
</py:for>
|
|
||||||
<py:if test="record['comments']">
|
|
||||||
<br/>
|
|
||||||
${record['comments']}
|
|
||||||
</py:if>
|
|
||||||
</div>
|
|
||||||
</content>
|
|
||||||
</entry>
|
|
||||||
</py:for>
|
|
||||||
</feed>
|
|
||||||
'''
|
|
||||||
|
|
||||||
def send_message(msg=''):
|
def send_message(msg=''):
|
||||||
prints('Notifying calibre of the change')
|
prints('Notifying calibre of the change')
|
||||||
from calibre.utils.ipc import RC
|
from calibre.utils.ipc import RC
|
||||||
@ -130,18 +49,17 @@ def get_db(dbpath, options):
|
|||||||
return LibraryDatabase2(dbpath)
|
return LibraryDatabase2(dbpath)
|
||||||
|
|
||||||
def do_list(db, fields, afields, sort_by, ascending, search_text, line_width, separator,
|
def do_list(db, fields, afields, sort_by, ascending, search_text, line_width, separator,
|
||||||
prefix, output_format, subtitle='Books in the calibre database'):
|
prefix, subtitle='Books in the calibre database'):
|
||||||
if sort_by:
|
if sort_by:
|
||||||
db.sort(sort_by, ascending)
|
db.sort(sort_by, ascending)
|
||||||
if search_text:
|
if search_text:
|
||||||
db.search(search_text)
|
db.search(search_text)
|
||||||
authors_to_string = output_format in ['stanza', 'text']
|
data = db.get_data_as_dict(prefix, authors_as_string=True)
|
||||||
data = db.get_data_as_dict(prefix, authors_as_string=authors_to_string)
|
|
||||||
fields = ['id'] + fields
|
fields = ['id'] + fields
|
||||||
title_fields = fields
|
title_fields = fields
|
||||||
fields = [db.custom_column_label_map[x[1:]]['num'] if x[0]=='*'
|
fields = [db.custom_column_label_map[x[1:]]['num'] if x[0]=='*'
|
||||||
else x for x in fields]
|
else x for x in fields]
|
||||||
if output_format == 'text':
|
|
||||||
for f in data:
|
for f in data:
|
||||||
fmts = [x for x in f['formats'] if x is not None]
|
fmts = [x for x in f['formats'] if x is not None]
|
||||||
f['formats'] = u'[%s]'%u','.join(fmts)
|
f['formats'] = u'[%s]'%u','.join(fmts)
|
||||||
@ -192,19 +110,6 @@ def do_list(db, fields, afields, sort_by, ascending, search_text, line_width, se
|
|||||||
o.write(filler+separator)
|
o.write(filler+separator)
|
||||||
print >>o
|
print >>o
|
||||||
return o.getvalue()
|
return o.getvalue()
|
||||||
elif output_format == 'xml':
|
|
||||||
template = MarkupTemplate(XML_TEMPLATE)
|
|
||||||
return template.generate(data=data, os=os).render('xml')
|
|
||||||
elif output_format == 'stanza':
|
|
||||||
data = [i for i in data if i.has_key('fmt_epub')]
|
|
||||||
for x in data:
|
|
||||||
if isinstance(x['fmt_epub'], unicode):
|
|
||||||
x['fmt_epub'] = x['fmt_epub'].encode('utf-8')
|
|
||||||
if isinstance(x['cover'], unicode):
|
|
||||||
x['cover'] = x['cover'].encode('utf-8')
|
|
||||||
template = MarkupTemplate(STANZA_TEMPLATE)
|
|
||||||
return template.generate(id="urn:calibre:main", data=data, subtitle=subtitle,
|
|
||||||
sep=os.sep, quote=quote, updated=db.last_modified()).render('xml')
|
|
||||||
|
|
||||||
def list_option_parser(db=None):
|
def list_option_parser(db=None):
|
||||||
fields = set(FIELDS)
|
fields = set(FIELDS)
|
||||||
@ -236,9 +141,6 @@ List the books available in the calibre database.
|
|||||||
help=_('The maximum width of a single line in the output. Defaults to detecting screen size.'))
|
help=_('The maximum width of a single line in the output. Defaults to detecting screen size.'))
|
||||||
parser.add_option('--separator', default=' ', help=_('The string used to separate fields. Default is a space.'))
|
parser.add_option('--separator', default=' ', help=_('The string used to separate fields. Default is a space.'))
|
||||||
parser.add_option('--prefix', default=None, help=_('The prefix for all file paths. Default is the absolute path to the library folder.'))
|
parser.add_option('--prefix', default=None, help=_('The prefix for all file paths. Default is the absolute path to the library folder.'))
|
||||||
of = ['text', 'xml', 'stanza']
|
|
||||||
parser.add_option('--output-format', choices=of, default='text',
|
|
||||||
help=_('The format in which to output the data. Available choices: %s. Defaults is text.')%of)
|
|
||||||
return parser
|
return parser
|
||||||
|
|
||||||
|
|
||||||
@ -272,7 +174,7 @@ def command_list(args, dbpath):
|
|||||||
return 1
|
return 1
|
||||||
|
|
||||||
print do_list(db, fields, afields, opts.sort_by, opts.ascending, opts.search, opts.line_width, opts.separator,
|
print do_list(db, fields, afields, opts.sort_by, opts.ascending, opts.search, opts.line_width, opts.separator,
|
||||||
opts.prefix, opts.output_format)
|
opts.prefix)
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
@ -141,11 +141,15 @@ class CustomColumns(object):
|
|||||||
}
|
}
|
||||||
|
|
||||||
# Create Tag Browser categories for custom columns
|
# Create Tag Browser categories for custom columns
|
||||||
for i, v in self.custom_column_num_map.items():
|
for k in sorted(self.custom_column_label_map.keys()):
|
||||||
|
v = self.custom_column_label_map[k]
|
||||||
if v['normalized']:
|
if v['normalized']:
|
||||||
tn = 'custom_column_{0}'.format(i)
|
tn = 'custom_column_{0}'.format(v['num'])
|
||||||
self.tag_browser_categories[tn] = [v['label'], 'value']
|
self.tag_browser_categories[v['label']] = {
|
||||||
self.tag_browser_datatype[v['label']] = v['datatype']
|
'table':tn, 'column':'value',
|
||||||
|
'type':v['datatype'], 'is_multiple':v['is_multiple'],
|
||||||
|
'kind':'custom', 'name':v['name']
|
||||||
|
}
|
||||||
|
|
||||||
def get_custom(self, idx, label=None, num=None, index_is_id=False):
|
def get_custom(self, idx, label=None, num=None, index_is_id=False):
|
||||||
if label is not None:
|
if label is not None:
|
||||||
|
@ -33,6 +33,9 @@ from calibre.customize.ui import run_plugins_on_import
|
|||||||
|
|
||||||
from calibre.utils.filenames import ascii_filename
|
from calibre.utils.filenames import ascii_filename
|
||||||
from calibre.utils.date import utcnow, now as nowf, utcfromtimestamp
|
from calibre.utils.date import utcnow, now as nowf, utcfromtimestamp
|
||||||
|
from calibre.utils.ordered_dict import OrderedDict
|
||||||
|
from calibre.utils.config import prefs
|
||||||
|
from calibre.utils.search_query_parser import saved_searches
|
||||||
from calibre.ebooks import BOOK_EXTENSIONS, check_ebook_format
|
from calibre.ebooks import BOOK_EXTENSIONS, check_ebook_format
|
||||||
|
|
||||||
if iswindows:
|
if iswindows:
|
||||||
@ -123,24 +126,33 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
|||||||
if isinstance(self.dbpath, unicode):
|
if isinstance(self.dbpath, unicode):
|
||||||
self.dbpath = self.dbpath.encode(filesystem_encoding)
|
self.dbpath = self.dbpath.encode(filesystem_encoding)
|
||||||
|
|
||||||
self.tag_browser_categories = {
|
# Order as has been customary in the tags pane.
|
||||||
'tags' : ['tag', 'name'],
|
tag_browser_categories_items = [
|
||||||
'series' : ['series', 'name'],
|
('authors', {'table':'authors', 'column':'name',
|
||||||
'publishers': ['publisher', 'name'],
|
'type':'text', 'is_multiple':False,
|
||||||
'authors' : ['author', 'name'],
|
'kind':'standard', 'name':_('Authors')}),
|
||||||
'news' : ['news', 'name'],
|
('series', {'table':'series', 'column':'name',
|
||||||
'ratings' : ['rating', 'rating']
|
'type':None, 'is_multiple':False,
|
||||||
}
|
'kind':'standard', 'name':_('Series')}),
|
||||||
self.tag_browser_datatype = {
|
('formats', {'table':None, 'column':None,
|
||||||
'tag' : 'textmult',
|
'type':None, 'is_multiple':False,
|
||||||
'series' : None,
|
'kind':'standard', 'name':_('Formats')}),
|
||||||
'publisher' : 'text',
|
('publishers',{'table':'publishers', 'column':'name',
|
||||||
'author' : 'text',
|
'type':'text', 'is_multiple':False,
|
||||||
'news' : None,
|
'kind':'standard', 'name':_('Publishers')}),
|
||||||
'rating' : 'rating',
|
('ratings', {'table':'ratings', 'column':'rating',
|
||||||
}
|
'type':'rating', 'is_multiple':False,
|
||||||
|
'kind':'standard', 'name':_('Ratings')}),
|
||||||
self.tag_browser_formatters = {'rating': lambda x:u'\u2605'*int(round(x/2.))}
|
('news', {'table':'news', 'column':'name',
|
||||||
|
'type':None, 'is_multiple':False,
|
||||||
|
'kind':'standard', 'name':_('News')}),
|
||||||
|
('tags', {'table':'tags', 'column':'name',
|
||||||
|
'type':'text', 'is_multiple':True,
|
||||||
|
'kind':'standard', 'name':_('Tags')}),
|
||||||
|
]
|
||||||
|
self.tag_browser_categories = OrderedDict()
|
||||||
|
for k,v in tag_browser_categories_items:
|
||||||
|
self.tag_browser_categories[k] = v
|
||||||
|
|
||||||
self.connect()
|
self.connect()
|
||||||
self.is_case_sensitive = not iswindows and not isosx and \
|
self.is_case_sensitive = not iswindows and not isosx and \
|
||||||
@ -649,36 +661,65 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
|||||||
def get_recipe(self, id):
|
def get_recipe(self, id):
|
||||||
return self.conn.get('SELECT script FROM feeds WHERE id=?', (id,), all=False)
|
return self.conn.get('SELECT script FROM feeds WHERE id=?', (id,), all=False)
|
||||||
|
|
||||||
|
def get_tag_browser_categories(self):
|
||||||
|
return self.tag_browser_categories
|
||||||
|
|
||||||
def get_categories(self, sort_on_count=False, ids=None, icon_map=None):
|
def get_categories(self, sort_on_count=False, ids=None, icon_map=None):
|
||||||
self.books_list_filter.change([] if not ids else ids)
|
self.books_list_filter.change([] if not ids else ids)
|
||||||
|
|
||||||
categories = {}
|
categories = {}
|
||||||
for tn, cn in self.tag_browser_categories.items():
|
|
||||||
|
#### First, build the standard and custom-column categories ####
|
||||||
|
for category in self.tag_browser_categories.keys():
|
||||||
|
tn = self.tag_browser_categories[category]['table']
|
||||||
|
categories[category] = [] #reserve the position in the ordered list
|
||||||
|
if tn is None: # Nothing to do for the moment
|
||||||
|
continue
|
||||||
|
cn = self.tag_browser_categories[category]['column']
|
||||||
if ids is None:
|
if ids is None:
|
||||||
query = 'SELECT id, {0}, count FROM tag_browser_{1}'.format(cn[1], tn)
|
query = 'SELECT id, {0}, count FROM tag_browser_{1}'.format(cn, tn)
|
||||||
else:
|
else:
|
||||||
query = 'SELECT id, {0}, count FROM tag_browser_filtered_{1}'.format(cn[1], tn)
|
query = 'SELECT id, {0}, count FROM tag_browser_filtered_{1}'.format(cn, tn)
|
||||||
if sort_on_count:
|
if sort_on_count:
|
||||||
query += ' ORDER BY count DESC'
|
query += ' ORDER BY count DESC'
|
||||||
else:
|
else:
|
||||||
query += ' ORDER BY {0} ASC'.format(cn[1])
|
query += ' ORDER BY {0} ASC'.format(cn)
|
||||||
data = self.conn.get(query)
|
data = self.conn.get(query)
|
||||||
category = cn[0]
|
|
||||||
|
# icon_map is not None if get_categories is to store an icon and
|
||||||
|
# possibly a tooltip in the tag structure.
|
||||||
icon, tooltip = None, ''
|
icon, tooltip = None, ''
|
||||||
if icon_map:
|
if icon_map:
|
||||||
|
if self.tag_browser_categories[category]['kind'] == 'standard':
|
||||||
if category in icon_map:
|
if category in icon_map:
|
||||||
icon = icon_map[category]
|
icon = icon_map[category]
|
||||||
else:
|
elif self.tag_browser_categories[category]['kind'] == 'custom':
|
||||||
icon = icon_map['*custom']
|
icon = icon_map['*custom']
|
||||||
|
icon_map[category] = icon_map['*custom']
|
||||||
tooltip = self.custom_column_label_map[category]['name']
|
tooltip = self.custom_column_label_map[category]['name']
|
||||||
datatype = self.tag_browser_datatype[category]
|
|
||||||
formatter = self.tag_browser_formatters.get(datatype, lambda x: x)
|
datatype = self.tag_browser_categories[category]['type']
|
||||||
|
if datatype == 'rating':
|
||||||
|
item_zero_func = (lambda x: len(formatter(r[1])) > 0)
|
||||||
|
formatter = (lambda x:u'\u2605'*int(round(x/2.)))
|
||||||
|
elif category == 'authors':
|
||||||
|
item_zero_func = (lambda x: x[2] > 0)
|
||||||
|
# Clean up the authors strings to human-readable form
|
||||||
|
formatter = (lambda x: x.replace('|', ','))
|
||||||
|
else:
|
||||||
|
item_zero_func = (lambda x: x[2] > 0)
|
||||||
|
formatter = (lambda x:x)
|
||||||
|
|
||||||
categories[category] = [Tag(formatter(r[1]), count=r[2], id=r[0],
|
categories[category] = [Tag(formatter(r[1]), count=r[2], id=r[0],
|
||||||
icon=icon, tooltip = tooltip)
|
icon=icon, tooltip = tooltip)
|
||||||
for r in data
|
for r in data if item_zero_func(r)]
|
||||||
if r[2] > 0 and
|
|
||||||
(datatype != 'rating' or len(formatter(r[1])) > 0)]
|
# We delayed computing the standard formats category because it does not
|
||||||
categories['format'] = []
|
# use a view, but is computed dynamically
|
||||||
|
categories['formats'] = []
|
||||||
|
icon = None
|
||||||
|
if icon_map and 'formats' in icon_map:
|
||||||
|
icon = icon_map['formats']
|
||||||
for fmt in self.conn.get('SELECT DISTINCT format FROM data'):
|
for fmt in self.conn.get('SELECT DISTINCT format FROM data'):
|
||||||
fmt = fmt[0]
|
fmt = fmt[0]
|
||||||
if ids is not None:
|
if ids is not None:
|
||||||
@ -693,13 +734,70 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
|||||||
WHERE format="%s"'''%fmt,
|
WHERE format="%s"'''%fmt,
|
||||||
all=False)
|
all=False)
|
||||||
if count > 0:
|
if count > 0:
|
||||||
categories['format'].append(Tag(fmt, count=count))
|
categories['formats'].append(Tag(fmt, count=count, icon=icon))
|
||||||
|
|
||||||
if sort_on_count:
|
if sort_on_count:
|
||||||
categories['format'].sort(cmp=lambda x,y:cmp(x.count, y.count),
|
categories['formats'].sort(cmp=lambda x,y:cmp(x.count, y.count),
|
||||||
reverse=True)
|
reverse=True)
|
||||||
else:
|
else:
|
||||||
categories['format'].sort(cmp=lambda x,y:cmp(x.name, y.name))
|
categories['formats'].sort(cmp=lambda x,y:cmp(x.name, y.name))
|
||||||
|
|
||||||
|
#### Now do the user-defined categories. ####
|
||||||
|
user_categories = prefs['user_categories']
|
||||||
|
|
||||||
|
# remove all user categories from tag_browser_categories. They can
|
||||||
|
# easily come and go. We will add all the existing ones in below.
|
||||||
|
for k in self.tag_browser_categories.keys():
|
||||||
|
if self.tag_browser_categories[k]['kind'] in ['user', 'search']:
|
||||||
|
del self.tag_browser_categories[k]
|
||||||
|
|
||||||
|
# We want to use same node in the user category as in the source
|
||||||
|
# category. To do that, we need to find the original Tag node. There is
|
||||||
|
# a time/space tradeoff here. By converting the tags into a map, we can
|
||||||
|
# do the verification in the category loop much faster, at the cost of
|
||||||
|
# temporarily duplicating the categories lists.
|
||||||
|
taglist = {}
|
||||||
|
for c in categories.keys():
|
||||||
|
taglist[c] = dict(map(lambda t:(t.name, t), categories[c]))
|
||||||
|
|
||||||
|
for user_cat in sorted(user_categories.keys()):
|
||||||
|
items = []
|
||||||
|
for (name,label,ign) in user_categories[user_cat]:
|
||||||
|
if label in taglist and name in taglist[label]:
|
||||||
|
items.append(taglist[label][name])
|
||||||
|
# else: do nothing, to not include nodes w zero counts
|
||||||
|
if len(items):
|
||||||
|
cat_name = user_cat+'*' # add the * to avoid name collision
|
||||||
|
self.tag_browser_categories[cat_name] = {
|
||||||
|
'table':None, 'column':None,
|
||||||
|
'type':None, 'is_multiple':False,
|
||||||
|
'kind':'user', 'name':user_cat}
|
||||||
|
# Not a problem if we accumulate entries in the icon map
|
||||||
|
if icon_map is not None:
|
||||||
|
icon_map[cat_name] = icon_map['*user']
|
||||||
|
if sort_on_count:
|
||||||
|
categories[cat_name] = \
|
||||||
|
sorted(items, cmp=(lambda x, y: cmp(y.count, x.count)))
|
||||||
|
else:
|
||||||
|
categories[cat_name] = \
|
||||||
|
sorted(items, cmp=(lambda x, y: cmp(x.name.lower(), y.name.lower())))
|
||||||
|
|
||||||
|
#### Finally, the saved searches category ####
|
||||||
|
items = []
|
||||||
|
icon = None
|
||||||
|
if icon_map and 'search' in icon_map:
|
||||||
|
icon = icon_map['search']
|
||||||
|
for srch in saved_searches.names():
|
||||||
|
items.append(Tag(srch, tooltip=saved_searches.lookup(srch), icon=icon))
|
||||||
|
if len(items):
|
||||||
|
self.tag_browser_categories['search'] = {
|
||||||
|
'table':None, 'column':None,
|
||||||
|
'type':None, 'is_multiple':False,
|
||||||
|
'kind':'search', 'name':_('Searches')}
|
||||||
|
if icon_map is not None:
|
||||||
|
icon_map['search'] = icon_map['search']
|
||||||
|
categories['search'] = items
|
||||||
|
|
||||||
return categories
|
return categories
|
||||||
|
|
||||||
def tags_older_than(self, tag, delta):
|
def tags_older_than(self, tag, delta):
|
||||||
|
@ -14,14 +14,46 @@ import cherrypy
|
|||||||
from calibre.constants import __appname__, __version__
|
from calibre.constants import __appname__, __version__
|
||||||
from calibre.utils.date import fromtimestamp
|
from calibre.utils.date import fromtimestamp
|
||||||
from calibre.library.server import listen_on, log_access_file, log_error_file
|
from calibre.library.server import listen_on, log_access_file, log_error_file
|
||||||
|
from calibre.library.server.utils import expose
|
||||||
from calibre.utils.mdns import publish as publish_zeroconf, \
|
from calibre.utils.mdns import publish as publish_zeroconf, \
|
||||||
stop_server as stop_zeroconf, get_external_ip
|
stop_server as stop_zeroconf, get_external_ip
|
||||||
from calibre.library.server.content import ContentServer
|
from calibre.library.server.content import ContentServer
|
||||||
from calibre.library.server.mobile import MobileServer
|
from calibre.library.server.mobile import MobileServer
|
||||||
from calibre.library.server.xml import XMLServer
|
from calibre.library.server.xml import XMLServer
|
||||||
from calibre.library.server.opds import OPDSServer
|
from calibre.library.server.opds import OPDSServer
|
||||||
|
from calibre.library.server.cache import Cache
|
||||||
|
|
||||||
class LibraryServer(ContentServer, MobileServer, XMLServer, OPDSServer):
|
|
||||||
|
class DispatchController(object): # {{{
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.dispatcher = cherrypy.dispatch.RoutesDispatcher()
|
||||||
|
self.funcs = []
|
||||||
|
self.seen = set([])
|
||||||
|
|
||||||
|
def __call__(self, name, route, func, **kwargs):
|
||||||
|
if name in self.seen:
|
||||||
|
raise NameError('Route name: '+ repr(name) + ' already used')
|
||||||
|
self.seen.add(name)
|
||||||
|
kwargs['action'] = 'f_%d'%len(self.funcs)
|
||||||
|
self.dispatcher.connect(name, route, self, **kwargs)
|
||||||
|
self.funcs.append(expose(func))
|
||||||
|
|
||||||
|
def __getattr__(self, attr):
|
||||||
|
if not attr.startswith('f_'):
|
||||||
|
raise AttributeError(attr + ' not found')
|
||||||
|
num = attr.rpartition('_')[-1]
|
||||||
|
try:
|
||||||
|
num = int(num)
|
||||||
|
except:
|
||||||
|
raise AttributeError(attr + ' not found')
|
||||||
|
if num < 0 or num >= len(self.funcs):
|
||||||
|
raise AttributeError(attr + ' not found')
|
||||||
|
return self.funcs[num]
|
||||||
|
|
||||||
|
# }}}
|
||||||
|
|
||||||
|
class LibraryServer(ContentServer, MobileServer, XMLServer, OPDSServer, Cache):
|
||||||
|
|
||||||
server_name = __appname__ + '/' + __version__
|
server_name = __appname__ + '/' + __version__
|
||||||
|
|
||||||
@ -88,8 +120,16 @@ class LibraryServer(ContentServer, MobileServer, XMLServer, OPDSServer):
|
|||||||
|
|
||||||
def start(self):
|
def start(self):
|
||||||
self.is_running = False
|
self.is_running = False
|
||||||
|
d = DispatchController()
|
||||||
|
for x in self.__class__.__bases__:
|
||||||
|
if hasattr(x, 'add_routes'):
|
||||||
|
x.add_routes(self, d)
|
||||||
|
root_conf = self.config.get('/', {})
|
||||||
|
root_conf['request.dispatch'] = d.dispatcher
|
||||||
|
self.config['/'] = root_conf
|
||||||
|
|
||||||
self.setup_loggers()
|
self.setup_loggers()
|
||||||
cherrypy.tree.mount(self, '', config=self.config)
|
cherrypy.tree.mount(root=None, config=self.config)
|
||||||
try:
|
try:
|
||||||
try:
|
try:
|
||||||
cherrypy.engine.start()
|
cherrypy.engine.start()
|
||||||
|
33
src/calibre/library/server/cache.py
Normal file
33
src/calibre/library/server/cache.py
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
|
||||||
|
|
||||||
|
__license__ = 'GPL v3'
|
||||||
|
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||||
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
|
from calibre.utils.date import utcnow
|
||||||
|
from calibre.utils.ordered_dict import OrderedDict
|
||||||
|
|
||||||
|
class Cache(object):
|
||||||
|
|
||||||
|
def add_routes(self, c):
|
||||||
|
self._category_cache = OrderedDict()
|
||||||
|
self._search_cache = OrderedDict()
|
||||||
|
|
||||||
|
def search_cache(self, search):
|
||||||
|
old = self._search_cache.get(search, None)
|
||||||
|
if old is None or old[0] <= self.db.last_modified():
|
||||||
|
matches = self.db.data.search(search)
|
||||||
|
self._search_cache[search] = frozenset(matches)
|
||||||
|
if len(self._search_cache) > 10:
|
||||||
|
self._search_cache.popitem(last=False)
|
||||||
|
|
||||||
|
|
||||||
|
def categories_cache(self, restrict_to=frozenset([])):
|
||||||
|
old = self._category_cache.get(frozenset(restrict_to), None)
|
||||||
|
if old is None or old[0] <= self.db.last_modified():
|
||||||
|
categories = self.db.get_categories(ids=restrict_to)
|
||||||
|
self._category_cache[restrict_to] = (utcnow(), categories)
|
||||||
|
if len(self._category_cache) > 10:
|
||||||
|
self._category_cache.popitem(last=False)
|
||||||
|
return self._category_cache[restrict_to][1]
|
@ -16,7 +16,7 @@ except ImportError:
|
|||||||
|
|
||||||
from calibre import fit_image, guess_type
|
from calibre import fit_image, guess_type
|
||||||
from calibre.utils.date import fromtimestamp
|
from calibre.utils.date import fromtimestamp
|
||||||
from calibre.library.server.utils import expose
|
|
||||||
|
|
||||||
class ContentServer(object):
|
class ContentServer(object):
|
||||||
|
|
||||||
@ -25,6 +25,13 @@ class ContentServer(object):
|
|||||||
a few utility methods.
|
a few utility methods.
|
||||||
'''
|
'''
|
||||||
|
|
||||||
|
def add_routes(self, connect):
|
||||||
|
connect('root', '/', self.index)
|
||||||
|
connect('get', '/get/{what}/{id}', self.get,
|
||||||
|
conditions=dict(method=["GET", "HEAD"]))
|
||||||
|
connect('static', '/static/{name}', self.static,
|
||||||
|
conditions=dict(method=["GET", "HEAD"]))
|
||||||
|
|
||||||
# Utility methods {{{
|
# Utility methods {{{
|
||||||
def last_modified(self, updated):
|
def last_modified(self, updated):
|
||||||
'''
|
'''
|
||||||
@ -68,8 +75,7 @@ class ContentServer(object):
|
|||||||
# }}}
|
# }}}
|
||||||
|
|
||||||
|
|
||||||
@expose
|
def get(self, what, id):
|
||||||
def get(self, what, id, *args, **kwargs):
|
|
||||||
'Serves files, covers, thumbnails from the calibre database'
|
'Serves files, covers, thumbnails from the calibre database'
|
||||||
try:
|
try:
|
||||||
id = int(id)
|
id = int(id)
|
||||||
@ -87,7 +93,6 @@ class ContentServer(object):
|
|||||||
return self.get_cover(id)
|
return self.get_cover(id)
|
||||||
return self.get_format(id, what)
|
return self.get_format(id, what)
|
||||||
|
|
||||||
@expose
|
|
||||||
def static(self, name):
|
def static(self, name):
|
||||||
'Serves static content'
|
'Serves static content'
|
||||||
name = name.lower()
|
name = name.lower()
|
||||||
@ -108,7 +113,6 @@ class ContentServer(object):
|
|||||||
cherrypy.response.headers['Last-Modified'] = self.last_modified(lm)
|
cherrypy.response.headers['Last-Modified'] = self.last_modified(lm)
|
||||||
return open(path, 'rb').read()
|
return open(path, 'rb').read()
|
||||||
|
|
||||||
@expose
|
|
||||||
def index(self, **kwargs):
|
def index(self, **kwargs):
|
||||||
'The / URL'
|
'The / URL'
|
||||||
ua = cherrypy.request.headers.get('User-Agent', '').strip()
|
ua = cherrypy.request.headers.get('User-Agent', '').strip()
|
||||||
|
@ -5,34 +5,143 @@ __license__ = 'GPL v3'
|
|||||||
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
|
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||||
__docformat__ = 'restructuredtext en'
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
import re, copy
|
import re
|
||||||
import __builtin__
|
import __builtin__
|
||||||
|
|
||||||
import cherrypy
|
import cherrypy
|
||||||
|
from lxml import html
|
||||||
|
from lxml.html.builder import HTML, HEAD, TITLE, STYLE, LINK, DIV, IMG, BODY, \
|
||||||
|
OPTION, SELECT, INPUT, FORM, SPAN, TABLE, TR, TD, A, HR
|
||||||
|
|
||||||
from calibre.utils.genshi.template import MarkupTemplate
|
from calibre.library.server.utils import strftime
|
||||||
from calibre.library.server.utils import strftime, expose
|
|
||||||
from calibre.ebooks.metadata import fmt_sidx
|
from calibre.ebooks.metadata import fmt_sidx
|
||||||
|
from calibre.constants import __appname__
|
||||||
|
from calibre import human_readable
|
||||||
|
|
||||||
# Templates {{{
|
def CLASS(*args, **kwargs): # class is a reserved word in Python
|
||||||
MOBILE_BOOK = '''\
|
kwargs['class'] = ' '.join(args)
|
||||||
<tr xmlns:py="http://genshi.edgewall.org/">
|
return kwargs
|
||||||
<td class="thumbnail">
|
|
||||||
<img type="image/jpeg" src="/get/thumb/${r[FM['id']]}" border="0"/>
|
|
||||||
</td>
|
def build_search_box(num, search, sort, order): # {{{
|
||||||
<td>
|
div = DIV(id='search_box')
|
||||||
<py:for each="format in r[FM['formats']].split(',')">
|
form = FORM('Show ', method='get', action='mobile')
|
||||||
<span class="button"><a href="/get/${format}/${authors}-${r[FM['title']]}_${r[FM['id']]}.${format}">${format.lower()}</a></span>
|
div.append(form)
|
||||||
</py:for>
|
|
||||||
${r[FM['title']]}${(' ['+r[FM['series']]+'-'+r[FM['series_index']]+']') if r[FM['series']] else ''} by ${authors} - ${r[FM['size']]/1024}k - ${r[FM['publisher']] if r[FM['publisher']] else ''} ${pubdate} ${'['+r[FM['tags']]+']' if r[FM['tags']] else ''}
|
num_select = SELECT(name='num')
|
||||||
</td>
|
for option in (5, 10, 25, 100):
|
||||||
</tr>
|
kwargs = {'value':str(option)}
|
||||||
|
if option == num:
|
||||||
|
kwargs['SELECTED'] = 'SELECTED'
|
||||||
|
num_select.append(OPTION(str(option), **kwargs))
|
||||||
|
num_select.tail = ' books matching '
|
||||||
|
form.append(num_select)
|
||||||
|
|
||||||
|
searchf = INPUT(name='search', id='s', value=search if search else '')
|
||||||
|
searchf.tail = ' sorted by '
|
||||||
|
form.append(searchf)
|
||||||
|
|
||||||
|
sort_select = SELECT(name='sort')
|
||||||
|
for option in ('date','author','title','rating','size','tags','series'):
|
||||||
|
kwargs = {'value':option}
|
||||||
|
if option == sort:
|
||||||
|
kwargs['SELECTED'] = 'SELECTED'
|
||||||
|
sort_select.append(OPTION(option, **kwargs))
|
||||||
|
form.append(sort_select)
|
||||||
|
|
||||||
|
order_select = SELECT(name='order')
|
||||||
|
for option in ('ascending','descending'):
|
||||||
|
kwargs = {'value':option}
|
||||||
|
if option == order:
|
||||||
|
kwargs['SELECTED'] = 'SELECTED'
|
||||||
|
order_select.append(OPTION(option, **kwargs))
|
||||||
|
form.append(order_select)
|
||||||
|
|
||||||
|
form.append(INPUT(id='go', type='submit', value='Search'))
|
||||||
|
|
||||||
|
return div
|
||||||
|
# }}}
|
||||||
|
|
||||||
|
def build_navigation(start, num, total, url_base): # {{{
|
||||||
|
end = min((start+num-1), total)
|
||||||
|
tagline = SPAN('Books %d to %d of %d'%(start, end, total),
|
||||||
|
style='display: block; text-align: center;')
|
||||||
|
left_buttons = TD(CLASS('button', style='text-align:left'))
|
||||||
|
right_buttons = TD(CLASS('button', style='text-align:right'))
|
||||||
|
|
||||||
|
if start > 1:
|
||||||
|
for t,s in [('First', 1), ('Previous', max(start-(num+1),1))]:
|
||||||
|
left_buttons.append(A(t, href='%s;start=%d'%(url_base, s)))
|
||||||
|
|
||||||
|
if total > start + num:
|
||||||
|
for t,s in [('Next', start+num), ('Last', total-num+1)]:
|
||||||
|
right_buttons.append(A(t, href='%s;start=%d'%(url_base, s)))
|
||||||
|
|
||||||
|
buttons = TABLE(
|
||||||
|
TR(left_buttons, right_buttons),
|
||||||
|
CLASS('buttons'))
|
||||||
|
return DIV(tagline, buttons, CLASS('navigation'))
|
||||||
|
|
||||||
|
# }}}
|
||||||
|
|
||||||
|
def build_index(books, num, search, sort, order, start, total, url_base):
|
||||||
|
logo = DIV(IMG(src='/static/calibre.png', alt=__appname__), id='logo')
|
||||||
|
|
||||||
|
search_box = build_search_box(num, search, sort, order)
|
||||||
|
navigation = build_navigation(start, num, total, url_base)
|
||||||
|
bookt = TABLE(id='listing')
|
||||||
|
|
||||||
|
body = BODY(
|
||||||
|
logo,
|
||||||
|
search_box,
|
||||||
|
navigation,
|
||||||
|
HR(CLASS('spacer')),
|
||||||
|
bookt
|
||||||
|
)
|
||||||
|
|
||||||
|
# Book list {{{
|
||||||
|
for book in books:
|
||||||
|
thumbnail = TD(
|
||||||
|
IMG(type='image/jpeg', border='0', src='/get/thumb/%s' %
|
||||||
|
book['id']),
|
||||||
|
CLASS('thumbnail'))
|
||||||
|
|
||||||
|
data = TD()
|
||||||
|
last = None
|
||||||
|
for fmt in book['formats'].split(','):
|
||||||
|
s = SPAN(
|
||||||
|
A(
|
||||||
|
fmt.lower(),
|
||||||
|
href='/get/%s/%s-%s_%d.%s' % (fmt, book['authors'],
|
||||||
|
book['title'], book['id'], fmt)
|
||||||
|
),
|
||||||
|
CLASS('button'))
|
||||||
|
s.tail = u'\u202f' #
|
||||||
|
last = s
|
||||||
|
data.append(s)
|
||||||
|
|
||||||
|
series = u'[%s - %s]'%(book['series'], book['series_index']) \
|
||||||
|
if book['series'] else ''
|
||||||
|
tags = u'[%s]'%book['tags'] if book['tags'] else ''
|
||||||
|
|
||||||
|
text = u'\u202f%s %s by %s - %s - %s %s' % (book['title'], series,
|
||||||
|
book['authors'], book['size'], book['timestamp'], tags)
|
||||||
|
|
||||||
|
if last is None:
|
||||||
|
data.text = text
|
||||||
|
else:
|
||||||
|
last.tail += text
|
||||||
|
|
||||||
|
bookt.append(TR(thumbnail, data))
|
||||||
|
# }}}
|
||||||
|
|
||||||
|
return HTML(
|
||||||
|
HEAD(
|
||||||
|
TITLE(__appname__ + ' Library'),
|
||||||
|
LINK(rel='icon', href='http://calibre-ebook.com/favicon.ico',
|
||||||
|
type='image/x-icon'),
|
||||||
|
STYLE( # {{{
|
||||||
'''
|
'''
|
||||||
|
|
||||||
MOBILE = MarkupTemplate('''\
|
|
||||||
<html xmlns:py="http://genshi.edgewall.org/">
|
|
||||||
<head>
|
|
||||||
<style>
|
|
||||||
.navigation table.buttons {
|
.navigation table.buttons {
|
||||||
width: 100%;
|
width: 100%;
|
||||||
}
|
}
|
||||||
@ -109,71 +218,20 @@ div.navigation {
|
|||||||
clear: both;
|
clear: both;
|
||||||
}
|
}
|
||||||
|
|
||||||
</style>
|
''', type='text/css') # }}}
|
||||||
<link rel="icon" href="http://calibre-ebook.com/favicon.ico" type="image/x-icon" />
|
), # End head
|
||||||
</head>
|
body
|
||||||
<body>
|
) # End html
|
||||||
<div id="logo">
|
|
||||||
<img src="/static/calibre.png" alt="Calibre" />
|
|
||||||
</div>
|
|
||||||
<div id="search_box">
|
|
||||||
<form method="get" action="/mobile">
|
|
||||||
Show <select name="num">
|
|
||||||
<py:for each="option in [5,10,25,100]">
|
|
||||||
<option py:if="option == num" value="${option}" SELECTED="SELECTED">${option}</option>
|
|
||||||
<option py:if="option != num" value="${option}">${option}</option>
|
|
||||||
</py:for>
|
|
||||||
</select>
|
|
||||||
books matching <input name="search" id="s" value="${search}" /> sorted by
|
|
||||||
|
|
||||||
<select name="sort">
|
|
||||||
<py:for each="option in ['date','author','title','rating','size','tags','series']">
|
|
||||||
<option py:if="option == sort" value="${option}" SELECTED="SELECTED">${option}</option>
|
|
||||||
<option py:if="option != sort" value="${option}">${option}</option>
|
|
||||||
</py:for>
|
|
||||||
</select>
|
|
||||||
<select name="order">
|
|
||||||
<py:for each="option in ['ascending','descending']">
|
|
||||||
<option py:if="option == order" value="${option}" SELECTED="SELECTED">${option}</option>
|
|
||||||
<option py:if="option != order" value="${option}">${option}</option>
|
|
||||||
</py:for>
|
|
||||||
</select>
|
|
||||||
<input id="go" type="submit" value="Search"/>
|
|
||||||
</form>
|
|
||||||
</div>
|
|
||||||
<div class="navigation">
|
|
||||||
<span style="display: block; text-align: center;">Books ${start} to ${ min((start+num-1) , total) } of ${total}</span>
|
|
||||||
<table class="buttons">
|
|
||||||
<tr>
|
|
||||||
<td class="button" style="text-align:left;">
|
|
||||||
<a py:if="start > 1" href="${url_base};start=1">First</a>
|
|
||||||
<a py:if="start > 1" href="${url_base};start=${max(start-(num+1),1)}">Previous</a>
|
|
||||||
</td>
|
|
||||||
<td class="button" style="text-align: right;">
|
|
||||||
<a py:if=" total > (start + num) " href="${url_base};start=${start+num}">Next</a>
|
|
||||||
<a py:if=" total > (start + num) " href="${url_base};start=${total-num+1}">Last</a>
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
</table>
|
|
||||||
</div>
|
|
||||||
<hr class="spacer" />
|
|
||||||
<table id="listing">
|
|
||||||
<py:for each="book in books">
|
|
||||||
${Markup(book)}
|
|
||||||
</py:for>
|
|
||||||
</table>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
||||||
''')
|
|
||||||
|
|
||||||
# }}}
|
|
||||||
|
|
||||||
class MobileServer(object):
|
class MobileServer(object):
|
||||||
'A view optimized for browsers in mobile devices'
|
'A view optimized for browsers in mobile devices'
|
||||||
|
|
||||||
MOBILE_UA = re.compile('(?i)(?:iPhone|Opera Mini|NetFront|webOS|Mobile|Android|imode|DoCoMo|Minimo|Blackberry|MIDP|Symbian|HD2)')
|
MOBILE_UA = re.compile('(?i)(?:iPhone|Opera Mini|NetFront|webOS|Mobile|Android|imode|DoCoMo|Minimo|Blackberry|MIDP|Symbian|HD2)')
|
||||||
|
|
||||||
@expose
|
def add_routes(self, connect):
|
||||||
|
connect('mobile', '/mobile', self.mobile)
|
||||||
|
|
||||||
def mobile(self, start='1', num='25', sort='date', search='',
|
def mobile(self, start='1', num='25', sort='date', search='',
|
||||||
_=None, order='descending'):
|
_=None, order='descending'):
|
||||||
'''
|
'''
|
||||||
@ -193,26 +251,31 @@ class MobileServer(object):
|
|||||||
except ValueError:
|
except ValueError:
|
||||||
raise cherrypy.HTTPError(400, 'num: %s is not an integer'%num)
|
raise cherrypy.HTTPError(400, 'num: %s is not an integer'%num)
|
||||||
ids = self.db.data.parse(search) if search and search.strip() else self.db.data.universal_set()
|
ids = self.db.data.parse(search) if search and search.strip() else self.db.data.universal_set()
|
||||||
ids = sorted(ids)
|
|
||||||
FM = self.db.FIELD_MAP
|
FM = self.db.FIELD_MAP
|
||||||
items = copy.deepcopy([r for r in iter(self.db) if r[FM['id']] in ids])
|
items = [r for r in iter(self.db) if r[FM['id']] in ids]
|
||||||
if sort is not None:
|
if sort is not None:
|
||||||
self.sort(items, sort, (order.lower().strip() == 'ascending'))
|
self.sort(items, sort, (order.lower().strip() == 'ascending'))
|
||||||
|
|
||||||
book, books = MarkupTemplate(MOBILE_BOOK), []
|
books = []
|
||||||
for record in items[(start-1):(start-1)+num]:
|
for record in items[(start-1):(start-1)+num]:
|
||||||
if record[FM['formats']] is None:
|
book = {'formats':record[FM['formats']], 'size':record[FM['size']]}
|
||||||
record[FM['formats']] = ''
|
if not book['formats']:
|
||||||
if record[FM['size']] is None:
|
book['formats'] = ''
|
||||||
record[FM['size']] = 0
|
if not book['size']:
|
||||||
|
book['size'] = 0
|
||||||
|
book['size'] = human_readable(book['size'])
|
||||||
|
|
||||||
aus = record[FM['authors']] if record[FM['authors']] else __builtin__._('Unknown')
|
aus = record[FM['authors']] if record[FM['authors']] else __builtin__._('Unknown')
|
||||||
authors = '|'.join([i.replace('|', ',') for i in aus.split(',')])
|
authors = '|'.join([i.replace('|', ',') for i in aus.split(',')])
|
||||||
record[FM['series_index']] = \
|
book['authors'] = authors
|
||||||
fmt_sidx(float(record[FM['series_index']]))
|
book['series_index'] = fmt_sidx(float(record[FM['series_index']]))
|
||||||
ts, pd = strftime('%Y/%m/%d %H:%M:%S', record[FM['timestamp']]), \
|
book['series'] = record[FM['series']]
|
||||||
strftime('%Y/%m/%d %H:%M:%S', record[FM['pubdate']])
|
book['tags'] = record[FM['tags']]
|
||||||
books.append(book.generate(r=record, authors=authors, timestamp=ts,
|
book['title'] = record[FM['title']]
|
||||||
pubdate=pd, FM=FM).render('xml').decode('utf-8'))
|
for x in ('timestamp', 'pubdate'):
|
||||||
|
book[x] = strftime('%Y/%m/%d %H:%M:%S', record[FM[x]])
|
||||||
|
book['id'] = record[FM['id']]
|
||||||
|
books.append(book)
|
||||||
updated = self.db.last_modified()
|
updated = self.db.last_modified()
|
||||||
|
|
||||||
cherrypy.response.headers['Content-Type'] = 'text/html; charset=utf-8'
|
cherrypy.response.headers['Content-Type'] = 'text/html; charset=utf-8'
|
||||||
@ -221,8 +284,8 @@ class MobileServer(object):
|
|||||||
|
|
||||||
url_base = "/mobile?search=" + search+";order="+order+";sort="+sort+";num="+str(num)
|
url_base = "/mobile?search=" + search+";order="+order+";sort="+sort+";num="+str(num)
|
||||||
|
|
||||||
return MOBILE.generate(books=books, start=start, updated=updated,
|
return html.tostring(build_index(books, num, search, sort, order,
|
||||||
search=search, sort=sort, order=order, num=num, FM=FM,
|
start, len(ids), url_base),
|
||||||
total=len(ids), url_base=url_base).render('html')
|
encoding='utf-8', include_meta_content_type=True,
|
||||||
|
pretty_print=True)
|
||||||
|
|
||||||
|
@ -5,296 +5,163 @@ __license__ = 'GPL v3'
|
|||||||
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
|
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||||
__docformat__ = 'restructuredtext en'
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
import re
|
import hashlib, binascii
|
||||||
from itertools import repeat
|
from functools import partial
|
||||||
|
|
||||||
|
from lxml import etree
|
||||||
|
from lxml.builder import ElementMaker
|
||||||
import cherrypy
|
import cherrypy
|
||||||
|
|
||||||
from calibre.utils.genshi.template import MarkupTemplate
|
from calibre.constants import __appname__
|
||||||
from calibre.library.server.utils import strftime, expose
|
|
||||||
from calibre.ebooks.metadata import fmt_sidx, title_sort
|
|
||||||
from calibre import guess_type, prepare_string_for_xml
|
|
||||||
|
|
||||||
# Templates {{{
|
BASE_HREFS = {
|
||||||
|
0 : '/stanza',
|
||||||
|
1 : '/opds',
|
||||||
|
}
|
||||||
|
|
||||||
STANZA_ENTRY=MarkupTemplate('''\
|
# Vocabulary for building OPDS feeds {{{
|
||||||
<entry xmlns:py="http://genshi.edgewall.org/">
|
E = ElementMaker(namespace='http://www.w3.org/2005/Atom',
|
||||||
<title>${record[FM['title']]}</title>
|
nsmap={
|
||||||
<id>urn:calibre:${urn}</id>
|
None : 'http://www.w3.org/2005/Atom',
|
||||||
<author><name>${authors}</name></author>
|
'dc' : 'http://purl.org/dc/terms/',
|
||||||
<updated>${timestamp}</updated>
|
'opds' : 'http://opds-spec.org/2010/catalog',
|
||||||
<link type="${mimetype}" href="/get/${fmt}/${record[FM['id']]}" />
|
})
|
||||||
<link rel="x-stanza-cover-image" type="image/jpeg" href="/get/cover/${record[FM['id']]}" />
|
|
||||||
<link rel="x-stanza-cover-image-thumbnail" type="image/jpeg" href="/get/thumb/${record[FM['id']]}" />
|
|
||||||
<content type="xhtml">
|
|
||||||
<div xmlns="http://www.w3.org/1999/xhtml" style="text-align: center">${Markup(extra)}${record[FM['comments']]}</div>
|
|
||||||
</content>
|
|
||||||
</entry>
|
|
||||||
''')
|
|
||||||
|
|
||||||
STANZA_SUBCATALOG_ENTRY=MarkupTemplate('''\
|
|
||||||
<entry xmlns:py="http://genshi.edgewall.org/">
|
|
||||||
<title>${title}</title>
|
|
||||||
<id>urn:calibre:${id}</id>
|
|
||||||
<updated>${updated.strftime('%Y-%m-%dT%H:%M:%S+00:00')}</updated>
|
|
||||||
<link type="application/atom+xml" href="/stanza/?${what}id=${id}" />
|
|
||||||
<content type="text">${count} books</content>
|
|
||||||
</entry>
|
|
||||||
''')
|
|
||||||
|
|
||||||
STANZA = MarkupTemplate('''\
|
FEED = E.feed
|
||||||
<?xml version="1.0" encoding="utf-8"?>
|
TITLE = E.title
|
||||||
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:py="http://genshi.edgewall.org/">
|
ID = E.id
|
||||||
<title>calibre Library</title>
|
|
||||||
<id>$id</id>
|
|
||||||
<updated>${updated.strftime('%Y-%m-%dT%H:%M:%S+00:00')}</updated>
|
|
||||||
<link rel="search" title="Search" type="application/atom+xml" href="/stanza/?search={searchTerms}"/>
|
|
||||||
${Markup(next_link)}
|
|
||||||
<author>
|
|
||||||
<name>calibre</name>
|
|
||||||
<uri>http://calibre-ebook.com</uri>
|
|
||||||
</author>
|
|
||||||
<subtitle>
|
|
||||||
${subtitle}
|
|
||||||
</subtitle>
|
|
||||||
<py:for each="entry in data">
|
|
||||||
${Markup(entry)}
|
|
||||||
</py:for>
|
|
||||||
</feed>
|
|
||||||
''')
|
|
||||||
|
|
||||||
STANZA_MAIN = MarkupTemplate('''\
|
def UPDATED(dt, *args, **kwargs):
|
||||||
<?xml version="1.0" encoding="utf-8"?>
|
return E.updated(dt.strftime('%Y-%m-%dT%H:%M:%S+00:00'), *args, **kwargs)
|
||||||
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:py="http://genshi.edgewall.org/">
|
|
||||||
<title>calibre Library</title>
|
LINK = partial(E.link, type='application/atom+xml')
|
||||||
<id>$id</id>
|
NAVLINK = partial(E.link,
|
||||||
<updated>${updated.strftime('%Y-%m-%dT%H:%M:%S+00:00')}</updated>
|
type='application/atom+xml;type=feed;profile=opds-catalog')
|
||||||
<link rel="search" title="Search" type="application/atom+xml" href="/stanza/?search={searchTerms}"/>
|
|
||||||
<author>
|
def SEARCH(base_href, *args, **kwargs):
|
||||||
<name>calibre</name>
|
kwargs['rel'] = 'search'
|
||||||
<uri>http://calibre-ebook.com</uri>
|
kwargs['title'] = 'Search'
|
||||||
</author>
|
kwargs['href'] = base_href+'/search/{searchTerms}'
|
||||||
<subtitle>
|
return LINK(*args, **kwargs)
|
||||||
${subtitle}
|
|
||||||
</subtitle>
|
def AUTHOR(name, uri=None):
|
||||||
<entry>
|
args = [E.name(name)]
|
||||||
<title>By Author</title>
|
if uri is not None:
|
||||||
<id>urn:uuid:fc000fa0-8c23-11de-a31d-0002a5d5c51b</id>
|
args.append(E.uri(uri))
|
||||||
<updated>${updated.strftime('%Y-%m-%dT%H:%M:%S+00:00')}</updated>
|
return E.author(*args)
|
||||||
<link type="application/atom+xml" href="/stanza/?sortby=byauthor" />
|
|
||||||
<content type="text">Books sorted by Author</content>
|
SUBTITLE = E.subtitle
|
||||||
</entry>
|
|
||||||
<entry>
|
def NAVCATALOG_ENTRY(base_href, updated, title, description, query):
|
||||||
<title>By Title</title>
|
href = base_href+'/navcatalog/'+binascii.hexlify(query)
|
||||||
<id>urn:uuid:1df4fe40-8c24-11de-b4c6-0002a5d5c51b</id>
|
id_ = 'calibre-navcatalog:'+str(hashlib.sha1(href).hexdigest())
|
||||||
<updated>${updated.strftime('%Y-%m-%dT%H:%M:%S+00:00')}</updated>
|
return E.entry(
|
||||||
<link type="application/atom+xml" href="/stanza/?sortby=bytitle" />
|
TITLE(title),
|
||||||
<content type="text">Books sorted by Title</content>
|
ID(id_),
|
||||||
</entry>
|
UPDATED(updated),
|
||||||
<entry>
|
E.content(description, type='text'),
|
||||||
<title>By Newest</title>
|
NAVLINK(href=href)
|
||||||
<id>urn:uuid:3c6d4940-8c24-11de-a4d7-0002a5d5c51b</id>
|
)
|
||||||
<updated>${updated.strftime('%Y-%m-%dT%H:%M:%S+00:00')}</updated>
|
|
||||||
<link type="application/atom+xml" href="/stanza/?sortby=bynewest" />
|
|
||||||
<content type="text">Books sorted by Date</content>
|
|
||||||
</entry>
|
|
||||||
<entry>
|
|
||||||
<title>By Tag</title>
|
|
||||||
<id>urn:uuid:824921e8-db8a-4e61-7d38-f1ce41502853</id>
|
|
||||||
<updated>${updated.strftime('%Y-%m-%dT%H:%M:%S+00:00')}</updated>
|
|
||||||
<link type="application/atom+xml" href="/stanza/?sortby=bytag" />
|
|
||||||
<content type="text">Books sorted by Tags</content>
|
|
||||||
</entry>
|
|
||||||
<entry>
|
|
||||||
<title>By Series</title>
|
|
||||||
<id>urn:uuid:512a5e50-a88f-f6b8-82aa-8f129c719f61</id>
|
|
||||||
<updated>${updated.strftime('%Y-%m-%dT%H:%M:%S+00:00')}</updated>
|
|
||||||
<link type="application/atom+xml" href="/stanza/?sortby=byseries" />
|
|
||||||
<content type="text">Books sorted by Series</content>
|
|
||||||
</entry>
|
|
||||||
</feed>
|
|
||||||
''')
|
|
||||||
|
|
||||||
# }}}
|
# }}}
|
||||||
|
|
||||||
|
class Feed(object):
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return etree.tostring(self.root, pretty_print=True, encoding='utf-8',
|
||||||
|
xml_declaration=True)
|
||||||
|
|
||||||
|
class TopLevel(Feed):
|
||||||
|
|
||||||
|
def __init__(self,
|
||||||
|
updated, # datetime object in UTC
|
||||||
|
categories,
|
||||||
|
version,
|
||||||
|
id_ = 'urn:calibre:main',
|
||||||
|
):
|
||||||
|
base_href = BASE_HREFS[version]
|
||||||
|
self.base_href = base_href
|
||||||
|
subc = partial(NAVCATALOG_ENTRY, base_href, updated)
|
||||||
|
|
||||||
|
subcatalogs = [subc(_('By ')+title,
|
||||||
|
_('Books sorted by ') + desc, q) for title, desc, q in
|
||||||
|
categories]
|
||||||
|
|
||||||
|
self.root = \
|
||||||
|
FEED(
|
||||||
|
TITLE(__appname__ + ' ' + _('Library')),
|
||||||
|
ID(id_),
|
||||||
|
UPDATED(updated),
|
||||||
|
SEARCH(base_href),
|
||||||
|
AUTHOR(__appname__, uri='http://calibre-ebook.com'),
|
||||||
|
SUBTITLE(_('Books in your library')),
|
||||||
|
*subcatalogs
|
||||||
|
)
|
||||||
|
|
||||||
|
STANZA_FORMATS = frozenset(['epub', 'pdb'])
|
||||||
|
|
||||||
class OPDSServer(object):
|
class OPDSServer(object):
|
||||||
|
|
||||||
def get_matches(self, location, query):
|
def add_routes(self, connect):
|
||||||
base = self.db.data.get_matches(location, query)
|
for base in ('stanza', 'opds'):
|
||||||
epub = self.db.data.get_matches('format', '=epub')
|
version = 0 if base == 'stanza' else 1
|
||||||
pdb = self.db.data.get_matches('format', '=pdb')
|
base_href = BASE_HREFS[version]
|
||||||
return base.intersection(epub.union(pdb))
|
connect(base, base_href, self.opds, version=version)
|
||||||
|
connect('opdsnavcatalog_'+base, base_href+'/navcatalog/{which}',
|
||||||
|
self.opds_navcatalog, version=version)
|
||||||
|
connect('opdssearch_'+base, base_href+'/search/{terms}',
|
||||||
|
self.opds_search, version=version)
|
||||||
|
|
||||||
def stanza_sortby_subcategory(self, updated, sortby, offset):
|
def get_opds_allowed_ids_for_version(self, version):
|
||||||
pat = re.compile(r'\(.*\)')
|
search = '' if version > 0 else ' '.join(['format:='+x for x in
|
||||||
|
STANZA_FORMATS])
|
||||||
|
self.seach_cache(search)
|
||||||
|
|
||||||
def clean_author(x):
|
def opds_search(self, terms=None, version=0):
|
||||||
return pat.sub('', x).strip()
|
version = int(version)
|
||||||
|
if not terms or version not in BASE_HREFS:
|
||||||
|
raise cherrypy.HTTPError(404, 'Not found')
|
||||||
|
|
||||||
def author_cmp(x, y):
|
def opds_navcatalog(self, which=None, version=0):
|
||||||
x = x if ',' in x else clean_author(x).rpartition(' ')[-1]
|
version = int(version)
|
||||||
y = y if ',' in y else clean_author(y).rpartition(' ')[-1]
|
if not which or version not in BASE_HREFS:
|
||||||
return cmp(x.lower(), y.lower())
|
raise cherrypy.HTTPError(404, 'Not found')
|
||||||
|
which = binascii.unhexlify(which)
|
||||||
|
type_ = which[0]
|
||||||
|
which = which[1:]
|
||||||
|
if type_ == 'O':
|
||||||
|
return self.get_opds_all_books(which)
|
||||||
|
elif type_ == 'N':
|
||||||
|
return self.get_opds_navcatalog(which)
|
||||||
|
raise cherrypy.HTTPError(404, 'Not found')
|
||||||
|
|
||||||
def get_author(x):
|
def opds(self, version=0):
|
||||||
pref, ___, suff = clean_author(x).rpartition(' ')
|
version = int(version)
|
||||||
return suff + (', '+pref) if pref else suff
|
if version not in BASE_HREFS:
|
||||||
|
raise cherrypy.HTTPError(404, 'Not found')
|
||||||
|
categories = self.categories_cache(
|
||||||
what, subtitle = sortby[2:], ''
|
self.get_opds_allowed_ids_for_version(version))
|
||||||
if sortby == 'byseries':
|
category_meta = self.db.get_tag_browser_categories()
|
||||||
data = self.db.all_series()
|
cats = [
|
||||||
data = [(x[0], x[1], len(self.get_matches('series', '='+x[1]))) for x in data]
|
(_('Newest'), _('Date'), 'Onewest'),
|
||||||
subtitle = 'Books by series'
|
(_('Title'), _('Title'), 'Otitle'),
|
||||||
elif sortby == 'byauthor':
|
]
|
||||||
data = self.db.all_authors()
|
for category in categories:
|
||||||
data = [(x[0], x[1], len(self.get_matches('authors', '='+x[1]))) for x in data]
|
if category == 'formats':
|
||||||
subtitle = 'Books by author'
|
continue
|
||||||
elif sortby == 'bytag':
|
meta = category_meta.get(category, None)
|
||||||
data = self.db.all_tags2()
|
if meta is None:
|
||||||
data = [(x[0], x[1], len(self.get_matches('tags', '='+x[1]))) for x in data]
|
continue
|
||||||
subtitle = 'Books by tag'
|
cats.append((meta['name'], meta['name'], 'N'+category))
|
||||||
fcmp = author_cmp if sortby == 'byauthor' else cmp
|
|
||||||
data = [x for x in data if x[2] > 0]
|
|
||||||
data.sort(cmp=lambda x, y: fcmp(x[1], y[1]))
|
|
||||||
next_offset = offset + self.max_stanza_items
|
|
||||||
rdata = data[offset:next_offset]
|
|
||||||
if next_offset >= len(data):
|
|
||||||
next_offset = -1
|
|
||||||
gt = get_author if sortby == 'byauthor' else lambda x: x
|
|
||||||
entries = [STANZA_SUBCATALOG_ENTRY.generate(title=gt(title), id=id,
|
|
||||||
what=what, updated=updated, count=c).render('xml').decode('utf-8') for id,
|
|
||||||
title, c in rdata]
|
|
||||||
next_link = ''
|
|
||||||
if next_offset > -1:
|
|
||||||
next_link = ('<link rel="next" title="Next" '
|
|
||||||
'type="application/atom+xml" href="/stanza/?sortby=%s&offset=%d"/>\n'
|
|
||||||
) % (sortby, next_offset)
|
|
||||||
return STANZA.generate(subtitle=subtitle, data=entries, FM=self.db.FIELD_MAP,
|
|
||||||
updated=updated, id='urn:calibre:main', next_link=next_link).render('xml')
|
|
||||||
|
|
||||||
def stanza_main(self, updated):
|
|
||||||
return STANZA_MAIN.generate(subtitle='', data=[], FM=self.db.FIELD_MAP,
|
|
||||||
updated=updated, id='urn:calibre:main').render('xml')
|
|
||||||
|
|
||||||
@expose
|
|
||||||
def stanza(self, search=None, sortby=None, authorid=None, tagid=None,
|
|
||||||
seriesid=None, offset=0):
|
|
||||||
'Feeds to read calibre books on a ipod with stanza.'
|
|
||||||
books = []
|
|
||||||
updated = self.db.last_modified()
|
updated = self.db.last_modified()
|
||||||
offset = int(offset)
|
|
||||||
cherrypy.response.headers['Last-Modified'] = self.last_modified(updated)
|
cherrypy.response.headers['Last-Modified'] = self.last_modified(updated)
|
||||||
cherrypy.response.headers['Content-Type'] = 'text/xml'
|
cherrypy.response.headers['Content-Type'] = 'text/xml'
|
||||||
# Main feed
|
|
||||||
if not sortby and not search and not authorid and not tagid and not seriesid:
|
|
||||||
return self.stanza_main(updated)
|
|
||||||
if sortby in ('byseries', 'byauthor', 'bytag'):
|
|
||||||
return self.stanza_sortby_subcategory(updated, sortby, offset)
|
|
||||||
|
|
||||||
# Get matching ids
|
feed = TopLevel(updated, cats, version)
|
||||||
if authorid:
|
|
||||||
authorid=int(authorid)
|
|
||||||
au = self.db.author_name(authorid)
|
|
||||||
ids = self.get_matches('authors', au)
|
|
||||||
elif tagid:
|
|
||||||
tagid=int(tagid)
|
|
||||||
ta = self.db.tag_name(tagid)
|
|
||||||
ids = self.get_matches('tags', ta)
|
|
||||||
elif seriesid:
|
|
||||||
seriesid=int(seriesid)
|
|
||||||
se = self.db.series_name(seriesid)
|
|
||||||
ids = self.get_matches('series', se)
|
|
||||||
else:
|
|
||||||
ids = self.db.data.parse(search) if search and search.strip() else self.db.data.universal_set()
|
|
||||||
record_list = list(iter(self.db))
|
|
||||||
|
|
||||||
FM = self.db.FIELD_MAP
|
|
||||||
# Sort the record list
|
|
||||||
if sortby == "bytitle" or authorid or tagid:
|
|
||||||
record_list.sort(lambda x, y:
|
|
||||||
cmp(title_sort(x[FM['title']]),
|
|
||||||
title_sort(y[FM['title']])))
|
|
||||||
elif seriesid:
|
|
||||||
record_list.sort(lambda x, y:
|
|
||||||
cmp(x[FM['series_index']],
|
|
||||||
y[FM['series_index']]))
|
|
||||||
else: # Sort by date
|
|
||||||
record_list = reversed(record_list)
|
|
||||||
|
|
||||||
|
|
||||||
fmts = FM['formats']
|
|
||||||
pat = re.compile(r'EPUB|PDB', re.IGNORECASE)
|
|
||||||
record_list = [x for x in record_list if x[FM['id']] in ids and
|
|
||||||
pat.search(x[fmts] if x[fmts] else '') is not None]
|
|
||||||
next_offset = offset + self.max_stanza_items
|
|
||||||
nrecord_list = record_list[offset:next_offset]
|
|
||||||
if next_offset >= len(record_list):
|
|
||||||
next_offset = -1
|
|
||||||
|
|
||||||
next_link = ''
|
|
||||||
if next_offset > -1:
|
|
||||||
q = ['offset=%d'%next_offset]
|
|
||||||
for x in ('search', 'sortby', 'authorid', 'tagid', 'seriesid'):
|
|
||||||
val = locals()[x]
|
|
||||||
if val is not None:
|
|
||||||
val = prepare_string_for_xml(unicode(val), True)
|
|
||||||
q.append('%s=%s'%(x, val))
|
|
||||||
next_link = ('<link rel="next" title="Next" '
|
|
||||||
'type="application/atom+xml" href="/stanza/?%s"/>\n'
|
|
||||||
) % '&'.join(q)
|
|
||||||
|
|
||||||
for record in nrecord_list:
|
|
||||||
r = record[FM['formats']]
|
|
||||||
r = r.upper() if r else ''
|
|
||||||
|
|
||||||
z = record[FM['authors']]
|
|
||||||
if not z:
|
|
||||||
z = _('Unknown')
|
|
||||||
authors = ' & '.join([i.replace('|', ',') for i in
|
|
||||||
z.split(',')])
|
|
||||||
|
|
||||||
# Setup extra description
|
|
||||||
extra = []
|
|
||||||
rating = record[FM['rating']]
|
|
||||||
if rating > 0:
|
|
||||||
rating = ''.join(repeat('★', rating))
|
|
||||||
extra.append('RATING: %s<br />'%rating)
|
|
||||||
tags = record[FM['tags']]
|
|
||||||
if tags:
|
|
||||||
extra.append('TAGS: %s<br />'%\
|
|
||||||
prepare_string_for_xml(', '.join(tags.split(','))))
|
|
||||||
series = record[FM['series']]
|
|
||||||
if series:
|
|
||||||
extra.append('SERIES: %s [%s]<br />'%\
|
|
||||||
(prepare_string_for_xml(series),
|
|
||||||
fmt_sidx(float(record[FM['series_index']]))))
|
|
||||||
|
|
||||||
fmt = 'epub' if 'EPUB' in r else 'pdb'
|
|
||||||
mimetype = guess_type('dummy.'+fmt)[0]
|
|
||||||
|
|
||||||
# Create the sub-catalog, which is either a list of
|
|
||||||
# authors/tags/series or a list of books
|
|
||||||
data = dict(
|
|
||||||
record=record,
|
|
||||||
updated=updated,
|
|
||||||
authors=authors,
|
|
||||||
tags=tags,
|
|
||||||
series=series,
|
|
||||||
FM=FM,
|
|
||||||
extra='\n'.join(extra),
|
|
||||||
mimetype=mimetype,
|
|
||||||
fmt=fmt,
|
|
||||||
urn=record[FM['uuid']],
|
|
||||||
timestamp=strftime('%Y-%m-%dT%H:%M:%S+00:00',
|
|
||||||
record[FM['timestamp']])
|
|
||||||
)
|
|
||||||
books.append(STANZA_ENTRY.generate(**data)\
|
|
||||||
.render('xml').decode('utf8'))
|
|
||||||
|
|
||||||
return STANZA.generate(subtitle='', data=books, FM=FM,
|
|
||||||
next_link=next_link, updated=updated, id='urn:calibre:main').render('xml')
|
|
||||||
|
|
||||||
|
return str(feed)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@ -5,20 +5,34 @@ __license__ = 'GPL v3'
|
|||||||
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
|
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||||
__docformat__ = 'restructuredtext en'
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
from calibre import strftime as _strftime
|
import time
|
||||||
|
|
||||||
|
import cherrypy
|
||||||
|
|
||||||
|
from calibre import strftime as _strftime, prints
|
||||||
from calibre.utils.date import now as nowf
|
from calibre.utils.date import now as nowf
|
||||||
|
|
||||||
|
|
||||||
def expose(func):
|
def expose(func):
|
||||||
import cherrypy
|
|
||||||
|
|
||||||
def do(self, *args, **kwargs):
|
def do(*args, **kwargs):
|
||||||
|
self = func.im_self
|
||||||
|
if self.opts.develop:
|
||||||
|
start = time.time()
|
||||||
|
|
||||||
dict.update(cherrypy.response.headers, {'Server':self.server_name})
|
dict.update(cherrypy.response.headers, {'Server':self.server_name})
|
||||||
if not self.embedded:
|
if not self.embedded:
|
||||||
self.db.check_if_modified()
|
self.db.check_if_modified()
|
||||||
return func(self, *args, **kwargs)
|
ans = func(*args, **kwargs)
|
||||||
|
if self.opts.develop:
|
||||||
|
prints('Function', func.__name__, 'called with args:', args, kwargs)
|
||||||
|
prints('\tTime:', func.__name__, time.time()-start)
|
||||||
|
return ans
|
||||||
|
|
||||||
|
do.__name__ = func.__name__
|
||||||
|
|
||||||
|
return do
|
||||||
|
|
||||||
return cherrypy.expose(do)
|
|
||||||
|
|
||||||
def strftime(fmt='%Y/%m/%d %H:%M:%S', dt=None):
|
def strftime(fmt='%Y/%m/%d %H:%M:%S', dt=None):
|
||||||
if not hasattr(dt, 'timetuple'):
|
if not hasattr(dt, 'timetuple'):
|
||||||
|
@ -5,52 +5,26 @@ __license__ = 'GPL v3'
|
|||||||
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
|
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||||
__docformat__ = 'restructuredtext en'
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
import copy, __builtin__
|
import __builtin__
|
||||||
|
|
||||||
import cherrypy
|
import cherrypy
|
||||||
|
from lxml.builder import ElementMaker
|
||||||
|
from lxml import etree
|
||||||
|
|
||||||
from calibre.utils.genshi.template import MarkupTemplate
|
from calibre.library.server.utils import strftime
|
||||||
from calibre.library.server.utils import strftime, expose
|
|
||||||
from calibre.ebooks.metadata import fmt_sidx
|
from calibre.ebooks.metadata import fmt_sidx
|
||||||
|
from calibre.constants import preferred_encoding
|
||||||
|
from calibre import isbytestring
|
||||||
|
|
||||||
# Templates {{{
|
E = ElementMaker()
|
||||||
BOOK = '''\
|
|
||||||
<book xmlns:py="http://genshi.edgewall.org/"
|
|
||||||
id="${r[FM['id']]}"
|
|
||||||
title="${r[FM['title']]}"
|
|
||||||
sort="${r[FM['sort']]}"
|
|
||||||
author_sort="${r[FM['author_sort']]}"
|
|
||||||
authors="${authors}"
|
|
||||||
rating="${r[FM['rating']]}"
|
|
||||||
timestamp="${timestamp}"
|
|
||||||
pubdate="${pubdate}"
|
|
||||||
size="${r[FM['size']]}"
|
|
||||||
isbn="${r[FM['isbn']] if r[FM['isbn']] else ''}"
|
|
||||||
formats="${r[FM['formats']] if r[FM['formats']] else ''}"
|
|
||||||
series = "${r[FM['series']] if r[FM['series']] else ''}"
|
|
||||||
series_index="${r[FM['series_index']]}"
|
|
||||||
tags="${r[FM['tags']] if r[FM['tags']] else ''}"
|
|
||||||
publisher="${r[FM['publisher']] if r[FM['publisher']] else ''}">${r[FM['comments']] if r[FM['comments']] else ''}
|
|
||||||
</book>
|
|
||||||
'''
|
|
||||||
|
|
||||||
|
|
||||||
LIBRARY = MarkupTemplate('''\
|
|
||||||
<?xml version="1.0" encoding="utf-8"?>
|
|
||||||
<library xmlns:py="http://genshi.edgewall.org/" start="$start" num="${len(books)}" total="$total" updated="${updated.strftime('%Y-%m-%dT%H:%M:%S+00:00')}">
|
|
||||||
<py:for each="book in books">
|
|
||||||
${Markup(book)}
|
|
||||||
</py:for>
|
|
||||||
</library>
|
|
||||||
''')
|
|
||||||
|
|
||||||
# }}}
|
|
||||||
|
|
||||||
class XMLServer(object):
|
class XMLServer(object):
|
||||||
'Serves XML and the Ajax based HTML frontend'
|
'Serves XML and the Ajax based HTML frontend'
|
||||||
|
|
||||||
@expose
|
def add_routes(self, connect):
|
||||||
def library(self, start='0', num='50', sort=None, search=None,
|
connect('xml', '/xml', self.xml)
|
||||||
|
|
||||||
|
def xml(self, start='0', num='50', sort=None, search=None,
|
||||||
_=None, order='ascending'):
|
_=None, order='ascending'):
|
||||||
'''
|
'''
|
||||||
Serves metadata from the calibre database as XML.
|
Serves metadata from the calibre database as XML.
|
||||||
@ -68,30 +42,63 @@ class XMLServer(object):
|
|||||||
num = int(num)
|
num = int(num)
|
||||||
except ValueError:
|
except ValueError:
|
||||||
raise cherrypy.HTTPError(400, 'num: %s is not an integer'%num)
|
raise cherrypy.HTTPError(400, 'num: %s is not an integer'%num)
|
||||||
|
|
||||||
order = order.lower().strip() == 'ascending'
|
order = order.lower().strip() == 'ascending'
|
||||||
|
|
||||||
ids = self.db.data.parse(search) if search and search.strip() else self.db.data.universal_set()
|
ids = self.db.data.parse(search) if search and search.strip() else self.db.data.universal_set()
|
||||||
ids = sorted(ids)
|
|
||||||
FM = self.db.FIELD_MAP
|
FM = self.db.FIELD_MAP
|
||||||
items = copy.deepcopy([r for r in iter(self.db) if r[FM['id']] in ids])
|
|
||||||
|
items = [r for r in iter(self.db) if r[FM['id']] in ids]
|
||||||
if sort is not None:
|
if sort is not None:
|
||||||
self.sort(items, sort, order)
|
self.sort(items, sort, order)
|
||||||
|
|
||||||
book, books = MarkupTemplate(BOOK), []
|
|
||||||
|
books = []
|
||||||
|
|
||||||
|
def serialize(x):
|
||||||
|
if isinstance(x, unicode):
|
||||||
|
return x
|
||||||
|
if isbytestring(x):
|
||||||
|
return x.decode(preferred_encoding, 'replace')
|
||||||
|
return unicode(x)
|
||||||
|
|
||||||
for record in items[start:start+num]:
|
for record in items[start:start+num]:
|
||||||
|
kwargs = {}
|
||||||
aus = record[FM['authors']] if record[FM['authors']] else __builtin__._('Unknown')
|
aus = record[FM['authors']] if record[FM['authors']] else __builtin__._('Unknown')
|
||||||
authors = '|'.join([i.replace('|', ',') for i in aus.split(',')])
|
authors = '|'.join([i.replace('|', ',') for i in aus.split(',')])
|
||||||
record[FM['series_index']] = \
|
kwargs['authors'] = authors
|
||||||
|
|
||||||
|
kwargs['series_index'] = \
|
||||||
fmt_sidx(float(record[FM['series_index']]))
|
fmt_sidx(float(record[FM['series_index']]))
|
||||||
ts, pd = strftime('%Y/%m/%d %H:%M:%S', record[FM['timestamp']]), \
|
|
||||||
strftime('%Y/%m/%d %H:%M:%S', record[FM['pubdate']])
|
for x in ('timestamp', 'pubdate'):
|
||||||
books.append(book.generate(r=record, authors=authors, timestamp=ts,
|
kwargs[x] = strftime('%Y/%m/%d %H:%M:%S', record[FM[x]])
|
||||||
pubdate=pd, FM=FM).render('xml').decode('utf-8'))
|
|
||||||
|
for x in ('id', 'title', 'sort', 'author_sort', 'rating', 'size'):
|
||||||
|
kwargs[x] = serialize(record[FM[x]])
|
||||||
|
|
||||||
|
for x in ('isbn', 'formats', 'series', 'tags', 'publisher',
|
||||||
|
'comments'):
|
||||||
|
y = record[FM[x]]
|
||||||
|
kwargs[x] = serialize(y) if y else ''
|
||||||
|
|
||||||
|
c = kwargs.pop('comments')
|
||||||
|
books.append(E.book(c, **kwargs))
|
||||||
|
|
||||||
updated = self.db.last_modified()
|
updated = self.db.last_modified()
|
||||||
|
kwargs = dict(
|
||||||
|
start = str(start),
|
||||||
|
updated=updated.strftime('%Y-%m-%dT%H:%M:%S+00:00'),
|
||||||
|
total=str(len(ids)),
|
||||||
|
num=str(len(books)))
|
||||||
|
ans = E.library(*books, **kwargs)
|
||||||
|
|
||||||
cherrypy.response.headers['Content-Type'] = 'text/xml'
|
cherrypy.response.headers['Content-Type'] = 'text/xml'
|
||||||
cherrypy.response.headers['Last-Modified'] = self.last_modified(updated)
|
cherrypy.response.headers['Last-Modified'] = self.last_modified(updated)
|
||||||
return LIBRARY.generate(books=books, start=start, updated=updated,
|
|
||||||
total=len(ids), FM=FM).render('xml')
|
return etree.tostring(ans, encoding='utf-8', pretty_print=True,
|
||||||
|
xml_declaration=True)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@ -132,7 +132,6 @@ class PostInstall:
|
|||||||
self.mime_resources = []
|
self.mime_resources = []
|
||||||
if islinux:
|
if islinux:
|
||||||
self.setup_completion()
|
self.setup_completion()
|
||||||
self.setup_udev_rules()
|
|
||||||
self.install_man_pages()
|
self.install_man_pages()
|
||||||
if islinux:
|
if islinux:
|
||||||
self.setup_desktop_integration()
|
self.setup_desktop_integration()
|
||||||
@ -286,40 +285,6 @@ class PostInstall:
|
|||||||
raise
|
raise
|
||||||
self.task_failed('Setting up completion failed')
|
self.task_failed('Setting up completion failed')
|
||||||
|
|
||||||
def setup_udev_rules(self):
|
|
||||||
self.info('Trying to setup udev rules...')
|
|
||||||
try:
|
|
||||||
group_file = os.path.join(self.opts.staging_etc, 'group')
|
|
||||||
if not os.path.exists(group_file):
|
|
||||||
group_file = '/etc/group'
|
|
||||||
groups = open(group_file, 'rb').read()
|
|
||||||
group = 'plugdev' if 'plugdev' in groups else 'usb'
|
|
||||||
old_udev = '/etc/udev/rules.d/95-calibre.rules'
|
|
||||||
if not os.path.exists(old_udev):
|
|
||||||
old_udev = os.path.join(self.opts.staging_etc, 'udev/rules.d/95-calibre.rules')
|
|
||||||
if os.path.exists(old_udev):
|
|
||||||
try:
|
|
||||||
os.remove(old_udev)
|
|
||||||
except:
|
|
||||||
self.warn('Old udev rules found, please delete manually:',
|
|
||||||
old_udev)
|
|
||||||
if self.opts.staging_root == '/usr':
|
|
||||||
base = '/lib'
|
|
||||||
else:
|
|
||||||
base = os.path.join(self.opts.staging_root, 'lib')
|
|
||||||
base = os.path.join(base, 'udev', 'rules.d')
|
|
||||||
if not os.path.exists(base):
|
|
||||||
os.makedirs(base)
|
|
||||||
with open(os.path.join(base, '95-calibre.rules'), 'wb') as udev:
|
|
||||||
self.manifest.append(udev.name)
|
|
||||||
udev.write('''# Sony Reader PRS-500\n'''
|
|
||||||
'''SUBSYSTEMS=="usb", SYSFS{idProduct}=="029b", SYSFS{idVendor}=="054c", MODE="660", GROUP="%s"\n'''%(group,)
|
|
||||||
)
|
|
||||||
except:
|
|
||||||
if self.opts.fatal_errors:
|
|
||||||
raise
|
|
||||||
self.task_failed('Setting up udev rules failed')
|
|
||||||
|
|
||||||
def install_man_pages(self):
|
def install_man_pages(self):
|
||||||
try:
|
try:
|
||||||
from calibre.utils.help2man import create_man_page
|
from calibre.utils.help2man import create_man_page
|
||||||
|
@ -694,8 +694,10 @@ def _prefs():
|
|||||||
help=_('Add new formats to existing book records'))
|
help=_('Add new formats to existing book records'))
|
||||||
c.add_opt('installation_uuid', default=None, help='Installation UUID')
|
c.add_opt('installation_uuid', default=None, help='Installation UUID')
|
||||||
|
|
||||||
# this is here instead of the gui preferences because calibredb can execute searches
|
# these are here instead of the gui preferences because calibredb and
|
||||||
|
# calibre server can execute searches
|
||||||
c.add_opt('saved_searches', default={}, help=_('List of named saved searches'))
|
c.add_opt('saved_searches', default={}, help=_('List of named saved searches'))
|
||||||
|
c.add_opt('user_categories', default={}, help=_('User-created tag browser categories'))
|
||||||
|
|
||||||
c.add_opt('migrated', default=False, help='For Internal use. Don\'t modify.')
|
c.add_opt('migrated', default=False, help='For Internal use. Don\'t modify.')
|
||||||
return c
|
return c
|
||||||
|
114
src/calibre/utils/ordered_dict.py
Normal file
114
src/calibre/utils/ordered_dict.py
Normal file
@ -0,0 +1,114 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
__license__ = 'GPL v3'
|
||||||
|
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
|
||||||
|
|
||||||
|
'''
|
||||||
|
A ordered dictionary. Use the builtin type on python >= 2.7
|
||||||
|
'''
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
from collections import OrderedDict
|
||||||
|
OrderedDict
|
||||||
|
except ImportError:
|
||||||
|
from UserDict import DictMixin
|
||||||
|
|
||||||
|
class OrderedDict(dict, DictMixin):
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwds):
|
||||||
|
if len(args) > 1:
|
||||||
|
raise TypeError('expected at most 1 arguments, got %d' % len(args))
|
||||||
|
try:
|
||||||
|
self.__end
|
||||||
|
except AttributeError:
|
||||||
|
self.clear()
|
||||||
|
self.update(*args, **kwds)
|
||||||
|
|
||||||
|
def clear(self):
|
||||||
|
self.__end = end = []
|
||||||
|
end += [None, end, end] # sentinel node for doubly linked list
|
||||||
|
self.__map = {} # key --> [key, prev, next]
|
||||||
|
dict.clear(self)
|
||||||
|
|
||||||
|
def __setitem__(self, key, value):
|
||||||
|
if key not in self:
|
||||||
|
end = self.__end
|
||||||
|
curr = end[1]
|
||||||
|
curr[2] = end[1] = self.__map[key] = [key, curr, end]
|
||||||
|
dict.__setitem__(self, key, value)
|
||||||
|
|
||||||
|
def __delitem__(self, key):
|
||||||
|
dict.__delitem__(self, key)
|
||||||
|
key, prev, next = self.__map.pop(key)
|
||||||
|
prev[2] = next
|
||||||
|
next[1] = prev
|
||||||
|
|
||||||
|
def __iter__(self):
|
||||||
|
end = self.__end
|
||||||
|
curr = end[2]
|
||||||
|
while curr is not end:
|
||||||
|
yield curr[0]
|
||||||
|
curr = curr[2]
|
||||||
|
|
||||||
|
def __reversed__(self):
|
||||||
|
end = self.__end
|
||||||
|
curr = end[1]
|
||||||
|
while curr is not end:
|
||||||
|
yield curr[0]
|
||||||
|
curr = curr[1]
|
||||||
|
|
||||||
|
def popitem(self, last=True):
|
||||||
|
if not self:
|
||||||
|
raise KeyError('dictionary is empty')
|
||||||
|
if last:
|
||||||
|
key = reversed(self).next()
|
||||||
|
else:
|
||||||
|
key = iter(self).next()
|
||||||
|
value = self.pop(key)
|
||||||
|
return key, value
|
||||||
|
|
||||||
|
def __reduce__(self):
|
||||||
|
items = [[k, self[k]] for k in self]
|
||||||
|
tmp = self.__map, self.__end
|
||||||
|
del self.__map, self.__end
|
||||||
|
inst_dict = vars(self).copy()
|
||||||
|
self.__map, self.__end = tmp
|
||||||
|
if inst_dict:
|
||||||
|
return (self.__class__, (items,), inst_dict)
|
||||||
|
return self.__class__, (items,)
|
||||||
|
|
||||||
|
def keys(self):
|
||||||
|
return list(self)
|
||||||
|
|
||||||
|
setdefault = DictMixin.setdefault
|
||||||
|
update = DictMixin.update
|
||||||
|
pop = DictMixin.pop
|
||||||
|
values = DictMixin.values
|
||||||
|
items = DictMixin.items
|
||||||
|
iterkeys = DictMixin.iterkeys
|
||||||
|
itervalues = DictMixin.itervalues
|
||||||
|
iteritems = DictMixin.iteritems
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
if not self:
|
||||||
|
return '%s()' % (self.__class__.__name__,)
|
||||||
|
return '%s(%r)' % (self.__class__.__name__, self.items())
|
||||||
|
|
||||||
|
def copy(self):
|
||||||
|
return self.__class__(self)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def fromkeys(cls, iterable, value=None):
|
||||||
|
d = cls()
|
||||||
|
for key in iterable:
|
||||||
|
d[key] = value
|
||||||
|
return d
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
if isinstance(other, OrderedDict):
|
||||||
|
return len(self)==len(other) and self.items() == other.items()
|
||||||
|
return dict.__eq__(self, other)
|
||||||
|
|
||||||
|
def __ne__(self, other):
|
||||||
|
return not self == other
|
@ -2,207 +2,193 @@
|
|||||||
|
|
||||||
__license__ = 'GPL v3'
|
__license__ = 'GPL v3'
|
||||||
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
|
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
|
||||||
from calibre.utils.genshi.template import MarkupTemplate
|
|
||||||
from calibre import preferred_encoding, strftime
|
|
||||||
|
|
||||||
|
from lxml import html, etree
|
||||||
|
from lxml.html.builder import HTML, HEAD, TITLE, STYLE, DIV, BODY, \
|
||||||
|
STRONG, BR, H1, SPAN, A, HR, UL, LI, H2, IMG, P as PT
|
||||||
|
|
||||||
class Template(MarkupTemplate):
|
from calibre import preferred_encoding, strftime, isbytestring
|
||||||
|
|
||||||
|
def CLASS(*args, **kwargs): # class is a reserved word in Python
|
||||||
|
kwargs['class'] = ' '.join(args)
|
||||||
|
return kwargs
|
||||||
|
|
||||||
|
class Template(object):
|
||||||
|
|
||||||
|
IS_HTML = True
|
||||||
|
|
||||||
def generate(self, *args, **kwargs):
|
def generate(self, *args, **kwargs):
|
||||||
if not kwargs.has_key('style'):
|
if not kwargs.has_key('style'):
|
||||||
kwargs['style'] = ''
|
kwargs['style'] = ''
|
||||||
for key in kwargs.keys():
|
for key in kwargs.keys():
|
||||||
if isinstance(kwargs[key], basestring) and not isinstance(kwargs[key], unicode):
|
if isbytestring(kwargs[key]):
|
||||||
kwargs[key] = unicode(kwargs[key], 'utf-8', 'replace')
|
kwargs[key] = kwargs[key].decode('utf-8', 'replace')
|
||||||
for arg in args:
|
if kwargs[key] is None:
|
||||||
if isinstance(arg, basestring) and not isinstance(arg, unicode):
|
kwargs[key] = u''
|
||||||
arg = unicode(arg, 'utf-8', 'replace')
|
args = list(args)
|
||||||
|
for i in range(len(args)):
|
||||||
|
if isbytestring(args[i]):
|
||||||
|
args[i] = args[i].decode('utf-8', 'replace')
|
||||||
|
if args[i] is None:
|
||||||
|
args[i] = u''
|
||||||
|
|
||||||
return MarkupTemplate.generate(self, *args, **kwargs)
|
self._generate(*args, **kwargs)
|
||||||
|
|
||||||
|
return self
|
||||||
|
|
||||||
|
def render(self, *args, **kwargs):
|
||||||
|
if self.IS_HTML:
|
||||||
|
return html.tostring(self.root, encoding='utf-8',
|
||||||
|
include_meta_content_type=True, pretty_print=True)
|
||||||
|
return etree.tostring(self.root, encoding='utf-8', xml_declaration=True,
|
||||||
|
pretty_print=True)
|
||||||
|
|
||||||
class NavBarTemplate(Template):
|
class NavBarTemplate(Template):
|
||||||
|
|
||||||
def __init__(self):
|
def _generate(self, bottom, feed, art, number_of_articles_in_feed,
|
||||||
Template.__init__(self, u'''\
|
|
||||||
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
|
|
||||||
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
|
|
||||||
<html xmlns="http://www.w3.org/1999/xhtml"
|
|
||||||
xml:lang="en"
|
|
||||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
|
||||||
xmlns:py="http://genshi.edgewall.org/"
|
|
||||||
|
|
||||||
>
|
|
||||||
<head>
|
|
||||||
<style py:if="extra_css" type="text/css">
|
|
||||||
${extra_css}
|
|
||||||
</style>
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
<div class="calibre_navbar calibre_rescale_70" style="text-align:${'center' if center else 'left'};">
|
|
||||||
<hr py:if="bottom" />
|
|
||||||
<p py:if="bottom" style="text-align:left">
|
|
||||||
This article was downloaded by <b>${__appname__}</b> from <a href="${url}">${url}</a>
|
|
||||||
</p>
|
|
||||||
<br py:if="bottom" /><br py:if="bottom" />
|
|
||||||
<py:if test="art != num - 1 and not bottom">
|
|
||||||
| <a href="${prefix}../article_${str(art+1)}/index.html">Next</a>
|
|
||||||
</py:if>
|
|
||||||
<py:if test="art == num - 1 and not bottom">
|
|
||||||
| <a href="${prefix}../../feed_${str(feed+1)}/index.html">Next</a>
|
|
||||||
</py:if>
|
|
||||||
| <a href="${prefix}../index.html#article_${str(art)}">Section menu</a>
|
|
||||||
<py:if test="two_levels">
|
|
||||||
| <a href="${prefix}../../index.html#feed_${str(feed)}">Main menu</a>
|
|
||||||
</py:if>
|
|
||||||
<py:if test="art != 0 and not bottom">
|
|
||||||
| <a href="${prefix}../article_${str(art-1)}/index.html">Previous</a>
|
|
||||||
</py:if>
|
|
||||||
|
|
|
||||||
<hr py:if="not bottom" />
|
|
||||||
</div>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
||||||
''')
|
|
||||||
|
|
||||||
def generate(self, bottom, feed, art, number_of_articles_in_feed,
|
|
||||||
two_levels, url, __appname__, prefix='', center=True,
|
two_levels, url, __appname__, prefix='', center=True,
|
||||||
extra_css=None):
|
extra_css=None, style=None):
|
||||||
|
head = HEAD(TITLE('navbar'))
|
||||||
|
if style:
|
||||||
|
head.append(STYLE(style, type='text/css'))
|
||||||
|
if extra_css:
|
||||||
|
head.append(STYLE(extra_css, type='text/css'))
|
||||||
|
|
||||||
if prefix and not prefix.endswith('/'):
|
if prefix and not prefix.endswith('/'):
|
||||||
prefix += '/'
|
prefix += '/'
|
||||||
return Template.generate(self, bottom=bottom, art=art, feed=feed,
|
align = 'center' if center else 'left'
|
||||||
num=number_of_articles_in_feed,
|
navbar = DIV(CLASS('calibre_navbar', 'calibre_rescale_70',
|
||||||
two_levels=two_levels, url=url,
|
style='text-align:'+align))
|
||||||
__appname__=__appname__, prefix=prefix,
|
if bottom:
|
||||||
center=center, extra_css=extra_css)
|
navbar.append(HR())
|
||||||
|
text = 'This article was downloaded by '
|
||||||
|
p = PT(text, STRONG(__appname__), A(url, href=url), style='text-align:left')
|
||||||
|
p[0].tail = ' from '
|
||||||
|
navbar.append(BR())
|
||||||
|
navbar.append(BR())
|
||||||
|
else:
|
||||||
|
next = 'feed_%d'%(feed+1) if art == number_of_articles_in_feed - 1 \
|
||||||
|
else 'article_%d'%(art+1)
|
||||||
|
up = '../..' if art == number_of_articles_in_feed - 1 else '..'
|
||||||
|
href = '%s%s/%s/index.html'%(prefix, up, next)
|
||||||
|
navbar.text = '| '
|
||||||
|
navbar.append(A('Next', href=href))
|
||||||
|
href = '%s../index.html#article_%d'%(prefix, art)
|
||||||
|
navbar.iterchildren(reversed=True).next().tail = ' | '
|
||||||
|
navbar.append(A('Section Menu', href=href))
|
||||||
|
href = '%s../../index.html#feed_%d'%(prefix, feed)
|
||||||
|
navbar.iterchildren(reversed=True).next().tail = ' | '
|
||||||
|
navbar.append(A('Main Menu', href=href))
|
||||||
|
if art > 0 and not bottom:
|
||||||
|
href = '%s../article_%d/index.html'%(prefix, art-1)
|
||||||
|
navbar.iterchildren(reversed=True).next().tail = ' | '
|
||||||
|
navbar.append(A('Previous', href=href))
|
||||||
|
navbar.iterchildren(reversed=True).next().tail = ' | '
|
||||||
|
if not bottom:
|
||||||
|
navbar.append(HR())
|
||||||
|
|
||||||
|
self.root = HTML(head, BODY(navbar))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
class IndexTemplate(Template):
|
class IndexTemplate(Template):
|
||||||
|
|
||||||
def __init__(self):
|
def _generate(self, title, datefmt, feeds, extra_css=None, style=None):
|
||||||
Template.__init__(self, u'''\
|
|
||||||
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
|
|
||||||
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
|
|
||||||
<html xmlns="http://www.w3.org/1999/xhtml"
|
|
||||||
xml:lang="en"
|
|
||||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
|
||||||
xmlns:py="http://genshi.edgewall.org/"
|
|
||||||
|
|
||||||
>
|
|
||||||
<head>
|
|
||||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
|
|
||||||
<title>${title}</title>
|
|
||||||
<style py:if="style" type="text/css">
|
|
||||||
${style}
|
|
||||||
</style>
|
|
||||||
<style py:if="extra_css" type="text/css">
|
|
||||||
${extra_css}
|
|
||||||
</style>
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
<div class="calibre_rescale_100">
|
|
||||||
<h1 class="calibre_recipe_title calibre_rescale_180">${title}</h1>
|
|
||||||
<p style="text-align:right">${date}</p>
|
|
||||||
<ul class="calibre_feed_list">
|
|
||||||
<py:for each="i, feed in enumerate(feeds)">
|
|
||||||
<li py:if="feed" id="feed_${str(i)}">
|
|
||||||
<a class="feed calibre_rescale_120" href="${'feed_%d/index.html'%i}">${feed.title}</a>
|
|
||||||
</li>
|
|
||||||
</py:for>
|
|
||||||
</ul>
|
|
||||||
</div>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
||||||
''')
|
|
||||||
|
|
||||||
def generate(self, title, datefmt, feeds, extra_css=None):
|
|
||||||
if isinstance(datefmt, unicode):
|
if isinstance(datefmt, unicode):
|
||||||
datefmt = datefmt.encode(preferred_encoding)
|
datefmt = datefmt.encode(preferred_encoding)
|
||||||
date = strftime(datefmt)
|
date = strftime(datefmt)
|
||||||
return Template.generate(self, title=title, date=date, feeds=feeds,
|
head = HEAD(TITLE(title))
|
||||||
extra_css=extra_css)
|
if style:
|
||||||
|
head.append(STYLE(style, type='text/css'))
|
||||||
|
if extra_css:
|
||||||
|
head.append(STYLE(extra_css, type='text/css'))
|
||||||
|
ul = UL(CLASS('calibre_feed_list'))
|
||||||
|
for i, feed in enumerate(feeds):
|
||||||
|
if feed:
|
||||||
|
li = LI(A(feed.title, CLASS('feed', 'calibre_rescale_120',
|
||||||
|
href='feed_%d/index.html'%i)), id='feed_%d'%i)
|
||||||
|
ul.append(li)
|
||||||
|
div = DIV(
|
||||||
|
H1(title, CLASS('calibre_recipe_title', 'calibre_rescale_180')),
|
||||||
|
PT(date, style='text-align:right'),
|
||||||
|
ul,
|
||||||
|
CLASS('calibre_rescale_100'))
|
||||||
|
self.root = HTML(head, BODY(div))
|
||||||
|
|
||||||
class FeedTemplate(Template):
|
class FeedTemplate(Template):
|
||||||
|
|
||||||
def __init__(self):
|
def _generate(self, feed, cutoff, extra_css=None, style=None):
|
||||||
Template.__init__(self, u'''\
|
head = HEAD(TITLE(feed.title))
|
||||||
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
|
if style:
|
||||||
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
|
head.append(STYLE(style, type='text/css'))
|
||||||
<html xmlns="http://www.w3.org/1999/xhtml"
|
if extra_css:
|
||||||
xml:lang="en"
|
head.append(STYLE(extra_css, type='text/css'))
|
||||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
body = BODY(style='page-break-before:always')
|
||||||
xmlns:py="http://genshi.edgewall.org/"
|
div = DIV(
|
||||||
|
H2(feed.title,
|
||||||
|
CLASS('calibre_feed_title', 'calibre_rescale_160')),
|
||||||
|
CLASS('calibre_rescale_100')
|
||||||
|
)
|
||||||
|
body.append(div)
|
||||||
|
if getattr(feed, 'image', None):
|
||||||
|
div.append(DIV(IMG(
|
||||||
|
alt = feed.image_alt if feed.image_alt else '',
|
||||||
|
src = feed.image_url
|
||||||
|
),
|
||||||
|
CLASS('calibre_feed_image')))
|
||||||
|
if getattr(feed, 'description', None):
|
||||||
|
d = DIV(feed.description, CLASS('calibre_feed_description',
|
||||||
|
'calibre_rescale_80'))
|
||||||
|
d.append(BR())
|
||||||
|
div.append(d)
|
||||||
|
ul = UL(CLASS('calibre_article_list'))
|
||||||
|
for i, article in enumerate(feed.articles):
|
||||||
|
if not getattr(article, 'downloaded', False):
|
||||||
|
continue
|
||||||
|
li = LI(
|
||||||
|
A(article.title, CLASS('article calibre_rescale_120',
|
||||||
|
href=article.url)),
|
||||||
|
SPAN(article.formatted_date, CLASS('article_date')),
|
||||||
|
CLASS('calibre_rescale_100', id='article_%d'%i,
|
||||||
|
style='padding-bottom:0.5em')
|
||||||
|
)
|
||||||
|
if article.summary:
|
||||||
|
li.append(DIV(cutoff(article.text_summary),
|
||||||
|
CLASS('article_description', 'calibre_rescale_70')))
|
||||||
|
ul.append(li)
|
||||||
|
div.append(ul)
|
||||||
|
navbar = DIV('| ', CLASS('calibre_navbar', 'calibre_rescale_70'))
|
||||||
|
link = A('Up one level', href="../index.html")
|
||||||
|
link.tail = ' |'
|
||||||
|
navbar.append(link)
|
||||||
|
div.append(navbar)
|
||||||
|
|
||||||
>
|
self.root = HTML(head, body)
|
||||||
<head>
|
|
||||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
|
|
||||||
<title>${feed.title}</title>
|
|
||||||
<style py:if="style" type="text/css">
|
|
||||||
${style}
|
|
||||||
</style>
|
|
||||||
<style py:if="extra_css" type="text/css">
|
|
||||||
${extra_css}
|
|
||||||
</style>
|
|
||||||
</head>
|
|
||||||
<body style="page-break-before:always">
|
|
||||||
<div class="calibre_rescale_100">
|
|
||||||
<h2 class="calibre_feed_title calibre_rescale_160">${feed.title}</h2>
|
|
||||||
<py:if test="getattr(feed, 'image', None)">
|
|
||||||
<div class="calibre_feed_image">
|
|
||||||
<img alt="${feed.image_alt}" src="${feed.image_url}" />
|
|
||||||
</div>
|
|
||||||
</py:if>
|
|
||||||
<div class="calibre_feed_description calibre_rescale_80" py:if="getattr(feed, 'description', None)">
|
|
||||||
${feed.description}<br />
|
|
||||||
</div>
|
|
||||||
<ul class="calibre_article_list">
|
|
||||||
<py:for each="i, article in enumerate(feed.articles)">
|
|
||||||
<li id="${'article_%d'%i}" py:if="getattr(article, 'downloaded',
|
|
||||||
False)" style="padding-bottom:0.5em" class="calibre_rescale_100">
|
|
||||||
<a class="article calibre_rescale_120" href="${article.url}">${article.title}</a>
|
|
||||||
<span class="article_date">${article.formatted_date}</span>
|
|
||||||
<div class="article_description calibre_rescale_70" py:if="article.summary">
|
|
||||||
${Markup(cutoff(article.text_summary))}
|
|
||||||
</div>
|
|
||||||
</li>
|
|
||||||
</py:for>
|
|
||||||
</ul>
|
|
||||||
<div class="calibre_navbar calibre_rescale_70">
|
|
||||||
| <a href="../index.html">Up one level</a> |
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
||||||
''')
|
|
||||||
|
|
||||||
def generate(self, feed, cutoff, extra_css=None):
|
|
||||||
return Template.generate(self, feed=feed, cutoff=cutoff,
|
|
||||||
extra_css=extra_css)
|
|
||||||
|
|
||||||
class EmbeddedContent(Template):
|
class EmbeddedContent(Template):
|
||||||
|
|
||||||
def __init__(self):
|
def _generate(self, article, style=None, extra_css=None):
|
||||||
Template.__init__(self, u'''\
|
content = article.content if article.content else ''
|
||||||
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
|
summary = article.summary if article.summary else ''
|
||||||
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
|
text = content if len(content) > len(summary) else summary
|
||||||
<html xmlns="http://www.w3.org/1999/xhtml"
|
head = HEAD(TITLE(article.title))
|
||||||
xml:lang="en"
|
if style:
|
||||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
head.append(STYLE(style, type='text/css'))
|
||||||
xmlns:py="http://genshi.edgewall.org/"
|
if extra_css:
|
||||||
|
head.append(STYLE(extra_css, type='text/css'))
|
||||||
|
|
||||||
>
|
if isbytestring(text):
|
||||||
<head>
|
text = text.decode('utf-8', 'replace')
|
||||||
<title>${article.title}</title>
|
elements = html.fragments_fromstring(text)
|
||||||
</head>
|
self.root = HTML(head,
|
||||||
|
BODY(H2(article.title), DIV()))
|
||||||
|
div = self.root.find('body').find('div')
|
||||||
|
if elements and isinstance(elements[0], unicode):
|
||||||
|
div.text = elements[0]
|
||||||
|
elements = list(elements)[1:]
|
||||||
|
for elem in elements:
|
||||||
|
elem.getparent().remove(elem)
|
||||||
|
div.append(elem)
|
||||||
|
|
||||||
<body>
|
|
||||||
<h2>${article.title}</h2>
|
|
||||||
<div>
|
|
||||||
${Markup(article.content if len(article.content if article.content else '') > len(article.summary if article.summary else '') else article.summary)}
|
|
||||||
</div>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
||||||
''')
|
|
||||||
|
|
||||||
def generate(self, article):
|
|
||||||
return Template.generate(self, article=article)
|
|
||||||
|
142
src/routes/__init__.py
Normal file
142
src/routes/__init__.py
Normal file
@ -0,0 +1,142 @@
|
|||||||
|
"""Provides common classes and functions most users will want access to."""
|
||||||
|
import threading, sys
|
||||||
|
|
||||||
|
class _RequestConfig(object):
|
||||||
|
"""
|
||||||
|
RequestConfig thread-local singleton
|
||||||
|
|
||||||
|
The Routes RequestConfig object is a thread-local singleton that should
|
||||||
|
be initialized by the web framework that is utilizing Routes.
|
||||||
|
"""
|
||||||
|
__shared_state = threading.local()
|
||||||
|
|
||||||
|
def __getattr__(self, name):
|
||||||
|
return getattr(self.__shared_state, name)
|
||||||
|
|
||||||
|
def __setattr__(self, name, value):
|
||||||
|
"""
|
||||||
|
If the name is environ, load the wsgi envion with load_wsgi_environ
|
||||||
|
and set the environ
|
||||||
|
"""
|
||||||
|
if name == 'environ':
|
||||||
|
self.load_wsgi_environ(value)
|
||||||
|
return self.__shared_state.__setattr__(name, value)
|
||||||
|
return self.__shared_state.__setattr__(name, value)
|
||||||
|
|
||||||
|
def __delattr__(self, name):
|
||||||
|
delattr(self.__shared_state, name)
|
||||||
|
|
||||||
|
def load_wsgi_environ(self, environ):
|
||||||
|
"""
|
||||||
|
Load the protocol/server info from the environ and store it.
|
||||||
|
Also, match the incoming URL if there's already a mapper, and
|
||||||
|
store the resulting match dict in mapper_dict.
|
||||||
|
"""
|
||||||
|
if 'HTTPS' in environ or environ.get('wsgi.url_scheme') == 'https' \
|
||||||
|
or environ.get('HTTP_X_FORWARDED_PROTO') == 'https':
|
||||||
|
self.__shared_state.protocol = 'https'
|
||||||
|
else:
|
||||||
|
self.__shared_state.protocol = 'http'
|
||||||
|
try:
|
||||||
|
self.mapper.environ = environ
|
||||||
|
except AttributeError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Wrap in try/except as common case is that there is a mapper
|
||||||
|
# attached to self
|
||||||
|
try:
|
||||||
|
if 'PATH_INFO' in environ:
|
||||||
|
mapper = self.mapper
|
||||||
|
path = environ['PATH_INFO']
|
||||||
|
result = mapper.routematch(path)
|
||||||
|
if result is not None:
|
||||||
|
self.__shared_state.mapper_dict = result[0]
|
||||||
|
self.__shared_state.route = result[1]
|
||||||
|
else:
|
||||||
|
self.__shared_state.mapper_dict = None
|
||||||
|
self.__shared_state.route = None
|
||||||
|
except AttributeError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
if 'HTTP_X_FORWARDED_HOST' in environ:
|
||||||
|
self.__shared_state.host = environ['HTTP_X_FORWARDED_HOST']
|
||||||
|
elif 'HTTP_HOST' in environ:
|
||||||
|
self.__shared_state.host = environ['HTTP_HOST']
|
||||||
|
else:
|
||||||
|
self.__shared_state.host = environ['SERVER_NAME']
|
||||||
|
if environ['wsgi.url_scheme'] == 'https':
|
||||||
|
if environ['SERVER_PORT'] != '443':
|
||||||
|
self.__shared_state.host += ':' + environ['SERVER_PORT']
|
||||||
|
else:
|
||||||
|
if environ['SERVER_PORT'] != '80':
|
||||||
|
self.__shared_state.host += ':' + environ['SERVER_PORT']
|
||||||
|
|
||||||
|
def request_config(original=False):
|
||||||
|
"""
|
||||||
|
Returns the Routes RequestConfig object.
|
||||||
|
|
||||||
|
To get the Routes RequestConfig:
|
||||||
|
|
||||||
|
>>> from routes import *
|
||||||
|
>>> config = request_config()
|
||||||
|
|
||||||
|
The following attributes must be set on the config object every request:
|
||||||
|
|
||||||
|
mapper
|
||||||
|
mapper should be a Mapper instance thats ready for use
|
||||||
|
host
|
||||||
|
host is the hostname of the webapp
|
||||||
|
protocol
|
||||||
|
protocol is the protocol of the current request
|
||||||
|
mapper_dict
|
||||||
|
mapper_dict should be the dict returned by mapper.match()
|
||||||
|
redirect
|
||||||
|
redirect should be a function that issues a redirect,
|
||||||
|
and takes a url as the sole argument
|
||||||
|
prefix (optional)
|
||||||
|
Set if the application is moved under a URL prefix. Prefix
|
||||||
|
will be stripped before matching, and prepended on generation
|
||||||
|
environ (optional)
|
||||||
|
Set to the WSGI environ for automatic prefix support if the
|
||||||
|
webapp is underneath a 'SCRIPT_NAME'
|
||||||
|
|
||||||
|
Setting the environ will use information in environ to try and
|
||||||
|
populate the host/protocol/mapper_dict options if you've already
|
||||||
|
set a mapper.
|
||||||
|
|
||||||
|
**Using your own requst local**
|
||||||
|
|
||||||
|
If you have your own request local object that you'd like to use instead
|
||||||
|
of the default thread local provided by Routes, you can configure Routes
|
||||||
|
to use it::
|
||||||
|
|
||||||
|
from routes import request_config()
|
||||||
|
config = request_config()
|
||||||
|
if hasattr(config, 'using_request_local'):
|
||||||
|
config.request_local = YourLocalCallable
|
||||||
|
config = request_config()
|
||||||
|
|
||||||
|
Once you have configured request_config, its advisable you retrieve it
|
||||||
|
again to get the object you wanted. The variable you assign to
|
||||||
|
request_local is assumed to be a callable that will get the local config
|
||||||
|
object you wish.
|
||||||
|
|
||||||
|
This example tests for the presence of the 'using_request_local' attribute
|
||||||
|
which will be present if you haven't assigned it yet. This way you can
|
||||||
|
avoid repeat assignments of the request specific callable.
|
||||||
|
|
||||||
|
Should you want the original object, perhaps to change the callable its
|
||||||
|
using or stop this behavior, call request_config(original=True).
|
||||||
|
"""
|
||||||
|
obj = _RequestConfig()
|
||||||
|
try:
|
||||||
|
if obj.request_local and original is False:
|
||||||
|
return getattr(obj, 'request_local')()
|
||||||
|
except AttributeError:
|
||||||
|
obj.request_local = False
|
||||||
|
obj.using_request_local = False
|
||||||
|
return _RequestConfig()
|
||||||
|
|
||||||
|
from routes.mapper import Mapper
|
||||||
|
from routes.util import redirect_to, url_for, URLGenerator
|
||||||
|
__all__=['Mapper', 'url_for', 'URLGenerator', 'redirect_to', 'request_config']
|
4
src/routes/base.py
Normal file
4
src/routes/base.py
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
"""Route and Mapper core classes"""
|
||||||
|
from routes import request_config
|
||||||
|
from routes.mapper import Mapper
|
||||||
|
from routes.route import Route
|
70
src/routes/lru.py
Normal file
70
src/routes/lru.py
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
"""LRU caching class and decorator"""
|
||||||
|
import threading
|
||||||
|
|
||||||
|
_marker = object()
|
||||||
|
|
||||||
|
class LRUCache(object):
|
||||||
|
def __init__(self, size):
|
||||||
|
""" Implements a psueudo-LRU algorithm (CLOCK) """
|
||||||
|
if size < 1:
|
||||||
|
raise ValueError('size must be >1')
|
||||||
|
self.clock = []
|
||||||
|
for i in xrange(0, size):
|
||||||
|
self.clock.append({'key':_marker, 'ref':False})
|
||||||
|
self.size = size
|
||||||
|
self.maxpos = size - 1
|
||||||
|
self.hand = 0
|
||||||
|
self.data = {}
|
||||||
|
self.lock = threading.Lock()
|
||||||
|
|
||||||
|
def __contains__(self, key):
|
||||||
|
return key in self.data
|
||||||
|
|
||||||
|
def __getitem__(self, key, default=None):
|
||||||
|
try:
|
||||||
|
datum = self.data[key]
|
||||||
|
except KeyError:
|
||||||
|
return default
|
||||||
|
pos, val = datum
|
||||||
|
self.clock[pos]['ref'] = True
|
||||||
|
hand = pos + 1
|
||||||
|
if hand > self.maxpos:
|
||||||
|
hand = 0
|
||||||
|
self.hand = hand
|
||||||
|
return val
|
||||||
|
|
||||||
|
def __setitem__(self, key, val, _marker=_marker):
|
||||||
|
hand = self.hand
|
||||||
|
maxpos = self.maxpos
|
||||||
|
clock = self.clock
|
||||||
|
data = self.data
|
||||||
|
lock = self.lock
|
||||||
|
|
||||||
|
end = hand - 1
|
||||||
|
if end < 0:
|
||||||
|
end = maxpos
|
||||||
|
|
||||||
|
while 1:
|
||||||
|
current = clock[hand]
|
||||||
|
ref = current['ref']
|
||||||
|
if ref is True:
|
||||||
|
current['ref'] = False
|
||||||
|
hand = hand + 1
|
||||||
|
if hand > maxpos:
|
||||||
|
hand = 0
|
||||||
|
elif ref is False or hand == end:
|
||||||
|
lock.acquire()
|
||||||
|
try:
|
||||||
|
oldkey = current['key']
|
||||||
|
if oldkey in data:
|
||||||
|
del data[oldkey]
|
||||||
|
current['key'] = key
|
||||||
|
current['ref'] = True
|
||||||
|
data[key] = (hand, val)
|
||||||
|
hand += 1
|
||||||
|
if hand > maxpos:
|
||||||
|
hand = 0
|
||||||
|
self.hand = hand
|
||||||
|
finally:
|
||||||
|
lock.release()
|
||||||
|
break
|
1161
src/routes/mapper.py
Normal file
1161
src/routes/mapper.py
Normal file
File diff suppressed because it is too large
Load Diff
146
src/routes/middleware.py
Normal file
146
src/routes/middleware.py
Normal file
@ -0,0 +1,146 @@
|
|||||||
|
"""Routes WSGI Middleware"""
|
||||||
|
import re
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from webob import Request
|
||||||
|
|
||||||
|
from routes.base import request_config
|
||||||
|
from routes.util import URLGenerator, url_for
|
||||||
|
|
||||||
|
log = logging.getLogger('routes.middleware')
|
||||||
|
|
||||||
|
class RoutesMiddleware(object):
|
||||||
|
"""Routing middleware that handles resolving the PATH_INFO in
|
||||||
|
addition to optionally recognizing method overriding."""
|
||||||
|
def __init__(self, wsgi_app, mapper, use_method_override=True,
|
||||||
|
path_info=True, singleton=True):
|
||||||
|
"""Create a Route middleware object
|
||||||
|
|
||||||
|
Using the use_method_override keyword will require Paste to be
|
||||||
|
installed, and your application should use Paste's WSGIRequest
|
||||||
|
object as it will properly handle POST issues with wsgi.input
|
||||||
|
should Routes check it.
|
||||||
|
|
||||||
|
If path_info is True, then should a route var contain
|
||||||
|
path_info, the SCRIPT_NAME and PATH_INFO will be altered
|
||||||
|
accordingly. This should be used with routes like:
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
map.connect('blog/*path_info', controller='blog', path_info='')
|
||||||
|
|
||||||
|
"""
|
||||||
|
self.app = wsgi_app
|
||||||
|
self.mapper = mapper
|
||||||
|
self.singleton = singleton
|
||||||
|
self.use_method_override = use_method_override
|
||||||
|
self.path_info = path_info
|
||||||
|
log_debug = self.log_debug = logging.DEBUG >= log.getEffectiveLevel()
|
||||||
|
if self.log_debug:
|
||||||
|
log.debug("Initialized with method overriding = %s, and path "
|
||||||
|
"info altering = %s", use_method_override, path_info)
|
||||||
|
|
||||||
|
def __call__(self, environ, start_response):
|
||||||
|
"""Resolves the URL in PATH_INFO, and uses wsgi.routing_args
|
||||||
|
to pass on URL resolver results."""
|
||||||
|
old_method = None
|
||||||
|
if self.use_method_override:
|
||||||
|
req = None
|
||||||
|
|
||||||
|
# In some odd cases, there's no query string
|
||||||
|
try:
|
||||||
|
qs = environ['QUERY_STRING']
|
||||||
|
except KeyError:
|
||||||
|
qs = ''
|
||||||
|
if '_method' in qs:
|
||||||
|
req = Request(environ)
|
||||||
|
req.errors = 'ignore'
|
||||||
|
if '_method' in req.GET:
|
||||||
|
old_method = environ['REQUEST_METHOD']
|
||||||
|
environ['REQUEST_METHOD'] = req.GET['_method'].upper()
|
||||||
|
if self.log_debug:
|
||||||
|
log.debug("_method found in QUERY_STRING, altering request"
|
||||||
|
" method to %s", environ['REQUEST_METHOD'])
|
||||||
|
elif environ['REQUEST_METHOD'] == 'POST' and is_form_post(environ):
|
||||||
|
if req is None:
|
||||||
|
req = Request(environ)
|
||||||
|
req.errors = 'ignore'
|
||||||
|
if '_method' in req.POST:
|
||||||
|
old_method = environ['REQUEST_METHOD']
|
||||||
|
environ['REQUEST_METHOD'] = req.POST['_method'].upper()
|
||||||
|
if self.log_debug:
|
||||||
|
log.debug("_method found in POST data, altering request "
|
||||||
|
"method to %s", environ['REQUEST_METHOD'])
|
||||||
|
|
||||||
|
# Run the actual route matching
|
||||||
|
# -- Assignment of environ to config triggers route matching
|
||||||
|
if self.singleton:
|
||||||
|
config = request_config()
|
||||||
|
config.mapper = self.mapper
|
||||||
|
config.environ = environ
|
||||||
|
match = config.mapper_dict
|
||||||
|
route = config.route
|
||||||
|
else:
|
||||||
|
results = self.mapper.routematch(environ=environ)
|
||||||
|
if results:
|
||||||
|
match, route = results[0], results[1]
|
||||||
|
else:
|
||||||
|
match = route = None
|
||||||
|
|
||||||
|
if old_method:
|
||||||
|
environ['REQUEST_METHOD'] = old_method
|
||||||
|
|
||||||
|
if not match:
|
||||||
|
match = {}
|
||||||
|
if self.log_debug:
|
||||||
|
urlinfo = "%s %s" % (environ['REQUEST_METHOD'], environ['PATH_INFO'])
|
||||||
|
log.debug("No route matched for %s", urlinfo)
|
||||||
|
elif self.log_debug:
|
||||||
|
urlinfo = "%s %s" % (environ['REQUEST_METHOD'], environ['PATH_INFO'])
|
||||||
|
log.debug("Matched %s", urlinfo)
|
||||||
|
log.debug("Route path: '%s', defaults: %s", route.routepath,
|
||||||
|
route.defaults)
|
||||||
|
log.debug("Match dict: %s", match)
|
||||||
|
|
||||||
|
url = URLGenerator(self.mapper, environ)
|
||||||
|
environ['wsgiorg.routing_args'] = ((url), match)
|
||||||
|
environ['routes.route'] = route
|
||||||
|
environ['routes.url'] = url
|
||||||
|
|
||||||
|
if route and route.redirect:
|
||||||
|
route_name = '_redirect_%s' % id(route)
|
||||||
|
location = url(route_name, **match)
|
||||||
|
log.debug("Using redirect route, redirect to '%s' with status"
|
||||||
|
"code: %s", location, route.redirect_status)
|
||||||
|
start_response(route.redirect_status,
|
||||||
|
[('Content-Type', 'text/plain; charset=utf8'),
|
||||||
|
('Location', location)])
|
||||||
|
return []
|
||||||
|
|
||||||
|
# If the route included a path_info attribute and it should be used to
|
||||||
|
# alter the environ, we'll pull it out
|
||||||
|
if self.path_info and 'path_info' in match:
|
||||||
|
oldpath = environ['PATH_INFO']
|
||||||
|
newpath = match.get('path_info') or ''
|
||||||
|
environ['PATH_INFO'] = newpath
|
||||||
|
if not environ['PATH_INFO'].startswith('/'):
|
||||||
|
environ['PATH_INFO'] = '/' + environ['PATH_INFO']
|
||||||
|
environ['SCRIPT_NAME'] += re.sub(r'^(.*?)/' + re.escape(newpath) + '$',
|
||||||
|
r'\1', oldpath)
|
||||||
|
|
||||||
|
response = self.app(environ, start_response)
|
||||||
|
|
||||||
|
# Wrapped in try as in rare cases the attribute will be gone already
|
||||||
|
try:
|
||||||
|
del self.mapper.environ
|
||||||
|
except AttributeError:
|
||||||
|
pass
|
||||||
|
return response
|
||||||
|
|
||||||
|
def is_form_post(environ):
|
||||||
|
"""Determine whether the request is a POSTed html form"""
|
||||||
|
content_type = environ.get('CONTENT_TYPE', '').lower()
|
||||||
|
if ';' in content_type:
|
||||||
|
content_type = content_type.split(';', 1)[0]
|
||||||
|
return content_type in ('application/x-www-form-urlencoded',
|
||||||
|
'multipart/form-data')
|
742
src/routes/route.py
Normal file
742
src/routes/route.py
Normal file
@ -0,0 +1,742 @@
|
|||||||
|
import re
|
||||||
|
import sys
|
||||||
|
import urllib
|
||||||
|
|
||||||
|
if sys.version < '2.4':
|
||||||
|
from sets import ImmutableSet as frozenset
|
||||||
|
|
||||||
|
from routes.util import _url_quote as url_quote, _str_encode
|
||||||
|
|
||||||
|
|
||||||
|
class Route(object):
|
||||||
|
"""The Route object holds a route recognition and generation
|
||||||
|
routine.
|
||||||
|
|
||||||
|
See Route.__init__ docs for usage.
|
||||||
|
|
||||||
|
"""
|
||||||
|
# reserved keys that don't count
|
||||||
|
reserved_keys = ['requirements']
|
||||||
|
|
||||||
|
# special chars to indicate a natural split in the URL
|
||||||
|
done_chars = ('/', ',', ';', '.', '#')
|
||||||
|
|
||||||
|
def __init__(self, name, routepath, **kargs):
|
||||||
|
"""Initialize a route, with a given routepath for
|
||||||
|
matching/generation
|
||||||
|
|
||||||
|
The set of keyword args will be used as defaults.
|
||||||
|
|
||||||
|
Usage::
|
||||||
|
|
||||||
|
>>> from routes.base import Route
|
||||||
|
>>> newroute = Route(None, ':controller/:action/:id')
|
||||||
|
>>> sorted(newroute.defaults.items())
|
||||||
|
[('action', 'index'), ('id', None)]
|
||||||
|
>>> newroute = Route(None, 'date/:year/:month/:day',
|
||||||
|
... controller="blog", action="view")
|
||||||
|
>>> newroute = Route(None, 'archives/:page', controller="blog",
|
||||||
|
... action="by_page", requirements = { 'page':'\d{1,2}' })
|
||||||
|
>>> newroute.reqs
|
||||||
|
{'page': '\\\d{1,2}'}
|
||||||
|
|
||||||
|
.. Note::
|
||||||
|
Route is generally not called directly, a Mapper instance
|
||||||
|
connect method should be used to add routes.
|
||||||
|
|
||||||
|
"""
|
||||||
|
self.routepath = routepath
|
||||||
|
self.sub_domains = False
|
||||||
|
self.prior = None
|
||||||
|
self.redirect = False
|
||||||
|
self.name = name
|
||||||
|
self._kargs = kargs
|
||||||
|
self.minimization = kargs.pop('_minimize', False)
|
||||||
|
self.encoding = kargs.pop('_encoding', 'utf-8')
|
||||||
|
self.reqs = kargs.get('requirements', {})
|
||||||
|
self.decode_errors = 'replace'
|
||||||
|
|
||||||
|
# Don't bother forming stuff we don't need if its a static route
|
||||||
|
self.static = kargs.pop('_static', False)
|
||||||
|
self.filter = kargs.pop('_filter', None)
|
||||||
|
self.absolute = kargs.pop('_absolute', False)
|
||||||
|
|
||||||
|
# Pull out the member/collection name if present, this applies only to
|
||||||
|
# map.resource
|
||||||
|
self.member_name = kargs.pop('_member_name', None)
|
||||||
|
self.collection_name = kargs.pop('_collection_name', None)
|
||||||
|
self.parent_resource = kargs.pop('_parent_resource', None)
|
||||||
|
|
||||||
|
# Pull out route conditions
|
||||||
|
self.conditions = kargs.pop('conditions', None)
|
||||||
|
|
||||||
|
# Determine if explicit behavior should be used
|
||||||
|
self.explicit = kargs.pop('_explicit', False)
|
||||||
|
|
||||||
|
# Since static need to be generated exactly, treat them as
|
||||||
|
# non-minimized
|
||||||
|
if self.static:
|
||||||
|
self.external = '://' in self.routepath
|
||||||
|
self.minimization = False
|
||||||
|
|
||||||
|
# Strip preceding '/' if present, and not minimizing
|
||||||
|
if routepath.startswith('/') and self.minimization:
|
||||||
|
self.routepath = routepath[1:]
|
||||||
|
self._setup_route()
|
||||||
|
|
||||||
|
def _setup_route(self):
|
||||||
|
# Build our routelist, and the keys used in the route
|
||||||
|
self.routelist = routelist = self._pathkeys(self.routepath)
|
||||||
|
routekeys = frozenset([key['name'] for key in routelist
|
||||||
|
if isinstance(key, dict)])
|
||||||
|
self.dotkeys = frozenset([key['name'] for key in routelist
|
||||||
|
if isinstance(key, dict) and
|
||||||
|
key['type'] == '.'])
|
||||||
|
|
||||||
|
if not self.minimization:
|
||||||
|
self.make_full_route()
|
||||||
|
|
||||||
|
# Build a req list with all the regexp requirements for our args
|
||||||
|
self.req_regs = {}
|
||||||
|
for key, val in self.reqs.iteritems():
|
||||||
|
self.req_regs[key] = re.compile('^' + val + '$')
|
||||||
|
# Update our defaults and set new default keys if needed. defaults
|
||||||
|
# needs to be saved
|
||||||
|
(self.defaults, defaultkeys) = self._defaults(routekeys,
|
||||||
|
self.reserved_keys,
|
||||||
|
self._kargs.copy())
|
||||||
|
# Save the maximum keys we could utilize
|
||||||
|
self.maxkeys = defaultkeys | routekeys
|
||||||
|
|
||||||
|
# Populate our minimum keys, and save a copy of our backward keys for
|
||||||
|
# quicker generation later
|
||||||
|
(self.minkeys, self.routebackwards) = self._minkeys(routelist[:])
|
||||||
|
|
||||||
|
# Populate our hardcoded keys, these are ones that are set and don't
|
||||||
|
# exist in the route
|
||||||
|
self.hardcoded = frozenset([key for key in self.maxkeys \
|
||||||
|
if key not in routekeys and self.defaults[key] is not None])
|
||||||
|
|
||||||
|
# Cache our default keys
|
||||||
|
self._default_keys = frozenset(self.defaults.keys())
|
||||||
|
|
||||||
|
def make_full_route(self):
|
||||||
|
"""Make a full routelist string for use with non-minimized
|
||||||
|
generation"""
|
||||||
|
regpath = ''
|
||||||
|
for part in self.routelist:
|
||||||
|
if isinstance(part, dict):
|
||||||
|
regpath += '%(' + part['name'] + ')s'
|
||||||
|
else:
|
||||||
|
regpath += part
|
||||||
|
self.regpath = regpath
|
||||||
|
|
||||||
|
def make_unicode(self, s):
|
||||||
|
"""Transform the given argument into a unicode string."""
|
||||||
|
if isinstance(s, unicode):
|
||||||
|
return s
|
||||||
|
elif isinstance(s, str):
|
||||||
|
return s.decode(self.encoding)
|
||||||
|
elif callable(s):
|
||||||
|
return s
|
||||||
|
else:
|
||||||
|
return unicode(s)
|
||||||
|
|
||||||
|
def _pathkeys(self, routepath):
|
||||||
|
"""Utility function to walk the route, and pull out the valid
|
||||||
|
dynamic/wildcard keys."""
|
||||||
|
collecting = False
|
||||||
|
current = ''
|
||||||
|
done_on = ''
|
||||||
|
var_type = ''
|
||||||
|
just_started = False
|
||||||
|
routelist = []
|
||||||
|
for char in routepath:
|
||||||
|
if char in [':', '*', '{'] and not collecting and not self.static \
|
||||||
|
or char in ['{'] and not collecting:
|
||||||
|
just_started = True
|
||||||
|
collecting = True
|
||||||
|
var_type = char
|
||||||
|
if char == '{':
|
||||||
|
done_on = '}'
|
||||||
|
just_started = False
|
||||||
|
if len(current) > 0:
|
||||||
|
routelist.append(current)
|
||||||
|
current = ''
|
||||||
|
elif collecting and just_started:
|
||||||
|
just_started = False
|
||||||
|
if char == '(':
|
||||||
|
done_on = ')'
|
||||||
|
else:
|
||||||
|
current = char
|
||||||
|
done_on = self.done_chars + ('-',)
|
||||||
|
elif collecting and char not in done_on:
|
||||||
|
current += char
|
||||||
|
elif collecting:
|
||||||
|
collecting = False
|
||||||
|
if var_type == '{':
|
||||||
|
if current[0] == '.':
|
||||||
|
var_type = '.'
|
||||||
|
current = current[1:]
|
||||||
|
else:
|
||||||
|
var_type = ':'
|
||||||
|
opts = current.split(':')
|
||||||
|
if len(opts) > 1:
|
||||||
|
current = opts[0]
|
||||||
|
self.reqs[current] = opts[1]
|
||||||
|
routelist.append(dict(type=var_type, name=current))
|
||||||
|
if char in self.done_chars:
|
||||||
|
routelist.append(char)
|
||||||
|
done_on = var_type = current = ''
|
||||||
|
else:
|
||||||
|
current += char
|
||||||
|
if collecting:
|
||||||
|
routelist.append(dict(type=var_type, name=current))
|
||||||
|
elif current:
|
||||||
|
routelist.append(current)
|
||||||
|
return routelist
|
||||||
|
|
||||||
|
def _minkeys(self, routelist):
|
||||||
|
"""Utility function to walk the route backwards
|
||||||
|
|
||||||
|
Will also determine the minimum keys we can handle to generate
|
||||||
|
a working route.
|
||||||
|
|
||||||
|
routelist is a list of the '/' split route path
|
||||||
|
defaults is a dict of all the defaults provided for the route
|
||||||
|
|
||||||
|
"""
|
||||||
|
minkeys = []
|
||||||
|
backcheck = routelist[:]
|
||||||
|
|
||||||
|
# If we don't honor minimization, we need all the keys in the
|
||||||
|
# route path
|
||||||
|
if not self.minimization:
|
||||||
|
for part in backcheck:
|
||||||
|
if isinstance(part, dict):
|
||||||
|
minkeys.append(part['name'])
|
||||||
|
return (frozenset(minkeys), backcheck)
|
||||||
|
|
||||||
|
gaps = False
|
||||||
|
backcheck.reverse()
|
||||||
|
for part in backcheck:
|
||||||
|
if not isinstance(part, dict) and part not in self.done_chars:
|
||||||
|
gaps = True
|
||||||
|
continue
|
||||||
|
elif not isinstance(part, dict):
|
||||||
|
continue
|
||||||
|
key = part['name']
|
||||||
|
if self.defaults.has_key(key) and not gaps:
|
||||||
|
continue
|
||||||
|
minkeys.append(key)
|
||||||
|
gaps = True
|
||||||
|
return (frozenset(minkeys), backcheck)
|
||||||
|
|
||||||
|
def _defaults(self, routekeys, reserved_keys, kargs):
|
||||||
|
"""Creates default set with values stringified
|
||||||
|
|
||||||
|
Put together our list of defaults, stringify non-None values
|
||||||
|
and add in our action/id default if they use it and didn't
|
||||||
|
specify it.
|
||||||
|
|
||||||
|
defaultkeys is a list of the currently assumed default keys
|
||||||
|
routekeys is a list of the keys found in the route path
|
||||||
|
reserved_keys is a list of keys that are not
|
||||||
|
|
||||||
|
"""
|
||||||
|
defaults = {}
|
||||||
|
# Add in a controller/action default if they don't exist
|
||||||
|
if 'controller' not in routekeys and 'controller' not in kargs \
|
||||||
|
and not self.explicit:
|
||||||
|
kargs['controller'] = 'content'
|
||||||
|
if 'action' not in routekeys and 'action' not in kargs \
|
||||||
|
and not self.explicit:
|
||||||
|
kargs['action'] = 'index'
|
||||||
|
defaultkeys = frozenset([key for key in kargs.keys() \
|
||||||
|
if key not in reserved_keys])
|
||||||
|
for key in defaultkeys:
|
||||||
|
if kargs[key] is not None:
|
||||||
|
defaults[key] = self.make_unicode(kargs[key])
|
||||||
|
else:
|
||||||
|
defaults[key] = None
|
||||||
|
if 'action' in routekeys and not defaults.has_key('action') \
|
||||||
|
and not self.explicit:
|
||||||
|
defaults['action'] = 'index'
|
||||||
|
if 'id' in routekeys and not defaults.has_key('id') \
|
||||||
|
and not self.explicit:
|
||||||
|
defaults['id'] = None
|
||||||
|
newdefaultkeys = frozenset([key for key in defaults.keys() \
|
||||||
|
if key not in reserved_keys])
|
||||||
|
|
||||||
|
return (defaults, newdefaultkeys)
|
||||||
|
|
||||||
|
def makeregexp(self, clist, include_names=True):
|
||||||
|
"""Create a regular expression for matching purposes
|
||||||
|
|
||||||
|
Note: This MUST be called before match can function properly.
|
||||||
|
|
||||||
|
clist should be a list of valid controller strings that can be
|
||||||
|
matched, for this reason makeregexp should be called by the web
|
||||||
|
framework after it knows all available controllers that can be
|
||||||
|
utilized.
|
||||||
|
|
||||||
|
include_names indicates whether this should be a match regexp
|
||||||
|
assigned to itself using regexp grouping names, or if names
|
||||||
|
should be excluded for use in a single larger regexp to
|
||||||
|
determine if any routes match
|
||||||
|
|
||||||
|
"""
|
||||||
|
if self.minimization:
|
||||||
|
reg = self.buildnextreg(self.routelist, clist, include_names)[0]
|
||||||
|
if not reg:
|
||||||
|
reg = '/'
|
||||||
|
reg = reg + '/?' + '$'
|
||||||
|
|
||||||
|
if not reg.startswith('/'):
|
||||||
|
reg = '/' + reg
|
||||||
|
else:
|
||||||
|
reg = self.buildfullreg(clist, include_names)
|
||||||
|
|
||||||
|
reg = '^' + reg
|
||||||
|
|
||||||
|
if not include_names:
|
||||||
|
return reg
|
||||||
|
|
||||||
|
self.regexp = reg
|
||||||
|
self.regmatch = re.compile(reg)
|
||||||
|
|
||||||
|
def buildfullreg(self, clist, include_names=True):
|
||||||
|
"""Build the regexp by iterating through the routelist and
|
||||||
|
replacing dicts with the appropriate regexp match"""
|
||||||
|
regparts = []
|
||||||
|
for part in self.routelist:
|
||||||
|
if isinstance(part, dict):
|
||||||
|
var = part['name']
|
||||||
|
if var == 'controller':
|
||||||
|
partmatch = '|'.join(map(re.escape, clist))
|
||||||
|
elif part['type'] == ':':
|
||||||
|
partmatch = self.reqs.get(var) or '[^/]+?'
|
||||||
|
elif part['type'] == '.':
|
||||||
|
partmatch = self.reqs.get(var) or '[^/.]+?'
|
||||||
|
else:
|
||||||
|
partmatch = self.reqs.get(var) or '.+?'
|
||||||
|
if include_names:
|
||||||
|
regpart = '(?P<%s>%s)' % (var, partmatch)
|
||||||
|
else:
|
||||||
|
regpart = '(?:%s)' % partmatch
|
||||||
|
if part['type'] == '.':
|
||||||
|
regparts.append('(?:\.%s)??' % regpart)
|
||||||
|
else:
|
||||||
|
regparts.append(regpart)
|
||||||
|
else:
|
||||||
|
regparts.append(re.escape(part))
|
||||||
|
regexp = ''.join(regparts) + '$'
|
||||||
|
return regexp
|
||||||
|
|
||||||
|
def buildnextreg(self, path, clist, include_names=True):
|
||||||
|
"""Recursively build our regexp given a path, and a controller
|
||||||
|
list.
|
||||||
|
|
||||||
|
Returns the regular expression string, and two booleans that
|
||||||
|
can be ignored as they're only used internally by buildnextreg.
|
||||||
|
|
||||||
|
"""
|
||||||
|
if path:
|
||||||
|
part = path[0]
|
||||||
|
else:
|
||||||
|
part = ''
|
||||||
|
reg = ''
|
||||||
|
|
||||||
|
# noreqs will remember whether the remainder has either a string
|
||||||
|
# match, or a non-defaulted regexp match on a key, allblank remembers
|
||||||
|
# if the rest could possible be completely empty
|
||||||
|
(rest, noreqs, allblank) = ('', True, True)
|
||||||
|
if len(path[1:]) > 0:
|
||||||
|
self.prior = part
|
||||||
|
(rest, noreqs, allblank) = self.buildnextreg(path[1:], clist, include_names)
|
||||||
|
|
||||||
|
if isinstance(part, dict) and part['type'] in (':', '.'):
|
||||||
|
var = part['name']
|
||||||
|
typ = part['type']
|
||||||
|
partreg = ''
|
||||||
|
|
||||||
|
# First we plug in the proper part matcher
|
||||||
|
if self.reqs.has_key(var):
|
||||||
|
if include_names:
|
||||||
|
partreg = '(?P<%s>%s)' % (var, self.reqs[var])
|
||||||
|
else:
|
||||||
|
partreg = '(?:%s)' % self.reqs[var]
|
||||||
|
if typ == '.':
|
||||||
|
partreg = '(?:\.%s)??' % partreg
|
||||||
|
elif var == 'controller':
|
||||||
|
if include_names:
|
||||||
|
partreg = '(?P<%s>%s)' % (var, '|'.join(map(re.escape, clist)))
|
||||||
|
else:
|
||||||
|
partreg = '(?:%s)' % '|'.join(map(re.escape, clist))
|
||||||
|
elif self.prior in ['/', '#']:
|
||||||
|
if include_names:
|
||||||
|
partreg = '(?P<' + var + '>[^' + self.prior + ']+?)'
|
||||||
|
else:
|
||||||
|
partreg = '(?:[^' + self.prior + ']+?)'
|
||||||
|
else:
|
||||||
|
if not rest:
|
||||||
|
if typ == '.':
|
||||||
|
exclude_chars = '/.'
|
||||||
|
else:
|
||||||
|
exclude_chars = '/'
|
||||||
|
if include_names:
|
||||||
|
partreg = '(?P<%s>[^%s]+?)' % (var, exclude_chars)
|
||||||
|
else:
|
||||||
|
partreg = '(?:[^%s]+?)' % exclude_chars
|
||||||
|
if typ == '.':
|
||||||
|
partreg = '(?:\.%s)??' % partreg
|
||||||
|
else:
|
||||||
|
end = ''.join(self.done_chars)
|
||||||
|
rem = rest
|
||||||
|
if rem[0] == '\\' and len(rem) > 1:
|
||||||
|
rem = rem[1]
|
||||||
|
elif rem.startswith('(\\') and len(rem) > 2:
|
||||||
|
rem = rem[2]
|
||||||
|
else:
|
||||||
|
rem = end
|
||||||
|
rem = frozenset(rem) | frozenset(['/'])
|
||||||
|
if include_names:
|
||||||
|
partreg = '(?P<%s>[^%s]+?)' % (var, ''.join(rem))
|
||||||
|
else:
|
||||||
|
partreg = '(?:[^%s]+?)' % ''.join(rem)
|
||||||
|
|
||||||
|
if self.reqs.has_key(var):
|
||||||
|
noreqs = False
|
||||||
|
if not self.defaults.has_key(var):
|
||||||
|
allblank = False
|
||||||
|
noreqs = False
|
||||||
|
|
||||||
|
# Now we determine if its optional, or required. This changes
|
||||||
|
# depending on what is in the rest of the match. If noreqs is
|
||||||
|
# true, then its possible the entire thing is optional as there's
|
||||||
|
# no reqs or string matches.
|
||||||
|
if noreqs:
|
||||||
|
# The rest is optional, but now we have an optional with a
|
||||||
|
# regexp. Wrap to ensure that if we match anything, we match
|
||||||
|
# our regexp first. It's still possible we could be completely
|
||||||
|
# blank as we have a default
|
||||||
|
if self.reqs.has_key(var) and self.defaults.has_key(var):
|
||||||
|
reg = '(' + partreg + rest + ')?'
|
||||||
|
|
||||||
|
# Or we have a regexp match with no default, so now being
|
||||||
|
# completely blank form here on out isn't possible
|
||||||
|
elif self.reqs.has_key(var):
|
||||||
|
allblank = False
|
||||||
|
reg = partreg + rest
|
||||||
|
|
||||||
|
# If the character before this is a special char, it has to be
|
||||||
|
# followed by this
|
||||||
|
elif self.defaults.has_key(var) and \
|
||||||
|
self.prior in (',', ';', '.'):
|
||||||
|
reg = partreg + rest
|
||||||
|
|
||||||
|
# Or we have a default with no regexp, don't touch the allblank
|
||||||
|
elif self.defaults.has_key(var):
|
||||||
|
reg = partreg + '?' + rest
|
||||||
|
|
||||||
|
# Or we have a key with no default, and no reqs. Not possible
|
||||||
|
# to be all blank from here
|
||||||
|
else:
|
||||||
|
allblank = False
|
||||||
|
reg = partreg + rest
|
||||||
|
# In this case, we have something dangling that might need to be
|
||||||
|
# matched
|
||||||
|
else:
|
||||||
|
# If they can all be blank, and we have a default here, we know
|
||||||
|
# its safe to make everything from here optional. Since
|
||||||
|
# something else in the chain does have req's though, we have
|
||||||
|
# to make the partreg here required to continue matching
|
||||||
|
if allblank and self.defaults.has_key(var):
|
||||||
|
reg = '(' + partreg + rest + ')?'
|
||||||
|
|
||||||
|
# Same as before, but they can't all be blank, so we have to
|
||||||
|
# require it all to ensure our matches line up right
|
||||||
|
else:
|
||||||
|
reg = partreg + rest
|
||||||
|
elif isinstance(part, dict) and part['type'] == '*':
|
||||||
|
var = part['name']
|
||||||
|
if noreqs:
|
||||||
|
if include_names:
|
||||||
|
reg = '(?P<%s>.*)' % var + rest
|
||||||
|
else:
|
||||||
|
reg = '(?:.*)' + rest
|
||||||
|
if not self.defaults.has_key(var):
|
||||||
|
allblank = False
|
||||||
|
noreqs = False
|
||||||
|
else:
|
||||||
|
if allblank and self.defaults.has_key(var):
|
||||||
|
if include_names:
|
||||||
|
reg = '(?P<%s>.*)' % var + rest
|
||||||
|
else:
|
||||||
|
reg = '(?:.*)' + rest
|
||||||
|
elif self.defaults.has_key(var):
|
||||||
|
if include_names:
|
||||||
|
reg = '(?P<%s>.*)' % var + rest
|
||||||
|
else:
|
||||||
|
reg = '(?:.*)' + rest
|
||||||
|
else:
|
||||||
|
if include_names:
|
||||||
|
reg = '(?P<%s>.*)' % var + rest
|
||||||
|
else:
|
||||||
|
reg = '(?:.*)' + rest
|
||||||
|
allblank = False
|
||||||
|
noreqs = False
|
||||||
|
elif part and part[-1] in self.done_chars:
|
||||||
|
if allblank:
|
||||||
|
reg = re.escape(part[:-1]) + '(' + re.escape(part[-1]) + rest
|
||||||
|
reg += ')?'
|
||||||
|
else:
|
||||||
|
allblank = False
|
||||||
|
reg = re.escape(part) + rest
|
||||||
|
|
||||||
|
# We have a normal string here, this is a req, and it prevents us from
|
||||||
|
# being all blank
|
||||||
|
else:
|
||||||
|
noreqs = False
|
||||||
|
allblank = False
|
||||||
|
reg = re.escape(part) + rest
|
||||||
|
|
||||||
|
return (reg, noreqs, allblank)
|
||||||
|
|
||||||
|
def match(self, url, environ=None, sub_domains=False,
|
||||||
|
sub_domains_ignore=None, domain_match=''):
|
||||||
|
"""Match a url to our regexp.
|
||||||
|
|
||||||
|
While the regexp might match, this operation isn't
|
||||||
|
guaranteed as there's other factors that can cause a match to
|
||||||
|
fail even though the regexp succeeds (Default that was relied
|
||||||
|
on wasn't given, requirement regexp doesn't pass, etc.).
|
||||||
|
|
||||||
|
Therefore the calling function shouldn't assume this will
|
||||||
|
return a valid dict, the other possible return is False if a
|
||||||
|
match doesn't work out.
|
||||||
|
|
||||||
|
"""
|
||||||
|
# Static routes don't match, they generate only
|
||||||
|
if self.static:
|
||||||
|
return False
|
||||||
|
|
||||||
|
match = self.regmatch.match(url)
|
||||||
|
|
||||||
|
if not match:
|
||||||
|
return False
|
||||||
|
|
||||||
|
sub_domain = None
|
||||||
|
|
||||||
|
if sub_domains and environ and 'HTTP_HOST' in environ:
|
||||||
|
host = environ['HTTP_HOST'].split(':')[0]
|
||||||
|
sub_match = re.compile('^(.+?)\.%s$' % domain_match)
|
||||||
|
subdomain = re.sub(sub_match, r'\1', host)
|
||||||
|
if subdomain not in sub_domains_ignore and host != subdomain:
|
||||||
|
sub_domain = subdomain
|
||||||
|
|
||||||
|
if self.conditions:
|
||||||
|
if 'method' in self.conditions and environ and \
|
||||||
|
environ['REQUEST_METHOD'] not in self.conditions['method']:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Check sub-domains?
|
||||||
|
use_sd = self.conditions.get('sub_domain')
|
||||||
|
if use_sd and not sub_domain:
|
||||||
|
return False
|
||||||
|
elif not use_sd and 'sub_domain' in self.conditions and sub_domain:
|
||||||
|
return False
|
||||||
|
if isinstance(use_sd, list) and sub_domain not in use_sd:
|
||||||
|
return False
|
||||||
|
|
||||||
|
matchdict = match.groupdict()
|
||||||
|
result = {}
|
||||||
|
extras = self._default_keys - frozenset(matchdict.keys())
|
||||||
|
for key, val in matchdict.iteritems():
|
||||||
|
if key != 'path_info' and self.encoding:
|
||||||
|
# change back into python unicode objects from the URL
|
||||||
|
# representation
|
||||||
|
try:
|
||||||
|
val = val and val.decode(self.encoding, self.decode_errors)
|
||||||
|
except UnicodeDecodeError:
|
||||||
|
return False
|
||||||
|
|
||||||
|
if not val and key in self.defaults and self.defaults[key]:
|
||||||
|
result[key] = self.defaults[key]
|
||||||
|
else:
|
||||||
|
result[key] = val
|
||||||
|
for key in extras:
|
||||||
|
result[key] = self.defaults[key]
|
||||||
|
|
||||||
|
# Add the sub-domain if there is one
|
||||||
|
if sub_domains:
|
||||||
|
result['sub_domain'] = sub_domain
|
||||||
|
|
||||||
|
# If there's a function, call it with environ and expire if it
|
||||||
|
# returns False
|
||||||
|
if self.conditions and 'function' in self.conditions and \
|
||||||
|
not self.conditions['function'](environ, result):
|
||||||
|
return False
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
def generate_non_minimized(self, kargs):
|
||||||
|
"""Generate a non-minimal version of the URL"""
|
||||||
|
# Iterate through the keys that are defaults, and NOT in the route
|
||||||
|
# path. If its not in kargs, or doesn't match, or is None, this
|
||||||
|
# route won't work
|
||||||
|
for k in self.maxkeys - self.minkeys:
|
||||||
|
if k not in kargs:
|
||||||
|
return False
|
||||||
|
elif self.make_unicode(kargs[k]) != \
|
||||||
|
self.make_unicode(self.defaults[k]):
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Ensure that all the args in the route path are present and not None
|
||||||
|
for arg in self.minkeys:
|
||||||
|
if arg not in kargs or kargs[arg] is None:
|
||||||
|
if arg in self.dotkeys:
|
||||||
|
kargs[arg] = ''
|
||||||
|
else:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Encode all the argument that the regpath can use
|
||||||
|
for k in kargs:
|
||||||
|
if k in self.maxkeys:
|
||||||
|
if k in self.dotkeys:
|
||||||
|
if kargs[k]:
|
||||||
|
kargs[k] = url_quote('.' + kargs[k], self.encoding)
|
||||||
|
else:
|
||||||
|
kargs[k] = url_quote(kargs[k], self.encoding)
|
||||||
|
|
||||||
|
return self.regpath % kargs
|
||||||
|
|
||||||
|
def generate_minimized(self, kargs):
    """Generate a minimized version of the URL.

    Walks the route parts backwards, omitting trailing parts whose
    values match their defaults, and returns the URL string, or False
    when a required argument is missing.

    Fix: replaced ``dict.has_key(k)`` with ``k in dict`` — identical in
    Python 2 and required for Python 3, where has_key was removed.
    """
    routelist = self.routebackwards
    urllist = []
    # 'gaps' flips to True once any part has been emitted; after that,
    # no further parts may be omitted or the URL would have a hole.
    gaps = False
    for part in routelist:
        if isinstance(part, dict) and part['type'] in (':', '.'):
            arg = part['name']

            # For efficiency, check these just once
            has_arg = arg in kargs
            has_default = arg in self.defaults

            # Leave this part off if a default exists, no explicit
            # value was passed, and nothing has been emitted yet.
            if has_default and not has_arg and not gaps:
                continue

            # Likewise drop it when the passed value matches the
            # default (compared as unicode).
            if (has_default and has_arg) and self.make_unicode(kargs[arg]) == \
                self.make_unicode(self.defaults[arg]) and not gaps:
                continue

            # An explicit None with a default available is droppable too.
            if has_arg and kargs[arg] is None and has_default and not gaps:
                continue

            # Otherwise pick the value: explicit arg wins over default.
            elif has_arg:
                val = kargs[arg]

            elif has_default and self.defaults[arg] is not None:
                val = self.defaults[arg]
            # Optional format parameter?
            elif part['type'] == '.':
                continue
            # No arg at all? This won't work
            else:
                return False

            urllist.append(url_quote(val, self.encoding))
            if part['type'] == '.':
                urllist.append('.')

            if has_arg:
                del kargs[arg]
            gaps = True
        elif isinstance(part, dict) and part['type'] == '*':
            # Wildcard parts are appended verbatim when supplied.
            arg = part['name']
            kar = kargs.get(arg)
            if kar is not None:
                urllist.append(url_quote(kar, self.encoding))
                gaps = True
        elif part and part[-1] in self.done_chars:
            # Trailing separator characters may be dropped until
            # something has been emitted.
            if not gaps and part in self.done_chars:
                continue
            elif not gaps:
                urllist.append(part[:-1])
                gaps = True
            else:
                gaps = True
                urllist.append(part)
        else:
            gaps = True
            urllist.append(part)
    # Parts were collected back-to-front; flip before joining.
    urllist.reverse()
    url = ''.join(urllist)
    return url
def generate(self, _ignore_req_list=False, _append_slash=False, **kargs):
    """Generate a URL from ourself given a set of keyword arguments.

    Returns False when this set of keywords cannot produce a valid URL.
    Extra keyword args that are not part of the route become GET query
    arguments.
    """
    # Verify that our args pass any regexp requirements
    if not _ignore_req_list:
        for key in self.reqs.keys():
            val = kargs.get(key)
            if val and not self.req_regs[key].match(self.make_unicode(val)):
                return False

    # A 'method' karg is only checked against the accept list; it is
    # stripped before the path itself is generated.
    meth = kargs.get('method')
    if meth:
        if self.conditions and 'method' in self.conditions \
            and meth.upper() not in self.conditions['method']:
            return False
        kargs.pop('method')

    if self.minimization:
        url = self.generate_minimized(kargs)
    else:
        url = self.generate_non_minimized(kargs)

    if url is False:
        return url

    if not url.startswith('/') and not self.static:
        url = '/' + url
    # Anything not consumed by the path becomes the query string.
    extras = frozenset(kargs.keys()) - self.maxkeys
    if extras:
        if _append_slash and not url.endswith('/'):
            url += '/'
        fragments = []
        # don't assume the 'extras' set preserves order: iterate
        # through the ordered kargs instead
        for key in kargs:
            if key not in extras:
                continue
            if key in ('action', 'controller'):
                continue
            val = kargs[key]
            if isinstance(val, (tuple, list)):
                fragments.extend(
                    (key, _str_encode(item, self.encoding)) for item in val)
            else:
                fragments.append((key, _str_encode(val, self.encoding)))
        if fragments:
            url += '?'
            url += urllib.urlencode(fragments)
    elif _append_slash and not url.endswith('/'):
        url += '/'
    return url
503
src/routes/util.py
Normal file
503
src/routes/util.py
Normal file
@ -0,0 +1,503 @@
|
|||||||
|
"""Utility functions for use in templates / controllers
|
||||||
|
|
||||||
|
*PLEASE NOTE*: Many of these functions expect an initialized RequestConfig
|
||||||
|
object. This is expected to have been initialized for EACH REQUEST by the web
|
||||||
|
framework.
|
||||||
|
|
||||||
|
"""
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import urllib
|
||||||
|
from routes import request_config
|
||||||
|
|
||||||
|
|
||||||
|
class RoutesException(Exception):
    """Base exception raised for all Routes errors."""
class MatchException(RoutesException):
    """Raised when URL matching fails."""
class GenerationException(RoutesException):
    """Raised when URL generation fails."""
def _screenargs(kargs, mapper, environ, force_explicit=False):
|
||||||
|
"""
|
||||||
|
Private function that takes a dict, and screens it against the current
|
||||||
|
request dict to determine what the dict should look like that is used.
|
||||||
|
This is responsible for the requests "memory" of the current.
|
||||||
|
"""
|
||||||
|
# Coerce any unicode args with the encoding
|
||||||
|
encoding = mapper.encoding
|
||||||
|
for key, val in kargs.iteritems():
|
||||||
|
if isinstance(val, unicode):
|
||||||
|
kargs[key] = val.encode(encoding)
|
||||||
|
|
||||||
|
if mapper.explicit and mapper.sub_domains and not force_explicit:
|
||||||
|
return _subdomain_check(kargs, mapper, environ)
|
||||||
|
elif mapper.explicit and not force_explicit:
|
||||||
|
return kargs
|
||||||
|
|
||||||
|
controller_name = kargs.get('controller')
|
||||||
|
|
||||||
|
if controller_name and controller_name.startswith('/'):
|
||||||
|
# If the controller name starts with '/', ignore route memory
|
||||||
|
kargs['controller'] = kargs['controller'][1:]
|
||||||
|
return kargs
|
||||||
|
elif controller_name and not kargs.has_key('action'):
|
||||||
|
# Fill in an action if we don't have one, but have a controller
|
||||||
|
kargs['action'] = 'index'
|
||||||
|
|
||||||
|
route_args = environ.get('wsgiorg.routing_args')
|
||||||
|
if route_args:
|
||||||
|
memory_kargs = route_args[1].copy()
|
||||||
|
else:
|
||||||
|
memory_kargs = {}
|
||||||
|
|
||||||
|
# Remove keys from memory and kargs if kargs has them as None
|
||||||
|
for key in [key for key in kargs.keys() if kargs[key] is None]:
|
||||||
|
del kargs[key]
|
||||||
|
if memory_kargs.has_key(key):
|
||||||
|
del memory_kargs[key]
|
||||||
|
|
||||||
|
# Merge the new args on top of the memory args
|
||||||
|
memory_kargs.update(kargs)
|
||||||
|
|
||||||
|
# Setup a sub-domain if applicable
|
||||||
|
if mapper.sub_domains:
|
||||||
|
memory_kargs = _subdomain_check(memory_kargs, mapper, environ)
|
||||||
|
return memory_kargs
|
||||||
|
|
||||||
|
|
||||||
|
def _subdomain_check(kargs, mapper, environ):
|
||||||
|
"""Screen the kargs for a subdomain and alter it appropriately depending
|
||||||
|
on the current subdomain or lack therof."""
|
||||||
|
if mapper.sub_domains:
|
||||||
|
subdomain = kargs.pop('sub_domain', None)
|
||||||
|
if isinstance(subdomain, unicode):
|
||||||
|
subdomain = str(subdomain)
|
||||||
|
|
||||||
|
fullhost = environ.get('HTTP_HOST') or environ.get('SERVER_NAME')
|
||||||
|
|
||||||
|
# In case environ defaulted to {}
|
||||||
|
if not fullhost:
|
||||||
|
return kargs
|
||||||
|
|
||||||
|
hostmatch = fullhost.split(':')
|
||||||
|
host = hostmatch[0]
|
||||||
|
port = ''
|
||||||
|
if len(hostmatch) > 1:
|
||||||
|
port += ':' + hostmatch[1]
|
||||||
|
sub_match = re.compile('^.+?\.(%s)$' % mapper.domain_match)
|
||||||
|
domain = re.sub(sub_match, r'\1', host)
|
||||||
|
if subdomain and not host.startswith(subdomain) and \
|
||||||
|
subdomain not in mapper.sub_domains_ignore:
|
||||||
|
kargs['_host'] = subdomain + '.' + domain + port
|
||||||
|
elif (subdomain in mapper.sub_domains_ignore or \
|
||||||
|
subdomain is None) and domain != host:
|
||||||
|
kargs['_host'] = domain + port
|
||||||
|
return kargs
|
||||||
|
else:
|
||||||
|
return kargs
|
||||||
|
|
||||||
|
|
||||||
|
def _url_quote(string, encoding):
    """A Unicode handling version of urllib.quote."""
    if not encoding:
        # No encoding requested: just coerce to str and quote.
        return urllib.quote(str(string), '/')
    if isinstance(string, unicode):
        encoded = string.encode(encoding)
    elif isinstance(string, str):
        # assume the encoding is already correct
        encoded = string
    else:
        # Coerce through unicode first so the encoding applies.
        encoded = unicode(string).encode(encoding)
    return urllib.quote(encoded, '/')
def _str_encode(string, encoding):
|
||||||
|
if encoding:
|
||||||
|
if isinstance(string, unicode):
|
||||||
|
s = string.encode(encoding)
|
||||||
|
elif isinstance(string, str):
|
||||||
|
# assume the encoding is already correct
|
||||||
|
s = string
|
||||||
|
else:
|
||||||
|
s = unicode(string).encode(encoding)
|
||||||
|
return s
|
||||||
|
|
||||||
|
|
||||||
|
def url_for(*args, **kargs):
    """Generates a URL

    All keys given to url_for are sent to the Routes Mapper instance for
    generation except for::

        anchor          specified the anchor name to be appened to the path
        host            overrides the default (current) host if provided
        protocol        overrides the default (current) protocol if provided
        qualified       creates the URL with the host/port information as
                        needed

    The URL is generated based on the rest of the keys. When generating a new
    URL, values will be used from the current request's parameters (if
    present). The following rules are used to determine when and how to keep
    the current requests parameters:

    * If the controller is present and begins with '/', no defaults are used
    * If the controller is changed, action is set to 'index' unless otherwise
      specified

    For example, if the current request yielded a dict of
    {'controller': 'blog', 'action': 'view', 'id': 2}, with the standard
    ':controller/:action/:id' route, you'd get the following results::

        url_for(id=4) => '/blog/view/4',
        url_for(controller='/admin') => '/admin',
        url_for(controller='admin') => '/admin/view/2'
        url_for(action='edit') => '/blog/edit/2',
        url_for(action='list', id=None) => '/blog/list'

    **Static and Named Routes**

    If there is a string present as the first argument, a lookup is done
    against the named routes table to see if there's any matching routes. The
    keyword defaults used with static routes will be sent in as GET query
    arg's if a route matches.

    If no route by that name is found, the string is assumed to be a raw URL.
    Should the raw URL begin with ``/`` then appropriate SCRIPT_NAME data will
    be added if present, otherwise the string will be used as the url with
    keyword args becoming GET query args.

    """
    anchor = kargs.get('anchor')
    host = kargs.get('host')
    protocol = kargs.get('protocol')
    qualified = kargs.pop('qualified', None)

    # Strip the reserved keywords out before route generation.
    for reserved in ('anchor', 'host', 'protocol'):
        if kargs.get(reserved):
            del kargs[reserved]
    config = request_config()
    route = None
    static = False
    encoding = config.mapper.encoding
    url = ''
    if args:
        route = config.mapper._routenames.get(args[0])

        # No named route found, assume the argument is a relative path
        if not route:
            static = True
            url = args[0]

        if url.startswith('/') and hasattr(config, 'environ') \
                and config.environ.get('SCRIPT_NAME'):
            url = config.environ.get('SCRIPT_NAME') + url

    if static:
        if kargs:
            # Remaining kargs become the query string.
            pairs = []
            for key, val in kargs.iteritems():
                if not isinstance(val, (list, tuple)):
                    val = [val]
                for item in val:
                    pairs.append("%s=%s" % (
                        urllib.quote(unicode(key).encode(encoding)),
                        urllib.quote(unicode(item).encode(encoding))))
            url += '?'
            url += '&'.join(pairs)
    environ = getattr(config, 'environ', {})
    if 'wsgiorg.routing_args' not in environ:
        # Synthesize routing args so generation has route "memory".
        environ = environ.copy()
        mapper_dict = getattr(config, 'mapper_dict', None)
        if mapper_dict is not None:
            match_dict = mapper_dict.copy()
        else:
            match_dict = {}
        environ['wsgiorg.routing_args'] = ((), match_dict)

    if not static:
        route_args = []
        if route:
            if config.mapper.hardcode_names:
                route_args.append(route)
            newargs = route.defaults.copy()
            newargs.update(kargs)

            # If this route has a filter, apply it
            if route.filter:
                newargs = route.filter(newargs)

            if not route.static:
                # Handle sub-domains
                newargs = _subdomain_check(newargs, config.mapper, environ)
        else:
            newargs = _screenargs(kargs, config.mapper, environ)
        # Underscore-prefixed kargs override the reserved keywords.
        anchor = newargs.pop('_anchor', None) or anchor
        host = newargs.pop('_host', None) or host
        protocol = newargs.pop('_protocol', None) or protocol
        url = config.mapper.generate(*route_args, **newargs)
    if anchor is not None:
        url += '#' + _url_quote(anchor, encoding)
    if host or protocol or qualified:
        if not host and not qualified:
            # Ensure we don't use a specific port, as changing the protocol
            # means that we most likely need a new port
            host = config.host.split(':')[0]
        elif not host:
            host = config.host
        if not protocol:
            protocol = config.protocol
        if url is not None:
            url = protocol + '://' + host + url

    if not isinstance(url, str) and url is not None:
        raise GenerationException("url_for can only return a string, got "
                        "unicode instead: %s" % url)
    if url is None:
        raise GenerationException(
            "url_for could not generate URL. Called with args: %s %s" % \
            (args, kargs))
    return url
class URLGenerator(object):
    """The URL Generator generates URL's

    It is automatically instantiated by the RoutesMiddleware and put
    into the ``wsgiorg.routing_args`` tuple accessible as::

        url = environ['wsgiorg.routing_args'][0][0]

    Or via the ``routes.url`` key::

        url = environ['routes.url']

    The url object may be instantiated outside of a web context for use
    in testing, however sub_domain support and fully qualified URL's
    cannot be generated without supplying a dict that must contain the
    key ``HTTP_HOST``.

    """
    def __init__(self, mapper, environ):
        """Instantiate the URLGenerator

        ``mapper``
            The mapper object to use when generating routes.
        ``environ``
            The environment dict used in WSGI, alternately, any dict
            that contains at least an ``HTTP_HOST`` value.

        """
        self.mapper = mapper
        # Guarantee SCRIPT_NAME exists so later prefix checks are safe.
        environ.setdefault('SCRIPT_NAME', '')
        self.environ = environ

    def __call__(self, *args, **kargs):
        """Generates a URL

        All keys given to url_for are sent to the Routes Mapper instance for
        generation except for::

            anchor          specified the anchor name to be appened to the path
            host            overrides the default (current) host if provided
            protocol        overrides the default (current) protocol if provided
            qualified       creates the URL with the host/port information as
                            needed

        """
        anchor = kargs.get('anchor')
        host = kargs.get('host')
        protocol = kargs.get('protocol')
        qualified = kargs.pop('qualified', None)

        # Strip the reserved keywords before route generation.
        for reserved in ('anchor', 'host', 'protocol'):
            if kargs.get(reserved):
                del kargs[reserved]

        use_current = kargs.pop('_use_current', False)

        route = None
        static = False
        encoding = self.mapper.encoding
        url = ''

        if args:
            route = self.mapper._routenames.get(args[0])
            if not route:
                # Unknown route name: treat the argument as a raw path.
                static = True
                url = args[0]
                if url.startswith('/') and self.environ.get('SCRIPT_NAME'):
                    url = self.environ.get('SCRIPT_NAME') + url

        if static:
            if kargs:
                # Remaining kargs become the query string.
                pairs = []
                for key, val in kargs.iteritems():
                    if not isinstance(val, (list, tuple)):
                        val = [val]
                    for item in val:
                        pairs.append("%s=%s" % (
                            urllib.quote(unicode(key).encode(encoding)),
                            urllib.quote(unicode(item).encode(encoding))))
                url += '?'
                url += '&'.join(pairs)
        if not static:
            route_args = []
            if route:
                if self.mapper.hardcode_names:
                    route_args.append(route)
                newargs = route.defaults.copy()
                newargs.update(kargs)

                # If this route has a filter, apply it
                if route.filter:
                    newargs = route.filter(newargs)
                if not route.static or (route.static and not route.external):
                    # Handle sub-domains, retain sub_domain if there is one
                    sub = newargs.get('sub_domain', None)
                    newargs = _subdomain_check(newargs, self.mapper,
                                               self.environ)
                    # If the route requires a sub-domain, and we have it,
                    # restore it
                    if 'sub_domain' in route.defaults:
                        newargs['sub_domain'] = sub

            elif use_current:
                newargs = _screenargs(kargs, self.mapper, self.environ,
                                      force_explicit=True)
            elif 'sub_domain' in kargs:
                newargs = _subdomain_check(kargs, self.mapper, self.environ)
            else:
                newargs = kargs

            # NOTE: keywords win over the underscore-prefixed args here
            # (the short-circuit skips the pop when the keyword is set).
            anchor = anchor or newargs.pop('_anchor', None)
            host = host or newargs.pop('_host', None)
            protocol = protocol or newargs.pop('_protocol', None)
            url = self.mapper.generate(*route_args, **newargs)
        if anchor is not None:
            url += '#' + _url_quote(anchor, encoding)
        if host or protocol or qualified:
            if 'routes.cached_hostinfo' not in self.environ:
                cache_hostinfo(self.environ)
            hostinfo = self.environ['routes.cached_hostinfo']

            if not host and not qualified:
                # Ensure we don't use a specific port, as changing the
                # protocol means that we most likely need a new port
                host = hostinfo['host'].split(':')[0]
            elif not host:
                host = hostinfo['host']
            if not protocol:
                protocol = hostinfo['protocol']
            if url is not None:
                if host[-1] != '/':
                    host += '/'
                url = protocol + '://' + host + url.lstrip('/')

        if not isinstance(url, str) and url is not None:
            raise GenerationException("Can only return a string, got "
                            "unicode instead: %s" % url)
        if url is None:
            raise GenerationException(
                "Could not generate URL. Called with args: %s %s" % \
                (args, kargs))
        return url

    def current(self, *args, **kwargs):
        """Generate a route that includes params used on the current
        request

        The arguments for this method are identical to ``__call__``
        except that arguments set to None will remove existing route
        matches of the same name from the set of arguments used to
        construct a URL.
        """
        return self(_use_current=True, *args, **kwargs)
def redirect_to(*args, **kargs):
    """Issues a redirect based on the arguments.

    Redirect's *should* occur as a "302 Moved" header, however the web
    framework may utilize a different method.

    All arguments are passed to url_for to retrieve the appropriate URL, then
    the resulting URL it sent to the redirect function as the URL.
    """
    # Resolve the target first, then hand it to the framework's redirect.
    target = url_for(*args, **kargs)
    return request_config().redirect(target)
def cache_hostinfo(environ):
    """Processes the host information and stores a copy

    This work was previously done but wasn't stored in environ, nor is
    it guaranteed to be setup in the future (Routes 2 and beyond).

    cache_hostinfo processes environ keys that may be present to
    determine the proper host, protocol, and port information to use
    when generating routes.

    """
    hostinfo = {}
    # Detect HTTPS from any of the common signals, proxy header included.
    secure = (environ.get('HTTPS') or
              environ.get('wsgi.url_scheme') == 'https' or
              environ.get('HTTP_X_FORWARDED_PROTO') == 'https')
    if secure:
        hostinfo['protocol'] = 'https'
    else:
        hostinfo['protocol'] = 'http'
    # Host resolution order: forwarded host, then HTTP Host header,
    # then SERVER_NAME (with a non-default port appended).
    forwarded_host = environ.get('HTTP_X_FORWARDED_HOST')
    if forwarded_host:
        hostinfo['host'] = forwarded_host
    elif environ.get('HTTP_HOST'):
        hostinfo['host'] = environ['HTTP_HOST']
    else:
        server = environ['SERVER_NAME']
        if environ.get('wsgi.url_scheme') == 'https':
            default_port = '443'
        else:
            default_port = '80'
        if environ['SERVER_PORT'] != default_port:
            server += ':' + environ['SERVER_PORT']
        hostinfo['host'] = server
    environ['routes.cached_hostinfo'] = hostinfo
    return hostinfo
def controller_scan(directory=None):
    """Scan a directory for python files and use them as controllers.

    Returns the controller names (path-relative, '/'-separated, without
    the '.py' suffix), longest names first so the most specific
    controller is matched before any prefix of it. Files whose names
    begin with '_' are skipped. Returns [] when no directory is given.

    Fixes: the descending-length ordering now uses a key function —
    the original cmp-style ``list.sort(cmpfunc)`` was removed in
    Python 3 (``key=len, reverse=True`` is equivalent and stable).
    The match pattern is now a raw string so ``\\.`` is a regex escape,
    not an invalid Python string escape.
    """
    if directory is None:
        return []

    def find_controllers(dirname, prefix=''):
        """Recursively locate controllers under a directory."""
        controllers = []
        for fname in os.listdir(dirname):
            filename = os.path.join(dirname, fname)
            if os.path.isfile(filename) and \
                re.match(r'^[^_]{1,1}.*\.py$', fname):
                # Strip the '.py' suffix to get the controller name.
                controllers.append(prefix + fname[:-3])
            elif os.path.isdir(filename):
                controllers.extend(find_controllers(filename,
                                                    prefix=prefix+fname+'/'))
        return controllers

    controllers = find_controllers(directory)
    # Longest controller names first (stable for equal lengths).
    controllers.sort(key=len, reverse=True)
    return controllers
Loading…
x
Reference in New Issue
Block a user