Mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-08 10:44:09 -04:00)

commit 6e4dc9c2c6
    Sync to trunk.
resources/images/news/elpais_semanal.png (new binary file, 759 B; binary file not shown)
resources/images/news/foxnews.png (new binary file, 467 B; binary file not shown)
@@ -33,9 +33,9 @@ class TheAtlantic(BasicNewsRecipe):
         soup = self.index_to_soup(self.INDEX)

         sectit = soup.find('h1', attrs={'class':'sectionTitle'})
         if sectit is not None:
-            texts = sectit.findAll('cufontext')
-            texts = map(self.tag_to_string, texts[-2:])
-            self.timefmt = ' [%s]'%(''.join(texts))
+            texts = self.tag_to_string(sectit).strip().split()[-2:]
+            if texts:
+                self.timefmt = ' [%s]'%(' '.join(texts))

         cover = soup.find('img', src=True, attrs={'class':'cover'})
         if cover is not None:
@@ -77,13 +77,14 @@ class TheAtlantic(BasicNewsRecipe):
         if poems:
             feeds.append(('Poems', poems))

-        self.log('Found section: Advice')
         div = soup.find(id='advice')
-        title = self.tag_to_string(div.find('h4'))
-        url = 'http://www.theatlantic.com'+div.find('a')['href']
-        desc = self.tag_to_string(div.find('p'))
-        self.log('\tFound article:', title, 'at', url)
-        self.log('\t\t', desc)
+        if div is not None:
+            self.log('Found section: Advice')
+            title = self.tag_to_string(div.find('h4'))
+            url = 'http://www.theatlantic.com'+div.find('a')['href']
+            desc = self.tag_to_string(div.find('p'))
+            self.log('\tFound article:', title, 'at', url)
+            self.log('\t\t', desc)

-        feeds.append(('Advice', [{'title':title, 'url':url, 'description':desc,
-            'date':''}]))
+            feeds.append(('Advice', [{'title':title, 'url':url, 'description':desc,
+                'date':''}]))
resources/recipes/axxon_magazine.recipe (new file, 64 lines)
@@ -0,0 +1,64 @@
__license__   = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
axxon.com.ar
'''

from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe

class Axxon_news(BasicNewsRecipe):
    title                 = 'Revista Axxon'
    __author__            = 'Darko Miletic'
    description           = 'Axxon, Ciencia Ficcion en Bits'
    publisher             = 'Revista Axxon - Ciencia Ficcion'
    category              = 'SF, Argentina'
    oldest_article        = 31
    delay                 = 1
    max_articles_per_feed = 100
    no_stylesheets        = False
    use_embedded_content  = False
    language              = 'es'
    encoding              = 'utf-8'
    publication_type      = 'magazine'
    INDEX                 = 'http://axxon.com.ar/rev/'
    extra_css             = ' body{font-family: Verdana,Arial,sans-serif} .editorial{font-family: serif} .posttitle{font-family: "Trebuchet MS","Lucida Grande",Verdana,Arial,sans-serif} .cuento{font-family: "Times New Roman", serif} .biografia{color: red; font-weight: bold; font-family: Verdana,Geneva,Arial,Helvetica,sans-serif} '

    conversion_options = {
                          'comment'   : description
                        , 'tags'      : category
                        , 'publisher' : publisher
                        , 'language'  : language
                        }

    keep_only_tags    = [dict(name='div', attrs={'class':'post'})]
    remove_tags       = [dict(name=['object','link','iframe','embed','img'])]
    remove_tags_after = [dict(attrs={'class':['editorial','correo','biografia','articulo']})]
    remove_attributes = ['width','height','font','border','align']

    def parse_index(self):
        articles = []
        soup = self.index_to_soup(self.INDEX)

        for item in soup.findAll('strong'):
            description = ''
            title_prefix = ''
            feed_link = item.find('a')
            if feed_link and feed_link.has_key('href') and feed_link['href'].startswith('?p='):
                url   = self.INDEX + feed_link['href']
                title = title_prefix + self.tag_to_string(feed_link)
                date  = strftime(self.timefmt)
                articles.append({
                                  'title'      :title
                                 ,'date'       :date
                                 ,'url'        :url
                                 ,'description':description
                                })
        return [(soup.head.title.string, articles)]

    def preprocess_html(self, soup):
        for item in soup.findAll(style=True):
            del item['style']
        return self.adeify_images(soup)
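For readers new to calibre recipes: parse_index must return a list of (feed title, article list) pairs. A sketch of the value the method above would build, with illustrative article data (the URL and titles here are made up):

    [(u'Revista Axxon 205', [
        {'title'      : u'Some story title',
         'date'       : ' [Sat, 20 Feb 2010]',
         'url'        : 'http://axxon.com.ar/rev/?p=123',
         'description': ''},
    ])]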
resources/recipes/elpais_semanal.recipe (new file, 56 lines)
@@ -0,0 +1,56 @@
__license__   = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
www.elpais.com/suple/eps/
'''

from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe

class ElPaisSemanal(BasicNewsRecipe):
    title                = 'El Pais semanal'
    __author__           = 'Darko Miletic'
    description          = 'Revista semanal de diario El Pais'
    publisher            = 'EL PAIS S.L.'
    category             = 'news, politics, Spain'
    no_stylesheets       = True
    encoding             = 'cp1252'
    use_embedded_content = False
    language             = 'es'
    publication_type     = 'magazine'
    masthead_url         = 'http://www.elpais.com/im/tit_logo_int.gif'
    index                = 'http://www.elpais.com/suple/eps/'
    extra_css            = ' p{text-align: justify} body{ text-align: left; font-family: Georgia,"Times New Roman",Times,serif } h2{font-family: Arial,Helvetica,sans-serif} img{margin-bottom: 0.4em} '

    conversion_options = {
                          'comment'   : description
                        , 'tags'      : category
                        , 'publisher' : publisher
                        , 'language'  : language
                        }

    keep_only_tags    = [dict(attrs={'class':['cabecera_noticia','contenido_noticia']})]
    remove_attributes = ['width','height']
    remove_tags       = [dict(name='link')]

    def parse_index(self):
        articles = []
        soup = self.index_to_soup(self.index)
        for item in soup.findAll('a', attrs={'class':['g19i003','g17r003','g17i003']}):
            description = ''
            title_prefix = ''
            feed_link = item
            if item.has_key('href'):
                url   = 'http://www.elpais.com' + item['href'].rpartition('/')[0]
                title = title_prefix + self.tag_to_string(feed_link)
                date  = strftime(self.timefmt)
                articles.append({
                                  'title'      :title
                                 ,'date'       :date
                                 ,'url'        :url
                                 ,'description':description
                                })
        return [(soup.head.title.string, articles)]

    def print_version(self, url):
        return url + '?print=1'
resources/recipes/foxnews.recipe (new file, 74 lines)
@@ -0,0 +1,74 @@
__license__   = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
foxnews.com
'''

import re
from calibre.web.feeds.news import BasicNewsRecipe

class FoxNews(BasicNewsRecipe):
    title                 = 'FOX News'
    __author__            = 'Darko Miletic'
    description           = 'Breaking News from FOX'
    publisher             = 'FOXNews.com'
    category              = 'news, breaking news, latest news, current news, world news, national news, USA'
    oldest_article        = 2
    max_articles_per_feed = 200
    no_stylesheets        = True
    encoding              = 'utf8'
    use_embedded_content  = False
    language              = 'en'
    publication_type      = 'newsportal'
    remove_empty_feeds    = True
    extra_css             = ' body{font-family: Arial,sans-serif } img{margin-bottom: 0.4em} .caption{font-size: x-small} '

    preprocess_regexps = [
        (re.compile(r'</title>.*?</head>', re.DOTALL|re.IGNORECASE),lambda match: '</title></head>')
    ]

    conversion_options = {
                          'comment'   : description
                        , 'tags'      : category
                        , 'publisher' : publisher
                        , 'language'  : language
                        }

    remove_attributes = ['xmlns']

    keep_only_tags = [
                       dict(name='div', attrs={'id'   :['story','browse-story-content']})
                      ,dict(name='div', attrs={'class':['posts articles','slideshow']})
                      ,dict(name='h4' , attrs={'class':'storyDate'})
                      ,dict(name='h1' , attrs={'xmlns:functx':'http://www.functx.com'})
                      ,dict(name='div', attrs={'class':'authInfo'})
                      ,dict(name='div', attrs={'id':'articleCont'})
                     ]

    remove_tags = [
                    dict(name='div', attrs={'class':['share-links','quigo quigo2','share-text','storyControls','socShare','btm-links']})
                   ,dict(name='div', attrs={'id'   :['otherMedia','loomia_display','img-all-path','story-vcmId','story-url','pane-browse-story-comments','story_related']})
                   ,dict(name='ul' , attrs={'class':['tools','tools alt','tools alt2','tabs']})
                   ,dict(name='a'  , attrs={'class':'join-discussion'})
                   ,dict(name='ul' , attrs={'class':['tools','tools alt','tools alt2']})
                   ,dict(name='p'  , attrs={'class':'see_fullarchive'})
                   ,dict(name=['object','embed','link','script'])
                  ]

    feeds = [
              (u'Latest Headlines', u'http://feeds.foxnews.com/foxnews/latest'       )
             ,(u'National'        , u'http://feeds.foxnews.com/foxnews/national'     )
             ,(u'World'           , u'http://feeds.foxnews.com/foxnews/world'        )
             ,(u'Politics'        , u'http://feeds.foxnews.com/foxnews/politics'     )
             ,(u'Business'        , u'http://feeds.foxnews.com/foxnews/business'     )
             ,(u'SciTech'         , u'http://feeds.foxnews.com/foxnews/scitech'      )
             ,(u'Health'          , u'http://feeds.foxnews.com/foxnews/health'       )
             ,(u'Entertainment'   , u'http://feeds.foxnews.com/foxnews/entertainment')
            ]

    def preprocess_html(self, soup):
        for item in soup.findAll(style=True):
            del item['style']
        return self.adeify_images(soup)
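The preprocess_regexps entry above removes everything between </title> and </head>, stripping inline styles and scripts from the page head before parsing. A quick illustration with a made-up snippet:

    import re
    html = '<head><title>t</title><style>junk</style></head><body></body>'
    print re.sub(r'(?is)</title>.*?</head>', '</title></head>', html)
    # -> <head><title>t</title></head><body></body>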
resources/recipes/nation_ke.recipe (new file, 167 lines)
@@ -0,0 +1,167 @@
__license__   = 'GPL v3'
__copyright__ = '2010, Hans Donner <hans.donner at pobox.com>'
'''
www.standardmedia.co.ke
'''

import os
from calibre import strftime, __appname__, __version__
import calibre.utils.PythonMagickWand as pw
from ctypes import byref
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.constants import preferred_encoding

class NationKeRecipe(BasicNewsRecipe):

    __author__ = 'Hans Donner'

    title = u'Sunday Nation'
    description = 'News from Kenia'
    language = 'en'
    country = 'KE'
    publication_type = 'newspaper'
    publisher = 'nation.co.ke'
    category = 'news, politics, Kenia'

    cover_img_url = 'http://www.nation.co.ke/image/view/-/465228/medRes/33884/-/maxh/85/-/12e8pptz/-/Sunday_Logo.gif'
    masthead_url = cover_img_url

    max_articles_per_feed = 200
    oldest_article = 2

    use_embedded_content = False
    remove_empty_feeds = True

    no_stylesheets = True
    extra_css = ' body{font-family: Verdana,Arial,Helvetica,sans-serif; font-size:0.7em } ' + \
                ' .image{margin-bottom: 1em} '

    keep_only_tags = dict(id='articlebody')

    feeds = [(u'News', u'http://www.nation.co.ke/News/-/1056/1056/-/view/asFeed/-/14nfs48z/-/index.xml'),
             (u'Business', u'http://www.nation.co.ke/business/-/996/996/-/view/asFeed/-/14lpkvc/-/index.xml'),
             (u'InDepth', u'http://www.nation.co.ke/InDepth/-/452898/452898/-/view/asFeed/-/14ndbk6/-/index.xml'),
             (u'Sports', u'http://www.nation.co.ke/sports/-/1090/1090/-/view/asFeed/-/hlukmj/-/index.xml'),
             (u'Magazines', u'http://www.nation.co.ke/magazines/-/1190/1190/-/view/asFeed/-/fcxm6jz/-/index.xml'),
             (u'Op/Ed', u'http://www.nation.co.ke/oped/-/1192/1192/-/view/asFeed/-/unsp8mz/-/index.xml'),
             (u'Blogs', u'http://www.nation.co.ke/blogs/-/620/620/-/view/asFeed/-/28ia05z/-/index.xml')]

    conversion_options = {
                          'comment'   : description
                        , 'tags'      : category
                        , 'publisher' : publisher
                        , 'language'  : language
                        }

    def print_version(self, url):
        from calibre.ebooks.BeautifulSoup import BeautifulSoup
        soup = BeautifulSoup(self.browser.open(url).read())
        printversion = soup.find('a', text='Print')
        if printversion is None:
            return url
        else:
            return 'http://www.nation.co.ke' + printversion.parent['href']

    def preprocess_html(self, soup):
        return self.adeify_images(soup)

    def get_cover_img_url(self):
        return getattr(self, 'cover_img_url', None)

    def _download_cover_img(self):
        # hack to reuse download_cover
        old_cu = None
        try:
            old_cu = self.get_cover_ur()
        except:
            pass
        new_cu = self.get_cover_img_url()
        self.cover_url = new_cu
        self._download_cover()

        outfile = os.path.join(self.output_dir, 'cover_img.jpg')
        self.prepare_masthead_image(self.cover_path, outfile)

        self.cover_url = old_cu
        self.cover_img_path = outfile

    def download_cover_img(self):
        try:
            self._download_cover_img()
            self.report_progress(1, _('Downloaded cover to %s') % self.cover_img_path)
        except:
            self.log.exception('Failed to download cover img')
            self.cover_img_path = None

    def prepare_cover_image(self, path_to_image, out_path):
        with pw.ImageMagick():
            img = pw.NewMagickWand()
            if img < 0:
                raise RuntimeError('Out of memory')
            if not pw.MagickReadImage(img, path_to_image):
                severity = pw.ExceptionType(0)
                msg = pw.MagickGetException(img, byref(severity))
                raise IOError('Failed to read image from: %s: %s'
                        %(path_to_image, msg))
            if not pw.MagickWriteImage(img, out_path):
                raise RuntimeError('Failed to save image to %s'%out_path)
            pw.DestroyMagickWand(img)

    def default_cover(self, cover_file):
        '''
        Create a generic cover for recipes that have a special cover img
        '''
        try:
            try:
                from PIL import Image, ImageDraw, ImageFont
                Image, ImageDraw, ImageFont
            except ImportError:
                import Image, ImageDraw, ImageFont
            font_path = P('fonts/liberation/LiberationSerif-Bold.ttf')
            title = self.title if isinstance(self.title, unicode) else \
                    self.title.decode(preferred_encoding, 'replace')
            date = strftime(self.timefmt)
            app = '['+__appname__ +' '+__version__+']'

            COVER_WIDTH, COVER_HEIGHT = 590, 750
            img = Image.new('RGB', (COVER_WIDTH, COVER_HEIGHT), 'white')
            draw = ImageDraw.Draw(img)
            # Title
            font = ImageFont.truetype(font_path, 44)
            width, height = draw.textsize(title, font=font)
            left = max(int((COVER_WIDTH - width)/2.), 0)
            top = 15
            draw.text((left, top), title, fill=(0,0,0), font=font)
            bottom = top + height
            # Date
            font = ImageFont.truetype(font_path, 32)
            width, height = draw.textsize(date, font=font)
            left = max(int((COVER_WIDTH - width)/2.), 0)
            draw.text((left, bottom+15), date, fill=(0,0,0), font=font)
            # Vanity
            font = ImageFont.truetype(font_path, 28)
            width, height = draw.textsize(app, font=font)
            left = max(int((COVER_WIDTH - width)/2.), 0)
            top = COVER_HEIGHT - height - 15
            draw.text((left, top), app, fill=(0,0,0), font=font)

            # Logo
            logo_file = I('library.png')
            self.download_cover_img()
            if getattr(self, 'cover_img_path', None) is not None:
                logo_file = self.cover_img_path
                self.report_progress(1, _('using cover img from %s') % logo_file)
            logo = Image.open(logo_file, 'r')
            width, height = logo.size
            left = max(int((COVER_WIDTH - width)/2.), 0)
            top = max(int((COVER_HEIGHT - height)/2.), 0)
            img.paste(logo, (left, top))
            img = img.convert('RGB').convert('P', palette=Image.ADAPTIVE)
            img.convert('RGB').save(cover_file, 'JPEG')
            cover_file.flush()
        except Exception, e:
            self.log.exception('Failed to generate default cover ', e)
            return False
        return True
@@ -16,7 +16,7 @@ class NYTimes(BasicNewsRecipe):

     title = 'New York Times Top Stories'
     __author__ = 'GRiker'
-    language = _('English')
+    language = 'en'
     description = 'Top Stories from the New York Times'

     # List of sections typically included in Top Stories. Use a keyword from the
resources/recipes/standardmedia_ke.recipe (new file, 160 lines)
@@ -0,0 +1,160 @@
__license__   = 'GPL v3'
__copyright__ = '2010, Hans Donner <hans.donner at pobox.com>'
'''
www.standardmedia.co.ke
'''

import os
from calibre import strftime, __appname__, __version__
import calibre.utils.PythonMagickWand as pw
from ctypes import byref

from calibre.web.feeds.news import BasicNewsRecipe
from calibre.constants import preferred_encoding

class StandardMediaKeRecipe(BasicNewsRecipe):

    __author__ = 'Hans Donner'

    title = u'The Standard'
    description = 'News from Kenia'
    language = 'en'
    country = 'KE'
    publication_type = 'newspaper'
    publisher = 'standardmedia.co.ke'
    category = 'news, politics, Kenia'

    cover_img_url = 'http://www.standardmedia.co.ke/images/easLogoOther.gif'
    masthead_url = cover_img_url

    max_articles_per_feed = 200
    oldest_article = 3

    use_embedded_content = False
    remove_empty_feeds = True

    no_stylesheets = False

    feeds = [(u'Headlines', u'http://www.standardmedia.co.ke/rss/headlines.php'),
             (u'Business', u'http://www.standardmedia.co.ke/rss/business.php'),
             (u'Politics', u'http://www.standardmedia.co.ke/rss/politics.php'),
             (u'Editorial', u'http://www.standardmedia.co.ke/rss/editorial.php'),
             (u'Columnists', u'http://www.standardmedia.co.ke/rss/columnists.php'),
             (u'Sports', u'http://www.standardmedia.co.ke/rss/sports.php'),
             (u'Entertainment', u'http://www.standardmedia.co.ke/rss/entertain.php')]

    conversion_options = {
                          'comment'   : description
                        , 'tags'      : category
                        , 'publisher' : publisher
                        , 'language'  : language
                        }

    def print_version(self, url):
        import re
        p = re.compile('http://www.standardmedia.co.ke/.*InsidePage.php')
        return p.sub('http://www.standardmedia.co.ke/print.php', url)

    def preprocess_html(self, soup):
        return self.adeify_images(soup)

    def get_cover_img_url(self):
        return getattr(self, 'cover_img_url', None)

    def _download_cover_img(self):
        # hack to reuse download_cover
        old_cu = None
        try:
            old_cu = self.get_cover_ur()
        except:
            pass
        new_cu = self.get_cover_img_url()
        self.cover_url = new_cu
        self._download_cover()

        outfile = os.path.join(self.output_dir, 'cover_img.jpg')
        self.prepare_masthead_image(self.cover_path, outfile)

        self.cover_url = old_cu
        self.cover_img_path = outfile

    def download_cover_img(self):
        try:
            self._download_cover_img()
            self.report_progress(1, _('Downloaded cover to %s') % self.cover_img_path)
        except:
            self.log.exception('Failed to download cover img')
            self.cover_img_path = None

    def prepare_cover_image(self, path_to_image, out_path):
        with pw.ImageMagick():
            img = pw.NewMagickWand()
            if img < 0:
                raise RuntimeError('Out of memory')
            if not pw.MagickReadImage(img, path_to_image):
                severity = pw.ExceptionType(0)
                msg = pw.MagickGetException(img, byref(severity))
                raise IOError('Failed to read image from: %s: %s'
                        %(path_to_image, msg))
            if not pw.MagickWriteImage(img, out_path):
                raise RuntimeError('Failed to save image to %s'%out_path)
            pw.DestroyMagickWand(img)

    def default_cover(self, cover_file):
        '''
        Create a generic cover for recipes that have a special cover img
        '''
        try:
            try:
                from PIL import Image, ImageDraw, ImageFont
                Image, ImageDraw, ImageFont
            except ImportError:
                import Image, ImageDraw, ImageFont
            font_path = P('fonts/liberation/LiberationSerif-Bold.ttf')
            title = self.title if isinstance(self.title, unicode) else \
                    self.title.decode(preferred_encoding, 'replace')
            date = strftime(self.timefmt)
            app = '['+__appname__ +' '+__version__+']'

            COVER_WIDTH, COVER_HEIGHT = 590, 750
            img = Image.new('RGB', (COVER_WIDTH, COVER_HEIGHT), 'white')
            draw = ImageDraw.Draw(img)
            # Title
            font = ImageFont.truetype(font_path, 44)
            width, height = draw.textsize(title, font=font)
            left = max(int((COVER_WIDTH - width)/2.), 0)
            top = 15
            draw.text((left, top), title, fill=(0,0,0), font=font)
            bottom = top + height
            # Date
            font = ImageFont.truetype(font_path, 32)
            width, height = draw.textsize(date, font=font)
            left = max(int((COVER_WIDTH - width)/2.), 0)
            draw.text((left, bottom+15), date, fill=(0,0,0), font=font)
            # Vanity
            font = ImageFont.truetype(font_path, 28)
            width, height = draw.textsize(app, font=font)
            left = max(int((COVER_WIDTH - width)/2.), 0)
            top = COVER_HEIGHT - height - 15
            draw.text((left, top), app, fill=(0,0,0), font=font)

            # Logo
            logo_file = I('library.png')
            self.download_cover_img()
            if getattr(self, 'cover_img_path', None) is not None:
                logo_file = self.cover_img_path
                self.report_progress(1, _('using cover img from %s') % logo_file)
            logo = Image.open(logo_file, 'r')
            width, height = logo.size
            left = max(int((COVER_WIDTH - width)/2.), 0)
            top = max(int((COVER_HEIGHT - height)/2.), 0)
            img.paste(logo, (left, top))
            img = img.convert('RGB').convert('P', palette=Image.ADAPTIVE)
            img.convert('RGB').save(cover_file, 'JPEG')
            cover_file.flush()
        except Exception, e:
            self.log.exception('Failed to generate default cover ', e)
            return False
        return True
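The print_version rewrite above swaps the InsidePage.php page for the printer-friendly endpoint while keeping the query string. Applied to a hypothetical article URL:

    import re
    url = 'http://www.standardmedia.co.ke/InsidePage.php?id=2000002&cid=4'  # illustrative
    p = re.compile('http://www.standardmedia.co.ke/.*InsidePage.php')
    print p.sub('http://www.standardmedia.co.ke/print.php', url)
    # -> http://www.standardmedia.co.ke/print.php?id=2000002&cid=4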
@@ -447,6 +447,7 @@ from calibre.devices.binatone.driver import README
 from calibre.devices.hanvon.driver import N516, EB511
 from calibre.devices.edge.driver import EDGE
 from calibre.devices.teclast.driver import TECLAST_K3
+from calibre.devices.sne.driver import SNE

 from calibre.ebooks.metadata.fetch import GoogleBooks, ISBNDB, Amazon
 from calibre.library.catalog import CSV_XML, EPUB_MOBI
@@ -524,7 +525,8 @@ plugins += [
     EB511,
     ELONEX,
     TECLAST_K3,
-    EDGE
+    EDGE,
+    SNE
 ]
 plugins += [x for x in list(locals().values()) if isinstance(x, type) and \
         x.__name__.endswith('MetadataReader')]
@@ -21,12 +21,15 @@ class ANDROID(USBMS):
             0x0bb4 : { 0x0c02 : [0x100], 0x0c01 : [0x100]},

             # Motorola
-            0x22b8 : { 0x41d9 : [0x216], 0x2d67 : [0x100]},
+            0x22b8 : { 0x41d9 : [0x216], 0x2d67 : [0x100], 0x41db : [0x216]},

             0x18d1 : { 0x4e11 : [0x0100, 0x226], 0x4e12: [0x0100, 0x226]},

             # Samsung
             0x04e8 : { 0x681d : [0x0222]},
+
+            # Acer
+            0x502 : { 0x3203 : [0x0100]},
             }
     EBOOK_DIR_MAIN = ['wordplayer/calibretransfer', 'eBooks/import', 'Books']
     EXTRA_CUSTOMIZATION_MESSAGE = _('Comma separated list of directories to '
@@ -34,9 +37,9 @@ class ANDROID(USBMS):
             'be used')
     EXTRA_CUSTOMIZATION_DEFAULT = ', '.join(EBOOK_DIR_MAIN)

-    VENDOR_NAME = ['HTC', 'MOTOROLA', 'GOOGLE_', 'ANDROID']
+    VENDOR_NAME = ['HTC', 'MOTOROLA', 'GOOGLE_', 'ANDROID', 'ACER']
     WINDOWS_MAIN_MEM = ['ANDROID_PHONE', 'A855', 'A853', 'INC.NEXUS_ONE',
-            '__UMS_COMPOSITE', '_MB200']
+            '__UMS_COMPOSITE', '_MB200', 'MASS_STORAGE']

     OSX_MAIN_MEM = 'HTC Android Phone Media'
@@ -206,4 +206,8 @@ class ELONEX(EB600):
     WINDOWS_MAIN_MEM = 'EBOOK'
     WINDOWS_CARD_A_MEM = 'EBOOK'

+    @classmethod
+    def can_handle(cls, dev, debug=False):
+        return dev[3] == 'Elonex' and dev[4] == 'eBook'
+
@@ -105,6 +105,7 @@ class HANLINV5(HANLINV3):
     MAIN_MEMORY_VOLUME_LABEL  = 'Hanlin V5 Internal Memory'
     STORAGE_CARD_VOLUME_LABEL = 'Hanlin V5 Storage Card'

+    OSX_EJECT_COMMAND = ['diskutil', 'unmount', 'force']

 class BOOX(HANLINV3):
src/calibre/devices/sne/__init__.py (new empty file)

src/calibre/devices/sne/driver.py (new file, 39 lines)
@@ -0,0 +1,39 @@
# -*- coding: utf-8 -*-

__license__   = 'GPL v3'
__copyright__ = '2009, John Schember <john at nachtimwald.com>'
__docformat__ = 'restructuredtext en'

'''
Device driver for Bookeen's Cybook Gen 3
'''

from calibre.devices.usbms.driver import USBMS

class SNE(USBMS):

    name           = 'Samsung SNE Device Interface'
    gui_name       = 'Samsung SNE'
    description    = _('Communicate with the Samsung SNE eBook reader.')
    author         = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']

    # Ordered list of supported formats
    # Be sure these have an entry in calibre.devices.mime
    FORMATS     = ['epub', 'txt']

    VENDOR_ID   = [0x04e8]
    PRODUCT_ID  = [0x2051]
    BCD         = [0x0323]

    VENDOR_NAME = ''
    #WINDOWS_MAIN_MEM = 'MASS_STORAGE'
    #WINDOWS_CARD_A_MEM = 'MASS_STORAGE'

    MAIN_MEMORY_VOLUME_LABEL  = 'SNE Main Memory'
    STORAGE_CARD_VOLUME_LABEL = 'SNE Storage Card'

    EBOOK_DIR_MAIN = 'Book'
    SUPPORTS_SUB_DIRS = True
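The driver itself is purely declarative; the detection logic lives in the USBMS base classes. Roughly, and this is a sketch under the assumption that detection compares the bus-reported vendor/product/BCD triple against these class lists:

    detected = (0x04e8, 0x2051, 0x0323)   # vendor, product, bcd seen on the USB bus
    claimed = (detected[0] in SNE.VENDOR_ID and
               detected[1] in SNE.PRODUCT_ID and
               detected[2] in SNE.BCD)
    print claimed   # True, so the SNE driver claims the device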
@@ -76,6 +76,7 @@ class Device(DeviceConfig, DevicePlugin):
     # storage cards. Should be a regular expression that matches the
     # main memory mount point assigned by OS X
     OSX_MAIN_MEM_VOL_PAT = None
+    OSX_EJECT_COMMAND = ['diskutil', 'eject']

     MAIN_MEMORY_VOLUME_LABEL  = ''
     STORAGE_CARD_VOLUME_LABEL = ''
@@ -669,7 +670,7 @@ class Device(DeviceConfig, DevicePlugin):
             x = getattr(self, x, None)
             if x is not None:
                 try:
-                    subprocess.Popen(['diskutil', 'eject', x])
+                    subprocess.Popen(self.OSX_EJECT_COMMAND + [x])
                 except:
                     pass
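The point of hoisting the command into a class attribute shows up in the Hanlin V5 hunk earlier in this commit: a subclass can now change how ejection is performed without reimplementing the eject logic. A minimal sketch of such an override (hypothetical subclass name):

    class ForceUnmountDevice(Device):
        # for devices whose volumes OS X refuses to eject cleanly
        OSX_EJECT_COMMAND = ['diskutil', 'unmount', 'force']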
@@ -813,6 +814,9 @@ class Device(DeviceConfig, DevicePlugin):

         settings = self.settings()
         template = self.save_template()
+        if mdata.tags and _('News') in mdata.tags:
+            today = time.localtime()
+            template = "{title}_%d-%d-%d" % (today[0], today[1], today[2])
         use_subdirs = self.SUPPORTS_SUB_DIRS and settings.use_subdirs

         fname = sanitize(fname)
@@ -129,11 +129,16 @@ class PageElement:

     def extract(self):
         """Destructively rips this element out of the tree."""
+        # Changed by KG as list.remove uses __eq__ which is True for two Tags
+        # with the same name and attributes.
         if self.parent:
-            try:
-                self.parent.contents.remove(self)
-            except ValueError:
-                pass
+            idx = None
+            for i, x in enumerate(self.parent.contents):
+                if x is self:
+                    idx = i
+                    break
+            if idx is not None:
+                self.parent.contents.pop(idx)

         #Find the two elements that would be next to each other if
         #this element (and any children) hadn't been parsed. Connect
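The comment in the hunk above refers to BeautifulSoup's Tag.__eq__, which compares tags structurally, so list.remove can delete a different but identical-looking sibling. A stripped-down illustration of the pitfall (simplified stand-in class, not the real Tag):

    class FakeTag(object):
        def __init__(self, name, attrs):
            self.name, self.attrs = name, attrs
        def __eq__(self, other):
            return self.name == other.name and self.attrs == other.attrs

    a, b = FakeTag('p', {}), FakeTag('p', {})
    contents = [a, b]
    contents.remove(b)         # removes a: list.remove picks the first __eq__ match
    assert contents[0] is b    # the wrong sibling survived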
@@ -1075,7 +1080,7 @@ class BeautifulStoneSoup(Tag, SGMLParser):
             self.originalEncoding = None
         else:
             # Changed detection by Kovid
-            markup, self.originalEncoding = chardet.xml_to_unicode(markup)
+            markup, self.originalEncoding = chardet.xml_to_unicode(markup)
         if markup:
             if self.markupMassage:
                 if not isList(self.markupMassage):
@@ -1090,7 +1095,7 @@ class BeautifulStoneSoup(Tag, SGMLParser):
                     del(self.markupMassage)
         self.markup = markup
         self.reset()
-
+
         SGMLParser.feed(self, markup)
         # Close out any unfinished strings and close all the open tags.
         self.endData()
@@ -1309,7 +1314,7 @@ class BeautifulStoneSoup(Tag, SGMLParser):
                 try:
                     data = unichr(int(ref))
                 except ValueError: # Bad numerical entity. Added by Kovid
-                    data = u''
+                    data = u''
             else:
                 data = '&#%s;' % ref
             self.handle_data(data)
@@ -1663,7 +1668,7 @@ class UnicodeDammit:
         self._detectEncoding(markup)
         self.smartQuotesTo = smartQuotesTo
         self.triedEncodings = []
-
+
         if markup == '' or isinstance(markup, unicode):
             self.originalEncoding = None
             self.unicode = unicode(markup)
@@ -1677,7 +1682,7 @@ class UnicodeDammit:
         for proposedEncoding in (documentEncoding, sniffedEncoding):
             u = self._convertFrom(proposedEncoding)
             if u: break
-
+
         # If no luck and we have auto-detection library, try that:
         if not u and chardet and not isinstance(self.markup, unicode):
             u = self._convertFrom(chardet.detect(self.markup)['encoding'])
@@ -1751,9 +1756,9 @@ class UnicodeDammit:
         elif data[:4] == '\xff\xfe\x00\x00':
             encoding = 'utf-32le'
             data = data[4:]
-
+
         newdata = unicode(data, encoding)
-
+
         return newdata

     def _detectEncoding(self, xml_data):
@@ -1763,9 +1768,9 @@ class UnicodeDammit:
         if xml_data[:4] == '\x4c\x6f\xa7\x94':
            # EBCDIC
            xml_data = self._ebcdic_to_ascii(xml_data)
-
+
         # By Kovid commented out all the recoding to UTF-8 of UTF-16 and UTF-32
-        # as this doesn't make sense and doesn't work for the test case
+        # as this doesn't make sense and doesn't work for the test case
         # BeautifulSoup.UnicodeDammit(u'abcd'.encode('utf-16')).unicode
         elif xml_data[:4] == '\x00\x3c\x00\x3f':
             # UTF-16BE
@@ -1817,14 +1822,14 @@ class UnicodeDammit:
             xml_encoding_match = None
         if xml_encoding_match:
             xml_encoding = xml_encoding_match.groups()[0].lower()
-
+
             if sniffed_xml_encoding and \
                (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
                                  'iso-10646-ucs-4', 'ucs-4', 'csucs4',
                                  'utf-16', 'utf-32', 'utf_16', 'utf_32',
                                  'utf16', 'u16')):
                 xml_encoding = sniffed_xml_encoding
-
+
         return xml_data, xml_encoding, sniffed_xml_encoding
@@ -96,11 +96,16 @@ def _get_cover(soup, rdr):
     r = {}
     for img in soup('img'):
         try:
-            r[abs(float(img['height'])/float(img['width'])-1.25)] = img['src']
+            r[abs(float(re.search(r'[0-9.]+',
+                img['height']).group())/float(re.search(r'[0-9.]+',
+                img['width']).group())-1.25)] = img['src']
         except KeyError:
             # interestingly, occasionally the only image without height
             # or width attrs is the cover...
             r[0] = img['src']
+        except:
+            # Probably invalid width, height attributes, ignore
+            continue
     l = r.keys()
     l.sort()
     if l:
@@ -13,7 +13,8 @@ from calibre.customize import FileTypePlugin

 def is_comic(list_of_names):
     extensions = set([x.rpartition('.')[-1].lower() for x in list_of_names])
-    return len(extensions) == 1 and iter(extensions).next() in ('jpg', 'jpeg', 'png')
+    comic_extensions = set(['jpg', 'jpeg', 'png'])
+    return len(extensions - comic_extensions) == 0

 class ArchiveExtract(FileTypePlugin):
     name = 'Archive Extract'
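The rewritten test treats an archive as a comic when every extension in it is an image type, instead of requiring exactly one distinct extension. A quick check of the new set arithmetic (hypothetical file list):

    names = ['01.jpg', '02.JPG', '03.png']
    extensions = set([x.rpartition('.')[-1].lower() for x in names])
    print len(extensions - set(['jpg', 'jpeg', 'png'])) == 0   # True: mixed jpg/png now passes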
@@ -51,6 +51,28 @@ class OCF(object):
     def __init__(self):
         raise NotImplementedError('Abstract base class')

+class Encryption(object):
+
+    OBFUSCATION_ALGORITHMS = frozenset(['http://ns.adobe.com/pdf/enc#RC',
+            'http://www.idpf.org/2008/embedding'])
+
+    def __init__(self, raw):
+        from lxml import etree
+        self.root = etree.fromstring(raw) if raw else None
+        self.entries = {}
+        if self.root is not None:
+            for em in self.root.xpath('descendant::*[contains(name(), "EncryptionMethod")]'):
+                algorithm = em.get('Algorithm', '')
+                cr = em.getparent().xpath('descendant::*[contains(name(), "CipherReference")]')
+                if cr:
+                    uri = cr[0].get('URI', '')
+                    if uri and algorithm:
+                        self.entries[uri] = algorithm
+
+    def is_encrypted(self, uri):
+        algo = self.entries.get(uri, None)
+        return algo is not None and algo not in self.OBFUSCATION_ALGORITHMS
+
+
 class OCFReader(OCF):
     def __init__(self):
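For reference, a sketch of the kind of META-INF/encryption.xml document the new class parses, and what is_encrypted reports for it (the file name is illustrative; element names follow the EPUB OCF conventions):

    raw = '''<encryption xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
      <EncryptedData xmlns="http://www.w3.org/2001/04/xmlenc#">
        <EncryptionMethod Algorithm="http://www.idpf.org/2008/embedding"/>
        <CipherData><CipherReference URI="fonts/serif.otf"/></CipherData>
      </EncryptedData>
    </encryption>'''
    enc = Encryption(raw)
    print enc.is_encrypted('fonts/serif.otf')
    # False: font obfuscation is in OBFUSCATION_ALGORITHMS, so it is not treated as DRM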
@@ -72,6 +94,11 @@ class OCFReader(OCF):
             self.opf = OPF(f, self.root, populate_spine=False)
         except KeyError:
             raise EPubException("missing OPF package file")
+        try:
+            with closing(self.open(self.ENCRYPTION_PATH)) as f:
+                self.encryption_meta = Encryption(f.read())
+        except:
+            self.encryption_meta = Encryption(None)


 class OCFZipReader(OCFReader):
@@ -98,7 +125,7 @@ class OCFDirReader(OCFReader):
     def open(self, path, *args, **kwargs):
         return open(os.path.join(self.root, path), *args, **kwargs)

-def get_cover(opf, opf_path, stream):
+def get_cover(opf, opf_path, stream, reader=None):
     import posixpath
     from calibre.ebooks import render_html_svg_workaround
     from calibre.utils.logging import default_log
@@ -106,6 +133,9 @@ def get_cover(opf, opf_path, stream):
     stream.seek(0)
     zf = ZipFile(stream)
     if raster_cover:
+        if reader is not None and \
+                reader.encryption_meta.is_encrypted(raster_cover):
+            return
         base = posixpath.dirname(opf_path)
         cpath = posixpath.normpath(posixpath.join(base, raster_cover))
         try:
@@ -122,6 +152,8 @@ def get_cover(opf, opf_path, stream):
         cpage = opf.first_spine_item()
         if not cpage:
             return
+        if reader is not None and reader.encryption_meta.is_encrypted(cpage):
+            return

     with TemporaryDirectory('_epub_meta') as tdir:
         with CurrentDir(tdir):
@@ -139,7 +171,7 @@ def get_metadata(stream, extract_cover=True):
     mi = MetaInformation(reader.opf)
     if extract_cover:
         try:
-            cdata = get_cover(reader.opf, reader.opf_path, stream)
+            cdata = get_cover(reader.opf, reader.opf_path, stream, reader=reader)
             if cdata is not None:
                 mi.cover_data = ('jpg', cdata)
         except:
@@ -1048,6 +1048,29 @@ class Manifest(object):
             self._data = None
         return property(fget, fset, fdel, doc=doc)

+    def unload_data_from_memory(self, memory=None):
+        if isinstance(self._data, (str, bytes)):
+            if memory is None:
+                from calibre.ptempfile import PersistentTemporaryFile
+                pt = PersistentTemporaryFile(suffix='_oeb_base_mem_unloader.img')
+                pt.write(self._data)
+                pt.close()
+                def loader(*args):
+                    with open(pt.name, 'rb') as f:
+                        ans = f.read()
+                    os.remove(pt.name)
+                    return ans
+                self._loader = loader
+            else:
+                def loader2(*args):
+                    with open(memory, 'rb') as f:
+                        ans = f.read()
+                    return ans
+                self._loader = loader2
+            self._data = None
+
+
     def __str__(self):
         data = self.data
         if isinstance(data, etree._Element):
@@ -48,5 +48,6 @@ class OEBOutput(OutputFormatPlugin):
                     os.makedirs(dir)
                 with open(path, 'wb') as f:
                     f.write(str(item))
+                item.unload_data_from_memory(memory=path)

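Taken together with the Manifest change above, the write loop now drops each item's bytes from memory as soon as they are on disk. Assuming the data property falls back to the saved _loader when _data is None (which is how the loader hooks above are consumed), the round-trip looks like:

    with open(path, 'wb') as f:
        f.write(str(item))
    item.unload_data_from_memory(memory=path)   # _data becomes None, loader points at path
    raw = item.data                             # lazily re-read from disk on next access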
@@ -27,9 +27,12 @@ class RescaleImages(object):
         except ImportError:
             import Image as PILImage

-        page_width, page_height = self.opts.dest.width, self.opts.dest.height
-        if not getattr(self.opts, 'is_image_collection', False):
+        is_image_collection = getattr(self.opts, 'is_image_collection', False)
+
+        if is_image_collection:
+            page_width, page_height = self.opts.dest.comic_screen_size
+        else:
+            page_width, page_height = self.opts.dest.width, self.opts.dest.height
             page_width -= (self.opts.margin_left + self.opts.margin_right) * self.opts.dest.dpi/72.
             page_height -= (self.opts.margin_top + self.opts.margin_bottom) * self.opts.dest.dpi/72.
         for item in self.oeb.manifest:
@@ -56,17 +59,21 @@ class RescaleImages(object):
                 scaled, new_width, new_height = fit_image(width, height,
                         page_width, page_height)
                 if scaled:
+                    data = None
                     self.log('Rescaling image from %dx%d to %dx%d'%(
                         width, height, new_width, new_height), item.href)
                     if qt:
                         img = img.scaled(new_width, new_height,
                                 Qt.IgnoreAspectRatio, Qt.SmoothTransformation)
-                        item.data = pixmap_to_data(img)
+                        data = pixmap_to_data(img)
                     else:
                         im = im.resize((int(new_width), int(new_height)), PILImage.ANTIALIAS)
                         of = cStringIO.StringIO()
                         im.convert('RGB').save(of, 'JPEG')
-                        item.data = of.getvalue()
+                        data = of.getvalue()
+                    if data is not None:
+                        item.data = data
+                        item.unload_data_from_memory()
@@ -154,7 +154,7 @@ class DeviceManager(Thread):
             if not self.do_connect(possibly_connected_devices):
                 print 'Device connect failed again, giving up'

-    def umount_device(self):
+    def umount_device(self, *args):
         if self.is_device_connected:
             self.connected_device.eject()
             self.ejected_devices.add(self.connected_device)
@@ -558,7 +558,8 @@ class DeviceGUI(object):
                     specific_format=specific_format,
                     exclude_auto=do_auto_convert)
             if do_auto_convert:
-                ids = list(set(ids).difference(_auto_ids))
+                nids = list(set(ids).difference(_auto_ids))
+                ids = [i for i in ids if i in nids]
             else:
                 _auto_ids = []

@@ -653,7 +654,7 @@ class DeviceGUI(object):
                 ])
             error_dialog(self, _('Failed to email books'),
                     _('Failed to email the following books:'),
-                    '%s'%errors
+                    '%s'%errors, show=True
                     )
         else:
             self.status_bar.showMessage(_('Sent by email:') + ', '.join(good),
@@ -5,7 +5,7 @@ from PyQt4.QtGui import QDialog

 from calibre.gui2.dialogs.search_ui import Ui_Dialog
 from calibre.gui2 import qstring_to_unicode
-from calibre.library.database2 import CONTAINS_MATCH, EQUALS_MATCH
+from calibre.library.caches import CONTAINS_MATCH, EQUALS_MATCH

 class SearchDialog(QDialog, Ui_Dialog):
@@ -17,7 +17,7 @@ from PyQt4.QtCore import QAbstractTableModel, QVariant, Qt, pyqtSignal, \
 from calibre import strftime
 from calibre.ptempfile import PersistentTemporaryFile
 from calibre.utils.pyparsing import ParseException
-from calibre.library.database2 import FIELD_MAP, _match, CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH
+from calibre.library.caches import _match, CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH
 from calibre.gui2 import NONE, TableView, qstring_to_unicode, config, \
         error_dialog
 from calibre.gui2.widgets import EnLineEdit, TagsLineEdit
@@ -560,16 +560,16 @@ class BooksModel(QAbstractTableModel):

     def build_data_convertors(self):
-        tidx = FIELD_MAP['title']
-        aidx = FIELD_MAP['authors']
-        sidx = FIELD_MAP['size']
-        ridx = FIELD_MAP['rating']
-        pidx = FIELD_MAP['publisher']
-        tmdx = FIELD_MAP['timestamp']
-        pddx = FIELD_MAP['pubdate']
-        srdx = FIELD_MAP['series']
-        tgdx = FIELD_MAP['tags']
-        siix = FIELD_MAP['series_index']
+        tidx = self.db.FIELD_MAP['title']
+        aidx = self.db.FIELD_MAP['authors']
+        sidx = self.db.FIELD_MAP['size']
+        ridx = self.db.FIELD_MAP['rating']
+        pidx = self.db.FIELD_MAP['publisher']
+        tmdx = self.db.FIELD_MAP['timestamp']
+        pddx = self.db.FIELD_MAP['pubdate']
+        srdx = self.db.FIELD_MAP['series']
+        tgdx = self.db.FIELD_MAP['tags']
+        siix = self.db.FIELD_MAP['series_index']

         def authors(r):
             au = self.db.data[r][aidx]
@@ -71,6 +71,8 @@ class SearchBox2(QComboBox):
         self.timer = None
         self.setInsertPolicy(self.NoInsert)
         self.setMaxCount(self.MAX_COUNT)
+        self.setSizeAdjustPolicy(self.AdjustToMinimumContentsLengthWithIcon)
+        self.setMinimumContentsLength(50)

     def initialize(self, opt_name, colorize=False,
                    help_text=_('Search')):
@@ -212,6 +214,8 @@ class SavedSearchBox(QComboBox):
         self.help_state = True
         self.prev_search = ''
         self.setInsertPolicy(self.NoInsert)
+        self.setSizeAdjustPolicy(self.AdjustToMinimumContentsLengthWithIcon)
+        self.setMinimumContentsLength(10)

     def initialize(self, _saved_searches, _search_box, colorize=False, help_text=_('Search')):
         self.tool_tip_text = self.toolTip()
@@ -57,7 +57,8 @@ from calibre.gui2.dialogs.choose_format import ChooseFormatDialog
 from calibre.gui2.dialogs.book_info import BookInfo
 from calibre.ebooks import BOOK_EXTENSIONS
 from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag, NavigableString
-from calibre.library.database2 import LibraryDatabase2, CoverCache
+from calibre.library.database2 import LibraryDatabase2
+from calibre.library.caches import CoverCache
 from calibre.gui2.dialogs.confirm_delete import confirm

 class SaveMenu(QMenu):
@@ -193,6 +194,9 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI):
         self.donate_action = self.system_tray_menu.addAction(
                 QIcon(I('donate.svg')), _('&Donate to support calibre'))
         self.donate_button.setDefaultAction(self.donate_action)
+        self.eject_action = self.system_tray_menu.addAction(
+                QIcon(I('eject.svg')), _('&Eject connected device'))
+        self.eject_action.setEnabled(False)
         if not config['show_donate_button']:
             self.donate_button.setVisible(False)
         self.addAction(self.quit_action)
@@ -233,6 +237,7 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI):
         QObject.connect(self.location_view,
                 SIGNAL('umount_device()'),
                 self.device_manager.umount_device)
+        self.eject_action.triggered.connect(self.device_manager.umount_device)

         ####################### Vanity ########################
         self.vanity_template = _('<p>For help see the: <a href="%s">User Manual</a>'
@@ -513,7 +518,6 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI):
                 ('timestamp', Qt.DescendingOrder)))
         if not self.library_view.restore_column_widths():
             self.library_view.resizeColumnsToContents()
-        self.library_view.resizeRowsToContents()
         self.search.setFocus(Qt.OtherFocusReason)
         self.cover_cache = CoverCache(self.library_path)
         self.cover_cache.start()
@@ -622,6 +626,11 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI):
             self.status_bar.tag_view_button.toggle()

         self._add_filesystem_book = Dispatcher(self.__add_filesystem_book)
+        v = self.library_view
+        if v.model().rowCount(None) > 1:
+            v.resizeRowToContents(0)
+            height = v.rowHeight(0)
+            self.library_view.verticalHeader().setDefaultSectionSize(height)


     def resizeEvent(self, ev):
@@ -889,6 +898,7 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI):
                     self.device_manager.device.card_prefix(),
                     self.device_manager.device)
             self.location_view.model().device_connected(self.device_manager.device)
+            self.eject_action.setEnabled(True)
         else:
             self.save_device_view_settings()
             self.device_connected = False
@@ -900,6 +910,7 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI):
             if self.current_view() != self.library_view:
                 self.status_bar.reset_info()
                 self.location_view.setCurrentIndex(self.location_view.model().index(0))
+            self.eject_action.setEnabled(False)

     def info_read(self, job):
         '''
@@ -944,7 +955,6 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI):
             view.read_settings()
             if not view.restore_column_widths():
                 view.resizeColumnsToContents()
-            view.resizeRowsToContents()
             view.resize_on_select = not view.isVisible()
             self.sync_news()
             self.sync_catalogs()
@@ -2080,7 +2090,6 @@ class Main(MainWindow, Ui_MainWindow, DeviceGUI):
                 self.card_b_view if page == 3 else None
         if view:
             if view.resize_on_select:
-                view.resizeRowsToContents()
                 if not view.restore_column_widths():
                     view.resizeColumnsToContents()
                 view.resize_on_select = False
src/calibre/library/caches.py (new file, 430 lines)
@@ -0,0 +1,430 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement

__license__   = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import collections, glob, os, re, itertools, functools
from itertools import repeat

from PyQt4.QtCore import QThread, QReadWriteLock
from PyQt4.QtGui import QImage

from calibre.utils.search_query_parser import SearchQueryParser
from calibre.utils.date import parse_date

class CoverCache(QThread):

    def __init__(self, library_path, parent=None):
        QThread.__init__(self, parent)
        self.library_path = library_path
        self.id_map = None
        self.id_map_lock = QReadWriteLock()
        self.load_queue = collections.deque()
        self.load_queue_lock = QReadWriteLock(QReadWriteLock.Recursive)
        self.cache = {}
        self.cache_lock = QReadWriteLock()
        self.id_map_stale = True
        self.keep_running = True

    def build_id_map(self):
        self.id_map_lock.lockForWrite()
        self.id_map = {}
        for f in glob.glob(os.path.join(self.library_path, '*', '* (*)', 'cover.jpg')):
            c = os.path.basename(os.path.dirname(f))
            try:
                id = int(re.search(r'\((\d+)\)', c[c.rindex('('):]).group(1))
                self.id_map[id] = f
            except:
                continue
        self.id_map_lock.unlock()
        self.id_map_stale = False

    def set_cache(self, ids):
        self.cache_lock.lockForWrite()
        already_loaded = set([])
        for id in self.cache.keys():
            if id in ids:
                already_loaded.add(id)
            else:
                self.cache.pop(id)
        self.cache_lock.unlock()
        ids = [i for i in ids if i not in already_loaded]
        self.load_queue_lock.lockForWrite()
        self.load_queue = collections.deque(ids)
        self.load_queue_lock.unlock()

    def run(self):
        while self.keep_running:
            if self.id_map is None or self.id_map_stale:
                self.build_id_map()
            while True: # Load images from the load queue
                self.load_queue_lock.lockForWrite()
                try:
                    id = self.load_queue.popleft()
                except IndexError:
                    break
                finally:
                    self.load_queue_lock.unlock()

                self.cache_lock.lockForRead()
                need = True
                if id in self.cache.keys():
                    need = False
                self.cache_lock.unlock()
                if not need:
                    continue
                path = None
                self.id_map_lock.lockForRead()
                if id in self.id_map.keys():
                    path = self.id_map[id]
                else:
                    self.id_map_stale = True
                self.id_map_lock.unlock()
                if path and os.access(path, os.R_OK):
                    try:
                        img = QImage()
                        data = open(path, 'rb').read()
                        img.loadFromData(data)
                        if img.isNull():
                            continue
                    except:
                        continue
                    self.cache_lock.lockForWrite()
                    self.cache[id] = img
                    self.cache_lock.unlock()

            self.sleep(1)

    def stop(self):
        self.keep_running = False

    def cover(self, id):
        val = None
        if self.cache_lock.tryLockForRead(50):
            val = self.cache.get(id, None)
            self.cache_lock.unlock()
        return val

    def clear_cache(self):
        self.cache_lock.lockForWrite()
        self.cache = {}
        self.cache_lock.unlock()

    def refresh(self, ids):
        self.cache_lock.lockForWrite()
        for id in ids:
            self.cache.pop(id, None)
        self.cache_lock.unlock()
        self.load_queue_lock.lockForWrite()
        for id in ids:
            self.load_queue.appendleft(id)
        self.load_queue_lock.unlock()

### Global utility function for get_match here and in gui2/library.py
CONTAINS_MATCH = 0
EQUALS_MATCH   = 1
REGEXP_MATCH   = 2
def _match(query, value, matchkind):
    for t in value:
        t = t.lower()
        try:    ### ignore regexp exceptions, required because search-ahead tries before typing is finished
            if ((matchkind == EQUALS_MATCH and query == t) or
                (matchkind == REGEXP_MATCH and re.search(query, t, re.I)) or ### search unanchored
                (matchkind == CONTAINS_MATCH and query in t)):
                return True
        except re.error:
            pass
    return False
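A small demonstration of how _match is meant to be called; value is always a list of candidate strings (here a hypothetical tags field split on commas):

    tags = 'Fiction,Science Fiction,To Read'.split(',')
    print _match('science', tags, CONTAINS_MATCH)        # True: substring match
    print _match('science fiction', tags, EQUALS_MATCH)  # True: exact, case-folded
    print _match(r'^to\s', tags, REGEXP_MATCH)           # True: regexp searched against each tag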
class ResultCache(SearchQueryParser):

    '''
    Stores sorted and filtered metadata in memory.
    '''

    def build_relop_dict(self):
        '''
        Because the database dates have time in them, we can't use direct
        comparisons even when field_count == 3. The query has time = 0, but
        the database object has time == something. As such, a complete compare
        will almost never be correct.
        '''
        def relop_eq(db, query, field_count):
            if db.year == query.year:
                if field_count == 1:
                    return True
                if db.month == query.month:
                    if field_count == 2:
                        return True
                    return db.day == query.day
            return False

        def relop_gt(db, query, field_count):
            if db.year > query.year:
                return True
            if field_count > 1 and db.year == query.year:
                if db.month > query.month:
                    return True
                return field_count == 3 and db.month == query.month and db.day > query.day
            return False

        def relop_lt(db, query, field_count):
            if db.year < query.year:
                return True
            if field_count > 1 and db.year == query.year:
                if db.month < query.month:
                    return True
                return field_count == 3 and db.month == query.month and db.day < query.day
            return False

        def relop_ne(db, query, field_count):
            return not relop_eq(db, query, field_count)

        def relop_ge(db, query, field_count):
            return not relop_lt(db, query, field_count)

        def relop_le(db, query, field_count):
            return not relop_gt(db, query, field_count)

        self.search_relops = {'=':[1, relop_eq], '>':[1, relop_gt], '<':[1, relop_lt], \
                              '!=':[2, relop_ne], '>=':[2, relop_ge], '<=':[2, relop_le]}
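The field_count convention in these relops: a query like 2010 compares on the year only (field_count 1), 2010-02 on year and month (2), and 2010-02-21 on the full date (3). That is what lets date:>2010-02 work even though stored timestamps carry a time of day. Traced by hand for a hypothetical query, as get_matches below consumes it:

    query = '>2010-02'              # as typed after date: or pubdate:
    p = 1                           # self.search_relops['>'] is [1, relop_gt]
    query = query[p:]               # '2010-02', operator stripped
    print query.count('-') + 1      # 2: compare year and month, ignore day and time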
def __init__(self, FIELD_MAP):
|
||||
self.FIELD_MAP = FIELD_MAP
|
||||
self._map = self._map_filtered = self._data = []
|
||||
self.first_sort = True
|
||||
SearchQueryParser.__init__(self)
|
||||
self.build_relop_dict()
|
||||
|
||||
def __getitem__(self, row):
|
||||
return self._data[self._map_filtered[row]]
|
||||
|
||||
def __len__(self):
|
||||
return len(self._map_filtered)
|
||||
|
||||
def __iter__(self):
|
||||
for id in self._map_filtered:
|
||||
yield self._data[id]
|
||||
|
||||
def universal_set(self):
|
||||
return set([i[0] for i in self._data if i is not None])
|
||||
|
||||
def get_matches(self, location, query):
|
||||
matches = set([])
|
||||
if query and query.strip():
|
||||
location = location.lower().strip()
|
||||
|
||||
### take care of dates special case
|
||||
if location in ('pubdate', 'date'):
|
||||
if len(query) < 2:
|
||||
return matches
|
||||
relop = None
|
||||
for k in self.search_relops.keys():
|
||||
if query.startswith(k):
|
||||
(p, relop) = self.search_relops[k]
|
||||
query = query[p:]
|
||||
if relop is None:
|
||||
return matches
|
||||
loc = self.FIELD_MAP[{'date':'timestamp', 'pubdate':'pubdate'}[location]]
|
||||
qd = parse_date(query)
|
||||
field_count = query.count('-') + 1
|
||||
for item in self._data:
|
||||
if item is None: continue
|
||||
if relop(item[loc], qd, field_count):
|
||||
matches.add(item[0])
|
||||
return matches
|
||||
|
||||
### everything else
|
||||
matchkind = CONTAINS_MATCH
|
||||
if (len(query) > 1):
|
||||
                if query.startswith('\\'):
                    query = query[1:]
                elif query.startswith('='):
                    matchkind = EQUALS_MATCH
                    query = query[1:]
                elif query.startswith('~'):
                    matchkind = REGEXP_MATCH
                    query = query[1:]
            if matchkind != REGEXP_MATCH: ### leave case in regexps because it can be significant e.g. \S \W \D
                query = query.lower()

            if not isinstance(query, unicode):
                query = query.decode('utf-8')
            if location in ('tag', 'author', 'format', 'comment'):
                location += 's'
            all = ('title', 'authors', 'publisher', 'tags', 'comments', 'series', 'formats', 'isbn', 'rating', 'cover')
            MAP = {}
            for x in all:
                MAP[x] = self.FIELD_MAP[x]
            EXCLUDE_FIELDS = [MAP['rating'], MAP['cover']]
            SPLITABLE_FIELDS = [MAP['authors'], MAP['tags'], MAP['formats']]
            location = [location] if location != 'all' else list(MAP.keys())
            for i, loc in enumerate(location):
                location[i] = MAP[loc]
            try:
                rating_query = int(query) * 2
            except:
                rating_query = None
            for loc in location:
                if loc == MAP['authors']:
                    q = query.replace(',', '|') ### DB stores authors with commas changed to bars, so change query
                else:
                    q = query

                for item in self._data:
                    if item is None: continue
                    if not item[loc]:
                        if query == 'false':
                            if isinstance(item[loc], basestring):
                                if item[loc].strip() != '':
                                    continue
                            matches.add(item[0])
                            continue
                        continue ### item is empty. No possible matches below

                    if q == 'true':
                        if isinstance(item[loc], basestring):
                            if item[loc].strip() == '':
                                continue
                        matches.add(item[0])
                        continue
                    if rating_query and loc == MAP['rating'] and rating_query == int(item[loc]):
                        matches.add(item[0])
                        continue
                    if loc not in EXCLUDE_FIELDS:
                        if loc in SPLITABLE_FIELDS:
                            vals = item[loc].split(',') ### check individual tags/authors/formats, not the long string
                        else:
                            vals = [item[loc]] ### make into list to make _match happy
                        if _match(q, vals, matchkind):
                            matches.add(item[0])
                            continue
        return matches

    def remove(self, id):
        self._data[id] = None
        if id in self._map:
            self._map.remove(id)
        if id in self._map_filtered:
            self._map_filtered.remove(id)

    def set(self, row, col, val, row_is_id=False):
        id = row if row_is_id else self._map_filtered[row]
        self._data[id][col] = val

    def get(self, row, col, row_is_id=False):
        id = row if row_is_id else self._map_filtered[row]
        return self._data[id][col]

    def index(self, id, cache=False):
        x = self._map if cache else self._map_filtered
        return x.index(id)

    def row(self, id):
        return self.index(id)

    def has_id(self, id):
        try:
            return self._data[id] is not None
        except IndexError:
            pass
        return False

    def refresh_ids(self, db, ids):
        '''
        Refresh the data in the cache for books identified by ids.
        Returns a list of affected rows or None if the rows are filtered.
        '''
        for id in ids:
            try:
                self._data[id] = db.conn.get('SELECT * from meta2 WHERE id=?',
                        (id,))[0]
                self._data[id].append(db.has_cover(id, index_is_id=True))
            except IndexError:
                return None
        try:
            return map(self.row, ids)
        except ValueError:
            pass
        return None

    def books_added(self, ids, db):
        if not ids:
            return
        self._data.extend(repeat(None, max(ids)-len(self._data)+2))
        for id in ids:
            self._data[id] = db.conn.get('SELECT * from meta2 WHERE id=?', (id,))[0]
            self._data[id].append(db.has_cover(id, index_is_id=True))
        self._map[0:0] = ids
        self._map_filtered[0:0] = ids

    def books_deleted(self, ids):
        for id in ids:
            self._data[id] = None
            if id in self._map: self._map.remove(id)
            if id in self._map_filtered: self._map_filtered.remove(id)

    def count(self):
        return len(self._map)

    def refresh(self, db, field=None, ascending=True):
        temp = db.conn.get('SELECT * FROM meta2')
        self._data = list(itertools.repeat(None, temp[-1][0]+2)) if temp else []
        for r in temp:
            self._data[r[0]] = r
        for item in self._data:
            if item is not None:
                item.append(db.has_cover(item[0], index_is_id=True))
        self._map = [i[0] for i in self._data if i is not None]
        if field is not None:
            self.sort(field, ascending)
        self._map_filtered = list(self._map)

    def seriescmp(self, x, y):
        try:
            ans = cmp(self._data[x][9].lower(), self._data[y][9].lower())
        except AttributeError: # Some entries may be None
            ans = cmp(self._data[x][9], self._data[y][9])
        if ans != 0: return ans
        return cmp(self._data[x][10], self._data[y][10])

    def cmp(self, loc, x, y, asstr=True, subsort=False):
        try:
            ans = cmp(self._data[x][loc].lower(), self._data[y][loc].lower()) if \
                asstr else cmp(self._data[x][loc], self._data[y][loc])
        except AttributeError: # Some entries may be None
            ans = cmp(self._data[x][loc], self._data[y][loc])
        if subsort and ans == 0:
            return cmp(self._data[x][11].lower(), self._data[y][11].lower())
        return ans

    def sort(self, field, ascending, subsort=False):
        field = field.lower().strip()
        if field in ('author', 'tag', 'comment'):
            field += 's'
        if field == 'date': field = 'timestamp'
        elif field == 'title': field = 'sort'
        elif field == 'authors': field = 'author_sort'
        if self.first_sort:
            subsort = True
            self.first_sort = False
        fcmp = self.seriescmp if field == 'series' else \
            functools.partial(self.cmp, self.FIELD_MAP[field], subsort=subsort,
                    asstr=field not in ('size', 'rating', 'timestamp'))

        self._map.sort(cmp=fcmp, reverse=not ascending)
        self._map_filtered = [id for id in self._map if id in self._map_filtered]

    def search(self, query):
        if not query or not query.strip():
            self._map_filtered = list(self._map)
            return
        matches = sorted(self.parse(query))
        self._map_filtered = [id for id in self._map if id in matches]

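# A minimal illustrative sketch (editorial example with made-up values, not
# part of this commit): the prefix stripped in get_matches() above decides
# how _match(), the module-level helper shown later in this diff, compares
# the query against a field's values.
q = 'calibre'
assert _match(q, ['my calibre book'], CONTAINS_MATCH)    # substring hit
assert not _match(q, ['my calibre book'], EQUALS_MATCH)  # not an exact value
assert _match('^my', ['My Calibre Book'], REGEXP_MATCH)  # unanchored re.search
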
@ -119,7 +119,7 @@ def send_message(msg=''):
def get_parser(usage):
    parser = OptionParser(usage)
    go = parser.add_option_group('GLOBAL OPTIONS')
    go.add_option('--library-path', default=None, help=_('Path to the calibre library. Default is to use the path stored in the settings.'))
    go.add_option('--library-path', '--with-library', default=None, help=_('Path to the calibre library. Default is to use the path stored in the settings.'))

    return parser

@ -129,7 +129,7 @@ def get_db(dbpath, options):
    dbpath = os.path.abspath(dbpath)
    return LibraryDatabase2(dbpath)

def do_list(db, fields, sort_by, ascending, search_text, line_width, separator,
def do_list(db, fields, afields, sort_by, ascending, search_text, line_width, separator,
        prefix, output_format, subtitle='Books in the calibre database'):
    if sort_by:
        db.sort(sort_by, ascending)
@ -138,6 +138,9 @@ def do_list(db, fields, sort_by, ascending, search_text, line_width, separator,
    authors_to_string = output_format in ['stanza', 'text']
    data = db.get_data_as_dict(prefix, authors_as_string=authors_to_string)
    fields = ['id'] + fields
    title_fields = fields
    fields = [db.custom_column_label_map[x[1:]]['num'] if x[0]=='*'
            else x for x in fields]
    if output_format == 'text':
        for f in data:
            fmts = [x for x in f['formats'] if x is not None]
@ -152,7 +155,7 @@ def do_list(db, fields, sort_by, ascending, search_text, line_width, separator,
                record[f] = record[f].replace('\n', ' ')
        for i in data:
            for j, field in enumerate(fields):
                widths[j] = max(widths[j], len(unicode(i[str(field)])))
                widths[j] = max(widths[j], len(unicode(i[field])))

        screen_width = terminal_controller.COLS if line_width < 0 else line_width
        if not screen_width:
@ -171,7 +174,8 @@ def do_list(db, fields, sort_by, ascending, search_text, line_width, separator,
                    break

        widths = list(base_widths)
        titles = map(lambda x, y: '%-*s%s'%(x-len(separator), y, separator), widths, fields)
        titles = map(lambda x, y: '%-*s%s'%(x-len(separator), y, separator),
                widths, title_fields)
        print terminal_controller.GREEN + ''.join(titles)+terminal_controller.NORMAL

        wrappers = map(lambda x: TextWrapper(x-1), widths)
@ -202,7 +206,12 @@ def do_list(db, fields, sort_by, ascending, search_text, line_width, separator,
        return template.generate(id="urn:calibre:main", data=data, subtitle=subtitle,
                sep=os.sep, quote=quote, updated=db.last_modified()).render('xml')

def list_option_parser():
def list_option_parser(db=None):
    fields = set(FIELDS)
    if db is not None:
        for f in db.custom_column_label_map:
            fields.add('*'+f)

    parser = get_parser(_(
'''\
%prog list [options]
@ -211,7 +220,12 @@ List the books available in the calibre database.
'''
))
    parser.add_option('-f', '--fields', default='title,authors',
                      help=_('The fields to display when listing books in the database. Should be a comma separated list of fields.\nAvailable fields: %s\nDefault: %%default. The special field "all" can be used to select all fields. Only has effect in the text output format.')%','.join(FIELDS))
                      help=_('The fields to display when listing books in the'
                          ' database. Should be a comma separated list of'
                          ' fields.\nAvailable fields: %s\nDefault: %%default. The'
                          ' special field "all" can be used to select all fields.'
                          ' Only has effect in the text output'
                          ' format.')%','.join(sorted(fields)))
    parser.add_option('--sort-by', default=None,
                      help=_('The field by which to sort the results.\nAvailable fields: %s\nDefault: %%default')%','.join(FIELDS))
    parser.add_option('--ascending', default=False, action='store_true',
@ -229,25 +243,35 @@ List the books available in the calibre database.


def command_list(args, dbpath):
    parser = list_option_parser()
    pre = get_parser('')
    pargs = [x for x in args if x in ('--with-library', '--library-path')
            or not x.startswith('-')]
    opts = pre.parse_args(sys.argv[:1] + pargs)[0]
    db = get_db(dbpath, opts)
    parser = list_option_parser(db=db)
    opts, args = parser.parse_args(sys.argv[:1] + args)
    afields = set(FIELDS)
    if db is not None:
        for f in db.custom_column_label_map:
            afields.add('*'+f)
    fields = [str(f.strip().lower()) for f in opts.fields.split(',')]
    if 'all' in fields:
        fields = sorted(list(FIELDS))
    if not set(fields).issubset(FIELDS):
        fields = sorted(list(afields))
    if not set(fields).issubset(afields):
        parser.print_help()
        print
        print >>sys.stderr, _('Invalid fields. Available fields:'), ','.join(sorted(FIELDS))
        prints(_('Invalid fields. Available fields:'),
                ','.join(sorted(afields)), file=sys.stderr)
        return 1

    db = get_db(dbpath, opts)
    if not opts.sort_by in FIELDS and opts.sort_by is not None:
    if not opts.sort_by in afields and opts.sort_by is not None:
        parser.print_help()
        print
        print >>sys.stderr, _('Invalid sort field. Available fields:'), ','.join(FIELDS)
        prints(_('Invalid sort field. Available fields:'), ','.join(afields),
                file=sys.stderr)
        return 1

    print do_list(db, fields, opts.sort_by, opts.ascending, opts.search, opts.line_width, opts.separator,
    print do_list(db, fields, afields, opts.sort_by, opts.ascending, opts.search, opts.line_width, opts.separator,
                  opts.prefix, opts.output_format)
    return 0

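# A sketch of why command_list() above now parses twice (made-up arguments):
# the library path has to be known before list_option_parser() can offer the
# '*label' custom fields stored in that particular library.
args = ['-f', 'title,*genre', '--library-path', '/books']
pargs = [x for x in args if x in ('--with-library', '--library-path')
        or not x.startswith('-')]
# pargs == ['title,*genre', '--library-path', '/books']; the pre-parser
# ignores the stray positional and keeps only the library location.
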
@ -589,6 +613,44 @@ def command_export(args, dbpath):
    do_export(get_db(dbpath, opts), ids, dir, opts)
    return 0

def do_add_custom_column(db, label, name, datatype, is_multiple, display):
    num = db.create_custom_column(label, name, datatype, is_multiple, display=display)
    prints('Custom column created with id: %d'%num)

def add_custom_column_option_parser():
    from calibre.library.custom_columns import CustomColumns
    parser = get_parser(_('''\
%prog add_custom_column [options] label name datatype

Create a custom column. label is the machine friendly name of the column. Should
not contain spaces or colons. name is the human friendly name of the column.
datatype is one of: {0}
''').format(', '.join(CustomColumns.CUSTOM_DATA_TYPES)))

    parser.add_option('--is-multiple', default=False, action='store_true',
                    help=_('This column stores tag like data (i.e. '
                        'multiple comma separated values). Only '
                        'applies if datatype is text.'))
    parser.add_option('--display', default='{}',
                    help=_('A dictionary of options to customize how '
                        'the data in this column will be interpreted.'))

    return parser


def command_add_custom_column(args, dbpath):
    import json
    parser = add_custom_column_option_parser()
    opts, args = parser.parse_args(args)
    if len(args) < 3:
        parser.print_help()
        print
        print >>sys.stderr, _('You must specify label, name and datatype')
        return 1
    do_add_custom_column(get_db(dbpath, opts), args[0], args[1], args[2],
            opts.is_multiple, json.loads(opts.display))
    return 0

def catalog_option_parser(args):
    from calibre.customize.ui import available_catalog_formats, plugin_for_catalog_format
    from calibre.utils.logging import Log
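# For illustration (hypothetical column, assuming db is an opened
# LibraryDatabase2 instance): what command_add_custom_column() does
# amounts to
num = db.create_custom_column('genre', 'Genre', 'text', True, display={})
# which is reachable from the command line as
#   calibredb add_custom_column --is-multiple genre Genre text
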
@ -693,8 +755,107 @@ def command_catalog(args, dbpath):

# end of GR additions

def do_set_custom(db, col, id_, val, append):
    db.set_custom(id_, val, label=col, append=append)
    prints('Data set to: %r'%db.get_custom(id_, label=col, index_is_id=True))

def set_custom_option_parser():
    parser = get_parser(_(
'''
%prog set_custom [options] column id value

Set the value of a custom column for the book identified by id.
You can get a list of ids using the list command.
You can get a list of custom column names using the custom_columns
command.
'''))

    parser.add_option('-a', '--append', default=False, action='store_true',
                    help=_('If the column stores multiple values, append the specified '
                        'values to the existing ones, instead of replacing them.'))
    return parser


def command_set_custom(args, dbpath):
    parser = set_custom_option_parser()
    opts, args = parser.parse_args(args)
    if len(args) < 3:
        parser.print_help()
        print
        print >>sys.stderr, _('Error: You must specify a field name, id and value')
        return 1
    do_set_custom(get_db(dbpath, opts), args[0], int(args[1]), args[2],
            opts.append)
    return 0

def do_custom_columns(db, details):
    from pprint import pformat
    cols = db.custom_column_label_map
    for col, data in cols.items():
        if details:
            prints(col)
            print
            prints(pformat(data))
            print '\n'
        else:
            prints(col, '(%d)'%data['num'])

def custom_columns_option_parser():
    parser = get_parser(_(
'''
%prog custom_columns [options]

List available custom columns. Shows column labels and ids.
'''))
    parser.add_option('-d', '--details', default=False, action='store_true',
                    help=_('Show details for each column.'))
    return parser


def command_custom_columns(args, dbpath):
    parser = custom_columns_option_parser()
    opts, args = parser.parse_args(args)
    do_custom_columns(get_db(dbpath, opts), opts.details)
    return 0

def do_remove_custom_column(db, label, force):
    if not force:
        q = raw_input(_('You will lose all data in the column: %r.'
            ' Are you sure (y/n)? ')%label)
        if q.lower().strip() != 'y':
            return
    db.delete_custom_column(label=label)
    prints('Column %r removed.'%label)

def remove_custom_column_option_parser():
    parser = get_parser(_(
'''
%prog remove_custom_column [options] label

Remove the custom column identified by label. You can see available
columns with the custom_columns command.
'''))
    parser.add_option('-f', '--force', default=False, action='store_true',
            help=_('Do not ask for confirmation'))
    return parser


def command_remove_custom_column(args, dbpath):
    parser = remove_custom_column_option_parser()
    opts, args = parser.parse_args(args)
    if len(args) < 1:
        parser.print_help()
        print
        prints(_('Error: You must specify a column label'), file=sys.stderr)
        return 1

    do_remove_custom_column(get_db(dbpath, opts), args[0], opts.force)
    return 0


COMMANDS = ('list', 'add', 'remove', 'add_format', 'remove_format',
            'show_metadata', 'set_metadata', 'export', 'catalog')
            'show_metadata', 'set_metadata', 'export', 'catalog',
            'add_custom_column', 'custom_columns', 'remove_custom_column', 'set_custom')


def option_parser():
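# Hypothetical command lines exercising the four subcommands added to
# COMMANDS above (the column label and book id are made up):
#   calibredb custom_columns --details
#   calibredb set_custom genre 123 'Science Fiction'
#   calibredb set_custom --append genre 123 'Space Opera'
#   calibredb remove_custom_column --force genre
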
439
src/calibre/library/custom_columns.py
Normal file
@ -0,0 +1,439 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement

__license__   = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import json
from functools import partial

from calibre import prints
from calibre.constants import preferred_encoding
from calibre.utils.date import parse_date

class CustomColumns(object):

    CUSTOM_DATA_TYPES = frozenset(['rating', 'text', 'comments', 'datetime',
        'int', 'float', 'bool'])

    def custom_table_names(self, num):
        return 'custom_column_%d'%num, 'books_custom_column_%d_link'%num

    @property
    def custom_tables(self):
        return set([x[0] for x in self.conn.get(
            'SELECT name FROM sqlite_master WHERE type="table" AND '
            '(name GLOB "custom_column_*" OR name GLOB "books_custom_column_*")')])


    def __init__(self):
        # Delete marked custom columns
        for record in self.conn.get(
                'SELECT id FROM custom_columns WHERE mark_for_delete=1'):
            num = record[0]
            table, lt = self.custom_table_names(num)
            self.conn.executescript('''\
                    DROP INDEX IF EXISTS {table}_idx;
                    DROP INDEX IF EXISTS {lt}_aidx;
                    DROP INDEX IF EXISTS {lt}_bidx;
                    DROP TRIGGER IF EXISTS fkc_update_{lt}_a;
                    DROP TRIGGER IF EXISTS fkc_update_{lt}_b;
                    DROP TRIGGER IF EXISTS fkc_insert_{lt};
                    DROP TRIGGER IF EXISTS fkc_delete_{lt};
                    DROP TRIGGER IF EXISTS fkc_insert_{table};
                    DROP TRIGGER IF EXISTS fkc_delete_{table};
                    DROP VIEW IF EXISTS tag_browser_{table};
                    DROP TABLE IF EXISTS {table};
                    DROP TABLE IF EXISTS {lt};
                    '''.format(table=table, lt=lt)
            )
        self.conn.execute('DELETE FROM custom_columns WHERE mark_for_delete=1')
        self.conn.commit()

        # Load metadata for custom columns
        self.custom_column_label_map, self.custom_column_num_map = {}, {}
        triggers = []
        remove = []
        custom_tables = self.custom_tables
        for record in self.conn.get(
                'SELECT label,name,datatype,editable,display,normalized,id,is_multiple FROM custom_columns'):
            data = {
                    'label':record[0],
                    'name':record[1],
                    'datatype':record[2],
                    'editable':record[3],
                    'display':json.loads(record[4]),
                    'normalized':record[5],
                    'num':record[6],
                    'is_multiple':record[7],
                    }
            table, lt = self.custom_table_names(data['num'])
            if table not in custom_tables or (data['normalized'] and lt not in
                    custom_tables):
                remove.append(data)
                continue

            self.custom_column_label_map[data['label']] = data['num']
            self.custom_column_num_map[data['num']] = \
                self.custom_column_label_map[data['label']] = data

            # Create Foreign Key triggers
            if data['normalized']:
                trigger = 'DELETE FROM %s WHERE book=OLD.id;'%lt
            else:
                trigger = 'DELETE FROM %s WHERE book=OLD.id;'%table
            triggers.append(trigger)

        if remove:
            for data in remove:
                prints('WARNING: Custom column %r not found, removing.' %
                        data['label'])
                self.conn.execute('DELETE FROM custom_columns WHERE id=?',
                        (data['num'],))
            self.conn.commit()

        if triggers:
            self.conn.execute('''\
                CREATE TEMP TRIGGER custom_books_delete_trg
                    AFTER DELETE ON books
                    BEGIN
                    %s
                    END;
                '''%(' \n'.join(triggers)))
            self.conn.commit()

        # Setup data adapters
        def adapt_text(x, d):
            if d['is_multiple']:
                if x is None:
                    return []
                if isinstance(x, (str, unicode, bytes)):
                    x = x.split(',')
                x = [y.strip() for y in x if y.strip()]
                x = [y.decode(preferred_encoding, 'replace') if not isinstance(y,
                    unicode) else y for y in x]
                return [u' '.join(y.split()) for y in x]
            else:
                return x if x is None or isinstance(x, unicode) else \
                        x.decode(preferred_encoding, 'replace')

        def adapt_datetime(x, d):
            if isinstance(x, (str, unicode, bytes)):
                x = parse_date(x, assume_utc=False, as_utc=False)
            return x

        def adapt_bool(x, d):
            if isinstance(x, (str, unicode, bytes)):
                x = bool(int(x))
            return x

        self.custom_data_adapters = {
                'float': lambda x,d : x if x is None else float(x),
                'int':   lambda x,d : x if x is None else int(x),
                'rating':lambda x,d : x if x is None else min(10., max(0., float(x))),
                'bool':  adapt_bool,
                'comments': lambda x,d: adapt_text(x, {'is_multiple':False}),
                'datetime' : adapt_datetime,
                'text':adapt_text
        }

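    # Illustrative behaviour of the adapters registered above, assuming cc
    # is a constructed CustomColumns instance (values are made up):
    #   cc.custom_data_adapters['text']('SF, Space Opera', {'is_multiple': True})
    #       -> [u'SF', u'Space Opera']
    #   cc.custom_data_adapters['rating'](12, None)  -> 10.0 (clamped to 0..10)
    #   cc.custom_data_adapters['bool']('0', None)   -> False
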
    def get_custom(self, idx, label=None, num=None, index_is_id=False):
        if label is not None:
            data = self.custom_column_label_map[label]
        if num is not None:
            data = self.custom_column_num_map[num]
        row = self.data._data[idx] if index_is_id else self.data[idx]
        ans = row[self.FIELD_MAP[data['num']]]
        if data['is_multiple'] and data['datatype'] == 'text':
            ans = ans.split('|') if ans else []
            if data['display'].get('sort_alpha', False):
                ans.sort(cmp=lambda x,y:cmp(x.lower(), y.lower()))
        return ans

    def all_custom(self, label=None, num=None):
        if label is not None:
            data = self.custom_column_label_map[label]
        if num is not None:
            data = self.custom_column_num_map[num]
        table, lt = self.custom_table_names(data['num'])
        if data['normalized']:
            ans = self.conn.get('SELECT value FROM %s'%table)
        else:
            ans = self.conn.get('SELECT DISTINCT value FROM %s'%table)
        ans = set([x[0] for x in ans])
        return ans

    def delete_custom_column(self, label=None, num=None):
        data = None
        if label is not None:
            data = self.custom_column_label_map[label]
        if num is not None:
            data = self.custom_column_num_map[num]
        if data is None:
            raise ValueError('No such column')
        self.conn.execute(
                'UPDATE custom_columns SET mark_for_delete=1 WHERE id=?',
                (data['num'],))
        self.conn.commit()

    def set_custom_column_metadata(self, num, name=None, label=None,
            is_editable=None, display=None):
        changed = False
        if name is not None:
            self.conn.execute('UPDATE custom_columns SET name=? WHERE id=?',
                    (name, num))
            changed = True
        if label is not None:
            self.conn.execute('UPDATE custom_columns SET label=? WHERE id=?',
                    (label, num))
            changed = True
        if is_editable is not None:
            self.conn.execute('UPDATE custom_columns SET is_editable=? WHERE id=?',
                    (bool(is_editable), num))
            self.custom_column_num_map[num]['is_editable'] = bool(is_editable)
            changed = True
        if display is not None:
            self.conn.execute('UPDATE custom_columns SET display=? WHERE id=?',
                    (json.dumps(display), num))
            changed = True

        if changed:
            self.conn.commit()
        return changed


    def set_custom(self, id_, val, label=None, num=None, append=False, notify=True):
        if label is not None:
            data = self.custom_column_label_map[label]
        if num is not None:
            data = self.custom_column_num_map[num]
        if not data['editable']:
            raise ValueError('Column %r is not editable'%data['label'])
        table, lt = self.custom_table_names(data['num'])
        getter = partial(self.get_custom, id_, num=data['num'],
                index_is_id=True)
        val = self.custom_data_adapters[data['datatype']](val, data)

        if data['normalized']:
            if not append or not data['is_multiple']:
                self.conn.execute('DELETE FROM %s WHERE book=?'%lt, (id_,))
                self.conn.execute(
                    '''DELETE FROM %s WHERE (SELECT COUNT(id) FROM %s WHERE
                        value=%s.id) < 1''' % (table, lt, table))
                self.data._data[id_][self.FIELD_MAP[data['num']]] = None
            set_val = val if data['is_multiple'] else [val]
            existing = getter()
            if not existing:
                existing = []
            for x in set(set_val) - set(existing):
                if x is None:
                    continue
                existing = list(self.all_custom(num=data['num']))
                lx = [t.lower() if hasattr(t, 'lower') else t for t in existing]
                try:
                    idx = lx.index(x.lower() if hasattr(x, 'lower') else x)
                except ValueError:
                    idx = -1
                if idx > -1:
                    ex = existing[idx]
                    xid = self.conn.get(
                        'SELECT id FROM %s WHERE value=?'%table, (ex,), all=False)
                    if ex != x:
                        self.conn.execute(
                            'UPDATE %s SET value=? WHERE id=?'%table, (x, xid))
                else:
                    xid = self.conn.execute(
                        'INSERT INTO %s(value) VALUES(?)'%table, (x,)).lastrowid
                if not self.conn.get(
                    'SELECT book FROM %s WHERE book=? AND value=?'%lt,
                        (id_, xid), all=False):
                    self.conn.execute(
                        'INSERT INTO %s(book, value) VALUES (?,?)'%lt,
                        (id_, xid))
            self.conn.commit()
            nval = self.conn.get(
                    'SELECT custom_%s FROM meta2 WHERE id=?'%data['num'],
                    (id_,), all=False)
            self.data.set(id_, self.FIELD_MAP[data['num']], nval,
                    row_is_id=True)
        else:
            self.conn.execute('DELETE FROM %s WHERE book=?'%table, (id_,))
            if val is not None:
                self.conn.execute(
                    'INSERT INTO %s(book,value) VALUES (?,?)'%table,
                    (id_, val))
            self.conn.commit()
            nval = self.conn.get(
                    'SELECT custom_%s FROM meta2 WHERE id=?'%data['num'],
                    (id_,), all=False)
            self.data.set(id_, self.FIELD_MAP[data['num']], nval,
                    row_is_id=True)
        if notify:
            self.notify('metadata', [id_])
        return nval

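    # Illustrative use of set_custom() on a multi-valued text column
    # labelled 'genre' (hypothetical label and book id; ordering of the
    # result depends on the column's sort_alpha display option):
    #   db.set_custom(123, ['SF', 'Space Opera'], label='genre')
    #   db.set_custom(123, 'Argentina', label='genre', append=True)
    #   db.get_custom(123, label='genre', index_is_id=True)
    #       -> [u'SF', u'Space Opera', u'Argentina']
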
    def clean_custom(self):
        st = ('DELETE FROM {table} WHERE (SELECT COUNT(id) FROM {lt} WHERE'
                ' {lt}.value={table}.id) < 1;')
        statements = []
        for data in self.custom_column_num_map.values():
            if data['normalized']:
                table, lt = self.custom_table_names(data['num'])
                statements.append(st.format(lt=lt, table=table))
        if statements:
            self.conn.executescript(' \n'.join(statements))
            self.conn.commit()

    def custom_columns_in_meta(self):
        lines = {}
        for data in self.custom_column_label_map.values():
            display = data['display']
            table, lt = self.custom_table_names(data['num'])
            if data['normalized']:
                query = '%s.value'
                if data['is_multiple']:
                    query = 'group_concat(%s.value, "|")'
                    if not display.get('sort_alpha', False):
                        query = 'sort_concat(link.id, %s.value)'
                line = '''(SELECT {query} FROM {lt} AS link INNER JOIN
                    {table} ON(link.value={table}.id) WHERE link.book=books.id)
                    custom_{num}
                '''.format(query=query%table, lt=lt, table=table, num=data['num'])
            else:
                line = '''
                (SELECT value FROM {table} WHERE book=books.id) custom_{num}
                '''.format(table=table, num=data['num'])
            lines[data['num']] = line
        return lines

    def create_custom_column(self, label, name, datatype, is_multiple,
            editable=True, display={}):
        if datatype not in self.CUSTOM_DATA_TYPES:
            raise ValueError('%r is not a supported data type'%datatype)
        normalized = datatype not in ('datetime', 'comments', 'int', 'bool',
                'float')
        is_multiple = is_multiple and datatype in ('text',)
        num = self.conn.execute(
                ('INSERT INTO '
                'custom_columns(label,name,datatype,is_multiple,editable,display,normalized)'
                'VALUES (?,?,?,?,?,?,?)'),
                (label, name, datatype, is_multiple, editable,
                    json.dumps(display), normalized)).lastrowid

        if datatype in ('rating', 'int'):
            dt = 'INT'
        elif datatype in ('text', 'comments'):
            dt = 'TEXT'
        elif datatype in ('float',):
            dt = 'REAL'
        elif datatype == 'datetime':
            dt = 'timestamp'
        elif datatype == 'bool':
            dt = 'BOOL'
        collate = 'COLLATE NOCASE' if dt == 'TEXT' else ''
        table, lt = self.custom_table_names(num)
        if normalized:
            lines = [
                '''\
                CREATE TABLE %s(
                    id    INTEGER PRIMARY KEY AUTOINCREMENT,
                    value %s NOT NULL %s,
                    UNIQUE(value));
                '''%(table, dt, collate),

                'CREATE INDEX %s_idx ON %s (value %s);'%(table, table, collate),

                '''\
                CREATE TABLE %s(
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    book INTEGER NOT NULL,
                    value INTEGER NOT NULL,
                    UNIQUE(book, value)
                    );'''%lt,

                'CREATE INDEX %s_aidx ON %s (value);'%(lt,lt),
                'CREATE INDEX %s_bidx ON %s (book);'%(lt,lt),

                '''\
                CREATE TRIGGER fkc_update_{lt}_a
                        BEFORE UPDATE OF book ON {lt}
                        BEGIN
                            SELECT CASE
                                WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
                                THEN RAISE(ABORT, 'Foreign key violation: book not in books')
                            END;
                        END;
                CREATE TRIGGER fkc_update_{lt}_b
                        BEFORE UPDATE OF value ON {lt}
                        BEGIN
                            SELECT CASE
                                WHEN (SELECT id from {table} WHERE id=NEW.value) IS NULL
                                THEN RAISE(ABORT, 'Foreign key violation: value not in {table}')
                            END;
                        END;
                CREATE TRIGGER fkc_insert_{lt}
                        BEFORE INSERT ON {lt}
                        BEGIN
                            SELECT CASE
                                WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
                                THEN RAISE(ABORT, 'Foreign key violation: book not in books')
                                WHEN (SELECT id from {table} WHERE id=NEW.value) IS NULL
                                THEN RAISE(ABORT, 'Foreign key violation: value not in {table}')
                            END;
                        END;
                CREATE TRIGGER fkc_delete_{lt}
                        AFTER DELETE ON {table}
                        BEGIN
                            DELETE FROM {lt} WHERE value=OLD.id;
                        END;

                CREATE VIEW tag_browser_{table} AS SELECT
                    id,
                    value,
                    (SELECT COUNT(id) FROM {lt} WHERE value={table}.id) count
                FROM {table};

                '''.format(lt=lt, table=table),

                ]
        else:
            lines = [
                '''\
                CREATE TABLE %s(
                    id    INTEGER PRIMARY KEY AUTOINCREMENT,
                    book  INTEGER,
                    value %s NOT NULL %s,
                    UNIQUE(book));
                '''%(table, dt, collate),

                'CREATE INDEX %s_idx ON %s (book);'%(table, table),

                '''\
                CREATE TRIGGER fkc_insert_{table}
                        BEFORE INSERT ON {table}
                        BEGIN
                            SELECT CASE
                                WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
                                THEN RAISE(ABORT, 'Foreign key violation: book not in books')
                            END;
                        END;
                CREATE TRIGGER fkc_update_{table}
                        BEFORE UPDATE OF book ON {table}
                        BEGIN
                            SELECT CASE
                                WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
                                THEN RAISE(ABORT, 'Foreign key violation: book not in books')
                            END;
                        END;
                '''.format(table=table),
                ]

        script = ' \n'.join(lines)
        self.conn.executescript(script)
        self.conn.commit()
        return num

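# A sketch of the objects create_custom_column() produces for a normalized,
# tag-like text column (hypothetical label; num depends on the library):
#   num = db.create_custom_column('keywords', 'Keywords', 'text', True)
# For num == 1 the script above creates in metadata.db:
#   custom_column_1            (id, value TEXT COLLATE NOCASE, UNIQUE(value))
#   books_custom_column_1_link (id, book, value, UNIQUE(book, value))
# plus the fkc_* triggers and the tag_browser_custom_column_1 view.
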
@ -6,11 +6,9 @@ __docformat__ = 'restructuredtext en'
'''
The database used to store ebook metadata
'''
import os, re, sys, shutil, cStringIO, glob, collections, textwrap, \
        itertools, functools, traceback
import os, sys, shutil, cStringIO, glob, functools, traceback
from itertools import repeat
from math import floor
from PyQt4.QtCore import QThread, QReadWriteLock
try:
    from PIL import Image as PILImage
    PILImage
@ -22,8 +20,10 @@ from PyQt4.QtGui import QImage

from calibre.ebooks.metadata import title_sort
from calibre.library.database import LibraryDatabase
from calibre.library.schema_upgrades import SchemaUpgrade
from calibre.library.caches import ResultCache
from calibre.library.custom_columns import CustomColumns
from calibre.library.sqlite import connect, IntegrityError, DBThread
from calibre.utils.search_query_parser import SearchQueryParser
from calibre.ebooks.metadata import string_to_authors, authors_to_string, \
        MetaInformation, authors_to_sort_string
from calibre.ebooks.metadata.meta import get_metadata, metadata_from_formats
@ -32,7 +32,7 @@ from calibre.ptempfile import PersistentTemporaryFile
from calibre.customize.ui import run_plugins_on_import

from calibre.utils.filenames import ascii_filename
from calibre.utils.date import utcnow, now as nowf, utcfromtimestamp, parse_date
from calibre.utils.date import utcnow, now as nowf, utcfromtimestamp
from calibre.ebooks import BOOK_EXTENSIONS, check_ebook_format

if iswindows:
@ -56,423 +56,6 @@ def delete_tree(path, permanent=False):

copyfile = os.link if hasattr(os, 'link') else shutil.copyfile

FIELD_MAP = {'id':0, 'title':1, 'authors':2, 'publisher':3, 'rating':4, 'timestamp':5,
             'size':6, 'tags':7, 'comments':8, 'series':9, 'series_index':10,
             'sort':11, 'author_sort':12, 'formats':13, 'isbn':14, 'path':15,
             'lccn':16, 'pubdate':17, 'flags':18, 'uuid':19, 'cover':20}
INDEX_MAP = dict(zip(FIELD_MAP.values(), FIELD_MAP.keys()))


class CoverCache(QThread):

    def __init__(self, library_path, parent=None):
        QThread.__init__(self, parent)
        self.library_path = library_path
        self.id_map = None
        self.id_map_lock = QReadWriteLock()
        self.load_queue = collections.deque()
        self.load_queue_lock = QReadWriteLock(QReadWriteLock.Recursive)
        self.cache = {}
        self.cache_lock = QReadWriteLock()
        self.id_map_stale = True
        self.keep_running = True

    def build_id_map(self):
        self.id_map_lock.lockForWrite()
        self.id_map = {}
        for f in glob.glob(os.path.join(self.library_path, '*', '* (*)', 'cover.jpg')):
            c = os.path.basename(os.path.dirname(f))
            try:
                id = int(re.search(r'\((\d+)\)', c[c.rindex('('):]).group(1))
                self.id_map[id] = f
            except:
                continue
        self.id_map_lock.unlock()
        self.id_map_stale = False


    def set_cache(self, ids):
        self.cache_lock.lockForWrite()
        already_loaded = set([])
        for id in self.cache.keys():
            if id in ids:
                already_loaded.add(id)
            else:
                self.cache.pop(id)
        self.cache_lock.unlock()
        ids = [i for i in ids if i not in already_loaded]
        self.load_queue_lock.lockForWrite()
        self.load_queue = collections.deque(ids)
        self.load_queue_lock.unlock()


    def run(self):
        while self.keep_running:
            if self.id_map is None or self.id_map_stale:
                self.build_id_map()
            while True: # Load images from the load queue
                self.load_queue_lock.lockForWrite()
                try:
                    id = self.load_queue.popleft()
                except IndexError:
                    break
                finally:
                    self.load_queue_lock.unlock()

                self.cache_lock.lockForRead()
                need = True
                if id in self.cache.keys():
                    need = False
                self.cache_lock.unlock()
                if not need:
                    continue
                path = None
                self.id_map_lock.lockForRead()
                if id in self.id_map.keys():
                    path = self.id_map[id]
                else:
                    self.id_map_stale = True
                self.id_map_lock.unlock()
                if path and os.access(path, os.R_OK):
                    try:
                        img = QImage()
                        data = open(path, 'rb').read()
                        img.loadFromData(data)
                        if img.isNull():
                            continue
                    except:
                        continue
                    self.cache_lock.lockForWrite()
                    self.cache[id] = img
                    self.cache_lock.unlock()

            self.sleep(1)

    def stop(self):
        self.keep_running = False

    def cover(self, id):
        val = None
        if self.cache_lock.tryLockForRead(50):
            val = self.cache.get(id, None)
            self.cache_lock.unlock()
        return val

    def clear_cache(self):
        self.cache_lock.lockForWrite()
        self.cache = {}
        self.cache_lock.unlock()

    def refresh(self, ids):
        self.cache_lock.lockForWrite()
        for id in ids:
            self.cache.pop(id, None)
        self.cache_lock.unlock()
        self.load_queue_lock.lockForWrite()
        for id in ids:
            self.load_queue.appendleft(id)
        self.load_queue_lock.unlock()

### Global utility function for get_match here and in gui2/library.py
CONTAINS_MATCH = 0
EQUALS_MATCH = 1
REGEXP_MATCH = 2
def _match(query, value, matchkind):
    for t in value:
        t = t.lower()
        try: ### ignore regexp exceptions, required because search-ahead tries before typing is finished
            if ((matchkind == EQUALS_MATCH and query == t) or
                (matchkind == REGEXP_MATCH and re.search(query, t, re.I)) or ### search unanchored
                (matchkind == CONTAINS_MATCH and query in t)):
                return True
        except re.error:
            pass
    return False

class ResultCache(SearchQueryParser):

    '''
    Stores sorted and filtered metadata in memory.
    '''

    def build_relop_dict(self):
        '''
        Because the database dates have time in them, we can't use direct
        comparisons even when field_count == 3. The query has time = 0, but
        the database object has time == something. As such, a complete compare
        will almost never be correct.
        '''
        def relop_eq(db, query, field_count):
            if db.year == query.year:
                if field_count == 1:
                    return True
                if db.month == query.month:
                    if field_count == 2:
                        return True
                    return db.day == query.day
            return False

        def relop_gt(db, query, field_count):
            if db.year > query.year:
                return True
            if field_count > 1 and db.year == query.year:
                if db.month > query.month:
                    return True
                return field_count == 3 and db.month == query.month and db.day > query.day
            return False

        def relop_lt(db, query, field_count):
            if db.year < query.year:
                return True
            if field_count > 1 and db.year == query.year:
                if db.month < query.month:
                    return True
                return field_count == 3 and db.month == query.month and db.day < query.day
            return False

        def relop_ne(db, query, field_count):
            return not relop_eq(db, query, field_count)

        def relop_ge(db, query, field_count):
            return not relop_lt(db, query, field_count)

        def relop_le(db, query, field_count):
            return not relop_gt(db, query, field_count)

        self.search_relops = {'=':[1, relop_eq], '>':[1, relop_gt], '<':[1, relop_lt], \
                '!=':[2, relop_ne], '>=':[2, relop_ge], '<=':[2, relop_le]}

    def __init__(self):
        self._map = self._map_filtered = self._data = []
        self.first_sort = True
        SearchQueryParser.__init__(self)
        self.build_relop_dict()

    def __getitem__(self, row):
        return self._data[self._map_filtered[row]]

    def __len__(self):
        return len(self._map_filtered)

    def __iter__(self):
        for id in self._map_filtered:
            yield self._data[id]

    def universal_set(self):
        return set([i[0] for i in self._data if i is not None])

    def get_matches(self, location, query):
        matches = set([])
        if query and query.strip():
            location = location.lower().strip()

            ### take care of dates special case
            if location in ('pubdate', 'date'):
                if len(query) < 2:
                    return matches
                relop = None
                for k in self.search_relops.keys():
                    if query.startswith(k):
                        (p, relop) = self.search_relops[k]
                        query = query[p:]
                if relop is None:
                    return matches
                loc = FIELD_MAP[{'date':'timestamp', 'pubdate':'pubdate'}[location]]
                qd = parse_date(query)
                field_count = query.count('-') + 1
                for item in self._data:
                    if item is None: continue
                    if relop(item[loc], qd, field_count):
                        matches.add(item[0])
                return matches

            ### everything else
            matchkind = CONTAINS_MATCH
            if (len(query) > 1):
                if query.startswith('\\'):
                    query = query[1:]
                elif query.startswith('='):
                    matchkind = EQUALS_MATCH
                    query = query[1:]
                elif query.startswith('~'):
                    matchkind = REGEXP_MATCH
                    query = query[1:]
            if matchkind != REGEXP_MATCH: ### leave case in regexps because it can be significant e.g. \S \W \D
                query = query.lower()

            if not isinstance(query, unicode):
                query = query.decode('utf-8')
            if location in ('tag', 'author', 'format', 'comment'):
                location += 's'
            all = ('title', 'authors', 'publisher', 'tags', 'comments', 'series', 'formats', 'isbn', 'rating', 'cover')
            MAP = {}
            for x in all:
                MAP[x] = FIELD_MAP[x]
            EXCLUDE_FIELDS = [MAP['rating'], MAP['cover']]
            SPLITABLE_FIELDS = [MAP['authors'], MAP['tags'], MAP['formats']]
            location = [location] if location != 'all' else list(MAP.keys())
            for i, loc in enumerate(location):
                location[i] = MAP[loc]
            try:
                rating_query = int(query) * 2
            except:
                rating_query = None
            for loc in location:
                if loc == MAP['authors']:
                    q = query.replace(',', '|'); ### DB stores authors with commas changed to bars, so change query
                else:
                    q = query

                for item in self._data:
                    if item is None: continue
                    if not item[loc]:
                        if query == 'false':
                            if isinstance(item[loc], basestring):
                                if item[loc].strip() != '':
                                    continue
                            matches.add(item[0])
                            continue
                        continue ### item is empty. No possible matches below

                    if q == 'true':
                        if isinstance(item[loc], basestring):
                            if item[loc].strip() == '':
                                continue
                        matches.add(item[0])
                        continue
                    if rating_query and loc == MAP['rating'] and rating_query == int(item[loc]):
                        matches.add(item[0])
                        continue
                    if loc not in EXCLUDE_FIELDS:
                        if loc in SPLITABLE_FIELDS:
                            vals = item[loc].split(',') ### check individual tags/authors/formats, not the long string
                        else:
                            vals = [item[loc]] ### make into list to make _match happy
                        if _match(q, vals, matchkind):
                            matches.add(item[0])
                            continue
        return matches

    def remove(self, id):
        self._data[id] = None
        if id in self._map:
            self._map.remove(id)
        if id in self._map_filtered:
            self._map_filtered.remove(id)

    def set(self, row, col, val, row_is_id=False):
        id = row if row_is_id else self._map_filtered[row]
        self._data[id][col] = val

    def get(self, row, col, row_is_id=False):
        id = row if row_is_id else self._map_filtered[row]
        return self._data[id][col]

    def index(self, id, cache=False):
        x = self._map if cache else self._map_filtered
        return x.index(id)

    def row(self, id):
        return self.index(id)

    def has_id(self, id):
        try:
            return self._data[id] is not None
        except IndexError:
            pass
        return False

    def refresh_ids(self, db, ids):
        '''
        Refresh the data in the cache for books identified by ids.
        Returns a list of affected rows or None if the rows are filtered.
        '''
        for id in ids:
            try:
                self._data[id] = db.conn.get('SELECT * from meta WHERE id=?',
                        (id,))[0]
                self._data[id].append(db.has_cover(id, index_is_id=True))
            except IndexError:
                return None
        try:
            return map(self.row, ids)
        except ValueError:
            pass
        return None

    def books_added(self, ids, db):
        if not ids:
            return
        self._data.extend(repeat(None, max(ids)-len(self._data)+2))
        for id in ids:
            self._data[id] = db.conn.get('SELECT * from meta WHERE id=?', (id,))[0]
            self._data[id].append(db.has_cover(id, index_is_id=True))
        self._map[0:0] = ids
        self._map_filtered[0:0] = ids

    def books_deleted(self, ids):
        for id in ids:
            self._data[id] = None
            if id in self._map: self._map.remove(id)
            if id in self._map_filtered: self._map_filtered.remove(id)

    def count(self):
        return len(self._map)

    def refresh(self, db, field=None, ascending=True):
        temp = db.conn.get('SELECT * FROM meta')
        self._data = list(itertools.repeat(None, temp[-1][0]+2)) if temp else []
        for r in temp:
            self._data[r[0]] = r
        for item in self._data:
            if item is not None:
                item.append(db.has_cover(item[0], index_is_id=True))
        self._map = [i[0] for i in self._data if i is not None]
        if field is not None:
            self.sort(field, ascending)
        self._map_filtered = list(self._map)

    def seriescmp(self, x, y):
        try:
            ans = cmp(self._data[x][9].lower(), self._data[y][9].lower())
        except AttributeError: # Some entries may be None
            ans = cmp(self._data[x][9], self._data[y][9])
        if ans != 0: return ans
        return cmp(self._data[x][10], self._data[y][10])

    def cmp(self, loc, x, y, asstr=True, subsort=False):
        try:
            ans = cmp(self._data[x][loc].lower(), self._data[y][loc].lower()) if \
                asstr else cmp(self._data[x][loc], self._data[y][loc])
        except AttributeError: # Some entries may be None
            ans = cmp(self._data[x][loc], self._data[y][loc])
        if subsort and ans == 0:
            return cmp(self._data[x][11].lower(), self._data[y][11].lower())
        return ans

    def sort(self, field, ascending, subsort=False):
        field = field.lower().strip()
        if field in ('author', 'tag', 'comment'):
            field += 's'
        if field == 'date': field = 'timestamp'
        elif field == 'title': field = 'sort'
        elif field == 'authors': field = 'author_sort'
        if self.first_sort:
            subsort = True
            self.first_sort = False
        fcmp = self.seriescmp if field == 'series' else \
            functools.partial(self.cmp, FIELD_MAP[field], subsort=subsort,
                    asstr=field not in ('size', 'rating', 'timestamp'))

        self._map.sort(cmp=fcmp, reverse=not ascending)
        self._map_filtered = [id for id in self._map if id in self._map_filtered]

    def search(self, query):
        if not query or not query.strip():
            self._map_filtered = list(self._map)
            return
        matches = sorted(self.parse(query))
        self._map_filtered = [id for id in self._map if id in matches]


class Tag(object):
@ -494,11 +77,12 @@ class Tag(object):
        return str(self)


class LibraryDatabase2(LibraryDatabase):
class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
    '''
    An ebook metadata database that stores references to ebook files on disk.
    '''
    PATH_LIMIT = 40 if 'win32' in sys.platform else 100

    @dynamic_property
    def user_version(self):
        doc = 'The user version of this database'
@ -538,18 +122,71 @@ class LibraryDatabase2(LibraryDatabase):
        self.connect()
        self.is_case_sensitive = not iswindows and not isosx and \
                not os.path.exists(self.dbpath.replace('metadata.db', 'MeTAdAtA.dB'))
        # Upgrade database
        while True:
            uv = self.user_version
            meth = getattr(self, 'upgrade_version_%d'%uv, None)
            if meth is None:
                break
            else:
                print 'Upgrading database to version %d...'%(uv+1)
                meth()
                self.user_version = uv+1
        SchemaUpgrade.__init__(self)
        CustomColumns.__init__(self)
        self.initialize_dynamic()

        self.data = ResultCache()
    def initialize_dynamic(self):
        template = '''\
                (SELECT {query} FROM books_{table}_link AS link INNER JOIN
                    {table} ON(link.{link_col}={table}.id) WHERE link.book=books.id)
                {col}
                '''
        columns = ['id', 'title',
            # col         table      link_col          query
            ('authors', 'authors', 'author', 'sortconcat(link.id, name)'),
            ('publisher', 'publishers', 'publisher', 'name'),
            ('rating', 'ratings', 'rating', 'ratings.rating'),
            'timestamp',
            '(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size',
            ('tags', 'tags', 'tag', 'group_concat(name)'),
            '(SELECT text FROM comments WHERE book=books.id) comments',
            ('series', 'series', 'series', 'name'),
            'series_index',
            'sort',
            'author_sort',
            '(SELECT group_concat(format) FROM data WHERE data.book=books.id) formats',
            'isbn',
            'path',
            'lccn',
            'pubdate',
            'flags',
            'uuid'
            ]
        lines = []
        for col in columns:
            line = col
            if isinstance(col, tuple):
                line = template.format(col=col[0], table=col[1],
                        link_col=col[2], query=col[3])
            lines.append(line)

        custom_map = self.custom_columns_in_meta()
        custom_cols = list(sorted(custom_map.keys()))
        lines.extend([custom_map[x] for x in custom_cols])

        self.FIELD_MAP = {'id':0, 'title':1, 'authors':2, 'publisher':3, 'rating':4, 'timestamp':5,
                          'size':6, 'tags':7, 'comments':8, 'series':9, 'series_index':10,
                          'sort':11, 'author_sort':12, 'formats':13, 'isbn':14, 'path':15,
                          'lccn':16, 'pubdate':17, 'flags':18, 'uuid':19}

        base = max(self.FIELD_MAP.values())
        for col in custom_cols:
            self.FIELD_MAP[col] = base = base+1

        self.FIELD_MAP['cover'] = base+1

        script = '''
        DROP VIEW IF EXISTS meta2;
        CREATE TEMP VIEW meta2 AS
        SELECT
        {0}
        FROM books;
        '''.format(', \n'.join(lines))
        self.conn.executescript(script)
        self.conn.commit()

        self.data = ResultCache(self.FIELD_MAP)
        self.search = self.data.search
        self.refresh = functools.partial(self.data.refresh, self)
        self.sort = self.data.sort
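        # Illustrative FIELD_MAP layout for a library with two custom
        # columns, nums 1 and 3 (hypothetical): the static fields keep
        # indices 0-19, customs are appended in sorted order, and the
        # synthetic 'cover' entry always takes the final slot.
        #   self.FIELD_MAP[1]       == 20   # custom_1 column of meta2
        #   self.FIELD_MAP[3]       == 21   # custom_3
        #   self.FIELD_MAP['cover'] == 22
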
@ -570,245 +207,15 @@ class LibraryDatabase2(LibraryDatabase):

        for prop in ('author_sort', 'authors', 'comment', 'comments', 'isbn',
                     'publisher', 'rating', 'series', 'series_index', 'tags',
                     'title', 'timestamp', 'uuid'):
                     'title', 'timestamp', 'uuid', 'pubdate'):
            setattr(self, prop, functools.partial(get_property,
                    loc=FIELD_MAP['comments' if prop == 'comment' else prop]))
                    loc=self.FIELD_MAP['comments' if prop == 'comment' else prop]))

    def initialize_database(self):
        metadata_sqlite = open(P('metadata_sqlite.sql'), 'rb').read()
        self.conn.executescript(metadata_sqlite)
        self.user_version = 1

    def upgrade_version_1(self):
        '''
        Normalize indices.
        '''
        self.conn.executescript(textwrap.dedent('''\
        DROP INDEX authors_idx;
        CREATE INDEX authors_idx ON books (author_sort COLLATE NOCASE, sort COLLATE NOCASE);
        DROP INDEX series_idx;
        CREATE INDEX series_idx ON series (name COLLATE NOCASE);
        CREATE INDEX series_sort_idx ON books (series_index, id);
        '''))
    def upgrade_version_2(self):
        ''' Fix Foreign key constraints for deleting from link tables. '''
        script = textwrap.dedent('''\
        DROP TRIGGER IF EXISTS fkc_delete_books_%(ltable)s_link;
        CREATE TRIGGER fkc_delete_on_%(table)s
        BEFORE DELETE ON %(table)s
        BEGIN
            SELECT CASE
                WHEN (SELECT COUNT(id) FROM books_%(ltable)s_link WHERE %(ltable_col)s=OLD.id) > 0
                THEN RAISE(ABORT, 'Foreign key violation: %(table)s is still referenced')
            END;
        END;
        DELETE FROM %(table)s WHERE (SELECT COUNT(id) FROM books_%(ltable)s_link WHERE %(ltable_col)s=%(table)s.id) < 1;
        ''')
        self.conn.executescript(script%dict(ltable='authors', table='authors', ltable_col='author'))
        self.conn.executescript(script%dict(ltable='publishers', table='publishers', ltable_col='publisher'))
        self.conn.executescript(script%dict(ltable='tags', table='tags', ltable_col='tag'))
        self.conn.executescript(script%dict(ltable='series', table='series', ltable_col='series'))

    def upgrade_version_3(self):
        ' Add path to result cache '
        self.conn.executescript('''
        DROP VIEW meta;
        CREATE VIEW meta AS
        SELECT id, title,
               (SELECT concat(name) FROM authors WHERE authors.id IN (SELECT author from books_authors_link WHERE book=books.id)) authors,
               (SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
               (SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
               timestamp,
               (SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
               (SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
               (SELECT text FROM comments WHERE book=books.id) comments,
               (SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
               series_index,
               sort,
               author_sort,
               (SELECT concat(format) FROM data WHERE data.book=books.id) formats,
               isbn,
               path
        FROM books;
        ''')

    def upgrade_version_4(self):
        'Rationalize books table'
        self.conn.executescript('''
        BEGIN TRANSACTION;
        CREATE TEMPORARY TABLE
        books_backup(id,title,sort,timestamp,series_index,author_sort,isbn,path);
        INSERT INTO books_backup SELECT id,title,sort,timestamp,series_index,author_sort,isbn,path FROM books;
        DROP TABLE books;
        CREATE TABLE books ( id           INTEGER PRIMARY KEY AUTOINCREMENT,
                             title        TEXT NOT NULL DEFAULT 'Unknown' COLLATE NOCASE,
                             sort         TEXT COLLATE NOCASE,
                             timestamp    TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                             pubdate      TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                             series_index REAL NOT NULL DEFAULT 1.0,
                             author_sort  TEXT COLLATE NOCASE,
                             isbn         TEXT DEFAULT "" COLLATE NOCASE,
                             lccn         TEXT DEFAULT "" COLLATE NOCASE,
                             path         TEXT NOT NULL DEFAULT "",
                             flags        INTEGER NOT NULL DEFAULT 1
                        );
        INSERT INTO
            books (id,title,sort,timestamp,pubdate,series_index,author_sort,isbn,path)
            SELECT id,title,sort,timestamp,timestamp,series_index,author_sort,isbn,path FROM books_backup;
        DROP TABLE books_backup;

        DROP VIEW meta;
        CREATE VIEW meta AS
        SELECT id, title,
               (SELECT concat(name) FROM authors WHERE authors.id IN (SELECT author from books_authors_link WHERE book=books.id)) authors,
               (SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
               (SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
               timestamp,
               (SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
               (SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
               (SELECT text FROM comments WHERE book=books.id) comments,
               (SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
               series_index,
               sort,
               author_sort,
               (SELECT concat(format) FROM data WHERE data.book=books.id) formats,
               isbn,
               path,
               lccn,
               pubdate,
               flags
        FROM books;
        ''')
    def upgrade_version_5(self):
        'Update indexes/triggers for new books table'
        self.conn.executescript('''
        BEGIN TRANSACTION;
        CREATE INDEX authors_idx ON books (author_sort COLLATE NOCASE);
        CREATE INDEX books_idx ON books (sort COLLATE NOCASE);
        CREATE TRIGGER books_delete_trg
            AFTER DELETE ON books
            BEGIN
                DELETE FROM books_authors_link WHERE book=OLD.id;
                DELETE FROM books_publishers_link WHERE book=OLD.id;
                DELETE FROM books_ratings_link WHERE book=OLD.id;
                DELETE FROM books_series_link WHERE book=OLD.id;
                DELETE FROM books_tags_link WHERE book=OLD.id;
                DELETE FROM data WHERE book=OLD.id;
                DELETE FROM comments WHERE book=OLD.id;
                DELETE FROM conversion_options WHERE book=OLD.id;
            END;
        CREATE TRIGGER books_insert_trg
            AFTER INSERT ON books
            BEGIN
                UPDATE books SET sort=title_sort(NEW.title) WHERE id=NEW.id;
            END;
        CREATE TRIGGER books_update_trg
            AFTER UPDATE ON books
            BEGIN
                UPDATE books SET sort=title_sort(NEW.title) WHERE id=NEW.id;
            END;

        UPDATE books SET sort=title_sort(title) WHERE sort IS NULL;

        END TRANSACTION;
        '''
        )


    def upgrade_version_6(self):
        'Show authors in order'
        self.conn.executescript('''
        BEGIN TRANSACTION;
        DROP VIEW meta;
        CREATE VIEW meta AS
        SELECT id, title,
               (SELECT sortconcat(bal.id, name) FROM books_authors_link AS bal JOIN authors ON(author = authors.id) WHERE book = books.id) authors,
               (SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
               (SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
               timestamp,
               (SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
               (SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
               (SELECT text FROM comments WHERE book=books.id) comments,
               (SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
               series_index,
               sort,
               author_sort,
               (SELECT concat(format) FROM data WHERE data.book=books.id) formats,
               isbn,
               path,
               lccn,
               pubdate,
               flags
        FROM books;
        END TRANSACTION;
        ''')

    def upgrade_version_7(self):
        'Add uuid column'
        self.conn.executescript('''
        BEGIN TRANSACTION;
        ALTER TABLE books ADD COLUMN uuid TEXT;
        DROP TRIGGER IF EXISTS books_insert_trg;
        DROP TRIGGER IF EXISTS books_update_trg;
        UPDATE books SET uuid=uuid4();

        CREATE TRIGGER books_insert_trg AFTER INSERT ON books
        BEGIN
            UPDATE books SET sort=title_sort(NEW.title),uuid=uuid4() WHERE id=NEW.id;
        END;

        CREATE TRIGGER books_update_trg AFTER UPDATE ON books
        BEGIN
            UPDATE books SET sort=title_sort(NEW.title) WHERE id=NEW.id;
        END;

        DROP VIEW meta;
        CREATE VIEW meta AS
        SELECT id, title,
               (SELECT sortconcat(bal.id, name) FROM books_authors_link AS bal JOIN authors ON(author = authors.id) WHERE book = books.id) authors,
               (SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
               (SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
               timestamp,
               (SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
               (SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
               (SELECT text FROM comments WHERE book=books.id) comments,
               (SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
               series_index,
               sort,
               author_sort,
               (SELECT concat(format) FROM data WHERE data.book=books.id) formats,
               isbn,
               path,
               lccn,
               pubdate,
               flags,
               uuid
        FROM books;

        END TRANSACTION;
        ''')

    def upgrade_version_8(self):
        'Add Tag Browser views'
        def create_tag_browser_view(table_name, column_name):
            self.conn.executescript('''
                DROP VIEW IF EXISTS tag_browser_{tn};
                CREATE VIEW tag_browser_{tn} AS SELECT
                    id,
                    name,
                    (SELECT COUNT(id) FROM books_{tn}_link WHERE {cn}={tn}.id) count
                FROM {tn};
                '''.format(tn=table_name, cn=column_name))

        for tn in ('authors', 'tags', 'publishers', 'series'):
            cn = tn[:-1]
            if tn == 'series':
                cn = tn
            create_tag_browser_view(tn, cn)


def last_modified(self):
|
||||
''' Return last modified time as a UTC datetime object'''
|
||||
return utcfromtimestamp(os.stat(self.dbpath).st_mtime)
|
||||
@@ -821,7 +228,7 @@ class LibraryDatabase2(LibraryDatabase):
    def path(self, index, index_is_id=False):
        'Return the relative path to the directory containing this books files as a unicode string.'
        row = self.data._data[index] if index_is_id else self.data[index]
        return row[FIELD_MAP['path']].replace('/', os.sep)
        return row[self.FIELD_MAP['path']].replace('/', os.sep)


    def abspath(self, index, index_is_id=False):
@@ -908,7 +315,7 @@ class LibraryDatabase2(LibraryDatabase):
            self.add_format(id, format, stream, index_is_id=True, path=tpath)
        self.conn.execute('UPDATE books SET path=? WHERE id=?', (path, id))
        self.conn.commit()
        self.data.set(id, FIELD_MAP['path'], path, row_is_id=True)
        self.data.set(id, self.FIELD_MAP['path'], path, row_is_id=True)
        # Delete not needed directories
        if current_path and os.path.exists(spath):
            if self.normpath(spath) != self.normpath(tpath):
@@ -953,16 +360,6 @@ class LibraryDatabase2(LibraryDatabase):
                return img
        return f if as_file else f.read()

    def timestamp(self, index, index_is_id=False):
        if index_is_id:
            return self.conn.get('SELECT timestamp FROM meta WHERE id=?', (index,), all=False)
        return self.data[index][FIELD_MAP['timestamp']]

    def pubdate(self, index, index_is_id=False):
        if index_is_id:
            return self.conn.get('SELECT pubdate FROM meta WHERE id=?', (index,), all=False)
        return self.data[index][FIELD_MAP['pubdate']]

    def get_metadata(self, idx, index_is_id=False, get_cover=False):
        '''
        Convenience method to return metadata as a L{MetaInformation} object.
@@ -1170,6 +567,7 @@ class LibraryDatabase2(LibraryDatabase):
        self.conn.execute(st%dict(ltable='publishers', table='publishers', ltable_col='publisher'))
        self.conn.execute(st%dict(ltable='tags', table='tags', ltable_col='tag'))
        self.conn.execute(st%dict(ltable='series', table='series', ltable_col='series'))
        self.clean_custom()
        self.conn.commit()

    def get_recipes(self):
@@ -1222,10 +620,10 @@ class LibraryDatabase2(LibraryDatabase):
        now = nowf()
        for r in self.data._data:
            if r is not None:
                if (now - r[FIELD_MAP['timestamp']]) > delta:
                    tags = r[FIELD_MAP['tags']]
                if (now - r[self.FIELD_MAP['timestamp']]) > delta:
                    tags = r[self.FIELD_MAP['tags']]
                    if tags and tag in tags.lower():
                        yield r[FIELD_MAP['id']]
                        yield r[self.FIELD_MAP['id']]

    def get_next_series_num_for(self, series):
        series_id = self.conn.get('SELECT id from series WHERE name=?',
@@ -1341,10 +739,10 @@ class LibraryDatabase2(LibraryDatabase):
        self.conn.execute('UPDATE books SET author_sort=? WHERE id=?',
                          (ss, id))
        self.conn.commit()
        self.data.set(id, FIELD_MAP['authors'],
        self.data.set(id, self.FIELD_MAP['authors'],
                      ','.join([a.replace(',', '|') for a in authors]),
                      row_is_id=True)
        self.data.set(id, FIELD_MAP['author_sort'], ss, row_is_id=True)
        self.data.set(id, self.FIELD_MAP['author_sort'], ss, row_is_id=True)
        self.set_path(id, True)
        if notify:
            self.notify('metadata', [id])
@@ -1355,8 +753,8 @@ class LibraryDatabase2(LibraryDatabase):
        if not isinstance(title, unicode):
            title = title.decode(preferred_encoding, 'replace')
        self.conn.execute('UPDATE books SET title=? WHERE id=?', (title, id))
        self.data.set(id, FIELD_MAP['title'], title, row_is_id=True)
        self.data.set(id, FIELD_MAP['sort'], title_sort(title), row_is_id=True)
        self.data.set(id, self.FIELD_MAP['title'], title, row_is_id=True)
        self.data.set(id, self.FIELD_MAP['sort'], title_sort(title), row_is_id=True)
        self.set_path(id, True)
        self.conn.commit()
        if notify:
@@ -1365,7 +763,7 @@ class LibraryDatabase2(LibraryDatabase):
    def set_timestamp(self, id, dt, notify=True):
        if dt:
            self.conn.execute('UPDATE books SET timestamp=? WHERE id=?', (dt, id))
            self.data.set(id, FIELD_MAP['timestamp'], dt, row_is_id=True)
            self.data.set(id, self.FIELD_MAP['timestamp'], dt, row_is_id=True)
            self.conn.commit()
            if notify:
                self.notify('metadata', [id])
@@ -1373,7 +771,7 @@ class LibraryDatabase2(LibraryDatabase):
    def set_pubdate(self, id, dt, notify=True):
        if dt:
            self.conn.execute('UPDATE books SET pubdate=? WHERE id=?', (dt, id))
            self.data.set(id, FIELD_MAP['pubdate'], dt, row_is_id=True)
            self.data.set(id, self.FIELD_MAP['pubdate'], dt, row_is_id=True)
            self.conn.commit()
            if notify:
                self.notify('metadata', [id])
@@ -1392,7 +790,7 @@ class LibraryDatabase2(LibraryDatabase):
            aid = self.conn.execute('INSERT INTO publishers(name) VALUES (?)', (publisher,)).lastrowid
            self.conn.execute('INSERT INTO books_publishers_link(book, publisher) VALUES (?,?)', (id, aid))
        self.conn.commit()
        self.data.set(id, FIELD_MAP['publisher'], publisher, row_is_id=True)
        self.data.set(id, self.FIELD_MAP['publisher'], publisher, row_is_id=True)
        if notify:
            self.notify('metadata', [id])

@@ -1443,7 +841,7 @@ class LibraryDatabase2(LibraryDatabase):
                              (id, tid))
        self.conn.commit()
        tags = ','.join(self.get_tags(id))
        self.data.set(id, FIELD_MAP['tags'], tags, row_is_id=True)
        self.data.set(id, self.FIELD_MAP['tags'], tags, row_is_id=True)
        if notify:
            self.notify('metadata', [id])

@@ -1502,7 +900,7 @@ class LibraryDatabase2(LibraryDatabase):
                self.data.set(row, 9, series)
            except ValueError:
                pass
        self.data.set(id, FIELD_MAP['series'], series, row_is_id=True)
        self.data.set(id, self.FIELD_MAP['series'], series, row_is_id=True)
        if notify:
            self.notify('metadata', [id])

@@ -1515,7 +913,7 @@ class LibraryDatabase2(LibraryDatabase):
            idx = 1.0
        self.conn.execute('UPDATE books SET series_index=? WHERE id=?', (idx, id))
        self.conn.commit()
        self.data.set(id, FIELD_MAP['series_index'], idx, row_is_id=True)
        self.data.set(id, self.FIELD_MAP['series_index'], idx, row_is_id=True)
        if notify:
            self.notify('metadata', [id])

@@ -1526,7 +924,7 @@ class LibraryDatabase2(LibraryDatabase):
        rat = rat if rat else self.conn.execute('INSERT INTO ratings(rating) VALUES (?)', (rating,)).lastrowid
        self.conn.execute('INSERT INTO books_ratings_link(book, rating) VALUES (?,?)', (id, rat))
        self.conn.commit()
        self.data.set(id, FIELD_MAP['rating'], rating, row_is_id=True)
        self.data.set(id, self.FIELD_MAP['rating'], rating, row_is_id=True)
        if notify:
            self.notify('metadata', [id])

@@ -1534,21 +932,21 @@ class LibraryDatabase2(LibraryDatabase):
        self.conn.execute('DELETE FROM comments WHERE book=?', (id,))
        self.conn.execute('INSERT INTO comments(book,text) VALUES (?,?)', (id, text))
        self.conn.commit()
        self.data.set(id, FIELD_MAP['comments'], text, row_is_id=True)
        self.data.set(id, self.FIELD_MAP['comments'], text, row_is_id=True)
        if notify:
            self.notify('metadata', [id])

    def set_author_sort(self, id, sort, notify=True):
        self.conn.execute('UPDATE books SET author_sort=? WHERE id=?', (sort, id))
        self.conn.commit()
        self.data.set(id, FIELD_MAP['author_sort'], sort, row_is_id=True)
        self.data.set(id, self.FIELD_MAP['author_sort'], sort, row_is_id=True)
        if notify:
            self.notify('metadata', [id])

    def set_isbn(self, id, isbn, notify=True):
        self.conn.execute('UPDATE books SET isbn=? WHERE id=?', (isbn, id))
        self.conn.commit()
        self.data.set(id, FIELD_MAP['isbn'], isbn, row_is_id=True)
        self.data.set(id, self.FIELD_MAP['isbn'], isbn, row_is_id=True)
        if notify:
            self.notify('metadata', [id])

@@ -1797,7 +1195,7 @@ class LibraryDatabase2(LibraryDatabase):
            yield record

    def all_ids(self):
        x = FIELD_MAP['id']
        x = self.FIELD_MAP['id']
        for i in iter(self):
            yield i[x]

@@ -1816,15 +1214,17 @@ class LibraryDatabase2(LibraryDatabase):
        FIELDS = set(['title', 'authors', 'author_sort', 'publisher', 'rating',
            'timestamp', 'size', 'tags', 'comments', 'series', 'series_index',
            'isbn', 'uuid', 'pubdate'])
        for x in self.custom_column_num_map:
            FIELDS.add(x)
        data = []
        for record in self.data:
            if record is None: continue
            db_id = record[FIELD_MAP['id']]
            db_id = record[self.FIELD_MAP['id']]
            if ids is not None and db_id not in ids:
                continue
            x = {}
            for field in FIELDS:
                x[field] = record[FIELD_MAP[field]]
                x[field] = record[self.FIELD_MAP[field]]
            data.append(x)
            x['id'] = db_id
            x['formats'] = []
@@ -1834,11 +1234,11 @@ class LibraryDatabase2(LibraryDatabase):
            if authors_as_string:
                x['authors'] = authors_to_string(x['authors'])
            x['tags'] = [i.replace('|', ',').strip() for i in x['tags'].split(',')] if x['tags'] else []
            path = os.path.join(prefix, self.path(record[FIELD_MAP['id']], index_is_id=True))
            path = os.path.join(prefix, self.path(record[self.FIELD_MAP['id']], index_is_id=True))
            x['cover'] = os.path.join(path, 'cover.jpg')
            if not self.has_cover(x['id'], index_is_id=True):
                x['cover'] = None
            formats = self.formats(record[FIELD_MAP['id']], index_is_id=True)
            formats = self.formats(record[self.FIELD_MAP['id']], index_is_id=True)
            if formats:
                for fmt in formats.split(','):
                    path = self.format_abspath(x['id'], fmt, index_is_id=True)
@@ -2036,7 +1436,7 @@ books_series_link feeds
        us = self.data.universal_set()
        total = float(len(us))
        for i, id in enumerate(us):
            formats = self.data.get(id, FIELD_MAP['formats'], row_is_id=True)
            formats = self.data.get(id, self.FIELD_MAP['formats'], row_is_id=True)
            if not formats:
                formats = []
            else:
271  src/calibre/library/schema_upgrades.py  Normal file
@@ -0,0 +1,271 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement

__license__   = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

class SchemaUpgrade(object):

    def __init__(self):
        # Upgrade database
        while True:
            uv = self.user_version
            meth = getattr(self, 'upgrade_version_%d'%uv, None)
            if meth is None:
                break
            else:
                print 'Upgrading database to version %d...'%(uv+1)
                meth()
                self.user_version = uv+1


    def upgrade_version_1(self):
        '''
        Normalize indices.
        '''
        self.conn.executescript('''\
        DROP INDEX authors_idx;
        CREATE INDEX authors_idx ON books (author_sort COLLATE NOCASE, sort COLLATE NOCASE);
        DROP INDEX series_idx;
        CREATE INDEX series_idx ON series (name COLLATE NOCASE);
        CREATE INDEX series_sort_idx ON books (series_index, id);
        ''')

    def upgrade_version_2(self):
        ''' Fix Foreign key constraints for deleting from link tables. '''
        script = '''\
        DROP TRIGGER IF EXISTS fkc_delete_books_%(ltable)s_link;
        CREATE TRIGGER fkc_delete_on_%(table)s
        BEFORE DELETE ON %(table)s
        BEGIN
            SELECT CASE
                WHEN (SELECT COUNT(id) FROM books_%(ltable)s_link WHERE %(ltable_col)s=OLD.id) > 0
                THEN RAISE(ABORT, 'Foreign key violation: %(table)s is still referenced')
            END;
        END;
        DELETE FROM %(table)s WHERE (SELECT COUNT(id) FROM books_%(ltable)s_link WHERE %(ltable_col)s=%(table)s.id) < 1;
        '''
        self.conn.executescript(script%dict(ltable='authors', table='authors', ltable_col='author'))
        self.conn.executescript(script%dict(ltable='publishers', table='publishers', ltable_col='publisher'))
        self.conn.executescript(script%dict(ltable='tags', table='tags', ltable_col='tag'))
        self.conn.executescript(script%dict(ltable='series', table='series', ltable_col='series'))

    def upgrade_version_3(self):
        ' Add path to result cache '
        self.conn.executescript('''
        DROP VIEW meta;
        CREATE VIEW meta AS
        SELECT id, title,
               (SELECT concat(name) FROM authors WHERE authors.id IN (SELECT author from books_authors_link WHERE book=books.id)) authors,
               (SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
               (SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
               timestamp,
               (SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
               (SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
               (SELECT text FROM comments WHERE book=books.id) comments,
               (SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
               series_index,
               sort,
               author_sort,
               (SELECT concat(format) FROM data WHERE data.book=books.id) formats,
               isbn,
               path
        FROM books;
        ''')

    def upgrade_version_4(self):
        'Rationalize books table'
        self.conn.executescript('''
        BEGIN TRANSACTION;
        CREATE TEMPORARY TABLE
        books_backup(id,title,sort,timestamp,series_index,author_sort,isbn,path);
        INSERT INTO books_backup SELECT id,title,sort,timestamp,series_index,author_sort,isbn,path FROM books;
        DROP TABLE books;
        CREATE TABLE books ( id INTEGER PRIMARY KEY AUTOINCREMENT,
                             title TEXT NOT NULL DEFAULT 'Unknown' COLLATE NOCASE,
                             sort TEXT COLLATE NOCASE,
                             timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                             pubdate TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                             series_index REAL NOT NULL DEFAULT 1.0,
                             author_sort TEXT COLLATE NOCASE,
                             isbn TEXT DEFAULT "" COLLATE NOCASE,
                             lccn TEXT DEFAULT "" COLLATE NOCASE,
                             path TEXT NOT NULL DEFAULT "",
                             flags INTEGER NOT NULL DEFAULT 1
                           );
        INSERT INTO
            books (id,title,sort,timestamp,pubdate,series_index,author_sort,isbn,path)
            SELECT id,title,sort,timestamp,timestamp,series_index,author_sort,isbn,path FROM books_backup;
        DROP TABLE books_backup;

        DROP VIEW meta;
        CREATE VIEW meta AS
        SELECT id, title,
               (SELECT concat(name) FROM authors WHERE authors.id IN (SELECT author from books_authors_link WHERE book=books.id)) authors,
               (SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
               (SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
               timestamp,
               (SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
               (SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
               (SELECT text FROM comments WHERE book=books.id) comments,
               (SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
               series_index,
               sort,
               author_sort,
               (SELECT concat(format) FROM data WHERE data.book=books.id) formats,
               isbn,
               path,
               lccn,
               pubdate,
               flags
        FROM books;
        ''')

    def upgrade_version_5(self):
        'Update indexes/triggers for new books table'
        self.conn.executescript('''
        BEGIN TRANSACTION;
        CREATE INDEX authors_idx ON books (author_sort COLLATE NOCASE);
        CREATE INDEX books_idx ON books (sort COLLATE NOCASE);
        CREATE TRIGGER books_delete_trg
            AFTER DELETE ON books
            BEGIN
                DELETE FROM books_authors_link WHERE book=OLD.id;
                DELETE FROM books_publishers_link WHERE book=OLD.id;
                DELETE FROM books_ratings_link WHERE book=OLD.id;
                DELETE FROM books_series_link WHERE book=OLD.id;
                DELETE FROM books_tags_link WHERE book=OLD.id;
                DELETE FROM data WHERE book=OLD.id;
                DELETE FROM comments WHERE book=OLD.id;
                DELETE FROM conversion_options WHERE book=OLD.id;
            END;
        CREATE TRIGGER books_insert_trg
            AFTER INSERT ON books
            BEGIN
                UPDATE books SET sort=title_sort(NEW.title) WHERE id=NEW.id;
            END;
        CREATE TRIGGER books_update_trg
            AFTER UPDATE ON books
            BEGIN
                UPDATE books SET sort=title_sort(NEW.title) WHERE id=NEW.id;
            END;

        UPDATE books SET sort=title_sort(title) WHERE sort IS NULL;

        END TRANSACTION;
        '''
        )


    def upgrade_version_6(self):
        'Show authors in order'
        self.conn.executescript('''
        BEGIN TRANSACTION;
        DROP VIEW meta;
        CREATE VIEW meta AS
        SELECT id, title,
               (SELECT sortconcat(bal.id, name) FROM books_authors_link AS bal JOIN authors ON(author = authors.id) WHERE book = books.id) authors,
               (SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
               (SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
               timestamp,
               (SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
               (SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
               (SELECT text FROM comments WHERE book=books.id) comments,
               (SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
               series_index,
               sort,
               author_sort,
               (SELECT concat(format) FROM data WHERE data.book=books.id) formats,
               isbn,
               path,
               lccn,
               pubdate,
               flags
        FROM books;
        END TRANSACTION;
        ''')

    def upgrade_version_7(self):
        'Add uuid column'
        self.conn.executescript('''
        BEGIN TRANSACTION;
        ALTER TABLE books ADD COLUMN uuid TEXT;
        DROP TRIGGER IF EXISTS books_insert_trg;
        DROP TRIGGER IF EXISTS books_update_trg;
        UPDATE books SET uuid=uuid4();

        CREATE TRIGGER books_insert_trg AFTER INSERT ON books
        BEGIN
            UPDATE books SET sort=title_sort(NEW.title),uuid=uuid4() WHERE id=NEW.id;
        END;

        CREATE TRIGGER books_update_trg AFTER UPDATE ON books
        BEGIN
            UPDATE books SET sort=title_sort(NEW.title) WHERE id=NEW.id;
        END;

        DROP VIEW meta;
        CREATE VIEW meta AS
        SELECT id, title,
               (SELECT sortconcat(bal.id, name) FROM books_authors_link AS bal JOIN authors ON(author = authors.id) WHERE book = books.id) authors,
               (SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
               (SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
               timestamp,
               (SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
               (SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
               (SELECT text FROM comments WHERE book=books.id) comments,
               (SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
               series_index,
               sort,
               author_sort,
               (SELECT concat(format) FROM data WHERE data.book=books.id) formats,
               isbn,
               path,
               lccn,
               pubdate,
               flags,
               uuid
        FROM books;

        END TRANSACTION;
        ''')

    def upgrade_version_8(self):
        'Add Tag Browser views'
        def create_tag_browser_view(table_name, column_name):
            self.conn.executescript('''
                DROP VIEW IF EXISTS tag_browser_{tn};
                CREATE VIEW tag_browser_{tn} AS SELECT
                    id,
                    name,
                    (SELECT COUNT(id) FROM books_{tn}_link WHERE {cn}={tn}.id) count
                FROM {tn};
                '''.format(tn=table_name, cn=column_name))

        for tn in ('authors', 'tags', 'publishers', 'series'):
            cn = tn[:-1]
            if tn == 'series':
                cn = tn
            create_tag_browser_view(tn, cn)

    def upgrade_version_9(self):
        'Add custom columns'
        self.conn.executescript('''
                CREATE TABLE custom_columns (
                    id       INTEGER PRIMARY KEY AUTOINCREMENT,
                    label    TEXT NOT NULL,
                    name     TEXT NOT NULL,
                    datatype TEXT NOT NULL,
                    mark_for_delete   BOOL DEFAULT 0 NOT NULL,
                    editable BOOL DEFAULT 1 NOT NULL,
                    display  TEXT DEFAULT "{}" NOT NULL,
                    is_multiple BOOL DEFAULT 0 NOT NULL,
                    normalized BOOL NOT NULL,
                    UNIQUE(label)
                );
                CREATE INDEX custom_columns_idx ON custom_columns (label);
                CREATE INDEX formats_idx ON data (format);
        ''')
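The new SchemaUpgrade mix-in drives migrations off a stored schema version: it keeps looking up an `upgrade_version_N` method for the current version and bumps the version after each step, so a library several versions behind is upgraded one step at a time. For context, here is a minimal standalone sketch of the same pattern using only the standard sqlite3 module; the class name, the table, and the single upgrade step are invented for illustration (calibre itself wraps `user_version` behind a property on its connection class):

    import sqlite3

    class MyDB(object):  # hypothetical stand-in for a class mixing in SchemaUpgrade

        def __init__(self, path):
            self.conn = sqlite3.connect(path)
            # Keep applying upgrade_version_N while a method for the current
            # schema version exists, bumping PRAGMA user_version after each step.
            while True:
                uv = self.conn.execute('PRAGMA user_version').fetchone()[0]
                meth = getattr(self, 'upgrade_version_%d' % uv, None)
                if meth is None:
                    break
                meth()
                self.conn.execute('PRAGMA user_version=%d' % (uv + 1))
                self.conn.commit()

        def upgrade_version_0(self):
            # Illustrative first step: create an initial table.
            self.conn.execute('CREATE TABLE IF NOT EXISTS books (id INTEGER PRIMARY KEY)')

    db = MyDB(':memory:')  # runs upgrade_version_0, leaves user_version at 1

The appeal of the design is that adding a migration is just defining the next numbered method; the loop in `__init__` needs no changes.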
@@ -25,13 +25,15 @@ from calibre.utils.genshi.template import MarkupTemplate
from calibre import fit_image, guess_type, prepare_string_for_xml, \
    strftime as _strftime
from calibre.library import server_config as config
from calibre.library.database2 import LibraryDatabase2, FIELD_MAP
from calibre.library.database2 import LibraryDatabase2
from calibre.utils.config import config_dir
from calibre.utils.mdns import publish as publish_zeroconf, \
    stop_server as stop_zeroconf, get_external_ip
from calibre.ebooks.metadata import fmt_sidx, title_sort
from calibre.utils.date import now as nowf, fromtimestamp

listen_on = '0.0.0.0'

def strftime(fmt='%Y/%m/%d %H:%M:%S', dt=None):
    if not hasattr(dt, 'timetuple'):
        dt = nowf()
@@ -353,14 +355,13 @@ class LibraryServer(object):
        path = P('content_server')
        self.build_time = fromtimestamp(os.stat(path).st_mtime)
        self.default_cover = open(P('content_server/default_cover.jpg'), 'rb').read()

        cherrypy.config.update({
            'log.screen'             : opts.develop,
            'engine.autoreload_on'   : opts.develop,
            'tools.log_headers.on'   : opts.develop,
            'checker.on'             : opts.develop,
            'request.show_tracebacks': show_tracebacks,
            'server.socket_host'     : '0.0.0.0',
            'server.socket_host'     : listen_on,
            'server.socket_port'     : opts.port,
            'server.socket_timeout'  : opts.timeout, #seconds
            'server.thread_pool'     : opts.thread_pool, # number of threads
@@ -438,7 +439,10 @@ class LibraryServer(object):
            cherrypy.log.error(traceback.format_exc())

    def exit(self):
        cherrypy.engine.exit()
        try:
            cherrypy.engine.exit()
        finally:
            cherrypy.server.httpserver = None

    def get_cover(self, id, thumbnail=False):
        cover = self.db.cover(id, index_is_id=True, as_file=False)
@@ -512,18 +516,18 @@ class LibraryServer(object):
        if field == 'series':
            items.sort(cmp=self.seriescmp, reverse=not order)
        else:
            field = FIELD_MAP[field]
            field = self.db.FIELD_MAP[field]
            getter = operator.itemgetter(field)
            items.sort(cmp=lambda x, y: cmpf(getter(x), getter(y)), reverse=not order)

    def seriescmp(self, x, y):
        si = FIELD_MAP['series']
        si = self.db.FIELD_MAP['series']
        try:
            ans = cmp(x[si].lower(), y[si].lower())
        except AttributeError: # Some entries may be None
            ans = cmp(x[si], y[si])
        if ans != 0: return ans
        return cmp(x[FIELD_MAP['series_index']], y[FIELD_MAP['series_index']])
        return cmp(x[self.db.FIELD_MAP['series_index']], y[self.db.FIELD_MAP['series_index']])


    def last_modified(self, updated):
@@ -585,11 +589,11 @@ class LibraryServer(object):
            next_link = ('<link rel="next" title="Next" '
                'type="application/atom+xml" href="/stanza/?sortby=%s&offset=%d"/>\n'
                ) % (sortby, next_offset)
        return self.STANZA.generate(subtitle=subtitle, data=entries, FM=FIELD_MAP,
        return self.STANZA.generate(subtitle=subtitle, data=entries, FM=self.db.FIELD_MAP,
            updated=updated, id='urn:calibre:main', next_link=next_link).render('xml')

    def stanza_main(self, updated):
        return self.STANZA_MAIN.generate(subtitle='', data=[], FM=FIELD_MAP,
        return self.STANZA_MAIN.generate(subtitle='', data=[], FM=self.db.FIELD_MAP,
            updated=updated, id='urn:calibre:main').render('xml')

    @expose
@@ -626,15 +630,18 @@ class LibraryServer(object):

        # Sort the record list
        if sortby == "bytitle" or authorid or tagid:
            record_list.sort(lambda x, y: cmp(title_sort(x[FIELD_MAP['title']]),
                title_sort(y[FIELD_MAP['title']])))
            record_list.sort(lambda x, y:
                    cmp(title_sort(x[self.db.FIELD_MAP['title']]),
                        title_sort(y[self.db.FIELD_MAP['title']])))
        elif seriesid:
            record_list.sort(lambda x, y: cmp(x[FIELD_MAP['series_index']], y[FIELD_MAP['series_index']]))
            record_list.sort(lambda x, y:
                    cmp(x[self.db.FIELD_MAP['series_index']],
                        y[self.db.FIELD_MAP['series_index']]))
        else: # Sort by date
            record_list = reversed(record_list)


        fmts = FIELD_MAP['formats']
        fmts = self.db.FIELD_MAP['formats']
        pat = re.compile(r'EPUB|PDB', re.IGNORECASE)
        record_list = [x for x in record_list if x[0] in ids and
                pat.search(x[fmts] if x[fmts] else '') is not None]
@@ -656,10 +663,10 @@ class LibraryServer(object):
                ) % '&'.join(q)

        for record in nrecord_list:
            r = record[FIELD_MAP['formats']]
            r = record[self.db.FIELD_MAP['formats']]
            r = r.upper() if r else ''

            z = record[FIELD_MAP['authors']]
            z = record[self.db.FIELD_MAP['authors']]
            if not z:
                z = _('Unknown')
            authors = ' & '.join([i.replace('|', ',') for i in
@@ -667,19 +674,19 @@ class LibraryServer(object):

            # Setup extra description
            extra = []
            rating = record[FIELD_MAP['rating']]
            rating = record[self.db.FIELD_MAP['rating']]
            if rating > 0:
                rating = ''.join(repeat('★', rating))
                extra.append('RATING: %s<br />'%rating)
            tags = record[FIELD_MAP['tags']]
            tags = record[self.db.FIELD_MAP['tags']]
            if tags:
                extra.append('TAGS: %s<br />'%\
                        prepare_string_for_xml(', '.join(tags.split(','))))
            series = record[FIELD_MAP['series']]
            series = record[self.db.FIELD_MAP['series']]
            if series:
                extra.append('SERIES: %s [%s]<br />'%\
                        (prepare_string_for_xml(series),
                        fmt_sidx(float(record[FIELD_MAP['series_index']]))))
                        fmt_sidx(float(record[self.db.FIELD_MAP['series_index']]))))

            fmt = 'epub' if 'EPUB' in r else 'pdb'
            mimetype = guess_type('dummy.'+fmt)[0]
@@ -692,17 +699,17 @@ class LibraryServer(object):
                    authors=authors,
                    tags=tags,
                    series=series,
                    FM=FIELD_MAP,
                    FM=self.db.FIELD_MAP,
                    extra='\n'.join(extra),
                    mimetype=mimetype,
                    fmt=fmt,
                    urn=record[FIELD_MAP['uuid']],
                    urn=record[self.db.FIELD_MAP['uuid']],
                    timestamp=strftime('%Y-%m-%dT%H:%M:%S+00:00', record[5])
                    )
            books.append(self.STANZA_ENTRY.generate(**data)\
                    .render('xml').decode('utf8'))

        return self.STANZA.generate(subtitle='', data=books, FM=FIELD_MAP,
        return self.STANZA.generate(subtitle='', data=books, FM=self.db.FIELD_MAP,
            next_link=next_link, updated=updated, id='urn:calibre:main').render('xml')


@@ -741,7 +748,7 @@ class LibraryServer(object):
            authors = '|'.join([i.replace('|', ',') for i in aus.split(',')])
            record[10] = fmt_sidx(float(record[10]))
            ts, pd = strftime('%Y/%m/%d %H:%M:%S', record[5]), \
                strftime('%Y/%m/%d %H:%M:%S', record[FIELD_MAP['pubdate']])
                strftime('%Y/%m/%d %H:%M:%S', record[self.db.FIELD_MAP['pubdate']])
            books.append(book.generate(r=record, authors=authors, timestamp=ts,
                pubdate=pd).render('xml').decode('utf-8'))
        updated = self.db.last_modified()
@@ -788,7 +795,7 @@ class LibraryServer(object):
            authors = '|'.join([i.replace('|', ',') for i in aus.split(',')])
            record[10] = fmt_sidx(float(record[10]))
            ts, pd = strftime('%Y/%m/%d %H:%M:%S', record[5]), \
                strftime('%Y/%m/%d %H:%M:%S', record[FIELD_MAP['pubdate']])
                strftime('%Y/%m/%d %H:%M:%S', record[self.db.FIELD_MAP['pubdate']])
            books.append(book.generate(r=record, authors=authors, timestamp=ts,
                pubdate=pd).render('xml').decode('utf-8'))
        updated = self.db.last_modified()
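The server-side change is mechanical: FIELD_MAP moves from a module-level import to an attribute of the database instance (self.db.FIELD_MAP), since with custom columns the field-to-index mapping is no longer fixed at import time. The map itself is just a name-to-column-index table over flat record tuples, so field-based sorting reduces to operator.itemgetter. A toy sketch of the idea (field names and records invented for illustration; the diff uses the Python 2 cmp= form, while key= works on both 2 and 3):

    import operator

    FIELD_MAP = {'id': 0, 'title': 1, 'series_index': 2}  # toy subset

    records = [
        (3, 'Zoo Station', 2.0),
        (1, 'A Study in Scarlet', 1.0),
    ]

    # Translate a field name into a tuple index, then sort on that column.
    getter = operator.itemgetter(FIELD_MAP['title'])
    records.sort(key=getter)
    print([r[FIELD_MAP['id']] for r in records])  # -> [1, 3]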
@@ -20,7 +20,9 @@ from calibre.utils.date import parse_date, isoformat
global_lock = RLock()

def convert_timestamp(val):
    return parse_date(val, as_utc=False)
    if val:
        return parse_date(val, as_utc=False)
    return None

def adapt_datetime(dt):
    return isoformat(dt, sep=' ')
@@ -28,27 +30,32 @@ def adapt_datetime(dt):
sqlite.register_adapter(datetime, adapt_datetime)
sqlite.register_converter('timestamp', convert_timestamp)

def convert_bool(val):
    return bool(int(val))

sqlite.register_adapter(bool, lambda x : 1 if x else 0)
sqlite.register_converter('bool', convert_bool)


class Concatenate(object):
    '''String concatenation aggregator for sqlite'''
    def __init__(self, sep=','):
        self.sep = sep
        self.ans = ''
        self.ans = []

    def step(self, value):
        if value is not None:
            self.ans += value + self.sep
            self.ans.append(value)

    def finalize(self):
        if not self.ans:
            return None
        if self.sep:
            return self.ans[:-len(self.sep)]
        return self.ans
        return self.sep.join(self.ans)

class SortedConcatenate(object):
    '''String concatenation aggregator for sqlite, sorted by supplied index'''
    def __init__(self, sep=','):
        self.sep = sep
    sep = ','
    def __init__(self):
        self.ans = {}

    def step(self, ndx, value):
@@ -60,6 +67,9 @@ class SortedConcatenate(object):
            return None
        return self.sep.join(map(self.ans.get, sorted(self.ans.keys())))

class SafeSortedConcatenate(SortedConcatenate):
    sep = '|'

class Connection(sqlite.Connection):

    def get(self, *args, **kw):
@@ -92,6 +102,7 @@ class DBThread(Thread):
        self.conn.row_factory = sqlite.Row if self.row_factory else lambda cursor, row : list(row)
        self.conn.create_aggregate('concat', 1, Concatenate)
        self.conn.create_aggregate('sortconcat', 2, SortedConcatenate)
        self.conn.create_aggregate('sort_concat', 2, SafeSortedConcatenate)
        self.conn.create_function('title_sort', 1, title_sort)
        self.conn.create_function('uuid4', 0, lambda : str(uuid.uuid4()))
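The Concatenate rewrite replaces repeated string concatenation in step() with a list that is joined once in finalize(), which also makes the separator handling simpler. A small self-contained demonstration of how such an aggregate behaves once registered with Python's standard sqlite3 module (class name, table, and data here are made up, not calibre's):

    import sqlite3

    class Concat(object):
        # Same idea as the patched Concatenate: accumulate values in a list
        # and join once in finalize(), instead of repeated string appends.
        def __init__(self):
            self.ans = []

        def step(self, value):
            if value is not None:
                self.ans.append(value)

        def finalize(self):
            return ','.join(self.ans) if self.ans else None

    conn = sqlite3.connect(':memory:')
    conn.create_aggregate('concat', 1, Concat)
    conn.execute('CREATE TABLE t(name TEXT)')
    conn.executemany('INSERT INTO t(name) VALUES (?)', [('a',), ('b',), ('c',)])
    print(conn.execute('SELECT concat(name) FROM t').fetchone()[0])  # -> a,b,c

This is what lets the meta view above say `(SELECT concat(name) FROM tags ...)` even though stock SQLite has no such aggregate.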
@@ -132,14 +132,31 @@ Can I use the collections feature of the SONY reader?
|app| has full support for collections. When you add tags to a book's metadata, those tags are turned into collections when you upload the book to the SONY reader. Also, the series information is automatically
turned into a collection on the reader. Note that the PRS-500 does not support collections for books stored on the SD card. The PRS-505 does.

How do I use |app| with my iPhone?
How do I use |app| with my iPad/iPhone/iTouch?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
First install the Stanza reader on your iPhone using iTunes.

You can access your calibre library on an iPad/iPhone/iTouch over the air using the calibre content server.

First perform the following steps in |app|

  * Set the Preferred Output Format in |app| to EPUB (The output format can be set under Preferences->General)
  * Convert the books you want to read on your iPhone to EPUB format by selecting them and clicking the Convert button.
  * Turn on the Content Server in |app|'s preferences and leave |app| running.

For an iPad:

Install the ReadMe app on your iPad using iTunes. Open Safari and browse to::

    http://192.168.1.2:8080/

Replace ``192.168.1.2`` with the local IP address of the computer running |app|. If you have changed the port the |app| content server is running on, you will have to change ``8080`` as well to the new port. The local IP address is the IP address your computer is assigned on your home network. A quick Google search will tell you how to find out your local IP address.

The books in your |app| library will be presented as a list, 25 entries at a time. Click the right arrow to go to the next 25. You can also type in the search box to find specific books. Just click on the EPUB link of the book you want and it will be downloaded into your ReadMe library.

For an iPhone/iTouch:

Install the free Stanza reader app on your iPhone/iTouch using iTunes.

Now you should be able to access your books on your iPhone by opening Stanza. Go to "Get Books" and then click the "Shared" tab. Under Shared you will see an entry "Books in calibre". If you don't, make sure your iPhone is connected using the WiFi network in your house, not 3G. If the |app| catalog is still not detected in Stanza, you can add it manually in Stanza. To do this, click the "Shared" tab, then click the "Edit" button and then click "Add book source" to add a new book source. In the Add Book Source screen enter whatever name you like and in the URL field, enter the following::

    http://192.168.1.2:8080/
@@ -148,6 +165,8 @@ Replace ``192.168.1.2`` with the local IP address of the computer running |app|.

If you get timeout errors while browsing the calibre catalog in Stanza, try increasing the connection timeout value in the stanza settings. Go to Info->Settings and increase the value of Download Timeout.

Note that neither the Stanza nor the ReadMe app is in any way associated with |app|.

How do I use |app| with my Android phone?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -212,7 +212,8 @@ class RecursiveFetcher(object):
            if hasattr(err, 'code') and responses.has_key(err.code):
                raise FetchError, responses[err.code]
            if getattr(err, 'reason', [0])[0] == 104 or \
                getattr(getattr(err, 'args', [None])[0], 'errno', None) == -2: # Connection reset by peer or Name or service not known
                getattr(getattr(err, 'args', [None])[0], 'errno', None) in (-2,
                        -3): # Connection reset by peer or Name or service not known
                self.log.debug('Temporary error, retrying in 1 second')
                time.sleep(1)
                with closing(open_func(url, timeout=self.timeout)) as f:
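This hunk widens the set of error numbers the fetcher treats as transient, adding -3 (temporary failure in name resolution) to -2 and ECONNRESET (104). A rough standalone sketch of the same retry-on-transient-error idea, under the assumption that errno is available on the exception; the helper name and error-number set are illustrative, not calibre's code:

    import time
    import urllib2  # urllib.request on Python 3

    TRANSIENT = (104, -2, -3)  # ECONNRESET and name-resolution failures (Linux values)

    def fetch_with_retry(url, retries=2, timeout=30):
        for attempt in range(retries + 1):
            try:
                return urllib2.urlopen(url, timeout=timeout).read()
            except IOError as err:  # URLError and socket.error are IOErrors here
                if getattr(err, 'errno', None) in TRANSIENT and attempt < retries:
                    time.sleep(1)  # brief pause, then retry
                    continue
                raise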