mirror of https://github.com/kovidgoyal/calibre.git

commit 71eea7508f: KG revisions
resources/images/news/akter.png (new binary file, 429 B, not shown)

resources/recipes/akter.recipe (new file, 78 lines)
@@ -0,0 +1,78 @@
__license__   = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
akter.co.rs
'''

import re
from calibre.web.feeds.news import BasicNewsRecipe

class Akter(BasicNewsRecipe):
    title                 = 'AKTER'
    __author__            = 'Darko Miletic'
    description           = 'AKTER - nedeljni politicki magazin savremene Srbije'
    publisher             = 'Akter Media Group d.o.o.'
    category              = 'vesti, online vesti, najnovije vesti, politika, sport, ekonomija, biznis, finansije, berza, kultura, zivot, putovanja, auto, automobili, tehnologija, politicki magazin, dogadjaji, desavanja, lifestyle, zdravlje, zdravstvo, vest, novine, nedeljnik, srbija, novi sad, vojvodina, svet, drustvo, zabava, republika srpska, beograd, intervju, komentar, reportaza, arhiva vesti, news, serbia, politics'
    oldest_article        = 8
    max_articles_per_feed = 100
    no_stylesheets        = False
    use_embedded_content  = False
    encoding              = 'utf-8'
    masthead_url          = 'http://www.akter.co.rs/templates/gk_thenews2/images/style2/logo.png'
    language              = 'sr'
    publication_type      = 'magazine'
    remove_empty_feeds    = True
    PREFIX                = 'http://www.akter.co.rs'
    extra_css             = """ @font-face {font-family: "serif1";src:url(res:///opt/sony/ebook/FONT/tt0011m_.ttf)}
                                @font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
                                .article_description,body,.lokacija{font-family: Arial,Helvetica,sans1,sans-serif}
                                .color-2{display:block; margin-bottom: 10px; padding: 5px, 10px;
                                border-left: 1px solid #D00000; color: #D00000}
                                img{margin-bottom: 0.8em} """

    conversion_options = {
                          'comment'          : description
                        , 'tags'             : category
                        , 'publisher'        : publisher
                        , 'language'         : language
                        , 'linearize_tables' : True
                        }

    preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')]

    feeds = [
              (u'Politika'   , u'http://www.akter.co.rs/index.php/politikaprint.html' )
             ,(u'Ekonomija'  , u'http://www.akter.co.rs/index.php/ekonomijaprint.html')
             ,(u'Life&Style' , u'http://www.akter.co.rs/index.php/lsprint.html'       )
             ,(u'Sport'      , u'http://www.akter.co.rs/index.php/sportprint.html'    )
            ]

    def preprocess_html(self, soup):
        for item in soup.findAll(style=True):
            del item['style']
        return self.adeify_images(soup)

    def print_version(self, url):
        return url + '?tmpl=component&print=1&page='

    def parse_index(self):
        totalfeeds = []
        lfeeds = self.get_feeds()
        for feedobj in lfeeds:
            feedtitle, feedurl = feedobj
            self.report_progress(0, _('Fetching feed')+' %s...'%(feedtitle if feedtitle else feedurl))
            articles = []
            soup = self.index_to_soup(feedurl)
            for item in soup.findAll(attrs={'class':['sectiontableentry1','sectiontableentry2']}):
                link  = item.find('a')
                url   = self.PREFIX + link['href']
                title = self.tag_to_string(link)
                articles.append({
                                  'title'      :title
                                 ,'date'       :''
                                 ,'url'        :url
                                 ,'description':''
                                })
            totalfeeds.append((feedtitle, articles))
        return totalfeeds
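In the recipe above, parse_index collects article links from the section index pages listed in feeds, and print_version simply appends the Joomla print-view query string to each article URL. A small sketch of that transformation (the article path is an invented placeholder) and of the usual way such a recipe file is test-built with calibre's command line:

    # illustrative only; the article path below is a placeholder
    url = 'http://www.akter.co.rs/index.php/politika/some-article.html'
    print url + '?tmpl=component&print=1&page='
    # -> http://www.akter.co.rs/index.php/politika/some-article.html?tmpl=component&print=1&page=

    # from a shell, assuming a working calibre install:
    #   ebook-convert akter.recipe output.epub --test
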
@@ -1,41 +1,43 @@
"""
publico.py - v1.0
#!/usr/bin/env python
__author__ = u'Jordi Balcells'
__license__ = 'GPL v3'
description = u'Jornal portugu\xeas - v1.03 (16 June 2010)'
__docformat__ = 'restructuredtext en'

Copyright (c) 2009, David Rodrigues - http://sixhat.net
All rights reserved.
"""

__license__ = 'GPL 3'
'''
publico.pt
'''

from calibre.web.feeds.news import BasicNewsRecipe
import re

class Publico(BasicNewsRecipe):
    title = u'P\xfablico'
    __author__ = 'David Rodrigues'
    oldest_article = 1
    max_articles_per_feed = 30
    encoding='utf-8'
class PublicoPT(BasicNewsRecipe):
    description = u'Jornal portugu\xeas'
    cover_url = 'http://static.publico.pt/files/header/img/publico.gif'
    title = u'Publico.PT'
    category = 'News, politics, culture, economy, general interest'
    oldest_article = 2
    no_stylesheets = True
    encoding = 'utf8'
    use_embedded_content = False
    language = 'pt'
    remove_empty_feeds = True
    extra_css = ' body{font-family: Arial,Helvetica,sans-serif } img{margin-bottom: 0.4em} '

    preprocess_regexps = [(re.compile(u"\uFFFD", re.DOTALL|re.IGNORECASE), lambda match: ''),]
    keep_only_tags = [dict(attrs={'class':['content-noticia-title','artigoHeader','ECOSFERA_MANCHETE','noticia','textoPrincipal','ECOSFERA_texto_01']})]
    remove_tags = [dict(attrs={'class':['options','subcoluna']})]

    feeds = [
        (u'Geral', u'http://feeds.feedburner.com/PublicoUltimaHora'),
        (u'Internacional', u'http://www.publico.clix.pt/rss.ashx?idCanal=11'),
        (u'Pol\xedtica', u'http://www.publico.clix.pt/rss.ashx?idCanal=12'),
        (u'Ci\xcencias', u'http://www.publico.clix.pt/rss.ashx?idCanal=13'),
        (u'Desporto', u'http://desporto.publico.pt/rss.ashx'),
        (u'Economia', u'http://www.publico.clix.pt/rss.ashx?idCanal=57'),
        (u'Educa\xe7\xe3o', u'http://www.publico.clix.pt/rss.ashx?idCanal=58'),
        (u'Local', u'http://www.publico.clix.pt/rss.ashx?idCanal=59'),
        (u'Media e Tecnologia', u'http://www.publico.clix.pt/rss.ashx?idCanal=61'),
        (u'Sociedade', u'http://www.publico.clix.pt/rss.ashx?idCanal=62')
        (u'Geral', u'http://feeds.feedburner.com/publicoRSS'),
        (u'Mundo', u'http://feeds.feedburner.com/PublicoMundo'),
        (u'Pol\xedtica', u'http://feeds.feedburner.com/PublicoPolitica'),
        (u'Economia', u'http://feeds.feedburner.com/PublicoEconomia'),
        (u'Desporto', u'http://feeds.feedburner.com/PublicoDesporto'),
        (u'Sociedade', u'http://feeds.feedburner.com/PublicoSociedade'),
        (u'Educa\xe7\xe3o', u'http://feeds.feedburner.com/PublicoEducacao'),
        (u'Ci\xeancias', u'http://feeds.feedburner.com/PublicoCiencias'),
        (u'Ecosfera', u'http://feeds.feedburner.com/PublicoEcosfera'),
        (u'Cultura', u'http://feeds.feedburner.com/PublicoCultura'),
        (u'Local', u'http://feeds.feedburner.com/PublicoLocal'),
        (u'Tecnologia', u'http://feeds.feedburner.com/PublicoTecnologia')
    ]
    remove_tags = [dict(name='script'), dict(id='linhaTitulosHeader')]
    keep_only_tags = [dict(name='div')]

    def print_version(self,url):
        s=re.findall("id=[0-9]+",url);
        return "http://ww2.publico.clix.pt/print.aspx?"+s[0]
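The print_version method kept in this recipe pulls the numeric article id out of the page URL with a regex and rebuilds the printer-friendly URL; a tiny illustration with an invented id:

    import re
    # the id below is invented for illustration
    s = re.findall("id=[0-9]+", 'http://www.publico.clix.pt/noticia.aspx?id=1234567')
    print "http://ww2.publico.clix.pt/print.aspx?" + s[0]
    # -> http://ww2.publico.clix.pt/print.aspx?id=1234567
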
@@ -10,8 +10,10 @@ from calibre.web.feeds.news import BasicNewsRecipe
class Slashdot(BasicNewsRecipe):
    title = u'Slashdot.org'
    description = '''Tech news. WARNING: This recipe downloads a lot
of content and can result in your IP being banned from slashdot.org'''
of content and may result in your IP being banned from slashdot.org'''
    oldest_article = 7
    simultaneous_downloads = 1
    delay = 3
    max_articles_per_feed = 100
    language = 'en'

@@ -3,9 +3,8 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'


import string
from calibre.web.feeds.news import BasicNewsRecipe
from calibre import strftime

# http://online.wsj.com/page/us_in_todays_paper.html

@@ -72,56 +71,61 @@ class WallStreetJournal(BasicNewsRecipe):
    def parse_index(self):
        soup = self.wsj_get_index()

        year = strftime('%Y')
        for x in soup.findAll('td', height='25', attrs={'class':'b14'}):
            txt = self.tag_to_string(x).strip()
            txt = txt.replace(u'\xa0', ' ')
            txt = txt.encode('ascii', 'ignore')
            if year in txt:
                self.timefmt = ' [%s]'%txt
                break
        date = soup.find('span', attrs={'class':'date-date'})
        if date is not None:
            self.timefmt = ' [%s]'%self.tag_to_string(date)

        left_column = soup.find(
            text=lambda t: 'begin ITP Left Column' in str(t))

        table = left_column.findNext('table')

        current_section = None
        current_articles = []
        feeds = []
        for x in table.findAllNext(True):
            if x.name == 'td' and x.get('class', None) == 'b13':
                if current_articles and current_section:
                    feeds.append((current_section, current_articles))
                current_section = self.tag_to_string(x.a).strip()
                current_articles = []
                self.log('\tProcessing section:', current_section)
            if current_section is not None and x.name == 'a' and \
                    x.get('class', None) == 'bold80':
                title = self.tag_to_string(x)
                url = x.get('href', False)
                if not url or not title:
                    continue
                url = url.partition('#')[0]
        sections = {}
        sec_order = []
        for a in soup.findAll('a', attrs={'class':'mjLinkItem'}, href=True):
            container = a.findParent(['li', 'div'])
            if container.name == 'div':
                section = 'Page One'
            else:
                section = ''
                sec = container.find('a', href=lambda x: x and '/search?' in x)
                if sec is not None:
                    section = self.tag_to_string(sec).strip()
                if not section:
                    h = container.find(['h1','h2','h3','h4','h5','h6'])
                    section = self.tag_to_string(h)
            section = string.capitalize(section).replace('U.s.', 'U.S.')
            if section not in sections:
                sections[section] = []
                sec_order.append(section)
            meta = a.find(attrs={'class':'meta_sectionName'})
            if meta is not None:
                meta.extract()
            title = self.tag_to_string(a).strip() + ' [%s]'%self.tag_to_string(meta)
            url = 'http://online.wsj.com'+a['href']
            desc = ''
                d = x.findNextSibling(True)
                if d is not None and d.get('class', None) == 'arialResize':
                    desc = self.tag_to_string(d)
                    desc = desc.partition(u'\u2022')[0]
                self.log('\t\tFound article:', title)
                self.log('\t\t\t', url)
                if url.startswith('/'):
                    url = 'http://online.wsj.com'+url
                if desc:
                    self.log('\t\t\t', desc)
                current_articles.append({'title': title, 'url':url,
            p = container.find('p')
            if p is not None:
                desc = self.tag_to_string(p)

            sections[section].append({'title':title, 'url':url,
                'description':desc, 'date':''})

        if current_articles and current_section:
            feeds.append((current_section, current_articles))
            self.log('Found article:', title)

            a.extract()
            for a in container.findAll('a', href=lambda x: x and '/article/'
                    in x):
                url = a['href']
                if not url.startswith('http:'):
                    url = 'http://online.wsj.com'+url
                title = self.tag_to_string(a).strip()
                if not title or title.startswith('['): continue
                if title:
                    sections[section].append({'title':self.tag_to_string(a),
                        'url':url, 'description':'', 'date':''})
                    self.log('\tFound related:', title)


        feeds = [(sec, sections[sec]) for sec in sec_order]
        return feeds


    def cleanup(self):
        self.browser.open('http://online.wsj.com/logout?url=http://online.wsj.com')

@@ -457,9 +457,12 @@ from calibre.devices.misc import PALMPRE, AVANT
from calibre.devices.folder_device.driver import FOLDER_DEVICE_FOR_CONFIG
from calibre.devices.kobo.driver import KOBO

from calibre.ebooks.metadata.fetch import GoogleBooks, ISBNDB, Amazon
from calibre.ebooks.metadata.fetch import GoogleBooks, ISBNDB, Amazon, \
    LibraryThing
from calibre.ebooks.metadata.douban import DoubanBooks
from calibre.library.catalog import CSV_XML, EPUB_MOBI
plugins = [HTML2ZIP, PML2PMLZ, ArchiveExtract, GoogleBooks, ISBNDB, Amazon, CSV_XML, EPUB_MOBI]
plugins = [HTML2ZIP, PML2PMLZ, ArchiveExtract, GoogleBooks, ISBNDB, Amazon,
    LibraryThing, DoubanBooks, CSV_XML, EPUB_MOBI]
plugins += [
    ComicInput,
    EPUBInput,

@@ -21,7 +21,7 @@ from calibre.utils.config import make_config_dir, Config, ConfigProxy, \
platform = 'linux'
if iswindows:
    platform = 'windows'
if isosx:
elif isosx:
    platform = 'osx'

from zipfile import ZipFile

@ -32,19 +32,25 @@ def _config():
|
||||
c.add_opt('filetype_mapping', default={}, help=_('Mapping for filetype plugins'))
|
||||
c.add_opt('plugin_customization', default={}, help=_('Local plugin customization'))
|
||||
c.add_opt('disabled_plugins', default=set([]), help=_('Disabled plugins'))
|
||||
c.add_opt('enabled_plugins', default=set([]), help=_('Enabled plugins'))
|
||||
|
||||
return ConfigProxy(c)
|
||||
|
||||
config = _config()
|
||||
|
||||
|
||||
class InvalidPlugin(ValueError):
|
||||
pass
|
||||
|
||||
class PluginNotFound(ValueError):
|
||||
pass
|
||||
|
||||
def load_plugin(path_to_zip_file):
|
||||
def find_plugin(name):
|
||||
for plugin in _initialized_plugins:
|
||||
if plugin.name == name:
|
||||
return plugin
|
||||
|
||||
|
||||
def load_plugin(path_to_zip_file): # {{{
|
||||
'''
|
||||
Load plugin from zip file or raise InvalidPlugin error
|
||||
|
||||
@ -76,11 +82,120 @@ def load_plugin(path_to_zip_file):
|
||||
|
||||
raise InvalidPlugin(_('No valid plugin found in ')+path_to_zip_file)
|
||||
|
||||
_initialized_plugins = []
|
||||
# }}}
|
||||
|
||||
# Enable/disable plugins {{{
|
||||
|
||||
def disable_plugin(plugin_or_name):
|
||||
x = getattr(plugin_or_name, 'name', plugin_or_name)
|
||||
plugin = find_plugin(x)
|
||||
if not plugin.can_be_disabled:
|
||||
raise ValueError('Plugin %s cannot be disabled'%x)
|
||||
dp = config['disabled_plugins']
|
||||
dp.add(x)
|
||||
config['disabled_plugins'] = dp
|
||||
ep = config['enabled_plugins']
|
||||
if x in ep:
|
||||
ep.remove(x)
|
||||
config['enabled_plugins'] = ep
|
||||
|
||||
def enable_plugin(plugin_or_name):
|
||||
x = getattr(plugin_or_name, 'name', plugin_or_name)
|
||||
dp = config['disabled_plugins']
|
||||
if x in dp:
|
||||
dp.remove(x)
|
||||
config['disabled_plugins'] = dp
|
||||
ep = config['enabled_plugins']
|
||||
ep.add(x)
|
||||
config['enabled_plugins'] = ep
|
||||
|
||||
default_disabled_plugins = set([
|
||||
'Douban Books',
|
||||
])
|
||||
|
||||
def is_disabled(plugin):
|
||||
if plugin.name in config['enabled_plugins']: return False
|
||||
return plugin.name in config['disabled_plugins'] or \
|
||||
plugin.name in default_disabled_plugins
|
||||
# }}}
|
||||
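The enable_plugin and disable_plugin helpers in the block above only move a plugin's name between the persisted 'disabled_plugins' and 'enabled_plugins' sets, with enabled_plugins overriding the new default_disabled_plugins list in is_disabled. A usage sketch (assuming this hunk belongs to calibre/customize/ui.py, which the diff itself does not state):

    # illustrative only
    from calibre.customize.ui import disable_plugin, enable_plugin, find_plugin, is_disabled

    disable_plugin('Douban Books')                   # name added to config['disabled_plugins']
    print is_disabled(find_plugin('Douban Books'))   # True
    enable_plugin('Douban Books')                    # name added to config['enabled_plugins']
    print is_disabled(find_plugin('Douban Books'))   # False, enabled_plugins wins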
|
||||
# File type plugins {{{
|
||||
|
||||
_on_import = {}
|
||||
_on_preprocess = {}
|
||||
_on_postprocess = {}
|
||||
|
||||
def reread_filetype_plugins():
|
||||
global _on_import
|
||||
global _on_preprocess
|
||||
global _on_postprocess
|
||||
_on_import = {}
|
||||
_on_preprocess = {}
|
||||
_on_postprocess = {}
|
||||
|
||||
for plugin in _initialized_plugins:
|
||||
if isinstance(plugin, FileTypePlugin):
|
||||
for ft in plugin.file_types:
|
||||
if plugin.on_import:
|
||||
if not _on_import.has_key(ft):
|
||||
_on_import[ft] = []
|
||||
_on_import[ft].append(plugin)
|
||||
if plugin.on_preprocess:
|
||||
if not _on_preprocess.has_key(ft):
|
||||
_on_preprocess[ft] = []
|
||||
_on_preprocess[ft].append(plugin)
|
||||
if plugin.on_postprocess:
|
||||
if not _on_postprocess.has_key(ft):
|
||||
_on_postprocess[ft] = []
|
||||
_on_postprocess[ft].append(plugin)
|
||||
|
||||
|
||||
def _run_filetype_plugins(path_to_file, ft=None, occasion='preprocess'):
|
||||
occasion = {'import':_on_import, 'preprocess':_on_preprocess,
|
||||
'postprocess':_on_postprocess}[occasion]
|
||||
customization = config['plugin_customization']
|
||||
if ft is None:
|
||||
ft = os.path.splitext(path_to_file)[-1].lower().replace('.', '')
|
||||
nfp = path_to_file
|
||||
for plugin in occasion.get(ft, []):
|
||||
if is_disabled(plugin):
|
||||
continue
|
||||
plugin.site_customization = customization.get(plugin.name, '')
|
||||
with plugin:
|
||||
try:
|
||||
nfp = plugin.run(path_to_file)
|
||||
if not nfp:
|
||||
nfp = path_to_file
|
||||
except:
|
||||
print 'Running file type plugin %s failed with traceback:'%plugin.name
|
||||
traceback.print_exc()
|
||||
x = lambda j : os.path.normpath(os.path.normcase(j))
|
||||
if occasion == 'postprocess' and x(nfp) != x(path_to_file):
|
||||
shutil.copyfile(nfp, path_to_file)
|
||||
nfp = path_to_file
|
||||
return nfp
|
||||
|
||||
run_plugins_on_import = functools.partial(_run_filetype_plugins,
|
||||
occasion='import')
|
||||
run_plugins_on_preprocess = functools.partial(_run_filetype_plugins,
|
||||
occasion='preprocess')
|
||||
run_plugins_on_postprocess = functools.partial(_run_filetype_plugins,
|
||||
occasion='postprocess')
|
||||
# }}}
|
||||
|
||||
# Plugin customization {{{
|
||||
def customize_plugin(plugin, custom):
|
||||
d = config['plugin_customization']
|
||||
d[plugin.name] = custom.strip()
|
||||
config['plugin_customization'] = d
|
||||
|
||||
def plugin_customization(plugin):
|
||||
return config['plugin_customization'].get(plugin.name, '')
|
||||
|
||||
# }}}
|
||||
|
||||
|
||||
# Input/Output profiles {{{
|
||||
def input_profiles():
|
||||
for plugin in _initialized_plugins:
|
||||
if isinstance(plugin, InputProfile):
|
||||
@ -90,7 +205,9 @@ def output_profiles():
|
||||
for plugin in _initialized_plugins:
|
||||
if isinstance(plugin, OutputProfile):
|
||||
yield plugin
|
||||
# }}}
|
||||
|
||||
# Metadata sources {{{
|
||||
def metadata_sources(metadata_type='basic', customize=True, isbndb_key=None):
|
||||
for plugin in _initialized_plugins:
|
||||
if isinstance(plugin, MetadataSource) and \
|
||||
@ -117,31 +234,9 @@ def migrate_isbndb_key():
|
||||
if key:
|
||||
prefs.set('isbndb_com_key', '')
|
||||
set_isbndb_key(key)
|
||||
# }}}
|
||||
|
||||
def reread_filetype_plugins():
|
||||
global _on_import
|
||||
global _on_preprocess
|
||||
global _on_postprocess
|
||||
_on_import = {}
|
||||
_on_preprocess = {}
|
||||
_on_postprocess = {}
|
||||
|
||||
for plugin in _initialized_plugins:
|
||||
if isinstance(plugin, FileTypePlugin):
|
||||
for ft in plugin.file_types:
|
||||
if plugin.on_import:
|
||||
if not _on_import.has_key(ft):
|
||||
_on_import[ft] = []
|
||||
_on_import[ft].append(plugin)
|
||||
if plugin.on_preprocess:
|
||||
if not _on_preprocess.has_key(ft):
|
||||
_on_preprocess[ft] = []
|
||||
_on_preprocess[ft].append(plugin)
|
||||
if plugin.on_postprocess:
|
||||
if not _on_postprocess.has_key(ft):
|
||||
_on_postprocess[ft] = []
|
||||
_on_postprocess[ft].append(plugin)
|
||||
|
||||
# Metadata read/write {{{
|
||||
_metadata_readers = {}
|
||||
_metadata_writers = {}
|
||||
def reread_metadata_plugins():
|
||||
@ -233,51 +328,9 @@ def set_file_type_metadata(stream, mi, ftype):
|
||||
print 'Failed to set metadata for', repr(getattr(mi, 'title', ''))
|
||||
traceback.print_exc()
|
||||
|
||||
# }}}
|
||||
|
||||
def _run_filetype_plugins(path_to_file, ft=None, occasion='preprocess'):
|
||||
occasion = {'import':_on_import, 'preprocess':_on_preprocess,
|
||||
'postprocess':_on_postprocess}[occasion]
|
||||
customization = config['plugin_customization']
|
||||
if ft is None:
|
||||
ft = os.path.splitext(path_to_file)[-1].lower().replace('.', '')
|
||||
nfp = path_to_file
|
||||
for plugin in occasion.get(ft, []):
|
||||
if is_disabled(plugin):
|
||||
continue
|
||||
plugin.site_customization = customization.get(plugin.name, '')
|
||||
with plugin:
|
||||
try:
|
||||
nfp = plugin.run(path_to_file)
|
||||
if not nfp:
|
||||
nfp = path_to_file
|
||||
except:
|
||||
print 'Running file type plugin %s failed with traceback:'%plugin.name
|
||||
traceback.print_exc()
|
||||
x = lambda j : os.path.normpath(os.path.normcase(j))
|
||||
if occasion == 'postprocess' and x(nfp) != x(path_to_file):
|
||||
shutil.copyfile(nfp, path_to_file)
|
||||
nfp = path_to_file
|
||||
return nfp
|
||||
|
||||
run_plugins_on_import = functools.partial(_run_filetype_plugins,
|
||||
occasion='import')
|
||||
run_plugins_on_preprocess = functools.partial(_run_filetype_plugins,
|
||||
occasion='preprocess')
|
||||
run_plugins_on_postprocess = functools.partial(_run_filetype_plugins,
|
||||
occasion='postprocess')
|
||||
|
||||
|
||||
def initialize_plugin(plugin, path_to_zip_file):
|
||||
try:
|
||||
p = plugin(path_to_zip_file)
|
||||
p.initialize()
|
||||
return p
|
||||
except Exception:
|
||||
print 'Failed to initialize plugin:', plugin.name, plugin.version
|
||||
tb = traceback.format_exc()
|
||||
raise InvalidPlugin((_('Initialization of plugin %s failed with traceback:')
|
||||
%tb) + '\n'+tb)
|
||||
|
||||
# Add/remove plugins {{{
|
||||
|
||||
def add_plugin(path_to_zip_file):
|
||||
make_config_dir()
|
||||
@ -307,14 +360,9 @@ def remove_plugin(plugin_or_name):
|
||||
initialize_plugins()
|
||||
return removed
|
||||
|
||||
def is_disabled(plugin):
|
||||
return plugin.name in config['disabled_plugins']
|
||||
|
||||
def find_plugin(name):
|
||||
for plugin in _initialized_plugins:
|
||||
if plugin.name == name:
|
||||
return plugin
|
||||
# }}}
|
||||
|
||||
# Input/Output format plugins {{{
|
||||
|
||||
def input_format_plugins():
|
||||
for plugin in _initialized_plugins:
|
||||
@ -364,6 +412,9 @@ def available_output_formats():
|
||||
formats.add(plugin.file_type)
|
||||
return formats
|
||||
|
||||
# }}}
|
||||
|
||||
# Catalog plugins {{{
|
||||
|
||||
def catalog_plugins():
|
||||
for plugin in _initialized_plugins:
|
||||
@ -383,27 +434,32 @@ def plugin_for_catalog_format(fmt):
|
||||
if fmt.lower() in plugin.file_types:
|
||||
return plugin
|
||||
|
||||
def device_plugins():
|
||||
# }}}
|
||||
|
||||
def device_plugins(): # {{{
|
||||
for plugin in _initialized_plugins:
|
||||
if isinstance(plugin, DevicePlugin):
|
||||
if not is_disabled(plugin):
|
||||
if platform in plugin.supported_platforms:
|
||||
yield plugin
|
||||
# }}}
|
||||
|
||||
def disable_plugin(plugin_or_name):
|
||||
x = getattr(plugin_or_name, 'name', plugin_or_name)
|
||||
plugin = find_plugin(x)
|
||||
if not plugin.can_be_disabled:
|
||||
raise ValueError('Plugin %s cannot be disabled'%x)
|
||||
dp = config['disabled_plugins']
|
||||
dp.add(x)
|
||||
config['disabled_plugins'] = dp
|
||||
|
||||
def enable_plugin(plugin_or_name):
|
||||
x = getattr(plugin_or_name, 'name', plugin_or_name)
|
||||
dp = config['disabled_plugins']
|
||||
if x in dp:
|
||||
dp.remove(x)
|
||||
config['disabled_plugins'] = dp
|
||||
# Initialize plugins {{{
|
||||
|
||||
_initialized_plugins = []
|
||||
|
||||
def initialize_plugin(plugin, path_to_zip_file):
|
||||
try:
|
||||
p = plugin(path_to_zip_file)
|
||||
p.initialize()
|
||||
return p
|
||||
except Exception:
|
||||
print 'Failed to initialize plugin:', plugin.name, plugin.version
|
||||
tb = traceback.format_exc()
|
||||
raise InvalidPlugin((_('Initialization of plugin %s failed with traceback:')
|
||||
%tb) + '\n'+tb)
|
||||
|
||||
|
||||
def initialize_plugins():
|
||||
global _initialized_plugins
|
||||
@ -425,10 +481,14 @@ def initialize_plugins():
|
||||
|
||||
initialize_plugins()
|
||||
|
||||
def intialized_plugins():
|
||||
def initialized_plugins():
|
||||
for plugin in _initialized_plugins:
|
||||
yield plugin
|
||||
|
||||
# }}}
|
||||
|
||||
# CLI {{{
|
||||
|
||||
def option_parser():
|
||||
parser = OptionParser(usage=_('''\
|
||||
%prog options
|
||||
@ -449,17 +509,6 @@ def option_parser():
|
||||
help=_('Disable the named plugin'))
|
||||
return parser
|
||||
|
||||
def initialized_plugins():
|
||||
return _initialized_plugins
|
||||
|
||||
def customize_plugin(plugin, custom):
|
||||
d = config['plugin_customization']
|
||||
d[plugin.name] = custom.strip()
|
||||
config['plugin_customization'] = d
|
||||
|
||||
def plugin_customization(plugin):
|
||||
return config['plugin_customization'].get(plugin.name, '')
|
||||
|
||||
def main(args=sys.argv):
|
||||
parser = option_parser()
|
||||
if len(args) < 2:
|
||||
@ -504,3 +553,5 @@ def main(args=sys.argv):
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main())
|
||||
# }}}
|
||||
|
||||
|
@ -81,9 +81,6 @@ class HANLINV3(USBMS):
|
||||
return drives
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
class HANLINV5(HANLINV3):
|
||||
name = 'Hanlin V5 driver'
|
||||
gui_name = 'Hanlin V5'
|
||||
@ -120,8 +117,22 @@ class BOOX(HANLINV3):
|
||||
MAIN_MEMORY_VOLUME_LABEL = 'BOOX Internal Memory'
|
||||
STORAGE_CARD_VOLUME_LABEL = 'BOOX Storage Card'
|
||||
|
||||
EBOOK_DIR_MAIN = 'MyBooks'
|
||||
EBOOK_DIR_CARD_A = 'MyBooks'
|
||||
EBOOK_DIR_MAIN = ['MyBooks']
|
||||
EXTRA_CUSTOMIZATION_MESSAGE = _('Comma separated list of directories to '
|
||||
'send e-books to on the device. The first one that exists will '
|
||||
'be used.')
|
||||
EXTRA_CUSTOMIZATION_DEFAULT = ', '.join(EBOOK_DIR_MAIN)
|
||||
|
||||
# EBOOK_DIR_CARD_A = 'MyBooks' ## Am quite sure we need this.
|
||||
|
||||
def post_open_callback(self):
|
||||
opts = self.settings()
|
||||
dirs = opts.extra_customization
|
||||
if not dirs:
|
||||
dirs = self.EBOOK_DIR_MAIN
|
||||
else:
|
||||
dirs = [x.strip() for x in dirs.split(',')]
|
||||
self.EBOOK_DIR_MAIN = dirs
|
||||
|
||||
def windows_sort_drives(self, drives):
|
||||
return drives
|
||||
|
@@ -28,10 +28,14 @@ def authors_to_string(authors):
    else:
        return ''

_bracket_pat = re.compile(r'[\[({].*?[})\]]')
def author_to_author_sort(author):
    if not author:
        return ''
    method = tweaks['author_sort_copy_method']
    if method == 'copy' or (method == 'comma' and ',' in author):
        return author
    author = _bracket_pat.sub('', author).strip()
    tokens = author.split()
    tokens = tokens[-1:] + tokens[:-1]
    if len(tokens) > 1:
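Given the steps visible above (bracketed text stripped, the name split into tokens, the last token rotated to the front), a plain two-token name ends up family name first. The examples below are invented and assume the tweak is neither 'copy' nor a matching 'comma' case; the final comma-joined string is produced by the code that follows this truncated hunk:

    # illustrative only
    # 'Darko Miletic'     -> tokens ['Darko', 'Miletic'] -> reordered ['Miletic', 'Darko']
    # 'Jane Doe (editor)' -> '(editor)' removed by _bracket_pat, then reordered to ['Doe', 'Jane']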
src/calibre/ebooks/metadata/douban.py (new file, 258 lines)
@@ -0,0 +1,258 @@
|
||||
from __future__ import with_statement
|
||||
__license__ = 'GPL 3'
|
||||
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>; 2010, Li Fanxi <lifanxi@freemindworld.com>'
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
||||
import sys, textwrap
|
||||
import traceback
|
||||
from urllib import urlencode
|
||||
from functools import partial
|
||||
from lxml import etree
|
||||
|
||||
from calibre import browser, preferred_encoding
|
||||
from calibre.ebooks.metadata import MetaInformation
|
||||
from calibre.utils.config import OptionParser
|
||||
from calibre.ebooks.metadata.fetch import MetadataSource
|
||||
from calibre.utils.date import parse_date, utcnow
|
||||
|
||||
DOUBAN_API_KEY = None
|
||||
NAMESPACES = {
|
||||
'openSearch':'http://a9.com/-/spec/opensearchrss/1.0/',
|
||||
'atom' : 'http://www.w3.org/2005/Atom',
|
||||
'db': 'http://www.douban.com/xmlns/'
|
||||
}
|
||||
XPath = partial(etree.XPath, namespaces=NAMESPACES)
|
||||
total_results = XPath('//openSearch:totalResults')
|
||||
start_index = XPath('//openSearch:startIndex')
|
||||
items_per_page = XPath('//openSearch:itemsPerPage')
|
||||
entry = XPath('//atom:entry')
|
||||
entry_id = XPath('descendant::atom:id')
|
||||
title = XPath('descendant::atom:title')
|
||||
description = XPath('descendant::atom:summary')
|
||||
publisher = XPath("descendant::db:attribute[@name='publisher']")
|
||||
isbn = XPath("descendant::db:attribute[@name='isbn13']")
|
||||
date = XPath("descendant::db:attribute[@name='pubdate']")
|
||||
creator = XPath("descendant::db:attribute[@name='author']")
|
||||
tag = XPath("descendant::db:tag")
|
||||
|
||||
class DoubanBooks(MetadataSource):
|
||||
|
||||
name = 'Douban Books'
|
||||
description = _('Downloads metadata from Douban.com')
|
||||
supported_platforms = ['windows', 'osx', 'linux'] # Platforms this plugin will run on
|
||||
author = 'Li Fanxi <lifanxi@freemindworld.com>' # The author of this plugin
|
||||
version = (1, 0, 0) # The version number of this plugin
|
||||
|
||||
def fetch(self):
|
||||
try:
|
||||
self.results = search(self.title, self.book_author, self.publisher,
|
||||
self.isbn, max_results=10,
|
||||
verbose=self.verbose)
|
||||
except Exception, e:
|
||||
self.exception = e
|
||||
self.tb = traceback.format_exc()
|
||||
|
||||
def report(verbose):
|
||||
if verbose:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
class Query(object):
|
||||
|
||||
SEARCH_URL = 'http://api.douban.com/book/subjects?'
|
||||
ISBN_URL = 'http://api.douban.com/book/subject/isbn/'
|
||||
|
||||
type = "search"
|
||||
|
||||
def __init__(self, title=None, author=None, publisher=None, isbn=None,
|
||||
max_results=20, start_index=1):
|
||||
assert not(title is None and author is None and publisher is None and \
|
||||
isbn is None)
|
||||
assert (int(max_results) < 21)
|
||||
q = ''
|
||||
if isbn is not None:
|
||||
q = isbn
|
||||
self.type = 'isbn'
|
||||
else:
|
||||
def build_term(parts):
|
||||
return ' '.join(x for x in parts)
|
||||
if title is not None:
|
||||
q += build_term(title.split())
|
||||
if author is not None:
|
||||
q += (' ' if q else '') + build_term(author.split())
|
||||
if publisher is not None:
|
||||
q += (' ' if q else '') + build_term(publisher.split())
|
||||
self.type = 'search'
|
||||
|
||||
if isinstance(q, unicode):
|
||||
q = q.encode('utf-8')
|
||||
|
||||
if self.type == "isbn":
|
||||
self.url = self.ISBN_URL + q
|
||||
if DOUBAN_API_KEY is not None:
|
||||
self.url = self.url + "?apikey=" + DOUBAN_API_KEY
|
||||
else:
|
||||
self.url = self.SEARCH_URL+urlencode({
|
||||
'q':q,
|
||||
'max-results':max_results,
|
||||
'start-index':start_index,
|
||||
})
|
||||
if DOUBAN_API_KEY is not None:
|
||||
self.url = self.url + "&apikey=" + DOUBAN_API_KEY
|
||||
|
||||
def __call__(self, browser, verbose):
|
||||
if verbose:
|
||||
print 'Query:', self.url
|
||||
if self.type == "search":
|
||||
feed = etree.fromstring(browser.open(self.url).read())
|
||||
total = int(total_results(feed)[0].text)
|
||||
start = int(start_index(feed)[0].text)
|
||||
entries = entry(feed)
|
||||
new_start = start + len(entries)
|
||||
if new_start > total:
|
||||
new_start = 0
|
||||
return entries, new_start
|
||||
elif self.type == "isbn":
|
||||
feed = etree.fromstring(browser.open(self.url).read())
|
||||
entries = entry(feed)
|
||||
return entries, 0
|
||||
|
||||
class ResultList(list):
|
||||
|
||||
def get_description(self, entry, verbose):
|
||||
try:
|
||||
desc = description(entry)
|
||||
if desc:
|
||||
return 'SUMMARY:\n'+desc[0].text
|
||||
except:
|
||||
report(verbose)
|
||||
|
||||
def get_title(self, entry):
|
||||
candidates = [x.text for x in title(entry)]
|
||||
return ': '.join(candidates)
|
||||
|
||||
def get_authors(self, entry):
|
||||
m = creator(entry)
|
||||
if not m:
|
||||
m = []
|
||||
m = [x.text for x in m]
|
||||
return m
|
||||
|
||||
def get_tags(self, entry, verbose):
|
||||
try:
|
||||
btags = [x.attrib["name"] for x in tag(entry)]
|
||||
tags = []
|
||||
for t in btags:
|
||||
tags.extend([y.strip() for y in t.split('/')])
|
||||
tags = list(sorted(list(set(tags))))
|
||||
except:
|
||||
report(verbose)
|
||||
tags = []
|
||||
return [x.replace(',', ';') for x in tags]
|
||||
|
||||
def get_publisher(self, entry, verbose):
|
||||
try:
|
||||
pub = publisher(entry)[0].text
|
||||
except:
|
||||
pub = None
|
||||
return pub
|
||||
|
||||
def get_isbn(self, entry, verbose):
|
||||
try:
|
||||
isbn13 = isbn(entry)[0].text
|
||||
except Exception:
|
||||
isbn13 = None
|
||||
return isbn13
|
||||
|
||||
def get_date(self, entry, verbose):
|
||||
try:
|
||||
d = date(entry)
|
||||
if d:
|
||||
default = utcnow().replace(day=15)
|
||||
d = parse_date(d[0].text, assume_utc=True, default=default)
|
||||
else:
|
||||
d = None
|
||||
except:
|
||||
report(verbose)
|
||||
d = None
|
||||
return d
|
||||
|
||||
def populate(self, entries, browser, verbose=False):
|
||||
for x in entries:
|
||||
try:
|
||||
id_url = entry_id(x)[0].text
|
||||
title = self.get_title(x)
|
||||
except:
|
||||
report(verbose)
|
||||
mi = MetaInformation(title, self.get_authors(x))
|
||||
try:
|
||||
if DOUBAN_API_KEY is not None:
|
||||
id_url = id_url + "?apikey=" + DOUBAN_API_KEY
|
||||
raw = browser.open(id_url).read()
|
||||
feed = etree.fromstring(raw)
|
||||
x = entry(feed)[0]
|
||||
except Exception, e:
|
||||
if verbose:
|
||||
print 'Failed to get all details for an entry'
|
||||
print e
|
||||
mi.comments = self.get_description(x, verbose)
|
||||
mi.tags = self.get_tags(x, verbose)
|
||||
mi.isbn = self.get_isbn(x, verbose)
|
||||
mi.publisher = self.get_publisher(x, verbose)
|
||||
mi.pubdate = self.get_date(x, verbose)
|
||||
self.append(mi)
|
||||
|
||||
def search(title=None, author=None, publisher=None, isbn=None,
|
||||
verbose=False, max_results=40):
|
||||
br = browser()
|
||||
start, entries = 1, []
|
||||
while start > 0 and len(entries) <= max_results:
|
||||
new, start = Query(title=title, author=author, publisher=publisher,
|
||||
isbn=isbn, max_results=max_results, start_index=start)(br, verbose)
|
||||
if not new:
|
||||
break
|
||||
entries.extend(new)
|
||||
|
||||
entries = entries[:max_results]
|
||||
|
||||
ans = ResultList()
|
||||
ans.populate(entries, br, verbose)
|
||||
return ans
|
||||
|
||||
def option_parser():
|
||||
parser = OptionParser(textwrap.dedent(
|
||||
'''\
|
||||
%prog [options]
|
||||
|
||||
Fetch book metadata from Douban. You must specify one of title, author,
|
||||
publisher or ISBN. If you specify ISBN the others are ignored. Will
|
||||
fetch a maximum of 100 matches, so you should make your query as
|
||||
specific as possible.
|
||||
'''
|
||||
))
|
||||
parser.add_option('-t', '--title', help='Book title')
|
||||
parser.add_option('-a', '--author', help='Book author(s)')
|
||||
parser.add_option('-p', '--publisher', help='Book publisher')
|
||||
parser.add_option('-i', '--isbn', help='Book ISBN')
|
||||
parser.add_option('-m', '--max-results', default=10,
|
||||
help='Maximum number of results to fetch')
|
||||
parser.add_option('-v', '--verbose', default=0, action='count',
|
||||
help='Be more verbose about errors')
|
||||
return parser
|
||||
|
||||
def main(args=sys.argv):
|
||||
parser = option_parser()
|
||||
opts, args = parser.parse_args(args)
|
||||
try:
|
||||
results = search(opts.title, opts.author, opts.publisher, opts.isbn,
|
||||
verbose=opts.verbose, max_results=int(opts.max_results))
|
||||
except AssertionError:
|
||||
report(True)
|
||||
parser.print_help()
|
||||
return 1
|
||||
for result in results:
|
||||
print unicode(result).encode(preferred_encoding)
|
||||
print
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main())
|
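Since douban.py ends with an option_parser()/main() pair, it can also be run as a small stand-alone metadata-lookup tool; a usage sketch with an invented title (the exact invocation depends on how the calibre source tree is made importable):

    # flags come from option_parser() above; the title and author are just examples
    #   python -m calibre.ebooks.metadata.douban -t "Some Title" -a "Some Author" -m 5 -v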
@ -198,6 +198,38 @@ class Amazon(MetadataSource):
|
||||
self.exception = e
|
||||
self.tb = traceback.format_exc()
|
||||
|
||||
class LibraryThing(MetadataSource):
|
||||
|
||||
name = 'LibraryThing'
|
||||
metadata_type = 'social'
|
||||
description = _('Downloads series information from librarything.com')
|
||||
|
||||
def fetch(self):
|
||||
if not self.isbn:
|
||||
return
|
||||
from calibre import browser
|
||||
from calibre.ebooks.metadata import MetaInformation
|
||||
import json
|
||||
br = browser()
|
||||
try:
|
||||
raw = br.open(
|
||||
'http://status.calibre-ebook.com/library_thing/metadata/'+self.isbn
|
||||
).read()
|
||||
data = json.loads(raw)
|
||||
if not data:
|
||||
return
|
||||
if 'error' in data:
|
||||
raise Exception(data['error'])
|
||||
if 'series' in data and 'series_index' in data:
|
||||
mi = MetaInformation(self.title, [])
|
||||
mi.series = data['series']
|
||||
mi.series_index = data['series_index']
|
||||
self.results = mi
|
||||
except Exception, e:
|
||||
self.exception = e
|
||||
self.tb = traceback.format_exc()
|
||||
|
||||
|
||||
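The LibraryThing fetcher added above reads a small JSON document from the status.calibre-ebook.com endpoint; judging only from the keys the code accesses, the two shapes it handles would look roughly like this (values invented):

    # success: series information is copied into a MetaInformation object
    data = {'series': 'Some Series Name', 'series_index': 1}
    # failure: the message is re-raised as an exception
    data = {'error': 'no series data found for this ISBN'}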
def result_index(source, result):
|
||||
if not result.isbn:
|
||||
return -1
|
||||
@ -266,7 +298,7 @@ def get_social_metadata(mi, verbose=0):
|
||||
with MetadataSources(fetchers) as manager:
|
||||
manager(mi.title, mi.authors, mi.publisher, mi.isbn, verbose)
|
||||
manager.join()
|
||||
ratings, tags, comments = [], set([]), set([])
|
||||
ratings, tags, comments, series, series_index = [], set([]), set([]), None, None
|
||||
for fetcher in fetchers:
|
||||
if fetcher.results:
|
||||
dmi = fetcher.results
|
||||
@ -279,6 +311,10 @@ def get_social_metadata(mi, verbose=0):
|
||||
mi.pubdate = dmi.pubdate
|
||||
if dmi.comments:
|
||||
comments.add(dmi.comments)
|
||||
if dmi.series is not None:
|
||||
series = dmi.series
|
||||
if dmi.series_index is not None:
|
||||
series_index = dmi.series_index
|
||||
if ratings:
|
||||
rating = sum(ratings)/float(len(ratings))
|
||||
if mi.rating is None or mi.rating < 0.1:
|
||||
@ -295,6 +331,9 @@ def get_social_metadata(mi, verbose=0):
|
||||
mi.comments = ''
|
||||
for x in comments:
|
||||
mi.comments += x+'\n\n'
|
||||
if series and series_index is not None:
|
||||
mi.series = series
|
||||
mi.series_index = series_index
|
||||
|
||||
return [(x.name, x.exception, x.tb) for x in fetchers if x.exception is not
|
||||
None]
|
||||
|
@ -736,7 +736,9 @@ class OPF(object):
|
||||
def fget(self):
|
||||
ans = []
|
||||
for tag in self.tags_path(self.metadata):
|
||||
ans.append(self.get_text(tag))
|
||||
text = self.get_text(tag)
|
||||
if text and text.strip():
|
||||
ans.extend([x.strip() for x in text.split(',')])
|
||||
return ans
|
||||
|
||||
def fset(self, val):
|
||||
|
@ -61,6 +61,7 @@ class FormatState(object):
|
||||
self.italic = False
|
||||
self.bold = False
|
||||
self.strikethrough = False
|
||||
self.underline = False
|
||||
self.preserve = False
|
||||
self.family = 'serif'
|
||||
self.bgcolor = 'transparent'
|
||||
@ -79,7 +80,8 @@ class FormatState(object):
|
||||
and self.family == other.family \
|
||||
and self.bgcolor == other.bgcolor \
|
||||
and self.fgcolor == other.fgcolor \
|
||||
and self.strikethrough == other.strikethrough
|
||||
and self.strikethrough == other.strikethrough \
|
||||
and self.underline == other.underline
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self.__eq__(other)
|
||||
@ -251,6 +253,8 @@ class MobiMLizer(object):
|
||||
color=unicode(istate.fgcolor))
|
||||
if istate.strikethrough:
|
||||
inline = etree.SubElement(inline, XHTML('s'))
|
||||
if istate.underline:
|
||||
inline = etree.SubElement(inline, XHTML('u'))
|
||||
bstate.inline = inline
|
||||
bstate.istate = istate
|
||||
inline = bstate.inline
|
||||
@ -330,6 +334,7 @@ class MobiMLizer(object):
|
||||
istate.bgcolor = style['background-color']
|
||||
istate.fgcolor = style['color']
|
||||
istate.strikethrough = style['text-decoration'] == 'line-through'
|
||||
istate.underline = style['text-decoration'] == 'underline'
|
||||
if 'monospace' in style['font-family']:
|
||||
istate.family = 'monospace'
|
||||
elif 'sans-serif' in style['font-family']:
|
||||
|
@ -28,6 +28,7 @@ from calibre.constants import preferred_encoding, filesystem_encoding, \
|
||||
from calibre.gui2.dialogs.choose_format import ChooseFormatDialog
|
||||
from calibre.ebooks import BOOK_EXTENSIONS
|
||||
from calibre.gui2.dialogs.confirm_delete import confirm
|
||||
from calibre.gui2.dialogs.delete_matching_from_device import DeleteMatchingFromDeviceDialog
|
||||
|
||||
class AnnotationsAction(object): # {{{
|
||||
|
||||
@ -471,6 +472,45 @@ class DeleteAction(object): # {{{
|
||||
if ids:
|
||||
self.tags_view.recount()
|
||||
|
||||
def remove_matching_books_from_device(self, *args):
|
||||
if not self.device_manager.is_device_connected:
|
||||
d = error_dialog(self, _('Cannot delete books'),
|
||||
_('No device is connected'))
|
||||
d.exec_()
|
||||
return
|
||||
ids = self._get_selected_ids()
|
||||
if not ids:
|
||||
#_get_selected_ids shows a dialog box if nothing is selected, so we
|
||||
#do not need to show one here
|
||||
return
|
||||
to_delete = {}
|
||||
some_to_delete = False
|
||||
for model,name in ((self.memory_view.model(), _('Main memory')),
|
||||
(self.card_a_view.model(), _('Storage Card A')),
|
||||
(self.card_b_view.model(), _('Storage Card B'))):
|
||||
to_delete[name] = (model, model.paths_for_db_ids(ids))
|
||||
if len(to_delete[name][1]) > 0:
|
||||
some_to_delete = True
|
||||
if not some_to_delete:
|
||||
d = error_dialog(self, _('No books to delete'),
|
||||
_('None of the selected books are on the device'))
|
||||
d.exec_()
|
||||
return
|
||||
d = DeleteMatchingFromDeviceDialog(self, to_delete)
|
||||
if d.exec_():
|
||||
paths = {}
|
||||
ids = {}
|
||||
for (model, id, path) in d.result:
|
||||
if model not in paths:
|
||||
paths[model] = []
|
||||
ids[model] = []
|
||||
paths[model].append(path)
|
||||
ids[model].append(id)
|
||||
for model in paths:
|
||||
job = self.remove_paths(paths[model])
|
||||
self.delete_memory[job] = (paths[model], model)
|
||||
model.mark_for_deletion(job, ids[model], rows_are_ids=True)
|
||||
self.status_bar.show_message(_('Deleting books from device.'), 1000)
|
||||
|
||||
def delete_covers(self, *args):
|
||||
ids = self._get_selected_ids()
|
||||
|
@ -1347,7 +1347,7 @@ class DeviceMixin(object): # {{{
|
||||
if reset:
|
||||
# First build a cache of the library, so the search isn't On**2
|
||||
self.db_book_title_cache = {}
|
||||
self.db_book_uuid_cache = set()
|
||||
self.db_book_uuid_cache = {}
|
||||
db = self.library_view.model().db
|
||||
for id in db.data.iterallids():
|
||||
mi = db.get_metadata(id, index_is_id=True)
|
||||
@ -1364,7 +1364,7 @@ class DeviceMixin(object): # {{{
|
||||
aus = re.sub('(?u)\W|[_]', '', aus)
|
||||
self.db_book_title_cache[title]['author_sort'][aus] = mi
|
||||
self.db_book_title_cache[title]['db_ids'][mi.application_id] = mi
|
||||
self.db_book_uuid_cache.add(mi.uuid)
|
||||
self.db_book_uuid_cache[mi.uuid] = mi.application_id
|
||||
|
||||
# Now iterate through all the books on the device, setting the
|
||||
# in_library field Fastest and most accurate key is the uuid. Second is
|
||||
@ -1376,11 +1376,13 @@ class DeviceMixin(object): # {{{
|
||||
for book in booklist:
|
||||
if getattr(book, 'uuid', None) in self.db_book_uuid_cache:
|
||||
book.in_library = True
|
||||
# ensure that the correct application_id is set
|
||||
book.application_id = self.db_book_uuid_cache[book.uuid]
|
||||
continue
|
||||
|
||||
book_title = book.title.lower() if book.title else ''
|
||||
book_title = re.sub('(?u)\W|[_]', '', book_title)
|
||||
book.in_library = False
|
||||
book.in_library = None
|
||||
d = self.db_book_title_cache.get(book_title, None)
|
||||
if d is not None:
|
||||
if getattr(book, 'application_id', None) in d['db_ids']:
|
||||
|
@ -49,6 +49,9 @@ class SocialMetadata(QDialog):
|
||||
self.mi.tags = self.worker.mi.tags
|
||||
self.mi.rating = self.worker.mi.rating
|
||||
self.mi.comments = self.worker.mi.comments
|
||||
if self.worker.mi.series:
|
||||
self.mi.series = self.worker.mi.series
|
||||
self.mi.series_index = self.worker.mi.series_index
|
||||
QDialog.accept(self)
|
||||
|
||||
@property
|
||||
|
src/calibre/gui2/dialogs/delete_matching_from_device.py (new file, 109 lines)
@@ -0,0 +1,109 @@
|
||||
#!/usr/bin/env python
|
||||
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
|
||||
__docformat__ = 'restructuredtext en'
|
||||
__license__ = 'GPL v3'
|
||||
|
||||
from PyQt4.Qt import Qt, QDialog, QTableWidgetItem, QAbstractItemView
|
||||
|
||||
from calibre import strftime
|
||||
from calibre.ebooks.metadata import authors_to_string, authors_to_sort_string, \
|
||||
title_sort
|
||||
from calibre.gui2.dialogs.delete_matching_from_device_ui import \
|
||||
Ui_DeleteMatchingFromDeviceDialog
|
||||
from calibre.utils.date import UNDEFINED_DATE
|
||||
|
||||
class tableItem(QTableWidgetItem):
|
||||
|
||||
def __init__(self, text):
|
||||
QTableWidgetItem.__init__(self, text)
|
||||
self.setFlags(Qt.ItemIsEnabled)
|
||||
self.sort = text.lower()
|
||||
|
||||
def __ge__(self, other):
|
||||
return self.sort >= other.sort
|
||||
|
||||
def __lt__(self, other):
|
||||
return self.sort < other.sort
|
||||
|
||||
class titleTableItem(tableItem):
|
||||
|
||||
def __init__(self, text):
|
||||
tableItem.__init__(self, text)
|
||||
self.sort = title_sort(text.lower())
|
||||
|
||||
class authorTableItem(tableItem):
|
||||
|
||||
def __init__(self, book):
|
||||
tableItem.__init__(self, authors_to_string(book.authors))
|
||||
if book.author_sort is not None:
|
||||
self.sort = book.author_sort.lower()
|
||||
else:
|
||||
self.sort = authors_to_sort_string(book.authors).lower()
|
||||
|
||||
class dateTableItem(tableItem):
|
||||
|
||||
def __init__(self, date):
|
||||
if date is not None:
|
||||
tableItem.__init__(self, strftime('%x', date))
|
||||
self.sort = date
|
||||
else:
|
||||
tableItem.__init__(self, '')
|
||||
self.sort = UNDEFINED_DATE
|
||||
|
||||
|
||||
class DeleteMatchingFromDeviceDialog(QDialog, Ui_DeleteMatchingFromDeviceDialog):
|
||||
|
||||
def __init__(self, parent, items):
|
||||
QDialog.__init__(self, parent)
|
||||
Ui_DeleteMatchingFromDeviceDialog.__init__(self)
|
||||
self.setupUi(self)
|
||||
|
||||
self.explanation.setText('<p>'+_('All checked books will be '
|
||||
'<b>permanently deleted</b> from your '
|
||||
'device. Please verify the list.'+'</p>'))
|
||||
self.buttonBox.accepted.connect(self.accepted)
|
||||
self.table.cellClicked.connect(self.cell_clicked)
|
||||
self.table.setSelectionMode(QAbstractItemView.NoSelection)
|
||||
self.table.setColumnCount(5)
|
||||
self.table.setHorizontalHeaderLabels(
|
||||
['', _('Location'), _('Title'),
|
||||
_('Author'), _('Date'), _('Format')])
|
||||
rows = 0
|
||||
for card in items:
|
||||
rows += len(items[card][1])
|
||||
self.table.setRowCount(rows)
|
||||
row = 0
|
||||
for card in items:
|
||||
(model,books) = items[card]
|
||||
for (id,book) in books:
|
||||
item = QTableWidgetItem()
|
||||
item.setFlags(Qt.ItemIsUserCheckable|Qt.ItemIsEnabled)
|
||||
item.setCheckState(Qt.Checked)
|
||||
item.setData(Qt.UserRole, (model, id, book.path))
|
||||
self.table.setItem(row, 0, item)
|
||||
self.table.setItem(row, 1, tableItem(card))
|
||||
self.table.setItem(row, 2, titleTableItem(book.title))
|
||||
self.table.setItem(row, 3, authorTableItem(book))
|
||||
self.table.setItem(row, 4, dateTableItem(book.datetime))
|
||||
self.table.setItem(row, 5, tableItem(book.path.rpartition('.')[2]))
|
||||
row += 1
|
||||
self.table.setCurrentCell(0, 1)
|
||||
self.table.resizeColumnsToContents()
|
||||
self.table.setSortingEnabled(True)
|
||||
self.table.sortByColumn(2, Qt.AscendingOrder)
|
||||
self.table.setCurrentCell(0, 1)
|
||||
|
||||
def cell_clicked(self, row, col):
|
||||
if col == 0:
|
||||
self.table.setCurrentCell(row, 1)
|
||||
|
||||
def accepted(self):
|
||||
self.result = []
|
||||
for row in range(self.table.rowCount()):
|
||||
if self.table.item(row, 0).checkState() == Qt.Unchecked:
|
||||
continue
|
||||
(model, id, path) = self.table.item(row, 0).data(Qt.UserRole).toPyObject()
|
||||
path = unicode(path)
|
||||
self.result.append((model, id, path))
|
||||
return
|
||||
|
src/calibre/gui2/dialogs/delete_matching_from_device.ui (new file, 90 lines)
@@ -0,0 +1,90 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<ui version="4.0">
|
||||
<class>DeleteMatchingFromDeviceDialog</class>
|
||||
<widget class="QDialog" name="DeleteMatchingFromDeviceDialog">
|
||||
<property name="geometry">
|
||||
<rect>
|
||||
<x>0</x>
|
||||
<y>0</y>
|
||||
<width>730</width>
|
||||
<height>342</height>
|
||||
</rect>
|
||||
</property>
|
||||
<property name="sizePolicy">
|
||||
<sizepolicy hsizetype="Expanding" vsizetype="MinimumExpanding">
|
||||
<horstretch>0</horstretch>
|
||||
<verstretch>0</verstretch>
|
||||
</sizepolicy>
|
||||
</property>
|
||||
<property name="windowTitle">
|
||||
<string>Delete from device</string>
|
||||
</property>
|
||||
<layout class="QVBoxLayout" name="verticalLayout">
|
||||
<item>
|
||||
<widget class="QLabel" name="explanation">
|
||||
</widget>
|
||||
</item>
|
||||
<item>
|
||||
<widget class="QTableWidget" name="table">
|
||||
<property name="sizePolicy">
|
||||
<sizepolicy hsizetype="Expanding" vsizetype="Expanding">
|
||||
<horstretch>0</horstretch>
|
||||
<verstretch>0</verstretch>
|
||||
</sizepolicy>
|
||||
</property>
|
||||
<property name="columnCount">
|
||||
<number>0</number>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item>
|
||||
<widget class="QDialogButtonBox" name="buttonBox">
|
||||
<property name="orientation">
|
||||
<enum>Qt::Horizontal</enum>
|
||||
</property>
|
||||
<property name="standardButtons">
|
||||
<set>QDialogButtonBox::Cancel|QDialogButtonBox::Ok</set>
|
||||
</property>
|
||||
<property name="centerButtons">
|
||||
<bool>true</bool>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
</layout>
|
||||
</widget>
|
||||
<resources/>
|
||||
<connections>
|
||||
<connection>
|
||||
<sender>buttonBox</sender>
|
||||
<signal>accepted()</signal>
|
||||
<receiver>DeleteMatchingFromDeviceDialog</receiver>
|
||||
<slot>accept()</slot>
|
||||
<hints>
|
||||
<hint type="sourcelabel">
|
||||
<x>229</x>
|
||||
<y>211</y>
|
||||
</hint>
|
||||
<hint type="destinationlabel">
|
||||
<x>157</x>
|
||||
<y>234</y>
|
||||
</hint>
|
||||
</hints>
|
||||
</connection>
|
||||
<connection>
|
||||
<sender>buttonBox</sender>
|
||||
<signal>rejected()</signal>
|
||||
<receiver>DeleteMatchingFromDeviceDialog</receiver>
|
||||
<slot>reject()</slot>
|
||||
<hints>
|
||||
<hint type="sourcelabel">
|
||||
<x>297</x>
|
||||
<y>217</y>
|
||||
</hint>
|
||||
<hint type="destinationlabel">
|
||||
<x>286</x>
|
||||
<y>234</y>
|
||||
</hint>
|
||||
</hints>
|
||||
</connection>
|
||||
</connections>
|
||||
</ui>
|
@ -131,6 +131,10 @@ class ToolbarMixin(object): # {{{
|
||||
self.delete_all_but_selected_formats)
|
||||
self.delete_menu.addAction(
|
||||
_('Remove covers from selected books'), self.delete_covers)
|
||||
self.delete_menu.addSeparator()
|
||||
self.delete_menu.addAction(
|
||||
_('Remove matching books from device'),
|
||||
self.remove_matching_books_from_device)
|
||||
self.action_del.setMenu(self.delete_menu)
|
||||
|
||||
self.action_open_containing_folder.setShortcut(Qt.Key_O)
|
||||
@ -405,6 +409,7 @@ class LayoutMixin(object): # {{{
|
||||
self.library_view.set_current_row(0)
|
||||
m.current_changed(self.library_view.currentIndex(),
|
||||
self.library_view.currentIndex())
|
||||
self.library_view.setFocus(Qt.OtherFocusReason)
|
||||
|
||||
|
||||
def save_layout_state(self):
|
||||
|
@ -769,6 +769,7 @@ class OnDeviceSearch(SearchQueryParser): # {{{
|
||||
'format',
|
||||
'formats',
|
||||
'title',
|
||||
'inlibrary'
|
||||
]
|
||||
|
||||
|
||||
@ -807,12 +808,23 @@ class OnDeviceSearch(SearchQueryParser): # {{{
|
||||
'author': lambda x: ' & '.join(getattr(x, 'authors')).lower(),
|
||||
'collections':lambda x: ','.join(getattr(x, 'device_collections')).lower(),
|
||||
'format':lambda x: os.path.splitext(x.path)[1].lower(),
|
||||
'inlibrary':lambda x : getattr(x, 'in_library')
|
||||
}
|
||||
for x in ('author', 'format'):
|
||||
q[x+'s'] = q[x]
|
||||
for index, row in enumerate(self.model.db):
|
||||
for locvalue in locations:
|
||||
accessor = q[locvalue]
|
||||
if query == 'true':
|
||||
if accessor(row) is not None:
|
||||
matches.add(index)
|
||||
continue
|
||||
if query == 'false':
|
||||
if accessor(row) is None:
|
||||
matches.add(index)
|
||||
continue
|
||||
if locvalue == 'inlibrary':
|
||||
continue # this is bool, so can't match below
|
||||
try:
|
||||
### Can't separate authors because comma is used for name sep and author sep
|
||||
### Exact match might not get what you want. For that reason, turn author
|
||||
@ -862,7 +874,11 @@ class DeviceBooksModel(BooksModel): # {{{
|
||||
self.editable = True
|
||||
self.book_in_library = None
|
||||
|
||||
def mark_for_deletion(self, job, rows):
|
||||
def mark_for_deletion(self, job, rows, rows_are_ids=False):
|
||||
if rows_are_ids:
|
||||
self.marked_for_deletion[job] = rows
|
||||
self.reset()
|
||||
else:
|
||||
self.marked_for_deletion[job] = self.indices(rows)
|
||||
for row in rows:
|
||||
indices = self.row_indices(row)
|
||||
@ -888,13 +904,13 @@ class DeviceBooksModel(BooksModel): # {{{
|
||||
ans.extend(v)
|
||||
return ans
|
||||
|
||||
def clear_ondevice(self, db_ids):
|
||||
def clear_ondevice(self, db_ids, to_what=None):
|
||||
for data in self.db:
|
||||
if data is None:
|
||||
continue
|
||||
app_id = getattr(data, 'application_id', None)
|
||||
if app_id is not None and app_id in db_ids:
|
||||
data.in_library = False
|
||||
data.in_library = to_what
|
||||
self.reset()
|
||||
|
||||
def flags(self, index):
|
||||
@ -1049,6 +1065,13 @@ class DeviceBooksModel(BooksModel): # {{{
|
||||
def paths(self, rows):
|
||||
return [self.db[self.map[r.row()]].path for r in rows ]
|
||||
|
||||
def paths_for_db_ids(self, db_ids):
|
||||
res = []
|
||||
for r,b in enumerate(self.db):
|
||||
if b.application_id in db_ids:
|
||||
res.append((r,b))
|
||||
return res
|
||||
|
||||
def indices(self, rows):
|
||||
'''
|
||||
Return indices into underlying database from rows
|
||||
@ -1089,6 +1112,8 @@ class DeviceBooksModel(BooksModel): # {{{
|
||||
elif role == Qt.DecorationRole and cname == 'inlibrary':
|
||||
if self.db[self.map[row]].in_library:
|
||||
return QVariant(self.bool_yes_icon)
|
||||
elif self.db[self.map[row]].in_library is not None:
|
||||
return QVariant(self.bool_no_icon)
|
||||
elif role == Qt.TextAlignmentRole:
|
||||
cname = self.column_map[index.column()]
|
||||
ans = Qt.AlignVCenter | ALIGNMENT_MAP[self.alignment_map.get(cname,
|
||||
|
@ -127,6 +127,10 @@ class DownloadMetadata(Thread):
|
||||
self.db.set_tags(id, mi.tags)
|
||||
if mi.comments:
|
||||
self.db.set_comment(id, mi.comments)
|
||||
if mi.series:
|
||||
self.db.set_series(id, mi.series)
|
||||
if mi.series_index is not None:
|
||||
self.db.set_series_index(id, mi.series_index)
|
||||
|
||||
self.updated = set(self.fetched_metadata)
|
||||
|
||||
|
@ -18,17 +18,20 @@ from calibre.utils.config import prefs
|
||||
from calibre.utils.search_query_parser import saved_searches
|
||||
|
||||
class SearchLineEdit(QLineEdit):
|
||||
key_pressed = pyqtSignal(object)
|
||||
mouse_released = pyqtSignal(object)
|
||||
focus_out = pyqtSignal(object)
|
||||
|
||||
def keyPressEvent(self, event):
|
||||
self.emit(SIGNAL('key_pressed(PyQt_PyObject)'), event)
|
||||
self.key_pressed.emit(event)
|
||||
QLineEdit.keyPressEvent(self, event)
|
||||
|
||||
def mouseReleaseEvent(self, event):
|
||||
self.emit(SIGNAL('mouse_released(PyQt_PyObject)'), event)
|
||||
self.mouse_released.emit(event)
|
||||
QLineEdit.mouseReleaseEvent(self, event)
|
||||
|
||||
def focusOutEvent(self, event):
|
||||
self.emit(SIGNAL('focus_out(PyQt_PyObject)'), event)
|
||||
self.focus_out.emit(event)
|
||||
QLineEdit.focusOutEvent(self, event)
|
||||
|
||||
def dropEvent(self, ev):
|
||||
@ -68,10 +71,10 @@ class SearchBox2(QComboBox):
|
||||
self.normal_background = 'rgb(255, 255, 255, 0%)'
|
||||
self.line_edit = SearchLineEdit(self)
|
||||
self.setLineEdit(self.line_edit)
|
||||
self.connect(self.line_edit, SIGNAL('key_pressed(PyQt_PyObject)'),
|
||||
self.key_pressed, Qt.DirectConnection)
|
||||
self.connect(self.line_edit, SIGNAL('mouse_released(PyQt_PyObject)'),
|
||||
self.mouse_released, Qt.DirectConnection)
|
||||
self.line_edit.key_pressed.connect(self.key_pressed,
|
||||
type=Qt.DirectConnection)
|
||||
self.line_edit.mouse_released.connect(self.mouse_released,
|
||||
type=Qt.DirectConnection)
|
||||
self.setEditable(True)
|
||||
self.help_state = False
|
||||
self.as_you_type = True
|
||||
@ -90,14 +93,18 @@ class SearchBox2(QComboBox):
|
||||
self.help_text = help_text
|
||||
self.colorize = colorize
|
||||
self.clear_to_help()
|
||||
self.connect(self, SIGNAL('editTextChanged(QString)'), self.text_edited_slot)
|
||||
|
||||
def normalize_state(self):
|
||||
if self.help_state:
|
||||
self.setEditText('')
|
||||
self.line_edit.setStyleSheet(
|
||||
'QLineEdit { color: black; background-color: %s; }' %
|
||||
self.normal_background)
|
||||
self.help_state = False
|
||||
else:
|
||||
self.line_edit.setStyleSheet(
|
||||
'QLineEdit { color: black; background-color: %s; }' %
|
||||
self.normal_background)
|
||||
|
||||
def clear_to_help(self):
|
||||
if self.help_state:
|
||||
@ -131,17 +138,13 @@ class SearchBox2(QComboBox):
|
||||
self.line_edit.setStyleSheet('QLineEdit { color: black; background-color: %s; }' % col)
|
||||
|
||||
def key_pressed(self, event):
|
||||
if self.help_state:
|
||||
self.normalize_state()
|
||||
if not self.as_you_type:
|
||||
if event.key() in (Qt.Key_Return, Qt.Key_Enter):
|
||||
self.do_search()
|
||||
self.timer = self.startTimer(self.__class__.INTERVAL)
|
||||
|
||||
def mouse_released(self, event):
|
||||
if self.help_state:
|
||||
self.normalize_state()
|
||||
|
||||
def text_edited_slot(self, text):
|
||||
if self.as_you_type:
|
||||
self.timer = self.startTimer(self.__class__.INTERVAL)
|
||||
|
||||
@ -227,14 +230,13 @@ class SavedSearchBox(QComboBox):
|
||||
|
||||
self.line_edit = SearchLineEdit(self)
|
||||
self.setLineEdit(self.line_edit)
|
||||
self.connect(self.line_edit, SIGNAL('key_pressed(PyQt_PyObject)'),
|
||||
self.key_pressed, Qt.DirectConnection)
|
||||
self.connect(self.line_edit, SIGNAL('mouse_released(PyQt_PyObject)'),
|
||||
self.mouse_released, Qt.DirectConnection)
|
||||
self.connect(self.line_edit, SIGNAL('focus_out(PyQt_PyObject)'),
|
||||
self.focus_out, Qt.DirectConnection)
|
||||
self.connect(self, SIGNAL('activated(const QString&)'),
|
||||
self.saved_search_selected)
|
||||
self.line_edit.key_pressed.connect(self.key_pressed,
|
||||
type=Qt.DirectConnection)
|
||||
self.line_edit.mouse_released.connect(self.mouse_released,
|
||||
type=Qt.DirectConnection)
|
||||
self.line_edit.focus_out.connect(self.focus_out,
|
||||
type=Qt.DirectConnection)
|
||||
self.activated[str].connect(self.saved_search_selected)
|
||||
|
||||
completer = QCompleter(self) # turn off auto-completion
|
||||
self.setCompleter(completer)
|
||||
|
@ -136,6 +136,23 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
||||
self.initialize_dynamic()
|
||||
|
||||
def initialize_dynamic(self):
|
||||
self.conn.executescript('''
|
||||
DROP TRIGGER IF EXISTS author_insert_trg;
|
||||
CREATE TEMP TRIGGER author_insert_trg
|
||||
AFTER INSERT ON authors
|
||||
BEGIN
|
||||
UPDATE authors SET sort=author_to_author_sort(NEW.name) WHERE id=NEW.id;
|
||||
END;
|
||||
DROP TRIGGER IF EXISTS author_update_trg;
|
||||
CREATE TEMP TRIGGER author_update_trg
|
||||
BEFORE UPDATE ON authors
|
||||
BEGIN
|
||||
UPDATE authors SET sort=author_to_author_sort(NEW.name)
|
||||
WHERE id=NEW.id AND name <> NEW.name;
|
||||
END;
|
||||
''')
|
||||
self.conn.execute(
|
||||
'UPDATE authors SET sort=author_to_author_sort(name) WHERE sort IS NULL')
|
||||
self.conn.executescript(u'''
|
||||
CREATE TEMP VIEW IF NOT EXISTS tag_browser_news AS SELECT DISTINCT
|
||||
id,
|
||||
|
@ -385,28 +385,5 @@ class SchemaUpgrade(object):
|
||||
if table.startswith('custom_column_') and link_table in tables:
|
||||
create_cust_tag_browser_view(table, link_table)
|
||||
|
||||
from calibre.ebooks.metadata import author_to_author_sort
|
||||
self.conn.execute('UPDATE authors SET sort=author_to_author_sort(name)')
|
||||
|
||||
aut = self.conn.get('SELECT id, name FROM authors');
|
||||
records = []
|
||||
for (id, author) in aut:
|
||||
records.append((id, author.replace('|', ',')))
|
||||
for id,author in records:
|
||||
self.conn.execute('UPDATE authors SET sort=? WHERE id=?',
|
||||
(author_to_author_sort(author.replace('|', ',')).strip(), id))
|
||||
self.conn.commit()
|
||||
self.conn.executescript('''
|
||||
DROP TRIGGER IF EXISTS author_insert_trg;
|
||||
CREATE TRIGGER author_insert_trg
|
||||
AFTER INSERT ON authors
|
||||
BEGIN
|
||||
UPDATE authors SET sort=author_to_author_sort(NEW.name) WHERE id=NEW.id;
|
||||
END;
|
||||
DROP TRIGGER IF EXISTS author_update_trg;
|
||||
CREATE TRIGGER author_update_trg
|
||||
BEFORE UPDATE ON authors
|
||||
BEGIN
|
||||
UPDATE authors SET sort=author_to_author_sort(NEW.name)
|
||||
WHERE id=NEW.id AND name <> NEW.name;
|
||||
END;
|
||||
''')
|
||||
|
@ -94,6 +94,9 @@ class Connection(sqlite.Connection):
|
||||
return ans[0]
|
||||
return ans.fetchall()
|
||||
|
||||
def _author_to_author_sort(x):
|
||||
if not x: return ''
|
||||
return author_to_author_sort(x.replace('|', ','))
|
||||
|
||||
class DBThread(Thread):
|
||||
|
||||
@ -121,7 +124,7 @@ class DBThread(Thread):
|
||||
else:
|
||||
self.conn.create_function('title_sort', 1, title_sort)
|
||||
self.conn.create_function('author_to_author_sort', 1,
|
||||
lambda x: author_to_author_sort(x.replace('|', ',')))
|
||||
_author_to_author_sort)
|
||||
self.conn.create_function('uuid4', 0, lambda : str(uuid.uuid4()))
|
||||
# Dummy functions for dynamically created filters
|
||||
self.conn.create_function('books_list_filter', 1, lambda x: 1)
|
||||
|