Merge from trunk

This commit is contained in:
Charles Haley 2010-06-17 08:24:30 +01:00
commit 146daab864
10 changed files with 512 additions and 148 deletions

View File

@ -1,41 +1,43 @@
""" #!/usr/bin/env python
publico.py - v1.0 __author__ = u'Jordi Balcells'
__license__ = 'GPL v3'
description = u'Jornal portugu\xeas - v1.03 (16 June 2010)'
__docformat__ = 'restructuredtext en'
Copyright (c) 2009, David Rodrigues - http://sixhat.net '''
All rights reserved. publico.pt
""" '''
__license__ = 'GPL 3'
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
import re
class Publico(BasicNewsRecipe): class PublicoPT(BasicNewsRecipe):
title = u'P\xfablico' description = u'Jornal portugu\xeas'
__author__ = 'David Rodrigues' cover_url = 'http://static.publico.pt/files/header/img/publico.gif'
oldest_article = 1 title = u'Publico.PT'
max_articles_per_feed = 30 category = 'News, politics, culture, economy, general interest'
encoding='utf-8' oldest_article = 2
no_stylesheets = True no_stylesheets = True
encoding = 'utf8'
use_embedded_content = False
language = 'pt' language = 'pt'
remove_empty_feeds = True
extra_css = ' body{font-family: Arial,Helvetica,sans-serif } img{margin-bottom: 0.4em} '
preprocess_regexps = [(re.compile(u"\uFFFD", re.DOTALL|re.IGNORECASE), lambda match: ''),] keep_only_tags = [dict(attrs={'class':['content-noticia-title','artigoHeader','ECOSFERA_MANCHETE','noticia','textoPrincipal','ECOSFERA_texto_01']})]
remove_tags = [dict(attrs={'class':['options','subcoluna']})]
feeds = [ feeds = [
(u'Geral', u'http://feeds.feedburner.com/PublicoUltimaHora'), (u'Geral', u'http://feeds.feedburner.com/publicoRSS'),
(u'Internacional', u'http://www.publico.clix.pt/rss.ashx?idCanal=11'), (u'Mundo', u'http://feeds.feedburner.com/PublicoMundo'),
(u'Pol\xedtica', u'http://www.publico.clix.pt/rss.ashx?idCanal=12'), (u'Pol\xedtica', u'http://feeds.feedburner.com/PublicoPolitica'),
(u'Ci\xcencias', u'http://www.publico.clix.pt/rss.ashx?idCanal=13'), (u'Economia', u'http://feeds.feedburner.com/PublicoEconomia'),
(u'Desporto', u'http://desporto.publico.pt/rss.ashx'), (u'Desporto', u'http://feeds.feedburner.com/PublicoDesporto'),
(u'Economia', u'http://www.publico.clix.pt/rss.ashx?idCanal=57'), (u'Sociedade', u'http://feeds.feedburner.com/PublicoSociedade'),
(u'Educa\xe7\xe3o', u'http://www.publico.clix.pt/rss.ashx?idCanal=58'), (u'Educa\xe7\xe3o', u'http://feeds.feedburner.com/PublicoEducacao'),
(u'Local', u'http://www.publico.clix.pt/rss.ashx?idCanal=59'), (u'Ci\xeancias', u'http://feeds.feedburner.com/PublicoCiencias'),
(u'Media e Tecnologia', u'http://www.publico.clix.pt/rss.ashx?idCanal=61'), (u'Ecosfera', u'http://feeds.feedburner.com/PublicoEcosfera'),
(u'Sociedade', u'http://www.publico.clix.pt/rss.ashx?idCanal=62') (u'Cultura', u'http://feeds.feedburner.com/PublicoCultura'),
(u'Local', u'http://feeds.feedburner.com/PublicoLocal'),
(u'Tecnologia', u'http://feeds.feedburner.com/PublicoTecnologia')
] ]
remove_tags = [dict(name='script'), dict(id='linhaTitulosHeader')]
keep_only_tags = [dict(name='div')]
def print_version(self,url):
s=re.findall("id=[0-9]+",url);
return "http://ww2.publico.clix.pt/print.aspx?"+s[0]

View File

@ -10,8 +10,10 @@ from calibre.web.feeds.news import BasicNewsRecipe
class Slashdot(BasicNewsRecipe): class Slashdot(BasicNewsRecipe):
title = u'Slashdot.org' title = u'Slashdot.org'
description = '''Tech news. WARNING: This recipe downloads a lot description = '''Tech news. WARNING: This recipe downloads a lot
of content and can result in your IP being banned from slashdot.org''' of content and may result in your IP being banned from slashdot.org'''
oldest_article = 7 oldest_article = 7
simultaneous_downloads = 1
delay = 3
max_articles_per_feed = 100 max_articles_per_feed = 100
language = 'en' language = 'en'

View File

@ -457,9 +457,12 @@ from calibre.devices.misc import PALMPRE, AVANT
from calibre.devices.folder_device.driver import FOLDER_DEVICE_FOR_CONFIG from calibre.devices.folder_device.driver import FOLDER_DEVICE_FOR_CONFIG
from calibre.devices.kobo.driver import KOBO from calibre.devices.kobo.driver import KOBO
from calibre.ebooks.metadata.fetch import GoogleBooks, ISBNDB, Amazon from calibre.ebooks.metadata.fetch import GoogleBooks, ISBNDB, Amazon, \
LibraryThing
from calibre.ebooks.metadata.douban import DoubanBooks
from calibre.library.catalog import CSV_XML, EPUB_MOBI from calibre.library.catalog import CSV_XML, EPUB_MOBI
plugins = [HTML2ZIP, PML2PMLZ, ArchiveExtract, GoogleBooks, ISBNDB, Amazon, CSV_XML, EPUB_MOBI] plugins = [HTML2ZIP, PML2PMLZ, ArchiveExtract, GoogleBooks, ISBNDB, Amazon,
LibraryThing, DoubanBooks, CSV_XML, EPUB_MOBI]
plugins += [ plugins += [
ComicInput, ComicInput,
EPUBInput, EPUBInput,

View File

@ -21,7 +21,7 @@ from calibre.utils.config import make_config_dir, Config, ConfigProxy, \
platform = 'linux' platform = 'linux'
if iswindows: if iswindows:
platform = 'windows' platform = 'windows'
if isosx: elif isosx:
platform = 'osx' platform = 'osx'
from zipfile import ZipFile from zipfile import ZipFile
@ -32,19 +32,25 @@ def _config():
c.add_opt('filetype_mapping', default={}, help=_('Mapping for filetype plugins')) c.add_opt('filetype_mapping', default={}, help=_('Mapping for filetype plugins'))
c.add_opt('plugin_customization', default={}, help=_('Local plugin customization')) c.add_opt('plugin_customization', default={}, help=_('Local plugin customization'))
c.add_opt('disabled_plugins', default=set([]), help=_('Disabled plugins')) c.add_opt('disabled_plugins', default=set([]), help=_('Disabled plugins'))
c.add_opt('enabled_plugins', default=set([]), help=_('Enabled plugins'))
return ConfigProxy(c) return ConfigProxy(c)
config = _config() config = _config()
class InvalidPlugin(ValueError): class InvalidPlugin(ValueError):
pass pass
class PluginNotFound(ValueError): class PluginNotFound(ValueError):
pass pass
def load_plugin(path_to_zip_file): def find_plugin(name):
for plugin in _initialized_plugins:
if plugin.name == name:
return plugin
def load_plugin(path_to_zip_file): # {{{
''' '''
Load plugin from zip file or raise InvalidPlugin error Load plugin from zip file or raise InvalidPlugin error
@ -76,11 +82,120 @@ def load_plugin(path_to_zip_file):
raise InvalidPlugin(_('No valid plugin found in ')+path_to_zip_file) raise InvalidPlugin(_('No valid plugin found in ')+path_to_zip_file)
_initialized_plugins = [] # }}}
# Enable/disable plugins {{{
def disable_plugin(plugin_or_name):
    '''
    Disable a plugin, given either the plugin object or its name.

    Raises ValueError if the plugin is unknown or declares itself
    non-disableable. The name is added to the persisted
    ``disabled_plugins`` set and removed from ``enabled_plugins`` so the
    two sets never disagree about the same plugin.
    '''
    x = getattr(plugin_or_name, 'name', plugin_or_name)
    plugin = find_plugin(x)
    if plugin is None:
        # Previously an unknown name crashed with AttributeError on the
        # can_be_disabled access below; fail with an explicit error instead.
        raise ValueError('No plugin named %s found'%x)
    if not plugin.can_be_disabled:
        raise ValueError('Plugin %s cannot be disabled'%x)
    dp = config['disabled_plugins']
    dp.add(x)
    config['disabled_plugins'] = dp
    ep = config['enabled_plugins']
    if x in ep:
        ep.remove(x)
    config['enabled_plugins'] = ep
def enable_plugin(plugin_or_name):
    '''
    Force-enable a plugin, given the plugin object or its name.

    The name is dropped from the persisted ``disabled_plugins`` set and
    recorded in ``enabled_plugins``, overriding any default-disabled status.
    '''
    name = getattr(plugin_or_name, 'name', plugin_or_name)
    disabled = config['disabled_plugins']
    disabled.discard(name)
    config['disabled_plugins'] = disabled
    enabled = config['enabled_plugins']
    enabled.add(name)
    config['enabled_plugins'] = enabled
# Plugins that ship disabled by default; users can opt in explicitly via
# enable_plugin(), which records them in the 'enabled_plugins' config set.
default_disabled_plugins = set([
    'Douban Books',
])
def is_disabled(plugin):
    '''
    Return True if *plugin* is currently disabled.

    An explicit entry in the user's enabled set always wins; otherwise a
    plugin is disabled when its name is in the persisted disabled set or
    in ``default_disabled_plugins``.
    '''
    name = plugin.name
    if name in config['enabled_plugins']:
        return False
    return (name in config['disabled_plugins'] or
            name in default_disabled_plugins)
# }}}
# File type plugins {{{
_on_import = {} _on_import = {}
_on_preprocess = {} _on_preprocess = {}
_on_postprocess = {} _on_postprocess = {}
def reread_filetype_plugins():
    '''
    Rebuild the per-filetype dispatch maps (_on_import, _on_preprocess,
    _on_postprocess) from the currently initialized plugins.

    Each map goes from a file extension to the list of FileTypePlugins
    registered to run on that occasion for that extension.
    '''
    global _on_import
    global _on_preprocess
    global _on_postprocess
    _on_import = {}
    _on_preprocess = {}
    _on_postprocess = {}
    for plugin in _initialized_plugins:
        if not isinstance(plugin, FileTypePlugin):
            continue
        for ft in plugin.file_types:
            # dict.setdefault replaces the deprecated has_key()/manual-init
            # pattern used previously
            if plugin.on_import:
                _on_import.setdefault(ft, []).append(plugin)
            if plugin.on_preprocess:
                _on_preprocess.setdefault(ft, []).append(plugin)
            if plugin.on_postprocess:
                _on_postprocess.setdefault(ft, []).append(plugin)
def _run_filetype_plugins(path_to_file, ft=None, occasion='preprocess'):
    # Run every enabled file-type plugin registered for this occasion
    # ('import', 'preprocess' or 'postprocess') on path_to_file.
    # Each plugin is run against the ORIGINAL path; nfp tracks the most
    # recent plugin's output path, which is what gets returned.
    occasion = {'import':_on_import, 'preprocess':_on_preprocess,
        'postprocess':_on_postprocess}[occasion]
    customization = config['plugin_customization']
    if ft is None:
        # Derive the filetype from the extension, e.g. 'x.EPUB' -> 'epub'
        ft = os.path.splitext(path_to_file)[-1].lower().replace('.', '')
    nfp = path_to_file
    for plugin in occasion.get(ft, []):
        if is_disabled(plugin):
            continue
        plugin.site_customization = customization.get(plugin.name, '')
        with plugin:
            try:
                nfp = plugin.run(path_to_file)
                if not nfp:
                    # A plugin returning a falsy value means "no new file"
                    nfp = path_to_file
            except:
                # Best-effort: a failing plugin must not abort the pipeline
                print 'Running file type plugin %s failed with traceback:'%plugin.name
                traceback.print_exc()
    # Normalize for a case/separator-insensitive path comparison
    x = lambda j : os.path.normpath(os.path.normcase(j))
    if occasion == 'postprocess' and x(nfp) != x(path_to_file):
        # Postprocess results must end up at the original location
        shutil.copyfile(nfp, path_to_file)
        nfp = path_to_file
    return nfp
# Convenience wrappers binding _run_filetype_plugins to a fixed occasion
run_plugins_on_import = functools.partial(_run_filetype_plugins,
    occasion='import')
run_plugins_on_preprocess = functools.partial(_run_filetype_plugins,
    occasion='preprocess')
run_plugins_on_postprocess = functools.partial(_run_filetype_plugins,
    occasion='postprocess')
# }}}
# Plugin customization {{{
def customize_plugin(plugin, custom):
    '''Persist the (stripped) customization string for *plugin* in config.'''
    mapping = config['plugin_customization']
    mapping[plugin.name] = custom.strip()
    config['plugin_customization'] = mapping
def plugin_customization(plugin):
    '''Return the stored customization string for *plugin* ('' if none).'''
    stored = config['plugin_customization']
    return stored.get(plugin.name, '')
# }}}
# Input/Output profiles {{{
def input_profiles(): def input_profiles():
for plugin in _initialized_plugins: for plugin in _initialized_plugins:
if isinstance(plugin, InputProfile): if isinstance(plugin, InputProfile):
@ -90,7 +205,9 @@ def output_profiles():
for plugin in _initialized_plugins: for plugin in _initialized_plugins:
if isinstance(plugin, OutputProfile): if isinstance(plugin, OutputProfile):
yield plugin yield plugin
# }}}
# Metadata sources {{{
def metadata_sources(metadata_type='basic', customize=True, isbndb_key=None): def metadata_sources(metadata_type='basic', customize=True, isbndb_key=None):
for plugin in _initialized_plugins: for plugin in _initialized_plugins:
if isinstance(plugin, MetadataSource) and \ if isinstance(plugin, MetadataSource) and \
@ -117,31 +234,9 @@ def migrate_isbndb_key():
if key: if key:
prefs.set('isbndb_com_key', '') prefs.set('isbndb_com_key', '')
set_isbndb_key(key) set_isbndb_key(key)
# }}}
def reread_filetype_plugins(): # Metadata read/write {{{
global _on_import
global _on_preprocess
global _on_postprocess
_on_import = {}
_on_preprocess = {}
_on_postprocess = {}
for plugin in _initialized_plugins:
if isinstance(plugin, FileTypePlugin):
for ft in plugin.file_types:
if plugin.on_import:
if not _on_import.has_key(ft):
_on_import[ft] = []
_on_import[ft].append(plugin)
if plugin.on_preprocess:
if not _on_preprocess.has_key(ft):
_on_preprocess[ft] = []
_on_preprocess[ft].append(plugin)
if plugin.on_postprocess:
if not _on_postprocess.has_key(ft):
_on_postprocess[ft] = []
_on_postprocess[ft].append(plugin)
_metadata_readers = {} _metadata_readers = {}
_metadata_writers = {} _metadata_writers = {}
def reread_metadata_plugins(): def reread_metadata_plugins():
@ -233,51 +328,9 @@ def set_file_type_metadata(stream, mi, ftype):
print 'Failed to set metadata for', repr(getattr(mi, 'title', '')) print 'Failed to set metadata for', repr(getattr(mi, 'title', ''))
traceback.print_exc() traceback.print_exc()
# }}}
def _run_filetype_plugins(path_to_file, ft=None, occasion='preprocess'): # Add/remove plugins {{{
occasion = {'import':_on_import, 'preprocess':_on_preprocess,
'postprocess':_on_postprocess}[occasion]
customization = config['plugin_customization']
if ft is None:
ft = os.path.splitext(path_to_file)[-1].lower().replace('.', '')
nfp = path_to_file
for plugin in occasion.get(ft, []):
if is_disabled(plugin):
continue
plugin.site_customization = customization.get(plugin.name, '')
with plugin:
try:
nfp = plugin.run(path_to_file)
if not nfp:
nfp = path_to_file
except:
print 'Running file type plugin %s failed with traceback:'%plugin.name
traceback.print_exc()
x = lambda j : os.path.normpath(os.path.normcase(j))
if occasion == 'postprocess' and x(nfp) != x(path_to_file):
shutil.copyfile(nfp, path_to_file)
nfp = path_to_file
return nfp
run_plugins_on_import = functools.partial(_run_filetype_plugins,
occasion='import')
run_plugins_on_preprocess = functools.partial(_run_filetype_plugins,
occasion='preprocess')
run_plugins_on_postprocess = functools.partial(_run_filetype_plugins,
occasion='postprocess')
def initialize_plugin(plugin, path_to_zip_file):
try:
p = plugin(path_to_zip_file)
p.initialize()
return p
except Exception:
print 'Failed to initialize plugin:', plugin.name, plugin.version
tb = traceback.format_exc()
raise InvalidPlugin((_('Initialization of plugin %s failed with traceback:')
%tb) + '\n'+tb)
def add_plugin(path_to_zip_file): def add_plugin(path_to_zip_file):
make_config_dir() make_config_dir()
@ -307,14 +360,9 @@ def remove_plugin(plugin_or_name):
initialize_plugins() initialize_plugins()
return removed return removed
def is_disabled(plugin): # }}}
return plugin.name in config['disabled_plugins']
def find_plugin(name):
for plugin in _initialized_plugins:
if plugin.name == name:
return plugin
# Input/Output format plugins {{{
def input_format_plugins(): def input_format_plugins():
for plugin in _initialized_plugins: for plugin in _initialized_plugins:
@ -364,6 +412,9 @@ def available_output_formats():
formats.add(plugin.file_type) formats.add(plugin.file_type)
return formats return formats
# }}}
# Catalog plugins {{{
def catalog_plugins(): def catalog_plugins():
for plugin in _initialized_plugins: for plugin in _initialized_plugins:
@ -383,27 +434,32 @@ def plugin_for_catalog_format(fmt):
if fmt.lower() in plugin.file_types: if fmt.lower() in plugin.file_types:
return plugin return plugin
def device_plugins(): # }}}
def device_plugins(): # {{{
for plugin in _initialized_plugins: for plugin in _initialized_plugins:
if isinstance(plugin, DevicePlugin): if isinstance(plugin, DevicePlugin):
if not is_disabled(plugin): if not is_disabled(plugin):
if platform in plugin.supported_platforms:
yield plugin yield plugin
# }}}
def disable_plugin(plugin_or_name):
x = getattr(plugin_or_name, 'name', plugin_or_name)
plugin = find_plugin(x)
if not plugin.can_be_disabled:
raise ValueError('Plugin %s cannot be disabled'%x)
dp = config['disabled_plugins']
dp.add(x)
config['disabled_plugins'] = dp
def enable_plugin(plugin_or_name): # Initialize plugins {{{
x = getattr(plugin_or_name, 'name', plugin_or_name)
dp = config['disabled_plugins'] _initialized_plugins = []
if x in dp:
dp.remove(x) def initialize_plugin(plugin, path_to_zip_file):
config['disabled_plugins'] = dp try:
p = plugin(path_to_zip_file)
p.initialize()
return p
except Exception:
print 'Failed to initialize plugin:', plugin.name, plugin.version
tb = traceback.format_exc()
raise InvalidPlugin((_('Initialization of plugin %s failed with traceback:')
%tb) + '\n'+tb)
def initialize_plugins(): def initialize_plugins():
global _initialized_plugins global _initialized_plugins
@ -425,10 +481,14 @@ def initialize_plugins():
initialize_plugins() initialize_plugins()
def intialized_plugins(): def initialized_plugins():
for plugin in _initialized_plugins: for plugin in _initialized_plugins:
yield plugin yield plugin
# }}}
# CLI {{{
def option_parser(): def option_parser():
parser = OptionParser(usage=_('''\ parser = OptionParser(usage=_('''\
%prog options %prog options
@ -449,17 +509,6 @@ def option_parser():
help=_('Disable the named plugin')) help=_('Disable the named plugin'))
return parser return parser
def initialized_plugins():
return _initialized_plugins
def customize_plugin(plugin, custom):
d = config['plugin_customization']
d[plugin.name] = custom.strip()
config['plugin_customization'] = d
def plugin_customization(plugin):
return config['plugin_customization'].get(plugin.name, '')
def main(args=sys.argv): def main(args=sys.argv):
parser = option_parser() parser = option_parser()
if len(args) < 2: if len(args) < 2:
@ -504,3 +553,5 @@ def main(args=sys.argv):
if __name__ == '__main__': if __name__ == '__main__':
sys.exit(main()) sys.exit(main())
# }}}

View File

@ -0,0 +1,258 @@
from __future__ import with_statement
__license__ = 'GPL 3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>; 2010, Li Fanxi <lifanxi@freemindworld.com>'
__docformat__ = 'restructuredtext en'
import sys, textwrap
import traceback
from urllib import urlencode
from functools import partial
from lxml import etree
from calibre import browser, preferred_encoding
from calibre.ebooks.metadata import MetaInformation
from calibre.utils.config import OptionParser
from calibre.ebooks.metadata.fetch import MetadataSource
from calibre.utils.date import parse_date, utcnow
# Optional Douban API key; when set it is appended to every request URL
DOUBAN_API_KEY = None
NAMESPACES = {
    'openSearch':'http://a9.com/-/spec/opensearchrss/1.0/',
    'atom' : 'http://www.w3.org/2005/Atom',
    'db': 'http://www.douban.com/xmlns/'
}
# XPath factory pre-bound to the feed namespaces; the helpers below each
# extract one field from an atom feed or entry element
XPath = partial(etree.XPath, namespaces=NAMESPACES)
total_results = XPath('//openSearch:totalResults')
start_index = XPath('//openSearch:startIndex')
items_per_page = XPath('//openSearch:itemsPerPage')
entry = XPath('//atom:entry')
entry_id = XPath('descendant::atom:id')
title = XPath('descendant::atom:title')
description = XPath('descendant::atom:summary')
publisher = XPath("descendant::db:attribute[@name='publisher']")
isbn = XPath("descendant::db:attribute[@name='isbn13']")
date = XPath("descendant::db:attribute[@name='pubdate']")
creator = XPath("descendant::db:attribute[@name='author']")
tag = XPath("descendant::db:tag")
class DoubanBooks(MetadataSource):
    # Metadata-source plugin that queries the Douban.com book API.
    name = 'Douban Books'
    description = _('Downloads metadata from Douban.com')
    supported_platforms = ['windows', 'osx', 'linux'] # Platforms this plugin will run on
    author = 'Li Fanxi <lifanxi@freemindworld.com>' # The author of this plugin
    version = (1, 0, 0) # The version number of this plugin

    def fetch(self):
        # Populate self.results with up to 10 matches; on failure record
        # the exception and traceback for the framework to report.
        try:
            self.results = search(self.title, self.book_author, self.publisher,
                self.isbn, max_results=10,
                verbose=self.verbose)
        except Exception, e:
            self.exception = e
            self.tb = traceback.format_exc()
def report(verbose):
    '''When *verbose*, print the traceback of the exception being handled.'''
    if not verbose:
        return
    # traceback is already imported at module level; no local import needed
    traceback.print_exc()
class Query(object):
    # Builds and executes one Douban API request: either a direct ISBN
    # subject lookup or a keyword search over title/author/publisher.
    SEARCH_URL = 'http://api.douban.com/book/subjects?'
    ISBN_URL = 'http://api.douban.com/book/subject/isbn/'
    type = "search"

    def __init__(self, title=None, author=None, publisher=None, isbn=None,
            max_results=20, start_index=1):
        # At least one search field is required
        assert not(title is None and author is None and publisher is None and \
            isbn is None)
        # page size capped at 20 (presumably the API limit -- TODO confirm)
        assert (int(max_results) < 21)
        q = ''
        if isbn is not None:
            # ISBN wins over all other fields: use the direct lookup URL
            q = isbn
            self.type = 'isbn'
        else:
            def build_term(parts):
                return ' '.join(x for x in parts)
            if title is not None:
                q += build_term(title.split())
            if author is not None:
                q += (' ' if q else '') + build_term(author.split())
            if publisher is not None:
                q += (' ' if q else '') + build_term(publisher.split())
            self.type = 'search'
        if isinstance(q, unicode):
            q = q.encode('utf-8')
        if self.type == "isbn":
            self.url = self.ISBN_URL + q
            if DOUBAN_API_KEY is not None:
                self.url = self.url + "?apikey=" + DOUBAN_API_KEY
        else:
            self.url = self.SEARCH_URL+urlencode({
                'q':q,
                'max-results':max_results,
                'start-index':start_index,
            })
            if DOUBAN_API_KEY is not None:
                self.url = self.url + "&apikey=" + DOUBAN_API_KEY

    def __call__(self, browser, verbose):
        # Fetch and parse the feed. Returns (entries, new_start) where a
        # new_start of 0 signals there are no further result pages.
        if verbose:
            print 'Query:', self.url
        if self.type == "search":
            feed = etree.fromstring(browser.open(self.url).read())
            total = int(total_results(feed)[0].text)
            start = int(start_index(feed)[0].text)
            entries = entry(feed)
            new_start = start + len(entries)
            if new_start > total:
                new_start = 0
            return entries, new_start
        elif self.type == "isbn":
            # ISBN lookup yields a single subject; never paginate
            feed = etree.fromstring(browser.open(self.url).read())
            entries = entry(feed)
            return entries, 0
class ResultList(list):
def get_description(self, entry, verbose):
try:
desc = description(entry)
if desc:
return 'SUMMARY:\n'+desc[0].text
except:
report(verbose)
def get_title(self, entry):
candidates = [x.text for x in title(entry)]
return ': '.join(candidates)
def get_authors(self, entry):
m = creator(entry)
if not m:
m = []
m = [x.text for x in m]
return m
def get_tags(self, entry, verbose):
try:
btags = [x.attrib["name"] for x in tag(entry)]
tags = []
for t in btags:
tags.extend([y.strip() for y in t.split('/')])
tags = list(sorted(list(set(tags))))
except:
report(verbose)
tags = []
return [x.replace(',', ';') for x in tags]
def get_publisher(self, entry, verbose):
try:
pub = publisher(entry)[0].text
except:
pub = None
return pub
def get_isbn(self, entry, verbose):
try:
isbn13 = isbn(entry)[0].text
except Exception:
isbn13 = None
return isbn13
def get_date(self, entry, verbose):
try:
d = date(entry)
if d:
default = utcnow().replace(day=15)
d = parse_date(d[0].text, assume_utc=True, default=default)
else:
d = None
except:
report(verbose)
d = None
return d
def populate(self, entries, browser, verbose=False):
for x in entries:
try:
id_url = entry_id(x)[0].text
title = self.get_title(x)
except:
report(verbose)
mi = MetaInformation(title, self.get_authors(x))
try:
if DOUBAN_API_KEY is not None:
id_url = id_url + "?apikey=" + DOUBAN_API_KEY
raw = browser.open(id_url).read()
feed = etree.fromstring(raw)
x = entry(feed)[0]
except Exception, e:
if verbose:
print 'Failed to get all details for an entry'
print e
mi.comments = self.get_description(x, verbose)
mi.tags = self.get_tags(x, verbose)
mi.isbn = self.get_isbn(x, verbose)
mi.publisher = self.get_publisher(x, verbose)
mi.pubdate = self.get_date(x, verbose)
self.append(mi)
def search(title=None, author=None, publisher=None, isbn=None,
        verbose=False, max_results=40):
    # Page through Douban results (Query returns a next-start of 0 when
    # there are no more pages) until enough entries are gathered, then
    # build a populated ResultList of MetaInformation objects.
    br = browser()
    start, entries = 1, []
    while start > 0 and len(entries) <= max_results:
        new, start = Query(title=title, author=author, publisher=publisher,
            isbn=isbn, max_results=max_results, start_index=start)(br, verbose)
        if not new:
            break
        entries.extend(new)
    # Trim any overshoot from the final page
    entries = entries[:max_results]
    ans = ResultList()
    ans.populate(entries, br, verbose)
    return ans
def option_parser():
    '''Return the command-line parser for the Douban metadata fetcher.'''
    usage = textwrap.dedent('''\
        %prog [options]
        Fetch book metadata from Douban. You must specify one of title, author,
        publisher or ISBN. If you specify ISBN the others are ignored. Will
        fetch a maximum of 100 matches, so you should make your query as
        specific as possible.
        ''')
    parser = OptionParser(usage)
    add = parser.add_option
    add('-t', '--title', help='Book title')
    add('-a', '--author', help='Book author(s)')
    add('-p', '--publisher', help='Book publisher')
    add('-i', '--isbn', help='Book ISBN')
    add('-m', '--max-results', default=10,
        help='Maximum number of results to fetch')
    add('-v', '--verbose', default=0, action='count',
        help='Be more verbose about errors')
    return parser
def main(args=sys.argv):
    # CLI entry point: parse options, run the search, and print each
    # result encoded for the local terminal.
    parser = option_parser()
    opts, args = parser.parse_args(args)
    try:
        results = search(opts.title, opts.author, opts.publisher, opts.isbn,
            verbose=opts.verbose, max_results=int(opts.max_results))
    except AssertionError:
        # Query() asserts at least one search field was supplied
        report(True)
        parser.print_help()
        return 1
    for result in results:
        print unicode(result).encode(preferred_encoding)
        print
if __name__ == '__main__':
sys.exit(main())

View File

@ -198,6 +198,38 @@ class Amazon(MetadataSource):
self.exception = e self.exception = e
self.tb = traceback.format_exc() self.tb = traceback.format_exc()
class LibraryThing(MetadataSource):
    # Social-metadata source: fetches series name/index for an ISBN via
    # the status.calibre-ebook.com librarything proxy.
    name = 'LibraryThing'
    metadata_type = 'social'
    description = _('Downloads series information from librarything.com')

    def fetch(self):
        # The lookup is keyed on ISBN only; nothing to do without one
        if not self.isbn:
            return
        from calibre import browser
        from calibre.ebooks.metadata import MetaInformation
        import json
        br = browser()
        try:
            raw = br.open(
                'http://status.calibre-ebook.com/library_thing/metadata/'+self.isbn
                ).read()
            data = json.loads(raw)
            if not data:
                return
            if 'error' in data:
                raise Exception(data['error'])
            # Only series information is ever returned by this source
            if 'series' in data and 'series_index' in data:
                mi = MetaInformation(self.title, [])
                mi.series = data['series']
                mi.series_index = data['series_index']
                self.results = mi
        except Exception, e:
            # Record failure for the framework to report
            self.exception = e
            self.tb = traceback.format_exc()
def result_index(source, result): def result_index(source, result):
if not result.isbn: if not result.isbn:
return -1 return -1
@ -266,7 +298,7 @@ def get_social_metadata(mi, verbose=0):
with MetadataSources(fetchers) as manager: with MetadataSources(fetchers) as manager:
manager(mi.title, mi.authors, mi.publisher, mi.isbn, verbose) manager(mi.title, mi.authors, mi.publisher, mi.isbn, verbose)
manager.join() manager.join()
ratings, tags, comments = [], set([]), set([]) ratings, tags, comments, series, series_index = [], set([]), set([]), None, None
for fetcher in fetchers: for fetcher in fetchers:
if fetcher.results: if fetcher.results:
dmi = fetcher.results dmi = fetcher.results
@ -279,6 +311,10 @@ def get_social_metadata(mi, verbose=0):
mi.pubdate = dmi.pubdate mi.pubdate = dmi.pubdate
if dmi.comments: if dmi.comments:
comments.add(dmi.comments) comments.add(dmi.comments)
if dmi.series is not None:
series = dmi.series
if dmi.series_index is not None:
series_index = dmi.series_index
if ratings: if ratings:
rating = sum(ratings)/float(len(ratings)) rating = sum(ratings)/float(len(ratings))
if mi.rating is None or mi.rating < 0.1: if mi.rating is None or mi.rating < 0.1:
@ -295,6 +331,9 @@ def get_social_metadata(mi, verbose=0):
mi.comments = '' mi.comments = ''
for x in comments: for x in comments:
mi.comments += x+'\n\n' mi.comments += x+'\n\n'
if series and series_index is not None:
mi.series = series
mi.series_index = series_index
return [(x.name, x.exception, x.tb) for x in fetchers if x.exception is not return [(x.name, x.exception, x.tb) for x in fetchers if x.exception is not
None] None]

View File

@ -481,13 +481,13 @@ class DeleteAction(object): # {{{
ids = self._get_selected_ids() ids = self._get_selected_ids()
if not ids: if not ids:
#For some reason the delete dialog reports no selection, so #For some reason the delete dialog reports no selection, so
#we don't need to do it here #we need to do it here
return return
to_delete = {} to_delete = {}
some_to_delete = False some_to_delete = False
for model,name in ((self.memory_view.model(), _('Main memory')), for model,name in ((self.memory_view.model(), _('Main memory')),
(self.card_a_view.model(), _('Storage card A')), (self.card_a_view.model(), _('Storage Card A')),
(self.card_b_view.model(), _('Storage card A'))): (self.card_b_view.model(), _('Storage Card B'))):
to_delete[name] = (model, model.paths_for_db_ids(ids)) to_delete[name] = (model, model.paths_for_db_ids(ids))
if len(to_delete[name][1]) > 0: if len(to_delete[name][1]) > 0:
some_to_delete = True some_to_delete = True

View File

@ -49,6 +49,9 @@ class SocialMetadata(QDialog):
self.mi.tags = self.worker.mi.tags self.mi.tags = self.worker.mi.tags
self.mi.rating = self.worker.mi.rating self.mi.rating = self.worker.mi.rating
self.mi.comments = self.worker.mi.comments self.mi.comments = self.worker.mi.comments
if self.worker.mi.series:
self.mi.series = self.worker.mi.series
self.mi.series_index = self.worker.mi.series_index
QDialog.accept(self) QDialog.accept(self)
@property @property

View File

@ -131,6 +131,7 @@ class ToolbarMixin(object): # {{{
self.delete_all_but_selected_formats) self.delete_all_but_selected_formats)
self.delete_menu.addAction( self.delete_menu.addAction(
_('Remove covers from selected books'), self.delete_covers) _('Remove covers from selected books'), self.delete_covers)
self.delete_menu.addSeparator()
self.delete_menu.addAction( self.delete_menu.addAction(
_('Remove matching books from device'), _('Remove matching books from device'),
self.remove_matching_books_from_device) self.remove_matching_books_from_device)
@ -408,6 +409,7 @@ class LayoutMixin(object): # {{{
self.library_view.set_current_row(0) self.library_view.set_current_row(0)
m.current_changed(self.library_view.currentIndex(), m.current_changed(self.library_view.currentIndex(),
self.library_view.currentIndex()) self.library_view.currentIndex())
self.library_view.setFocus(Qt.OtherFocusReason)
def save_layout_state(self): def save_layout_state(self):

View File

@ -127,6 +127,10 @@ class DownloadMetadata(Thread):
self.db.set_tags(id, mi.tags) self.db.set_tags(id, mi.tags)
if mi.comments: if mi.comments:
self.db.set_comment(id, mi.comments) self.db.set_comment(id, mi.comments)
if mi.series:
self.db.set_series(id, mi.series)
if mi.series_index is not None:
self.db.set_series_index(id, mi.series_index)
self.updated = set(self.fetched_metadata) self.updated = set(self.fetched_metadata)