KG updates

GRiker 2011-06-07 03:29:05 -06:00
commit ae0d8808dc
78 changed files with 2387 additions and 430 deletions

View File

@ -14,6 +14,7 @@ resources/scripts.pickle
resources/ebook-convert-complete.pickle
resources/builtin_recipes.xml
resources/builtin_recipes.zip
resources/template-functions.json
setup/installer/windows/calibre/build.log
src/calibre/translations/.errors
src/cssutils/.svn/

View File

@ -0,0 +1,36 @@
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe(BasicNewsRecipe):
title = u'Brigitte.de'
__author__ = 'schuster'
oldest_article = 14
max_articles_per_feed = 100
no_stylesheets = True
use_embedded_content = False
language = 'de'
remove_javascript = True
remove_empty_feeds = True
timeout = 10
cover_url = 'http://www.medienmilch.de/typo3temp/pics/Brigitte-Logo_d5feb4a6e4.jpg'
masthead_url = 'http://www.medienmilch.de/typo3temp/pics/Brigitte-Logo_d5feb4a6e4.jpg'
remove_tags = [dict(attrs={'class':['linklist', 'head', 'indent right relatedContent', 'artikel-meta segment', 'segment', 'comment commentFormWrapper segment borderBG', 'segment borderBG comments', 'segment borderBG box', 'center', 'segment nextPageLink', 'inCar']}),
dict(id=['header', 'artTools', 'context', 'interact', 'footer-navigation', 'bwNet', 'copy', 'keyboardNavigationHint']),
dict(name=['hjtrs', 'kud'])]
feeds = [(u'Mode', u'http://www.brigitte.de/mode/feed.rss'),
(u'Beauty', u'http://www.brigitte.de/beauty/feed.rss'),
(u'Luxus', u'http://www.brigitte.de/luxus/feed.rss'),
(u'Figur', u'http://www.brigitte.de/figur/feed.rss'),
(u'Gesundheit', u'http://www.brigitte.de/gesundheit/feed.rss'),
(u'Liebe&Sex', u'http://www.brigitte.de/liebe-sex/feed.rss'),
(u'Gesellschaft', u'http://www.brigitte.de/gesellschaft/feed.rss'),
(u'Kultur', u'http://www.brigitte.de/kultur/feed.rss'),
(u'Reise', u'http://www.brigitte.de/reise/feed.rss'),
(u'Kochen', u'http://www.brigitte.de/kochen/feed.rss'),
(u'Wohnen', u'http://www.brigitte.de/wohnen/feed.rss'),
(u'Job', u'http://www.brigitte.de/job/feed.rss'),
(u'Erfahrungen', u'http://www.brigitte.de/erfahrungen/feed.rss'),
]

View File

@ -1,5 +1,4 @@
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1303841067(BasicNewsRecipe):
title = u'Express.de'
@ -12,7 +11,6 @@ class AdvancedUserRecipe1303841067(BasicNewsRecipe):
extra_css = '''
h2{font-family:Arial,Helvetica,sans-serif; font-size: x-small;}
h1{ font-family:Arial,Helvetica,sans-serif; font-size:x-large; font-weight:bold;}
'''
remove_javascript = True
remove_tags_befor = [dict(name='div', attrs={'class':'Datum'})]
@ -25,6 +23,7 @@ class AdvancedUserRecipe1303841067(BasicNewsRecipe):
dict(id='Logo'),
dict(id='MainLinkSpacer'),
dict(id='MainLinks'),
dict(id='ContainerPfad'), #neu
dict(title='Diese Seite Bookmarken'),
dict(name='span'),
@ -44,7 +43,8 @@ class AdvancedUserRecipe1303841067(BasicNewsRecipe):
dict(name='div', attrs={'class':'HeaderSearch'}),
dict(name='div', attrs={'class':'sbutton'}),
dict(name='div', attrs={'class':'active'}),
dict(name='div', attrs={'class':'MoreNews'}), #neu
dict(name='div', attrs={'class':'ContentBoxSubline'}) #neu
]
@ -68,7 +68,5 @@ class AdvancedUserRecipe1303841067(BasicNewsRecipe):
(u'Fortuna D~Dorf', u'http://www.express.de/sport/fussball/fortuna/-/3292/3292/-/view/asFeed/-/index.xml'),
(u'Basketball News', u'http://www.express.de/sport/basketball/-/3190/3190/-/view/asFeed/-/index.xml'),
(u'Big Brother', u'http://www.express.de/news/promi-show/big-brother/-/2402/2402/-/view/asFeed/-/index.xml'),
]
]

View File

@ -0,0 +1,52 @@
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe(BasicNewsRecipe):
title = 'Heise-online'
description = 'News vom Heise-Verlag'
__author__ = 'schuster'
use_embedded_content = False
language = 'de'
oldest_article = 2
max_articles_per_feed = 35
rescale_images = True
remove_empty_feeds = True
timeout = 5
no_stylesheets = True
remove_tags_after = dict(name ='p', attrs={'class':'editor'})
remove_tags = [dict(id='navi_top_container'),
dict(id='navi_bottom'),
dict(id='mitte_rechts'),
dict(id='navigation'),
dict(id='subnavi'),
dict(id='social_bookmarks'),
dict(id='permalink'),
dict(id='content_foren'),
dict(id='seiten_navi'),
dict(id='adbottom'),
dict(id='sitemap')]
feeds = [
('Newsticker', 'http://www.heise.de/newsticker/heise.rdf'),
('Auto', 'http://www.heise.de/autos/rss/news.rdf'),
('Foto ', 'http://www.heise.de/foto/rss/news-atom.xml'),
('Mac&i', 'http://www.heise.de/mac-and-i/news.rdf'),
('Mobile ', 'http://www.heise.de/mobil/newsticker/heise-atom.xml'),
('Netz ', 'http://www.heise.de/netze/rss/netze-atom.xml'),
('Open ', 'http://www.heise.de/open/news/news-atom.xml'),
('Resale ', 'http://www.heise.de/resale/rss/resale.rdf'),
('Security ', 'http://www.heise.de/security/news/news-atom.xml'),
('C`t', 'http://www.heise.de/ct/rss/artikel-atom.xml'),
('iX', 'http://www.heise.de/ix/news/news.rdf'),
('Mach-flott', 'http://www.heise.de/mach-flott/rss/mach-flott-atom.xml'),
('Blog: Babel-Bulletin', 'http://www.heise.de/developer/rss/babel-bulletin/blog.rdf'),
('Blog: Der Dotnet-Doktor', 'http://www.heise.de/developer/rss/dotnet-doktor/blog.rdf'),
('Blog: Bernds Management-Welt', 'http://www.heise.de/developer/rss/bernds-management-welt/blog.rdf'),
('Blog: IT conversation', 'http://www.heise.de/developer/rss/world-of-it/blog.rdf'),
('Blog: Kais bewegtes Web', 'http://www.heise.de/developer/rss/kais-bewegtes-web/blog.rdf')
]
def print_version(self, url):
return url + '?view=print'
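The print_version hook above just appends a query parameter to each article URL before download. A minimal standalone sketch of that mapping, where the sample article URL is an invented placeholder and only the '?view=print' suffix comes from the recipe:

# Hedged sketch of the Heise print_version mapping; the URL is a made-up example.
def print_version(url):
    return url + '?view=print'

print(print_version('http://www.heise.de/newsticker/meldung/beispiel-123456.html'))
# -> http://www.heise.de/newsticker/meldung/beispiel-123456.html?view=print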

View File

@ -3,9 +3,6 @@ class AdvancedUserRecipe1303841067(BasicNewsRecipe):
title = u'Max-Planck-Inst.'
__author__ = 'schuster'
remove_tags = [dict(attrs={'class':['clearfix', 'lens', 'col2_box_list', 'col2_box_teaser group_ext no_print', 'dotted_line', 'col2_box_teaser', 'box_image small', 'bold', 'col2_box_teaser no_print', 'print_kontakt']}),
dict(id=['ie_clearing', 'col2', 'col2_content']),
dict(name=['script', 'noscript', 'style'])]
oldest_article = 30
max_articles_per_feed = 100
no_stylesheets = True
@ -13,6 +10,11 @@ class AdvancedUserRecipe1303841067(BasicNewsRecipe):
language = 'de'
remove_javascript = True
remove_tags = [dict(attrs={'class':['box_url', 'print_kontakt']}),
dict(id=['skiplinks'])]
def print_version(self, url):
split_url = url.split("/")
print_url = 'http://www.mpg.de/print/' + split_url[3]
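The print_version above rewrites an article URL onto mpg.de's print view by reusing the path segment right after the host; the return statement itself falls outside the visible hunk. A rough sketch of the transformation, with an invented sample URL:

# Hedged sketch of the mpg.de print URL rewrite; the sample URL is hypothetical.
url = 'http://www.mpg.de/1234567/beispiel_artikel'
split_url = url.split("/")    # ['http:', '', 'www.mpg.de', '1234567', 'beispiel_artikel']
print_url = 'http://www.mpg.de/print/' + split_url[3]
print(print_url)              # -> http://www.mpg.de/print/1234567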

View File

@ -69,7 +69,11 @@ class Newsweek(BasicNewsRecipe):
for section, shref in self.newsweek_sections():
self.log('Processing section', section, shref)
articles = []
soups = [self.index_to_soup(shref)]
try:
soups = [self.index_to_soup(shref)]
except:
self.log.warn('Section %s not found, skipping'%section)
continue
na = soups[0].find('a', rel='next')
if na:
soups.append(self.index_to_soup(self.BASE_URL+na['href']))

View File

@ -0,0 +1,35 @@
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe(BasicNewsRecipe):
title = u'Polizeipresse - Deutschland'
__author__ = 'schuster'
description = 'Tagesaktuelle "Polizeiberichte" aus ganz Deutschland (bis auf Ortsebene).' 'Um deinen Ort/Stadt/Kreis usw. einzubinden, gehe auf "http://www.presseportal.de/polizeipresse/" und suche im oberen "Suchfeld" nach dem Namen.' 'Oberhalb der Suchergebnisse (Folgen:) auf den üblichen link zu den RSS-Feeds klicken und den RSS-link im Rezept unter "feeds" eintragen wie üblich.' 'Die Auswahl von Orten kann vereinfacht werden wenn man den Suchbegriff wie folgt eingibt:' '"Stadt-Ort".'
oldest_article = 21
max_articles_per_feed = 100
no_stylesheets = True
use_embedded_content = False
language = 'de'
remove_javascript = True
masthead_url = 'http://www.alt-heliservice.de/images/34_BPOL_Logo_4C_g_schutzbereich.jpg'
cover_url = 'http://berlinstadtservice.de/buerger/Bundespolizei-Logo.png'
remove_tags = [
dict(name='div', attrs={'id':'logo'}),
dict(name='div', attrs={'id':'origin'}),
dict(name='pre', attrs={'class':'xml_contact'})]
def print_version(self,url):
segments = url.split('/')
printURL = 'http://www.presseportal.de/print.htx?nr=' + '/'.join(segments[5:6]) + '&type=polizei'
return printURL
feeds = [(u'Frimmerdorf', u'http://www.presseportal.de/rss/rss2_vts.htx?q=Grevenbroich-frimmersdorf&w=public_service'),
(u'Neurath', u'http://www.presseportal.de/rss/rss2_vts.htx?q=Grevenbroich-neurath&w=public_service'),
(u'Gustorf', u'http://www.presseportal.de/rss/rss2_vts.htx?q=Grevenbroich-gustorf&w=public_service'),
(u'Neuenhausen', u'http://www.presseportal.de/rss/rss2_vts.htx?q=Grevenbroich-neuenhausen&w=public_service'),
(u'Wevelinghoven', u'http://www.presseportal.de/rss/rss2_vts.htx?q=Grevenbroich-Wevelinghoven&w=public_service'),
(u'Grevenbroich ges.', u'http://www.presseportal.de/rss/rss2_vts.htx?q=grevenbroich&w=public_service'),
(u'Kreis Neuss ges.', u'http://www.presseportal.de/rss/rss2_vts.htx?q=Rhein-Kreis+Neuss&w=public_service'),
]
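The print_version above extracts the story number from a presseportal.de article URL and builds the corresponding print.htx URL. A hedged sketch of that construction; the article URL and its path layout are assumptions, only the print.htx pattern comes from the recipe:

# Hedged sketch of the Polizeipresse print URL; the sample URL is invented.
url = 'http://www.presseportal.de/polizeipresse/pm/110973/2063818'
segments = url.split('/')     # segments[5] would be the story number, here '110973'
printURL = 'http://www.presseportal.de/print.htx?nr=' + '/'.join(segments[5:6]) + '&type=polizei'
print(printURL)               # -> http://www.presseportal.de/print.htx?nr=110973&type=polizei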

View File

@ -20,8 +20,8 @@
<script type="text/javascript"
src="{prefix}/static/jquery.multiselect.min.js"></script>
<script type="text/javascript" src="{prefix}/static/stacktrace.js"></script>
<script type="text/javascript" src="{prefix}/static/browse/browse.js"></script>
<script type="text/javascript">
var sort_cookie_name = "{sort_cookie_name}";

View File

@ -129,7 +129,13 @@ function toplevel() {
// }}}
function render_error(msg) {
return '<div class="ui-widget"><div class="ui-state-error ui-corner-all" style="padding: 0pt 0.7em"><p><span class="ui-icon ui-icon-alert" style="float: left; margin-right: 0.3em">&nbsp;</span><strong>Error: </strong>'+msg+"</p></div></div>"
var st = "";
try {
var st = printStackTrace();
st = st.join('\n\n');
} catch(e) {
}
return '<div class="ui-widget"><div class="ui-state-error ui-corner-all" style="padding: 0pt 0.7em"><p><span class="ui-icon ui-icon-alert" style="float: left; margin-right: 0.3em">&nbsp;</span><strong>Error: </strong>'+msg+"<pre>"+st+"</pre></p></div></div>"
}
// Category feed {{{

View File

@ -0,0 +1,371 @@
// Domain Public by Eric Wendelin http://eriwen.com/ (2008)
// Luke Smith http://lucassmith.name/ (2008)
// Loic Dachary <loic@dachary.org> (2008)
// Johan Euphrosine <proppy@aminche.com> (2008)
// Oyvind Sean Kinsey http://kinsey.no/blog (2010)
// Victor Homyakov <victor-homyakov@users.sourceforge.net> (2010)
//
// Information and discussions
// http://jspoker.pokersource.info/skin/test-printstacktrace.html
// http://eriwen.com/javascript/js-stack-trace/
// http://eriwen.com/javascript/stacktrace-update/
// http://pastie.org/253058
//
// guessFunctionNameFromLines comes from firebug
//
// Software License Agreement (BSD License)
//
// Copyright (c) 2007, Parakey Inc.
// All rights reserved.
//
// Redistribution and use of this software in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above
// copyright notice, this list of conditions and the
// following disclaimer.
//
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// * Neither the name of Parakey Inc. nor the names of its
// contributors may be used to endorse or promote products
// derived from this software without specific prior
// written permission of Parakey Inc.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
// IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
// OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/**
* Main function giving a function stack trace with a forced or passed in Error
*
* @cfg {Error} e The error to create a stacktrace from (optional)
* @cfg {Boolean} guess If we should try to resolve the names of anonymous functions
* @return {Array} of Strings with functions, lines, files, and arguments where possible
*/
function printStackTrace(options) {
options = options || {guess: true};
var ex = options.e || null, guess = !!options.guess;
var p = new printStackTrace.implementation(), result = p.run(ex);
return (guess) ? p.guessAnonymousFunctions(result) : result;
}
printStackTrace.implementation = function() {
};
printStackTrace.implementation.prototype = {
run: function(ex) {
ex = ex || this.createException();
// Do not use the stored mode: different exceptions in Chrome
// may or may not have arguments or stack
var mode = this.mode(ex);
// Use either the stored mode, or resolve it
//var mode = this._mode || this.mode(ex);
if (mode === 'other') {
return this.other(arguments.callee);
} else {
return this[mode](ex);
}
},
createException: function() {
try {
this.undef();
return null;
} catch (e) {
return e;
}
},
/**
* @return {String} mode of operation for the environment in question.
*/
mode: function(e) {
if (e['arguments'] && e.stack) {
return (this._mode = 'chrome');
} else if (e.message && typeof window !== 'undefined' && window.opera) {
return (this._mode = e.stacktrace ? 'opera10' : 'opera');
} else if (e.stack) {
return (this._mode = 'firefox');
}
return (this._mode = 'other');
},
/**
* Given a context, function name, and callback function, overwrite it so that it calls
* printStackTrace() first with a callback and then runs the rest of the body.
*
* @param {Object} context of execution (e.g. window)
* @param {String} functionName to instrument
* @param {Function} function to call with a stack trace on invocation
*/
instrumentFunction: function(context, functionName, callback) {
context = context || window;
var original = context[functionName];
context[functionName] = function instrumented() {
callback.call(this, printStackTrace().slice(4));
return context[functionName]._instrumented.apply(this, arguments);
};
context[functionName]._instrumented = original;
},
/**
* Given a context and function name of a function that has been
* instrumented, revert the function to its original (non-instrumented)
* state.
*
* @param {Object} context of execution (e.g. window)
* @param {String} functionName to de-instrument
*/
deinstrumentFunction: function(context, functionName) {
if (context[functionName].constructor === Function &&
context[functionName]._instrumented &&
context[functionName]._instrumented.constructor === Function) {
context[functionName] = context[functionName]._instrumented;
}
},
/**
* Given an Error object, return a formatted Array based on Chrome's stack string.
*
* @param e - Error object to inspect
* @return Array<String> of function calls, files and line numbers
*/
chrome: function(e) {
//return e.stack.replace(/^[^\(]+?[\n$]/gm, '').replace(/^\s+at\s+/gm, '').replace(/^Object.<anonymous>\s*\(/gm, '{anonymous}()@').split('\n');
return e.stack.replace(/^\S[^\(]+?[\n$]/gm, '').
replace(/^\s+at\s+/gm, '').
replace(/^([^\(]+?)([\n$])/gm, '{anonymous}()@$1$2').
replace(/^Object.<anonymous>\s*\(([^\)]+)\)/gm, '{anonymous}()@$1').split('\n');
},
/**
* Given an Error object, return a formatted Array based on Firefox's stack string.
*
* @param e - Error object to inspect
* @return Array<String> of function calls, files and line numbers
*/
firefox: function(e) {
return e.stack.replace(/(?:\n@:0)?\s+$/m, '').replace(/^\(/gm, '{anonymous}(').split('\n');
},
/**
* Given an Error object, return a formatted Array based on Opera 10's stacktrace string.
*
* @param e - Error object to inspect
* @return Array<String> of function calls, files and line numbers
*/
opera10: function(e) {
var stack = e.stacktrace;
var lines = stack.split('\n'), ANON = '{anonymous}', lineRE = /.*line (\d+), column (\d+) in ((<anonymous function\:?\s*(\S+))|([^\(]+)\([^\)]*\))(?: in )?(.*)\s*$/i, i, j, len;
for (i = 2, j = 0, len = lines.length; i < len - 2; i++) {
if (lineRE.test(lines[i])) {
var location = RegExp.$6 + ':' + RegExp.$1 + ':' + RegExp.$2;
var fnName = RegExp.$3;
fnName = fnName.replace(/<anonymous function\:?\s?(\S+)?>/g, ANON);
lines[j++] = fnName + '@' + location;
}
}
lines.splice(j, lines.length - j);
return lines;
},
// Opera 7.x-9.x only!
opera: function(e) {
var lines = e.message.split('\n'), ANON = '{anonymous}', lineRE = /Line\s+(\d+).*script\s+(http\S+)(?:.*in\s+function\s+(\S+))?/i, i, j, len;
for (i = 4, j = 0, len = lines.length; i < len; i += 2) {
//TODO: RegExp.exec() would probably be cleaner here
if (lineRE.test(lines[i])) {
lines[j++] = (RegExp.$3 ? RegExp.$3 + '()@' + RegExp.$2 + RegExp.$1 : ANON + '()@' + RegExp.$2 + ':' + RegExp.$1) + ' -- ' + lines[i + 1].replace(/^\s+/, '');
}
}
lines.splice(j, lines.length - j);
return lines;
},
// Safari, IE, and others
other: function(curr) {
var ANON = '{anonymous}', fnRE = /function\s*([\w\-$]+)?\s*\(/i, stack = [], fn, args, maxStackSize = 10;
while (curr && stack.length < maxStackSize) {
fn = fnRE.test(curr.toString()) ? RegExp.$1 || ANON : ANON;
args = Array.prototype.slice.call(curr['arguments'] || []);
stack[stack.length] = fn + '(' + this.stringifyArguments(args) + ')';
curr = curr.caller;
}
return stack;
},
/**
* Given an arguments array as a String, substituting type names for non-string types.
*
* @param {Arguments} object
* @return {Array} of Strings with stringified arguments
*/
stringifyArguments: function(args) {
var slice = Array.prototype.slice;
for (var i = 0; i < args.length; ++i) {
var arg = args[i];
if (arg === undefined) {
args[i] = 'undefined';
} else if (arg === null) {
args[i] = 'null';
} else if (arg.constructor) {
if (arg.constructor === Array) {
if (arg.length < 3) {
args[i] = '[' + this.stringifyArguments(arg) + ']';
} else {
args[i] = '[' + this.stringifyArguments(slice.call(arg, 0, 1)) + '...' + this.stringifyArguments(slice.call(arg, -1)) + ']';
}
} else if (arg.constructor === Object) {
args[i] = '#object';
} else if (arg.constructor === Function) {
args[i] = '#function';
} else if (arg.constructor === String) {
args[i] = '"' + arg + '"';
}
}
}
return args.join(',');
},
sourceCache: {},
/**
* @return the text from a given URL.
*/
ajax: function(url) {
var req = this.createXMLHTTPObject();
if (!req) {
return;
}
req.open('GET', url, false);
req.setRequestHeader('User-Agent', 'XMLHTTP/1.0');
req.send('');
return req.responseText;
},
/**
* Try XHR methods in order and store XHR factory.
*
* @return <Function> XHR function or equivalent
*/
createXMLHTTPObject: function() {
var xmlhttp, XMLHttpFactories = [
function() {
return new XMLHttpRequest();
}, function() {
return new ActiveXObject('Msxml2.XMLHTTP');
}, function() {
return new ActiveXObject('Msxml3.XMLHTTP');
}, function() {
return new ActiveXObject('Microsoft.XMLHTTP');
}
];
for (var i = 0; i < XMLHttpFactories.length; i++) {
try {
xmlhttp = XMLHttpFactories[i]();
// Use memoization to cache the factory
this.createXMLHTTPObject = XMLHttpFactories[i];
return xmlhttp;
} catch (e) {
}
}
},
/**
* Given a URL, check if it is in the same domain (so we can get the source
* via Ajax).
*
* @param url <String> source url
* @return False if we need a cross-domain request
*/
isSameDomain: function(url) {
return url.indexOf(location.hostname) !== -1;
},
/**
* Get source code from given URL if in the same domain.
*
* @param url <String> JS source URL
* @return <Array> Array of source code lines
*/
getSource: function(url) {
if (!(url in this.sourceCache)) {
this.sourceCache[url] = this.ajax(url).split('\n');
}
return this.sourceCache[url];
},
guessAnonymousFunctions: function(stack) {
for (var i = 0; i < stack.length; ++i) {
var reStack = /\{anonymous\}\(.*\)@(\w+:\/\/([\-\w\.]+)+(:\d+)?[^:]+):(\d+):?(\d+)?/;
var frame = stack[i], m = reStack.exec(frame);
if (m) {
var file = m[1], lineno = m[4], charno = m[7] || 0; //m[7] is character position in Chrome
if (file && this.isSameDomain(file) && lineno) {
var functionName = this.guessAnonymousFunction(file, lineno, charno);
stack[i] = frame.replace('{anonymous}', functionName);
}
}
}
return stack;
},
guessAnonymousFunction: function(url, lineNo, charNo) {
var ret;
try {
ret = this.findFunctionName(this.getSource(url), lineNo);
} catch (e) {
ret = 'getSource failed with url: ' + url + ', exception: ' + e.toString();
}
return ret;
},
findFunctionName: function(source, lineNo) {
// FIXME findFunctionName fails for compressed source
// (more than one function on the same line)
// TODO use captured args
// function {name}({args}) m[1]=name m[2]=args
var reFunctionDeclaration = /function\s+([^(]*?)\s*\(([^)]*)\)/;
// {name} = function ({args}) TODO args capture
// /['"]?([0-9A-Za-z_]+)['"]?\s*[:=]\s*function(?:[^(]*)/
var reFunctionExpression = /['"]?([0-9A-Za-z_]+)['"]?\s*[:=]\s*function\b/;
// {name} = eval()
var reFunctionEvaluation = /['"]?([0-9A-Za-z_]+)['"]?\s*[:=]\s*(?:eval|new Function)\b/;
// Walk backwards in the source lines until we find
// the line which matches one of the patterns above
var code = "", line, maxLines = 10, m;
for (var i = 0; i < maxLines; ++i) {
// FIXME lineNo is 1-based, source[] is 0-based
line = source[lineNo - i];
if (line) {
code = line + code;
m = reFunctionExpression.exec(code);
if (m && m[1]) {
return m[1];
}
m = reFunctionDeclaration.exec(code);
if (m && m[1]) {
//return m[1] + "(" + (m[2] || "") + ")";
return m[1];
}
m = reFunctionEvaluation.exec(code);
if (m && m[1]) {
return m[1];
}
}
}
return '(?)';
}
};

View File

@ -1,43 +0,0 @@
{
"and": "def evaluate(self, formatter, kwargs, mi, locals, *args):\n i = 0\n while i < len(args):\n if not args[i]:\n return ''\n i += 1\n return '1'\n",
"contains": "def evaluate(self, formatter, kwargs, mi, locals,\n val, test, value_if_present, value_if_not):\n if re.search(test, val, flags=re.I):\n return value_if_present\n else:\n return value_if_not\n",
"divide": "def evaluate(self, formatter, kwargs, mi, locals, x, y):\n x = float(x if x else 0)\n y = float(y if y else 0)\n return unicode(x / y)\n",
"uppercase": "def evaluate(self, formatter, kwargs, mi, locals, val):\n return val.upper()\n",
"strcat": "def evaluate(self, formatter, kwargs, mi, locals, *args):\n i = 0\n res = ''\n for i in range(0, len(args)):\n res += args[i]\n return res\n",
"in_list": "def evaluate(self, formatter, kwargs, mi, locals, val, sep, pat, fv, nfv):\n l = [v.strip() for v in val.split(sep) if v.strip()]\n if l:\n for v in l:\n if re.search(pat, v, flags=re.I):\n return fv\n return nfv\n",
"not": "def evaluate(self, formatter, kwargs, mi, locals, *args):\n i = 0\n while i < len(args):\n if args[i]:\n return '1'\n i += 1\n return ''\n",
"ifempty": "def evaluate(self, formatter, kwargs, mi, locals, val, value_if_empty):\n if val:\n return val\n else:\n return value_if_empty\n",
"booksize": "def evaluate(self, formatter, kwargs, mi, locals):\n if mi.book_size is not None:\n try:\n return str(mi.book_size)\n except:\n pass\n return ''\n",
"select": "def evaluate(self, formatter, kwargs, mi, locals, val, key):\n if not val:\n return ''\n vals = [v.strip() for v in val.split(',')]\n for v in vals:\n if v.startswith(key+':'):\n return v[len(key)+1:]\n return ''\n",
"strcmp": "def evaluate(self, formatter, kwargs, mi, locals, x, y, lt, eq, gt):\n v = strcmp(x, y)\n if v < 0:\n return lt\n if v == 0:\n return eq\n return gt\n",
"first_non_empty": "def evaluate(self, formatter, kwargs, mi, locals, *args):\n i = 0\n while i < len(args):\n if args[i]:\n return args[i]\n i += 1\n return ''\n",
"re": "def evaluate(self, formatter, kwargs, mi, locals, val, pattern, replacement):\n return re.sub(pattern, replacement, val, flags=re.I)\n",
"subtract": "def evaluate(self, formatter, kwargs, mi, locals, x, y):\n x = float(x if x else 0)\n y = float(y if y else 0)\n return unicode(x - y)\n",
"list_item": "def evaluate(self, formatter, kwargs, mi, locals, val, index, sep):\n if not val:\n return ''\n index = int(index)\n val = val.split(sep)\n try:\n return val[index]\n except:\n return ''\n",
"shorten": "def evaluate(self, formatter, kwargs, mi, locals,\n val, leading, center_string, trailing):\n l = max(0, int(leading))\n t = max(0, int(trailing))\n if len(val) > l + len(center_string) + t:\n return val[0:l] + center_string + ('' if t == 0 else val[-t:])\n else:\n return val\n",
"field": "def evaluate(self, formatter, kwargs, mi, locals, name):\n return formatter.get_value(name, [], kwargs)\n",
"add": "def evaluate(self, formatter, kwargs, mi, locals, x, y):\n x = float(x if x else 0)\n y = float(y if y else 0)\n return unicode(x + y)\n",
"lookup": "def evaluate(self, formatter, kwargs, mi, locals, val, *args):\n if len(args) == 2: # here for backwards compatibility\n if val:\n return formatter.vformat('{'+args[0].strip()+'}', [], kwargs)\n else:\n return formatter.vformat('{'+args[1].strip()+'}', [], kwargs)\n if (len(args) % 2) != 1:\n raise ValueError(_('lookup requires either 2 or an odd number of arguments'))\n i = 0\n while i < len(args):\n if i + 1 >= len(args):\n return formatter.vformat('{' + args[i].strip() + '}', [], kwargs)\n if re.search(args[i], val, flags=re.I):\n return formatter.vformat('{'+args[i+1].strip() + '}', [], kwargs)\n i += 2\n",
"template": "def evaluate(self, formatter, kwargs, mi, locals, template):\n template = template.replace('[[', '{').replace(']]', '}')\n return formatter.__class__().safe_format(template, kwargs, 'TEMPLATE', mi)\n",
"print": "def evaluate(self, formatter, kwargs, mi, locals, *args):\n print args\n return None\n",
"merge_lists": "def evaluate(self, formatter, kwargs, mi, locals, list1, list2, separator):\n l1 = [l.strip() for l in list1.split(separator) if l.strip()]\n l2 = [l.strip() for l in list2.split(separator) if l.strip()]\n lcl1 = set([icu_lower(l) for l in l1])\n res = []\n for i in l1:\n res.append(i)\n for i in l2:\n if icu_lower(i) not in lcl1:\n res.append(i)\n return ', '.join(sorted(res, key=sort_key))\n",
"str_in_list": "def evaluate(self, formatter, kwargs, mi, locals, val, sep, str, fv, nfv):\n l = [v.strip() for v in val.split(sep) if v.strip()]\n c = [v.strip() for v in str.split(sep) if v.strip()]\n if l:\n for v in l:\n for t in c:\n if strcmp(t, v) == 0:\n return fv\n return nfv\n",
"titlecase": "def evaluate(self, formatter, kwargs, mi, locals, val):\n return titlecase(val)\n",
"subitems": "def evaluate(self, formatter, kwargs, mi, locals, val, start_index, end_index):\n if not val:\n return ''\n si = int(start_index)\n ei = int(end_index)\n items = [v.strip() for v in val.split(',')]\n rv = set()\n for item in items:\n component = item.split('.')\n try:\n if ei == 0:\n rv.add('.'.join(component[si:]))\n else:\n rv.add('.'.join(component[si:ei]))\n except:\n pass\n return ', '.join(sorted(rv, key=sort_key))\n",
"sublist": "def evaluate(self, formatter, kwargs, mi, locals, val, start_index, end_index, sep):\n if not val:\n return ''\n si = int(start_index)\n ei = int(end_index)\n val = val.split(sep)\n try:\n if ei == 0:\n return sep.join(val[si:])\n else:\n return sep.join(val[si:ei])\n except:\n return ''\n",
"test": "def evaluate(self, formatter, kwargs, mi, locals, val, value_if_set, value_not_set):\n if val:\n return value_if_set\n else:\n return value_not_set\n",
"eval": "def evaluate(self, formatter, kwargs, mi, locals, template):\n from formatter import eval_formatter\n template = template.replace('[[', '{').replace(']]', '}')\n return eval_formatter.safe_format(template, locals, 'EVAL', None)\n",
"multiply": "def evaluate(self, formatter, kwargs, mi, locals, x, y):\n x = float(x if x else 0)\n y = float(y if y else 0)\n return unicode(x * y)\n",
"format_date": "def evaluate(self, formatter, kwargs, mi, locals, val, format_string):\n if not val or val == 'None':\n return ''\n try:\n dt = parse_date(val)\n s = format_date(dt, format_string)\n except:\n s = 'BAD DATE'\n return s\n",
"capitalize": "def evaluate(self, formatter, kwargs, mi, locals, val):\n return capitalize(val)\n",
"identifier_in_list": "def evaluate(self, formatter, kwargs, mi, locals, val, ident, fv, nfv):\n l = [v.strip() for v in val.split(',') if v.strip()]\n (id, _, regexp) = ident.partition(':')\n if not id:\n return nfv\n id += ':'\n if l:\n for v in l:\n if v.startswith(id):\n if not regexp or re.search(regexp, v[len(id):], flags=re.I):\n return fv\n return nfv\n",
"count": "def evaluate(self, formatter, kwargs, mi, locals, val, sep):\n return unicode(len(val.split(sep)))\n",
"lowercase": "def evaluate(self, formatter, kwargs, mi, locals, val):\n return val.lower()\n",
"substr": "def evaluate(self, formatter, kwargs, mi, locals, str_, start_, end_):\n return str_[int(start_): len(str_) if int(end_) == 0 else int(end_)]\n",
"or": "def evaluate(self, formatter, kwargs, mi, locals, *args):\n i = 0\n while i < len(args):\n if args[i]:\n return '1'\n i += 1\n return ''\n",
"switch": "def evaluate(self, formatter, kwargs, mi, locals, val, *args):\n if (len(args) % 2) != 1:\n raise ValueError(_('switch requires an odd number of arguments'))\n i = 0\n while i < len(args):\n if i + 1 >= len(args):\n return args[i]\n if re.search(args[i], val, flags=re.I):\n return args[i+1]\n i += 2\n",
"ondevice": "def evaluate(self, formatter, kwargs, mi, locals):\n if mi.ondevice_col:\n return _('Yes')\n return ''\n",
"assign": "def evaluate(self, formatter, kwargs, mi, locals, target, value):\n locals[target] = value\n return value\n",
"raw_field": "def evaluate(self, formatter, kwargs, mi, locals, name):\n return unicode(getattr(mi, name, None))\n",
"cmp": "def evaluate(self, formatter, kwargs, mi, locals, x, y, lt, eq, gt):\n x = float(x if x and x != 'None' else 0)\n y = float(y if y and y != 'None' else 0)\n if x < y:\n return lt\n if x == y:\n return eq\n return gt\n"
}
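The removed file above appears to be a generated cache of the builtin template function sources (consistent with the resources/template-functions.json entry added to the ignore list earlier in this commit), so deleting the checked-in copy does not remove the functions themselves. As a standalone illustration, a hedged re-implementation of the 'shorten' entry from the JSON, outside calibre's formatter machinery:

# Standalone sketch of the 'shorten' logic cached above; not calibre's actual plumbing.
def shorten(val, leading, center_string, trailing):
    l = max(0, int(leading))
    t = max(0, int(trailing))
    if len(val) > l + len(center_string) + t:
        return val[0:l] + center_string + ('' if t == 0 else val[-t:])
    return val

print(shorten('A Very Long Book Title', 6, '...', 6))   # -> 'A Very... Title'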

View File

@ -8,8 +8,8 @@ __docformat__ = 'restructuredtext en'
import sys, os, textwrap, subprocess, shutil, tempfile, atexit, stat, shlex
from setup import Command, islinux, isfreebsd, isbsd, basenames, modules, functions, \
__appname__, __version__
from setup import (Command, islinux, isbsd, basenames, modules, functions,
__appname__, __version__)
HEADER = '''\
#!/usr/bin/env python2

View File

@ -0,0 +1,689 @@
/*
* Memory DLL loading code
* Version 0.0.2 with additions from Thomas Heller
*
* Copyright (c) 2004-2005 by Joachim Bauch / mail@joachim-bauch.de
* http://www.joachim-bauch.de
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is MemoryModule.c
*
* The Initial Developer of the Original Code is Joachim Bauch.
*
* Portions created by Joachim Bauch are Copyright (C) 2004-2005
* Joachim Bauch. All Rights Reserved.
*
* Portions Copyright (C) 2005 Thomas Heller.
*
*/
// disable warnings about pointer <-> DWORD conversions
#pragma warning( disable : 4311 4312 )
#include <Windows.h>
#include <winnt.h>
#if DEBUG_OUTPUT
#include <stdio.h>
#endif
#ifndef IMAGE_SIZEOF_BASE_RELOCATION
// Vista SDKs no longer define IMAGE_SIZEOF_BASE_RELOCATION!?
# define IMAGE_SIZEOF_BASE_RELOCATION (sizeof(IMAGE_BASE_RELOCATION))
#endif
#include "MemoryModule.h"
/*
XXX We need to protect at least walking the 'loaded' linked list with a lock!
*/
/******************************************************************/
FINDPROC findproc;
void *findproc_data = NULL;
struct NAME_TABLE {
char *name;
DWORD ordinal;
};
typedef struct tagMEMORYMODULE {
PIMAGE_NT_HEADERS headers;
unsigned char *codeBase;
HMODULE *modules;
int numModules;
int initialized;
struct NAME_TABLE *name_table;
char *name;
int refcount;
struct tagMEMORYMODULE *next, *prev;
} MEMORYMODULE, *PMEMORYMODULE;
typedef BOOL (WINAPI *DllEntryProc)(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpReserved);
#define GET_HEADER_DICTIONARY(module, idx) &(module)->headers->OptionalHeader.DataDirectory[idx]
MEMORYMODULE *loaded; /* linked list of loaded memory modules */
/* private - insert a loaded library in a linked list */
static void _Register(char *name, MEMORYMODULE *module)
{
module->next = loaded;
if (loaded)
loaded->prev = module;
module->prev = NULL;
loaded = module;
}
/* private - remove a loaded library from a linked list */
static void _Unregister(MEMORYMODULE *module)
{
free(module->name);
if (module->prev)
module->prev->next = module->next;
if (module->next)
module->next->prev = module->prev;
if (module == loaded)
loaded = module->next;
}
/* public - replacement for GetModuleHandle() */
HMODULE MyGetModuleHandle(LPCTSTR lpModuleName)
{
MEMORYMODULE *p = loaded;
while (p) {
// If already loaded, only increment the reference count
if (0 == stricmp(lpModuleName, p->name)) {
return (HMODULE)p;
}
p = p->next;
}
return GetModuleHandle(lpModuleName);
}
/* public - replacement for LoadLibrary, but searches FIRST for memory
libraries, then for normal libraries. So, it will load libraries AS memory
module if they are found by findproc().
*/
HMODULE MyLoadLibrary(char *lpFileName)
{
MEMORYMODULE *p = loaded;
HMODULE hMod;
while (p) {
// If already loaded, only increment the reference count
if (0 == stricmp(lpFileName, p->name)) {
p->refcount++;
return (HMODULE)p;
}
p = p->next;
}
if (findproc && findproc_data) {
void *pdata = findproc(lpFileName, findproc_data);
if (pdata) {
hMod = MemoryLoadLibrary(lpFileName, pdata);
free(p);
return hMod;
}
}
hMod = LoadLibrary(lpFileName);
return hMod;
}
/* public - replacement for GetProcAddress() */
FARPROC MyGetProcAddress(HMODULE hModule, LPCSTR lpProcName)
{
MEMORYMODULE *p = loaded;
while (p) {
if ((HMODULE)p == hModule)
return MemoryGetProcAddress(p, lpProcName);
p = p->next;
}
return GetProcAddress(hModule, lpProcName);
}
/* public - replacement for FreeLibrary() */
BOOL MyFreeLibrary(HMODULE hModule)
{
MEMORYMODULE *p = loaded;
while (p) {
if ((HMODULE)p == hModule) {
if (--p->refcount == 0) {
_Unregister(p);
MemoryFreeLibrary(p);
}
return TRUE;
}
p = p->next;
}
return FreeLibrary(hModule);
}
#if DEBUG_OUTPUT
static void
OutputLastError(const char *msg)
{
LPVOID tmp;
char *tmpmsg;
FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
NULL, GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPTSTR)&tmp, 0, NULL);
tmpmsg = (char *)LocalAlloc(LPTR, strlen(msg) + strlen(tmp) + 3);
sprintf(tmpmsg, "%s: %s", msg, tmp);
OutputDebugString(tmpmsg);
LocalFree(tmpmsg);
LocalFree(tmp);
}
#endif
/*
static int dprintf(char *fmt, ...)
{
char Buffer[4096];
va_list marker;
int result;
va_start(marker, fmt);
result = vsprintf(Buffer, fmt, marker);
OutputDebugString(Buffer);
return result;
}
*/
static void
CopySections(const unsigned char *data, PIMAGE_NT_HEADERS old_headers, PMEMORYMODULE module)
{
int i, size;
unsigned char *codeBase = module->codeBase;
unsigned char *dest;
PIMAGE_SECTION_HEADER section = IMAGE_FIRST_SECTION(module->headers);
for (i=0; i<module->headers->FileHeader.NumberOfSections; i++, section++)
{
if (section->SizeOfRawData == 0)
{
// section doesn't contain data in the dll itself, but may define
// uninitialized data
size = old_headers->OptionalHeader.SectionAlignment;
if (size > 0)
{
dest = (unsigned char *)VirtualAlloc(codeBase + section->VirtualAddress,
size,
MEM_COMMIT,
PAGE_READWRITE);
section->Misc.PhysicalAddress = (DWORD)dest;
memset(dest, 0, size);
}
// section is empty
continue;
}
// commit memory block and copy data from dll
dest = (unsigned char *)VirtualAlloc(codeBase + section->VirtualAddress,
section->SizeOfRawData,
MEM_COMMIT,
PAGE_READWRITE);
memcpy(dest, data + section->PointerToRawData, section->SizeOfRawData);
section->Misc.PhysicalAddress = (DWORD)dest;
}
}
// Protection flags for memory pages (Executable, Readable, Writeable)
static int ProtectionFlags[2][2][2] = {
{
// not executable
{PAGE_NOACCESS, PAGE_WRITECOPY},
{PAGE_READONLY, PAGE_READWRITE},
}, {
// executable
{PAGE_EXECUTE, PAGE_EXECUTE_WRITECOPY},
{PAGE_EXECUTE_READ, PAGE_EXECUTE_READWRITE},
},
};
static void
FinalizeSections(PMEMORYMODULE module)
{
int i;
PIMAGE_SECTION_HEADER section = IMAGE_FIRST_SECTION(module->headers);
// loop through all sections and change access flags
for (i=0; i<module->headers->FileHeader.NumberOfSections; i++, section++)
{
DWORD protect, oldProtect, size;
int executable = (section->Characteristics & IMAGE_SCN_MEM_EXECUTE) != 0;
int readable = (section->Characteristics & IMAGE_SCN_MEM_READ) != 0;
int writeable = (section->Characteristics & IMAGE_SCN_MEM_WRITE) != 0;
if (section->Characteristics & IMAGE_SCN_MEM_DISCARDABLE)
{
// section is not needed any more and can safely be freed
VirtualFree((LPVOID)section->Misc.PhysicalAddress, section->SizeOfRawData, MEM_DECOMMIT);
continue;
}
// determine protection flags based on characteristics
protect = ProtectionFlags[executable][readable][writeable];
if (section->Characteristics & IMAGE_SCN_MEM_NOT_CACHED)
protect |= PAGE_NOCACHE;
// determine size of region
size = section->SizeOfRawData;
if (size == 0)
{
if (section->Characteristics & IMAGE_SCN_CNT_INITIALIZED_DATA)
size = module->headers->OptionalHeader.SizeOfInitializedData;
else if (section->Characteristics & IMAGE_SCN_CNT_UNINITIALIZED_DATA)
size = module->headers->OptionalHeader.SizeOfUninitializedData;
}
if (size > 0)
{
// change memory access flags
if (VirtualProtect((LPVOID)section->Misc.PhysicalAddress, section->SizeOfRawData, protect, &oldProtect) == 0)
#if DEBUG_OUTPUT
OutputLastError("Error protecting memory page")
#endif
;
}
}
}
static void
PerformBaseRelocation(PMEMORYMODULE module, DWORD delta)
{
DWORD i;
unsigned char *codeBase = module->codeBase;
PIMAGE_DATA_DIRECTORY directory = GET_HEADER_DICTIONARY(module, IMAGE_DIRECTORY_ENTRY_BASERELOC);
if (directory->Size > 0)
{
PIMAGE_BASE_RELOCATION relocation = (PIMAGE_BASE_RELOCATION)(codeBase + directory->VirtualAddress);
for (; relocation->VirtualAddress > 0; )
{
unsigned char *dest = (unsigned char *)(codeBase + relocation->VirtualAddress);
unsigned short *relInfo = (unsigned short *)((unsigned char *)relocation + IMAGE_SIZEOF_BASE_RELOCATION);
for (i=0; i<((relocation->SizeOfBlock-IMAGE_SIZEOF_BASE_RELOCATION) / 2); i++, relInfo++)
{
DWORD *patchAddrHL;
int type, offset;
// the upper 4 bits define the type of relocation
type = *relInfo >> 12;
// the lower 12 bits define the offset
offset = *relInfo & 0xfff;
switch (type)
{
case IMAGE_REL_BASED_ABSOLUTE:
// skip relocation
break;
case IMAGE_REL_BASED_HIGHLOW:
// change complete 32 bit address
patchAddrHL = (DWORD *)(dest + offset);
*patchAddrHL += delta;
break;
default:
//printf("Unknown relocation: %d\n", type);
break;
}
}
// advance to next relocation block
relocation = (PIMAGE_BASE_RELOCATION)(((DWORD)relocation) + relocation->SizeOfBlock);
}
}
}
static int
BuildImportTable(PMEMORYMODULE module)
{
int result=1;
unsigned char *codeBase = module->codeBase;
PIMAGE_DATA_DIRECTORY directory = GET_HEADER_DICTIONARY(module, IMAGE_DIRECTORY_ENTRY_IMPORT);
if (directory->Size > 0)
{
PIMAGE_IMPORT_DESCRIPTOR importDesc = (PIMAGE_IMPORT_DESCRIPTOR)(codeBase + directory->VirtualAddress);
for (; !IsBadReadPtr(importDesc, sizeof(IMAGE_IMPORT_DESCRIPTOR)) && importDesc->Name; importDesc++)
{
DWORD *thunkRef, *funcRef;
HMODULE handle;
handle = MyLoadLibrary(codeBase + importDesc->Name);
if (handle == INVALID_HANDLE_VALUE)
{
//LastError should already be set
#if DEBUG_OUTPUT
OutputLastError("Can't load library");
#endif
result = 0;
break;
}
module->modules = (HMODULE *)realloc(module->modules, (module->numModules+1)*(sizeof(HMODULE)));
if (module->modules == NULL)
{
SetLastError(ERROR_NOT_ENOUGH_MEMORY);
result = 0;
break;
}
module->modules[module->numModules++] = handle;
if (importDesc->OriginalFirstThunk)
{
thunkRef = (DWORD *)(codeBase + importDesc->OriginalFirstThunk);
funcRef = (DWORD *)(codeBase + importDesc->FirstThunk);
} else {
// no hint table
thunkRef = (DWORD *)(codeBase + importDesc->FirstThunk);
funcRef = (DWORD *)(codeBase + importDesc->FirstThunk);
}
for (; *thunkRef; thunkRef++, funcRef++)
{
if IMAGE_SNAP_BY_ORDINAL(*thunkRef) {
*funcRef = (DWORD)MyGetProcAddress(handle, (LPCSTR)IMAGE_ORDINAL(*thunkRef));
} else {
PIMAGE_IMPORT_BY_NAME thunkData = (PIMAGE_IMPORT_BY_NAME)(codeBase + *thunkRef);
*funcRef = (DWORD)MyGetProcAddress(handle, (LPCSTR)&thunkData->Name);
}
if (*funcRef == 0)
{
SetLastError(ERROR_PROC_NOT_FOUND);
result = 0;
break;
}
}
if (!result)
break;
}
}
return result;
}
/*
MemoryLoadLibrary - load a library AS MEMORY MODULE, or return
existing MEMORY MODULE with increased refcount.
This allows to load a library AGAIN as memory module which is
already loaded as HMODULE!
*/
HMEMORYMODULE MemoryLoadLibrary(char *name, const void *data)
{
PMEMORYMODULE result;
PIMAGE_DOS_HEADER dos_header;
PIMAGE_NT_HEADERS old_header;
unsigned char *code, *headers;
DWORD locationDelta;
DllEntryProc DllEntry;
BOOL successfull;
MEMORYMODULE *p = loaded;
while (p) {
// If already loaded, only increment the reference count
if (0 == stricmp(name, p->name)) {
p->refcount++;
return (HMODULE)p;
}
p = p->next;
}
/* Do NOT check for GetModuleHandle here! */
dos_header = (PIMAGE_DOS_HEADER)data;
if (dos_header->e_magic != IMAGE_DOS_SIGNATURE)
{
SetLastError(ERROR_BAD_FORMAT);
#if DEBUG_OUTPUT
OutputDebugString("Not a valid executable file.\n");
#endif
return NULL;
}
old_header = (PIMAGE_NT_HEADERS)&((const unsigned char *)(data))[dos_header->e_lfanew];
if (old_header->Signature != IMAGE_NT_SIGNATURE)
{
SetLastError(ERROR_BAD_FORMAT);
#if DEBUG_OUTPUT
OutputDebugString("No PE header found.\n");
#endif
return NULL;
}
// reserve memory for image of library
code = (unsigned char *)VirtualAlloc((LPVOID)(old_header->OptionalHeader.ImageBase),
old_header->OptionalHeader.SizeOfImage,
MEM_RESERVE,
PAGE_READWRITE);
if (code == NULL)
// try to allocate memory at arbitrary position
code = (unsigned char *)VirtualAlloc(NULL,
old_header->OptionalHeader.SizeOfImage,
MEM_RESERVE,
PAGE_READWRITE);
if (code == NULL)
{
SetLastError(ERROR_NOT_ENOUGH_MEMORY);
#if DEBUG_OUTPUT
OutputLastError("Can't reserve memory");
#endif
return NULL;
}
result = (PMEMORYMODULE)HeapAlloc(GetProcessHeap(), 0, sizeof(MEMORYMODULE));
result->codeBase = code;
result->numModules = 0;
result->modules = NULL;
result->initialized = 0;
result->next = result->prev = NULL;
result->refcount = 1;
result->name = strdup(name);
result->name_table = NULL;
// XXX: is it correct to commit the complete memory region at once?
// calling DllEntry raises an exception if we don't...
VirtualAlloc(code,
old_header->OptionalHeader.SizeOfImage,
MEM_COMMIT,
PAGE_READWRITE);
// commit memory for headers
headers = (unsigned char *)VirtualAlloc(code,
old_header->OptionalHeader.SizeOfHeaders,
MEM_COMMIT,
PAGE_READWRITE);
// copy PE header to code
memcpy(headers, dos_header, dos_header->e_lfanew + old_header->OptionalHeader.SizeOfHeaders);
result->headers = (PIMAGE_NT_HEADERS)&((const unsigned char *)(headers))[dos_header->e_lfanew];
// update position
result->headers->OptionalHeader.ImageBase = (DWORD)code;
// copy sections from DLL file block to new memory location
CopySections(data, old_header, result);
// adjust base address of imported data
locationDelta = (DWORD)(code - old_header->OptionalHeader.ImageBase);
if (locationDelta != 0)
PerformBaseRelocation(result, locationDelta);
// load required dlls and adjust function table of imports
if (!BuildImportTable(result))
goto error;
// mark memory pages depending on section headers and release
// sections that are marked as "discardable"
FinalizeSections(result);
// get entry point of loaded library
if (result->headers->OptionalHeader.AddressOfEntryPoint != 0)
{
DllEntry = (DllEntryProc)(code + result->headers->OptionalHeader.AddressOfEntryPoint);
if (DllEntry == 0)
{
SetLastError(ERROR_BAD_FORMAT); /* XXX ? */
#if DEBUG_OUTPUT
OutputDebugString("Library has no entry point.\n");
#endif
goto error;
}
// notify library about attaching to process
successfull = (*DllEntry)((HINSTANCE)code, DLL_PROCESS_ATTACH, 0);
if (!successfull)
{
#if DEBUG_OUTPUT
OutputDebugString("Can't attach library.\n");
#endif
goto error;
}
result->initialized = 1;
}
_Register(name, result);
return (HMEMORYMODULE)result;
error:
// cleanup
free(result->name);
MemoryFreeLibrary(result);
return NULL;
}
int _compare(const struct NAME_TABLE *p1, const struct NAME_TABLE *p2)
{
return stricmp(p1->name, p2->name);
}
int _find(const char **name, const struct NAME_TABLE *p)
{
return stricmp(*name, p->name);
}
struct NAME_TABLE *GetNameTable(PMEMORYMODULE module)
{
unsigned char *codeBase;
PIMAGE_EXPORT_DIRECTORY exports;
PIMAGE_DATA_DIRECTORY directory;
DWORD i, *nameRef;
WORD *ordinal;
struct NAME_TABLE *p, *ptab;
if (module->name_table)
return module->name_table;
codeBase = module->codeBase;
directory = GET_HEADER_DICTIONARY(module, IMAGE_DIRECTORY_ENTRY_EXPORT);
exports = (PIMAGE_EXPORT_DIRECTORY)(codeBase + directory->VirtualAddress);
nameRef = (DWORD *)(codeBase + exports->AddressOfNames);
ordinal = (WORD *)(codeBase + exports->AddressOfNameOrdinals);
p = ((PMEMORYMODULE)module)->name_table = (struct NAME_TABLE *)malloc(sizeof(struct NAME_TABLE)
* exports->NumberOfNames);
if (p == NULL)
return NULL;
ptab = p;
for (i=0; i<exports->NumberOfNames; ++i) {
p->name = (char *)(codeBase + *nameRef++);
p->ordinal = *ordinal++;
++p;
}
qsort(ptab, exports->NumberOfNames, sizeof(struct NAME_TABLE), _compare);
return ptab;
}
FARPROC MemoryGetProcAddress(HMEMORYMODULE module, const char *name)
{
unsigned char *codeBase = ((PMEMORYMODULE)module)->codeBase;
int idx=-1;
PIMAGE_EXPORT_DIRECTORY exports;
PIMAGE_DATA_DIRECTORY directory = GET_HEADER_DICTIONARY((PMEMORYMODULE)module, IMAGE_DIRECTORY_ENTRY_EXPORT);
if (directory->Size == 0)
// no export table found
return NULL;
exports = (PIMAGE_EXPORT_DIRECTORY)(codeBase + directory->VirtualAddress);
if (exports->NumberOfNames == 0 || exports->NumberOfFunctions == 0)
// DLL doesn't export anything
return NULL;
if (HIWORD(name)) {
struct NAME_TABLE *ptab;
struct NAME_TABLE *found;
ptab = GetNameTable((PMEMORYMODULE)module);
if (ptab == NULL)
// some failure
return NULL;
found = bsearch(&name, ptab, exports->NumberOfNames, sizeof(struct NAME_TABLE), _find);
if (found == NULL)
// exported symbol not found
return NULL;
idx = found->ordinal;
}
else
idx = LOWORD(name) - exports->Base;
if ((DWORD)idx > exports->NumberOfFunctions)
// name <-> ordinal number don't match
return NULL;
// AddressOfFunctions contains the RVAs to the "real" functions
return (FARPROC)(codeBase + *(DWORD *)(codeBase + exports->AddressOfFunctions + (idx*4)));
}
void MemoryFreeLibrary(HMEMORYMODULE mod)
{
int i;
PMEMORYMODULE module = (PMEMORYMODULE)mod;
if (module != NULL)
{
if (module->initialized != 0)
{
// notify library about detaching from process
DllEntryProc DllEntry = (DllEntryProc)(module->codeBase + module->headers->OptionalHeader.AddressOfEntryPoint);
(*DllEntry)((HINSTANCE)module->codeBase, DLL_PROCESS_DETACH, 0);
module->initialized = 0;
}
if (module->modules != NULL)
{
// free previously opened libraries
for (i=0; i<module->numModules; i++)
if (module->modules[i] != INVALID_HANDLE_VALUE)
MyFreeLibrary(module->modules[i]);
free(module->modules);
}
if (module->codeBase != NULL)
// release memory of library
VirtualFree(module->codeBase, 0, MEM_RELEASE);
if (module->name_table != NULL)
free(module->name_table);
HeapFree(GetProcessHeap(), 0, module);
}
}

View File

@ -0,0 +1,58 @@
/*
* Memory DLL loading code
* Version 0.0.2
*
* Copyright (c) 2004-2005 by Joachim Bauch / mail@joachim-bauch.de
* http://www.joachim-bauch.de
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is MemoryModule.h
*
* The Initial Developer of the Original Code is Joachim Bauch.
*
* Portions created by Joachim Bauch are Copyright (C) 2004-2005
* Joachim Bauch. All Rights Reserved.
*
*/
#ifndef __MEMORY_MODULE_HEADER
#define __MEMORY_MODULE_HEADER
#include <Windows.h>
typedef void *HMEMORYMODULE;
#ifdef __cplusplus
extern "C" {
#endif
typedef void *(*FINDPROC)();
extern FINDPROC findproc;
extern void *findproc_data;
HMEMORYMODULE MemoryLoadLibrary(char *, const void *);
FARPROC MemoryGetProcAddress(HMEMORYMODULE, const char *);
void MemoryFreeLibrary(HMEMORYMODULE);
BOOL MyFreeLibrary(HMODULE hModule);
HMODULE MyLoadLibrary(char *lpFileName);
FARPROC MyGetProcAddress(HMODULE hModule, LPCSTR lpProcName);
HMODULE MyGetModuleHandle(LPCTSTR lpModuleName);
#ifdef __cplusplus
}
#endif
#endif // __MEMORY_MODULE_HEADER

View File

@ -1,6 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<WixLocalization Culture="en-us" xmlns="http://schemas.microsoft.com/wix/2006/localization">
<String Id="AdvancedWelcomeEulaDlgDescriptionPerUser">If you are upgrading from a {app} version older than 0.6.17, please uninstall {app} first. Click Advanced to change installation settings.</String>
<String Id="AdvancedWelcomeEulaDlgDescriptionPerUser">Click Advanced to change installation settings.</String>
<String Id="ProgressTextFileCost">Computing space requirements, this may take upto five minutes...</String>
<String Id="ProgressTextCostInitialize">Computing space requirements, this may take upto five minutes...</String>
<String Id="ProgressTextCostFinalize">Computing space requirements, this may take upto five minutes...</String>

View File

@ -8,19 +8,19 @@ __docformat__ = 'restructuredtext en'
import sys, os, shutil, glob, py_compile, subprocess, re, zipfile, time
from setup import Command, modules, functions, basenames, __version__, \
__appname__
from setup import (Command, modules, functions, basenames, __version__,
__appname__)
from setup.build_environment import msvc, MT, RC
from setup.installer.windows.wix import WixMixIn
OPENSSL_DIR = r'Q:\openssl'
QT_DIR = 'Q:\\Qt\\4.7.3'
QT_DLLS = ['Core', 'Gui', 'Network', 'Svg', 'WebKit', 'Xml', 'XmlPatterns']
LIBUSB_DIR = 'C:\\libusb'
LIBUNRAR = 'C:\\Program Files\\UnrarDLL\\unrar.dll'
SW = r'C:\cygwin\home\kovid\sw'
IMAGEMAGICK = os.path.join(SW, 'build', 'ImageMagick-6.6.6',
'VisualMagick', 'bin')
CRT = r'C:\Microsoft.VC90.CRT'
VERSION = re.sub('[a-z]\d+', '', __version__)
WINVER = VERSION+'.0'
@ -71,33 +71,66 @@ class Win32Freeze(Command, WixMixIn):
self.rc_template = self.j(self.d(self.a(__file__)), 'template.rc')
self.py_ver = ''.join(map(str, sys.version_info[:2]))
self.lib_dir = self.j(self.base, 'Lib')
self.pydlib = self.j(self.base, 'pydlib')
self.pylib = self.j(self.base, 'pylib.zip')
self.dll_dir = self.j(self.base, 'DLLs')
self.plugins_dir = os.path.join(self.base, 'plugins2')
self.initbase()
self.build_launchers()
self.add_plugins()
self.freeze()
self.embed_manifests()
self.install_site_py()
self.archive_lib_dir()
self.remove_CRT_from_manifests()
self.create_installer()
def remove_CRT_from_manifests(self):
'''
The dependency on the CRT is removed from the manifests of all DLLs.
This allows the CRT loaded by the .exe files to be used instead.
'''
search_pat = re.compile(r'(?is)<dependency>.*Microsoft\.VC\d+\.CRT')
repl_pat = re.compile(
r'(?is)<dependency>.*?Microsoft\.VC\d+\.CRT.*?</dependency>')
for dll in glob.glob(self.j(self.dll_dir, '*.dll')):
bn = self.b(dll)
with open(dll, 'rb') as f:
raw = f.read()
match = search_pat.search(raw)
if match is None:
continue
self.info('Removing CRT dependency from manifest of: %s'%bn)
# Blank out the bytes corresponding to the dependency specification
nraw = repl_pat.sub(lambda m: b' '*len(m.group()), raw)
if len(nraw) != len(raw):
raise Exception('Something went wrong with %s'%bn)
with open(dll, 'wb') as f:
f.write(nraw)
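The substitution above deliberately replaces the matched manifest XML with an equal number of blank bytes, so the DLL's size and internal offsets stay untouched. A toy demonstration of that length-preserving rewrite; the manifest snippet is invented and the bytes-pattern spelling is an assumption for a self-contained example:

import re

# Toy sketch of the length-preserving blanking done by remove_CRT_from_manifests().
repl_pat = re.compile(br'(?is)<dependency>.*?Microsoft\.VC\d+\.CRT.*?</dependency>')
raw = b'<assembly><dependency>Microsoft.VC90.CRT</dependency></assembly>'
nraw = repl_pat.sub(lambda m: b' ' * len(m.group()), raw)
assert len(nraw) == len(raw)   # file size and all offsets stay unchanged
print(nraw)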
def initbase(self):
if self.e(self.base):
shutil.rmtree(self.base)
os.makedirs(self.base)
def add_plugins(self):
self.info('Adding plugins...')
tgt = self.plugins_dir
if os.path.exists(tgt):
shutil.rmtree(tgt)
os.mkdir(tgt)
base = self.j(self.SRC, 'calibre', 'plugins')
for f in glob.glob(self.j(base, '*.pyd')):
# We dont want the manifests as the manifest in the exe will be
# used instead
shutil.copy2(f, tgt)
def freeze(self):
shutil.copy2(self.j(self.src_root, 'LICENSE'), self.base)
self.info('Adding plugins...')
tgt = os.path.join(self.base, 'plugins')
self.info('Adding CRT')
shutil.copytree(CRT, self.j(self.base, os.path.basename(CRT)))
if not os.path.exists(tgt):
os.mkdir(tgt)
base = self.j(self.SRC, 'calibre', 'plugins')
for pat in ('*.pyd', '*.manifest'):
for f in glob.glob(self.j(base, pat)):
shutil.copy2(f, tgt)
self.info('Adding resources...')
tgt = self.j(self.base, 'resources')
@ -106,7 +139,6 @@ class Win32Freeze(Command, WixMixIn):
shutil.copytree(self.j(self.src_root, 'resources'), tgt) shutil.copytree(self.j(self.src_root, 'resources'), tgt)
self.info('Adding Qt and python...') self.info('Adding Qt and python...')
self.dll_dir = self.j(self.base, 'DLLs')
shutil.copytree(r'C:\Python%s\DLLs'%self.py_ver, self.dll_dir, shutil.copytree(r'C:\Python%s\DLLs'%self.py_ver, self.dll_dir,
ignore=shutil.ignore_patterns('msvc*.dll', 'Microsoft.*')) ignore=shutil.ignore_patterns('msvc*.dll', 'Microsoft.*'))
for x in glob.glob(self.j(OPENSSL_DIR, 'bin', '*.dll')): for x in glob.glob(self.j(OPENSSL_DIR, 'bin', '*.dll')):
@ -194,14 +226,13 @@ class Win32Freeze(Command, WixMixIn):
if os.path.exists(tg): if os.path.exists(tg):
shutil.rmtree(tg) shutil.rmtree(tg)
shutil.copytree(imfd, tg) shutil.copytree(imfd, tg)
for dirpath, dirnames, filenames in os.walk(tdir):
for x in filenames:
if not x.endswith('.dll'):
os.remove(self.j(dirpath, x))
print print
print 'Adding third party dependencies' print 'Adding third party dependencies'
tdir = os.path.join(self.base, 'driver')
os.makedirs(tdir)
for pat in ('*.dll', '*.sys', '*.cat', '*.inf'):
for f in glob.glob(os.path.join(LIBUSB_DIR, pat)):
shutil.copyfile(f, os.path.join(tdir, os.path.basename(f)))
print '\tAdding unrar' print '\tAdding unrar'
shutil.copyfile(LIBUNRAR, shutil.copyfile(LIBUNRAR,
os.path.join(self.dll_dir, os.path.basename(LIBUNRAR))) os.path.join(self.dll_dir, os.path.basename(LIBUNRAR)))
@ -318,8 +349,8 @@ class Win32Freeze(Command, WixMixIn):
if not os.path.exists(self.obj_dir): if not os.path.exists(self.obj_dir):
os.makedirs(self.obj_dir) os.makedirs(self.obj_dir)
base = self.j(self.src_root, 'setup', 'installer', 'windows') base = self.j(self.src_root, 'setup', 'installer', 'windows')
sources = [self.j(base, x) for x in ['util.c']] sources = [self.j(base, x) for x in ['util.c', 'MemoryModule.c']]
headers = [self.j(base, x) for x in ['util.h']] headers = [self.j(base, x) for x in ['util.h', 'MemoryModule.h']]
objects = [self.j(self.obj_dir, self.b(x)+'.obj') for x in sources] objects = [self.j(self.obj_dir, self.b(x)+'.obj') for x in sources]
cflags = '/c /EHsc /MD /W3 /Ox /nologo /D_UNICODE'.split() cflags = '/c /EHsc /MD /W3 /Ox /nologo /D_UNICODE'.split()
cflags += ['/DPYDLL="python%s.dll"'%self.py_ver, '/IC:/Python%s/include'%self.py_ver] cflags += ['/DPYDLL="python%s.dll"'%self.py_ver, '/IC:/Python%s/include'%self.py_ver]
@ -371,43 +402,49 @@ class Win32Freeze(Command, WixMixIn):
def archive_lib_dir(self): def archive_lib_dir(self):
self.info('Putting all python code into a zip file for performance') self.info('Putting all python code into a zip file for performance')
if os.path.exists(self.pydlib):
shutil.rmtree(self.pydlib)
os.makedirs(self.pydlib)
self.zf_timestamp = time.localtime(time.time())[:6] self.zf_timestamp = time.localtime(time.time())[:6]
self.zf_names = set() self.zf_names = set()
with zipfile.ZipFile(self.pylib, 'w', zipfile.ZIP_STORED) as zf: with zipfile.ZipFile(self.pylib, 'w', zipfile.ZIP_STORED) as zf:
# Add the .pyds from python and calibre to the zip file
for x in (self.plugins_dir, self.dll_dir):
for pyd in os.listdir(x):
if pyd.endswith('.pyd') and pyd != 'sqlite_custom.pyd':
# sqlite_custom has to be a file for
# sqlite_load_extension to work
self.add_to_zipfile(zf, pyd, x)
os.remove(self.j(x, pyd))
# Add everything in Lib except site-packages to the zip file
for x in os.listdir(self.lib_dir): for x in os.listdir(self.lib_dir):
if x == 'site-packages': if x == 'site-packages':
continue continue
self.add_to_zipfile(zf, x, self.lib_dir) self.add_to_zipfile(zf, x, self.lib_dir)
sp = self.j(self.lib_dir, 'site-packages') sp = self.j(self.lib_dir, 'site-packages')
handled = set(['site.pyo']) # Special handling for PIL and pywin32
for pth in ('PIL.pth', 'pywin32.pth'): handled = set(['PIL.pth', 'pywin32.pth', 'PIL', 'win32'])
handled.add(pth) self.add_to_zipfile(zf, 'PIL', sp)
shutil.copyfile(self.j(sp, pth), self.j(self.pydlib, pth)) base = self.j(sp, 'win32', 'lib')
for d in self.get_pth_dirs(self.j(sp, pth)): for x in os.listdir(base):
shutil.copytree(d, self.j(self.pydlib, self.b(d)), True) if os.path.splitext(x)[1] not in ('.exe',):
handled.add(self.b(d)) self.add_to_zipfile(zf, x, base)
base = self.d(base)
for x in os.listdir(base):
if not os.path.isdir(self.j(base, x)):
if os.path.splitext(x)[1] not in ('.exe',):
self.add_to_zipfile(zf, x, base)
handled.add('easy-install.pth') handled.add('easy-install.pth')
for d in self.get_pth_dirs(self.j(sp, 'easy-install.pth')): for d in self.get_pth_dirs(self.j(sp, 'easy-install.pth')):
handled.add(self.b(d)) handled.add(self.b(d))
zip_safe = self.is_zip_safe(d)
for x in os.listdir(d): for x in os.listdir(d):
if x == 'EGG-INFO': if x == 'EGG-INFO':
continue continue
if zip_safe: self.add_to_zipfile(zf, x, d)
self.add_to_zipfile(zf, x, d)
else:
absp = self.j(d, x)
dest = self.j(self.pydlib, x)
if os.path.isdir(absp):
shutil.copytree(absp, dest, True)
else:
shutil.copy2(absp, dest)
# The rest of site-packages
# We don't want the site.py from site-packages
handled.add('site.pyo')
for x in os.listdir(sp): for x in os.listdir(sp):
if x in handled or x.endswith('.egg-info'): if x in handled or x.endswith('.egg-info'):
continue continue
@ -415,33 +452,18 @@ class Win32Freeze(Command, WixMixIn):
if os.path.isdir(absp): if os.path.isdir(absp):
if not os.listdir(absp): if not os.listdir(absp):
continue continue
if self.is_zip_safe(absp): self.add_to_zipfile(zf, x, sp)
self.add_to_zipfile(zf, x, sp)
else:
shutil.copytree(absp, self.j(self.pydlib, x), True)
else: else:
if x.endswith('.pyd'): self.add_to_zipfile(zf, x, sp)
shutil.copy2(absp, self.j(self.pydlib, x))
else:
self.add_to_zipfile(zf, x, sp)
shutil.rmtree(self.lib_dir) shutil.rmtree(self.lib_dir)
def is_zip_safe(self, path):
for f in walk(path):
ext = os.path.splitext(f)[1].lower()
if ext in ('.pyd', '.dll', '.exe'):
return False
return True
def get_pth_dirs(self, pth): def get_pth_dirs(self, pth):
base = os.path.dirname(pth) base = os.path.dirname(pth)
for line in open(pth).readlines(): for line in open(pth).readlines():
line = line.strip() line = line.strip()
if not line or line.startswith('#') or line.startswith('import'): if not line or line.startswith('#') or line.startswith('import'):
continue continue
if line == 'win32\\lib':
continue
candidate = self.j(base, line) candidate = self.j(base, line)
if os.path.exists(candidate): if os.path.exists(candidate):
yield candidate yield candidate
@ -463,10 +485,10 @@ class Win32Freeze(Command, WixMixIn):
self.add_to_zipfile(zf, name + os.sep + x, base) self.add_to_zipfile(zf, name + os.sep + x, base)
else: else:
ext = os.path.splitext(name)[1].lower() ext = os.path.splitext(name)[1].lower()
if ext in ('.pyd', '.dll', '.exe'): if ext in ('.dll',):
raise ValueError('Cannot add %r to zipfile'%abspath) raise ValueError('Cannot add %r to zipfile'%abspath)
zinfo.external_attr = 0600 << 16 zinfo.external_attr = 0600 << 16
if ext in ('.py', '.pyc', '.pyo'): if ext in ('.py', '.pyc', '.pyo', '.pyd'):
with open(abspath, 'rb') as f: with open(abspath, 'rb') as f:
zf.writestr(zinfo, f.read()) zf.writestr(zinfo, f.read())

View File

@ -88,7 +88,9 @@ Qt uses its own routine to locate and load "system libraries" including the open
Now, run configure and make:: Now, run configure and make::
configure -opensource -release -qt-zlib -qt-gif -qt-libmng -qt-libpng -qt-libtiff -qt-libjpeg -release -platform win32-msvc2008 -no-qt3support -webkit -xmlpatterns -no-phonon -no-style-plastique -no-style-cleanlooks -no-style-motif -no-style-cde -no-declarative -no-scripttools -no-audio-backend -no-multimedia -no-dbus -no-openvg -no-opengl -no-qt3support -confirm-license -nomake examples -nomake demos -nomake docs -openssl -I Q:\openssl\include -L Q:\openssl\lib && nmake

-no-plugin-manifests is needed so that loading the plugins does not fail looking for the CRT assembly
configure -opensource -release -qt-zlib -qt-gif -qt-libmng -qt-libpng -qt-libtiff -qt-libjpeg -release -platform win32-msvc2008 -no-qt3support -webkit -xmlpatterns -no-phonon -no-style-plastique -no-style-cleanlooks -no-style-motif -no-style-cde -no-declarative -no-scripttools -no-audio-backend -no-multimedia -no-dbus -no-openvg -no-opengl -no-qt3support -confirm-license -nomake examples -nomake demos -nomake docs -no-plugin-manifests -openssl -I Q:\openssl\include -L Q:\openssl\lib && nmake
SIP SIP
----- -----

View File

@ -1,12 +1,72 @@
#!/usr/bin/env python #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>' __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en' __docformat__ = 'restructuredtext en'
import sys, os, linecache import sys
import os
import zipimport
import _memimporter
DEBUG_ZIPIMPORT = False
class ZipExtensionImporter(zipimport.zipimporter):
'''
Taken, with thanks, from the py2exe source code
'''
def __init__(self, *args, **kwargs):
zipimport.zipimporter.__init__(self, *args, **kwargs)
# We know there are no dlls in the zip file, so don't set findproc
# (performance optimization)
#_memimporter.set_find_proc(self.locate_dll_image)
def find_module(self, fullname, path=None):
result = zipimport.zipimporter.find_module(self, fullname, path)
if result:
return result
fullname = fullname.replace(".", "\\")
if (fullname + '.pyd') in self._files:
return self
return None
def locate_dll_image(self, name):
# A callback function for _memimporter.import_module. Tries to
# locate additional dlls. Returns the image as Python string,
# or None if not found.
if name in self._files:
return self.get_data(name)
return None
def load_module(self, fullname):
if sys.modules.has_key(fullname):
mod = sys.modules[fullname]
if DEBUG_ZIPIMPORT:
sys.stderr.write("import %s # previously loaded from zipfile %s\n" % (fullname, self.archive))
return mod
try:
return zipimport.zipimporter.load_module(self, fullname)
except zipimport.ZipImportError:
pass
initname = "init" + fullname.split(".")[-1] # name of initfunction
filename = fullname.replace(".", "\\")
path = filename + '.pyd'
if path in self._files:
if DEBUG_ZIPIMPORT:
sys.stderr.write("# found %s in zipfile %s\n" % (path, self.archive))
code = self.get_data(path)
mod = _memimporter.import_module(code, initname, fullname, path)
mod.__file__ = "%s\\%s" % (self.archive, path)
mod.__loader__ = self
if DEBUG_ZIPIMPORT:
sys.stderr.write("import %s # loaded from zipfile %s\n" % (fullname, mod.__file__))
return mod
raise zipimport.ZipImportError, "can't find module %s" % fullname
def __repr__(self):
return "<%s object %r>" % (self.__class__.__name__, self.archive)
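For this importer to take effect it has to be registered with Python's import machinery; a minimal sketch of the registration (this is essentially what main() further down in this file does):

import sys

# Install the importer as a path hook so that entries pointing into the
# frozen pylib.zip are handled by ZipExtensionImporter rather than the
# stock zipimporter, then drop any importers already cached for those paths.
sys.path_hooks.insert(0, ZipExtensionImporter)
sys.path_importer_cache.clear()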
def abs__file__(): def abs__file__():
@ -32,7 +92,7 @@ def aliasmbcs():
def add_calibre_vars(): def add_calibre_vars():
sys.resources_location = os.path.join(sys.app_dir, 'resources') sys.resources_location = os.path.join(sys.app_dir, 'resources')
sys.extensions_location = os.path.join(sys.app_dir, 'plugins') sys.extensions_location = os.path.join(sys.app_dir, 'plugins2')
dv = os.environ.get('CALIBRE_DEVELOP_FROM', None) dv = os.environ.get('CALIBRE_DEVELOP_FROM', None)
if dv and os.path.exists(dv): if dv and os.path.exists(dv):
@ -42,42 +102,6 @@ def makepath(*paths):
dir = os.path.abspath(os.path.join(*paths)) dir = os.path.abspath(os.path.join(*paths))
return dir, os.path.normcase(dir) return dir, os.path.normcase(dir)
def addpackage(sitedir, name):
"""Process a .pth file within the site-packages directory:
For each line in the file, either combine it with sitedir to a path,
or execute it if it starts with 'import '.
"""
fullname = os.path.join(sitedir, name)
try:
f = open(fullname, "rU")
except IOError:
return
with f:
for line in f:
if line.startswith("#"):
continue
if line.startswith(("import ", "import\t")):
exec line
continue
line = line.rstrip()
dir, dircase = makepath(sitedir, line)
if os.path.exists(dir):
sys.path.append(dir)
def addsitedir(sitedir):
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
sitedir, sitedircase = makepath(sitedir)
try:
names = os.listdir(sitedir)
except os.error:
return
dotpth = os.extsep + "pth"
names = [name for name in names if name.endswith(dotpth)]
for name in sorted(names):
addpackage(sitedir, name)
def run_entry_point(): def run_entry_point():
bname, mod, func = sys.calibre_basename, sys.calibre_module, sys.calibre_function bname, mod, func = sys.calibre_basename, sys.calibre_module, sys.calibre_function
sys.argv[0] = bname+'.exe' sys.argv[0] = bname+'.exe'
@ -89,6 +113,10 @@ def main():
sys.setdefaultencoding('utf-8') sys.setdefaultencoding('utf-8')
aliasmbcs() aliasmbcs()
sys.path_hooks.insert(0, ZipExtensionImporter)
sys.path_importer_cache.clear()
import linecache
def fake_getline(filename, lineno, module_globals=None): def fake_getline(filename, lineno, module_globals=None):
return '' return ''
linecache.orig_getline = linecache.getline linecache.orig_getline = linecache.getline
@ -96,10 +124,11 @@ def main():
abs__file__() abs__file__()
addsitedir(os.path.join(sys.app_dir, 'pydlib'))
add_calibre_vars() add_calibre_vars()
# Needed for pywintypes to be able to load its DLL
sys.path.append(os.path.join(sys.app_dir, 'DLLs'))
return run_entry_point() return run_entry_point()

View File

@ -1,18 +1,130 @@
/* /*
* Copyright 2009 Kovid Goyal * Copyright 2009 Kovid Goyal
* The memimporter code is taken from the py2exe project
*/ */
#include "util.h" #include "util.h"
#include <delayimp.h> #include <delayimp.h>
#include <io.h> #include <io.h>
#include <fcntl.h> #include <fcntl.h>
static char GUI_APP = 0; static char GUI_APP = 0;
static char python_dll[] = PYDLL; static char python_dll[] = PYDLL;
void set_gui_app(char yes) { GUI_APP = yes; } void set_gui_app(char yes) { GUI_APP = yes; }
char is_gui_app() { return GUI_APP; } char is_gui_app() { return GUI_APP; }
// memimporter {{{
#include "MemoryModule.h"
static char **DLL_Py_PackageContext = NULL;
static PyObject **DLL_ImportError = NULL;
static char module_doc[] =
"Importer which can load extension modules from memory";
static void *memdup(void *ptr, Py_ssize_t size)
{
void *p = malloc(size);
if (p == NULL)
return NULL;
memcpy(p, ptr, size);
return p;
}
/*
Be sure to detect errors in FindLibrary - undetected errors lead to
very strange behaviour.
*/
static void* FindLibrary(char *name, PyObject *callback)
{
PyObject *result;
char *p;
Py_ssize_t size;
if (callback == NULL)
return NULL;
result = PyObject_CallFunction(callback, "s", name);
if (result == NULL) {
PyErr_Clear();
return NULL;
}
if (-1 == PyString_AsStringAndSize(result, &p, &size)) {
PyErr_Clear();
Py_DECREF(result);
return NULL;
}
p = memdup(p, size);
Py_DECREF(result);
return p;
}
static PyObject *set_find_proc(PyObject *self, PyObject *args)
{
PyObject *callback = NULL;
if (!PyArg_ParseTuple(args, "|O:set_find_proc", &callback))
return NULL;
Py_DECREF((PyObject *)findproc_data);
Py_INCREF(callback);
findproc_data = (void *)callback;
return Py_BuildValue("i", 1);
}
static PyObject *
import_module(PyObject *self, PyObject *args)
{
char *data;
int size;
char *initfuncname;
char *modname;
char *pathname;
HMEMORYMODULE hmem;
FARPROC do_init;
char *oldcontext;
/* code, initfuncname, fqmodulename, path */
if (!PyArg_ParseTuple(args, "s#sss:import_module",
&data, &size,
&initfuncname, &modname, &pathname))
return NULL;
hmem = MemoryLoadLibrary(pathname, data);
if (!hmem) {
PyErr_Format(*DLL_ImportError,
"MemoryLoadLibrary() failed loading %s", pathname);
return NULL;
}
do_init = MemoryGetProcAddress(hmem, initfuncname);
if (!do_init) {
MemoryFreeLibrary(hmem);
PyErr_Format(*DLL_ImportError,
"Could not find function %s in memory loaded pyd", initfuncname);
return NULL;
}
oldcontext = *DLL_Py_PackageContext;
*DLL_Py_PackageContext = modname;
do_init();
*DLL_Py_PackageContext = oldcontext;
if (PyErr_Occurred())
return NULL;
/* Retrieve from sys.modules */
return PyImport_ImportModule(modname);
}
static PyMethodDef methods[] = {
{ "import_module", import_module, METH_VARARGS,
"import_module(code, initfunc, dllname[, finder]) -> module" },
{ "set_find_proc", set_find_proc, METH_VARARGS },
{ NULL, NULL }, /* Sentinel */
};
// }}}
static int _show_error(const wchar_t *preamble, const wchar_t *msg, const int code) { static int _show_error(const wchar_t *preamble, const wchar_t *msg, const int code) {
wchar_t *buf, *cbuf; wchar_t *buf, *cbuf;
buf = (wchar_t*)LocalAlloc(LMEM_ZEROINIT, sizeof(wchar_t)* buf = (wchar_t*)LocalAlloc(LMEM_ZEROINIT, sizeof(wchar_t)*
@ -61,7 +173,7 @@ int show_last_error(wchar_t *preamble) {
NULL, NULL,
dw, dw,
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
msg, &msg,
0, NULL ); 0, NULL );
return _show_error(preamble, msg, (int)dw); return _show_error(preamble, msg, (int)dw);
@ -185,7 +297,7 @@ void initialize_interpreter(wchar_t *outr, wchar_t *errr,
char *dummy_argv[1] = {""}; char *dummy_argv[1] = {""};
buf = (char*)calloc(MAX_PATH, sizeof(char)); buf = (char*)calloc(MAX_PATH, sizeof(char));
path = (char*)calloc(3*MAX_PATH, sizeof(char)); path = (char*)calloc(MAX_PATH, sizeof(char));
if (!buf || !path) ExitProcess(_show_error(L"Out of memory", L"", 1)); if (!buf || !path) ExitProcess(_show_error(L"Out of memory", L"", 1));
sz = GetModuleFileNameA(NULL, buf, MAX_PATH); sz = GetModuleFileNameA(NULL, buf, MAX_PATH);
@ -198,8 +310,7 @@ void initialize_interpreter(wchar_t *outr, wchar_t *errr,
buf[strlen(buf)-1] = '\0'; buf[strlen(buf)-1] = '\0';
_snprintf_s(python_home, MAX_PATH, _TRUNCATE, "%s", buf); _snprintf_s(python_home, MAX_PATH, _TRUNCATE, "%s", buf);
_snprintf_s(path, 3*MAX_PATH, _TRUNCATE, "%s\\pylib.zip;%s\\pydlib;%s\\DLLs", _snprintf_s(path, MAX_PATH, _TRUNCATE, "%s\\pylib.zip", buf);
buf, buf, buf);
free(buf); free(buf);
@ -227,7 +338,10 @@ void initialize_interpreter(wchar_t *outr, wchar_t *errr,
if (!flag) ExitProcess(_show_error(L"Failed to get debug flag", L"", 1)); if (!flag) ExitProcess(_show_error(L"Failed to get debug flag", L"", 1));
//*flag = 1; //*flag = 1;
DLL_Py_PackageContext = (char**)GetProcAddress(dll, "_Py_PackageContext");
if (!DLL_Py_PackageContext) ExitProcess(_show_error(L"Failed to load _Py_PackageContext from dll", L"", 1));
DLL_ImportError = (PyObject**)GetProcAddress(dll, "PyExc_ImportError");
if (!DLL_ImportError) ExitProcess(_show_error(L"Failed to load PyExc_ImportError from dll", L"", 1));
Py_SetProgramName(program_name); Py_SetProgramName(program_name);
Py_SetPythonHome(python_home); Py_SetPythonHome(python_home);
@ -263,6 +377,10 @@ void initialize_interpreter(wchar_t *outr, wchar_t *errr,
PyList_SetItem(argv, i, v); PyList_SetItem(argv, i, v);
} }
PySys_SetObject("argv", argv); PySys_SetObject("argv", argv);
findproc = FindLibrary;
Py_InitModule3("_memimporter", methods, module_doc);
} }

View File

@ -11,6 +11,10 @@
SummaryCodepage='1252' /> SummaryCodepage='1252' />
<Media Id="1" Cabinet="{app}.cab" CompressionLevel="{compression}" EmbedCab="yes" /> <Media Id="1" Cabinet="{app}.cab" CompressionLevel="{compression}" EmbedCab="yes" />
<!-- The following line ensures that DLLs are replaced even if their version is the same as before. This
is necessary because of the manifest nuking that was part of making calibre isolated. But I think it
is more rigorous anyway. -->
<Property Id='REINSTALLMODE' Value='emus'/>
<Upgrade Id="{upgrade_code}"> <Upgrade Id="{upgrade_code}">
<UpgradeVersion Maximum="{version}" <UpgradeVersion Maximum="{version}"
@ -33,7 +37,6 @@
</Property> </Property>
<Directory Id='TARGETDIR' Name='SourceDir'> <Directory Id='TARGETDIR' Name='SourceDir'>
<Merge Id="VCRedist" SourceFile="{crt_msm}" DiskId="1" Language="0"/>
<Directory Id='ProgramFilesFolder' Name='PFiles'> <Directory Id='ProgramFilesFolder' Name='PFiles'>
<Directory Id='APPLICATIONFOLDER' Name='{app}' /> <Directory Id='APPLICATIONFOLDER' Name='{app}' />
</Directory> </Directory>
@ -100,10 +103,6 @@
<ComponentRef Id="RememberInstallDir"/> <ComponentRef Id="RememberInstallDir"/>
</Feature> </Feature>
<Feature Id="VCRedist" Title="Visual C++ 8.0 Runtime" AllowAdvertise="no" Display="hidden" Level="1">
<MergeRef Id="VCRedist"/>
</Feature>
<Feature Id="FSMS" Title="Start menu shortcuts" Level="1" <Feature Id="FSMS" Title="Start menu shortcuts" Level="1"
Description="Program shortcuts installed in the Start Menu"> Description="Program shortcuts installed in the Start Menu">
<ComponentRef Id="StartMenuShortcuts"/> <ComponentRef Id="StartMenuShortcuts"/>
@ -149,12 +148,13 @@
Set default folder name and allow only per machine installs. Set default folder name and allow only per machine installs.
For a per-machine installation, the default installation location For a per-machine installation, the default installation location
will be [ProgramFilesFolder][ApplicationFolderName] and the user will be [ProgramFilesFolder][ApplicationFolderName] and the user
will be able to change it in the setup UI. This is because the installer
has to install the VC90 merge module into the system winsxs folder
to work, so per user installs are impossible anyway.
will be able to change it in the setup UI. This is no longer necessary
for python (i.e. per user installs should work) but left this way as I
don't want to deal with the complications
--> -->
<Property Id="ApplicationFolderName" Value="Calibre2" /> <Property Id="ApplicationFolderName" Value="Calibre2" />
<Property Id="WixAppFolder" Value="WixPerMachineFolder" /> <Property Id="WixAppFolder" Value="WixPerMachineFolder" />
<Property Id="ALLUSERS" Value="1" />
<WixVariable Id="WixUISupportPerUser" Value="0" /> <WixVariable Id="WixUISupportPerUser" Value="0" />
<!-- Add option to launch calibre after install --> <!-- Add option to launch calibre after install -->
@ -164,10 +164,6 @@
<CustomAction Id="LaunchApplication" BinaryKey="WixCA" <CustomAction Id="LaunchApplication" BinaryKey="WixCA"
DllEntry="WixShellExec" Impersonate="yes"/> DllEntry="WixShellExec" Impersonate="yes"/>
<InstallUISequence>
<FileCost Suppress="yes" />
</InstallUISequence>
</Product> </Product>
</Wix> </Wix>

View File

@ -35,7 +35,6 @@ class WixMixIn:
exe_map = self.smap, exe_map = self.smap,
main_icon = self.j(self.src_root, 'icons', 'library.ico'), main_icon = self.j(self.src_root, 'icons', 'library.ico'),
web_icon = self.j(self.src_root, 'icons', 'web.ico'), web_icon = self.j(self.src_root, 'icons', 'web.ico'),
crt_msm = self.j(self.SW, 'Microsoft_VC90_CRT_x86.msm')
) )
template = open(self.j(self.d(__file__), 'en-us.xml'), template = open(self.j(self.d(__file__), 'en-us.xml'),
'rb').read() 'rb').read()

View File

@ -85,7 +85,7 @@ class Translations(POT):
def mo_file(self, po_file): def mo_file(self, po_file):
locale = os.path.splitext(os.path.basename(po_file))[0] locale = os.path.splitext(os.path.basename(po_file))[0]
return locale, os.path.join(self.DEST, locale, 'LC_MESSAGES', 'messages.mo') return locale, os.path.join(self.DEST, locale, 'messages.mo')
def run(self, opts): def run(self, opts):
@ -94,9 +94,8 @@ class Translations(POT):
base = os.path.dirname(dest) base = os.path.dirname(dest)
if not os.path.exists(base): if not os.path.exists(base):
os.makedirs(base) os.makedirs(base)
if self.newer(dest, f): self.info('\tCompiling translations for', locale)
self.info('\tCompiling translations for', locale) subprocess.check_call(['msgfmt', '-o', dest, f])
subprocess.check_call(['msgfmt', '-o', dest, f])
if locale in ('en_GB', 'nds', 'te', 'yi'): if locale in ('en_GB', 'nds', 'te', 'yi'):
continue continue
pycountry = self.j(sysconfig.get_python_lib(), 'pycountry', pycountry = self.j(sysconfig.get_python_lib(), 'pycountry',
@ -123,6 +122,16 @@ class Translations(POT):
shutil.copy2(f, dest) shutil.copy2(f, dest)
self.write_stats() self.write_stats()
self.freeze_locales()
def freeze_locales(self):
zf = self.DEST + '.zip'
from calibre import CurrentDir
from calibre.utils.zipfile import ZipFile, ZIP_DEFLATED
with ZipFile(zf, 'w', ZIP_DEFLATED) as zf:
with CurrentDir(self.DEST):
zf.add_dir('.')
shutil.rmtree(self.DEST)
@property @property
def stats(self): def stats(self):

View File

@ -586,10 +586,10 @@ from calibre.devices.apple.driver import ITUNES
from calibre.devices.hanlin.driver import HANLINV3, HANLINV5, BOOX, SPECTRA from calibre.devices.hanlin.driver import HANLINV3, HANLINV5, BOOX, SPECTRA
from calibre.devices.blackberry.driver import BLACKBERRY from calibre.devices.blackberry.driver import BLACKBERRY
from calibre.devices.cybook.driver import CYBOOK, ORIZON from calibre.devices.cybook.driver import CYBOOK, ORIZON
from calibre.devices.eb600.driver import EB600, COOL_ER, SHINEBOOK, \ from calibre.devices.eb600.driver import (EB600, COOL_ER, SHINEBOOK,
POCKETBOOK360, GER2, ITALICA, ECLICTO, DBOOK, INVESBOOK, \ POCKETBOOK360, GER2, ITALICA, ECLICTO, DBOOK, INVESBOOK,
BOOQ, ELONEX, POCKETBOOK301, MENTOR, POCKETBOOK602, \ BOOQ, ELONEX, POCKETBOOK301, MENTOR, POCKETBOOK602,
POCKETBOOK701 POCKETBOOK701, POCKETBOOK360P)
from calibre.devices.iliad.driver import ILIAD from calibre.devices.iliad.driver import ILIAD
from calibre.devices.irexdr.driver import IREXDR1000, IREXDR800 from calibre.devices.irexdr.driver import IREXDR1000, IREXDR800
from calibre.devices.jetbook.driver import JETBOOK, MIBUK, JETBOOK_MINI from calibre.devices.jetbook.driver import JETBOOK, MIBUK, JETBOOK_MINI
@ -608,9 +608,9 @@ from calibre.devices.edge.driver import EDGE
from calibre.devices.teclast.driver import TECLAST_K3, NEWSMY, IPAPYRUS, \ from calibre.devices.teclast.driver import TECLAST_K3, NEWSMY, IPAPYRUS, \
SOVOS, PICO, SUNSTECH_EB700, ARCHOS7O, STASH, WEXLER SOVOS, PICO, SUNSTECH_EB700, ARCHOS7O, STASH, WEXLER
from calibre.devices.sne.driver import SNE from calibre.devices.sne.driver import SNE
from calibre.devices.misc import PALMPRE, AVANT, SWEEX, PDNOVEL, \ from calibre.devices.misc import (PALMPRE, AVANT, SWEEX, PDNOVEL,
GEMEI, VELOCITYMICRO, PDNOVEL_KOBO, LUMIREAD, ALURATEK_COLOR, \ GEMEI, VELOCITYMICRO, PDNOVEL_KOBO, LUMIREAD, ALURATEK_COLOR,
TREKSTOR, EEEREADER, NEXTBOOK TREKSTOR, EEEREADER, NEXTBOOK, ADAM)
from calibre.devices.folder_device.driver import FOLDER_DEVICE_FOR_CONFIG from calibre.devices.folder_device.driver import FOLDER_DEVICE_FOR_CONFIG
from calibre.devices.kobo.driver import KOBO from calibre.devices.kobo.driver import KOBO
from calibre.devices.bambook.driver import BAMBOOK from calibre.devices.bambook.driver import BAMBOOK
@ -689,7 +689,7 @@ plugins += [
JETBOOK_MINI, JETBOOK_MINI,
MIBUK, MIBUK,
SHINEBOOK, SHINEBOOK,
POCKETBOOK360, POCKETBOOK301, POCKETBOOK602, POCKETBOOK701, POCKETBOOK360, POCKETBOOK301, POCKETBOOK602, POCKETBOOK701, POCKETBOOK360P,
KINDLE, KINDLE,
KINDLE2, KINDLE2,
KINDLE_DX, KINDLE_DX,
@ -744,6 +744,7 @@ plugins += [
TREKSTOR, TREKSTOR,
EEEREADER, EEEREADER,
NEXTBOOK, NEXTBOOK,
ADAM,
ITUNES, ITUNES,
BOEYE_BEX, BOEYE_BEX,
BOEYE_BDX, BOEYE_BDX,
@ -1231,7 +1232,7 @@ class StoreEpubBudStore(StoreBase):
name = 'ePub Bud' name = 'ePub Bud'
description = 'Well, it\'s pretty much just "YouTube for Children\'s eBooks". A not-for-profit organization devoted to bringing self-published children\'s books to the world.' description = 'Well, it\'s pretty much just "YouTube for Children\'s eBooks". A not-for-profit organization devoted to bringing self-published children\'s books to the world.'
actual_plugin = 'calibre.gui2.store.epubbud_plugin:EpubBudStore' actual_plugin = 'calibre.gui2.store.epubbud_plugin:EpubBudStore'
drm_free_only = True drm_free_only = True
headquarters = 'US' headquarters = 'US'
formats = ['EPUB'] formats = ['EPUB']
@ -1417,6 +1418,15 @@ class StoreWoblinkStore(StoreBase):
headquarters = 'PL' headquarters = 'PL'
formats = ['EPUB'] formats = ['EPUB']
class StoreZixoStore(StoreBase):
name = 'Zixo'
author = u'Tomasz Długosz'
description = u'Księgarnia z ebookami oraz książkami audio. Aby otwierać książki w formacie Zixo należy zainstalować program dostępny na stronie księgarni. Umożliwia on m.in. dodawanie zakładek i dostosowywanie rozmiaru czcionki.'
actual_plugin = 'calibre.gui2.store.zixo_plugin:ZixoStore'
headquarters = 'PL'
formats = ['PDF, ZIXO']
plugins += [ plugins += [
StoreArchiveOrgStore, StoreArchiveOrgStore,
StoreAmazonKindleStore, StoreAmazonKindleStore,
@ -1451,7 +1461,8 @@ plugins += [
StoreWeightlessBooksStore, StoreWeightlessBooksStore,
StoreWHSmithUKStore, StoreWHSmithUKStore,
StoreWizardsTowerBooksStore, StoreWizardsTowerBooksStore,
StoreWoblinkStore StoreWoblinkStore,
StoreZixoStore
] ]
# }}} # }}}

View File

@ -53,6 +53,8 @@ Run an embedded python interpreter.
default=False, action='store_true') default=False, action='store_true')
parser.add_option('-m', '--inspect-mobi', parser.add_option('-m', '--inspect-mobi',
help='Inspect the MOBI file at the specified path', default=None) help='Inspect the MOBI file at the specified path', default=None)
parser.add_option('--test-build', help='Test binary modules in build',
action='store_true', default=False)
return parser return parser
@ -232,6 +234,9 @@ def main(args=sys.argv):
elif opts.inspect_mobi is not None: elif opts.inspect_mobi is not None:
from calibre.ebooks.mobi.debug import inspect_mobi from calibre.ebooks.mobi.debug import inspect_mobi
inspect_mobi(opts.inspect_mobi) inspect_mobi(opts.inspect_mobi)
elif opts.test_build:
from calibre.test_build import test
test()
else: else:
from calibre import ipython from calibre import ipython
ipython() ipython()
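The calibre.test_build module itself is not part of this hunk; purely as an illustration (the module names below are assumptions, not the actual implementation), the kind of check it performs amounts to importing the compiled extensions and failing loudly if any of them cannot be loaded:

def test():
    # Illustrative sketch only: try to load a few binary modules from the
    # frozen build and report which ones import cleanly.
    for name in ('lxml.etree', 'PIL.Image', 'PyQt4.Qt'):
        __import__(name)
        print 'Loaded', name
    print 'All binary modules loaded successfully'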

View File

@ -53,6 +53,7 @@ class ANDROID(USBMS):
0x681c : [0x0222, 0x0224, 0x0400], 0x681c : [0x0222, 0x0224, 0x0400],
0x6640 : [0x0100], 0x6640 : [0x0100],
0x685e : [0x0400], 0x685e : [0x0400],
0x6860 : [0x0400],
0x6877 : [0x0400], 0x6877 : [0x0400],
}, },

View File

@ -246,6 +246,16 @@ class POCKETBOOK602(USBMS):
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['PB602', 'PB603', 'PB902', WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['PB602', 'PB603', 'PB902',
'PB903', 'PB'] 'PB903', 'PB']
class POCKETBOOK360P(POCKETBOOK602):
name = 'PocketBook 360+ Device Interface'
description = _('Communicate with the PocketBook 360+ reader.')
BCD = [0x0323]
EBOOK_DIR_MAIN = ''
VENDOR_NAME = '__POCKET'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'BOOK_USB_STORAGE'
class POCKETBOOK701(USBMS): class POCKETBOOK701(USBMS):
name = 'PocketBook 701 Device Interface' name = 'PocketBook 701 Device Interface'

View File

@ -255,6 +255,28 @@ class EEEREADER(USBMS):
VENDOR_NAME = 'LINUX' VENDOR_NAME = 'LINUX'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'FILE-STOR_GADGET' WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'FILE-STOR_GADGET'
class ADAM(USBMS):
name = 'Notion Ink Adam device interface'
gui_name = 'Adam'
description = _('Communicate with the Adam tablet')
author = 'Kovid Goyal'
supported_platforms = ['windows', 'osx', 'linux']
# Ordered list of supported formats
FORMATS = ['epub', 'pdf', 'doc']
VENDOR_ID = [0x0955]
PRODUCT_ID = [0x7100]
BCD = [0x9999]
EBOOK_DIR_MAIN = 'eBooks'
VENDOR_NAME = 'NI'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['ADAM']
SUPPORTS_SUB_DIRS = True
class NEXTBOOK(USBMS): class NEXTBOOK(USBMS):
name = 'Nextbook device interface' name = 'Nextbook device interface'

View File

@ -107,6 +107,9 @@ class NOOK_COLOR(NOOK):
return filepath return filepath
def upload_cover(self, path, filename, metadata, filepath):
pass
class NOOK_TSR(NOOK): class NOOK_TSR(NOOK):
gui_name = _('Nook Simple') gui_name = _('Nook Simple')
description = _('Communicate with the Nook TSR eBook reader.') description = _('Communicate with the Nook TSR eBook reader.')

View File

@ -204,7 +204,8 @@ class CollectionsBookList(BookList):
elif fm['datatype'] == 'text' and fm['is_multiple']: elif fm['datatype'] == 'text' and fm['is_multiple']:
val = orig_val val = orig_val
elif fm['datatype'] == 'composite' and fm['is_multiple']: elif fm['datatype'] == 'composite' and fm['is_multiple']:
val = [v.strip() for v in val.split(fm['is_multiple'])] val = [v.strip() for v in
val.split(fm['is_multiple']['ui_to_list'])]
else: else:
val = [val] val = [val]

View File

@ -11,7 +11,7 @@ import os, shutil, traceback, textwrap, time, codecs
from Queue import Empty from Queue import Empty
from calibre.customize.conversion import InputFormatPlugin, OptionRecommendation from calibre.customize.conversion import InputFormatPlugin, OptionRecommendation
from calibre import extract, CurrentDir, prints from calibre import extract, CurrentDir, prints, walk
from calibre.constants import filesystem_encoding from calibre.constants import filesystem_encoding
from calibre.ptempfile import PersistentTemporaryDirectory from calibre.ptempfile import PersistentTemporaryDirectory
from calibre.utils.ipc.server import Server from calibre.utils.ipc.server import Server
@ -27,6 +27,11 @@ def extract_comic(path_to_comic_file):
# names # names
tdir = tdir.decode(filesystem_encoding) tdir = tdir.decode(filesystem_encoding)
extract(path_to_comic_file, tdir) extract(path_to_comic_file, tdir)
for x in walk(tdir):
bn = os.path.basename(x)
nbn = bn.replace('#', '_')
if nbn != bn:
os.rename(x, os.path.join(os.path.dirname(x), nbn))
return tdir return tdir
def find_pages(dir, sort_on_mtime=False, verbose=False): def find_pages(dir, sort_on_mtime=False, verbose=False):
@ -362,6 +367,7 @@ class ComicInput(InputFormatPlugin):
if not line: if not line:
continue continue
fname, title = line.partition(':')[0], line.partition(':')[-1] fname, title = line.partition(':')[0], line.partition(':')[-1]
fname = fname.replace('#', '_')
fname = os.path.join(tdir, *fname.split('/')) fname = os.path.join(tdir, *fname.split('/'))
if not title: if not title:
title = os.path.basename(fname).rpartition('.')[0] title = os.path.basename(fname).rpartition('.')[0]

View File

@ -621,10 +621,7 @@ class Metadata(object):
orig_res = res orig_res = res
datatype = cmeta['datatype'] datatype = cmeta['datatype']
if datatype == 'text' and cmeta['is_multiple']: if datatype == 'text' and cmeta['is_multiple']:
if cmeta['display'].get('is_names', False): res = cmeta['is_multiple']['list_to_ui'].join(res)
res = u' & '.join(res)
else:
res = u', '.join(sorted(res, key=sort_key))
elif datatype == 'series' and series_with_index: elif datatype == 'series' and series_with_index:
if self.get_extra(key) is not None: if self.get_extra(key) is not None:
res = res + \ res = res + \
@ -668,7 +665,7 @@ class Metadata(object):
elif datatype == 'text' and fmeta['is_multiple']: elif datatype == 'text' and fmeta['is_multiple']:
if isinstance(res, dict): if isinstance(res, dict):
res = [k + ':' + v for k,v in res.items()] res = [k + ':' + v for k,v in res.items()]
res = u', '.join(sorted(res, key=sort_key)) res = fmeta['is_multiple']['list_to_ui'].join(sorted(res, key=sort_key))
elif datatype == 'series' and series_with_index: elif datatype == 'series' and series_with_index:
res = res + ' [%s]'%self.format_series_index() res = res + ' [%s]'%self.format_series_index()
elif datatype == 'datetime': elif datatype == 'datetime':

View File

@ -5,8 +5,7 @@ Created on 4 Jun 2010
''' '''
from base64 import b64encode, b64decode from base64 import b64encode, b64decode
import json import json, traceback
import traceback
from calibre.ebooks.metadata.book import SERIALIZABLE_FIELDS from calibre.ebooks.metadata.book import SERIALIZABLE_FIELDS
from calibre.constants import filesystem_encoding, preferred_encoding from calibre.constants import filesystem_encoding, preferred_encoding
@ -69,6 +68,40 @@ def object_to_unicode(obj, enc=preferred_encoding):
return ans return ans
return obj return obj
def encode_is_multiple(fm):
if fm.get('is_multiple', None):
# migrate is_multiple back to a character
fm['is_multiple2'] = fm.get('is_multiple', {})
dt = fm.get('datatype', None)
if dt == 'composite':
fm['is_multiple'] = ','
else:
fm['is_multiple'] = '|'
else:
fm['is_multiple'] = None
fm['is_multiple2'] = {}
def decode_is_multiple(fm):
im = fm.get('is_multiple2', None)
if im:
fm['is_multiple'] = im
del fm['is_multiple2']
else:
# Must migrate the is_multiple from char to dict
im = fm.get('is_multiple', {})
if im:
dt = fm.get('datatype', None)
if dt == 'composite':
im = {'cache_to_list': ',', 'ui_to_list': ',',
'list_to_ui': ', '}
elif fm.get('display', {}).get('is_names', False):
im = {'cache_to_list': '|', 'ui_to_list': '&',
'list_to_ui': ', '}
else:
im = {'cache_to_list': '|', 'ui_to_list': ',',
'list_to_ui': ', '}
fm['is_multiple'] = im
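A rough round-trip illustration of these two helpers, using a hypothetical field metadata dict for a names-style custom column (the values shown are only for illustration):

from calibre.ebooks.metadata.book.json_codec import (encode_is_multiple,
    decode_is_multiple)

fm = {'datatype': 'text',
      'display': {'is_names': True},
      'is_multiple': {'cache_to_list': '|', 'ui_to_list': '&',
                      'list_to_ui': ', '}}

encode_is_multiple(fm)
# fm['is_multiple'] is now the legacy single character '|' and the full
# dict has been stashed in fm['is_multiple2']

decode_is_multiple(fm)
# fm['is_multiple'] is the dict again and 'is_multiple2' has been removed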
class JsonCodec(object): class JsonCodec(object):
def __init__(self): def __init__(self):
@ -93,9 +126,10 @@ class JsonCodec(object):
def encode_metadata_attr(self, book, key): def encode_metadata_attr(self, book, key):
if key == 'user_metadata': if key == 'user_metadata':
meta = book.get_all_user_metadata(make_copy=True) meta = book.get_all_user_metadata(make_copy=True)
for k in meta: for fm in meta.itervalues():
if meta[k]['datatype'] == 'datetime': if fm['datatype'] == 'datetime':
meta[k]['#value#'] = datetime_to_string(meta[k]['#value#']) fm['#value#'] = datetime_to_string(fm['#value#'])
encode_is_multiple(fm)
return meta return meta
if key in self.field_metadata: if key in self.field_metadata:
datatype = self.field_metadata[key]['datatype'] datatype = self.field_metadata[key]['datatype']
@ -135,9 +169,10 @@ class JsonCodec(object):
if key == 'classifiers': if key == 'classifiers':
key = 'identifiers' key = 'identifiers'
if key == 'user_metadata': if key == 'user_metadata':
for k in value: for fm in value.itervalues():
if value[k]['datatype'] == 'datetime': if fm['datatype'] == 'datetime':
value[k]['#value#'] = string_to_datetime(value[k]['#value#']) fm['#value#'] = string_to_datetime(fm['#value#'])
decode_is_multiple(fm)
return value return value
elif key in self.field_metadata: elif key in self.field_metadata:
if self.field_metadata[key]['datatype'] == 'datetime': if self.field_metadata[key]['datatype'] == 'datetime':

View File

@ -7,7 +7,7 @@ __docformat__ = 'restructuredtext en'
lxml based OPF parser. lxml based OPF parser.
''' '''
import re, sys, unittest, functools, os, uuid, glob, cStringIO, json import re, sys, unittest, functools, os, uuid, glob, cStringIO, json, copy
from urllib import unquote from urllib import unquote
from urlparse import urlparse from urlparse import urlparse
@ -453,10 +453,13 @@ class TitleSortField(MetadataField):
def serialize_user_metadata(metadata_elem, all_user_metadata, tail='\n'+(' '*8)): def serialize_user_metadata(metadata_elem, all_user_metadata, tail='\n'+(' '*8)):
from calibre.utils.config import to_json from calibre.utils.config import to_json
from calibre.ebooks.metadata.book.json_codec import object_to_unicode from calibre.ebooks.metadata.book.json_codec import (object_to_unicode,
encode_is_multiple)
for name, fm in all_user_metadata.items(): for name, fm in all_user_metadata.items():
try: try:
fm = copy.copy(fm)
encode_is_multiple(fm)
fm = object_to_unicode(fm) fm = object_to_unicode(fm)
fm = json.dumps(fm, default=to_json, ensure_ascii=False) fm = json.dumps(fm, default=to_json, ensure_ascii=False)
except: except:
@ -575,6 +578,7 @@ class OPF(object): # {{{
self._user_metadata_ = {} self._user_metadata_ = {}
temp = Metadata('x', ['x']) temp = Metadata('x', ['x'])
from calibre.utils.config import from_json from calibre.utils.config import from_json
from calibre.ebooks.metadata.book.json_codec import decode_is_multiple
elems = self.root.xpath('//*[name() = "meta" and starts-with(@name,' elems = self.root.xpath('//*[name() = "meta" and starts-with(@name,'
'"calibre:user_metadata:") and @content]') '"calibre:user_metadata:") and @content]')
for elem in elems: for elem in elems:
@ -585,6 +589,7 @@ class OPF(object): # {{{
fm = elem.get('content') fm = elem.get('content')
try: try:
fm = json.loads(fm, object_hook=from_json) fm = json.loads(fm, object_hook=from_json)
decode_is_multiple(fm)
temp.set_user_metadata(name, fm) temp.set_user_metadata(name, fm)
except: except:
prints('Failed to read user metadata:', name) prints('Failed to read user metadata:', name)

View File

@ -42,6 +42,7 @@ class Worker(Thread): # Get details {{{
months = { months = {
'de': { 'de': {
1 : ['jän'], 1 : ['jän'],
2 : ['februar'],
3 : ['märz'], 3 : ['märz'],
5 : ['mai'], 5 : ['mai'],
6 : ['juni'], 6 : ['juni'],

View File

@ -13,7 +13,13 @@ from weakref import WeakKeyDictionary
from xml.dom import SyntaxErr as CSSSyntaxError from xml.dom import SyntaxErr as CSSSyntaxError
import cssutils import cssutils
from cssutils.css import (CSSStyleRule, CSSPageRule, CSSStyleDeclaration, from cssutils.css import (CSSStyleRule, CSSPageRule, CSSStyleDeclaration,
CSSValueList, CSSFontFaceRule, cssproperties) CSSFontFaceRule, cssproperties)
try:
from cssutils.css import CSSValueList
CSSValueList
except ImportError:
# cssutils >= 0.9.8
from cssutils.css import PropertyValue as CSSValueList
from cssutils import profile as cssprofiles from cssutils import profile as cssprofiles
from lxml import etree from lxml import etree
from lxml.cssselect import css_to_xpath, ExpressionError, SelectorSyntaxError from lxml.cssselect import css_to_xpath, ExpressionError, SelectorSyntaxError

View File

@ -94,6 +94,9 @@ class DeleteAction(InterfaceAction):
self.delete_menu.addAction( self.delete_menu.addAction(
_('Remove all formats from selected books, except...'), _('Remove all formats from selected books, except...'),
self.delete_all_but_selected_formats) self.delete_all_but_selected_formats)
self.delete_menu.addAction(
_('Remove all formats from selected books'),
self.delete_all_formats)
self.delete_menu.addAction( self.delete_menu.addAction(
_('Remove covers from selected books'), self.delete_covers) _('Remove covers from selected books'), self.delete_covers)
self.delete_menu.addSeparator() self.delete_menu.addSeparator()
@ -174,6 +177,28 @@ class DeleteAction(InterfaceAction):
if ids: if ids:
self.gui.tags_view.recount() self.gui.tags_view.recount()
def delete_all_formats(self, *args):
ids = self._get_selected_ids()
if not ids:
return
if not confirm('<p>'+_('<b>All formats</b> for the selected books will '
'be <b>deleted</b> from your library.<br>'
'The book metadata will be kept. Are you sure?')
+'</p>', 'delete_all_formats', self.gui):
return
db = self.gui.library_view.model().db
for id in ids:
fmts = db.formats(id, index_is_id=True, verify_formats=False)
if fmts:
for fmt in fmts.split(','):
self.gui.library_view.model().db.remove_format(id, fmt,
index_is_id=True, notify=False)
self.gui.library_view.model().refresh_ids(ids)
self.gui.library_view.model().current_changed(self.gui.library_view.currentIndex(),
self.gui.library_view.currentIndex())
if ids:
self.gui.tags_view.recount()
def remove_matching_books_from_device(self, *args): def remove_matching_books_from_device(self, *args):
if not self.gui.device_manager.is_device_connected: if not self.gui.device_manager.is_device_connected:
d = error_dialog(self.gui, _('Cannot delete books'), d = error_dialog(self.gui, _('Cannot delete books'),

View File

@ -226,16 +226,14 @@ class Comments(Base):
class Text(Base): class Text(Base):
def setup_ui(self, parent): def setup_ui(self, parent):
if self.col_metadata['display'].get('is_names', False): self.sep = self.col_metadata['multiple_seps']
self.sep = u' & '
else:
self.sep = u', '
values = self.all_values = list(self.db.all_custom(num=self.col_id)) values = self.all_values = list(self.db.all_custom(num=self.col_id))
values.sort(key=sort_key) values.sort(key=sort_key)
if self.col_metadata['is_multiple']: if self.col_metadata['is_multiple']:
w = MultiCompleteLineEdit(parent) w = MultiCompleteLineEdit(parent)
w.set_separator(self.sep.strip()) w.set_separator(self.sep['ui_to_list'])
if self.sep == u' & ': if self.sep['ui_to_list'] == '&':
w.set_space_before_sep(True) w.set_space_before_sep(True)
w.set_add_separator(tweaks['authors_completer_append_separator']) w.set_add_separator(tweaks['authors_completer_append_separator'])
w.update_items_cache(values) w.update_items_cache(values)
@ -269,12 +267,12 @@ class Text(Base):
if self.col_metadata['is_multiple']: if self.col_metadata['is_multiple']:
if not val: if not val:
val = [] val = []
self.widgets[1].setText(self.sep.join(val)) self.widgets[1].setText(self.sep['list_to_ui'].join(val))
def getter(self): def getter(self):
if self.col_metadata['is_multiple']: if self.col_metadata['is_multiple']:
val = unicode(self.widgets[1].text()).strip() val = unicode(self.widgets[1].text()).strip()
ans = [x.strip() for x in val.split(self.sep.strip()) if x.strip()] ans = [x.strip() for x in val.split(self.sep['ui_to_list']) if x.strip()]
if not ans: if not ans:
ans = None ans = None
return ans return ans
@ -899,9 +897,10 @@ class BulkText(BulkBase):
if not self.a_c_checkbox.isChecked(): if not self.a_c_checkbox.isChecked():
return return
if self.col_metadata['is_multiple']: if self.col_metadata['is_multiple']:
ism = self.col_metadata['multiple_seps']
if self.col_metadata['display'].get('is_names', False): if self.col_metadata['display'].get('is_names', False):
val = self.gui_val val = self.gui_val
add = [v.strip() for v in val.split('&') if v.strip()] add = [v.strip() for v in val.split(ism['ui_to_list']) if v.strip()]
self.db.set_custom_bulk(book_ids, add, num=self.col_id) self.db.set_custom_bulk(book_ids, add, num=self.col_id)
else: else:
remove_all, adding, rtext = self.gui_val remove_all, adding, rtext = self.gui_val
@ -911,10 +910,10 @@ class BulkText(BulkBase):
else: else:
txt = rtext txt = rtext
if txt: if txt:
remove = set([v.strip() for v in txt.split(',')]) remove = set([v.strip() for v in txt.split(ism['ui_to_list'])])
txt = adding txt = adding
if txt: if txt:
add = set([v.strip() for v in txt.split(',')]) add = set([v.strip() for v in txt.split(ism['ui_to_list'])])
else: else:
add = set() add = set()
self.db.set_custom_bulk_multiple(book_ids, add=add, self.db.set_custom_bulk_multiple(book_ids, add=add,

View File

@ -154,13 +154,16 @@ _proceed_memory = []
class ProceedNotification(MessageBox): # {{{ class ProceedNotification(MessageBox): # {{{
def __init__(self, callback, payload, html_log, log_viewer_title, title, msg, def __init__(self, callback, payload, html_log, log_viewer_title, title, msg,
det_msg='', show_copy_button=False, parent=None): det_msg='', show_copy_button=False, parent=None,
cancel_callback=None):
''' '''
A non modal popup that notifies the user that a background task has A non modal popup that notifies the user that a background task has
been completed. been completed.
:param callback: A callable that is called with payload if the user :param callback: A callable that is called with payload if the user
asks to proceed. Note that this is always called in the GUI thread asks to proceed. Note that this is always called in the GUI thread.
:param cancel_callback: A callable that is called with the payload if
the user asks not to proceed.
:param payload: Arbitrary object, passed to callback :param payload: Arbitrary object, passed to callback
:param html_log: An HTML or plain text log :param html_log: An HTML or plain text log
:param log_viewer_title: The title for the log viewer window :param log_viewer_title: The title for the log viewer window
@ -181,7 +184,7 @@ class ProceedNotification(MessageBox): # {{{
self.vlb.clicked.connect(self.show_log) self.vlb.clicked.connect(self.show_log)
self.det_msg_toggle.setVisible(bool(det_msg)) self.det_msg_toggle.setVisible(bool(det_msg))
self.setModal(False) self.setModal(False)
self.callback = callback self.callback, self.cancel_callback = callback, cancel_callback
_proceed_memory.append(self) _proceed_memory.append(self)
def show_log(self): def show_log(self):
@ -192,9 +195,11 @@ class ProceedNotification(MessageBox): # {{{
try: try:
if result == self.Accepted: if result == self.Accepted:
self.callback(self.payload) self.callback(self.payload)
elif self.cancel_callback is not None:
self.cancel_callback(self.payload)
finally: finally:
# Ensure this notification is garbage collected # Ensure this notification is garbage collected
self.callback = None self.callback = self.cancel_callback = None
self.setParent(None) self.setParent(None)
self.finished.disconnect() self.finished.disconnect()
self.vlb.clicked.disconnect() self.vlb.clicked.disconnect()

View File

@ -520,7 +520,7 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
elif not fm['is_multiple']: elif not fm['is_multiple']:
val = [val] val = [val]
elif fm['datatype'] == 'composite': elif fm['datatype'] == 'composite':
val = [v.strip() for v in val.split(fm['is_multiple'])] val = [v.strip() for v in val.split(fm['is_multiple']['ui_to_list'])]
elif field == 'authors': elif field == 'authors':
val = [v.replace('|', ',') for v in val] val = [v.replace('|', ',') for v in val]
else: else:
@ -655,19 +655,10 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
if self.destination_field_fm['is_multiple']: if self.destination_field_fm['is_multiple']:
if self.comma_separated.isChecked(): if self.comma_separated.isChecked():
if dest == 'authors' or \ splitter = self.destination_field_fm['is_multiple']['ui_to_list']
(self.destination_field_fm['is_custom'] and
self.destination_field_fm['datatype'] == 'text' and
self.destination_field_fm['display'].get('is_names', False)):
splitter = ' & '
else:
splitter = ','
res = [] res = []
for v in val: for v in val:
for x in v.split(splitter): res.extend([x.strip() for x in v.split(splitter) if x.strip()])
if x.strip():
res.append(x.strip())
val = res val = res
else: else:
val = [v.replace(',', '') for v in val] val = [v.replace(',', '') for v in val]

View File

@ -254,6 +254,15 @@ class TemplateDialog(QDialog, Ui_TemplateDialog):
self.textbox_changed() self.textbox_changed()
self.rule = (None, '') self.rule = (None, '')
tt = _('Template language tutorial')
self.template_tutorial.setText(
'<a href="http://manual.calibre-ebook.com/template_lang.html">'
'%s</a>'%tt)
tt = _('Template function reference')
self.template_func_reference.setText(
'<a href="http://manual.calibre-ebook.com/template_ref.html">'
'%s</a>'%tt)
def textbox_changed(self): def textbox_changed(self):
cur_text = unicode(self.textbox.toPlainText()) cur_text = unicode(self.textbox.toPlainText())
if self.last_text != cur_text: if self.last_text != cur_text:
@ -299,4 +308,4 @@ class TemplateDialog(QDialog, Ui_TemplateDialog):
return return
self.rule = (unicode(self.colored_field.currentText()), txt) self.rule = (unicode(self.colored_field.currentText()), txt)
QDialog.accept(self) QDialog.accept(self)

View File

@ -125,6 +125,20 @@
<item row="9" column="1"> <item row="9" column="1">
<widget class="QPlainTextEdit" name="source_code"/> <widget class="QPlainTextEdit" name="source_code"/>
</item> </item>
<item row="10" column="1">
<widget class="QLabel" name="template_tutorial">
<property name="openExternalLinks">
<bool>true</bool>
</property>
</widget>
</item>
<item row="11" column="1">
<widget class="QLabel" name="template_func_reference">
<property name="openExternalLinks">
<bool>true</bool>
</property>
</widget>
</item>
</layout> </layout>
</item> </item>
</layout> </layout>

View File

@ -7,7 +7,7 @@ __copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en' __docformat__ = 'restructuredtext en'
import os, shutil import os, shutil
from zipfile import ZipFile, ZIP_DEFLATED, ZIP_STORED from calibre.utils.zipfile import ZipFile, ZIP_DEFLATED, ZIP_STORED
from PyQt4.Qt import QDialog from PyQt4.Qt import QDialog

View File

@ -197,10 +197,12 @@ class JobManager(QAbstractTableModel): # {{{
def row_to_job(self, row): def row_to_job(self, row):
return self.jobs[row] return self.jobs[row]
def has_device_jobs(self): def has_device_jobs(self, queued_also=False):
for job in self.jobs: for job in self.jobs:
if job.is_running and isinstance(job, DeviceJob): if isinstance(job, DeviceJob):
return True if job.duration is None: # Running or waiting
if (job.is_running or queued_also):
return True
return False return False
def has_jobs(self): def has_jobs(self):

View File

@ -288,6 +288,8 @@ class CcNumberDelegate(QStyledItemDelegate): # {{{
def setEditorData(self, editor, index): def setEditorData(self, editor, index):
m = index.model() m = index.model()
val = m.db.data[index.row()][m.custom_columns[m.column_map[index.column()]]['rec_index']] val = m.db.data[index.row()][m.custom_columns[m.column_map[index.column()]]['rec_index']]
if val is None:
val = 0
editor.setValue(val) editor.setValue(val)
# }}} # }}}

View File

@ -608,10 +608,11 @@ class BooksModel(QAbstractTableModel): # {{{
def text_type(r, mult=None, idx=-1): def text_type(r, mult=None, idx=-1):
text = self.db.data[r][idx] text = self.db.data[r][idx]
if text and mult is not None: if text and mult:
if mult: jv = mult['list_to_ui']
return QVariant(u' & '.join(text.split('|'))) sv = mult['cache_to_list']
return QVariant(u', '.join(sorted(text.split('|'),key=sort_key))) return QVariant(jv.join(
sorted([t.strip() for t in text.split(sv)], key=sort_key)))
return QVariant(text) return QVariant(text)
def decorated_text_type(r, idx=-1): def decorated_text_type(r, idx=-1):
@ -665,8 +666,6 @@ class BooksModel(QAbstractTableModel): # {{{
datatype = self.custom_columns[col]['datatype'] datatype = self.custom_columns[col]['datatype']
if datatype in ('text', 'comments', 'composite', 'enumeration'): if datatype in ('text', 'comments', 'composite', 'enumeration'):
mult=self.custom_columns[col]['is_multiple'] mult=self.custom_columns[col]['is_multiple']
if mult is not None:
mult = self.custom_columns[col]['display'].get('is_names', False)
self.dc[col] = functools.partial(text_type, idx=idx, mult=mult) self.dc[col] = functools.partial(text_type, idx=idx, mult=mult)
if datatype in ['text', 'composite', 'enumeration'] and not mult: if datatype in ['text', 'composite', 'enumeration'] and not mult:
if self.custom_columns[col]['display'].get('use_decorations', False): if self.custom_columns[col]['display'].get('use_decorations', False):
@ -722,9 +721,9 @@ class BooksModel(QAbstractTableModel): # {{{
if id_ in self.color_cache: if id_ in self.color_cache:
if key in self.color_cache[id_]: if key in self.color_cache[id_]:
return self.color_cache[id_][key] return self.color_cache[id_][key]
if mi is None:
mi = self.db.get_metadata(id_, index_is_id=True)
try: try:
if mi is None:
mi = self.db.get_metadata(id_, index_is_id=True)
color = composite_formatter.safe_format(fmt, mi, '', mi) color = composite_formatter.safe_format(fmt, mi, '', mi)
if color in self.colors: if color in self.colors:
color = QColor(color) color = QColor(color)

View File

@ -159,21 +159,22 @@ class ConditionEditor(QWidget): # {{{
self.action_box.clear() self.action_box.clear()
self.action_box.addItem('', '') self.action_box.addItem('', '')
col = self.current_col col = self.current_col
m = self.fm[col] if col:
dt = m['datatype'] m = self.fm[col]
if dt in self.action_map: dt = m['datatype']
actions = self.action_map[dt] if dt in self.action_map:
else: actions = self.action_map[dt]
if col == 'ondevice':
k = 'ondevice'
elif col == 'identifiers':
k = 'identifiers'
else: else:
k = 'multiple' if m['is_multiple'] else 'single' if col == 'ondevice':
actions = self.action_map[k] k = 'ondevice'
elif col == 'identifiers':
k = 'identifiers'
else:
k = 'multiple' if m['is_multiple'] else 'single'
actions = self.action_map[k]
for text, key in actions: for text, key in actions:
self.action_box.addItem(text, key) self.action_box.addItem(text, key)
self.action_box.setCurrentIndex(0) self.action_box.setCurrentIndex(0)
self.action_box.blockSignals(False) self.action_box.blockSignals(False)
self.init_value_box() self.init_value_box()
@ -184,11 +185,15 @@ class ConditionEditor(QWidget): # {{{
self.value_box.setInputMask('') self.value_box.setInputMask('')
self.value_box.setValidator(None) self.value_box.setValidator(None)
col = self.current_col col = self.current_col
if not col:
return
m = self.fm[col] m = self.fm[col]
dt = m['datatype'] dt = m['datatype']
action = self.current_action action = self.current_action
if not col or not action: if not action:
return return
m = self.fm[col]
dt = m['datatype']
tt = '' tt = ''
if col == 'identifiers': if col == 'identifiers':
tt = _('Enter either an identifier type or an ' tt = _('Enter either an identifier type or an '
@ -206,7 +211,7 @@ class ConditionEditor(QWidget): # {{{
tt = _('Enter a regular expression') tt = _('Enter a regular expression')
elif m.get('is_multiple', False): elif m.get('is_multiple', False):
tt += '\n' + _('You can match multiple values by separating' tt += '\n' + _('You can match multiple values by separating'
' them with %s')%m['is_multiple'] ' them with %s')%m['is_multiple']['ui_to_list']
self.value_box.setToolTip(tt) self.value_box.setToolTip(tt)
if action in ('is set', 'is not set', 'is true', 'is false', if action in ('is set', 'is not set', 'is true', 'is false',
'is undefined'): 'is undefined'):

View File

@@ -13,6 +13,9 @@ from calibre.gui2 import error_dialog
class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
+    # Note: in this class, we are treating is_multiple as the boolean that
+    # custom_columns expects to find in its structure. It does not use the dict
    column_types = {
            0:{'datatype':'text',
                'text':_('Text, column shown in the tag browser'),

View File

@ -16,13 +16,28 @@
<layout class="QGridLayout" name="gridLayout"> <layout class="QGridLayout" name="gridLayout">
<item row="0" column="0"> <item row="0" column="0">
<widget class="QLabel" name="label"> <widget class="QLabel" name="label">
<property name="font">
<font>
<weight>75</weight>
<bold>true</bold>
</font>
</property>
<property name="text"> <property name="text">
<string>Customize the actions in:</string> <string>Choose the &amp;toolbar to customize:</string>
</property>
<property name="buddy">
<cstring>what</cstring>
</property> </property>
</widget> </widget>
</item> </item>
<item row="0" column="1" colspan="3"> <item row="0" column="1" colspan="3">
<widget class="QComboBox" name="what"> <widget class="QComboBox" name="what">
<property name="font">
<font>
<weight>75</weight>
<bold>true</bold>
</font>
</property>
<property name="sizeAdjustPolicy"> <property name="sizeAdjustPolicy">
<enum>QComboBox::AdjustToMinimumContentsLengthWithIcon</enum> <enum>QComboBox::AdjustToMinimumContentsLengthWithIcon</enum>
</property> </property>

View File

@@ -6,21 +6,23 @@ __license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
+import re, urllib
+from contextlib import closing
+from lxml import html
from PyQt4.Qt import QUrl
+from calibre import browser
from calibre.gui2 import open_url
-from calibre.gui2.store.amazon_plugin import AmazonKindleStore
+from calibre.gui2.store import StorePlugin
+from calibre.gui2.store.search_result import SearchResult
-class AmazonDEKindleStore(AmazonKindleStore):
+class AmazonDEKindleStore(StorePlugin):
    '''
    For comments on the implementation, please see amazon_plugin.py
    '''
-    search_url = 'http://www.amazon.de/s/?url=search-alias%3Ddigital-text&field-keywords='
-    details_url = 'http://amazon.de/dp/'
-    drm_search_text = u'Gleichzeitige Verwendung von Geräten'
-    drm_free_text = u'Keine Einschränkung'
    def open(self, parent=None, detail_item=None, external=False):
        aff_id = {'tag': 'charhale0a-21'}
        store_link = ('http://www.amazon.de/gp/redirect.html?ie=UTF8&site-redirect=de'
@@ -32,3 +34,94 @@ class AmazonDEKindleStore(AmazonKindleStore):
                      '&location=http://www.amazon.de/dp/%(asin)s&site-redirect=de'
                      '&tag=%(tag)s&linkCode=ur2&camp=1638&creative=6742') % aff_id
        open_url(QUrl(store_link))
def search(self, query, max_results=10, timeout=60):
search_url = 'http://www.amazon.de/s/?url=search-alias%3Ddigital-text&field-keywords='
url = search_url + urllib.quote_plus(query)
br = browser()
counter = max_results
with closing(br.open(url, timeout=timeout)) as f:
doc = html.fromstring(f.read())
# Amazon has two results pages.
is_shot = doc.xpath('boolean(//div[@id="shotgunMainResults"])')
# Horizontal grid of books.
if is_shot:
data_xpath = '//div[contains(@class, "result")]'
format_xpath = './/div[@class="productTitle"]/text()'
cover_xpath = './/div[@class="productTitle"]//img/@src'
# Vertical list of books.
else:
data_xpath = '//div[@class="productData"]'
format_xpath = './/span[@class="format"]/text()'
cover_xpath = '../div[@class="productImage"]/a/img/@src'
for data in doc.xpath(data_xpath):
if counter <= 0:
break
                # Even though we are searching digital-text only, Amazon will still
                # put in results for non-Kindle books (author pages). So we need
# to explicitly check if the item is a Kindle book and ignore it
# if it isn't.
format = ''.join(data.xpath(format_xpath))
if 'kindle' not in format.lower():
continue
# We must have an asin otherwise we can't easily reference the
# book later.
asin_href = None
asin_a = data.xpath('.//div[@class="productTitle"]/a[1]')
if asin_a:
asin_href = asin_a[0].get('href', '')
m = re.search(r'/dp/(?P<asin>.+?)(/|$)', asin_href)
if m:
asin = m.group('asin')
else:
continue
else:
continue
cover_url = ''.join(data.xpath(cover_xpath))
title = ''.join(data.xpath('.//div[@class="productTitle"]/a/text()'))
price = ''.join(data.xpath('.//div[@class="newPrice"]/span/text()'))
if is_shot:
author = format.split(' von ')[-1]
else:
author = ''.join(data.xpath('.//div[@class="productTitle"]/span[@class="ptBrand"]/text()'))
author = author.split(' von ')[-1]
counter -= 1
s = SearchResult()
s.cover_url = cover_url.strip()
s.title = title.strip()
s.author = author.strip()
s.price = price.strip()
s.detail_item = asin.strip()
s.formats = 'Kindle'
yield s
def get_details(self, search_result, timeout):
drm_search_text = u'Gleichzeitige Verwendung von Geräten'
drm_free_text = u'Keine Einschränkung'
url = 'http://amazon.de/dp/'
br = browser()
with closing(br.open(url + search_result.detail_item, timeout=timeout)) as nf:
idata = html.fromstring(nf.read())
if idata.xpath('boolean(//div[@class="content"]//li/b[contains(text(), "' +
drm_search_text + '")])'):
if idata.xpath('boolean(//div[@class="content"]//li[contains(., "' +
drm_free_text + '") and contains(b, "' +
drm_search_text + '")])'):
search_result.drm = SearchResult.DRM_UNLOCKED
else:
search_result.drm = SearchResult.DRM_UNKNOWN
else:
search_result.drm = SearchResult.DRM_LOCKED
return True
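As an aside, here is a tiny standalone illustration (not part of the commit; the sample href is invented) of how the /dp/ regular expression used in the search method above extracts the ASIN from a result link:

    import re

    # Invented sample href of the kind found on an Amazon results page.
    asin_href = '/Some-Kindle-Book/dp/B004XYZ123/ref=sr_1_1?ie=UTF8'
    m = re.search(r'/dp/(?P<asin>.+?)(/|$)', asin_href)
    if m:
        print(m.group('asin'))  # -> B004XYZ123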

View File

@@ -131,16 +131,22 @@ class AmazonKindleStore(StorePlugin):
            # Amazon has two results pages.
            is_shot = doc.xpath('boolean(//div[@id="shotgunMainResults"])')
-           # Horizontal grid of books.
+           # Horizontal grid of books. Search "Paolo Bacigalupi"
            if is_shot:
                data_xpath = '//div[contains(@class, "result")]'
-               format_xpath = './/div[@class="productTitle"]/text()'
+               format_xpath = './/div[@class="productTitle"]//text()'
+               asin_xpath = './/div[@class="productTitle"]//a'
                cover_xpath = './/div[@class="productTitle"]//img/@src'
+               title_xpath = './/div[@class="productTitle"]/a//text()'
+               price_xpath = './/div[@class="newPrice"]/span/text()'
-           # Vertical list of books.
+           # Vertical list of books. Search "martin"
            else:
-               data_xpath = '//div[@class="productData"]'
-               format_xpath = './/span[@class="format"]/text()'
-               cover_xpath = '../div[@class="productImage"]/a/img/@src'
+               data_xpath = '//div[contains(@class, "results")]//div[contains(@class, "result")]'
+               format_xpath = './/span[@class="binding"]//text()'
+               asin_xpath = './/div[@class="image"]/a[1]'
+               cover_xpath = './/img[@class="productImage"]/@src'
+               title_xpath = './/a[@class="title"]/text()'
+               price_xpath = './/span[@class="price"]/text()'
            for data in doc.xpath(data_xpath):
                if counter <= 0:
@@ -157,7 +163,7 @@ class AmazonKindleStore(StorePlugin):
                # We must have an asin otherwise we can't easily reference the
                # book later.
                asin_href = None
-               asin_a = data.xpath('.//div[@class="productTitle"]/a[1]')
+               asin_a = data.xpath(asin_xpath)
                if asin_a:
                    asin_href = asin_a[0].get('href', '')
                    m = re.search(r'/dp/(?P<asin>.+?)(/|$)', asin_href)
@@ -170,14 +176,14 @@ class AmazonKindleStore(StorePlugin):
                cover_url = ''.join(data.xpath(cover_xpath))
-               title = ''.join(data.xpath('.//div[@class="productTitle"]/a/text()'))
-               price = ''.join(data.xpath('.//div[@class="newPrice"]/span/text()'))
+               title = ''.join(data.xpath(title_xpath))
+               price = ''.join(data.xpath(price_xpath))
                if is_shot:
                    author = format.split(' by ')[-1]
                else:
-                   author = ''.join(data.xpath('.//div[@class="productTitle"]/span[@class="ptBrand"]/text()'))
-                   author = author.split(' by ')[-1]
+                   author = ''.join(data.xpath('.//span[@class="ptBrand"]/text()'))
+                   author = author.split('by ')[-1]
                counter -= 1

View File

@ -15,17 +15,14 @@ from PyQt4.Qt import QUrl
from calibre import browser from calibre import browser
from calibre.gui2 import open_url from calibre.gui2 import open_url
from calibre.gui2.store.amazon_plugin import AmazonKindleStore from calibre.gui2.store import StorePlugin
from calibre.gui2.store.search_result import SearchResult from calibre.gui2.store.search_result import SearchResult
class AmazonUKKindleStore(AmazonKindleStore): class AmazonUKKindleStore(StorePlugin):
''' '''
For comments on the implementation, please see amazon_plugin.py For comments on the implementation, please see amazon_plugin.py
''' '''
search_url = 'http://www.amazon.co.uk/s/?url=search-alias%3Ddigital-text&field-keywords='
details_url = 'http://amazon.co.uk/dp/'
def open(self, parent=None, detail_item=None, external=False): def open(self, parent=None, detail_item=None, external=False):
aff_id = {'tag': 'calcharles-21'} aff_id = {'tag': 'calcharles-21'}
store_link = 'http://www.amazon.co.uk/gp/redirect.html?ie=UTF8&location=http://www.amazon.co.uk/Kindle-eBooks/b?ie=UTF8&node=341689031&ref_=sa_menu_kbo2&tag=%(tag)s&linkCode=ur2&camp=1634&creative=19450' % aff_id store_link = 'http://www.amazon.co.uk/gp/redirect.html?ie=UTF8&location=http://www.amazon.co.uk/Kindle-eBooks/b?ie=UTF8&node=341689031&ref_=sa_menu_kbo2&tag=%(tag)s&linkCode=ur2&camp=1634&creative=19450' % aff_id
@ -36,7 +33,8 @@ class AmazonUKKindleStore(AmazonKindleStore):
open_url(QUrl(store_link)) open_url(QUrl(store_link))
def search(self, query, max_results=10, timeout=60): def search(self, query, max_results=10, timeout=60):
url = self.search_url + urllib.quote_plus(query) search_url = 'http://www.amazon.co.uk/s/?url=search-alias%3Ddigital-text&field-keywords='
url = search_url + urllib.quote_plus(query)
br = browser() br = browser()
counter = max_results counter = max_results
@ -75,15 +73,18 @@ class AmazonUKKindleStore(AmazonKindleStore):
s.title = title.strip() s.title = title.strip()
s.price = price.strip() s.price = price.strip()
s.detail_item = asin.strip() s.detail_item = asin.strip()
s.formats = 'Kindle' s.formats = ''
if is_shot: if is_shot:
# Amazon UK does not include the author on the grid layout # Amazon UK does not include the author on the grid layout
s.author = '' s.author = ''
self.get_details(s, timeout) self.get_details(s, timeout)
if s.formats != 'Kindle':
continue
else: else:
author = ''.join(data.xpath('.//div[@class="productTitle"]/span[@class="ptBrand"]/text()')) author = ''.join(data.xpath('.//div[@class="productTitle"]/span[@class="ptBrand"]/text()'))
s.author = author.split(' by ')[-1].strip() s.author = author.split(' by ')[-1].strip()
s.formats = 'Kindle'
yield s yield s
@ -92,18 +93,23 @@ class AmazonUKKindleStore(AmazonKindleStore):
if search_result.drm: if search_result.drm:
return return
url = self.details_url url = 'http://amazon.co.uk/dp/'
drm_search_text = u'Simultaneous Device Usage'
drm_free_text = u'Unlimited'
br = browser() br = browser()
with closing(br.open(url + search_result.detail_item, timeout=timeout)) as nf: with closing(br.open(url + search_result.detail_item, timeout=timeout)) as nf:
idata = html.fromstring(nf.read()) idata = html.fromstring(nf.read())
if not search_result.author: if not search_result.author:
search_result.author = ''.join(idata.xpath('//div[@class="buying" and contains(., "Author")]/a/text()')) search_result.author = ''.join(idata.xpath('//div[@class="buying" and contains(., "Author")]/a/text()'))
is_kindle = idata.xpath('boolean(//div[@class="buying"]/h1/span/span[contains(text(), "Kindle Edition")])')
if is_kindle:
search_result.formats = 'Kindle'
if idata.xpath('boolean(//div[@class="content"]//li/b[contains(text(), "' + if idata.xpath('boolean(//div[@class="content"]//li/b[contains(text(), "' +
self.drm_search_text + '")])'): drm_search_text + '")])'):
if idata.xpath('boolean(//div[@class="content"]//li[contains(., "' + if idata.xpath('boolean(//div[@class="content"]//li[contains(., "' +
self.drm_free_text + '") and contains(b, "' + drm_free_text + '") and contains(b, "' +
self.drm_search_text + '")])'): drm_search_text + '")])'):
search_result.drm = SearchResult.DRM_UNLOCKED search_result.drm = SearchResult.DRM_UNLOCKED
else: else:
search_result.drm = SearchResult.DRM_UNKNOWN search_result.drm = SearchResult.DRM_UNKNOWN

View File

@ -8,7 +8,6 @@ __docformat__ = 'restructuredtext en'
import random import random
import re import re
import urllib
from contextlib import closing from contextlib import closing
from lxml import html from lxml import html
@ -47,26 +46,26 @@ class BNStore(BasicStoreConfig, StorePlugin):
d.exec_() d.exec_()
def search(self, query, max_results=10, timeout=60): def search(self, query, max_results=10, timeout=60):
url = 'http://productsearch.barnesandnoble.com/search/results.aspx?STORE=EBOOK&SZE=%s&WRD=' % max_results query = query.replace(' ', '-')
url += urllib.quote_plus(query) url = 'http://www.barnesandnoble.com/s/%s?store=ebook&sze=%s' % (query, max_results)
br = browser() br = browser()
counter = max_results counter = max_results
with closing(br.open(url, timeout=timeout)) as f: with closing(br.open(url, timeout=timeout)) as f:
doc = html.fromstring(f.read()) doc = html.fromstring(f.read())
for data in doc.xpath('//ul[contains(@class, "wgt-search-results-display")]/li[contains(@class, "search-result-item") and contains(@class, "nook-result-item")]'): for data in doc.xpath('//ul[contains(@class, "result-set")]/li[contains(@class, "result")]'):
if counter <= 0: if counter <= 0:
break break
id = ''.join(data.xpath('.//div[contains(@class, "wgt-product-image-module")]/a/@href')) id = ''.join(data.xpath('.//div[contains(@class, "image")]/a/@href'))
if not id: if not id:
continue continue
cover_url = ''.join(data.xpath('.//div[contains(@class, "wgt-product-image-module")]/a/img/@src')) cover_url = ''.join(data.xpath('.//div[contains(@class, "image")]//img/@src'))
title = ''.join(data.xpath('.//span[@class="product-title"]/a/text()')) title = ''.join(data.xpath('.//p[@class="title"]//span[@class="name"]/text()'))
author = ', '.join(data.xpath('.//span[@class="contributers-line"]/a/text()')) author = ', '.join(data.xpath('.//ul[@class="contributors"]//li[position()>1]//a/text()'))
price = ''.join(data.xpath('.//span[contains(@class, "onlinePriceValue2")]/text()')) price = ''.join(data.xpath('.//table[@class="displayed-formats"]//a[@class="subtle"]/text()'))
counter -= 1 counter -= 1
@ -74,9 +73,9 @@ class BNStore(BasicStoreConfig, StorePlugin):
s.cover_url = cover_url s.cover_url = cover_url
s.title = title.strip() s.title = title.strip()
s.author = author.strip() s.author = author.strip()
s.price = price s.price = price.strip()
s.detail_item = id.strip() s.detail_item = id.strip()
s.drm = SearchResult.DRM_UNKNOWN s.drm = SearchResult.DRM_UNKNOWN
s.formats = 'Nook' s.formats = 'Nook'
yield s yield s

View File

@@ -2,7 +2,8 @@ This is a list of stores that objected, declined
or asked not to be included in the store integration.
* Borders (http://www.borders.com/)
-* WH Smith (http://www.whsmith.co.uk/)
-  Refused to permit signing up for the affiliate program
+* Indigo (http://www.chapters.indigo.ca/)
* Libraria Rizzoli (http://libreriarizzoli.corriere.it/).
  No reply with two attempts over 2 weeks
+* WH Smith (http://www.whsmith.co.uk/)
+  Refused to permit signing up for the affiliate program

View File

@@ -71,7 +71,7 @@ class NextoStore(BasicStoreConfig, StorePlugin):
                    author = ''
                    with closing(br.open('http://www.nexto.pl/' + id.strip(), timeout=timeout/4)) as nf:
                        idata = html.fromstring(nf.read())
-                       author = ''.join(idata.xpath('//div[@class="basic_data"]/p[1]/b/a/text()'))
+                       author = ', '.join(idata.xpath('//div[@class="basic_data"]/p[1]/b/a/text()'))
                    counter -= 1

View File

@ -82,6 +82,8 @@ class SearchDialog(QDialog, Ui_Dialog):
self.restore_state() self.restore_state()
def setup_store_checks(self): def setup_store_checks(self):
first_run = self.config.get('first_run', True)
# Add check boxes for each store so the user # Add check boxes for each store so the user
# can disable searching specific stores on a # can disable searching specific stores on a
# per search basis. # per search basis.
@ -98,7 +100,7 @@ class SearchDialog(QDialog, Ui_Dialog):
icon = QIcon(I('donate.png')) icon = QIcon(I('donate.png'))
for i, x in enumerate(sorted(self.gui.istores.keys(), key=lambda x: x.lower())): for i, x in enumerate(sorted(self.gui.istores.keys(), key=lambda x: x.lower())):
cbox = QCheckBox(x) cbox = QCheckBox(x)
cbox.setChecked(existing.get(x, False)) cbox.setChecked(existing.get(x, first_run))
store_list_layout.addWidget(cbox, i, 0, 1, 1) store_list_layout.addWidget(cbox, i, 0, 1, 1)
if self.gui.istores[x].base_plugin.affiliate: if self.gui.istores[x].base_plugin.affiliate:
iw = QLabel(self) iw = QLabel(self)
@ -108,6 +110,8 @@ class SearchDialog(QDialog, Ui_Dialog):
self.store_checks[x] = cbox self.store_checks[x] = cbox
store_list_layout.setRowStretch(store_list_layout.rowCount(), 10) store_list_layout.setRowStretch(store_list_layout.rowCount(), 10)
self.store_list.setWidget(stores_check_widget) self.store_list.setWidget(stores_check_widget)
self.config['first_run'] = False
def build_adv_search(self): def build_adv_search(self):
adv = AdvSearchBuilderDialog(self) adv = AdvSearchBuilderDialog(self)
@ -281,11 +285,11 @@ class SearchDialog(QDialog, Ui_Dialog):
tab_widget.setCurrentIndex(tab_index) tab_widget.setCurrentIndex(tab_index)
d.exec_() d.exec_()
# Save dialog state. # Save dialog state.
self.config['config_dialog_geometry'] = bytearray(d.saveGeometry()) self.config['config_dialog_geometry'] = bytearray(d.saveGeometry())
self.config['config_dialog_tab_index'] = tab_widget.currentIndex() self.config['config_dialog_tab_index'] = tab_widget.currentIndex()
search_config_widget.save_settings() search_config_widget.save_settings()
self.config_changed() self.config_changed()
self.gui.load_store_plugins() self.gui.load_store_plugins()

View File

@ -6,15 +6,15 @@
<rect> <rect>
<x>0</x> <x>0</x>
<y>0</y> <y>0</y>
<width>584</width> <width>872</width>
<height>533</height> <height>610</height>
</rect> </rect>
</property> </property>
<property name="windowTitle"> <property name="windowTitle">
<string>Get Books</string> <string>Get Books</string>
</property> </property>
<property name="windowIcon"> <property name="windowIcon">
<iconset> <iconset resource="../../../../../resources/images.qrc">
<normaloff>:/images/store.png</normaloff>:/images/store.png</iconset> <normaloff>:/images/store.png</normaloff>:/images/store.png</iconset>
</property> </property>
<property name="sizeGripEnabled"> <property name="sizeGripEnabled">
@ -82,8 +82,8 @@
<rect> <rect>
<x>0</x> <x>0</x>
<y>0</y> <y>0</y>
<width>125</width> <width>173</width>
<height>127</height> <height>106</height>
</rect> </rect>
</property> </property>
</widget> </widget>
@ -255,7 +255,7 @@
</customwidget> </customwidget>
</customwidgets> </customwidgets>
<resources> <resources>
<include location="../../../../resources/images.qrc"/> <include location="../../../../../resources/images.qrc"/>
</resources> </resources>
<connections> <connections>
<connection> <connection>

View File

@ -0,0 +1,80 @@
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
__license__ = 'GPL 3'
__copyright__ = '2011, Tomasz Długosz <tomek3d@gmail.com>'
__docformat__ = 'restructuredtext en'
import re
import urllib
from contextlib import closing
from lxml import html
from PyQt4.Qt import QUrl
from calibre import browser, url_slash_cleaner
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog
class ZixoStore(BasicStoreConfig, StorePlugin):
def open(self, parent=None, detail_item=None, external=False):
url = 'http://zixo.pl/e_ksiazki/start/'
if external or self.config.get('open_external', False):
open_url(QUrl(url_slash_cleaner(detail_item if detail_item else url)))
else:
d = WebStoreDialog(self.gui, url, parent, detail_item)
d.setWindowTitle(self.name)
d.set_tags(self.config.get('tags', ''))
d.exec_()
def search(self, query, max_results=10, timeout=60):
url = 'http://zixo.pl/wyszukiwarka/?search=' + urllib.quote(query.encode('utf-8')) + '&product_type=0'
br = browser()
counter = max_results
with closing(br.open(url, timeout=timeout)) as f:
doc = html.fromstring(f.read())
for data in doc.xpath('//div[@class="productInline"]'):
if counter <= 0:
break
id = ''.join(data.xpath('.//a[@class="productThumb"]/@href'))
if not id:
continue
cover_url = ''.join(data.xpath('.//a[@class="productThumb"]/img/@src'))
title = ''.join(data.xpath('.//a[@class="title"]/text()'))
author = ''.join(data.xpath('.//div[@class="productDescription"]/span[1]/a/text()'))
price = ''.join(data.xpath('.//div[@class="priceList"]/span/text()'))
price = re.sub('\.', ',', price)
counter -= 1
s = SearchResult()
s.cover_url = cover_url
s.title = title.strip()
s.author = author.strip()
s.price = price
s.detail_item = 'http://zixo.pl' + id.strip()
s.drm = SearchResult.DRM_LOCKED
yield s
def get_details(self, search_result, timeout):
br = browser()
with closing(br.open(search_result.detail_item, timeout=timeout)) as nf:
idata = html.fromstring(nf.read())
formats = ''.join(idata.xpath('//ul[@class="prop"]/li[3]/text()'))
formats = re.sub(r'\(.*\)', '', formats)
formats = re.sub('Zixo Reader', 'ZIXO', formats)
search_result.formats = formats
return True

View File

@@ -509,7 +509,8 @@ class ResultCache(SearchQueryParser): # {{{
        valq_mkind, valq = self._matchkind(query)
        loc = self.field_metadata[location]['rec_index']
-       split_char = self.field_metadata[location]['is_multiple']
+       split_char = self.field_metadata[location]['is_multiple'].get(
+               'cache_to_list', ',')
        for id_ in candidates:
            item = self._data[id_]
            if item is None:
@@ -665,7 +666,8 @@ class ResultCache(SearchQueryParser): # {{{
            if fm['is_multiple'] and \
                    len(query) > 1 and query.startswith('#') and \
                    query[1:1] in '=<>!':
-               vf = lambda item, loc=fm['rec_index'], ms=fm['is_multiple']:\
+               vf = lambda item, loc=fm['rec_index'], \
+                           ms=fm['is_multiple']['cache_to_list']:\
                        len(item[loc].split(ms)) if item[loc] is not None else 0
                return self.get_numeric_matches(location, query[1:],
                                                candidates, val_func=vf)
@@ -703,7 +705,8 @@ class ResultCache(SearchQueryParser): # {{{
                        ['composite', 'text', 'comments', 'series', 'enumeration']:
                    exclude_fields.append(db_col[x])
                col_datatype[db_col[x]] = self.field_metadata[x]['datatype']
-               is_multiple_cols[db_col[x]] = self.field_metadata[x]['is_multiple']
+               is_multiple_cols[db_col[x]] = \
+                       self.field_metadata[x]['is_multiple'].get('cache_to_list', None)
        try:
            rating_query = int(query) * 2
@@ -1045,13 +1048,14 @@ class SortKeyGenerator(object):
            elif dt in ('text', 'comments', 'composite', 'enumeration'):
                if val:
-                   sep = fm['is_multiple']
-                   if sep:
-                       if fm['display'].get('is_names', False):
-                           val = sep.join(
-                               [author_to_author_sort(v) for v in val.split(sep)])
+                   if fm['is_multiple']:
+                       jv = fm['is_multiple']['list_to_ui']
+                       sv = fm['is_multiple']['cache_to_list']
+                       if '&' in jv:
+                           val = jv.join(
+                               [author_to_author_sort(v) for v in val.split(sv)])
                        else:
-                           val = sep.join(sorted(val.split(sep),
+                           val = jv.join(sorted(val.split(sv),
                                          key=self.string_sort_key))
                val = self.string_sort_key(val)

View File

@@ -79,16 +79,19 @@ class Rule(object): # {{{
        if dt == 'bool':
            return self.bool_condition(col, action, val)
-       if dt in ('int', 'float', 'rating'):
+       if dt in ('int', 'float'):
            return self.number_condition(col, action, val)
+       if dt == 'rating':
+           return self.rating_condition(col, action, val)
        if dt == 'datetime':
            return self.date_condition(col, action, val)
        if dt in ('comments', 'series', 'text', 'enumeration', 'composite'):
            ism = m.get('is_multiple', False)
            if ism:
-               return self.multiple_condition(col, action, val, ',' if ism == '|' else ism)
+               return self.multiple_condition(col, action, val, ism['ui_to_list'])
            return self.text_condition(col, action, val)
    def identifiers_condition(self, col, action, val):
@@ -114,9 +117,16 @@ class Rule(object): # {{{
            'lt': ('1', '', ''),
            'gt': ('', '', '1')
        }[action]
-       lt, eq, gt = '', '1', ''
        return "cmp(raw_field('%s'), %s, '%s', '%s', '%s')" % (col, val, lt, eq, gt)
+   def rating_condition(self, col, action, val):
+       lt, eq, gt = {
+           'eq': ('', '1', ''),
+           'lt': ('1', '', ''),
+           'gt': ('', '', '1')
+       }[action]
+       return "cmp(field('%s'), %s, '%s', '%s', '%s')" % (col, val, lt, eq, gt)
    def date_condition(self, col, action, val):
        lt, eq, gt = {
            'eq': ('', '1', ''),
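To make the generated template concrete, this is an illustrative worked example (not from the commit) of what the new rating_condition produces for an "equals 4" rule, following the string substitution shown above:

    # Illustrative only: substituting col='rating', val=4 and the 'eq' action
    # into the format string used by rating_condition above.
    template = "cmp(field('%s'), %s, '%s', '%s', '%s')" % ('rating', 4, '', '1', '')
    print(template)  # -> cmp(field('rating'), 4, '', '1', '')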

View File

@@ -78,6 +78,18 @@ class CustomColumns(object):
                }
        if data['display'] is None:
            data['display'] = {}
+       # set up the is_multiple separator dict
+       if data['is_multiple']:
+           if data['display'].get('is_names', False):
+               seps = {'cache_to_list': '|', 'ui_to_list': '&', 'list_to_ui': ' & '}
+           elif data['datatype'] == 'composite':
+               seps = {'cache_to_list': ',', 'ui_to_list': ',', 'list_to_ui': ', '}
+           else:
+               seps = {'cache_to_list': '|', 'ui_to_list': ',', 'list_to_ui': ', '}
+       else:
+           seps = {}
+       data['multiple_seps'] = seps
        table, lt = self.custom_table_names(data['num'])
        if table not in custom_tables or (data['normalized'] and lt not in
                custom_tables):
@@ -119,7 +131,7 @@ class CustomColumns(object):
            if x is None:
                return []
            if isinstance(x, (str, unicode, bytes)):
-               x = x.split('&' if d['display'].get('is_names', False) else',')
+               x = x.split(d['multiple_seps']['ui_to_list'])
            x = [y.strip() for y in x if y.strip()]
            x = [y.decode(preferred_encoding, 'replace') if not isinstance(y,
                unicode) else y for y in x]
@@ -181,10 +193,7 @@ class CustomColumns(object):
                is_category = True
            else:
                is_category = False
-           if v['is_multiple']:
-               is_m = ',' if v['datatype'] == 'composite' else '|'
-           else:
-               is_m = None
+           is_m = v['multiple_seps']
            tn = 'custom_column_{0}'.format(v['num'])
            self.field_metadata.add_custom_field(label=v['label'],
                    table=tn, column='value', datatype=v['datatype'],
@@ -200,7 +209,7 @@ class CustomColumns(object):
        row = self.data._data[idx] if index_is_id else self.data[idx]
        ans = row[self.FIELD_MAP[data['num']]]
        if data['is_multiple'] and data['datatype'] == 'text':
-           ans = ans.split('|') if ans else []
+           ans = ans.split(data['multiple_seps']['cache_to_list']) if ans else []
            if data['display'].get('sort_alpha', False):
                ans.sort(cmp=lambda x,y:cmp(x.lower(), y.lower()))
        return ans
@@ -566,14 +575,21 @@ class CustomColumns(object):
    def custom_columns_in_meta(self):
        lines = {}
        for data in self.custom_column_label_map.values():
-           display = data['display']
            table, lt = self.custom_table_names(data['num'])
            if data['normalized']:
                query = '%s.value'
                if data['is_multiple']:
-                   query = 'group_concat(%s.value, "|")'
-                   if not display.get('sort_alpha', False):
-                       query = 'sort_concat(link.id, %s.value)'
+                   # query = 'group_concat(%s.value, "{0}")'.format(
+                   #             data['multiple_seps']['cache_to_list'])
+                   # if not display.get('sort_alpha', False):
+                   if data['multiple_seps']['cache_to_list'] == '|':
+                       query = 'sortconcat_bar(link.id, %s.value)'
+                   elif data['multiple_seps']['cache_to_list'] == '&':
+                       query = 'sortconcat_amper(link.id, %s.value)'
+                   else:
+                       prints('WARNING: unknown value in multiple_seps',
+                              data['multiple_seps']['cache_to_list'])
+                       query = 'sortconcat_bar(link.id, %s.value)'
                line = '''(SELECT {query} FROM {lt} AS link INNER JOIN
                        {table} ON(link.value={table}.id) WHERE link.book=books.id)
                        custom_{num}

View File

@@ -1250,7 +1250,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
            dex = field['rec_index']
            for book in self.data.iterall():
                if field['is_multiple']:
-                   vals = [v.strip() for v in book[dex].split(field['is_multiple'])
+                   vals = [v.strip() for v in
+                           book[dex].split(field['is_multiple']['cache_to_list'])
                            if v.strip()]
                    if id_ in vals:
                        ans.add(book[0])
@@ -1378,7 +1379,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
                tcategories[category] = {}
                # create a list of category/field_index for the books scan to use.
                # This saves iterating through field_metadata for each book
-               md.append((category, cat['rec_index'], cat['is_multiple'], False))
+               md.append((category, cat['rec_index'],
+                          cat['is_multiple'].get('cache_to_list', None), False))
        for category in tb_cats.iterkeys():
            cat = tb_cats[category]
@@ -1386,7 +1388,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
                    cat['display'].get('make_category', False):
                tids[category] = {}
                tcategories[category] = {}
-               md.append((category, cat['rec_index'], cat['is_multiple'],
+               md.append((category, cat['rec_index'],
+                          cat['is_multiple'].get('cache_to_list', None),
                           cat['datatype'] == 'composite'))
        #print 'end phase "collection":', time.clock() - last, 'seconds'
        #last = time.clock()

View File

@@ -50,9 +50,16 @@ class FieldMetadata(dict):
    datatype: the type of information in the field. Valid values are listed in
    VALID_DATA_TYPES below.
-   is_multiple: valid for the text datatype. If None, the field is to be
-   treated as a single term. If not None, it contains a string, and the field
-   is assumed to contain a list of terms separated by that string
+   is_multiple: valid for the text datatype. If {}, the field is to be
+   treated as a single term. If not None, it contains a dict of the form
+       {'cache_to_list': ',',
+        'ui_to_list': ',',
+        'list_to_ui': ', '}
+   where the cache_to_list contains the character used to split the value in
+   the meta2 table, ui_to_list contains the character used to create a list
+   from a value shown in the ui (each resulting value must be strip()ed and
+   empty values removed), and list_to_ui contains the string used in join()
+   to create a displayable string from the list.
    kind == field: is a db field.
    kind == category: standard tag category that isn't a field. see news.
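To make the three separator roles concrete, here is a small illustrative Python sketch (not part of the commit; the sample values are invented) of how such an is_multiple dict moves a tags-like value between the cache, the UI edit box, and the UI display:

    # Illustrative only: mirrors the separator roles described in the docstring above.
    is_multiple = {'cache_to_list': '|', 'ui_to_list': ',', 'list_to_ui': ', '}

    cached = 'Fantasy|Science Fiction|Short Stories'            # value as stored in the cache
    as_list = cached.split(is_multiple['cache_to_list'])        # ['Fantasy', 'Science Fiction', 'Short Stories']

    typed_by_user = 'Fantasy, Horror'                            # what the user types in an edit box
    from_ui = [v.strip() for v in
               typed_by_user.split(is_multiple['ui_to_list']) if v.strip()]

    shown_to_user = is_multiple['list_to_ui'].join(as_list)      # 'Fantasy, Science Fiction, Short Stories'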
@ -97,7 +104,9 @@ class FieldMetadata(dict):
'link_column':'author', 'link_column':'author',
'category_sort':'sort', 'category_sort':'sort',
'datatype':'text', 'datatype':'text',
'is_multiple':',', 'is_multiple':{'cache_to_list': ',',
'ui_to_list': '&',
'list_to_ui': ' & '},
'kind':'field', 'kind':'field',
'name':_('Authors'), 'name':_('Authors'),
'search_terms':['authors', 'author'], 'search_terms':['authors', 'author'],
@ -109,7 +118,7 @@ class FieldMetadata(dict):
'link_column':'series', 'link_column':'series',
'category_sort':'(title_sort(name))', 'category_sort':'(title_sort(name))',
'datatype':'series', 'datatype':'series',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name':_('Series'), 'name':_('Series'),
'search_terms':['series'], 'search_terms':['series'],
@ -119,7 +128,9 @@ class FieldMetadata(dict):
('formats', {'table':None, ('formats', {'table':None,
'column':None, 'column':None,
'datatype':'text', 'datatype':'text',
'is_multiple':',', 'is_multiple':{'cache_to_list': ',',
'ui_to_list': ',',
'list_to_ui': ', '},
'kind':'field', 'kind':'field',
'name':_('Formats'), 'name':_('Formats'),
'search_terms':['formats', 'format'], 'search_terms':['formats', 'format'],
@ -131,7 +142,7 @@ class FieldMetadata(dict):
'link_column':'publisher', 'link_column':'publisher',
'category_sort':'name', 'category_sort':'name',
'datatype':'text', 'datatype':'text',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name':_('Publishers'), 'name':_('Publishers'),
'search_terms':['publisher'], 'search_terms':['publisher'],
@ -143,7 +154,7 @@ class FieldMetadata(dict):
'link_column':'rating', 'link_column':'rating',
'category_sort':'rating', 'category_sort':'rating',
'datatype':'rating', 'datatype':'rating',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name':_('Ratings'), 'name':_('Ratings'),
'search_terms':['rating'], 'search_terms':['rating'],
@ -154,7 +165,7 @@ class FieldMetadata(dict):
'column':'name', 'column':'name',
'category_sort':'name', 'category_sort':'name',
'datatype':None, 'datatype':None,
'is_multiple':None, 'is_multiple':{},
'kind':'category', 'kind':'category',
'name':_('News'), 'name':_('News'),
'search_terms':[], 'search_terms':[],
@ -166,7 +177,9 @@ class FieldMetadata(dict):
'link_column': 'tag', 'link_column': 'tag',
'category_sort':'name', 'category_sort':'name',
'datatype':'text', 'datatype':'text',
'is_multiple':',', 'is_multiple':{'cache_to_list': ',',
'ui_to_list': ',',
'list_to_ui': ', '},
'kind':'field', 'kind':'field',
'name':_('Tags'), 'name':_('Tags'),
'search_terms':['tags', 'tag'], 'search_terms':['tags', 'tag'],
@ -176,7 +189,9 @@ class FieldMetadata(dict):
('identifiers', {'table':None, ('identifiers', {'table':None,
'column':None, 'column':None,
'datatype':'text', 'datatype':'text',
'is_multiple':',', 'is_multiple':{'cache_to_list': ',',
'ui_to_list': ',',
'list_to_ui': ', '},
'kind':'field', 'kind':'field',
'name':_('Identifiers'), 'name':_('Identifiers'),
'search_terms':['identifiers', 'identifier', 'isbn'], 'search_terms':['identifiers', 'identifier', 'isbn'],
@ -186,7 +201,7 @@ class FieldMetadata(dict):
('author_sort',{'table':None, ('author_sort',{'table':None,
'column':None, 'column':None,
'datatype':'text', 'datatype':'text',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name':_('Author Sort'), 'name':_('Author Sort'),
'search_terms':['author_sort'], 'search_terms':['author_sort'],
@ -196,7 +211,9 @@ class FieldMetadata(dict):
('au_map', {'table':None, ('au_map', {'table':None,
'column':None, 'column':None,
'datatype':'text', 'datatype':'text',
'is_multiple':',', 'is_multiple':{'cache_to_list': ',',
'ui_to_list': None,
'list_to_ui': None},
'kind':'field', 'kind':'field',
'name':None, 'name':None,
'search_terms':[], 'search_terms':[],
@ -206,7 +223,7 @@ class FieldMetadata(dict):
('comments', {'table':None, ('comments', {'table':None,
'column':None, 'column':None,
'datatype':'text', 'datatype':'text',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name':_('Comments'), 'name':_('Comments'),
'search_terms':['comments', 'comment'], 'search_terms':['comments', 'comment'],
@ -216,7 +233,7 @@ class FieldMetadata(dict):
('cover', {'table':None, ('cover', {'table':None,
'column':None, 'column':None,
'datatype':'int', 'datatype':'int',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name':None, 'name':None,
'search_terms':['cover'], 'search_terms':['cover'],
@ -226,7 +243,7 @@ class FieldMetadata(dict):
('id', {'table':None, ('id', {'table':None,
'column':None, 'column':None,
'datatype':'int', 'datatype':'int',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name':None, 'name':None,
'search_terms':[], 'search_terms':[],
@ -236,7 +253,7 @@ class FieldMetadata(dict):
('last_modified', {'table':None, ('last_modified', {'table':None,
'column':None, 'column':None,
'datatype':'datetime', 'datatype':'datetime',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name':_('Modified'), 'name':_('Modified'),
'search_terms':['last_modified'], 'search_terms':['last_modified'],
@ -246,7 +263,7 @@ class FieldMetadata(dict):
('ondevice', {'table':None, ('ondevice', {'table':None,
'column':None, 'column':None,
'datatype':'text', 'datatype':'text',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name':_('On Device'), 'name':_('On Device'),
'search_terms':['ondevice'], 'search_terms':['ondevice'],
@ -256,7 +273,7 @@ class FieldMetadata(dict):
('path', {'table':None, ('path', {'table':None,
'column':None, 'column':None,
'datatype':'text', 'datatype':'text',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name':_('Path'), 'name':_('Path'),
'search_terms':[], 'search_terms':[],
@ -266,7 +283,7 @@ class FieldMetadata(dict):
('pubdate', {'table':None, ('pubdate', {'table':None,
'column':None, 'column':None,
'datatype':'datetime', 'datatype':'datetime',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name':_('Published'), 'name':_('Published'),
'search_terms':['pubdate'], 'search_terms':['pubdate'],
@ -276,7 +293,7 @@ class FieldMetadata(dict):
('marked', {'table':None, ('marked', {'table':None,
'column':None, 'column':None,
'datatype':'text', 'datatype':'text',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name': None, 'name': None,
'search_terms':['marked'], 'search_terms':['marked'],
@ -286,7 +303,7 @@ class FieldMetadata(dict):
('series_index',{'table':None, ('series_index',{'table':None,
'column':None, 'column':None,
'datatype':'float', 'datatype':'float',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name':None, 'name':None,
'search_terms':['series_index'], 'search_terms':['series_index'],
@ -296,7 +313,7 @@ class FieldMetadata(dict):
('sort', {'table':None, ('sort', {'table':None,
'column':None, 'column':None,
'datatype':'text', 'datatype':'text',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name':_('Title Sort'), 'name':_('Title Sort'),
'search_terms':['title_sort'], 'search_terms':['title_sort'],
@ -306,7 +323,7 @@ class FieldMetadata(dict):
('size', {'table':None, ('size', {'table':None,
'column':None, 'column':None,
'datatype':'float', 'datatype':'float',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name':_('Size'), 'name':_('Size'),
'search_terms':['size'], 'search_terms':['size'],
@ -316,7 +333,7 @@ class FieldMetadata(dict):
('timestamp', {'table':None, ('timestamp', {'table':None,
'column':None, 'column':None,
'datatype':'datetime', 'datatype':'datetime',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name':_('Date'), 'name':_('Date'),
'search_terms':['date'], 'search_terms':['date'],
@ -326,7 +343,7 @@ class FieldMetadata(dict):
('title', {'table':None, ('title', {'table':None,
'column':None, 'column':None,
'datatype':'text', 'datatype':'text',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name':_('Title'), 'name':_('Title'),
'search_terms':['title'], 'search_terms':['title'],
@ -336,7 +353,7 @@ class FieldMetadata(dict):
('uuid', {'table':None, ('uuid', {'table':None,
'column':None, 'column':None,
'datatype':'text', 'datatype':'text',
'is_multiple':None, 'is_multiple':{},
'kind':'field', 'kind':'field',
'name':None, 'name':None,
'search_terms':[], 'search_terms':[],
@ -508,7 +525,7 @@ class FieldMetadata(dict):
if datatype == 'series': if datatype == 'series':
key += '_index' key += '_index'
self._tb_cats[key] = {'table':None, 'column':None, self._tb_cats[key] = {'table':None, 'column':None,
'datatype':'float', 'is_multiple':None, 'datatype':'float', 'is_multiple':{},
'kind':'field', 'name':'', 'kind':'field', 'name':'',
'search_terms':[key], 'label':label+'_index', 'search_terms':[key], 'label':label+'_index',
'colnum':None, 'display':{}, 'colnum':None, 'display':{},
@ -560,7 +577,7 @@ class FieldMetadata(dict):
if icu_lower(label) != label: if icu_lower(label) != label:
st.append(icu_lower(label)) st.append(icu_lower(label))
self._tb_cats[label] = {'table':None, 'column':None, self._tb_cats[label] = {'table':None, 'column':None,
'datatype':None, 'is_multiple':None, 'datatype':None, 'is_multiple':{},
'kind':'user', 'name':name, 'kind':'user', 'name':name,
'search_terms':st, 'is_custom':False, 'search_terms':st, 'is_custom':False,
'is_category':True, 'is_csp': False} 'is_category':True, 'is_csp': False}
@ -570,7 +587,7 @@ class FieldMetadata(dict):
if label in self._tb_cats: if label in self._tb_cats:
raise ValueError('Duplicate user field [%s]'%(label)) raise ValueError('Duplicate user field [%s]'%(label))
self._tb_cats[label] = {'table':None, 'column':None, self._tb_cats[label] = {'table':None, 'column':None,
'datatype':None, 'is_multiple':None, 'datatype':None, 'is_multiple':{},
'kind':'search', 'name':name, 'kind':'search', 'name':name,
'search_terms':[], 'is_custom':False, 'search_terms':[], 'is_custom':False,
'is_category':True, 'is_csp': False} 'is_category':True, 'is_csp': False}

View File

@@ -171,7 +171,7 @@ class Restore(Thread):
                for x in fields:
                    if x in cfm:
                        if x == 'is_multiple':
-                           args.append(cfm[x] is not None)
+                           args.append(bool(cfm[x]))
                        else:
                            args.append(cfm[x])
                if len(args) == len(fields):

View File

@@ -770,7 +770,8 @@ class BrowseServer(object):
            summs.append(self.browse_summary_template.format(**args))
-       return json.dumps('\n'.join(summs), ensure_ascii=False)
+       raw = json.dumps('\n'.join(summs), ensure_ascii=False)
+       return raw
    def browse_render_details(self, id_):
        try:
View File

@@ -231,7 +231,8 @@ class MobileServer(object):
            book['size'] = human_readable(book['size'])
            aus = record[FM['authors']] if record[FM['authors']] else __builtin__._('Unknown')
-           authors = '|'.join([i.replace('|', ',') for i in aus.split(',')])
+           aut_is = CFM['authors']['is_multiple']
+           authors = aut_is['list_to_ui'].join([i.replace('|', ',') for i in aus.split(',')])
            book['authors'] = authors
            book['series_index'] = fmt_sidx(float(record[FM['series_index']]))
            book['series'] = record[FM['series']]
@@ -254,8 +255,10 @@ class MobileServer(object):
                    continue
                if datatype == 'text' and CFM[key]['is_multiple']:
                    book[key] = concat(name,
-                           format_tag_string(val, ',',
-                                             no_tag_count=True))
+                           format_tag_string(val,
+                               CFM[key]['is_multiple']['ui_to_list'],
+                               no_tag_count=True,
+                               joinval=CFM[key]['is_multiple']['list_to_ui']))
                else:
                    book[key] = concat(name, val)

View File

@@ -180,9 +180,12 @@ def ACQUISITION_ENTRY(item, version, db, updated, CFM, CKEYS, prefix):
        if val:
            datatype = CFM[key]['datatype']
            if datatype == 'text' and CFM[key]['is_multiple']:
-               extra.append('%s: %s<br />'%(xml(name), xml(format_tag_string(val, ',',
-                                                           ignore_max=True,
-                                                           no_tag_count=True))))
+               extra.append('%s: %s<br />'%
+                       (xml(name),
+                        xml(format_tag_string(val,
+                            CFM[key]['is_multiple']['ui_to_list'],
+                            ignore_max=True, no_tag_count=True,
+                            joinval=CFM[key]['is_multiple']['list_to_ui']))))
            elif datatype == 'comments':
                extra.append('%s: %s<br />'%(xml(name), comments_to_html(unicode(val))))
            else:

View File

@@ -68,7 +68,7 @@ def strftime(fmt='%Y/%m/%d %H:%M:%S', dt=None):
    except:
        return _strftime(fmt, nowf().timetuple())
-def format_tag_string(tags, sep, ignore_max=False, no_tag_count=False):
+def format_tag_string(tags, sep, ignore_max=False, no_tag_count=False, joinval=', '):
    MAX = sys.maxint if ignore_max else tweaks['max_content_server_tags_shown']
    if tags:
        tlist = [t.strip() for t in tags.split(sep)]
@@ -78,10 +78,10 @@ def format_tag_string(tags, sep, ignore_max=False, no_tag_count=False):
    if len(tlist) > MAX:
        tlist = tlist[:MAX]+['...']
    if no_tag_count:
-       return ', '.join(tlist) if tlist else ''
+       return joinval.join(tlist) if tlist else ''
    else:
        return u'%s:&:%s'%(tweaks['max_content_server_tags_shown'],
-                          ', '.join(tlist)) if tlist else ''
+                          joinval.join(tlist)) if tlist else ''
def quote(s):
    if isinstance(s, unicode):
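A quick illustration of the new joinval keyword (an illustrative call, not from the commit; the tag values are invented):

    # The caller splits on one separator and re-joins with another, per the
    # patched signature above. Output is subject to the sorting and MAX
    # truncation done in the elided body of the function.
    from calibre.library.server.utils import format_tag_string

    csv_tags = 'Fantasy,Horror,Short Stories'
    print(format_tag_string(csv_tags, ',', no_tag_count=True, joinval=' & '))
    # e.g. 'Fantasy & Horror & Short Stories'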

View File

@@ -121,8 +121,12 @@ class XMLServer(object):
                name = CFM[key]['name']
                custcols.append(k)
                if datatype == 'text' and CFM[key]['is_multiple']:
-                   kwargs[k] = concat('#T#'+name, format_tag_string(val,',',
-                                                                    ignore_max=True))
+                   kwargs[k] = \
+                       concat('#T#'+name,
+                              format_tag_string(val,
+                                  CFM[key]['is_multiple']['ui_to_list'],
+                                  ignore_max=True,
+                                  joinval=CFM[key]['is_multiple']['list_to_ui']))
                else:
                    kwargs[k] = concat(name, val)
        kwargs['custcols'] = ','.join(custcols)

View File

@@ -121,9 +121,12 @@ class SortedConcatenate(object):
            return None
        return self.sep.join(map(self.ans.get, sorted(self.ans.keys())))
-class SafeSortedConcatenate(SortedConcatenate):
+class SortedConcatenateBar(SortedConcatenate):
    sep = '|'
+class SortedConcatenateAmper(SortedConcatenate):
+   sep = '&'
class IdentifiersConcat(object):
    '''String concatenation aggregator for the identifiers map'''
    def __init__(self):
@@ -220,7 +223,8 @@ class DBThread(Thread):
        self.conn.execute('pragma cache_size=5000')
        encoding = self.conn.execute('pragma encoding').fetchone()[0]
        self.conn.create_aggregate('sortconcat', 2, SortedConcatenate)
-       self.conn.create_aggregate('sort_concat', 2, SafeSortedConcatenate)
+       self.conn.create_aggregate('sortconcat_bar', 2, SortedConcatenateBar)
+       self.conn.create_aggregate('sortconcat_amper', 2, SortedConcatenateAmper)
        self.conn.create_aggregate('identifiers_concat', 2, IdentifiersConcat)
        load_c_extensions(self.conn)
        self.conn.row_factory = sqlite.Row if self.row_factory else lambda cursor, row : list(row)
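For readers unfamiliar with SQLite aggregate functions, here is a minimal standalone sketch (not calibre code; the table, column and sample values are invented) of how a Python class with step()/finalize() methods, registered through create_aggregate, concatenates grouped values with a chosen separator, which is the idea behind sortconcat_bar and sortconcat_amper above:

    import sqlite3

    class ConcatAmper(object):
        # Toy aggregate: sort by the first argument, join the second with '&'.
        def __init__(self):
            self.vals = []
        def step(self, key, value):
            self.vals.append((key, value))
        def finalize(self):
            return '&'.join(v for _, v in sorted(self.vals))

    conn = sqlite3.connect(':memory:')
    conn.create_aggregate('concat_amper', 2, ConcatAmper)
    conn.execute('CREATE TABLE t (book INTEGER, pos INTEGER, name TEXT)')
    conn.executemany('INSERT INTO t VALUES (?, ?, ?)',
                     [(1, 2, 'Roe, Jane'), (1, 1, 'Doe, John')])
    print(conn.execute('SELECT concat_amper(pos, name) FROM t GROUP BY book').fetchone()[0])
    # -> Doe, John&Roe, Jane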

View File

@@ -141,6 +141,22 @@ static void sort_concat_finalize2(sqlite3_context *context) {
}

+static void sort_concat_finalize3(sqlite3_context *context) {
+    SortConcatList *list;
+    unsigned char *ans;
+
+    list = (SortConcatList*) sqlite3_aggregate_context(context, sizeof(*list));
+
+    if (list != NULL && list->vals != NULL && list->count > 0) {
+        qsort(list->vals, list->count, sizeof(list->vals[0]), sort_concat_cmp);
+        ans = sort_concat_do_finalize(list, '&');
+        if (ans != NULL) sqlite3_result_text(context, (char*)ans, -1, SQLITE_TRANSIENT);
+        free(ans);
+        sort_concat_free(list);
+    }
+}
+
// }}}

// identifiers_concat {{{
@@ -237,7 +253,8 @@ MYEXPORT int sqlite3_extension_init(
        sqlite3 *db, char **pzErrMsg, const sqlite3_api_routines *pApi){
    SQLITE_EXTENSION_INIT2(pApi);
    sqlite3_create_function(db, "sortconcat", 2, SQLITE_UTF8, NULL, NULL, sort_concat_step, sort_concat_finalize);
-   sqlite3_create_function(db, "sort_concat", 2, SQLITE_UTF8, NULL, NULL, sort_concat_step, sort_concat_finalize2);
+   sqlite3_create_function(db, "sortconcat_bar", 2, SQLITE_UTF8, NULL, NULL, sort_concat_step, sort_concat_finalize2);
+   sqlite3_create_function(db, "sortconcat_amper", 2, SQLITE_UTF8, NULL, NULL, sort_concat_step, sort_concat_finalize3);
    sqlite3_create_function(db, "identifiers_concat", 2, SQLITE_UTF8, NULL, NULL, identifiers_concat_step, identifiers_concat_finalize);
    return 0;
}

View File

@@ -23,7 +23,6 @@ entry_points = {
        'calibre-server = calibre.library.server.main:main',
        'lrf2lrs = calibre.ebooks.lrf.lrfparser:main',
        'lrs2lrf = calibre.ebooks.lrf.lrs.convert_from:main',
-       'librarything = calibre.ebooks.metadata.library_thing:main',
        'calibre-debug = calibre.debug:main',
        'calibredb = calibre.library.cli:main',
        'calibre-parallel = calibre.utils.ipc.worker:main',

View File

@@ -562,6 +562,16 @@ You have two choices:
    1. Create a patch by hacking on |app| and send it to me for review and inclusion. See `Development <http://calibre-ebook.com/get-involved>`_.
    2. `Open a ticket <http://calibre-ebook.com/bugs>`_ (you have to register and login first). Remember that |app| development is done by volunteers, so if you get no response to your feature request, it means no one feels like implementing it.
+Why doesn't |app| have an automatic update?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+For many reasons:
+
+ * *There is no need to update every week*. If you are happy with how |app| works, turn off the update notification and be on your merry way. Check back to see if you want to update once a year or so.
+ * Pre-downloading the updates for all users in the background would require about 80TB of bandwidth *every week*. That costs thousands of dollars a month. And |app| is currently growing at 300,000 new users every month.
+ * If I implement a dialog that downloads the update and launches it, instead of going to the website as it does now, that would save the most ardent |app| updater *at most five clicks a week*. There are far higher priority things to do in |app| development.
+ * If you really, really hate downloading |app| every week but still want to be up to date with the latest, I encourage you to run from source, which makes updating trivial. Instructions are :ref:`here <develop>`.
+
How is |app| licensed?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|app| is licensed under the GNU General Public License v3 (an open source license). This means that you are free to redistribute |app| as long as you make the source code available. So if you want to put |app| on a CD with your product, you must also put the |app| source code on the CD. The source code is available for download `from googlecode <http://code.google.com/p/calibre-ebook/downloads/list>`_. You are free to use the results of conversions from |app| however you want. You cannot use code, libraries from |app| in your software without making your software open source. For details, see `The GNU GPL v3 <http://www.gnu.org/licenses/gpl.html>`_.

src/calibre/test_build.py Normal file
View File

@ -0,0 +1,111 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
from future_builtins import map
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
'''
Test a binary calibre build to ensure that all needed binary images/libraries have loaded.
'''
import cStringIO
from calibre.constants import plugins, iswindows
def test_plugins():
for name in plugins:
mod, err = plugins[name]
if err or not mod:
raise RuntimeError('Plugin %s failed to load with error: %s' %
(name, err))
print (mod, 'loaded')
def test_lxml():
from lxml import etree
raw = '<a/>'
root = etree.fromstring(raw)
if etree.tostring(root) == raw:
print ('lxml OK!')
else:
raise RuntimeError('lxml failed')
def test_fontconfig():
from calibre.utils.fonts import fontconfig
families = fontconfig.find_font_families()
num = len(families)
if num < 10:
raise RuntimeError('Fontconfig found only %d font families'%num)
print ('Fontconfig OK! (%d families)'%num)
def test_winutil():
from calibre.devices.scanner import win_pnp_drives
matches = win_pnp_drives.scanner()
if len(matches) < 1:
raise RuntimeError('win_pnp_drives returned no drives')
print ('win_pnp_drives OK!')
def test_win32():
from calibre.utils.winshell import desktop
d = desktop()
if not d:
raise RuntimeError('winshell failed')
print ('winshell OK! (%s is the desktop)'%d)
def test_sqlite():
import sqlite3
conn = sqlite3.connect(':memory:')
from calibre.library.sqlite import load_c_extensions
if not load_c_extensions(conn, True):
raise RuntimeError('Failed to load sqlite extension')
print ('sqlite OK!')
def test_qt():
from PyQt4.Qt import (QWebView, QDialog, QImageReader, QNetworkAccessManager)
fmts = set(map(unicode, QImageReader.supportedImageFormats()))
testf = set(['jpg', 'png', 'mng', 'svg', 'ico', 'gif'])
if testf.intersection(fmts) != testf:
raise RuntimeError(
"Qt doesn't seem to be able to load its image plugins")
QWebView, QDialog  # bare references: the imports above are themselves the test that these classes load
na = QNetworkAccessManager()
if not hasattr(na, 'sslErrors'):
raise RuntimeError('Qt not compiled with openssl')
print ('Qt OK!')
def test_imaging():
from calibre.utils.magick.draw import create_canvas, Image
im = create_canvas(20, 20, '#ffffff')
jpg = im.export('jpg')
Image().load(jpg)
im.export('png')
print ('ImageMagick OK!')
from PIL import Image
i = Image.open(cStringIO.StringIO(jpg))
if i.size != (20, 20):
raise RuntimeError('PIL choked!')
print ('PIL OK!')
def test_unrar():
from calibre.libunrar import _libunrar
if not _libunrar:
raise RuntimeError('Failed to load libunrar')
print ('Unrar OK!')
def test():
test_plugins()
test_lxml()
test_fontconfig()
test_sqlite()
if iswindows:
test_winutil()
test_win32()
test_qt()
test_imaging()
test_unrar()
if __name__ == '__main__':
test()
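A minimal sketch of driving a couple of these checks in isolation, assuming the calibre source tree is importable (the file can also simply be executed, thanks to the __main__ guard above); the selection of tests here is illustrative:

from calibre.test_build import test_lxml, test_sqlite

test_lxml()     # prints 'lxml OK!' or raises RuntimeError
test_sqlite()   # prints 'sqlite OK!' or raises RuntimeError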

View File

@@ -5,10 +5,9 @@ Dynamic language lookup of translations for user-visible strings.
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>' __copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
import os import cStringIO
from gettext import GNUTranslations from gettext import GNUTranslations
from calibre.utils.localization import get_lc_messages_path from calibre.utils.localization import get_lc_messages_path, ZipFile
__all__ = ['translate'] __all__ = ['translate']
@@ -21,10 +20,15 @@ def translate(lang, text):
else: else:
mpath = get_lc_messages_path(lang) mpath = get_lc_messages_path(lang)
if mpath is not None: if mpath is not None:
p = os.path.join(mpath, 'messages.mo') with ZipFile(P('localization/locales.zip',
if os.path.exists(p): allow_user_override=False), 'r') as zf:
trans = GNUTranslations(open(p, 'rb')) try:
_CACHE[lang] = trans buf = cStringIO.StringIO(zf.read(mpath + '/messages.mo'))
except:
pass
else:
trans = GNUTranslations(buf)
_CACHE[lang] = trans
if trans is None: if trans is None:
return getattr(__builtins__, '_', lambda x: x)(text) return getattr(__builtins__, '_', lambda x: x)(text)
return trans.ugettext(text) return trans.ugettext(text)
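Stripped of the caching and fallback logic, the pattern the new translate() uses, reading a compiled .mo straight out of locales.zip into a memory buffer and handing it to GNUTranslations, looks roughly like this (the <lang>/messages.mo layout is taken from the diff; the zip path and helper name are illustrative, this is a sketch rather than calibre API):

import cStringIO
from zipfile import ZipFile
from gettext import GNUTranslations

def load_translations(zip_path, lang):
    # Read <lang>/messages.mo from the zip entirely in memory.
    with ZipFile(zip_path, 'r') as zf:
        buf = cStringIO.StringIO(zf.read(lang + '/messages.mo'))
    return GNUTranslations(buf)

# trans = load_translations('resources/localization/locales.zip', 'de')
# print trans.ugettext('News')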

View File

@@ -223,8 +223,7 @@ class OptionSet(object):
if val is val is True or val is False or val is None or \ if val is val is True or val is False or val is None or \
isinstance(val, (int, float, long, basestring)): isinstance(val, (int, float, long, basestring)):
return repr(val) return repr(val)
from PyQt4.QtCore import QString if val.__class__.__name__ == 'QString':
if isinstance(val, QString):
return repr(unicode(val)) return repr(unicode(val))
pickle = cPickle.dumps(val, -1) pickle = cPickle.dumps(val, -1)
return 'cPickle.loads(%s)'%repr(pickle) return 'cPickle.loads(%s)'%repr(pickle)
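The point of the change is that this (non-GUI) config code no longer has to import PyQt4 just to recognize a QString; the test is by class name only. A minimal sketch of that duck-typed check (the helper name is illustrative):

def is_qstring(val):
    # True for PyQt4 QString instances, without ever importing PyQt4 here.
    return val.__class__.__name__ == 'QString'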

View File

@@ -727,13 +727,8 @@ class BuiltinNot(BuiltinFormatterFunction):
'returns the empty string. This function works well with test or ' 'returns the empty string. This function works well with test or '
'first_non_empty. You can have as many values as you want.') 'first_non_empty. You can have as many values as you want.')
def evaluate(self, formatter, kwargs, mi, locals, *args): def evaluate(self, formatter, kwargs, mi, locals, val):
i = 0 return '' if val else '1'
while i < len(args):
if args[i]:
return '1'
i += 1
return ''
class BuiltinMergeLists(BuiltinFormatterFunction): class BuiltinMergeLists(BuiltinFormatterFunction):
name = 'merge_lists' name = 'merge_lists'

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement from __future__ import absolute_import
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>' __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
@@ -8,13 +8,14 @@ __docformat__ = 'restructuredtext en'
import os, locale, re, cStringIO, cPickle import os, locale, re, cStringIO, cPickle
from gettext import GNUTranslations from gettext import GNUTranslations
from zipfile import ZipFile
_available_translations = None _available_translations = None
def available_translations(): def available_translations():
global _available_translations global _available_translations
if _available_translations is None: if _available_translations is None:
stats = P('localization/stats.pickle') stats = P('localization/stats.pickle', allow_user_override=False)
if os.path.exists(stats): if os.path.exists(stats):
stats = cPickle.load(open(stats, 'rb')) stats = cPickle.load(open(stats, 'rb'))
else: else:
@@ -49,21 +50,20 @@ def get_lang():
lang = 'en' lang = 'en'
return lang return lang
def messages_path(lang):
return P('localization/locales/%s/LC_MESSAGES'%lang)
def get_lc_messages_path(lang): def get_lc_messages_path(lang):
hlang = None hlang = None
if lang in available_translations(): if zf_exists():
hlang = lang if lang in available_translations():
else: hlang = lang
xlang = lang.split('_')[0] else:
if xlang in available_translations(): xlang = lang.split('_')[0]
hlang = xlang if xlang in available_translations():
if hlang is not None: hlang = xlang
return messages_path(hlang) return hlang
return None
def zf_exists():
return os.path.exists(P('localization/locales.zip',
allow_user_override=False))
def set_translators(): def set_translators():
# To test different translations invoke as # To test different translations invoke as
@@ -79,12 +79,17 @@ def set_translators():
mpath = get_lc_messages_path(lang) mpath = get_lc_messages_path(lang)
if mpath is not None: if mpath is not None:
if buf is None: with ZipFile(P('localization/locales.zip',
buf = open(os.path.join(mpath, 'messages.mo'), 'rb') allow_user_override=False), 'r') as zf:
mpath = mpath.replace(os.sep+'nds'+os.sep, os.sep+'de'+os.sep) if buf is None:
isof = os.path.join(mpath, 'iso639.mo') buf = cStringIO.StringIO(zf.read(mpath + '/messages.mo'))
if os.path.exists(isof): if mpath == 'nds':
iso639 = open(isof, 'rb') mpath = 'de'
isof = mpath + '/iso639.mo'
try:
iso639 = cStringIO.StringIO(zf.read(isof))
except:
pass # No iso639 translations for this lang
if buf is not None: if buf is not None:
t = GNUTranslations(buf) t = GNUTranslations(buf)

View File

@@ -148,6 +148,12 @@ def decode_arcname(name):
name = name.decode('utf-8', 'replace') name = name.decode('utf-8', 'replace')
return name return name
# Added by Kovid to reset timestamp to default if it overflows the DOS
# limits
def fixtimevar(val):
if val < 0 or val > 0xffff:
val = 0
return val
def _check_zipfile(fp): def _check_zipfile(fp):
try: try:
@@ -341,6 +347,7 @@ class ZipInfo (object):
dt = self.date_time dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
if self.flag_bits & 0x08: if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data # Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0 CRC = compress_size = file_size = 0
@@ -365,7 +372,7 @@
filename, flag_bits = self._encodeFilenameFlags() filename, flag_bits = self._encodeFilenameFlags()
header = struct.pack(structFileHeader, stringFileHeader, header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, flag_bits, self.extract_version, self.reserved, flag_bits,
self.compress_type, dostime, dosdate, CRC, self.compress_type, fixtimevar(dostime), fixtimevar(dosdate), CRC,
compress_size, file_size, compress_size, file_size,
len(filename), len(extra)) len(filename), len(extra))
return header + filename + extra return header + filename + extra
@@ -1321,8 +1328,8 @@ class ZipFile:
for zinfo in self.filelist: # write central directory for zinfo in self.filelist: # write central directory
count = count + 1 count = count + 1
dt = zinfo.date_time dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] dosdate = fixtimevar((dt[0] - 1980) << 9 | dt[1] << 5 | dt[2])
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) dostime = fixtimevar(dt[3] << 11 | dt[4] << 5 | (dt[5] // 2))
extra = [] extra = []
if zinfo.file_size > ZIP64_LIMIT \ if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT: or zinfo.compress_size > ZIP64_LIMIT:
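Why the clamp is needed: the ZIP headers store modification times in two unsigned 16-bit DOS fields, so a file dated before 1980 (or after 2107) produces a packed value outside 0..0xffff, which struct.pack rejects or mangles depending on the Python version. A small sketch of the failure mode and the fix:

import struct

def fixtimevar(val):
    # Same clamp as in the diff: out-of-range DOS values are reset to 0.
    if val < 0 or val > 0xffff:
        val = 0
    return val

dt = (1979, 6, 1, 12, 0, 0)                           # a pre-1980 date_time tuple
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]    # negative: outside the DOS range
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
header = struct.pack('<HH', fixtimevar(dostime), fixtimevar(dosdate))  # 0 means 'no valid timestamp'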