Sync to trunk.

John Schember 2011-10-01 18:59:13 -04:00
commit c6fd082345
105 changed files with 124226 additions and 124034 deletions

View File

@@ -1,26 +1,18 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe
class AmericanProspect(BasicNewsRecipe):
title = u'American Prospect'
__author__ = u'Michael Heinz'
oldest_article = 30
language = 'en'
max_articles_per_feed = 100
recursions = 0
no_stylesheets = True
remove_javascript = True
__author__ = u'Michael Heinz, a.peter'
version = 2
preprocess_regexps = [
(re.compile(r'<body.*?<div class="pad_10L10R">', re.DOTALL|re.IGNORECASE), lambda match: '<body><div>'),
(re.compile(r'</div>.*</body>', re.DOTALL|re.IGNORECASE), lambda match: '</div></body>'),
(re.compile('\r'),lambda match: ''),
(re.compile(r'<!-- .+? -->', re.DOTALL|re.IGNORECASE), lambda match: ''),
(re.compile(r'<link .+?>', re.DOTALL|re.IGNORECASE), lambda match: ''),
(re.compile(r'<script.*?</script>', re.DOTALL|re.IGNORECASE), lambda match: ''),
(re.compile(r'<noscript.*?</noscript>', re.DOTALL|re.IGNORECASE), lambda match: ''),
(re.compile(r'<meta .*?/>', re.DOTALL|re.IGNORECASE), lambda match: ''),
]
oldest_article = 30
language = 'en'
max_articles_per_feed = 100
recursions = 0
no_stylesheets = True
remove_javascript = True
keep_only_tags = [dict(name='div', attrs={'class':'pad_10L10R'})]
remove_tags = [dict(name='form'), dict(name='div', attrs={'class':['bkt_caption','sharebox noprint','badgebox']})]
feeds = [(u'Articles', u'feed://www.prospect.org/articles_rss.jsp')]
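
For illustration, a minimal standalone sketch of how a preprocess_regexps-style clean-up pass works: each (pattern, function) pair is applied to the raw page text with re.sub before parsing. The cleanup_rules list, the apply_cleanup helper and the sample HTML are invented for this sketch and are not part of the commit.

import re

# Hypothetical rules mirroring the (pattern, substitution-function) pairs
# used by recipes such as the one above; not calibre code.
cleanup_rules = [
    (re.compile(r'<script.*?</script>', re.DOTALL | re.IGNORECASE), lambda m: ''),
    (re.compile(r'<!-- .+? -->', re.DOTALL | re.IGNORECASE), lambda m: ''),
    (re.compile('\r'), lambda m: ''),
]

def apply_cleanup(raw_html):
    # Apply every rule in order, exactly like a preprocess_regexps pass.
    for pattern, func in cleanup_rules:
        raw_html = pattern.sub(func, raw_html)
    return raw_html

sample = '<p>Keep me</p>\r<script>drop()</script><!-- comment -->'
print(apply_cleanup(sample))  # <p>Keep me</p>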

View File

@@ -8,21 +8,25 @@ from calibre.web.feeds.news import BasicNewsRecipe
class ChicagoTribune(BasicNewsRecipe):
title = 'Chicago Tribune'
__author__ = 'Kovid Goyal and Sujata Raman'
__author__ = 'Kovid Goyal and Sujata Raman, a.peter'
description = 'Politics, local and business news from Chicago'
language = 'en'
language = 'en'
version = 2
use_embedded_content = False
no_stylesheets = True
remove_javascript = True
use_embedded_content = False
no_stylesheets = True
remove_javascript = True
recursions = 1
keep_only_tags = [dict(name='div', attrs={'class':["story","entry-asset asset hentry"]}),
dict(name='div', attrs={'id':["pagebody","story","maincontentcontainer"]}),
]
remove_tags_after = [ {'class':['photo_article',]} ]
remove_tags_after = [{'class':['photo_article',]}]
remove_tags = [{'id':["moduleArticleTools","content-bottom","rail","articleRelates module","toolSet","relatedrailcontent","div-wrapper","beta","atp-comments","footer"]},
{'class':["clearfix","relatedTitle","articleRelates module","asset-footer","tools","comments","featurePromo","featurePromo fp-topjobs brownBackground","clearfix fullSpan brownBackground","curvedContent"]},
match_regexps = [r'page=[0-9]+']
remove_tags = [{'id':["moduleArticleTools","content-bottom","rail","articleRelates module","toolSet","relatedrailcontent","div-wrapper","beta","atp-comments","footer",'gallery-subcontent','subFooter']},
{'class':["clearfix","relatedTitle","articleRelates module","asset-footer","tools","comments","featurePromo","featurePromo fp-topjobs brownBackground","clearfix fullSpan brownBackground","curvedContent",'nextgen-share-tools','outbrainTools', 'google-ad-story-bottom']},
dict(name='font',attrs={'id':["cr-other-headlines"]})]
extra_css = '''
h1{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:large;}
@@ -37,7 +41,7 @@ class ChicagoTribune(BasicNewsRecipe):
.maincontentcontainer{font-family:Arial,Helvetica,sans-serif;font-size:small;}
.story-body{font-family:Arial,Helvetica,sans-serif;font-size:small;}
body{font-family:Helvetica,Arial,sans-serif;font-size:small;}
'''
'''
feeds = [
('Latest news', 'http://feeds.chicagotribune.com/chicagotribune/news/'),
('Local news', 'http://feeds.chicagotribune.com/chicagotribune/news/local/'),
@@ -76,8 +80,12 @@ class ChicagoTribune(BasicNewsRecipe):
print article.get('feedburner_origlink', article.get('guid', article.get('link')))
return article.get('feedburner_origlink', article.get('guid', article.get('link')))
def postprocess_html(self, soup, first_fetch):
# Remove the navigation bar. It was kept until now to be able to follow
# the links to further pages. But now we don't need them anymore.
for nav in soup.findAll(attrs={'class':['toppaginate','article-nav clearfix']}):
nav.extract()
for t in soup.findAll(['table', 'tr', 'td']):
t.name = 'div'
@@ -88,4 +96,3 @@ class ChicagoTribune(BasicNewsRecipe):
return soup
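
For context, a small standalone sketch of the post-processing idea in this recipe: follow the page=N links while fetching (recursions plus match_regexps), then strip the pagination bar and flatten table markup once all the pages are in. The sketch uses the bs4 package rather than calibre's bundled BeautifulSoup, and the sample HTML is invented.

from bs4 import BeautifulSoup  # stand-in for calibre's bundled BeautifulSoup

html = ('<div class="toppaginate">1 | 2 | next</div>'
        '<table><tr><td>Article text</td></tr></table>')
soup = BeautifulSoup(html, 'html.parser')

# Drop the pagination bar now that the extra pages have been fetched.
for nav in soup.find_all(attrs={'class': ['toppaginate', 'article-nav clearfix']}):
    nav.extract()

# Flatten table markup into divs, as the recipe does for e-reader friendliness.
for t in soup.find_all(['table', 'tr', 'td']):
    t.name = 'div'

print(soup)  # <div><div><div>Article text</div></div></div>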

View File

@@ -0,0 +1,96 @@
from calibre.web.feeds.news import BasicNewsRecipe
import re
class FSP(BasicNewsRecipe):
title = u'Folha de S\xE3o Paulo'
__author__ = 'fluzao'
description = u'Printed edition contents. UOL subscription required (Folha subscription currently not supported).' + \
u' [Conte\xfado completo da edi\xe7\xe3o impressa. Somente para assinantes UOL.]'
INDEX = 'http://www1.folha.uol.com.br/fsp/indices/'
language = 'pt'
no_stylesheets = True
max_articles_per_feed = 40
remove_javascript = True
needs_subscription = True
remove_tags_before = dict(name='b')
remove_tags = [dict(name='td', attrs={'align':'center'})]
remove_attributes = ['height','width']
masthead_url = 'http://f.i.uol.com.br/fsp/furniture/images/lgo-fsp-430x50-ffffff.gif'
# fixes the problem with the section names
section_dict = {'cotidian' : 'cotidiano', 'ilustrad': 'ilustrada', \
'quadrin': 'quadrinhos' , 'opiniao' : u'opini\xE3o', \
'ciencia' : u'ci\xeancia' , 'saude' : u'sa\xfade', \
'ribeirao' : u'ribeir\xE3o' , 'equilibrio' : u'equil\xedbrio'}
# this solves the problem with truncated content in Kindle
conversion_options = {'linearize_tables' : True}
# this bit removes the footer where there are links for Proximo Texto, Texto Anterior,
# Indice e Comunicar Erros
preprocess_regexps = [(re.compile(r'<BR><BR>Texto Anterior:.*<!--/NOTICIA-->',
re.DOTALL|re.IGNORECASE), lambda match: r''),
(re.compile(r'<BR><BR>Pr&oacute;ximo Texto:.*<!--/NOTICIA-->',
re.DOTALL|re.IGNORECASE), lambda match: r'')]
def get_browser(self):
br = BasicNewsRecipe.get_browser()
if self.username is not None and self.password is not None:
br.open('https://acesso.uol.com.br/login.html')
br.form = br.forms().next()
br['user'] = self.username
br['pass'] = self.password
br.submit().read()
## if 'Please try again' in raw:
## raise Exception('Your username and password are incorrect')
return br
def parse_index(self):
soup = self.index_to_soup(self.INDEX)
feeds = []
articles = []
section_title = "Preambulo"
for post in soup.findAll('a'):
# if name=True => new section
strpost = str(post)
if strpost.startswith('<a name'):
if articles:
feeds.append((section_title, articles))
self.log()
self.log('--> new section found, creating old section feed: ', section_title)
section_title = post['name']
if section_title in self.section_dict:
section_title = self.section_dict[section_title]
articles = []
self.log('--> new section title: ', section_title)
if strpost.startswith('<a href'):
url = post['href']
if url.startswith('/fsp'):
url = 'http://www1.folha.uol.com.br'+url
title = self.tag_to_string(post)
self.log()
self.log('--> post: ', post)
self.log('--> url: ', url)
self.log('--> title: ', title)
articles.append({'title':title, 'url':url})
if articles:
feeds.append((section_title, articles))
# keeping the front page url
minha_capa = feeds[0][1][1]['url']
# removing the 'Preambulo' section
del feeds[0]
# creating the url for the cover image
coverurl = feeds[0][1][0]['url']
coverurl = coverurl.replace('/opiniao/fz', '/images/cp')
coverurl = coverurl.replace('01.htm', '.jpg')
self.cover_url = coverurl
# inserting the cover page as the first article (nicer for kindle users)
feeds.insert(0,(u'primeira p\xe1gina', [{'title':u'Primeira p\xe1gina' , 'url':minha_capa}]))
return feeds
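
As a worked example of the cover-URL derivation above (the concrete article URL is invented; only the two replace() calls come from the recipe):

# Hypothetical first link of the 'opiniao' section on a given day:
coverurl = 'http://www1.folha.uol.com.br/fsp/opiniao/fz0110201101.htm'
coverurl = coverurl.replace('/opiniao/fz', '/images/cp')
coverurl = coverurl.replace('01.htm', '.jpg')
print(coverurl)  # http://www1.folha.uol.com.br/fsp/images/cp01102011.jpg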

View File

@@ -0,0 +1,34 @@
from calibre.web.feeds.recipes import BasicNewsRecipe
'''Calibre recipe to convert the RSS feeds of the Leipziger Volkszeitung to an ebook.'''
class SportsIllustratedRecipe(BasicNewsRecipe) :
__author__ = 'a.peter'
__copyright__ = 'a.peter'
__license__ = 'GPL v3'
language = 'de'
description = 'Leipziger Volkszeitung Online RSS'
version = 1
title = u'Leipziger Volkszeitung Online RSS'
timefmt = ' [%d.%m.%Y]'
no_stylesheets = True
remove_javascript = True
use_embedded_content = False
publication_type = 'newspaper'
keep_only_tags = [dict(name='div', attrs={'class':'article'})]
remove_tags = [dict(name='div', attrs={'class':['ARTICLE_MORE', 'clearfloat']})]
feeds = [(u'Leipzig', u'http://nachrichten.lvz-online.de/rss/leipzig-rss.xml'),
(u'Mitteldeutschland', u'http://nachrichten.lvz-online.de/rss/mitteldeutschland-rss.xml'),
(u'Brennpunkte', u'http://nachrichten.lvz-online.de/rss/brennpunkte-rss.xml'),
(u'Polizeiticker', u'http://nachrichten.lvz-online.de/rss/polizeiticker-rss.xml'),
(u'Boulevard', u'http://nachrichten.lvz-online.de/rss/boulevard-rss.xml'),
(u'Kultur', u'http://nachrichten.lvz-online.de/rss/kultur-rss.xml'),
(u'Sport', u'http://nachrichten.lvz-online.de/rss/sport-rss.xml'),
(u'Regionalsport', u'http://nachrichten.lvz-online.de/rss/regionalsport-rss.xml'),
(u'Knipser', u'http://nachrichten.lvz-online.de/rss/knipser-rss.xml')]
def get_masthead_url(self):
return 'http://www.lvz-online.de/resources/themes/standard/images/global/logo.gif'

View File

@@ -285,6 +285,15 @@ function booklist(hide_sort) {
first_page();
}
function search_result() {
var test = $("#booklist #page0").html();
if (!test) {
$("#booklist").html("No books found matching this query");
return;
}
booklist();
}
function show_details(a_dom) {
var book = $(a_dom).closest('div.summary');
var bd = $('#book_details_dialog');

View File

@@ -2,7 +2,7 @@
let g:pyflakes_builtins = ["_", "dynamic_property", "__", "P", "I", "lopen", "icu_lower", "icu_upper", "icu_title", "ngettext"]
python << EOFPY
import os
import os, sys
import vipy
@@ -11,8 +11,13 @@ project_dir = os.path.dirname(source_file)
src_dir = os.path.abspath(os.path.join(project_dir, 'src'))
base_dir = os.path.join(src_dir, 'calibre')
sys.path.insert(0, src_dir)
sys.resources_location = os.path.join(project_dir, 'resources')
sys.extensions_location = os.path.join(base_dir, 'plugins')
sys.executables_location = os.environ.get('CALIBRE_EXECUTABLES_PATH', '/usr/bin')
vipy.session.initialize(project_name='calibre', src_dir=src_dir,
project_dir=project_dir, base_dir=base_dir)
project_dir=project_dir, base_dir=project_dir)
def recipe_title_callback(raw):
return eval(raw.decode('utf-8')).replace(' ', '_')

81 file diffs suppressed because they are too large.

View File

@@ -1236,7 +1236,7 @@ class StoreEbookNLStore(StoreBase):
headquarters = 'NL'
formats = ['EPUB', 'PDF']
affiliate = True
affiliate = False
class StoreEbookscomStore(StoreBase):
name = 'eBooks.com'

View File

@@ -64,4 +64,6 @@ Various things that require other things before they can be migrated:
columns/categories/searches info into
self.field_metadata. Finally, implement metadata dirtied
functionality.
2. Catching DatabaseException and sqlite.Error when creating new
libraries/switching/on calibre startup.
'''

View File

@@ -315,7 +315,7 @@ class HeuristicProcessor(object):
supports a range of html markup and text files
'''
# define the pieces of the regex
lookahead = "(?<=.{"+str(length)+u"}([a-zäëïöüàèìòùáćéíĺóŕńśúýâêîôûçąężıãõñæøþðßěľščťžňďřů,:“”)\IA\u00DF]|(?<!\&\w{4});))" # (?<!\&\w{4});) is a semicolon not part of an entity
em_en_lookahead = "(?<=.{"+str(length)+u"}[\u2013\u2014])"
soft_hyphen = u"\xad"
@@ -770,6 +770,7 @@ class HeuristicProcessor(object):
# Multiple sequential blank paragraphs are merged with appropriate margins
# If non-blank scene breaks exist they are center aligned and styled with appropriate margins.
if getattr(self.extra_opts, 'format_scene_breaks', False):
self.log.debug('Formatting scene breaks')
html = re.sub('(?i)<div[^>]*>\s*<br(\s?/)?>\s*</div>', '<p></p>', html)
html = self.detect_whitespace(html)
html = self.detect_soft_breaks(html)
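
A simplified, self-contained sketch of the lookahead check in the hunk above: a paragraph break is only merged when at least `length` characters precede it and the character just before the break suggests a wrapped line rather than a real paragraph end. The character class and sample text here are pared down for illustration.

import re

length = 40
# The recipe code calls this 'lookahead', though it is really a fixed-width
# lookbehind: 'length' arbitrary characters plus one character from a class
# that suggests the line was wrapped mid-sentence.
lookbehind = "(?<=.{" + str(length) + "}[a-z,])"
unwrap = re.compile(lookbehind + r"\s*</p>\s*<p[^>]*>")

html = ("<p>This hard-wrapped sentence keeps going well past forty characters and</p>"
        "<p>continues in the next paragraph.</p>"
        "<p>A real, short paragraph stays separate.</p>")
print(unwrap.sub(" ", html))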

View File

@@ -22,6 +22,8 @@ except:
_author_pat = re.compile(r'(?i),?\s+(and|with)\s+')
def string_to_authors(raw):
if not raw:
return []
raw = raw.replace('&&', u'\uffff')
raw = _author_pat.sub('&', raw)
authors = [a.strip().replace(u'\uffff', '&') for a in raw.split('&')]
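
For illustration, the author-splitting logic shown above restated as a standalone helper with example inputs. The name split_authors and the trailing empty-string filter are additions for this sketch; the rest mirrors the hunk.

import re

_author_pat = re.compile(r'(?i),?\s+(and|with)\s+')

def split_authors(raw):
    # '&&' stands for a literal ampersand inside one author name, while
    # '&', ' and ' and ' with ' separate authors.
    if not raw:
        return []
    raw = raw.replace('&&', u'\uffff')   # protect literal ampersands
    raw = _author_pat.sub('&', raw)      # normalise 'and'/'with' to '&'
    authors = [a.strip().replace(u'\uffff', '&') for a in raw.split('&')]
    return [a for a in authors if a]

print(split_authors('Neil Gaiman and Terry Pratchett'))  # ['Neil Gaiman', 'Terry Pratchett']
print(split_authors('Simon && Schuster'))                # ['Simon & Schuster']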

View File

@@ -149,19 +149,20 @@ def metadata_from_filename(name, pat=None):
try:
au = match.group('author')
aus = string_to_authors(au)
mi.authors = aus
if prefs['swap_author_names'] and mi.authors:
def swap(a):
if ',' in a:
parts = a.split(',', 1)
else:
parts = a.split(None, 1)
if len(parts) > 1:
t = parts[-1]
parts = parts[:-1]
parts.insert(0, t)
return ' '.join(parts)
mi.authors = [swap(x) for x in mi.authors]
if aus:
mi.authors = aus
if prefs['swap_author_names'] and mi.authors:
def swap(a):
if ',' in a:
parts = a.split(',', 1)
else:
parts = a.split(None, 1)
if len(parts) > 1:
t = parts[-1]
parts = parts[:-1]
parts.insert(0, t)
return ' '.join(parts)
mi.authors = [swap(x) for x in mi.authors]
except (IndexError, ValueError):
pass
try:
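
A quick standalone illustration of the swap helper now guarded by the `if aus:` check; the sample names are invented.

def swap(a):
    # Turn 'Last, First' or 'Last First' into 'First Last', as above.
    if ',' in a:
        parts = a.split(',', 1)
    else:
        parts = a.split(None, 1)
    if len(parts) > 1:
        t = parts[-1]
        parts = parts[:-1]
        parts.insert(0, t)
    return ' '.join(parts)

print(swap('Asimov Isaac'))   # Isaac Asimov
print(swap('Tolkien, John'))  # ' John Tolkien' (the comma split keeps the space)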

View File

@@ -25,8 +25,50 @@ class Extract(ODF2XHTML):
with open(name, 'wb') as f:
f.write(data)
def filter_css(self, html, log):
def fix_markup(self, html, log):
root = etree.fromstring(html)
self.epubify_markup(root, log)
self.filter_css(root, log)
html = etree.tostring(root, encoding='utf-8',
xml_declaration=True)
return html
def epubify_markup(self, root, log):
# Fix <p><div> constructs as the asinine epubchecker complains
# about them
from calibre.ebooks.oeb.base import XPath, XHTML
pdiv = XPath('//h:p/h:div')
for div in pdiv(root):
div.getparent().tag = XHTML('div')
# Remove the position:relative as it causes problems with some epub
# renderers. Remove display: block on an image inside a div as it is
# redundant and prevents text-align:center from working in ADE
imgpath = XPath('//h:div/h:img[@style]')
for img in imgpath(root):
div = img.getparent()
if len(div) == 1:
style = div.attrib['style'].replace('position:relative', '')
if style.startswith(';'): style = style[1:]
div.attrib['style'] = style
if img.attrib.get('style', '') == 'display: block;':
del img.attrib['style']
# A div/div/img construct causes text-align:center to not work in ADE
# so set the display of the second div to inline. This should have no
# effect (apart from minor vspace issues) in a compliant HTML renderer
# but it fixes the centering of the image via a text-align:center on
# the first div in ADE
imgpath = XPath('descendant::h:div/h:div/h:img')
for img in imgpath(root):
div2 = img.getparent()
div1 = div2.getparent()
if len(div1) == len(div2) == 1:
style = div2.attrib['style']
div2.attrib['style'] = 'display:inline;'+style
def filter_css(self, root, log):
style = root.xpath('//*[local-name() = "style" and @type="text/css"]')
if style:
style = style[0]
@@ -40,9 +82,6 @@ class Extract(ODF2XHTML):
extra.extend(sel_map.get(cls, []))
if extra:
x.set('class', orig + ' ' + ' '.join(extra))
html = etree.tostring(root, encoding='utf-8',
xml_declaration=True)
return html
def do_filter_css(self, css):
from cssutils import parseString
@@ -86,7 +125,7 @@ class Extract(ODF2XHTML):
# the available screen real estate
html = html.replace('img { width: 100%; height: 100%; }', '')
try:
html = self.filter_css(html, log)
html = self.fix_markup(html, log)
except:
log.exception('Failed to filter CSS, conversion may be slow')
with open('index.xhtml', 'wb') as f:
@@ -119,23 +158,4 @@ class ODTInput(InputFormatPlugin):
accelerators):
return Extract()(stream, '.', log)
def postprocess_book(self, oeb, opts, log):
# Fix <p><div> constructs as the asinine epubchecker complains
# about them
from calibre.ebooks.oeb.base import XPath, XHTML
path = XPath('//h:p/h:div')
path2 = XPath('//h:div[@style]/h:img[@style]')
for item in oeb.spine:
root = item.data
if not hasattr(root, 'xpath'): continue
for div in path(root):
div.getparent().tag = XHTML('div')
# This construct doesn't render well in HTML
for img in path2(root):
div = img.getparent()
if 'position:relative' in div.attrib['style'] and len(div) == 1 \
and 'img' in div[0].tag:
del div.attrib['style']
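
To make the div/div/img workaround above concrete, here is a self-contained sketch that uses plain lxml and an explicit namespace map instead of calibre's XPath helper; the sample document is invented.

from lxml import etree

XHTML_NS = 'http://www.w3.org/1999/xhtml'
doc = etree.fromstring(
    '<html xmlns="%s"><body>'
    '<div style="text-align:center"><div style="margin:0">'
    '<img src="cover.png"/></div></div>'
    '</body></html>' % XHTML_NS)

# Force the inner div of a div/div/img pair to display:inline so that the
# text-align:center on the outer div actually centres the image in ADE.
for img in doc.xpath('//h:div/h:div/h:img', namespaces={'h': XHTML_NS}):
    inner = img.getparent()
    outer = inner.getparent()
    if len(inner) == 1 and len(outer) == 1:
        inner.set('style', 'display:inline;' + inner.get('style', ''))

print(etree.tostring(doc, pretty_print=True).decode('utf-8'))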

View File

@@ -405,13 +405,12 @@ class ChooseLibraryAction(InterfaceAction):
else:
return
prefs['library_path'] = loc
#from calibre.utils.mem import memory
#import weakref
#from PyQt4.Qt import QTimer
#self.dbref = weakref.ref(self.gui.library_view.model().db)
#self.before_mem = memory()/1024**2
self.gui.library_moved(loc)
self.gui.library_moved(loc, allow_rebuild=True)
#QTimer.singleShot(5000, self.debug_leak)
def debug_leak(self):
@@ -455,7 +454,8 @@ class ChooseLibraryAction(InterfaceAction):
self.choose_dialog_library_renamed = getattr(c, 'library_renamed', False)
def choose_library_callback(self, newloc, copy_structure=False):
self.gui.library_moved(newloc, copy_structure=copy_structure)
self.gui.library_moved(newloc, copy_structure=copy_structure,
allow_rebuild=True)
if getattr(self, 'choose_dialog_library_renamed', False):
self.stats.rename(self.pre_choose_dialog_location, prefs['library_path'])
self.build_menus()

View File

@@ -13,7 +13,6 @@ from calibre.gui2.dialogs.choose_library_ui import Ui_Dialog
from calibre.gui2 import error_dialog, choose_dir
from calibre.constants import filesystem_encoding, iswindows
from calibre import isbytestring, patheq
from calibre.utils.config import prefs
from calibre.gui2.wizard import move_library
from calibre.library.database2 import LibraryDatabase2
@@ -77,7 +76,6 @@ class ChooseLibrary(QDialog, Ui_Dialog):
def perform_action(self, ac, loc):
if ac in ('new', 'existing'):
prefs['library_path'] = loc
self.callback(loc, copy_structure=self.copy_structure.isChecked())
else:
self.db.prefs.disable_setting = True

View File

@@ -5,12 +5,14 @@ __license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from PyQt4.Qt import QDialog, QLabel, QVBoxLayout, QDialogButtonBox, \
QProgressBar, QSize, QTimer, pyqtSignal, Qt
from PyQt4.Qt import (QDialog, QLabel, QVBoxLayout, QDialogButtonBox,
QProgressBar, QSize, QTimer, pyqtSignal, Qt)
from calibre.library.restore import Restore
from calibre.gui2 import error_dialog, question_dialog, warning_dialog, \
info_dialog
from calibre.gui2 import (error_dialog, question_dialog, warning_dialog,
info_dialog)
from calibre import force_unicode
from calibre.constants import filesystem_encoding
class DBRestore(QDialog):
@@ -73,6 +75,19 @@ class DBRestore(QDialog):
self.msg.setText(msg)
self.pb.setValue(step)
def _show_success_msg(restorer, parent=None):
r = restorer
olddb = _('The old database was saved as: %s')%force_unicode(r.olddb,
filesystem_encoding)
if r.errors_occurred:
warning_dialog(parent, _('Success'),
_('Restoring the database succeeded with some warnings'
' click Show details to see the details. %s')%olddb,
det_msg=r.report, show=True)
else:
info_dialog(parent, _('Success'),
_('Restoring database was successful. %s')%olddb, show=True,
show_copy_button=False)
def restore_database(db, parent=None):
if not question_dialog(parent, _('Are you sure?'), '<p>'+
@@ -102,14 +117,21 @@ def restore_database(db, parent=None):
_('Restoring database failed, click Show details to see details'),
det_msg=r.tb, show=True)
else:
if r.errors_occurred:
warning_dialog(parent, _('Success'),
_('Restoring the database succeeded with some warnings'
' click Show details to see the details.'),
det_msg=r.report, show=True)
else:
info_dialog(parent, _('Success'),
_('Restoring database was successful'), show=True,
show_copy_button=False)
_show_success_msg(r, parent=parent)
return True
def repair_library_at(library_path, parent=None):
d = DBRestore(parent, library_path)
d.exec_()
if d.rejected:
return False
r = d.restorer
if r.tb is not None:
error_dialog(parent, _('Failed'),
_('Restoring database failed, click Show details to see details'),
det_msg=r.tb, show=True)
return False
_show_success_msg(r, parent=parent)
return True

View File

@@ -4,16 +4,15 @@ __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
import sys, os, time, socket, traceback
from functools import partial
from PyQt4.Qt import QCoreApplication, QIcon, QObject, QTimer, \
QThread, pyqtSignal, Qt, QProgressDialog, QString, QPixmap, \
QSplashScreen, QApplication
from PyQt4.Qt import (QCoreApplication, QIcon, QObject, QTimer,
QPixmap, QSplashScreen, QApplication)
from calibre import prints, plugins
from calibre.constants import iswindows, __appname__, isosx, DEBUG, \
filesystem_encoding
from calibre import prints, plugins, force_unicode
from calibre.constants import (iswindows, __appname__, isosx, DEBUG,
filesystem_encoding)
from calibre.utils.ipc import ADDRESS, RC
from calibre.gui2 import ORG_NAME, APP_UID, initialize_file_icon_provider, \
Application, choose_dir, error_dialog, question_dialog, gprefs
from calibre.gui2 import (ORG_NAME, APP_UID, initialize_file_icon_provider,
Application, choose_dir, error_dialog, question_dialog, gprefs)
from calibre.gui2.main_window import option_parser as _option_parser
from calibre.utils.config import prefs, dynamic
from calibre.library.database2 import LibraryDatabase2
@@ -110,36 +109,9 @@ def get_library_path(parent=None):
default_dir=get_default_library_path())
return library_path
class DBRepair(QThread):
repair_done = pyqtSignal(object, object)
progress = pyqtSignal(object, object)
def __init__(self, library_path, parent, pd):
QThread.__init__(self, parent)
self.library_path = library_path
self.pd = pd
self.progress.connect(self._callback, type=Qt.QueuedConnection)
def _callback(self, num, is_length):
if is_length:
self.pd.setRange(0, num-1)
num = 0
self.pd.setValue(num)
def callback(self, num, is_length):
self.progress.emit(num, is_length)
def run(self):
from calibre.debug import reinit_db
try:
reinit_db(os.path.join(self.library_path, 'metadata.db'),
self.callback)
db = LibraryDatabase2(self.library_path)
tb = None
except:
db, tb = None, traceback.format_exc()
self.repair_done.emit(db, tb)
def repair_library(library_path):
from calibre.gui2.dialogs.restore_library import repair_library_at
return repair_library_at(library_path)
class GuiRunner(QObject):
'''Make sure an event loop is running before starting the main work of
@@ -184,9 +156,6 @@ class GuiRunner(QObject):
raise SystemExit(1)
def initialize_db_stage2(self, db, tb):
repair_pd = getattr(self, 'repair_pd', None)
if repair_pd is not None:
repair_pd.cancel()
if db is None and tb is not None:
# DB Repair failed
@@ -219,23 +188,16 @@ class GuiRunner(QObject):
db = LibraryDatabase2(self.library_path)
except (sqlite.Error, DatabaseException):
repair = question_dialog(self.splash_screen, _('Corrupted database'),
_('Your calibre database appears to be corrupted. Do '
'you want calibre to try and repair it automatically? '
'If you say No, a new empty calibre library will be created.'),
_('The library database at %s appears to be corrupted. Do '
'you want calibre to try and rebuild it automatically? '
'The rebuild may not be completely successful. '
'If you say No, a new empty calibre library will be created.')
% force_unicode(self.library_path, filesystem_encoding),
det_msg=traceback.format_exc()
)
if repair:
self.repair_pd = QProgressDialog(_('Repairing database. This '
'can take a very long time for a large collection'), QString(),
0, 0)
self.repair_pd.setWindowModality(Qt.WindowModal)
self.repair_pd.show()
self.repair = DBRepair(self.library_path, self, self.repair_pd)
self.repair.repair_done.connect(self.initialize_db_stage2,
type=Qt.QueuedConnection)
self.repair.start()
return
if repair_library(self.library_path):
db = LibraryDatabase2(self.library_path)
except:
error_dialog(self.splash_screen, _('Bad database location'),
_('Bad database location %r. Will start with '

View File

@@ -23,9 +23,8 @@ from calibre.gui2.store.web_store_dialog import WebStoreDialog
class EBookNLStore(BasicStoreConfig, StorePlugin):
def open(self, parent=None, detail_item=None, external=False):
url = 'http://ad.zanox.com/ppc/?19015168C29310186T'
url_details = ('http://ad.zanox.com/ppc/?19016028C1098154549T&ULP=[['
'http://www.ebook.nl/store/{0}]]')
url = 'http://www.ebook.nl/'
url_details = ('http://www.ebook.nl/store/{0}')
if external or self.config.get('open_external', False):
if detail_item:

View File

@@ -6,7 +6,7 @@ __license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import urllib2
import urllib2, re
from contextlib import closing
from lxml import html
@@ -67,7 +67,10 @@ class FoylesUKStore(BasicStoreConfig, StorePlugin):
title = ''.join(data.xpath('.//a[@class="Title"]/text()'))
author = ', '.join(data.xpath('.//span[@class="Author"]/text()'))
price = ''.join(data.xpath('./ul/li[@class="Strong"]/text()'))
price = price[price.rfind(' '):]
mo = re.search('£[\d\.]+', price)
if mo is None:
continue
price = mo.group(0)
counter -= 1
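
The more defensive price parsing introduced above, restated as a standalone helper (the name extract_price and the sample strings are invented):

import re

def extract_price(text):
    # Pull out a pound amount explicitly instead of slicing on the last
    # space; return None so the caller can skip entries with no price.
    mo = re.search(u'£[\\d.]+', text)
    return mo.group(0) if mo else None

print(extract_price('ePub  £7.99'))            # £7.99
print(extract_price('Currently unavailable'))  # None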

View File

@@ -63,9 +63,9 @@ class TagTreeItem(object): # {{{
self.category_key = category_key
self.temporary = temporary
self.tag = Tag(data, category=category_key,
is_editable=category_key not in ['news', 'search', 'identifiers'],
is_editable=category_key not in
['news', 'search', 'identifiers', 'languages'],
is_searchable=category_key not in ['search'])
elif self.type == self.TAG:
self.icon_state_map[0] = QVariant(data.icon)
self.tag = data
@@ -1163,7 +1163,7 @@ class TagsModel(QAbstractItemModel): # {{{
letters_seen[subnode.tag.sort[0]] = True
charclass = ''.join(letters_seen)
if k == 'author_sort':
expr = r'%s:"~(^[%s])|(&\\s*[%s])"'%(k, charclass, charclass)
expr = r'%s:"~(^[%s])|(&\s*[%s])"'%(k, charclass, charclass)
else:
expr = r'%s:"~^[%s]"'%(k, charclass)
if node_searches[tag_item.tag.state] == 'true':
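
For clarity, the corrected author_sort expression evaluated with an illustrative character class. The old doubled backslash put '\\s' (an escaped backslash plus the letter s) into the search string, which a regex engine reads as a literal backslash followed by 's' rather than as whitespace.

k, charclass = 'author_sort', 'abc'
expr = r'%s:"~(^[%s])|(&\s*[%s])"' % (k, charclass, charclass)
print(expr)  # author_sort:"~(^[abc])|(&\s*[abc])"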

Some files were not shown because too many files have changed in this diff.