Mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-07 10:14:46 -04:00)

Commit e27fa828f4: Sync to trunk.
BIN  resources/images/dictionary.png (new file; binary file not shown; 5.3 KiB)
@@ -15,12 +15,13 @@ class ElPais(BasicNewsRecipe):

    oldest_article = 7
    max_articles_per_feed = 100
    no_stylesheets = True

    remove_tags = [dict(name='div', attrs={'class':'zona_superior'}), dict(name='div', attrs={'class':'limpiar'}), dict(name='div', attrs={'id':'pie'})]
    extra_css = 'h1 {font: sans-serif large;} \n h2 {font: sans-serif medium;} \n h3 {font: sans-serif small;} \n h4 {font: sans-serif bold small;} \n p{ font:10pt serif}'

    feeds = [(u'Internacional', u'http://www.elpais.es/rss/rss_section.html?anchor=elpporint'), (u'Espana', u'http://www.elpais.es/rss/rss_section.html?anchor=elppornac'), (u'Deportes', u'http://www.elpais.es/rss/rss_section.html?anchor=elppordep'), (u'Economia', u'http://www.elpais.es/rss/rss_section.html?anchor=elpporeco'), (u'Tecnologia', u'http://www.elpais.es/rss/rss_section.html?anchor=elpportec'), (u'Cultura', u'http://www.elpais.es/rss/rss_section.html?anchor=elpporcul'), (u'Gente', u'http://www.elpais.es/rss/rss_section.html?anchor=elpporgen'), (u'Sociedad', u'http://www.elpais.es/rss/rss_section.html?anchor=elpporsoc'), (u'Opinion', u'http://www.elpais.es/rss/rss_section.html?anchor=elpporopi')]

    def print_version(self, url):
        url = url+'?print=1'
        return url
@@ -31,6 +31,7 @@ class IncMagazineRecipe(BasicNewsRecipe):

    def parse_index(self):
        soup = self.index_to_soup(self.INDEX)
        self.browser.open(self.INDEX)

        url = self.browser.geturl()
        date = url.rpartition('/')[0].rpartition('/')[2]
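The date assignment above pulls the second-to-last path segment out of the URL the browser was redirected to. A minimal sketch of that string manipulation, using a made-up URL purely for illustration:

    # Hypothetical URL; in the recipe the value comes from self.browser.geturl().
    url = 'http://www.example.com/incmagazine/201003/index.html'

    # rpartition('/') splits on the last '/': taking [0] drops 'index.html',
    # and the second rpartition's [2] picks out the segment just before it.
    date = url.rpartition('/')[0].rpartition('/')[2]
    print date  # -> 201003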
@@ -1,93 +1,93 @@

#!/usr/bin/env python

__license__ = 'GPL v3'
'''
'''
from calibre.web.feeds.recipes import BasicNewsRecipe

class PeopleMag(BasicNewsRecipe):

    title = 'People/US Magazine Mashup'
    __author__ = 'BrianG'
    language = 'en'
    description = 'Headlines from People and US Magazine'
    no_stylesheets = True
    use_embedded_content = False
    oldest_article = 2
    max_articles_per_feed = 50

    extra_css = '''
        h1{font-family:verdana,arial,helvetica,sans-serif; font-size: large;}
        h2{font-family:verdana,arial,helvetica,sans-serif; font-size: small;}
        .body-content{font-family:verdana,arial,helvetica,sans-serif; font-size: small;}
        .byline {font-size: small; color: #666666; font-style:italic; }
        .lastline {font-size: small; color: #666666; font-style:italic;}
        .contact {font-size: small; color: #666666;}
        .contact p {font-size: small; color: #666666;}
        .photoCaption { font-family:verdana,arial,helvetica,sans-serif; font-size:x-small;}
        .photoCredit{ font-family:verdana,arial,helvetica,sans-serif; font-size:x-small; color:#666666;}
        .article_timestamp{font-size:x-small; color:#666666;}
        a {font-family:verdana,arial,helvetica,sans-serif; font-size: x-small;}
        '''

    keep_only_tags = [
        dict(name='div', attrs={'class': 'panel_news_article_main'}),
        dict(name='div', attrs={'class':'article_content'}),
        dict(name='div', attrs={'class': 'headline'}),
        dict(name='div', attrs={'class': 'post'}),
        dict(name='div', attrs={'class': 'packageheadlines'}),
        dict(name='div', attrs={'class': 'snap_preview'}),
        dict(name='div', attrs={'id': 'articlebody'})
        ]

    remove_tags = [
        dict(name='div', attrs={'class':'share_comments'}),
        dict(name='p', attrs={'class':'twitter_facebook'}),
        dict(name='div', attrs={'class':'share_comments_bottom'}),
        dict(name='h2', attrs={'id':'related_content'}),
        dict(name='div', attrs={'class':'next_article'}),
        dict(name='div', attrs={'class':'prev_article'}),
        dict(name='ul', attrs={'id':'sharebar'}),
        dict(name='div', attrs={'class':'sharelinkcont'}),
        dict(name='div', attrs={'class':'categories'}),
        dict(name='ul', attrs={'class':'categories'}),
        dict(name='div', attrs={'id':'promo'}),
        dict(name='div', attrs={'class':'linksWrapper'}),
        dict(name='p', attrs={'class':'tag tvnews'}),
        dict(name='p', attrs={'class':'tag movienews'}),
        dict(name='p', attrs={'class':'tag musicnews'}),
        dict(name='p', attrs={'class':'tag couples'}),
        dict(name='p', attrs={'class':'tag gooddeeds'}),
        dict(name='p', attrs={'class':'tag weddings'}),
        dict(name='p', attrs={'class':'tag health'})
        ]

    feeds = [
        ('PEOPLE Headlines', 'http://feeds.people.com/people/headlines'),
        ('US Headlines', 'http://www.usmagazine.com/celebrity_news/rss')
        ]

    def get_article_url(self, article):
        ans = article.link

        try:
            self.log('Looking for full story link in', ans)
            soup = self.index_to_soup(ans)
            x = soup.find(text="View All")

            if x is not None:
                ans = ans + '?viewAll=y'
                self.log('Found full story link', ans)
        except:
            pass
        return ans

    def postprocess_html(self, soup, first):

        for tag in soup.findAll(name='div', attrs={'class':"container_ate_qandatitle"}):
            tag.extract()
        for tag in soup.findAll(name='br'):
            tag.extract()

        return soup
@@ -185,7 +185,7 @@ class Develop(Command):

        script = template.format(
                module=mod, func=func,
                path=self.libdir, resources=self.sharedir,
-               executables=self.staging_bindir,
+               executables=self.bindir,
                extensions=self.j(self.libdir, 'calibre', 'plugins'))
        path = self.j(self.staging_bindir, name)
        if not os.path.exists(self.staging_bindir):
@@ -245,7 +245,7 @@ class MobiMLizer(object):

                    bgcolor=istate.bgcolor)
            if istate.fgcolor != 'black':
                inline = etree.SubElement(inline, XHTML('font'),
-                   color=istate.fgcolor)
+                   color=unicode(istate.fgcolor))
            if istate.strikethrough:
                inline = etree.SubElement(inline, XHTML('s'))
            bstate.inline = inline
@@ -605,7 +605,6 @@ def build_forms(srcdir, info=None):

        if form.endswith('viewer%smain.ui'%os.sep):
            info('\t\tPromoting WebView')
            dat = dat.replace('self.view = QtWebKit.QWebView(', 'self.view = DocumentView(')
            dat = dat.replace('from PyQt4 import QtWebKit', '')
            if iswindows:
                dat = dat.replace('self.view = QWebView(', 'self.view = DocumentView(')
                dat = dat.replace('from QtWebKit.QWebView import QWebView', '')
src/calibre/gui2/viewer/dictionary.py (new file, 69 lines)
@@ -0,0 +1,69 @@

#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement

__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

from PyQt4.Qt import QThread

from calibre.utils.dictclient import Connection

class Lookup(QThread):

    TEMPLATE = u'''<html>
    <body>
    <div>
    {0}
    </div>
    </body>
    </html>
    '''

    def __init__(self, word, parent=None):
        QThread.__init__(self, parent)

        self.word = word.encode('utf-8') if isinstance(word, unicode) else word
        self.result = self.traceback = self.exception = None

    def define(self):
        conn = Connection('dict.org')
        self.result = conn.define('!', self.word)
        if self.result:
            self.result = self.result[0].defstr

    def run(self):
        try:
            self.define()
        except Exception, e:
            import traceback
            self.exception = e
            self.traceback = traceback.format_exc()

    def format_exception(self):
        lines = ['<b>Failed to connect to dict.org</b>', '']
        lines += self.traceback.splitlines()
        ans = '<br>'.join(lines)
        if not isinstance(ans, unicode):
            ans = ans.decode('utf-8')
        return self.TEMPLATE.format(ans)

    def no_results(self):
        ans = _('No results found for:') + ' ' + self.word.decode('utf-8')
        return self.TEMPLATE.format(ans)

    @property
    def html_result(self):
        if self.exception is not None:
            return self.format_exception()
        if not self.result:
            return self.no_results()
        lines = self.result.splitlines()
        lines[0] = '<b>'+lines[0]+'</b>'

        ans = '<br>'.join(lines)
        if not isinstance(ans, unicode):
            ans = ans.decode('utf-8')
        return self.TEMPLATE.format(ans)
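For a quick sense of how this class is consumed (the viewer code further down drives it through the thread's finished signal), the blocking define() call can also be exercised directly. A minimal sketch, assuming a running calibre environment with PyQt4 installed and network access to dict.org; the word 'ebook' is just an example input:

    from calibre.gui2.viewer.dictionary import Lookup

    lookup = Lookup(u'ebook')    # example word; normally the text selected in the viewer
    lookup.define()              # run() calls this on the worker thread; here we block instead
    print lookup.html_result     # HTML fragment the viewer loads into its dictionary panel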
@@ -9,8 +9,8 @@ import os, math, re, glob

from base64 import b64encode
from PyQt4.Qt import QSize, QSizePolicy, QUrl, SIGNAL, Qt, QTimer, \
        QPainter, QPalette, QBrush, QFontDatabase, QDialog, \
-       QColor, QPoint, QImage, QRegion, QVariant, \
-       QFont, QObject, QApplication, pyqtSignature
+       QColor, QPoint, QImage, QRegion, QVariant, QIcon, \
+       QFont, QObject, QApplication, pyqtSignature, QAction
from PyQt4.QtWebKit import QWebPage, QWebView, QWebSettings

from calibre.utils.config import Config, StringConfig

@@ -394,10 +394,20 @@ class DocumentView(QWebView):

        self.connect(self.document, SIGNAL('selectionChanged()'), self.selection_changed)
        self.connect(self.document, SIGNAL('animated_scroll_done()'),
                self.animated_scroll_done, Qt.QueuedConnection)
        copy_action = self.pageAction(self.document.Copy)
        copy_action.setIcon(QIcon(I('convert.svg')))
        d = self.document
        self.unimplemented_actions = list(map(self.pageAction,
            [d.DownloadImageToDisk, d.OpenLinkInNewWindow, d.DownloadLinkToDisk,
             d.OpenImageInNewWindow, d.OpenLink]))
        self.dictionary_action = QAction(QIcon(I('dictionary.png')),
                _('&Lookup in dictionary'), self)
        self.dictionary_action.setShortcut(Qt.CTRL+Qt.Key_L)
        self.dictionary_action.triggered.connect(self.lookup)

    @property
    def copy_action(self):
-       return self.document.action(QWebPage.Copy)
+       return self.pageAction(self.document.Copy)

    def animated_scroll_done(self):
        if self.manager is not None:

@@ -426,6 +436,21 @@ class DocumentView(QWebView):

        if self.manager is not None:
            self.manager.selection_changed(unicode(self.document.selectedText()))

    def contextMenuEvent(self, ev):
        menu = self.document.createStandardContextMenu()
        for action in self.unimplemented_actions:
            menu.removeAction(action)
        text = unicode(self.selectedText())
        if text:
            menu.insertAction(list(menu.actions())[0], self.dictionary_action)
        menu.exec_(ev.globalPos())

    def lookup(self, *args):
        if self.manager is not None:
            t = unicode(self.selectedText()).strip()
            if t:
                self.manager.lookup(t.split()[0])

    def set_manager(self, manager):
        self.manager = manager
        self.scrollbar = manager.horizontal_scrollbar
@@ -25,6 +25,7 @@ from calibre.utils.config import Config, StringConfig, dynamic

from calibre.gui2.search_box import SearchBox2
from calibre.ebooks.metadata import MetaInformation
from calibre.customize.ui import available_input_formats
from calibre.gui2.viewer.dictionary import Lookup

class TOCItem(QStandardItem):

@@ -171,6 +172,9 @@ class EbookViewer(MainWindow, Ui_EbookViewer):

        self.pending_bookmark = None
        self.selected_text = None
        self.read_settings()
        self.dictionary_box.hide()
        self.close_dictionary_view.clicked.connect(lambda
                x:self.dictionary_box.hide())
        self.history = History(self.action_back, self.action_forward)
        self.metadata = Metadata(self)
        self.pos = DoubleSpinBox()

@@ -239,6 +243,7 @@ class EbookViewer(MainWindow, Ui_EbookViewer):

        self.action_bookmark.setMenu(self.bookmarks_menu)
        self.set_bookmarks([])

        if pathtoebook is not None:
            f = functools.partial(self.load_ebook, pathtoebook)
            QTimer.singleShot(50, f)

@@ -261,6 +266,19 @@ class EbookViewer(MainWindow, Ui_EbookViewer):

        ca.setShortcut(QKeySequence.Copy)
        self.addAction(ca)

    def lookup(self, word):
        self.dictionary_view.setHtml('<html><body><p>'+ \
            _('Connecting to dict.org to lookup: <b>%s</b>…')%word + \
            '</p></body></html>')
        self.dictionary_box.show()
        self._lookup = Lookup(word, parent=self)
        self._lookup.finished.connect(self.looked_up)
        self._lookup.start()

    def looked_up(self, *args):
        html = self._lookup.html_result
        self._lookup = None
        self.dictionary_view.setHtml(html)

    def set_max_width(self):
        from calibre.gui2.viewer.documentview import config
@@ -50,6 +50,47 @@

    </property>
   </widget>
  </item>
  <item row="2" column="0" colspan="2">
   <widget class="QFrame" name="dictionary_box">
    <property name="frameShape">
     <enum>QFrame::StyledPanel</enum>
    </property>
    <property name="frameShadow">
     <enum>QFrame::Raised</enum>
    </property>
    <layout class="QHBoxLayout" name="horizontalLayout">
     <item>
      <widget class="QWebView" name="dictionary_view">
       <property name="minimumSize">
        <size>
         <width>0</width>
         <height>150</height>
        </size>
       </property>
       <property name="url">
        <url>
         <string>about:blank</string>
        </url>
       </property>
      </widget>
     </item>
     <item>
      <widget class="QToolButton" name="close_dictionary_view">
       <property name="toolTip">
        <string>Close dictionary</string>
       </property>
       <property name="text">
        <string>...</string>
       </property>
       <property name="icon">
        <iconset resource="../../../../resources/images.qrc">
         <normaloff>:/images/window-close.svg</normaloff>:/images/window-close.svg</iconset>
       </property>
      </widget>
     </item>
    </layout>
   </widget>
  </item>
 </layout>
</widget>
</widget>
src/calibre/utils/dictclient.py (new file, 317 lines)
@@ -0,0 +1,317 @@

# Client for the DICT protocol (RFC2229)
#
# Copyright (C) 2002 John Goerzen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

import socket, re

version = '1.0'

def dequote(str):
    """Will remove single or double quotes from the start and end of a string
    and return the result."""
    quotechars = "'\""
    while len(str) and str[0] in quotechars:
        str = str[1:]
    while len(str) and str[-1] in quotechars:
        str = str[0:-1]
    return str

def enquote(str):
    """This function will put a string in double quotes, properly
    escaping any existing double quotes with a backslash.  It will
    return the result."""
    return '"' + str.replace('"', "\\\"") + '"'

class Connection:
    """This class is used to establish a connection to a database server.
    You will usually use this as the first call into the dictclient library.
    Instantiating it takes two optional arguments: a hostname (a string)
    and a port (an int).  The hostname defaults to localhost
    and the port to 2628, the port specified in RFC."""
    def __init__(self, hostname = 'localhost', port = 2628):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((hostname, port))
        self.rfile = self.sock.makefile("rt")
        self.wfile = self.sock.makefile("wt", 0)
        self.saveconnectioninfo()

    def getresultcode(self):
        """Generic function to get a result code.  It will return a list
        consisting of two items: the integer result code and the text
        following.  You will not usually use this function directly."""
        line = self.rfile.readline().strip()
        code, text = line.split(' ', 1)
        return [int(code), text]

    def get200result(self):
        """Used when expecting a single line of text -- a 200-class
        result.  Returns [intcode, remaindertext]"""

        code, text = self.getresultcode()
        if code < 200 or code >= 300:
            raise Exception, "Got '%s' when 200-class response expected" % \
                  text
        return [code, text]

    def get100block(self):
        """Used when expecting multiple lines of text -- gets the block
        part only.  Does not get any codes or anything!  Returns a string."""
        data = []
        while 1:
            line = self.rfile.readline().strip()
            if line == '.':
                break
            data.append(line)
        return "\n".join(data)

    def get100result(self):
        """Used when expecting multiple lines of text, terminated by a period
        and a 200 code.  Returns: [initialcode, [bodytext_1lineperentry],
        finalcode]"""
        code, text = self.getresultcode()
        if code < 100 or code >= 200:
            raise Exception, "Got '%s' when 100-class response expected" % \
                  code

        bodylines = self.get100block().split("\n")

        code2 = self.get200result()[0]
        return [code, bodylines, code2]

    def get100dict(self):
        """Used when expecting a dictionary of results.  Will read from
        the initial 100 code, to a period and the 200 code."""
        dict = {}
        for line in self.get100result()[1]:
            key, val = line.split(' ', 1)
            dict[key] = dequote(val)
        return dict

    def saveconnectioninfo(self):
        """Called by __init__ to handle the initial connection.  Will
        save off the capabilities and messageid."""
        code, string = self.get200result()
        assert code == 220
        capstr, msgid = re.search('<(.*)> (<.*>)$', string).groups()
        self.capabilities = capstr.split('.')
        self.messageid = msgid

    def getcapabilities(self):
        """Returns a list of the capabilities advertised by the server."""
        return self.capabilities

    def getmessageid(self):
        """Returns the message id, including angle brackets."""
        return self.messageid

    def getdbdescs(self):
        """Gets a dict of available databases.  The key is the db name
        and the value is the db description.  This command may generate
        network traffic!"""
        if hasattr(self, 'dbdescs'):
            return self.dbdescs

        self.sendcommand("SHOW DB")
        self.dbdescs = self.get100dict()
        return self.dbdescs

    def getstratdescs(self):
        """Gets a dict of available strategies.  The key is the strat
        name and the value is the strat description.  This call may
        generate network traffic!"""
        if hasattr(self, 'stratdescs'):
            return self.stratdescs

        self.sendcommand("SHOW STRAT")
        self.stratdescs = self.get100dict()
        return self.stratdescs

    def getdbobj(self, dbname):
        """Gets a Database object corresponding to the database name passed
        in.  This function explicitly will *not* generate network traffic.
        If you have not yet run getdbdescs(), it will fail."""
        if not hasattr(self, 'dbobjs'):
            self.dbobjs = {}

        if self.dbobjs.has_key(dbname):
            return self.dbobjs[dbname]

        # We use self.dbdescs explicitly since we don't want to
        # generate net traffic with this request!

        if dbname != '*' and dbname != '!' and \
               not dbname in self.dbdescs.keys():
            raise Exception, "Invalid database name '%s'" % dbname

        self.dbobjs[dbname] = Database(self, dbname)
        return self.dbobjs[dbname]

    def sendcommand(self, command):
        """Takes a command, without a newline character, and sends it to
        the server."""
        self.wfile.write(command + "\n")

    def define(self, database, word):
        """Returns a list of Definition objects for each matching
        definition.  Parameters are the database name and the word
        to look up.  This is one of the main functions you will use
        to interact with the server.  Returns a list of Definition
        objects.  If there are no matches, an empty list is returned.

        Note: database may be '*' which means to search all databases,
        or '!' which means to return matches from the first database that
        has a match."""
        self.getdbdescs()                      # Prime the cache

        if database != '*' and database != '!' and \
               not database in self.getdbdescs():
            raise Exception, "Invalid database '%s' specified" % database

        self.sendcommand("DEFINE " + enquote(database) + " " + enquote(word))
        code = self.getresultcode()[0]

        retval = []

        if code == 552:
            # No definitions.
            return []
        if code != 150:
            raise Exception, "Unknown code %d" % code

        while 1:
            code, text = self.getresultcode()
            if code != 151:
                break

            resultword, resultdb = re.search('^"(.+)" (\S+)', text).groups()
            defstr = self.get100block()
            retval.append(Definition(self, self.getdbobj(resultdb),
                                     resultword, defstr))
        return retval

    def match(self, database, strategy, word):
        """Gets matches for a query.  Arguments are database name,
        the strategy (see available ones in getstratdescs()), and the
        pattern/word to look for.  Returns a list of Definition objects.
        If there is no match, an empty list is returned.

        Note: database may be '*' which means to search all databases,
        or '!' which means to return matches from the first database that
        has a match."""
        self.getstratdescs()                   # Prime the cache
        self.getdbdescs()                      # Prime the cache
        if not strategy in self.getstratdescs().keys():
            raise Exception, "Invalid strategy '%s'" % strategy
        if database != '*' and database != '!' and \
               not database in self.getdbdescs().keys():
            raise Exception, "Invalid database name '%s'" % database

        self.sendcommand("MATCH %s %s %s" % (enquote(database),
                                             enquote(strategy),
                                             enquote(word)))
        code = self.getresultcode()[0]
        if code == 552:
            # No Matches
            return []
        if code != 152:
            raise Exception, "Unexpected code %d" % code

        retval = []

        for matchline in self.get100block().split("\n"):
            matchdict, matchword = matchline.split(" ", 1)
            retval.append(Definition(self, self.getdbobj(matchdict),
                                     dequote(matchword)))
        if self.getresultcode()[0] != 250:
            raise Exception, "Unexpected end-of-list code %d" % code
        return retval

class Database:
    """An object corresponding to a particular database in a server."""
    def __init__(self, dictconn, dbname):
        """Initialize the object -- requires a Connection object and
        a database name."""
        self.conn = dictconn
        self.name = dbname

    def getname(self):
        """Returns the short name for this database."""
        return self.name

    def getdescription(self):
        if hasattr(self, 'description'):
            return self.description
        if self.getname() == '*':
            self.description = 'All Databases'
        elif self.getname() == '!':
            self.description = 'First matching database'
        else:
            self.description = self.conn.getdbdescs()[self.getname()]
        return self.description

    def getinfo(self):
        """Returns a string of info describing this database."""
        if hasattr(self, 'info'):
            return self.info

        if self.getname() == '*':
            self.info = "This special database will search all databases on the system."
        elif self.getname() == '!':
            self.info = "This special database will return matches from the first matching database."
        else:
            self.conn.sendcommand("SHOW INFO " + self.name)
            self.info = "\n".join(self.conn.get100result()[1])
        return self.info

    def define(self, word):
        """Get a definition from within this database.
        The argument, word, is the word to look up.  The return value is the
        same as from Connection.define()."""
        return self.conn.define(self.getname(), word)

    def match(self, strategy, word):
        """Get a match from within this database.
        The argument, word, is the word to look up.  The return value is
        the same as from Connection.define()."""
        return self.conn.match(self.getname(), strategy, word)

class Definition:
    """An object corresponding to a single definition."""
    def __init__(self, dictconn, db, word, defstr = None):
        """Instantiate the object.  Requires: a Connection object,
        a Database object (NOT corresponding to '*' or '!' databases),
        a word.  Optional: a definition string.  If not supplied,
        it will be fetched if/when it is requested."""
        self.conn = dictconn
        self.db = db
        self.word = word
        self.defstr = defstr

    def getdb(self):
        """Get the Database object corresponding to this definition."""
        return self.db

    def getdefstr(self):
        """Get the definition string (the actual content) of this
        definition."""
        if not self.defstr:
            self.defstr = self.conn.define(self.getdb().getname(), self.word)[0].getdefstr()
        return self.defstr

    def getword(self):
        """Get the word this object describes."""
        return self.word
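A minimal sketch of using this client directly, mirroring what Lookup.define() above does; it assumes network access to dict.org and uses 'ebook' purely as an example query:

    from calibre.utils.dictclient import Connection

    conn = Connection('dict.org')          # the defaults would be localhost:2628
    defs = conn.define('!', 'ebook')       # '!' = first database with a match
    if defs:
        print defs[0].getdefstr()          # the raw definition text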
@@ -1,9 +1,10 @@

"""
Read and write ZIP files. Modified by Kovid Goyal to support replacing files in
a zip archive.
"""
from __future__ import with_statement
from calibre.ptempfile import TemporaryDirectory
from calibre import sanitize_file_name
import struct, os, time, sys, shutil
import binascii, cStringIO

@@ -789,80 +790,80 @@ class ZipFile:

        if self.debug > 2:
            print "total", total

    def _calculate_file_offsets(self):
        for zip_info in self.filelist:
            self.fp.seek(zip_info.header_offset, 0)
            fheader = self.fp.read(30)
            if fheader[0:4] != stringFileHeader:
                raise BadZipfile, "Bad magic number for file header"
            fheader = struct.unpack(structFileHeader, fheader)
            # file_offset is computed here, since the extra field for
            # the central directory and for the local file header
            # refer to different fields, and they can have different
            # lengths
            file_offset = (zip_info.header_offset + 30
                           + fheader[_FH_FILENAME_LENGTH]
                           + fheader[_FH_EXTRA_FIELD_LENGTH])
            fname = self.fp.read(fheader[_FH_FILENAME_LENGTH])
            if fname != zip_info.orig_filename:
                raise RuntimeError(
                      'File name in directory "%s" and header "%s" differ.' % (
                          zip_info.orig_filename, fname))

            zip_info.file_offset = file_offset

    def replace(self, filename, arcname=None, compress_type=None):
        """Delete arcname, and put the bytes from filename into the
        archive under the name arcname."""
        deleteName = arcname
        if deleteName is None:
            deleteName = filename
        self.delete(deleteName)
        self.write(filename, arcname, compress_type)

    def replacestr(self, zinfo, bytes):
        """Delete zinfo.filename, and write a new file into the archive. The
        contents is the string 'bytes'."""
        self.delete(zinfo.filename)
        self.writestr(zinfo, bytes)

    def delete(self, name):
        """Delete the file from the archive. If it appears multiple
        times only the first instance will be deleted."""
        for i in range (0, len(self.filelist)):
            if self.filelist[i].filename == name:
                if self.debug:
                    print "Removing", name
                deleted_offset = self.filelist[i].header_offset
                deleted_size = (self.filelist[i].file_offset - self.filelist[i].header_offset) + self.filelist[i].compress_size
                zinfo_size = struct.calcsize(structCentralDir) + len(self.filelist[i].filename) + len(self.filelist[i].extra)
                # Remove the file's data from the archive.
                current_offset = self.fp.tell()
                self.fp.seek(0, 2)
                archive_size = self.fp.tell()
                self.fp.seek(deleted_offset + deleted_size)
                buf = self.fp.read()
                self.fp.seek(deleted_offset)
                self.fp.write(buf)
                self.fp.truncate(archive_size - deleted_size - zinfo_size)
                if current_offset > deleted_offset + deleted_size:
                    current_offset -= deleted_size
                elif current_offset > deleted_offset:
                    current_offset = deleted_offset
                self.fp.seek(current_offset, 0)
                # Remove file from central directory.
                del self.filelist[i]
                # Adjust the remaining offsets in the central directory.
                for j in range (i, len(self.filelist)):
                    if self.filelist[j].header_offset > deleted_offset:
                        self.filelist[j].header_offset -= deleted_size
                    if self.filelist[j].file_offset > deleted_offset:
                        self.filelist[j].file_offset -= deleted_size
                self._didModify = True
                return
        if self.debug:
            print name, "not in archive"

    def namelist(self):
        """Return a list of file names in the archive."""

@@ -1035,10 +1036,14 @@ class ZipFile:

                os.unlink(upperdirs)
            if upperdirs and not os.path.exists(upperdirs):
                os.makedirs(upperdirs)

            source = self.open(member, pwd=pwd)
            if not os.path.exists(targetpath): # Could be a previously automatically created directory
-               target = open(targetpath, "wb")
+               try:
+                   target = open(targetpath, "wb")
+               except IOError:
+                   targetpath = sanitize_file_name(targetpath)
+                   target = open(targetpath, "wb")
                shutil.copyfileobj(source, target)
                source.close()
                target.close()

@@ -1179,7 +1184,7 @@ class ZipFile:

                                 zinfo.file_size))
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo

    def add_dir(self, path, prefix=''):
        '''
        Add a directory recursively to the zip file with an optional prefix.

@@ -1195,10 +1200,10 @@ class ZipFile:

                if os.path.isdir(f):
                    self.add_dir(f, prefix=arcname)
                else:
                    self.write(f, arcname)
        finally:
            os.chdir(cwd)

    def __del__(self):
        """Call the "close()" method in case the user forgot."""

@@ -1294,7 +1299,7 @@ class ZipFile:

        if self.debug > 0:
            msg = 'Archive comment is too long; truncating to %d bytes' \
                  % ZIP_MAX_COMMENT
            print msg
        self.comment = self.comment[:ZIP_MAX_COMMENT]

        endrec = struct.pack(structEndArchive, stringEndArchive,

@@ -1314,7 +1319,7 @@ def safe_replace(zipstream, name, datastream):

    Replace a file in a zip file in a safe manner. This proceeds by extracting
    and re-creating the zipfile. This is necessary because :method:`ZipFile.replace`
    sometimes created corrupted zip files.

    :param zipstream: Stream from a zip file
    :param name: The name of the file to replace
    :param datastream: The data to replace the file with.
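A minimal usage sketch for safe_replace(), under the assumption that the module is importable as calibre.utils.zipfile and with hypothetical file names chosen purely for illustration:

    from cStringIO import StringIO
    from calibre.utils.zipfile import safe_replace   # assumed import path

    with open('book.epub', 'r+b') as zipstream:      # hypothetical archive, opened for update
        datastream = StringIO('<package/>')          # replacement data as a file-like object
        safe_replace(zipstream, 'content.opf', datastream)

Because safe_replace() re-creates the archive rather than shifting bytes in place, it avoids the corruption the docstring mentions for the in-place ZipFile.replace()/delete() path.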