commit 61ae1a8045
merge from trunk
@@ -464,5 +464,14 @@ h2.library_name {
    max-height: 50%;
}

+.details a.details_category_link {
+    text-decoration: none;
+    color: blue
+}
+
+.details a.details_category_link:hover {
+    color: red
+}
+
/* }}} */
BIN resources/images/news/bighollywood.png (new file, 2.0 KiB)
Binary file not shown.
resources/recipes/bighollywood.recipe (new file, 63 lines)
@@ -0,0 +1,63 @@
__license__ = 'GPL v3'
__copyright__ = '2011, Darko Miletic <darko.miletic at gmail.com>'
'''
bighollywood.breitbart.com
'''

from calibre.web.feeds.news import BasicNewsRecipe

class BigHollywood(BasicNewsRecipe):
    title = 'Big Hollywood'
    __author__ = 'Darko Miletic'
    description = 'News and articles from the media world'
    publisher = 'Big Hollywood'
    category = 'news, media, art, literature, movies, politics, USA, Hollywood'
    oldest_article = 7
    max_articles_per_feed = 200
    no_stylesheets = True
    encoding = 'utf8'
    use_embedded_content = False
    language = 'en'
    remove_empty_feeds = True
    publication_type = 'blog'
    extra_css = """
        body{font-family: Arial,sans-serif }
    """

    conversion_options = {
          'comment'   : description
        , 'tags'      : category
        , 'publisher' : publisher
        , 'language'  : language
    }

    keep_only_tags=[dict(attrs={'class':'postcontent'})]
    remove_tags = [
         dict(name=['meta','link','link','iframe','embed','object'])
        ,dict(name='p', attrs={'class':['post_meta_links','postfooter']})
    ]
    remove_attributes=['original','onclick']

    feeds = [(u'Articles', u'http://bighollywood.breitbart.com/feed/')]

    def preprocess_html(self, soup):
        for item in soup.findAll(style=True):
            del item['style']
        for item in soup.findAll('a'):
            limg = item.find('img')
            if item.string is not None:
                str = item.string
                item.replaceWith(str)
            else:
                if limg:
                    if limg['src'].endswith('BlogPrintButton.png'):
                        limg.extract()
                    item.name = 'div'
                    item.attrs = []
                else:
                    str = self.tag_to_string(item)
                    item.replaceWith(str)
        for item in soup.findAll('img'):
            if not item.has_key('alt'):
                item['alt'] = 'image'
        return soup
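The preprocess_html hook in this recipe unwraps plain-text links and strips the blog's print button before conversion. A minimal standalone sketch of the same idea, using bs4 instead of calibre's bundled BeautifulSoup and an invented HTML snippet, for illustration only:

# Illustration only: bs4 equivalents of the BeautifulSoup calls used above.
from bs4 import BeautifulSoup

html = '<p><a href="/p"><img src="/img/BlogPrintButton.png"/></a> <a href="/a">keep me</a></p>'
soup = BeautifulSoup(html, 'html.parser')

for a in soup.find_all('a'):
    img = a.find('img')
    if a.string is not None:
        a.replace_with(a.string)      # text-only links become bare text
    elif img is not None and img['src'].endswith('BlogPrintButton.png'):
        img.extract()                 # drop the print button image
        a.name = 'div'                # neutralise the now-empty anchor
        a.attrs = {}

print(soup)                           # <p><div></div> keep me</p>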
resources/recipes/credit_slips.recipe (new file, 35 lines)
@@ -0,0 +1,35 @@
#!/usr/bin/env python
__license__ = 'GPL 3'
__copyright__ = 'zotzot'
__docformat__ = 'restructuredtext en'

from calibre.web.feeds.news import BasicNewsRecipe


class CreditSlips(BasicNewsRecipe):
    __license__ = 'GPL v3'
    __author__ = 'zotzot'
    language = 'en'
    version = 1
    title = u'Credit Slips.org'
    publisher = u'Bankr-L'
    category = u'Economic blog'
    description = u'All things about credit.'
    cover_url = 'http://bit.ly/hyZSTr'
    oldest_article = 50
    max_articles_per_feed = 100
    use_embedded_content = True

    feeds = [
        (u'Credit Slips', u'http://www.creditslips.org/creditslips/atom.xml')
    ]
    conversion_options = {
        'comments': description,
        'tags': category,
        'language': 'en',
        'publisher': publisher
    }
    extra_css = '''
        body{font-family:verdana,arial,helvetica,geneva,sans-serif;}
        img {float: left; margin-right: 0.5em;}
        '''
resources/recipes/detroit_news.recipe (new file, 64 lines)
@@ -0,0 +1,64 @@
from calibre.web.feeds.news import BasicNewsRecipe

import re

class AdvancedUserRecipe1297291961(BasicNewsRecipe):
    title = u'Detroit News'
    language = 'en'
    __author__ = 'DTM'
    oldest_article = 2
    max_articles_per_feed = 20
    no_stylesheets = True
    conversion_options = {
        'linearize_tables' : True,
    }

    feeds = [
        (u'Headlines', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss&mime=xml'),
        (u'Nation/World', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss09&mime=xml'),
        (u'Metro/State', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss36&mime=xml'),
        (u'Wayne County', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss01&mime=xml'),
        (u'Oakland County', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss02&mime=xml'),
        (u'Macomb County', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss03&mime=xml'),
        (u'Livingston County', u'http://detnews.com/apps/pbcs.dll/section?category=rss04&mime=xml'),
        (u'Politics/Government', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss10&mime=xml'),
        (u'Editorials', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss07&mime=xml'),
        (u'Columnists', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss43&mime=xml'),
        (u'Charlie LeDuff', u'http://detnews.com/apps/pbcs.dll/section?category=rss54&mime=xml'),
        (u'Religion', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss11&mime=xml'),
        (u'Technology', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss12&mime=xml'),
        (u'Commuting', u'http://detnews.com/apps/pbcs.dll/section?category=rss05&mime=xml'),
        (u'Schools', u'http://detnews.com/apps/pbcs.dll/section?category=rss06&mime=xml'),
        (u'Obituaries', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss08&mime=xml'),
        (u'Autos Insider', u'http://detnews.com/apps/pbcs.dll/section?category=rss25&mime=xml'),
        (u'Drive', u'http://detnews.com/apps/pbcs.dll/section?category=rss26&mime=xml'),
        (u'Business', u'http://detnews.com/apps/pbcs.dll/section?category=rss21&mime=xml'),
        (u'Personal Finance', u'http://detnews.com/apps/pbcs.dll/section?category=rss23&mime=xml'),
        (u'Real Estate', u'http://detnews.com/apps/pbcs.dll/section?category=rss24&mime=xml'),
        (u'Movies', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss28&mime=xml'),
        (u'TV', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss40&mime=xml'),
        (u'Music/Nightlife', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss30&mime=xml'),
        (u'Celebrities', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss51&mime=xml'),
        (u'The Arts', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss27&mime=xml'),
        (u'Food', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss29&mime=xml'),
        (u'Homestyle', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss31&mime=xml'),
        (u'The Green Life', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss53&mime=xml'),
        (u'Lifestyle', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss32&mime=xml'),
        (u'Health', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss34&mime=xml'),
        (u'Travel', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss52&mime=xml'),
        (u'Advice', u'http://www.detnews.com/apps/pbcs.dll/section?category=rss50&mime=xml'),
        (u'Pistons', u'http://detnews.com/apps/pbcs.dll/section?category=rss13&mime=xml'),
        (u'Lions', u'http://detnews.com/apps/pbcs.dll/section?category=rss14&mime=xml'),
        (u'Tigers', u'http://detnews.com/apps/pbcs.dll/section?category=rss15&mime=xml'),
        (u'Red Wings', u'http://detnews.com/apps/pbcs.dll/section?category=rss16&mime=xml'),
        (u'Michigan State', u'http://detnews.com/apps/pbcs.dll/section?category=rss18&mime=xml'),
        (u'University of Michigan', u'http://detnews.com/apps/pbcs.dll/section?category=rss17&mime=xml'),
        (u'Motor Sports', u'http://detnews.com/apps/pbcs.dll/section?category=rss20&mime=xml'),
        (u'Golf', u'http://detnews.com/apps/pbcs.dll/section?category=rss47&mime=xml'),
        (u'Outdoors', u'http://detnews.com/apps/pbcs.dll/section?category=rss19&mime=xml')
    ]

    def print_version(self, url):
        p = re.compile('(/\d{4}|/-1)/(rss|ENT|LIFESTYLE|OPINION|METRO)\d*')
        m = p.search(url)
        return url.replace(m.group(), '&template=printart')
resources/recipes/epl_talk.recipe (new file, 37 lines)
@@ -0,0 +1,37 @@
#!/usr/bin/env python
__license__ = 'GPL 3'
__copyright__ = 'zotzot'
__docformat__ = 'restructuredtext en'
'''
http://www.epltalk.com
'''
from calibre.web.feeds.news import BasicNewsRecipe


class EPLTalkRecipe(BasicNewsRecipe):
    __license__ = 'GPL v3'
    __author__ = u'The Gaffer'
    language = 'en'
    version = 1

    title = u'EPL Talk'
    publisher = u'The Gaffer'
    publication_type = 'Blog'
    category = u'Soccer'
    description = u'News and Analysis from the English Premier League'
    cover_url = 'http://bit.ly/hJxZPu'

    oldest_article = 45
    max_articles_per_feed = 150
    use_embedded_content = True
    remove_javascript = True
    encoding = 'utf8'

    remove_tags_after = [dict(name='div', attrs={'class':'pd-rating'})]

    feeds = [(u'EPL Talk', u'http://feeds.feedburner.com/EPLTalk')]

    extra_css = '''
        body{font-family:verdana,arial,helvetica,geneva,sans-serif;}
        img {float: left; margin-right: 0.5em;}
        '''
resources/recipes/fan_graphs.recipe (new file, 39 lines)
@@ -0,0 +1,39 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2011 zotzot'
__docformat__ = 'PEP8'
'''
www.fangraphs.com
'''

from calibre.web.feeds.news import BasicNewsRecipe

class FanGraphs(BasicNewsRecipe):
    title = u'FanGraphs'
    oldest_article = 21
    max_articles_per_feed = 100
    no_stylesheets = True
    #delay = 1
    use_embedded_content = False
    encoding = 'utf8'
    publisher = 'Fangraphs'
    category = 'Baseball'
    language = 'en'
    publication_type = 'Blog'

    description = 'Baseball statistical analysis, graphs, and projections.'
    __author__ = 'David Appelman'
    cover_url = 'http://bit.ly/g0BTdQ'

    feeds = [
        (u'Fangraphs', u'http://feeds.feedburner.com/FanGraphs?format=xml'),
        (u'Rotographs', u'http://www.wizardrss.com/feed/feeds.feedburner.com/RotoGraphs?format=xml'),
        (u'Community', u'http://www.wizardrss.com/feed/www.fangraphs.com/community/?feed=rss2'),
        (u'NotGraphs', u'http://www.wizardrss.com/feed/www.fangraphs.com/not/?feed=rss2')]

    extra_css = '''
        h1{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:large;}
        h2{font-family:Arial,Helvetica,sans-serif; font-weight:normal;font-size:small;}
        p{font-family:Arial,Helvetica,sans-serif;font-size:small;}
        body{font-family:Helvetica,Arial,sans-serif;font-size:small;}
        '''
resources/recipes/mediaindonesia.recipe (new file, 40 lines)
@@ -0,0 +1,40 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2011, bakthariq AT gmail.com'
'''
m.mediaindonesia.com
'''

from calibre.web.feeds.recipes import BasicNewsRecipe


class Media(BasicNewsRecipe):
    title = u'Media Indonesia'
    __author__ = 'bakthariq'
    oldest_article = 1
    max_articles_per_feed = 500
    timefmt = ' [%a, %b %d, %Y]'
    language = 'id'
    category = 'News, Indonesia'
    publisher = 'Media Indonesia'
    encoding = 'utf-8'
    no_stylesheets = True
    description = 'Indonesian Media Indonesia newsportal'
    cover_url = 'http://m.mediaindonesia.com/public/img/logo.gif'
    no_javascript = True

    remove_tags = [dict(id=['atas','merah','putih']), dict(name='a')]
    remove_tags_after = [dict(id="putih")]

    extra_css = '''
        .judul {font-size: x-large;}
        .tgl {font-size: x-small;color:#333333;}
        .foto {font-size: x-small;}
        '''

    feeds = [(u'Polhukam', u'http://www.mediaindonesia.com/rss/1/polhukam'), (u'Ekonomi dan Bisnis', u'http://www.mediaindonesia.com/rss/2/ekonomi-dan-bisnis'),
             (u'Internasional', u'http://www.mediaindonesia.com/rss/6/internasional'), (u'Olahraga', u'http://www.mediaindonesia.com/rss/3/olahraga'),(u'Sepakbola',
             u'http://www.mediaindonesia.com/rss/4/sepakbola'),(u'Megapolitan', u'http://www.mediaindonesia.com/rss/5/megapolitan'), (u'Sains dan Teknologi',
             u'http://www.mediaindonesia.com/rss/7/sains-dan-teknologi'), (u'Humaniora', u'http://www.mediaindonesia.com/rss/14/humaniora'), (u'Hiburan',
             u'http://www.mediaindonesia.com/rss/10/hiburan'), (u'Opini', u'http://www.mediaindonesia.com/rss/11/opini')]
resources/recipes/oregonian.recipe (new file, 44 lines)
@@ -0,0 +1,44 @@
from __future__ import with_statement
__license__ = 'GPL 3'
__copyright__ = 'zotzot'
__docformat__ = 'restructuredtext en'

from calibre.web.feeds.news import BasicNewsRecipe


class Oregonian(BasicNewsRecipe):
    title = u'The Oregonian'
    oldest_article = 2
    max_articles_per_feed = 100
    language = 'en'
    __author__ = 'Zotzot'
    description = 'Portland, Oregon local newspaper'
    publisher = 'Advance Publications'
    category = 'news, Portland'
    cover_url = 'http://bit.ly/gUgxGd'
    no_stylesheets = True
    masthead_url = 'http://bit.ly/eocL70'
    remove_tags = [dict(name='div', attrs={'class':['footer', 'content']})]
    use_embedded_content = False
    remove_tags_before = dict(id='article')
    remove_tags_after = dict(id='article')
    feeds = [
        #(u'Timbers', u'feed://blog.oregonlive.com/timbers_impact/atom.xml'),
        (u'News', u'http://blog.oregonlive.com/news_impact/atom.xml'),
        (u'Opinion', u'http://blog.oregonlive.com/opinion_impact/atom.xml'),
        (u'Living', u'http://blog.oregonlive.com/living_impact/atom.xml'),
        (u'Sports', u'http://blog.oregonlive.com/sports_impact/atom.xml'),
        (u'Business', u'http://blog.oregonlive.com/business_impact/atom.xml')]

    extra_css = '''
        h1{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:large;}
        h2{font-family:Arial,Helvetica,sans-serif; font-weight:normal;font-size:small;}
        p{font-family:Arial,Helvetica,sans-serif;font-size:small;}
        body{font-family:Helvetica,Arial,sans-serif;font-size:small;}
        '''

    def get_article_url(self, article):
        url = BasicNewsRecipe.get_article_url(self, article)
        if '/video/' not in url:
            return url
@@ -254,6 +254,13 @@
    <xsl:template match="fb:empty-line">
        <br/>
    </xsl:template>
+   <!-- super/sub-scripts -->
+   <xsl:template match="fb:sup">
+       <sup><xsl:apply-templates/></sup>
+   </xsl:template>
+   <xsl:template match="fb:sub">
+       <sub><xsl:apply-templates/></sub>
+   </xsl:template>
    <!-- link -->
    <xsl:template match="fb:a">
        <xsl:element name="a">
@@ -6,7 +6,7 @@ __license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

-import os, cPickle, re, anydbm, shutil
+import os, cPickle, re, anydbm, shutil, marshal
from zlib import compress

from setup import Command, basenames, __appname__
@@ -194,7 +194,7 @@ class Resources(Command):
    def kanwaout(self, out):
        dic = anydbm.open(out, 'c')
        for (k, v) in self.records.iteritems():
-            dic[k] = compress(cPickle.dumps(v, -1))
+            dic[k] = compress(marshal.dumps(v))
        dic.close()
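The kanwaout() change above swaps cPickle for marshal when serialising the dictionary records. A tiny Python 2 sketch (the record data is made up) of the two calls side by side, purely for illustration:

# Python 2 sketch: both produce bytes suitable for zlib compression.
import marshal, cPickle
from zlib import compress

record = {u'kanji': [1, 2, 3]}
print len(compress(cPickle.dumps(record, -1))), len(compress(marshal.dumps(record)))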
@@ -95,22 +95,22 @@ class TXT2TXTZ(FileTypePlugin):
    file_types = set(['txt'])
    supported_platforms = ['windows', 'osx', 'linux']
    on_import = True

    def _get_image_references(self, txt, base_dir):
        images = []

        # Textile
        for m in re.finditer(ur'(?mu)(?:[\[{])?\!(?:\. )?(?P<path>[^\s(!]+)\s?(?:\(([^\)]+)\))?\!(?::(\S+))?(?:[\]}]|(?=\s|$))', txt):
            path = m.group('path')
            if path and not os.path.isabs(path) and guess_type(path)[0] in OEB_IMAGES and os.path.exists(os.path.join(base_dir, path)):
                images.append(path)

-        # Markdown inline
+
+        # Markdown inline
        for m in re.finditer(ur'(?mu)\!\[([^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*)\]\s*\((?P<path>[^\)]*)\)', txt):
            path = m.group('path')
            if path and not os.path.isabs(path) and guess_type(path)[0] in OEB_IMAGES and os.path.exists(os.path.join(base_dir, path)):
                images.append(path)

        # Markdown reference
        refs = {}
        for m in re.finditer(ur'(?mu)^(\ ?\ ?\ ?)\[(?P<id>[^\]]*)\]:\s*(?P<path>[^\s]*)$', txt):
@@ -123,13 +123,13 @@ class TXT2TXTZ(FileTypePlugin):

        # Remove duplicates
        return list(set(images))

    def run(self, path_to_ebook):
        with open(path_to_ebook, 'rb') as ebf:
            txt = ebf.read()
        base_dir = os.path.dirname(path_to_ebook)
        images = self._get_image_references(txt, base_dir)

        if images:
            # Create TXTZ and put file plus images inside of it.
            import zipfile
@@ -1030,3 +1030,10 @@ plugins += [LookAndFeel, Behavior, Columns, Toolbar, Search, InputOptions,
                Email, Server, Plugins, Tweaks, Misc, TemplateFunctions]

#}}}
+
+# New metadata download plugins {{{
+from calibre.ebooks.metadata.sources.google import GoogleBooks
+
+plugins += [GoogleBooks]
+
+# }}}
@@ -20,6 +20,7 @@ from calibre.ebooks.metadata.fetch import MetadataSource
from calibre.utils.config import make_config_dir, Config, ConfigProxy, \
                                 plugin_dir, OptionParser, prefs
from calibre.ebooks.epub.fix import ePubFixer
+from calibre.ebooks.metadata.sources.base import Source

platform = 'linux'
if iswindows:
@@ -493,6 +494,17 @@ def epub_fixers():
        yield plugin
# }}}

+# Metadata sources2 {{{
+def metadata_plugins(capabilities):
+    capabilities = frozenset(capabilities)
+    for plugin in _initialized_plugins:
+        if isinstance(plugin, Source) and \
+                plugin.capabilities.intersection(capabilities) and \
+                not is_disabled(plugin):
+            yield plugin
+
+# }}}
+
# Initialize plugins {{{

_initialized_plugins = []
@@ -2517,28 +2517,25 @@ class ITUNES(DriverBase):
        opf = [x for x in fnames if '.opf' in x][0]
        if opf:
            opf_tree = etree.fromstring(zf_opf.read(opf))
-            ns_map = opf_tree.nsmap.keys()
-            for item in ns_map:
-                ns = opf_tree.nsmap[item]
-                md_el = opf_tree.find(".//{%s}metadata" % ns)
-                if md_el is not None:
-                    ts = md_el.find('.//{%s}meta[@name="calibre:timestamp"]')
-                    if ts:
-                        timestamp = ts.get('content')
-                        old_ts = parse_date(timestamp)
-                        metadata.timestamp = datetime.datetime(old_ts.year, old_ts.month, old_ts.day, old_ts.hour,
-                            old_ts.minute, old_ts.second, old_ts.microsecond+1, old_ts.tzinfo)
-                    else:
-                        metadata.timestamp = now()
-                        if DEBUG:
-                            self.log.info(" add timestamp: %s" % metadata.timestamp)
-                    break
+            md_els = opf_tree.xpath('.//*[local-name()="metadata"]')
+            if md_els:
+                ts = md_els[0].find('.//*[@name="calibre:timestamp"]')
+                if ts is not None:
+                    timestamp = ts.get('content')
+                    old_ts = parse_date(timestamp)
+                    metadata.timestamp = datetime.datetime(old_ts.year, old_ts.month, old_ts.day, old_ts.hour,
+                        old_ts.minute, old_ts.second, old_ts.microsecond+1, old_ts.tzinfo)
+                    if DEBUG:
+                        self.log.info(" existing timestamp: %s" % metadata.timestamp)
+                else:
+                    metadata.timestamp = now()
+                    if DEBUG:
+                        self.log.info(" add timestamp: %s" % metadata.timestamp)
            else:
                metadata.timestamp = now()
                if DEBUG:
                    self.log.warning(" missing <metadata> block in OPF file")
                    self.log.info(" add timestamp: %s" % metadata.timestamp)

            # Force the language declaration for iBooks 1.1
            #metadata.language = get_lang().replace('_', '-')
@@ -22,7 +22,7 @@ class KOBO(USBMS):
    gui_name = 'Kobo Reader'
    description = _('Communicate with the Kobo Reader')
    author = 'Timothy Legge and Kovid Goyal'
-    version = (1, 0, 7)
+    version = (1, 0, 9)

    dbversion = 0
    fwversion = 0
@@ -124,9 +124,12 @@ class KOBO(USBMS):
                if imagename is not None:
                    bl[idx].thumbnail = ImageWrapper(imagename)
                if (ContentType != '6' and MimeType != 'Shortcover'):
-                    if self.update_metadata_item(bl[idx]):
-                        # print 'update_metadata_item returned true'
-                        changed = True
+                    if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
+                        if self.update_metadata_item(bl[idx]):
+                            # print 'update_metadata_item returned true'
+                            changed = True
+                    else:
+                        debug_print(" Strange: The file: ", prefix, lpath, " does mot exist!")
                if lpath in playlist_map and \
                    playlist_map[lpath] not in bl[idx].device_collections:
                    bl[idx].device_collections.append(playlist_map[lpath])
@@ -135,7 +138,13 @@ class KOBO(USBMS):
                    book = Book(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=1048576)
            else:
                try:
-                    book = self.book_from_path(prefix, lpath, title, authors, mime, date, ContentType, ImageID)
+                    if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
+                        book = self.book_from_path(prefix, lpath, title, authors, mime, date, ContentType, ImageID)
+                    else:
+                        debug_print(" Strange: The file: ", prefix, lpath, " does mot exist!")
+                        title = "FILE MISSING: " + title
+                        book = Book(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=1048576)
+
                except:
                    debug_print("prefix: ", prefix, "lpath: ", lpath, "title: ", title, "authors: ", authors, \
                                "mime: ", mime, "date: ", date, "ContentType: ", ContentType, "ImageID: ", ImageID)
@@ -152,6 +161,10 @@ class KOBO(USBMS):
        return changed

        connection = sqlite.connect(self.normalize_path(self._main_prefix + '.kobo/KoboReader.sqlite'))
+
+        # return bytestrings if the content cannot the decoded as unicode
+        connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
+
        cursor = connection.cursor()

        #query = 'select count(distinct volumeId) from volume_shortcovers'
@@ -221,6 +234,10 @@ class KOBO(USBMS):

        debug_print('delete_via_sql: ContentID: ', ContentID, 'ContentType: ', ContentType)
        connection = sqlite.connect(self.normalize_path(self._main_prefix + '.kobo/KoboReader.sqlite'))
+
+        # return bytestrings if the content cannot the decoded as unicode
+        connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
+
        cursor = connection.cursor()
        t = (ContentID,)
        cursor.execute('select ImageID from content where ContentID = ?', t)
@@ -494,6 +511,10 @@ class KOBO(USBMS):
        # the last book from the collection the list of books is empty
        # and the removal of the last book would not occur
        connection = sqlite.connect(self.normalize_path(self._main_prefix + '.kobo/KoboReader.sqlite'))
+
+        # return bytestrings if the content cannot the decoded as unicode
+        connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
+
        cursor = connection.cursor()
@@ -53,7 +53,7 @@ _CHARSET_ALIASES = { "macintosh" : "mac-roman",
def force_encoding(raw, verbose, assume_utf8=False):
    from calibre.constants import preferred_encoding
    try:
-        chardet = detect(raw)
+        chardet = detect(raw[:1024*50])
    except:
        chardet = {'encoding':preferred_encoding, 'confidence':0}
    encoding = chardet['encoding']
@@ -264,10 +264,17 @@
class CSSPreProcessor(object):

    PAGE_PAT = re.compile(r'@page[^{]*?{[^}]*?}')
+    # Remove some of the broken CSS Microsoft products
+    # create, slightly dangerous as it removes to end of line
+    # rather than semi-colon
+    MS_PAT = re.compile(r'^\s*(mso-|panose-).+?$',
+            re.MULTILINE|re.IGNORECASE)

    def __call__(self, data, add_namespace=False):
        from calibre.ebooks.oeb.base import XHTML_CSS_NAMESPACE
        data = self.PAGE_PAT.sub('', data)
+        if '\n' in data:
+            data = self.MS_PAT.sub('', data)
        if not add_namespace:
            return data
        ans, namespaced = [], False
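To see what the new MS_PAT filter does, here is a small self-contained sketch (the CSS sample is invented) showing an @page rule and an mso- property being stripped, mirroring the two substitutions in __call__ above:

# Sketch of the PAGE_PAT / MS_PAT clean-up, using the same patterns as the diff.
import re

PAGE_PAT = re.compile(r'@page[^{]*?{[^}]*?}')
MS_PAT = re.compile(r'^\s*(mso-|panose-).+?$', re.MULTILINE | re.IGNORECASE)

css = '@page Section1 { size: 8.5in 11.0in; }\np {\n  mso-style-parent: Normal;\n  color: black;\n}\n'
css = PAGE_PAT.sub('', css)
if '\n' in css:
    css = MS_PAT.sub('', css)   # note: removes to end of line, not to the semicolon
print(css)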
@@ -157,7 +157,7 @@ class HeuristicProcessor(object):

        ITALICIZE_STYLE_PATS = [
            r'(?msu)(?<=[\s>])_(?P<words>[^_]+)_',
-            r'(?msu)(?<=[\s>])/(?P<words>[^/]+)/',
+            r'(?msu)(?<=[\s>])/(?P<words>[^/\*>]+)/',
            r'(?msu)(?<=[\s>])~~(?P<words>[^~]+)~~',
            r'(?msu)(?<=[\s>])\*(?P<words>[^\*]+)\*',
            r'(?msu)(?<=[\s>])~(?P<words>[^~]+)~',
src/calibre/ebooks/metadata/sources/amazon.py (new file, 22 lines)
@@ -0,0 +1,22 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'


from calibre.ebooks.metadata.sources.base import Source

class Amazon(Source):

    name = 'Amazon'
    description = _('Downloads metadata from Amazon')

    capabilities = frozenset(['identify', 'cover'])
    touched_fields = frozenset(['title', 'authors', 'isbn', 'pubdate',
        'comments', 'cover_data'])
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
+from __future__ import (unicode_literals, division, absolute_import,
+                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
@@ -8,6 +10,12 @@ __docformat__ = 'restructuredtext en'
import re

from calibre.customize import Plugin
+from calibre.utils.logging import ThreadSafeLog, FileStream
+
+def create_log(ostream=None):
+    log = ThreadSafeLog(level=ThreadSafeLog.DEBUG)
+    log.outputs = [FileStream(ostream)]
+    return log

class Source(Plugin):

@@ -18,14 +26,47 @@ class Source(Plugin):

    result_of_identify_is_complete = True

-    def get_author_tokens(self, authors):
-        'Take a list of authors and return a list of tokens useful for a '
-        'AND search query'
-        # Leave ' in there for Irish names
-        pat = re.compile(r'[-,:;+!@#$%^&*(){}.`~"\s\[\]/]')
-        for au in authors:
-            for tok in au.split():
-                yield pat.sub('', tok)
+    capabilities = frozenset()
+
+    touched_fields = frozenset()
+
+    # Utility functions {{{
+    def get_author_tokens(self, authors, only_first_author=True):
+        '''
+        Take a list of authors and return a list of tokens useful for an
+        AND search query. This function tries to return tokens in
+        first name middle names last name order, by assuming that if a comma is
+        in the author name, the name is in lastname, other names form.
+        '''
+
+        if authors:
+            # Leave ' in there for Irish names
+            pat = re.compile(r'[-,:;+!@#$%^&*(){}.`~"\s\[\]/]')
+            if only_first_author:
+                authors = authors[:1]
+            for au in authors:
+                parts = au.split()
+                if ',' in au:
+                    # au probably in ln, fn form
+                    parts = parts[1:] + parts[:1]
+                for tok in parts:
+                    tok = pat.sub('', tok).strip()
+                    yield tok
+
+
+    def get_title_tokens(self, title):
+        '''
+        Take a title and return a list of tokens useful for an AND search query.
+        Excludes connectives and punctuation.
+        '''
+        if title:
+            pat = re.compile(r'''[-,:;+!@#$%^&*(){}.`~"'\s\[\]/]''')
+            title = pat.sub(' ', title)
+            tokens = title.split()
+            for token in tokens:
+                token = token.strip()
+                if token and token.lower() not in ('a', 'and', 'the'):
+                    yield token

    def split_jobs(self, jobs, num):
        'Split a list of jobs into at most num groups, as evenly as possible'
@@ -40,6 +81,10 @@ class Source(Plugin):
            gr.append(job)
        return [g for g in groups if g]

+    # }}}
+
+    # Metadata API {{{
+
    def identify(self, log, result_queue, abort, title=None, authors=None, identifiers={}):
        '''
        Identify a book by its title/author/isbn/etc.
@@ -59,3 +104,5 @@ class Source(Plugin):
        '''
        return None

+    # }}}
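For reference, a short sketch of what the new tokeniser is meant to yield for a "lastname, firstname" author. The logic is copied from the diff and wrapped as a standalone function so it can be run outside calibre; it is an illustration, not calibre's API:

# Standalone copy of the get_author_tokens logic above, for illustration.
import re

def author_tokens(authors, only_first_author=True):
    pat = re.compile(r'[-,:;+!@#$%^&*(){}.`~"\s\[\]/]')
    if only_first_author:
        authors = authors[:1]
    for au in authors:
        parts = au.split()
        if ',' in au:
            parts = parts[1:] + parts[:1]   # "Dickens, Charles" -> Charles Dickens
        for tok in parts:
            yield pat.sub('', tok).strip()

print(list(author_tokens(['Dickens, Charles'])))   # ['Charles', 'Dickens']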
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
+from __future__ import (unicode_literals, division, absolute_import,
+                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
@@ -12,8 +14,9 @@ from threading import Thread

from lxml import etree

-from calibre.ebooks.metadata.sources import Source
+from calibre.ebooks.metadata.sources.base import Source
from calibre.ebooks.metadata.book.base import Metadata
+from calibre.ebooks.chardet import xml_to_unicode
from calibre.utils.date import parse_date, utcnow
from calibre import browser, as_unicode

@@ -38,7 +41,18 @@ subject = XPath('descendant::dc:subject')
description = XPath('descendant::dc:description')
language = XPath('descendant::dc:language')

+def get_details(browser, url):
+    try:
+        raw = browser.open_novisit(url).read()
+    except Exception as e:
+        gc = getattr(e, 'getcode', lambda : -1)
+        if gc() != 403:
+            raise
+        # Google is throttling us, wait a little
+        time.sleep(2)
+        raw = browser.open_novisit(url).read()
+
+    return raw
+
def to_metadata(browser, log, entry_):

@@ -65,8 +79,8 @@ def to_metadata(browser, log, entry_):

    mi = Metadata(title_, authors)
    try:
-        raw = browser.open_novisit(id_url).read()
-        feed = etree.fromstring(raw)
+        raw = get_details(browser, id_url)
+        feed = etree.fromstring(xml_to_unicode(raw, strip_encoding_pats=True)[0])
        extra = entry(feed)[0]
    except:
        log.exception('Failed to get additional details for', mi.title)
@@ -142,9 +156,13 @@ class Worker(Thread):
class GoogleBooks(Source):

    name = 'Google Books'
+    description = _('Downloads metadata from Google Books')

-    def create_query(self, log, title=None, authors=None, identifiers={},
-            start_index=1):
+    capabilities = frozenset(['identify'])
+    touched_fields = frozenset(['title', 'authors', 'isbn', 'tags', 'pubdate',
+        'comments', 'publisher', 'author_sort']) # language currently disabled
+
+    def create_query(self, log, title=None, authors=None, identifiers={}):
        BASE_URL = 'http://books.google.com/books/feeds/volumes?'
        isbn = identifiers.get('isbn', None)
        q = ''
@@ -153,11 +171,14 @@ class GoogleBooks(Source):
        elif title or authors:
            def build_term(prefix, parts):
                return ' '.join('in'+prefix + ':' + x for x in parts)
-            if title is not None:
-                q += build_term('title', title.split())
-            if authors:
-                q += ('+' if q else '')+build_term('author',
-                        self.get_author_tokens(authors))
+            title_tokens = list(self.get_title_tokens(title))
+            if title_tokens:
+                q += build_term('title', title_tokens)
+            author_tokens = self.get_author_tokens(authors,
+                    only_first_author=True)
+            if author_tokens:
+                q += ('+' if q else '') + build_term('author',
+                        author_tokens)

        if isinstance(q, unicode):
            q = q.encode('utf-8')
@@ -166,7 +187,7 @@ class GoogleBooks(Source):
        return BASE_URL+urlencode({
            'q':q,
            'max-results':20,
-            'start-index':start_index,
+            'start-index':1,
            'min-viewability':'none',
            })

@@ -182,7 +203,8 @@ class GoogleBooks(Source):

        try:
            parser = etree.XMLParser(recover=True, no_network=True)
-            feed = etree.fromstring(raw, parser=parser)
+            feed = etree.fromstring(xml_to_unicode(raw,
+                strip_encoding_pats=True)[0], parser=parser)
            entries = entry(feed)
        except Exception, e:
            log.exception('Failed to parse identify results')
@@ -191,25 +213,33 @@ class GoogleBooks(Source):

        groups = self.split_jobs(entries, 5) # At most 5 threads
        if not groups:
-            return
+            return None
        workers = [Worker(log, entries, abort, result_queue) for entries in
                groups]

        if abort.is_set():
-            return
+            return None

        for worker in workers: worker.start()

        has_alive_worker = True
        while has_alive_worker and not abort.is_set():
            time.sleep(0.1)
            has_alive_worker = False
            for worker in workers:
                if worker.is_alive():
                    has_alive_worker = True
            time.sleep(0.1)

        return None


if __name__ == '__main__':
    # To run these test use: calibre-debug -e src/calibre/ebooks/metadata/sources/google.py
    from calibre.ebooks.metadata.sources.test import (test_identify_plugin,
            isbn_test)
    test_identify_plugin(GoogleBooks.name,
        [
            (
                {'title': 'Great Expectations', 'authors':['Charles Dickens']},
                [isbn_test('9781607541592')]
            ),
        ])
src/calibre/ebooks/metadata/sources/test.py (new file, 92 lines)
@@ -0,0 +1,92 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import os, tempfile
from Queue import Queue, Empty
from threading import Event


from calibre.customize.ui import metadata_plugins
from calibre import prints
from calibre.ebooks.metadata import check_isbn
from calibre.ebooks.metadata.sources.base import create_log

def isbn_test(isbn):
    isbn_ = check_isbn(isbn)

    def test(mi):
        misbn = check_isbn(mi.isbn)
        return misbn and misbn == isbn_

    return test

def test_identify_plugin(name, tests):
    '''
    :param name: Plugin name
    :param tests: List of 2-tuples. Each two tuple is of the form (args,
                  test_funcs). args is a dict of keyword arguments to pass to
                  the identify method. test_funcs are callables that accept a
                  Metadata object and return True iff the object passes the
                  test.
    '''
    plugin = None
    for x in metadata_plugins(['identify']):
        if x.name == name:
            plugin = x
            break
    prints('Testing the identify function of', plugin.name)

    tdir = tempfile.gettempdir()
    lf = os.path.join(tdir, plugin.name.replace(' ', '')+'_identify_test.txt')
    log = create_log(open(lf, 'wb'))
    abort = Event()
    prints('Log saved to', lf)

    for kwargs, test_funcs in tests:
        prints('Running test with:', kwargs)
        rq = Queue()
        args = (log, rq, abort)
        err = plugin.identify(*args, **kwargs)
        if err is not None:
            prints('identify returned an error for args', args)
            prints(err)
            break

        results = []
        while True:
            try:
                results.append(rq.get_nowait())
            except Empty:
                break

        prints('Found', len(results), 'matches:')

        for mi in results:
            prints(mi)
            prints('\n\n')

        match_found = None
        for mi in results:
            test_failed = False
            for tfunc in test_funcs:
                if not tfunc(mi):
                    test_failed = True
                    break
            if not test_failed:
                match_found = mi
                break

        if match_found is None:
            prints('ERROR: No results that passed all tests were found')
            prints('Log saved to', lf)
            raise SystemExit(1)

    if os.stat(lf).st_size > 10:
        prints('There were some errors, see log', lf)
@@ -1818,7 +1818,7 @@ class MobiWriter(object):
            text = text.strip()
            if not isinstance(text, unicode):
                text = text.decode('utf-8', 'replace')
-            text = text.encode('utf-8')
+            text = normalize(text).encode('utf-8')
        else :
            text = "(none)".encode('utf-8')
        return text
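The MOBI TOC change routes the text through normalize() before encoding. Presumably (the import is outside this hunk, so this is an assumption) that is Unicode normalization, along these lines:

# Assumed meaning of normalize() in the hunk above: NFC-normalise before UTF-8 encoding.
from unicodedata import normalize

text = u'cafe\u0301'                       # 'e' followed by a combining acute accent
text = normalize('NFC', text).encode('utf-8')
print(repr(text))                          # caf\xc3\xa9 as UTF-8 bytes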
@@ -1,4 +1,3 @@
-import os.path
# -*- coding: utf-8 -*-

__license__ = 'GPL v3'
@@ -50,7 +50,10 @@ class PMLOutput(OutputFormatPlugin):
        with open(os.path.join(tdir, 'index.pml'), 'wb') as out:
            out.write(pml.encode(opts.pml_output_encoding, 'replace'))

-        self.write_images(oeb_book.manifest, pmlmlizer.image_hrefs, tdir, opts)
+        img_path = os.path.join(tdir, 'index_img')
+        if not os.path.exists(img_path):
+            os.makedirs(img_path)
+        self.write_images(oeb_book.manifest, pmlmlizer.image_hrefs, img_path, opts)

        log.debug('Compressing output...')
        pmlz = ZipFile(output_path, 'w')
@@ -151,8 +151,8 @@ class PML_HTMLizer(object):
    def prepare_pml(self, pml):
        # Give Chapters the form \\*='text'text\\*. This is used for generating
        # the TOC later.
-        pml = re.sub(r'(?<=\\x)(?P<text>.*?)(?=\\x)', lambda match: '="%s"%s' % (self.strip_pml(match.group('text')), match.group('text')), pml)
-        pml = re.sub(r'(?<=\\X[0-4])(?P<text>.*?)(?=\\X[0-4])', lambda match: '="%s"%s' % (self.strip_pml(match.group('text')), match.group('text')), pml)
+        pml = re.sub(r'(?msu)(?P<c>\\x)(?P<text>.*?)(?P=c)', lambda match: '%s="%s"%s%s' % (match.group('c'), self.strip_pml(match.group('text')), match.group('text'), match.group('c')), pml)
+        pml = re.sub(r'(?msu)(?P<c>\\X[0-4])(?P<text>.*?)(?P=c)', lambda match: '%s="%s"%s%s' % (match.group('c'), self.strip_pml(match.group('text')), match.group('text'), match.group('c')), pml)

        # Remove comments
        pml = re.sub(r'(?mus)\\v(?P<text>.*?)\\v', '', pml)
@@ -190,9 +190,10 @@ class PML_HTMLizer(object):
        pml = re.sub(r'\\a\d\d\d', '', pml)
        pml = re.sub(r'\\U\d\d\d\d', '', pml)
        pml = re.sub(r'\\.', '', pml)
-        pml.replace('\r\n', ' ')
-        pml.replace('\n', ' ')
-        pml.replace('\r', ' ')
+        pml = pml.replace('\r\n', ' ')
+        pml = pml.replace('\n', ' ')
+        pml = pml.replace('\r', ' ')
+        pml = pml.strip()

        return pml
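A quick illustration of the corrected chapter-marker rewrite in prepare_pml, with an invented \x chapter and a crude stand-in for strip_pml:

# Demonstrates the new \x chapter substitution from prepare_pml above.
import re

def strip_pml(text):
    return re.sub(r'\\.', '', text).strip()   # stand-in for PML_HTMLizer.strip_pml

pml = r'\xChapter One\x'
pml = re.sub(r'(?msu)(?P<c>\\x)(?P<text>.*?)(?P=c)',
             lambda m: '%s="%s"%s%s' % (m.group('c'), strip_pml(m.group('text')),
                                        m.group('text'), m.group('c')),
             pml)
print(pml)   # \x="Chapter One"Chapter One\x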
@@ -180,6 +180,9 @@ class PMLMLizer(object):
        links = set(re.findall(r'(?<=\\q="#).+?(?=")', text))
        for unused in anchors.difference(links):
            text = text.replace('\\Q="%s"' % unused, '')

+        # Remove \Cn tags that are within \x and \Xn tags
+        text = re.sub(ur'(?msu)(?P<t>\\(x|X[0-4]))(?P<a>.*?)(?P<c>\\C[0-4]\s*=\s*"[^"]*")(?P<b>.*?)(?P=t)', '\g<t>\g<a>\g<b>\g<t>', text)
+
        # Replace bad characters.
        text = text.replace(u'\xc2', '')
@@ -255,7 +258,12 @@ class PMLMLizer(object):
            # TOC markers.
            toc_name = elem.attrib.get('name', None)
            toc_id = elem.attrib.get('id', None)
-            if (toc_id or toc_name) and tag not in ('h1', 'h2','h3','h4','h5','h6',):
+            # Only write the TOC marker if the tag isn't a heading and we aren't in one.
+            if (toc_id or toc_name) and tag not in ('h1', 'h2','h3','h4','h5','h6') and \
+                'x' not in tag_stack+tags and 'X0' not in tag_stack+tags and \
+                'X1' not in tag_stack+tags and 'X2' not in tag_stack+tags and \
+                'X3' not in tag_stack+tags and 'X4' not in tag_stack+tags:
+
                toc_page = page.href
                if self.toc.get(toc_page, None):
                    for toc_x in (toc_name, toc_id):
@@ -264,8 +272,8 @@ class PMLMLizer(object):
                    toc_depth = max(min(toc_depth, 4), 0)
                    text.append('\\C%s="%s"' % (toc_depth, toc_title))

-        # Process style information that needs holds a single tag
-        # Commented out because every page in an OEB book starts with this style
+        # Process style information that needs holds a single tag.
+        # Commented out because every page in an OEB book starts with this style.
        if style['page-break-before'] == 'always':
            text.append('\\p')
@@ -8,18 +8,20 @@ __docformat__ = 'restructuredtext en'
import os
from functools import partial

-from PyQt4.Qt import QPixmap, QMenu
+from PyQt4.Qt import QPixmap, QMenu, QTimer


from calibre.gui2 import error_dialog, choose_files, \
    choose_dir, warning_dialog, info_dialog
from calibre.gui2.dialogs.add_empty_book import AddEmptyBookDialog
+from calibre.gui2.dialogs.progress import ProgressDialog
from calibre.gui2.widgets import IMAGE_EXTENSIONS
from calibre.ebooks import BOOK_EXTENSIONS
from calibre.utils.filenames import ascii_filename
from calibre.constants import preferred_encoding, filesystem_encoding
from calibre.gui2.actions import InterfaceAction
from calibre.gui2 import config
+from calibre.ebooks.metadata import MetaInformation

class AddAction(InterfaceAction):

@@ -95,7 +97,6 @@ class AddAction(InterfaceAction):
        dlg = AddEmptyBookDialog(self.gui, self.gui.library_view.model().db, author)
        if dlg.exec_() == dlg.Accepted:
            num = dlg.qty_to_add
-            from calibre.ebooks.metadata import MetaInformation
            for x in xrange(num):
                mi = MetaInformation(_('Unknown'), dlg.selected_authors)
                self.gui.library_view.model().db.import_book(mi, [])
@@ -105,27 +106,45 @@ class AddAction(InterfaceAction):
        self.gui.tags_view.recount()

    def add_isbns(self, books, add_tags=[]):
-        from calibre.ebooks.metadata import MetaInformation
-        ids = set([])
-        db = self.gui.library_view.model().db
+        self.isbn_books = list(books)
+        self.add_by_isbn_ids = set()
+        self.isbn_add_tags = add_tags
+        QTimer.singleShot(10, self.do_one_isbn_add)
+        self.isbn_add_dialog = ProgressDialog(_('Adding'),
+                _('Creating book records from ISBNs'), max=len(books),
+                cancelable=False, parent=self.gui)
+        self.isbn_add_dialog.exec_()
+
+    def do_one_isbn_add(self):
+        try:
+            db = self.gui.library_view.model().db
+
+            try:
+                x = self.isbn_books.pop(0)
+            except IndexError:
+                self.gui.library_view.model().books_added(self.isbn_add_dialog.value)
+                self.isbn_add_dialog.accept()
+                orig = config['overwrite_author_title_metadata']
+                config['overwrite_author_title_metadata'] = True
+                try:
+                    self.gui.iactions['Edit Metadata'].do_download_metadata(
+                            self.add_by_isbn_ids)
+                finally:
+                    config['overwrite_author_title_metadata'] = orig
+                return
+
-        for x in books:
            mi = MetaInformation(None)
            mi.isbn = x['isbn']
-            if x['path'] is not None:
-                ids.add(db.import_book(mi, [x['path']]))
-            else:
-                ids.add(db.import_book(mi, []))
-        self.gui.library_view.model().books_added(len(books))
-        orig = config['overwrite_author_title_metadata']
-        config['overwrite_author_title_metadata'] = True
-        try:
-            self.gui.iactions['Edit Metadata'].do_download_metadata(ids)
-        finally:
-            config['overwrite_author_title_metadata'] = orig
-        if add_tags and ids:
-            db.bulk_modify_tags(ids, add=add_tags)
+            if self.isbn_add_tags:
+                mi.tags = list(self.isbn_add_tags)
+            fmts = [] if x['path'] is None else [x['path']]
+            self.add_by_isbn_ids.add(db.import_book(mi, fmts))
+            self.isbn_add_dialog.value += 1
+            QTimer.singleShot(10, self.do_one_isbn_add)
+        except:
+            self.isbn_add_dialog.accept()
+            raise

    def files_dropped(self, paths):
        to_device = self.gui.stack.currentIndex() != 0
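The add-by-ISBN rework above replaces one blocking loop with one import per QTimer.singleShot tick so the ProgressDialog stays responsive. A generic PyQt4 sketch of that pattern, with hypothetical work items rather than calibre's classes:

# Generic "process one item per timer tick" pattern, as used by do_one_isbn_add above.
from PyQt4.Qt import QApplication, QTimer

app = QApplication([])
items = list(range(5))

def do_one():
    if not items:
        app.quit()                   # all done; a real dialog would be accept()ed here
        return
    item = items.pop(0)
    print('processed %d' % item)     # stand-in for db.import_book(...)
    QTimer.singleShot(10, do_one)    # yield to the event loop between items

QTimer.singleShot(10, do_one)
app.exec_()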
@@ -7,13 +7,14 @@ __docformat__ = 'restructuredtext en'

from functools import partial

-from PyQt4.Qt import QToolButton, QMenu, pyqtSignal, QIcon
+from PyQt4.Qt import QToolButton, QMenu, pyqtSignal, QIcon, QTimer

from calibre.gui2.actions import InterfaceAction
from calibre.utils.smtp import config as email_config
from calibre.constants import iswindows, isosx
from calibre.customize.ui import is_disabled
from calibre.devices.bambook.driver import BAMBOOK
+from calibre.gui2 import info_dialog

class ShareConnMenu(QMenu): # {{{

@@ -169,5 +170,20 @@ class ConnectShareAction(InterfaceAction):
        if self.gui.content_server is None:
            self.gui.start_content_server()
        else:
-            self.gui.content_server.exit()
-            self.gui.content_server = None
+            self.gui.content_server.threaded_exit()
+            self.stopping_msg = info_dialog(self.gui, _('Stopping'),
+                    _('Stopping server, this could take upto a minute, please wait...'),
+                    show_copy_button=False)
+            QTimer.singleShot(1000, self.check_exited)
+
+    def check_exited(self):
+        if self.gui.content_server.is_running:
+            QTimer.singleShot(20, self.check_exited)
+            if not self.stopping_msg.isVisible():
+                self.stopping_msg.exec_()
+            return
+
+        self.gui.content_server = None
+        self.stopping_msg.accept()
@@ -160,6 +160,7 @@ class MultiCompleteComboBox(EnComboBox):
        c.setCaseSensitivity(Qt.CaseSensitive)
        self.dummy_model = CompleteModel(self)
        c.setModel(self.dummy_model)
+        self.lineEdit()._completer.setWidget(self)

    def update_items_cache(self, complete_items):
        self.lineEdit().update_items_cache(complete_items)
@@ -867,11 +867,6 @@ class BooksModel(QAbstractTableModel): # {{{
        self.dataChanged.emit(index, index)
        return True

-    def set_search_restriction(self, s):
-        self.db.data.set_search_restriction(s)
-        self.search('')
-        return self.rowCount(None)
-
    # }}}

class OnDeviceSearch(SearchQueryParser): # {{{
@@ -1341,8 +1336,5 @@ class DeviceBooksModel(BooksModel): # {{{
        if prefs['manage_device_metadata']=='on_connect':
            self.editable = []

-    def set_search_restriction(self, s):
-        pass
-
    # }}}
@@ -1,7 +1,7 @@
//Define the SIP wrapper to the pictureflow widget
//Author - Kovid Goyal <kovid@kovidgoyal.net>

-%Module pictureflow 1
+%Module(name=pictureflow, version=1)

%Import QtCore/QtCoremod.sip
%Import QtGui/QtGuimod.sip
@@ -112,7 +112,7 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
            getattr(self, 'enum_'+x).setVisible(col_type == 'enumeration')

    def accept(self):
-        col = unicode(self.column_name_box.text())
+        col = unicode(self.column_name_box.text()).strip()
        if not col:
            return self.simple_error('', _('No lookup name was provided'))
        if re.match('^\w*$', col) is None or not col[0].isalpha() or col.lower() != col:
@@ -121,7 +121,7 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
        if col.endswith('_index'):
            return self.simple_error('', _('Lookup names cannot end with _index, '
                    'because these names are reserved for the index of a series column.'))
-        col_heading = unicode(self.column_heading_box.text())
+        col_heading = unicode(self.column_heading_box.text()).strip()
        col_type = self.column_types[self.column_type_box.currentIndex()]['datatype']
        if col_type == '*text':
            col_type='text'
@@ -153,23 +153,22 @@ class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
        display_dict = {}

        if col_type == 'datetime':
-            if self.date_format_box.text():
-                display_dict = {'date_format':unicode(self.date_format_box.text())}
+            if self.date_format_box.text().strip():
+                display_dict = {'date_format':unicode(self.date_format_box.text()).strip()}
            else:
                display_dict = {'date_format': None}
        elif col_type == 'composite':
-            if not self.composite_box.text():
+            if not self.composite_box.text().strip():
                return self.simple_error('', _('You must enter a template for'
                    ' composite columns'))
-            display_dict = {'composite_template':unicode(self.composite_box.text())}
+            display_dict = {'composite_template':unicode(self.composite_box.text()).strip()}
        elif col_type == 'enumeration':
            if not self.enum_box.text():
                return self.simple_error('', _('You must enter at least one'
                    ' value for enumeration columns'))
            l = [v.strip() for v in unicode(self.enum_box.text()).split(',')]
-            for v in l:
-                if not v:
-                    return self.simple_error('', _('You cannot provide the empty '
+            if '' in l:
+                return self.simple_error('', _('You cannot provide the empty '
                    'value, as it is included by default'))
            for i in range(0, len(l)-1):
                if l[i] in l[i+1:]:
@@ -8,7 +8,7 @@ __docformat__ = 'restructuredtext en'
import time

from PyQt4.Qt import Qt, QUrl, QDialog, QSize, QVBoxLayout, QLabel, \
-                     QPlainTextEdit, QDialogButtonBox
+                     QPlainTextEdit, QDialogButtonBox, QTimer

from calibre.gui2.preferences import ConfigWidgetBase, test_widget
from calibre.gui2.preferences.server_ui import Ui_Form
@@ -16,7 +16,8 @@ from calibre.utils.search_query_parser import saved_searches
from calibre.library.server import server_config
from calibre.utils.config import ConfigProxy
from calibre.gui2 import error_dialog, config, open_url, warning_dialog, \
-        Dispatcher
+        Dispatcher, info_dialog
from calibre import as_unicode

class ConfigWidget(ConfigWidgetBase, Ui_Form):

@@ -67,25 +68,36 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):

    def start_server(self):
        self.set_server_options()
-        from calibre.library.server.main import start_threaded_server
-        self.server = start_threaded_server(self.db, server_config().parse())
-        while not self.server.is_running and self.server.exception is None:
+        self.gui.start_content_server(check_started=False)
+        while not self.gui.content_server.is_running and self.gui.content_server.exception is None:
            time.sleep(1)
-        if self.server.exception is not None:
+        if self.gui.content_server.exception is not None:
            error_dialog(self, _('Failed to start content server'),
-                    unicode(self.server.exception)).exec_()
+                    as_unicode(self.gui.content_server.exception)).exec_()
            return
        self.start_button.setEnabled(False)
        self.test_button.setEnabled(True)
        self.stop_button.setEnabled(True)

    def stop_server(self):
-        from calibre.library.server.main import stop_threaded_server
-        stop_threaded_server(self.server)
-        self.server = None
+        self.gui.content_server.threaded_exit()
+        self.stopping_msg = info_dialog(self, _('Stopping'),
+                _('Stopping server, this could take upto a minute, please wait...'),
+                show_copy_button=False)
+        QTimer.singleShot(500, self.check_exited)
+
+    def check_exited(self):
+        if self.gui.content_server.is_running:
+            QTimer.singleShot(20, self.check_exited)
+            if not self.stopping_msg.isVisible():
+                self.stopping_msg.exec_()
+            return
+
+        self.gui.content_server = None
        self.start_button.setEnabled(True)
        self.test_button.setEnabled(False)
        self.stop_button.setEnabled(False)
+        self.stopping_msg.accept()

    def test_server(self):
        open_url(QUrl('http://127.0.0.1:'+str(self.opt_port.value())))
@@ -260,6 +260,9 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
        self.restore_default_button.clicked.connect(self.restore_to_default)
        self.apply_button.clicked.connect(self.apply_tweak)
        self.plugin_tweaks_button.clicked.connect(self.plugin_tweaks)
+        self.splitter.setStretchFactor(0, 1)
+        self.splitter.setStretchFactor(1, 100)
+

    def plugin_tweaks(self):
        raw = self.tweaks.plugin_tweaks_string
@@ -13,109 +13,123 @@
<property name="windowTitle">
<string>Form</string>
</property>
<layout class="QGridLayout" name="gridLayout_2">
<item row="0" column="0" rowspan="2">
<layout class="QVBoxLayout" name="verticalLayout_2">
<item>
<widget class="QLabel" name="label_18">
<property name="text">
<string>Values for the tweaks are shown below. Edit them to change the behavior of calibre. Your changes will only take effect <b>after a restart</b> of calibre.</string>
</property>
<property name="wordWrap">
<bool>true</bool>
</property>
</widget>
</item>
<item>
<widget class="QListView" name="tweaks_view">
<property name="sizePolicy">
<sizepolicy hsizetype="Minimum" vsizetype="Expanding">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>300</width>
<height>0</height>
</size>
</property>
<property name="alternatingRowColors">
<bool>true</bool>
</property>
<property name="spacing">
<number>5</number>
</property>
<property name="uniformItemSizes">
<bool>true</bool>
</property>
</widget>
</item>
<item>
<widget class="QPushButton" name="plugin_tweaks_button">
<property name="toolTip">
<string>Edit tweaks for any custom plugins you have installed</string>
</property>
<property name="text">
<string>&Plugin tweaks</string>
</property>
</widget>
</item>
</layout>
</item>
<item row="0" column="1">
<widget class="QGroupBox" name="groupBox">
<property name="title">
<string>Help</string>
<layout class="QVBoxLayout" name="verticalLayout_4">
<item>
<widget class="QSplitter" name="splitter">
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<layout class="QVBoxLayout" name="verticalLayout">
<item>
<widget class="QPlainTextEdit" name="help">
<property name="lineWrapMode">
<enum>QPlainTextEdit::NoWrap</enum>
</property>
<property name="readOnly">
<bool>true</bool>
</property>
</widget>
</item>
</layout>
</widget>
</item>
<item row="1" column="1">
<widget class="QGroupBox" name="groupBox_2">
<property name="title">
<string>Edit tweak</string>
<property name="childrenCollapsible">
<bool>false</bool>
</property>
<layout class="QGridLayout" name="gridLayout">
<item row="0" column="0" colspan="2">
<widget class="QPlainTextEdit" name="edit_tweak">
<property name="lineWrapMode">
<enum>QPlainTextEdit::NoWrap</enum>
</property>
</widget>
</item>
<item row="1" column="0">
<widget class="QPushButton" name="restore_default_button">
<property name="toolTip">
<string>Restore this tweak to its default value</string>
</property>
<property name="text">
<string>Restore &default</string>
</property>
</widget>
</item>
<item row="1" column="1">
<widget class="QPushButton" name="apply_button">
<property name="toolTip">
<string>Apply any changes you made to this tweak</string>
</property>
<property name="text">
<string>&Apply</string>
</property>
</widget>
</item>
</layout>
<widget class="QWidget" name="layoutWidget">
<layout class="QVBoxLayout" name="verticalLayout_2">
<item>
<widget class="QLabel" name="label_18">
<property name="text">
<string>Values for the tweaks are shown below. Edit them to change the behavior of calibre. Your changes will only take effect <b>after a restart</b> of calibre.</string>
</property>
<property name="wordWrap">
<bool>true</bool>
</property>
</widget>
</item>
<item>
<widget class="QListView" name="tweaks_view">
<property name="sizePolicy">
<sizepolicy hsizetype="Minimum" vsizetype="Expanding">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>300</width>
<height>0</height>
</size>
</property>
<property name="alternatingRowColors">
<bool>true</bool>
</property>
<property name="spacing">
<number>5</number>
</property>
<property name="uniformItemSizes">
<bool>true</bool>
</property>
</widget>
</item>
<item>
<widget class="QPushButton" name="plugin_tweaks_button">
<property name="toolTip">
<string>Edit tweaks for any custom plugins you have installed</string>
</property>
<property name="text">
<string>&Plugin tweaks</string>
</property>
</widget>
</item>
</layout>
</widget>
<widget class="QWidget" name="layoutWidget">
<layout class="QVBoxLayout" name="verticalLayout_3">
<item>
<widget class="QGroupBox" name="groupBox">
<property name="title">
<string>Help</string>
</property>
<layout class="QVBoxLayout" name="verticalLayout">
<item>
<widget class="QPlainTextEdit" name="help">
<property name="lineWrapMode">
<enum>QPlainTextEdit::NoWrap</enum>
</property>
<property name="readOnly">
<bool>true</bool>
</property>
</widget>
</item>
</layout>
</widget>
</item>
<item>
<widget class="QGroupBox" name="groupBox_2">
<property name="title">
<string>Edit tweak</string>
</property>
<layout class="QGridLayout" name="gridLayout">
<item row="0" column="0" colspan="2">
<widget class="QPlainTextEdit" name="edit_tweak">
<property name="lineWrapMode">
<enum>QPlainTextEdit::NoWrap</enum>
</property>
</widget>
</item>
<item row="1" column="0">
<widget class="QPushButton" name="restore_default_button">
<property name="toolTip">
<string>Restore this tweak to its default value</string>
</property>
<property name="text">
<string>Restore &default</string>
</property>
</widget>
</item>
<item row="1" column="1">
<widget class="QPushButton" name="apply_button">
<property name="toolTip">
<string>Apply any changes you made to this tweak</string>
</property>
<property name="text">
<string>&Apply</string>
</property>
</widget>
</item>
</layout>
</widget>
</item>
</layout>
</widget>
</widget>
</item>
</layout>

@@ -1,7 +1,7 @@
//Define the SIP wrapper to the QProgressIndicator widget
//Author - Kovid Goyal <kovid@kovidgoyal.net>

%Module progress_indicator 1
%Module(name=progress_indicator, version=1)

%Import QtCore/QtCoremod.sip
%Import QtGui/QtGuimod.sip

@@ -11,26 +11,13 @@ class SearchRestrictionMixin(object):
    def __init__(self):
        self.search_restriction.initialize(help_text=_('Restrict to'))
        self.search_restriction.activated[int].connect(self.apply_search_restriction)
        self.library_view.model().count_changed_signal.connect(self.restriction_count_changed)
        self.search_restriction.setSizeAdjustPolicy(self.search_restriction.AdjustToMinimumContentsLengthWithIcon)
        self.library_view.model().count_changed_signal.connect(self.set_number_of_books_shown)
        self.search_restriction.setSizeAdjustPolicy(
                self.search_restriction.AdjustToMinimumContentsLengthWithIcon)
        self.search_restriction.setMinimumContentsLength(10)
        self.search_restriction.setStatusTip(self.search_restriction.toolTip())
        self.search_count.setText(_("(all books)"))

    '''
    Adding and deleting books while restricted creates a complexity. When added,
    they are displayed regardless of whether they match a search restriction.
    However, if they do not, they are removed at the next search. The counts
    must take this behavior into effect.
    '''

    def restriction_count_changed(self, c):
        self.restriction_count_of_books_in_view += \
            c - self.restriction_count_of_books_in_library
        self.restriction_count_of_books_in_library = c
        if self.restriction_in_effect:
            self.set_number_of_books_shown()

    def apply_named_search_restriction(self, name):
        if not name:
            r = 0
@@ -44,23 +31,27 @@ class SearchRestrictionMixin(object):
    def apply_search_restriction(self, i):
        r = unicode(self.search_restriction.currentText())
        if r is not None and r != '':
            self.restriction_in_effect = True
            restriction = 'search:"%s"'%(r)
        else:
            self.restriction_in_effect = False
            restriction = ''
        self.restriction_count_of_books_in_view = \
            self.library_view.model().set_search_restriction(restriction)
        self.search.clear()

        self.saved_search.clear()
        # The order below is important. Set the restriction, force a '' search
        # to apply it, reset the tag browser to take it into account, then set
        # the book count.
        self.library_view.model().db.data.set_search_restriction(restriction)
        self.search.clear(emit_search=True)
        self.tags_view.set_search_restriction(restriction)
        self.set_number_of_books_shown()
        self.current_view().setFocus(Qt.OtherFocusReason)

    def set_number_of_books_shown(self):
        if self.current_view() == self.library_view and self.restriction_in_effect:
            t = _("({0} of {1})").format(self.current_view().row_count(),
                                         self.restriction_count_of_books_in_view)
        db = self.library_view.model().db
        if self.current_view() == self.library_view and db is not None and \
                db.data.search_restriction_applied():
            rows = self.current_view().row_count()
            rbc = max(rows, db.data.get_search_restriction_book_count())
            t = _("({0} of {1})").format(rows, rbc)
            self.search_count.setStyleSheet \
                ('QLabel { border-radius: 8px; background-color: yellow; }')
        else: # No restriction or not library view

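The rewritten set_number_of_books_shown above reads the restricted-set size from the data layer (search_restriction_applied and get_search_restriction_book_count) instead of tracking counts in the GUI. A minimal sketch of the label text it builds; cache is a hypothetical object exposing those two accessors:

    # Sketch: "(shown of total-in-restriction)" text, per the logic in the hunk above.
    def restriction_count_text(view_row_count, cache):
        if cache.search_restriction_applied():
            # Books added while a restriction is active may be shown even if they
            # do not match it, so the total is never reported below the view count.
            total = max(view_row_count, cache.get_search_restriction_book_count())
            return "({0} of {1})".format(view_row_count, total)
        return "(all books)"
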
@@ -7,6 +7,8 @@ __docformat__ = 'restructuredtext en'
Browsing book collection by tags.
'''

import traceback

from itertools import izip
from functools import partial

@@ -755,13 +757,15 @@ class TagsModel(QAbstractItemModel): # {{{
            try:
                tb_cats.add_user_category(label=cat_name, name=user_cat)
            except ValueError:
                import traceback
                traceback.print_exc()

        for cat in sorted(self.db.prefs.get('grouped_search_terms', {}),
        for cat in sorted(self.db.prefs.get('grouped_search_terms', {}).keys(),
                          key=sort_key):
            if (u'@' + cat) in data:
                tb_cats.add_user_category(label=u'@' + cat, name=cat)
                try:
                    tb_cats.add_user_category(label=u'@' + cat, name=cat)
                except ValueError:
                    traceback.print_exc()
        self.db.data.change_search_locations(self.db.field_metadata.get_search_terms())

        if len(saved_searches().names()):

@@ -166,10 +166,6 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
        EmailMixin.__init__(self)
        DeviceMixin.__init__(self)

        self.restriction_count_of_books_in_view = 0
        self.restriction_count_of_books_in_library = 0
        self.restriction_in_effect = False

        self.progress_indicator = ProgressIndicator(self)
        self.progress_indicator.pos = (0, 20)
        self.verbose = opts.verbose
@@ -311,7 +307,7 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
    def esc(self, *args):
        self.search.clear()

    def start_content_server(self):
    def start_content_server(self, check_started=True):
        from calibre.library.server.main import start_threaded_server
        from calibre.library.server import server_config
        self.content_server = start_threaded_server(
@@ -319,7 +315,8 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
        self.content_server.state_callback = Dispatcher(
            self.iactions['Connect Share'].content_server_state_changed)
        self.content_server.state_callback(True)
        self.test_server_timer = QTimer.singleShot(10000, self.test_server)
        if check_started:
            QTimer.singleShot(10000, self.test_server)

    def resizeEvent(self, ev):
        MainWindow.resizeEvent(self, ev)

@@ -181,6 +181,7 @@ class ResultCache(SearchQueryParser): # {{{
        self._map = self._map_filtered = []
        self.first_sort = True
        self.search_restriction = ''
        self.search_restriction_book_count = 0
        self.field_metadata = field_metadata
        self.all_search_locations = field_metadata.get_search_terms()
        SearchQueryParser.__init__(self, self.all_search_locations, optimize=True)
@@ -618,12 +619,14 @@ class ResultCache(SearchQueryParser): # {{{
        return matches

    def search(self, query, return_matches=False):
        ans = self.search_getting_ids(query, self.search_restriction)
        ans = self.search_getting_ids(query, self.search_restriction,
                                      set_restriction_count=True)
        if return_matches:
            return ans
        self._map_filtered = ans

    def search_getting_ids(self, query, search_restriction):
    def search_getting_ids(self, query, search_restriction,
                           set_restriction_count=False):
        q = ''
        if not query or not query.strip():
            q = search_restriction
@@ -632,16 +635,27 @@ class ResultCache(SearchQueryParser): # {{{
            if search_restriction:
                q = u'%s (%s)' % (search_restriction, query)
        if not q:
            if set_restriction_count:
                self.search_restriction_book_count = len(self._map)
            return list(self._map)
        matches = self.parse(q)
        tmap = list(itertools.repeat(False, len(self._data)))
        for x in matches:
            tmap[x] = True
        return [x for x in self._map if tmap[x]]
        rv = [x for x in self._map if tmap[x]]
        if set_restriction_count and q == search_restriction:
            self.search_restriction_book_count = len(rv)
        return rv

    def set_search_restriction(self, s):
        self.search_restriction = s

    def search_restriction_applied(self):
        return bool(self.search_restriction)

    def get_search_restriction_book_count(self):
        return self.search_restriction_book_count

    # }}}

    def remove(self, id):

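The new ResultCache methods above expose the restriction to callers: set it once, re-run an empty search to apply it and record its size, then read the count back. A hypothetical usage sketch (cache stands for a ResultCache instance; the query strings are examples only):

    # Sketch of the intended call sequence, based on the methods added above.
    cache.set_search_restriction('search:"SF books"')
    cache.search('')                 # applies just the restriction, records its size
    shown = cache.search('tolkien', return_matches=True)  # ANDed with the restriction
    print('%d of %d books' % (len(shown), cache.get_search_restriction_book_count()))
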
@@ -353,12 +353,23 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
        # Reconstruct the user categories, putting them into field_metadata
        # Assumption is that someone else will fix them if they change.
        self.field_metadata.remove_dynamic_categories()
        tb_cats = self.field_metadata
        for user_cat in sorted(self.prefs.get('user_categories', {}).keys(), key=sort_key):
            cat_name = '@' + user_cat # add the '@' to avoid name collision
            tb_cats.add_user_category(label=cat_name, name=user_cat)
            self.field_metadata.add_user_category(label=cat_name, name=user_cat)

        # add grouped search term user categories
        muc = self.prefs.get('grouped_search_make_user_categories', [])
        for cat in sorted(self.prefs.get('grouped_search_terms', {}).keys(), key=sort_key):
            if cat in muc:
                # There is a chance that these can be duplicates of an existing
                # user category. Print the exception and continue.
                try:
                    self.field_metadata.add_user_category(label=u'@' + cat, name=cat)
                except:
                    traceback.print_exc()

        if len(saved_searches().names()):
            tb_cats.add_search_category(label='search', name=_('Searches'))
            self.field_metadata.add_search_category(label='search', name=_('Searches'))

        self.field_metadata.add_grouped_search_terms(
            self.prefs.get('grouped_search_terms', {}))

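The loop above turns grouped search terms into @-prefixed user categories only for names listed in grouped_search_make_user_categories, and tolerates clashes with existing categories by printing the exception. A sketch of the preference shapes this assumes; the concrete names and values are illustrative:

    # Illustrative shapes of the two preferences consulted in the hunk above.
    prefs = {
        'grouped_search_terms': {'allseries': ['series', '#myseries']},
        'grouped_search_make_user_categories': ['allseries'],
    }
    for cat in sorted(prefs['grouped_search_terms']):
        if cat in prefs['grouped_search_make_user_categories']:
            print('would register user category @%s' % cat)
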
@@ -120,6 +120,8 @@ class LibraryServer(ContentServer, MobileServer, XMLServer, OPDSServer, Cache,

        self.set_database(db)

        st = 0.1 if opts.develop else 1

        cherrypy.config.update({
            'log.screen' : opts.develop,
            'engine.autoreload_on' : getattr(opts,
@@ -131,6 +133,7 @@ class LibraryServer(ContentServer, MobileServer, XMLServer, OPDSServer, Cache,
            'server.socket_port' : opts.port,
            'server.socket_timeout' : opts.timeout, #seconds
            'server.thread_pool' : opts.thread_pool, # number of threads
            'server.shutdown_timeout': st, # minutes
        })
        if embedded or wsgi:
            cherrypy.config.update({'engine.SIGHUP' : None,
@@ -241,4 +244,9 @@ class LibraryServer(ContentServer, MobileServer, XMLServer, OPDSServer, Cache,
        except:
            pass

    def threaded_exit(self):
        from threading import Thread
        t = Thread(target=self.exit)
        t.daemon = True
        t.start()

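threaded_exit above hands the blocking shutdown to a daemon thread, so whoever triggers it (for example an HTTP request handler) returns immediately instead of waiting for cherrypy to tear down. A generic sketch of the same pattern, with shutdown_callable as a placeholder:

    # Sketch: run a blocking shutdown routine off the calling thread.
    from threading import Thread

    def stop_in_background(shutdown_callable):
        t = Thread(target=shutdown_callable)
        t.daemon = True   # do not keep the process alive just for this thread
        t.start()
        return t
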
@@ -584,7 +584,7 @@ class BrowseServer(object):
            title=_('Books in') + " " +category_name,
            script='booklist(%s);'%hide_sort, main=html)

    def browse_get_book_args(self, mi, id_):
    def browse_get_book_args(self, mi, id_, add_category_links=False):
        fmts = self.db.formats(id_, index_is_id=True)
        if not fmts:
            fmts = ''
@@ -596,11 +596,45 @@ class BrowseServer(object):
            fmt = None
        args = {'id':id_, 'mi':mi,
                }
        ccache = self.categories_cache() if add_category_links else {}
        for key in mi.all_field_keys():
            val = mi.format_field(key)[1]
            if not val:
                val = ''
            args[key] = xml(val, True)
            if add_category_links:
                added_key = False
                fm = mi.metadata_for_field(key)
                if val and fm and fm['is_category'] and \
                        key != 'formats' and fm['datatype'] not in ['rating']:
                    categories = mi.get(key)
                    if isinstance(categories, basestring):
                        categories = [categories]
                    dbtags = []
                    for category in categories:
                        dbtag = None
                        for tag in ccache[key]:
                            if tag.name == category:
                                dbtag = tag
                                break
                        dbtags.append(dbtag)
                    if None not in dbtags:
                        vals = []
                        for tag in dbtags:
                            tval = ('<a title="Browse books by {3}: {0}"'
                                    ' href="{1}" class="details_category_link">{2}</a>')
                            href='/browse/matches/%s/%s' % \
                                (quote(tag.category), quote(str(tag.id)))
                            vals.append(tval.format(xml(tag.name, True),
                                xml(href, True),
                                xml(val if len(dbtags) == 1 else tag.name),
                                xml(key, True)))
                        join = ' & ' if key == 'authors' else ', '
                        args[key] = join.join(vals)
                        added_key = True
                if not added_key:
                    args[key] = xml(val, True)
            else:
                args[key] = xml(val, True)
        fname = quote(ascii_filename(args['title']) + ' - ' +
                      ascii_filename(args['authors']))
        return args, fmt, fmts, fname

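For a concrete picture of what the template in the hunk above emits, here is the same format string applied to made-up values for an authors entry:

    # Worked example of the details_category_link anchor; the id and name are invented.
    tval = ('<a title="Browse books by {3}: {0}"'
            ' href="{1}" class="details_category_link">{2}</a>')
    print(tval.format('Iain M. Banks', '/browse/matches/authors/37',
                      'Iain M. Banks', 'authors'))
    # <a title="Browse books by authors: Iain M. Banks"
    #   href="/browse/matches/authors/37" class="details_category_link">Iain M. Banks</a>
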
@@ -674,7 +708,8 @@ class BrowseServer(object):
        except:
            return _('This book has been deleted')
        else:
            args, fmt, fmts, fname = self.browse_get_book_args(mi, id_)
            args, fmt, fmts, fname = self.browse_get_book_args(mi, id_,
                    add_category_links=True)
            args['formats'] = ''
            if fmts:
                ofmts = [u'<a href="{4}/get/{0}/{1}_{2}.{0}" title="{3}">{3}</a>'\
@@ -690,8 +725,9 @@ class BrowseServer(object):
            if m['is_custom'] and field not in displayed_custom_fields:
                continue
            if m['datatype'] == 'comments' or field == 'comments':
                comments.append((m['name'], comments_to_html(mi.get(field,
                    ''))))
                val = mi.get(field, '')
                if val and val.strip():
                    comments.append((m['name'], comments_to_html(val)))
                continue
            if field in ('title', 'formats') or not args.get(field, False) \
                    or not m['name']:

@@ -186,6 +186,7 @@ class PostInstall:
            from calibre.ebooks.metadata.fetch import option_parser as fem_op
            from calibre.gui2.main import option_parser as guiop
            from calibre.utils.smtp import option_parser as smtp_op
            from calibre.library.server.main import option_parser as serv_op
            from calibre.ebooks.epub.fix.main import option_parser as fix_op
            any_formats = ['epub', 'htm', 'html', 'xhtml', 'xhtm', 'rar', 'zip',
                'txt', 'lit', 'rtf', 'pdf', 'prc', 'mobi', 'fb2', 'odt', 'lrf', 'snb']
@@ -208,6 +209,7 @@ class PostInstall:
            f.write(opts_and_exts('ebook-viewer', viewer_op, any_formats))
            f.write(opts_and_words('fetch-ebook-metadata', fem_op, []))
            f.write(opts_and_words('calibre-smtp', smtp_op, []))
            f.write(opts_and_words('calibre-server', serv_op, []))
            f.write(opts_and_exts('epub-fix', fix_op, ['epub']))
            f.write(textwrap.dedent('''
                _ebook_device_ls()