Mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-09 03:04:10 -04:00)

Commit 7930b7923f: 0.8.45
@ -19,11 +19,67 @@
 # new recipes:
 # - title:

+- version: 0.8.45
+  date: 2012-03-30
+
+  new features:
+    - title: "E-book viewer: Allow the up and down keys to scroll past section boundaries"
+
+    - title: "calibredb: Allow specification of basic metadata on the command line when adding books."
+      tickets: [951063]
+
+    - title: "Driver for Samsung Galaxy Plus GT-I9001"
+
+    - title: "KF8 Input: Support KF8 format Amazon book samples."
+      tickets: [963418]
+
+    - title: "When a new plugin is added to calibre for the first time, have its icon (if any) show up even when a device is connected (this can be changed by the user at the time of plugin installation)"
+
+    - title: "Add keyboard shortcuts for Bold, Italic and Underline to the comments editor in the edit metadata dialog"
+      tickets: [963559]
+
+  bug fixes:
+    - title: "E-book viewer: Fix last read position (and bookmarks in general) being inaccurate for some books."
+      description: "The technique for marking locations in books used by the viewer has changed. The new technique should be much more accurate than the last one, especially when the font size at which the book is being viewed is changed. Note that this change means that bookmarks created with this release of calibre will not be read by previous calibre versions. On a technical note, the viewer now uses the CFI specification from the EPUB 3 standard for bookmarks."
+      type: major
+
+    - title: "Workarounds for a few regressions in the user interface in 0.8.44 caused by the update to Qt 4.8.0"
+
+    - title: "Books list: Preserve the horizontal scroll position when sorting by a column"
+
+    - title: "Fix saving to disk and then adding the book back not restoring tags-like custom columns"
+
+    - title: "Linux installer: Fix completion for ebook-convert not working."
+      tickets: [967834]
+
+    - title: "MOBI Output: Recognize type=text in addition to type=start guide elements"
+
+    - title: "Get Books: Updates to Nexto, Ebookpoint and Woblink stores"
+
+    - title: "Fix unable to clear username/password in Fetch news dialog"
+
+    - title: "PDF Output: Fix margin specifications not being applied"
+
+    - title: "Linux installer: Manually preserve the defaults.list mimetype association file to workaround buggy xdg-desktop-menu implementations in some distros."
+      tickets: [926559]
+
+    - title: "E-book viewer: Fix regression that caused the ebook viewer to stop functioning if it is launched from the main calibre program and then the main calibre program is closed."
+      tickets: [963960]
+
+  improved recipes:
+    - Our Daily Bread
+
+  new recipes:
+    - title: NRC Handelsblad (free)
+      author: veezh
+
 - version: 0.8.44
   date: 2012-03-23

   new features:
-    - title: "E-book viewer: A whole new full screen mode, with no toolbars to distract from the text and the ability to set the width of the column of text via Preferences in the ebook viewer."
+    - title: "E-book viewer: A whole new full screen mode."
+      description: "The new mode has no toolbars to distract from the text and the ability to set the width of the column of text via Preferences in the ebook viewer. Click the Fullscreen button on the toolbar in the viewer to enter fullscreen mode (or press the F11 or Ctrl+Shift+F keys)"
       type: major
       tickets: [959830]

@ -1,8 +1,9 @@
 __license__ = 'GPL v3'
-__copyright__ = '2011'
+__copyright__ = '2012'
 '''
 lemonde.fr
 '''
+import re
 from calibre.web.feeds.recipes import BasicNewsRecipe

 class LeMonde(BasicNewsRecipe):
@ -24,7 +25,7 @@ class LeMonde(BasicNewsRecipe):
                 .ariane{font-size:xx-small;}
                 .source{font-size:xx-small;}
                 #.href{font-size:xx-small;}
-                .LM_caption{color:#666666; font-size:x-small;}
+                #.figcaption style{color:#666666; font-size:x-small;}
                 #.main-article-info{font-family:Arial,Helvetica,sans-serif;}
                 #full-contents{font-size:small; font-family:Arial,Helvetica,sans-serif;font-weight:normal;}
                 #match-stats-summary{font-size:small; font-family:Arial,Helvetica,sans-serif;font-weight:normal;}
@ -40,8 +41,88 @@ class LeMonde(BasicNewsRecipe):

     remove_empty_feeds = True

-    auto_cleanup = True
+    filterDuplicates = True
+
+    def preprocess_html(self, soup):
+        for alink in soup.findAll('a'):
+            if alink.string is not None:
+                tstr = alink.string
+                alink.replaceWith(tstr)
+        return soup
+
+    preprocess_regexps = [
+        (re.compile(r'([0-9])%'), lambda m: m.group(1) + ' %'),
+        (re.compile(r'([0-9])([0-9])([0-9]) ([0-9])([0-9])([0-9])'), lambda m: m.group(1) + m.group(2) + m.group(3) + ' ' + m.group(4) + m.group(5) + m.group(6)),
+        (re.compile(r'([0-9]) ([0-9])([0-9])([0-9])'), lambda m: m.group(1) + ' ' + m.group(2) + m.group(3) + m.group(4)),
+        (re.compile(r'<span>'), lambda match: ' <span>'),
+        (re.compile(r'\("'), lambda match: '(« '),
+        (re.compile(r'"\)'), lambda match: ' »)'),
+        (re.compile(r'“'), lambda match: '(« '),
+        (re.compile(r'”'), lambda match: ' »)'),
+        (re.compile(r'>\''), lambda match: '>‘'),
+        (re.compile(r' \''), lambda match: ' ‘'),
+        (re.compile(r' "'), lambda match: ' « '),
+        (re.compile(r'>"'), lambda match: '>« '),
+        (re.compile(r'"<'), lambda match: ' »<'),
+        (re.compile(r'" '), lambda match: ' » '),
+        (re.compile(r'",'), lambda match: ' »,'),
+        (re.compile(r'\''), lambda match: '’'),
+        (re.compile(r'"<em>'), lambda match: '<em>« '),
+        (re.compile(r'"<em>"</em><em>'), lambda match: '<em>« '),
+        (re.compile(r'"<a href='), lambda match: '« <a href='),
+        (re.compile(r'</em>"'), lambda match: ' »</em>'),
+        (re.compile(r'</a>"'), lambda match: ' »</a>'),
+        (re.compile(r'"</'), lambda match: ' »</'),
+        (re.compile(r'>"'), lambda match: '>« '),
+        (re.compile(r'"<'), lambda match: ' »<'),
+        (re.compile(r'’"'), lambda match: '’« '),
+        (re.compile(r' "'), lambda match: ' « '),
+        (re.compile(r'" '), lambda match: ' » '),
+        (re.compile(r'"\.'), lambda match: ' ».'),
+        (re.compile(r'",'), lambda match: ' »,'),
+        (re.compile(r'"\?'), lambda match: ' »?'),
+        (re.compile(r'":'), lambda match: ' »:'),
+        (re.compile(r'";'), lambda match: ' »;'),
+        (re.compile(r'"\!'), lambda match: ' »!'),
+        (re.compile(r' :'), lambda match: ' :'),
+        (re.compile(r' ;'), lambda match: ' ;'),
+        (re.compile(r' \?'), lambda match: ' ?'),
+        (re.compile(r' \!'), lambda match: ' !'),
+        (re.compile(r'\s»'), lambda match: ' »'),
+        (re.compile(r'«\s'), lambda match: '« '),
+        (re.compile(r' %'), lambda match: ' %'),
+        (re.compile(r'\.jpg » width='), lambda match: '.jpg'),
+        (re.compile(r'\.png » width='), lambda match: '.png'),
+        (re.compile(r' – '), lambda match: ' – '),
+        (re.compile(r'figcaption style="display:none"'), lambda match: 'figcaption'),
+        (re.compile(r' – '), lambda match: ' – '),
+        (re.compile(r' - '), lambda match: ' – '),
+        (re.compile(r' -,'), lambda match: ' –,'),
+        (re.compile(r'»:'), lambda match: '» :'),
+    ]
+
+    keep_only_tags = [
+        dict(name='div', attrs={'class':['global']})
+    ]
+
+    remove_tags = [
+        dict(name='div', attrs={'class':['bloc_base meme_sujet']}),
+        dict(name='p', attrs={'class':['lire']})
+    ]
+
+    remove_tags_after = [dict(id='fb-like')]
+
+    def get_article_url(self, article):
+        url = article.get('guid', None)
+        if '/chat/' in url or '.blog' in url or '/video/' in url or '/sport/' in url or '/portfolio/' in url or '/visuel/' in url :
+            url = None
+        return url
+
+#    def get_article_url(self, article):
+#          link = article.get('link')
+#          if 'blog' not in link and ('chat' not in link):
+#              return link

     feeds = [
             ('A la une', 'http://www.lemonde.fr/rss/une.xml'),
@ -66,11 +147,3 @@ class LeMonde(BasicNewsRecipe):
             cover_url = link_item.img['src']

         return cover_url
-
-    def get_article_url(self, article):
-        url = article.get('guid', None)
-        if '/chat/' in url or '.blog' in url or '/video/' in url or '/sport/' in url or '/portfolio/' in url or '/visuel/' in url :
-            url = None
-        return url
-
-
@ -1,4 +1,6 @@
 __license__ = 'GPL v3'
+__author__ = 'faber1971'
+description = 'Collection of Italian marketing websites - v1.04 (17, March 2012)'
+
 from calibre.web.feeds.news import BasicNewsRecipe

@ -9,12 +11,9 @@ class AdvancedUserRecipe1327062445(BasicNewsRecipe):
     auto_cleanup = True
     remove_javascript = True
     no_stylesheets = True
+    conversion_options = {'linearize_tables': True}
     remove_tags = [
         dict(name='ul', attrs={'id':'ads0'})
     ]
     masthead_url = 'http://www.simrendeogun.com/wp-content/uploads/2011/06/New-Marketing-Magazine-Logo.jpg'
-    __author__ = 'faber1971'
+    feeds = [(u'My Marketing', u'http://feed43.com/0537744466058428.xml'), (u'My Marketing_', u'http://feed43.com/8126723074604845.xml'), (u'Venturini', u'http://robertoventurini.blogspot.com/feeds/posts/default?alt=rss'), (u'Ninja Marketing', u'http://feeds.feedburner.com/NinjaMarketing'), (u'Comunitàzione', u'http://www.comunitazione.it/feed/novita.asp'), (u'Brandforum news', u'http://www.brandforum.it/rss/news'), (u'Brandforum papers', u'http://www.brandforum.it/rss/papers'), (u'MarketingArena', u'http://feeds.feedburner.com/marketingarena'), (u'minimarketing', u'http://feeds.feedburner.com/minimarketingit'), (u'Marketing Journal', u'http://feeds.feedburner.com/marketingjournal/jPwA'), (u'Disambiguando', u'http://giovannacosenza.wordpress.com/feed/')]
-    description = 'Collection of Italian marketing websites - v1.03 (20, February 2012)'
-    language = 'it'
-
-    feeds = [(u'My Marketing', u'http://feed43.com/0537744466058428.xml'), (u'My Marketing_', u'http://feed43.com/8126723074604845.xml'), (u'Venturini', u'http://robertoventurini.blogspot.com/feeds/posts/default?alt=rss'), (u'Ninja Marketing', u'http://feeds.feedburner.com/NinjaMarketing'), (u'Comunitàzione', u'http://www.comunitazione.it/feed/novita.asp'), (u'Brandforum news', u'http://www.brandforum.it/rss/news'), (u'Brandforum papers', u'http://www.brandforum.it/rss/papers'), (u'MarketingArena', u'http://feeds.feedburner.com/marketingarena'), (u'minimarketing', u'http://feeds.feedburner.com/minimarketingit'), (u'Disambiguando', u'http://giovannacosenza.wordpress.com/feed/')]
recipes/nrc_handelsblad.recipe (new file, 76 lines)
@ -0,0 +1,76 @@
__license__ = 'GPL v3'
__copyright__ = '2012'
'''
nrc.nl
'''
from calibre.web.feeds.recipes import BasicNewsRecipe

class NRC(BasicNewsRecipe):
    title = 'NRC Handelsblad'
    __author__ = 'veezh'
    description = 'Nieuws (no subscription needed)'
    oldest_article = 1
    max_articles_per_feed = 100
    no_stylesheets = True
    #delay = 1
    use_embedded_content = False
    encoding = 'utf-8'
    publisher = 'nrc.nl'
    category = 'news, Netherlands, world'
    language = 'nl'
    timefmt = ''
    #publication_type = 'newsportal'
    extra_css = '''
                h1{font-size:130%;}
                #h2{font-size:100%;font-weight:normal;}
                #.href{font-size:xx-small;}
                .bijschrift{color:#666666; font-size:x-small;}
                #.main-article-info{font-family:Arial,Helvetica,sans-serif;}
                #full-contents{font-size:small; font-family:Arial,Helvetica,sans-serif;font-weight:normal;}
                #match-stats-summary{font-size:small; font-family:Arial,Helvetica,sans-serif;font-weight:normal;}
                '''
    #preprocess_regexps = [(re.compile(r'<!--.*?-->', re.DOTALL), lambda m: '')]
    conversion_options = {
                          'comments'          : description
                         ,'tags'              : category
                         ,'language'          : language
                         ,'publisher'         : publisher
                         ,'linearize_tables'  : True
                         }

    remove_empty_feeds = True

    filterDuplicates = True

    def preprocess_html(self, soup):
        for alink in soup.findAll('a'):
            if alink.string is not None:
                tstr = alink.string
                alink.replaceWith(tstr)
        return soup

    keep_only_tags = [dict(name='div', attrs={'class':'article'})]
    remove_tags_after = [dict(id='broodtekst')]

#    keep_only_tags = [
#        dict(name='div', attrs={'class':['label']})
#    ]
#
#    remove_tags_after = [dict(name='dl', attrs={'class':['tags']})]
#
#    def get_article_url(self, article):
#        link = article.get('link')
#        if 'blog' not in link and ('chat' not in link):
#            return link

    feeds = [
#        ('Nieuws', 'http://www.nrc.nl/rss.php'),
        ('Binnenland', 'http://www.nrc.nl/nieuws/categorie/binnenland/rss.php'),
        ('Buitenland', 'http://www.nrc.nl/nieuws/categorie/buitenland/rss.php'),
        ('Economie', 'http://www.nrc.nl/nieuws/categorie/economie/rss.php'),
        ('Wetenschap', 'http://www.nrc.nl/nieuws/categorie/wetenschap/rss.php'),
        ('Cultuur', 'http://www.nrc.nl/nieuws/categorie/cultuur/rss.php'),
        ('Boeken', 'http://www.nrc.nl/boeken/rss.php'),
        ('Tech', 'http://www.nrc.nl/tech/rss.php/'),
        ('Klimaat', 'http://www.nrc.nl/klimaat/rss.php/'),
    ]
@ -14,6 +14,7 @@ class OurDailyBread(BasicNewsRecipe):
     language = 'en'
     max_articles_per_feed = 100
     no_stylesheets = True
+    auto_cleanup = True
     use_embedded_content = False
     category = 'ODB, Daily Devotional, Bible, Christian Devotional, Devotional, RBC Ministries, Our Daily Bread, Devotionals, Daily Devotionals, Christian Devotionals, Faith, Bible Study, Bible Studies, Scripture, RBC, religion'
     encoding = 'utf-8'
@ -25,12 +26,12 @@ class OurDailyBread(BasicNewsRecipe):
                           ,'linearize_tables' : True
                         }

-    keep_only_tags = [dict(attrs={'class':'module-content'})]
-    remove_tags = [
-                   dict(attrs={'id':'article-zoom'})
-                   ,dict(attrs={'class':'listen-now-box'})
-                  ]
-    remove_tags_after = dict(attrs={'class':'readable-area'})
+    #keep_only_tags = [dict(attrs={'class':'module-content'})]
+    #remove_tags = [
+    #               dict(attrs={'id':'article-zoom'})
+    #              ,dict(attrs={'class':'listen-now-box'})
+    #              ]
+    #remove_tags_after = dict(attrs={'class':'readable-area'})

     extra_css = '''
                 .text{font-family:Arial,Helvetica,sans-serif;font-size:x-small;}
@ -6,6 +6,7 @@ Rue89

 __author__ = '2010-2012, Louis Gesbert <meta at antislash dot info>'

+import re
 from calibre.web.feeds.news import BasicNewsRecipe

 class Rue89(BasicNewsRecipe):
@ -15,23 +16,24 @@ class Rue89(BasicNewsRecipe):
     title = u'Rue89'
     language = 'fr'
     oldest_article = 7
-    max_articles_per_feed = 12
+    max_articles_per_feed = 50

     use_embedded_content = False

     # From http://www.rue89.com/les-flux-rss-de-rue89
     feeds = [
         (u'La Une', u'http://www.rue89.com/feed'),
-        (u'Rue69', u'http://www.rue89.com/rue69/feed'),
-        (u'Eco', u'http://www.rue89.com/rue89-eco/feed'),
-        (u'Planète', u'http://www.rue89.com/rue89-planete/feed'),
-        (u'Sport', u'http://www.rue89.com/rue89-sport/feed'),
-        (u'Culture', u'http://www.rue89.com/culture/feed'),
-        (u'Hi-tech', u'http://www.rue89.com/hi-tech/feed'),
-        (u'Media', u'http://www.rue89.com/medias/feed'),
-        (u'Monde', u'http://www.rue89.com/monde/feed'),
-        (u'Politique', u'http://www.rue89.com/politique/feed'),
-        (u'Societe', u'http://www.rue89.com/societe/feed'),
+        # Other feeds disabled, 'La Une' seems to include them all
+        # (u'Rue69', u'http://www.rue89.com/rue69/feed'),
+        # (u'Eco', u'http://www.rue89.com/rue89-eco/feed'),
+        # (u'Planète', u'http://www.rue89.com/rue89-planete/feed'),
+        # (u'Sport', u'http://www.rue89.com/rue89-sport/feed'),
+        # (u'Culture', u'http://www.rue89.com/culture/feed'),
+        # (u'Hi-tech', u'http://www.rue89.com/hi-tech/feed'),
+        # (u'Media', u'http://www.rue89.com/medias/feed'),
+        # (u'Monde', u'http://www.rue89.com/monde/feed'),
+        # (u'Politique', u'http://www.rue89.com/politique/feed'),
+        # (u'Societe', u'http://www.rue89.com/societe/feed'),
     ]

     # Follow redirection from feedsportal.com
@ -41,19 +43,36 @@ class Rue89(BasicNewsRecipe):
     def print_version(self, url):
         return url + '?imprimer=1'

-    no_stylesheets = True
-
     conversion_options = { 'smarten_punctuation' : True }

     keep_only_tags = [
-        dict(name='div', attrs={'id':'article'}),
+        dict(name='div', attrs={'id':'content'}),
     ]

     remove_tags_after = [
         dict(name='div', attrs={'id':'plus_loin'}),
+        dict(name='div', attrs={'class':'stats'}),
     ]

     remove_tags = [
         dict(name='div', attrs={'id':'article_tools'}),
         dict(name='div', attrs={'id':'plus_loin'}),
+        dict(name='div', attrs={'class':'stats'}),
+        dict(name='div', attrs={'class':'tools'}),
     ]

+    extra_css = "#content { padding: 0 0; }"
+
+    # Without this, parsing of video articles returns strange results
+    preprocess_regexps = [
+        (re.compile(r'<script.*?</script>', re.IGNORECASE|re.DOTALL), ''),
+    ]
+
+    def preprocess_html(self, soup):
+        # Remove whole article if it's a "zapnet" (video)
+        if soup.find('h1', {'class':'zapnet_title'}):
+            return None
+        # Reduce h2 titles to h3
+        for title in soup.findAll('h2'):
+            title.name = 'h3'
+        return soup
@ -48,7 +48,7 @@ class Push(Command):
         threads = []
         for host in (
                 r'Owner@winxp:/cygdrive/c/Documents\ and\ Settings/Owner/calibre',
-                'kovid@leopard_test:calibre',
+                'kovid@ox:calibre',
                 r'kovid@win7:/cygdrive/c/Users/kovid/calibre',
                 ):
             rcmd = BASE_RSYNC + EXCLUDES + ['.', host]
(One file diff suppressed because it is too large.)
@ -8,14 +8,14 @@ msgstr ""
 "Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
 "devel@lists.alioth.debian.org>\n"
 "POT-Creation-Date: 2011-11-25 14:01+0000\n"
-"PO-Revision-Date: 2012-03-21 15:46+0000\n"
-"Last-Translator: Иван Старчевић <ivanstar61@gmail.com>\n"
+"PO-Revision-Date: 2012-03-25 12:19+0000\n"
+"Last-Translator: Radan Putnik <srastral@gmail.com>\n"
 "Language-Team: Serbian <gnu@prevod.org>\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2012-03-22 04:56+0000\n"
-"X-Generator: Launchpad (build 14981)\n"
+"X-Launchpad-Export-Date: 2012-03-26 04:37+0000\n"
+"X-Generator: Launchpad (build 15008)\n"
 "Language: sr\n"

The remaining hunks in this Serbian translation file fill in previously empty msgstr entries for the following language names:

 #. name for aky   msgid "Aka-Kol"             msgstr "ака-кол"
 #. name for alu   msgid "'Are'are"            msgstr "ареаре"
 #. name for alw   msgid "Alaba-K’abeena"      msgstr "алаба-кабеена"
 #. name for amf   msgid "Hamer-Banna"         msgstr "хаммер-банна"
 #. name for ane   msgid "Xârâcùù"             msgstr "ксаракуу"
 #. name for anr   msgid "Andh"                msgstr "андх"
 #. name for aot   msgid "A'tong"              msgstr "атонг"
 #. name for apf   msgid "Agta; Pahanan"       msgstr "агта (паханан)"
 #. name for aqr   msgid "Arhâ"                msgstr "арга"
 #. name for arh   msgid "Arhuaco"             msgstr "архуако"
 #. name for asd   msgid "Asas"                msgstr "асас"
 #. name for ask   msgid "Ashkun"              msgstr "ашкун"
 #. name for atc   msgid "Atsahuaca"           msgstr "атсахуака"
 #. name for atd   msgid "Manobo; Ata"         msgstr "манобо (Ата)"
 #. name for atp   msgid "Atta; Pudtol"        msgstr "атта (Пудтол)"
 #. name for ats   msgid "Gros Ventre"         msgstr "грос-вентре"
 #. name for aua   msgid "Asumboa"             msgstr "асумбоа"
 #. name for aug   msgid "Aguna"               msgstr "агуна"
 #. name for auq   msgid "Anus"                msgstr "анус"
 #. name for awh   msgid "Awbono"              msgstr "авбоно"
 #. name for awk   msgid "Awabakal"            msgstr "авабакал"
 #. name for awt   msgid "Araweté"             msgstr "аравете"
 #. name for axg   msgid "Arára; Mato Grosso"  msgstr "арара (Мату-Гросу)"
 #. name for axx   msgid "Xaragure"            msgstr "ксарагуре"
@ -4,7 +4,7 @@ __license__ = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
 __docformat__ = 'restructuredtext en'
 __appname__ = u'calibre'
-numeric_version = (0, 8, 44)
+numeric_version = (0, 8, 45)
 __version__ = u'.'.join(map(unicode, numeric_version))
 __author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

@ -1460,7 +1460,7 @@ class StoreNextoStore(StoreBase):
     actual_plugin = 'calibre.gui2.store.stores.nexto_plugin:NextoStore'

     headquarters = 'PL'
-    formats = ['EPUB', 'PDF']
+    formats = ['EPUB', 'MOBI', 'PDF']
     affiliate = True

 class StoreOpenBooksStore(StoreBase):
@ -187,7 +187,7 @@ class ANDROID(USBMS):
             'UMS', '.K080', 'P990', 'LTE', 'MB853', 'GT-S5660_CARD', 'A107',
             'GT-I9003_CARD', 'XT912', 'FILE-CD_GADGET', 'RK29_SDK', 'MB855',
             'XT910', 'BOOK_A10', 'USB_2.0_DRIVER', 'I9100T', 'P999DW',
-            'KTABLET_PC', 'INGENIC']
+            'KTABLET_PC', 'INGENIC', 'GT-I9001_CARD']
     WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
             'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
             'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD',
@ -195,7 +195,7 @@ class ANDROID(USBMS):
             'ANDROID_MID', 'P990_SD_CARD', '.K080', 'LTE_CARD', 'MB853',
             'A1-07___C0541A4F', 'XT912', 'MB855', 'XT910', 'BOOK_A10_CARD',
             'USB_2.0_DRIVER', 'I9100T', 'P999DW_SD_CARD', 'KTABLET_PC',
-            'FILE-CD_GADGET']
+            'FILE-CD_GADGET', 'GT-I9001_CARD']

     OSX_MAIN_MEM = 'Android Device Main Memory'

@ -527,11 +527,17 @@ class HeuristicProcessor(object):
         if re.findall('(<|>)', replacement_break):
             if re.match('^<hr', replacement_break):
                 if replacement_break.find('width') != -1:
-                    width = int(re.sub('.*?width(:|=)(?P<wnum>\d+).*', '\g<wnum>', replacement_break))
-                    replacement_break = re.sub('(?i)(width=\d+\%?|width:\s*\d+(\%|px|pt|em)?;?)', '', replacement_break)
-                    divpercent = (100 - width) / 2
-                    hr_open = re.sub('45', str(divpercent), hr_open)
-                    scene_break = hr_open+replacement_break+'</div>'
+                    try:
+                        width = int(re.sub('.*?width(:|=)(?P<wnum>\d+).*', '\g<wnum>', replacement_break))
+                    except:
+                        scene_break = hr_open+'<hr style="height: 3px; background:#505050" /></div>'
+                        self.log.warn('Invalid replacement scene break'
+                                ' expression, using default')
+                    else:
+                        replacement_break = re.sub('(?i)(width=\d+\%?|width:\s*\d+(\%|px|pt|em)?;?)', '', replacement_break)
+                        divpercent = (100 - width) / 2
+                        hr_open = re.sub('45', str(divpercent), hr_open)
+                        scene_break = hr_open+replacement_break+'</div>'
                 else:
                     scene_break = hr_open+'<hr style="height: 3px; background:#505050" /></div>'
             elif re.match('^<img', replacement_break):
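A quick illustration of the width-parsing path that the new try/except now guards; the sample replacement_break values are hypothetical user input, not values from this commit:

import re

replacement_break = '<hr width=50%>'   # hypothetical custom scene-break markup
width = int(re.sub('.*?width(:|=)(?P<wnum>\d+).*', '\g<wnum>', replacement_break))  # -> 50
divpercent = (100 - width) / 2          # -> 25, used to centre the rule inside its div
# A value such as '<hr width=wide>' leaves the string unchanged, so int() raises
# ValueError; the new except branch then falls back to the default 3px rule and warns.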
@ -108,6 +108,8 @@ def decode_is_multiple(fm):
         else:
             im = {'cache_to_list': '|', 'ui_to_list': ',',
                     'list_to_ui': ', '}
+        elif im is None:
+            im = {}
     fm['is_multiple'] = im

 class JsonCodec(object):
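For context, the is_multiple value this helper normalises is a small mapping describing how multi-valued (tags-like) fields are split and joined; a sketch of the two shapes involved, with the key meanings paraphrased rather than quoted from the source:

# Older serialised metadata may carry only a separator character such as '|'.
# The decoded form is a dict like the one in the hunk above:
im = {'cache_to_list': '|',   # separator used when reading the cached value
      'ui_to_list': ',',      # separator accepted from user input
      'list_to_ui': ', '}     # separator used when rendering the list
# The added elif maps a missing value (None) to {}, so callers can always
# treat fm['is_multiple'] as a dict.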
@ -205,7 +205,10 @@ class EXTHHeader(object):

     @property
     def kf8_header_index(self):
-        return self.get(121, None)
+        ans = self.get(121, None)
+        if ans == NULL_INDEX:
+            ans = None
+        return ans

     def __str__(self):
         ans = ['*'*20 + ' EXTH Header '+ '*'*20]
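The sentinel involved is the NULL_INDEX constant used by the MOBI code (0xffffffff, defined in the reader headers changed later in this commit); a minimal sketch of the behaviour the property now has, with exth_values standing in for the EXTH record mapping:

NULL_INDEX = 0xffffffff   # "no record" marker

def kf8_header_index(exth_values):
    # mirrors self.get(121, None) in the property above
    ans = exth_values.get(121, None)
    if ans == NULL_INDEX:
        ans = None    # a 0xffffffff value now means "no KF8 header" instead of a bogus index
    return ans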
@ -328,7 +331,7 @@ class MOBIHeader(object): # {{{
             (self.sect_idx, self.skel_idx, self.datp_idx, self.oth_idx
                     ) = struct.unpack_from(b'>4L', self.raw, 248)
             self.unknown9 = self.raw[264:self.length]
-            if self.meta_orth_indx != self.sect_idx:
+            if self.meta_orth_indx not in {NULL_INDEX, self.sect_idx}:
                 raise ValueError('KF8 header has different Meta orth and '
                         'section indices')

@ -467,9 +470,15 @@ class MOBIFile(object):
         if mh.file_version >= 8:
             self.kf8_type = 'standalone'
         elif mh.has_exth and mh.exth.kf8_header_index is not None:
-            self.kf8_type = 'joint'
             kf8i = mh.exth.kf8_header_index
-            mh8 = MOBIHeader(self.records[kf8i], kf8i)
+            try:
+                rec = self.records[kf8i-1]
+            except IndexError:
+                pass
+            else:
+                if rec.raw == b'BOUNDARY':
+                    self.kf8_type = 'joint'
+                    mh8 = MOBIHeader(self.records[kf8i], kf8i)
             self.mobi8_header = mh8

         if 'huff' in self.mobi_header.compression.lower():
@ -7,9 +7,10 @@ __license__ = 'GPL v3'
 __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
 __docformat__ = 'restructuredtext en'

-import sys, os
+import sys, os, imghdr

 from calibre.ebooks.mobi.debug.headers import TextRecord
+from calibre.ebooks.mobi.utils import read_font_record

 class MOBIFile(object):

@ -30,6 +31,7 @@ class MOBIFile(object):
             first_text_record+offset+h8.number_of_text_records])]

         self.raw_text = b''.join(r.raw for r in self.text_records)
+        self.extract_resources()

     def print_header(self, f=sys.stdout):
         print (str(self.mf.palmdb).encode('utf-8'), file=f)
@ -41,6 +43,42 @@ class MOBIFile(object):
         print (file=f)
         print (str(self.mf.mobi8_header).encode('utf-8'), file=f)

+    def extract_resources(self):
+        self.resource_map = []
+        known_types = {b'FLIS', b'FCIS', b'SRCS',
+                    b'\xe9\x8e\r\n', b'RESC', b'BOUN', b'FDST', b'DATP',
+                    b'AUDI', b'VIDE'}
+
+        for i, rec in enumerate(self.resource_records):
+            sig = rec.raw[:4]
+            payload = rec.raw
+            ext = 'dat'
+            prefix = 'binary'
+            suffix = ''
+            if sig in {b'HUFF', b'CDIC', b'INDX'}: continue
+            # TODO: Ignore CNCX records as well
+            if sig == b'FONT':
+                font = read_font_record(rec.raw)
+                if font['err']:
+                    raise ValueError('Failed to read font record: %s Headers: %s'%(
+                        font['err'], font['headers']))
+                payload = (font['font_data'] if font['font_data'] else
+                        font['raw_data'])
+                prefix, ext = 'fonts', font['ext']
+            elif sig not in known_types:
+                q = imghdr.what(None, rec.raw)
+                if q:
+                    prefix, ext = 'images', q
+
+            if prefix == 'binary':
+                if sig == b'\xe9\x8e\r\n':
+                    suffix = '-EOF'
+                elif sig in known_types:
+                    suffix = '-' + sig.decode('ascii')
+
+            self.resource_map.append(('%s/%06d%s.%s'%(prefix, i, suffix, ext),
+                payload))
+
+
 def inspect_mobi(mobi_file, ddir):
     f = MOBIFile(mobi_file)
@ -51,12 +89,14 @@ def inspect_mobi(mobi_file, ddir):
     with open(alltext, 'wb') as of:
         of.write(f.raw_text)

-    for tdir, attr in [('text_records', 'text_records'), ('images',
-            'image_records'), ('binary', 'binary_records'), ('font',
-            'font_records')]:
-        tdir = os.path.join(ddir, tdir)
-        os.mkdir(tdir)
-        for rec in getattr(f, attr, []):
-            rec.dump(tdir)
+    for x in ('text_records', 'images', 'fonts', 'binary'):
+        os.mkdir(os.path.join(ddir, x))
+
+    for rec in f.text_records:
+        rec.dump(os.path.join(ddir, 'text_records'))
+
+    for href, payload in f.resource_map:
+        with open(os.path.join(ddir, href), 'wb') as f:
+            f.write(payload)

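The names recorded in resource_map are the on-disk paths that inspect_mobi now writes, built from the prefix, the record number and the detected extension; a small sketch with an invented record number:

prefix, i, suffix, ext = 'images', 12, '', 'jpeg'
name = '%s/%06d%s.%s' % (prefix, i, suffix, ext)   # -> 'images/000012.jpeg'
# Unrecognised records keep the 'binary' prefix and gain a signature suffix,
# ending up as something like 'binary/000003-FDST.dat'.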
@ -11,7 +11,7 @@ import struct, re, os
 from calibre import replace_entities
 from calibre.utils.date import parse_date
 from calibre.ebooks.mobi import MobiError
-from calibre.ebooks.metadata import MetaInformation
+from calibre.ebooks.metadata import MetaInformation, check_isbn
 from calibre.ebooks.mobi.langcodes import main_language, sub_language, mobi2iana

 NULL_INDEX = 0xffffffff
@ -75,10 +75,14 @@ class EXTHHeader(object): # {{{
                 self.mi.author_sort = au.strip()
             elif idx == 101:
                 self.mi.publisher = content.decode(codec, 'ignore').strip()
+                if self.mi.publisher in {'Unknown', _('Unknown')}:
+                    self.mi.publisher = None
             elif idx == 103:
                 self.mi.comments = content.decode(codec, 'ignore')
             elif idx == 104:
-                self.mi.isbn = content.decode(codec, 'ignore').strip().replace('-', '')
+                raw = check_isbn(content.decode(codec, 'ignore').strip().replace('-', ''))
+                if raw:
+                    self.mi.isbn = raw
             elif idx == 105:
                 if not self.mi.tags:
                     self.mi.tags = []
|
|||||||
pass
|
pass
|
||||||
elif idx == 108:
|
elif idx == 108:
|
||||||
self.mi.book_producer = content.decode(codec, 'ignore').strip()
|
self.mi.book_producer = content.decode(codec, 'ignore').strip()
|
||||||
|
elif idx == 112: # dc:source set in some EBSP amazon samples
|
||||||
|
try:
|
||||||
|
content = content.decode(codec).strip()
|
||||||
|
isig = 'urn:isbn:'
|
||||||
|
if content.lower().startswith(isig):
|
||||||
|
raw = check_isbn(content[len(isig):])
|
||||||
|
if raw and not self.mi.isbn:
|
||||||
|
self.mi.isbn = raw
|
||||||
|
except:
|
||||||
|
pass
|
||||||
elif idx == 113:
|
elif idx == 113:
|
||||||
pass # ASIN or UUID
|
pass # ASIN or UUID
|
||||||
elif idx == 116:
|
elif idx == 116:
|
||||||
self.start_offset, = struct.unpack(b'>L', content)
|
self.start_offset, = struct.unpack(b'>L', content)
|
||||||
elif idx == 121:
|
elif idx == 121:
|
||||||
self.kf8_header, = struct.unpack(b'>L', content)
|
self.kf8_header, = struct.unpack(b'>L', content)
|
||||||
|
if self.kf8_header == NULL_INDEX:
|
||||||
|
self.kf8_header = None
|
||||||
#else:
|
#else:
|
||||||
# print 'unhandled metadata record', idx, repr(content)
|
# print 'unhandled metadata record', idx, repr(content)
|
||||||
# }}}
|
# }}}
|
||||||
|
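As a concrete illustration of the new EXTH record 112 handling, the dc:source value found in some Amazon EBSP samples has the form 'urn:isbn:...' and is reduced to a bare ISBN; the sample number below is made up:

from calibre.ebooks.metadata import check_isbn

content = 'urn:isbn:978-0-306-40615-7'   # hypothetical dc:source value
isig = 'urn:isbn:'
if content.lower().startswith(isig):
    raw = check_isbn(content[len(isig):])   # the ISBN if it validates, otherwise None
    # raw is only used when self.mi.isbn has not already been set from record 104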
@ -39,10 +39,41 @@ def parse_indx_header(data):
     words = (
             'len', 'nul1', 'type', 'gen', 'start', 'count', 'code',
             'lng', 'total', 'ordt', 'ligt', 'nligt', 'ncncx'
-    )
+    ) + tuple('unknown%d'%i for i in xrange(27)) + ('ocnt', 'oentries',
+            'ordt1', 'ordt2', 'tagx')
     num = len(words)
     values = struct.unpack(bytes('>%dL' % num), data[4:4*(num+1)])
-    return dict(zip(words, values))
+    ans = dict(zip(words, values))
+    ordt1, ordt2 = ans['ordt1'], ans['ordt2']
+    ans['ordt1_raw'], ans['ordt2_raw'] = [], []
+    ans['ordt_map'] = ''
+
+    if ordt1 > 0 and data[ordt1:ordt1+4] == b'ORDT':
+        # I dont know what this is, but using it seems to be unnecessary, so
+        # just leave it as the raw bytestring
+        ans['ordt1_raw'] = data[ordt1+4:ordt1+4+ans['oentries']]
+    if ordt2 > 0 and data[ordt2:ordt2+4] == b'ORDT':
+        ans['ordt2_raw'] = raw = bytearray(data[ordt2+4:ordt2+4+2*ans['oentries']])
+        if ans['code'] == 65002:
+            # This appears to be EBCDIC-UTF (65002) encoded. I can't be
+            # bothered to write a decoder for this (see
+            # http://www.unicode.org/reports/tr16/) Just how stupid is Amazon?
+            # Instead, we use a weird hack that seems to do the trick for all
+            # the books with this type of ORDT record that I have come across.
+            # Some EBSP book samples in KF8 format from Amazon have this type
+            # of encoding.
+            # Basically we try to interpret every second byte as a printable
+            # ascii character. If we cannot, we map to the ? char.
+
+            parsed = bytearray(ans['oentries'])
+            for i in xrange(0, 2*ans['oentries'], 2):
+                parsed[i//2] = raw[i+1] if 0x20 < raw[i+1] < 0x7f else ord(b'?')
+            ans['ordt_map'] = bytes(parsed).decode('ascii')
+        else:
+            ans['ordt_map'] = '?'*ans['oentries']
+
+    return ans
+

 class CNCX(object): # {{{

@ -163,7 +194,7 @@ def get_tag_map(control_byte_count, tagx, data, strict=False):
     return ans

 def parse_index_record(table, data, control_byte_count, tags, codec,
-        strict=False):
+        ordt_map, strict=False):
     header = parse_indx_header(data)
     idxt_pos = header['start']
     if data[idxt_pos:idxt_pos+4] != b'IDXT':
@ -184,12 +215,11 @@ def parse_index_record(table, data, control_byte_count, tags, codec,
     for j in xrange(entry_count):
         start, end = idx_positions[j:j+2]
         rec = data[start:end]
-        ident, consumed = decode_string(rec, codec=codec)
+        ident, consumed = decode_string(rec, codec=codec, ordt_map=ordt_map)
         rec = rec[consumed:]
         tag_map = get_tag_map(control_byte_count, tags, rec, strict=strict)
         table[ident] = tag_map


 def read_index(sections, idx, codec):
     table, cncx = OrderedDict(), CNCX([], codec)

@ -203,12 +233,13 @@ def read_index(sections, idx, codec):
     cncx_records = [x[0] for x in sections[off:off+indx_header['ncncx']]]
     cncx = CNCX(cncx_records, codec)

-    tag_section_start = indx_header['len']
+    tag_section_start = indx_header['tagx']
     control_byte_count, tags = parse_tagx_section(data[tag_section_start:])

     for i in xrange(idx + 1, idx + 1 + indx_count):
         # Index record
         data = sections[i][0]
-        parse_index_record(table, data, control_byte_count, tags, codec)
+        parse_index_record(table, data, control_byte_count, tags, codec,
+                indx_header['ordt_map'])
     return table, cncx

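The "every second byte" hack in parse_indx_header can be seen on a toy ORDT payload; the bytes below are invented purely for illustration:

raw = bytearray(b'\x00a\x00b\x00?')   # hypothetical 2-bytes-per-entry ORDT data
oentries = 3
parsed = bytearray(oentries)
for i in xrange(0, 2*oentries, 2):
    # keep the second byte of each pair if it is printable ASCII, else use '?'
    parsed[i//2] = raw[i+1] if 0x20 < raw[i+1] < 0x7f else ord(b'?')
print(bytes(parsed).decode('ascii'))   # -> 'ab?'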
|
@ -285,7 +285,11 @@ class Mobi8Reader(object):
|
|||||||
def create_guide(self):
|
def create_guide(self):
|
||||||
guide = Guide()
|
guide = Guide()
|
||||||
for ref_type, ref_title, fileno in self.guide:
|
for ref_type, ref_title, fileno in self.guide:
|
||||||
elem = self.elems[fileno]
|
try:
|
||||||
|
elem = self.elems[fileno]
|
||||||
|
except IndexError:
|
||||||
|
# Happens for thumbnailstandard in Amazon book samples
|
||||||
|
continue
|
||||||
fi = self.get_file_info(elem.insert_pos)
|
fi = self.get_file_info(elem.insert_pos)
|
||||||
idtext = self.get_id_tag(elem.insert_pos).decode(self.header.codec)
|
idtext = self.get_id_tag(elem.insert_pos).decode(self.header.codec)
|
||||||
linktgt = fi.filename
|
linktgt = fi.filename
|
||||||
|
@ -15,10 +15,12 @@ from calibre.ebooks import normalize

 IMAGE_MAX_SIZE = 10 * 1024 * 1024

-def decode_string(raw, codec='utf-8'):
+def decode_string(raw, codec='utf-8', ordt_map=''):
     length, = struct.unpack(b'>B', raw[0])
     raw = raw[1:1+length]
     consumed = length+1
+    if ordt_map:
+        return ''.join(ordt_map[ord(x)] for x in raw), consumed
     return raw.decode(codec), consumed

 def decode_hex_number(raw, codec='utf-8'):
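When an ordt_map is present, decode_string treats each remaining byte as an index into that map instead of decoding with the codec; a small sketch with an invented map:

ordt_map = '?'*65 + 'A' + '?'*190    # invented 256-entry map where index 65 maps to 'A'
raw = b'\x41\x41'                    # index bytes (the leading length byte already stripped)
print(''.join(ordt_map[ord(x)] for x in raw))    # -> 'AA'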
@ -161,8 +161,8 @@ class Serializer(object):
             self.serialize_text(ref.title, quot=True)
             buf.write(b'" ')
             if (ref.title.lower() == 'start' or
-                    (ref.type and ref.type.lower() in ('start',
-                        'other.start'))):
+                    (ref.type and ref.type.lower() in {'start',
+                        'other.start', 'text'})):
                 self._start_href = ref.href
             self.serialize_href(ref.href)
             # Space required or won't work, I kid you not
src/calibre/ebooks/oeb/display/__init__.py (new file, 11 lines)
@ -0,0 +1,11 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

src/calibre/ebooks/oeb/display/webview.py (new file, 59 lines)
@ -0,0 +1,59 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import re

from calibre import guess_type

class EntityDeclarationProcessor(object): # {{{

    def __init__(self, html):
        self.declared_entities = {}
        for match in re.finditer(r'<!\s*ENTITY\s+([^>]+)>', html):
            tokens = match.group(1).split()
            if len(tokens) > 1:
                self.declared_entities[tokens[0].strip()] = tokens[1].strip().replace('"', '')
        self.processed_html = html
        for key, val in self.declared_entities.iteritems():
            self.processed_html = self.processed_html.replace('&%s;'%key, val)
# }}}

def self_closing_sub(match):
    tag = match.group(1)
    if tag.lower().strip() == 'br':
        return match.group()
    return '<%s %s></%s>'%(match.group(1), match.group(2), match.group(1))

def load_html(path, view, codec='utf-8', mime_type=None,
              pre_load_callback=lambda x:None):
    from PyQt4.Qt import QUrl, QByteArray
    if mime_type is None:
        mime_type = guess_type(path)[0]
    with open(path, 'rb') as f:
        html = f.read().decode(codec, 'replace')

    html = EntityDeclarationProcessor(html).processed_html
    has_svg = re.search(r'<[:a-zA-Z]*svg', html) is not None
    if 'xhtml' in mime_type:
        self_closing_pat = re.compile(r'<([a-z1-6]+)\s+([^>]+)/>',
                re.IGNORECASE)
        html = self_closing_pat.sub(self_closing_sub, html)

    html = re.sub(ur'<\s*title\s*/\s*>', u'', html, flags=re.IGNORECASE)
    loading_url = QUrl.fromLocalFile(path)
    pre_load_callback(loading_url)

    if has_svg:
        view.setContent(QByteArray(html.encode(codec)), mime_type,
                loading_url)
    else:
        view.setHtml(html, loading_url)
|
|||||||
TITLEPAGE = CoverManager.SVG_TEMPLATE.decode('utf-8').replace(\
|
TITLEPAGE = CoverManager.SVG_TEMPLATE.decode('utf-8').replace(\
|
||||||
'__ar__', 'none').replace('__viewbox__', '0 0 600 800'
|
'__ar__', 'none').replace('__viewbox__', '0 0 600 800'
|
||||||
).replace('__width__', '600').replace('__height__', '800')
|
).replace('__width__', '600').replace('__height__', '800')
|
||||||
|
BM_FIELD_SEP = u'*|!|?|*'
|
||||||
|
BM_LEGACY_ESC = u'esc-text-%&*#%(){}ads19-end-esc'
|
||||||
|
|
||||||
def character_count(html):
|
def character_count(html):
|
||||||
'''
|
'''
|
||||||
@ -273,27 +275,62 @@ class EbookIterator(object):

     def parse_bookmarks(self, raw):
         for line in raw.splitlines():
+            bm = None
             if line.count('^') > 0:
                 tokens = line.rpartition('^')
                 title, ref = tokens[0], tokens[2]
-                self.bookmarks.append((title, ref))
+                try:
+                    spine, _, pos = ref.partition('#')
+                    spine = int(spine.strip())
+                except:
+                    continue
+                bm = {'type':'legacy', 'title':title, 'spine':spine, 'pos':pos}
+            elif BM_FIELD_SEP in line:
+                try:
+                    title, spine, pos = line.strip().split(BM_FIELD_SEP)
+                    spine = int(spine)
+                except:
+                    continue
+                # Unescape from serialization
+                pos = pos.replace(BM_LEGACY_ESC, u'^')
+                # Check for pos being a scroll fraction
+                try:
+                    pos = float(pos)
+                except:
+                    pass
+                bm = {'type':'cfi', 'title':title, 'pos':pos, 'spine':spine}
+
+            if bm:
+                self.bookmarks.append(bm)

     def serialize_bookmarks(self, bookmarks):
         dat = []
-        for title, bm in bookmarks:
-            dat.append(u'%s^%s'%(title, bm))
-        return (u'\n'.join(dat) +'\n').encode('utf-8')
+        for bm in bookmarks:
+            if bm['type'] == 'legacy':
+                rec = u'%s^%d#%s'%(bm['title'], bm['spine'], bm['pos'])
+            else:
+                pos = bm['pos']
+                if isinstance(pos, (int, float)):
+                    pos = unicode(pos)
+                else:
+                    pos = pos.replace(u'^', BM_LEGACY_ESC)
+                rec = BM_FIELD_SEP.join([bm['title'], unicode(bm['spine']), pos])
+            dat.append(rec)
+        return (u'\n'.join(dat) +u'\n')

     def read_bookmarks(self):
         self.bookmarks = []
         bmfile = os.path.join(self.base, 'META-INF', 'calibre_bookmarks.txt')
         raw = ''
         if os.path.exists(bmfile):
-            raw = open(bmfile, 'rb').read().decode('utf-8')
+            with open(bmfile, 'rb') as f:
+                raw = f.read()
         else:
             saved = self.config['bookmarks_'+self.pathtoebook]
             if saved:
                 raw = saved
+        if not isinstance(raw, unicode):
+            raw = raw.decode('utf-8')
         self.parse_bookmarks(raw)

     def save_bookmarks(self, bookmarks=None):
@ -306,18 +343,15 @@ class EbookIterator(object):
                 zf = open(self.pathtoebook, 'r+b')
             except IOError:
                 return
-            safe_replace(zf, 'META-INF/calibre_bookmarks.txt', StringIO(dat),
+            safe_replace(zf, 'META-INF/calibre_bookmarks.txt',
+                    StringIO(dat.encode('utf-8')),
                 add_missing=True)
         else:
             self.config['bookmarks_'+self.pathtoebook] = dat

     def add_bookmark(self, bm):
-        dups = []
-        for x in self.bookmarks:
-            if x[0] == bm[0]:
-                dups.append(x)
-        for x in dups:
-            self.bookmarks.remove(x)
+        self.bookmarks = [x for x in self.bookmarks if x['title'] !=
+                bm['title']]
         self.bookmarks.append(bm)
         self.save_bookmarks()

@ -8,10 +8,9 @@ __docformat__ = 'restructuredtext en'
|
|||||||
|
|
||||||
|
|
||||||
class Clean(object):
|
class Clean(object):
|
||||||
'''Clean up guide, leaving only a pointer to the cover'''
|
'''Clean up guide, leaving only known values '''
|
||||||
|
|
||||||
def __call__(self, oeb, opts):
|
def __call__(self, oeb, opts):
|
||||||
from calibre.ebooks.oeb.base import urldefrag
|
|
||||||
self.oeb, self.log, self.opts = oeb, oeb.log, opts
|
self.oeb, self.log, self.opts = oeb, oeb.log, opts
|
||||||
|
|
||||||
if 'cover' not in self.oeb.guide:
|
if 'cover' not in self.oeb.guide:
|
||||||
@ -32,10 +31,15 @@ class Clean(object):
|
|||||||
ref.type = 'cover'
|
ref.type = 'cover'
|
||||||
self.oeb.guide.refs['cover'] = ref
|
self.oeb.guide.refs['cover'] = ref
|
||||||
|
|
||||||
|
if ('start' in self.oeb.guide and 'text' not in self.oeb.guide):
|
||||||
|
# Prefer text to start as per the OPF 2.0 spec
|
||||||
|
x = self.oeb.guide['start']
|
||||||
|
self.oeb.guide.add('text', x.title, x.href)
|
||||||
|
self.oeb.guide.remove('start')
|
||||||
|
|
||||||
for x in list(self.oeb.guide):
|
for x in list(self.oeb.guide):
|
||||||
href = urldefrag(self.oeb.guide[x].href)[0]
|
if x.lower() not in {'cover', 'titlepage', 'masthead', 'toc',
|
||||||
if x.lower() not in ('cover', 'titlepage', 'masthead', 'toc',
|
'title-page', 'copyright-page', 'text'}:
|
||||||
'title-page', 'copyright-page', 'start'):
|
|
||||||
item = self.oeb.guide[x]
|
item = self.oeb.guide[x]
|
||||||
if item.title and item.title.lower() == 'start':
|
if item.title and item.title.lower() == 'start':
|
||||||
continue
|
continue
|
||||||
|
@ -18,10 +18,11 @@ from calibre.ebooks.pdf.pageoptions import unit, paper_size, \
|
|||||||
from calibre.ebooks.metadata import authors_to_string
|
from calibre.ebooks.metadata import authors_to_string
|
||||||
from calibre.ptempfile import PersistentTemporaryFile
|
from calibre.ptempfile import PersistentTemporaryFile
|
||||||
from calibre import __appname__, __version__, fit_image
|
from calibre import __appname__, __version__, fit_image
|
||||||
|
from calibre.ebooks.oeb.display.webview import load_html
|
||||||
|
|
||||||
from PyQt4 import QtCore
|
from PyQt4 import QtCore
|
||||||
from PyQt4.Qt import QUrl, QEventLoop, QObject, \
|
from PyQt4.Qt import (QEventLoop, QObject,
|
||||||
QPrinter, QMetaObject, QSizeF, Qt, QPainter, QPixmap
|
QPrinter, QMetaObject, QSizeF, Qt, QPainter, QPixmap)
|
||||||
from PyQt4.QtWebKit import QWebView
|
from PyQt4.QtWebKit import QWebView
|
||||||
|
|
||||||
from pyPdf import PdfFileWriter, PdfFileReader
|
from pyPdf import PdfFileWriter, PdfFileReader
|
||||||
@ -70,7 +71,7 @@ def get_pdf_printer(opts, for_comic=False):
|
|||||||
opts.margin_right, opts.margin_bottom, QPrinter.Point)
|
opts.margin_right, opts.margin_bottom, QPrinter.Point)
|
||||||
printer.setOrientation(orientation(opts.orientation))
|
printer.setOrientation(orientation(opts.orientation))
|
||||||
printer.setOutputFormat(QPrinter.PdfFormat)
|
printer.setOutputFormat(QPrinter.PdfFormat)
|
||||||
printer.setFullPage(True)
|
printer.setFullPage(for_comic)
|
||||||
return printer
|
return printer
|
||||||
|
|
||||||
def get_printer_page_size(opts, for_comic=False):
|
def get_printer_page_size(opts, for_comic=False):
|
||||||
@ -156,8 +157,7 @@ class PDFWriter(QObject): # {{{
|
|||||||
self.combine_queue.append(os.path.join(self.tmp_path, '%i.pdf' % (len(self.combine_queue) + 1)))
|
self.combine_queue.append(os.path.join(self.tmp_path, '%i.pdf' % (len(self.combine_queue) + 1)))
|
||||||
|
|
||||||
self.logger.debug('Processing %s...' % item)
|
self.logger.debug('Processing %s...' % item)
|
||||||
|
load_html(item, self.view)
|
||||||
self.view.load(QUrl.fromLocalFile(item))
|
|
||||||
|
|
||||||
def _render_html(self, ok):
|
def _render_html(self, ok):
|
||||||
if ok:
|
if ok:
|
||||||
@ -168,9 +168,14 @@ class PDFWriter(QObject): # {{{
|
|||||||
# We have to set the engine to Native on OS X after the call to set
|
# We have to set the engine to Native on OS X after the call to set
|
||||||
# filename. Setting a filename with .pdf as the extension causes
|
# filename. Setting a filename with .pdf as the extension causes
|
||||||
# Qt to set the format to use Qt's PDF engine even if native was
|
# Qt to set the format to use Qt's PDF engine even if native was
|
||||||
# previously set on the printer.
|
# previously set on the printer. Qt's PDF engine produces image
|
||||||
|
# based PDFs on OS X, so we cannot use it.
|
||||||
if isosx:
|
if isosx:
|
||||||
printer.setOutputFormat(QPrinter.NativeFormat)
|
printer.setOutputFormat(QPrinter.NativeFormat)
|
||||||
|
self.view.page().mainFrame().evaluateJavaScript('''
|
||||||
|
document.body.style.backgroundColor = "white";
|
||||||
|
|
||||||
|
''')
|
||||||
self.view.print_(printer)
|
self.view.print_(printer)
|
||||||
printer.abort()
|
printer.abort()
|
||||||
else:
|
else:
|
||||||
|
@ -81,8 +81,8 @@ class Worker(Thread): # {{{
|
|||||||
if prefs['add_formats_to_existing']:
|
if prefs['add_formats_to_existing']:
|
||||||
identical_book_list = newdb.find_identical_books(mi)
|
identical_book_list = newdb.find_identical_books(mi)
|
||||||
if identical_book_list: # books with same author and nearly same title exist in newdb
|
if identical_book_list: # books with same author and nearly same title exist in newdb
|
||||||
self.auto_merged_ids[x] = _('%s by %s')%(mi.title,
|
self.auto_merged_ids[x] = _('%(title)s by %(author)s')%\
|
||||||
mi.format_field('authors')[1])
|
dict(title=mi.title, author=mi.format_field('authors')[1])
|
||||||
automerged = True
|
automerged = True
|
||||||
seen_fmts = set()
|
seen_fmts = set()
|
||||||
for identical_book in identical_book_list:
|
for identical_book in identical_book_list:
|
||||||
|
@ -9,10 +9,10 @@ import re, os
|
|||||||
|
|
||||||
from lxml import html
|
from lxml import html
|
||||||
|
|
||||||
from PyQt4.Qt import QApplication, QFontInfo, QSize, QWidget, QPlainTextEdit, \
|
from PyQt4.Qt import (QApplication, QFontInfo, QSize, QWidget, QPlainTextEdit,
|
||||||
QToolBar, QVBoxLayout, QAction, QIcon, Qt, QTabWidget, QUrl, \
|
QToolBar, QVBoxLayout, QAction, QIcon, Qt, QTabWidget, QUrl,
|
||||||
QSyntaxHighlighter, QColor, QChar, QColorDialog, QMenu, QInputDialog, \
|
QSyntaxHighlighter, QColor, QChar, QColorDialog, QMenu, QInputDialog,
|
||||||
QHBoxLayout
|
QHBoxLayout, QKeySequence)
|
||||||
from PyQt4.QtWebKit import QWebView, QWebPage
|
from PyQt4.QtWebKit import QWebView, QWebPage
|
||||||
|
|
||||||
from calibre.ebooks.chardet import xml_to_unicode
|
from calibre.ebooks.chardet import xml_to_unicode
|
||||||
@ -32,6 +32,7 @@ class PageAction(QAction): # {{{
|
|||||||
type=Qt.QueuedConnection)
|
type=Qt.QueuedConnection)
|
||||||
self.page_action.changed.connect(self.update_state,
|
self.page_action.changed.connect(self.update_state,
|
||||||
type=Qt.QueuedConnection)
|
type=Qt.QueuedConnection)
|
||||||
|
self.update_state()
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def page_action(self):
|
def page_action(self):
|
||||||
@ -66,6 +67,12 @@ class EditorWidget(QWebView): # {{{
|
|||||||
|
|
||||||
self.comments_pat = re.compile(r'<!--.*?-->', re.DOTALL)
|
self.comments_pat = re.compile(r'<!--.*?-->', re.DOTALL)
|
||||||
|
|
||||||
|
extra_shortcuts = {
|
||||||
|
'ToggleBold': 'Bold',
|
||||||
|
'ToggleItalic': 'Italic',
|
||||||
|
'ToggleUnderline': 'Underline',
|
||||||
|
}
|
||||||
|
|
||||||
for wac, name, icon, text, checkable in [
|
for wac, name, icon, text, checkable in [
|
||||||
('ToggleBold', 'bold', 'format-text-bold', _('Bold'), True),
|
('ToggleBold', 'bold', 'format-text-bold', _('Bold'), True),
|
||||||
('ToggleItalic', 'italic', 'format-text-italic', _('Italic'),
|
('ToggleItalic', 'italic', 'format-text-italic', _('Italic'),
|
||||||
@ -106,6 +113,9 @@ class EditorWidget(QWebView): # {{{
|
|||||||
]:
|
]:
|
||||||
ac = PageAction(wac, icon, text, checkable, self)
|
ac = PageAction(wac, icon, text, checkable, self)
|
||||||
setattr(self, 'action_'+name, ac)
|
setattr(self, 'action_'+name, ac)
|
||||||
|
ss = extra_shortcuts.get(wac, None)
|
||||||
|
if ss:
|
||||||
|
ac.setShortcut(QKeySequence(getattr(QKeySequence, ss)))
|
||||||
|
|
||||||
self.action_color = QAction(QIcon(I('format-text-color')), _('Foreground color'),
|
self.action_color = QAction(QIcon(I('format-text-color')), _('Foreground color'),
|
||||||
self)
|
self)
|
||||||
|
@ -6,8 +6,8 @@ __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
|
|||||||
__docformat__ = 'restructuredtext en'
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
|
|
||||||
from PyQt4.Qt import QLineEdit, QAbstractListModel, Qt, \
|
from PyQt4.Qt import (QLineEdit, QAbstractListModel, Qt,
|
||||||
QApplication, QCompleter
|
QApplication, QCompleter, QMetaObject)
|
||||||
|
|
||||||
from calibre.utils.icu import sort_key, lower
|
from calibre.utils.icu import sort_key, lower
|
||||||
from calibre.gui2 import NONE
|
from calibre.gui2 import NONE
|
||||||
@ -182,14 +182,27 @@ class MultiCompleteComboBox(EnComboBox):
|
|||||||
def set_add_separator(self, what):
|
def set_add_separator(self, what):
|
||||||
self.lineEdit().set_add_separator(what)
|
self.lineEdit().set_add_separator(what)
|
||||||
|
|
||||||
|
def show_initial_value(self, what):
|
||||||
|
'''
|
||||||
|
Show an initial value. Handle the case of the initial value being blank
|
||||||
|
correctly (on Qt 4.8.0 having a blank value causes the first value from
|
||||||
|
the completer to be shown, when the event loop runs).
|
||||||
|
'''
|
||||||
|
what = unicode(what)
|
||||||
|
le = self.lineEdit()
|
||||||
|
if not what.strip():
|
||||||
|
QMetaObject.invokeMethod(self, 'clearEditText',
|
||||||
|
Qt.QueuedConnection)
|
||||||
|
else:
|
||||||
|
self.setEditText(what)
|
||||||
|
le.selectAll()
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
from PyQt4.Qt import QDialog, QVBoxLayout
|
from PyQt4.Qt import QDialog, QVBoxLayout
|
||||||
app = QApplication([])
|
app = QApplication([])
|
||||||
d = QDialog()
|
d = QDialog()
|
||||||
d.setLayout(QVBoxLayout())
|
d.setLayout(QVBoxLayout())
|
||||||
le = MultiCompleteLineEdit(d)
|
le = MultiCompleteComboBox(d)
|
||||||
d.layout().addWidget(le)
|
d.layout().addWidget(le)
|
||||||
le.all_items = ['one', 'otwo', 'othree', 'ooone', 'ootwo', 'oothree']
|
le.all_items = ['one', 'otwo', 'othree', 'ooone', 'ootwo', 'oothree']
|
||||||
d.exec_()
|
d.exec_()
|
||||||
|
@ -9,8 +9,8 @@ __docformat__ = 'restructuredtext en'
|
|||||||
__license__ = 'GPL v3'
|
__license__ = 'GPL v3'
|
||||||
|
|
||||||
|
|
||||||
from PyQt4.Qt import QDialog, QVBoxLayout, QLabel, QDialogButtonBox, \
|
from PyQt4.Qt import (QDialog, QVBoxLayout, QLabel, QDialogButtonBox,
|
||||||
QListWidget, QAbstractItemView
|
QListWidget, QAbstractItemView)
|
||||||
from PyQt4 import QtGui
|
from PyQt4 import QtGui
|
||||||
|
|
||||||
class ChoosePluginToolbarsDialog(QDialog):
|
class ChoosePluginToolbarsDialog(QDialog):
|
||||||
@ -39,6 +39,9 @@ class ChoosePluginToolbarsDialog(QDialog):
|
|||||||
self._locations_list.setSizePolicy(sizePolicy)
|
self._locations_list.setSizePolicy(sizePolicy)
|
||||||
for key, text in locations:
|
for key, text in locations:
|
||||||
self._locations_list.addItem(text)
|
self._locations_list.addItem(text)
|
||||||
|
if key in {'toolbar', 'toolbar-device'}:
|
||||||
|
self._locations_list.item(self._locations_list.count()-1
|
||||||
|
).setSelected(True)
|
||||||
self._layout.addWidget(self._locations_list)
|
self._layout.addWidget(self._locations_list)
|
||||||
|
|
||||||
self._footer_label = QLabel(
|
self._footer_label = QLabel(
|
||||||
|
@ -11,9 +11,9 @@ from datetime import timedelta
|
|||||||
import calendar, textwrap
|
import calendar, textwrap
|
||||||
from collections import OrderedDict
|
from collections import OrderedDict
|
||||||
|
|
||||||
from PyQt4.Qt import QDialog, Qt, QTime, QObject, QMenu, QHBoxLayout, \
|
from PyQt4.Qt import (QDialog, Qt, QTime, QObject, QMenu, QHBoxLayout,
|
||||||
QAction, QIcon, QMutex, QTimer, pyqtSignal, QWidget, QGridLayout, \
|
QAction, QIcon, QMutex, QTimer, pyqtSignal, QWidget, QGridLayout,
|
||||||
QCheckBox, QTimeEdit, QLabel, QLineEdit, QDoubleSpinBox
|
QCheckBox, QTimeEdit, QLabel, QLineEdit, QDoubleSpinBox)
|
||||||
|
|
||||||
from calibre.gui2.dialogs.scheduler_ui import Ui_Dialog
|
from calibre.gui2.dialogs.scheduler_ui import Ui_Dialog
|
||||||
from calibre.gui2 import config as gconf, error_dialog
|
from calibre.gui2 import config as gconf, error_dialog
|
||||||
@ -317,6 +317,8 @@ class SchedulerDialog(QDialog, Ui_Dialog):
|
|||||||
return False
|
return False
|
||||||
if un or pw:
|
if un or pw:
|
||||||
self.recipe_model.set_account_info(urn, un, pw)
|
self.recipe_model.set_account_info(urn, un, pw)
|
||||||
|
else:
|
||||||
|
self.recipe_model.clear_account_info(urn)
|
||||||
|
|
||||||
if self.schedule.isChecked():
|
if self.schedule.isChecked():
|
||||||
schedule_type, schedule = \
|
schedule_type, schedule = \
|
||||||
|
@ -128,8 +128,7 @@ class TextDelegate(QStyledItemDelegate): # {{{
|
|||||||
for item in sorted(complete_items, key=sort_key):
|
for item in sorted(complete_items, key=sort_key):
|
||||||
editor.addItem(item)
|
editor.addItem(item)
|
||||||
ct = index.data(Qt.DisplayRole).toString()
|
ct = index.data(Qt.DisplayRole).toString()
|
||||||
editor.setEditText(ct)
|
editor.show_initial_value(ct)
|
||||||
editor.lineEdit().selectAll()
|
|
||||||
else:
|
else:
|
||||||
editor = EnLineEdit(parent)
|
editor = EnLineEdit(parent)
|
||||||
return editor
|
return editor
|
||||||
@ -170,8 +169,7 @@ class CompleteDelegate(QStyledItemDelegate): # {{{
|
|||||||
for item in sorted(all_items, key=sort_key):
|
for item in sorted(all_items, key=sort_key):
|
||||||
editor.addItem(item)
|
editor.addItem(item)
|
||||||
ct = index.data(Qt.DisplayRole).toString()
|
ct = index.data(Qt.DisplayRole).toString()
|
||||||
editor.setEditText(ct)
|
editor.show_initial_value(ct)
|
||||||
editor.lineEdit().selectAll()
|
|
||||||
else:
|
else:
|
||||||
editor = EnLineEdit(parent)
|
editor = EnLineEdit(parent)
|
||||||
return editor
|
return editor
|
||||||
@ -190,8 +188,7 @@ class LanguagesDelegate(QStyledItemDelegate): # {{{
|
|||||||
editor = LanguagesEdit(parent=parent)
|
editor = LanguagesEdit(parent=parent)
|
||||||
editor.init_langs(index.model().db)
|
editor.init_langs(index.model().db)
|
||||||
ct = index.data(Qt.DisplayRole).toString()
|
ct = index.data(Qt.DisplayRole).toString()
|
||||||
editor.setEditText(ct)
|
editor.show_initial_value(ct)
|
||||||
editor.lineEdit().selectAll()
|
|
||||||
return editor
|
return editor
|
||||||
|
|
||||||
def setModelData(self, editor, model, index):
|
def setModelData(self, editor, model, index):
|
||||||
|
@ -262,9 +262,11 @@ class BooksView(QTableView): # {{{
|
|||||||
self.selected_ids = [idc(r) for r in selected_rows]
|
self.selected_ids = [idc(r) for r in selected_rows]
|
||||||
|
|
||||||
def sorting_done(self, indexc):
|
def sorting_done(self, indexc):
|
||||||
|
pos = self.horizontalScrollBar().value()
|
||||||
self.select_rows(self.selected_ids, using_ids=True, change_current=True,
|
self.select_rows(self.selected_ids, using_ids=True, change_current=True,
|
||||||
scroll=True)
|
scroll=True)
|
||||||
self.selected_ids = []
|
self.selected_ids = []
|
||||||
|
self.horizontalScrollBar().setValue(pos)
|
||||||
|
|
||||||
def sort_by_named_field(self, field, order, reset=True):
|
def sort_by_named_field(self, field, order, reset=True):
|
||||||
if field in self.column_map:
|
if field in self.column_map:
|
||||||
|
@ -882,6 +882,11 @@ class FullFetch(QDialog): # {{{
|
|||||||
self.covers_widget.chosen.connect(self.ok_clicked)
|
self.covers_widget.chosen.connect(self.ok_clicked)
|
||||||
self.stack.addWidget(self.covers_widget)
|
self.stack.addWidget(self.covers_widget)
|
||||||
|
|
||||||
|
# Workaround for Qt 4.8.0 bug that causes the frame of the window to go
|
||||||
|
# off the top of the screen if a max height is not set for the
|
||||||
|
# QWebView. Seems to only happen on windows, but keep it for all
|
||||||
|
# platforms just in case.
|
||||||
|
self.identify_widget.comments_view.setMaximumHeight(500)
|
||||||
self.resize(850, 550)
|
self.resize(850, 550)
|
||||||
|
|
||||||
self.finished.connect(self.cleanup)
|
self.finished.connect(self.cleanup)
|
||||||
|
@ -29,4 +29,4 @@ class SearchResult(object):
|
|||||||
self.plugin_author = ''
|
self.plugin_author = ''
|
||||||
|
|
||||||
def __eq__(self, other):
|
def __eq__(self, other):
|
||||||
return self.title == other.title and self.author == other.author and self.store_name == other.store_name
|
return self.title == other.title and self.author == other.author and self.store_name == other.store_name and self.formats == other.formats
|
||||||
|
@ -3,7 +3,7 @@
|
|||||||
from __future__ import (unicode_literals, division, absolute_import, print_function)
|
from __future__ import (unicode_literals, division, absolute_import, print_function)
|
||||||
|
|
||||||
__license__ = 'GPL 3'
|
__license__ = 'GPL 3'
|
||||||
__copyright__ = '2011, Tomasz Długosz <tomek3d@gmail.com>'
|
__copyright__ = '2011-2012, Tomasz Długosz <tomek3d@gmail.com>'
|
||||||
__docformat__ = 'restructuredtext en'
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
import re
|
import re
|
||||||
@ -64,9 +64,7 @@ class EbookpointStore(BasicStoreConfig, StorePlugin):
|
|||||||
author = ''.join(data.xpath('.//p[@class="author"]/text()'))
|
author = ''.join(data.xpath('.//p[@class="author"]/text()'))
|
||||||
price = ''.join(data.xpath('.//p[@class="price"]/ins/text()'))
|
price = ''.join(data.xpath('.//p[@class="price"]/ins/text()'))
|
||||||
|
|
||||||
with closing(br.open(id.strip(), timeout=timeout)) as nf:
|
formats = ', '.join(data.xpath('.//div[@class="ikony"]/span/text()'))
|
||||||
idata = html.fromstring(nf.read())
|
|
||||||
formats = ', '.join(idata.xpath('//dd[@class="radio-line"]/label/text()'))
|
|
||||||
|
|
||||||
counter -= 1
|
counter -= 1
|
||||||
|
|
||||||
@ -77,6 +75,6 @@ class EbookpointStore(BasicStoreConfig, StorePlugin):
|
|||||||
s.price = re.sub(r'\.',',',price)
|
s.price = re.sub(r'\.',',',price)
|
||||||
s.detail_item = id.strip()
|
s.detail_item = id.strip()
|
||||||
s.drm = SearchResult.DRM_UNLOCKED
|
s.drm = SearchResult.DRM_UNLOCKED
|
||||||
s.formats = formats.upper().strip()
|
s.formats = formats.upper()
|
||||||
|
|
||||||
yield s
|
yield s
|
||||||
|
@ -68,8 +68,8 @@ class NextoStore(BasicStoreConfig, StorePlugin):
|
|||||||
title = ''.join(data.xpath('.//a[@class="title"]/text()'))
|
title = ''.join(data.xpath('.//a[@class="title"]/text()'))
|
||||||
title = re.sub(r' - ebook$', '', title)
|
title = re.sub(r' - ebook$', '', title)
|
||||||
formats = ', '.join(data.xpath('.//ul[@class="formats_available"]/li//b/text()'))
|
formats = ', '.join(data.xpath('.//ul[@class="formats_available"]/li//b/text()'))
|
||||||
DrmFree = re.search(r'bez.DRM', formats)
|
DrmFree = re.search(r'znak', formats)
|
||||||
formats = re.sub(r'\(.+\)', '', formats)
|
formats = re.sub(r'\ ?\(.+?\)', '', formats)
|
||||||
|
|
||||||
author = ''
|
author = ''
|
||||||
with closing(br.open('http://www.nexto.pl/' + id.strip(), timeout=timeout/4)) as nf:
|
with closing(br.open('http://www.nexto.pl/' + id.strip(), timeout=timeout/4)) as nf:
|
||||||
|
@ -6,6 +6,7 @@ __license__ = 'GPL 3'
|
|||||||
__copyright__ = '2011-2012, Tomasz Długosz <tomek3d@gmail.com>'
|
__copyright__ = '2011-2012, Tomasz Długosz <tomek3d@gmail.com>'
|
||||||
__docformat__ = 'restructuredtext en'
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
|
import copy
|
||||||
import re
|
import re
|
||||||
import urllib
|
import urllib
|
||||||
from contextlib import closing
|
from contextlib import closing
|
||||||
@ -43,9 +44,9 @@ class WoblinkStore(BasicStoreConfig, StorePlugin):
|
|||||||
url = 'http://woblink.com/publication?query=' + urllib.quote_plus(query.encode('utf-8'))
|
url = 'http://woblink.com/publication?query=' + urllib.quote_plus(query.encode('utf-8'))
|
||||||
if max_results > 10:
|
if max_results > 10:
|
||||||
if max_results > 20:
|
if max_results > 20:
|
||||||
url += '&limit=' + str(30)
|
url += '&limit=30'
|
||||||
else:
|
else:
|
||||||
url += '&limit=' + str(20)
|
url += '&limit=20'
|
||||||
|
|
||||||
br = browser()
|
br = browser()
|
||||||
|
|
||||||
@ -66,15 +67,6 @@ class WoblinkStore(BasicStoreConfig, StorePlugin):
|
|||||||
price = ''.join(data.xpath('.//div[@class="prices"]/span[1]/span/text()'))
|
price = ''.join(data.xpath('.//div[@class="prices"]/span[1]/span/text()'))
|
||||||
price = re.sub('\.', ',', price)
|
price = re.sub('\.', ',', price)
|
||||||
formats = [ form[8:-4].split('_')[0] for form in data.xpath('.//p[3]/img/@src')]
|
formats = [ form[8:-4].split('_')[0] for form in data.xpath('.//p[3]/img/@src')]
|
||||||
if 'epub' in formats:
|
|
||||||
formats.remove('epub')
|
|
||||||
formats.append('WOBLINK')
|
|
||||||
if 'E Ink' in data.xpath('.//div[@class="prices"]/img/@title'):
|
|
||||||
formats.insert(0, 'EPUB')
|
|
||||||
if 'pdf' in formats:
|
|
||||||
formats[formats.index('pdf')] = 'PDF'
|
|
||||||
|
|
||||||
counter -= 1
|
|
||||||
|
|
||||||
s = SearchResult()
|
s = SearchResult()
|
||||||
s.cover_url = 'http://woblink.com' + cover_url
|
s.cover_url = 'http://woblink.com' + cover_url
|
||||||
@ -82,7 +74,28 @@ class WoblinkStore(BasicStoreConfig, StorePlugin):
|
|||||||
s.author = author.strip()
|
s.author = author.strip()
|
||||||
s.price = price + ' zł'
|
s.price = price + ' zł'
|
||||||
s.detail_item = id.strip()
|
s.detail_item = id.strip()
|
||||||
s.drm = SearchResult.DRM_UNKNOWN if 'MOBI' in formats else SearchResult.DRM_LOCKED
|
|
||||||
s.formats = ', '.join(formats)
|
|
||||||
|
|
||||||
yield s
|
# MOBI should be send first,
|
||||||
|
if 'MOBI' in formats:
|
||||||
|
t = copy.copy(s)
|
||||||
|
t.title += ' MOBI'
|
||||||
|
t.drm = SearchResult.DRM_UNLOCKED
|
||||||
|
t.formats = 'MOBI'
|
||||||
|
formats.remove('MOBI')
|
||||||
|
|
||||||
|
counter -= 1
|
||||||
|
yield t
|
||||||
|
|
||||||
|
# and the remaining formats (if any) next
|
||||||
|
if formats:
|
||||||
|
if 'epub' in formats:
|
||||||
|
formats.remove('epub')
|
||||||
|
formats.append('WOBLINK')
|
||||||
|
if 'E Ink' in data.xpath('.//div[@class="prices"]/img/@title'):
|
||||||
|
formats.insert(0, 'EPUB')
|
||||||
|
|
||||||
|
s.drm = SearchResult.DRM_LOCKED
|
||||||
|
s.formats = ', '.join(formats).upper()
|
||||||
|
|
||||||
|
counter -= 1
|
||||||
|
yield s
|
||||||
|
@ -151,7 +151,7 @@ class UpdateMixin(object):
|
|||||||
plt = u''
|
plt = u''
|
||||||
if has_plugin_updates:
|
if has_plugin_updates:
|
||||||
plt = _(' (%d plugin updates)')%plugin_updates
|
plt = _(' (%d plugin updates)')%plugin_updates
|
||||||
msg = (u'<span style="color:red; font-weight: bold">%s: '
|
msg = (u'<span style="color:green; font-weight: bold">%s: '
|
||||||
u'<a href="update:%s">%s%s</a></span>') % (
|
u'<a href="update:%s">%s%s</a></span>') % (
|
||||||
_('Update found'), version, calibre_version, plt)
|
_('Update found'), version, calibre_version, plt)
|
||||||
else:
|
else:
|
||||||
|
@ -31,6 +31,7 @@ class BookmarkManager(QDialog, Ui_BookmarkManager):
|
|||||||
bookmarks = self.bookmarks[:]
|
bookmarks = self.bookmarks[:]
|
||||||
self._model = BookmarkTableModel(self, bookmarks)
|
self._model = BookmarkTableModel(self, bookmarks)
|
||||||
self.bookmarks_table.setModel(self._model)
|
self.bookmarks_table.setModel(self._model)
|
||||||
|
self.bookmarks_table.resizeColumnsToContents()
|
||||||
|
|
||||||
def delete_bookmark(self):
|
def delete_bookmark(self):
|
||||||
indexes = self.bookmarks_table.selectionModel().selectedIndexes()
|
indexes = self.bookmarks_table.selectionModel().selectedIndexes()
|
||||||
@ -80,7 +81,7 @@ class BookmarkManager(QDialog, Ui_BookmarkManager):
|
|||||||
if not bad:
|
if not bad:
|
||||||
bookmarks = self._model.bookmarks[:]
|
bookmarks = self._model.bookmarks[:]
|
||||||
for bm in imported:
|
for bm in imported:
|
||||||
if bm not in bookmarks and bm[0] != 'calibre_current_page_bookmark':
|
if bm not in bookmarks and bm['title'] != 'calibre_current_page_bookmark':
|
||||||
bookmarks.append(bm)
|
bookmarks.append(bm)
|
||||||
self.set_bookmarks(bookmarks)
|
self.set_bookmarks(bookmarks)
|
||||||
|
|
||||||
@ -105,13 +106,14 @@ class BookmarkTableModel(QAbstractTableModel):
|
|||||||
|
|
||||||
def data(self, index, role):
|
def data(self, index, role):
|
||||||
if role in (Qt.DisplayRole, Qt.EditRole):
|
if role in (Qt.DisplayRole, Qt.EditRole):
|
||||||
ans = self.bookmarks[index.row()][0]
|
ans = self.bookmarks[index.row()]['title']
|
||||||
return NONE if ans is None else QVariant(ans)
|
return NONE if ans is None else QVariant(ans)
|
||||||
return NONE
|
return NONE
|
||||||
|
|
||||||
def setData(self, index, value, role):
|
def setData(self, index, value, role):
|
||||||
if role == Qt.EditRole:
|
if role == Qt.EditRole:
|
||||||
self.bookmarks[index.row()] = (unicode(value.toString()).strip(), self.bookmarks[index.row()][1])
|
bm = self.bookmarks[index.row()]
|
||||||
|
bm['title'] = unicode(value.toString()).strip()
|
||||||
self.emit(SIGNAL("dataChanged(QModelIndex, QModelIndex)"), index, index)
|
self.emit(SIGNAL("dataChanged(QModelIndex, QModelIndex)"), index, index)
|
||||||
return True
|
return True
|
||||||
return False
|
return False
|
||||||
|
@ -4,14 +4,14 @@ __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
|
|||||||
__docformat__ = 'restructuredtext en'
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
# Imports {{{
|
# Imports {{{
|
||||||
import os, math, re, glob, sys, zipfile
|
import os, math, glob, zipfile
|
||||||
from base64 import b64encode
|
from base64 import b64encode
|
||||||
from functools import partial
|
from functools import partial
|
||||||
|
|
||||||
from PyQt4.Qt import (QSize, QSizePolicy, QUrl, SIGNAL, Qt,
|
from PyQt4.Qt import (QSize, QSizePolicy, QUrl, SIGNAL, Qt,
|
||||||
QPainter, QPalette, QBrush, QFontDatabase, QDialog,
|
QPainter, QPalette, QBrush, QFontDatabase, QDialog,
|
||||||
QColor, QPoint, QImage, QRegion, QVariant, QIcon,
|
QColor, QPoint, QImage, QRegion, QVariant, QIcon,
|
||||||
QFont, pyqtSignature, QAction, QByteArray, QMenu,
|
QFont, pyqtSignature, QAction, QMenu,
|
||||||
pyqtSignal, QSwipeGesture, QApplication)
|
pyqtSignal, QSwipeGesture, QApplication)
|
||||||
from PyQt4.QtWebKit import QWebPage, QWebView, QWebSettings
|
from PyQt4.QtWebKit import QWebPage, QWebView, QWebSettings
|
||||||
|
|
||||||
@ -21,10 +21,11 @@ from calibre.gui2.viewer.config_ui import Ui_Dialog
|
|||||||
from calibre.gui2.viewer.flip import SlideFlip
|
from calibre.gui2.viewer.flip import SlideFlip
|
||||||
from calibre.gui2.shortcuts import Shortcuts, ShortcutConfig
|
from calibre.gui2.shortcuts import Shortcuts, ShortcutConfig
|
||||||
from calibre.constants import iswindows
|
from calibre.constants import iswindows
|
||||||
from calibre import prints, guess_type
|
from calibre import prints
|
||||||
from calibre.gui2.viewer.keys import SHORTCUTS
|
from calibre.gui2.viewer.keys import SHORTCUTS
|
||||||
from calibre.gui2.viewer.javascript import JavaScriptLoader
|
from calibre.gui2.viewer.javascript import JavaScriptLoader
|
||||||
from calibre.gui2.viewer.position import PagePosition
|
from calibre.gui2.viewer.position import PagePosition
|
||||||
|
from calibre.ebooks.oeb.display.webview import load_html
|
||||||
|
|
||||||
# }}}
|
# }}}
|
||||||
|
|
||||||
@ -312,10 +313,14 @@ class Document(QWebPage): # {{{
|
|||||||
self.javascript('goto_reference("%s")'%ref)
|
self.javascript('goto_reference("%s")'%ref)
|
||||||
|
|
||||||
def goto_bookmark(self, bm):
|
def goto_bookmark(self, bm):
|
||||||
bm = bm.strip()
|
if bm['type'] == 'legacy':
|
||||||
if bm.startswith('>'):
|
bm = bm['pos']
|
||||||
bm = bm[1:].strip()
|
bm = bm.strip()
|
||||||
self.javascript('scroll_to_bookmark("%s")'%bm)
|
if bm.startswith('>'):
|
||||||
|
bm = bm[1:].strip()
|
||||||
|
self.javascript('scroll_to_bookmark("%s")'%bm)
|
||||||
|
elif bm['type'] == 'cfi':
|
||||||
|
self.page_position.to_pos(bm['pos'])
|
||||||
|
|
||||||
def javascript(self, string, typ=None):
|
def javascript(self, string, typ=None):
|
||||||
ans = self.mainFrame().evaluateJavaScript(string)
|
ans = self.mainFrame().evaluateJavaScript(string)
|
||||||
@ -366,40 +371,9 @@ class Document(QWebPage): # {{{
|
|||||||
def elem_outer_xml(self, elem):
|
def elem_outer_xml(self, elem):
|
||||||
return unicode(elem.toOuterXml())
|
return unicode(elem.toOuterXml())
|
||||||
|
|
||||||
def find_bookmark_element(self):
|
|
||||||
mf = self.mainFrame()
|
|
||||||
doc_pos = self.ypos
|
|
||||||
min_delta, min_elem = sys.maxint, None
|
|
||||||
for y in range(10, -500, -10):
|
|
||||||
for x in range(-50, 500, 10):
|
|
||||||
pos = QPoint(x, y)
|
|
||||||
result = mf.hitTestContent(pos)
|
|
||||||
if result.isNull(): continue
|
|
||||||
elem = result.enclosingBlockElement()
|
|
||||||
if elem.isNull(): continue
|
|
||||||
try:
|
|
||||||
ypos = self.element_ypos(elem)
|
|
||||||
except:
|
|
||||||
continue
|
|
||||||
delta = abs(ypos - doc_pos)
|
|
||||||
if delta < 25:
|
|
||||||
return elem
|
|
||||||
if delta < min_delta:
|
|
||||||
min_elem, min_delta = elem, delta
|
|
||||||
return min_elem
|
|
||||||
|
|
||||||
|
|
||||||
def bookmark(self):
|
def bookmark(self):
|
||||||
elem = self.find_bookmark_element()
|
pos = self.page_position.current_pos
|
||||||
|
return {'type':'cfi', 'pos':pos}
|
||||||
if elem is None or self.element_ypos(elem) < 100:
|
|
||||||
bm = 'body|%f'%(float(self.ypos)/(self.height*0.7))
|
|
||||||
else:
|
|
||||||
bm = unicode(elem.evaluateJavaScript(
|
|
||||||
'calculate_bookmark(%d, this)'%self.ypos).toString())
|
|
||||||
if not bm:
|
|
||||||
bm = 'body|%f'%(float(self.ypos)/(self.height*0.7))
|
|
||||||
return bm
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def at_bottom(self):
|
def at_bottom(self):
|
||||||
@ -474,19 +448,6 @@ class Document(QWebPage): # {{{
|
|||||||
|
|
||||||
# }}}
|
# }}}
|
||||||
|
|
||||||
class EntityDeclarationProcessor(object): # {{{
|
|
||||||
|
|
||||||
def __init__(self, html):
|
|
||||||
self.declared_entities = {}
|
|
||||||
for match in re.finditer(r'<!\s*ENTITY\s+([^>]+)>', html):
|
|
||||||
tokens = match.group(1).split()
|
|
||||||
if len(tokens) > 1:
|
|
||||||
self.declared_entities[tokens[0].strip()] = tokens[1].strip().replace('"', '')
|
|
||||||
self.processed_html = html
|
|
||||||
for key, val in self.declared_entities.iteritems():
|
|
||||||
self.processed_html = self.processed_html.replace('&%s;'%key, val)
|
|
||||||
# }}}
|
|
||||||
|
|
||||||
class DocumentView(QWebView): # {{{
|
class DocumentView(QWebView): # {{{
|
||||||
|
|
||||||
magnification_changed = pyqtSignal(object)
|
magnification_changed = pyqtSignal(object)
|
||||||
@ -497,8 +458,6 @@ class DocumentView(QWebView): # {{{
|
|||||||
self.is_auto_repeat_event = False
|
self.is_auto_repeat_event = False
|
||||||
self.debug_javascript = debug_javascript
|
self.debug_javascript = debug_javascript
|
||||||
self.shortcuts = Shortcuts(SHORTCUTS, 'shortcuts/viewer')
|
self.shortcuts = Shortcuts(SHORTCUTS, 'shortcuts/viewer')
|
||||||
self.self_closing_pat = re.compile(r'<([a-z1-6]+)\s+([^>]+)/>',
|
|
||||||
re.IGNORECASE)
|
|
||||||
self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
|
self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
|
||||||
self._size_hint = QSize(510, 680)
|
self._size_hint = QSize(510, 680)
|
||||||
self.initial_pos = 0.0
|
self.initial_pos = 0.0
|
||||||
@ -689,30 +648,16 @@ class DocumentView(QWebView): # {{{
|
|||||||
def path(self):
|
def path(self):
|
||||||
return os.path.abspath(unicode(self.url().toLocalFile()))
|
return os.path.abspath(unicode(self.url().toLocalFile()))
|
||||||
|
|
||||||
def self_closing_sub(self, match):
|
|
||||||
tag = match.group(1)
|
|
||||||
if tag.lower().strip() == 'br':
|
|
||||||
return match.group()
|
|
||||||
return '<%s %s></%s>'%(match.group(1), match.group(2), match.group(1))
|
|
||||||
|
|
||||||
def load_path(self, path, pos=0.0):
|
def load_path(self, path, pos=0.0):
|
||||||
self.initial_pos = pos
|
self.initial_pos = pos
|
||||||
mt = getattr(path, 'mime_type', None)
|
|
||||||
if mt is None:
|
|
||||||
mt = guess_type(path)[0]
|
|
||||||
html = open(path, 'rb').read().decode(path.encoding, 'replace')
|
|
||||||
html = EntityDeclarationProcessor(html).processed_html
|
|
||||||
has_svg = re.search(r'<[:a-zA-Z]*svg', html) is not None
|
|
||||||
|
|
||||||
if 'xhtml' in mt:
|
def callback(lu):
|
||||||
html = self.self_closing_pat.sub(self.self_closing_sub, html)
|
self.loading_url = lu
|
||||||
if self.manager is not None:
|
if self.manager is not None:
|
||||||
self.manager.load_started()
|
self.manager.load_started()
|
||||||
self.loading_url = QUrl.fromLocalFile(path)
|
|
||||||
if has_svg:
|
load_html(path, self, codec=path.encoding, mime_type=getattr(path,
|
||||||
self.setContent(QByteArray(html.encode(path.encoding)), mt, QUrl.fromLocalFile(path))
|
'mime_type', None), pre_load_callback=callback)
|
||||||
else:
|
|
||||||
self.setHtml(html, self.loading_url)
|
|
||||||
self.turn_off_internal_scrollbars()
|
self.turn_off_internal_scrollbars()
|
||||||
|
|
||||||
def initialize_scrollbar(self):
|
def initialize_scrollbar(self):
|
||||||
@ -1010,8 +955,12 @@ class DocumentView(QWebView): # {{{
|
|||||||
finally:
|
finally:
|
||||||
self.is_auto_repeat_event = False
|
self.is_auto_repeat_event = False
|
||||||
elif key == 'Down':
|
elif key == 'Down':
|
||||||
|
if self.document.at_bottom:
|
||||||
|
self.manager.next_document()
|
||||||
self.scroll_by(y=15)
|
self.scroll_by(y=15)
|
||||||
elif key == 'Up':
|
elif key == 'Up':
|
||||||
|
if self.document.at_top:
|
||||||
|
self.manager.previous_document()
|
||||||
self.scroll_by(y=-15)
|
self.scroll_by(y=-15)
|
||||||
elif key == 'Left':
|
elif key == 'Left':
|
||||||
self.scroll_by(x=-15)
|
self.scroll_by(x=-15)
|
||||||
|
@ -27,6 +27,7 @@ from calibre.ebooks.metadata import MetaInformation
|
|||||||
from calibre.customize.ui import available_input_formats
|
from calibre.customize.ui import available_input_formats
|
||||||
from calibre.gui2.viewer.dictionary import Lookup
|
from calibre.gui2.viewer.dictionary import Lookup
|
||||||
from calibre import as_unicode, force_unicode, isbytestring
|
from calibre import as_unicode, force_unicode, isbytestring
|
||||||
|
from calibre.ptempfile import reset_base_dir
|
||||||
|
|
||||||
vprefs = JSONConfig('viewer')
|
vprefs = JSONConfig('viewer')
|
||||||
|
|
||||||
@ -512,17 +513,18 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
|
|||||||
self.load_path(self.iterator.spine[spine_index])
|
self.load_path(self.iterator.spine[spine_index])
|
||||||
|
|
||||||
def goto_bookmark(self, bm):
|
def goto_bookmark(self, bm):
|
||||||
m = bm[1].split('#')
|
spine_index = bm['spine']
|
||||||
if len(m) > 1:
|
if spine_index > -1 and self.current_index == spine_index:
|
||||||
spine_index, m = int(m[0]), m[1]
|
if self.resize_in_progress:
|
||||||
if spine_index > -1 and self.current_index == spine_index:
|
self.view.document.page_position.set_pos(bm['pos'])
|
||||||
self.view.goto_bookmark(m)
|
|
||||||
else:
|
else:
|
||||||
self.pending_bookmark = bm
|
self.view.goto_bookmark(bm)
|
||||||
if spine_index < 0 or spine_index >= len(self.iterator.spine):
|
else:
|
||||||
spine_index = 0
|
self.pending_bookmark = bm
|
||||||
self.pending_bookmark = None
|
if spine_index < 0 or spine_index >= len(self.iterator.spine):
|
||||||
self.load_path(self.iterator.spine[spine_index])
|
spine_index = 0
|
||||||
|
self.pending_bookmark = None
|
||||||
|
self.load_path(self.iterator.spine[spine_index])
|
||||||
|
|
||||||
def toc_clicked(self, index):
|
def toc_clicked(self, index):
|
||||||
item = self.toc_model.itemFromIndex(index)
|
item = self.toc_model.itemFromIndex(index)
|
||||||
@ -699,6 +701,14 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
|
|||||||
self.view.load_path(path, pos=pos)
|
self.view.load_path(path, pos=pos)
|
||||||
|
|
||||||
def viewport_resize_started(self, event):
|
def viewport_resize_started(self, event):
|
||||||
|
old, curr = event.size(), event.oldSize()
|
||||||
|
if not self.window_mode_changed and old.width() == curr.width():
|
||||||
|
# No relayout changes, so page position does not need to be saved
|
||||||
|
# This is needed as Qt generates a viewport resized event that
|
||||||
|
# changes only the height after a file has been loaded. This can
|
||||||
|
# cause the last read position bookmark to become slightly
|
||||||
|
# inaccurate
|
||||||
|
return
|
||||||
if not self.resize_in_progress:
|
if not self.resize_in_progress:
|
||||||
# First resize, so save the current page position
|
# First resize, so save the current page position
|
||||||
self.resize_in_progress = True
|
self.resize_in_progress = True
|
||||||
@ -746,9 +756,10 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
|
|||||||
_('Enter title for bookmark:'), text=bm)
|
_('Enter title for bookmark:'), text=bm)
|
||||||
title = unicode(title).strip()
|
title = unicode(title).strip()
|
||||||
if ok and title:
|
if ok and title:
|
||||||
pos = self.view.bookmark()
|
bm = self.view.bookmark()
|
||||||
bookmark = '%d#%s'%(self.current_index, pos)
|
bm['spine'] = self.current_index
|
||||||
self.iterator.add_bookmark((title, bookmark))
|
bm['title'] = title
|
||||||
|
self.iterator.add_bookmark(bm)
|
||||||
self.set_bookmarks(self.iterator.bookmarks)
|
self.set_bookmarks(self.iterator.bookmarks)
|
||||||
|
|
||||||
def set_bookmarks(self, bookmarks):
|
def set_bookmarks(self, bookmarks):
|
||||||
@ -758,12 +769,12 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
|
|||||||
current_page = None
|
current_page = None
|
||||||
self.existing_bookmarks = []
|
self.existing_bookmarks = []
|
||||||
for bm in bookmarks:
|
for bm in bookmarks:
|
||||||
if bm[0] == 'calibre_current_page_bookmark' and \
|
if bm['title'] == 'calibre_current_page_bookmark':
|
||||||
self.get_remember_current_page_opt():
|
if self.get_remember_current_page_opt():
|
||||||
current_page = bm
|
current_page = bm
|
||||||
else:
|
else:
|
||||||
self.existing_bookmarks.append(bm[0])
|
self.existing_bookmarks.append(bm['title'])
|
||||||
self.bookmarks_menu.addAction(bm[0], partial(self.goto_bookmark, bm))
|
self.bookmarks_menu.addAction(bm['title'], partial(self.goto_bookmark, bm))
|
||||||
return current_page
|
return current_page
|
||||||
|
|
||||||
def manage_bookmarks(self):
|
def manage_bookmarks(self):
|
||||||
@ -783,9 +794,10 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
|
|||||||
return
|
return
|
||||||
if hasattr(self, 'current_index'):
|
if hasattr(self, 'current_index'):
|
||||||
try:
|
try:
|
||||||
pos = self.view.bookmark()
|
bm = self.view.bookmark()
|
||||||
bookmark = '%d#%s'%(self.current_index, pos)
|
bm['spine'] = self.current_index
|
||||||
self.iterator.add_bookmark(('calibre_current_page_bookmark', bookmark))
|
bm['title'] = 'calibre_current_page_bookmark'
|
||||||
|
self.iterator.add_bookmark(bm)
|
||||||
except:
|
except:
|
||||||
traceback.print_exc()
|
traceback.print_exc()
|
||||||
|
|
||||||
@ -947,6 +959,7 @@ View an ebook.
|
|||||||
def main(args=sys.argv):
|
def main(args=sys.argv):
|
||||||
# Ensure viewer can continue to function if GUI is closed
|
# Ensure viewer can continue to function if GUI is closed
|
||||||
os.environ.pop('CALIBRE_WORKER_TEMP_DIR', None)
|
os.environ.pop('CALIBRE_WORKER_TEMP_DIR', None)
|
||||||
|
reset_base_dir()
|
||||||
|
|
||||||
parser = option_parser()
|
parser = option_parser()
|
||||||
opts, args = parser.parse_args(args)
|
opts, args = parser.parse_args(args)
|
||||||
|
@ -67,10 +67,16 @@ class PagePosition(object):
|
|||||||
|
|
||||||
def restore(self):
|
def restore(self):
|
||||||
if self._cpos is None: return
|
if self._cpos is None: return
|
||||||
if isinstance(self._cpos, (int, float)):
|
self.to_pos(self._cpos)
|
||||||
self.document.scroll_fraction = self._cpos
|
|
||||||
else:
|
|
||||||
self.scroll_to_cfi(self._cpos)
|
|
||||||
self._cpos = None
|
self._cpos = None
|
||||||
|
|
||||||
|
def to_pos(self, pos):
|
||||||
|
if isinstance(pos, (int, float)):
|
||||||
|
self.document.scroll_fraction = pos
|
||||||
|
else:
|
||||||
|
self.scroll_to_cfi(pos)
|
||||||
|
|
||||||
|
def set_pos(self, pos):
|
||||||
|
self._cpos = pos
|
||||||
|
|
||||||
|
|
||||||
|
@ -47,8 +47,8 @@ class CheckLibrary(object):
|
|||||||
self.is_case_sensitive = db.is_case_sensitive
|
self.is_case_sensitive = db.is_case_sensitive
|
||||||
|
|
||||||
self.all_authors = frozenset([x[1] for x in db.all_authors()])
|
self.all_authors = frozenset([x[1] for x in db.all_authors()])
|
||||||
self.all_ids = frozenset([id for id in db.all_ids()])
|
self.all_ids = frozenset([id_ for id_ in db.all_ids()])
|
||||||
self.all_dbpaths = frozenset(self.dbpath(id) for id in self.all_ids)
|
self.all_dbpaths = frozenset(self.dbpath(id_) for id_ in self.all_ids)
|
||||||
self.all_lc_dbpaths = frozenset([f.lower() for f in self.all_dbpaths])
|
self.all_lc_dbpaths = frozenset([f.lower() for f in self.all_dbpaths])
|
||||||
|
|
||||||
self.db_id_regexp = re.compile(r'^.* \((\d+)\)$')
|
self.db_id_regexp = re.compile(r'^.* \((\d+)\)$')
|
||||||
@ -73,8 +73,8 @@ class CheckLibrary(object):
|
|||||||
|
|
||||||
self.failed_folders = []
|
self.failed_folders = []
|
||||||
|
|
||||||
def dbpath(self, id):
|
def dbpath(self, id_):
|
||||||
return self.db.path(id, index_is_id=True)
|
return self.db.path(id_, index_is_id=True)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def errors_occurred(self):
|
def errors_occurred(self):
|
||||||
@ -116,21 +116,21 @@ class CheckLibrary(object):
|
|||||||
self.invalid_titles.append((auth_dir, db_path, 0))
|
self.invalid_titles.append((auth_dir, db_path, 0))
|
||||||
continue
|
continue
|
||||||
|
|
||||||
id = m.group(1)
|
id_ = m.group(1)
|
||||||
# Third check: the id must be in the DB and the paths must match
|
# Third check: the id_ must be in the DB and the paths must match
|
||||||
if self.is_case_sensitive:
|
if self.is_case_sensitive:
|
||||||
if int(id) not in self.all_ids or \
|
if int(id_) not in self.all_ids or \
|
||||||
db_path not in self.all_dbpaths:
|
db_path not in self.all_dbpaths:
|
||||||
self.extra_titles.append((title_dir, db_path, 0))
|
self.extra_titles.append((title_dir, db_path, 0))
|
||||||
continue
|
continue
|
||||||
else:
|
else:
|
||||||
if int(id) not in self.all_ids or \
|
if int(id_) not in self.all_ids or \
|
||||||
db_path.lower() not in self.all_lc_dbpaths:
|
db_path.lower() not in self.all_lc_dbpaths:
|
||||||
self.extra_titles.append((title_dir, db_path, 0))
|
self.extra_titles.append((title_dir, db_path, 0))
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Record the book to check its formats
|
# Record the book to check its formats
|
||||||
self.book_dirs.append((db_path, title_dir, id))
|
self.book_dirs.append((db_path, title_dir, id_))
|
||||||
found_titles = True
|
found_titles = True
|
||||||
|
|
||||||
# Fourth check: author directories that contain no titles
|
# Fourth check: author directories that contain no titles
|
||||||
@ -145,6 +145,21 @@ class CheckLibrary(object):
|
|||||||
# Sort-of check: exception processing directory
|
# Sort-of check: exception processing directory
|
||||||
self.failed_folders.append((title_path, traceback.format_exc(), []))
|
self.failed_folders.append((title_path, traceback.format_exc(), []))
|
||||||
|
|
||||||
|
# Check for formats and covers in db for book dirs that are gone
|
||||||
|
for id_ in self.all_ids:
|
||||||
|
path = self.dbpath(id_)
|
||||||
|
if not os.path.exists(os.path.join(lib, path)):
|
||||||
|
title_dir = os.path.basename(path)
|
||||||
|
book_formats = frozenset([x for x in
|
||||||
|
self.db.format_files(id_, index_is_id=True)])
|
||||||
|
for fmt in book_formats:
|
||||||
|
self.missing_formats.append((title_dir,
|
||||||
|
os.path.join(path, fmt[0]+'.'+fmt[1].lower()), id_))
|
||||||
|
if self.db.has_cover(id_):
|
||||||
|
self.missing_covers.append((title_dir,
|
||||||
|
os.path.join(path, 'cover.jpg'), id_))
|
||||||
|
|
||||||
|
|
||||||
def is_ebook_file(self, filename):
|
def is_ebook_file(self, filename):
|
||||||
ext = os.path.splitext(filename)[1]
|
ext = os.path.splitext(filename)[1]
|
||||||
if not ext:
|
if not ext:
|
||||||
@ -226,8 +241,8 @@ class CheckLibrary(object):
|
|||||||
if self.db.has_cover(book_id):
|
if self.db.has_cover(book_id):
|
||||||
if 'cover.jpg' not in filenames:
|
if 'cover.jpg' not in filenames:
|
||||||
self.missing_covers.append((title_dir,
|
self.missing_covers.append((title_dir,
|
||||||
os.path.join(db_path, title_dir, 'cover.jpg'), book_id))
|
os.path.join(db_path, 'cover.jpg'), book_id))
|
||||||
else:
|
else:
|
||||||
if 'cover.jpg' in filenames:
|
if 'cover.jpg' in filenames:
|
||||||
self.extra_covers.append((title_dir,
|
self.extra_covers.append((title_dir,
|
||||||
os.path.join(db_path, title_dir, 'cover.jpg'), book_id))
|
os.path.join(db_path, 'cover.jpg'), book_id))
|
||||||
|
@ -204,7 +204,8 @@ class DevNull(object):
|
|||||||
pass
|
pass
|
||||||
NULL = DevNull()
|
NULL = DevNull()
|
||||||
|
|
||||||
def do_add(db, paths, one_book_per_directory, recurse, add_duplicates):
|
def do_add(db, paths, one_book_per_directory, recurse, add_duplicates, otitle,
|
||||||
|
oauthors, oisbn, otags, oseries, oseries_index):
|
||||||
orig = sys.stdout
|
orig = sys.stdout
|
||||||
#sys.stdout = NULL
|
#sys.stdout = NULL
|
||||||
try:
|
try:
|
||||||
@ -231,6 +232,11 @@ def do_add(db, paths, one_book_per_directory, recurse, add_duplicates):
|
|||||||
mi.title = os.path.splitext(os.path.basename(book))[0]
|
mi.title = os.path.splitext(os.path.basename(book))[0]
|
||||||
if not mi.authors:
|
if not mi.authors:
|
||||||
mi.authors = [_('Unknown')]
|
mi.authors = [_('Unknown')]
|
||||||
|
for x in ('title', 'authors', 'isbn', 'tags', 'series'):
|
||||||
|
val = locals()['o'+x]
|
||||||
|
if val: setattr(mi, x[1:], val)
|
||||||
|
if oseries:
|
||||||
|
mi.series_index = oseries_index
|
||||||
|
|
||||||
formats.append(format)
|
formats.append(format)
|
||||||
metadata.append(mi)
|
metadata.append(mi)
|
||||||
@ -302,39 +308,56 @@ the directory related options below.
|
|||||||
parser.add_option('-e', '--empty', action='store_true', default=False,
|
parser.add_option('-e', '--empty', action='store_true', default=False,
|
||||||
help=_('Add an empty book (a book with no formats)'))
|
help=_('Add an empty book (a book with no formats)'))
|
||||||
parser.add_option('-t', '--title', default=None,
|
parser.add_option('-t', '--title', default=None,
|
||||||
help=_('Set the title of the added empty book'))
|
help=_('Set the title of the added book(s)'))
|
||||||
parser.add_option('-a', '--authors', default=None,
|
parser.add_option('-a', '--authors', default=None,
|
||||||
help=_('Set the authors of the added empty book'))
|
help=_('Set the authors of the added book(s)'))
|
||||||
parser.add_option('-i', '--isbn', default=None,
|
parser.add_option('-i', '--isbn', default=None,
|
||||||
help=_('Set the ISBN of the added empty book'))
|
help=_('Set the ISBN of the added book(s)'))
|
||||||
|
parser.add_option('-T', '--tags', default=None,
|
||||||
|
help=_('Set the tags of the added book(s)'))
|
||||||
|
parser.add_option('-s', '--series', default=None,
|
||||||
|
help=_('Set the series of the added book(s)'))
|
||||||
|
parser.add_option('-S', '--series-index', default=1.0, type=float,
|
||||||
|
help=_('Set the series number of the added book(s)'))
|
||||||
|
|
||||||
|
|
||||||
return parser
|
return parser
|
||||||
|
|
||||||
def do_add_empty(db, title, authors, isbn):
|
def do_add_empty(db, title, authors, isbn, tags, series, series_index):
|
||||||
from calibre.ebooks.metadata import MetaInformation, string_to_authors
|
from calibre.ebooks.metadata import MetaInformation
|
||||||
mi = MetaInformation(None)
|
mi = MetaInformation(None)
|
||||||
if title is not None:
|
if title is not None:
|
||||||
mi.title = title
|
mi.title = title
|
||||||
if authors:
|
if authors:
|
||||||
mi.authors = string_to_authors(authors)
|
mi.authors = authors
|
||||||
if isbn:
|
if isbn:
|
||||||
mi.isbn = isbn
|
mi.isbn = isbn
|
||||||
|
if tags:
|
||||||
|
mi.tags = tags
|
||||||
|
if series:
|
||||||
|
mi.series, mi.series_index = series, series_index
|
||||||
db.import_book(mi, [])
|
db.import_book(mi, [])
|
||||||
write_dirtied(db)
|
write_dirtied(db)
|
||||||
send_message()
|
send_message()
|
||||||
|
|
||||||
def command_add(args, dbpath):
|
def command_add(args, dbpath):
|
||||||
|
from calibre.ebooks.metadata import string_to_authors
|
||||||
parser = add_option_parser()
|
parser = add_option_parser()
|
||||||
opts, args = parser.parse_args(sys.argv[:1] + args)
|
opts, args = parser.parse_args(sys.argv[:1] + args)
|
||||||
|
aut = string_to_authors(opts.authors) if opts.authors else []
|
||||||
|
tags = [x.strip() for x in opts.tags.split(',')] if opts.tags else []
|
||||||
if opts.empty:
|
if opts.empty:
|
||||||
do_add_empty(get_db(dbpath, opts), opts.title, opts.authors, opts.isbn)
|
do_add_empty(get_db(dbpath, opts), opts.title, aut, opts.isbn, tags,
|
||||||
|
opts.series, opts.series_index)
|
||||||
return 0
|
return 0
|
||||||
if len(args) < 2:
|
if len(args) < 2:
|
||||||
parser.print_help()
|
parser.print_help()
|
||||||
print
|
print
|
||||||
print >>sys.stderr, _('You must specify at least one file to add')
|
print >>sys.stderr, _('You must specify at least one file to add')
|
||||||
return 1
|
return 1
|
||||||
do_add(get_db(dbpath, opts), args[1:], opts.one_book_per_directory, opts.recurse, opts.duplicates)
|
do_add(get_db(dbpath, opts), args[1:], opts.one_book_per_directory,
|
||||||
|
opts.recurse, opts.duplicates, opts.title, opts.authors, opts.isbn,
|
||||||
|
tags, opts.series, opts.series_index)
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
def do_remove(db, ids):
|
def do_remove(db, ids):
|
||||||
|
@ -40,6 +40,46 @@ entry_points = {
|
|||||||
],
|
],
|
||||||
}
|
}
|
||||||
|
|
||||||
|
class PreserveMIMEDefaults(object):
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.initial_values = {}
|
||||||
|
|
||||||
|
def __enter__(self):
|
||||||
|
def_data_dirs = '/usr/local/share:/usr/share'
|
||||||
|
paths = os.environ.get('XDG_DATA_DIRS', def_data_dirs)
|
||||||
|
paths = paths.split(':')
|
||||||
|
paths.append(os.environ.get('XDG_DATA_HOME', os.path.expanduser(
|
||||||
|
'~/.local/share')))
|
||||||
|
paths = list(filter(os.path.isdir, paths))
|
||||||
|
if not paths:
|
||||||
|
# Env var had garbage in it, ignore it
|
||||||
|
paths = def_data_dirs.split(':')
|
||||||
|
paths = list(filter(os.path.isdir, paths))
|
||||||
|
self.paths = {os.path.join(x, 'applications/defaults.list') for x in
|
||||||
|
paths}
|
||||||
|
self.initial_values = {}
|
||||||
|
for x in self.paths:
|
||||||
|
try:
|
||||||
|
with open(x, 'rb') as f:
|
||||||
|
self.initial_values[x] = f.read()
|
||||||
|
except:
|
||||||
|
self.initial_values[x] = None
|
||||||
|
|
||||||
|
def __exit__(self, *args):
|
||||||
|
for path, val in self.initial_values.iteritems():
|
||||||
|
if val is None:
|
||||||
|
try:
|
||||||
|
os.remove(path)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
elif os.path.exists(path):
|
||||||
|
with open(path, 'r+b') as f:
|
||||||
|
if f.read() != val:
|
||||||
|
f.seek(0)
|
||||||
|
f.truncate()
|
||||||
|
f.write(val)
|
||||||
|
|
||||||
# Uninstall script {{{
|
# Uninstall script {{{
|
||||||
UNINSTALL = '''\
|
UNINSTALL = '''\
|
||||||
#!{python}
|
#!{python}
|
||||||
@ -202,6 +242,10 @@ class PostInstall:
|
|||||||
if not os.path.exists(os.path.dirname(f)):
|
if not os.path.exists(os.path.dirname(f)):
|
||||||
os.makedirs(os.path.dirname(f))
|
os.makedirs(os.path.dirname(f))
|
||||||
self.manifest.append(f)
|
self.manifest.append(f)
|
||||||
|
complete = 'calibre-complete'
|
||||||
|
if getattr(sys, 'frozen_path', None):
|
||||||
|
complete = os.path.join(getattr(sys, 'frozen_path'), complete)
|
||||||
|
|
||||||
self.info('Installing bash completion to', f)
|
self.info('Installing bash completion to', f)
|
||||||
with open(f, 'wb') as f:
|
with open(f, 'wb') as f:
|
||||||
f.write('# calibre Bash Shell Completion\n')
|
f.write('# calibre Bash Shell Completion\n')
|
||||||
@ -286,8 +330,8 @@ class PostInstall:
|
|||||||
}
|
}
|
||||||
complete -o nospace -F _ebook_device ebook-device
|
complete -o nospace -F _ebook_device ebook-device
|
||||||
|
|
||||||
complete -o nospace -C calibre-complete ebook-convert
|
complete -o nospace -C %s ebook-convert
|
||||||
'''))
|
''')%complete)
|
||||||
except TypeError as err:
|
except TypeError as err:
|
||||||
if 'resolve_entities' in str(err):
|
if 'resolve_entities' in str(err):
|
||||||
print 'You need python-lxml >= 2.0.5 for calibre'
|
print 'You need python-lxml >= 2.0.5 for calibre'
|
||||||
@ -333,57 +377,55 @@ class PostInstall:
     def setup_desktop_integration(self): # {{{
         try:
             self.info('Setting up desktop integration...')

-            with TemporaryDirectory() as tdir:
-                with CurrentDir(tdir):
-                    render_img('mimetypes/lrf.png', 'calibre-lrf.png')
-                    check_call('xdg-icon-resource install --noupdate --context mimetypes --size 128 calibre-lrf.png application-lrf', shell=True)
-                    self.icon_resources.append(('mimetypes', 'application-lrf', '128'))
-                    check_call('xdg-icon-resource install --noupdate --context mimetypes --size 128 calibre-lrf.png text-lrs', shell=True)
-                    self.icon_resources.append(('mimetypes', 'application-lrs',
-                        '128'))
-                    render_img('lt.png', 'calibre-gui.png')
-                    check_call('xdg-icon-resource install --noupdate --size 128 calibre-gui.png calibre-gui', shell=True)
-                    self.icon_resources.append(('apps', 'calibre-gui', '128'))
-                    render_img('viewer.png', 'calibre-viewer.png')
-                    check_call('xdg-icon-resource install --size 128 calibre-viewer.png calibre-viewer', shell=True)
-                    self.icon_resources.append(('apps', 'calibre-viewer', '128'))
-
-                    mimetypes = set([])
-                    for x in all_input_formats():
-                        mt = guess_type('dummy.'+x)[0]
-                        if mt and 'chemical' not in mt and 'ctc-posml' not in mt:
-                            mimetypes.add(mt)
-
-                    def write_mimetypes(f):
-                        f.write('MimeType=%s;\n'%';'.join(mimetypes))
-
-                    f = open('calibre-lrfviewer.desktop', 'wb')
-                    f.write(VIEWER)
-                    f.close()
-                    f = open('calibre-ebook-viewer.desktop', 'wb')
-                    f.write(EVIEWER)
-                    write_mimetypes(f)
-                    f.close()
-                    f = open('calibre-gui.desktop', 'wb')
-                    f.write(GUI)
-                    write_mimetypes(f)
-                    f.close()
-                    des = ('calibre-gui.desktop', 'calibre-lrfviewer.desktop',
-                            'calibre-ebook-viewer.desktop')
-                    for x in des:
-                        cmd = ['xdg-desktop-menu', 'install', '--noupdate', './'+x]
-                        check_call(' '.join(cmd), shell=True)
-                        self.menu_resources.append(x)
-                    check_call(['xdg-desktop-menu', 'forceupdate'])
-                    f = open('calibre-mimetypes', 'wb')
-                    f.write(MIME)
-                    f.close()
-                    self.mime_resources.append('calibre-mimetypes')
-                    check_call('xdg-mime install ./calibre-mimetypes', shell=True)
+            with TemporaryDirectory() as tdir, CurrentDir(tdir), \
+                            PreserveMIMEDefaults():
+                render_img('mimetypes/lrf.png', 'calibre-lrf.png')
+                check_call('xdg-icon-resource install --noupdate --context mimetypes --size 128 calibre-lrf.png application-lrf', shell=True)
+                self.icon_resources.append(('mimetypes', 'application-lrf', '128'))
+                check_call('xdg-icon-resource install --noupdate --context mimetypes --size 128 calibre-lrf.png text-lrs', shell=True)
+                self.icon_resources.append(('mimetypes', 'application-lrs',
+                    '128'))
+                render_img('lt.png', 'calibre-gui.png')
+                check_call('xdg-icon-resource install --noupdate --size 128 calibre-gui.png calibre-gui', shell=True)
+                self.icon_resources.append(('apps', 'calibre-gui', '128'))
+                render_img('viewer.png', 'calibre-viewer.png')
+                check_call('xdg-icon-resource install --size 128 calibre-viewer.png calibre-viewer', shell=True)
+                self.icon_resources.append(('apps', 'calibre-viewer', '128'))
+
+                mimetypes = set([])
+                for x in all_input_formats():
+                    mt = guess_type('dummy.'+x)[0]
+                    if mt and 'chemical' not in mt and 'ctc-posml' not in mt:
+                        mimetypes.add(mt)
+
+                def write_mimetypes(f):
+                    f.write('MimeType=%s;\n'%';'.join(mimetypes))
+
+                f = open('calibre-lrfviewer.desktop', 'wb')
+                f.write(VIEWER)
+                f.close()
+                f = open('calibre-ebook-viewer.desktop', 'wb')
+                f.write(EVIEWER)
+                write_mimetypes(f)
+                f.close()
+                f = open('calibre-gui.desktop', 'wb')
+                f.write(GUI)
+                write_mimetypes(f)
+                f.close()
+                des = ('calibre-gui.desktop', 'calibre-lrfviewer.desktop',
+                        'calibre-ebook-viewer.desktop')
+                for x in des:
+                    cmd = ['xdg-desktop-menu', 'install', '--noupdate', './'+x]
+                    check_call(' '.join(cmd), shell=True)
+                    self.menu_resources.append(x)
+                check_call(['xdg-desktop-menu', 'forceupdate'])
+                f = open('calibre-mimetypes', 'wb')
+                f.write(MIME)
+                f.close()
+                self.mime_resources.append('calibre-mimetypes')
+                check_call('xdg-mime install ./calibre-mimetypes', shell=True)
         except Exception:
             if self.opts.fatal_errors:
                 raise
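The rewritten block above folds the two nested with statements into one and wraps all the xdg calls in a PreserveMIMEDefaults() context manager, so that a buggy xdg-desktop-menu implementation cannot clobber the system's defaults.list mimetype associations. The implementation of PreserveMIMEDefaults is not part of this hunk; the following is only a rough sketch of the snapshot-and-restore idea, with illustrative paths and error handling that are assumptions rather than calibre's actual code:

import os
from contextlib import contextmanager

@contextmanager
def preserve_mime_defaults(paths=('/usr/share/applications/defaults.list',)):
    # Snapshot the association files before the xdg tools run.
    saved = {}
    for p in paths:
        try:
            with open(p, 'rb') as f:
                saved[p] = f.read()
        except EnvironmentError:
            saved[p] = None  # file absent; nothing to restore later
    try:
        yield
    finally:
        # Restore any file a buggy xdg tool overwrote or truncated.
        for p, data in saved.items():
            if data is None:
                continue
            try:
                with open(p, 'rb') as f:
                    if f.read() == data:
                        continue
            except EnvironmentError:
                pass
            try:
                with open(p, 'wb') as f:
                    f.write(data)
            except EnvironmentError:
                pass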
@ -74,6 +74,11 @@ def base_dir():
     return _base_dir

+def reset_base_dir():
+    global _base_dir
+    _base_dir = None
+    base_dir()
+
 def force_unicode(x):
     # Cannot use the implementation in calibre.__init__ as it causes a circular
     # dependency
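The new reset_base_dir() helper above clears the module-level _base_dir cache and immediately repopulates it by calling base_dir() again. A self-contained sketch of that cache-and-reset pattern follows; the directory selection logic here is made up for illustration and is not calibre's real base_dir():

import tempfile

_base_dir = None

def base_dir():
    global _base_dir
    if _base_dir is None:
        # Create and cache a fresh temporary base directory on first use.
        _base_dir = tempfile.mkdtemp(prefix='calibre-tmp-')
    return _base_dir

def reset_base_dir():
    global _base_dir
    _base_dir = None   # drop the cached path...
    base_dir()         # ...and eagerly rebuild it, as the new code does

if __name__ == '__main__':
    print(base_dir())   # cached path
    reset_base_dir()
    print(base_dir())   # a freshly created path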
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff