Mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-09 03:04:10 -04:00)

Commit 6c54396b4f: Sync to trunk.
@@ -2,6 +2,7 @@
 .check-cache.pickle
 src/calibre/plugins
 resources/images.qrc
+resources/compiled_coffeescript.zip
 src/calibre/ebooks/oeb/display/test/*.js
 src/calibre/manual/.build/
 src/calibre/manual/cli/
@@ -16,7 +17,6 @@ resources/ebook-convert-complete.pickle
 resources/builtin_recipes.xml
 resources/builtin_recipes.zip
 resources/template-functions.json
-resources/display/*.js
 setup/installer/windows/calibre/build.log
 src/calibre/translations/.errors
 src/cssutils/.svn/
Changelog.yaml (130 lines changed)
@@ -19,6 +19,136 @@
 # new recipes:
 # - title:

+- version: 0.8.51
+  date: 2012-05-11
+
+  new features:
+    - title: "When switching libraries preserve the position and selected books if you switch back to a previously opened library."
+      tickets: [994514]
+
+    - title: "Conversion pipeline: Filter out the useless font-face rules inserted by Microsoft Word for every font on the system"
+
+    - title: "Driver for Motorola XT875 and Pandigital SuperNova"
+      tickets: [996890]
+
+    - title: "Add a colour swatch to the dialog for creating column coloring rules, to ease selection of colors"
+      tickets: [994811]
+
+    - title: "EPUB Output: Consolidate internal CSS generated by calibre into external stylesheets for ease of editing the EPUB"
+
+    - title: "List EPUB and MOBI at the top of the dropdown list of formats to convert to, as they are the most common choices"
+      tickets: [994838]
+
+  bug fixes:
+    - title: "E-book viewer: Improve performance when switching between normal and fullscreen views."
+      tickets: [996102]
+
+    - title: "Edit metadata dialog: When running download metadata do not insert duplicate tags into the list of tags"
+
+    - title: "KF8 Input: Do not error out if the file has a few invalidly encoded bytes."
+      tickets: [997034]
+
+    - title: "Fix download of news in AZW3 format not working"
+      tickets: [996439]
+
+    - title: "Pocketbook driver: Update for new PB 611 firmware."
+      tickets: [903079]
+
+    - title: "ebook-convert: Error out if the user provides extra command line args instead of silently ignoring them"
+      tickets: [994939]
+
+    - title: "EPUB Output: Do not self close any container tags to prevent artifacts when EPUBs are viewed using buggy browser based viewers."
+      tickets: [994861]
+
+    - title: "Fix regression in 0.8.50 that broke the conversion of HTML files that contained non-ascii font-face declarations, typically produced by Microsoft Word"
+
+  improved recipes:
+    - Mainichi news
+    - derStandard
+    - Endgadget Japan
+
+  new recipes:
+    - title: Mainichi English
+      author: Hiroshi Miura
+
+    - title: The Grid TO
+      author: Yusuf W
+
+    - title: National Geographic (Italy)
+      author: faber1971
+
+    - title: Rebelion
+      author: Marc Busque
+
+- version: 0.8.50
+  date: 2012-05-04
+
+  new features:
+    - title: "Tweak Book: Allow tweaking of KF8 MOBI files. Useful to fine-tune the result of a conversion. Right click on the book and select Tweak Book to use the feature. Note that tweaking a MOBI file that contains both KF8 and older MOBI6 will cause the MOBI6 version to be discarded."
+
+    - title: "AZW3 output plugin. This output plugin generates pure KF8 mobi files. These only work on the Kindle Fire and Kindle Touch with latest firmware."
+
+    - title: "Conversion: Allow easy re-ordering of the search and replace expressions in the conversion dialog. Also apply the expressions in the same order that they were entered when doing the conversion."
+
+    - title: "Automatically add the Tag 'Sample Book' when an Amazon sample is added to calibre"
+
+    - title: "FB2 Input: Better handling of inline images."
+      tickets: [989869]
+
+  bug fixes:
+    - title: "KF8 Output: Fix section to section jumps not working for documents with multi-level ToCs"
+
+    - title: "EPUB Input: Handle the case of the metadata ToC containing a reference to the cover HTML file."
+      tickets: [993812]
+
+    - title: "CHM Input: Handle files with deeply nested markup and non html files listed at the start of the manifest."
+      tickets: [993607]
+
+    - title: "KF8 Output: Workaround Kindle Touch bug that causes the book to be rendered as black pages when a height is specified for <body>"
+
+    - title: "Fix regression in 0.8.49 that broke italics detection in heuristic processing on 32-bit systems."
+      tickets: [991380]
+
+    - title: "KF8 Output: Fix joint MOBI6/KF8 books not being recognized as MOBI files by older Kindles"
+
+    - title: "KF8 Output: Fix errors when processing documents with HTML comments and/or XML processing instructions"
+
+    - title: "Get Books: Amazon fix prices not being found. B&N fix details link. ebooks.com: fix cover image. Website changes to various EU stores"
+
+    - title: "FB2 Input: More robust base64 decoding to handle embedded images that are incorrectly encoded."
+      tickets: [990929]
+
+    - title: "Fix scrolling with the cover browser updating only the selection in the book list, not the current book."
+      tickets: [990881]
+
+    - title: "Save to Disk: Do not run out of memory when saving very large files on systems with low RAM."
+      tickets: [990741]
+
+    - title: "FB2 Output: Use 2 letter language codes in preference to 3-letter ones to not break poorly implemented FB2 readers"
+      tickets: [990026]
+
+    - title: "EPUB Input: Auto set the media-type for OPF manifest entries with an empty media-type"
+
+  improved recipes:
+    - National Post
+    - Daily Mirror
+    - Sun
+    - Newsweek Polska
+    - Max-Planck
+    - derStandard
+    - tweakers.net
+
+  new recipes:
+    - title: George Monbiot
+      author: Darko Miletic
+
+    - title: El Mundo
+      author: atordo
+
+    - title: AraInfo and Diagonal
+      author: Ruben Pollan
+
 - version: 0.8.49
   date: 2012-04-27

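Since the block added above is plain YAML, the release data can be inspected programmatically. A minimal sketch, assuming PyYAML is installed and that Changelog.yaml keeps the list-of-releases structure visible in this hunk:

import yaml

# Print the ticket numbers closed by each release listed in the changelog.
with open('Changelog.yaml') as f:
    releases = yaml.safe_load(f)

for release in releases:
    tickets = []
    for section in ('new features', 'bug fixes'):
        for entry in release.get(section) or []:
            tickets.extend(entry.get('tickets', []))
    print(release['version'], release['date'], tickets)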
@@ -1,5 +1,5 @@
 from calibre.web.feeds.news import BasicNewsRecipe
-from claibre import browser
+from calibre import browser
 import re

 class AdvancedUserRecipe1306061239(BasicNewsRecipe):
@@ -7,10 +7,11 @@ __copyright__ = '2009, Gerhard Aigner <gerhard.aigner at gmail.com>'
 ''' http://www.derstandard.at - Austrian Newspaper '''
 import re
 from calibre.web.feeds.news import BasicNewsRecipe
+from time import strftime

 class DerStandardRecipe(BasicNewsRecipe):
     title = u'derStandard'
-    __author__ = 'Gerhard Aigner and Sujata Raman and Marcel Jira'
+    __author__ = 'Gerhard Aigner and Sujata Raman and Marcel Jira and Peter Reschenhofer'
     description = u'Nachrichten aus Österreich'
     publisher ='derStandard.at'
     category = 'news, politics, nachrichten, Austria'
@@ -88,3 +89,41 @@ class DerStandardRecipe(BasicNewsRecipe):
         for t in soup.findAll(['ul', 'li']):
             t.name = 'div'
         return soup
+
+    def get_cover_url(self):
+        highResolution = True
+
+        date = strftime("%Y/%Y%m%d")
+        # it is also possible for the past
+        #date = '2012/20120503'
+
+        urlP1 = 'http://epaper.derstandarddigital.at/'
+        urlP2 = 'data_ep/STAN/' + date
+        urlP3 = '/V.B1/'
+        urlP4 = 'paper.htm'
+        urlHTML = urlP1 + urlP2 + urlP3 + urlP4
+
+        br = self.clone_browser(self.browser)
+        htmlF = br.open_novisit(urlHTML)
+        htmlC = htmlF.read()
+
+        # URL EXAMPLE: data_ep/STAN/2012/20120504/V.B1/pages/A3B6798F-2751-4D8D-A103-C5EF22F7ACBE.htm
+        # consists of part2 + part3 + 'pages/' + code
+        # 'pages/' has length 6, code has length 36
+
+        index = htmlC.find(urlP2) + len(urlP2 + urlP3) + 6
+        code = htmlC[index:index + 36]
+
+        # URL EXAMPLE HIGH RESOLUTION: http://epaper.derstandarddigital.at/data_ep/STAN/2012/20120504/pagejpg/A3B6798F-2751-4D8D-A103-C5EF22F7ACBE_b.png
+        # URL EXAMPLE LOW RESOLUTION: http://epaper.derstandarddigital.at/data_ep/STAN/2012/20120504/pagejpg/2AB52F71-11C1-4859-9114-CDCD79BEFDCB.png
+
+        urlPic = urlP1 + urlP2 + '/pagejpg/' + code
+
+        if highResolution:
+            urlPic = urlPic + '_b'
+
+        urlPic = urlPic + '.png'
+
+        return urlPic
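The index arithmetic in get_cover_url() can be checked against the example path quoted in the comments: 'pages/' is 6 characters long and the page code is a 36-character GUID. A standalone sketch using that example (the GUID is the illustrative one from the comment, not a live value):

# Verify the slicing used above: the code starts len(urlP2 + urlP3) + 6 characters past urlP2.
urlP2 = 'data_ep/STAN/2012/20120504'
urlP3 = '/V.B1/'
htmlC = '<a href="data_ep/STAN/2012/20120504/V.B1/pages/A3B6798F-2751-4D8D-A103-C5EF22F7ACBE.htm">'
index = htmlC.find(urlP2) + len(urlP2 + urlP3) + 6
code = htmlC[index:index + 36]
print(code)  # A3B6798F-2751-4D8D-A103-C5EF22F7ACBE
print('http://epaper.derstandarddigital.at/' + urlP2 + '/pagejpg/' + code + '_b.png')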
@@ -17,7 +17,25 @@ class EndgadgetJapan(BasicNewsRecipe):
     no_stylesheets = True
     language = 'ja'
     encoding = 'utf-8'
-    feeds = [(u'engadget', u'http://japanese.engadget.com/rss.xml')]
+    index = 'http://japanese.engadget.com/'
+    remove_javascript = True

+    remove_tags_before = dict(name="h1", attrs={'class':"post_title"})
+    remove_tags_after = dict(name='div', attrs={'class':'post_body'})
+
+    def parse_index(self):
+        feeds = []
+        newsarticles = []
+        soup = self.index_to_soup(self.index)
+        for topstories in soup.findAll('div',attrs={'class':'post_content'}):
+            itt = topstories.find('h4')
+            itema = itt.find('a',href=True)
+            newsarticles.append({
+                'title' :itema.string
+                ,'date' :''
+                ,'url' :itema['href']
+                ,'description':''
+            })
+        feeds.append(('Latest Posts', newsarticles))
+        return feeds
+
-    remove_tags_before = dict(name="div", attrs={'id':"content_wrap"})
-    remove_tags_after = dict(name='h3', attrs={'id':'addcomments'})
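For context, a parse_index() implementation like the one added above must return a list of (section title, list of article dicts) pairs; calibre then fetches the listed URLs to build the ebook. A sketch of the expected shape, with made-up values:

# Shape of the value parse_index() returns; the post data below is illustrative only.
feeds = [
    ('Latest Posts', [
        {'title': 'Example post',
         'date': '',
         'url': 'http://japanese.engadget.com/example-post/',
         'description': ''},
    ]),
]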
recipes/folha.recipe (new file, 82 lines)
@@ -0,0 +1,82 @@
+# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
+
+__license__ = 'GPL v3'
+__copyright__ = '2012, Darko Miletic <darko.miletic at gmail.com>'
+'''
+www.folha.uol.com.br
+'''
+
+import urllib
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class Folha_de_s_paulo(BasicNewsRecipe):
+    title = u'Folha de São Paulo - portal'
+    __author__ = 'Darko Miletic'
+    description = 'Um Jornala a servicao do Brasil'
+    publisher = 'Folhapress'
+    category = 'news, politics, Brasil'
+    oldest_article = 2
+    max_articles_per_feed = 200
+    no_stylesheets = True
+    encoding = 'cp1252'
+    use_embedded_content = False
+    language = 'pt_BR'
+    remove_empty_feeds = True
+    publication_type = 'newspaper'
+    masthead_url = 'http://f.i.uol.com.br/fsp/furniture/images/lgo-fsp-430x50-ffffff.gif'
+    extra_css = """
+        body{font-family: Arial,Helvetica,sans-serif }
+        img{margin-bottom: 0.4em; display:block}
+    """
+
+    conversion_options = {
+        'comment': description
+        , 'tags': category
+        , 'publisher': publisher
+        , 'language': language
+    }
+
+    remove_tags = [dict(name=['meta','link','base','iframe','embed','object'])]
+    keep_only_tags = [dict(attrs={'id':'articleNew'})]
+
+    feeds = [
+         (u'Poder', u'http://feeds.folha.uol.com.br/poder/rss091.xml')
+        ,(u'Mundo', u'http://feeds.folha.uol.com.br/mundo/rss091.xml')
+        ,(u'Mercado', u'http://feeds.folha.uol.com.br/mercado/rss091.xml')
+        ,(u'Cotidiano', u'http://feeds.folha.uol.com.br/cotidiano/rss091.xml')
+        ,(u'Esporte', u'http://feeds.folha.uol.com.br/esporte/rss091.xml')
+        ,(u'Ilustrada', u'http://feeds.folha.uol.com.br/ilustrada/rss091.xml')
+        ,(u'F5', u'http://feeds.folha.uol.com.br/f5/rss091.xml')
+        ,(u'Ciência', u'http://feeds.folha.uol.com.br/ciencia/rss091.xml')
+        ,(u'Tec', u'http://feeds.folha.uol.com.br/tec/rss091.xml')
+        ,(u'Ambiente', u'http://feeds.folha.uol.com.br/ambiente/rss091.xml')
+        ,(u'Bichos', u'http://feeds.folha.uol.com.br/bichos/rss091.xml')
+        ,(u'Celebridades', u'http://feeds.folha.uol.com.br/celebridades/rss091.xml')
+        ,(u'Comida', u'http://feeds.folha.uol.com.br/comida/rss091.xml')
+        ,(u'Equilibrio', u'http://feeds.folha.uol.com.br/equilibrioesaude/rss091.xml')
+        ,(u'Folhateen', u'http://feeds.folha.uol.com.br/folhateen/rss091.xml')
+        ,(u'Folhinha', u'http://feeds.folha.uol.com.br/folhinha/rss091.xml')
+        ,(u'Ilustrissima', u'http://feeds.folha.uol.com.br/ilustrissima/rss091.xml')
+        ,(u'Saber', u'http://feeds.folha.uol.com.br/saber/rss091.xml')
+        ,(u'Turismo', u'http://feeds.folha.uol.com.br/turismo/rss091.xml')
+        ,(u'Panel do Leitor', u'http://feeds.folha.uol.com.br/folha/paineldoleitor/rss091.xml')
+        ,(u'Publifolha', u'http://feeds.folha.uol.com.br/folha/publifolha/rss091.xml')
+        ,(u'Em cima da hora', u'http://feeds.folha.uol.com.br/emcimadahora/rss091.xml')
+    ]
+
+    def get_article_url(self, article):
+        url = BasicNewsRecipe.get_article_url(self, article)
+        curl = url.partition('/*')[2]
+        return curl
+
+    def print_version(self, url):
+        return 'http://tools.folha.com.br/print?site=emcimadahora&url=' + urllib.quote_plus(url)
+
+    def get_cover_url(self):
+        soup = self.index_to_soup('http://www.folha.uol.com.br/')
+        cont = soup.find('div', attrs={'id':'newspaper'})
+        if cont:
+            ai = cont.find('a', href='http://www1.folha.uol.com.br/fsp/')
+            if ai:
+                return ai.img['src']
+        return None
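get_article_url() above relies on the feed items carrying FeedBurner-style links in which the real article address follows a '/*' separator, so str.partition returns everything after the first such marker. A worked example on a made-up feed link:

# The feed link is illustrative; only the '/*' convention matters here.
link = ('http://feeds.folha.uol.com.br/~r/poder/rss091/~3/abcdef/'
        '*http://www1.folha.uol.com.br/poder/1092077-exemplo.shtml')
print(link.partition('/*')[2])
# -> http://www1.folha.uol.com.br/poder/1092077-exemplo.shtml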
@@ -8,7 +8,7 @@ from urllib2 import Request, urlopen, URLError
 class FolhaOnline(BasicNewsRecipe):
     THUMBALIZR_API = '' # ---->Get your at http://www.thumbalizr.com/ and put here
     LANGUAGE = 'pt_br'
-    language = 'pt'
+    language = 'pt_BR'
     LANGHTM = 'pt-br'
     ENCODING = 'cp1252'
     ENCHTM = 'iso-8859-1'
@@ -14,7 +14,7 @@ class FSP(BasicNewsRecipe):
     HOMEPAGE = 'http://www1.folha.uol.com.br/fsp/'
     masthead_url = 'http://f.i.uol.com.br/fsp/furniture/images/lgo-fsp-430x50-ffffff.gif'

-    language = 'pt'
+    language = 'pt_BR'
     no_stylesheets = True
     max_articles_per_feed = 40
     remove_javascript = True
recipes/grid_to.recipe (new file, 79 lines)
@@ -0,0 +1,79 @@
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class TheGridTO(BasicNewsRecipe):
+    #: The title to use for the ebook
+    title = u'The Grid TO'
+
+    #: A couple of lines that describe the content this recipe downloads.
+    #: This will be used primarily in a GUI that presents a list of recipes.
+    description = (u'The Grid is a weekly city magazine and daily website providing a fresh, '
+                    'accessible voice for Toronto.')
+
+    #: The author of this recipe
+    __author__ = u'Yusuf W'
+
+    #: The language that the news is in. Must be an ISO-639 code either
+    #: two or three characters long
+    language = 'en_CA'
+
+    #: Publication type
+    #: Set to newspaper, magazine or blog
+    publication_type = 'newspaper'
+
+    #: Convenient flag to disable loading of stylesheets for websites
+    #: that have overly complex stylesheets unsuitable for conversion
+    #: to ebooks formats
+    #: If True stylesheets are not downloaded and processed
+    no_stylesheets = True
+
+    #: List of tags to be removed. Specified tags are removed from downloaded HTML.
+    remove_tags_before = dict(name='div', id='content')
+    remove_tags_after = dict(name='div', id='content')
+    remove_tags = [
+        dict(name='div', attrs={'class':'right-content pull-right'}),
+        dict(name='div', attrs={'class':'right-content'}),
+        dict(name='div', attrs={'class':'ftr-line'}),
+        dict(name='div', attrs={'class':'pull-right'}),
+        dict(name='div', id='comments'),
+        dict(name='div', id='tags')
+    ]
+
+    #: Keep only the specified tags and their children.
+    #keep_only_tags = [dict(name='div', id='content')]
+
+    cover_margins = (0, 0, '#ffffff')
+
+    INDEX = 'http://www.thegridto.com'
+
+    def get_cover_url(self):
+        soup = self.index_to_soup(self.INDEX)
+        cover_url = soup.find(attrs={'class':'article-block latest-issue'}).find('img')['src']
+        return cover_url
+
+    def parse_index(self):
+
+        # Get the latest issue
+        soup = self.index_to_soup(self.INDEX)
+        a = soup.find('div', attrs={'class': 'full-content stuff-ftr'}).findAll('a')[2]
+
+        # Parse the index of the latest issue
+        self.INDEX = self.INDEX + a['href']
+        soup = self.index_to_soup(self.INDEX)
+
+        feeds = []
+        for section in ['city', 'life', 'culture']:
+            section_class = 'left-content article-listing ' + section + ' pull-left'
+            div = soup.find(attrs={'class': section_class})
+
+            articles = []
+            for tag in div.findAllNext(attrs={'class':'search-block'}):
+                a = tag.findAll('a', href=True)[1]
+
+                title = self.tag_to_string(a)
+                url = a['href']
+
+                articles.append({'title': title, 'url': url, 'description':'', 'date':''})
+
+            feeds.append((section, articles))
+        return feeds
recipes/icons/folha.png (new binary file, 1.6 KiB; not shown)
@@ -20,6 +20,8 @@ class JijiDotCom(BasicNewsRecipe):
     top_url = 'http://www.jiji.com/'

     feeds = [(u'\u30cb\u30e5\u30fc\u30b9', u'http://www.jiji.com/rss/ranking.rdf')]
+
+    remove_tags_before = dict(id="article-area")
     remove_tags_after = dict(id="ad_google")

     def get_cover_url(self):
@@ -1,7 +1,7 @@
 __license__ = 'GPL v3'
-__author__ = 'Lorenzo Vigentini, based on Darko Miletic, Gabriele Marini'
+__author__ = 'Lorenzo Vigentini, based on Darko Miletic, Gabriele Marini; minor fixes by faber1971'
-__copyright__ = '2009-2011, Darko Miletic <darko.miletic at gmail.com>, Lorenzo Vigentini <l.vigentini at gmail.com>'
+__copyright__ = '2009-2012, Darko Miletic <darko.miletic at gmail.com>, Lorenzo Vigentini <l.vigentini at gmail.com>, faber1971'
-description = 'Italian daily newspaper - v1.01 (04, January 2010); 16.05.2010 new version; 17.10.2011 new version; 14.12.2011 new version'
+description = 'Italian daily newspaper - v1.02 (04, January 2010); 16.05.2010 new version; 17.10.2011 new version; 14.12.2011 new version; 11.05.2012 new version'

 '''
 http://www.repubblica.it/
@@ -12,14 +12,14 @@ from calibre.web.feeds.news import BasicNewsRecipe

 class LaRepubblica(BasicNewsRecipe):
     title = 'La Repubblica'
-    __author__ = 'Lorenzo Vigentini, Gabriele Marini, Darko Miletic'
+    __author__ = 'Lorenzo Vigentini, Gabriele Marini, Darko Miletic, faber1971'
     description = 'il quotidiano online con tutte le notizie in tempo reale. News e ultime notizie. Tutti i settori: politica, cronaca, economia, sport, esteri, scienza, tecnologia, internet, spettacoli, musica, cultura, arte, mostre, libri, dvd, vhs, concerti, cinema, attori, attrici, recensioni, chat, cucina, mappe. Le citta di Repubblica: Roma, Milano, Bologna, Firenze, Palermo, Napoli, Bari, Torino.'
     masthead_url = 'http://www.repubblica.it/static/images/homepage/2010/la-repubblica-logo-home-payoff.png'
     publisher = 'Gruppo editoriale L\'Espresso'
     category = 'News, politics, culture, economy, general interest'
     language = 'it'
     timefmt = '[%a, %d %b, %Y]'
-    oldest_article = 5
+    oldest_article = 1
     encoding = 'utf8'
     use_embedded_content = False
     no_stylesheets = True
@@ -59,6 +59,7 @@ class LaRepubblica(BasicNewsRecipe):
         dict(attrs={'class':'articolo'}),
         dict(attrs={'class':'body-text'}),
         dict(name='p', attrs={'class':'disclaimer clearfix'}),
+        dict(name='div', attrs={'id':'main'}),
         dict(attrs={'id':'contA'})
     ]

@@ -67,7 +68,7 @@ class LaRepubblica(BasicNewsRecipe):
         dict(name=['object','link','meta','iframe','embed']),
         dict(name='span',attrs={'class':'linkindice'}),
         dict(name='div', attrs={'class':['bottom-mobile','adv adv-middle-inline']}),
-        dict(name='div', attrs={'id':['rssdiv','blocco','fb-like-head']}),
+        dict(name='div', attrs={'id':['rssdiv','blocco','fb-like-head', 'sidebar']}),
         dict(name='div', attrs={'class':['utility','fb-like-button','archive-button']}),
         dict(name='div', attrs={'class':'generalbox'}),
         dict(name='ul', attrs={'id':'hystory'})
@@ -88,11 +89,12 @@ class LaRepubblica(BasicNewsRecipe):
         (u'Sport', u'http://www.repubblica.it/rss/sport/rss2.0.xml'),
         (u'Calcio', u'http://www.repubblica.it/rss/sport/calcio/rss2.0.xml'),
         (u'Motori', u'http://www.repubblica.it/rss/motori/rss2.0.xml'),
-        (u'Edizione Roma', u'http://roma.repubblica.it/rss/rss2.0.xml'),
+        (u'Roma', u'http://roma.repubblica.it/rss/rss2.0.xml'),
-        (u'Edizione Torino', u'http://torino.repubblica.it/rss/rss2.0.xml'),
+        (u'Torino', u'http://torino.repubblica.it/rss/rss2.0.xml'),
-        (u'Edizione Milano', u'feed://milano.repubblica.it/rss/rss2.0.xml'),
+        (u'Milano', u'feed://milano.repubblica.it/rss/rss2.0.xml'),
-        (u'Edizione Napoli', u'feed://napoli.repubblica.it/rss/rss2.0.xml'),
+        (u'Napoli', u'feed://napoli.repubblica.it/rss/rss2.0.xml'),
-        (u'Edizione Palermo', u'feed://palermo.repubblica.it/rss/rss2.0.xml')
+        (u'Bari', u'http://bari.repubblica.it/rss/rss2.0.xml'),
+        (u'Palermo', u'feed://palermo.repubblica.it/rss/rss2.0.xml')
     ]

     def preprocess_html(self, soup):
@@ -16,12 +16,12 @@ class MainichiDailyNews(BasicNewsRecipe):
     publisher = 'Mainichi Daily News'
     category = 'news, japan'
     language = 'ja'
+    index = 'http://mainichi.jp/select/'
-    feeds = [(u'daily news', u'http://mainichi.jp/rss/etc/flash.rss')]
+    remove_javascript = True
+    masthead_title = u'MAINICHI DAILY NEWS'

     remove_tags_before = {'class':"NewsTitle"}
-    remove_tags = [{'class':"RelatedArticle"}]
-    remove_tags_after = {'class':"Credit"}
+    remove_tags_after = {'class':"NewsBody clr"}

     def parse_feeds(self):

@@ -32,9 +32,30 @@ class MainichiDailyNews(BasicNewsRecipe):
             for a,curarticle in enumerate(curfeed.articles):
                 if re.search(r'pheedo.jp', curarticle.url):
                     delList.append(curarticle)
+                if re.search(r'rssad.jp', curarticle.url):
+                    delList.append(curarticle)
             if len(delList)>0:
                 for d in delList:
                     index = curfeed.articles.index(d)
                     curfeed.articles[index:index+1] = []

         return feeds
+
+    def parse_index(self):
+        feeds = []
+        soup = self.index_to_soup(self.index)
+        topstories = soup.find('ul',attrs={'class':'MaiLink'})
+        if topstories:
+            newsarticles = []
+            for itt in topstories.findAll('li'):
+                itema = itt.find('a',href=True)
+                if itema:
+                    newsarticles.append({
+                        'title' :itema.string
+                        ,'date' :''
+                        ,'url' :itema['href']
+                        ,'description':''
+                    })
+            feeds.append(('latest', newsarticles))
+        return feeds
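The parse_feeds() override shown above strips feed entries that point at the pheedo.jp / rssad.jp ad redirectors rather than at mainichi.jp itself. The same filtering can be expressed as a single pass over the article list; a standalone sketch (the function and constant names are illustrative, not part of the recipe):

import re

AD_HOSTS = re.compile(r'(pheedo|rssad)\.jp')

def drop_ad_redirects(articles):
    """Keep only the articles whose URL does not go through an ad redirector."""
    return [a for a in articles if not AD_HOSTS.search(a.url)]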
recipes/mainichi_en.recipe (new file, 67 lines)
@@ -0,0 +1,67 @@
+__license__ = 'GPL v3'
+__copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
+'''
+www.mainichi.jp
+'''
+
+import re
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class MainichiEnglishNews(BasicNewsRecipe):
+    title = u'The Mainichi'
+    __author__ = 'Hiroshi Miura'
+    oldest_article = 2
+    max_articles_per_feed = 40
+    description = 'Japanese traditional newspaper Mainichi news in English'
+    publisher = 'Mainichi News'
+    category = 'news, japan'
+    language = 'en_JP'
+    index = 'http://mainichi.jp/english/english/index.html'
+    remove_javascript = True
+    masthead_url = 'http://mainichi.jp/english/images/themainichi.png'
+
+    remove_tags_before = {'class':"NewsTitle"}
+    remove_tags_after = {'class':"NewsBody clr"}
+
+    def parse_feeds(self):
+
+        feeds = BasicNewsRecipe.parse_feeds(self)
+
+        for curfeed in feeds:
+            delList = []
+            for a,curarticle in enumerate(curfeed.articles):
+                if re.search(r'pheedo.jp', curarticle.url):
+                    delList.append(curarticle)
+                if re.search(r'rssad.jp', curarticle.url):
+                    delList.append(curarticle)
+            if len(delList)>0:
+                for d in delList:
+                    index = curfeed.articles.index(d)
+                    curfeed.articles[index:index+1] = []
+
+        return feeds
+
+    def parse_index(self):
+        feeds = []
+        soup = self.index_to_soup(self.index)
+        for section in soup.findAll('section'):
+            newsarticles = []
+            section_name = 'news'
+            hds = section.find('div', attrs={'class':'CategoryHead clr'})
+            if hds:
+                section_item = hds.find('h1')
+                if section_item:
+                    section_name = section_item.find('a').string
+            items = section.find('ul', attrs={'class':'MaiLink'})
+            for item in items.findAll('li'):
+                if item:
+                    itema = item.find('a')
+                    newsarticles.append({
+                        'title' :itema.string
+                        ,'date' :''
+                        ,'url' :itema['href']
+                        ,'description':''
+                    })
+            feeds.append((section_name, newsarticles))
+        return feeds
@@ -1,34 +0,0 @@
-from calibre.web.feeds.news import BasicNewsRecipe
-import re
-
-class MainichiDailyITNews(BasicNewsRecipe):
-    title = u'\u6bce\u65e5\u65b0\u805e(IT&\u5bb6\u96fb)'
-    __author__ = 'Hiroshi Miura'
-    oldest_article = 2
-    max_articles_per_feed = 100
-    description = 'Japanese traditional newspaper Mainichi Daily News - IT and electronics'
-    publisher = 'Mainichi Daily News'
-    category = 'news, Japan, IT, Electronics'
-    language = 'ja'
-
-    feeds = [(u'IT News', u'http://mainichi.pheedo.jp/f/mainichijp_electronics')]
-
-    remove_tags_before = {'class':"NewsTitle"}
-    remove_tags = [{'class':"RelatedArticle"}]
-    remove_tags_after = {'class':"Credit"}
-
-    def parse_feeds(self):
-
-        feeds = BasicNewsRecipe.parse_feeds(self)
-
-        for curfeed in feeds:
-            delList = []
-            for a,curarticle in enumerate(curfeed.articles):
-                if re.search(r'pheedo.jp', curarticle.url):
-                    delList.append(curarticle)
-            if len(delList)>0:
-                for d in delList:
-                    index = curfeed.articles.index(d)
-                    curfeed.articles[index:index+1] = []
-
-        return feeds
recipes/mainichi_science_news.recipe (new file, 59 lines)
@@ -0,0 +1,59 @@
+__license__ = 'GPL v3'
+__copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
+'''
+www.mainichi.jp
+'''
+
+import re
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class MainichiDailyScienceNews(BasicNewsRecipe):
+    title = u'\u6bce\u65e5\u65b0\u805e(Science)'
+    __author__ = 'Hiroshi Miura'
+    oldest_article = 2
+    max_articles_per_feed = 20
+    description = 'Japanese traditional newspaper Mainichi Daily News - science'
+    publisher = 'Mainichi Daily News'
+    category = 'news, japan'
+    language = 'ja'
+    index = 'http://mainichi.jp/select/science'
+    remove_javascript = True
+    masthead_title = u'MAINICHI DAILY NEWS'
+
+    remove_tags_before = {'class':"NewsTitle"}
+    remove_tags_after = {'class':"NewsBody clr"}
+
+    def parse_feeds(self):
+
+        feeds = BasicNewsRecipe.parse_feeds(self)
+
+        for curfeed in feeds:
+            delList = []
+            for a,curarticle in enumerate(curfeed.articles):
+                if re.search(r'rssad.jp', curarticle.url):
+                    delList.append(curarticle)
+            if len(delList)>0:
+                for d in delList:
+                    index = curfeed.articles.index(d)
+                    curfeed.articles[index:index+1] = []
+
+        return feeds
+
+    def parse_index(self):
+        feeds = []
+        soup = self.index_to_soup(self.index)
+        topstories = soup.find('ul',attrs={'class':'MaiLink'})
+        if topstories:
+            newsarticles = []
+            for itt in topstories.findAll('li'):
+                itema = itt.find('a',href=True)
+                if itema:
+                    newsarticles.append({
+                        'title' :itema.string
+                        ,'date' :''
+                        ,'url' :itema['href']
+                        ,'description':''
+                    })
+            feeds.append(('Science', newsarticles))
+        return feeds
@@ -56,7 +56,7 @@ class AdvancedUserRecipe1306097511(BasicNewsRecipe):
     encoding = 'utf-8'
     remove_attributes = ['style', 'font', 'width', 'height', 'itemtype', 'itemprop', 'itemscope']#, 'href']
     use_embedded_content = False
-    extra_css = 'body{font-size:1em;padding:5px 0}body,a,h2{background-color:#fff;text-decoration:none;color:#000}#date,div.byline,p.article-image-caption .credits,.calibrenavbar{font-size:.5em}.article-box-fact.module-title,#date,div.byline{clear:both}.article-box-fact.module-title{margin:8px 0}.article-box-fact.module-title,h2{font-size:1.1em}h1.title{font-size:1.4em}h1.title,.article-body p,div.article-image-caption-2column,div.article-image-caption-3column,#date,div.byline{margin-bottom:.6em}div.article-box-fact div.subtitle,.article-box-fact.module-title,h1.title,p.article-image-caption{font-weight:700}div.column-1-3{margin-left:19px}div.column-1-2{display:inline}div.column-1-2,div.column-1-3{margin-right:7px}p.article-image-caption{font-size:.6em;margin-top:5px}p.article-image-caption,#date,div.byline{color:#616262}p.article-image-caption .credits{font-style:italic}div.article-image-caption{width:246px}div.article-image-caption-2column{width:373px}div.column-3{background-color:#eee;float:right;width:50%}div.column-3 module-title{border:1px solid #aaa}div.article-box-fact div.subtitle,.article-box-fact.module-title{color:#24763b}div.byline{border-top:2px solid #24763b}div.column-3,img,div.column-3,p.small,div.article-image-caption{margin:.5em}img,p.small,.column1,h2{border:0;padding:0}.column1,h1,h2{margin:0}'
+    extra_css = 'body{font-size:1em;padding:5px 0}body,a,h2{background-color:#fff;text-decoration:none;color:#000}#date,div.byline,p.article-image-caption .credits,.calibrenavbar,.calibre5{font-size:.5em}.article-box-fact.module-title,#date,div.byline{clear:both}.article-box-fact{font-size:0.7em}.article-box-fact.module-title{margin:8px 0; font-size:0.8em}h2{font-size:1em}h1.title{font-size:1.4em}h1.title,.article-body p,div.article-image-caption-2column,div.article-image-caption-3column,#date,div.byline{margin-bottom:.6em}div.article-box-fact div.subtitle,.article-box-fact.module-title,h1.title,p.article-image-caption{font-weight:700}div.column-1-3{margin-left:19px}div.column-1-2{display:inline}div.column-1-2,div.column-1-3{margin-right:7px}p.article-image-caption{font-size:.6em;margin-top:5px}p.article-image-caption,#date,div.byline{color:#616262}p.article-image-caption .credits{font-style:italic}div.article-image-caption{width:246px}div.article-image-caption-2column{width:373px}div.column-3{background-color:#eee;float:right;width:50%}div.column-3 module-title{border:1px solid #aaa}div.article-box-fact div.subtitle,.article-box-fact.module-title{color:#24763b}div.byline{border-top:2px solid #24763b}div.column-3,img,div.column-3,p.small,div.article-image-caption{margin:.5em}img,p.small,.column1,h2,.calibre5,.calibrenavbar{border:0;padding:0}.column1,h1,h2,.calibrenavbar{margin:0}'


     preprocess_regexps = [
@@ -71,11 +71,11 @@ class AdvancedUserRecipe1306097511(BasicNewsRecipe):

     remove_tags = [
         dict(name=['iframe','script','noscript','style']),
-        dict(name='div', attrs={'class':[re.compile('column-[14]-5'),'col-179 ','col-373 ','clear','ad','navigation',re.compile('share-tools(-top)?'),'tools','metroCommentFormWrap','article-tools-below-title','related-links','padding-top-15',re.compile('^promo.*?$'),'teaser-component',re.compile('fb(-comments|_iframe_widget)')]}),
+        dict(name='div', attrs={'class':['column-4-5','column-1-5','ad-msg','col-179 ','col-373 ','clear','ad','navigation',re.compile('share-tools(-top)?'),'tools','metroCommentFormWrap','article-tools-below-title','related-links','padding-top-15',re.compile('^promo.*?$'),'teaser-component',re.compile('fb(-comments|_iframe_widget)'),'promos','header-links','promo-2']}),
-        dict(id=['column-1-5-bottom','column-4-5',re.compile('^ad(\d+|adcomp.*?)?$'),'sidebar',re.compile('^article-\d'),'comments','gallery-1']),
+        dict(id=['column-1-5-bottom','column-4-5',re.compile('^ad(\d+|adcomp.*?)?$'),'adadcomp-4','margin-5','sidebar',re.compile('^article-\d'),'comments','gallery-1']),
         dict(name='a', attrs={'name':'comments'}),
         #dict(name='div', attrs={'data-href'}),
-        dict(name='img', attrs={'class':'top-line'}),
+        dict(name='img', attrs={'class':'top-line','title':'volledig scherm'}),
         dict(attrs={'style':re.compile('^(.*(display\s?:\s?none|img-mask|white)\s?;?.*)$'),'title':'volledig scherm'})]

     '''removed by before/after:
recipes/nachdenkseiten.recipe (new file, 22 lines)
@@ -0,0 +1,22 @@
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class Nachdenkseiten(BasicNewsRecipe):
+    title = u'Nachdenkseiten'
+    __author__ = 'jrda'
+    publisher = 'www.nachdenkseiten.de Albrecht Mueller und Dr. Wolfgang Lieb'
+    description = 'NachDenkSeiten - Die kritische Website'
+    category = 'news'
+    oldest_article = 7
+    use_embedded_content = False
+    language = 'de'
+    timefmt = ''
+    max_articles_per_feed = 6
+    no_stylesheets = True
+    encoding = 'utf-8'
+    remove_javascript = True
+    keep_only_tags = [
+        {'id':'content'}]
+
+    feeds = [
+        ('News', 'http://www.nachdenkseiten.de/?feed=rss2'),
+    ]
recipes/national_geographic_it.recipe (new file, 16 lines)
@@ -0,0 +1,16 @@
+__version__ = 'v1.0'
+__date__ = '5, May 2012'
+
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class AdvancedUserRecipe1336226255(BasicNewsRecipe):
+    title = u'National Geographic'
+    __author__ = 'faber1971'
+    description = 'Science magazine'
+    language = 'it'
+
+    oldest_article = 15
+    max_articles_per_feed = 100
+    auto_cleanup = True
+    remove_tags = [dict(name='div',attrs={'class':'banner-abbonamenti'})]
+    feeds = [(u'National Geographic', u'http://www.nationalgeographic.it/rss/all/rss2.0.xml')]
@@ -1,5 +1,4 @@
 from calibre.web.feeds.recipes import BasicNewsRecipe
-from calibre.ebooks.BeautifulSoup import BeautifulSoup

 class NYTimes(BasicNewsRecipe):

@@ -11,22 +10,8 @@ class NYTimes(BasicNewsRecipe):
     needs_subscription = False

     no_stylesheets = True
-    #remove_tags_before = dict(name='h1', attrs={'class':'heading'})
+    auto_cleanup = True
-    remove_tags_after = dict(name='div', attrs={'class':'npStoryTools npWidth1-6 npRight npTxtStrong'})
+    auto_cleanup_keep = '//*[@class="npStoryPhoto npTxtPlain"]'
-    remove_tags = [
-        dict(name='iframe'),
-        dict(name='div', attrs={'class':['story-tools', 'npStoryTools npWidth1-6 npRight npTxtStrong']}),
-        #dict(name='div', attrs={'id':['qrformdiv', 'inSection', 'alpha-inner']}),
-        #dict(name='form', attrs={'onsubmit':''}),
-        dict(name='ul', attrs={'class':'npTxtAlt npGroup npTxtCentre npStoryShare npTxtStrong npTxtDim'}),
-    ]
-
-    # def preprocess_html(self, soup):
-    #     table = soup.find('table')
-    #     if table is not None:
-    #         table.extract()
-    #     return soup

     #TO GET ARTICLE TOC
@@ -53,14 +38,14 @@ class NYTimes(BasicNewsRecipe):
             if current_section is not None and x.name == 'h5':
                 # Article found
                 title = self.tag_to_string(x)
-                a = x.find('a', href=lambda x: x and 'story' in x)
+                a = x.find('a', href=True)
                 if a is None:
                     continue
                 url = a.get('href', False)
                 if not url or not title:
                     continue
                 #if url.startswith('story'):
-                url = 'http://www.nationalpost.com/todays-paper/'+url
+                #url = 'http://www.nationalpost.com/todays-paper/'+url
                 self.log('\t\tFound article:', title)
                 self.log('\t\t\t', url)
                 current_articles.append({'title': title, 'url':url,
@@ -70,11 +55,4 @@ class NYTimes(BasicNewsRecipe):
             feeds.append((current_section, current_articles))

         return feeds
-
-    def preprocess_html(self, soup):
-        story = soup.find(name='div', attrs={'id':'npContentMain'})
-        ##td = heading.findParent(name='td')
-        ##td.extract()
-        soup = BeautifulSoup('<html><head><title>t</title></head><body></body></html>')
-        body = soup.find(name='body')
-        body.insert(0, story)
-        return soup
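The National Post rewrite above swaps the hand-maintained remove_tags lists for calibre's readability-based auto_cleanup, with auto_cleanup_keep naming, by XPath, the photo block that the heuristics would otherwise strip. A minimal skeleton showing how those two options pair up in any recipe (the class name and feed URL are illustrative):

from calibre.web.feeds.recipes import BasicNewsRecipe

class AutoCleanupSketch(BasicNewsRecipe):
    title = 'Auto cleanup sketch'        # illustrative
    no_stylesheets = True
    # Let the readability heuristics extract the article body ...
    auto_cleanup = True
    # ... but keep elements matching this XPath even if they look like clutter.
    auto_cleanup_keep = '//*[@class="npStoryPhoto npTxtPlain"]'
    feeds = [('Example', 'http://example.com/rss.xml')]   # illustrative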
@@ -11,7 +11,7 @@ import datetime


 class Newsweek(BasicNewsRecipe):

     # how many issues to go back, 0 means get the most current one
     BACK_ISSUES = 1

@@ -26,8 +26,8 @@ class Newsweek(BasicNewsRecipe):
     language = 'pl'
     remove_javascript = True

     temp_files = []
     articles_are_obfuscated = True


     #
@@ -40,7 +40,7 @@ class Newsweek(BasicNewsRecipe):
         page = self.index_to_soup(source)

         main_section = page.find(id='mainSection')

         title = main_section.find('h1')
         info = main_section.find('ul', attrs={'class' : 'articleInfo'})
         authors = info.find('li').find('h4')
@@ -50,25 +50,25 @@ class Newsweek(BasicNewsRecipe):
         related = article.find('div', attrs={'class' : 'relatedBox'})
         if related is not None:
             related.extract()

         # remove div with social networking links and links to
         # other articles in web version
         for div in article.findAll('div'):
             if div.find('span', attrs={'class' : 'google-plus'}):
                 div.extract()

             for p in div.findAll('p'):
                 if p.find('span', attrs={'style' : 'color: rgb(255, 0, 0);'}):
                     p.extract()
                     continue
                 for a in p.findAll('a'):
                     if a.find('span', attrs={'style' : 'font-size: larger;'}):
                         a.extract()


         html = unicode(title) + unicode(authors) + unicode(article)
         next = main_section.find('li', attrs={'class' : 'next'})

         while next:
             url = next.find('a')['href']
             br.open(url)
@@ -81,11 +81,11 @@ class Newsweek(BasicNewsRecipe):
             aside.extract()
             html = html + unicode(article)
             next = main_section.find('li', attrs={'class' : 'next'})


         self.temp_files.append(PersistentTemporaryFile('_temparse.html'))
         self.temp_files[-1].write(html)
         self.temp_files[-1].close()
         return self.temp_files[-1].name


@@ -102,9 +102,9 @@ class Newsweek(BasicNewsRecipe):
         if len(options) > self.BACK_ISSUES:
             option = options[self.BACK_ISSUES];
             self.EDITION = option['value'].replace('http://www.newsweek.pl/wydania/','')
-            issue_soup = self.index_to_soup('http://www.newsweek.pl/wydania/' + self.EDITION)
+            self.index_to_soup('http://www.newsweek.pl/wydania/' + self.EDITION)
         else:
             self.BACK_ISSUES = self.BACK_ISSUES - len(options)
             self.YEAR = self.YEAR - 1
             self.find_last_issue(archive_url + ',' + str(self.YEAR))

@@ -139,14 +139,14 @@ class Newsweek(BasicNewsRecipe):
             article = self.create_article(h2)
             if article is None :
                 continue

             if articles.has_key(section):
                 articles[section].append(article)
             else:
                 articles[section] = [article]
                 sections.append(section)


         for section in sections:
             feeds.append((section, articles[section]))
         return feeds
@@ -161,7 +161,7 @@ class Newsweek(BasicNewsRecipe):
         a = h2.find('a')
         if a is None:
             return None

         article['title'] = self.tag_to_string(a)
         article['url'] = a['href']
         article['date'] = self.DATE
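Newsweek Polska declares articles_are_obfuscated = True, so each article is assembled by hand (following the 'next' links as above) and handed back to calibre as a temporary file. A stripped-down sketch of that contract, assuming calibre's PersistentTemporaryFile and the recipe's browser object; the pagination handling shown in the real recipe is elided:

from calibre.ptempfile import PersistentTemporaryFile

def get_obfuscated_article(self, url):
    # Fetch and assemble the full article HTML here (multi-page handling elided).
    html = self.browser.open(url).read()
    pt = PersistentTemporaryFile('_temparse.html')
    pt.write(html)
    pt.close()
    self.temp_files.append(pt)
    return pt.name   # calibre continues the normal pipeline from this file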
@@ -1,5 +1,5 @@
 """
-Pocket Calibre Recipe v1.0
+Pocket Calibre Recipe v1.1
 """
 __license__ = 'GPL v3'
 __copyright__ = '''
@@ -73,6 +73,9 @@ class Pocket(BasicNewsRecipe):
             articles = []
             soup = self.index_to_soup(feedurl)
             ritem = soup.find('ul', attrs={'id':'list'})
+            if ritem is None:
+                self.log.exception("Page %s skipped: invalid HTML" % (feedtitle if feedtitle else feedurl))
+                continue
             for item in reversed(ritem.findAll('li')):
                 if articlesToGrab < 1:
                     break
@@ -94,7 +97,7 @@ class Pocket(BasicNewsRecipe):
                 self.readList.append(readLink)
             totalfeeds.append((feedtitle, articles))
         if len(self.readList) < self.minimum_articles:
-            raise Exception("Not enough articles in RIL! Change minimum_articles or add more.")
+            raise Exception("Not enough articles in Pocket! Change minimum_articles or add more articles.")
         return totalfeeds

     def mark_as_read(self, markList):
recipes/rebelion.recipe (new file, 34 lines)
@@ -0,0 +1,34 @@
+# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
+from __future__ import unicode_literals
+
+from calibre.web.feeds.news import BasicNewsRecipe
+import re
+
+class RebelionRecipe (BasicNewsRecipe):
+    __author__ = u'Marc Busqué <marc@lamarciana.com>' #Thanks to atlantique http://www.mobileread.com/forums/member.php?u=67876
+    __url__ = 'http://www.lamarciana.com'
+    __version__ = '1.0'
+    __license__ = 'GPL v3'
+    __copyright__ = '2012, Marc Busqué <marc@lamarciana.com>'
+    title = u'Rebelion.org'
+    description = u'Rebelión pretende ser un medio de información alternativa que publique las noticias que no son consideradas importantes por los medios de comunicación tradicionales. También, dar a las noticias un tratamiento diferente en la línea de mostrar los intereses que los poderes económicos y políticos del mundo capitalista ocultan para mantener sus privilegios y el status actual. Queremos servir y ayudarnos de todos los grupos, colectivos y personas que trabajan por cambiar este mundo en una perspectiva radicalmente diferente, más justa, igualitaria y equilibrada social y ecológicamente. Es nuestro objetivo contar con la participación y colaboración de todos vosotros para que Rebelión sea un espacio serio, riguroso y actualizado en la difusión de noticias.'
+    url = 'http://www.rebelion.org'
+    language = 'es'
+    tags = 'contrainformación, información alternativa'
+    oldest_article = 1
+    remove_empty_feeds = True
+    encoding = 'latin1' #
+    keep_only_tags = [
+        {'name': 'div', 'attrs': {'id': 'CuerpoNoticia'}}
+    ]
+    no_stylesheets = True
+    extra_css = '.autor {font-style: italic;} .titulo {font-size: 150%;} .titulo, .pretitulo {text-align: center;} #TextoNoticia {text-align:justify;} .autor, .fuente, .entradilla {font-size: 90%; text-align: left;}'
+
+    feeds = [
+        (u'Titulares del día', u'http://www.rebelion.org/rss_portada.php'),
+    ]
+
+    #See http://www.mobileread.com/forums/showthread.php?t=174501
+    def print_version(self, url):
+        id = re.compile('\d*$').search(url).group()
+        return u'http://www.rebelion.org/noticia.php?id=%s' % id
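print_version() above pulls the trailing numeric id out of whatever link the feed supplies and rebuilds the printable noticia.php URL. A worked example on a made-up link:

import re

url = 'http://www.rebelion.org/noticia.php?id=149021'   # illustrative feed link
article_id = re.compile('\d*$').search(url).group()     # '149021'
print(u'http://www.rebelion.org/noticia.php?id=%s' % article_id)
# -> http://www.rebelion.org/noticia.php?id=149021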
recipes/stars_and_stripes.recipe (new file, 39 lines)
@@ -0,0 +1,39 @@
+''' Stars and Stripes
+'''
+
+import re
+from calibre.web.feeds.recipes import BasicNewsRecipe
+
+class AdvancedUserRecipe1308791026(BasicNewsRecipe):
+    title = u'Stars and Stripes'
+    oldest_article = 3
+    max_articles_per_feed = 100
+    __author__ = 'adoucette'
+    description = 'The U.S. militarys independent news source, featuring exclusive reports from Iraq, Afghanistan, Europe and the Far East.'
+    no_stylesheets = True
+    #delay = 1
+    use_embedded_content = False
+    encoding = 'utf8'
+    publisher = 'stripes.com'
+    category = 'news, US, world'
+    language = 'en_US'
+    publication_type = 'newsportal'
+    preprocess_regexps = [(re.compile(r'<!--.*?-->', re.DOTALL), lambda m: '')]
+    conversion_options = {
+        'comments': description
+        ,'tags': category
+        ,'language': language
+        ,'publisher': publisher
+        ,'linearize_tables': True
+    }
+    keep_only_tags = [dict(name='div', attrs={'class':['element article']})]
+    remove_tags_after = [dict(name='ul', attrs={'class':'inline-bookmarks'})]
+    feeds = [
+        (u'News', u'http://feeds.stripes.com/starsandstripes/news'),
+        (u'Sports', u'http://feeds.stripes.com/starsandstripes/sports'),
+        (u'Military Life', u'http://feeds.stripes.com/starsandstripes/militarylife'),
+        (u'Opinion', u'http://feeds.stripes.com/starsandstripes/opinion'),
+        (u'Travel', u'http://feeds.stripes.com/starsandstripes/travel')
+    ]
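The preprocess_regexps entry above deletes HTML comments from each fetched page before parsing; re.DOTALL lets the pattern span comments that contain newlines. A quick illustration on a made-up fragment:

import re

pattern = re.compile(r'<!--.*?-->', re.DOTALL)
html = '<p>Story text</p><!-- ad\nmarkup --><p>More text</p>'   # illustrative input
print(pattern.sub('', html))
# -> <p>Story text</p><p>More text</p>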
@@ -490,12 +490,6 @@ save_original_format = True
 # how many should be shown, here.
 gui_view_history_size = 15

-#: When using the 'Tweak Book' action, which format to prefer
-# When tweaking a book that has multiple formats, calibre picks one
-# automatically. By default EPUB is preferred to HTMLZ. If you would like to
-# prefer HTMLZ to EPUB for tweaking, change this to 'htmlz'
-tweak_book_prefer = 'epub'
-
 #: Change the font size of book details in the interface
 # Change the font size at which book details are rendered in the side panel and
 # comments are rendered in the metadata edit dialog. Set it to a positive or
@@ -12,14 +12,14 @@ msgstr ""
 "Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
 "devel@lists.alioth.debian.org>\n"
 "POT-Creation-Date: 2011-11-25 14:01+0000\n"
-"PO-Revision-Date: 2012-04-12 09:56+0000\n"
+"PO-Revision-Date: 2012-05-03 16:09+0000\n"
 "Last-Translator: Dídac Rios <didac@niorcs.com>\n"
 "Language-Team: Catalan <linux@softcatala.org>\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2012-04-13 05:26+0000\n"
-"X-Generator: Launchpad (build 15070)\n"
+"X-Launchpad-Export-Date: 2012-05-04 04:47+0000\n"
+"X-Generator: Launchpad (build 15195)\n"
 "Language: ca\n"
 
 #. name for aaa
@ -9536,7 +9536,7 @@ msgstr "Ani"
|
|||||||
|
|
||||||
#. name for hni
|
#. name for hni
|
||||||
msgid "Hani"
|
msgid "Hani"
|
||||||
msgstr ""
|
msgstr "Haní"
|
||||||
|
|
||||||
#. name for hnj
|
#. name for hnj
|
||||||
msgid "Hmong Njua"
|
msgid "Hmong Njua"
|
||||||
@ -9544,7 +9544,7 @@ msgstr "Miao; Hmong Njua"
|
|||||||
|
|
||||||
#. name for hnn
|
#. name for hnn
|
||||||
msgid "Hanunoo"
|
msgid "Hanunoo"
|
||||||
msgstr ""
|
msgstr "Hanunoo"
|
||||||
|
|
||||||
#. name for hno
|
#. name for hno
|
||||||
msgid "Hindko; Northern"
|
msgid "Hindko; Northern"
|
||||||
@ -9552,35 +9552,35 @@ msgstr "Hindko; septentrional"
|
|||||||
|
|
||||||
#. name for hns
|
#. name for hns
|
||||||
msgid "Hindustani; Caribbean"
|
msgid "Hindustani; Caribbean"
|
||||||
msgstr ""
|
msgstr "Hindustaní; Caribeny"
|
||||||
|
|
||||||
#. name for hnu
|
#. name for hnu
|
||||||
msgid "Hung"
|
msgid "Hung"
|
||||||
msgstr ""
|
msgstr "Hung"
|
||||||
|
|
||||||
#. name for hoa
|
#. name for hoa
|
||||||
msgid "Hoava"
|
msgid "Hoava"
|
||||||
msgstr ""
|
msgstr "Hoava"
|
||||||
|
|
||||||
#. name for hob
|
#. name for hob
|
||||||
msgid "Mari (Madang Province)"
|
msgid "Mari (Madang Province)"
|
||||||
msgstr ""
|
msgstr "Mari (Província de Madang)"
|
||||||
|
|
||||||
#. name for hoc
|
#. name for hoc
|
||||||
msgid "Ho"
|
msgid "Ho"
|
||||||
msgstr ""
|
msgstr "Ho"
|
||||||
|
|
||||||
#. name for hod
|
#. name for hod
|
||||||
msgid "Holma"
|
msgid "Holma"
|
||||||
msgstr ""
|
msgstr "Holma"
|
||||||
|
|
||||||
#. name for hoe
|
#. name for hoe
|
||||||
msgid "Horom"
|
msgid "Horom"
|
||||||
msgstr ""
|
msgstr "Horom"
|
||||||
|
|
||||||
#. name for hoh
|
#. name for hoh
|
||||||
msgid "Hobyót"
|
msgid "Hobyót"
|
||||||
msgstr ""
|
msgstr "Hobyot"
|
||||||
|
|
||||||
#. name for hoi
|
#. name for hoi
|
||||||
msgid "Holikachuk"
|
msgid "Holikachuk"
|
||||||
@ -9588,11 +9588,11 @@ msgstr "Holikachuk"
|
|||||||
|
|
||||||
#. name for hoj
|
#. name for hoj
|
||||||
msgid "Hadothi"
|
msgid "Hadothi"
|
||||||
msgstr "Hadothi"
|
msgstr "Harautí"
|
||||||
|
|
||||||
#. name for hol
|
#. name for hol
|
||||||
msgid "Holu"
|
msgid "Holu"
|
||||||
msgstr "Holu"
|
msgstr "Holo"
|
||||||
|
|
||||||
#. name for hom
|
#. name for hom
|
||||||
msgid "Homa"
|
msgid "Homa"
|
||||||
@ -9628,11 +9628,11 @@ msgstr "Honi"
|
|||||||
|
|
||||||
#. name for hoy
|
#. name for hoy
|
||||||
msgid "Holiya"
|
msgid "Holiya"
|
||||||
msgstr ""
|
msgstr "Holiya"
|
||||||
|
|
||||||
#. name for hoz
|
#. name for hoz
|
||||||
msgid "Hozo"
|
msgid "Hozo"
|
||||||
msgstr ""
|
msgstr "Hozo"
|
||||||
|
|
||||||
#. name for hpo
|
#. name for hpo
|
||||||
msgid "Hpon"
|
msgid "Hpon"
|
||||||
@ -9644,7 +9644,7 @@ msgstr "Hawaià Pidgin; llenguatge de signes"
|
|||||||
|
|
||||||
#. name for hra
|
#. name for hra
|
||||||
msgid "Hrangkhol"
|
msgid "Hrangkhol"
|
||||||
msgstr "Hrangkhol"
|
msgstr "Hrangkol"
|
||||||
|
|
||||||
#. name for hre
|
#. name for hre
|
||||||
msgid "Hre"
|
msgid "Hre"
|
||||||
@ -9668,7 +9668,7 @@ msgstr "Horuru"
|
|||||||
|
|
||||||
#. name for hrt
|
#. name for hrt
|
||||||
msgid "Hértevin"
|
msgid "Hértevin"
|
||||||
msgstr "Hértevin"
|
msgstr "Hertevin"
|
||||||
|
|
||||||
#. name for hru
|
#. name for hru
|
||||||
msgid "Hruso"
|
msgid "Hruso"
|
||||||
@ -9724,7 +9724,7 @@ msgstr "Hitu"
|
|||||||
|
|
||||||
#. name for htx
|
#. name for htx
|
||||||
msgid "Hittite; Middle"
|
msgid "Hittite; Middle"
|
||||||
msgstr "Hittite; Middle"
|
msgstr "Hittita; mitjà"
|
||||||
|
|
||||||
#. name for hub
|
#. name for hub
|
||||||
msgid "Huambisa"
|
msgid "Huambisa"
|
||||||
@ -9732,7 +9732,7 @@ msgstr "Huambisa"
|
|||||||
|
|
||||||
#. name for huc
|
#. name for huc
|
||||||
msgid "=/Hua"
|
msgid "=/Hua"
|
||||||
msgstr ""
|
msgstr "Hua"
|
||||||
|
|
||||||
#. name for hud
|
#. name for hud
|
||||||
msgid "Huaulu"
|
msgid "Huaulu"
|
||||||
@ -9740,7 +9740,7 @@ msgstr "Huaulu"
|
|||||||
|
|
||||||
#. name for hue
|
#. name for hue
|
||||||
msgid "Huave; San Francisco Del Mar"
|
msgid "Huave; San Francisco Del Mar"
|
||||||
msgstr "Huave; San Francisco Del Mar"
|
msgstr "Huave; San Francisco"
|
||||||
|
|
||||||
#. name for huf
|
#. name for huf
|
||||||
msgid "Humene"
|
msgid "Humene"
|
||||||
@ -9756,7 +9756,7 @@ msgstr "Huilliche"
|
|||||||
|
|
||||||
#. name for hui
|
#. name for hui
|
||||||
msgid "Huli"
|
msgid "Huli"
|
||||||
msgstr "Huli"
|
msgstr "Hulí"
|
||||||
|
|
||||||
#. name for huj
|
#. name for huj
|
||||||
msgid "Miao; Northern Guiyang"
|
msgid "Miao; Northern Guiyang"
|
||||||
@ -9808,7 +9808,7 @@ msgstr "Huitoto; Murui"
|
|||||||
|
|
||||||
#. name for huv
|
#. name for huv
|
||||||
msgid "Huave; San Mateo Del Mar"
|
msgid "Huave; San Mateo Del Mar"
|
||||||
msgstr "Huave; San Mateo Del Mar"
|
msgstr "Huave; San Mateo"
|
||||||
|
|
||||||
#. name for huw
|
#. name for huw
|
||||||
msgid "Hukumina"
|
msgid "Hukumina"
|
||||||
@ -9820,35 +9820,35 @@ msgstr "Huitoto; Nüpode"
|
|||||||
|
|
||||||
#. name for huy
|
#. name for huy
|
||||||
msgid "Hulaulá"
|
msgid "Hulaulá"
|
||||||
msgstr ""
|
msgstr "Arameu; Hulaula"
|
||||||
|
|
||||||
#. name for huz
|
#. name for huz
|
||||||
msgid "Hunzib"
|
msgid "Hunzib"
|
||||||
msgstr ""
|
msgstr "Hunzib"
|
||||||
|
|
||||||
#. name for hvc
|
#. name for hvc
|
||||||
msgid "Haitian Vodoun Culture Language"
|
msgid "Haitian Vodoun Culture Language"
|
||||||
msgstr ""
|
msgstr "Haitià Vodoun"
|
||||||
|
|
||||||
#. name for hve
|
#. name for hve
|
||||||
msgid "Huave; San Dionisio Del Mar"
|
msgid "Huave; San Dionisio Del Mar"
|
||||||
msgstr ""
|
msgstr "Huave; San Dionisio"
|
||||||
|
|
||||||
#. name for hvk
|
#. name for hvk
|
||||||
msgid "Haveke"
|
msgid "Haveke"
|
||||||
msgstr ""
|
msgstr "Haveke"
|
||||||
|
|
||||||
#. name for hvn
|
#. name for hvn
|
||||||
msgid "Sabu"
|
msgid "Sabu"
|
||||||
msgstr ""
|
msgstr "Sabu"
|
||||||
|
|
||||||
#. name for hvv
|
#. name for hvv
|
||||||
msgid "Huave; Santa María Del Mar"
|
msgid "Huave; Santa María Del Mar"
|
||||||
msgstr ""
|
msgstr "Huave; Santa Maria"
|
||||||
|
|
||||||
#. name for hwa
|
#. name for hwa
|
||||||
msgid "Wané"
|
msgid "Wané"
|
||||||
msgstr ""
|
msgstr "Wané"
|
||||||
|
|
||||||
#. name for hwc
|
#. name for hwc
|
||||||
msgid "Creole English; Hawai'i"
|
msgid "Creole English; Hawai'i"
|
||||||
@ -9856,11 +9856,11 @@ msgstr "Anglès crioll; Hawaii"
|
|||||||
|
|
||||||
#. name for hwo
|
#. name for hwo
|
||||||
msgid "Hwana"
|
msgid "Hwana"
|
||||||
msgstr ""
|
msgstr "Hwana"
|
||||||
|
|
||||||
#. name for hya
|
#. name for hya
|
||||||
msgid "Hya"
|
msgid "Hya"
|
||||||
msgstr ""
|
msgstr "Hya"
|
||||||
|
|
||||||
#. name for hye
|
#. name for hye
|
||||||
msgid "Armenian"
|
msgid "Armenian"
|
||||||
@ -9868,79 +9868,79 @@ msgstr "armeni"
|
|||||||
|
|
||||||
#. name for iai
|
#. name for iai
|
||||||
msgid "Iaai"
|
msgid "Iaai"
|
||||||
msgstr ""
|
msgstr "Iaai"
|
||||||
|
|
||||||
#. name for ian
|
#. name for ian
|
||||||
msgid "Iatmul"
|
msgid "Iatmul"
|
||||||
msgstr ""
|
msgstr "Iatmulès"
|
||||||
|
|
||||||
#. name for iap
|
#. name for iap
|
||||||
msgid "Iapama"
|
msgid "Iapama"
|
||||||
msgstr ""
|
msgstr "Iapama"
|
||||||
|
|
||||||
#. name for iar
|
#. name for iar
|
||||||
msgid "Purari"
|
msgid "Purari"
|
||||||
msgstr ""
|
msgstr "Purari"
|
||||||
|
|
||||||
#. name for iba
|
#. name for iba
|
||||||
msgid "Iban"
|
msgid "Iban"
|
||||||
msgstr ""
|
msgstr "Iban"
|
||||||
|
|
||||||
#. name for ibb
|
#. name for ibb
|
||||||
msgid "Ibibio"
|
msgid "Ibibio"
|
||||||
msgstr ""
|
msgstr "Ibibio"
|
||||||
|
|
||||||
#. name for ibd
|
#. name for ibd
|
||||||
msgid "Iwaidja"
|
msgid "Iwaidja"
|
||||||
msgstr ""
|
msgstr "Iwaidja"
|
||||||
|
|
||||||
#. name for ibe
|
#. name for ibe
|
||||||
msgid "Akpes"
|
msgid "Akpes"
|
||||||
msgstr ""
|
msgstr "Akpes"
|
||||||
|
|
||||||
#. name for ibg
|
#. name for ibg
|
||||||
msgid "Ibanag"
|
msgid "Ibanag"
|
||||||
msgstr ""
|
msgstr "Ibanag"
|
||||||
|
|
||||||
#. name for ibi
|
#. name for ibi
|
||||||
msgid "Ibilo"
|
msgid "Ibilo"
|
||||||
msgstr ""
|
msgstr "Ibilo"
|
||||||
|
|
||||||
#. name for ibl
|
#. name for ibl
|
||||||
msgid "Ibaloi"
|
msgid "Ibaloi"
|
||||||
msgstr ""
|
msgstr "Ibaloi"
|
||||||
|
|
||||||
#. name for ibm
|
#. name for ibm
|
||||||
msgid "Agoi"
|
msgid "Agoi"
|
||||||
msgstr ""
|
msgstr "Agoi"
|
||||||
|
|
||||||
#. name for ibn
|
#. name for ibn
|
||||||
msgid "Ibino"
|
msgid "Ibino"
|
||||||
msgstr ""
|
msgstr "Ibino"
|
||||||
|
|
||||||
#. name for ibo
|
#. name for ibo
|
||||||
msgid "Igbo"
|
msgid "Igbo"
|
||||||
msgstr ""
|
msgstr "Ibo"
|
||||||
|
|
||||||
#. name for ibr
|
#. name for ibr
|
||||||
msgid "Ibuoro"
|
msgid "Ibuoro"
|
||||||
msgstr ""
|
msgstr "Ibuoro"
|
||||||
|
|
||||||
#. name for ibu
|
#. name for ibu
|
||||||
msgid "Ibu"
|
msgid "Ibu"
|
||||||
msgstr ""
|
msgstr "Ibu"
|
||||||
|
|
||||||
#. name for iby
|
#. name for iby
|
||||||
msgid "Ibani"
|
msgid "Ibani"
|
||||||
msgstr ""
|
msgstr "Ibani"
|
||||||
|
|
||||||
#. name for ica
|
#. name for ica
|
||||||
msgid "Ede Ica"
|
msgid "Ede Ica"
|
||||||
msgstr ""
|
msgstr "Ede Ica"
|
||||||
|
|
||||||
#. name for ich
|
#. name for ich
|
||||||
msgid "Etkywan"
|
msgid "Etkywan"
|
||||||
msgstr ""
|
msgstr "Etkywan"
|
||||||
|
|
||||||
#. name for icl
|
#. name for icl
|
||||||
msgid "Icelandic Sign Language"
|
msgid "Icelandic Sign Language"
|
||||||
@ -9952,7 +9952,7 @@ msgstr "Anglès crioll; Islander"
|
|||||||
|
|
||||||
#. name for ida
|
#. name for ida
|
||||||
msgid "Idakho-Isukha-Tiriki"
|
msgid "Idakho-Isukha-Tiriki"
|
||||||
msgstr ""
|
msgstr "Idakho-Isukha-Tiriki"
|
||||||
|
|
||||||
#. name for idb
|
#. name for idb
|
||||||
msgid "Indo-Portuguese"
|
msgid "Indo-Portuguese"
|
||||||
@ -9960,15 +9960,15 @@ msgstr "Indo-portuguès"
|
|||||||
|
|
||||||
#. name for idc
|
#. name for idc
|
||||||
msgid "Idon"
|
msgid "Idon"
|
||||||
msgstr ""
|
msgstr "Idon"
|
||||||
|
|
||||||
#. name for idd
|
#. name for idd
|
||||||
msgid "Ede Idaca"
|
msgid "Ede Idaca"
|
||||||
msgstr ""
|
msgstr "Ede Idaca"
|
||||||
|
|
||||||
#. name for ide
|
#. name for ide
|
||||||
msgid "Idere"
|
msgid "Idere"
|
||||||
msgstr ""
|
msgstr "Idere"
|
||||||
|
|
||||||
#. name for idi
|
#. name for idi
|
||||||
msgid "Idi"
|
msgid "Idi"
|
||||||
@ -9976,43 +9976,43 @@ msgstr ""
|
|||||||
|
|
||||||
#. name for ido
|
#. name for ido
|
||||||
msgid "Ido"
|
msgid "Ido"
|
||||||
msgstr ""
|
msgstr "ido"
|
||||||
|
|
||||||
#. name for idr
|
#. name for idr
|
||||||
msgid "Indri"
|
msgid "Indri"
|
||||||
msgstr ""
|
msgstr "Indri"
|
||||||
|
|
||||||
#. name for ids
|
#. name for ids
|
||||||
msgid "Idesa"
|
msgid "Idesa"
|
||||||
msgstr ""
|
msgstr "Idesa"
|
||||||
|
|
||||||
#. name for idt
|
#. name for idt
|
||||||
msgid "Idaté"
|
msgid "Idaté"
|
||||||
msgstr ""
|
msgstr "Idaté"
|
||||||
|
|
||||||
#. name for idu
|
#. name for idu
|
||||||
msgid "Idoma"
|
msgid "Idoma"
|
||||||
msgstr ""
|
msgstr "Idoma"
|
||||||
|
|
||||||
#. name for ifa
|
#. name for ifa
|
||||||
msgid "Ifugao; Amganad"
|
msgid "Ifugao; Amganad"
|
||||||
msgstr ""
|
msgstr "Ifugao; Amganad"
|
||||||
|
|
||||||
#. name for ifb
|
#. name for ifb
|
||||||
msgid "Ifugao; Batad"
|
msgid "Ifugao; Batad"
|
||||||
msgstr ""
|
msgstr "Ifugao; Batad"
|
||||||
|
|
||||||
#. name for ife
|
#. name for ife
|
||||||
msgid "Ifè"
|
msgid "Ifè"
|
||||||
msgstr ""
|
msgstr "Ifè"
|
||||||
|
|
||||||
#. name for iff
|
#. name for iff
|
||||||
msgid "Ifo"
|
msgid "Ifo"
|
||||||
msgstr ""
|
msgstr "Ifo"
|
||||||
|
|
||||||
#. name for ifk
|
#. name for ifk
|
||||||
msgid "Ifugao; Tuwali"
|
msgid "Ifugao; Tuwali"
|
||||||
msgstr ""
|
msgstr "Ifugao; Tuwali"
|
||||||
|
|
||||||
#. name for ifm
|
#. name for ifm
|
||||||
msgid "Teke-Fuumu"
|
msgid "Teke-Fuumu"
|
||||||
@ -10020,15 +10020,15 @@ msgstr "Teke; Fuumu"
|
|||||||
|
|
||||||
#. name for ifu
|
#. name for ifu
|
||||||
msgid "Ifugao; Mayoyao"
|
msgid "Ifugao; Mayoyao"
|
||||||
msgstr ""
|
msgstr "Ifugao; Mayoyao"
|
||||||
|
|
||||||
#. name for ify
|
#. name for ify
|
||||||
msgid "Kallahan; Keley-I"
|
msgid "Kallahan; Keley-I"
|
||||||
msgstr ""
|
msgstr "Kallahan; Keley-I"
|
||||||
|
|
||||||
#. name for igb
|
#. name for igb
|
||||||
msgid "Ebira"
|
msgid "Ebira"
|
||||||
msgstr ""
|
msgstr "Ebira"
|
||||||
|
|
||||||
#. name for ige
|
#. name for ige
|
||||||
msgid "Igede"
|
msgid "Igede"
|
||||||
[File diff suppressed because it is too large]
@@ -8,14 +8,14 @@ msgstr ""
 "Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
 "devel@lists.alioth.debian.org>\n"
 "POT-Creation-Date: 2011-11-25 14:01+0000\n"
-"PO-Revision-Date: 2012-03-25 12:19+0000\n"
-"Last-Translator: Radan Putnik <srastral@gmail.com>\n"
+"PO-Revision-Date: 2012-05-03 14:49+0000\n"
+"Last-Translator: Иван Старчевић <ivanstar61@gmail.com>\n"
 "Language-Team: Serbian <gnu@prevod.org>\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2012-03-26 04:37+0000\n"
-"X-Generator: Launchpad (build 15008)\n"
+"X-Launchpad-Export-Date: 2012-05-04 04:47+0000\n"
+"X-Generator: Launchpad (build 15195)\n"
 "Language: sr\n"
 
 #. name for aaa
@ -6152,7 +6152,7 @@ msgstr ""
|
|||||||
|
|
||||||
#. name for deu
|
#. name for deu
|
||||||
msgid "German"
|
msgid "German"
|
||||||
msgstr "немачки"
|
msgstr "Немачки"
|
||||||
|
|
||||||
#. name for dev
|
#. name for dev
|
||||||
msgid "Domung"
|
msgid "Domung"
|
||||||
@ -8416,7 +8416,7 @@ msgstr "ирски"
|
|||||||
|
|
||||||
#. name for glg
|
#. name for glg
|
||||||
msgid "Galician"
|
msgid "Galician"
|
||||||
msgstr ""
|
msgstr "Галицијски"
|
||||||
|
|
||||||
#. name for glh
|
#. name for glh
|
||||||
msgid "Pashayi; Northwest"
|
msgid "Pashayi; Northwest"
|
||||||
@ -8472,11 +8472,11 @@ msgstr ""
|
|||||||
|
|
||||||
#. name for gmh
|
#. name for gmh
|
||||||
msgid "German; Middle High (ca. 1050-1500)"
|
msgid "German; Middle High (ca. 1050-1500)"
|
||||||
msgstr ""
|
msgstr "Немачки; средње високи (ca. 1050-1500)"
|
||||||
|
|
||||||
#. name for gml
|
#. name for gml
|
||||||
msgid "German; Middle Low"
|
msgid "German; Middle Low"
|
||||||
msgstr ""
|
msgstr "Немачки; средње низак"
|
||||||
|
|
||||||
#. name for gmm
|
#. name for gmm
|
||||||
msgid "Gbaya-Mbodomo"
|
msgid "Gbaya-Mbodomo"
|
||||||
@ -8792,7 +8792,7 @@ msgstr ""
|
|||||||
|
|
||||||
#. name for gsg
|
#. name for gsg
|
||||||
msgid "German Sign Language"
|
msgid "German Sign Language"
|
||||||
msgstr ""
|
msgstr "Немачки језик"
|
||||||
|
|
||||||
#. name for gsl
|
#. name for gsl
|
||||||
msgid "Gusilay"
|
msgid "Gusilay"
|
||||||
@ -8820,7 +8820,7 @@ msgstr ""
|
|||||||
|
|
||||||
#. name for gsw
|
#. name for gsw
|
||||||
msgid "German; Swiss"
|
msgid "German; Swiss"
|
||||||
msgstr ""
|
msgstr "Немачки ; Швајцарска"
|
||||||
|
|
||||||
#. name for gta
|
#. name for gta
|
||||||
msgid "Guató"
|
msgid "Guató"
|
||||||
@ -17954,7 +17954,7 @@ msgstr ""
|
|||||||
|
|
||||||
#. name for nds
|
#. name for nds
|
||||||
msgid "German; Low"
|
msgid "German; Low"
|
||||||
msgstr ""
|
msgstr "Немачки; низак"
|
||||||
|
|
||||||
#. name for ndt
|
#. name for ndt
|
||||||
msgid "Ndunga"
|
msgid "Ndunga"
|
||||||
@ -18778,7 +18778,7 @@ msgstr ""
|
|||||||
|
|
||||||
#. name for nno
|
#. name for nno
|
||||||
msgid "Norwegian Nynorsk"
|
msgid "Norwegian Nynorsk"
|
||||||
msgstr "норвешки модерни"
|
msgstr "Норвешки модерни"
|
||||||
|
|
||||||
#. name for nnp
|
#. name for nnp
|
||||||
msgid "Naga; Wancho"
|
msgid "Naga; Wancho"
|
||||||
@ -18830,7 +18830,7 @@ msgstr ""
|
|||||||
|
|
||||||
#. name for nob
|
#. name for nob
|
||||||
msgid "Norwegian Bokmål"
|
msgid "Norwegian Bokmål"
|
||||||
msgstr ""
|
msgstr "Норвешки (књижевни)"
|
||||||
|
|
||||||
#. name for noc
|
#. name for noc
|
||||||
msgid "Nuk"
|
msgid "Nuk"
|
||||||
@ -18886,7 +18886,7 @@ msgstr ""
|
|||||||
|
|
||||||
#. name for nor
|
#. name for nor
|
||||||
msgid "Norwegian"
|
msgid "Norwegian"
|
||||||
msgstr "норвешки"
|
msgstr "Норвешки"
|
||||||
|
|
||||||
#. name for nos
|
#. name for nos
|
||||||
msgid "Nisu; Eastern"
|
msgid "Nisu; Eastern"
|
||||||
@ -19066,7 +19066,7 @@ msgstr ""
|
|||||||
|
|
||||||
#. name for nsl
|
#. name for nsl
|
||||||
msgid "Norwegian Sign Language"
|
msgid "Norwegian Sign Language"
|
||||||
msgstr ""
|
msgstr "Норвешки језик"
|
||||||
|
|
||||||
#. name for nsm
|
#. name for nsm
|
||||||
msgid "Naga; Sumi"
|
msgid "Naga; Sumi"
|
||||||
@ -20406,7 +20406,7 @@ msgstr ""
|
|||||||
|
|
||||||
#. name for pdc
|
#. name for pdc
|
||||||
msgid "German; Pennsylvania"
|
msgid "German; Pennsylvania"
|
||||||
msgstr ""
|
msgstr "Немачки ; Пенсилванија"
|
||||||
|
|
||||||
#. name for pdi
|
#. name for pdi
|
||||||
msgid "Pa Di"
|
msgid "Pa Di"
|
||||||
@ -22086,7 +22086,7 @@ msgstr ""
|
|||||||
|
|
||||||
#. name for rmg
|
#. name for rmg
|
||||||
msgid "Norwegian; Traveller"
|
msgid "Norwegian; Traveller"
|
||||||
msgstr ""
|
msgstr "Норвешки; путнички"
|
||||||
|
|
||||||
#. name for rmh
|
#. name for rmh
|
||||||
msgid "Murkim"
|
msgid "Murkim"
|
||||||
@ -22871,7 +22871,7 @@ msgstr ""
|
|||||||
|
|
||||||
#. name for sgg
|
#. name for sgg
|
||||||
msgid "Swiss-German Sign Language"
|
msgid "Swiss-German Sign Language"
|
||||||
msgstr ""
|
msgstr "Швајцарско-Немачки језик"
|
||||||
|
|
||||||
#. name for sgh
|
#. name for sgh
|
||||||
msgid "Shughni"
|
msgid "Shughni"
|
||||||
|
@@ -26,7 +26,7 @@ def get_opts_from_parser(parser):
 class Coffee(Command): # {{{
 
     description = 'Compile coffeescript files into javascript'
-    COFFEE_DIRS = {'ebooks/oeb/display': 'display'}
+    COFFEE_DIRS = ('ebooks/oeb/display',)
 
     def add_options(self, parser):
         parser.add_option('--watch', '-w', action='store_true', default=False,
@@ -47,49 +47,69 @@ class Coffee(Command): # {{{
             except KeyboardInterrupt:
                 pass
 
-    def show_js(self, jsfile):
+    def show_js(self, raw):
         from pygments.lexers import JavascriptLexer
         from pygments.formatters import TerminalFormatter
         from pygments import highlight
-        with open(jsfile, 'rb') as f:
-            raw = f.read()
         print highlight(raw, JavascriptLexer(), TerminalFormatter())
 
     def do_coffee_compile(self, opts, timestamp=False, ignore_errors=False):
-        for toplevel, dest in self.COFFEE_DIRS.iteritems():
-            dest = self.j(self.RESOURCES, dest)
-            for x in glob.glob(self.j(self.SRC, __appname__, toplevel, '*.coffee')):
-                js = self.j(dest, os.path.basename(x.rpartition('.')[0]+'.js'))
-                if self.newer(js, x):
-                    print ('\t%sCompiling %s'%(time.strftime('[%H:%M:%S] ') if
-                        timestamp else '', os.path.basename(x)))
-                    try:
-                        cs = subprocess.check_output(self.compiler +
-                                [x]).decode('utf-8')
-                    except Exception as e:
-                        print ('\n\tCompilation of %s failed'%os.path.basename(x))
-                        print (e)
-                        if ignore_errors:
-                            with open(js, 'wb') as f:
-                                f.write('# Compilation from coffeescript failed')
-                        else:
-                            raise SystemExit(1)
-                    else:
-                        with open(js, 'wb') as f:
-                            f.write(cs.encode('utf-8'))
-                        if opts.show_js:
-                            self.show_js(js)
-                            print ('#'*80)
-                            print ('#'*80)
+        src_files = {}
+        for src in self.COFFEE_DIRS:
+            for f in glob.glob(self.j(self.SRC, __appname__, src,
+                    '*.coffee')):
+                bn = os.path.basename(f).rpartition('.')[0]
+                arcname = src.replace('/', '.') + '.' + bn + '.js'
+                src_files[arcname] = (f, os.stat(f).st_mtime)
+
+        existing = {}
+        dest = self.j(self.RESOURCES, 'compiled_coffeescript.zip')
+        if os.path.exists(dest):
+            with zipfile.ZipFile(dest, 'r') as zf:
+                for info in zf.infolist():
+                    mtime = time.mktime(info.date_time + (0, 0, -1))
+                    arcname = info.filename
+                    if (arcname in src_files and src_files[arcname][1] <
+                            mtime):
+                        existing[arcname] = (zf.read(info), info)
+
+        todo = set(src_files) - set(existing)
+        updated = {}
+        for arcname in todo:
+            name = arcname.rpartition('.')[0]
+            print ('\t%sCompiling %s'%(time.strftime('[%H:%M:%S] ') if
+                timestamp else '', name))
+            src = src_files[arcname][0]
+            try:
+                js = subprocess.check_output(self.compiler +
+                        [src]).decode('utf-8')
+            except Exception as e:
+                print ('\n\tCompilation of %s failed'%name)
+                print (e)
+                if ignore_errors:
+                    js = u'# Compilation from coffeescript failed'
+                else:
+                    raise SystemExit(1)
+            else:
+                if opts.show_js:
+                    self.show_js(js)
+                    print ('#'*80)
+                    print ('#'*80)
+            zi = zipfile.ZipInfo()
+            zi.filename = arcname
+            zi.date_time = time.localtime()[:6]
+            updated[arcname] = (js.encode('utf-8'), zi)
+        if updated:
+            with zipfile.ZipFile(dest, 'w', zipfile.ZIP_STORED) as zf:
+                for raw, zi in updated.itervalues():
+                    zf.writestr(zi, raw)
+                for raw, zi in existing.itervalues():
+                    zf.writestr(zi, raw)
 
     def clean(self):
-        for toplevel, dest in self.COFFEE_DIRS.iteritems():
-            dest = self.j(self.RESOURCES, dest)
-            for x in glob.glob(self.j(self.SRC, __appname__, toplevel, '*.coffee')):
-                x = x.rpartition('.')[0] + '.js'
-                x = self.j(dest, os.path.basename(x))
-                if os.path.exists(x):
-                    os.remove(x)
+        x = self.j(self.RESOURCES, 'compiled_coffeescript.zip')
+        if os.path.exists(x):
+            os.remove(x)
 # }}}
 
 class Kakasi(Command): # {{{
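
The compiled JavaScript no longer lands as loose .js files under the resources tree; everything goes into a single resources/compiled_coffeescript.zip, with each script stored under a dotted archive name built as src.replace('/', '.') + '.' + basename + '.js'. A minimal sketch of how a consumer could pull one compiled script back out of that archive, assuming only the layout produced by do_coffee_compile() above (the member name in the comment is illustrative):

import zipfile

def load_compiled_coffeescript(zip_path, arcname):
    # arcname follows the dotted naming convention used above,
    # e.g. something like 'ebooks.oeb.display.cfi.js'.
    with zipfile.ZipFile(zip_path, 'r') as zf:
        return zf.read(arcname).decode('utf-8')

# js = load_compiled_coffeescript('resources/compiled_coffeescript.zip',
#                                 'ebooks.oeb.display.cfi.js')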
||||||
|
@@ -4,7 +4,7 @@ __license__ = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
 __docformat__ = 'restructuredtext en'
 __appname__ = u'calibre'
-numeric_version = (0, 8, 49)
+numeric_version = (0, 8, 51)
 __version__ = u'.'.join(map(unicode, numeric_version))
 __author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"
 
||||||
|
@@ -295,3 +295,15 @@ class OutputFormatPlugin(Plugin):
         return self.oeb.metadata.publication_type and \
             unicode(self.oeb.metadata.publication_type[0]).startswith('periodical:')
 
+    def specialize_css_for_output(self, log, opts, item, stylizer):
+        '''
+        Can be used to make changes to the css during the CSS flattening
+        process.
+
+        :param item: The item (HTML file) being processed
+        :param stylizer: A Stylizer object containing the flattened styles for
+            item. You can get the style for any element by stylizer.style(element).
+        '''
+        pass
+
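
This hook lets an output plugin adjust the flattened styles of each HTML file before they are written out. A minimal sketch of what an override could look like, assuming only the signature and the stylizer.style() accessor documented above; the plugin class and the dropped property are illustrative (the MOBI/AZW3 output plugins further down delegate the same hook to a CSSCleanup helper):

from calibre.ebooks.oeb.base import XPath

class MyOutput(OutputFormatPlugin):

    def specialize_css_for_output(self, log, opts, item, stylizer):
        # Hypothetical example: remove a property the target renderer mishandles.
        for body in XPath('//h:body')(item.data):
            style = stylizer.style(body)
            style.drop('height')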
||||||
|
@@ -59,9 +59,7 @@ Run an embedded python interpreter.
         'files and metadata, which you can edit using standard HTML '
         'editing tools, and then rebuilds the file from the edited HTML. '
         'Makes no additional changes to the HTML, unlike a full calibre '
-        'conversion). Note that this tool will try to open the '
-        'folder containing the HTML files in the editor pointed to by the'
-        ' EDITOR environment variable.')
+        'conversion).')
 
     parser.add_option('--test-build', help='Test binary modules in build',
         action='store_true', default=False)
@@ -222,7 +220,7 @@ def main(args=sys.argv):
         from calibre.utils.pyconsole.main import main
         main()
     elif opts.command:
-        sys.argv = args[:1]
+        sys.argv = args
         exec opts.command
     elif opts.debug_device_driver:
         debug_device_driver()
||||||
|
@ -57,6 +57,7 @@ class ANDROID(USBMS):
|
|||||||
0x4316 : [0x216],
|
0x4316 : [0x216],
|
||||||
0x42d6 : [0x216],
|
0x42d6 : [0x216],
|
||||||
0x42d7 : [0x216],
|
0x42d7 : [0x216],
|
||||||
|
0x42f7 : [0x216],
|
||||||
},
|
},
|
||||||
# Freescale
|
# Freescale
|
||||||
0x15a2 : {
|
0x15a2 : {
|
||||||
@ -177,7 +178,7 @@ class ANDROID(USBMS):
|
|||||||
'TELECHIP', 'HUAWEI', 'T-MOBILE', 'SEMC', 'LGE', 'NVIDIA',
|
'TELECHIP', 'HUAWEI', 'T-MOBILE', 'SEMC', 'LGE', 'NVIDIA',
|
||||||
'GENERIC-', 'ZTE', 'MID', 'QUALCOMM', 'PANDIGIT', 'HYSTON',
|
'GENERIC-', 'ZTE', 'MID', 'QUALCOMM', 'PANDIGIT', 'HYSTON',
|
||||||
'VIZIO', 'GOOGLE', 'FREESCAL', 'KOBO_INC', 'LENOVO', 'ROCKCHIP',
|
'VIZIO', 'GOOGLE', 'FREESCAL', 'KOBO_INC', 'LENOVO', 'ROCKCHIP',
|
||||||
'POCKET', 'ONDA_MID', 'ZENITHIN', 'INGENIC', 'PMID701C']
|
'POCKET', 'ONDA_MID', 'ZENITHIN', 'INGENIC', 'PMID701C', 'PD']
|
||||||
WINDOWS_MAIN_MEM = ['ANDROID_PHONE', 'A855', 'A853', 'INC.NEXUS_ONE',
|
WINDOWS_MAIN_MEM = ['ANDROID_PHONE', 'A855', 'A853', 'INC.NEXUS_ONE',
|
||||||
'__UMS_COMPOSITE', '_MB200', 'MASS_STORAGE', '_-_CARD', 'SGH-I897',
|
'__UMS_COMPOSITE', '_MB200', 'MASS_STORAGE', '_-_CARD', 'SGH-I897',
|
||||||
'GT-I9000', 'FILE-STOR_GADGET', 'SGH-T959_CARD', 'SGH-T959', 'SAMSUNG_ANDROID',
|
'GT-I9000', 'FILE-STOR_GADGET', 'SGH-T959_CARD', 'SGH-T959', 'SAMSUNG_ANDROID',
|
||||||
@ -193,7 +194,7 @@ class ANDROID(USBMS):
|
|||||||
'GT-I9003_CARD', 'XT912', 'FILE-CD_GADGET', 'RK29_SDK', 'MB855',
|
'GT-I9003_CARD', 'XT912', 'FILE-CD_GADGET', 'RK29_SDK', 'MB855',
|
||||||
'XT910', 'BOOK_A10', 'USB_2.0_DRIVER', 'I9100T', 'P999DW',
|
'XT910', 'BOOK_A10', 'USB_2.0_DRIVER', 'I9100T', 'P999DW',
|
||||||
'KTABLET_PC', 'INGENIC', 'GT-I9001_CARD', 'USB_2.0_DRIVER',
|
'KTABLET_PC', 'INGENIC', 'GT-I9001_CARD', 'USB_2.0_DRIVER',
|
||||||
'GT-S5830L_CARD']
|
'GT-S5830L_CARD', 'UNIVERSE', 'XT875']
|
||||||
WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
|
WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
|
||||||
'FILE-STOR_GADGET', 'SGH-T959_CARD', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
|
'FILE-STOR_GADGET', 'SGH-T959_CARD', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
|
||||||
'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD',
|
'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD',
|
||||||
@ -201,7 +202,7 @@ class ANDROID(USBMS):
|
|||||||
'ANDROID_MID', 'P990_SD_CARD', '.K080', 'LTE_CARD', 'MB853',
|
'ANDROID_MID', 'P990_SD_CARD', '.K080', 'LTE_CARD', 'MB853',
|
||||||
'A1-07___C0541A4F', 'XT912', 'MB855', 'XT910', 'BOOK_A10_CARD',
|
'A1-07___C0541A4F', 'XT912', 'MB855', 'XT910', 'BOOK_A10_CARD',
|
||||||
'USB_2.0_DRIVER', 'I9100T', 'P999DW_SD_CARD', 'KTABLET_PC',
|
'USB_2.0_DRIVER', 'I9100T', 'P999DW_SD_CARD', 'KTABLET_PC',
|
||||||
'FILE-CD_GADGET', 'GT-I9001_CARD', 'USB_2.0_DRIVER']
|
'FILE-CD_GADGET', 'GT-I9001_CARD', 'USB_2.0_DRIVER', 'XT875']
|
||||||
|
|
||||||
OSX_MAIN_MEM = 'Android Device Main Memory'
|
OSX_MAIN_MEM = 'Android Device Main Memory'
|
||||||
|
|
||||||
|
@ -92,6 +92,10 @@ class POCKETBOOK360(EB600):
|
|||||||
name = 'PocketBook 360 Device Interface'
|
name = 'PocketBook 360 Device Interface'
|
||||||
|
|
||||||
gui_name = 'PocketBook 360'
|
gui_name = 'PocketBook 360'
|
||||||
|
VENDOR_ID = [0x1f85, 0x525]
|
||||||
|
PRODUCT_ID = [0x1688, 0xa4a5]
|
||||||
|
BCD = [0x110]
|
||||||
|
|
||||||
|
|
||||||
FORMATS = ['epub', 'fb2', 'prc', 'mobi', 'pdf', 'djvu', 'rtf', 'chm', 'txt']
|
FORMATS = ['epub', 'fb2', 'prc', 'mobi', 'pdf', 'djvu', 'rtf', 'chm', 'txt']
|
||||||
|
|
||||||
|
@ -155,7 +155,11 @@ class CHMReader(CHMFile):
|
|||||||
self.hhc_path = f
|
self.hhc_path = f
|
||||||
break
|
break
|
||||||
if self.hhc_path not in files and files:
|
if self.hhc_path not in files and files:
|
||||||
self.hhc_path = files[0]
|
for f in files:
|
||||||
|
if f.partition('.')[-1].lower() in {'html', 'htm', 'xhtm',
|
||||||
|
'xhtml'}:
|
||||||
|
self.hhc_path = f
|
||||||
|
break
|
||||||
|
|
||||||
if self.hhc_path == '.hhc' and self.hhc_path not in files:
|
if self.hhc_path == '.hhc' and self.hhc_path not in files:
|
||||||
from calibre import walk
|
from calibre import walk
|
||||||
@ -165,6 +169,9 @@ class CHMReader(CHMFile):
|
|||||||
self.hhc_path = os.path.relpath(x, output_dir)
|
self.hhc_path = os.path.relpath(x, output_dir)
|
||||||
break
|
break
|
||||||
|
|
||||||
|
if self.hhc_path not in files and files:
|
||||||
|
self.hhc_path = files[0]
|
||||||
|
|
||||||
def _reformat(self, data, htmlpath):
|
def _reformat(self, data, htmlpath):
|
||||||
if self.input_encoding:
|
if self.input_encoding:
|
||||||
data = data.decode(self.input_encoding)
|
data = data.decode(self.input_encoding)
|
||||||
@ -241,7 +248,10 @@ class CHMReader(CHMFile):
|
|||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
# do not prettify, it would reformat the <pre> tags!
|
# do not prettify, it would reformat the <pre> tags!
|
||||||
return str(soup)
|
try:
|
||||||
|
return str(soup)
|
||||||
|
except RuntimeError:
|
||||||
|
return data
|
||||||
|
|
||||||
def Contents(self):
|
def Contents(self):
|
||||||
if self._contents is not None:
|
if self._contents is not None:
|
||||||
|
@@ -1,4 +1,25 @@
-from __future__ import with_statement
-__license__ = 'GPL 3'
-__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
+# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
+from __future__ import (unicode_literals, division, absolute_import,
+        print_function)
+
+__license__ = 'GPL v3'
+__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
 __docformat__ = 'restructuredtext en'
+
+
+class ConversionUserFeedBack(Exception):
+
+    def __init__(self, title, msg, level='info', det_msg=''):
+        ''' Show a simple message to the user
+
+        :param title: The title (very short description)
+        :param msg: The message to show the user
+        :param level: Must be one of 'info', 'warn' or 'error'
+        :param det_msg: Optional detailed message to show the user
+        '''
+        import json
+        Exception.__init__(self, json.dumps({'msg':msg, 'level':level,
+            'det_msg':det_msg, 'title':title}))
+        self.title, self.msg, self.det_msg = title, msg, det_msg
+        self.level = level
+
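
ConversionUserFeedBack serializes its payload as JSON into the exception message, so both the GUI and the ebook-convert command line (see the change to cli.py just below) can surface the title, message and optional details to the user. A small sketch of how conversion code might raise it; the condition and wording here are illustrative, not taken from any actual calibre plugin:

from calibre.ebooks.conversion import ConversionUserFeedBack

def check_input_size(path_to_ebook):
    # Hypothetical guard: refuse an empty input file with a friendly message.
    import os
    if os.path.getsize(path_to_ebook) == 0:
        raise ConversionUserFeedBack(
            'Empty input file',
            'The file %s contains no data and cannot be converted.' % path_to_ebook,
            level='error', det_msg='Size on disk was 0 bytes')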
||||||
|
@@ -15,6 +15,7 @@ from calibre.utils.logging import Log
 from calibre.constants import preferred_encoding
 from calibre.customize.conversion import OptionRecommendation
 from calibre import patheq
+from calibre.ebooks.conversion import ConversionUserFeedBack
 
 USAGE = '%prog ' + _('''\
 input_file output_file [options]
@@ -304,7 +305,10 @@ def read_sr_patterns(path, log=None):
 def main(args=sys.argv):
     log = Log()
     parser, plumber = create_option_parser(args, log)
-    opts = parser.parse_args(args)[0]
+    opts, leftover_args = parser.parse_args(args)
+    if len(leftover_args) > 3:
+        log.error('Extra arguments not understood:', u', '.join(leftover_args[3:]))
+        return 1
     for x in ('read_metadata_from_opf', 'cover'):
         if getattr(opts, x, None) is not None:
             setattr(opts, x, abspath(getattr(opts, x)))
@@ -317,7 +321,16 @@ def main(args=sys.argv):
             if n.dest]
     plumber.merge_ui_recommendations(recommendations)
 
-    plumber.run()
+    try:
+        plumber.run()
+    except ConversionUserFeedBack as e:
+        ll = {'info': log.info, 'warn': log.warn,
+                'error':log.error}.get(e.level, log.info)
+        ll(e.title)
+        if e.det_msg:
+            log.debug(e.det_msg)
+        ll(e.msg)
+        raise SystemExit(1)
 
     log(_('Output saved to'), ' ', plumber.output)
 
|
|
||||||
|
@ -65,6 +65,7 @@ class EPUBInput(InputFormatPlugin):
|
|||||||
return False
|
return False
|
||||||
|
|
||||||
def rationalize_cover(self, opf, log):
|
def rationalize_cover(self, opf, log):
|
||||||
|
removed = None
|
||||||
from lxml import etree
|
from lxml import etree
|
||||||
guide_cover, guide_elem = None, None
|
guide_cover, guide_elem = None, None
|
||||||
for guide_elem in opf.iterguide():
|
for guide_elem in opf.iterguide():
|
||||||
@ -91,6 +92,7 @@ class EPUBInput(InputFormatPlugin):
|
|||||||
# specially
|
# specially
|
||||||
if not self.for_viewer:
|
if not self.for_viewer:
|
||||||
spine[0].getparent().remove(spine[0])
|
spine[0].getparent().remove(spine[0])
|
||||||
|
removed = guide_cover
|
||||||
guide_elem.set('href', 'calibre_raster_cover.jpg')
|
guide_elem.set('href', 'calibre_raster_cover.jpg')
|
||||||
from calibre.ebooks.oeb.base import OPF
|
from calibre.ebooks.oeb.base import OPF
|
||||||
t = etree.SubElement(elem[0].getparent(), OPF('item'),
|
t = etree.SubElement(elem[0].getparent(), OPF('item'),
|
||||||
@ -109,6 +111,7 @@ class EPUBInput(InputFormatPlugin):
|
|||||||
if renderer is not None:
|
if renderer is not None:
|
||||||
open('calibre_raster_cover.jpg', 'wb').write(
|
open('calibre_raster_cover.jpg', 'wb').write(
|
||||||
renderer)
|
renderer)
|
||||||
|
return removed
|
||||||
|
|
||||||
def find_opf(self):
|
def find_opf(self):
|
||||||
from lxml import etree
|
from lxml import etree
|
||||||
@ -170,7 +173,7 @@ class EPUBInput(InputFormatPlugin):
|
|||||||
for elem in opf.iterguide():
|
for elem in opf.iterguide():
|
||||||
elem.set('href', delta+elem.get('href'))
|
elem.set('href', delta+elem.get('href'))
|
||||||
|
|
||||||
self.rationalize_cover(opf, log)
|
self.removed_cover = self.rationalize_cover(opf, log)
|
||||||
|
|
||||||
self.optimize_opf_parsing = opf
|
self.optimize_opf_parsing = opf
|
||||||
for x in opf.itermanifest():
|
for x in opf.itermanifest():
|
||||||
@ -198,3 +201,17 @@ class EPUBInput(InputFormatPlugin):
|
|||||||
nopf.write(opf.render())
|
nopf.write(opf.render())
|
||||||
|
|
||||||
return os.path.abspath(u'content.opf')
|
return os.path.abspath(u'content.opf')
|
||||||
|
|
||||||
|
def postprocess_book(self, oeb, opts, log):
|
||||||
|
rc = getattr(self, 'removed_cover', None)
|
||||||
|
if rc:
|
||||||
|
cover_toc_item = None
|
||||||
|
for item in oeb.toc.iterdescendants():
|
||||||
|
if item.href == rc:
|
||||||
|
cover_toc_item = item
|
||||||
|
break
|
||||||
|
spine = {x.href for x in oeb.spine}
|
||||||
|
if (cover_toc_item is not None and cover_toc_item not in spine):
|
||||||
|
oeb.toc.item_that_refers_to_cover = cover_toc_item
|
||||||
|
|
||||||
|
|
||||||
|
@ -312,13 +312,9 @@ class EPUBOutput(OutputFormatPlugin):
|
|||||||
Perform various markup transforms to get the output to render correctly
|
Perform various markup transforms to get the output to render correctly
|
||||||
in the quirky ADE.
|
in the quirky ADE.
|
||||||
'''
|
'''
|
||||||
from calibre.ebooks.oeb.base import XPath, XHTML, OEB_STYLES, barename, urlunquote
|
from calibre.ebooks.oeb.base import XPath, XHTML, barename, urlunquote
|
||||||
|
|
||||||
stylesheet = None
|
stylesheet = self.oeb.manifest.main_stylesheet
|
||||||
for item in self.oeb.manifest:
|
|
||||||
if item.media_type.lower() in OEB_STYLES:
|
|
||||||
stylesheet = item
|
|
||||||
break
|
|
||||||
|
|
||||||
# ADE cries big wet tears when it encounters an invalid fragment
|
# ADE cries big wet tears when it encounters an invalid fragment
|
||||||
# identifier in the NCX toc.
|
# identifier in the NCX toc.
|
||||||
|
@ -12,7 +12,7 @@ class MOBIInput(InputFormatPlugin):
|
|||||||
name = 'MOBI Input'
|
name = 'MOBI Input'
|
||||||
author = 'Kovid Goyal'
|
author = 'Kovid Goyal'
|
||||||
description = 'Convert MOBI files (.mobi, .prc, .azw) to HTML'
|
description = 'Convert MOBI files (.mobi, .prc, .azw) to HTML'
|
||||||
file_types = set(['mobi', 'prc', 'azw', 'azw3'])
|
file_types = set(['mobi', 'prc', 'azw', 'azw3', 'pobi'])
|
||||||
|
|
||||||
def convert(self, stream, options, file_ext, log,
|
def convert(self, stream, options, file_ext, log,
|
||||||
accelerators):
|
accelerators):
|
||||||
|
@ -232,6 +232,10 @@ class MOBIOutput(OutputFormatPlugin):
|
|||||||
writer(oeb, output_path)
|
writer(oeb, output_path)
|
||||||
extract_mobi(output_path, opts)
|
extract_mobi(output_path, opts)
|
||||||
|
|
||||||
|
def specialize_css_for_output(self, log, opts, item, stylizer):
|
||||||
|
from calibre.ebooks.mobi.writer8.cleanup import CSSCleanup
|
||||||
|
CSSCleanup(log, opts)(item, stylizer)
|
||||||
|
|
||||||
class AZW3Output(OutputFormatPlugin):
|
class AZW3Output(OutputFormatPlugin):
|
||||||
|
|
||||||
name = 'AZW3 Output'
|
name = 'AZW3 Output'
|
||||||
@ -254,9 +258,6 @@ class AZW3Output(OutputFormatPlugin):
|
|||||||
recommended_value=False, level=OptionRecommendation.LOW,
|
recommended_value=False, level=OptionRecommendation.LOW,
|
||||||
help=_('Disable compression of the file contents.')
|
help=_('Disable compression of the file contents.')
|
||||||
),
|
),
|
||||||
OptionRecommendation(name='personal_doc', recommended_value='[PDOC]',
|
|
||||||
help=_('Tag marking book to be filed with Personal Docs')
|
|
||||||
),
|
|
||||||
OptionRecommendation(name='mobi_toc_at_start',
|
OptionRecommendation(name='mobi_toc_at_start',
|
||||||
recommended_value=False,
|
recommended_value=False,
|
||||||
help=_('When adding the Table of Contents to the book, add it at the start of the '
|
help=_('When adding the Table of Contents to the book, add it at the start of the '
|
||||||
@ -298,4 +299,8 @@ class AZW3Output(OutputFormatPlugin):
|
|||||||
kf8.write(output_path)
|
kf8.write(output_path)
|
||||||
extract_mobi(output_path, opts)
|
extract_mobi(output_path, opts)
|
||||||
|
|
||||||
|
def specialize_css_for_output(self, log, opts, item, stylizer):
|
||||||
|
from calibre.ebooks.mobi.writer8.cleanup import CSSCleanup
|
||||||
|
CSSCleanup(log, opts)(item, stylizer)
|
||||||
|
|
||||||
|
|
||||||
|
@ -99,12 +99,8 @@ class PDFOutput(OutputFormatPlugin):
|
|||||||
|
|
||||||
# Remove page-break-before on <body> element as it causes
|
# Remove page-break-before on <body> element as it causes
|
||||||
# blank pages in PDF Output
|
# blank pages in PDF Output
|
||||||
from calibre.ebooks.oeb.base import OEB_STYLES, XPath
|
from calibre.ebooks.oeb.base import XPath
|
||||||
stylesheet = None
|
stylesheet = self.oeb.manifest.main_stylesheet
|
||||||
for item in self.oeb.manifest:
|
|
||||||
if item.media_type.lower() in OEB_STYLES:
|
|
||||||
stylesheet = item
|
|
||||||
break
|
|
||||||
if stylesheet is not None:
|
if stylesheet is not None:
|
||||||
from cssutils.css import CSSRule
|
from cssutils.css import CSSRule
|
||||||
classes = set(['.calibre'])
|
classes = set(['.calibre'])
|
||||||
|
@ -4,6 +4,7 @@ __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
|
|||||||
__docformat__ = 'restructuredtext en'
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
import os, re, sys, shutil, pprint
|
import os, re, sys, shutil, pprint
|
||||||
|
from functools import partial
|
||||||
|
|
||||||
from calibre.customize.conversion import OptionRecommendation, DummyReporter
|
from calibre.customize.conversion import OptionRecommendation, DummyReporter
|
||||||
from calibre.customize.ui import input_profiles, output_profiles, \
|
from calibre.customize.ui import input_profiles, output_profiles, \
|
||||||
@ -1010,6 +1011,13 @@ OptionRecommendation(name='search_replace',
|
|||||||
pr(0.35)
|
pr(0.35)
|
||||||
self.flush()
|
self.flush()
|
||||||
|
|
||||||
|
if self.output_plugin.file_type != 'epub':
|
||||||
|
# Remove the toc reference to the html cover, if any, except for
|
||||||
|
# epub, as the epub output plugin will do the right thing with it.
|
||||||
|
item = getattr(self.oeb.toc, 'item_that_refers_to_cover', None)
|
||||||
|
if item is not None and item.count() == 0:
|
||||||
|
self.oeb.toc.remove(item)
|
||||||
|
|
||||||
from calibre.ebooks.oeb.transforms.flatcss import CSSFlattener
|
from calibre.ebooks.oeb.transforms.flatcss import CSSFlattener
|
||||||
fbase = self.opts.base_font_size
|
fbase = self.opts.base_font_size
|
||||||
if fbase < 1e-4:
|
if fbase < 1e-4:
|
||||||
@ -1061,7 +1069,9 @@ OptionRecommendation(name='search_replace',
|
|||||||
untable=self.output_plugin.file_type in ('mobi','lit'),
|
untable=self.output_plugin.file_type in ('mobi','lit'),
|
||||||
unfloat=self.output_plugin.file_type in ('mobi', 'lit'),
|
unfloat=self.output_plugin.file_type in ('mobi', 'lit'),
|
||||||
page_break_on_body=self.output_plugin.file_type in ('mobi',
|
page_break_on_body=self.output_plugin.file_type in ('mobi',
|
||||||
'lit'))
|
'lit'),
|
||||||
|
specializer=partial(self.output_plugin.specialize_css_for_output,
|
||||||
|
self.log, self.opts))
|
||||||
flattener(self.oeb, self.opts)
|
flattener(self.oeb, self.opts)
|
||||||
|
|
||||||
self.opts.insert_blank_line = oibl
|
self.opts.insert_blank_line = oibl
|
||||||
|
@ -148,7 +148,7 @@ class HeuristicProcessor(object):
|
|||||||
return wordcount.words
|
return wordcount.words
|
||||||
|
|
||||||
def markup_italicis(self, html):
|
def markup_italicis(self, html):
|
||||||
self.log.debug("\n\n\nitalicize debugging \n\n\n")
|
#self.log.debug("\n\n\nitalicize debugging \n\n\n")
|
||||||
ITALICIZE_WORDS = [
|
ITALICIZE_WORDS = [
|
||||||
'Etc.', 'etc.', 'viz.', 'ie.', 'i.e.', 'Ie.', 'I.e.', 'eg.',
|
'Etc.', 'etc.', 'viz.', 'ie.', 'i.e.', 'Ie.', 'I.e.', 'eg.',
|
||||||
'e.g.', 'Eg.', 'E.g.', 'et al.', 'et cetera', 'n.b.', 'N.b.',
|
'e.g.', 'Eg.', 'E.g.', 'et al.', 'et cetera', 'n.b.', 'N.b.',
|
||||||
@ -179,8 +179,15 @@ class HeuristicProcessor(object):
|
|||||||
for match in re.finditer(pat, search_text):
|
for match in re.finditer(pat, search_text):
|
||||||
ital_string = str(match.group('words'))
|
ital_string = str(match.group('words'))
|
||||||
#self.log.debug("italicising "+str(match.group(0))+" with <i>"+ital_string+"</i>")
|
#self.log.debug("italicising "+str(match.group(0))+" with <i>"+ital_string+"</i>")
|
||||||
html = re.sub(re.escape(str(match.group(0))), '<i>%s</i>' % ital_string, html)
|
try:
|
||||||
|
html = re.sub(re.escape(str(match.group(0))), '<i>%s</i>' % ital_string, html)
|
||||||
|
except OverflowError:
|
||||||
|
# match.group(0) was too large to be compiled into a regex
|
||||||
|
continue
|
||||||
|
except re.error:
|
||||||
|
# the match was not a valid regular expression
|
||||||
|
continue
|
||||||
|
|
||||||
return html
|
return html
|
||||||
|
|
||||||
def markup_chapters(self, html, wordcount, blanks_between_paragraphs):
|
def markup_chapters(self, html, wordcount, blanks_between_paragraphs):
|
||||||
@ -319,13 +326,13 @@ class HeuristicProcessor(object):
|
|||||||
'''
|
'''
|
||||||
Unwraps lines based on line length and punctuation
|
Unwraps lines based on line length and punctuation
|
||||||
supports a range of html markup and text files
|
supports a range of html markup and text files
|
||||||
|
|
||||||
the lookahead regex below is meant look for any non-full stop characters - punctuation
|
the lookahead regex below is meant look for any non-full stop characters - punctuation
|
||||||
characters which can be used as a full stop should *not* be added below - e.g. ?!“”. etc
|
characters which can be used as a full stop should *not* be added below - e.g. ?!“”. etc
|
||||||
the reason for this is to prevent false positive wrapping. False positives are more
|
the reason for this is to prevent false positive wrapping. False positives are more
|
||||||
difficult to detect than false negatives during a manual review of the doc
|
difficult to detect than false negatives during a manual review of the doc
|
||||||
|
|
||||||
This function intentionally leaves hyphenated content alone as that is handled by the
|
This function intentionally leaves hyphenated content alone as that is handled by the
|
||||||
dehyphenate routine in a separate step
|
dehyphenate routine in a separate step
|
||||||
'''
|
'''
|
||||||
|
|
||||||
|
@ -113,6 +113,11 @@ class HTMLFile(object):
|
|||||||
raise IOError(msg)
|
raise IOError(msg)
|
||||||
raise IgnoreFile(msg, err.errno)
|
raise IgnoreFile(msg, err.errno)
|
||||||
|
|
||||||
|
if not src:
|
||||||
|
if level == 0:
|
||||||
|
raise ValueError('The file %s is empty'%self.path)
|
||||||
|
self.is_binary = True
|
||||||
|
|
||||||
if not self.is_binary:
|
if not self.is_binary:
|
||||||
if not encoding:
|
if not encoding:
|
||||||
encoding = detect_xml_encoding(src[:4096], verbose=verbose)[1]
|
encoding = detect_xml_encoding(src[:4096], verbose=verbose)[1]
|
||||||
|
@ -306,10 +306,15 @@ class MOBIHeader(object): # {{{
|
|||||||
self.extra_data_flags = 0
|
self.extra_data_flags = 0
|
||||||
if self.has_extra_data_flags:
|
if self.has_extra_data_flags:
|
||||||
self.unknown4 = self.raw[184:192]
|
self.unknown4 = self.raw[184:192]
|
||||||
self.fdst_idx, self.fdst_count = struct.unpack_from(b'>LL',
|
if self.file_version < 8:
|
||||||
self.raw, 192)
|
self.first_text_record, self.last_text_record = \
|
||||||
if self.fdst_count <= 1:
|
struct.unpack_from(b'>HH', self.raw, 192)
|
||||||
self.fdst_idx = NULL_INDEX
|
self.fdst_count = struct.unpack_from(b'>L', self.raw, 196)
|
||||||
|
else:
|
||||||
|
self.fdst_idx, self.fdst_count = struct.unpack_from(b'>LL',
|
||||||
|
self.raw, 192)
|
||||||
|
if self.fdst_count <= 1:
|
||||||
|
self.fdst_idx = NULL_INDEX
|
||||||
(self.fcis_number, self.fcis_count, self.flis_number,
|
(self.fcis_number, self.fcis_count, self.flis_number,
|
||||||
self.flis_count) = struct.unpack(b'>IIII',
|
self.flis_count) = struct.unpack(b'>IIII',
|
||||||
self.raw[200:216])
|
self.raw[200:216])
|
||||||
@ -409,7 +414,11 @@ class MOBIHeader(object): # {{{
|
|||||||
a('DRM Flags: %r'%self.drm_flags)
|
a('DRM Flags: %r'%self.drm_flags)
|
||||||
if self.has_extra_data_flags:
|
if self.has_extra_data_flags:
|
||||||
a('Unknown4: %r'%self.unknown4)
|
a('Unknown4: %r'%self.unknown4)
|
||||||
r('FDST Index', 'fdst_idx')
|
if hasattr(self, 'first_text_record'):
|
||||||
|
a('First content record: %d'%self.first_text_record)
|
||||||
|
a('Last content record: %d'%self.last_text_record)
|
||||||
|
else:
|
||||||
|
r('FDST Index', 'fdst_idx')
|
||||||
a('FDST Count: %d'% self.fdst_count)
|
a('FDST Count: %d'% self.fdst_count)
|
||||||
r('FCIS number', 'fcis_number')
|
r('FCIS number', 'fcis_number')
|
||||||
a('FCIS count: %d'% self.fcis_count)
|
a('FCIS count: %d'% self.fcis_count)
|
||||||
|
@ -111,7 +111,11 @@ def update_flow_links(mobi8_reader, resource_map, log):
|
|||||||
continue
|
continue
|
||||||
|
|
||||||
if not isinstance(flow, unicode):
|
if not isinstance(flow, unicode):
|
||||||
flow = flow.decode(mr.header.codec)
|
try:
|
||||||
|
flow = flow.decode(mr.header.codec)
|
||||||
|
except UnicodeDecodeError:
|
||||||
|
log.error('Flow part has invalid %s encoded bytes'%mr.header.codec)
|
||||||
|
flow = flow.decode(mr.header.codec, 'replace')
|
||||||
|
|
||||||
# links to raster image files from image tags
|
# links to raster image files from image tags
|
||||||
# image_pattern
|
# image_pattern
|
||||||
|
@ -207,9 +207,9 @@ class Mobi8Reader(object):
|
|||||||
fname = 'svgimg' + nstr + '.svg'
|
fname = 'svgimg' + nstr + '.svg'
|
||||||
else:
|
else:
|
||||||
# search for CDATA and if exists inline it
|
# search for CDATA and if exists inline it
|
||||||
if flowpart.find('[CDATA[') >= 0:
|
if flowpart.find(b'[CDATA[') >= 0:
|
||||||
typ = 'css'
|
typ = 'css'
|
||||||
flowpart = '<style type="text/css">\n' + flowpart + '\n</style>\n'
|
flowpart = b'<style type="text/css">\n' + flowpart + b'\n</style>\n'
|
||||||
format = 'inline'
|
format = 'inline'
|
||||||
dir = None
|
dir = None
|
||||||
fname = None
|
fname = None
|
||||||
|
@ -31,6 +31,10 @@ def do_explode(path, dest):
|
|||||||
with CurrentDir(dest):
|
with CurrentDir(dest):
|
||||||
mr = Mobi8Reader(mr, default_log)
|
mr = Mobi8Reader(mr, default_log)
|
||||||
opf = os.path.abspath(mr())
|
opf = os.path.abspath(mr())
|
||||||
|
try:
|
||||||
|
os.remove('debug-raw.html')
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
return opf
|
return opf
|
||||||
|
|
||||||
@ -52,7 +56,10 @@ def explode(path, dest, question=lambda x:True):
|
|||||||
kf8_type = header.kf8_type
|
kf8_type = header.kf8_type
|
||||||
|
|
||||||
if kf8_type is None:
|
if kf8_type is None:
|
||||||
raise BadFormat('This MOBI file does not contain a KF8 format book')
|
raise BadFormat(_('This MOBI file does not contain a KF8 format '
|
||||||
|
'book. KF8 is the new format from Amazon. calibre can '
|
||||||
|
'only tweak MOBI files that contain KF8 books. Older '
|
||||||
|
'MOBI files without KF8 are not tweakable.'))
|
||||||
|
|
||||||
if kf8_type == 'joint':
|
if kf8_type == 'joint':
|
||||||
if not question(_('This MOBI file contains both KF8 and '
|
if not question(_('This MOBI file contains both KF8 and '
|
||||||
|
@ -382,6 +382,7 @@ class MobiWriter(object):
|
|||||||
first_image_record = len(self.records)
|
first_image_record = len(self.records)
|
||||||
self.resources.serialize(self.records, used_images)
|
self.resources.serialize(self.records, used_images)
|
||||||
resource_record_count = len(self.records) - old
|
resource_record_count = len(self.records) - old
|
||||||
|
last_content_record = len(self.records) - 1
|
||||||
|
|
||||||
# FCIS/FLIS (Seems to serve no purpose)
|
# FCIS/FLIS (Seems to serve no purpose)
|
||||||
flis_number = len(self.records)
|
flis_number = len(self.records)
|
||||||
@ -406,7 +407,7 @@ class MobiWriter(object):
|
|||||||
# header
|
# header
|
||||||
header_fields['first_resource_record'] = first_image_record
|
header_fields['first_resource_record'] = first_image_record
|
||||||
header_fields['exth_flags'] = 0b100001010000 # Kinglegen uses this
|
header_fields['exth_flags'] = 0b100001010000 # Kinglegen uses this
|
||||||
header_fields['fdst_record'] = NULL_INDEX
|
header_fields['fdst_record'] = pack(b'>HH', 1, last_content_record)
|
||||||
header_fields['fdst_count'] = 1 # Why not 0? Kindlegen uses 1
|
header_fields['fdst_count'] = 1 # Why not 0? Kindlegen uses 1
|
||||||
header_fields['flis_record'] = flis_number
|
header_fields['flis_record'] = flis_number
|
||||||
header_fields['fcis_record'] = fcis_number
|
header_fields['fcis_record'] = fcis_number
|
||||||
|
25
src/calibre/ebooks/mobi/writer8/cleanup.py
Normal file
25
src/calibre/ebooks/mobi/writer8/cleanup.py
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
|
||||||
|
from __future__ import (unicode_literals, division, absolute_import,
|
||||||
|
print_function)
|
||||||
|
|
||||||
|
__license__ = 'GPL v3'
|
||||||
|
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||||
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
|
from calibre.ebooks.oeb.base import XPath
|
||||||
|
|
||||||
|
class CSSCleanup(object):
|
||||||
|
|
||||||
|
def __init__(self, log, opts):
|
||||||
|
self.log, self.opts = log, opts
|
||||||
|
|
||||||
|
def __call__(self, item, stylizer):
|
||||||
|
if not hasattr(item.data, 'xpath'): return
|
||||||
|
|
||||||
|
# The Kindle touch displays all black pages if the height is set on
|
||||||
|
# body
|
||||||
|
for body in XPath('//h:body')(item.data):
|
||||||
|
style = stylizer.style(body)
|
||||||
|
style.drop('height')
|
||||||
|
|
@ -314,9 +314,9 @@ class KF8Writer(object):
|
|||||||
return
|
return
|
||||||
|
|
||||||
# Flatten the ToC into a depth first list
|
# Flatten the ToC into a depth first list
|
||||||
fl = toc.iter() if is_periodical else toc.iterdescendants()
|
fl = toc.iterdescendants()
|
||||||
for i, item in enumerate(fl):
|
for i, item in enumerate(fl):
|
||||||
entry = {'id': id(item), 'index': i, 'href':item.href,
|
entry = {'id': id(item), 'index': i, 'href':item.href or '',
|
||||||
'label':(item.title or _('Unknown')),
|
'label':(item.title or _('Unknown')),
|
||||||
'children':[]}
|
'children':[]}
|
||||||
entry['depth'] = getattr(item, 'ncx_hlvl', 0)
|
entry['depth'] = getattr(item, 'ncx_hlvl', 0)
|
||||||
|
@ -138,6 +138,8 @@ class MOBIHeader(Header): # {{{
|
|||||||
unknown2 = zeroes(8)
|
unknown2 = zeroes(8)
|
||||||
|
|
||||||
# 192: FDST
|
# 192: FDST
|
||||||
|
# In MOBI 6 the fdst record is instead two two byte fields storing the
|
||||||
|
# index of the first and last content records
|
||||||
fdst_record = DYN
|
fdst_record = DYN
|
||||||
fdst_count = DYN
|
fdst_count = DYN
|
||||||
|
|
||||||
@@ -81,6 +81,23 @@ _css_url_re = re.compile(r'url\s*\([\'"]{0,1}(.*?)[\'"]{0,1}\)', re.I)
 _css_import_re = re.compile(r'@import "(.*?)"')
 _archive_re = re.compile(r'[^ ]+')
 
+# Tags that should not be self closed in epub output
+self_closing_bad_tags = {'a', 'abbr', 'address', 'article', 'aside', 'audio', 'b',
+    'bdo', 'blockquote', 'body', 'button', 'cite', 'code', 'dd', 'del', 'details',
+    'dfn', 'div', 'dl', 'dt', 'em', 'fieldset', 'figcaption', 'figure', 'footer',
+    'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'header', 'hgroup', 'i', 'ins', 'kbd',
+    'label', 'legend', 'li', 'map', 'mark', 'meter', 'nav', 'ol', 'output', 'p',
+    'pre', 'progress', 'q', 'rp', 'rt', 'samp', 'section', 'select', 'small',
+    'span', 'strong', 'sub', 'summary', 'sup', 'textarea', 'time', 'ul', 'var',
+    'video'}
+
+_self_closing_pat = re.compile(
+    r'<(?P<tag>%s)(?=[\s/])(?P<arg>[^>]*)/>'%('|'.join(self_closing_bad_tags)),
+    re.IGNORECASE)
+
+def close_self_closing_tags(raw):
+    return _self_closing_pat.sub(r'<\g<tag>\g<arg>></\g<tag>>', raw)
+
 def iterlinks(root, find_links_in_css=True):
     '''
     Iterate over all links in a OEB Document.
||||||
@@ -938,13 +955,10 @@ class Manifest(object):
            if isinstance(data, etree._Element):
                ans = xml2str(data, pretty_print=self.oeb.pretty_print)
                if self.media_type in OEB_DOCS:
-                   # Convert self closing div|span|a|video|audio|iframe tags
+                   # Convert self closing div|span|a|video|audio|iframe|etc tags
                    # to normally closed ones, as they are interpreted
                    # incorrectly by some browser based renderers
-                   ans = re.sub(
-                       # tag name followed by either a space or a /
-                       r'<(?P<tag>div|a|span|video|audio|iframe)(?=[\s/])(?P<arg>[^>]*)/>',
-                       r'<\g<tag>\g<arg>></\g<tag>>', ans)
+                   ans = close_self_closing_tags(ans)
                return ans
            if isinstance(data, unicode):
                return data.encode('utf-8')
@@ -1142,6 +1156,19 @@ class Manifest(object):
        element(elem, OPF('item'), attrib=attrib)
        return elem

+   @dynamic_property
+   def main_stylesheet(self):
+       def fget(self):
+           ans = getattr(self, '_main_stylesheet', None)
+           if ans is None:
+               for item in self:
+                   if item.media_type.lower() in OEB_STYLES:
+                       ans = item
+                       break
+           return ans
+       def fset(self, item):
+           self._main_stylesheet = item
+       return property(fget=fget, fset=fset)

class Spine(object):
    """Collection of manifest items composing an OEB data model book's main
@@ -389,8 +389,17 @@ class CanonicalFragmentIdentifier
        # Drill down into iframes, etc.
        while true
            target = cdoc.elementFromPoint x, y
-           if not target or target.localName == 'html'
-               log("No element at (#{ x }, #{ y })")
+           if not target or target.localName in ['html', 'body']
+               # We ignore both html and body even though body could
+               # have text nodes under it as performance is very poor if body
+               # has large margins/padding (for e.g. in fullscreen mode)
+               # A possible solution for this is to wrap all text node
+               # children of body in <span> but that is seriously ugly and
+               # might have side effects. Lets do this only if there are lots of
+               # books in the wild that actually have text children of body,
+               # and even in this case it might be better to change the input
+               # plugin to prevent this from happening.
+               # log("No element at (#{ x }, #{ y })")
                return null

            name = target.localName
src/calibre/ebooks/oeb/display/indexing.coffee (new file, 76 lines)
@@ -0,0 +1,76 @@
#!/usr/bin/env coffee
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

###
Copyright 2012, Kovid Goyal <kovid@kovidgoyal.net>
Released under the GPLv3 License
###

body_height = () ->
    db = document.body
    dde = document.documentElement
    if db? and dde?
        return Math.max(db.scrollHeight, dde.scrollHeight, db.offsetHeight,
            dde.offsetHeight, db.clientHeight, dde.clientHeight)
    return 0

abstop = (elem) ->
    ans = elem.offsetTop
    while elem.offsetParent
        elem = elem.offsetParent
        ans += elem.offsetTop
    return ans

class BookIndexing
    ###
    This class is a namespace to expose indexing functions via the
    window.book_indexing object. The most important functions are:

    anchor_positions(): Get the absolute (document co-ordinate system) position
    for elements with the specified id/name attributes.

    ###

    constructor: () ->
        this.cache = {}
        this.body_height_at_last_check = null

    cache_valid: (anchors) ->
        for a in anchors
            if not Object.prototype.hasOwnProperty.call(this.cache, a)
                return false
        for p of this.cache
            if Object.prototype.hasOwnProperty.call(this.cache, p) and p not in anchors
                return false
        return true

    anchor_positions: (anchors, use_cache=false) ->
        if use_cache and body_height() == this.body_height_at_last_check and this.cache_valid(anchors)
            return this.cache

        ans = {}
        for anchor in anchors
            elem = document.getElementById(anchor)
            if elem == null
                # Look for an <a name="anchor"> element
                try
                    result = document.evaluate(
                        ".//*[local-name() = 'a' and @name='#{ anchor }']",
                        document.body, null,
                        XPathResult.FIRST_ORDERED_NODE_TYPE, null)
                    elem = result.singleNodeValue
                catch error
                    # The anchor had a ' or other invalid char
                    elem = null
            if elem == null
                pos = body_height() + 10000
            else
                pos = abstop(elem)
            ans[anchor] = pos
        this.cache = ans
        this.body_height_at_last_check = body_height()
        return ans

if window?
    window.book_indexing = new BookIndexing()
@@ -1,383 +0,0 @@
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008 Kovid Goyal <kovid at kovidgoyal.net>'

'''
Iterate over the HTML files in an ebook. Useful for writing viewers.
'''

import re, os, math
from cStringIO import StringIO

from PyQt4.Qt import QFontDatabase

from calibre.customize.ui import available_input_formats
from calibre.ebooks.metadata.opf2 import OPF
from calibre.ptempfile import TemporaryDirectory
from calibre.ebooks.chardet import xml_to_unicode
from calibre.utils.zipfile import safe_replace
from calibre.utils.config import DynamicConfig
from calibre.utils.logging import Log
from calibre import (guess_type, prints, prepare_string_for_xml,
        xml_replace_entities)
from calibre.ebooks.oeb.transforms.cover import CoverManager
from calibre.constants import filesystem_encoding

TITLEPAGE = CoverManager.SVG_TEMPLATE.decode('utf-8').replace(\
        '__ar__', 'none').replace('__viewbox__', '0 0 600 800'
        ).replace('__width__', '600').replace('__height__', '800')
BM_FIELD_SEP = u'*|!|?|*'
BM_LEGACY_ESC = u'esc-text-%&*#%(){}ads19-end-esc'

def character_count(html):
    '''
    Return the number of "significant" text characters in a HTML string.
    '''
    count = 0
    strip_space = re.compile(r'\s+')
    for match in re.finditer(r'>[^<]+<', html):
        count += len(strip_space.sub(' ', match.group()))-2
    return count

class UnsupportedFormatError(Exception):

    def __init__(self, fmt):
        Exception.__init__(self, _('%s format books are not supported')%fmt.upper())

class SpineItem(unicode):

    def __new__(cls, path, mime_type=None):
        ppath = path.partition('#')[0]
        if not os.path.exists(path) and os.path.exists(ppath):
            path = ppath
        obj = super(SpineItem, cls).__new__(cls, path)
        raw = open(path, 'rb').read()
        raw, obj.encoding = xml_to_unicode(raw)
        obj.character_count = character_count(raw)
        obj.start_page = -1
        obj.pages = -1
        obj.max_page = -1
        if mime_type is None:
            mime_type = guess_type(obj)[0]
        obj.mime_type = mime_type
        return obj

class FakeOpts(object):
    verbose = 0
    breadth_first = False
    max_levels = 5
    input_encoding = None

def is_supported(path):
    ext = os.path.splitext(path)[1].replace('.', '').lower()
    ext = re.sub(r'(x{0,1})htm(l{0,1})', 'html', ext)
    return ext in available_input_formats()


def write_oebbook(oeb, path):
    from calibre.ebooks.oeb.writer import OEBWriter
    from calibre import walk
    w = OEBWriter()
    w(oeb, path)
    for f in walk(path):
        if f.endswith('.opf'):
            return f

class EbookIterator(object):

    CHARACTERS_PER_PAGE = 1000

    def __init__(self, pathtoebook, log=None):
        self.log = log
        if log is None:
            self.log = Log()
        pathtoebook = pathtoebook.strip()
        self.pathtoebook = os.path.abspath(pathtoebook)
        self.config = DynamicConfig(name='iterator')
        ext = os.path.splitext(pathtoebook)[1].replace('.', '').lower()
        ext = re.sub(r'(x{0,1})htm(l{0,1})', 'html', ext)
        self.ebook_ext = ext.replace('original_', '')

    def search(self, text, index, backwards=False):
        text = prepare_string_for_xml(text.lower())
        pmap = [(i, path) for i, path in enumerate(self.spine)]
        if backwards:
            pmap.reverse()
        for i, path in pmap:
            if (backwards and i < index) or (not backwards and i > index):
                with open(path, 'rb') as f:
                    raw = f.read().decode(path.encoding)
                try:
                    raw = xml_replace_entities(raw)
                except:
                    pass
                if text in raw.lower():
                    return i

    def find_missing_css_files(self):
        for x in os.walk(os.path.dirname(self.pathtoopf)):
            for f in x[-1]:
                if f.endswith('.css'):
                    yield os.path.join(x[0], f)

    def find_declared_css_files(self):
        for item in self.opf.manifest:
            if item.mime_type and 'css' in item.mime_type.lower():
                yield item.path

    def find_embedded_fonts(self):
        '''
        This will become unnecessary once Qt WebKit supports the @font-face rule.
        '''
        css_files = set(self.find_declared_css_files())
        if not css_files:
            css_files = set(self.find_missing_css_files())
        bad_map = {}
        font_family_pat = re.compile(r'font-family\s*:\s*([^;]+)')
        for csspath in css_files:
            try:
                css = open(csspath, 'rb').read().decode('utf-8', 'replace')
            except:
                continue
            for match in re.compile(r'@font-face\s*{([^}]+)}').finditer(css):
                block = match.group(1)
                family = font_family_pat.search(block)
                url = re.compile(r'url\s*\([\'"]*(.+?)[\'"]*\)', re.DOTALL).search(block)
                if url:
                    path = url.group(1).split('/')
                    path = os.path.join(os.path.dirname(csspath), *path)
                    if not os.access(path, os.R_OK):
                        continue
                    id = QFontDatabase.addApplicationFont(path)
                    if id != -1:
                        families = [unicode(f) for f in QFontDatabase.applicationFontFamilies(id)]
                        if family:
                            family = family.group(1)
                            specified_families = [x.strip().replace('"',
                                '').replace("'", '') for x in family.split(',')]
                            aliasing_ok = False
                            for f in specified_families:
                                bad_map[f] = families[0]
                                if not aliasing_ok and f in families:
                                    aliasing_ok = True

                            if not aliasing_ok:
                                prints('WARNING: Family aliasing not fully supported.')
                                prints('\tDeclared family: %r not in actual families: %r'
                                        % (family, families))
                            else:
                                prints('Loaded embedded font:', repr(family))
        if bad_map:
            def prepend_embedded_font(match):
                for bad, good in bad_map.items():
                    if bad in match.group(1):
                        prints('Substituting font family: %s -> %s'%(bad, good))
                        return match.group().replace(bad, '"%s"'%good)

            from calibre.ebooks.chardet import force_encoding
            for csspath in css_files:
                with open(csspath, 'r+b') as f:
                    css = f.read()
                    enc = force_encoding(css, False)
                    css = css.decode(enc, 'replace')
                    ncss = font_family_pat.sub(prepend_embedded_font, css)
                    if ncss != css:
                        f.seek(0)
                        f.truncate()
                        f.write(ncss.encode(enc))

    def __enter__(self, processed=False, only_input_plugin=False):
        self.delete_on_exit = []
        self._tdir = TemporaryDirectory('_ebook_iter')
        self.base = self._tdir.__enter__()
        if not isinstance(self.base, unicode):
            self.base = self.base.decode(filesystem_encoding)
        from calibre.ebooks.conversion.plumber import Plumber, create_oebbook
        plumber = Plumber(self.pathtoebook, self.base, self.log)
        plumber.setup_options()
        if self.pathtoebook.lower().endswith('.opf'):
            plumber.opts.dont_package = True
        if hasattr(plumber.opts, 'no_process'):
            plumber.opts.no_process = True

        plumber.input_plugin.for_viewer = True
        with plumber.input_plugin:
            self.pathtoopf = plumber.input_plugin(open(plumber.input, 'rb'),
                plumber.opts, plumber.input_fmt, self.log,
                {}, self.base)

        if not only_input_plugin:
            if processed or plumber.input_fmt.lower() in ('pdb', 'pdf', 'rb') and \
                    not hasattr(self.pathtoopf, 'manifest'):
                if hasattr(self.pathtoopf, 'manifest'):
                    self.pathtoopf = write_oebbook(self.pathtoopf, self.base)
                self.pathtoopf = create_oebbook(self.log, self.pathtoopf,
                        plumber.opts)

        if hasattr(self.pathtoopf, 'manifest'):
            self.pathtoopf = write_oebbook(self.pathtoopf, self.base)

        self.book_format = os.path.splitext(self.pathtoebook)[1][1:].upper()
        if getattr(plumber.input_plugin, 'is_kf8', False):
            self.book_format = 'KF8'

        self.opf = getattr(plumber.input_plugin, 'optimize_opf_parsing', None)
        if self.opf is None:
            self.opf = OPF(self.pathtoopf, os.path.dirname(self.pathtoopf))
        self.language = self.opf.language
        if self.language:
            self.language = self.language.lower()
        ordered = [i for i in self.opf.spine if i.is_linear] + \
                  [i for i in self.opf.spine if not i.is_linear]
        self.spine = []
        for i in ordered:
            spath = i.path
            mt = None
            if i.idref is not None:
                mt = self.opf.manifest.type_for_id(i.idref)
            if mt is None:
                mt = guess_type(spath)[0]
            try:
                self.spine.append(SpineItem(spath, mime_type=mt))
            except:
                self.log.warn('Missing spine item:', repr(spath))

        cover = self.opf.cover
        if self.ebook_ext in ('lit', 'mobi', 'prc', 'opf', 'fb2') and cover:
            cfile = os.path.join(self.base, 'calibre_iterator_cover.html')
            rcpath = os.path.relpath(cover, self.base).replace(os.sep, '/')
            chtml = (TITLEPAGE%prepare_string_for_xml(rcpath, True)).encode('utf-8')
            open(cfile, 'wb').write(chtml)
            self.spine[0:0] = [SpineItem(cfile,
                mime_type='application/xhtml+xml')]
            self.delete_on_exit.append(cfile)

        if self.opf.path_to_html_toc is not None and \
                self.opf.path_to_html_toc not in self.spine:
            try:
                self.spine.append(SpineItem(self.opf.path_to_html_toc))
            except:
                import traceback
                traceback.print_exc()


        sizes = [i.character_count for i in self.spine]
        self.pages = [math.ceil(i/float(self.CHARACTERS_PER_PAGE)) for i in sizes]
        for p, s in zip(self.pages, self.spine):
            s.pages = p
        start = 1

        for s in self.spine:
            s.start_page = start
            start += s.pages
            s.max_page = s.start_page + s.pages - 1
        self.toc = self.opf.toc

        self.read_bookmarks()

        return self

    def parse_bookmarks(self, raw):
        for line in raw.splitlines():
            bm = None
            if line.count('^') > 0:
                tokens = line.rpartition('^')
                title, ref = tokens[0], tokens[2]
                try:
                    spine, _, pos = ref.partition('#')
                    spine = int(spine.strip())
                except:
                    continue
                bm = {'type':'legacy', 'title':title, 'spine':spine, 'pos':pos}
            elif BM_FIELD_SEP in line:
                try:
                    title, spine, pos = line.strip().split(BM_FIELD_SEP)
                    spine = int(spine)
                except:
                    continue
                # Unescape from serialization
                pos = pos.replace(BM_LEGACY_ESC, u'^')
                # Check for pos being a scroll fraction
                try:
                    pos = float(pos)
                except:
                    pass
                bm = {'type':'cfi', 'title':title, 'pos':pos, 'spine':spine}

            if bm:
                self.bookmarks.append(bm)

    def serialize_bookmarks(self, bookmarks):
        dat = []
        for bm in bookmarks:
            if bm['type'] == 'legacy':
                rec = u'%s^%d#%s'%(bm['title'], bm['spine'], bm['pos'])
            else:
                pos = bm['pos']
                if isinstance(pos, (int, float)):
                    pos = unicode(pos)
                else:
                    pos = pos.replace(u'^', BM_LEGACY_ESC)
                rec = BM_FIELD_SEP.join([bm['title'], unicode(bm['spine']), pos])
            dat.append(rec)
        return (u'\n'.join(dat) +u'\n')

    def read_bookmarks(self):
        self.bookmarks = []
        bmfile = os.path.join(self.base, 'META-INF', 'calibre_bookmarks.txt')
        raw = ''
        if os.path.exists(bmfile):
            with open(bmfile, 'rb') as f:
                raw = f.read()
        else:
            saved = self.config['bookmarks_'+self.pathtoebook]
            if saved:
                raw = saved
        if not isinstance(raw, unicode):
            raw = raw.decode('utf-8')
        self.parse_bookmarks(raw)

    def save_bookmarks(self, bookmarks=None):
        if bookmarks is None:
            bookmarks = self.bookmarks
        dat = self.serialize_bookmarks(bookmarks)
        if os.path.splitext(self.pathtoebook)[1].lower() == '.epub' and \
                os.access(self.pathtoebook, os.R_OK):
            try:
                zf = open(self.pathtoebook, 'r+b')
            except IOError:
                return
            safe_replace(zf, 'META-INF/calibre_bookmarks.txt',
                    StringIO(dat.encode('utf-8')),
                    add_missing=True)
        else:
            self.config['bookmarks_'+self.pathtoebook] = dat

    def add_bookmark(self, bm):
        self.bookmarks = [x for x in self.bookmarks if x['title'] !=
                bm['title']]
        self.bookmarks.append(bm)
        self.save_bookmarks()

    def set_bookmarks(self, bookmarks):
        self.bookmarks = bookmarks

    def __exit__(self, *args):
        self._tdir.__exit__(*args)
        for x in self.delete_on_exit:
            if os.path.exists(x):
                os.remove(x)

def get_preprocess_html(path_to_ebook, output):
    from calibre.ebooks.conversion.preprocess import HTMLPreProcessor
    iterator = EbookIterator(path_to_ebook)
    iterator.__enter__(only_input_plugin=True)
    preprocessor = HTMLPreProcessor(None, False)
    with open(output, 'wb') as out:
        for path in iterator.spine:
            with open(path, 'rb') as f:
                html = f.read().decode('utf-8', 'replace')
            html = preprocessor(html, get_preprocess_html=True)
            out.write(html.encode('utf-8'))
            out.write(b'\n\n' + b'-'*80 + b'\n\n')
src/calibre/ebooks/oeb/iterator/__init__.py (new file, 42 lines)
@@ -0,0 +1,42 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import os, re

from calibre.customize.ui import available_input_formats

def is_supported(path):
    ext = os.path.splitext(path)[1].replace('.', '').lower()
    ext = re.sub(r'(x{0,1})htm(l{0,1})', 'html', ext)
    return ext in available_input_formats()

class UnsupportedFormatError(Exception):

    def __init__(self, fmt):
        Exception.__init__(self, _('%s format books are not supported')%fmt.upper())

def EbookIterator(*args, **kwargs):
    'For backwards compatibility'
    from calibre.ebooks.oeb.iterator.book import EbookIterator
    return EbookIterator(*args, **kwargs)

def get_preprocess_html(path_to_ebook, output):
    from calibre.ebooks.conversion.preprocess import HTMLPreProcessor
    iterator = EbookIterator(path_to_ebook)
    iterator.__enter__(only_input_plugin=True, run_char_count=False,
            read_anchor_map=False)
    preprocessor = HTMLPreProcessor(None, False)
    with open(output, 'wb') as out:
        for path in iterator.spine:
            with open(path, 'rb') as f:
                html = f.read().decode('utf-8', 'replace')
            html = preprocessor(html, get_preprocess_html=True)
            out.write(html.encode('utf-8'))
            out.write(b'\n\n' + b'-'*80 + b'\n\n')
src/calibre/ebooks/oeb/iterator/book.py (new file, 187 lines)
@@ -0,0 +1,187 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'


'''
Iterate over the HTML files in an ebook. Useful for writing viewers.
'''

import re, os, math
from functools import partial

from calibre.ebooks.metadata.opf2 import OPF
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.config import DynamicConfig
from calibre.utils.logging import default_log
from calibre import (guess_type, prepare_string_for_xml,
        xml_replace_entities)
from calibre.ebooks.oeb.transforms.cover import CoverManager

from calibre.ebooks.oeb.iterator.spine import (SpineItem, create_indexing_data)
from calibre.ebooks.oeb.iterator.bookmarks import BookmarksMixin

TITLEPAGE = CoverManager.SVG_TEMPLATE.decode('utf-8').replace(\
        '__ar__', 'none').replace('__viewbox__', '0 0 600 800'
        ).replace('__width__', '600').replace('__height__', '800')

class FakeOpts(object):
    verbose = 0
    breadth_first = False
    max_levels = 5
    input_encoding = None


def write_oebbook(oeb, path):
    from calibre.ebooks.oeb.writer import OEBWriter
    from calibre import walk
    w = OEBWriter()
    w(oeb, path)
    for f in walk(path):
        if f.endswith('.opf'):
            return f

class EbookIterator(BookmarksMixin):

    CHARACTERS_PER_PAGE = 1000

    def __init__(self, pathtoebook, log=None):
        self.log = log or default_log
        pathtoebook = pathtoebook.strip()
        self.pathtoebook = os.path.abspath(pathtoebook)
        self.config = DynamicConfig(name='iterator')
        ext = os.path.splitext(pathtoebook)[1].replace('.', '').lower()
        ext = re.sub(r'(x{0,1})htm(l{0,1})', 'html', ext)
        self.ebook_ext = ext.replace('original_', '')

    def search(self, text, index, backwards=False):
        text = prepare_string_for_xml(text.lower())
        pmap = [(i, path) for i, path in enumerate(self.spine)]
        if backwards:
            pmap.reverse()
        for i, path in pmap:
            if (backwards and i < index) or (not backwards and i > index):
                with open(path, 'rb') as f:
                    raw = f.read().decode(path.encoding)
                try:
                    raw = xml_replace_entities(raw)
                except:
                    pass
                if text in raw.lower():
                    return i

    def __enter__(self, processed=False, only_input_plugin=False,
            run_char_count=True, read_anchor_map=True):
        ''' Convert an ebook file into an exploded OEB book suitable for
        display in viewers/preprocessing etc. '''

        from calibre.ebooks.conversion.plumber import Plumber, create_oebbook

        self.delete_on_exit = []
        self._tdir = TemporaryDirectory('_ebook_iter')
        self.base = self._tdir.__enter__()
        plumber = Plumber(self.pathtoebook, self.base, self.log)
        plumber.setup_options()
        if self.pathtoebook.lower().endswith('.opf'):
            plumber.opts.dont_package = True
        if hasattr(plumber.opts, 'no_process'):
            plumber.opts.no_process = True

        plumber.input_plugin.for_viewer = True
        with plumber.input_plugin, open(plumber.input, 'rb') as inf:
            self.pathtoopf = plumber.input_plugin(inf,
                plumber.opts, plumber.input_fmt, self.log,
                {}, self.base)

        if not only_input_plugin:
            # Run the HTML preprocess/parsing from the conversion pipeline as
            # well
            if (processed or plumber.input_fmt.lower() in {'pdb', 'pdf', 'rb'}
                    and not hasattr(self.pathtoopf, 'manifest')):
                if hasattr(self.pathtoopf, 'manifest'):
                    self.pathtoopf = write_oebbook(self.pathtoopf, self.base)
                self.pathtoopf = create_oebbook(self.log, self.pathtoopf,
                        plumber.opts)

        if hasattr(self.pathtoopf, 'manifest'):
            self.pathtoopf = write_oebbook(self.pathtoopf, self.base)

        self.book_format = os.path.splitext(self.pathtoebook)[1][1:].upper()
        if getattr(plumber.input_plugin, 'is_kf8', False):
            self.book_format = 'KF8'

        self.opf = getattr(plumber.input_plugin, 'optimize_opf_parsing', None)
        if self.opf is None:
            self.opf = OPF(self.pathtoopf, os.path.dirname(self.pathtoopf))
        self.language = self.opf.language
        if self.language:
            self.language = self.language.lower()
        ordered = [i for i in self.opf.spine if i.is_linear] + \
                  [i for i in self.opf.spine if not i.is_linear]
        self.spine = []
        Spiny = partial(SpineItem, read_anchor_map=read_anchor_map,
                run_char_count=run_char_count)
        for i in ordered:
            spath = i.path
            mt = None
            if i.idref is not None:
                mt = self.opf.manifest.type_for_id(i.idref)
            if mt is None:
                mt = guess_type(spath)[0]
            try:
                self.spine.append(Spiny(spath, mime_type=mt))
            except:
                self.log.warn('Missing spine item:', repr(spath))

        cover = self.opf.cover
        if cover and self.ebook_ext in {'lit', 'mobi', 'prc', 'opf', 'fb2',
                'azw', 'azw3'}:
            cfile = os.path.join(self.base, 'calibre_iterator_cover.html')
            rcpath = os.path.relpath(cover, self.base).replace(os.sep, '/')
            chtml = (TITLEPAGE%prepare_string_for_xml(rcpath, True)).encode('utf-8')
            with open(cfile, 'wb') as f:
                f.write(chtml)
            self.spine[0:0] = [Spiny(cfile,
                mime_type='application/xhtml+xml')]
            self.delete_on_exit.append(cfile)

        if self.opf.path_to_html_toc is not None and \
                self.opf.path_to_html_toc not in self.spine:
            try:
                self.spine.append(Spiny(self.opf.path_to_html_toc))
            except:
                import traceback
                traceback.print_exc()

        sizes = [i.character_count for i in self.spine]
        self.pages = [math.ceil(i/float(self.CHARACTERS_PER_PAGE)) for i in sizes]
        for p, s in zip(self.pages, self.spine):
            s.pages = p
        start = 1

        for s in self.spine:
            s.start_page = start
            start += s.pages
            s.max_page = s.start_page + s.pages - 1
        self.toc = self.opf.toc
        if read_anchor_map:
            create_indexing_data(self.spine, self.toc)

        self.read_bookmarks()

        return self

    def __exit__(self, *args):
        self._tdir.__exit__(*args)
        for x in self.delete_on_exit:
            try:
                os.remove(x)
            except:
                pass
src/calibre/ebooks/oeb/iterator/bookmarks.py (new file, 105 lines)
@@ -0,0 +1,105 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import os
from io import BytesIO

from calibre.utils.zipfile import safe_replace

BM_FIELD_SEP = u'*|!|?|*'
BM_LEGACY_ESC = u'esc-text-%&*#%(){}ads19-end-esc'

class BookmarksMixin(object):

    def parse_bookmarks(self, raw):
        for line in raw.splitlines():
            bm = None
            if line.count('^') > 0:
                tokens = line.rpartition('^')
                title, ref = tokens[0], tokens[2]
                try:
                    spine, _, pos = ref.partition('#')
                    spine = int(spine.strip())
                except:
                    continue
                bm = {'type':'legacy', 'title':title, 'spine':spine, 'pos':pos}
            elif BM_FIELD_SEP in line:
                try:
                    title, spine, pos = line.strip().split(BM_FIELD_SEP)
                    spine = int(spine)
                except:
                    continue
                # Unescape from serialization
                pos = pos.replace(BM_LEGACY_ESC, u'^')
                # Check for pos being a scroll fraction
                try:
                    pos = float(pos)
                except:
                    pass
                bm = {'type':'cfi', 'title':title, 'pos':pos, 'spine':spine}

            if bm:
                self.bookmarks.append(bm)

    def serialize_bookmarks(self, bookmarks):
        dat = []
        for bm in bookmarks:
            if bm['type'] == 'legacy':
                rec = u'%s^%d#%s'%(bm['title'], bm['spine'], bm['pos'])
            else:
                pos = bm['pos']
                if isinstance(pos, (int, float)):
                    pos = unicode(pos)
                else:
                    pos = pos.replace(u'^', BM_LEGACY_ESC)
                rec = BM_FIELD_SEP.join([bm['title'], unicode(bm['spine']), pos])
            dat.append(rec)
        return (u'\n'.join(dat) +u'\n')

    def read_bookmarks(self):
        self.bookmarks = []
        bmfile = os.path.join(self.base, 'META-INF', 'calibre_bookmarks.txt')
        raw = ''
        if os.path.exists(bmfile):
            with open(bmfile, 'rb') as f:
                raw = f.read()
        else:
            saved = self.config['bookmarks_'+self.pathtoebook]
            if saved:
                raw = saved
        if not isinstance(raw, unicode):
            raw = raw.decode('utf-8')
        self.parse_bookmarks(raw)

    def save_bookmarks(self, bookmarks=None):
        if bookmarks is None:
            bookmarks = self.bookmarks
        dat = self.serialize_bookmarks(bookmarks)
        if os.path.splitext(self.pathtoebook)[1].lower() == '.epub' and \
                os.access(self.pathtoebook, os.R_OK):
            try:
                zf = open(self.pathtoebook, 'r+b')
            except IOError:
                return
            safe_replace(zf, 'META-INF/calibre_bookmarks.txt',
                    BytesIO(dat.encode('utf-8')),
                    add_missing=True)
        else:
            self.config['bookmarks_'+self.pathtoebook] = dat

    def add_bookmark(self, bm):
        self.bookmarks = [x for x in self.bookmarks if x['title'] !=
                bm['title']]
        self.bookmarks.append(bm)
        self.save_bookmarks()

    def set_bookmarks(self, bookmarks):
        self.bookmarks = bookmarks
src/calibre/ebooks/oeb/iterator/spine.py (new file, 120 lines)
@@ -0,0 +1,120 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)
from future_builtins import map

__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import re, os
from functools import partial
from operator import attrgetter
from collections import namedtuple

from calibre import guess_type
from calibre.ebooks.chardet import xml_to_unicode

def character_count(html):
    ''' Return the number of "significant" text characters in a HTML string. '''
    count = 0
    strip_space = re.compile(r'\s+')
    for match in re.finditer(r'>[^<]+<', html):
        count += len(strip_space.sub(' ', match.group()))-2
    return count

def anchor_map(html):
    ''' Return map of all anchor names to their offsets in the html '''
    ans = {}
    for match in re.finditer(
            r'''(?:id|name)\s*=\s*['"]([^'"]+)['"]''', html):
        anchor = match.group(0)
        ans[anchor] = ans.get(anchor, match.start())
    return ans

class SpineItem(unicode):

    def __new__(cls, path, mime_type=None, read_anchor_map=True,
            run_char_count=True):
        ppath = path.partition('#')[0]
        if not os.path.exists(path) and os.path.exists(ppath):
            path = ppath
        obj = super(SpineItem, cls).__new__(cls, path)
        with open(path, 'rb') as f:
            raw = f.read()
        raw, obj.encoding = xml_to_unicode(raw)
        obj.character_count = character_count(raw) if run_char_count else 10000
        obj.anchor_map = anchor_map(raw) if read_anchor_map else {}
        obj.start_page = -1
        obj.pages = -1
        obj.max_page = -1
        obj.index_entries = []
        if mime_type is None:
            mime_type = guess_type(obj)[0]
        obj.mime_type = mime_type
        return obj

class IndexEntry(object):

    def __init__(self, spine, toc_entry, num):
        self.num = num
        self.text = toc_entry.text or _('Unknown')
        self.key = toc_entry.abspath
        self.anchor = self.start_anchor = toc_entry.fragment or None
        try:
            self.spine_pos = spine.index(self.key)
        except ValueError:
            self.spine_pos = -1
        self.anchor_pos = 0
        if self.spine_pos > -1:
            self.anchor_pos = spine[self.spine_pos].anchor_map.get(self.anchor,
                    0)

        self.depth = 0
        p = toc_entry.parent
        while p is not None:
            self.depth += 1
            p = p.parent

        self.sort_key = (self.spine_pos, self.anchor_pos)
        self.spine_count = len(spine)

    def find_end(self, all_entries):
        potential_enders = [i for i in all_entries if
                i.depth <= self.depth and
                (
                    (i.spine_pos == self.spine_pos and i.anchor_pos >
                        self.anchor_pos)
                    or
                    i.spine_pos > self.spine_pos
                )]
        if potential_enders:
            # potential_enders is sorted by (spine_pos, anchor_pos)
            end = potential_enders[0]
            self.end_spine_pos = end.spine_pos
            self.end_anchor = end.anchor
        else:
            self.end_spine_pos = self.spine_count - 1
            self.end_anchor = None

def create_indexing_data(spine, toc):
    if not toc: return
    f = partial(IndexEntry, spine)
    index_entries = list(map(f,
        (t for t in toc.flat() if t is not toc),
        (i-1 for i, t in enumerate(toc.flat()) if t is not toc)
        ))
    index_entries.sort(key=attrgetter('sort_key'))
    [ i.find_end(index_entries) for i in index_entries ]

    ie = namedtuple('IndexEntry', 'entry start_anchor end_anchor')

    for spine_pos, spine_item in enumerate(spine):
        for i in index_entries:
            if i.end_spine_pos < spine_pos or i.spine_pos > spine_pos:
                continue # Does not touch this file
            start = i.anchor if i.spine_pos == spine_pos else None
            end = i.end_anchor if i.spine_pos == spine_pos else None
            spine_item.index_entries.append(ie(i, start, end))
@@ -361,9 +361,11 @@ def parse_html(data, log=None, decoder=None, preprocessor=None,
        # Remove any encoding-specifying <meta/> elements
        for meta in META_XP(data):
            meta.getparent().remove(meta)
-       etree.SubElement(head, XHTML('meta'),
-           attrib={'http-equiv': 'Content-Type',
-               'content': '%s; charset=utf-8' % XHTML_NS})
+       meta = etree.SubElement(head, XHTML('meta'),
+           attrib={'http-equiv': 'Content-Type'})
+       meta.set('content', 'text/html; charset=utf-8') # Ensure content is second
+                                                       # attribute

        # Ensure has a <body/>
        if not xpath(data, '/h:html/h:body'):
            body = xpath(data, '//h:body')
|
|||||||
style = self.flatten_style(rule.style)
|
style = self.flatten_style(rule.style)
|
||||||
self.page_rule.update(style)
|
self.page_rule.update(style)
|
||||||
elif isinstance(rule, CSSFontFaceRule):
|
elif isinstance(rule, CSSFontFaceRule):
|
||||||
self.font_face_rules.append(rule)
|
if rule.style.length > 1:
|
||||||
|
# Ignore the meaningless font face rules generated by the
|
||||||
|
# benighted MS Word that contain only a font-family declaration
|
||||||
|
# and nothing else
|
||||||
|
self.font_face_rules.append(rule)
|
||||||
return results
|
return results
|
||||||
|
|
||||||
def flatten_style(self, cssstyle):
|
def flatten_style(self, cssstyle):
|
||||||
@@ -494,6 +498,9 @@ class Style(object):
    def set(self, prop, val):
        self._style[prop] = val

+   def drop(self, prop):
+       self._style.pop(prop, None)
+
    def _update_cssdict(self, cssdict):
        self._style.update(cssdict)

@@ -167,5 +167,9 @@ class CoverManager(object):
            self.oeb.guide.refs['cover'].href = item.href
            if 'titlepage' in self.oeb.guide.refs:
                self.oeb.guide.refs['titlepage'].href = item.href
+           titem = getattr(self.oeb.toc, 'item_that_refers_to_cover', None)
+           if titem is not None:
+               titem.href = item.href

@@ -102,12 +102,13 @@ def FontMapper(sbase=None, dbase=None, dkey=None):

class CSSFlattener(object):
    def __init__(self, fbase=None, fkey=None, lineh=None, unfloat=False,
-           untable=False, page_break_on_body=False):
+           untable=False, page_break_on_body=False, specializer=None):
        self.fbase = fbase
        self.fkey = fkey
        self.lineh = lineh
        self.unfloat = unfloat
        self.untable = untable
+       self.specializer = specializer
        self.page_break_on_body = page_break_on_body

    @classmethod
@@ -377,7 +378,7 @@ class CSSFlattener(object):
        for child in node:
            self.flatten_node(child, stylizer, names, styles, psize, item_id, left)

-   def flatten_head(self, item, stylizer, href):
+   def flatten_head(self, item, href, global_href):
        html = item.data
        head = html.find(XHTML('head'))
        for node in head:
@@ -389,40 +390,64 @@ class CSSFlattener(object):
                    and node.get('type', CSS_MIME) in OEB_STYLES:
                head.remove(node)
        href = item.relhref(href)
-       etree.SubElement(head, XHTML('link'),
+       l = etree.SubElement(head, XHTML('link'),
            rel='stylesheet', type=CSS_MIME, href=href)
-       stylizer.page_rule['margin-top'] = '%fpt'%\
-               float(self.context.margin_top)
-       stylizer.page_rule['margin-bottom'] = '%fpt'%\
-               float(self.context.margin_bottom)
-       items = stylizer.page_rule.items()
-       items.sort()
-       css = '; '.join("%s: %s" % (key, val) for key, val in items)
-       style = etree.SubElement(head, XHTML('style'), type=CSS_MIME)
-       style.text = "\n\t\t@page { %s; }" % css
-       rules = [r.cssText for r in stylizer.font_face_rules]
-       raw = '\n\n'.join(rules)
-       # Make URLs referring to fonts relative to this item
-       sheet = cssutils.parseString(raw, validate=False)
-       cssutils.replaceUrls(sheet, item.relhref, ignoreImportRules=True)
-       style.text += '\n' + sheet.cssText
+       l.tail='\n'
+       href = item.relhref(global_href)
+       l = etree.SubElement(head, XHTML('link'),
+           rel='stylesheet', type=CSS_MIME, href=href)
+       l.tail = '\n'

    def replace_css(self, css):
        manifest = self.oeb.manifest
-       id, href = manifest.generate('css', 'stylesheet.css')
        for item in manifest.values():
            if item.media_type in OEB_STYLES:
                manifest.remove(item)
-       item = manifest.add(id, href, CSS_MIME, data=css)
+       id, href = manifest.generate('css', 'stylesheet.css')
+       item = manifest.add(id, href, CSS_MIME, data=cssutils.parseString(css,
+           validate=False))
+       self.oeb.manifest.main_stylesheet = item
        return href

+   def collect_global_css(self):
+       global_css = defaultdict(list)
+       for item in self.oeb.spine:
+           stylizer = self.stylizers[item]
+           stylizer.page_rule['margin-top'] = '%gpt'%\
+                   float(self.context.margin_top)
+           stylizer.page_rule['margin-bottom'] = '%gpt'%\
+                   float(self.context.margin_bottom)
+           items = stylizer.page_rule.items()
+           items.sort()
+           css = ';\n'.join("%s: %s" % (key, val) for key, val in items)
+           css = '@page {\n%s\n}\n'%css
+           rules = [r.cssText for r in stylizer.font_face_rules]
+           raw = '\n\n'.join(rules)
+           css += '\n\n' + raw
+           global_css[css].append(item)
+
+       gc_map = {}
+       manifest = self.oeb.manifest
+       for css in global_css:
+           id_, href = manifest.generate('page_css', 'page_styles.css')
+           manifest.add(id_, href, CSS_MIME, data=cssutils.parseString(css,
+               validate=False))
+           gc_map[css] = href
+
+       ans = {}
+       for css, items in global_css.iteritems():
+           for item in items:
+               ans[item] = gc_map[css]
+       return ans
+
    def flatten_spine(self):
        names = defaultdict(int)
        styles = {}
        for item in self.oeb.spine:
            html = item.data
            stylizer = self.stylizers[item]
+           if self.specializer is not None:
+               self.specializer(item, stylizer)
            body = html.find(XHTML('body'))
            fsize = self.context.dest.fbase
            self.flatten_node(body, stylizer, names, styles, fsize, item.id)
@@ -430,7 +455,8 @@ class CSSFlattener(object):
        items.sort()
        css = ''.join(".%s {\n%s;\n}\n\n" % (key, val) for key, val in items)
        href = self.replace_css(css)
+       global_css = self.collect_global_css()
        for item in self.oeb.spine:
            stylizer = self.stylizers[item]
-           self.flatten_head(item, stylizer, href)
+           self.flatten_head(item, href, global_css[item])

@@ -9,7 +9,7 @@ __docformat__ = 'restructuredtext en'

from collections import Counter

-from calibre.ebooks.oeb.base import OEB_STYLES, barename, XPath
+from calibre.ebooks.oeb.base import barename, XPath

class RemoveAdobeMargins(object):
    '''
@@ -51,10 +51,7 @@ class RemoveFakeMargins(object):
        self.stats = {}
        self.selector_map = {}

-       for item in self.oeb.manifest:
-           if item.media_type.lower() in OEB_STYLES:
-               stylesheet = item
-               break
+       stylesheet = self.oeb.manifest.main_stylesheet
        if stylesheet is None:
            return

@@ -7,11 +7,11 @@ __license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

-import sys, os, shlex, subprocess
+import sys, os, shlex, subprocess, shutil

from calibre import prints, as_unicode, walk
from calibre.constants import iswindows, __appname__
-from calibre.ptempfile import TemporaryDirectory
+from calibre.ptempfile import TemporaryDirectory, TemporaryFile
from calibre.libunzip import extract as zipextract
from calibre.utils.zipfile import ZipFile, ZIP_DEFLATED, ZIP_STORED
from calibre.utils.ipc.simple_worker import WorkerError
@@ -108,21 +108,32 @@ def tweak(ebook_file):
        # The question was answered with No
        return

-   ed = os.environ.get('EDITOR', None)
+   ed = os.environ.get('EDITOR', 'dummy')
+   cmd = shlex.split(ed)
+   isvim = bool([x for x in cmd[0].split('/') if x.endswith('vim')])
+
    proceed = False
-   if ed is None:
-       prints('Book extracted to', tdir)
+   prints('Book extracted to', tdir)
+
+   if not isvim:
        prints('Make your tweaks and once you are done,', __appname__,
                'will rebuild', ebook_file, 'from', tdir)
        print()
        proceed = ask_cli_question('Rebuild ' + ebook_file + '?')
    else:
-       cmd = shlex.split(ed)
-       try:
-           subprocess.check_call(cmd + [tdir])
-       except:
-           prints(ed, 'failed, aborting...')
-           raise SystemExit(1)
+       base = os.path.basename(ebook_file)
+       with TemporaryFile(base+'.zip') as zipf:
+           with ZipFile(zipf, 'w') as zf:
+               zf.add_dir(tdir)
+           try:
+               subprocess.check_call(cmd + [zipf])
+           except:
+               prints(ed, 'failed, aborting...')
+               raise SystemExit(1)
+           with ZipFile(zipf, 'r') as zf:
+               shutil.rmtree(tdir)
+               os.mkdir(tdir)
+               zf.extractall(path=tdir)
        proceed = True

    if proceed:
@@ -137,8 +137,9 @@ def _config(): # {{{
    c.add_opt('LRF_ebook_viewer_options', default=None,
            help=_('Options for the LRF ebook viewer'))
    c.add_opt('internally_viewed_formats', default=['LRF', 'EPUB', 'LIT',
-       'MOBI', 'PRC', 'AZW', 'HTML', 'FB2', 'PDB', 'RB', 'SNB', 'HTMLZ'],
-       help=_('Formats that are viewed using the internal viewer'))
+       'MOBI', 'PRC', 'POBI', 'AZW', 'AZW3', 'HTML', 'FB2', 'PDB', 'RB',
+       'SNB', 'HTMLZ'], help=_(
+           'Formats that are viewed using the internal viewer'))
    c.add_opt('column_map', default=ALL_COLUMNS,
            help=_('Columns to be displayed in the book list'))
    c.add_opt('autolaunch_server', default=False, help=_('Automatically launch content server on application startup'))
@@ -10,7 +10,7 @@ from functools import partial

from PyQt4.Qt import (QMenu, Qt, QInputDialog, QToolButton, QDialog,
        QDialogButtonBox, QGridLayout, QLabel, QLineEdit, QIcon, QSize,
-       QCoreApplication)
+       QCoreApplication, pyqtSignal)

from calibre import isbytestring, sanitize_file_name_unicode
from calibre.constants import filesystem_encoding, iswindows
@@ -142,6 +142,7 @@ class ChooseLibraryAction(InterfaceAction):
    dont_add_to = frozenset(['context-menu-device'])
    action_add_menu = True
    action_menu_clone_qaction = _('Switch/create library...')
+   restore_view_state = pyqtSignal(object)

    def genesis(self):
        self.base_text = _('%d books')
@@ -206,6 +207,17 @@ class ChooseLibraryAction(InterfaceAction):
            self.maintenance_menu.addAction(ac)

        self.choose_menu.addMenu(self.maintenance_menu)
+       self.view_state_map = {}
+       self.restore_view_state.connect(self._restore_view_state,
+               type=Qt.QueuedConnection)
+
+   @property
+   def preserve_state_on_switch(self):
+       ans = getattr(self, '_preserve_state_on_switch', None)
+       if ans is None:
+           self._preserve_state_on_switch = ans = \
+               self.gui.library_view.preserve_state(require_selected_ids=False)
+       return ans

    def pick_random(self, *args):
        self.gui.iactions['Pick Random Book'].pick_random()
@@ -221,6 +233,13 @@ class ChooseLibraryAction(InterfaceAction):
    def library_changed(self, db):
        self.stats.library_used(db)
        self.build_menus()
+       state = self.view_state_map.get(self.stats.canonicalize_path(
+           db.library_path), None)
+       if state is not None:
+           self.restore_view_state.emit(state)
+
+   def _restore_view_state(self, state):
+       self.preserve_state_on_switch.state = state

    def initialization_complete(self):
        self.library_changed(self.gui.library_view.model().db)
@@ -401,8 +420,11 @@ class ChooseLibraryAction(InterfaceAction):
    def switch_requested(self, location):
        if not self.change_library_allowed():
            return
+       db = self.gui.library_view.model().db
+       current_lib = self.stats.canonicalize_path(db.library_path)
+       self.view_state_map[current_lib] = self.preserve_state_on_switch.state
        loc = location.replace('/', os.sep)
-       exists = self.gui.library_view.model().db.exists_at(loc)
+       exists = db.exists_at(loc)
        if not exists:
            d = MovedDialog(self.stats, location, self.gui)
            ret = d.exec_()
@ -5,70 +5,307 @@ __license__ = 'GPL v3'
|
|||||||
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
|
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||||
__docformat__ = 'restructuredtext en'
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
import os
|
import os, weakref, shutil
|
||||||
|
|
||||||
from calibre.gui2 import error_dialog
|
from PyQt4.Qt import (QDialog, QVBoxLayout, QHBoxLayout, QRadioButton, QFrame,
|
||||||
|
QPushButton, QLabel, QGroupBox, QGridLayout, QIcon, QSize, QTimer)
|
||||||
|
|
||||||
|
from calibre import as_unicode
|
||||||
|
from calibre.constants import isosx
|
||||||
|
from calibre.gui2 import error_dialog, question_dialog, open_local_file
|
||||||
from calibre.gui2.actions import InterfaceAction
|
from calibre.gui2.actions import InterfaceAction
|
||||||
from calibre.gui2.dialogs.tweak_epub import TweakEpub
|
from calibre.ptempfile import (PersistentTemporaryDirectory,
|
||||||
from calibre.utils.config import tweaks
|
PersistentTemporaryFile)
|
||||||
|
from calibre.utils.config import prefs
|
||||||
|
|
||||||
|
class TweakBook(QDialog):
|
||||||
|
|
||||||
|
def __init__(self, parent, book_id, fmts, db):
|
||||||
|
QDialog.__init__(self, parent)
|
||||||
|
self.book_id, self.fmts, self.db_ref = book_id, fmts, weakref.ref(db)
|
||||||
|
self._exploded = None
|
||||||
|
self._cleanup_dirs = []
|
||||||
|
self._cleanup_files = []
|
||||||
|
|
||||||
|
self.setup_ui()
|
||||||
|
self.setWindowTitle(_('Tweak Book') + ' - ' + db.title(book_id,
|
||||||
|
index_is_id=True))
|
||||||
|
|
||||||
|
button = self.fmt_choice_buttons[0]
|
||||||
|
of = prefs['output_format'].upper()
|
||||||
|
for x in self.fmt_choice_buttons:
|
||||||
|
if unicode(x.text()) == of:
|
||||||
|
button = x
|
||||||
|
break
|
||||||
|
button.setChecked(True)
|
||||||
|
|
||||||
|
self.init_state()
|
||||||
|
for button in self.fmt_choice_buttons:
|
||||||
|
button.toggled.connect(self.init_state)
|
||||||
|
|
||||||
|
def init_state(self, *args):
|
||||||
|
self._exploded = None
|
||||||
|
self.preview_button.setEnabled(False)
|
||||||
|
self.rebuild_button.setEnabled(False)
|
||||||
|
self.explode_button.setEnabled(True)
|
||||||
|
|
||||||
|
def setup_ui(self): # {{{
|
||||||
|
self._g = g = QHBoxLayout(self)
|
||||||
|
self.setLayout(g)
|
||||||
|
self._l = l = QVBoxLayout()
|
||||||
|
g.addLayout(l)
|
||||||
|
|
||||||
|
fmts = sorted(x.upper() for x in self.fmts)
|
||||||
|
self.fmt_choice_box = QGroupBox(_('Choose the format to tweak:'), self)
|
||||||
|
self._fl = fl = QHBoxLayout()
|
||||||
|
self.fmt_choice_box.setLayout(self._fl)
|
||||||
|
self.fmt_choice_buttons = [QRadioButton(x, self) for x in fmts]
|
||||||
|
for x in self.fmt_choice_buttons:
|
||||||
|
fl.addWidget(x, stretch=10 if x is self.fmt_choice_buttons[-1] else
|
||||||
|
0)
|
||||||
|
l.addWidget(self.fmt_choice_box)
|
||||||
|
self.fmt_choice_box.setVisible(len(fmts) > 1)
|
||||||
|
|
||||||
|
self.help_label = QLabel(_('''\
|
||||||
|
<h2>About Tweak Book</h2>
|
||||||
|
<p>Tweak Book allows you to fine tune the appearance of an ebook by
|
||||||
|
making small changes to its internals. In order to use Tweak Book,
|
||||||
|
you need to know a little bit about HTML and CSS, technologies that
|
||||||
|
are used in ebooks. Follow the steps:</p>
|
||||||
|
<br>
|
||||||
|
<ol>
|
||||||
|
<li>Click "Explode Book": This will "explode" the book into its
|
||||||
|
individual internal components.<br></li>
|
||||||
|
<li>Right click on any individual file and select "Open with..." to
|
||||||
|
edit it in your favorite text editor.<br></li>
|
||||||
|
<li>When you are done Tweaking: <b>close the file browser window
|
||||||
|
and the editor windows you used to make your tweaks</b>. Then click
|
||||||
|
the "Rebuild Book" button, to update the book in your calibre
|
||||||
|
library.</li>
|
||||||
|
</ol>'''))
|
||||||
|
self.help_label.setWordWrap(True)
|
||||||
|
self._fr = QFrame()
|
||||||
|
self._fr.setFrameShape(QFrame.VLine)
|
||||||
|
g.addWidget(self._fr)
|
||||||
|
g.addWidget(self.help_label)
|
||||||
|
|
||||||
|
self._b = b = QGridLayout()
|
||||||
|
left, top, right, bottom = b.getContentsMargins()
|
||||||
|
top += top
|
||||||
|
b.setContentsMargins(left, top, right, bottom)
|
||||||
|
l.addLayout(b, stretch=10)
|
||||||
|
|
||||||
|
self.explode_button = QPushButton(QIcon(I('wizard.png')), _('&Explode Book'))
|
||||||
|
self.preview_button = QPushButton(QIcon(I('view.png')), _('&Preview Book'))
|
||||||
|
self.cancel_button = QPushButton(QIcon(I('window-close.png')), _('&Cancel'))
|
||||||
|
self.rebuild_button = QPushButton(QIcon(I('exec.png')), _('&Rebuild Book'))
|
||||||
|
|
||||||
|
self.explode_button.setToolTip(
|
||||||
|
_('Explode the book to edit its components'))
|
||||||
|
self.preview_button.setToolTip(
|
||||||
|
_('Preview the result of your tweaks'))
|
||||||
|
self.cancel_button.setToolTip(
|
||||||
|
_('Abort without saving any changes'))
|
||||||
|
self.rebuild_button.setToolTip(
|
||||||
|
_('Save your changes and update the book in the calibre library'))
|
||||||
|
|
||||||
|
a = b.addWidget
|
||||||
|
a(self.explode_button, 0, 0, 1, 1)
|
||||||
|
a(self.preview_button, 0, 1, 1, 1)
|
||||||
|
a(self.cancel_button, 1, 0, 1, 1)
|
||||||
|
a(self.rebuild_button, 1, 1, 1, 1)
|
||||||
|
|
||||||
|
for x in ('explode', 'preview', 'cancel', 'rebuild'):
|
||||||
|
getattr(self, x+'_button').clicked.connect(getattr(self, x))
|
||||||
|
|
||||||
|
self.msg = QLabel('dummy', self)
|
||||||
|
self.msg.setVisible(False)
|
||||||
|
self.msg.setStyleSheet('''
|
||||||
|
QLabel {
|
||||||
|
text-align: center;
|
||||||
|
background-color: white;
|
||||||
|
color: black;
|
||||||
|
border-width: 1px;
|
||||||
|
border-style: solid;
|
||||||
|
border-radius: 20px;
|
||||||
|
font-size: x-large;
|
||||||
|
font-weight: bold;
|
||||||
|
}
|
||||||
|
''')
|
||||||
|
|
||||||
|
self.resize(self.sizeHint() + QSize(40, 10))
|
||||||
|
# }}}
|
||||||
|
|
||||||
|
def show_msg(self, msg):
|
||||||
|
self.msg.setText(msg)
|
||||||
|
self.msg.resize(self.size() - QSize(50, 25))
|
||||||
|
self.msg.move((self.width() - self.msg.width())//2,
|
||||||
|
(self.height() - self.msg.height())//2)
|
||||||
|
self.msg.setVisible(True)
|
||||||
|
|
||||||
|
def hide_msg(self):
|
||||||
|
self.msg.setVisible(False)
|
||||||
|
|
||||||
|
def explode(self):
|
||||||
|
self.show_msg(_('Exploding, please wait...'))
|
||||||
|
QTimer.singleShot(5, self.do_explode)
|
||||||
|
|
||||||
|
def ask_question(self, msg):
|
||||||
|
return question_dialog(self, _('Are you sure?'), msg)
|
||||||
|
|
||||||
|
def do_explode(self):
|
||||||
|
from calibre.ebooks.tweak import get_tools, Error, WorkerError
|
||||||
|
tdir = PersistentTemporaryDirectory('_tweak_explode')
|
||||||
|
self._cleanup_dirs.append(tdir)
|
||||||
|
det_msg = None
|
||||||
|
try:
|
||||||
|
src = self.db.format(self.book_id, self.current_format,
|
||||||
|
index_is_id=True, as_path=True)
|
||||||
|
self._cleanup_files.append(src)
|
||||||
|
exploder = get_tools(self.current_format)[0]
|
||||||
|
opf = exploder(src, tdir, question=self.ask_question)
|
||||||
|
except WorkerError as e:
|
||||||
|
det_msg = e.orig_tb
|
||||||
|
except Error as e:
|
||||||
|
return error_dialog(self, _('Failed to unpack'),
|
||||||
|
(_('Could not explode the %s file.')%self.current_format) + ' '
|
||||||
|
+ as_unicode(e), show=True)
|
||||||
|
except:
|
||||||
|
import traceback
|
||||||
|
det_msg = traceback.format_exc()
|
||||||
|
finally:
|
||||||
|
self.hide_msg()
|
||||||
|
|
||||||
|
if det_msg is not None:
|
||||||
|
return error_dialog(self, _('Failed to unpack'),
|
||||||
|
_('Could not explode the %s file. Click "Show Details" for '
|
||||||
|
'more information.')%self.current_format, det_msg=det_msg,
|
||||||
|
show=True)
|
||||||
|
|
||||||
|
if opf is None:
|
||||||
|
# The question was answered with No
|
||||||
|
return
|
||||||
|
|
||||||
|
self._exploded = tdir
|
||||||
|
self.explode_button.setEnabled(False)
|
||||||
|
self.preview_button.setEnabled(True)
|
||||||
|
self.rebuild_button.setEnabled(True)
|
||||||
|
open_local_file(tdir)
|
||||||
|
|
||||||
|
def rebuild_it(self):
|
||||||
|
from calibre.ebooks.tweak import get_tools, WorkerError
|
||||||
|
src_dir = self._exploded
|
||||||
|
det_msg = None
|
||||||
|
of = PersistentTemporaryFile('_tweak_rebuild.'+self.current_format.lower())
|
||||||
|
of.close()
|
||||||
|
of = of.name
|
||||||
|
self._cleanup_files.append(of)
|
||||||
|
try:
|
||||||
|
rebuilder = get_tools(self.current_format)[1]
|
||||||
|
rebuilder(src_dir, of)
|
||||||
|
except WorkerError as e:
|
||||||
|
det_msg = e.orig_tb
|
||||||
|
except:
|
||||||
|
import traceback
|
||||||
|
det_msg = traceback.format_exc()
|
||||||
|
finally:
|
||||||
|
self.hide_msg()
|
||||||
|
|
||||||
|
if det_msg is not None:
|
||||||
|
error_dialog(self, _('Failed to rebuild file'),
|
||||||
|
_('Failed to rebuild %s. For more information, click '
|
||||||
|
'"Show details".')%self.current_format,
|
||||||
|
det_msg=det_msg, show=True)
|
||||||
|
return None
|
||||||
|
|
||||||
|
return of
|
||||||
|
|
||||||
|
def preview(self):
|
||||||
|
self.show_msg(_('Rebuilding, please wait...'))
|
||||||
|
QTimer.singleShot(5, self.do_preview)
|
||||||
|
|
||||||
|
def do_preview(self):
|
||||||
|
rebuilt = self.rebuild_it()
|
||||||
|
if rebuilt is not None:
|
||||||
|
self.parent().iactions['View']._view_file(rebuilt)
|
||||||
|
|
||||||
|
def rebuild(self):
|
||||||
|
self.show_msg(_('Rebuilding, please wait...'))
|
||||||
|
QTimer.singleShot(5, self.do_rebuild)
|
||||||
|
|
||||||
|
def do_rebuild(self):
|
||||||
|
rebuilt = self.rebuild_it()
|
||||||
|
if rebuilt is not None:
|
||||||
|
fmt = os.path.splitext(rebuilt)[1][1:].upper()
|
||||||
|
with open(rebuilt, 'rb') as f:
|
||||||
|
self.db.add_format(self.book_id, fmt, f, index_is_id=True)
|
||||||
|
self.accept()
|
||||||
|
|
||||||
|
def cancel(self):
|
||||||
|
self.reject()
|
||||||
|
|
||||||
|
def cleanup(self):
|
||||||
|
if isosx and self._exploded:
|
||||||
|
try:
|
||||||
|
import appscript
|
||||||
|
self.finder = appscript.app('Finder')
|
||||||
|
self.finder.Finder_windows[os.path.basename(self._exploded)].close()
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
for f in self._cleanup_files:
|
||||||
|
try:
|
||||||
|
os.remove(f)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
for d in self._cleanup_dirs:
|
||||||
|
try:
|
||||||
|
shutil.rmtree(d)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
@property
|
||||||
|
def db(self):
|
||||||
|
return self.db_ref()
|
||||||
|
|
||||||
|
@property
|
||||||
|
def current_format(self):
|
||||||
|
for b in self.fmt_choice_buttons:
|
||||||
|
if b.isChecked():
|
||||||
|
return unicode(b.text())
|
||||||
|
|
||||||
class TweakEpubAction(InterfaceAction):
|
class TweakEpubAction(InterfaceAction):
|
||||||
|
|
||||||
name = 'Tweak ePub'
|
name = 'Tweak ePub'
|
||||||
action_spec = (_('Tweak Book'), 'trim.png',
|
action_spec = (_('Tweak Book'), 'trim.png',
|
||||||
_('Make small changes to ePub or HTMLZ format books'),
|
_('Make small changes to ePub, HTMLZ or AZW3 format books'),
|
||||||
_('T'))
|
_('T'))
|
||||||
dont_add_to = frozenset(['context-menu-device'])
|
dont_add_to = frozenset(['context-menu-device'])
|
||||||
action_type = 'current'
|
action_type = 'current'
|
||||||
|
|
||||||
def genesis(self):
|
def genesis(self):
|
||||||
self.qaction.triggered.connect(self.edit_epub_in_situ)
|
self.qaction.triggered.connect(self.tweak_book)
|
||||||
|
|
||||||
def edit_epub_in_situ(self, *args):
|
def tweak_book(self):
|
||||||
row = self.gui.library_view.currentIndex()
|
row = self.gui.library_view.currentIndex()
|
||||||
if not row.isValid():
|
if not row.isValid():
|
||||||
return error_dialog(self.gui, _('Cannot tweak Book'),
|
return error_dialog(self.gui, _('Cannot tweak Book'),
|
||||||
_('No book selected'), show=True)
|
_('No book selected'), show=True)
|
||||||
|
|
||||||
book_id = self.gui.library_view.model().id(row)
|
book_id = self.gui.library_view.model().id(row)
|
||||||
|
db = self.gui.library_view.model().db
|
||||||
# Confirm 'EPUB' in formats
|
fmts = db.formats(book_id, index_is_id=True) or ''
|
||||||
try:
|
fmts = [x.lower().strip() for x in fmts.split(',')]
|
||||||
path_to_epub = self.gui.library_view.model().db.format(
|
tweakable_fmts = set(fmts).intersection({'epub', 'htmlz', 'azw3',
|
||||||
book_id, 'EPUB', index_is_id=True, as_path=True)
|
'mobi', 'azw'})
|
||||||
except:
|
if not tweakable_fmts:
|
||||||
path_to_epub = None
|
return error_dialog(self.gui, _('Cannot Tweak Book'),
|
||||||
|
_('The book must be in ePub, HTMLZ or AZW3 formats to tweak.'
|
||||||
# Confirm 'HTMLZ' in formats
|
'\n\nFirst convert the book to one of these formats.'),
|
||||||
try:
|
|
||||||
path_to_htmlz = self.gui.library_view.model().db.format(
|
|
||||||
book_id, 'HTMLZ', index_is_id=True, as_path=True)
|
|
||||||
except:
|
|
||||||
path_to_htmlz = None
|
|
||||||
|
|
||||||
if not path_to_epub and not path_to_htmlz:
|
|
||||||
return error_dialog(self.gui, _('Cannot tweak Book'),
|
|
||||||
_('The book must be in ePub or HTMLZ format to tweak.'
|
|
||||||
'\n\nFirst convert the book to ePub or HTMLZ.'),
|
|
||||||
show=True)
|
show=True)
|
||||||
|
dlg = TweakBook(self.gui, book_id, tweakable_fmts, db)
|
||||||
# Launch modal dialog waiting for user to tweak or cancel
|
dlg.exec_()
|
||||||
if tweaks['tweak_book_prefer'] == 'htmlz':
|
|
||||||
path_to_book = path_to_htmlz or path_to_epub
|
|
||||||
else:
|
|
||||||
path_to_book = path_to_epub or path_to_htmlz
|
|
||||||
|
|
||||||
dlg = TweakEpub(self.gui, path_to_book)
|
|
||||||
if dlg.exec_() == dlg.Accepted:
|
|
||||||
self.update_db(book_id, dlg._output)
|
|
||||||
dlg.cleanup()
|
dlg.cleanup()
|
||||||
os.remove(path_to_book)
|
|
||||||
|
|
||||||
def update_db(self, book_id, rebuilt):
|
|
||||||
'''
|
|
||||||
Update the calibre db with the tweaked epub
|
|
||||||
'''
|
|
||||||
fmt = os.path.splitext(rebuilt)[1][1:].upper()
|
|
||||||
self.gui.library_view.model().db.add_format(book_id, fmt,
|
|
||||||
open(rebuilt, 'rb'), index_is_id=True)
|
|
||||||
|
|
||||||
|
@@ -161,8 +161,12 @@ class EditorWidget(QWebView): # {{{
self.page().setContentEditable(True)

def clear_text(self, *args):
+ us = self.page().undoStack()
+ us.beginMacro('clear all text')
self.action_select_all.trigger()
- self.action_cut.trigger()
+ self.action_remove_format.trigger()
+ self.exec_command('delete')
+ us.endMacro()

def link_clicked(self, url):
open_url(url)

@@ -627,4 +631,6 @@ if __name__ == '__main__':
w = Editor()
w.resize(800, 600)
w.show()
+ w.html = '<b>testing</b>'
+ app.exec_()
#print w.html

@@ -22,9 +22,9 @@ class PluginWidget(Widget, Ui_Form):
def __init__(self, parent, get_option, get_help, db=None, book_id=None):
Widget.__init__(self, parent,
['prefer_author_sort', 'toc_title',
- 'mobi_ignore_margins', 'mobi_toc_at_start',
+ 'mobi_toc_at_start',
'dont_compress', 'no_inline_toc', 'share_not_sync',
- 'personal_doc']#, 'mobi_navpoints_only_deepest']
+ ]
)
self.db, self.book_id = db, book_id

@@ -6,7 +6,7 @@
<rect>
<x>0</x>
<y>0</y>
- <width>588</width>
+ <width>724</width>
<height>342</height>
</rect>
</property>

@@ -14,7 +14,20 @@
<string>Form</string>
</property>
<layout class="QGridLayout" name="gridLayout">
- <item row="4" column="0" colspan="2">
+ <item row="6" column="0">
+ <spacer name="verticalSpacer_2">
+ <property name="orientation">
+ <enum>Qt::Vertical</enum>
+ </property>
+ <property name="sizeHint" stdset="0">
+ <size>
+ <width>20</width>
+ <height>40</height>
+ </size>
+ </property>
+ </spacer>
+ </item>
+ <item row="3" column="0" colspan="2">
<widget class="QCheckBox" name="opt_prefer_author_sort">
<property name="text">
<string>Use author &sort for author</string>

@@ -31,13 +44,10 @@
</property>
</widget>
</item>
- <item row="1" column="1">
- <widget class="QLineEdit" name="opt_toc_title"/>
- </item>
<item row="5" column="0">
- <widget class="QCheckBox" name="opt_dont_compress">
+ <widget class="QCheckBox" name="opt_share_not_sync">
<property name="text">
- <string>Disable compression of the file contents</string>
+ <string>Enable sharing of book content via Facebook, etc. WARNING: Disables last read syncing</string>
</property>
</widget>
</item>

@@ -55,69 +65,16 @@
</property>
</widget>
</item>
- <item row="3" column="0">
+ <item row="1" column="1">
- <widget class="QCheckBox" name="opt_mobi_ignore_margins">
+ <widget class="QLineEdit" name="opt_toc_title"/>
+ </item>
+ <item row="4" column="0">
+ <widget class="QCheckBox" name="opt_dont_compress">
<property name="text">
- <string>Ignore &margins</string>
+ <string>Disable compression of the file contents</string>
</property>
</widget>
</item>
- <item row="7" column="0" colspan="2">
- <widget class="QGroupBox" name="groupBox">
- <property name="title">
- <string>Kindle options</string>
- </property>
- <layout class="QVBoxLayout" name="verticalLayout">
- <item>
- <layout class="QHBoxLayout" name="horizontalLayout">
- <item>
- <widget class="QLabel" name="label_3">
- <property name="text">
- <string>Personal Doc tag:</string>
- </property>
- </widget>
- </item>
- <item>
- <widget class="QLineEdit" name="opt_personal_doc"/>
- </item>
- </layout>
- </item>
- <item>
- <widget class="QCheckBox" name="opt_share_not_sync">
- <property name="text">
- <string>Enable sharing of book content via Facebook, etc. WARNING: Disables last read syncing</string>
- </property>
- </widget>
- </item>
- <item>
- <spacer name="verticalSpacer">
- <property name="orientation">
- <enum>Qt::Vertical</enum>
- </property>
- <property name="sizeHint" stdset="0">
- <size>
- <width>20</width>
- <height>40</height>
- </size>
- </property>
- </spacer>
- </item>
- </layout>
- </widget>
- </item>
- <item row="8" column="0">
- <spacer name="verticalSpacer_2">
- <property name="orientation">
- <enum>Qt::Vertical</enum>
- </property>
- <property name="sizeHint" stdset="0">
- <size>
- <width>20</width>
- <height>40</height>
- </size>
- </property>
- </spacer>
- </item>
</layout>
</widget>
<resources/>

@@ -126,7 +126,8 @@ class BulkConfig(Config):
def setup_output_formats(self, db, preferred_output_format):
if preferred_output_format:
preferred_output_format = preferred_output_format.lower()
- output_formats = sorted(available_output_formats())
+ output_formats = sorted(available_output_formats(),
+ key=lambda x:{'EPUB':'!A', 'MOBI':'!B'}.get(x.upper(), x))
output_formats.remove('oeb')
preferred_output_format = preferred_output_format if \
preferred_output_format and preferred_output_format \

@@ -242,7 +242,8 @@ class Config(ResizableDialog, Ui_Dialog):
preferred_output_format):
if preferred_output_format:
preferred_output_format = preferred_output_format.lower()
- output_formats = sorted(available_output_formats())
+ output_formats = sorted(available_output_formats(),
+ key=lambda x:{'EPUB':'!A', 'MOBI':'!B'}.get(x.upper(), x))
output_formats.remove('oeb')
input_format, input_formats = get_input_format_for_book(db, book_id,
preferred_input_format)

@@ -417,6 +417,8 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
self.writable_fields.sort()
self.search_field.setMaxVisibleItems(25)
self.destination_field.setMaxVisibleItems(25)
+ self.testgrid.setColumnStretch(1, 1)
+ self.testgrid.setColumnStretch(2, 1)
offset = 10
self.s_r_number_of_books = min(10, len(self.ids))
for i in range(1,self.s_r_number_of_books+1):

@@ -1,130 +0,0 @@
- #!/usr/bin/env python
- # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
- from __future__ import with_statement
-
- __license__ = 'GPL v3'
- __copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
- __docformat__ = 'restructuredtext en'
-
- import os, shutil
- from itertools import repeat, izip
- from calibre.utils.zipfile import ZipFile, ZIP_DEFLATED, ZIP_STORED
-
- from PyQt4.Qt import QDialog
-
- from calibre.constants import isosx
- from calibre.gui2 import open_local_file, error_dialog
- from calibre.gui2.dialogs.tweak_epub_ui import Ui_Dialog
- from calibre.libunzip import extract as zipextract
- from calibre.ptempfile import (PersistentTemporaryDirectory,
- PersistentTemporaryFile)
-
- class TweakEpub(QDialog, Ui_Dialog):
- '''
- Display controls for tweaking ePubs
-
- '''
-
- def __init__(self, parent, epub):
- QDialog.__init__(self, parent)
-
- self._epub = epub
- self._exploded = None
- self._output = None
- self.ishtmlz = epub.lower().endswith('.htmlz')
- self.rebuilt_name = 'rebuilt.' + ('htmlz' if self.ishtmlz else 'epub')
-
- # Run the dialog setup generated from tweak_epub.ui
- self.setupUi(self)
- for x, props in [(self, ['windowTitle']), (self.label, ['text'])]+\
- list(izip([self.cancel_button, self.explode_button,
- self.rebuild_button, self.preview_button],
- repeat(['text', 'statusTip', 'toolTip']))):
- for prop in props:
- val = unicode(getattr(x, prop)())
- val = val.format('HTMLZ' if self.ishtmlz else 'ePub')
- prop = 'set' + prop[0].upper() + prop[1:]
- getattr(x, prop)(val)
-
- self.cancel_button.clicked.connect(self.reject)
- self.explode_button.clicked.connect(self.explode)
- self.rebuild_button.clicked.connect(self.rebuild)
- self.preview_button.clicked.connect(self.preview)
-
- # Position update dialog overlaying top left of app window
- parent_loc = parent.pos()
- self.move(parent_loc.x(),parent_loc.y())
-
- self.gui = parent
- self._preview_files = []
-
- def cleanup(self):
- if isosx:
- try:
- import appscript
- self.finder = appscript.app('Finder')
- self.finder.Finder_windows[os.path.basename(self._exploded)].close()
- except:
- # appscript fails to load on 10.4
- pass
-
- # Delete directory containing exploded ePub
- if self._exploded is not None:
- shutil.rmtree(self._exploded, ignore_errors=True)
- for x in self._preview_files:
- try:
- os.remove(x)
- except:
- pass
-
- def display_exploded(self):
- '''
- Generic subprocess launch of native file browser
- User can use right-click to 'Open with ...'
- '''
- open_local_file(self._exploded)
-
- def explode(self, *args):
- if self._exploded is None:
- self._exploded = PersistentTemporaryDirectory("_exploded", prefix='')
- zipextract(self._epub, self._exploded)
- self.display_exploded()
- self.rebuild_button.setEnabled(True)
- self.explode_button.setEnabled(False)
-
- def do_rebuild(self, src):
- with ZipFile(src, 'w', compression=ZIP_DEFLATED) as zf:
- # Write mimetype
- mt = os.path.join(self._exploded, 'mimetype')
- if os.path.exists(mt):
- zf.write(mt, 'mimetype', compress_type=ZIP_STORED)
- # Write everything else
- exclude_files = ['.DS_Store','mimetype','iTunesMetadata.plist',self.rebuilt_name]
- for root, dirs, files in os.walk(self._exploded):
- for fn in files:
- if fn in exclude_files:
- continue
- absfn = os.path.join(root, fn)
- zfn = os.path.relpath(absfn,
- self._exploded).replace(os.sep, '/')
- zf.write(absfn, zfn)
-
- def preview(self):
- if not self._exploded:
- msg = _('You must first explode the %s before previewing.')
- msg = msg%('HTMLZ' if self.ishtmlz else 'ePub')
- return error_dialog(self, _('Cannot preview'), msg, show=True)
-
- tf = PersistentTemporaryFile('.htmlz' if self.ishtmlz else '.epub')
- tf.close()
- self._preview_files.append(tf.name)
-
- self.do_rebuild(tf.name)
-
- self.gui.iactions['View']._view_file(tf.name)
-
- def rebuild(self, *args):
- self._output = os.path.join(self._exploded, self.rebuilt_name)
- self.do_rebuild(self._output)
- return QDialog.accept(self)

@@ -1,107 +0,0 @@
- <?xml version="1.0" encoding="UTF-8"?>
- <ui version="4.0">
- <class>Dialog</class>
- <widget class="QDialog" name="Dialog">
- <property name="windowModality">
- <enum>Qt::NonModal</enum>
- </property>
- <property name="geometry">
- <rect>
- <x>0</x>
- <y>0</y>
- <width>382</width>
- <height>265</height>
- </rect>
- </property>
- <property name="windowTitle">
- <string>Tweak {0}</string>
- </property>
- <property name="sizeGripEnabled">
- <bool>false</bool>
- </property>
- <property name="modal">
- <bool>false</bool>
- </property>
- <layout class="QGridLayout" name="gridLayout">
- <item row="0" column="0" colspan="2">
- <widget class="QLabel" name="label">
- <property name="text">
- <string><p>Explode the {0} to display contents in a file browser window. To tweak individual files, right-click, then 'Open with...' your editor of choice. When tweaks are complete, close the file browser window <b>and the editor windows you used to edit files in the ePub</b>.</p><p>Rebuild the ePub, updating your calibre library.</p></string>
- </property>
- <property name="wordWrap">
- <bool>true</bool>
- </property>
- </widget>
- </item>
- <item row="1" column="0">
- <widget class="QPushButton" name="explode_button">
- <property name="toolTip">
- <string>Display contents of exploded {0}</string>
- </property>
- <property name="statusTip">
- <string>Display contents of exploded {0}</string>
- </property>
- <property name="text">
- <string>&Explode {0}</string>
- </property>
- <property name="icon">
- <iconset resource="../../../../resources/images.qrc">
- <normaloff>:/images/wizard.png</normaloff>:/images/wizard.png</iconset>
- </property>
- </widget>
- </item>
- <item row="3" column="0">
- <widget class="QPushButton" name="cancel_button">
- <property name="toolTip">
- <string>Discard changes</string>
- </property>
- <property name="statusTip">
- <string>Discard changes</string>
- </property>
- <property name="text">
- <string>&Cancel</string>
- </property>
- <property name="icon">
- <iconset resource="../../../../resources/images.qrc">
- <normaloff>:/images/window-close.png</normaloff>:/images/window-close.png</iconset>
- </property>
- </widget>
- </item>
- <item row="3" column="1">
- <widget class="QPushButton" name="rebuild_button">
- <property name="enabled">
- <bool>false</bool>
- </property>
- <property name="toolTip">
- <string>Rebuild {0} from exploded contents</string>
- </property>
- <property name="statusTip">
- <string>Rebuild {0} from exploded contents</string>
- </property>
- <property name="text">
- <string>&Rebuild {0}</string>
- </property>
- <property name="icon">
- <iconset resource="../../../../resources/images.qrc">
- <normaloff>:/images/exec.png</normaloff>:/images/exec.png</iconset>
- </property>
- </widget>
- </item>
- <item row="1" column="1">
- <widget class="QPushButton" name="preview_button">
- <property name="text">
- <string>&Preview {0}</string>
- </property>
- <property name="icon">
- <iconset resource="../../../../resources/images.qrc">
- <normaloff>:/images/view.png</normaloff>:/images/view.png</iconset>
- </property>
- </widget>
- </item>
- </layout>
- </widget>
- <resources>
- <include location="../../../../resources/images.qrc"/>
- </resources>
- <connections/>
- </ui>
@ -32,8 +32,10 @@ class PreserveViewState(object): # {{{
|
|||||||
and dont affect the scroll position.
|
and dont affect the scroll position.
|
||||||
'''
|
'''
|
||||||
|
|
||||||
def __init__(self, view, preserve_hpos=True, preserve_vpos=True):
|
def __init__(self, view, preserve_hpos=True, preserve_vpos=True,
|
||||||
|
require_selected_ids=True):
|
||||||
self.view = view
|
self.view = view
|
||||||
|
self.require_selected_ids = require_selected_ids
|
||||||
self.selected_ids = set()
|
self.selected_ids = set()
|
||||||
self.current_id = None
|
self.current_id = None
|
||||||
self.preserve_hpos = preserve_hpos
|
self.preserve_hpos = preserve_hpos
|
||||||
@ -51,15 +53,28 @@ class PreserveViewState(object): # {{{
|
|||||||
traceback.print_exc()
|
traceback.print_exc()
|
||||||
|
|
||||||
def __exit__(self, *args):
|
def __exit__(self, *args):
|
||||||
if self.selected_ids:
|
if self.selected_ids or not self.require_selected_ids:
|
||||||
if self.current_id is not None:
|
if self.current_id is not None:
|
||||||
self.view.current_id = self.current_id
|
self.view.current_id = self.current_id
|
||||||
self.view.select_rows(self.selected_ids, using_ids=True,
|
if self.selected_ids:
|
||||||
scroll=False, change_current=self.current_id is None)
|
self.view.select_rows(self.selected_ids, using_ids=True,
|
||||||
|
scroll=False, change_current=self.current_id is None)
|
||||||
if self.preserve_vpos:
|
if self.preserve_vpos:
|
||||||
self.view.verticalScrollBar().setValue(self.vscroll)
|
self.view.verticalScrollBar().setValue(self.vscroll)
|
||||||
if self.preserve_hpos:
|
if self.preserve_hpos:
|
||||||
self.view.horizontalScrollBar().setValue(self.hscroll)
|
self.view.horizontalScrollBar().setValue(self.hscroll)
|
||||||
|
|
||||||
|
@dynamic_property
|
||||||
|
def state(self):
|
||||||
|
def fget(self):
|
||||||
|
self.__enter__()
|
||||||
|
return {x:getattr(self, x) for x in ('selected_ids', 'current_id',
|
||||||
|
'vscroll', 'hscroll')}
|
||||||
|
def fset(self, state):
|
||||||
|
for k, v in state.iteritems(): setattr(self, k, v)
|
||||||
|
self.__exit__()
|
||||||
|
return property(fget=fget, fset=fset)
|
||||||
|
|
||||||
# }}}
|
# }}}
|
||||||
|
|
||||||
class BooksView(QTableView): # {{{
|
class BooksView(QTableView): # {{{
|
||||||
|
@ -357,7 +357,9 @@ class MetadataSingleDialogBase(ResizableDialog):
|
|||||||
old_tags = self.tags.current_val
|
old_tags = self.tags.current_val
|
||||||
tags = mi.tags if mi.tags else []
|
tags = mi.tags if mi.tags else []
|
||||||
if old_tags and merge_tags:
|
if old_tags and merge_tags:
|
||||||
tags += old_tags
|
ltags, lotags = {t.lower() for t in tags}, {t.lower() for t in
|
||||||
|
old_tags}
|
||||||
|
tags = [t for t in tags if t.lower() in ltags-lotags] + old_tags
|
||||||
self.tags.current_val = tags
|
self.tags.current_val = tags
|
||||||
if not mi.is_null('identifiers'):
|
if not mi.is_null('identifiers'):
|
||||||
current = self.identifiers.current_val
|
current = self.identifiers.current_val
|
||||||
@ -463,7 +465,12 @@ class MetadataSingleDialogBase(ResizableDialog):
|
|||||||
ResizableDialog.reject(self)
|
ResizableDialog.reject(self)
|
||||||
|
|
||||||
def save_state(self):
|
def save_state(self):
|
||||||
gprefs['metasingle_window_geometry3'] = bytearray(self.saveGeometry())
|
try:
|
||||||
|
gprefs['metasingle_window_geometry3'] = bytearray(self.saveGeometry())
|
||||||
|
except:
|
||||||
|
# Weird failure, see https://bugs.launchpad.net/bugs/995271
|
||||||
|
import traceback
|
||||||
|
traceback.print_exc()
|
||||||
|
|
||||||
# Dialog use methods {{{
|
# Dialog use methods {{{
|
||||||
def start(self, row_list, current_row, view_slot=None,
|
def start(self, row_list, current_row, view_slot=None,
|
||||||
|
@ -955,7 +955,7 @@ class FullFetch(QDialog): # {{{
|
|||||||
# QWebView. Seems to only happen on windows, but keep it for all
|
# QWebView. Seems to only happen on windows, but keep it for all
|
||||||
# platforms just in case.
|
# platforms just in case.
|
||||||
self.identify_widget.comments_view.setMaximumHeight(500)
|
self.identify_widget.comments_view.setMaximumHeight(500)
|
||||||
self.resize(850, 550)
|
self.resize(850, 600)
|
||||||
|
|
||||||
self.finished.connect(self.cleanup)
|
self.finished.connect(self.cleanup)
|
||||||
|
|
||||||
@ -1034,7 +1034,7 @@ class CoverFetch(QDialog): # {{{
|
|||||||
self.covers_widget.chosen.connect(self.accept)
|
self.covers_widget.chosen.connect(self.accept)
|
||||||
l.addWidget(self.covers_widget)
|
l.addWidget(self.covers_widget)
|
||||||
|
|
||||||
self.resize(850, 550)
|
self.resize(850, 600)
|
||||||
|
|
||||||
self.finished.connect(self.cleanup)
|
self.finished.connect(self.cleanup)
|
||||||
|
|
||||||
|
@ -10,7 +10,8 @@ __docformat__ = 'restructuredtext en'
|
|||||||
from PyQt4.Qt import (QWidget, QDialog, QLabel, QGridLayout, QComboBox, QSize,
|
from PyQt4.Qt import (QWidget, QDialog, QLabel, QGridLayout, QComboBox, QSize,
|
||||||
QLineEdit, QIntValidator, QDoubleValidator, QFrame, QColor, Qt, QIcon,
|
QLineEdit, QIntValidator, QDoubleValidator, QFrame, QColor, Qt, QIcon,
|
||||||
QScrollArea, QPushButton, QVBoxLayout, QDialogButtonBox, QToolButton,
|
QScrollArea, QPushButton, QVBoxLayout, QDialogButtonBox, QToolButton,
|
||||||
QListView, QAbstractListModel, pyqtSignal, QSizePolicy, QSpacerItem)
|
QListView, QAbstractListModel, pyqtSignal, QSizePolicy, QSpacerItem,
|
||||||
|
QApplication)
|
||||||
|
|
||||||
from calibre import prepare_string_for_xml
|
from calibre import prepare_string_for_xml
|
||||||
from calibre.utils.icu import sort_key
|
from calibre.utils.icu import sort_key
|
||||||
@ -259,33 +260,36 @@ class RuleEditor(QDialog): # {{{
|
|||||||
l.addWidget(l3, 2, 2)
|
l.addWidget(l3, 2, 2)
|
||||||
|
|
||||||
self.color_box = QComboBox(self)
|
self.color_box = QComboBox(self)
|
||||||
|
self.color_label = QLabel('Sample text Sample text')
|
||||||
|
self.color_label.setTextFormat(Qt.RichText)
|
||||||
l.addWidget(self.color_box, 2, 3)
|
l.addWidget(self.color_box, 2, 3)
|
||||||
l.addItem(QSpacerItem(10, 10, QSizePolicy.Expanding), 2, 4)
|
l.addWidget(self.color_label, 2, 4)
|
||||||
|
l.addItem(QSpacerItem(10, 10, QSizePolicy.Expanding), 2, 5)
|
||||||
|
|
||||||
self.l4 = l4 = QLabel(
|
self.l4 = l4 = QLabel(
|
||||||
_('Only if the following conditions are all satisfied:'))
|
_('Only if the following conditions are all satisfied:'))
|
||||||
l.addWidget(l4, 3, 0, 1, 5)
|
l.addWidget(l4, 3, 0, 1, 6)
|
||||||
|
|
||||||
self.scroll_area = sa = QScrollArea(self)
|
self.scroll_area = sa = QScrollArea(self)
|
||||||
sa.setMinimumHeight(300)
|
sa.setMinimumHeight(300)
|
||||||
sa.setMinimumWidth(950)
|
sa.setMinimumWidth(950)
|
||||||
sa.setWidgetResizable(True)
|
sa.setWidgetResizable(True)
|
||||||
l.addWidget(sa, 4, 0, 1, 5)
|
l.addWidget(sa, 4, 0, 1, 6)
|
||||||
|
|
||||||
self.add_button = b = QPushButton(QIcon(I('plus.png')),
|
self.add_button = b = QPushButton(QIcon(I('plus.png')),
|
||||||
_('Add another condition'))
|
_('Add another condition'))
|
||||||
l.addWidget(b, 5, 0, 1, 5)
|
l.addWidget(b, 5, 0, 1, 6)
|
||||||
b.clicked.connect(self.add_blank_condition)
|
b.clicked.connect(self.add_blank_condition)
|
||||||
|
|
||||||
self.l5 = l5 = QLabel(_('You can disable a condition by'
|
self.l5 = l5 = QLabel(_('You can disable a condition by'
|
||||||
' blanking all of its boxes'))
|
' blanking all of its boxes'))
|
||||||
l.addWidget(l5, 6, 0, 1, 5)
|
l.addWidget(l5, 6, 0, 1, 6)
|
||||||
|
|
||||||
self.bb = bb = QDialogButtonBox(
|
self.bb = bb = QDialogButtonBox(
|
||||||
QDialogButtonBox.Ok|QDialogButtonBox.Cancel)
|
QDialogButtonBox.Ok|QDialogButtonBox.Cancel)
|
||||||
bb.accepted.connect(self.accept)
|
bb.accepted.connect(self.accept)
|
||||||
bb.rejected.connect(self.reject)
|
bb.rejected.connect(self.reject)
|
||||||
l.addWidget(bb, 7, 0, 1, 5)
|
l.addWidget(bb, 7, 0, 1, 6)
|
||||||
|
|
||||||
self.conditions_widget = QWidget(self)
|
self.conditions_widget = QWidget(self)
|
||||||
sa.setWidget(self.conditions_widget)
|
sa.setWidget(self.conditions_widget)
|
||||||
@ -308,8 +312,21 @@ class RuleEditor(QDialog): # {{{
|
|||||||
self.color_box.addItems(QColor.colorNames())
|
self.color_box.addItems(QColor.colorNames())
|
||||||
self.color_box.setCurrentIndex(0)
|
self.color_box.setCurrentIndex(0)
|
||||||
|
|
||||||
|
self.update_color_label()
|
||||||
|
self.color_box.currentIndexChanged.connect(self.update_color_label)
|
||||||
self.resize(self.sizeHint())
|
self.resize(self.sizeHint())
|
||||||
|
|
||||||
|
def update_color_label(self):
|
||||||
|
pal = QApplication.palette()
|
||||||
|
bg1 = unicode(pal.color(pal.Base).name())
|
||||||
|
bg2 = unicode(pal.color(pal.AlternateBase).name())
|
||||||
|
c = unicode(self.color_box.currentText())
|
||||||
|
self.color_label.setText('''
|
||||||
|
<span style="color: {c}; background-color: {bg1}"> {st} </span>
|
||||||
|
<span style="color: {c}; background-color: {bg2}"> {st} </span>
|
||||||
|
'''.format(c=c, bg1=bg1, bg2=bg2, st=_('Sample Text')))
|
||||||
|
|
||||||
|
|
||||||
def add_blank_condition(self):
|
def add_blank_condition(self):
|
||||||
c = ConditionEditor(self.fm, parent=self.conditions_widget)
|
c = ConditionEditor(self.fm, parent=self.conditions_widget)
|
||||||
self.conditions.append(c)
|
self.conditions.append(c)
|
||||||
@ -610,14 +627,13 @@ class EditRules(QWidget): # {{{
|
|||||||
# }}}
|
# }}}
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
from PyQt4.Qt import QApplication
|
|
||||||
app = QApplication([])
|
app = QApplication([])
|
||||||
|
|
||||||
from calibre.library import db
|
from calibre.library import db
|
||||||
|
|
||||||
db = db()
|
db = db()
|
||||||
|
|
||||||
if False:
|
if True:
|
||||||
d = RuleEditor(db.field_metadata)
|
d = RuleEditor(db.field_metadata)
|
||||||
d.add_blank_condition()
|
d.add_blank_condition()
|
||||||
d.exec_()
|
d.exec_()
|
||||||
|
@ -332,7 +332,7 @@ class Preferences(QMainWindow):
|
|||||||
' Please restart calibre as soon as possible.')
|
' Please restart calibre as soon as possible.')
|
||||||
if rc:
|
if rc:
|
||||||
msg = _('The changes you have made require calibre be '
|
msg = _('The changes you have made require calibre be '
|
||||||
'restarted immediately. You will not be allowed '
|
'restarted immediately. You will not be allowed to '
|
||||||
'set any more preferences, until you restart.')
|
'set any more preferences, until you restart.')
|
||||||
|
|
||||||
|
|
||||||
|
@ -24,9 +24,9 @@ from calibre.utils.config import prefs, dynamic
|
|||||||
from calibre.utils.ipc.server import Server
|
from calibre.utils.ipc.server import Server
|
||||||
from calibre.library.database2 import LibraryDatabase2
|
from calibre.library.database2 import LibraryDatabase2
|
||||||
from calibre.customize.ui import interface_actions, available_store_plugins
|
from calibre.customize.ui import interface_actions, available_store_plugins
|
||||||
from calibre.gui2 import error_dialog, GetMetadata, open_url, \
|
from calibre.gui2 import (error_dialog, GetMetadata, open_url,
|
||||||
gprefs, max_available_height, config, info_dialog, Dispatcher, \
|
gprefs, max_available_height, config, info_dialog, Dispatcher,
|
||||||
question_dialog
|
question_dialog, warning_dialog)
|
||||||
from calibre.gui2.cover_flow import CoverFlowMixin
|
from calibre.gui2.cover_flow import CoverFlowMixin
|
||||||
from calibre.gui2.widgets import ProgressIndicator
|
from calibre.gui2.widgets import ProgressIndicator
|
||||||
from calibre.gui2.update import UpdateMixin
|
from calibre.gui2.update import UpdateMixin
|
||||||
@ -653,6 +653,23 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
|
|||||||
d.show()
|
d.show()
|
||||||
self._modeless_dialogs.append(d)
|
self._modeless_dialogs.append(d)
|
||||||
return
|
return
|
||||||
|
|
||||||
|
if 'calibre.ebooks.conversion.ConversionUserFeedBack:' in job.details:
|
||||||
|
if not minz:
|
||||||
|
import json
|
||||||
|
payload = job.details.rpartition(
|
||||||
|
'calibre.ebooks.conversion.ConversionUserFeedBack:')[-1]
|
||||||
|
payload = json.loads('{' + payload.partition('{')[-1])
|
||||||
|
d = {'info':info_dialog, 'warn':warning_dialog,
|
||||||
|
'error':error_dialog}.get(payload['level'],
|
||||||
|
error_dialog)
|
||||||
|
d = d(self, payload['title'],
|
||||||
|
'<p>%s</p>'%payload['msg'],
|
||||||
|
det_msg=payload['det_msg'])
|
||||||
|
d.setModal(False)
|
||||||
|
d.show()
|
||||||
|
self._modeless_dialogs.append(d)
|
||||||
|
return
|
||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
if job.killed:
|
if job.killed:
|
||||||
|
@ -9,7 +9,7 @@ import mechanize
|
|||||||
|
|
||||||
from calibre.constants import (__appname__, __version__, iswindows, isosx,
|
from calibre.constants import (__appname__, __version__, iswindows, isosx,
|
||||||
isportable)
|
isportable)
|
||||||
from calibre import browser
|
from calibre import browser, prints, as_unicode
|
||||||
from calibre.utils.config import prefs
|
from calibre.utils.config import prefs
|
||||||
from calibre.gui2 import config, dynamic, open_url
|
from calibre.gui2 import config, dynamic, open_url
|
||||||
from calibre.gui2.dialogs.plugin_updater import get_plugin_updates_available
|
from calibre.gui2.dialogs.plugin_updater import get_plugin_updates_available
|
||||||
@ -45,14 +45,14 @@ class CheckForUpdates(QThread):
|
|||||||
version = get_newest_version()
|
version = get_newest_version()
|
||||||
if version and version != __version__ and len(version) < 10:
|
if version and version != __version__ and len(version) < 10:
|
||||||
calibre_update_version = version
|
calibre_update_version = version
|
||||||
except:
|
except Exception as e:
|
||||||
traceback.print_exc()
|
prints('Failed to check for calibre update:', as_unicode(e))
|
||||||
try:
|
try:
|
||||||
update_plugins = get_plugin_updates_available()
|
update_plugins = get_plugin_updates_available()
|
||||||
if update_plugins is not None:
|
if update_plugins is not None:
|
||||||
plugins_update_found = len(update_plugins)
|
plugins_update_found = len(update_plugins)
|
||||||
except:
|
except Exception as e:
|
||||||
traceback.print_exc()
|
prints('Failed to check for plugin update:', as_unicode(e))
|
||||||
if (calibre_update_version != NO_CALIBRE_UPDATE or
|
if (calibre_update_version != NO_CALIBRE_UPDATE or
|
||||||
plugins_update_found > 0):
|
plugins_update_found > 0):
|
||||||
self.update_found.emit('%s%s%d'%(calibre_update_version,
|
self.update_found.emit('%s%s%d'%(calibre_update_version,
|
||||||
|
@ -11,6 +11,7 @@ import os, zipfile
|
|||||||
|
|
||||||
import calibre
|
import calibre
|
||||||
from calibre.utils.localization import lang_as_iso639_1
|
from calibre.utils.localization import lang_as_iso639_1
|
||||||
|
from calibre.utils.resources import compiled_coffeescript
|
||||||
|
|
||||||
class JavaScriptLoader(object):
|
class JavaScriptLoader(object):
|
||||||
|
|
||||||
@ -27,7 +28,7 @@ class JavaScriptLoader(object):
|
|||||||
}.iteritems()}
|
}.iteritems()}
|
||||||
|
|
||||||
CS = {
|
CS = {
|
||||||
'cfi':('ebooks/oeb/display/cfi.coffee', 'display/cfi.js'),
|
'cfi':'ebooks.oeb.display.cfi',
|
||||||
}
|
}
|
||||||
|
|
||||||
ORDER = ('jquery', 'jquery_scrollTo', 'bookmarks', 'referencing', 'images',
|
ORDER = ('jquery', 'jquery_scrollTo', 'bookmarks', 'referencing', 'images',
|
||||||
@ -59,21 +60,9 @@ class JavaScriptLoader(object):
|
|||||||
ans = P(src, data=True,
|
ans = P(src, data=True,
|
||||||
allow_user_override=False).decode('utf-8')
|
allow_user_override=False).decode('utf-8')
|
||||||
else:
|
else:
|
||||||
f = getattr(calibre, '__file__', None)
|
dynamic = (self._dynamic_coffeescript and
|
||||||
if self._dynamic_coffeescript and f and os.path.exists(f):
|
os.path.exists(calibre.__file__))
|
||||||
src = src[0]
|
ans = compiled_coffeescript(src, dynamic=dynamic).decode('utf-8')
|
||||||
src = os.path.join(os.path.dirname(f), *(src.split('/')))
|
|
||||||
from calibre.utils.serve_coffee import compile_coffeescript
|
|
||||||
with open(src, 'rb') as f:
|
|
||||||
cs, errors = compile_coffeescript(f.read(), src)
|
|
||||||
if errors:
|
|
||||||
for line in errors:
|
|
||||||
print (line)
|
|
||||||
raise Exception('Failed to compile coffeescript'
|
|
||||||
': %s'%src)
|
|
||||||
ans = cs
|
|
||||||
else:
|
|
||||||
ans = P(src[1], data=True, allow_user_override=False)
|
|
||||||
self._cache[name] = ans
|
self._cache[name] = ans
|
||||||
return ans
|
return ans
|
||||||
|
|
||||||
|
@ -1,24 +1,24 @@
|
|||||||
from __future__ import with_statement
|
|
||||||
__license__ = 'GPL v3'
|
__license__ = 'GPL v3'
|
||||||
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
|
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
|
||||||
import traceback, os, sys, functools, collections, re
|
|
||||||
|
import traceback, os, sys, functools, collections, textwrap
|
||||||
from functools import partial
|
from functools import partial
|
||||||
from threading import Thread
|
from threading import Thread
|
||||||
|
|
||||||
from PyQt4.Qt import (QApplication, Qt, QIcon, QTimer, SIGNAL, QByteArray,
|
from PyQt4.Qt import (QApplication, Qt, QIcon, QTimer, QByteArray, QSize,
|
||||||
QSize, QDoubleSpinBox, QLabel, QTextBrowser, QPropertyAnimation,
|
QDoubleSpinBox, QLabel, QTextBrowser, QPropertyAnimation, QPainter,
|
||||||
QPainter, QBrush, QColor, QStandardItemModel, QPalette, QStandardItem,
|
QBrush, QColor, pyqtSignal, QUrl, QRegExpValidator, QRegExp, QLineEdit,
|
||||||
QUrl, QRegExpValidator, QRegExp, QLineEdit, QToolButton, QMenu,
|
QToolButton, QMenu, QInputDialog, QAction, QKeySequence, QModelIndex)
|
||||||
QInputDialog, QAction, QKeySequence)
|
|
||||||
|
|
||||||
from calibre.gui2.viewer.main_ui import Ui_EbookViewer
|
from calibre.gui2.viewer.main_ui import Ui_EbookViewer
|
||||||
from calibre.gui2.viewer.printing import Printing
|
from calibre.gui2.viewer.printing import Printing
|
||||||
from calibre.gui2.viewer.bookmarkmanager import BookmarkManager
|
from calibre.gui2.viewer.bookmarkmanager import BookmarkManager
|
||||||
|
from calibre.gui2.viewer.toc import TOC
|
||||||
from calibre.gui2.widgets import ProgressIndicator
|
from calibre.gui2.widgets import ProgressIndicator
|
||||||
from calibre.gui2.main_window import MainWindow
|
from calibre.gui2.main_window import MainWindow
|
||||||
from calibre.gui2 import Application, ORG_NAME, APP_UID, choose_files, \
|
from calibre.gui2 import (Application, ORG_NAME, APP_UID, choose_files,
|
||||||
info_dialog, error_dialog, open_url, available_height
|
info_dialog, error_dialog, open_url, available_height)
|
||||||
from calibre.ebooks.oeb.iterator import EbookIterator
|
from calibre.ebooks.oeb.iterator.book import EbookIterator
|
||||||
from calibre.ebooks import DRMError
|
from calibre.ebooks import DRMError
|
||||||
from calibre.constants import islinux, isbsd, isosx, filesystem_encoding
|
from calibre.constants import islinux, isbsd, isosx, filesystem_encoding
|
||||||
from calibre.utils.config import Config, StringConfig, JSONConfig
|
from calibre.utils.config import Config, StringConfig, JSONConfig
|
||||||
@ -31,31 +31,6 @@ from calibre.ptempfile import reset_base_dir
|
|||||||
|
|
||||||
vprefs = JSONConfig('viewer')
|
vprefs = JSONConfig('viewer')
|
||||||
|
|
||||||
class TOCItem(QStandardItem):
|
|
||||||
|
|
||||||
def __init__(self, toc):
|
|
||||||
text = toc.text
|
|
||||||
if text:
|
|
||||||
text = re.sub(r'\s', ' ', text)
|
|
||||||
QStandardItem.__init__(self, text if text else '')
|
|
||||||
self.abspath = toc.abspath
|
|
||||||
self.fragment = toc.fragment
|
|
||||||
for t in toc:
|
|
||||||
self.appendRow(TOCItem(t))
|
|
||||||
self.setFlags(Qt.ItemIsEnabled|Qt.ItemIsSelectable)
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def type(cls):
|
|
||||||
return QStandardItem.UserType+10
|
|
||||||
|
|
||||||
class TOC(QStandardItemModel):
|
|
||||||
|
|
||||||
def __init__(self, toc):
|
|
||||||
QStandardItemModel.__init__(self)
|
|
||||||
for t in toc:
|
|
||||||
self.appendRow(TOCItem(t))
|
|
||||||
self.setHorizontalHeaderItem(0, QStandardItem(_('Table of Contents')))
|
|
||||||
|
|
||||||
class Worker(Thread):
|
class Worker(Thread):
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
@ -142,31 +117,22 @@ class DoubleSpinBox(QDoubleSpinBox):
|
|||||||
' [{0:.0%}]'.format(float(val)/self.maximum()))
|
' [{0:.0%}]'.format(float(val)/self.maximum()))
|
||||||
self.blockSignals(False)
|
self.blockSignals(False)
|
||||||
|
|
||||||
class HelpfulLineEdit(QLineEdit):
|
class Reference(QLineEdit):
|
||||||
|
|
||||||
HELP_TEXT = _('Go to...')
|
goto = pyqtSignal(object)
|
||||||
|
|
||||||
def __init__(self, *args):
|
def __init__(self, *args):
|
||||||
QLineEdit.__init__(self, *args)
|
QLineEdit.__init__(self, *args)
|
||||||
self.default_palette = QApplication.palette(self)
|
self.setValidator(QRegExpValidator(QRegExp(r'\d+\.\d+'), self))
|
||||||
self.gray = QPalette(self.default_palette)
|
self.setToolTip(textwrap.fill('<p>'+_(
|
||||||
self.gray.setBrush(QPalette.Text, QBrush(QColor('gray')))
|
'Go to a reference. To get reference numbers, use the <i>reference '
|
||||||
self.connect(self, SIGNAL('editingFinished()'),
|
'mode</i>, by clicking the reference mode button in the toolbar.')))
|
||||||
lambda : self.emit(SIGNAL('goto(PyQt_PyObject)'), unicode(self.text())))
|
if hasattr(self, 'setPlaceholderText'):
|
||||||
self.clear_to_help_mode()
|
self.setPlaceholderText(_('Go to...'))
|
||||||
|
self.editingFinished.connect(self.editing_finished)
|
||||||
|
|
||||||
def focusInEvent(self, ev):
|
def editing_finished(self):
|
||||||
self.setPalette(QApplication.palette(self))
|
self.goto.emit(unicode(self.text()))
|
||||||
if self.in_help_mode():
|
|
||||||
self.setText('')
|
|
||||||
return QLineEdit.focusInEvent(self, ev)
|
|
||||||
|
|
||||||
def in_help_mode(self):
|
|
||||||
return unicode(self.text()) == self.HELP_TEXT
|
|
||||||
|
|
||||||
def clear_to_help_mode(self):
|
|
||||||
self.setPalette(self.gray)
|
|
||||||
self.setText(self.HELP_TEXT)
|
|
||||||
|
|
||||||
class RecentAction(QAction):
|
class RecentAction(QAction):
|
||||||
|
|
||||||
@ -207,9 +173,7 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
|
|||||||
self.pos.setMinimum(1.)
|
self.pos.setMinimum(1.)
|
||||||
self.pos.setMinimumWidth(150)
|
self.pos.setMinimumWidth(150)
|
||||||
self.tool_bar2.insertWidget(self.action_find_next, self.pos)
|
self.tool_bar2.insertWidget(self.action_find_next, self.pos)
|
||||||
self.reference = HelpfulLineEdit()
|
self.reference = Reference()
|
||||||
self.reference.setValidator(QRegExpValidator(QRegExp(r'\d+\.\d+'), self.reference))
|
|
||||||
self.reference.setToolTip(_('Go to a reference. To get reference numbers, use the reference mode.'))
|
|
||||||
self.tool_bar2.insertSeparator(self.action_find_next)
|
self.tool_bar2.insertSeparator(self.action_find_next)
|
||||||
self.tool_bar2.insertWidget(self.action_find_next, self.reference)
|
self.tool_bar2.insertWidget(self.action_find_next, self.reference)
|
||||||
self.tool_bar2.insertSeparator(self.action_find_next)
|
self.tool_bar2.insertSeparator(self.action_find_next)
|
||||||
@@ -233,8 +197,7 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
         if isosx:
             qs += [Qt.CTRL+Qt.Key_W]
         self.action_quit.setShortcuts(qs)
-        self.connect(self.action_quit, SIGNAL('triggered(bool)'),
-                lambda x:QApplication.instance().quit())
+        self.action_quit.triggered.connect(self.quit)
         self.action_focus_search = QAction(self)
         self.addAction(self.action_focus_search)
         self.action_focus_search.setShortcuts([Qt.Key_Slash,
@@ -247,42 +210,34 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
         self.action_table_of_contents.setCheckable(True)
         self.toc.setMinimumWidth(80)
         self.action_reference_mode.setCheckable(True)
-        self.connect(self.action_reference_mode, SIGNAL('triggered(bool)'),
-                lambda x: self.view.reference_mode(x))
-        self.connect(self.action_metadata, SIGNAL('triggered(bool)'), lambda x:self.metadata.setVisible(x))
+        self.action_reference_mode.triggered[bool].connect(self.view.reference_mode)
+        self.action_metadata.triggered[bool].connect(self.metadata.setVisible)
         self.action_table_of_contents.toggled[bool].connect(self.set_toc_visible)
-        self.connect(self.action_copy, SIGNAL('triggered(bool)'), self.copy)
+        self.action_copy.triggered[bool].connect(self.copy)
         self.action_font_size_larger.triggered.connect(self.font_size_larger)
         self.action_font_size_smaller.triggered.connect(self.font_size_smaller)
-        self.connect(self.action_open_ebook, SIGNAL('triggered(bool)'),
-                self.open_ebook)
-        self.connect(self.action_next_page, SIGNAL('triggered(bool)'),
-                lambda x:self.view.next_page())
-        self.connect(self.action_previous_page, SIGNAL('triggered(bool)'),
-                lambda x:self.view.previous_page())
-        self.connect(self.action_find_next, SIGNAL('triggered(bool)'),
-                lambda x:self.find(unicode(self.search.text()), repeat=True))
-        self.connect(self.action_find_previous, SIGNAL('triggered(bool)'),
-                lambda x:self.find(unicode(self.search.text()),
-                    repeat=True, backwards=True))
-
-        self.connect(self.action_full_screen, SIGNAL('triggered(bool)'),
-                self.toggle_fullscreen)
+        self.action_open_ebook.triggered[bool].connect(self.open_ebook)
+        self.action_next_page.triggered.connect(self.view.next_page)
+        self.action_previous_page.triggered.connect(self.view.previous_page)
+        self.action_find_next.triggered.connect(self.find_next)
+        self.action_find_previous.triggered.connect(self.find_previous)
+        self.action_full_screen.triggered[bool].connect(self.toggle_fullscreen)
         self.action_full_screen.setShortcuts([Qt.Key_F11, Qt.CTRL+Qt.SHIFT+Qt.Key_F])
         self.action_full_screen.setToolTip(_('Toggle full screen (%s)') %
                 _(' or ').join([unicode(x.toString(x.NativeText)) for x in
                     self.action_full_screen.shortcuts()]))
-        self.connect(self.action_back, SIGNAL('triggered(bool)'), self.back)
-        self.connect(self.action_bookmark, SIGNAL('triggered(bool)'), self.bookmark)
-        self.connect(self.action_forward, SIGNAL('triggered(bool)'), self.forward)
-        self.connect(self.action_preferences, SIGNAL('triggered(bool)'), lambda x: self.view.config(self))
+        self.action_back.triggered[bool].connect(self.back)
+        self.action_forward.triggered[bool].connect(self.forward)
+        self.action_bookmark.triggered[bool].connect(self.bookmark)
+        self.action_preferences.triggered.connect(lambda :
+                self.view.config(self))
         self.pos.editingFinished.connect(self.goto_page_num)
-        self.connect(self.vertical_scrollbar, SIGNAL('valueChanged(int)'),
-                lambda x: self.goto_page(x/100.))
+        self.vertical_scrollbar.valueChanged[int].connect(lambda
+                x:self.goto_page(x/100.))
         self.search.search.connect(self.find)
         self.search.focus_to_library.connect(lambda: self.view.setFocus(Qt.OtherFocusReason))
-        self.connect(self.toc, SIGNAL('clicked(QModelIndex)'), self.toc_clicked)
-        self.connect(self.reference, SIGNAL('goto(PyQt_PyObject)'), self.goto)
+        self.toc.clicked[QModelIndex].connect(self.toc_clicked)
+        self.reference.goto.connect(self.goto)
 
         self.bookmarks_menu = QMenu()
         self.action_bookmark.setMenu(self.bookmarks_menu)
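
The hunk above is mostly a mechanical conversion from calibre's old string-based self.connect(..., SIGNAL(...)) wiring to PyQt4's new-style bound-signal syntax. As a minimal, self-contained sketch of the two styles (the action and on_triggered names are illustrative, not calibre code):

# Sketch only: contrasts the old- and new-style PyQt4 connections that
# this hunk converts between. Requires PyQt4; the names are hypothetical.
import sys
from PyQt4.Qt import QApplication, QAction, QObject, SIGNAL

app = QApplication(sys.argv)

def on_triggered(checked=False):
    print('triggered, checked = %r' % checked)

action = QAction('Demo', None)

# Old style: the signal is identified by a C++ signature string that is
# only resolved at runtime.
QObject.connect(action, SIGNAL('triggered(bool)'), on_triggered)

# New style: the bound signal is a Python attribute; [bool] selects the
# overload that passes the checked state, as in triggered[bool] above.
action.triggered[bool].connect(on_triggered)

action.trigger()  # invokes the slot once per connection
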
@@ -335,8 +290,8 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
         self.print_menu.addAction(QIcon(I('print-preview.png')), _('Print Preview'))
         self.action_print.setMenu(self.print_menu)
         self.tool_bar.widgetForAction(self.action_print).setPopupMode(QToolButton.MenuButtonPopup)
-        self.connect(self.action_print, SIGNAL("triggered(bool)"), partial(self.print_book, preview=False))
-        self.connect(self.print_menu.actions()[0], SIGNAL("triggered(bool)"), partial(self.print_book, preview=True))
+        self.action_print.triggered.connect(self.print_book)
+        self.print_menu.actions()[0].triggered.connect(self.print_preview)
         ca = self.view.copy_action
         ca.setShortcut(QKeySequence.Copy)
         self.addAction(ca)
@@ -381,13 +336,22 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
                 m.addAction(RecentAction(path, m))
                 count += 1
 
-    def closeEvent(self, e):
+    def shutdown(self):
         if self.isFullScreen():
             self.action_full_screen.trigger()
-            e.ignore()
-            return
+            return False
         self.save_state()
-        return MainWindow.closeEvent(self, e)
+        return True
+
+    def quit(self):
+        if self.shutdown():
+            QApplication.instance().quit()
+
+    def closeEvent(self, e):
+        if self.shutdown():
+            return MainWindow.closeEvent(self, e)
+        else:
+            e.ignore()
 
     def toggle_toolbars(self):
         for x in ('tool_bar', 'tool_bar2'):
@@ -440,8 +404,11 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
         c = config().parse()
         return c.remember_current_page
 
-    def print_book(self, preview):
-        Printing(self.iterator.spine, preview)
+    def print_book(self):
+        Printing(self.iterator.spine, False)
 
+    def print_preview(self):
+        Printing(self.iterator.spine, True)
+
     def toggle_fullscreen(self, x):
         if self.isFullScreen():
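
Splitting the old print_book(self, preview) slot into separate print_book/print_preview methods fits the new-style connections above: QAction.triggered always emits a checked boolean, so connecting it straight to a slot with an optional parameter would pass that boolean into the parameter. A small sketch of the pitfall, with hypothetical names (this rationale is inferred, not stated in the commit):

# Sketch only (hypothetical names): why argument-free slots are used once
# triggered is connected directly instead of through functools.partial.
import sys
from PyQt4.Qt import QApplication, QAction

app = QApplication(sys.argv)
action_print = QAction('Print', None)

def do_print(preview=True):
    print('preview = %r' % preview)

def print_book():       # dedicated slot for the toolbar button
    do_print(False)

def print_preview():    # dedicated slot for the menu entry
    do_print(True)

# Connecting do_print directly would receive the action's checked flag
# (False here) as `preview`, silently overriding the default:
action_print.triggered.connect(do_print)    # calls do_print(checked)
action_print.triggered.connect(print_book)  # explicit and unambiguous
action_print.trigger()
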
@@ -629,6 +596,12 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
             self.pending_search_dir = 'backwards' if backwards else 'forwards'
             self.load_path(self.iterator.spine[index])
 
+    def find_next(self):
+        self.find(unicode(self.search.text()), repeat=True)
+
+    def find_previous(self):
+        self.find(unicode(self.search.text()), repeat=True, backwards=True)
+
     def do_search(self, text, backwards):
         self.pending_search = None
         self.pending_search_dir = None
@@ -829,11 +802,13 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
         if not title:
             title = os.path.splitext(os.path.basename(pathtoebook))[0]
         if self.iterator.toc:
-            self.toc_model = TOC(self.iterator.toc)
+            self.toc_model = TOC(self.iterator.spine, self.iterator.toc)
             self.toc.setModel(self.toc_model)
             if self.show_toc_on_open:
                 self.action_table_of_contents.setChecked(True)
         else:
+            self.toc_model = TOC(self.iterator.spine)
+            self.toc.setModel(self.toc_model)
             self.action_table_of_contents.setChecked(False)
         if isbytestring(pathtoebook):
             pathtoebook = force_unicode(pathtoebook, filesystem_encoding)
src/calibre/gui2/viewer/toc.py (new file, 42 lines)
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
+from __future__ import (unicode_literals, division, absolute_import,
+        print_function)
+
+__license__ = 'GPL v3'
+__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
+__docformat__ = 'restructuredtext en'
+
+import re
+from PyQt4.Qt import QStandardItem, QStandardItemModel, Qt
+
+from calibre.ebooks.metadata.toc import TOC as MTOC
+
+class TOCItem(QStandardItem):
+
+    def __init__(self, toc):
+        text = toc.text
+        if text:
+            text = re.sub(r'\s', ' ', text)
+        QStandardItem.__init__(self, text if text else '')
+        self.abspath = toc.abspath
+        self.fragment = toc.fragment
+        for t in toc:
+            self.appendRow(TOCItem(t))
+        self.setFlags(Qt.ItemIsEnabled|Qt.ItemIsSelectable)
+
+    @classmethod
+    def type(cls):
+        return QStandardItem.UserType+10
+
+class TOC(QStandardItemModel):
+
+    def __init__(self, spine, toc=None):
+        QStandardItemModel.__init__(self)
+        if toc is None:
+            toc = MTOC()
+        for t in toc:
+            self.appendRow(TOCItem(t))
+        self.setHorizontalHeaderItem(0, QStandardItem(_('Table of Contents')))
+
+
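
The new TOC model simply mirrors the calibre TOC tree as QStandardItems, with each TOCItem remembering the abspath and fragment it points at. A rough usage sketch follows; the tree widget and click handler are illustrative, not part of this commit, and it assumes a calibre environment (e.g. calibre-debug) so the calibre imports and the _() builtin resolve:

# Illustrative sketch, not part of this commit: hooking the TOC model up
# to a tree view and resolving a click back to a spine location.
import sys
from PyQt4.Qt import QApplication, QTreeView

from calibre.ebooks.metadata.toc import TOC as MTOC
from calibre.gui2.viewer.toc import TOC

app = QApplication(sys.argv)

toc_model = TOC(spine=[], toc=MTOC())  # empty spine/TOC, just for the sketch
tree = QTreeView()
tree.setModel(toc_model)

def toc_clicked(index):
    # Every row is a TOCItem, so the target document and anchor are at hand.
    item = toc_model.itemFromIndex(index)
    print((item.abspath, item.fragment))

tree.clicked.connect(toc_clicked)
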
@@ -42,7 +42,7 @@ The default values for the tweaks are reproduced below
 .. literalinclude:: ../../../resources/default_tweaks.py
 
 
-Overriding icons, templates, etcetera
+Overriding icons, templates, et cetera
 ----------------------------------------
 
 |app| allows you to override the static resources, like icons, templates, javascript, etc. with customized versions that you like.
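
For the override mechanism this part of the manual describes, customized files go into a resources sub-folder of the calibre configuration directory; the exact location below is an assumption based on the documented behaviour, not something changed by this commit:

# Sketch, assuming overrides live under <config_dir>/resources: print where
# a customized icon would need to be placed to shadow the built-in one.
# Run inside a calibre environment (e.g. calibre-debug).
import os
from calibre.utils.config import config_dir

override_images = os.path.join(config_dir, 'resources', 'images')
print('Drop replacement icons (same file names as the built-ins) into: '
      + override_images)
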
Several other files changed in this commit are not shown here because their diffs are too large.