Mirror of https://github.com/kovidgoyal/calibre.git, synced 2025-07-07 10:14:46 -04:00

Commit d476a80f03: Sync to trunk.
@ -19,6 +19,69 @@
# new recipes:
#   - title:

- version: 0.8.15
  date: 2011-08-19

  new features:
    - title: "Add a 'languages' metadata field."
      type: major
      description: "This is useful if you have a multi-lingual book collection. You can now set one or more languages per book via the Edit Metadata dialog. If you want the languages column to be visible, go to Preferences->Add your own columns and unhide the languages column. You can also bulk set the languages on multiple books via the bulk edit metadata dialog, and have the languages show up in the book details panel on the right by going to Preferences->Look and Feel->Book details."

    - title: "Get Books: Add XinXii store."

    - title: "Metadata download plugin for ozon.ru, enabled only when the user selects Russian as their language in the welcome wizard."

    - title: "Bambook driver: Allow direct transfer of PDF files to Bambook devices"

    - title: "Driver for Coby MID7015A and Asus EEE Note"

    - title: "Edit metadata dialog: The keyboard shortcut Ctrl+D can now be used to trigger a metadata download. Also show the row number of the book being edited in the title bar"

    - title: "Add an option to not preserve the date when using the 'Copy to Library' function (found in Preferences->Adding books)"

  bug fixes:
    - title: "Linux binary: Use readlink -f rather than readlink -e in the launcher scripts so that they work with recent releases of busybox"

    - title: "When bulk downloading metadata for more than 100 books at a time, automatically split the download into batches of 100."
      tickets: [828373]

    - title: "When deleting books from the Kindle, also delete 'sidecar' .apnx and .ph1 files, as the Kindle does not clean them up automatically"
      tickets: [827684]

    - title: "Fix a subtle bug in the device drivers that caused calibre to lose track of some books on the device if you used author_sort in the send to device template and your books had author sort values that differ only in case."
      tickets: [825706]

    - title: "Fix scene break character pattern not being saved in conversion preferences"
      tickets: [826038]

    - title: "Keyboard shortcuts: Fix a bug triggered by some third party plugins that made the keyboard preferences unusable on OS X."
      tickets: [826325]

    - title: "Search box: Fix completion no longer working after using the Tag Browser to do a search. Also ensure that the completer popup is always hidden when a search is performed."

    - title: "Fix pressing Enter in the search box causing the same search to be executed twice in the plugins and keyboard shortcuts preferences panels"

    - title: "Catalog generation: Fix error creating EPUB/MOBI catalogs on non-UTF-8 Windows systems when the metadata contained non-ASCII characters"

  improved recipes:
    - Financial Times UK
    - La Tercera
    - Folha de Sao Paulo
    - Metro Nieuws NL
    - La Nacion
    - Juventud Rebelde
    - Rzeczpospolita Online
    - Newsweek Polska
    - CNET news

  new recipes:
    - title: El Mostrador and The Clinic
      author: Alex Mitrani

    - title: Patente de Corso
      author: Oscar Megia Lopez

- version: 0.8.14
  date: 2011-08-12
@ -24,6 +24,7 @@ class FinancialTimes(BasicNewsRecipe):
    publication_type = 'newspaper'
    masthead_url = 'http://im.media.ft.com/m/img/masthead_main.jpg'
    LOGIN = 'https://registration.ft.com/registration/barrier/login'
    LOGIN2 = 'http://media.ft.com/h/subs3.html'
    INDEX = 'http://www.ft.com/uk-edition'
    PREFIX = 'http://www.ft.com'

@ -39,7 +40,7 @@ class FinancialTimes(BasicNewsRecipe):
        br = BasicNewsRecipe.get_browser()
        br.open(self.INDEX)
        if self.username is not None and self.password is not None:
            br.open(self.LOGIN)
            br.open(self.LOGIN2)
            br.select_form(name='loginForm')
            br['username'] = self.username
            br['password'] = self.password
@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
from calibre.web.feeds.news import BasicNewsRecipe
from datetime import datetime, timedelta
from calibre.ebooks.BeautifulSoup import Tag,BeautifulSoup

@ -16,7 +17,7 @@ class FolhaOnline(BasicNewsRecipe):
    news = True

    title = u'Folha de S\xE3o Paulo'
    __author__ = 'Euler Alves'
    __author__ = 'Euler Alves and Alex Mitrani'
    description = u'Brazilian news from Folha de S\xE3o Paulo'
    publisher = u'Folha de S\xE3o Paulo'
    category = 'news, rss'

@ -62,37 +63,50 @@ class FolhaOnline(BasicNewsRecipe):
        ,dict(name='div',
            attrs={'class':[
                'openBox adslibraryArticle'
                ,'toolbar'
            ]})
        ,dict(name='a')
        ,dict(name='iframe')
        ,dict(name='link')
        ,dict(name='script')
        ,dict(name='li')
    ]
    remove_tags_after = dict(name='div',attrs={'id':'articleEnd'})

    feeds = [
        (u'Em cima da hora', u'http://feeds.folha.uol.com.br/emcimadahora/rss091.xml')
        ,(u'Cotidiano', u'http://feeds.folha.uol.com.br/folha/cotidiano/rss091.xml')
        ,(u'Brasil', u'http://feeds.folha.uol.com.br/folha/brasil/rss091.xml')
        ,(u'Mundo', u'http://feeds.folha.uol.com.br/mundo/rss091.xml')
        ,(u'Poder', u'http://feeds.folha.uol.com.br/poder/rss091.xml')
        ,(u'Mercado', u'http://feeds.folha.uol.com.br/folha/dinheiro/rss091.xml')
        ,(u'Saber', u'http://feeds.folha.uol.com.br/folha/educacao/rss091.xml')
        ,(u'Tec', u'http://feeds.folha.uol.com.br/folha/informatica/rss091.xml')
        ,(u'Ilustrada', u'http://feeds.folha.uol.com.br/folha/ilustrada/rss091.xml')
        ,(u'Ambiente', u'http://feeds.folha.uol.com.br/ambiente/rss091.xml')
        ,(u'Bichos', u'http://feeds.folha.uol.com.br/bichos/rss091.xml')
        ,(u'Ci\xEAncia', u'http://feeds.folha.uol.com.br/ciencia/rss091.xml')
        ,(u'Poder', u'http://feeds.folha.uol.com.br/poder/rss091.xml')
        ,(u'Equil\xEDbrio e Sa\xFAde', u'http://feeds.folha.uol.com.br/equilibrioesaude/rss091.xml')
        ,(u'Turismo', u'http://feeds.folha.uol.com.br/folha/turismo/rss091.xml')
        ,(u'Mundo', u'http://feeds.folha.uol.com.br/mundo/rss091.xml')
        ,(u'Pelo Mundo', u'http://feeds.folha.uol.com.br/pelomundo.folha.rssblog.uol.com.br/')
        ,(u'Circuito integrado', u'http://feeds.folha.uol.com.br/circuitointegrado.folha.rssblog.uol.com.br/')
        ,(u'Blog do Fred', u'http://feeds.folha.uol.com.br/blogdofred.folha.rssblog.uol.com.br/')
        ,(u'Maria In\xEAs Dolci', u'http://feeds.folha.uol.com.br/mariainesdolci.folha.blog.uol.com.br/')
        ,(u'Eduardo Ohata', u'http://feeds.folha.uol.com.br/folha/pensata/eduardoohata/rss091.xml')
        ,(u'Kennedy Alencar', u'http://feeds.folha.uol.com.br/folha/pensata/kennedyalencar/rss091.xml')
        ,(u'Eliane Catanh\xEAde', u'http://feeds.folha.uol.com.br/folha/pensata/elianecantanhede/rss091.xml')
        ,(u'Fernado Canzian', u'http://feeds.folha.uol.com.br/folha/pensata/fernandocanzian/rss091.xml')
        ,(u'Gilberto Dimenstein', u'http://feeds.folha.uol.com.br/folha/pensata/gilbertodimenstein/rss091.xml')
        ,(u'H\xE9lio Schwartsman', u'http://feeds.folha.uol.com.br/folha/pensata/helioschwartsman/rss091.xml')
        ,(u'Jo\xE3o Pereira Coutinho', u'http://http://feeds.folha.uol.com.br/folha/pensata/joaopereiracoutinho/rss091.xml')
        ,(u'Luiz Caversan', u'http://http://feeds.folha.uol.com.br/folha/pensata/luizcaversan/rss091.xml')
        ,(u'S\xE9rgio Malbergier', u'http://http://feeds.folha.uol.com.br/folha/pensata/sergiomalbergier/rss091.xml')
        ,(u'Valdo Cruz', u'http://http://feeds.folha.uol.com.br/folha/pensata/valdocruz/rss091.xml')
        ,(u'Esporte', u'http://feeds.folha.uol.com.br/folha/esporte/rss091.xml')
        ,(u'Zapping', u'http://feeds.folha.uol.com.br/colunas/zapping/rss091.xml')
        ,(u'Cida Santos', u'http://feeds.folha.uol.com.br/colunas/cidasantos/rss091.xml')
        ,(u'Clóvis Rossi', u'http://feeds.folha.uol.com.br/colunas/clovisrossi/rss091.xml')
        ,(u'Eliane Cantanhêde', u'http://feeds.folha.uol.com.br/colunas/elianecantanhede/rss091.xml')
        ,(u'Fernando Canzian', u'http://feeds.folha.uol.com.br/colunas/fernandocanzian/rss091.xml')
        ,(u'Gilberto Dimenstein', u'http://feeds.folha.uol.com.br/colunas/gilbertodimenstein/rss091.xml')
        ,(u'Hélio Schwartsman', u'http://feeds.folha.uol.com.br/colunas/helioschwartsman/rss091.xml')
        ,(u'Humberto Luiz Peron', u'http://feeds.folha.uol.com.br/colunas/futebolnarede/rss091.xml')
        ,(u'João Pereira Coutinho', u'http://feeds.folha.uol.com.br/colunas/joaopereiracoutinho/rss091.xml')
        ,(u'José Antonio Ramalho', u'http://feeds.folha.uol.com.br/colunas/canalaberto/rss091.xml')
        ,(u'Kennedy Alencar', u'http://feeds.folha.uol.com.br/colunas/kennedyalencar/rss091.xml')
        ,(u'Luiz Caversan', u'http://feeds.folha.uol.com.br/colunas/luizcaversan/rss091.xml')
        ,(u'Luiz Rivoiro', u'http://feeds.folha.uol.com.br/colunas/paiepai/rss091.xml')
        ,(u'Marcelo Leite', u'http://feeds.folha.uol.com.br/colunas/marceloleite/rss091.xml')
        ,(u'Sérgio Malbergier', u'http://feeds.folha.uol.com.br/colunas/sergiomalbergier/rss091.xml')
        ,(u'Sylvia Colombo', u'http://feeds.folha.uol.com.br/colunas/sylviacolombo/rss091.xml')
        ,(u'Valdo Cruz', u'http://feeds.folha.uol.com.br/colunas/valdocruz/rss091.xml')
    ]
@ -7,8 +7,9 @@ latercera.com
from calibre.web.feeds.news import BasicNewsRecipe

class LaTercera(BasicNewsRecipe):
    news = True
    title = 'La Tercera'
    __author__ = 'Darko Miletic'
    __author__ = 'Darko Miletic and Alex Mitrani'
    description = 'El sitio de noticias online de Chile'
    publisher = 'La Tercera'
    category = 'news, politics, Chile'

@ -18,7 +19,7 @@ class LaTercera(BasicNewsRecipe):
    encoding = 'cp1252'
    use_embedded_content = False
    remove_empty_feeds = True
    language = 'es'
    language = 'es_CL'

    conversion_options = {
        'comment' : description

@ -28,28 +29,33 @@ class LaTercera(BasicNewsRecipe):
        , 'linearize_tables' : True
    }

    keep_only_tags = [dict(name='div', attrs={'class':['span-16 articulo border','span-16 border','span-16']}) ]
    keep_only_tags = [
        dict(name='h1', attrs={'class':['titularArticulo']})
        ,dict(name='h4', attrs={'class':['bajadaArt']})
        ,dict(name='h5', attrs={'class':['autorArt']})
        ,dict(name='div', attrs={'class':['articleContent']})
    ]

    remove_tags = [
        dict(name=['ul','input','base'])
        ,dict(name='div', attrs={'id':['boxComentarios','shim','enviarAmigo']})
        ,dict(name='div', attrs={'class':['ad640','span-10 imgSet A','infoRelCol']})
        ,dict(name='p', attrs={'id':['mensajeError','mensajeEnviandoNoticia','mensajeExito']})
        dict(name='div', attrs={'class':['boxCompartir','keywords']})
    ]

    remove_tags_after = [
        dict(name='div', attrs={'class':['keywords']})
    ]

    feeds = [
        (u'Noticias de ultima hora', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&ul=1')
        ,(u'Nacional', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=680')
    feeds = [(u'La Tercera', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&ul=1')
        ,(u'Politica', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=674')
        ,(u'Nacional', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=680')
        ,(u'Mundo', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=678')
        ,(u'Deportes', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=656')
        ,(u'Negocios', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=655')
        ,(u'Entretenimiento', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=661')
        ,(u'Motores', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=665')
        ,(u'Santiago', u'http://www.latercera.com/feed/manager?type=rss&sc=TEFURVJDRVJB&citId=9&categoryId=1731')
        ,(u'Tendencias', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=659')
        ,(u'Estilo', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=660')
        ,(u'Educacion', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=657')
        ,(u'Cultura', u'http://www.latercera.com/feed/manager?type=rss&sc=TEFURVJDRVJB&citId=9&categoryId=1453')
        ,(u'Entretención', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=661')
        ,(u'Deportes', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=656')
    ]

    def preprocess_html(self, soup):
@ -23,14 +23,21 @@ class Liberation(BasicNewsRecipe):

    keep_only_tags = [
        dict(name='h1')
        ,dict(name='div', attrs={'class':'articleContent'})
        #,dict(name='div', attrs={'class':'object-content text text-item'})
        ,dict(name='div', attrs={'class':'article'})
        #,dict(name='div', attrs={'class':'articleContent'})
        ,dict(name='div', attrs={'class':'entry'})
    ]
    remove_tags_after = [ dict(name='div',attrs={'class':'toolbox extra_toolbox'}) ]
    remove_tags = [
        dict(name='p', attrs={'class':'clear'})
        ,dict(name='ul', attrs={'class':'floatLeft clear'})
        ,dict(name='div', attrs={'class':'clear floatRight'})
        ,dict(name='object')
        ,dict(name='div', attrs={'class':'toolbox'})
        ,dict(name='div', attrs={'class':'cartridge cartridge-basic-bubble cat-zoneabo'})
        #,dict(name='div', attrs={'class':'clear block block-call-items'})
        ,dict(name='div', attrs={'class':'block-content'})
    ]

    feeds = [
@ -181,7 +181,7 @@ save_template_title_series_sorting = 'library_order'
# To disable use the expression: '^$'
# This expression is designed for articles that are followed by spaces. If you
# also need to match articles that are followed by other characters, for example L'
# in French, use: r"^(A\s+|The\s+|An\s+|L')" instead.
# in French, use: "^(A\s+|The\s+|An\s+|L')" instead.
# Default: '^(A|The|An)\s+'
title_sort_articles=r'^(A|The|An)\s+'
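For illustration, a minimal sketch of how a pattern like the tweak above can be applied when computing a title sort key; the title_sort_key helper and the sample titles are invented for this example and are not calibre's actual implementation:

import re

# Strip one leading article (case-insensitive) and sort on the remainder,
# mirroring the title_sort_articles tweak above.
TITLE_SORT_ARTICLES = r'^(A|The|An)\s+'

def title_sort_key(title, pattern=TITLE_SORT_ARTICLES):
    return re.sub(pattern, '', title, count=1, flags=re.IGNORECASE).strip()

print(title_sort_key('The Count of Monte Cristo'))               # -> 'Count of Monte Cristo'
print(title_sort_key("L'Etranger", r"^(A\s+|The\s+|An\s+|L')"))  # -> 'Etranger'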
@ -290,7 +290,7 @@ class LinuxFreeze(Command):

        launcher = textwrap.dedent('''\
                #!/bin/sh
                path=`readlink -e $0`
                path=`readlink -f $0`
                base=`dirname $path`
                lib=$base/lib
                export LD_LIBRARY_PATH=$lib:$LD_LIBRARY_PATH
@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'

__appname__ = u'calibre'
numeric_version = (0, 8, 14)
numeric_version = (0, 8, 15)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

@ -92,7 +92,7 @@ def restore_plugin_state_to_default(plugin_or_name):
    config['enabled_plugins'] = ep

default_disabled_plugins = set([
    'Overdrive', 'Douban Books',
    'Overdrive', 'Douban Books', 'OZON.ru',
])

def is_disabled(plugin):
@ -122,12 +122,17 @@ class Cache(object):
        formats = self._field_for('formats', book_id)
        mi.format_metadata = {}
        if not formats:
            formats = None
            good_formats = None
        else:
            good_formats = []
            for f in formats:
                mi.format_metadata[f] = self._format_metadata(book_id, f)
            formats = ','.join(formats)
        mi.formats = formats
                try:
                    mi.format_metadata[f] = self._format_metadata(book_id, f)
                except:
                    pass
                else:
                    good_formats.append(f)
        mi.formats = good_formats
        mi.has_cover = _('Yes') if self._field_for('cover', book_id,
                default_value=False) else ''
        mi.tags = list(self._field_for('tags', book_id, default_value=()))
@ -30,6 +30,7 @@ class ANDROID(USBMS):
            0xca2 : [0x100, 0x0227, 0x0226, 0x222],
            0xca3 : [0x100, 0x0227, 0x0226, 0x222],
            0xca4 : [0x100, 0x0227, 0x0226, 0x222],
            0xca9 : [0x100, 0x0227, 0x0226, 0x222]
            },

    # Eken

@ -8,7 +8,7 @@ __docformat__ = 'restructuredtext en'
Device driver for Sanda's Bambook
'''

import time, os, hashlib
import time, os, hashlib, shutil
from itertools import cycle
from calibre.devices.interface import DevicePlugin
from calibre.devices.usbms.deviceconfig import DeviceConfig

@ -31,7 +31,7 @@ class BAMBOOK(DeviceConfig, DevicePlugin):

    ip = None

    FORMATS = [ "snb" ]
    FORMATS = [ "snb", "pdf" ]
    USER_CAN_ADD_NEW_FORMATS = False
    VENDOR_ID = 0x230b
    PRODUCT_ID = 0x0001
@ -267,14 +267,59 @@ class BAMBOOK(DeviceConfig, DevicePlugin):
        for (i, f) in enumerate(files):
            self.report_progress((i+1) / float(len(files)), _('Transferring books to device...'))
            if not hasattr(f, 'read'):
                if self.bambook.VerifySNB(f):
                    guid = self.bambook.SendFile(f, self.get_guid(metadata[i].uuid))
                    if guid:
                        paths.append(guid)
                    else:
                        print "Send fail"
                # Handle PDF File
                if f[-3:].upper() == "PDF":
                    # Package the PDF file
                    with TemporaryDirectory() as tdir:
                        snbcdir = os.path.join(tdir, 'snbc')
                        snbfdir = os.path.join(tdir, 'snbf')
                        os.mkdir(snbcdir)
                        os.mkdir(snbfdir)

                        tmpfile = open(os.path.join(snbfdir, 'book.snbf'), 'wb')
                        tmpfile.write('''<book-snbf version="1.0">
  <head>
    <name><![CDATA[''' + metadata[i].title + ''']]></name>
    <author><![CDATA[''' + ' '.join(metadata[i].authors) + ''']]></author>
    <language>ZH-CN</language>
    <rights/>
    <publisher>calibre</publisher>
    <generator>''' + __appname__ + ' ' + __version__ + '''</generator>
    <created/>
    <abstract></abstract>
    <cover/>
  </head>
</book-snbf>
''')
                        tmpfile.close()
                        tmpfile = open(os.path.join(snbfdir, 'toc.snbf'), 'wb')
                        tmpfile.write('''<toc-snbf>
  <head>
    <chapters>1</chapters>
  </head>
  <body>
    <chapter src="pdf1.pdf"><![CDATA[''' + metadata[i].title + ''']]></chapter>
  </body>
</toc-snbf>
''');
                        tmpfile.close()
                        pdf_name = os.path.join(snbcdir, "pdf1.pdf")
                        shutil.copyfile(f, pdf_name)

                        with TemporaryFile('.snb') as snbfile:
                            if self.bambook.PackageSNB(snbfile, tdir) and self.bambook.VerifySNB(snbfile):
                                guid = self.bambook.SendFile(snbfile, self.get_guid(metadata[i].uuid))

                elif f[-3:].upper() == 'SNB':
                    if self.bambook.VerifySNB(f):
                        guid = self.bambook.SendFile(f, self.get_guid(metadata[i].uuid))
                    else:
                        print "book invalid"
                if guid:
                    paths.append(guid)
                else:
                    print "Send fail"

        ret = zip(paths, cycle([on_card]))
        self.report_progress(1.0, _('Transferring books to device...'))
        return ret
@ -100,23 +100,28 @@ class FB2Input(InputFormatPlugin):
            mi.title = _('Unknown')
        if not mi.authors:
            mi.authors = [_('Unknown')]
        opf = OPFCreator(os.getcwdu(), mi)
        entries = [(f, guess_type(f)[0]) for f in os.listdir('.')]
        opf.create_manifest(entries)
        opf.create_spine(['index.xhtml'])
        cpath = None
        if mi.cover_data and mi.cover_data[1]:
            with open('fb2_cover_calibre_mi.jpg', 'wb') as f:
                f.write(mi.cover_data[1])
            opf.guide.set_cover(os.path.abspath('fb2_cover_calibre_mi.jpg'))
            cpath = os.path.abspath('fb2_cover_calibre_mi.jpg')
        else:
            for img in doc.xpath('//f:coverpage/f:image', namespaces=NAMESPACES):
                href = img.get('{%s}href'%XLINK_NS, img.get('href', None))
                if href is not None:
                    if href.startswith('#'):
                        href = href[1:]
                    opf.guide.set_cover(os.path.abspath(href))
                    cpath = os.path.abspath(href)
                    break

        opf.render(open('metadata.opf', 'wb'))
        opf = OPFCreator(os.getcwdu(), mi)
        entries = [(f, guess_type(f)[0]) for f in os.listdir('.')]
        opf.create_manifest(entries)
        opf.create_spine(['index.xhtml'])
        if cpath:
            opf.guide.set_cover(cpath)
        with open('metadata.opf', 'wb') as f:
            opf.render(f)
        return os.path.join(os.getcwd(), 'metadata.opf')

    def extract_embedded_content(self, doc):
@ -149,10 +149,7 @@ def _parse_cover_data(root, imgid, mi):
        mime_extensions = guess_all_extensions(mimetype)

        if not mime_extensions and mimetype.startswith('image/'):
            prints("WARNING: Unsupported or misspelled mime-type '%s'. "\
                "Trying to recovery mime-type from id_ref='%s'" % (mimetype, imgid) )
            ctype = guess_type(imgid) # -> (mime-type, encoding)
            mimetype_fromid = ctype[0]
            mimetype_fromid = guess_type(imgid)[0]
            if mimetype_fromid and mimetype_fromid.startswith('image/'):
                mime_extensions = guess_all_extensions(mimetype_fromid)

@ -1027,7 +1027,7 @@ class OPF(object): # {{{
        if self.guide is not None:
            for t in ('cover', 'other.ms-coverimage-standard', 'other.ms-coverimage'):
                for item in self.guide:
                    if item.type.lower() == t:
                    if item.type and item.type.lower() == t:
                        return item.path
        try:
            return self.guess_cover()
@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
from xml.etree.ElementTree import _Element

__license__ = 'GPL 3'
__copyright__ = '2011, Roman Mukhin <ramses_ru at hotmail.com>'

@ -12,10 +11,8 @@ import datetime
from urllib import quote_plus
from Queue import Queue, Empty

from lxml import etree, html
from lxml.etree import ElementBase
from calibre import as_unicode

from calibre import prints
from calibre.ebooks.chardet import xml_to_unicode

from calibre.ebooks.metadata import check_isbn
@ -1421,7 +1421,7 @@ class MOBIFile(object): # {{{
            except:
                pass
            if fmt is not None:
                self.image_records.append(ImageRecord(i, r, fmt))
                self.image_records.append(ImageRecord(len(self.image_records)+1, r, fmt))
            else:
                self.binary_records.append(BinaryRecord(i, r))

@ -314,6 +314,8 @@ def detect_periodical(toc, log=None):
    Detect if the TOC object toc contains a periodical that conforms to the
    structure required by kindlegen to generate a periodical.
    '''
    if toc.count() < 1 or not toc[0].klass == 'periodical':
        return False
    for node in toc.iterdescendants():
        if node.depth() == 1 and node.klass != 'article':
            if log is not None:
@ -109,20 +109,6 @@ class TAGX(object): # {{{
        list(map(self.add_tag, (11, 0)))
        return self.header(1) + bytes(self.byts)


class TAGX_BOOK(TAGX):
    BITMASKS = dict(TAGX.BITMASKS)
    BITMASKS.update({x:(1 << i) for i, x in enumerate([1, 2, 3, 4, 21, 22, 23])})

    @property
    def hierarchical_book(self):
        '''
        TAGX block for the primary index header of a hierarchical book
        '''
        list(map(self.add_tag, (1, 2, 3, 4, 21, 22, 23, 0)))
        return self.header(1) + bytes(self.byts)

    @property
    def flat_book(self):
        '''

@ -244,17 +230,6 @@ class IndexEntry(object):
        ans = buf.getvalue()
        return ans

class BookIndexEntry(IndexEntry):

    @property
    def entry_type(self):
        tagx = TAGX_BOOK()
        ans = 0
        for tag in self.tag_nums:
            ans |= tagx.BITMASKS[tag]
        return ans


class PeriodicalIndexEntry(IndexEntry):

    def __init__(self, offset, label_offset, class_offset, depth):
@ -305,9 +280,7 @@ class TBS(object): # {{{
    def __init__(self, data, is_periodical, first=False, section_map={},
            after_first=False):
        self.section_map = section_map
        #import pprint
        #pprint.pprint(data)
        #print()

        if is_periodical:
            # The starting bytes.
            # The value is zero which I think indicates the periodical

@ -420,6 +393,8 @@ class TBS(object): # {{{
            first_article = articles[0]
            last_article = articles[-1]
            num = len(articles)
            last_article_ends = (last_article in data['ends'] or
                    last_article in data['completes'])

            try:
                next_sec = sections[i+1]

@ -440,6 +415,19 @@ class TBS(object): # {{{
                if next_sec is not None:
                    buf.write(encode_tbs(last_article.index-next_sec.index,
                        {0b1000: 0}))

                # If a section TOC starts and extends into the next record add
                # a trailing vwi. We detect this by TBS type==3, processing last
                # section present in the record, and the last article in that
                # section either ends or completes and doesn't finish
                # on the last byte of the record.
                elif (typ == self.type_011 and last_article_ends and
                        ((last_article.offset+last_article.size) % RECORD_SIZE > 0)
                    ):
                    buf.write(encode_tbs(last_article.index-section.index-1,
                        {0b1000: 0}))

            else:
                buf.write(encode_tbs(spanner.index - parent_section_index,
                    {0b0001: 0}))
@ -447,7 +435,26 @@ class TBS(object): # {{{
        self.bytestring = buf.getvalue()

    def book_tbs(self, data, first):
        self.bytestring = b''
        spanner = data['spans']
        if spanner is not None:
            self.bytestring = encode_tbs(spanner.index, {0b010: 0, 0b001: 0},
                    flag_size=3)
        else:
            starts, completes, ends = (data['starts'], data['completes'],
                    data['ends'])
            if (not completes and (
                (len(starts) == 1 and not ends) or (len(ends) == 1 and not
                    starts))):
                node = starts[0] if starts else ends[0]
                self.bytestring = encode_tbs(node.index, {0b010: 0}, flag_size=3)
            else:
                nodes = []
                for x in (starts, completes, ends):
                    nodes.extend(x)
                nodes.sort(key=lambda x:x.index)
                self.bytestring = encode_tbs(nodes[0].index, {0b010:0,
                    0b100: len(nodes)}, flag_size=3)

# }}}

class Indexer(object): # {{{
@ -518,6 +525,7 @@ class Indexer(object): # {{{
        for i in indices:
            offsets.append(buf.tell())
            buf.write(i.bytestring)

        index_block = align_block(buf.getvalue())

        # Write offsets to index entries as an IDXT block
@ -557,9 +565,7 @@ class Indexer(object): # {{{
            tagx_block = TAGX().secondary
        else:
            tagx_block = (TAGX().periodical if self.is_periodical else
                    (TAGX_BOOK().hierarchical_book if
                        self.book_has_subchapters else
                        TAGX_BOOK().flat_book))
                    TAGX().flat_book)
        header_length = 192

        # Ident 0 - 4
@ -645,15 +651,13 @@ class Indexer(object): # {{{
    # }}}

    def create_book_index(self): # {{{
        self.book_has_subchapters = False
        indices = []
        seen, sub_seen = set(), set()
        seen = set()
        id_offsets = self.serializer.id_offsets

        # Flatten toc to contain only chapters and subchapters
        # Anything deeper than a subchapter is made into a subchapter
        chapters = []
        for node in self.oeb.toc:
        # Flatten toc so that chapter to chapter jumps work with all sub
        # chapter levels as well
        for node in self.oeb.toc.iterdescendants():
            try:
                offset = id_offsets[node.href]
                label = self.cncx[node.title]

@ -666,77 +670,33 @@ class Indexer(object): # {{{
                continue
            seen.add(offset)

            subchapters = []
            chapters.append((offset, label, subchapters))
            indices.append(IndexEntry(offset, label))

            for descendant in node.iterdescendants():
                try:
                    offset = id_offsets[descendant.href]
                    label = self.cncx[descendant.title]
                except:
                    self.log.warn('TOC item %s [%s] not found in document'%(
                        descendant.title, descendant.href))
                    continue
        indices.sort(key=lambda x:x.offset)

                if offset in sub_seen:
                    continue
                sub_seen.add(offset)
                subchapters.append((offset, label))
        # Set lengths
        for i, index in enumerate(indices):
            try:
                next_offset = indices[i+1].offset
            except:
                next_offset = self.serializer.body_end_offset
            index.length = next_offset - index.offset

            subchapters.sort(key=lambda x:x[0])

        chapters.sort(key=lambda x:x[0])
        # Remove empty indices
        indices = [x for x in indices if x.length > 0]

        chapters = [(BookIndexEntry(x[0], x[1]), [
            BookIndexEntry(y[0], y[1]) for y in x[2]]) for x in chapters]
        # Reset lengths in case any were removed
        for i, index in enumerate(indices):
            try:
                next_offset = indices[i+1].offset
            except:
                next_offset = self.serializer.body_end_offset
            index.length = next_offset - index.offset

        def set_length(indices):
            for i, index in enumerate(indices):
                try:
                    next_offset = indices[i+1].offset
                except:
                    next_offset = self.serializer.body_end_offset
                index.length = next_offset - index.offset

        # Set chapter and subchapter lengths
        set_length([x[0] for x in chapters])
        for x in chapters:
            set_length(x[1])

        # Remove empty chapters
        chapters = [x for x in chapters if x[0].length > 0]

        # Remove invalid subchapters
        for i, x in enumerate(list(chapters)):
            chapter, subchapters = x
            ok_subchapters = []
            for sc in subchapters:
                if sc.offset < chapter.next_offset and sc.length > 0:
                    ok_subchapters.append(sc)
            chapters[i] = (chapter, ok_subchapters)

        # Reset chapter and subchapter lengths in case any were removed
        set_length([x[0] for x in chapters])
        for x in chapters:
            set_length(x[1])

        # Set index and depth values
        indices = []
        for index, x in enumerate(chapters):
            x[0].index = index
            indices.append(x[0])

        for chapter, subchapters in chapters:
            for sc in subchapters:
                index += 1
                sc.index = index
                sc.parent_index = chapter.index
                indices.append(sc)
                sc.depth = 1
                self.book_has_subchapters = True
            if subchapters:
                chapter.first_child_index = subchapters[0].index
                chapter.last_child_index = subchapters[-1].index
        # Set index values
        for index, x in enumerate(indices):
            x.index = index

        return indices
@ -772,9 +732,11 @@ class Indexer(object): # {{{
                continue
            if offset in seen_sec_offsets:
                continue

            seen_sec_offsets.add(offset)
            section = PeriodicalIndexEntry(offset, label, klass, 1)
            section.parent_index = 0

            for art in sec:
                try:
                    offset = id_offsets[art.href]

@ -830,6 +792,7 @@ class Indexer(object): # {{{
            for art in articles:
                i += 1
                art.index = i

                art.parent_index = sec.index

        for sec, normalized_articles in normalized_sections:

@ -905,6 +868,7 @@ class Indexer(object): # {{{
                    'spans':None, 'offset':offset, 'record_number':i+1}

            for index in self.indices:

                if index.offset >= next_offset:
                    # Node starts after current record
                    if index.depth == deepest:
@ -97,6 +97,9 @@ class MobiWriter(object):
    # Indexing {{{
    def generate_index(self):
        self.primary_index_record_idx = None
        if self.oeb.toc.count() < 1:
            self.log.warn('No TOC, MOBI index not generated')
            return
        try:
            self.indexer = Indexer(self.serializer, self.last_text_record_idx,
                    len(self.records[self.last_text_record_idx]),

@ -147,15 +150,19 @@ class MobiWriter(object):
        oeb.logger.info('Serializing images...')
        self.image_records = []
        self.image_map = {}
        self.masthead_offset = 0
        index = 1

        mh_href = self.masthead_offset = None
        mh_href = None
        if 'masthead' in oeb.guide:
            mh_href = oeb.guide['masthead'].href
            self.image_records.append(None)
            index += 1
        elif self.is_periodical:
            # Generate a default masthead
            data = generate_masthead(unicode(self.oeb.metadata('title')[0]))
            data = generate_masthead(unicode(self.oeb.metadata['title'][0]))
            self.image_records.append(data)
            self.masthead_offset = 0
            index += 1

        cover_href = self.cover_offset = self.thumbnail_offset = None
        if (oeb.metadata.cover and

@ -172,13 +179,16 @@ class MobiWriter(object):
                    oeb.logger.warn('Bad image file %r' % item.href)
                    continue
                else:
                    self.image_map[item.href] = len(self.image_records)
                    self.image_records.append(data)
                    if mh_href and item.href == mh_href:
                        self.image_records[0] = data
                        continue

                if item.href == mh_href:
                    self.masthead_offset = len(self.image_records) - 1
                elif item.href == cover_href:
                    self.cover_offset = len(self.image_records) - 1
                self.image_records.append(data)
                self.image_map[item.href] = index
                index += 1

                if cover_href and item.href == cover_href:
                    self.cover_offset = self.image_map[item.href] - 1
                try:
                    data = rescale_image(item.data, dimen=MAX_THUMB_DIMEN,
                        maxsizeb=MAX_THUMB_SIZE)

@ -186,10 +196,14 @@ class MobiWriter(object):
                    oeb.logger.warn('Failed to generate thumbnail')
                else:
                    self.image_records.append(data)
                    self.thumbnail_offset = len(self.image_records) - 1
                    self.thumbnail_offset = index - 1
                    index += 1
                finally:
                    item.unload_data_from_memory()

        if self.image_records and self.image_records[0] is None:
            raise ValueError('Failed to find masthead image in manifest')

    # }}}

    # Text {{{

@ -197,6 +211,7 @@ class MobiWriter(object):
    def generate_text(self):
        self.oeb.logger.info('Serializing markup content...')
        self.serializer = Serializer(self.oeb, self.image_map,
                self.is_periodical,
                write_page_breaks_after_item=self.write_page_breaks_after_item)
        text = self.serializer()
        self.text_length = len(text)
@ -7,6 +7,8 @@ __license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import re

from calibre.ebooks.oeb.base import (OEB_DOCS, XHTML, XHTML_NS, XML_NS,
        namespace, prefixname, urlnormalize)
from calibre.ebooks.mobi.mobiml import MBP_NS

@ -19,7 +21,7 @@ from cStringIO import StringIO
class Serializer(object):
    NSRMAP = {'': None, XML_NS: 'xml', XHTML_NS: '', MBP_NS: 'mbp'}

    def __init__(self, oeb, images, write_page_breaks_after_item=True):
    def __init__(self, oeb, images, is_periodical, write_page_breaks_after_item=True):
        '''
        Write all the HTML markup in oeb into a single in memory buffer
        containing a single html document with links replaced by offsets into

@ -35,8 +37,10 @@ class Serializer(object):
        is written after every element of the spine in ``oeb``.
        '''
        self.oeb = oeb
        # Map of image hrefs to image index in the MOBI file
        self.images = images
        self.logger = oeb.logger
        self.is_periodical = is_periodical
        self.write_page_breaks_after_item = write_page_breaks_after_item

        # If not None, this is a number pointing to the location at which to

@ -187,13 +191,63 @@ class Serializer(object):
        moved to the end.
        '''
        buf = self.buf

        def serialize_toc_level(tocref, href=None):
            # add the provided toc level to the output stream
            # if href is provided add a link ref to the toc level output (e.g. feed_0/index.html)
            if href is not None:
                # resolve the section url in id_offsets
                buf.write('<mbp:pagebreak/>')
                self.id_offsets[urlnormalize(href)] = buf.tell()

            if tocref.klass == "periodical":
                buf.write('<div> <div height="1em"></div>')
            else:
                buf.write('<div></div> <div> <h2 height="1em"><font size="+2"><b>'+tocref.title+'</b></font></h2> <div height="1em"></div>')

            buf.write('<ul>')

            for tocitem in tocref.nodes:
                buf.write('<li><a filepos=')
                itemhref = tocitem.href
                if tocref.klass == 'periodical':
                    # This is a section node.
                    # For periodical tocs, the section urls are like r'feed_\d+/index.html'
                    # We don't want to point to the start of the first article
                    # so we change the href.
                    itemhref = re.sub(r'article_\d+/', '', itemhref)
                self.href_offsets[itemhref].append(buf.tell())
                buf.write('0000000000')
                buf.write(' ><font size="+1" color="blue"><b><u>')
                buf.write(tocitem.title)
                buf.write('</u></b></font></a></li>')

            buf.write('</ul><div height="1em"></div></div>')

        self.anchor_offset = buf.tell()
        buf.write(b'<body>')
        self.body_start_offset = buf.tell()

        if self.is_periodical:
            top_toc = self.oeb.toc.nodes[0]
            serialize_toc_level(top_toc)

        spine = [item for item in self.oeb.spine if item.linear]
        spine.extend([item for item in self.oeb.spine if not item.linear])

        for item in spine:

            if self.is_periodical and item.is_section_start:
                for section_toc in top_toc.nodes:
                    if urlnormalize(item.href) == section_toc.href:
                        # create section url of the form r'feed_\d+/index.html'
                        section_url = re.sub(r'article_\d+/', '', section_toc.href)
                        serialize_toc_level(section_toc, section_url)
                        section_toc.href = section_url
                        break

            self.serialize_item(item)

        self.body_end_offset = buf.tell()
        buf.write(b'</body>')
@ -374,8 +374,8 @@ class Editor(QFrame): # {{{
            self.current_keys])
        if not current: current = _('None')

        self.use_default.setText(_('Default: %s [Currently not conflicting: %s]')%
            (default, current))
        self.use_default.setText(_('Default: %(deflt)s [Currently not conflicting: %(curr)s]')%
            dict(deflt=default, curr=current))

        if shortcut['set_to_default']:
            self.use_default.setChecked(True)
@ -17,6 +17,9 @@ class LanguagesEdit(MultiCompleteComboBox):
        MultiCompleteComboBox.__init__(self, parent)

        self._lang_map = lang_map()
        self.names_with_commas = [x for x in self._lang_map.itervalues() if ',' in x]
        self.comma_map = {k:k.replace(',', '|') for k in self.names_with_commas}
        self.comma_rmap = {v:k for k, v in self.comma_map.iteritems()}
        self._rmap = {v:k for k,v in self._lang_map.iteritems()}

        all_items = sorted(self._lang_map.itervalues(),

@ -25,12 +28,19 @@ class LanguagesEdit(MultiCompleteComboBox):
        for item in all_items:
            self.addItem(item)

    @property
    def vals(self):
        raw = unicode(self.lineEdit().text())
        for k, v in self.comma_map.iteritems():
            raw = raw.replace(k, v)
        parts = [x.strip() for x in raw.split(',')]
        return [self.comma_rmap.get(x, x) for x in parts]

    @dynamic_property
    def lang_codes(self):

        def fget(self):
            vals = [x.strip() for x in
                    unicode(self.lineEdit().text()).split(',')]
            vals = self.vals
            ans = []
            for name in vals:
                if name:

@ -50,8 +60,7 @@ class LanguagesEdit(MultiCompleteComboBox):
        return property(fget=fget, fset=fset)

    def validate(self):
        vals = [x.strip() for x in
                unicode(self.lineEdit().text()).split(',')]
        vals = self.vals
        bad = []
        for name in vals:
            if name:
@ -640,6 +640,7 @@ class LibraryPage(QWizardPage, LibraryUI):
        metadata_plugins = {
                'zh' : ('Douban Books',),
                'fr' : ('Nicebooks',),
                'ru' : ('OZON.ru',),
                }.get(lang, [])
        from calibre.customize.ui import enable_plugin
        for name in metadata_plugins:
@ -925,12 +925,18 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
            formats = row[fm['formats']]
            mi.format_metadata = {}
            if not formats:
                formats = None
                good_formats = None
            else:
                formats = formats.split(',')
                good_formats = []
                for f in formats:
                    mi.format_metadata[f] = self.format_metadata(id, f)
            mi.formats = formats
                    try:
                        mi.format_metadata[f] = self.format_metadata(id, f)
                    except:
                        pass
                    else:
                        good_formats.append(f)
            mi.formats = good_formats
            tags = row[fm['tags']]
            if tags:
                mi.tags = [i.strip() for i in tags.split(',')]

@ -1213,7 +1219,13 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
        except: # If path contains strange characters this throws an exc
            candidates = []
        if format and candidates and os.path.exists(candidates[0]):
            shutil.copyfile(candidates[0], fmt_path)
            try:
                shutil.copyfile(candidates[0], fmt_path)
            except:
                # This can happen if candidates[0] or fmt_path is too long,
                # which can happen if the user copied the library from a
                # non windows machine to a windows machine.
                return None
            return fmt_path

    def copy_format_to(self, index, fmt, dest, index_is_id=False):

@ -1633,7 +1645,6 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
                        item.rt += rating
                        item.rc += 1
                    except:
                        prints(tid_cat, val)
                        prints('get_categories: item', val, 'is not in', cat, 'list!')

        #print 'end phase "books":', time.clock() - last, 'seconds'

@ -2291,7 +2302,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
                'DELETE FROM books_languages_link WHERE book=?', (book_id,))
        self.conn.execute('''DELETE FROM languages WHERE (SELECT COUNT(id)
                FROM books_languages_link WHERE
                lang_code=languages.id) < 1''')
                books_languages_link.lang_code=languages.id) < 1''')

        books_to_refresh = set([book_id])
        final_languages = []
@ -360,7 +360,7 @@ When you first run |app|, it will ask you for a folder in which to store your books

Metadata about the books is stored in the file ``metadata.db`` at the top level of the library folder. This file is a sqlite database. When backing up your library make sure you copy the entire folder and all its sub-folders.
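For readers who want to inspect this file directly, here is a minimal read-only sketch using Python's sqlite3 module. The books table and its title and author_sort columns are assumptions about a typical calibre schema, and the path is a placeholder; work on a copy, not your live library:

import sqlite3

db_path = '/path/to/Calibre Library/metadata.db'  # hypothetical path; point it at a copy
conn = sqlite3.connect(db_path)
try:
    # Assumed schema: a 'books' table with 'title' and 'author_sort' columns.
    for title, author_sort in conn.execute(
            'SELECT title, author_sort FROM books ORDER BY author_sort'):
        print(author_sort, '-', title)
finally:
    conn.close()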
The library folder and all its contents make up what is called a *|app| library*. You can have multiple such libraries. To manage the libraries, click the |app| icon on the toolbar. You can create new libraries, remove/rename existing ones and switch between libraries easily.
The library folder and all its contents make up what is called a |app| library. You can have multiple such libraries. To manage the libraries, click the |app| icon on the toolbar. You can create new libraries, remove/rename existing ones and switch between libraries easily.

You can copy or move books between different libraries (once you have more than one library set up) by right clicking on a book and selecting the :guilabel:`Copy to library` action.

@ -438,7 +438,19 @@ Simply copy the |app| library folder from the old to the new computer. You can f

Note that if you are transferring between different types of computers (for example Windows to OS X) then after doing the above you should also click the arrow next to the calibre icon on the toolbar, select Library Maintenance and run the Check Library action. It will warn you about any problems in your library, which you should fix by hand.

.. note:: A |app| library is just a folder which contains all the book files and their metadata. All the emtadata is stored in a single file called metadata.db, in the top level folder. If this file gets corrupted, you may see an empty list of books in |app|. In this case you can ask |app| to restore your books by clicking the arrow next to the |app| icon on the toolbar and selecting Library Maintenance->Restore Library.
.. note:: A |app| library is just a folder which contains all the book files and their metadata. All the metadata is stored in a single file called metadata.db, in the top level folder. If this file gets corrupted, you may see an empty list of books in |app|. In this case you can ask |app| to restore your books by clicking the arrow next to the |app| icon on the toolbar and selecting Library Maintenance->Restore Library.

The list of books in |app| is blank!
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In order to understand why that happened, you have to understand what a |app| library is. At the most basic level, a |app| library is just a folder. Whenever you add a book to |app|, that book's files are copied into this folder (arranged into sub folders by author and title). Inside the |app| library folder, at the top level, you will see a file called metadata.db. This file is where |app| stores the metadata like title/author/rating/tags etc. for *every* book in your |app| library. The list of books that |app| displays is created by reading the contents of this metadata.db file.

There can be two reasons why |app| is showing an empty list of books:

* Your |app| library folder changed its location. This can happen if it was on an external disk and the drive letter for that disk changed. Or if you accidentally moved the folder. In this case, |app| cannot find its library and so starts up with an empty library instead. To remedy this, simply click the arrow next to the |app| icon in the |app| toolbar (it will say 0 books underneath it) and select Switch/create library. Click the little blue icon to select the new location of your |app| library and click OK.

* Your metadata.db file was deleted/corrupted. In this case, you can ask |app| to rebuild the metadata.db from its backups. Click the arrow next to the |app| icon in the |app| toolbar (it will say 0 books underneath it) and select Library maintenance->Restore database. |app| will automatically rebuild metadata.db.


Content From The Web
---------------------

@ -446,6 +458,7 @@ Content From The Web
   :depth: 1
   :local:


I obtained a recipe for a news site as a .py file from somewhere, how do I use it?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Start the :guilabel:`Add custom news sources` dialog (from the :guilabel:`Fetch news` menu) and click the :guilabel:`Switch to advanced mode` button. Delete everything in the box with the recipe source code and copy paste the contents of your .py file into the box. Click :guilabel:`Add/update recipe`.
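For reference, a minimal sketch of what such a .py recipe file typically looks like; the class name, title and feed URL below are placeholders, not a real news source:

from calibre.web.feeds.news import BasicNewsRecipe

class ExampleNews(BasicNewsRecipe):
    # Placeholder values; replace them with the site you actually want to fetch.
    title                 = u'Example News'
    __author__            = 'Your Name'
    language              = 'en'
    oldest_article        = 7    # days
    max_articles_per_feed = 100

    feeds = [
        (u'Front page', u'http://example.com/rss.xml'),  # hypothetical feed URL
    ]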
File diff suppressed because it is too large (this notice repeats for the remaining large files in this commit).