Repository: https://github.com/kovidgoyal/calibre.git

commit d476a80f03

    Sync to trunk.

@@ -19,6 +19,69 @@
 # new recipes:
 #  - title:

+- version: 0.8.15
+  date: 2011-08-19
+
+  new features:
+    - title: "Add a 'languages' metadata field."
+      type: major
+      description: "This is useful if you have a multi-lingual book collection. You can now set one or more languages per book via the Edit Metadata dialog. If you want the languages
+      column to be visible, then go to Preferences->Add your own columns and unhide the languages columns. You can also bulk set the languages on multiple books via the bulk edit metadata dialog. You can also have the languages show up in the book details panel on the right by going to Preferences->Look and Feel->Book details"
+
+    - title: "Get Books: Add XinXii store."
+
+    - title: "Metadata download plugin for ozon.ru, enabled only when user selects russian as their language in the welcome wizard."
+
+    - title: "Bambook driver: Allow direct transfer of PDF files to Bambook devices"
+
+    - title: "Driver for Coby MID7015A and Asus EEE Note"
+
+    - title: "Edit metadata dialog: The keyboard shortcut Ctrl+D can now be used to trigger a metadata download. Also show the row number of the book being edited in the titlebar"
+
+    - title: "Add an option to not preserve the date when using the 'Copy to Library' function (found in Preferences->Adding books)"
+
+  bug fixes:
+    - title: "Linux binary: Use readlink -f rather than readlink -e in the launcher scripts so that they work with recent releases of busybox"
+
+    - title: "When bulk downloading metadata for more than 100 books at a time, automatically split up the download into batches of 100."
+      tickets: [828373]
+
+    - title: "When deleting books from the Kindle also delete 'sidecar' .apnx and .ph1 files as the kindle does not clean them up automatically"
+      tickets: [827684]
+
+    - title: "Fix a subtle bug in the device drivers that caused calibre to lose track of some books on the device if you used author_sort in the send to device template and your books have author sort values that differ only in case."
+      tickets: [825706]
+
+    - title: "Fix scene break character pattern not saved in conversion preferences"
+      tickets: [826038]
+
+    - title: "Keyboard shortcuts: Fix a bug triggered by some third party plugins that made the keyboard preferences unusable in OS X."
+      tickets: [826325]
+
+    - title: "Search box: Fix completion no longer working after using Tag Browser to do a search. Also ensure that completer popup is always hidden when a search is performed."
+
+    - title: "Fix pressing Enter in the search box causes the same search to be executed twice in the plugins and keyboard shortcuts preferences panels"
+
+    - title: "Catalog generation: Fix error creating epub/mobi catalogs on non UTF-8 windows systems when the metadata contained non ASCII characters"
+
+  improved recipes:
+    - Financial Times UK
+    - La Tercera
+    - Folha de Sao Paolo
+    - Metro niews NL
+    - La Nacion
+    - Juventud Rebelde
+    - Rzeczpospolita Online
+    - Newsweek Polska
+    - CNET news
+
+  new recipes:
+    - title: El Mostrador and The Clinic
+      author: Alex Mitrani
+
+    - title: Patente de Corso
+      author: Oscar Megia Lopez
+
 - version: 0.8.14
   date: 2011-08-12

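Note on the bulk-download fix listed above: splitting a large metadata job into fixed-size batches is the whole of the change. A minimal sketch of the batching it describes (illustrative helper, not calibre's API):

    def batches(ids, size=100):
        # Yield successive chunks of at most `size` ids, so each
        # metadata download job stays small enough to manage.
        for i in range(0, len(ids), size):
            yield ids[i:i+size]
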
@@ -24,6 +24,7 @@ class FinancialTimes(BasicNewsRecipe):
     publication_type  = 'newspaper'
     masthead_url      = 'http://im.media.ft.com/m/img/masthead_main.jpg'
     LOGIN             = 'https://registration.ft.com/registration/barrier/login'
+    LOGIN2            = 'http://media.ft.com/h/subs3.html'
     INDEX             = 'http://www.ft.com/uk-edition'
     PREFIX            = 'http://www.ft.com'

@@ -39,7 +40,7 @@ class FinancialTimes(BasicNewsRecipe):
         br = BasicNewsRecipe.get_browser()
         br.open(self.INDEX)
         if self.username is not None and self.password is not None:
-            br.open(self.LOGIN)
+            br.open(self.LOGIN2)
             br.select_form(name='loginForm')
             br['username'] = self.username
             br['password'] = self.password

@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 from calibre.web.feeds.news import BasicNewsRecipe
 from datetime import datetime, timedelta
 from calibre.ebooks.BeautifulSoup import Tag,BeautifulSoup
@@ -16,7 +17,7 @@ class FolhaOnline(BasicNewsRecipe):
     news = True

     title = u'Folha de S\xE3o Paulo'
-    __author__ = 'Euler Alves'
+    __author__ = 'Euler Alves and Alex Mitrani'
     description = u'Brazilian news from Folha de S\xE3o Paulo'
     publisher = u'Folha de S\xE3o Paulo'
     category = 'news, rss'
@@ -62,37 +63,50 @@ class FolhaOnline(BasicNewsRecipe):
         ,dict(name='div',
             attrs={'class':[
                 'openBox adslibraryArticle'
+                ,'toolbar'
             ]})

         ,dict(name='a')
         ,dict(name='iframe')
         ,dict(name='link')
         ,dict(name='script')
+        ,dict(name='li')
     ]
+    remove_tags_after = dict(name='div',attrs={'id':'articleEnd'})

     feeds = [
         (u'Em cima da hora', u'http://feeds.folha.uol.com.br/emcimadahora/rss091.xml')
+        ,(u'Cotidiano', u'http://feeds.folha.uol.com.br/folha/cotidiano/rss091.xml')
+        ,(u'Brasil', u'http://feeds.folha.uol.com.br/folha/brasil/rss091.xml')
+        ,(u'Mundo', u'http://feeds.folha.uol.com.br/mundo/rss091.xml')
+        ,(u'Poder', u'http://feeds.folha.uol.com.br/poder/rss091.xml')
+        ,(u'Mercado', u'http://feeds.folha.uol.com.br/folha/dinheiro/rss091.xml')
+        ,(u'Saber', u'http://feeds.folha.uol.com.br/folha/educacao/rss091.xml')
+        ,(u'Tec', u'http://feeds.folha.uol.com.br/folha/informatica/rss091.xml')
+        ,(u'Ilustrada', u'http://feeds.folha.uol.com.br/folha/ilustrada/rss091.xml')
         ,(u'Ambiente', u'http://feeds.folha.uol.com.br/ambiente/rss091.xml')
         ,(u'Bichos', u'http://feeds.folha.uol.com.br/bichos/rss091.xml')
         ,(u'Ci\xEAncia', u'http://feeds.folha.uol.com.br/ciencia/rss091.xml')
-        ,(u'Poder', u'http://feeds.folha.uol.com.br/poder/rss091.xml')
         ,(u'Equil\xEDbrio e Sa\xFAde', u'http://feeds.folha.uol.com.br/equilibrioesaude/rss091.xml')
         ,(u'Turismo', u'http://feeds.folha.uol.com.br/folha/turismo/rss091.xml')
-        ,(u'Mundo', u'http://feeds.folha.uol.com.br/mundo/rss091.xml')
-        ,(u'Pelo Mundo', u'http://feeds.folha.uol.com.br/pelomundo.folha.rssblog.uol.com.br/')
-        ,(u'Circuito integrado', u'http://feeds.folha.uol.com.br/circuitointegrado.folha.rssblog.uol.com.br/')
-        ,(u'Blog do Fred', u'http://feeds.folha.uol.com.br/blogdofred.folha.rssblog.uol.com.br/')
-        ,(u'Maria In\xEAs Dolci', u'http://feeds.folha.uol.com.br/mariainesdolci.folha.blog.uol.com.br/')
-        ,(u'Eduardo Ohata', u'http://feeds.folha.uol.com.br/folha/pensata/eduardoohata/rss091.xml')
-        ,(u'Kennedy Alencar', u'http://feeds.folha.uol.com.br/folha/pensata/kennedyalencar/rss091.xml')
-        ,(u'Eliane Catanh\xEAde', u'http://feeds.folha.uol.com.br/folha/pensata/elianecantanhede/rss091.xml')
-        ,(u'Fernado Canzian', u'http://feeds.folha.uol.com.br/folha/pensata/fernandocanzian/rss091.xml')
-        ,(u'Gilberto Dimenstein', u'http://feeds.folha.uol.com.br/folha/pensata/gilbertodimenstein/rss091.xml')
-        ,(u'H\xE9lio Schwartsman', u'http://feeds.folha.uol.com.br/folha/pensata/helioschwartsman/rss091.xml')
-        ,(u'Jo\xE3o Pereira Coutinho', u'http://http://feeds.folha.uol.com.br/folha/pensata/joaopereiracoutinho/rss091.xml')
-        ,(u'Luiz Caversan', u'http://http://feeds.folha.uol.com.br/folha/pensata/luizcaversan/rss091.xml')
-        ,(u'S\xE9rgio Malbergier', u'http://http://feeds.folha.uol.com.br/folha/pensata/sergiomalbergier/rss091.xml')
-        ,(u'Valdo Cruz', u'http://http://feeds.folha.uol.com.br/folha/pensata/valdocruz/rss091.xml')
+        ,(u'Esporte', u'http://feeds.folha.uol.com.br/folha/esporte/rss091.xml')
+        ,(u'Zapping', u'http://feeds.folha.uol.com.br/colunas/zapping/rss091.xml')
+        ,(u'Cida Santos', u'http://feeds.folha.uol.com.br/colunas/cidasantos/rss091.xml')
+        ,(u'Clóvis Rossi', u'http://feeds.folha.uol.com.br/colunas/clovisrossi/rss091.xml')
+        ,(u'Eliane Cantanhêde', u'http://feeds.folha.uol.com.br/colunas/elianecantanhede/rss091.xml')
+        ,(u'Fernando Canzian', u'http://feeds.folha.uol.com.br/colunas/fernandocanzian/rss091.xml')
+        ,(u'Gilberto Dimenstein', u'http://feeds.folha.uol.com.br/colunas/gilbertodimenstein/rss091.xml')
+        ,(u'Hélio Schwartsman', u'http://feeds.folha.uol.com.br/colunas/helioschwartsman/rss091.xml')
+        ,(u'Humberto Luiz Peron', u'http://feeds.folha.uol.com.br/colunas/futebolnarede/rss091.xml')
+        ,(u'João Pereira Coutinho', u'http://feeds.folha.uol.com.br/colunas/joaopereiracoutinho/rss091.xml')
+        ,(u'José Antonio Ramalho', u'http://feeds.folha.uol.com.br/colunas/canalaberto/rss091.xml')
+        ,(u'Kennedy Alencar', u'http://feeds.folha.uol.com.br/colunas/kennedyalencar/rss091.xml')
+        ,(u'Luiz Caversan', u'http://feeds.folha.uol.com.br/colunas/luizcaversan/rss091.xml')
+        ,(u'Luiz Rivoiro', u'http://feeds.folha.uol.com.br/colunas/paiepai/rss091.xml')
+        ,(u'Marcelo Leite', u'http://feeds.folha.uol.com.br/colunas/marceloleite/rss091.xml')
+        ,(u'Sérgio Malbergier', u'http://feeds.folha.uol.com.br/colunas/sergiomalbergier/rss091.xml')
+        ,(u'Sylvia Colombo', u'http://feeds.folha.uol.com.br/colunas/sylviacolombo/rss091.xml')
+        ,(u'Valdo Cruz', u'http://feeds.folha.uol.com.br/colunas/valdocruz/rss091.xml')
     ]

@@ -7,8 +7,9 @@ latercera.com
 from calibre.web.feeds.news import BasicNewsRecipe

 class LaTercera(BasicNewsRecipe):
+    news = True
     title = 'La Tercera'
-    __author__ = 'Darko Miletic'
+    __author__ = 'Darko Miletic and Alex Mitrani'
     description = 'El sitio de noticias online de Chile'
     publisher = 'La Tercera'
     category = 'news, politics, Chile'
@@ -18,8 +19,8 @@ class LaTercera(BasicNewsRecipe):
     encoding = 'cp1252'
     use_embedded_content = False
     remove_empty_feeds = True
-    language = 'es'
+    language = 'es_CL'

     conversion_options = {
         'comment'          : description
         , 'tags'           : category
@@ -28,28 +29,33 @@ class LaTercera(BasicNewsRecipe):
         , 'linearize_tables' : True
     }

-    keep_only_tags = [dict(name='div', attrs={'class':['span-16 articulo border','span-16 border','span-16']}) ]
+    keep_only_tags = [
+        dict(name='h1', attrs={'class':['titularArticulo']})
+        ,dict(name='h4', attrs={'class':['bajadaArt']})
+        ,dict(name='h5', attrs={'class':['autorArt']})
+        ,dict(name='div', attrs={'class':['articleContent']})
+    ]

     remove_tags = [
-        dict(name=['ul','input','base'])
-        ,dict(name='div', attrs={'id':['boxComentarios','shim','enviarAmigo']})
-        ,dict(name='div', attrs={'class':['ad640','span-10 imgSet A','infoRelCol']})
-        ,dict(name='p', attrs={'id':['mensajeError','mensajeEnviandoNoticia','mensajeExito']})
+        dict(name='div', attrs={'class':['boxCompartir','keywords']})
+    ]
+
+    remove_tags_after = [
+        dict(name='div', attrs={'class':['keywords']})
     ]

-    feeds = [
-        (u'Noticias de ultima hora', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&ul=1')
+    feeds = [(u'La Tercera', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&ul=1')
+        ,(u'Politica', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=674')
         ,(u'Nacional', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=680')
-        ,(u'Politica', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=674')
         ,(u'Mundo', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=678')
-        ,(u'Deportes', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=656')
         ,(u'Negocios', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=655')
-        ,(u'Entretenimiento', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=661')
-        ,(u'Motores', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=665')
+        ,(u'Santiago', u'http://www.latercera.com/feed/manager?type=rss&sc=TEFURVJDRVJB&citId=9&categoryId=1731')
         ,(u'Tendencias', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=659')
-        ,(u'Estilo', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=660')
         ,(u'Educacion', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=657')
+        ,(u'Cultura', u'http://www.latercera.com/feed/manager?type=rss&sc=TEFURVJDRVJB&citId=9&categoryId=1453')
+        ,(u'Entretención', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=661')
+        ,(u'Deportes', u'http://www.latercera.com/app/rss?sc=TEFURVJDRVJB&category=656')
     ]

     def preprocess_html(self, soup):

@@ -18,21 +18,28 @@ class Liberation(BasicNewsRecipe):
     max_articles_per_feed = 100
     no_stylesheets = True
     use_embedded_content = False

     html2lrf_options = ['--base-font-size', '10']

     keep_only_tags = [
         dict(name='h1')
-        ,dict(name='div', attrs={'class':'articleContent'})
+        #,dict(name='div', attrs={'class':'object-content text text-item'})
+        ,dict(name='div', attrs={'class':'article'})
+        #,dict(name='div', attrs={'class':'articleContent'})
         ,dict(name='div', attrs={'class':'entry'})
     ]
+    remove_tags_after = [ dict(name='div',attrs={'class':'toolbox extra_toolbox'}) ]
     remove_tags = [
         dict(name='p', attrs={'class':'clear'})
         ,dict(name='ul', attrs={'class':'floatLeft clear'})
         ,dict(name='div', attrs={'class':'clear floatRight'})
         ,dict(name='object')
+        ,dict(name='div', attrs={'class':'toolbox'})
+        ,dict(name='div', attrs={'class':'cartridge cartridge-basic-bubble cat-zoneabo'})
+        #,dict(name='div', attrs={'class':'clear block block-call-items'})
+        ,dict(name='div', attrs={'class':'block-content'})
     ]

     feeds = [
         (u'La une', u'http://www.liberation.fr/rss/laune')
         ,(u'Monde' , u'http://www.liberation.fr/rss/monde')

@@ -181,7 +181,7 @@ save_template_title_series_sorting = 'library_order'
 # To disable use the expression: '^$'
 # This expression is designed for articles that are followed by spaces. If you
 # also need to match articles that are followed by other characters, for example L'
-# in French, use: r"^(A\s+|The\s+|An\s+|L')" instead.
+# in French, use: "^(A\s+|The\s+|An\s+|L')" instead.
 # Default: '^(A|The|An)\s+'
 title_sort_articles=r'^(A|The|An)\s+'

@@ -290,7 +290,7 @@ class LinuxFreeze(Command):

         launcher = textwrap.dedent('''\
                 #!/bin/sh
-                path=`readlink -e $0`
+                path=`readlink -f $0`
                 base=`dirname $path`
                 lib=$base/lib
                 export LD_LIBRARY_PATH=$lib:$LD_LIBRARY_PATH

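Note on the launcher fix above (matching the changelog entry): GNU readlink -e requires every component of the path to exist, while -f canonicalizes the path even if the final component is missing, and recent busybox releases do not implement -e. The Python equivalent of what the shell line computes (illustrative, not part of the commit):

    import os

    def resolve_launcher_path(argv0):
        # os.path.realpath behaves like `readlink -f`: it follows symlinks
        # and canonicalizes without requiring every component to exist.
        return os.path.realpath(argv0)
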
@@ -4,7 +4,7 @@ __license__   = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
 __docformat__ = 'restructuredtext en'
 __appname__   = u'calibre'
-numeric_version = (0, 8, 14)
+numeric_version = (0, 8, 15)
 __version__   = u'.'.join(map(unicode, numeric_version))
 __author__    = u"Kovid Goyal <kovid@kovidgoyal.net>"

@@ -92,7 +92,7 @@ def restore_plugin_state_to_default(plugin_or_name):
     config['enabled_plugins'] = ep

 default_disabled_plugins = set([
-    'Overdrive', 'Douban Books',
+    'Overdrive', 'Douban Books', 'OZON.ru',
 ])

 def is_disabled(plugin):

@@ -122,12 +122,17 @@ class Cache(object):
         formats = self._field_for('formats', book_id)
         mi.format_metadata = {}
         if not formats:
-            formats = None
+            good_formats = None
         else:
+            good_formats = []
             for f in formats:
-                mi.format_metadata[f] = self._format_metadata(book_id, f)
-            formats = ','.join(formats)
-        mi.formats = formats
+                try:
+                    mi.format_metadata[f] = self._format_metadata(book_id, f)
+                except:
+                    pass
+                else:
+                    good_formats.append(f)
+        mi.formats = good_formats
         mi.has_cover = _('Yes') if self._field_for('cover', book_id,
                 default_value=False) else ''
         mi.tags = list(self._field_for('tags', book_id, default_value=()))

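Note on the cache change above: instead of letting one damaged format entry abort metadata assembly for the whole book, the loop now keeps only the formats whose metadata can actually be read. The pattern in isolation (hypothetical names, not calibre's API):

    def usable_formats(formats, read_metadata):
        # Return only the formats whose metadata loads cleanly;
        # None when the book has no formats at all.
        if not formats:
            return None
        good = []
        for fmt in formats:
            try:
                read_metadata(fmt)   # may raise for a damaged entry
            except Exception:
                pass                 # skip it rather than fail the book
            else:
                good.append(fmt)
        return good
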
@@ -30,6 +30,7 @@ class ANDROID(USBMS):
             0xca2 : [0x100, 0x0227, 0x0226, 0x222],
             0xca3 : [0x100, 0x0227, 0x0226, 0x222],
             0xca4 : [0x100, 0x0227, 0x0226, 0x222],
+            0xca9 : [0x100, 0x0227, 0x0226, 0x222]
         },

         # Eken

@@ -8,7 +8,7 @@ __docformat__ = 'restructuredtext en'
 Device driver for Sanda's Bambook
 '''

-import time, os, hashlib
+import time, os, hashlib, shutil
 from itertools import cycle
 from calibre.devices.interface import DevicePlugin
 from calibre.devices.usbms.deviceconfig import DeviceConfig

|
|||||||
|
|
||||||
ip = None
|
ip = None
|
||||||
|
|
||||||
FORMATS = [ "snb" ]
|
FORMATS = [ "snb", "pdf" ]
|
||||||
USER_CAN_ADD_NEW_FORMATS = False
|
USER_CAN_ADD_NEW_FORMATS = False
|
||||||
VENDOR_ID = 0x230b
|
VENDOR_ID = 0x230b
|
||||||
PRODUCT_ID = 0x0001
|
PRODUCT_ID = 0x0001
|
||||||
@ -267,14 +267,59 @@ class BAMBOOK(DeviceConfig, DevicePlugin):
|
|||||||
for (i, f) in enumerate(files):
|
for (i, f) in enumerate(files):
|
||||||
self.report_progress((i+1) / float(len(files)), _('Transferring books to device...'))
|
self.report_progress((i+1) / float(len(files)), _('Transferring books to device...'))
|
||||||
if not hasattr(f, 'read'):
|
if not hasattr(f, 'read'):
|
||||||
if self.bambook.VerifySNB(f):
|
# Handle PDF File
|
||||||
guid = self.bambook.SendFile(f, self.get_guid(metadata[i].uuid))
|
if f[-3:].upper() == "PDF":
|
||||||
if guid:
|
# Package the PDF file
|
||||||
paths.append(guid)
|
with TemporaryDirectory() as tdir:
|
||||||
else:
|
snbcdir = os.path.join(tdir, 'snbc')
|
||||||
print "Send fail"
|
snbfdir = os.path.join(tdir, 'snbf')
|
||||||
|
os.mkdir(snbcdir)
|
||||||
|
os.mkdir(snbfdir)
|
||||||
|
|
||||||
|
tmpfile = open(os.path.join(snbfdir, 'book.snbf'), 'wb')
|
||||||
|
tmpfile.write('''<book-snbf version="1.0">
|
||||||
|
<head>
|
||||||
|
<name><![CDATA[''' + metadata[i].title + ''']]></name>
|
||||||
|
<author><![CDATA[''' + ' '.join(metadata[i].authors) + ''']]></author>
|
||||||
|
<language>ZH-CN</language>
|
||||||
|
<rights/>
|
||||||
|
<publisher>calibre</publisher>
|
||||||
|
<generator>''' + __appname__ + ' ' + __version__ + '''</generator>
|
||||||
|
<created/>
|
||||||
|
<abstract></abstract>
|
||||||
|
<cover/>
|
||||||
|
</head>
|
||||||
|
</book-snbf>
|
||||||
|
''')
|
||||||
|
tmpfile.close()
|
||||||
|
tmpfile = open(os.path.join(snbfdir, 'toc.snbf'), 'wb')
|
||||||
|
tmpfile.write('''<toc-snbf>
|
||||||
|
<head>
|
||||||
|
<chapters>1</chapters>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<chapter src="pdf1.pdf"><![CDATA[''' + metadata[i].title + ''']]></chapter>
|
||||||
|
</body>
|
||||||
|
</toc-snbf>
|
||||||
|
''');
|
||||||
|
tmpfile.close()
|
||||||
|
pdf_name = os.path.join(snbcdir, "pdf1.pdf")
|
||||||
|
shutil.copyfile(f, pdf_name)
|
||||||
|
|
||||||
|
with TemporaryFile('.snb') as snbfile:
|
||||||
|
if self.bambook.PackageSNB(snbfile, tdir) and self.bambook.VerifySNB(snbfile):
|
||||||
|
guid = self.bambook.SendFile(snbfile, self.get_guid(metadata[i].uuid))
|
||||||
|
|
||||||
|
elif f[-3:].upper() == 'SNB':
|
||||||
|
if self.bambook.VerifySNB(f):
|
||||||
|
guid = self.bambook.SendFile(f, self.get_guid(metadata[i].uuid))
|
||||||
else:
|
else:
|
||||||
print "book invalid"
|
print "book invalid"
|
||||||
|
if guid:
|
||||||
|
paths.append(guid)
|
||||||
|
else:
|
||||||
|
print "Send fail"
|
||||||
|
|
||||||
ret = zip(paths, cycle([on_card]))
|
ret = zip(paths, cycle([on_card]))
|
||||||
self.report_progress(1.0, _('Transferring books to device...'))
|
self.report_progress(1.0, _('Transferring books to device...'))
|
||||||
return ret
|
return ret
|
||||||
|
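Note on the Bambook change above: a PDF cannot be sent to the device directly, so the driver wraps it in an SNB container -- an snbf/ directory holding the book.snbf and toc.snbf metadata files and an snbc/ directory holding the PDF payload -- then packages and verifies the result with the Bambook SDK before sending. A sketch of just the layout step, using stdlib helpers (the real code uses calibre's TemporaryDirectory and the SDK's PackageSNB/VerifySNB/SendFile):

    import os, shutil, tempfile

    def build_snb_layout(pdf_path):
        # Lay out the two directories PackageSNB expects:
        #   snbf/ -> book.snbf, toc.snbf (XML metadata, as in the diff)
        #   snbc/ -> pdf1.pdf (the payload)
        tdir = tempfile.mkdtemp()
        os.mkdir(os.path.join(tdir, 'snbf'))
        os.mkdir(os.path.join(tdir, 'snbc'))
        shutil.copyfile(pdf_path, os.path.join(tdir, 'snbc', 'pdf1.pdf'))
        return tdir
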
@@ -100,23 +100,28 @@ class FB2Input(InputFormatPlugin):
             mi.title = _('Unknown')
         if not mi.authors:
             mi.authors = [_('Unknown')]
-        opf = OPFCreator(os.getcwdu(), mi)
-        entries = [(f, guess_type(f)[0]) for f in os.listdir('.')]
-        opf.create_manifest(entries)
-        opf.create_spine(['index.xhtml'])
+        cpath = None
         if mi.cover_data and mi.cover_data[1]:
             with open('fb2_cover_calibre_mi.jpg', 'wb') as f:
                 f.write(mi.cover_data[1])
-            opf.guide.set_cover(os.path.abspath('fb2_cover_calibre_mi.jpg'))
+            cpath = os.path.abspath('fb2_cover_calibre_mi.jpg')
         else:
             for img in doc.xpath('//f:coverpage/f:image', namespaces=NAMESPACES):
                 href = img.get('{%s}href'%XLINK_NS, img.get('href', None))
                 if href is not None:
                     if href.startswith('#'):
                         href = href[1:]
-                    opf.guide.set_cover(os.path.abspath(href))
+                    cpath = os.path.abspath(href)
+                    break

-        opf.render(open('metadata.opf', 'wb'))
+        opf = OPFCreator(os.getcwdu(), mi)
+        entries = [(f, guess_type(f)[0]) for f in os.listdir('.')]
+        opf.create_manifest(entries)
+        opf.create_spine(['index.xhtml'])
+        if cpath:
+            opf.guide.set_cover(cpath)
+        with open('metadata.opf', 'wb') as f:
+            opf.render(f)
         return os.path.join(os.getcwd(), 'metadata.opf')

     def extract_embedded_content(self, doc):

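Note on the FB2Input restructuring above: the OPF is now created only after the cover path is known, so the guide entry is set once, and the file handle is managed with a with block instead of the leaked open() inside opf.render(). The handle pattern in isolation:

    def render_leaky(render):
        # Old pattern: the file object is never explicitly closed, so a
        # failure in render() can leave a half-written metadata.opf behind.
        render(open('metadata.opf', 'wb'))

    def render_safe(render):
        # New pattern: the with block closes the handle even on error.
        with open('metadata.opf', 'wb') as f:
            render(f)
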
@@ -147,15 +147,12 @@ def _parse_cover_data(root, imgid, mi):
     if elm_binary:
         mimetype = elm_binary[0].get('content-type', 'image/jpeg')
         mime_extensions = guess_all_extensions(mimetype)

         if not mime_extensions and mimetype.startswith('image/'):
-            prints("WARNING: Unsupported or misspelled mime-type '%s'. "\
-                   "Trying to recovery mime-type from id_ref='%s'" % (mimetype, imgid) )
-            ctype = guess_type(imgid) # -> (mime-type, encoding)
-            mimetype_fromid = ctype[0]
+            mimetype_fromid = guess_type(imgid)[0]
             if mimetype_fromid and mimetype_fromid.startswith('image/'):
                 mime_extensions = guess_all_extensions(mimetype_fromid)

         if mime_extensions:
             pic_data = elm_binary[0].text
             if pic_data:

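Note on the cover-data change above: when the declared content-type yields no known extension, the code recovers the type from the image id instead. A self-contained sketch using the stdlib equivalents of the helpers involved:

    from mimetypes import guess_all_extensions, guess_type

    def extensions_for(mimetype, imgid):
        # Prefer the declared mime-type; if it is unknown but claims to be
        # an image, fall back to guessing from the id's extension,
        # e.g. 'cover.jpg' -> 'image/jpeg'.
        exts = guess_all_extensions(mimetype)
        if not exts and mimetype.startswith('image/'):
            mimetype_fromid = guess_type(imgid)[0]
            if mimetype_fromid and mimetype_fromid.startswith('image/'):
                exts = guess_all_extensions(mimetype_fromid)
        return exts
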
@@ -1027,7 +1027,7 @@ class OPF(object): # {{{
         if self.guide is not None:
             for t in ('cover', 'other.ms-coverimage-standard', 'other.ms-coverimage'):
                 for item in self.guide:
-                    if item.type.lower() == t:
+                    if item.type and item.type.lower() == t:
                         return item.path
         try:
             return self.guess_cover()

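Note on the OPF fix above: guide items may carry no type at all, and None.lower() raises AttributeError; the and-guard short-circuits before the method call. The idiom in isolation:

    item_type = None                       # a guide entry without a type
    wanted = 'cover'
    # Without the guard this would raise AttributeError on None.lower():
    matches = bool(item_type and item_type.lower() == wanted)
    assert matches is False
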
@@ -1,6 +1,5 @@
 # -*- coding: utf-8 -*-
 from __future__ import (unicode_literals, division, absolute_import, print_function)
-from xml.etree.ElementTree import _Element

 __license__ = 'GPL 3'
 __copyright__ = '2011, Roman Mukhin <ramses_ru at hotmail.com>'
@@ -12,10 +11,8 @@ import datetime
 from urllib import quote_plus
 from Queue import Queue, Empty
 from lxml import etree, html
-from lxml.etree import ElementBase
 from calibre import as_unicode

-from calibre import prints
 from calibre.ebooks.chardet import xml_to_unicode

 from calibre.ebooks.metadata import check_isbn
@@ -27,16 +24,16 @@ class Ozon(Source):
     description = _('Downloads metadata and covers from OZON.ru')

     capabilities = frozenset(['identify', 'cover'])

     touched_fields = frozenset(['title', 'authors', 'identifier:isbn', 'identifier:ozon',
                                 'publisher', 'pubdate', 'comments', 'series', 'rating', 'language'])
     # Test purpose only, test function does not like when sometimes some filed are empty
     #touched_fields = frozenset(['title', 'authors', 'identifier:isbn', 'identifier:ozon',
     #                           'publisher', 'pubdate', 'comments'])

     supports_gzip_transfer_encoding = True
     has_html_comments = True

     ozon_url = 'http://www.ozon.ru'

     # match any ISBN10/13. From "Regular Expressions Cookbook"
@@ -53,11 +50,11 @@ class Ozon(Source):
         res = ('ozon', ozon_id, url)
         return res
     # }}}

     def create_query(self, log, title=None, authors=None, identifiers={}): # {{{
         # div_book -> search only books, ebooks and audio books
         search_url = self.ozon_url + '/webservice/webservice.asmx/SearchWebService?searchContext=div_book&searchText='

         isbn = _format_isbn(log, identifiers.get('isbn', None))
         # TODO: format isbn!
         qItems = set([isbn, title])
@@ -66,7 +63,7 @@ class Ozon(Source):
         qItems.discard(None)
         qItems.discard('')
         qItems = map(_quoteString, qItems)

         q = ' '.join(qItems).strip()
         log.info(u'search string: ' + q)

@@ -74,10 +71,10 @@ class Ozon(Source):
         q = q.encode('utf-8')
         if not q:
             return None

         search_url += quote_plus(q)
         log.debug(u'search url: %r'%search_url)

         return search_url
     # }}}

@@ -93,11 +90,11 @@ class Ozon(Source):

         try:
             raw = self.browser.open_novisit(query).read()

         except Exception as e:
             log.exception(u'Failed to make identify query: %r'%query)
             return as_unicode(e)

         try:
             parser = etree.XMLParser(recover=True, no_network=True)
             feed = etree.fromstring(xml_to_unicode(raw, strip_encoding_pats=True, assume_utf8=True)[0], parser=parser)
@@ -110,14 +107,14 @@ class Ozon(Source):
             return as_unicode(e)

     # }}}

     def get_metadata(self, log, entries, title, authors, identifiers): # {{{
         title = unicode(title).upper() if title else ''
         authors = map(unicode.upper, map(unicode, authors)) if authors else None
         ozon_id = identifiers.get('ozon', None)

         unk = unicode(_('Unknown')).upper()

         if title == unk:
             title = None

@@ -129,7 +126,7 @@ class Ozon(Source):
             for miauthor in miauthors:
                 if author in miauthor: return True
             return None

         def ensure_metadata_match(mi): # {{{
             match = True
             if title:
@@ -138,13 +135,13 @@ class Ozon(Source):
             if match and authors:
                 miauthors = map(unicode.upper, map(unicode, mi.authors)) if mi.authors else []
                 match = in_authors(authors, miauthors)

             if match and ozon_id:
                 mozon_id = mi.identifiers['ozon']
                 match = ozon_id == mozon_id

             return match

         metadata = []
         for i, entry in enumerate(entries):
             mi = self.to_metadata(log, entry)
@@ -159,64 +156,64 @@ class Ozon(Source):

     def get_all_details(self, log, metadata, abort, result_queue, identifiers, timeout): # {{{
         req_isbn = identifiers.get('isbn', None)

         for mi in metadata:
             if abort.is_set():
                 break
             try:
                 ozon_id = mi.identifiers['ozon']

                 try:
                     self.get_book_details(log, mi, timeout)
                 except:
                     log.exception(u'Failed to get details for metadata: %s'%mi.title)

                 all_isbns = getattr(mi, 'all_isbns', [])
                 if req_isbn and all_isbns and check_isbn(req_isbn) not in all_isbns:
                     log.debug(u'skipped, no requested ISBN %s found'%req_isbn)
                     continue

                 for isbn in all_isbns:
                     self.cache_isbn_to_identifier(isbn, ozon_id)

                 if mi.ozon_cover_url:
                     self.cache_identifier_to_cover_url(ozon_id, mi.ozon_cover_url)

                 self.clean_downloaded_metadata(mi)
                 result_queue.put(mi)
             except:
                 log.exception(u'Failed to get details for metadata: %s'%mi.title)
     # }}}

     def to_metadata(self, log, entry): # {{{
         xp_template = 'normalize-space(./*[local-name() = "{0}"]/text())'

         title = entry.xpath(xp_template.format('Name'))
         author = entry.xpath(xp_template.format('Author'))
         mi = Metadata(title, author.split(','))

         ozon_id = entry.xpath(xp_template.format('ID'))
         mi.identifiers = {'ozon':ozon_id}

         mi.comments = entry.xpath(xp_template.format('Annotation'))

         mi.ozon_cover_url = None
         cover = entry.xpath(xp_template.format('Picture'))
         if cover:
             mi.ozon_cover_url = _translateToBigCoverUrl(cover)

         rating = entry.xpath(xp_template.format('ClientRatingValue'))
         if rating:
             try:
                 #'rating', A floating point number between 0 and 10
                 # OZON raion N of 5, calibre of 10, but there is a bug? in identify
                 mi.rating = float(rating)
             except:
                 pass
             rating
         return mi
     # }}}

     def get_cached_cover_url(self, identifiers): # {{{
         url = None
         ozon_id = identifiers.get('ozon', None)
@@ -248,14 +245,14 @@ class Ozon(Source):
             cached_url = self.get_cached_cover_url(mi.identifiers)
             if cached_url is not None:
                 break

         if cached_url is None:
             log.info('No cover found')
             return

         if abort.is_set():
             return

         log.debug('Downloading cover from:', cached_url)
         try:
             cdata = self.browser.open_novisit(cached_url, timeout=timeout).read()
@@ -265,10 +262,10 @@ class Ozon(Source):
             log.exception(u'Failed to download cover from: %s'%cached_url)
             return as_unicode(e)
     # }}}

     def get_book_details(self, log, metadata, timeout): # {{{
         url = self.get_book_url(metadata.get_identifiers())[2]

         raw = self.browser.open_novisit(url, timeout=timeout).read()
         doc = html.fromstring(raw)

@@ -298,14 +295,14 @@ class Ozon(Source):
         if matcher:
             year = int(matcher.group(0))
             # only year is available, so use 1-st of Jan
             metadata.pubdate = datetime.datetime(year, 1, 1) #<- failed comparation in identify.py
             #metadata.pubdate = datetime(year, 1, 1)
         xpt = u'substring-after(string(../text()[contains(., "Язык")]), ": ")'
         displLang = publishers[0].xpath(xpt)
         lang_code =_translageLanguageToCode(displLang)
         if lang_code:
             metadata.language = lang_code

         # overwrite comments from HTML if any
         # tr/td[contains(.//text(), "От издателя")] -> does not work, why?
         xpt = u'//div[contains(@class, "detail")]//tr/td//text()[contains(., "От издателя")]'\
@@ -323,14 +320,14 @@ class Ozon(Source):
 # }}}

 def _quoteString(str): # {{{
     return '"' + str + '"' if str and str.find(' ') != -1 else str
 # }}}

 # TODO: make customizable
 def _translateToBigCoverUrl(coverUrl): # {{{
     # http://www.ozon.ru/multimedia/books_covers/small/1002986468.gif
     # http://www.ozon.ru/multimedia/books_covers/1002986468.jpg

     m = re.match(r'^(.+\/)small\/(.+\.).+$', coverUrl)
     if m:
         coverUrl = m.group(1) + m.group(2) + 'jpg'
@@ -339,12 +336,12 @@ def _translateToBigCoverUrl(coverUrl): # {{{

 def _get_affiliateId(): # {{{
     import random

     aff_id = 'romuk'
     # Use Kovid's affiliate id 30% of the time.
     if random.randint(1, 10) in (1, 2, 3):
         aff_id = 'kovidgoyal'
     return aff_id
 # }}}

 # for now only RUS ISBN are supported
@@ -387,10 +384,10 @@ def _format_isbn(log, isbn): # {{{
 def _translageLanguageToCode(displayLang): # {{{
     displayLang = unicode(displayLang).strip() if displayLang else None
     langTbl = { None: 'ru',
                 u'Немецкий': 'de',
                 u'Английский': 'en',
                 u'Французский': 'fr',
                 u'Итальянский': 'it',
                 u'Испанский': 'es',
                 u'Китайский': 'zh',
                 u'Японский': 'ja' }
@@ -406,7 +403,7 @@ if __name__ == '__main__': # tests {{{

     test_identify_plugin(Ozon.name,
         [

             (
                 {'identifiers':{'isbn': '9785916572629'} },
                 [title_test(u'На все четыре стороны', exact=True),
@@ -442,4 +439,4 @@ if __name__ == '__main__': # tests {{{
                 [title_test(u'Метро', exact=False)]
             ),
     ])
 # }}}

@@ -1421,7 +1421,7 @@ class MOBIFile(object): # {{{
             except:
                 pass
             if fmt is not None:
-                self.image_records.append(ImageRecord(i, r, fmt))
+                self.image_records.append(ImageRecord(len(self.image_records)+1, r, fmt))
             else:
                 self.binary_records.append(BinaryRecord(i, r))

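Note on the debug-tool fix above: image records are numbered by their 1-based position among the image records, not by their absolute record index in the MOBI file, so the number is derived from the list length at append time. Schematically (looks_like_image is a hypothetical predicate standing in for the format sniffing):

    def split_records(records, looks_like_image):
        image_records, binary_records = [], []
        for i, r in enumerate(records):
            if looks_like_image(r):
                # Number images 1..N by position among images only.
                image_records.append((len(image_records) + 1, r))
            else:
                # Binary records keep their absolute index.
                binary_records.append((i, r))
        return image_records, binary_records
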
@@ -314,6 +314,8 @@ def detect_periodical(toc, log=None):
     Detect if the TOC object toc contains a periodical that conforms to the
     structure required by kindlegen to generate a periodical.
     '''
+    if toc.count() < 1 or not toc[0].klass == 'periodical':
+        return False
     for node in toc.iterdescendants():
         if node.depth() == 1 and node.klass != 'article':
             if log is not None:

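Note on the guard added above: an empty TOC, or one whose first entry is not classed 'periodical', can never satisfy the kindlegen periodical structure, so the function rejects it before walking the descendants. A runnable sketch of the precondition, with Node standing in for calibre's TOC objects:

    class Node(object):
        def __init__(self, klass, children=()):
            self.klass, self.children = klass, list(children)
        def count(self):
            return len(self.children)
        def __getitem__(self, i):
            return self.children[i]

    def could_be_periodical(toc):
        # Cheap structural preconditions first; only then would the
        # expensive per-node validation walk run.
        return toc.count() >= 1 and toc[0].klass == 'periodical'

    print(could_be_periodical(Node(None, [])))                    # False
    print(could_be_periodical(Node(None, [Node('periodical')])))  # True
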
@@ -109,20 +109,6 @@ class TAGX(object): # {{{
         list(map(self.add_tag, (11, 0)))
         return self.header(1) + bytes(self.byts)

-
-class TAGX_BOOK(TAGX):
-    BITMASKS = dict(TAGX.BITMASKS)
-    BITMASKS.update({x:(1 << i) for i, x in enumerate([1, 2, 3, 4, 21, 22, 23])})
-
-    @property
-    def hierarchical_book(self):
-        '''
-        TAGX block for the primary index header of a hierarchical book
-        '''
-        list(map(self.add_tag, (1, 2, 3, 4, 21, 22, 23, 0)))
-        return self.header(1) + bytes(self.byts)
-
     @property
     def flat_book(self):
         '''

@@ -244,17 +230,6 @@ class IndexEntry(object):
         ans = buf.getvalue()
         return ans

-
-class BookIndexEntry(IndexEntry):
-
-    @property
-    def entry_type(self):
-        tagx = TAGX_BOOK()
-        ans = 0
-        for tag in self.tag_nums:
-            ans |= tagx.BITMASKS[tag]
-        return ans
-
-
 class PeriodicalIndexEntry(IndexEntry):

     def __init__(self, offset, label_offset, class_offset, depth):

@@ -305,9 +280,7 @@ class TBS(object): # {{{
     def __init__(self, data, is_periodical, first=False, section_map={},
             after_first=False):
         self.section_map = section_map
-        #import pprint
-        #pprint.pprint(data)
-        #print()
+
         if is_periodical:
             # The starting bytes.
             # The value is zero which I think indicates the periodical

@@ -420,6 +393,8 @@ class TBS(object): # {{{
             first_article = articles[0]
             last_article = articles[-1]
             num = len(articles)
+            last_article_ends = (last_article in data['ends'] or
+                    last_article in data['completes'])

             try:
                 next_sec = sections[i+1]

@@ -440,6 +415,19 @@ class TBS(object): # {{{
                 if next_sec is not None:
                     buf.write(encode_tbs(last_article.index-next_sec.index,
                         {0b1000: 0}))
+
+
+                # If a section TOC starts and extends into the next record add
+                # a trailing vwi. We detect this by TBS type==3, processing last
+                # section present in the record, and the last article in that
+                # section either ends or completes and doesn't finish
+                # on the last byte of the record.
+                elif (typ == self.type_011 and last_article_ends and
+                        ((last_article.offset+last_article.size) % RECORD_SIZE > 0)
+                    ):
+                    buf.write(encode_tbs(last_article.index-section.index-1,
+                        {0b1000: 0}))
+
             else:
                 buf.write(encode_tbs(spanner.index - parent_section_index,
                     {0b0001: 0}))

@@ -447,7 +435,26 @@ class TBS(object): # {{{
         self.bytestring = buf.getvalue()

     def book_tbs(self, data, first):
-        self.bytestring = b''
+        spanner = data['spans']
+        if spanner is not None:
+            self.bytestring = encode_tbs(spanner.index, {0b010: 0, 0b001: 0},
+                    flag_size=3)
+        else:
+            starts, completes, ends = (data['starts'], data['completes'],
+                    data['ends'])
+            if (not completes and (
+                (len(starts) == 1 and not ends) or (len(ends) == 1 and not
+                    starts))):
+                node = starts[0] if starts else ends[0]
+                self.bytestring = encode_tbs(node.index, {0b010: 0}, flag_size=3)
+            else:
+                nodes = []
+                for x in (starts, completes, ends):
+                    nodes.extend(x)
+                nodes.sort(key=lambda x:x.index)
+                self.bytestring = encode_tbs(nodes[0].index, {0b010:0,
+                    0b100: len(nodes)}, flag_size=3)

 # }}}

 class Indexer(object): # {{{

@@ -518,6 +525,7 @@ class Indexer(object): # {{{
         for i in indices:
             offsets.append(buf.tell())
             buf.write(i.bytestring)
+
         index_block = align_block(buf.getvalue())

         # Write offsets to index entries as an IDXT block

@@ -557,9 +565,7 @@ class Indexer(object): # {{{
             tagx_block = TAGX().secondary
         else:
             tagx_block = (TAGX().periodical if self.is_periodical else
-                    (TAGX_BOOK().hierarchical_book if
-                        self.book_has_subchapters else
-                        TAGX_BOOK().flat_book))
+                    TAGX().flat_book)
         header_length = 192

         # Ident 0 - 4

@@ -645,15 +651,13 @@ class Indexer(object): # {{{
     # }}}

     def create_book_index(self): # {{{
-        self.book_has_subchapters = False
         indices = []
-        seen, sub_seen = set(), set()
+        seen = set()
         id_offsets = self.serializer.id_offsets

-        # Flatten toc to contain only chapters and subchapters
-        # Anything deeper than a subchapter is made into a subchapter
-        chapters = []
-        for node in self.oeb.toc:
+        # Flatten toc so that chapter to chapter jumps work with all sub
+        # chapter levels as well
+        for node in self.oeb.toc.iterdescendants():
             try:
                 offset = id_offsets[node.href]
                 label = self.cncx[node.title]

@@ -666,77 +670,33 @@ class Indexer(object): # {{{
                 continue
             seen.add(offset)

-            subchapters = []
-            chapters.append((offset, label, subchapters))
-
-            for descendant in node.iterdescendants():
-                try:
-                    offset = id_offsets[descendant.href]
-                    label = self.cncx[descendant.title]
-                except:
-                    self.log.warn('TOC item %s [%s] not found in document'%(
-                        descendant.title, descendant.href))
-                    continue
-
-                if offset in sub_seen:
-                    continue
-                sub_seen.add(offset)
-                subchapters.append((offset, label))
-
-            subchapters.sort(key=lambda x:x[0])
-
-        chapters.sort(key=lambda x:x[0])
-
-        chapters = [(BookIndexEntry(x[0], x[1]), [
-            BookIndexEntry(y[0], y[1]) for y in x[2]]) for x in chapters]
-
-        def set_length(indices):
-            for i, index in enumerate(indices):
-                try:
-                    next_offset = indices[i+1].offset
-                except:
-                    next_offset = self.serializer.body_end_offset
-                index.length = next_offset - index.offset
-
-        # Set chapter and subchapter lengths
-        set_length([x[0] for x in chapters])
-        for x in chapters:
-            set_length(x[1])
-
-        # Remove empty chapters
-        chapters = [x for x in chapters if x[0].length > 0]
-
-        # Remove invalid subchapters
-        for i, x in enumerate(list(chapters)):
-            chapter, subchapters = x
-            ok_subchapters = []
-            for sc in subchapters:
-                if sc.offset < chapter.next_offset and sc.length > 0:
-                    ok_subchapters.append(sc)
-            chapters[i] = (chapter, ok_subchapters)
-
-        # Reset chapter and subchapter lengths in case any were removed
-        set_length([x[0] for x in chapters])
-        for x in chapters:
-            set_length(x[1])
-
-        # Set index and depth values
-        indices = []
-        for index, x in enumerate(chapters):
-            x[0].index = index
-            indices.append(x[0])
-
-        for chapter, subchapters in chapters:
-            for sc in subchapters:
-                index += 1
-                sc.index = index
-                sc.parent_index = chapter.index
-                indices.append(sc)
-                sc.depth = 1
-                self.book_has_subchapters = True
-            if subchapters:
-                chapter.first_child_index = subchapters[0].index
-                chapter.last_child_index = subchapters[-1].index
+            indices.append(IndexEntry(offset, label))
+
+        indices.sort(key=lambda x:x.offset)
+
+        # Set lengths
+        for i, index in enumerate(indices):
+            try:
+                next_offset = indices[i+1].offset
+            except:
+                next_offset = self.serializer.body_end_offset
+            index.length = next_offset - index.offset
+
+        # Remove empty indices
+        indices = [x for x in indices if x.length > 0]
+
+        # Reset lengths in case any were removed
+        for i, index in enumerate(indices):
+            try:
+                next_offset = indices[i+1].offset
+            except:
+                next_offset = self.serializer.body_end_offset
+            index.length = next_offset - index.offset
+
+        # Set index values
+        for index, x in enumerate(indices):
+            x.index = index

         return indices

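The rewritten create_book_index boils down to one invariant: once the flattened entries are sorted by offset, each entry's length is the distance to its successor (or to the end of the body for the last one), and zero-length entries can be dropped. A minimal, self-contained sketch of that invariant, with a made-up Entry stand-in and invented offsets:

    # Hypothetical stand-in for the real IndexEntry class; only the
    # fields used by the length computation are modelled here.
    class Entry(object):
        def __init__(self, offset, label):
            self.offset, self.label = offset, label
            self.length = 0

    def set_lengths(entries, body_end):
        # Each entry runs from its own offset to the next entry's
        # offset; the last entry runs to the end of the body text.
        entries.sort(key=lambda x: x.offset)
        for i, e in enumerate(entries):
            nxt = entries[i + 1].offset if i + 1 < len(entries) else body_end
            e.length = nxt - e.offset
        # Entries that start at the same offset as their successor end
        # up with zero length and are removed, just as in the diff.
        return [e for e in entries if e.length > 0]

    entries = [Entry(900, 'c3'), Entry(0, 'c1'), Entry(500, 'c2'), Entry(500, 'dup')]
    print([(e.label, e.length) for e in set_lengths(entries, 1200)])
    # [('c1', 500), ('dup', 400), ('c3', 300)] -- the zero-length duplicate is dropped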
@@ -772,9 +732,11 @@ class Indexer(object): # {{{
                 continue
             if offset in seen_sec_offsets:
                 continue

             seen_sec_offsets.add(offset)
             section = PeriodicalIndexEntry(offset, label, klass, 1)
             section.parent_index = 0

             for art in sec:
                 try:
                     offset = id_offsets[art.href]
@@ -830,6 +792,7 @@ class Indexer(object): # {{{
             for art in articles:
                 i += 1
                 art.index = i

                 art.parent_index = sec.index

         for sec, normalized_articles in normalized_sections:
@@ -905,6 +868,7 @@ class Indexer(object): # {{{
                 'spans':None, 'offset':offset, 'record_number':i+1}

         for index in self.indices:

             if index.offset >= next_offset:
                 # Node starts after current record
                 if index.depth == deepest:
@@ -97,6 +97,9 @@ class MobiWriter(object):
     # Indexing {{{
     def generate_index(self):
         self.primary_index_record_idx = None
+        if self.oeb.toc.count() < 1:
+            self.log.warn('No TOC, MOBI index not generated')
+            return
         try:
             self.indexer = Indexer(self.serializer, self.last_text_record_idx,
                     len(self.records[self.last_text_record_idx]),
@ -147,15 +150,19 @@ class MobiWriter(object):
|
|||||||
oeb.logger.info('Serializing images...')
|
oeb.logger.info('Serializing images...')
|
||||||
self.image_records = []
|
self.image_records = []
|
||||||
self.image_map = {}
|
self.image_map = {}
|
||||||
|
self.masthead_offset = 0
|
||||||
|
index = 1
|
||||||
|
|
||||||
mh_href = self.masthead_offset = None
|
mh_href = None
|
||||||
if 'masthead' in oeb.guide:
|
if 'masthead' in oeb.guide:
|
||||||
mh_href = oeb.guide['masthead'].href
|
mh_href = oeb.guide['masthead'].href
|
||||||
|
self.image_records.append(None)
|
||||||
|
index += 1
|
||||||
elif self.is_periodical:
|
elif self.is_periodical:
|
||||||
# Generate a default masthead
|
# Generate a default masthead
|
||||||
data = generate_masthead(unicode(self.oeb.metadata('title')[0]))
|
data = generate_masthead(unicode(self.oeb.metadata['title'][0]))
|
||||||
self.image_records.append(data)
|
self.image_records.append(data)
|
||||||
self.masthead_offset = 0
|
index += 1
|
||||||
|
|
||||||
cover_href = self.cover_offset = self.thumbnail_offset = None
|
cover_href = self.cover_offset = self.thumbnail_offset = None
|
||||||
if (oeb.metadata.cover and
|
if (oeb.metadata.cover and
|
||||||
@@ -172,13 +179,16 @@ class MobiWriter(object):
                     oeb.logger.warn('Bad image file %r' % item.href)
                     continue
                 else:
-                    self.image_map[item.href] = len(self.image_records)
-                    self.image_records.append(data)
-
-                    if item.href == mh_href:
-                        self.masthead_offset = len(self.image_records) - 1
-                    elif item.href == cover_href:
-                        self.cover_offset = len(self.image_records) - 1
+                    if mh_href and item.href == mh_href:
+                        self.image_records[0] = data
+                        continue
+
+                    self.image_records.append(data)
+                    self.image_map[item.href] = index
+                    index += 1
+
+                    if cover_href and item.href == cover_href:
+                        self.cover_offset = self.image_map[item.href] - 1
                         try:
                             data = rescale_image(item.data, dimen=MAX_THUMB_DIMEN,
                                     maxsizeb=MAX_THUMB_SIZE)
@@ -186,10 +196,14 @@ class MobiWriter(object):
                             oeb.logger.warn('Failed to generate thumbnail')
                         else:
                             self.image_records.append(data)
-                            self.thumbnail_offset = len(self.image_records) - 1
+                            self.thumbnail_offset = index - 1
+                            index += 1
                 finally:
                     item.unload_data_from_memory()

+        if self.image_records and self.image_records[0] is None:
+            raise ValueError('Failed to find masthead image in manifest')
+
     # }}}

     # Text {{{
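The image bookkeeping above reserves record slot 0 for the masthead as soon as the guide names one, fills the slot when the matching manifest item is encountered, and raises if the slot was never filled. A self-contained sketch of the same reservation pattern; build_records and its arguments are illustrative, not calibre API:

    def build_records(items, masthead_name=None):
        records, image_map, index = [], {}, 1
        if masthead_name is not None:
            records.append(None)      # reserve slot 0 for the masthead
            index += 1
        for name, data in items:
            if masthead_name is not None and name == masthead_name:
                records[0] = data     # fill the reserved slot
                continue
            records.append(data)
            image_map[name] = index   # 1-based index, as in the diff
            index += 1
        if records and records[0] is None:
            raise ValueError('Failed to find masthead image in manifest')
        return records, image_map

    records, image_map = build_records(
        [('cover.jpg', b'C'), ('mast.gif', b'M')], masthead_name='mast.gif')
    print(image_map)  # {'cover.jpg': 2}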
@@ -197,6 +211,7 @@ class MobiWriter(object):
     def generate_text(self):
         self.oeb.logger.info('Serializing markup content...')
         self.serializer = Serializer(self.oeb, self.image_map,
+                self.is_periodical,
                 write_page_breaks_after_item=self.write_page_breaks_after_item)
         text = self.serializer()
         self.text_length = len(text)
@@ -7,6 +7,8 @@ __license__ = 'GPL v3'
 __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
 __docformat__ = 'restructuredtext en'

+import re
+
 from calibre.ebooks.oeb.base import (OEB_DOCS, XHTML, XHTML_NS, XML_NS,
         namespace, prefixname, urlnormalize)
 from calibre.ebooks.mobi.mobiml import MBP_NS
@@ -19,7 +21,7 @@ from cStringIO import StringIO
 class Serializer(object):
     NSRMAP = {'': None, XML_NS: 'xml', XHTML_NS: '', MBP_NS: 'mbp'}

-    def __init__(self, oeb, images, write_page_breaks_after_item=True):
+    def __init__(self, oeb, images, is_periodical, write_page_breaks_after_item=True):
         '''
         Write all the HTML markup in oeb into a single in memory buffer
         containing a single html document with links replaced by offsets into
@@ -35,8 +37,10 @@ class Serializer(object):
         is written after every element of the spine in ``oeb``.
         '''
         self.oeb = oeb
+        # Map of image hrefs to image index in the MOBI file
         self.images = images
         self.logger = oeb.logger
+        self.is_periodical = is_periodical
         self.write_page_breaks_after_item = write_page_breaks_after_item

         # If not None, this is a number pointing to the location at which to
@@ -187,13 +191,63 @@ class Serializer(object):
         moved to the end.
         '''
         buf = self.buf

+        def serialize_toc_level(tocref, href=None):
+            # Add the provided TOC level to the output stream. If href
+            # is provided, add a link ref to the TOC level output
+            # (e.g. feed_0/index.html)
+            if href is not None:
+                # resolve the section url in id_offsets
+                buf.write('<mbp:pagebreak/>')
+                self.id_offsets[urlnormalize(href)] = buf.tell()
+
+            if tocref.klass == "periodical":
+                buf.write('<div> <div height="1em"></div>')
+            else:
+                buf.write('<div></div> <div> <h2 height="1em"><font size="+2"><b>'+tocref.title+'</b></font></h2> <div height="1em"></div>')
+
+            buf.write('<ul>')
+
+            for tocitem in tocref.nodes:
+                buf.write('<li><a filepos=')
+                itemhref = tocitem.href
+                if tocref.klass == 'periodical':
+                    # This is a section node. For periodical TOCs, the
+                    # section urls are like r'feed_\d+/index.html'. We
+                    # don't want to point to the start of the first
+                    # article, so we change the href.
+                    itemhref = re.sub(r'article_\d+/', '', itemhref)
+                self.href_offsets[itemhref].append(buf.tell())
+                buf.write('0000000000')
+                buf.write(' ><font size="+1" color="blue"><b><u>')
+                buf.write(tocitem.title)
+                buf.write('</u></b></font></a></li>')
+
+            buf.write('</ul><div height="1em"></div></div>')
+
         self.anchor_offset = buf.tell()
         buf.write(b'<body>')
         self.body_start_offset = buf.tell()

+        if self.is_periodical:
+            top_toc = self.oeb.toc.nodes[0]
+            serialize_toc_level(top_toc)
+
         spine = [item for item in self.oeb.spine if item.linear]
         spine.extend([item for item in self.oeb.spine if not item.linear])

         for item in spine:
+
+            if self.is_periodical and item.is_section_start:
+                for section_toc in top_toc.nodes:
+                    if urlnormalize(item.href) == section_toc.href:
+                        # create section url of the form r'feed_\d+/index.html'
+                        section_url = re.sub(r'article_\d+/', '', section_toc.href)
+                        serialize_toc_level(section_toc, section_url)
+                        section_toc.href = section_url
+                        break
+
             self.serialize_item(item)

         self.body_end_offset = buf.tell()
         buf.write(b'</body>')
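The inline TOC above relies on a classic single-pass trick: the final offset of a link target is not known while the link itself is being written, so a fixed-width '0000000000' placeholder goes out first, its position is remembered in href_offsets, and the real value is patched in once serialization is complete. A standalone sketch of the trick (the names are made up for illustration):

    from io import BytesIO

    buf = BytesIO()
    pending = {}  # target name -> positions of placeholders in buf
    targets = {}  # target name -> final offset of the target

    # Write the link with a fixed-width placeholder for the offset.
    buf.write(b'<a filepos=')
    pending.setdefault('chapter1', []).append(buf.tell())
    buf.write(b'0000000000 >Chapter 1</a>')

    # Later, the target's real position becomes known.
    targets['chapter1'] = buf.tell()
    buf.write(b'<h1>Chapter 1</h1>')

    # Patch every placeholder in place; the width never changes, so no
    # offsets shift.
    for name, positions in pending.items():
        for pos in positions:
            buf.seek(pos)
            buf.write(b'%010d' % targets[name])
    print(buf.getvalue())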
@@ -374,8 +374,8 @@ class Editor(QFrame): # {{{
             self.current_keys])
         if not current: current = _('None')

-        self.use_default.setText(_('Default: %s [Currently not conflicting: %s]')%
-            (default, current))
+        self.use_default.setText(_('Default: %(deflt)s [Currently not conflicting: %(curr)s]')%
+            dict(deflt=default, curr=current))

         if shortcut['set_to_default']:
             self.use_default.setChecked(True)
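The point of this change is that positional %s placeholders force every translation to keep the argument order of the English string, while a named mapping lets each language place the values wherever its grammar needs them. A tiny illustration:

    template = 'Default: %(deflt)s [Currently not conflicting: %(curr)s]'
    reordered = '[%(curr)s] Default: %(deflt)s'  # a translator may reorder freely
    values = dict(deflt='Ctrl+D', curr='None')
    print(template % values)
    print(reordered % values)  # still works; positional %s would break here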
@@ -17,6 +17,9 @@ class LanguagesEdit(MultiCompleteComboBox):
         MultiCompleteComboBox.__init__(self, parent)

         self._lang_map = lang_map()
+        self.names_with_commas = [x for x in self._lang_map.itervalues() if ',' in x]
+        self.comma_map = {k:k.replace(',', '|') for k in self.names_with_commas}
+        self.comma_rmap = {v:k for k, v in self.comma_map.iteritems()}
         self._rmap = {v:k for k,v in self._lang_map.iteritems()}

         all_items = sorted(self._lang_map.itervalues(),
@@ -25,12 +28,19 @@ class LanguagesEdit(MultiCompleteComboBox):
         for item in all_items:
             self.addItem(item)

+    @property
+    def vals(self):
+        raw = unicode(self.lineEdit().text())
+        for k, v in self.comma_map.iteritems():
+            raw = raw.replace(k, v)
+        parts = [x.strip() for x in raw.split(',')]
+        return [self.comma_rmap.get(x, x) for x in parts]
+
     @dynamic_property
     def lang_codes(self):

         def fget(self):
-            vals = [x.strip() for x in
-                    unicode(self.lineEdit().text()).split(',')]
+            vals = self.vals
             ans = []
             for name in vals:
                 if name:
@@ -50,8 +60,7 @@ class LanguagesEdit(MultiCompleteComboBox):
         return property(fget=fget, fset=fset)

     def validate(self):
-        vals = [x.strip() for x in
-                unicode(self.lineEdit().text()).split(',')]
+        vals = self.vals
         bad = []
         for name in vals:
             if name:
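The comma_map/comma_rmap pair exists because a few language names themselves contain commas, which the comma-separated line edit would otherwise split apart. A round-trip sketch of what the new vals property does (the sample language name is invented for illustration):

    lang_names = ['English', 'Greek, Ancient']
    comma_map = {k: k.replace(',', '|') for k in lang_names if ',' in k}
    comma_rmap = {v: k for k, v in comma_map.items()}

    raw = 'English, Greek, Ancient'
    for k, v in comma_map.items():
        raw = raw.replace(k, v)           # 'English, Greek| Ancient'
    parts = [x.strip() for x in raw.split(',')]
    print([comma_rmap.get(x, x) for x in parts])
    # ['English', 'Greek, Ancient'] -- the comma-bearing name survives the split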
@@ -640,6 +640,7 @@ class LibraryPage(QWizardPage, LibraryUI):
         metadata_plugins = {
                 'zh' : ('Douban Books',),
                 'fr' : ('Nicebooks',),
+                'ru' : ('OZON.ru',),
                 }.get(lang, [])
         from calibre.customize.ui import enable_plugin
         for name in metadata_plugins:
@@ -925,12 +925,18 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
         formats = row[fm['formats']]
         mi.format_metadata = {}
         if not formats:
-            formats = None
+            good_formats = None
         else:
             formats = formats.split(',')
+            good_formats = []
             for f in formats:
-                mi.format_metadata[f] = self.format_metadata(id, f)
-        mi.formats = formats
+                try:
+                    mi.format_metadata[f] = self.format_metadata(id, f)
+                except:
+                    pass
+                else:
+                    good_formats.append(f)
+        mi.formats = good_formats
         tags = row[fm['tags']]
         if tags:
             mi.tags = [i.strip() for i in tags.split(',')]
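The try/except/else above means a format whose metadata cannot be read is silently dropped from mi.formats instead of aborting the whole row. The same pattern in isolation, with read_meta as a made-up stand-in for the real format_metadata() lookup:

    def read_meta(fmt):
        # Hypothetical lookup that fails for one format.
        if fmt == 'BAD':
            raise IOError('missing file')
        return {'size': 1024}

    formats = ['EPUB', 'BAD', 'MOBI']
    format_metadata, good_formats = {}, []
    for f in formats:
        try:
            format_metadata[f] = read_meta(f)
        except:
            pass                      # unreadable format: skip it
        else:
            good_formats.append(f)    # only reached when no exception was raised
    print(good_formats)  # ['EPUB', 'MOBI']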
@@ -1213,7 +1219,13 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
         except: # If path contains strange characters this throws an exc
             candidates = []
         if format and candidates and os.path.exists(candidates[0]):
-            shutil.copyfile(candidates[0], fmt_path)
+            try:
+                shutil.copyfile(candidates[0], fmt_path)
+            except:
+                # This can happen if candidates[0] or fmt_path is too long,
+                # which can happen if the user copied the library from a
+                # non windows machine to a windows machine.
+                return None
             return fmt_path

     def copy_format_to(self, index, fmt, dest, index_is_id=False):
@@ -1633,7 +1645,6 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
                     item.rt += rating
                     item.rc += 1
                 except:
-                    prints(tid_cat, val)
                     prints('get_categories: item', val, 'is not in', cat, 'list!')

         #print 'end phase "books":', time.clock() - last, 'seconds'
@@ -2291,7 +2302,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
                 'DELETE FROM books_languages_link WHERE book=?', (book_id,))
         self.conn.execute('''DELETE FROM languages WHERE (SELECT COUNT(id)
                              FROM books_languages_link WHERE
-                                 lang_code=languages.id) < 1''')
+                                 books_languages_link.lang_code=languages.id) < 1''')

         books_to_refresh = set([book_id])
         final_languages = []
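The fix qualifies lang_code with its table name: assuming both languages and books_languages_link carry a column of that name (as in the simplified schema sketched below), the explicit prefix removes any ambiguity about which column the correlated subquery compares against languages.id. A self-contained illustration of the orphaned-language cleanup with an in-memory database:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.executescript('''
        CREATE TABLE languages (id INTEGER PRIMARY KEY, lang_code TEXT);
        CREATE TABLE books_languages_link (book INTEGER, lang_code INTEGER);
        INSERT INTO languages VALUES (1, 'eng'), (2, 'fra');
        INSERT INTO books_languages_link VALUES (10, 1);
    ''')
    # Delete every language no book links to; the qualified column name
    # makes the correlated subquery unambiguous.
    conn.execute('''DELETE FROM languages WHERE (SELECT COUNT(id)
                    FROM books_languages_link WHERE
                    books_languages_link.lang_code=languages.id) < 1''')
    print(conn.execute('SELECT lang_code FROM languages').fetchall())
    # [('eng',)] -- 'fra' had no links and was removed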
@@ -360,7 +360,7 @@ When you first run |app|, it will ask you for a folder in which to store your bo

 Metadata about the books is stored in the file ``metadata.db`` at the top level of the library folder. This file is a sqlite database. When backing up your library make sure you copy the entire folder and all its sub-folders.

-The library folder and all its contents make up what is called a *|app| library*. You can have multiple such libraries. To manage the libraries, click the |app| icon on the toolbar. You can create new libraries, remove/rename existing ones and switch between libraries easily.
+The library folder and all its contents make up what is called a |app| library. You can have multiple such libraries. To manage the libraries, click the |app| icon on the toolbar. You can create new libraries, remove/rename existing ones and switch between libraries easily.

 You can copy or move books between different libraries (once you have more than one library setup) by right clicking on a book and selecting the :guilabel:`Copy to library` action.
@@ -438,7 +438,19 @@ Simply copy the |app| library folder from the old to the new computer. You can f

 Note that if you are transferring between different types of computers (for example Windows to OS X) then after doing the above you should also click the arrow next to the calibre icon on the tool bar, select Library Maintenance and run the Check Library action. It will warn you about any problems in your library, which you should fix by hand.

-.. note:: A |app| library is just a folder which contains all the book files and their metadata. All the emtadata is stored in a single file called metadata.db, in the top level folder. If this file gets corrupted, you may see an empty list of books in |app|. In this case you can ask |app| to restore your books by clicking the arrow next to the |app| icon on the toolbar and selecting Library Maintenance->Restore Library.
+.. note:: A |app| library is just a folder which contains all the book files and their metadata. All the metadata is stored in a single file called metadata.db, in the top level folder. If this file gets corrupted, you may see an empty list of books in |app|. In this case you can ask |app| to restore your books by clicking the arrow next to the |app| icon on the toolbar and selecting Library Maintenance->Restore Library.
+
+The list of books in |app| is blank!
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In order to understand why that happened, you have to understand what a |app| library is. At the most basic level, a |app| library is just a folder. Whenever you add a book to |app|, that book's files are copied into this folder (arranged into sub folders by author and title). Inside the |app| library folder, at the top level, you will see a file called metadata.db. This file is where |app| stores the metadata like title/author/rating/tags etc. for *every* book in your |app| library. The list of books that |app| displays is created by reading the contents of this metadata.db file.
+
+There can be two reasons why |app| is showing an empty list of books:
+
+* Your |app| library folder changed its location. This can happen if it was on an external disk and the drive letter for that disk changed. Or if you accidentally moved the folder. In this case, |app| cannot find its library and so starts up with an empty library instead. To remedy this, simply click the arrow next to the |app| icon in the |app| toolbar (it will say 0 books underneath it) and select Switch/create library. Click the little blue icon to select the new location of your |app| library and click OK.
+
+* Your metadata.db file was deleted/corrupted. In this case, you can ask |app| to rebuild the metadata.db from its backups. Click the arrow next to the |app| icon in the |app| toolbar (it will say 0 books underneath it) and select Library maintenance->Restore database. |app| will automatically rebuild metadata.db.
+
+
 Content From The Web
 ---------------------
@@ -446,6 +458,7 @@ Content From The Web
    :depth: 1
    :local:

+
 I obtained a recipe for a news site as a .py file from somewhere, how do I use it?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Start the :guilabel:`Add custom news sources` dialog (from the :guilabel:`Fetch news` menu) and click the :guilabel:`Switch to advanced mode` button. Delete everything in the box with the recipe source code and copy paste the contents of your .py file into the box. Click :guilabel:`Add/update recipe`.