mirror of
https://github.com/kovidgoyal/calibre.git
synced 2025-07-07 10:14:46 -04:00
Merge from trunk
This commit is contained in:
commit
e51fa6c3fc
@ -1,39 +1,34 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
__license__ = 'GPLv3'
|
||||||
|
|
||||||
from calibre.web.feeds.news import BasicNewsRecipe
|
from calibre.web.feeds.news import BasicNewsRecipe
|
||||||
|
|
||||||
class AdvancedUserRecipe1255797795(BasicNewsRecipe):
|
class AdvancedUserRecipe1311446032(BasicNewsRecipe):
|
||||||
title = u'Corren'
|
title = 'Corren'
|
||||||
language = 'sv'
|
__author__ = 'Jonas Svensson'
|
||||||
__author__ = 'Jonas Svensson'
|
description = 'News from Sweden'
|
||||||
simultaneous_downloads = 1
|
publisher = 'Corren'
|
||||||
no_stylesheets = True
|
category = 'news, politics, Sweden'
|
||||||
oldest_article = 7
|
oldest_article = 2
|
||||||
|
delay = 1
|
||||||
max_articles_per_feed = 100
|
max_articles_per_feed = 100
|
||||||
remove_attributes = ['onload']
|
no_stylesheets = True
|
||||||
timefmt = ''
|
use_embedded_content = False
|
||||||
|
encoding = 'iso-8859-1'
|
||||||
|
language = 'sv'
|
||||||
|
|
||||||
feeds = [
|
feeds = [
|
||||||
(u'Toppnyheter (alla kategorier)', u'http://www.corren.se/inc/RssHandler.ashx?id=4122151&ripurl=http://www.corren.se/nyheter/'),
|
(u'Toppnyheter', u'http://www.corren.se/inc/RssHandler.ashx?id=4122151&ripurl=http://www.corren.se/nyheter/')
|
||||||
(u'Bostad', u'http://www.corren.se/inc/RssHandler.ashx?id=4122174&ripurl=http://www.corren.se/bostad/'),
|
,(u'Ekonomi', u'http://www.corren.se/inc/RssHandler.ashx?id=4122176&ripurl=http://www.corren.se/ekonomi/')
|
||||||
(u'Ekonomi & Jobb', u'http://www.corren.se/inc/RssHandler.ashx?id=4122176&ripurl=http://www.corren.se/ekonomi/'),
|
,(u'Link\xf6ping', u'http://www.corren.se/inc/RssHandler.ashx?id=4122234')
|
||||||
(u'Kultur & Nöje', u'http://www.corren.se/inc/RssHandler.ashx?id=4122192&ripurl=http://www.corren.se/kultur/'),
|
,(u'Åsikter', u'http://www.corren.se/inc/RssHandler.ashx?id=4122223,4122224,4122226,4122227,4122228,4122229,4122230')
|
||||||
(u'Mat & dryck', u'http://www.corren.se/inc/RssHandler.ashx?id=4122201&ripurl=http://www.corren.se/mat-dryck/'),
|
]
|
||||||
(u'Motor', u'http://www.corren.se/inc/RssHandler.ashx?id=4122203&ripurl=http://www.corren.se/motor/'),
|
|
||||||
(u'Sport', u'http://www.corren.se/inc/RssHandler.ashx?id=4122206&ripurl=http://www.corren.se/sport/'),
|
|
||||||
(u'Åsikter', u'http://www.corren.se/inc/RssHandler.ashx?id=4122223&ripurl=http://www.corren.se/asikter/'),
|
|
||||||
(u'Mjölby', u'http://www.corren.se/inc/RssHandler.ashx?id=4122235&ripurl=http://www.corren.se/ostergotland/mjolby/'),
|
|
||||||
(u'Motala', u'http://www.corren.se/inc/RssHandler.ashx?id=4122236&ripurl=http://www.corren.se/ostergotland/motala/')
|
|
||||||
]
|
|
||||||
|
|
||||||
def print_version(self, url):
|
|
||||||
url = url.replace("ekonomi/artikel.aspx", "Print.aspx")
|
|
||||||
url = url.replace("bostad/artikel.aspx", "Print.aspx")
|
|
||||||
url = url.replace("kultur/artikel.aspx", "Print.aspx")
|
|
||||||
url = url.replace("motor/artikel.aspx", "Print.aspx")
|
|
||||||
url = url.replace("mat-dryck/artikel.aspx", "Print.aspx")
|
|
||||||
url = url.replace("sport/artikel.aspx", "Print.aspx")
|
|
||||||
url = url.replace("asikter/artikel.aspx", "Print.aspx")
|
|
||||||
url = url.replace("mat-dryck/artikel.aspx", "Print.aspx")
|
|
||||||
url = url.replace("ostergotland/mjolby/artikel.aspx", "Print.aspx")
|
|
||||||
url = url.replace("ostergotland/motala/artikel.aspx", "Print.aspx")
|
|
||||||
return url.replace("nyheter/artikel.aspx", "Print.aspx")
|
|
||||||
|
|
||||||
|
keep_only_tags = [dict(name='div', attrs={'id':'article'}),dict(name='div', attrs={'class':'body'})]
|
||||||
|
remove_tags = [
|
||||||
|
dict(name='ul',attrs={'class':'functions'})
|
||||||
|
,dict(name='a',attrs={'href':'javascript*'})
|
||||||
|
,dict(name='div',attrs={'class':'box'})
|
||||||
|
,dict(name='div',attrs={'class':'functionsbottom'})
|
||||||
|
]
|
||||||
|
32
recipes/dagens_industri.recipe
Normal file
32
recipes/dagens_industri.recipe
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
__license__ = 'GPLv3'
|
||||||
|
|
||||||
|
from calibre.web.feeds.news import BasicNewsRecipe
|
||||||
|
|
||||||
|
class AdvancedUserRecipe1311450855(BasicNewsRecipe):
|
||||||
|
title = u'Dagens Industri'
|
||||||
|
__author__ = 'Jonas Svensson'
|
||||||
|
description = 'Economy news from Sweden'
|
||||||
|
publisher = 'DI'
|
||||||
|
category = 'news, politics, Sweden'
|
||||||
|
oldest_article = 2
|
||||||
|
delay = 1
|
||||||
|
max_articles_per_feed = 100
|
||||||
|
no_stylesheets = True
|
||||||
|
use_embedded_content = False
|
||||||
|
encoding = 'utf-8'
|
||||||
|
language = 'sv'
|
||||||
|
|
||||||
|
feeds = [(u'DI', u'http://di.se/rss')]
|
||||||
|
|
||||||
|
keep_only_tags = [dict(name='h1', attrs={'id':'ctl00_ExtraWideContentRegion_WideContentRegion_MainRegion_MainContentRegion_MainBodyRegion_headlineNormal'}),dict(name='div', attrs={'id':'articleBody'})]
|
||||||
|
|
||||||
|
remove_tags = [
|
||||||
|
dict(name='div',attrs={'class':'article-actions clear'})
|
||||||
|
,dict(name='div',attrs={'class':'article-action-popup'})
|
||||||
|
,dict(name='div',attrs={'class':'header'})
|
||||||
|
,dict(name='div',attrs={'class':'content clear'})
|
||||||
|
,dict(name='div',attrs={'id':'articleAdvertisementDiv'})
|
||||||
|
,dict(name='ul',attrs={'class':'action-list'})
|
||||||
|
]
|
@ -12,7 +12,7 @@ from datetime import date
|
|||||||
|
|
||||||
class Guardian(BasicNewsRecipe):
|
class Guardian(BasicNewsRecipe):
|
||||||
|
|
||||||
title = u'The Guardian / The Observer'
|
title = u'The Guardian and The Observer'
|
||||||
if date.today().weekday() == 6:
|
if date.today().weekday() == 6:
|
||||||
base_url = "http://www.guardian.co.uk/theobserver"
|
base_url = "http://www.guardian.co.uk/theobserver"
|
||||||
else:
|
else:
|
||||||
@ -28,7 +28,7 @@ class Guardian(BasicNewsRecipe):
|
|||||||
# List of section titles to ignore
|
# List of section titles to ignore
|
||||||
# For example: ['Sport']
|
# For example: ['Sport']
|
||||||
ignore_sections = []
|
ignore_sections = []
|
||||||
|
|
||||||
timefmt = ' [%a, %d %b %Y]'
|
timefmt = ' [%a, %d %b %Y]'
|
||||||
keep_only_tags = [
|
keep_only_tags = [
|
||||||
dict(name='div', attrs={'id':["content","article_header","main-article-info",]}),
|
dict(name='div', attrs={'id':["content","article_header","main-article-info",]}),
|
||||||
@ -94,7 +94,7 @@ class Guardian(BasicNewsRecipe):
|
|||||||
prefix = section_title + ': '
|
prefix = section_title + ': '
|
||||||
for subsection in s.parent.findAll('a', attrs={'class':'book-section'}):
|
for subsection in s.parent.findAll('a', attrs={'class':'book-section'}):
|
||||||
yield (prefix + self.tag_to_string(subsection), subsection['href'])
|
yield (prefix + self.tag_to_string(subsection), subsection['href'])
|
||||||
|
|
||||||
def find_articles(self, url):
|
def find_articles(self, url):
|
||||||
soup = self.index_to_soup(url)
|
soup = self.index_to_soup(url)
|
||||||
div = soup.find('div', attrs={'class':'book-index'})
|
div = soup.find('div', attrs={'class':'book-index'})
|
||||||
@ -115,7 +115,7 @@ class Guardian(BasicNewsRecipe):
|
|||||||
'title': title, 'url':url, 'description':desc,
|
'title': title, 'url':url, 'description':desc,
|
||||||
'date' : strftime('%a, %d %b'),
|
'date' : strftime('%a, %d %b'),
|
||||||
}
|
}
|
||||||
|
|
||||||
def parse_index(self):
|
def parse_index(self):
|
||||||
try:
|
try:
|
||||||
feeds = []
|
feeds = []
|
||||||
|
@ -43,7 +43,7 @@ class AdvancedUserRecipe1299694372(BasicNewsRecipe):
|
|||||||
lfeeds = self.get_feeds()
|
lfeeds = self.get_feeds()
|
||||||
for feedobj in lfeeds:
|
for feedobj in lfeeds:
|
||||||
feedtitle, feedurl = feedobj
|
feedtitle, feedurl = feedobj
|
||||||
self.report_progress(0, _('Fetching feed')+' %s...'%(feedtitle if feedtitle else feedurl))
|
self.report_progress(0, 'Fetching feed'+' %s...'%(feedtitle if feedtitle else feedurl))
|
||||||
articles = []
|
articles = []
|
||||||
soup = self.index_to_soup(feedurl)
|
soup = self.index_to_soup(feedurl)
|
||||||
for item in soup.findAll('div', attrs={'class':'cornerControls'}):
|
for item in soup.findAll('div', attrs={'class':'cornerControls'}):
|
||||||
@ -63,3 +63,8 @@ class AdvancedUserRecipe1299694372(BasicNewsRecipe):
|
|||||||
def populate_article_metadata(self, article, soup, first):
|
def populate_article_metadata(self, article, soup, first):
|
||||||
article.title = soup.find('title').contents[0].strip()
|
article.title = soup.find('title').contents[0].strip()
|
||||||
|
|
||||||
|
def postprocess_html(self, soup, first_fetch):
|
||||||
|
for link_tag in soup.findAll(attrs={"id" : "story"}):
|
||||||
|
link_tag.insert(0,'<h1>'+soup.find('title').contents[0].strip()+'</h1>')
|
||||||
|
|
||||||
|
return soup
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
__license__ = 'GPL v3'
|
__license__ = 'GPL v3'
|
||||||
__copyright__ = "2008, Derry FitzGerald. 2009 Modified by Ray Kinsella and David O'Callaghan, 2011 Modified by Phil Burns"
|
__copyright__ = "2008, Derry FitzGerald. 2009 Modified by Ray Kinsella and David O'Callaghan, 2011 Modified by Phil Burns"
|
||||||
'''
|
'''
|
||||||
irishtimes.com
|
irishtimes.com
|
||||||
@ -10,7 +10,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
|
|||||||
class IrishTimes(BasicNewsRecipe):
|
class IrishTimes(BasicNewsRecipe):
|
||||||
title = u'The Irish Times'
|
title = u'The Irish Times'
|
||||||
encoding = 'ISO-8859-15'
|
encoding = 'ISO-8859-15'
|
||||||
__author__ = "Derry FitzGerald, Ray Kinsella, David O'Callaghan and Phil Burns"
|
__author__ = "Derry FitzGerald, Ray Kinsella, David O'Callaghan and Phil Burns"
|
||||||
language = 'en_IE'
|
language = 'en_IE'
|
||||||
timefmt = ' (%A, %B %d, %Y)'
|
timefmt = ' (%A, %B %d, %Y)'
|
||||||
|
|
||||||
@ -18,6 +18,7 @@ class IrishTimes(BasicNewsRecipe):
|
|||||||
oldest_article = 1.0
|
oldest_article = 1.0
|
||||||
max_articles_per_feed = 100
|
max_articles_per_feed = 100
|
||||||
no_stylesheets = True
|
no_stylesheets = True
|
||||||
|
simultaneous_downloads= 5
|
||||||
|
|
||||||
r = re.compile('.*(?P<url>http:\/\/(www.irishtimes.com)|(rss.feedsportal.com\/c)\/.*\.html?).*')
|
r = re.compile('.*(?P<url>http:\/\/(www.irishtimes.com)|(rss.feedsportal.com\/c)\/.*\.html?).*')
|
||||||
remove_tags = [dict(name='div', attrs={'class':'footer'})]
|
remove_tags = [dict(name='div', attrs={'class':'footer'})]
|
||||||
@ -25,17 +26,17 @@ class IrishTimes(BasicNewsRecipe):
|
|||||||
|
|
||||||
feeds = [
|
feeds = [
|
||||||
('Frontpage', 'http://www.irishtimes.com/feeds/rss/newspaper/index.rss'),
|
('Frontpage', 'http://www.irishtimes.com/feeds/rss/newspaper/index.rss'),
|
||||||
('Ireland', 'http://rss.feedsportal.com/c/851/f/10845/index.rss'),
|
('Ireland', 'http://www.irishtimes.com/feeds/rss/newspaper/ireland.rss'),
|
||||||
('World', 'http://rss.feedsportal.com/c/851/f/10846/index.rss'),
|
('World', 'http://www.irishtimes.com/feeds/rss/newspaper/world.rss'),
|
||||||
('Finance', 'http://rss.feedsportal.com/c/851/f/10847/index.rss'),
|
('Finance', 'http://www.irishtimes.com/feeds/rss/newspaper/finance.rss'),
|
||||||
('Features', 'http://rss.feedsportal.com/c/851/f/10848/index.rss'),
|
('Features', 'http://www.irishtimes.com/feeds/rss/newspaper/features.rss'),
|
||||||
('Sport', 'http://rss.feedsportal.com/c/851/f/10849/index.rss'),
|
('Sport', 'http://www.irishtimes.com/feeds/rss/newspaper/sport.rss'),
|
||||||
('Opinion', 'http://rss.feedsportal.com/c/851/f/10850/index.rss'),
|
('Opinion', 'http://www.irishtimes.com/feeds/rss/newspaper/opinion.rss'),
|
||||||
('Letters', 'http://rss.feedsportal.com/c/851/f/10851/index.rss'),
|
('Letters', 'http://www.irishtimes.com/feeds/rss/newspaper/letters.rss'),
|
||||||
('Magazine', 'http://www.irishtimes.com/feeds/rss/newspaper/magazine.rss'),
|
('Magazine', 'http://www.irishtimes.com/feeds/rss/newspaper/magazine.rss'),
|
||||||
('Health', 'http://rss.feedsportal.com/c/851/f/10852/index.rss'),
|
('Health', 'http://www.irishtimes.com/feeds/rss/newspaper/health.rss'),
|
||||||
('Education & Parenting', 'http://rss.feedsportal.com/c/851/f/10853/index.rss'),
|
('Education & Parenting', 'http://www.irishtimes.com/feeds/rss/newspaper/education.rss'),
|
||||||
('Motors', 'http://rss.feedsportal.com/c/851/f/10854/index.rss'),
|
('Motors', 'http://www.irishtimes.com/feeds/rss/newspaper/motors.rss'),
|
||||||
('An Teanga Bheo', 'http://www.irishtimes.com/feeds/rss/newspaper/anteangabheo.rss'),
|
('An Teanga Bheo', 'http://www.irishtimes.com/feeds/rss/newspaper/anteangabheo.rss'),
|
||||||
('Commercial Property', 'http://www.irishtimes.com/feeds/rss/newspaper/commercialproperty.rss'),
|
('Commercial Property', 'http://www.irishtimes.com/feeds/rss/newspaper/commercialproperty.rss'),
|
||||||
('Science Today', 'http://www.irishtimes.com/feeds/rss/newspaper/sciencetoday.rss'),
|
('Science Today', 'http://www.irishtimes.com/feeds/rss/newspaper/sciencetoday.rss'),
|
||||||
@ -49,10 +50,16 @@ class IrishTimes(BasicNewsRecipe):
|
|||||||
|
|
||||||
def print_version(self, url):
|
def print_version(self, url):
|
||||||
if url.count('rss.feedsportal.com'):
|
if url.count('rss.feedsportal.com'):
|
||||||
u = url.replace('0Bhtml/story01.htm','_pf0Bhtml/story01.htm')
|
#u = url.replace('0Bhtml/story01.htm','_pf0Bhtml/story01.htm')
|
||||||
|
u = url.find('irishtimes')
|
||||||
|
u = 'http://www.irishtimes.com' + url[u + 12:]
|
||||||
|
u = u.replace('0C', '/')
|
||||||
|
u = u.replace('A', '')
|
||||||
|
u = u.replace('0Bhtml/story01.htm', '_pf.html')
|
||||||
else:
|
else:
|
||||||
u = url.replace('.html','_pf.html')
|
u = url.replace('.html','_pf.html')
|
||||||
return u
|
return u
|
||||||
|
|
||||||
def get_article_url(self, article):
|
def get_article_url(self, article):
|
||||||
return article.link
|
return article.link
|
||||||
|
|
||||||
|
@ -11,7 +11,7 @@ defaults.
|
|||||||
'''
|
'''
|
||||||
|
|
||||||
#: Auto increment series index
|
#: Auto increment series index
|
||||||
# The algorithm used to assign a new book in an existing series a series number.
|
# The algorithm used to assign a book added to an existing series a series number.
|
||||||
# New series numbers assigned using this tweak are always integer values, except
|
# New series numbers assigned using this tweak are always integer values, except
|
||||||
# if a constant non-integer is specified.
|
# if a constant non-integer is specified.
|
||||||
# Possible values are:
|
# Possible values are:
|
||||||
@ -27,7 +27,19 @@ defaults.
|
|||||||
# series_index_auto_increment = 'next'
|
# series_index_auto_increment = 'next'
|
||||||
# series_index_auto_increment = 'next_free'
|
# series_index_auto_increment = 'next_free'
|
||||||
# series_index_auto_increment = 16.5
|
# series_index_auto_increment = 16.5
|
||||||
|
#
|
||||||
|
# Set the use_series_auto_increment_tweak_when_importing tweak to True to
|
||||||
|
# use the above values when importing/adding books. If this tweak is set to
|
||||||
|
# False (the default) then the series number will be set to 1 if it is not
|
||||||
|
# explicitly set to during the import. If set to True, then the
|
||||||
|
# series index will be set according to the series_index_auto_increment setting.
|
||||||
|
# Note that the use_series_auto_increment_tweak_when_importing tweak is used
|
||||||
|
# only when a value is not provided during import. If the importing regular
|
||||||
|
# expression produces a value for series_index, or if you are reading metadata
|
||||||
|
# from books and the import plugin produces a value, than that value will
|
||||||
|
# be used irrespective of the setting of the tweak.
|
||||||
series_index_auto_increment = 'next'
|
series_index_auto_increment = 'next'
|
||||||
|
use_series_auto_increment_tweak_when_importing = False
|
||||||
|
|
||||||
#: Add separator after completing an author name
|
#: Add separator after completing an author name
|
||||||
# Should the completion separator be append
|
# Should the completion separator be append
|
||||||
|
@ -570,7 +570,7 @@ from calibre.devices.teclast.driver import (TECLAST_K3, NEWSMY, IPAPYRUS,
|
|||||||
from calibre.devices.sne.driver import SNE
|
from calibre.devices.sne.driver import SNE
|
||||||
from calibre.devices.misc import (PALMPRE, AVANT, SWEEX, PDNOVEL,
|
from calibre.devices.misc import (PALMPRE, AVANT, SWEEX, PDNOVEL,
|
||||||
GEMEI, VELOCITYMICRO, PDNOVEL_KOBO, LUMIREAD, ALURATEK_COLOR,
|
GEMEI, VELOCITYMICRO, PDNOVEL_KOBO, LUMIREAD, ALURATEK_COLOR,
|
||||||
TREKSTOR, EEEREADER, NEXTBOOK, ADAM, MOOVYBOOK)
|
TREKSTOR, EEEREADER, NEXTBOOK, ADAM, MOOVYBOOK, COBY)
|
||||||
from calibre.devices.folder_device.driver import FOLDER_DEVICE_FOR_CONFIG
|
from calibre.devices.folder_device.driver import FOLDER_DEVICE_FOR_CONFIG
|
||||||
from calibre.devices.kobo.driver import KOBO
|
from calibre.devices.kobo.driver import KOBO
|
||||||
from calibre.devices.bambook.driver import BAMBOOK
|
from calibre.devices.bambook.driver import BAMBOOK
|
||||||
@ -705,7 +705,7 @@ plugins += [
|
|||||||
EEEREADER,
|
EEEREADER,
|
||||||
NEXTBOOK,
|
NEXTBOOK,
|
||||||
ADAM,
|
ADAM,
|
||||||
MOOVYBOOK,
|
MOOVYBOOK, COBY,
|
||||||
ITUNES,
|
ITUNES,
|
||||||
BOEYE_BEX,
|
BOEYE_BEX,
|
||||||
BOEYE_BDX,
|
BOEYE_BDX,
|
||||||
@ -1228,17 +1228,6 @@ class StoreEbookscomStore(StoreBase):
|
|||||||
formats = ['EPUB', 'LIT', 'MOBI', 'PDF']
|
formats = ['EPUB', 'LIT', 'MOBI', 'PDF']
|
||||||
affiliate = True
|
affiliate = True
|
||||||
|
|
||||||
#class StoreEPubBuyDEStore(StoreBase):
|
|
||||||
# name = 'EPUBBuy DE'
|
|
||||||
# author = 'Charles Haley'
|
|
||||||
# description = u'Bei EPUBBuy.com finden Sie ausschliesslich eBooks im weitverbreiteten EPUB-Format und ohne DRM. So haben Sie die freie Wahl, wo Sie Ihr eBook lesen: Tablet, eBook-Reader, Smartphone oder einfach auf Ihrem PC. So macht eBook-Lesen Spaß!'
|
|
||||||
# actual_plugin = 'calibre.gui2.store.stores.epubbuy_de_plugin:EPubBuyDEStore'
|
|
||||||
#
|
|
||||||
# drm_free_only = True
|
|
||||||
# headquarters = 'DE'
|
|
||||||
# formats = ['EPUB']
|
|
||||||
# affiliate = True
|
|
||||||
|
|
||||||
class StoreEBookShoppeUKStore(StoreBase):
|
class StoreEBookShoppeUKStore(StoreBase):
|
||||||
name = 'ebookShoppe UK'
|
name = 'ebookShoppe UK'
|
||||||
author = u'Charles Haley'
|
author = u'Charles Haley'
|
||||||
@ -1266,16 +1255,7 @@ class StoreEKnigiStore(StoreBase):
|
|||||||
|
|
||||||
headquarters = 'BG'
|
headquarters = 'BG'
|
||||||
formats = ['EPUB', 'PDF', 'HTML']
|
formats = ['EPUB', 'PDF', 'HTML']
|
||||||
#affiliate = True
|
affiliate = True
|
||||||
|
|
||||||
class StoreEpubBudStore(StoreBase):
|
|
||||||
name = 'ePub Bud'
|
|
||||||
description = 'Well, it\'s pretty much just "YouTube for Children\'s eBooks. A not-for-profit organization devoted to brining self published childrens books to the world.'
|
|
||||||
actual_plugin = 'calibre.gui2.store.stores.epubbud_plugin:EpubBudStore'
|
|
||||||
|
|
||||||
drm_free_only = True
|
|
||||||
headquarters = 'US'
|
|
||||||
formats = ['EPUB']
|
|
||||||
|
|
||||||
class StoreFeedbooksStore(StoreBase):
|
class StoreFeedbooksStore(StoreBase):
|
||||||
name = 'Feedbooks'
|
name = 'Feedbooks'
|
||||||
@ -1311,6 +1291,7 @@ class StoreGoogleBooksStore(StoreBase):
|
|||||||
|
|
||||||
headquarters = 'US'
|
headquarters = 'US'
|
||||||
formats = ['EPUB', 'PDF', 'TXT']
|
formats = ['EPUB', 'PDF', 'TXT']
|
||||||
|
affiliate = True
|
||||||
|
|
||||||
class StoreGutenbergStore(StoreBase):
|
class StoreGutenbergStore(StoreBase):
|
||||||
name = 'Project Gutenberg'
|
name = 'Project Gutenberg'
|
||||||
@ -1394,6 +1375,17 @@ class StoreOReillyStore(StoreBase):
|
|||||||
headquarters = 'US'
|
headquarters = 'US'
|
||||||
formats = ['APK', 'DAISY', 'EPUB', 'MOBI', 'PDF']
|
formats = ['APK', 'DAISY', 'EPUB', 'MOBI', 'PDF']
|
||||||
|
|
||||||
|
class StoreOzonRUStore(StoreBase):
|
||||||
|
name = 'OZON.ru'
|
||||||
|
description = u'ebooks from OZON.ru'
|
||||||
|
actual_plugin = 'calibre.gui2.store.stores.ozon_ru_plugin:OzonRUStore'
|
||||||
|
author = 'Roman Mukhin'
|
||||||
|
|
||||||
|
drm_free_only = True
|
||||||
|
headquarters = 'RU'
|
||||||
|
formats = ['TXT', 'PDF', 'DJVU', 'RTF', 'DOC', 'JAR', 'FB2']
|
||||||
|
affiliate = True
|
||||||
|
|
||||||
class StorePragmaticBookshelfStore(StoreBase):
|
class StorePragmaticBookshelfStore(StoreBase):
|
||||||
name = 'Pragmatic Bookshelf'
|
name = 'Pragmatic Bookshelf'
|
||||||
description = u'The Pragmatic Bookshelf\'s collection of programming and tech books avaliable as ebooks.'
|
description = u'The Pragmatic Bookshelf\'s collection of programming and tech books avaliable as ebooks.'
|
||||||
@ -1491,10 +1483,8 @@ plugins += [
|
|||||||
StoreEbookNLStore,
|
StoreEbookNLStore,
|
||||||
StoreEbookscomStore,
|
StoreEbookscomStore,
|
||||||
StoreEBookShoppeUKStore,
|
StoreEBookShoppeUKStore,
|
||||||
# StoreEPubBuyDEStore,
|
|
||||||
StoreEHarlequinStore,
|
StoreEHarlequinStore,
|
||||||
StoreEKnigiStore,
|
StoreEKnigiStore,
|
||||||
StoreEpubBudStore,
|
|
||||||
StoreFeedbooksStore,
|
StoreFeedbooksStore,
|
||||||
StoreFoylesUKStore,
|
StoreFoylesUKStore,
|
||||||
StoreGandalfStore,
|
StoreGandalfStore,
|
||||||
@ -1508,6 +1498,7 @@ plugins += [
|
|||||||
StoreNextoStore,
|
StoreNextoStore,
|
||||||
StoreOpenBooksStore,
|
StoreOpenBooksStore,
|
||||||
StoreOReillyStore,
|
StoreOReillyStore,
|
||||||
|
StoreOzonRUStore,
|
||||||
StorePragmaticBookshelfStore,
|
StorePragmaticBookshelfStore,
|
||||||
StoreSmashwordsStore,
|
StoreSmashwordsStore,
|
||||||
StoreVirtualoStore,
|
StoreVirtualoStore,
|
||||||
|
@ -12,7 +12,7 @@ from datetime import datetime
|
|||||||
from dateutil.tz import tzoffset
|
from dateutil.tz import tzoffset
|
||||||
|
|
||||||
from calibre.constants import plugins
|
from calibre.constants import plugins
|
||||||
from calibre.utils.date import parse_date, local_tz
|
from calibre.utils.date import parse_date, local_tz, UNDEFINED_DATE
|
||||||
from calibre.ebooks.metadata import author_to_author_sort
|
from calibre.ebooks.metadata import author_to_author_sort
|
||||||
|
|
||||||
_c_speedup = plugins['speedup'][0]
|
_c_speedup = plugins['speedup'][0]
|
||||||
@ -29,8 +29,11 @@ def _c_convert_timestamp(val):
|
|||||||
if ret is None:
|
if ret is None:
|
||||||
return parse_date(val, as_utc=False)
|
return parse_date(val, as_utc=False)
|
||||||
year, month, day, hour, minutes, seconds, tzsecs = ret
|
year, month, day, hour, minutes, seconds, tzsecs = ret
|
||||||
return datetime(year, month, day, hour, minutes, seconds,
|
try:
|
||||||
|
return datetime(year, month, day, hour, minutes, seconds,
|
||||||
tzinfo=tzoffset(None, tzsecs)).astimezone(local_tz)
|
tzinfo=tzoffset(None, tzsecs)).astimezone(local_tz)
|
||||||
|
except OverflowError:
|
||||||
|
return UNDEFINED_DATE.astimezone(local_tz)
|
||||||
|
|
||||||
class Table(object):
|
class Table(object):
|
||||||
|
|
||||||
|
@ -128,7 +128,7 @@ class ANDROID(USBMS):
|
|||||||
'7', 'A956', 'A955', 'A43', 'ANDROID_PLATFORM', 'TEGRA_2',
|
'7', 'A956', 'A955', 'A43', 'ANDROID_PLATFORM', 'TEGRA_2',
|
||||||
'MB860', 'MULTI-CARD', 'MID7015A', 'INCREDIBLE', 'A7EB', 'STREAK',
|
'MB860', 'MULTI-CARD', 'MID7015A', 'INCREDIBLE', 'A7EB', 'STREAK',
|
||||||
'MB525', 'ANDROID2.3', 'SGH-I997', 'GT-I5800_CARD', 'MB612',
|
'MB525', 'ANDROID2.3', 'SGH-I997', 'GT-I5800_CARD', 'MB612',
|
||||||
'GT-S5830_CARD']
|
'GT-S5830_CARD', 'GT-S5570_CARD']
|
||||||
WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
|
WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
|
||||||
'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
|
'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
|
||||||
'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD',
|
'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD',
|
||||||
|
@ -351,3 +351,29 @@ class MOOVYBOOK(USBMS):
|
|||||||
def get_main_ebook_dir(self, for_upload=False):
|
def get_main_ebook_dir(self, for_upload=False):
|
||||||
return 'Books' if for_upload else self.EBOOK_DIR_MAIN
|
return 'Books' if for_upload else self.EBOOK_DIR_MAIN
|
||||||
|
|
||||||
|
class COBY(USBMS):
|
||||||
|
|
||||||
|
name = 'COBY MP977 device interface'
|
||||||
|
gui_name = 'COBY'
|
||||||
|
description = _('Communicate with the COBY')
|
||||||
|
author = 'Kovid Goyal'
|
||||||
|
supported_platforms = ['windows', 'osx', 'linux']
|
||||||
|
|
||||||
|
# Ordered list of supported formats
|
||||||
|
FORMATS = ['epub', 'pdf']
|
||||||
|
|
||||||
|
VENDOR_ID = [0x1e74]
|
||||||
|
PRODUCT_ID = [0x7121]
|
||||||
|
BCD = [0x02]
|
||||||
|
VENDOR_NAME = 'USB_2.0'
|
||||||
|
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'MP977_DRIVER'
|
||||||
|
|
||||||
|
EBOOK_DIR_MAIN = ''
|
||||||
|
|
||||||
|
SUPPORTS_SUB_DIRS = False
|
||||||
|
|
||||||
|
def get_carda_ebook_dir(self, for_upload=False):
|
||||||
|
if for_upload:
|
||||||
|
return 'eBooks'
|
||||||
|
return self.EBOOK_DIR_CARD_A
|
||||||
|
|
||||||
|
@ -24,10 +24,9 @@ XPath = partial(etree.XPath, namespaces=NAMESPACES)
|
|||||||
tostring = partial(etree.tostring, method='text', encoding=unicode)
|
tostring = partial(etree.tostring, method='text', encoding=unicode)
|
||||||
|
|
||||||
def get_metadata(stream):
|
def get_metadata(stream):
|
||||||
""" Return fb2 metadata as a L{MetaInformation} object """
|
''' Return fb2 metadata as a L{MetaInformation} object '''
|
||||||
|
|
||||||
root = _get_fbroot(stream)
|
root = _get_fbroot(stream)
|
||||||
|
|
||||||
book_title = _parse_book_title(root)
|
book_title = _parse_book_title(root)
|
||||||
authors = _parse_authors(root)
|
authors = _parse_authors(root)
|
||||||
|
|
||||||
@ -166,7 +165,7 @@ def _parse_tags(root, mi):
|
|||||||
break
|
break
|
||||||
|
|
||||||
def _parse_series(root, mi):
|
def _parse_series(root, mi):
|
||||||
#calibri supports only 1 series: use the 1-st one
|
# calibri supports only 1 series: use the 1-st one
|
||||||
# pick up sequence but only from 1 secrion in prefered order
|
# pick up sequence but only from 1 secrion in prefered order
|
||||||
# except <src-title-info>
|
# except <src-title-info>
|
||||||
xp_ti = '//fb2:title-info/fb2:sequence[1]'
|
xp_ti = '//fb2:title-info/fb2:sequence[1]'
|
||||||
@ -181,11 +180,12 @@ def _parse_series(root, mi):
|
|||||||
def _parse_isbn(root, mi):
|
def _parse_isbn(root, mi):
|
||||||
# some people try to put several isbn in this field, but it is not allowed. try to stick to the 1-st one in this case
|
# some people try to put several isbn in this field, but it is not allowed. try to stick to the 1-st one in this case
|
||||||
isbn = XPath('normalize-space(//fb2:publish-info/fb2:isbn/text())')(root)
|
isbn = XPath('normalize-space(//fb2:publish-info/fb2:isbn/text())')(root)
|
||||||
# some people try to put several isbn in this field, but it is not allowed. try to stick to the 1-st one in this case
|
if isbn:
|
||||||
if ',' in isbn:
|
# some people try to put several isbn in this field, but it is not allowed. try to stick to the 1-st one in this case
|
||||||
isbn = isbn[:isbn.index(',')]
|
if ',' in isbn:
|
||||||
if check_isbn(isbn):
|
isbn = isbn[:isbn.index(',')]
|
||||||
mi.isbn = isbn
|
if check_isbn(isbn):
|
||||||
|
mi.isbn = isbn
|
||||||
|
|
||||||
def _parse_comments(root, mi):
|
def _parse_comments(root, mi):
|
||||||
# pick up annotation but only from 1 secrion <title-info>; fallback: <src-title-info>
|
# pick up annotation but only from 1 secrion <title-info>; fallback: <src-title-info>
|
||||||
@ -232,4 +232,3 @@ def _get_fbroot(stream):
|
|||||||
raw = xml_to_unicode(raw, strip_encoding_pats=True)[0]
|
raw = xml_to_unicode(raw, strip_encoding_pats=True)[0]
|
||||||
root = etree.fromstring(raw, parser=parser)
|
root = etree.fromstring(raw, parser=parser)
|
||||||
return root
|
return root
|
||||||
|
|
||||||
|
@ -22,6 +22,7 @@ from calibre.utils.date import parse_date, isoformat
|
|||||||
from calibre.utils.localization import get_lang
|
from calibre.utils.localization import get_lang
|
||||||
from calibre import prints, guess_type
|
from calibre import prints, guess_type
|
||||||
from calibre.utils.cleantext import clean_ascii_chars
|
from calibre.utils.cleantext import clean_ascii_chars
|
||||||
|
from calibre.utils.config import tweaks
|
||||||
|
|
||||||
class Resource(object): # {{{
|
class Resource(object): # {{{
|
||||||
'''
|
'''
|
||||||
@ -527,7 +528,12 @@ class OPF(object): # {{{
|
|||||||
category = MetadataField('type')
|
category = MetadataField('type')
|
||||||
rights = MetadataField('rights')
|
rights = MetadataField('rights')
|
||||||
series = MetadataField('series', is_dc=False)
|
series = MetadataField('series', is_dc=False)
|
||||||
series_index = MetadataField('series_index', is_dc=False, formatter=float, none_is=1)
|
if tweaks['use_series_auto_increment_tweak_when_importing']:
|
||||||
|
series_index = MetadataField('series_index', is_dc=False,
|
||||||
|
formatter=float, none_is=None)
|
||||||
|
else:
|
||||||
|
series_index = MetadataField('series_index', is_dc=False,
|
||||||
|
formatter=float, none_is=1)
|
||||||
title_sort = TitleSortField('title_sort', is_dc=False)
|
title_sort = TitleSortField('title_sort', is_dc=False)
|
||||||
rating = MetadataField('rating', is_dc=False, formatter=int)
|
rating = MetadataField('rating', is_dc=False, formatter=int)
|
||||||
pubdate = MetadataField('date', formatter=parse_date,
|
pubdate = MetadataField('date', formatter=parse_date,
|
||||||
@ -1024,8 +1030,10 @@ class OPF(object): # {{{
|
|||||||
attrib = attrib or {}
|
attrib = attrib or {}
|
||||||
attrib['name'] = 'calibre:' + name
|
attrib['name'] = 'calibre:' + name
|
||||||
name = '{%s}%s' % (self.NAMESPACES['opf'], 'meta')
|
name = '{%s}%s' % (self.NAMESPACES['opf'], 'meta')
|
||||||
|
nsmap = dict(self.NAMESPACES)
|
||||||
|
del nsmap['opf']
|
||||||
elem = etree.SubElement(self.metadata, name, attrib=attrib,
|
elem = etree.SubElement(self.metadata, name, attrib=attrib,
|
||||||
nsmap=self.NAMESPACES)
|
nsmap=nsmap)
|
||||||
elem.tail = '\n'
|
elem.tail = '\n'
|
||||||
return elem
|
return elem
|
||||||
|
|
||||||
|
@ -22,6 +22,7 @@ from calibre.ebooks.metadata.book.base import Metadata
|
|||||||
from calibre.utils.date import utc_tz, as_utc
|
from calibre.utils.date import utc_tz, as_utc
|
||||||
from calibre.utils.html2text import html2text
|
from calibre.utils.html2text import html2text
|
||||||
from calibre.utils.icu import lower
|
from calibre.utils.icu import lower
|
||||||
|
from calibre.utils.date import UNDEFINED_DATE
|
||||||
|
|
||||||
# Download worker {{{
|
# Download worker {{{
|
||||||
class Worker(Thread):
|
class Worker(Thread):
|
||||||
@ -490,6 +491,8 @@ def identify(log, abort, # {{{
|
|||||||
max_tags = msprefs['max_tags']
|
max_tags = msprefs['max_tags']
|
||||||
for r in results:
|
for r in results:
|
||||||
r.tags = r.tags[:max_tags]
|
r.tags = r.tags[:max_tags]
|
||||||
|
if getattr(r.pubdate, 'year', 2000) <= UNDEFINED_DATE.year:
|
||||||
|
r.pubdate = None
|
||||||
|
|
||||||
if msprefs['swap_author_names']:
|
if msprefs['swap_author_names']:
|
||||||
for r in results:
|
for r in results:
|
||||||
|
@ -151,7 +151,7 @@ class ISBNDB(Source):
|
|||||||
|
|
||||||
bl = feed.find('BookList')
|
bl = feed.find('BookList')
|
||||||
if bl is None:
|
if bl is None:
|
||||||
err = tostring(etree.find('errormessage'))
|
err = tostring(feed.find('errormessage'))
|
||||||
raise ValueError('ISBNDb query failed:' + err)
|
raise ValueError('ISBNDb query failed:' + err)
|
||||||
total_results = int(bl.get('total_results'))
|
total_results = int(bl.get('total_results'))
|
||||||
shown_results = int(bl.get('shown_results'))
|
shown_results = int(bl.get('shown_results'))
|
||||||
|
@ -12,7 +12,7 @@ from collections import OrderedDict, defaultdict
|
|||||||
from calibre.utils.date import utc_tz
|
from calibre.utils.date import utc_tz
|
||||||
from calibre.ebooks.mobi.langcodes import main_language, sub_language
|
from calibre.ebooks.mobi.langcodes import main_language, sub_language
|
||||||
from calibre.ebooks.mobi.utils import (decode_hex_number, decint,
|
from calibre.ebooks.mobi.utils import (decode_hex_number, decint,
|
||||||
get_trailing_data)
|
get_trailing_data, decode_tbs)
|
||||||
from calibre.utils.magick.draw import identify_data
|
from calibre.utils.magick.draw import identify_data
|
||||||
|
|
||||||
# PalmDB {{{
|
# PalmDB {{{
|
||||||
@ -73,7 +73,7 @@ class PalmDB(object):
|
|||||||
self.ident = self.type + self.creator
|
self.ident = self.type + self.creator
|
||||||
if self.ident not in (b'BOOKMOBI', b'TEXTREAD'):
|
if self.ident not in (b'BOOKMOBI', b'TEXTREAD'):
|
||||||
raise ValueError('Unknown book ident: %r'%self.ident)
|
raise ValueError('Unknown book ident: %r'%self.ident)
|
||||||
self.uid_seed = self.raw[68:72]
|
self.uid_seed, = struct.unpack(b'>I', self.raw[68:72])
|
||||||
self.next_rec_list_id = self.raw[72:76]
|
self.next_rec_list_id = self.raw[72:76]
|
||||||
|
|
||||||
self.number_of_records, = struct.unpack(b'>H', self.raw[76:78])
|
self.number_of_records, = struct.unpack(b'>H', self.raw[76:78])
|
||||||
@ -182,6 +182,7 @@ class EXTHHeader(object):
|
|||||||
self.records = []
|
self.records = []
|
||||||
for i in xrange(self.count):
|
for i in xrange(self.count):
|
||||||
pos = self.read_record(pos)
|
pos = self.read_record(pos)
|
||||||
|
self.records.sort(key=lambda x:x.type)
|
||||||
|
|
||||||
def read_record(self, pos):
|
def read_record(self, pos):
|
||||||
type_, length = struct.unpack(b'>II', self.raw[pos:pos+8])
|
type_, length = struct.unpack(b'>II', self.raw[pos:pos+8])
|
||||||
@ -290,7 +291,12 @@ class MOBIHeader(object): # {{{
|
|||||||
(self.fcis_number, self.fcis_count, self.flis_number,
|
(self.fcis_number, self.fcis_count, self.flis_number,
|
||||||
self.flis_count) = struct.unpack(b'>IIII',
|
self.flis_count) = struct.unpack(b'>IIII',
|
||||||
self.raw[200:216])
|
self.raw[200:216])
|
||||||
self.unknown6 = self.raw[216:240]
|
self.unknown6 = self.raw[216:224]
|
||||||
|
self.srcs_record_index = struct.unpack(b'>I',
|
||||||
|
self.raw[224:228])[0]
|
||||||
|
self.num_srcs_records = struct.unpack(b'>I',
|
||||||
|
self.raw[228:232])[0]
|
||||||
|
self.unknown7 = self.raw[232:240]
|
||||||
self.extra_data_flags = struct.unpack(b'>I',
|
self.extra_data_flags = struct.unpack(b'>I',
|
||||||
self.raw[240:244])[0]
|
self.raw[240:244])[0]
|
||||||
self.has_multibytes = bool(self.extra_data_flags & 0b1)
|
self.has_multibytes = bool(self.extra_data_flags & 0b1)
|
||||||
@ -339,7 +345,7 @@ class MOBIHeader(object): # {{{
|
|||||||
ans.append('Huffman record offset: %d'%self.huffman_record_offset)
|
ans.append('Huffman record offset: %d'%self.huffman_record_offset)
|
||||||
ans.append('Huffman record count: %d'%self.huffman_record_count)
|
ans.append('Huffman record count: %d'%self.huffman_record_count)
|
||||||
ans.append('Unknown2: %r'%self.unknown2)
|
ans.append('Unknown2: %r'%self.unknown2)
|
||||||
ans.append('EXTH flags: %r (%s)'%(self.exth_flags, self.has_exth))
|
ans.append('EXTH flags: %s (%s)'%(bin(self.exth_flags)[2:], self.has_exth))
|
||||||
if self.has_drm_data:
|
if self.has_drm_data:
|
||||||
ans.append('Unknown3: %r'%self.unknown3)
|
ans.append('Unknown3: %r'%self.unknown3)
|
||||||
ans.append('DRM Offset: %s'%self.drm_offset)
|
ans.append('DRM Offset: %s'%self.drm_offset)
|
||||||
@ -356,6 +362,9 @@ class MOBIHeader(object): # {{{
|
|||||||
ans.append('FLIS number: %d'% self.flis_number)
|
ans.append('FLIS number: %d'% self.flis_number)
|
||||||
ans.append('FLIS count: %d'% self.flis_count)
|
ans.append('FLIS count: %d'% self.flis_count)
|
||||||
ans.append('Unknown6: %r'% self.unknown6)
|
ans.append('Unknown6: %r'% self.unknown6)
|
||||||
|
ans.append('SRCS record index: %d'%self.srcs_record_index)
|
||||||
|
ans.append('Number of SRCS records?: %d'%self.num_srcs_records)
|
||||||
|
ans.append('Unknown7: %r'%self.unknown7)
|
||||||
ans.append(('Extra data flags: %s (has multibyte: %s) '
|
ans.append(('Extra data flags: %s (has multibyte: %s) '
|
||||||
'(has indexing: %s) (has uncrossable breaks: %s)')%(
|
'(has indexing: %s) (has uncrossable breaks: %s)')%(
|
||||||
bin(self.extra_data_flags), self.has_multibytes,
|
bin(self.extra_data_flags), self.has_multibytes,
|
||||||
@ -399,6 +408,7 @@ class IndexHeader(object): # {{{
|
|||||||
def __init__(self, record):
|
def __init__(self, record):
|
||||||
self.record = record
|
self.record = record
|
||||||
raw = self.record.raw
|
raw = self.record.raw
|
||||||
|
#open('/t/index_header.bin', 'wb').write(raw)
|
||||||
if raw[:4] != b'INDX':
|
if raw[:4] != b'INDX':
|
||||||
raise ValueError('Invalid Primary Index Record')
|
raise ValueError('Invalid Primary Index Record')
|
||||||
|
|
||||||
@ -406,7 +416,7 @@ class IndexHeader(object): # {{{
|
|||||||
self.unknown1 = raw[8:16]
|
self.unknown1 = raw[8:16]
|
||||||
self.index_type, = struct.unpack('>I', raw[16:20])
|
self.index_type, = struct.unpack('>I', raw[16:20])
|
||||||
self.index_type_desc = {0: 'normal', 2:
|
self.index_type_desc = {0: 'normal', 2:
|
||||||
'inflection'}.get(self.index_type, 'unknown')
|
'inflection', 6: 'calibre'}.get(self.index_type, 'unknown')
|
||||||
self.idxt_start, = struct.unpack('>I', raw[20:24])
|
self.idxt_start, = struct.unpack('>I', raw[20:24])
|
||||||
self.index_count, = struct.unpack('>I', raw[24:28])
|
self.index_count, = struct.unpack('>I', raw[24:28])
|
||||||
self.index_encoding_num, = struct.unpack('>I', raw[28:32])
|
self.index_encoding_num, = struct.unpack('>I', raw[28:32])
|
||||||
@ -415,12 +425,7 @@ class IndexHeader(object): # {{{
|
|||||||
if self.index_encoding == 'unknown':
|
if self.index_encoding == 'unknown':
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
'Unknown index encoding: %d'%self.index_encoding_num)
|
'Unknown index encoding: %d'%self.index_encoding_num)
|
||||||
self.locale_raw, = struct.unpack(b'>I', raw[32:36])
|
self.possibly_language = raw[32:36]
|
||||||
langcode = self.locale_raw
|
|
||||||
langid = langcode & 0xFF
|
|
||||||
sublangid = (langcode >> 10) & 0xFF
|
|
||||||
self.language = main_language.get(langid, 'ENGLISH')
|
|
||||||
self.sublanguage = sub_language.get(sublangid, 'NEUTRAL')
|
|
||||||
self.num_index_entries, = struct.unpack('>I', raw[36:40])
|
self.num_index_entries, = struct.unpack('>I', raw[36:40])
|
||||||
self.ordt_start, = struct.unpack('>I', raw[40:44])
|
self.ordt_start, = struct.unpack('>I', raw[40:44])
|
||||||
self.ligt_start, = struct.unpack('>I', raw[44:48])
|
self.ligt_start, = struct.unpack('>I', raw[44:48])
|
||||||
@ -480,8 +485,7 @@ class IndexHeader(object): # {{{
|
|||||||
a('Number of index records: %d'%self.index_count)
|
a('Number of index records: %d'%self.index_count)
|
||||||
a('Index encoding: %s (%d)'%(self.index_encoding,
|
a('Index encoding: %s (%d)'%(self.index_encoding,
|
||||||
self.index_encoding_num))
|
self.index_encoding_num))
|
||||||
a('Index language: %s - %s (%s)'%(self.language, self.sublanguage,
|
a('Unknown (possibly language?): %r'%(self.possibly_language))
|
||||||
hex(self.locale_raw)))
|
|
||||||
a('Number of index entries: %d'% self.num_index_entries)
|
a('Number of index entries: %d'% self.num_index_entries)
|
||||||
a('ORDT start: %d'%self.ordt_start)
|
a('ORDT start: %d'%self.ordt_start)
|
||||||
a('LIGT start: %d'%self.ligt_start)
|
a('LIGT start: %d'%self.ligt_start)
|
||||||
@ -596,10 +600,14 @@ class IndexEntry(object): # {{{
|
|||||||
0x3f : 'article',
|
0x3f : 'article',
|
||||||
}
|
}
|
||||||
|
|
||||||
def __init__(self, ident, entry_type, raw, cncx, tagx_entries):
|
def __init__(self, ident, entry_type, raw, cncx, tagx_entries, flags=0):
|
||||||
self.index = ident
|
self.index = ident
|
||||||
self.raw = raw
|
self.raw = raw
|
||||||
self.tags = []
|
self.tags = []
|
||||||
|
self.entry_type_raw = entry_type
|
||||||
|
self.byte_size = len(raw)
|
||||||
|
|
||||||
|
orig_raw = raw
|
||||||
|
|
||||||
try:
|
try:
|
||||||
self.entry_type = self.TYPES[entry_type]
|
self.entry_type = self.TYPES[entry_type]
|
||||||
@ -619,6 +627,27 @@ class IndexEntry(object): # {{{
|
|||||||
vals.append(val)
|
vals.append(val)
|
||||||
self.tags.append(Tag(tag, vals, self.entry_type, cncx))
|
self.tags.append(Tag(tag, vals, self.entry_type, cncx))
|
||||||
|
|
||||||
|
if flags & 0b10:
|
||||||
|
# Look for optional description and author
|
||||||
|
desc_tag = [t for t in tagx_entries if t.tag == 22]
|
||||||
|
if desc_tag and raw:
|
||||||
|
val, consumed = decint(raw)
|
||||||
|
raw = raw[consumed:]
|
||||||
|
if val:
|
||||||
|
self.tags.append(Tag(desc_tag[0], [val], self.entry_type,
|
||||||
|
cncx))
|
||||||
|
if flags & 0b100:
|
||||||
|
aut_tag = [t for t in tagx_entries if t.tag == 23]
|
||||||
|
if aut_tag and raw:
|
||||||
|
val, consumed = decint(raw)
|
||||||
|
raw = raw[consumed:]
|
||||||
|
if val:
|
||||||
|
self.tags.append(Tag(aut_tag[0], [val], self.entry_type,
|
||||||
|
cncx))
|
||||||
|
|
||||||
|
self.consumed = len(orig_raw) - len(raw)
|
||||||
|
self.trailing_bytes = raw
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def label(self):
|
def label(self):
|
||||||
for tag in self.tags:
|
for tag in self.tags:
|
||||||
@ -669,13 +698,16 @@ class IndexEntry(object): # {{{
|
|||||||
return -1
|
return -1
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
ans = ['Index Entry(index=%s, entry_type=%s, length=%d)'%(
|
ans = ['Index Entry(index=%s, entry_type=%s (%s), length=%d, byte_size=%d)'%(
|
||||||
self.index, self.entry_type, len(self.tags))]
|
self.index, self.entry_type, bin(self.entry_type_raw)[2:],
|
||||||
|
len(self.tags), self.byte_size)]
|
||||||
for tag in self.tags:
|
for tag in self.tags:
|
||||||
ans.append('\t'+str(tag))
|
ans.append('\t'+str(tag))
|
||||||
if self.first_child_index != -1:
|
if self.first_child_index != -1:
|
||||||
ans.append('\tNumber of children: %d'%(self.last_child_index -
|
ans.append('\tNumber of children: %d'%(self.last_child_index -
|
||||||
self.first_child_index + 1))
|
self.first_child_index + 1))
|
||||||
|
if self.trailing_bytes:
|
||||||
|
ans.append('\tTrailing bytes: %r'%self.trailing_bytes)
|
||||||
return '\n'.join(ans)
|
return '\n'.join(ans)
|
||||||
|
|
||||||
# }}}
|
# }}}
|
||||||
@ -690,6 +722,7 @@ class IndexRecord(object): # {{{
|
|||||||
def __init__(self, record, index_header, cncx):
|
def __init__(self, record, index_header, cncx):
|
||||||
self.record = record
|
self.record = record
|
||||||
raw = self.record.raw
|
raw = self.record.raw
|
||||||
|
|
||||||
if raw[:4] != b'INDX':
|
if raw[:4] != b'INDX':
|
||||||
raise ValueError('Invalid Primary Index Record')
|
raise ValueError('Invalid Primary Index Record')
|
||||||
|
|
||||||
@ -713,8 +746,12 @@ class IndexRecord(object): # {{{
|
|||||||
for i in range(self.idxt_count):
|
for i in range(self.idxt_count):
|
||||||
off, = u(b'>H', indices[i*2:(i+1)*2])
|
off, = u(b'>H', indices[i*2:(i+1)*2])
|
||||||
self.index_offsets.append(off-192)
|
self.index_offsets.append(off-192)
|
||||||
|
rest = indices[(i+1)*2:]
|
||||||
|
if rest.replace(b'\0', ''): # There can be padding null bytes
|
||||||
|
raise ValueError('Extra bytes after IDXT table: %r'%rest)
|
||||||
|
|
||||||
indxt = raw[192:self.idxt_offset]
|
indxt = raw[192:self.idxt_offset]
|
||||||
|
self.size_of_indxt_block = len(indxt)
|
||||||
self.indices = []
|
self.indices = []
|
||||||
for i, off in enumerate(self.index_offsets):
|
for i, off in enumerate(self.index_offsets):
|
||||||
try:
|
try:
|
||||||
@ -723,9 +760,18 @@ class IndexRecord(object): # {{{
|
|||||||
next_off = len(indxt)
|
next_off = len(indxt)
|
||||||
index, consumed = decode_hex_number(indxt[off:])
|
index, consumed = decode_hex_number(indxt[off:])
|
||||||
entry_type = ord(indxt[off+consumed])
|
entry_type = ord(indxt[off+consumed])
|
||||||
|
d, flags = 1, 0
|
||||||
|
if index_header.index_type == 6:
|
||||||
|
flags = ord(indxt[off+consumed+d])
|
||||||
|
d += 1
|
||||||
|
pos = off+consumed+d
|
||||||
self.indices.append(IndexEntry(index, entry_type,
|
self.indices.append(IndexEntry(index, entry_type,
|
||||||
indxt[off+consumed+1:next_off], cncx, index_header.tagx_entries))
|
indxt[pos:next_off], cncx,
|
||||||
index = self.indices[-1]
|
index_header.tagx_entries, flags=flags))
|
||||||
|
|
||||||
|
rest = indxt[pos+self.indices[-1].consumed:]
|
||||||
|
if rest.replace(b'\0', ''): # There can be padding null bytes
|
||||||
|
raise ValueError('Extra bytes after IDXT table: %r'%rest)
|
||||||
|
|
||||||
def get_parent(self, index):
|
def get_parent(self, index):
|
||||||
if index.depth < 1:
|
if index.depth < 1:
|
||||||
@ -744,14 +790,15 @@ class IndexRecord(object): # {{{
|
|||||||
len(w), not bool(w.replace(b'\0', b'')) ))
|
len(w), not bool(w.replace(b'\0', b'')) ))
|
||||||
a('Header length: %d'%self.header_length)
|
a('Header length: %d'%self.header_length)
|
||||||
u(self.unknown1)
|
u(self.unknown1)
|
||||||
a('Header Type: %d'%self.header_type)
|
a('Unknown (header type? index record number? always 1?): %d'%self.header_type)
|
||||||
u(self.unknown2)
|
u(self.unknown2)
|
||||||
a('IDXT Offset: %d'%self.idxt_offset)
|
a('IDXT Offset (%d block size): %d'%(self.size_of_indxt_block,
|
||||||
|
self.idxt_offset))
|
||||||
a('IDXT Count: %d'%self.idxt_count)
|
a('IDXT Count: %d'%self.idxt_count)
|
||||||
u(self.unknown3)
|
u(self.unknown3)
|
||||||
u(self.unknown4)
|
u(self.unknown4)
|
||||||
a('Index offsets: %r'%self.index_offsets)
|
a('Index offsets: %r'%self.index_offsets)
|
||||||
a('\nIndex Entries:')
|
a('\nIndex Entries (%d entries):'%len(self.indices))
|
||||||
for entry in self.indices:
|
for entry in self.indices:
|
||||||
a(str(entry)+'\n')
|
a(str(entry)+'\n')
|
||||||
|
|
||||||
@ -797,6 +844,7 @@ class TextRecord(object): # {{{
|
|||||||
|
|
||||||
def __init__(self, idx, record, extra_data_flags, decompress):
|
def __init__(self, idx, record, extra_data_flags, decompress):
|
||||||
self.trailing_data, self.raw = get_trailing_data(record.raw, extra_data_flags)
|
self.trailing_data, self.raw = get_trailing_data(record.raw, extra_data_flags)
|
||||||
|
raw_trailing_bytes = record.raw[len(self.raw):]
|
||||||
self.raw = decompress(self.raw)
|
self.raw = decompress(self.raw)
|
||||||
if 0 in self.trailing_data:
|
if 0 in self.trailing_data:
|
||||||
self.trailing_data['multibyte_overlap'] = self.trailing_data.pop(0)
|
self.trailing_data['multibyte_overlap'] = self.trailing_data.pop(0)
|
||||||
@ -804,6 +852,7 @@ class TextRecord(object): # {{{
|
|||||||
self.trailing_data['indexing'] = self.trailing_data.pop(1)
|
self.trailing_data['indexing'] = self.trailing_data.pop(1)
|
||||||
if 2 in self.trailing_data:
|
if 2 in self.trailing_data:
|
||||||
self.trailing_data['uncrossable_breaks'] = self.trailing_data.pop(2)
|
self.trailing_data['uncrossable_breaks'] = self.trailing_data.pop(2)
|
||||||
|
self.trailing_data['raw_bytes'] = raw_trailing_bytes
|
||||||
|
|
||||||
self.idx = idx
|
self.idx = idx
|
||||||
|
|
||||||
@ -917,22 +966,27 @@ class TBSIndexing(object): # {{{
|
|||||||
ans.append(('\t\tIndex Entry: %d (Parent index: %d, '
|
ans.append(('\t\tIndex Entry: %d (Parent index: %d, '
|
||||||
'Depth: %d, Offset: %d, Size: %d) [%s]')%(
|
'Depth: %d, Offset: %d, Size: %d) [%s]')%(
|
||||||
x.index, x.parent_index, x.depth, x.offset, x.size, x.label))
|
x.index, x.parent_index, x.depth, x.offset, x.size, x.label))
|
||||||
def bin3(num):
|
def bin4(num):
|
||||||
ans = bin(num)[2:]
|
ans = bin(num)[2:]
|
||||||
return '0'*(3-len(ans)) + ans
|
return bytes('0'*(4-len(ans)) + ans)
|
||||||
|
|
||||||
|
def repr_extra(x):
|
||||||
|
return str({bin4(k):v for k, v in extra.iteritems()})
|
||||||
|
|
||||||
tbs_type = 0
|
tbs_type = 0
|
||||||
|
is_periodical = self.doc_type in (257, 258, 259)
|
||||||
if len(byts):
|
if len(byts):
|
||||||
outer, consumed = decint(byts)
|
outermost_index, extra, consumed = decode_tbs(byts, flag_size=4 if
|
||||||
|
is_periodical else 3)
|
||||||
byts = byts[consumed:]
|
byts = byts[consumed:]
|
||||||
tbs_type = outer & 0b111
|
for k in extra:
|
||||||
ans.append('TBS Type: %s (%d)'%(bin3(tbs_type), tbs_type))
|
tbs_type |= k
|
||||||
ans.append('Outer Index entry: %d'%(outer >> 3))
|
ans.append('\nTBS: %d (%s)'%(tbs_type, bin4(tbs_type)))
|
||||||
arg1, consumed = decint(byts)
|
ans.append('Outermost index: %d'%outermost_index)
|
||||||
byts = byts[consumed:]
|
ans.append('Unknown extra start bytes: %s'%repr_extra(extra))
|
||||||
ans.append('Unknown (vwi: always 0?): %d'%arg1)
|
if is_periodical: # Hierarchical periodical
|
||||||
if self.doc_type in (257, 259): # Hierarchical periodical
|
byts, a = self.interpret_periodical(tbs_type, byts,
|
||||||
byts, a = self.interpret_periodical(tbs_type, byts)
|
dat['geom'][0])
|
||||||
ans += a
|
ans += a
|
||||||
if byts:
|
if byts:
|
||||||
sbyts = tuple(hex(b)[2:] for b in byts)
|
sbyts = tuple(hex(b)[2:] for b in byts)
|
||||||
@ -941,159 +995,88 @@ class TBSIndexing(object): # {{{
|
|||||||
ans.append('')
|
ans.append('')
|
||||||
return tbs_type, ans
|
return tbs_type, ans
|
||||||
|
|
||||||
def interpret_periodical(self, tbs_type, byts):
|
def interpret_periodical(self, tbs_type, byts, record_offset):
|
||||||
ans = []
|
ans = []
|
||||||
|
|
||||||
def tbs_type_6(byts, psi=None, msg=None, fmsg='Unknown'): # {{{
|
def read_section_transitions(byts, psi=None): # {{{
|
||||||
if psi is None:
|
if psi is None:
|
||||||
# Assume parent section is 1
|
# Assume previous section is 1
|
||||||
psi = self.get_index(1)
|
psi = self.get_index(1)
|
||||||
if msg is None:
|
|
||||||
msg = ('Article index at start of record or first article'
|
|
||||||
' index, relative to parent section')
|
|
||||||
if byts:
|
|
||||||
# byts could be empty
|
|
||||||
arg, consumed = decint(byts)
|
|
||||||
byts = byts[consumed:]
|
|
||||||
flags = (arg & 0b1111)
|
|
||||||
ai = (arg >> 4)
|
|
||||||
ans.append('%s (fvwi): %d [%d absolute]'%(msg, ai,
|
|
||||||
ai+psi.index))
|
|
||||||
if flags == 1:
|
|
||||||
arg, consumed = decint(byts)
|
|
||||||
if arg == 0:
|
|
||||||
# EOF of record, otherwise ignore and hope someone else
|
|
||||||
# will deal with these bytes
|
|
||||||
byts = byts[consumed:]
|
|
||||||
ans.append('EOF (vwi: should be 0): %d'%arg)
|
|
||||||
elif flags in (4, 5):
|
|
||||||
num = byts[0]
|
|
||||||
byts = byts[1:]
|
|
||||||
ans.append('Number of article nodes in the record (byte): %d'%num)
|
|
||||||
if flags == 5:
|
|
||||||
arg, consumed = decint(byts)
|
|
||||||
byts = byts[consumed:]
|
|
||||||
ans.append('%s (vwi)): %d'%(fmsg, arg))
|
|
||||||
elif flags == 0:
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
raise ValueError('Unknown flags: %d'%flags)
|
|
||||||
return byts
|
|
||||||
|
|
||||||
# }}}
|
|
||||||
|
|
||||||
if tbs_type == 3: # {{{
|
|
||||||
arg2, consumed = decint(byts)
|
|
||||||
byts = byts[consumed:]
|
|
||||||
ans.append('Unknown (vwi: always 0?): %d'%arg2)
|
|
||||||
|
|
||||||
arg3, consumed = decint(byts)
|
|
||||||
byts = byts[consumed:]
|
|
||||||
fsi = arg3 >> 4
|
|
||||||
flags = arg3 & 0b1111
|
|
||||||
ans.append('First section index (fvwi): %d'%fsi)
|
|
||||||
psi = self.get_index(fsi)
|
|
||||||
ans.append('Flags (flag: always 0?): %d'%flags)
|
|
||||||
if flags == 4:
|
|
||||||
ans.append('Number of articles in this section: %d'%byts[0])
|
|
||||||
byts = byts[1:]
|
|
||||||
elif flags == 0:
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
raise ValueError('Unknown flags value: %d'%flags)
|
|
||||||
|
|
||||||
|
|
||||||
if byts:
|
|
||||||
byts = tbs_type_6(byts, psi=psi,
|
|
||||||
msg=('First article of ending section, relative to its'
|
|
||||||
' parent\'s index'),
|
|
||||||
fmsg=('->Offset from start of record to beginning of'
|
|
||||||
' last starting section'))
|
|
||||||
while byts:
|
while byts:
|
||||||
# We have a transition not just an opening first section
|
ai, extra, consumed = decode_tbs(byts)
|
||||||
psi = self.get_index(psi.index+1)
|
|
||||||
arg, consumed = decint(byts)
|
|
||||||
off = arg >> 4
|
|
||||||
byts = byts[consumed:]
|
byts = byts[consumed:]
|
||||||
flags = arg & 0b1111
|
if extra.get(0b0010, None) is not None:
|
||||||
ans.append('Last article of ending section w.r.t. starting'
|
raise ValueError('Dont know how to interpret flag 0b0010'
|
||||||
' section offset (fvwi): %d [%d absolute]'%(off,
|
' while reading section transitions')
|
||||||
psi.index+off))
|
if extra.get(0b1000, None) is not None:
|
||||||
ans.append('Flags (always 8?): %d'%flags)
|
if len(extra) > 1:
|
||||||
byts = tbs_type_6(byts, psi=psi)
|
raise ValueError('Dont know how to interpret flags'
|
||||||
if byts:
|
' %r while reading section transitions'%extra)
|
||||||
# Ended with flag 1,and not EOF, which means there's
|
nsi = self.get_index(psi.index+1)
|
||||||
# another section transition in this record
|
ans.append('Last article in this record of section %d'
|
||||||
arg, consumed = decint(byts)
|
' (relative to next section index [%d]): '
|
||||||
byts = byts[consumed:]
|
'%d [%d absolute index]'%(psi.index, nsi.index, ai,
|
||||||
ans.append('->Offset from start of record to beginning of '
|
ai+nsi.index))
|
||||||
'last starting section: %d'%(arg))
|
psi = nsi
|
||||||
|
continue
|
||||||
|
|
||||||
|
ans.append('First article in this record of section %d'
|
||||||
|
' (relative to its parent section): '
|
||||||
|
'%d [%d absolute index]'%(psi.index, ai, ai+psi.index))
|
||||||
|
|
||||||
|
num = extra.get(0b0100, None)
|
||||||
|
if num is None:
|
||||||
|
msg = ('The section %d has at most one article'
|
||||||
|
' in this record')%psi.index
|
||||||
else:
|
else:
|
||||||
break
|
msg = ('Number of articles in this record of '
|
||||||
|
'section %d: %d')%(psi.index, num)
|
||||||
|
ans.append(msg)
|
||||||
|
|
||||||
# }}}
|
offset = extra.get(0b0001, None)
|
||||||
|
if offset is not None:
|
||||||
|
if offset == 0:
|
||||||
|
ans.append('This record is spanned by the article:'
|
||||||
|
'%d'%(ai+psi.index))
|
||||||
|
else:
|
||||||
|
ans.append('->Offset to start of next section (%d) from start'
|
||||||
|
' of record: %d [%d absolute offset]'%(psi.index+1,
|
||||||
|
offset, offset+record_offset))
|
||||||
|
return byts
|
||||||
|
# }}}
|
||||||
|
|
||||||
elif tbs_type == 7: # {{{
|
def read_starting_section(byts): # {{{
|
||||||
# This occurs for records that have no section nodes and
|
orig = byts
|
||||||
# whose parent section's index == 1
|
si, extra, consumed = decode_tbs(byts)
|
||||||
ans.append('Unknown (maybe vwi?): %r'%bytes(byts[:2]))
|
|
||||||
byts = byts[2:]
|
|
||||||
arg, consumed = decint(byts)
|
|
||||||
byts = byts[consumed:]
|
byts = byts[consumed:]
|
||||||
ai = arg >> 4
|
if len(extra) > 1 or 0b0010 in extra or 0b1000 in extra:
|
||||||
flags = arg & 0b1111
|
raise ValueError('Dont know how to interpret flags %r'
|
||||||
ans.append('Article at start of record (fvwi): %d'%ai)
|
' when reading starting section'%extra)
|
||||||
if flags == 4:
|
si = self.get_index(si)
|
||||||
num = byts[0]
|
ans.append('The section at the start of this record is:'
|
||||||
byts = byts[1:]
|
' %d'%si.index)
|
||||||
ans.append('Number of articles in record (byte): %d'%num)
|
if 0b0100 in extra:
|
||||||
elif flags == 0:
|
num = extra[0b0100]
|
||||||
pass
|
ans.append('The number of articles from the section %d'
|
||||||
elif flags == 1:
|
' in this record: %d'%(si.index, num))
|
||||||
arg, consumed = decint(byts)
|
elif 0b0001 in extra:
|
||||||
byts = byts[consumed:]
|
eof = extra[0b0001]
|
||||||
ans.append('EOF (vwi: should be 0): %d'%arg)
|
if eof != 0:
|
||||||
else:
|
raise ValueError('Unknown eof value %s when reading'
|
||||||
raise ValueError('Unknown flags value: %d'%flags)
|
' starting section. All bytes: %r'%(eof, orig))
|
||||||
|
ans.append('This record is spanned by an article from'
|
||||||
|
' the section: %d'%si.index)
|
||||||
|
return si, byts
|
||||||
# }}}
|
# }}}
|
||||||
|
|
||||||
elif tbs_type == 6: # {{{
|
if tbs_type & 0b0100:
|
||||||
# This is used for records spanned by an article whose parent
|
# Starting section is the first section
|
||||||
# section's index == 1 or for the opening record if it contains the
|
ssi = self.get_index(1)
|
||||||
# periodical start, section 1 start and at least one article. The
|
else:
|
||||||
# two cases are distinguished by the flags on the article index
|
ssi, byts = read_starting_section(byts)
|
||||||
# vwi.
|
|
||||||
unk = byts[0]
|
|
||||||
byts = byts[1:]
|
|
||||||
ans.append('Unknown (byte: always 2?): %d'%unk)
|
|
||||||
byts = tbs_type_6(byts)
|
|
||||||
# }}}
|
|
||||||
|
|
||||||
elif tbs_type == 2: # {{{
|
byts = read_section_transitions(byts, ssi)
|
||||||
# This occurs for records with no section nodes and whose parent
|
|
||||||
# section's index != 1 (undefined (records before the first
|
|
||||||
# section) or > 1)
|
|
||||||
# This is also used for records that are spanned by an article
|
|
||||||
# whose parent section index > 1. In this case the flags of the
|
|
||||||
# vwi referring to the article at the start
|
|
||||||
# of the record are set to 1 instead of 4.
|
|
||||||
arg, consumed = decint(byts)
|
|
||||||
byts = byts[consumed:]
|
|
||||||
flags = (arg & 0b1111)
|
|
||||||
psi = (arg >> 4)
|
|
||||||
ans.append('Parent section index (fvwi): %d'%psi)
|
|
||||||
psi = self.get_index(psi)
|
|
||||||
ans.append('Flags: %d'%flags)
|
|
||||||
if flags == 1:
|
|
||||||
arg, consumed = decint(byts)
|
|
||||||
byts = byts[consumed:]
|
|
||||||
ans.append('Unknown (vwi?: always 0?): %d'%arg)
|
|
||||||
byts = tbs_type_6(byts, psi=psi)
|
|
||||||
elif flags == 0:
|
|
||||||
byts = tbs_type_6(byts, psi=psi)
|
|
||||||
else:
|
|
||||||
raise ValueError('Unkown flags: %d'%flags)
|
|
||||||
# }}}
|
|
||||||
|
|
||||||
return byts, ans
|
return byts, ans
|
||||||
|
|
||||||
|
@ -3,6 +3,20 @@ Reverse engineering the trailing byte sequences for hierarchical periodicals
|
|||||||
|
|
||||||
In the following, *vwi* means variable width integer and *fvwi* means a vwi whose lowest four bits are used as a flag. All the following information/inferences are from examining the output of kindlegen on a sample periodical. Given the general level of Amazon's incompetence, there are no guarantees that this information is the *best/most complete* way to do TBS indexing.
|
In the following, *vwi* means variable width integer and *fvwi* means a vwi whose lowest four bits are used as a flag. All the following information/inferences are from examining the output of kindlegen on a sample periodical. Given the general level of Amazon's incompetence, there are no guarantees that this information is the *best/most complete* way to do TBS indexing.
|
||||||
|
|
||||||
|
Sequence encoding:
|
||||||
|
|
||||||
|
0b1000 : Continuation bit
|
||||||
|
|
||||||
|
First sequences:
|
||||||
|
0b0010 : 80
|
||||||
|
0b0011 : 80 80
|
||||||
|
0b0110 : 80 2
|
||||||
|
0b0111 : 80 2 80
|
||||||
|
|
||||||
|
Other sequences:
|
||||||
|
0b0101 : 4 1a
|
||||||
|
0b0001 : c b1
|
||||||
|
|
||||||
Opening record
|
Opening record
|
||||||
----------------
|
----------------
|
||||||
|
|
||||||
@ -52,10 +66,60 @@ The text record that contains the opening node for the periodical (depth=0 node
|
|||||||
|
|
||||||
If there was only a single article, instead of 2, then the last two bytes would be: c0, i.e. there would be no byte giving the number of articles in the record.
|
If there was only a single article, instead of 2, then the last two bytes would be: c0, i.e. there would be no byte giving the number of articles in the record.
|
||||||
|
|
||||||
|
Starting record with two section transitions::
|
||||||
|
|
||||||
|
Record #1: Starts at: 0 Ends at: 4095
|
||||||
|
Contains: 7 index entries (0 ends, 4 complete, 3 starts)
|
||||||
|
TBS bytes: 86 80 2 c0 b8 c4 3
|
||||||
|
Complete:
|
||||||
|
Index Entry: 1 (Parent index: 0, Depth: 1, Offset: 564, Size: 375) [Ars Technica]
|
||||||
|
Index Entry: 5 (Parent index: 1, Depth: 2, Offset: 572, Size: 367) [Week in gaming: 3DS review, Crysis 2, George Hotz]
|
||||||
|
Index Entry: 6 (Parent index: 2, Depth: 2, Offset: 947, Size: 1014) [Max and the Magic Marker for iPad: Review]
|
||||||
|
Index Entry: 7 (Parent index: 2, Depth: 2, Offset: 1961, Size: 1077) [iPad 2 steers itself into home console gaming territory with Real Racing 2 HD]
|
||||||
|
Starts:
|
||||||
|
Index Entry: 0 (Parent index: -1, Depth: 0, Offset: 215, Size: 35372) [j_x's Google reader]
|
||||||
|
Index Entry: 2 (Parent index: 0, Depth: 1, Offset: 939, Size: 10368) [Neowin.net]
|
||||||
|
Index Entry: 8 (Parent index: 2, Depth: 2, Offset: 3038, Size: 1082) [Microsoft's Joe Belfiore still working on upcoming Zune hardware]
|
||||||
|
TBS Type: 110 (6)
|
||||||
|
Outer Index entry: 0
|
||||||
|
Unknown (vwi: always 0?): 0
|
||||||
|
Unknown (byte: always 2?): 2
|
||||||
|
Article index at start of record or first article index, relative to parent section (fvwi): 4 [5 absolute]
|
||||||
|
Remaining bytes: b8 c4 3
|
||||||
|
|
||||||
|
Starting record with three section transitions::
|
||||||
|
|
||||||
|
Record #1: Starts at: 0 Ends at: 4095
|
||||||
|
Contains: 10 index entries (0 ends, 7 complete, 3 starts)
|
||||||
|
TBS bytes: 86 80 2 c0 b8 c0 b8 c4 4
|
||||||
|
Complete:
|
||||||
|
Index Entry: 1 (Parent index: 0, Depth: 1, Offset: 564, Size: 375) [Ars Technica]
|
||||||
|
Index Entry: 2 (Parent index: 0, Depth: 1, Offset: 939, Size: 316) [Neowin.net]
|
||||||
|
Index Entry: 5 (Parent index: 1, Depth: 2, Offset: 572, Size: 367) [Week in gaming: 3DS review, Crysis 2, George Hotz]
|
||||||
|
Index Entry: 6 (Parent index: 2, Depth: 2, Offset: 947, Size: 308) [Max and the Magic Marker for iPad: Review]
|
||||||
|
Index Entry: 7 (Parent index: 3, Depth: 2, Offset: 1263, Size: 760) [OSnews Asks on Interrupts: The Results]
|
||||||
|
Index Entry: 8 (Parent index: 3, Depth: 2, Offset: 2023, Size: 693) [Apple Ditches SAMBA in Favour of Homegrown Replacement]
|
||||||
|
Index Entry: 9 (Parent index: 3, Depth: 2, Offset: 2716, Size: 747) [ITC: Apple's Mobile Products Do Not Violate Nokia Patents]
|
||||||
|
Starts:
|
||||||
|
Index Entry: 0 (Parent index: -1, Depth: 0, Offset: 215, Size: 25320) [j_x's Google reader]
|
||||||
|
Index Entry: 3 (Parent index: 0, Depth: 1, Offset: 1255, Size: 6829) [OSNews]
|
||||||
|
Index Entry: 10 (Parent index: 3, Depth: 2, Offset: 3463, Size: 666) [Transparent Monitor Embedded in Window Glass]
|
||||||
|
TBS Type: 110 (6)
|
||||||
|
Outer Index entry: 0
|
||||||
|
Unknown (vwi: always 0?): 0
|
||||||
|
Unknown (byte: always 2?): 2
|
||||||
|
Article index at start of record or first article index, relative to parent section (fvwi): 4 [5 absolute]
|
||||||
|
Remaining bytes: b8 c0 b8 c4 4
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Records with no nodes
|
Records with no nodes
|
||||||
------------------------
|
------------------------
|
||||||
|
|
||||||
|
subtype = 010
|
||||||
|
|
||||||
These records are spanned by a single article. They are of two types:
|
These records are spanned by a single article. They are of two types:
|
||||||
|
|
||||||
1. If the parent section index is 1, TBS type of 6, like this::
|
1. If the parent section index is 1, TBS type of 6, like this::
|
||||||
@ -247,7 +311,7 @@ In such a record there is a transition from one section to the next. As such the
|
|||||||
Last article of ending section w.r.t. starting section offset (fvwi): 12 [15 absolute]
|
Last article of ending section w.r.t. starting section offset (fvwi): 12 [15 absolute]
|
||||||
Flags (always 8?): 8
|
Flags (always 8?): 8
|
||||||
Article index at start of record or first article index, relative to parent section (fvwi): 13 [16 absolute]
|
Article index at start of record or first article index, relative to parent section (fvwi): 13 [16 absolute]
|
||||||
Number of article nodes in the record (byte): 4
|
Number of article nodes in the record belonging ot the last section (byte): 4
|
||||||
|
|
||||||
|
|
||||||
Ending record
|
Ending record
|
||||||
@ -274,3 +338,26 @@ Logically, ending records must have at least one article ending, one section end
|
|||||||
|
|
||||||
If the record had only a single article end, the last two bytes would be replaced with: f0
|
If the record had only a single article end, the last two bytes would be replaced with: f0
|
||||||
|
|
||||||
|
If the last record has multiple section transitions, it is of type 6 and looks like::
|
||||||
|
|
||||||
|
Record #9: Starts at: 32768 Ends at: 34953
|
||||||
|
Contains: 9 index entries (3 ends, 6 complete, 0 starts)
|
||||||
|
TBS bytes: 86 80 2 1 d0 1 c8 1 d0 1 c8 1 d0 1 c8 1 d0
|
||||||
|
Ends:
|
||||||
|
Index Entry: 0 (Parent index: -1, Depth: 0, Offset: 215, Size: 34739) [j_x's Google reader]
|
||||||
|
Index Entry: 1 (Parent index: 0, Depth: 1, Offset: 7758, Size: 26279) [Ars Technica]
|
||||||
|
Index Entry: 14 (Parent index: 1, Depth: 2, Offset: 31929, Size: 2108) [Trademarked keyword sales may soon be restricted in Europe]
|
||||||
|
Complete:
|
||||||
|
Index Entry: 2 (Parent index: 0, Depth: 1, Offset: 34037, Size: 316) [Neowin.net]
|
||||||
|
Index Entry: 3 (Parent index: 0, Depth: 1, Offset: 34353, Size: 282) [OSNews]
|
||||||
|
Index Entry: 4 (Parent index: 0, Depth: 1, Offset: 34635, Size: 319) [Slashdot]
|
||||||
|
Index Entry: 15 (Parent index: 2, Depth: 2, Offset: 34045, Size: 308) [Max and the Magic Marker for iPad: Review]
|
||||||
|
Index Entry: 16 (Parent index: 3, Depth: 2, Offset: 34361, Size: 274) [OSnews Asks on Interrupts: The Results]
|
||||||
|
Index Entry: 17 (Parent index: 4, Depth: 2, Offset: 34643, Size: 311) [Leonard Nimoy Turns 80]
|
||||||
|
TBS Type: 110 (6)
|
||||||
|
Outer Index entry: 0
|
||||||
|
Unknown (vwi: always 0?): 0
|
||||||
|
Unknown (byte: always 2?): 2
|
||||||
|
Article index at start of record or first article index, relative to parent section (fvwi): 13 [14 absolute]
|
||||||
|
Remaining bytes: 1 c8 1 d0 1 c8 1 d0 1 c8 1 d0
|
||||||
|
|
||||||
|
@ -11,6 +11,7 @@ import struct
|
|||||||
from collections import OrderedDict
|
from collections import OrderedDict
|
||||||
|
|
||||||
from calibre.utils.magick.draw import Image, save_cover_data_to, thumbnail
|
from calibre.utils.magick.draw import Image, save_cover_data_to, thumbnail
|
||||||
|
from calibre.ebooks import normalize
|
||||||
|
|
||||||
IMAGE_MAX_SIZE = 10 * 1024 * 1024
|
IMAGE_MAX_SIZE = 10 * 1024 * 1024
|
||||||
|
|
||||||
@ -39,7 +40,10 @@ def encode_number_as_hex(num):
|
|||||||
The bytes that follow are simply the hexadecimal representation of the
|
The bytes that follow are simply the hexadecimal representation of the
|
||||||
number.
|
number.
|
||||||
'''
|
'''
|
||||||
num = bytes(hex(num)[2:])
|
num = bytes(hex(num)[2:].upper())
|
||||||
|
nlen = len(num)
|
||||||
|
if nlen % 2 != 0:
|
||||||
|
num = b'0'+num
|
||||||
ans = bytearray(num)
|
ans = bytearray(num)
|
||||||
ans.insert(0, len(num))
|
ans.insert(0, len(num))
|
||||||
return bytes(ans)
|
return bytes(ans)
|
||||||
@ -65,11 +69,14 @@ def encint(value, forward=True):
|
|||||||
If forward is True the bytes returned are suitable for prepending to the
|
If forward is True the bytes returned are suitable for prepending to the
|
||||||
output buffer, otherwise they must be append to the output buffer.
|
output buffer, otherwise they must be append to the output buffer.
|
||||||
'''
|
'''
|
||||||
|
if value < 0:
|
||||||
|
raise ValueError('Cannot encode negative numbers as vwi')
|
||||||
# Encode vwi
|
# Encode vwi
|
||||||
byts = bytearray()
|
byts = bytearray()
|
||||||
while True:
|
while True:
|
||||||
b = value & 0b01111111
|
b = value & 0b01111111
|
||||||
value >>= 7 # shift value to the right by 7 bits
|
value >>= 7 # shift value to the right by 7 bits
|
||||||
|
|
||||||
byts.append(b)
|
byts.append(b)
|
||||||
if value == 0:
|
if value == 0:
|
||||||
break
|
break
|
||||||
@ -184,7 +191,7 @@ def encode_trailing_data(raw):
|
|||||||
<data><size>
|
<data><size>
|
||||||
|
|
||||||
where size is a backwards encoded vwi whose value is the length of the
|
where size is a backwards encoded vwi whose value is the length of the
|
||||||
entire return bytestring.
|
entire returned bytestring. data is the bytestring passed in as raw.
|
||||||
|
|
||||||
This is the encoding used for trailing data entries at the end of text
|
This is the encoding used for trailing data entries at the end of text
|
||||||
records. See get_trailing_data() for details.
|
records. See get_trailing_data() for details.
|
||||||
@ -197,3 +204,131 @@ def encode_trailing_data(raw):
|
|||||||
lsize += 1
|
lsize += 1
|
||||||
return raw + encoded
|
return raw + encoded
|
||||||
|
|
||||||
|
def encode_fvwi(val, flags, flag_size=4):
|
||||||
|
'''
|
||||||
|
Encode the value val and the flag_size bits from flags as a fvwi. This encoding is
|
||||||
|
used in the trailing byte sequences for indexing. Returns encoded
|
||||||
|
bytestring.
|
||||||
|
'''
|
||||||
|
ans = val << flag_size
|
||||||
|
for i in xrange(flag_size):
|
||||||
|
ans |= (flags & (1 << i))
|
||||||
|
return encint(ans)
|
||||||
|
|
||||||
|
|
||||||
|
def decode_fvwi(byts, flag_size=4):
|
||||||
|
'''
|
||||||
|
Decode encoded fvwi. Returns number, flags, consumed
|
||||||
|
'''
|
||||||
|
arg, consumed = decint(bytes(byts))
|
||||||
|
val = arg >> flag_size
|
||||||
|
flags = 0
|
||||||
|
for i in xrange(flag_size):
|
||||||
|
flags |= (arg & (1 << i))
|
||||||
|
return val, flags, consumed
|
||||||
|
|
||||||
|
|
||||||
|
def decode_tbs(byts, flag_size=4):
|
||||||
|
'''
|
||||||
|
Trailing byte sequences for indexing consists of series of fvwi numbers.
|
||||||
|
This function reads the fvwi number and its associated flags. It them uses
|
||||||
|
the flags to read any more numbers that belong to the series. The flags are
|
||||||
|
the lowest 4 bits of the vwi (see the encode_fvwi function above).
|
||||||
|
|
||||||
|
Returns the fvwi number, a dictionary mapping flags bits to the associated
|
||||||
|
data and the number of bytes consumed.
|
||||||
|
'''
|
||||||
|
byts = bytes(byts)
|
||||||
|
val, flags, consumed = decode_fvwi(byts, flag_size=flag_size)
|
||||||
|
extra = {}
|
||||||
|
byts = byts[consumed:]
|
||||||
|
if flags & 0b1000 and flag_size > 3:
|
||||||
|
extra[0b1000] = True
|
||||||
|
if flags & 0b0010:
|
||||||
|
x, consumed2 = decint(byts)
|
||||||
|
byts = byts[consumed2:]
|
||||||
|
extra[0b0010] = x
|
||||||
|
consumed += consumed2
|
||||||
|
if flags & 0b0100:
|
||||||
|
extra[0b0100] = ord(byts[0])
|
||||||
|
byts = byts[1:]
|
||||||
|
consumed += 1
|
||||||
|
if flags & 0b0001:
|
||||||
|
x, consumed2 = decint(byts)
|
||||||
|
byts = byts[consumed2:]
|
||||||
|
extra[0b0001] = x
|
||||||
|
consumed += consumed2
|
||||||
|
return val, extra, consumed
|
||||||
|
|
||||||
|
def encode_tbs(val, extra, flag_size=4):
|
||||||
|
'''
|
||||||
|
Encode the number val and the extra data in the extra dict as an fvwi. See
|
||||||
|
decode_tbs above.
|
||||||
|
'''
|
||||||
|
flags = 0
|
||||||
|
for flag in extra:
|
||||||
|
flags |= flag
|
||||||
|
ans = encode_fvwi(val, flags, flag_size=flag_size)
|
||||||
|
|
||||||
|
if 0b0010 in extra:
|
||||||
|
ans += encint(extra[0b0010])
|
||||||
|
if 0b0100 in extra:
|
||||||
|
ans += bytes(bytearray([extra[0b0100]]))
|
||||||
|
if 0b0001 in extra:
|
||||||
|
ans += encint(extra[0b0001])
|
||||||
|
return ans
|
||||||
|
|
||||||
|
def utf8_text(text):
|
||||||
|
'''
|
||||||
|
Convert a possibly null string to utf-8 bytes, guaranteeing to return a non
|
||||||
|
empty, normalized bytestring.
|
||||||
|
'''
|
||||||
|
if text and text.strip():
|
||||||
|
text = text.strip()
|
||||||
|
if not isinstance(text, unicode):
|
||||||
|
text = text.decode('utf-8', 'replace')
|
||||||
|
text = normalize(text).encode('utf-8')
|
||||||
|
else:
|
||||||
|
text = _('Unknown').encode('utf-8')
|
||||||
|
return text
|
||||||
|
|
||||||
|
def align_block(raw, multiple=4, pad=b'\0'):
|
||||||
|
'''
|
||||||
|
Return raw with enough pad bytes append to ensure its length is a multiple
|
||||||
|
of 4.
|
||||||
|
'''
|
||||||
|
extra = len(raw) % multiple
|
||||||
|
if extra == 0: return raw
|
||||||
|
return raw + pad*(multiple - extra)
|
||||||
|
|
||||||
|
|
||||||
|
def detect_periodical(toc, log=None):
|
||||||
|
'''
|
||||||
|
Detect if the TOC object toc contains a periodical that conforms to the
|
||||||
|
structure required by kindlegen to generate a periodical.
|
||||||
|
'''
|
||||||
|
for node in toc.iterdescendants():
|
||||||
|
if node.depth() == 1 and node.klass != 'article':
|
||||||
|
if log is not None:
|
||||||
|
log.debug(
|
||||||
|
'Not a periodical: Deepest node does not have '
|
||||||
|
'class="article"')
|
||||||
|
return False
|
||||||
|
if node.depth() == 2 and node.klass != 'section':
|
||||||
|
if log is not None:
|
||||||
|
log.debug(
|
||||||
|
'Not a periodical: Second deepest node does not have'
|
||||||
|
' class="section"')
|
||||||
|
return False
|
||||||
|
if node.depth() == 3 and node.klass != 'periodical':
|
||||||
|
if log is not None:
|
||||||
|
log.debug('Not a periodical: Third deepest node'
|
||||||
|
' does not have class="periodical"')
|
||||||
|
return False
|
||||||
|
if node.depth() > 3:
|
||||||
|
if log is not None:
|
||||||
|
log.debug('Not a periodical: Has nodes of depth > 3')
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
@ -2,6 +2,7 @@
|
|||||||
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
|
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
|
||||||
from __future__ import (unicode_literals, division, absolute_import,
|
from __future__ import (unicode_literals, division, absolute_import,
|
||||||
print_function)
|
print_function)
|
||||||
|
from future_builtins import filter
|
||||||
|
|
||||||
__license__ = 'GPL v3'
|
__license__ = 'GPL v3'
|
||||||
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
|
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||||
@ -9,33 +10,11 @@ __docformat__ = 'restructuredtext en'
|
|||||||
|
|
||||||
from struct import pack
|
from struct import pack
|
||||||
from cStringIO import StringIO
|
from cStringIO import StringIO
|
||||||
from collections import OrderedDict
|
from collections import OrderedDict, defaultdict
|
||||||
|
|
||||||
from calibre.ebooks import normalize
|
from calibre.ebooks.mobi.writer2 import RECORD_SIZE
|
||||||
from calibre.ebooks.mobi.utils import encint
|
from calibre.ebooks.mobi.utils import (encint, encode_number_as_hex,
|
||||||
|
encode_tbs, align_block, utf8_text, detect_periodical)
|
||||||
def utf8_text(text):
|
|
||||||
'''
|
|
||||||
Convert a possibly null string to utf-8 bytes, guaranteeing to return a non
|
|
||||||
empty, normalized bytestring.
|
|
||||||
'''
|
|
||||||
if text and text.strip():
|
|
||||||
text = text.strip()
|
|
||||||
if not isinstance(text, unicode):
|
|
||||||
text = text.decode('utf-8', 'replace')
|
|
||||||
text = normalize(text).encode('utf-8')
|
|
||||||
else:
|
|
||||||
text = _('Unknown').encode('utf-8')
|
|
||||||
return text
|
|
||||||
|
|
||||||
def align_block(raw, multiple=4, pad=b'\0'):
|
|
||||||
'''
|
|
||||||
Return raw with enough pad bytes append to ensure its length is a multiple
|
|
||||||
of 4.
|
|
||||||
'''
|
|
||||||
extra = len(raw) % multiple
|
|
||||||
if extra == 0: return raw
|
|
||||||
return raw + pad*(multiple - extra)
|
|
||||||
|
|
||||||
|
|
||||||
class CNCX(object): # {{{
|
class CNCX(object): # {{{
|
||||||
@ -48,22 +27,15 @@ class CNCX(object): # {{{
|
|||||||
|
|
||||||
MAX_STRING_LENGTH = 500
|
MAX_STRING_LENGTH = 500
|
||||||
|
|
||||||
def __init__(self, toc, opts):
|
def __init__(self, toc, is_periodical):
|
||||||
self.strings = OrderedDict()
|
self.strings = OrderedDict()
|
||||||
|
|
||||||
for item in toc:
|
for item in toc.iterdescendants(breadth_first=True):
|
||||||
if item is self.toc: continue
|
self.strings[item.title] = 0
|
||||||
label = item.title
|
if is_periodical:
|
||||||
klass = item.klass
|
self.strings[item.klass] = 0
|
||||||
if opts.mobi_periodical:
|
|
||||||
if item.description:
|
|
||||||
self.strings[item.description] = 0
|
|
||||||
if item.author:
|
|
||||||
self.string[item.author] = 0
|
|
||||||
self.strings[label] = self.strings[klass] = 0
|
|
||||||
|
|
||||||
self.records = []
|
self.records = []
|
||||||
|
|
||||||
offset = 0
|
offset = 0
|
||||||
buf = StringIO()
|
buf = StringIO()
|
||||||
for key in tuple(self.strings.iterkeys()):
|
for key in tuple(self.strings.iterkeys()):
|
||||||
@ -79,38 +51,677 @@ class CNCX(object): # {{{
|
|||||||
self.records.append(buf.getvalue())
|
self.records.append(buf.getvalue())
|
||||||
buf.truncate(0)
|
buf.truncate(0)
|
||||||
offset = len(self.records) * 0x10000
|
offset = len(self.records) * 0x10000
|
||||||
|
buf.write(raw)
|
||||||
self.strings[key] = offset
|
self.strings[key] = offset
|
||||||
offset += len(raw)
|
offset += len(raw)
|
||||||
|
|
||||||
buf.write(b'\0') # CNCX must end with zero byte
|
|
||||||
self.records.append(align_block(buf.getvalue()))
|
self.records.append(align_block(buf.getvalue()))
|
||||||
|
|
||||||
def __getitem__(self, string):
|
def __getitem__(self, string):
|
||||||
return self.strings[string]
|
return self.strings[string]
|
||||||
# }}}
|
# }}}
|
||||||
|
|
||||||
class Indexer(object):
|
class IndexEntry(object): # {{{
|
||||||
|
|
||||||
def __init__(self, serializer, number_of_text_records, opts, oeb):
|
TAG_VALUES = {
|
||||||
|
'offset': 1,
|
||||||
|
'size': 2,
|
||||||
|
'label_offset': 3,
|
||||||
|
'depth': 4,
|
||||||
|
'class_offset': 5,
|
||||||
|
'parent_index': 21,
|
||||||
|
'first_child_index': 22,
|
||||||
|
'last_child_index': 23,
|
||||||
|
}
|
||||||
|
RTAG_MAP = {v:k for k, v in TAG_VALUES.iteritems()}
|
||||||
|
|
||||||
|
BITMASKS = [1, 2, 3, 4, 5, 21, 22, 23,]
|
||||||
|
|
||||||
|
def __init__(self, offset, label_offset, depth=0, class_offset=None):
|
||||||
|
self.offset, self.label_offset = offset, label_offset
|
||||||
|
self.depth, self.class_offset = depth, class_offset
|
||||||
|
|
||||||
|
self.length = 0
|
||||||
|
self.index = 0
|
||||||
|
|
||||||
|
self.parent_index = None
|
||||||
|
self.first_child_index = None
|
||||||
|
self.last_child_index = None
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return ('IndexEntry(offset=%r, depth=%r, length=%r, index=%r,'
|
||||||
|
' parent_index=%r)')%(self.offset, self.depth, self.length,
|
||||||
|
self.index, self.parent_index)
|
||||||
|
|
||||||
|
@dynamic_property
|
||||||
|
def size(self):
|
||||||
|
def fget(self): return self.length
|
||||||
|
def fset(self, val): self.length = val
|
||||||
|
return property(fget=fget, fset=fset, doc='Alias for length')
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def tagx_block(cls, for_periodical=True):
|
||||||
|
buf = bytearray()
|
||||||
|
|
||||||
|
def add_tag(tag, num_values=1):
|
||||||
|
buf.append(tag)
|
||||||
|
buf.append(num_values)
|
||||||
|
# bitmask
|
||||||
|
buf.append(1 << (cls.BITMASKS.index(tag)))
|
||||||
|
# eof
|
||||||
|
buf.append(0)
|
||||||
|
|
||||||
|
for tag in xrange(1, 5):
|
||||||
|
add_tag(tag)
|
||||||
|
|
||||||
|
if for_periodical:
|
||||||
|
for tag in (5, 21, 22, 23):
|
||||||
|
add_tag(tag)
|
||||||
|
|
||||||
|
# End of TAGX record
|
||||||
|
for i in xrange(3): buf.append(0)
|
||||||
|
buf.append(1)
|
||||||
|
|
||||||
|
header = b'TAGX'
|
||||||
|
header += pack(b'>I', 12+len(buf)) # table length
|
||||||
|
header += pack(b'>I', 1) # control byte count
|
||||||
|
|
||||||
|
return header + bytes(buf)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def next_offset(self):
|
||||||
|
return self.offset + self.length
|
||||||
|
|
||||||
|
@property
|
||||||
|
def tag_nums(self):
|
||||||
|
for i in range(1, 5):
|
||||||
|
yield i
|
||||||
|
for attr in ('class_offset', 'parent_index', 'first_child_index',
|
||||||
|
'last_child_index'):
|
||||||
|
if getattr(self, attr) is not None:
|
||||||
|
yield self.TAG_VALUES[attr]
|
||||||
|
|
||||||
|
@property
|
||||||
|
def entry_type(self):
|
||||||
|
ans = 0
|
||||||
|
for tag in self.tag_nums:
|
||||||
|
ans |= (1 << self.BITMASKS.index(tag)) # 1 << x == 2**x
|
||||||
|
return ans
|
||||||
|
|
||||||
|
@property
|
||||||
|
def bytestring(self):
|
||||||
|
buf = StringIO()
|
||||||
|
buf.write(encode_number_as_hex(self.index))
|
||||||
|
et = self.entry_type
|
||||||
|
buf.write(bytes(bytearray([et])))
|
||||||
|
|
||||||
|
for tag in self.tag_nums:
|
||||||
|
attr = self.RTAG_MAP[tag]
|
||||||
|
val = getattr(self, attr)
|
||||||
|
buf.write(encint(val))
|
||||||
|
|
||||||
|
ans = buf.getvalue()
|
||||||
|
return ans
|
||||||
|
|
||||||
|
# }}}
|
||||||
|
|
||||||
|
class TBS(object): # {{{
|
||||||
|
|
||||||
|
'''
|
||||||
|
Take the list of index nodes starting/ending on a record and calculate the
|
||||||
|
trailing byte sequence for the record.
|
||||||
|
'''
|
||||||
|
|
||||||
|
def __init__(self, data, is_periodical, first=False, section_map={},
|
||||||
|
after_first=False):
|
||||||
|
self.section_map = section_map
|
||||||
|
#import pprint
|
||||||
|
#pprint.pprint(data)
|
||||||
|
#print()
|
||||||
|
if is_periodical:
|
||||||
|
# The starting bytes.
|
||||||
|
# The value is zero which I think indicates the periodical
|
||||||
|
# index entry. The values for the various flags seem to be
|
||||||
|
# unused. If the 0b100 is present, it means that the record
|
||||||
|
# deals with section 1 (or is the final record with section
|
||||||
|
# transitions).
|
||||||
|
self.type_010 = encode_tbs(0, {0b010: 0}, flag_size=3)
|
||||||
|
self.type_011 = encode_tbs(0, {0b010: 0, 0b001: 0},
|
||||||
|
flag_size=3)
|
||||||
|
self.type_110 = encode_tbs(0, {0b100: 2, 0b010: 0},
|
||||||
|
flag_size=3)
|
||||||
|
self.type_111 = encode_tbs(0, {0b100: 2, 0b010: 0, 0b001:
|
||||||
|
0}, flag_size=3)
|
||||||
|
|
||||||
|
if not data:
|
||||||
|
byts = b''
|
||||||
|
if after_first:
|
||||||
|
# This can happen if a record contains only text between
|
||||||
|
# the periodical start and the first section
|
||||||
|
byts = self.type_011
|
||||||
|
self.bytestring = byts
|
||||||
|
else:
|
||||||
|
depth_map = defaultdict(list)
|
||||||
|
for x in ('starts', 'ends', 'completes'):
|
||||||
|
for idx in data[x]:
|
||||||
|
depth_map[idx.depth].append(idx)
|
||||||
|
for l in depth_map.itervalues():
|
||||||
|
l.sort(key=lambda x:x.offset)
|
||||||
|
self.periodical_tbs(data, first, depth_map)
|
||||||
|
else:
|
||||||
|
if not data:
|
||||||
|
self.bytestring = b''
|
||||||
|
else:
|
||||||
|
self.book_tbs(data, first)
|
||||||
|
|
||||||
|
def periodical_tbs(self, data, first, depth_map):
|
||||||
|
buf = StringIO()
|
||||||
|
|
||||||
|
has_section_start = (depth_map[1] and
|
||||||
|
set(depth_map[1]).intersection(set(data['starts'])))
|
||||||
|
spanner = data['spans']
|
||||||
|
parent_section_index = -1
|
||||||
|
|
||||||
|
if depth_map[0]:
|
||||||
|
# We have a terminal record
|
||||||
|
|
||||||
|
# Find the first non periodical node
|
||||||
|
first_node = None
|
||||||
|
for nodes in (depth_map[1], depth_map[2]):
|
||||||
|
for node in nodes:
|
||||||
|
if (first_node is None or (node.offset, node.depth) <
|
||||||
|
(first_node.offset, first_node.depth)):
|
||||||
|
first_node = node
|
||||||
|
|
||||||
|
typ = (self.type_110 if has_section_start else self.type_010)
|
||||||
|
|
||||||
|
# parent_section_index is needed for the last record
|
||||||
|
if first_node is not None and first_node.depth > 0:
|
||||||
|
parent_section_index = (first_node.index if first_node.depth
|
||||||
|
== 1 else first_node.parent_index)
|
||||||
|
else:
|
||||||
|
parent_section_index = max(self.section_map.iterkeys())
|
||||||
|
|
||||||
|
else:
|
||||||
|
# Non terminal record
|
||||||
|
|
||||||
|
if spanner is not None:
|
||||||
|
# record is spanned by a single article
|
||||||
|
parent_section_index = spanner.parent_index
|
||||||
|
typ = (self.type_110 if parent_section_index == 1 else
|
||||||
|
self.type_010)
|
||||||
|
elif not depth_map[1]:
|
||||||
|
# has only article nodes, i.e. spanned by a section
|
||||||
|
parent_section_index = depth_map[2][0].parent_index
|
||||||
|
typ = (self.type_111 if parent_section_index == 1 else
|
||||||
|
self.type_010)
|
||||||
|
else:
|
||||||
|
# has section transitions
|
||||||
|
if depth_map[2]:
|
||||||
|
parent_section_index = depth_map[2][0].parent_index
|
||||||
|
else:
|
||||||
|
parent_section_index = depth_map[1][0].index
|
||||||
|
typ = self.type_011
|
||||||
|
|
||||||
|
buf.write(typ)
|
||||||
|
|
||||||
|
if typ not in (self.type_110, self.type_111) and parent_section_index > 0:
|
||||||
|
# Write starting section information
|
||||||
|
if spanner is None:
|
||||||
|
num_articles = len([a for a in depth_map[1] if a.parent_index
|
||||||
|
== parent_section_index])
|
||||||
|
extra = {}
|
||||||
|
if num_articles > 1:
|
||||||
|
extra = {0b0100: num_articles}
|
||||||
|
else:
|
||||||
|
extra = {0b0001: 0}
|
||||||
|
buf.write(encode_tbs(parent_section_index, extra))
|
||||||
|
|
||||||
|
if spanner is None:
|
||||||
|
articles = depth_map[2]
|
||||||
|
sections = set([self.section_map[a.parent_index] for a in
|
||||||
|
articles])
|
||||||
|
sections = sorted(sections, key=lambda x:x.offset)
|
||||||
|
section_map = {s:[a for a in articles if a.parent_index ==
|
||||||
|
s.index] for s in sections}
|
||||||
|
for i, section in enumerate(sections):
|
||||||
|
# All the articles in this record that belong to section
|
||||||
|
articles = section_map[section]
|
||||||
|
first_article = articles[0]
|
||||||
|
last_article = articles[-1]
|
||||||
|
num = len(articles)
|
||||||
|
|
||||||
|
try:
|
||||||
|
next_sec = sections[i+1]
|
||||||
|
except:
|
||||||
|
next_sec = None
|
||||||
|
|
||||||
|
extra = {}
|
||||||
|
if num > 1:
|
||||||
|
extra[0b0100] = num
|
||||||
|
if i == 0 and next_sec is not None:
|
||||||
|
# Write offset to next section from start of record
|
||||||
|
# For some reason kindlegen only writes this offset
|
||||||
|
# for the first section transition. Imitate it.
|
||||||
|
extra[0b0001] = next_sec.offset - data['offset']
|
||||||
|
|
||||||
|
buf.write(encode_tbs(first_article.index-section.index, extra))
|
||||||
|
|
||||||
|
if next_sec is not None:
|
||||||
|
buf.write(encode_tbs(last_article.index-next_sec.index,
|
||||||
|
{0b1000: 0}))
|
||||||
|
else:
|
||||||
|
buf.write(encode_tbs(spanner.index - parent_section_index,
|
||||||
|
{0b0001: 0}))
|
||||||
|
|
||||||
|
self.bytestring = buf.getvalue()
|
||||||
|
|
||||||
|
def book_tbs(self, data, first):
|
||||||
|
self.bytestring = b''
|
||||||
|
# }}}
|
||||||
|
|
||||||
|
class Indexer(object): # {{{
|
||||||
|
|
||||||
|
def __init__(self, serializer, number_of_text_records,
|
||||||
|
size_of_last_text_record, opts, oeb):
|
||||||
self.serializer = serializer
|
self.serializer = serializer
|
||||||
self.number_of_text_records = number_of_text_records
|
self.number_of_text_records = number_of_text_records
|
||||||
|
self.text_size = (RECORD_SIZE * (self.number_of_text_records-1) +
|
||||||
|
size_of_last_text_record)
|
||||||
self.oeb = oeb
|
self.oeb = oeb
|
||||||
self.log = oeb.log
|
self.log = oeb.log
|
||||||
self.opts = opts
|
self.opts = opts
|
||||||
|
|
||||||
self.cncx = CNCX(oeb.toc, opts)
|
self.is_periodical = detect_periodical(self.oeb.toc, self.log)
|
||||||
|
self.log('Generating MOBI index for a %s'%('periodical' if
|
||||||
|
self.is_periodical else 'book'))
|
||||||
|
self.is_flat_periodical = False
|
||||||
|
if self.is_periodical:
|
||||||
|
periodical_node = iter(oeb.toc).next()
|
||||||
|
sections = tuple(periodical_node)
|
||||||
|
self.is_flat_periodical = len(sections) == 1
|
||||||
|
|
||||||
self.records = []
|
self.records = []
|
||||||
|
|
||||||
def create_header(self):
|
self.cncx = CNCX(oeb.toc, self.is_periodical)
|
||||||
buf = StringIO()
|
|
||||||
|
|
||||||
# Ident
|
if self.is_periodical:
|
||||||
|
self.indices = self.create_periodical_index()
|
||||||
|
else:
|
||||||
|
self.indices = self.create_book_index()
|
||||||
|
|
||||||
|
self.records.append(self.create_index_record())
|
||||||
|
self.records.insert(0, self.create_header())
|
||||||
|
self.records.extend(self.cncx.records)
|
||||||
|
|
||||||
|
self.calculate_trailing_byte_sequences()
|
||||||
|
|
||||||
|
def create_index_record(self): # {{{
|
||||||
|
header_length = 192
|
||||||
|
buf = StringIO()
|
||||||
|
indices = self.indices
|
||||||
|
|
||||||
|
# Write index entries
|
||||||
|
offsets = []
|
||||||
|
for i in indices:
|
||||||
|
offsets.append(buf.tell())
|
||||||
|
buf.write(i.bytestring)
|
||||||
|
index_block = align_block(buf.getvalue())
|
||||||
|
|
||||||
|
# Write offsets to index entries as an IDXT block
|
||||||
|
idxt_block = b'IDXT'
|
||||||
|
buf.truncate(0)
|
||||||
|
for offset in offsets:
|
||||||
|
buf.write(pack(b'>H', header_length+offset))
|
||||||
|
idxt_block = align_block(idxt_block + buf.getvalue())
|
||||||
|
body = index_block + idxt_block
|
||||||
|
|
||||||
|
header = b'INDX'
|
||||||
|
buf.truncate(0)
|
||||||
|
buf.write(pack(b'>I', header_length))
|
||||||
|
buf.write(b'\0'*4) # Unknown
|
||||||
|
buf.write(pack(b'>I', 1)) # Header type? Or index record number?
|
||||||
|
buf.write(b'\0'*4) # Unknown
|
||||||
|
# IDXT block offset
|
||||||
|
buf.write(pack(b'>I', header_length + len(index_block)))
|
||||||
|
# Number of index entries
|
||||||
|
buf.write(pack(b'>I', len(offsets)))
|
||||||
|
# Unknown
|
||||||
|
buf.write(b'\xff'*8)
|
||||||
|
# Unknown
|
||||||
|
buf.write(b'\0'*156)
|
||||||
|
|
||||||
|
header += buf.getvalue()
|
||||||
|
|
||||||
|
ans = header + body
|
||||||
|
if len(ans) > 0x10000:
|
||||||
|
raise ValueError('Too many entries (%d) in the TOC'%len(offsets))
|
||||||
|
return ans
|
||||||
|
# }}}
|
||||||
|
|
||||||
|
def create_header(self): # {{{
|
||||||
|
buf = StringIO()
|
||||||
|
tagx_block = IndexEntry.tagx_block(self.is_periodical)
|
||||||
|
header_length = 192
|
||||||
|
|
||||||
|
# Ident 0 - 4
|
||||||
buf.write(b'INDX')
|
buf.write(b'INDX')
|
||||||
|
|
||||||
# Header length
|
# Header length 4 - 8
|
||||||
buf.write(pack(b'>I', 192))
|
buf.write(pack(b'>I', header_length))
|
||||||
|
|
||||||
# Index type: 0 - normal, 2 - inflection
|
# Unknown 8-16
|
||||||
|
buf.write(b'\0'*8)
|
||||||
|
|
||||||
|
# Index type: 0 - normal, 2 - inflection 16 - 20
|
||||||
buf.write(pack(b'>I', 2))
|
buf.write(pack(b'>I', 2))
|
||||||
|
|
||||||
|
# IDXT offset 20-24
|
||||||
|
buf.write(pack(b'>I', 0)) # Filled in later
|
||||||
|
|
||||||
|
# Number of index records 24-28
|
||||||
|
buf.write(pack(b'>I', len(self.records)))
|
||||||
|
|
||||||
|
# Index Encoding 28-32
|
||||||
|
buf.write(pack(b'>I', 65001)) # utf-8
|
||||||
|
|
||||||
|
# Unknown 32-36
|
||||||
|
buf.write(b'\xff'*4)
|
||||||
|
|
||||||
|
# Number of index entries 36-40
|
||||||
|
buf.write(pack(b'>I', len(self.indices)))
|
||||||
|
|
||||||
|
# ORDT offset 40-44
|
||||||
|
buf.write(pack(b'>I', 0))
|
||||||
|
|
||||||
|
# LIGT offset 44-48
|
||||||
|
buf.write(pack(b'>I', 0))
|
||||||
|
|
||||||
|
# Number of LIGT entries 48-52
|
||||||
|
buf.write(pack(b'>I', 0))
|
||||||
|
|
||||||
|
# Number of CNCX records 52-56
|
||||||
|
buf.write(pack(b'>I', len(self.cncx.records)))
|
||||||
|
|
||||||
|
# Unknown 56-180
|
||||||
|
buf.write(b'\0'*124)
|
||||||
|
|
||||||
|
# TAGX offset 180-184
|
||||||
|
buf.write(pack(b'>I', header_length))
|
||||||
|
|
||||||
|
# Unknown 184-192
|
||||||
|
buf.write(b'\0'*8)
|
||||||
|
|
||||||
|
# TAGX block
|
||||||
|
buf.write(tagx_block)
|
||||||
|
|
||||||
|
num = len(self.indices)
|
||||||
|
|
||||||
|
# The index of the last entry in the NCX
|
||||||
|
buf.write(encode_number_as_hex(num-1))
|
||||||
|
|
||||||
|
# The number of entries in the NCX
|
||||||
|
buf.write(pack(b'>H', num))
|
||||||
|
|
||||||
|
# Padding
|
||||||
|
pad = (4 - (buf.tell()%4))%4
|
||||||
|
if pad:
|
||||||
|
buf.write(b'\0'*pad)
|
||||||
|
|
||||||
|
idxt_offset = buf.tell()
|
||||||
|
|
||||||
|
buf.write(b'IDXT')
|
||||||
|
buf.write(pack(b'>H', header_length + len(tagx_block)))
|
||||||
|
buf.write(b'\0')
|
||||||
|
buf.seek(20)
|
||||||
|
buf.write(pack(b'>I', idxt_offset))
|
||||||
|
|
||||||
|
return align_block(buf.getvalue())
|
||||||
|
# }}}
|
||||||
|
|
||||||
|
def create_book_index(self): # {{{
|
||||||
|
indices = []
|
||||||
|
seen = set()
|
||||||
|
id_offsets = self.serializer.id_offsets
|
||||||
|
|
||||||
|
for node in self.oeb.toc.iterdescendants():
|
||||||
|
try:
|
||||||
|
offset = id_offsets[node.href]
|
||||||
|
label = self.cncx[node.title]
|
||||||
|
except:
|
||||||
|
self.log.warn('TOC item %s not found in document'%node.href)
|
||||||
|
continue
|
||||||
|
if offset in seen:
|
||||||
|
continue
|
||||||
|
seen.add(offset)
|
||||||
|
index = IndexEntry(offset, label)
|
||||||
|
indices.append(index)
|
||||||
|
|
||||||
|
indices.sort(key=lambda x:x.offset)
|
||||||
|
|
||||||
|
# Set lengths
|
||||||
|
for i, index in enumerate(indices):
|
||||||
|
try:
|
||||||
|
next_offset = indices[i+1].offset
|
||||||
|
except:
|
||||||
|
next_offset = self.serializer.body_end_offset
|
||||||
|
index.length = next_offset - index.offset
|
||||||
|
|
||||||
|
# Remove empty nodes
|
||||||
|
indices = [i for i in indices if i.length > 0]
|
||||||
|
|
||||||
|
# Set index values
|
||||||
|
for i, index in enumerate(indices):
|
||||||
|
index.index = i
|
||||||
|
|
||||||
|
# Set lengths again to close up any gaps left by filtering
|
||||||
|
for i, index in enumerate(indices):
|
||||||
|
try:
|
||||||
|
next_offset = indices[i+1].offset
|
||||||
|
except:
|
||||||
|
next_offset = self.serializer.body_end_offset
|
||||||
|
index.length = next_offset - index.offset
|
||||||
|
|
||||||
|
return indices
|
||||||
|
|
||||||
|
# }}}
|
||||||
|
|
||||||
|
def create_periodical_index(self): # {{{
|
||||||
|
periodical_node = iter(self.oeb.toc).next()
|
||||||
|
periodical_node_offset = self.serializer.body_start_offset
|
||||||
|
periodical_node_size = (self.serializer.body_end_offset -
|
||||||
|
periodical_node_offset)
|
||||||
|
|
||||||
|
normalized_sections = []
|
||||||
|
|
||||||
|
id_offsets = self.serializer.id_offsets
|
||||||
|
|
||||||
|
periodical = IndexEntry(periodical_node_offset,
|
||||||
|
self.cncx[periodical_node.title],
|
||||||
|
class_offset=self.cncx[periodical_node.klass])
|
||||||
|
periodical.length = periodical_node_size
|
||||||
|
periodical.first_child_index = 1
|
||||||
|
|
||||||
|
seen_sec_offsets = set()
|
||||||
|
seen_art_offsets = set()
|
||||||
|
|
||||||
|
for sec in periodical_node:
|
||||||
|
normalized_articles = []
|
||||||
|
try:
|
||||||
|
offset = id_offsets[sec.href]
|
||||||
|
label = self.cncx[sec.title]
|
||||||
|
klass = self.cncx[sec.klass]
|
||||||
|
except:
|
||||||
|
continue
|
||||||
|
if offset in seen_sec_offsets:
|
||||||
|
continue
|
||||||
|
seen_sec_offsets.add(offset)
|
||||||
|
section = IndexEntry(offset, label, class_offset=klass, depth=1)
|
||||||
|
section.parent_index = 0
|
||||||
|
for art in sec:
|
||||||
|
try:
|
||||||
|
offset = id_offsets[art.href]
|
||||||
|
label = self.cncx[art.title]
|
||||||
|
klass = self.cncx[art.klass]
|
||||||
|
except:
|
||||||
|
continue
|
||||||
|
if offset in seen_art_offsets:
|
||||||
|
continue
|
||||||
|
seen_art_offsets.add(offset)
|
||||||
|
article = IndexEntry(offset, label, class_offset=klass,
|
||||||
|
depth=2)
|
||||||
|
normalized_articles.append(article)
|
||||||
|
if normalized_articles:
|
||||||
|
normalized_articles.sort(key=lambda x:x.offset)
|
||||||
|
normalized_sections.append((section, normalized_articles))
|
||||||
|
|
||||||
|
normalized_sections.sort(key=lambda x:x[0].offset)
|
||||||
|
|
||||||
|
# Set lengths
|
||||||
|
for s, x in enumerate(normalized_sections):
|
||||||
|
sec, normalized_articles = x
|
||||||
|
try:
|
||||||
|
sec.length = normalized_sections[s+1][0].offset - sec.offset
|
||||||
|
except:
|
||||||
|
sec.length = self.serializer.body_end_offset - sec.offset
|
||||||
|
for i, art in enumerate(normalized_articles):
|
||||||
|
try:
|
||||||
|
art.length = normalized_articles[i+1].offset - art.offset
|
||||||
|
except:
|
||||||
|
art.length = sec.offset + sec.length - art.offset
|
||||||
|
|
||||||
|
# Filter
|
||||||
|
for i, x in list(enumerate(normalized_sections)):
|
||||||
|
sec, normalized_articles = x
|
||||||
|
normalized_articles = list(filter(lambda x: x.length > 0,
|
||||||
|
normalized_articles))
|
||||||
|
normalized_sections[i] = (sec, normalized_articles)
|
||||||
|
|
||||||
|
normalized_sections = list(filter(lambda x: x[0].length > 0 and x[1],
|
||||||
|
normalized_sections))
|
||||||
|
|
||||||
|
# Set indices
|
||||||
|
i = 0
|
||||||
|
for sec, articles in normalized_sections:
|
||||||
|
i += 1
|
||||||
|
sec.index = i
|
||||||
|
sec.parent_index = 0
|
||||||
|
|
||||||
|
for sec, articles in normalized_sections:
|
||||||
|
for art in articles:
|
||||||
|
i += 1
|
||||||
|
art.index = i
|
||||||
|
art.parent_index = sec.index
|
||||||
|
|
||||||
|
for sec, normalized_articles in normalized_sections:
|
||||||
|
sec.first_child_index = normalized_articles[0].index
|
||||||
|
sec.last_child_index = normalized_articles[-1].index
|
||||||
|
|
||||||
|
# Set lengths again to close up any gaps left by filtering
|
||||||
|
for s, x in enumerate(normalized_sections):
|
||||||
|
sec, articles = x
|
||||||
|
try:
|
||||||
|
next_offset = normalized_sections[s+1][0].offset
|
||||||
|
except:
|
||||||
|
next_offset = self.serializer.body_end_offset
|
||||||
|
sec.length = next_offset - sec.offset
|
||||||
|
|
||||||
|
for a, art in enumerate(articles):
|
||||||
|
try:
|
||||||
|
next_offset = articles[a+1].offset
|
||||||
|
except:
|
||||||
|
next_offset = sec.next_offset
|
||||||
|
art.length = next_offset - art.offset
|
||||||
|
|
||||||
|
# Sanity check
|
||||||
|
for s, x in enumerate(normalized_sections):
|
||||||
|
sec, articles = x
|
||||||
|
try:
|
||||||
|
next_sec = normalized_sections[s+1][0]
|
||||||
|
except:
|
||||||
|
if (sec.length == 0 or sec.next_offset !=
|
||||||
|
self.serializer.body_end_offset):
|
||||||
|
raise ValueError('Invalid section layout')
|
||||||
|
else:
|
||||||
|
if next_sec.offset != sec.next_offset or sec.length == 0:
|
||||||
|
raise ValueError('Invalid section layout')
|
||||||
|
for a, art in enumerate(articles):
|
||||||
|
try:
|
||||||
|
next_art = articles[a+1]
|
||||||
|
except:
|
||||||
|
if (art.length == 0 or art.next_offset !=
|
||||||
|
sec.next_offset):
|
||||||
|
raise ValueError('Invalid article layout')
|
||||||
|
else:
|
||||||
|
if art.length == 0 or art.next_offset != next_art.offset:
|
||||||
|
raise ValueError('Invalid article layout')
|
||||||
|
|
||||||
|
# Flatten
|
||||||
|
indices = [periodical]
|
||||||
|
for sec, articles in normalized_sections:
|
||||||
|
indices.append(sec)
|
||||||
|
periodical.last_child_index = sec.index
|
||||||
|
|
||||||
|
for sec, articles in normalized_sections:
|
||||||
|
for a in articles:
|
||||||
|
indices.append(a)
|
||||||
|
|
||||||
|
return indices
|
||||||
|
# }}}
|
||||||
|
|
||||||
|
# TBS {{{
|
||||||
|
def calculate_trailing_byte_sequences(self):
|
||||||
|
self.tbs_map = {}
|
||||||
|
found_node = False
|
||||||
|
sections = [i for i in self.indices if i.depth == 1]
|
||||||
|
section_map = OrderedDict((i.index, i) for i in
|
||||||
|
sorted(sections, key=lambda x:x.offset))
|
||||||
|
|
||||||
|
deepest = max(i.depth for i in self.indices)
|
||||||
|
|
||||||
|
for i in xrange(self.number_of_text_records):
|
||||||
|
offset = i * RECORD_SIZE
|
||||||
|
next_offset = offset + RECORD_SIZE
|
||||||
|
data = {'ends':[], 'completes':[], 'starts':[],
|
||||||
|
'spans':None, 'offset':offset, 'record_number':i+1}
|
||||||
|
|
||||||
|
for index in self.indices:
|
||||||
|
if index.offset >= next_offset:
|
||||||
|
# Node starts after current record
|
||||||
|
if index.depth == deepest:
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
continue
|
||||||
|
if index.next_offset <= offset:
|
||||||
|
# Node ends before current record
|
||||||
|
continue
|
||||||
|
if index.offset >= offset:
|
||||||
|
# Node starts in current record
|
||||||
|
if index.next_offset <= next_offset:
|
||||||
|
# Node ends in current record
|
||||||
|
data['completes'].append(index)
|
||||||
|
else:
|
||||||
|
data['starts'].append(index)
|
||||||
|
else:
|
||||||
|
# Node starts before current records
|
||||||
|
if index.next_offset <= next_offset:
|
||||||
|
# Node ends in current record
|
||||||
|
data['ends'].append(index)
|
||||||
|
elif index.depth == deepest:
|
||||||
|
data['spans'] = index
|
||||||
|
|
||||||
|
if (data['ends'] or data['completes'] or data['starts'] or
|
||||||
|
data['spans'] is not None):
|
||||||
|
self.tbs_map[i+1] = TBS(data, self.is_periodical, first=not
|
||||||
|
found_node, section_map=section_map)
|
||||||
|
found_node = True
|
||||||
|
else:
|
||||||
|
self.tbs_map[i+1] = TBS({}, self.is_periodical, first=False,
|
||||||
|
after_first=found_node, section_map=section_map)
|
||||||
|
|
||||||
|
def get_trailing_byte_sequence(self, num):
|
||||||
|
return self.tbs_map[num].bytestring
|
||||||
|
# }}}
|
||||||
|
|
||||||
|
# }}}
|
||||||
|
|
||||||
|
@ -20,6 +20,7 @@ from calibre.utils.filenames import ascii_filename
|
|||||||
from calibre.ebooks.mobi.writer2 import (PALMDOC, UNCOMPRESSED, RECORD_SIZE)
|
from calibre.ebooks.mobi.writer2 import (PALMDOC, UNCOMPRESSED, RECORD_SIZE)
|
||||||
from calibre.ebooks.mobi.utils import (rescale_image, encint,
|
from calibre.ebooks.mobi.utils import (rescale_image, encint,
|
||||||
encode_trailing_data)
|
encode_trailing_data)
|
||||||
|
from calibre.ebooks.mobi.writer2.indexer import Indexer
|
||||||
|
|
||||||
EXTH_CODES = {
|
EXTH_CODES = {
|
||||||
'creator': 100,
|
'creator': 100,
|
||||||
@ -28,7 +29,6 @@ EXTH_CODES = {
|
|||||||
'identifier': 104,
|
'identifier': 104,
|
||||||
'subject': 105,
|
'subject': 105,
|
||||||
'pubdate': 106,
|
'pubdate': 106,
|
||||||
'date': 106,
|
|
||||||
'review': 107,
|
'review': 107,
|
||||||
'contributor': 108,
|
'contributor': 108,
|
||||||
'rights': 109,
|
'rights': 109,
|
||||||
@ -54,6 +54,7 @@ class MobiWriter(object):
|
|||||||
self.last_text_record_idx = 1
|
self.last_text_record_idx = 1
|
||||||
|
|
||||||
def __call__(self, oeb, path_or_stream):
|
def __call__(self, oeb, path_or_stream):
|
||||||
|
self.log = oeb.log
|
||||||
if hasattr(path_or_stream, 'write'):
|
if hasattr(path_or_stream, 'write'):
|
||||||
return self.dump_stream(oeb, path_or_stream)
|
return self.dump_stream(oeb, path_or_stream)
|
||||||
with open(path_or_stream, 'w+b') as stream:
|
with open(path_or_stream, 'w+b') as stream:
|
||||||
@ -87,6 +88,25 @@ class MobiWriter(object):
|
|||||||
# Indexing {{{
|
# Indexing {{{
|
||||||
def generate_index(self):
|
def generate_index(self):
|
||||||
self.primary_index_record_idx = None
|
self.primary_index_record_idx = None
|
||||||
|
try:
|
||||||
|
self.indexer = Indexer(self.serializer, self.last_text_record_idx,
|
||||||
|
len(self.records[self.last_text_record_idx]),
|
||||||
|
self.opts, self.oeb)
|
||||||
|
except:
|
||||||
|
self.log.exception('Failed to generate MOBI index:')
|
||||||
|
else:
|
||||||
|
self.primary_index_record_idx = len(self.records)
|
||||||
|
for i in xrange(len(self.records)):
|
||||||
|
if i == 0: continue
|
||||||
|
tbs = self.indexer.get_trailing_byte_sequence(i)
|
||||||
|
self.records[i] += encode_trailing_data(tbs)
|
||||||
|
self.records.extend(self.indexer.records)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def is_periodical(self):
|
||||||
|
return (self.primary_index_record_idx is None or not
|
||||||
|
self.indexer.is_periodical)
|
||||||
|
|
||||||
# }}}
|
# }}}
|
||||||
|
|
||||||
def write_uncrossable_breaks(self): # {{{
|
def write_uncrossable_breaks(self): # {{{
|
||||||
@ -178,7 +198,6 @@ class MobiWriter(object):
|
|||||||
self.serializer = Serializer(self.oeb, self.images,
|
self.serializer = Serializer(self.oeb, self.images,
|
||||||
write_page_breaks_after_item=self.write_page_breaks_after_item)
|
write_page_breaks_after_item=self.write_page_breaks_after_item)
|
||||||
text = self.serializer()
|
text = self.serializer()
|
||||||
self.content_length = len(text)
|
|
||||||
self.text_length = len(text)
|
self.text_length = len(text)
|
||||||
text = StringIO(text)
|
text = StringIO(text)
|
||||||
nrecords = 0
|
nrecords = 0
|
||||||
@ -186,22 +205,16 @@ class MobiWriter(object):
|
|||||||
if self.compression != UNCOMPRESSED:
|
if self.compression != UNCOMPRESSED:
|
||||||
self.oeb.logger.info(' Compressing markup content...')
|
self.oeb.logger.info(' Compressing markup content...')
|
||||||
|
|
||||||
data, overlap = self.read_text_record(text)
|
while text.tell() < self.text_length:
|
||||||
|
data, overlap = self.read_text_record(text)
|
||||||
while len(data) > 0:
|
|
||||||
if self.compression == PALMDOC:
|
if self.compression == PALMDOC:
|
||||||
data = compress_doc(data)
|
data = compress_doc(data)
|
||||||
record = StringIO()
|
|
||||||
record.write(data)
|
|
||||||
|
|
||||||
self.records.append(record.getvalue())
|
data += overlap
|
||||||
|
data += pack(b'>B', len(overlap))
|
||||||
|
|
||||||
|
self.records.append(data)
|
||||||
nrecords += 1
|
nrecords += 1
|
||||||
data, overlap = self.read_text_record(text)
|
|
||||||
|
|
||||||
# Write information about the mutibyte character overlap, if any
|
|
||||||
record.write(overlap)
|
|
||||||
record.write(pack(b'>B', len(overlap)))
|
|
||||||
|
|
||||||
|
|
||||||
self.last_text_record_idx = nrecords
|
self.last_text_record_idx = nrecords
|
||||||
|
|
||||||
@ -262,10 +275,19 @@ class MobiWriter(object):
|
|||||||
exth = self.build_exth()
|
exth = self.build_exth()
|
||||||
last_content_record = len(self.records) - 1
|
last_content_record = len(self.records) - 1
|
||||||
|
|
||||||
# EOF record
|
# FCIS/FLIS (Seem to server no purpose)
|
||||||
self.records.append('\xE9\x8E\x0D\x0A')
|
flis_number = len(self.records)
|
||||||
|
self.records.append(
|
||||||
|
b'FLIS\0\0\0\x08\0\x41\0\0\0\0\0\0\xff\xff\xff\xff\0\x01\0\x03\0\0\0\x03\0\0\0\x01'+
|
||||||
|
b'\xff'*4)
|
||||||
|
fcis = b'FCIS\x00\x00\x00\x14\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00'
|
||||||
|
fcis += pack(b'>I', self.text_length)
|
||||||
|
fcis += b'\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x08\x00\x01\x00\x01\x00\x00\x00\x00'
|
||||||
|
fcis_number = len(self.records)
|
||||||
|
self.records.append(fcis)
|
||||||
|
|
||||||
self.generate_end_records()
|
# EOF record
|
||||||
|
self.records.append(b'\xE9\x8E\x0D\x0A')
|
||||||
|
|
||||||
record0 = StringIO()
|
record0 = StringIO()
|
||||||
# The MOBI Header
|
# The MOBI Header
|
||||||
@ -295,8 +317,15 @@ class MobiWriter(object):
|
|||||||
# 0x10 - 0x13 : UID
|
# 0x10 - 0x13 : UID
|
||||||
# 0x14 - 0x17 : Generator version
|
# 0x14 - 0x17 : Generator version
|
||||||
|
|
||||||
|
bt = 0x002
|
||||||
|
if self.primary_index_record_idx is not None:
|
||||||
|
if self.indexer.is_flat_periodical:
|
||||||
|
bt = 0x102
|
||||||
|
elif self.indexer.is_periodical:
|
||||||
|
bt = 0x103
|
||||||
|
|
||||||
record0.write(pack(b'>IIIII',
|
record0.write(pack(b'>IIIII',
|
||||||
0xe8, 0x002, 65001, uid, 6))
|
0xe8, bt, 65001, uid, 6))
|
||||||
|
|
||||||
# 0x18 - 0x1f : Unknown
|
# 0x18 - 0x1f : Unknown
|
||||||
record0.write(b'\xff' * 8)
|
record0.write(b'\xff' * 8)
|
||||||
@ -325,7 +354,8 @@ class MobiWriter(object):
|
|||||||
# 0x58 - 0x5b : Format version
|
# 0x58 - 0x5b : Format version
|
||||||
# 0x5c - 0x5f : First image record number
|
# 0x5c - 0x5f : First image record number
|
||||||
record0.write(pack(b'>II',
|
record0.write(pack(b'>II',
|
||||||
6, self.first_image_record if self.first_image_record else 0))
|
6, self.first_image_record if self.first_image_record else
|
||||||
|
len(self.records)-1))
|
||||||
|
|
||||||
# 0x60 - 0x63 : First HUFF/CDIC record number
|
# 0x60 - 0x63 : First HUFF/CDIC record number
|
||||||
# 0x64 - 0x67 : Number of HUFF/CDIC records
|
# 0x64 - 0x67 : Number of HUFF/CDIC records
|
||||||
@ -334,7 +364,12 @@ class MobiWriter(object):
|
|||||||
record0.write(b'\0' * 16)
|
record0.write(b'\0' * 16)
|
||||||
|
|
||||||
# 0x70 - 0x73 : EXTH flags
|
# 0x70 - 0x73 : EXTH flags
|
||||||
record0.write(pack(b'>I', 0x50))
|
# Bit 6 (0b1000000) being set indicates the presence of an EXTH header
|
||||||
|
# The purpose of the other bits is unknown
|
||||||
|
exth_flags = 0b1011000
|
||||||
|
if self.is_periodical:
|
||||||
|
exth_flags |= 0b1000
|
||||||
|
record0.write(pack(b'>I', exth_flags))
|
||||||
|
|
||||||
# 0x74 - 0x93 : Unknown
|
# 0x74 - 0x93 : Unknown
|
||||||
record0.write(b'\0' * 32)
|
record0.write(b'\0' * 32)
|
||||||
@ -359,13 +394,13 @@ class MobiWriter(object):
|
|||||||
record0.write(b'\0\0\0\x01')
|
record0.write(b'\0\0\0\x01')
|
||||||
|
|
||||||
# 0xb8 - 0xbb : FCIS record number
|
# 0xb8 - 0xbb : FCIS record number
|
||||||
record0.write(pack(b'>I', 0xffffffff))
|
record0.write(pack(b'>I', fcis_number))
|
||||||
|
|
||||||
# 0xbc - 0xbf : Unknown (FCIS record count?)
|
# 0xbc - 0xbf : Unknown (FCIS record count?)
|
||||||
record0.write(pack(b'>I', 0xffffffff))
|
record0.write(pack(b'>I', 1))
|
||||||
|
|
||||||
# 0xc0 - 0xc3 : FLIS record number
|
# 0xc0 - 0xc3 : FLIS record number
|
||||||
record0.write(pack(b'>I', 0xffffffff))
|
record0.write(pack(b'>I', flis_number))
|
||||||
|
|
||||||
# 0xc4 - 0xc7 : Unknown (FLIS record count?)
|
# 0xc4 - 0xc7 : Unknown (FLIS record count?)
|
||||||
record0.write(pack(b'>I', 1))
|
record0.write(pack(b'>I', 1))
|
||||||
@ -457,25 +492,33 @@ class MobiWriter(object):
|
|||||||
nrecs += 1
|
nrecs += 1
|
||||||
|
|
||||||
# Write cdetype
|
# Write cdetype
|
||||||
if not self.opts.mobi_periodical:
|
if self.is_periodical:
|
||||||
data = b'EBOK'
|
data = b'EBOK'
|
||||||
exth.write(pack(b'>II', 501, len(data)+8))
|
exth.write(pack(b'>II', 501, len(data)+8))
|
||||||
exth.write(data)
|
exth.write(data)
|
||||||
nrecs += 1
|
nrecs += 1
|
||||||
|
|
||||||
# Add a publication date entry
|
# Add a publication date entry
|
||||||
if oeb.metadata['date'] != [] :
|
if oeb.metadata['date']:
|
||||||
datestr = str(oeb.metadata['date'][0])
|
datestr = str(oeb.metadata['date'][0])
|
||||||
elif oeb.metadata['timestamp'] != [] :
|
elif oeb.metadata['timestamp']:
|
||||||
datestr = str(oeb.metadata['timestamp'][0])
|
datestr = str(oeb.metadata['timestamp'][0])
|
||||||
|
|
||||||
if datestr is not None:
|
if datestr is not None:
|
||||||
|
datestr = bytes(datestr)
|
||||||
|
datestr = datestr.replace(b'+00:00', b'Z')
|
||||||
exth.write(pack(b'>II', EXTH_CODES['pubdate'], len(datestr) + 8))
|
exth.write(pack(b'>II', EXTH_CODES['pubdate'], len(datestr) + 8))
|
||||||
exth.write(datestr)
|
exth.write(datestr)
|
||||||
nrecs += 1
|
nrecs += 1
|
||||||
else:
|
else:
|
||||||
raise NotImplementedError("missing date or timestamp needed for mobi_periodical")
|
raise NotImplementedError("missing date or timestamp needed for mobi_periodical")
|
||||||
|
|
||||||
|
# Write the same creator info as kindlegen 1.2
|
||||||
|
for code, val in [(204, 202), (205, 1), (206, 2), (207, 33307)]:
|
||||||
|
exth.write(pack(b'>II', code, 12))
|
||||||
|
exth.write(pack(b'>I', val))
|
||||||
|
nrecs += 1
|
||||||
|
|
||||||
if (oeb.metadata.cover and
|
if (oeb.metadata.cover and
|
||||||
unicode(oeb.metadata.cover[0]) in oeb.manifest.ids):
|
unicode(oeb.metadata.cover[0]) in oeb.manifest.ids):
|
||||||
id = unicode(oeb.metadata.cover[0])
|
id = unicode(oeb.metadata.cover[0])
|
||||||
|
@ -143,6 +143,7 @@ class Serializer(object):
|
|||||||
spine.extend([item for item in self.oeb.spine if not item.linear])
|
spine.extend([item for item in self.oeb.spine if not item.linear])
|
||||||
for item in spine:
|
for item in spine:
|
||||||
self.serialize_item(item)
|
self.serialize_item(item)
|
||||||
|
self.body_end_offset = buf.tell()
|
||||||
buf.write(b'</body>')
|
buf.write(b'</body>')
|
||||||
|
|
||||||
def serialize_item(self, item):
|
def serialize_item(self, item):
|
||||||
|
@ -1680,11 +1680,18 @@ class TOC(object):
|
|||||||
return True
|
return True
|
||||||
return False
|
return False
|
||||||
|
|
||||||
def iterdescendants(self):
|
def iterdescendants(self, breadth_first=False):
|
||||||
"""Iterate over all descendant nodes in depth-first order."""
|
"""Iterate over all descendant nodes in depth-first order."""
|
||||||
for child in self.nodes:
|
if breadth_first:
|
||||||
for node in child.iter():
|
for child in self.nodes:
|
||||||
yield node
|
yield child
|
||||||
|
for child in self.nodes:
|
||||||
|
for node in child.iterdescendants(breadth_first=True):
|
||||||
|
yield node
|
||||||
|
else:
|
||||||
|
for child in self.nodes:
|
||||||
|
for node in child.iter():
|
||||||
|
yield node
|
||||||
|
|
||||||
def __iter__(self):
|
def __iter__(self):
|
||||||
"""Iterate over all immediate child nodes."""
|
"""Iterate over all immediate child nodes."""
|
||||||
|
@ -165,6 +165,7 @@ class PDFWriter(QObject): # {{{
|
|||||||
printer = get_pdf_printer(self.opts)
|
printer = get_pdf_printer(self.opts)
|
||||||
printer.setOutputFileName(item_path)
|
printer.setOutputFileName(item_path)
|
||||||
self.view.print_(printer)
|
self.view.print_(printer)
|
||||||
|
printer.abort()
|
||||||
self._render_book()
|
self._render_book()
|
||||||
|
|
||||||
def _delete_tmpdir(self):
|
def _delete_tmpdir(self):
|
||||||
@ -186,6 +187,7 @@ class PDFWriter(QObject): # {{{
|
|||||||
draw_image_page(printer, painter, p,
|
draw_image_page(printer, painter, p,
|
||||||
preserve_aspect_ratio=self.opts.preserve_cover_aspect_ratio)
|
preserve_aspect_ratio=self.opts.preserve_cover_aspect_ratio)
|
||||||
painter.end()
|
painter.end()
|
||||||
|
printer.abort()
|
||||||
|
|
||||||
|
|
||||||
def _write(self):
|
def _write(self):
|
||||||
|
@ -133,6 +133,7 @@ def render_data(mi, use_roman_numbers=True, all_fields=False):
|
|||||||
authors = []
|
authors = []
|
||||||
formatter = EvalFormatter()
|
formatter = EvalFormatter()
|
||||||
for aut in mi.authors:
|
for aut in mi.authors:
|
||||||
|
link = ''
|
||||||
if mi.author_link_map[aut]:
|
if mi.author_link_map[aut]:
|
||||||
link = mi.author_link_map[aut]
|
link = mi.author_link_map[aut]
|
||||||
elif gprefs.get('default_author_link'):
|
elif gprefs.get('default_author_link'):
|
||||||
|
@ -183,7 +183,6 @@ class Quickview(QDialog, Ui_Quickview):
|
|||||||
self.items.blockSignals(False)
|
self.items.blockSignals(False)
|
||||||
|
|
||||||
def indicate_no_items(self):
|
def indicate_no_items(self):
|
||||||
print 'no items'
|
|
||||||
self.no_valid_items = True
|
self.no_valid_items = True
|
||||||
self.items.clear()
|
self.items.clear()
|
||||||
self.items.addItem(QListWidgetItem(_('**No items found**')))
|
self.items.addItem(QListWidgetItem(_('**No items found**')))
|
||||||
|
@ -6,6 +6,8 @@ __license__ = 'GPL 3'
|
|||||||
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
|
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
|
||||||
__docformat__ = 'restructuredtext en'
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
|
from calibre.utils.filenames import ascii_filename
|
||||||
|
|
||||||
class StorePlugin(object): # {{{
|
class StorePlugin(object): # {{{
|
||||||
'''
|
'''
|
||||||
A plugin representing an online ebook repository (store). The store can
|
A plugin representing an online ebook repository (store). The store can
|
||||||
@ -43,7 +45,7 @@ class StorePlugin(object): # {{{
|
|||||||
The easiest way to handle affiliate money payouts is to randomly select
|
The easiest way to handle affiliate money payouts is to randomly select
|
||||||
between the author's affiliate id and calibre's affiliate id so that
|
between the author's affiliate id and calibre's affiliate id so that
|
||||||
70% of the time the author's id is used.
|
70% of the time the author's id is used.
|
||||||
|
|
||||||
See declined.txt for a list of stores that do not want to be included.
|
See declined.txt for a list of stores that do not want to be included.
|
||||||
'''
|
'''
|
||||||
|
|
||||||
@ -53,7 +55,7 @@ class StorePlugin(object): # {{{
|
|||||||
self.gui = gui
|
self.gui = gui
|
||||||
self.name = name
|
self.name = name
|
||||||
self.base_plugin = None
|
self.base_plugin = None
|
||||||
self.config = JSONConfig('store/stores/' + self.name)
|
self.config = JSONConfig('store/stores/' + ascii_filename(self.name))
|
||||||
|
|
||||||
def open(self, gui, parent=None, detail_item=None, external=False):
|
def open(self, gui, parent=None, detail_item=None, external=False):
|
||||||
'''
|
'''
|
||||||
|
@ -23,7 +23,8 @@ from calibre.utils.search_query_parser import SearchQueryParser
|
|||||||
|
|
||||||
def comparable_price(text):
|
def comparable_price(text):
|
||||||
text = re.sub(r'[^0-9.,]', '', text)
|
text = re.sub(r'[^0-9.,]', '', text)
|
||||||
if len(text) < 3 or text[-3] not in ('.', ','):
|
delimeter = (',', '.')
|
||||||
|
if len(text) < 3 or text[-3] not in delimeter:
|
||||||
text += '00'
|
text += '00'
|
||||||
text = re.sub(r'\D', '', text)
|
text = re.sub(r'\D', '', text)
|
||||||
text = text.rjust(6, '0')
|
text = text.rjust(6, '0')
|
||||||
@ -334,6 +335,11 @@ class SearchFilter(SearchQueryParser):
|
|||||||
}
|
}
|
||||||
for x in ('author', 'download', 'format'):
|
for x in ('author', 'download', 'format'):
|
||||||
q[x+'s'] = q[x]
|
q[x+'s'] = q[x]
|
||||||
|
|
||||||
|
# make the price in query the same format as result
|
||||||
|
if location == 'price':
|
||||||
|
query = comparable_price(query)
|
||||||
|
|
||||||
for sr in self.srs:
|
for sr in self.srs:
|
||||||
for locvalue in locations:
|
for locvalue in locations:
|
||||||
accessor = q[locvalue]
|
accessor = q[locvalue]
|
||||||
|
@ -1,27 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
from __future__ import (unicode_literals, division, absolute_import, print_function)
|
|
||||||
|
|
||||||
__license__ = 'GPL 3'
|
|
||||||
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
|
|
||||||
__docformat__ = 'restructuredtext en'
|
|
||||||
|
|
||||||
from calibre.gui2.store.basic_config import BasicStoreConfig
|
|
||||||
from calibre.gui2.store.opensearch_store import OpenSearchOPDSStore
|
|
||||||
from calibre.gui2.store.search_result import SearchResult
|
|
||||||
|
|
||||||
class EpubBudStore(BasicStoreConfig, OpenSearchOPDSStore):
|
|
||||||
|
|
||||||
open_search_url = 'http://www.epubbud.com/feeds/opensearch.xml'
|
|
||||||
web_url = 'http://www.epubbud.com/'
|
|
||||||
|
|
||||||
# http://www.epubbud.com/feeds/catalog.atom
|
|
||||||
|
|
||||||
def search(self, query, max_results=10, timeout=60):
|
|
||||||
for s in OpenSearchOPDSStore.search(self, query, max_results, timeout):
|
|
||||||
s.price = '$0.00'
|
|
||||||
s.drm = SearchResult.DRM_UNLOCKED
|
|
||||||
s.formats = 'EPUB'
|
|
||||||
# Download links are broken for this store.
|
|
||||||
s.downloads = {}
|
|
||||||
yield s
|
|
@ -1,80 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
from __future__ import (unicode_literals, division, absolute_import, print_function)
|
|
||||||
|
|
||||||
__license__ = 'GPL 3'
|
|
||||||
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
|
|
||||||
__docformat__ = 'restructuredtext en'
|
|
||||||
|
|
||||||
import urllib2
|
|
||||||
from contextlib import closing
|
|
||||||
|
|
||||||
from lxml import html
|
|
||||||
|
|
||||||
from PyQt4.Qt import QUrl
|
|
||||||
|
|
||||||
from calibre import browser
|
|
||||||
from calibre.gui2 import open_url
|
|
||||||
from calibre.gui2.store import StorePlugin
|
|
||||||
from calibre.gui2.store.basic_config import BasicStoreConfig
|
|
||||||
from calibre.gui2.store.search_result import SearchResult
|
|
||||||
from calibre.gui2.store.web_store_dialog import WebStoreDialog
|
|
||||||
|
|
||||||
class EPubBuyDEStore(BasicStoreConfig, StorePlugin):
|
|
||||||
|
|
||||||
def open(self, parent=None, detail_item=None, external=False):
|
|
||||||
url = 'http://klick.affiliwelt.net/klick.php?bannerid=47653&pid=32307&prid=2627'
|
|
||||||
url_details = ('http://klick.affiliwelt.net/klick.php?bannerid=47653'
|
|
||||||
'&pid=32307&prid=2627&prodid={0}')
|
|
||||||
|
|
||||||
if external or self.config.get('open_external', False):
|
|
||||||
if detail_item:
|
|
||||||
url = url_details.format(detail_item)
|
|
||||||
open_url(QUrl(url))
|
|
||||||
else:
|
|
||||||
detail_url = None
|
|
||||||
if detail_item:
|
|
||||||
detail_url = url_details.format(detail_item)
|
|
||||||
d = WebStoreDialog(self.gui, url, parent, detail_url)
|
|
||||||
d.setWindowTitle(self.name)
|
|
||||||
d.set_tags(self.config.get('tags', ''))
|
|
||||||
d.exec_()
|
|
||||||
|
|
||||||
def search(self, query, max_results=10, timeout=60):
|
|
||||||
url = 'http://www.epubbuy.com/search.php?search_query=' + urllib2.quote(query)
|
|
||||||
br = browser()
|
|
||||||
|
|
||||||
counter = max_results
|
|
||||||
with closing(br.open(url, timeout=timeout)) as f:
|
|
||||||
doc = html.fromstring(f.read())
|
|
||||||
for data in doc.xpath('//li[contains(@class, "ajax_block_product")]'):
|
|
||||||
if counter <= 0:
|
|
||||||
break
|
|
||||||
|
|
||||||
id = ''.join(data.xpath('./div[@class="center_block"]'
|
|
||||||
'/p[contains(text(), "artnr:")]/text()')).strip()
|
|
||||||
if not id:
|
|
||||||
continue
|
|
||||||
id = id[6:].strip()
|
|
||||||
if not id:
|
|
||||||
continue
|
|
||||||
cover_url = ''.join(data.xpath('./div[@class="center_block"]'
|
|
||||||
'/a[@class="product_img_link"]/img/@src'))
|
|
||||||
if cover_url:
|
|
||||||
cover_url = 'http://www.epubbuy.com' + cover_url
|
|
||||||
title = ''.join(data.xpath('./div[@class="center_block"]'
|
|
||||||
'/a[@class="product_img_link"]/@title'))
|
|
||||||
author = ''.join(data.xpath('./div[@class="center_block"]/a[2]/text()'))
|
|
||||||
price = ''.join(data.xpath('.//span[@class="price"]/text()'))
|
|
||||||
counter -= 1
|
|
||||||
|
|
||||||
s = SearchResult()
|
|
||||||
s.cover_url = cover_url
|
|
||||||
s.title = title.strip()
|
|
||||||
s.author = author.strip()
|
|
||||||
s.price = price
|
|
||||||
s.drm = SearchResult.DRM_UNLOCKED
|
|
||||||
s.detail_item = id
|
|
||||||
s.formats = 'ePub'
|
|
||||||
|
|
||||||
yield s
|
|
@ -6,6 +6,7 @@ __license__ = 'GPL 3'
|
|||||||
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
|
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
|
||||||
__docformat__ = 'restructuredtext en'
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
|
import random
|
||||||
import urllib
|
import urllib
|
||||||
from contextlib import closing
|
from contextlib import closing
|
||||||
|
|
||||||
@ -23,7 +24,24 @@ from calibre.gui2.store.web_store_dialog import WebStoreDialog
|
|||||||
class GoogleBooksStore(BasicStoreConfig, StorePlugin):
|
class GoogleBooksStore(BasicStoreConfig, StorePlugin):
|
||||||
|
|
||||||
def open(self, parent=None, detail_item=None, external=False):
|
def open(self, parent=None, detail_item=None, external=False):
|
||||||
url = 'http://books.google.com/'
|
aff_id = {
|
||||||
|
'lid': '41000000033185143',
|
||||||
|
'pubid': '21000000000352219',
|
||||||
|
'ganpub': 'k352219',
|
||||||
|
'ganclk': 'GOOG_1335334761',
|
||||||
|
}
|
||||||
|
# Use Kovid's affiliate id 30% of the time.
|
||||||
|
if random.randint(1, 10) in (1, 2, 3):
|
||||||
|
aff_id = {
|
||||||
|
'lid': '41000000031855266',
|
||||||
|
'pubid': '21000000000352583',
|
||||||
|
'ganpub': 'k352583',
|
||||||
|
'ganclk': 'GOOG_1335335464',
|
||||||
|
}
|
||||||
|
|
||||||
|
url = 'http://gan.doubleclick.net/gan_click?lid=%(lid)s&pubid=%(pubid)s' % aff_id
|
||||||
|
if detail_item:
|
||||||
|
detail_item += '&ganpub=%(ganpub)s&ganclk=%(ganclk)s' % aff_id
|
||||||
|
|
||||||
if external or self.config.get('open_external', False):
|
if external or self.config.get('open_external', False):
|
||||||
open_url(QUrl(url_slash_cleaner(detail_item if detail_item else url)))
|
open_url(QUrl(url_slash_cleaner(detail_item if detail_item else url)))
|
||||||
|
126
src/calibre/gui2/store/stores/ozon_ru_plugin.py
Normal file
126
src/calibre/gui2/store/stores/ozon_ru_plugin.py
Normal file
@ -0,0 +1,126 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
from __future__ import (unicode_literals, division, absolute_import, print_function)
|
||||||
|
|
||||||
|
__license__ = 'GPL 3'
|
||||||
|
__copyright__ = '2011, Roman Mukhin <ramses_ru at hotmail.com>'
|
||||||
|
__docformat__ = 'restructuredtext en'
|
||||||
|
|
||||||
|
import random
|
||||||
|
import re
|
||||||
|
import urllib2
|
||||||
|
|
||||||
|
from contextlib import closing
|
||||||
|
from lxml import etree, html
|
||||||
|
from PyQt4.Qt import QUrl
|
||||||
|
|
||||||
|
from calibre import browser, url_slash_cleaner
|
||||||
|
from calibre.ebooks.chardet import xml_to_unicode
|
||||||
|
from calibre.gui2 import open_url
|
||||||
|
from calibre.gui2.store import StorePlugin
|
||||||
|
from calibre.gui2.store.basic_config import BasicStoreConfig
|
||||||
|
from calibre.gui2.store.search_result import SearchResult
|
||||||
|
from calibre.gui2.store.web_store_dialog import WebStoreDialog
|
||||||
|
|
||||||
|
class OzonRUStore(BasicStoreConfig, StorePlugin):
    '''Calibre store plugin for the Russian ebook shop ozon.ru.'''

    shop_url = 'http://www.ozon.ru'

    def open(self, parent=None, detail_item=None, external=False):
        '''Open the shop front page or a concrete book's detail page.

        :param parent: parent widget for the embedded store dialog
        :param detail_item: ozon.ru numeric book id; when given, the book's
            detail page is opened instead of the shop front page
        :param external: force opening in the system web browser instead of
            the embedded dialog
        '''
        aff_id = '?partner=romuk'
        # Use Kovid's affiliate id 30% of the time.
        if random.randint(1, 10) in (1, 2, 3):
            aff_id = '?partner=kovidgoyal'

        url = self.shop_url + aff_id
        detail_url = None
        if detail_item:
            # Detail pages look like http://www.ozon.ru/context/detail/id/3037277/
            detail_url = self.shop_url + '/context/detail/id/' + urllib2.quote(detail_item) + aff_id

        if external or self.config.get('open_external', False):
            open_url(QUrl(url_slash_cleaner(detail_url if detail_url else url)))
        else:
            d = WebStoreDialog(self.gui, url, parent, detail_url)
            d.setWindowTitle(self.name)
            d.set_tags(self.config.get('tags', ''))
            d.exec_()

    def search(self, query, max_results=10, timeout=60):
        '''Yield up to *max_results* SearchResult objects for *query*.

        Queries ozon.ru's XML search web service and pulls the book
        metadata out of each ``SearchItems`` element of the response.
        '''
        search_url = self.shop_url + '/webservice/webservice.asmx/SearchWebService?'\
            'searchText=%s&searchContext=ebook' % urllib2.quote(query)

        counter = max_results
        br = browser()
        with closing(br.open(search_url, timeout=timeout)) as f:
            raw = xml_to_unicode(f.read(), strip_encoding_pats=True, assume_utf8=True)[0]
            doc = etree.fromstring(raw)
            # Namespace-agnostic extraction of a child element's text;
            # hoisted out of the loop since it never changes.
            xp_template = 'normalize-space(./*[local-name() = "{0}"]/text())'
            for data in doc.xpath('//*[local-name() = "SearchItems"]'):
                if counter <= 0:
                    break
                counter -= 1

                s = SearchResult()
                s.detail_item = data.xpath(xp_template.format('ID'))
                s.title = data.xpath(xp_template.format('Name'))
                s.author = data.xpath(xp_template.format('Author'))
                s.price = data.xpath(xp_template.format('Price'))
                s.cover_url = data.xpath(xp_template.format('Picture'))
                # Normalize plain decimal prices to two places plus a currency
                # suffix. Raw string keeps the regex free of invalid escapes.
                if re.match(r'^\d+?\.\d+?$', s.price):
                    s.price = u'{:.2F} руб.'.format(float(s.price))
                yield s

    def get_details(self, search_result, timeout=60):
        '''Fetch the book's detail page and fill in format/DRM information.

        :param search_result: SearchResult whose ``detail_item`` holds the
            ozon.ru book id; ``formats`` and ``drm`` are updated in place
        :return: True when the list of available formats was found
        '''
        url = self.shop_url + '/context/detail/id/' + urllib2.quote(search_result.detail_item)
        br = browser()

        result = False
        with closing(br.open(url, timeout=timeout)) as f:
            doc = html.fromstring(f.read())

            # example where we are going to find formats
            # <div class="box">
            # ...
            # <b>Доступные форматы:</b>
            # <div class="vertpadd">.epub, .fb2, .pdf, .pdf, .txt</div>
            # ...
            # </div>
            xpt = u'normalize-space(//div[@class="box"]//*[contains(normalize-space(text()), "Доступные форматы:")][1]/following-sibling::div[1]/text())'
            formats = doc.xpath(xpt)
            if formats:
                result = True
                search_result.drm = SearchResult.DRM_UNLOCKED
                search_result.formats = ', '.join(_parse_ebook_formats(formats))
            # unfortunately no direct links to download books (only buy link)
            # search_result.downloads['BF2'] = self.shop_url + '/order/digitalorder.aspx?id=' + + urllib2.quote(search_result.detail_item)
        return result
|
||||||
|
|
||||||
|
def _parse_ebook_formats(formatsStr):
|
||||||
|
'''
|
||||||
|
Creates a list with displayable names of the formats
|
||||||
|
|
||||||
|
:param formatsStr: string with comma separated book formats
|
||||||
|
as it provided by ozon.ru
|
||||||
|
:return: a list with displayable book formats
|
||||||
|
'''
|
||||||
|
|
||||||
|
formatsUnstruct = formatsStr.lower()
|
||||||
|
formats = []
|
||||||
|
if 'epub' in formatsUnstruct:
|
||||||
|
formats.append('ePub')
|
||||||
|
if 'pdf' in formatsUnstruct:
|
||||||
|
formats.append('PDF')
|
||||||
|
if 'fb2' in formatsUnstruct:
|
||||||
|
formats.append('FB2')
|
||||||
|
if 'rtf' in formatsUnstruct:
|
||||||
|
formats.append('RTF')
|
||||||
|
if 'txt' in formatsUnstruct:
|
||||||
|
formats.append('TXT')
|
||||||
|
if 'djvu' in formatsUnstruct:
|
||||||
|
formats.append('DjVu')
|
||||||
|
if 'doc' in formatsUnstruct:
|
||||||
|
formats.append('DOC')
|
||||||
|
return formats
|
@ -15,6 +15,7 @@ from calibre.gui2 import config, dynamic, open_url
|
|||||||
from calibre.gui2.dialogs.plugin_updater import get_plugin_updates_available
|
from calibre.gui2.dialogs.plugin_updater import get_plugin_updates_available
|
||||||
|
|
||||||
URL = 'http://status.calibre-ebook.com/latest'
|
URL = 'http://status.calibre-ebook.com/latest'
|
||||||
|
#URL = 'http://localhost:8000/latest'
|
||||||
NO_CALIBRE_UPDATE = '-0.0.0'
|
NO_CALIBRE_UPDATE = '-0.0.0'
|
||||||
VSEP = '|'
|
VSEP = '|'
|
||||||
|
|
||||||
|
@ -1892,7 +1892,9 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
|||||||
yield r[iindex]
|
yield r[iindex]
|
||||||
|
|
||||||
def get_next_series_num_for(self, series):
|
def get_next_series_num_for(self, series):
|
||||||
series_id = self.conn.get('SELECT id from series WHERE name=?',
|
series_id = None
|
||||||
|
if series:
|
||||||
|
series_id = self.conn.get('SELECT id from series WHERE name=?',
|
||||||
(series,), all=False)
|
(series,), all=False)
|
||||||
if series_id is None:
|
if series_id is None:
|
||||||
if isinstance(tweaks['series_index_auto_increment'], (int, float)):
|
if isinstance(tweaks['series_index_auto_increment'], (int, float)):
|
||||||
@ -3023,8 +3025,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
|||||||
stream.seek(0)
|
stream.seek(0)
|
||||||
mi = get_metadata(stream, format, use_libprs_metadata=False)
|
mi = get_metadata(stream, format, use_libprs_metadata=False)
|
||||||
stream.seek(0)
|
stream.seek(0)
|
||||||
if not mi.series_index:
|
if mi.series_index is None:
|
||||||
mi.series_index = 1.0
|
mi.series_index = self.get_next_series_num_for(mi.series)
|
||||||
mi.tags = [_('News')]
|
mi.tags = [_('News')]
|
||||||
if arg['add_title_tag']:
|
if arg['add_title_tag']:
|
||||||
mi.tags += [arg['title']]
|
mi.tags += [arg['title']]
|
||||||
@ -3076,7 +3078,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
|||||||
self._add_newbook_tag(mi)
|
self._add_newbook_tag(mi)
|
||||||
if not add_duplicates and self.has_book(mi):
|
if not add_duplicates and self.has_book(mi):
|
||||||
return None
|
return None
|
||||||
series_index = 1.0 if mi.series_index is None else mi.series_index
|
series_index = self.get_next_series_num_for(mi.series) \
|
||||||
|
if mi.series_index is None else mi.series_index
|
||||||
aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
|
aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
|
||||||
title = mi.title
|
title = mi.title
|
||||||
if isbytestring(aus):
|
if isbytestring(aus):
|
||||||
@ -3123,7 +3126,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
|||||||
if not add_duplicates and self.has_book(mi):
|
if not add_duplicates and self.has_book(mi):
|
||||||
duplicates.append((path, format, mi))
|
duplicates.append((path, format, mi))
|
||||||
continue
|
continue
|
||||||
series_index = 1.0 if mi.series_index is None else mi.series_index
|
series_index = self.get_next_series_num_for(mi.series) \
|
||||||
|
if mi.series_index is None else mi.series_index
|
||||||
aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
|
aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
|
||||||
title = mi.title
|
title = mi.title
|
||||||
if isinstance(aus, str):
|
if isinstance(aus, str):
|
||||||
@ -3157,7 +3161,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
|||||||
|
|
||||||
def import_book(self, mi, formats, notify=True, import_hooks=True,
|
def import_book(self, mi, formats, notify=True, import_hooks=True,
|
||||||
apply_import_tags=True, preserve_uuid=False):
|
apply_import_tags=True, preserve_uuid=False):
|
||||||
series_index = 1.0 if mi.series_index is None else mi.series_index
|
series_index = self.get_next_series_num_for(mi.series) \
|
||||||
|
if mi.series_index is None else mi.series_index
|
||||||
if apply_import_tags:
|
if apply_import_tags:
|
||||||
self._add_newbook_tag(mi)
|
self._add_newbook_tag(mi)
|
||||||
if not mi.title:
|
if not mi.title:
|
||||||
|
@ -17,7 +17,7 @@ from datetime import datetime
|
|||||||
from functools import partial
|
from functools import partial
|
||||||
|
|
||||||
from calibre.ebooks.metadata import title_sort, author_to_author_sort
|
from calibre.ebooks.metadata import title_sort, author_to_author_sort
|
||||||
from calibre.utils.date import parse_date, isoformat, local_tz
|
from calibre.utils.date import parse_date, isoformat, local_tz, UNDEFINED_DATE
|
||||||
from calibre import isbytestring, force_unicode
|
from calibre import isbytestring, force_unicode
|
||||||
from calibre.constants import iswindows, DEBUG, plugins
|
from calibre.constants import iswindows, DEBUG, plugins
|
||||||
from calibre.utils.icu import strcmp
|
from calibre.utils.icu import strcmp
|
||||||
@ -39,8 +39,11 @@ def _c_convert_timestamp(val):
|
|||||||
if ret is None:
|
if ret is None:
|
||||||
return parse_date(val, as_utc=False)
|
return parse_date(val, as_utc=False)
|
||||||
year, month, day, hour, minutes, seconds, tzsecs = ret
|
year, month, day, hour, minutes, seconds, tzsecs = ret
|
||||||
return datetime(year, month, day, hour, minutes, seconds,
|
try:
|
||||||
|
return datetime(year, month, day, hour, minutes, seconds,
|
||||||
tzinfo=tzoffset(None, tzsecs)).astimezone(local_tz)
|
tzinfo=tzoffset(None, tzsecs)).astimezone(local_tz)
|
||||||
|
except OverflowError:
|
||||||
|
return UNDEFINED_DATE.astimezone(local_tz)
|
||||||
|
|
||||||
def _py_convert_timestamp(val):
|
def _py_convert_timestamp(val):
|
||||||
if val:
|
if val:
|
||||||
|
@ -401,7 +401,7 @@ with undefined values in the column. Searching for ``true`` will find all books
|
|||||||
values in the column. Searching for ``yes`` or ``checked`` will find all books with ``Yes`` in the column.
|
values in the column. Searching for ``yes`` or ``checked`` will find all books with ``Yes`` in the column.
|
||||||
Searching for ``no`` or ``unchecked`` will find all books with ``No`` in the column.
|
Searching for ``no`` or ``unchecked`` will find all books with ``No`` in the column.
|
||||||
|
|
||||||
Hierarchical items (e.g. A.B.C) use an extended syntax to match initial parts of the hierarchy. This is done by adding a period between the exact match indicator (=) and the text. For example, the query ``tags:=.A`` will find the tags `A` and `A.B`, but will not find the tags `AA` or `AA.B`. The query ``tags:=.A.B`` will find the tags `A.B` and `A.C`, but not the tag `A`.
|
Hierarchical items (e.g. A.B.C) use an extended syntax to match initial parts of the hierarchy. This is done by adding a period between the exact match indicator (=) and the text. For example, the query ``tags:=.A`` will find the tags `A` and `A.B`, but will not find the tags `AA` or `AA.B`. The query ``tags:=.A.B`` will find the tags `A.B` and `A.B.C`, but not the tag `A`.
|
||||||
|
|
||||||
Identifiers (e.g., isbn, doi, lccn etc) also use an extended syntax. First, note that an identifier has the form ``type:value``, as in ``isbn:123456789``. The extended syntax permits you to specify independently which type and value to search for. Both the type and the value parts of the query can use `equality`, `contains`, or `regular expression` matches. Examples:
|
Identifiers (e.g., isbn, doi, lccn etc) also use an extended syntax. First, note that an identifier has the form ``type:value``, as in ``isbn:123456789``. The extended syntax permits you to specify independently which type and value to search for. Both the type and the value parts of the query can use `equality`, `contains`, or `regular expression` matches. Examples:
|
||||||
|
|
||||||
|
@ -5541,23 +5541,23 @@ msgstr "Книги с такими же тегами"
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:20
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:20
|
||||||
msgid "Get books"
|
msgid "Get books"
|
||||||
msgstr ""
|
msgstr "Загрузить книги"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:29
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:29
|
||||||
msgid "Search for ebooks"
|
msgid "Search for ebooks"
|
||||||
msgstr ""
|
msgstr "Поиск книг..."
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:30
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:30
|
||||||
msgid "Search for this author"
|
msgid "Search for this author"
|
||||||
msgstr ""
|
msgstr "Поиск по автору"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:31
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:31
|
||||||
msgid "Search for this title"
|
msgid "Search for this title"
|
||||||
msgstr ""
|
msgstr "Поиск по названию"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:32
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:32
|
||||||
msgid "Search for this book"
|
msgid "Search for this book"
|
||||||
msgstr ""
|
msgstr "Поиск по книге"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:34
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:34
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:135
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:135
|
||||||
@ -5569,21 +5569,21 @@ msgstr "Магазины"
|
|||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/chooser_dialog.py:18
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/chooser_dialog.py:18
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:285
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:285
|
||||||
msgid "Choose stores"
|
msgid "Choose stores"
|
||||||
msgstr ""
|
msgstr "Выбрать магазины"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:83
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:83
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:102
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:102
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:111
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:111
|
||||||
msgid "Cannot search"
|
msgid "Cannot search"
|
||||||
msgstr ""
|
msgstr "Поиск не может быть произведён"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:130
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:130
|
||||||
msgid ""
|
msgid ""
|
||||||
"Calibre helps you find the ebooks you want by searching the websites of "
|
"Calibre helps you find the ebooks you want by searching the websites of "
|
||||||
"various commercial and public domain book sources for you."
|
"various commercial and public domain book sources for you."
|
||||||
msgstr ""
|
msgstr ""
|
||||||
"Calibre помогает вам отыскать книги, которые вы хотите найти, предлагая вам "
|
"Calibre поможет Вам найти книги, предлагая "
|
||||||
"найденные веб-сайты различных коммерческих и публичных источников книг."
|
"веб-сайты различных коммерческих и публичных источников книг."
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:134
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:134
|
||||||
msgid ""
|
msgid ""
|
||||||
@ -5591,6 +5591,8 @@ msgid ""
|
|||||||
"are looking for, at the best price. You also get DRM status and other useful "
|
"are looking for, at the best price. You also get DRM status and other useful "
|
||||||
"information."
|
"information."
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
"Используя встроенный поиск Вы можете легко найти магазин предлагающий выгодную цену "
|
||||||
|
"для интересующей Вас книги. Также Вы получите другую полезную информацию"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:138
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:138
|
||||||
msgid ""
|
msgid ""
|
||||||
@ -5608,7 +5610,7 @@ msgstr "Показать снова данное сообщение"
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:149
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:149
|
||||||
msgid "About Get Books"
|
msgid "About Get Books"
|
||||||
msgstr ""
|
msgstr "О 'Загрузить книги'"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/tweak_epub.py:17
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/tweak_epub.py:17
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/tweak_epub_ui.py:60
|
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/tweak_epub_ui.py:60
|
||||||
@ -5617,7 +5619,7 @@ msgstr "Tweak EPUB"
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/tweak_epub.py:18
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/tweak_epub.py:18
|
||||||
msgid "Make small changes to ePub format books"
|
msgid "Make small changes to ePub format books"
|
||||||
msgstr ""
|
msgstr "Внести небольшие изменения в книги формата ePub"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/tweak_epub.py:19
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/tweak_epub.py:19
|
||||||
msgid "T"
|
msgid "T"
|
||||||
@ -5704,7 +5706,7 @@ msgstr "Не могу открыть папку"
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/view.py:220
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/view.py:220
|
||||||
msgid "This book no longer exists in your library"
|
msgid "This book no longer exists in your library"
|
||||||
msgstr ""
|
msgstr "Эта книга больше не находится в Вашей библиотеке"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/actions/view.py:227
|
#: /home/kovid/work/calibre/src/calibre/gui2/actions/view.py:227
|
||||||
#, python-format
|
#, python-format
|
||||||
@ -9167,11 +9169,11 @@ msgstr "&Показать пароль"
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:122
|
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:122
|
||||||
msgid "Restart required"
|
msgid "Restart required"
|
||||||
msgstr ""
|
msgstr "Требуется перезапуск"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:123
|
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:123
|
||||||
msgid "You must restart Calibre before using this plugin!"
|
msgid "You must restart Calibre before using this plugin!"
|
||||||
msgstr ""
|
msgstr "Для использования плагина Вам нужно перезапустить Calibre!"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:164
|
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:164
|
||||||
#, python-format
|
#, python-format
|
||||||
@ -9183,17 +9185,17 @@ msgstr ""
|
|||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:136
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:136
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search_ui.py:111
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search_ui.py:111
|
||||||
msgid "All"
|
msgid "All"
|
||||||
msgstr ""
|
msgstr "Всё"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:184
|
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:184
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:302
|
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:302
|
||||||
msgid "Installed"
|
msgid "Installed"
|
||||||
msgstr ""
|
msgstr "Установленные"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:184
|
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:184
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:397
|
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:397
|
||||||
msgid "Not installed"
|
msgid "Not installed"
|
||||||
msgstr ""
|
msgstr "Не установленные"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:184
|
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:184
|
||||||
msgid "Update available"
|
msgid "Update available"
|
||||||
@ -9201,7 +9203,7 @@ msgstr "Доступно обновление"
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:302
|
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:302
|
||||||
msgid "Plugin Name"
|
msgid "Plugin Name"
|
||||||
msgstr ""
|
msgstr "Название плагина"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:302
|
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:302
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/jobs.py:63
|
#: /home/kovid/work/calibre/src/calibre/gui2/jobs.py:63
|
||||||
@ -13317,7 +13319,7 @@ msgstr ""
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/plugins_ui.py:114
|
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/plugins_ui.py:114
|
||||||
msgid "&Load plugin from file"
|
msgid "&Load plugin from file"
|
||||||
msgstr ""
|
msgstr "Загрузить плагин из файла"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/save_template.py:33
|
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/save_template.py:33
|
||||||
msgid "Any custom field"
|
msgid "Any custom field"
|
||||||
@ -13579,11 +13581,11 @@ msgstr "Сбой запуска контент-сервера"
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/server.py:106
|
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/server.py:106
|
||||||
msgid "Error log:"
|
msgid "Error log:"
|
||||||
msgstr "Лог ошибок:"
|
msgstr "Журнал ошибок:"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/server.py:113
|
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/server.py:113
|
||||||
msgid "Access log:"
|
msgid "Access log:"
|
||||||
msgstr "Лог доступа:"
|
msgstr "Журнал доступа:"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/server.py:128
|
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/server.py:128
|
||||||
msgid "You need to restart the server for changes to take effect"
|
msgid "You need to restart the server for changes to take effect"
|
||||||
@ -14053,7 +14055,7 @@ msgstr "Ничего"
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/shortcuts.py:59
|
#: /home/kovid/work/calibre/src/calibre/gui2/shortcuts.py:59
|
||||||
msgid "Press a key..."
|
msgid "Press a key..."
|
||||||
msgstr ""
|
msgstr "Нажмите клавишу..."
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/shortcuts.py:80
|
#: /home/kovid/work/calibre/src/calibre/gui2/shortcuts.py:80
|
||||||
msgid "Already assigned"
|
msgid "Already assigned"
|
||||||
@ -14108,19 +14110,19 @@ msgstr ""
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/basic_config_widget_ui.py:38
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/basic_config_widget_ui.py:38
|
||||||
msgid "Added Tags:"
|
msgid "Added Tags:"
|
||||||
msgstr ""
|
msgstr "Добавленные тэги:"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/basic_config_widget_ui.py:39
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/basic_config_widget_ui.py:39
|
||||||
msgid "Open store in external web browswer"
|
msgid "Open store in external web browswer"
|
||||||
msgstr ""
|
msgstr "Открыть сайт магазина в интернет броузере"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:219
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:219
|
||||||
msgid "&Name:"
|
msgid "&Name:"
|
||||||
msgstr ""
|
msgstr "&Название"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:221
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:221
|
||||||
msgid "&Description:"
|
msgid "&Description:"
|
||||||
msgstr ""
|
msgstr "&Описание"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:222
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:222
|
||||||
msgid "&Headquarters:"
|
msgid "&Headquarters:"
|
||||||
@ -14140,7 +14142,7 @@ msgstr ""
|
|||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:217
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:217
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:220
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:220
|
||||||
msgid "true"
|
msgid "true"
|
||||||
msgstr ""
|
msgstr "да"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:229
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:229
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:231
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:231
|
||||||
@ -14148,41 +14150,41 @@ msgstr ""
|
|||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:218
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:218
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:221
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:221
|
||||||
msgid "false"
|
msgid "false"
|
||||||
msgstr ""
|
msgstr "нет"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:232
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:232
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:216
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:216
|
||||||
msgid "Affiliate:"
|
msgid "Affiliate:"
|
||||||
msgstr ""
|
msgstr "Партнёрство:"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:235
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:235
|
||||||
msgid "Nam&e/Description ..."
|
msgid "Nam&e/Description ..."
|
||||||
msgstr ""
|
msgstr "Названи&е/Описание"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/chooser_widget_ui.py:78
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/chooser_widget_ui.py:78
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:132
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:132
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search_ui.py:108
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search_ui.py:108
|
||||||
msgid "Query:"
|
msgid "Query:"
|
||||||
msgstr ""
|
msgstr "Запрос:"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/chooser_widget_ui.py:81
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/chooser_widget_ui.py:81
|
||||||
msgid "Enable"
|
msgid "Enable"
|
||||||
msgstr ""
|
msgstr "Включить"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/chooser_widget_ui.py:84
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/chooser_widget_ui.py:84
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:137
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:137
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search_ui.py:112
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search_ui.py:112
|
||||||
msgid "Invert"
|
msgid "Invert"
|
||||||
msgstr ""
|
msgstr "Инвертировать"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:21
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:21
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:37
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:37
|
||||||
msgid "Affiliate"
|
msgid "Affiliate"
|
||||||
msgstr ""
|
msgstr "Партнерство"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:21
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:21
|
||||||
msgid "Enabled"
|
msgid "Enabled"
|
||||||
msgstr ""
|
msgstr "Включено"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:21
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:21
|
||||||
msgid "Headquarters"
|
msgid "Headquarters"
|
||||||
@ -14190,7 +14192,7 @@ msgstr ""
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:21
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:21
|
||||||
msgid "No DRM"
|
msgid "No DRM"
|
||||||
msgstr ""
|
msgstr "Без DRM"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:129
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:129
|
||||||
msgid ""
|
msgid ""
|
||||||
@ -14205,13 +14207,14 @@ msgstr ""
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:136
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:136
|
||||||
msgid "This store only distributes ebooks without DRM."
|
msgid "This store only distributes ebooks without DRM."
|
||||||
msgstr ""
|
msgstr "Этот магазин распространяет электронные книги исключительно без DRM"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:138
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:138
|
||||||
msgid ""
|
msgid ""
|
||||||
"This store distributes ebooks with DRM. It may have some titles without DRM, "
|
"This store distributes ebooks with DRM. It may have some titles without DRM, "
|
||||||
"but you will need to check on a per title basis."
|
"but you will need to check on a per title basis."
|
||||||
msgstr ""
|
msgstr "Этот магазин распространяет электронные книги с DRM. Возможно, некоторые издания"
|
||||||
|
" доступны без DRM, но для этого надо проверять каждую книгу в отдельности."
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:140
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:140
|
||||||
#, python-format
|
#, python-format
|
||||||
@ -14225,46 +14228,46 @@ msgstr ""
|
|||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:211
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:211
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Buying from this store supports the calibre developer: %s."
|
msgid "Buying from this store supports the calibre developer: %s."
|
||||||
msgstr ""
|
msgstr "Покупая в этом магазине Вы поддерживаете проект calibre и разработчика: %s."
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:145
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:145
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "This store distributes ebooks in the following formats: %s"
|
msgid "This store distributes ebooks in the following formats: %s"
|
||||||
msgstr ""
|
msgstr "Магазин распространяет эл. книги в следующих форматах: %s"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/results_view.py:47
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/results_view.py:47
|
||||||
msgid "Configure..."
|
msgid "Configure..."
|
||||||
msgstr ""
|
msgstr "Настроить..."
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:99
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:99
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:99
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:99
|
||||||
msgid "Time"
|
msgid "Time"
|
||||||
msgstr ""
|
msgstr "Время"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:100
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:100
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:100
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:100
|
||||||
msgid "Number of seconds to wait for a store to respond"
|
msgid "Number of seconds to wait for a store to respond"
|
||||||
msgstr ""
|
msgstr "Время ожидания ответа магазина (в секундах)"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:101
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:101
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:101
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:101
|
||||||
msgid "Number of seconds to let a store process results"
|
msgid "Number of seconds to let a store process results"
|
||||||
msgstr ""
|
msgstr "Допустимое время обработки результата магазином (в секундах)"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:102
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:102
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:102
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:102
|
||||||
msgid "Display"
|
msgid "Display"
|
||||||
msgstr ""
|
msgstr "Показать"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:103
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:103
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:103
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:103
|
||||||
msgid "Maximum number of results to show per store"
|
msgid "Maximum number of results to show per store"
|
||||||
msgstr ""
|
msgstr "Максимальное количество результатов для показа (по каждому магазину)"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:104
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:104
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:104
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:104
|
||||||
msgid "Open search result in system browser"
|
msgid "Open search result in system browser"
|
||||||
msgstr ""
|
msgstr "Показывать результаты поиска в системном интернет-браузере"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:105
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:105
|
||||||
msgid "Threads"
|
msgid "Threads"
|
||||||
@ -14288,11 +14291,11 @@ msgstr ""
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:105
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:105
|
||||||
msgid "Performance"
|
msgid "Performance"
|
||||||
msgstr ""
|
msgstr "Производительность"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:106
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:106
|
||||||
msgid "Number of simultaneous searches"
|
msgid "Number of simultaneous searches"
|
||||||
msgstr ""
|
msgstr "Количество одновременно выполняемых поисков"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:107
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:107
|
||||||
msgid "Number of simultaneous cache updates"
|
msgid "Number of simultaneous cache updates"
|
||||||
@ -14308,13 +14311,13 @@ msgstr ""
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/mobileread_store_dialog_ui.py:62
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/mobileread_store_dialog_ui.py:62
|
||||||
msgid "Search:"
|
msgid "Search:"
|
||||||
msgstr ""
|
msgstr "Поиск:"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/mobileread_store_dialog_ui.py:63
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/mobileread_store_dialog_ui.py:63
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:142
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:142
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/store_dialog_ui.py:77
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/store_dialog_ui.py:77
|
||||||
msgid "Books:"
|
msgid "Books:"
|
||||||
msgstr ""
|
msgstr "Книги:"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/mobileread_store_dialog_ui.py:65
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/mobileread_store_dialog_ui.py:65
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:144
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:144
|
||||||
@ -14323,20 +14326,20 @@ msgstr ""
|
|||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_store_dialog_ui.py:63
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_store_dialog_ui.py:63
|
||||||
#: /usr/src/qt-everywhere-opensource-src-4.7.2/src/gui/widgets/qdialogbuttonbox.cpp:661
|
#: /usr/src/qt-everywhere-opensource-src-4.7.2/src/gui/widgets/qdialogbuttonbox.cpp:661
|
||||||
msgid "Close"
|
msgid "Close"
|
||||||
msgstr ""
|
msgstr "Закрыть"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:212
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:212
|
||||||
msgid "&Price:"
|
msgid "&Price:"
|
||||||
msgstr ""
|
msgstr "&Цена:"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:219
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:219
|
||||||
msgid "Download:"
|
msgid "Download:"
|
||||||
msgstr ""
|
msgstr "Скачать:"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:222
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:222
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/adv_search_builder_ui.py:187
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/adv_search_builder_ui.py:187
|
||||||
msgid "Titl&e/Author/Price ..."
|
msgid "Titl&e/Author/Price ..."
|
||||||
msgstr ""
|
msgstr "Названи&е/Автор/Цена ..."
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:37
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:37
|
||||||
msgid "DRM"
|
msgid "DRM"
|
||||||
@ -14344,11 +14347,11 @@ msgstr ""
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:37
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:37
|
||||||
msgid "Download"
|
msgid "Download"
|
||||||
msgstr ""
|
msgstr "Скачать"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:37
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:37
|
||||||
msgid "Price"
|
msgid "Price"
|
||||||
msgstr ""
|
msgstr "Цена"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:196
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:196
|
||||||
#, python-format
|
#, python-format
|
||||||
@ -14383,90 +14386,90 @@ msgstr ""
|
|||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:208
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:208
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "The following formats can be downloaded directly: %s."
|
msgid "The following formats can be downloaded directly: %s."
|
||||||
msgstr ""
|
msgstr "Форматы доступные для непосредственного скачивания: %s."
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/results_view.py:41
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/results_view.py:41
|
||||||
msgid "Download..."
|
msgid "Download..."
|
||||||
msgstr ""
|
msgstr "Скачать..."
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/results_view.py:45
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/results_view.py:45
|
||||||
msgid "Goto in store..."
|
msgid "Goto in store..."
|
||||||
msgstr ""
|
msgstr "Перейти в магазин..."
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:114
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:114
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Buying from this store supports the calibre developer: %s</p>"
|
msgid "Buying from this store supports the calibre developer: %s</p>"
|
||||||
msgstr ""
|
msgstr "Покупая в этом магазине Вы поддерживаете проект calibre и разработчика: %s</p>"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:276
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:276
|
||||||
msgid "Customize get books search"
|
msgid "Customize get books search"
|
||||||
msgstr ""
|
msgstr "Перенастроить под себя поиск книг для скачивания"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:286
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:286
|
||||||
msgid "Configure search"
|
msgid "Configure search"
|
||||||
msgstr ""
|
msgstr "Настроить поиск"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:336
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:336
|
||||||
msgid "Couldn't find any books matching your query."
|
msgid "Couldn't find any books matching your query."
|
||||||
msgstr "Ну удалось найти ни одной кники, соотвествующей вашему запросу."
|
msgstr "Не удалось найти ни одной книги, соответствующей вашему запросу."
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:350
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:350
|
||||||
msgid "Choose format to download to your library."
|
msgid "Choose format to download to your library."
|
||||||
msgstr ""
|
msgstr "Выберите формат для скачивания в библиотеку"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:131
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:131
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search_ui.py:107
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search_ui.py:107
|
||||||
msgid "Get Books"
|
msgid "Get Books"
|
||||||
msgstr ""
|
msgstr "Скачать книги"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:140
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:140
|
||||||
msgid "Open a selected book in the system's web browser"
|
msgid "Open a selected book in the system's web browser"
|
||||||
msgstr ""
|
msgstr "Показать выбранную книгу в системном интернет-браузере"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:141
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:141
|
||||||
msgid "Open in &external browser"
|
msgid "Open in &external browser"
|
||||||
msgstr ""
|
msgstr "Открыть во &внешнем браузере"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/ebooks_com_plugin.py:96
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/ebooks_com_plugin.py:96
|
||||||
msgid "Not Available"
|
msgid "Not Available"
|
||||||
msgstr ""
|
msgstr "Недоступно"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/adv_search_builder_ui.py:179
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/adv_search_builder_ui.py:179
|
||||||
msgid ""
|
msgid ""
|
||||||
"See the <a href=\"http://calibre-ebook.com/user_manual/gui.html#the-search-"
|
"See the <a href=\"http://calibre-ebook.com/user_manual/gui.html#the-search-"
|
||||||
"interface\">User Manual</a> for more help"
|
"interface\">User Manual</a> for more help"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
"Смотри <a href=\"http://calibre-ebook.com/user_manual/gui.html#the-search-"
|
"Смотрите <a href=\"http://calibre-ebook.com/user_manual/gui.html#the-search-"
|
||||||
"interface\">Пользовательский мануал</a> для помощи"
|
"interface\">Руководство пользователя</a> для помощи"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/cache_progress_dialog_ui.py:51
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/cache_progress_dialog_ui.py:51
|
||||||
msgid "Updating book cache"
|
msgid "Updating book cache"
|
||||||
msgstr ""
|
msgstr "Обновляется кэш книг"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/cache_update_thread.py:42
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/cache_update_thread.py:42
|
||||||
msgid "Checking last download date."
|
msgid "Checking last download date."
|
||||||
msgstr ""
|
msgstr "Проверяется время последнего скачивания."
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/cache_update_thread.py:48
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/cache_update_thread.py:48
|
||||||
msgid "Downloading book list from MobileRead."
|
msgid "Downloading book list from MobileRead."
|
||||||
msgstr ""
|
msgstr "Загружается список книг с MobileRead."
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/cache_update_thread.py:61
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/cache_update_thread.py:61
|
||||||
msgid "Processing books."
|
msgid "Processing books."
|
||||||
msgstr ""
|
msgstr "Книги обрабатываются"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/cache_update_thread.py:71
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/cache_update_thread.py:71
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "%(num)s of %(tot)s books processed."
|
msgid "%(num)s of %(tot)s books processed."
|
||||||
msgstr ""
|
msgstr "Обработано %(num)s из %(tot)s книг."
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/mobileread_plugin.py:62
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/mobileread_plugin.py:62
|
||||||
msgid "Updating MobileRead book cache..."
|
msgid "Updating MobileRead book cache..."
|
||||||
msgstr ""
|
msgstr "Обновляется кэш книг MobileRead..."
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/store_dialog_ui.py:74
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/store_dialog_ui.py:74
|
||||||
msgid "&Query:"
|
msgid "&Query:"
|
||||||
msgstr ""
|
msgstr "&Запрос:"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_control.py:73
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_control.py:73
|
||||||
msgid ""
|
msgid ""
|
||||||
@ -14480,15 +14483,15 @@ msgstr ""
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_control.py:86
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_control.py:86
|
||||||
msgid "File is not a supported ebook type. Save to disk?"
|
msgid "File is not a supported ebook type. Save to disk?"
|
||||||
msgstr ""
|
msgstr "Файл содержит неподдерживаемый формат эл. книги. Сохранить на диске?"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_store_dialog_ui.py:59
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_store_dialog_ui.py:59
|
||||||
msgid "Home"
|
msgid "Home"
|
||||||
msgstr ""
|
msgstr "Главная страница"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_store_dialog_ui.py:60
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_store_dialog_ui.py:60
|
||||||
msgid "Reload"
|
msgid "Reload"
|
||||||
msgstr ""
|
msgstr "Перегрузить"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_store_dialog_ui.py:61
|
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_store_dialog_ui.py:61
|
||||||
msgid "%p%"
|
msgid "%p%"
|
||||||
@ -14502,22 +14505,24 @@ msgstr ""
|
|||||||
msgid ""
|
msgid ""
|
||||||
"Changing the authors for several books can take a while. Are you sure?"
|
"Changing the authors for several books can take a while. Are you sure?"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
"Изменение авторов нескольких книг может занять некоторое время. Вы уверены?"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:729
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:729
|
||||||
msgid ""
|
msgid ""
|
||||||
"Changing the metadata for that many books can take a while. Are you sure?"
|
"Changing the metadata for that many books can take a while. Are you sure?"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
"Изменение метаданных нескольких книг может занять некоторое время. Вы уверены?"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:816
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:816
|
||||||
#: /home/kovid/work/calibre/src/calibre/library/database2.py:449
|
#: /home/kovid/work/calibre/src/calibre/library/database2.py:449
|
||||||
msgid "Searches"
|
msgid "Searches"
|
||||||
msgstr ""
|
msgstr "Поиски"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:881
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:881
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:901
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:901
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:910
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:910
|
||||||
msgid "Rename user category"
|
msgid "Rename user category"
|
||||||
msgstr ""
|
msgstr "Переименовать пользовательскую категорию"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:882
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:882
|
||||||
msgid "You cannot use periods in the name when renaming user categories"
|
msgid "You cannot use periods in the name when renaming user categories"
|
||||||
@ -14540,30 +14545,30 @@ msgstr ""
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:48
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:48
|
||||||
msgid "Manage Authors"
|
msgid "Manage Authors"
|
||||||
msgstr ""
|
msgstr "Упорядочить авторов"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:50
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:50
|
||||||
msgid "Manage Series"
|
msgid "Manage Series"
|
||||||
msgstr ""
|
msgstr "Упорядочить серии"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:52
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:52
|
||||||
msgid "Manage Publishers"
|
msgid "Manage Publishers"
|
||||||
msgstr ""
|
msgstr "Упорядочить издателей"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:54
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:54
|
||||||
msgid "Manage Tags"
|
msgid "Manage Tags"
|
||||||
msgstr ""
|
msgstr "Упорядочить тэги"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:56
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:56
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:465
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:465
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:469
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:469
|
||||||
msgid "Manage User Categories"
|
msgid "Manage User Categories"
|
||||||
msgstr "Управление пользовательскими категориями"
|
msgstr "Упорядочить пользовательские категории"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:58
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:58
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:457
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:457
|
||||||
msgid "Manage Saved Searches"
|
msgid "Manage Saved Searches"
|
||||||
msgstr "Управление сохраненными поисками"
|
msgstr "Упорядочить сохраненные поиски"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:66
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:66
|
||||||
msgid "Invalid search restriction"
|
msgid "Invalid search restriction"
|
||||||
@ -14580,17 +14585,17 @@ msgstr "Новая категория"
|
|||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:134
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:134
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:137
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:137
|
||||||
msgid "Delete user category"
|
msgid "Delete user category"
|
||||||
msgstr ""
|
msgstr "Удалить пользовательскую категорию"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:135
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:135
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "%s is not a user category"
|
msgid "%s is not a user category"
|
||||||
msgstr ""
|
msgstr "%s не является пользовательской категорией"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:138
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:138
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "%s contains items. Do you really want to delete it?"
|
msgid "%s contains items. Do you really want to delete it?"
|
||||||
msgstr ""
|
msgstr "%s содержит элементы. Вы действительно хотите её удалить?"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:159
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:159
|
||||||
msgid "Remove category"
|
msgid "Remove category"
|
||||||
@ -14599,16 +14604,16 @@ msgstr "Удалить категорию"
|
|||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:160
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:160
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "User category %s does not exist"
|
msgid "User category %s does not exist"
|
||||||
msgstr ""
|
msgstr "Пользовательская категория %s не существует"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:179
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:179
|
||||||
msgid "Add to user category"
|
msgid "Add to user category"
|
||||||
msgstr ""
|
msgstr "Добавить в пользовательские категории"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:180
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:180
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "A user category %s does not exist"
|
msgid "A user category %s does not exist"
|
||||||
msgstr ""
|
msgstr "Пользовательская категория %s не существует"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:305
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:305
|
||||||
msgid "Find item in tag browser"
|
msgid "Find item in tag browser"
|
||||||
@ -14701,7 +14706,7 @@ msgstr ""
|
|||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:359
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:359
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Add %s to user category"
|
msgid "Add %s to user category"
|
||||||
msgstr ""
|
msgstr "Добавить %s в пользовательские категории"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:372
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:372
|
||||||
#, python-format
|
#, python-format
|
||||||
@ -14711,7 +14716,7 @@ msgstr ""
|
|||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:382
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:382
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Delete search %s"
|
msgid "Delete search %s"
|
||||||
msgstr ""
|
msgstr "Удалить поиск %s"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:387
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:387
|
||||||
#, python-format
|
#, python-format
|
||||||
@ -14721,27 +14726,27 @@ msgstr ""
|
|||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:394
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:394
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Search for %s"
|
msgid "Search for %s"
|
||||||
msgstr ""
|
msgstr "Искать %s"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:399
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:399
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Search for everything but %s"
|
msgid "Search for everything but %s"
|
||||||
msgstr ""
|
msgstr "Искать всё кроме %s"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:411
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:411
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Add sub-category to %s"
|
msgid "Add sub-category to %s"
|
||||||
msgstr ""
|
msgstr "Добавить подкатегорию в %s"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:415
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:415
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Delete user category %s"
|
msgid "Delete user category %s"
|
||||||
msgstr ""
|
msgstr "Удалить пользовательскую категорию %s"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:420
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:420
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Hide category %s"
|
msgid "Hide category %s"
|
||||||
msgstr ""
|
msgstr "Скрыть категорию %s"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:424
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:424
|
||||||
msgid "Show category"
|
msgid "Show category"
|
||||||
@ -14750,12 +14755,12 @@ msgstr "Показать категорию"
|
|||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:434
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:434
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Search for books in category %s"
|
msgid "Search for books in category %s"
|
||||||
msgstr ""
|
msgstr "Искать книги в категории %s"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:440
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:440
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Search for books not in category %s"
|
msgid "Search for books not in category %s"
|
||||||
msgstr ""
|
msgstr "Искать книги НЕ в категории %s"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:449
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:449
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:454
|
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:454
|
||||||
@ -14837,7 +14842,7 @@ msgstr "Извлечь подключенное устройство"
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/ui.py:347
|
#: /home/kovid/work/calibre/src/calibre/gui2/ui.py:347
|
||||||
msgid "Debug mode"
|
msgid "Debug mode"
|
||||||
msgstr ""
|
msgstr "Режим отладки"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/ui.py:348
|
#: /home/kovid/work/calibre/src/calibre/gui2/ui.py:348
|
||||||
#, python-format
|
#, python-format
|
||||||
@ -14875,7 +14880,7 @@ msgstr ""
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/ui.py:630
|
#: /home/kovid/work/calibre/src/calibre/gui2/ui.py:630
|
||||||
msgid "Active jobs"
|
msgid "Active jobs"
|
||||||
msgstr ""
|
msgstr "Активные задания"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/ui.py:698
|
#: /home/kovid/work/calibre/src/calibre/gui2/ui.py:698
|
||||||
msgid ""
|
msgid ""
|
||||||
@ -14898,11 +14903,11 @@ msgstr "Доступно обновление!"
|
|||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/update.py:84
|
#: /home/kovid/work/calibre/src/calibre/gui2/update.py:84
|
||||||
msgid "Show this notification for future updates"
|
msgid "Show this notification for future updates"
|
||||||
msgstr ""
|
msgstr "Показывать сообщение о доступности новой версии (обновления)"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/update.py:89
|
#: /home/kovid/work/calibre/src/calibre/gui2/update.py:89
|
||||||
msgid "&Get update"
|
msgid "&Get update"
|
||||||
msgstr ""
|
msgstr "&Скачать обновление"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/update.py:93
|
#: /home/kovid/work/calibre/src/calibre/gui2/update.py:93
|
||||||
msgid "Update &plugins"
|
msgid "Update &plugins"
|
||||||
@ -14929,11 +14934,11 @@ msgstr ""
|
|||||||
#: /home/kovid/work/calibre/src/calibre/gui2/update.py:187
|
#: /home/kovid/work/calibre/src/calibre/gui2/update.py:187
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "There are %d plugin updates available"
|
msgid "There are %d plugin updates available"
|
||||||
msgstr ""
|
msgstr "Доступны обновления для %d плагинов"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/update.py:191
|
#: /home/kovid/work/calibre/src/calibre/gui2/update.py:191
|
||||||
msgid "Install and configure user plugins"
|
msgid "Install and configure user plugins"
|
||||||
msgstr ""
|
msgstr "Установка и настройка пользовательских плагинов"
|
||||||
|
|
||||||
#: /home/kovid/work/calibre/src/calibre/gui2/viewer/bookmarkmanager.py:43
|
#: /home/kovid/work/calibre/src/calibre/gui2/viewer/bookmarkmanager.py:43
|
||||||
msgid "Edit bookmark"
|
msgid "Edit bookmark"
|
||||||
|
@ -141,7 +141,8 @@ class BaseJob(object):
|
|||||||
def log_file(self):
|
def log_file(self):
|
||||||
if self.log_path:
|
if self.log_path:
|
||||||
return open(self.log_path, 'rb')
|
return open(self.log_path, 'rb')
|
||||||
return cStringIO.StringIO(_('No details available.'))
|
return cStringIO.StringIO(_('No details available.').encode('utf-8',
|
||||||
|
'replace'))
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def details(self):
|
def details(self):
|
||||||
|
Loading…
x
Reference in New Issue
Block a user