Merge from trunk

Charles Haley 2011-07-29 17:15:34 +01:00
commit 3bd07a26d9
41 changed files with 1264 additions and 584 deletions

View File

@ -0,0 +1,53 @@
# -*- coding: utf-8 -*-
from calibre.web.feeds.news import BasicNewsRecipe

class Aksiyon (BasicNewsRecipe):
    title = u'Aksiyon Dergisi'
    __author__ = u'thomass'
    description = 'Haftalık haber dergisi'
    oldest_article = 13
    max_articles_per_feed = 100
    no_stylesheets = True
    #delay = 1
    #use_embedded_content = False
    encoding = 'utf-8'
    publisher = 'Aksiyon'
    category = 'news, haberler,TR,gazete'
    language = 'tr'
    publication_type = 'magazine'
    #extra_css = ' body{ font-family: Verdana,Helvetica,Arial,sans-serif } .introduction{font-weight: bold} .story-feature{display: block; padding: 0; border: 1px solid; width: 40%; font-size: small} .story-feature h2{text-align: center; text-transform: uppercase} '
    #keep_only_tags = [dict(name='font', attrs={'class':['newsDetail','agenda2NewsSpot']}),dict(name='span', attrs={'class':['agenda2Title']}),dict(name='div', attrs={'id':['gallery']})]
    remove_tags = [dict(name='img', attrs={'src':['http://medya.aksiyon.com.tr/aksiyon/images/logo/logo.bmp','/aksiyon/images/template/green/baslik0.gif','mobile/home.jpg']})]
    cover_img_url = 'http://www.aksiyon.com.tr/aksiyon/images/aksiyon/top-page/aksiyon_top_r2_c1.jpg'
    masthead_url = 'http://aksiyon.com.tr/aksiyon/images/aksiyon/top-page/aksiyon_top_r2_c1.jpg'
    remove_empty_feeds = True
    remove_attributes = ['width','height']

    feeds = [
        (u'ANASAYFA', u'http://www.aksiyon.com.tr/aksiyon/rss?sectionId=0'),
        (u'KARAKUTU', u'http://www.aksiyon.com.tr/aksiyon/rss?sectionId=11'),
        (u'EKONOMİ', u'http://www.aksiyon.com.tr/aksiyon/rss?sectionId=35'),
        (u'EKOANALİZ', u'http://www.aksiyon.com.tr/aksiyon/rss?sectionId=284'),
        (u'YAZARLAR', u'http://www.aksiyon.com.tr/aksiyon/rss?sectionId=17'),
        (u'KİTAPLIK', u'http://www.aksiyon.com.tr/aksiyon/rss?sectionId=13'),
        (u'SİNEMA', u'http://www.aksiyon.com.tr/aksiyon/rss?sectionId=14'),
        (u'ARKA PENCERE', u'http://www.aksiyon.com.tr/aksiyon/rss?sectionId=27'),
        (u'DÜNYA', u'http://www.aksiyon.com.tr/aksiyon/rss?sectionId=32'),
        (u'DOSYALAR', u'http://www.aksiyon.com.tr/aksiyon/rss?sectionId=34'),
        (u'KÜLTÜR & SANAT', u'http://www.aksiyon.com.tr/aksiyon/rss?sectionId=12'),
        (u'KAPAK', u'http://www.aksiyon.com.tr/aksiyon/rss?sectionId=26'),
        (u'SPOR', u'http://www.aksiyon.com.tr/aksiyon/rss?sectionId=38'),
        (u'BİLİŞİM - TEKNOLOJİ', u'http://www.aksiyon.com.tr/aksiyon/rss?sectionId=39'),
        (u'3. BOYUT', u'http://www.aksiyon.com.tr/aksiyon/rss?sectionId=172'),
        (u'HAYAT BİLGİSİ', u'http://www.aksiyon.com.tr/aksiyon/rss?sectionId=283'),
        (u'İŞ DÜNYASI', u'http://www.aksiyon.com.tr/aksiyon/rss?sectionId=283'),
    ]

    def print_version(self, url):
        return url.replace('http://www.aksiyon.com.tr/aksiyon/newsDetail_getNewsById.action?load=detay&', 'http://www.aksiyon.com.tr/aksiyon/mobile_detailn.action?')
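A quick sketch of what the print_version hook above does: it swaps the desktop article endpoint for the mobile one while keeping the rest of the query string (the newsId value below is made up for illustration):

    url = 'http://www.aksiyon.com.tr/aksiyon/newsDetail_getNewsById.action?load=detay&newsId=30232'
    url.replace('http://www.aksiyon.com.tr/aksiyon/newsDetail_getNewsById.action?load=detay&',
                'http://www.aksiyon.com.tr/aksiyon/mobile_detailn.action?')
    # -> 'http://www.aksiyon.com.tr/aksiyon/mobile_detailn.action?newsId=30232'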

View File

@ -0,0 +1,17 @@
__copyright__ = '2011, Pablo Aldama <pabloaldama at gmail.com>'

from calibre.web.feeds.news import BasicNewsRecipe

class AdvancedUserRecipe1311839910(BasicNewsRecipe):
    title = u'Caros Amigos'
    oldest_article = 20
    max_articles_per_feed = 100
    language = 'pt_BR'
    __author__ = 'Pablo Aldama'

    feeds = [(u'Caros Amigos', u'http://carosamigos.terra.com.br/index/index.php?format=feed&type=rss')]

    keep_only_tags = [dict(name='div', attrs={'class':['blog']})
        ,dict(name='div', attrs={'class':['blogcontent']})
    ]
    remove_tags = [dict(name='div', attrs={'class':'addtoany'})]

View File

@ -0,0 +1,40 @@
import re
from lxml.html import parse
from calibre.web.feeds.news import BasicNewsRecipe

class Counterpunch(BasicNewsRecipe):
    '''
    Parses counterpunch.com for articles
    '''
    title = 'Counterpunch'
    description = 'Daily political opinion from www.Counterpunch.com'
    language = 'en'
    __author__ = 'O. Emmerson'
    keep_only_tags = [dict(name='td', attrs={'width': '522'})]
    max_articles_per_feed = 10

    def parse_index(self):
        feeds = []
        title, url = 'Counterpunch', 'http://www.counterpunch.com'
        articles = self.parse_page(url)
        if articles:
            feeds.append((title, articles))
        return feeds

    def parse_page(self, url):
        parsed_page = parse(url).getroot()
        articles = []
        unwanted_text = re.compile('Website\ of\ the|I\ urge\ you|Subscribe\ now|DONATE|\@asis\.com|donation\ button|click\ over\ to\ our')
        parsed_articles = [a for a in parsed_page.cssselect("html>body>table tr>td>p[class='style2']") if not unwanted_text.search(a.text_content())]
        for art in parsed_articles:
            try:
                author = art.text
                title = art.cssselect("a")[0].text + ' by {0}'.format(author)
                art_url = 'http://www.counterpunch.com/' + art.cssselect("a")[0].attrib['href']
                articles.append({'title': title, 'url': art_url})
            except Exception as e:
                e
                #print('Handler Error: ', e, 'title :', a.text_content())
                pass
        return articles
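For orientation, BasicNewsRecipe.parse_index is expected to return a list of (feed title, article list) pairs, each article being a plain dict; a minimal sketch of what the code above ends up producing (the headline and URL are placeholders, not real articles):

    feeds = [
        ('Counterpunch', [
            {'title': 'Some headline by Some Author',                  # placeholder
             'url': 'http://www.counterpunch.com/some-article.html'},  # placeholder
        ]),
    ]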

recipes/dnevnik_mk.recipe Normal file
View File

@ -0,0 +1,98 @@
#!/usr/bin/env python

__author__    = 'Darko Spasovski'
__license__   = 'GPL v3'
__copyright__ = '2011, Darko Spasovski <darko.spasovski at gmail.com>'

'''
dnevnik.com.mk
'''

import re
import datetime
from calibre.web.feeds.news import BasicNewsRecipe
from calibre import browser
from calibre.ebooks.BeautifulSoup import BeautifulSoup

class Dnevnik(BasicNewsRecipe):

    INDEX = 'http://www.dnevnik.com.mk'
    __author__ = 'Darko Spasovski'
    title = 'Dnevnik - mk'
    description = 'Daily Macedonian newspaper'
    masthead_url = 'http://www.dnevnik.com.mk/images/re-logo.gif'
    language = 'mk'
    publication_type = 'newspaper'
    category = 'news, Macedonia'
    max_articles_per_feed = 100
    remove_javascript = True
    no_stylesheets = True
    use_embedded_content = False

    preprocess_regexps = [(re.compile(i[0], re.IGNORECASE | re.DOTALL), i[1]) for i in
        [
            ## Remove anything before the start of the article.
            (r'<body.*?<\?xml version=\"1.0\"\?><!--Article start-->', lambda match: '<body>'),
            ## Remove anything after the end of the article.
            (r'<!--Article end.*?</body>', lambda match : '</body>'),
        ]
    ]

    extra_css = """
        body{font-family: Arial,Helvetica,sans-serif}
        .WB_DNEVNIK_Naslov{FONT-WEIGHT: bold; FONT-SIZE: 18px; FONT-FAMILY: Arial, Verdana, Tahoma; TEXT-DECORATION: none}
    """

    conversion_options = {
        'comment'          : description,
        'tags'             : category,
        'language'         : language,
        'linearize_tables' : True
    }

    def parse_index(self):
        datum = datetime.datetime.today().strftime('%d.%m.%Y')
        soup = self.index_to_soup(self.INDEX + '/default.asp?section=arhiva&arhDatum=' + datum)
        feeds = []
        for section in soup.findAll('td', attrs={'class':'WB_DNEVNIK_ArhivaFormTitle'}):
            sectionTitle = section.contents[0].string
            if sectionTitle.lower().startswith('online'):
                # Skip online articles
                continue
            containerTable = section.findPrevious(name='table').findNextSibling(name='table')
            if containerTable==None:
                print 'No container table found - page layout may have been changed.'
                continue
            articles = []
            for article in containerTable.findAll('a', attrs={'class': 'WB_DNEVNIK_ArhivaFormText'}):
                title = self.tag_to_string(article, use_alt=True).strip()
                articles.append({'title': title, 'url':'http://www.dnevnik.com.mk/' + article['href'], 'description':'', 'date':''})
            if articles:
                feeds.append((sectionTitle, articles))
        return sorted(feeds, key=lambda section: self.get_weight(section))

    def get_weight(self, section):
        """
        Returns 'weight' of a section.
        Used for sorting the sections based on their 'natural' order in the printed edition.
        """
        natural_order = { u'во фокусот': 1, u'актуелно': 2, u'економија': 3,
                          u'отворена': 4, u'свет': 5, u'интервју': 6, u'џубокс': 7,
                          u'репортажа': 8, u'наш туризам': 9, u'живот': 10,
                          u'автомобилизам': 11, u'спорт': 12, u'омнибус': 13 }
        if section[0].string.lower() in natural_order:
            return natural_order[section[0].string.lower()]
        else:
            return 999  # section names not on the list go to the bottom

    def get_cover_url(self):
        datum = datetime.datetime.today().strftime('%d.%m.%Y')
        soup = self.index_to_soup(self.INDEX + '/default.asp?section=arhiva&arhDatum=' + datum)
        anchor = soup.find('a', attrs={'class': 'WB_DNEVNIK_MoreLink'})
        if anchor != None:
            raw = browser().open_novisit(self.INDEX + '/' + anchor['href']).read()
            cover_soup = BeautifulSoup(raw)
            url = cover_soup.find('div', attrs={'class':'WB_DNEVNIK_Datum2'}).findNext('img')['src']
            return self.INDEX + '/' + url
        return ''

View File

@ -6,10 +6,10 @@ __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
economist.com
'''
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.ebooks.BeautifulSoup import Tag, NavigableString
from collections import OrderedDict
import string, time, re
import time, re
class Economist(BasicNewsRecipe):
@ -22,7 +22,8 @@ class Economist(BasicNewsRecipe):
' perspective. Best downloaded on Friday mornings (GMT)')
extra_css = '.headline {font-size: x-large;} \n h2 { font-size: small; } \n h1 { font-size: medium; }'
oldest_article = 7.0
cover_url = 'http://www.economist.com/images/covers/currentcoverus_large.jpg'
cover_url = 'http://media.economist.com/sites/default/files/imagecache/print-cover-thumbnail/print-covers/currentcoverus_large.jpg'
#cover_url = 'http://www.economist.com/images/covers/currentcoverus_large.jpg'
remove_tags = [
dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent']),
dict(attrs={'class':['dblClkTrk', 'ec-article-info', 'share_inline_header']}),
@ -67,52 +68,54 @@ class Economist(BasicNewsRecipe):
return self.economist_parse_index()
def economist_parse_index(self):
soup = BeautifulSoup(self.browser.open(self.INDEX).read(),
convertEntities=BeautifulSoup.HTML_ENTITIES)
index_started = False
feeds = {}
ans = []
key = None
for tag in soup.findAll(['h1', 'h2']):
text = ''.join(tag.findAll(text=True))
if tag.name in ('h1', 'h2') and 'Classified ads' in text:
break
if tag.name == 'h1':
if 'The world this week' in text or 'The world this year' in text:
index_started = True
if not index_started:
soup = self.index_to_soup(self.INDEX)
feeds = OrderedDict()
for section in soup.findAll(attrs={'class':lambda x: x and 'section' in
x}):
h4 = section.find('h4')
if h4 is None:
continue
text = string.capwords(text)
if text not in feeds.keys():
feeds[text] = []
if text not in ans:
ans.append(text)
key = text
section_title = self.tag_to_string(h4).strip()
if not section_title:
continue
if key is None:
self.log('Found section: %s'%section_title)
articles = []
for h5 in section.findAll('h5'):
article_title = self.tag_to_string(h5).strip()
if not article_title:
continue
a = tag.find('a', href=True)
data = h5.findNextSibling(attrs={'class':'article'})
if data is None: continue
a = data.find('a', href=True)
if a is None: continue
url = a['href']
if url.startswith('/'): url = 'http://www.economist.com'+url
url += '/print'
article_title += ': %s'%self.tag_to_string(a).strip()
articles.append({'title':article_title, 'url':url,
'description':'', 'date':''})
if not articles:
# We have last or first section
for art in section.findAll(attrs={'class':'article'}):
a = art.find('a', href=True)
if a is not None:
url = a['href']
id_ = re.search(r'story_id=(\d+)', url).group(1)
url = 'http://www.economist.com/node/%s/print'%id_
if url.startswith('Printer'):
url = '/'+url
if url.startswith('/'):
url = 'http://www.economist.com' + url
try:
subtitle = tag.previousSibling.contents[0].contents[0]
text = subtitle + ': ' + text
except:
pass
article = dict(title=text,
url = url,
description='', content='', date='')
feeds[key].append(article)
if url.startswith('/'): url = 'http://www.economist.com'+url
url += '/print'
title = self.tag_to_string(a)
if title:
articles.append({'title':title, 'url':url,
'description':'', 'date':''})
ans = [(key, feeds[key]) for key in ans if feeds.has_key(key)]
if articles:
feeds[section_title] = articles
ans = [(key, val) for key, val in feeds.iteritems()]
if not ans:
raise Exception('Could not find any articles. Has your subscription expired?')
raise Exception('Could not find any articles, either the '
'economist.com server is having trouble and you should '
'try later or the website format has changed and the '
'recipe needs to be updated.')
return ans
def eco_find_image_tables(self, soup):

View File

@ -16,7 +16,8 @@ class Economist(BasicNewsRecipe):
' Much slower than the print edition based version.')
extra_css = '.headline {font-size: x-large;} \n h2 { font-size: small; } \n h1 { font-size: medium; }'
oldest_article = 7.0
cover_url = 'http://www.economist.com/images/covers/currentcoverus_large.jpg'
cover_url = 'http://media.economist.com/sites/default/files/imagecache/print-cover-thumbnail/print-covers/currentcoverus_large.jpg'
#cover_url = 'http://www.economist.com/images/covers/currentcoverus_large.jpg'
remove_tags = [
dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent']),
dict(attrs={'class':['dblClkTrk', 'ec-article-info',

Binary file not shown (new image, 894 B).

View File

@ -18,6 +18,7 @@ class IrishTimes(BasicNewsRecipe):
oldest_article = 1.0
max_articles_per_feed = 100
no_stylesheets = True
simultaneous_downloads= 5
r = re.compile('.*(?P<url>http:\/\/(www.irishtimes.com)|(rss.feedsportal.com\/c)\/.*\.html?).*')
remove_tags = [dict(name='div', attrs={'class':'footer'})]
@ -25,17 +26,17 @@ class IrishTimes(BasicNewsRecipe):
feeds = [
('Frontpage', 'http://www.irishtimes.com/feeds/rss/newspaper/index.rss'),
('Ireland', 'http://rss.feedsportal.com/c/851/f/10845/index.rss'),
('World', 'http://rss.feedsportal.com/c/851/f/10846/index.rss'),
('Finance', 'http://rss.feedsportal.com/c/851/f/10847/index.rss'),
('Features', 'http://rss.feedsportal.com/c/851/f/10848/index.rss'),
('Sport', 'http://rss.feedsportal.com/c/851/f/10849/index.rss'),
('Opinion', 'http://rss.feedsportal.com/c/851/f/10850/index.rss'),
('Letters', 'http://rss.feedsportal.com/c/851/f/10851/index.rss'),
('Ireland', 'http://www.irishtimes.com/feeds/rss/newspaper/ireland.rss'),
('World', 'http://www.irishtimes.com/feeds/rss/newspaper/world.rss'),
('Finance', 'http://www.irishtimes.com/feeds/rss/newspaper/finance.rss'),
('Features', 'http://www.irishtimes.com/feeds/rss/newspaper/features.rss'),
('Sport', 'http://www.irishtimes.com/feeds/rss/newspaper/sport.rss'),
('Opinion', 'http://www.irishtimes.com/feeds/rss/newspaper/opinion.rss'),
('Letters', 'http://www.irishtimes.com/feeds/rss/newspaper/letters.rss'),
('Magazine', 'http://www.irishtimes.com/feeds/rss/newspaper/magazine.rss'),
('Health', 'http://rss.feedsportal.com/c/851/f/10852/index.rss'),
('Education & Parenting', 'http://rss.feedsportal.com/c/851/f/10853/index.rss'),
('Motors', 'http://rss.feedsportal.com/c/851/f/10854/index.rss'),
('Health', 'http://www.irishtimes.com/feeds/rss/newspaper/health.rss'),
('Education & Parenting', 'http://www.irishtimes.com/feeds/rss/newspaper/education.rss'),
('Motors', 'http://www.irishtimes.com/feeds/rss/newspaper/motors.rss'),
('An Teanga Bheo', 'http://www.irishtimes.com/feeds/rss/newspaper/anteangabheo.rss'),
('Commercial Property', 'http://www.irishtimes.com/feeds/rss/newspaper/commercialproperty.rss'),
('Science Today', 'http://www.irishtimes.com/feeds/rss/newspaper/sciencetoday.rss'),
@ -49,10 +50,16 @@ class IrishTimes(BasicNewsRecipe):
def print_version(self, url):
if url.count('rss.feedsportal.com'):
u = url.replace('0Bhtml/story01.htm','_pf0Bhtml/story01.htm')
#u = url.replace('0Bhtml/story01.htm','_pf0Bhtml/story01.htm')
u = url.find('irishtimes')
u = 'http://www.irishtimes.com' + url[u + 12:]
u = u.replace('0C', '/')
u = u.replace('A', '')
u = u.replace('0Bhtml/story01.htm', '_pf.html')
else:
u = url.replace('.html','_pf.html')
return u
def get_article_url(self, article):
return article.link

View File

@ -0,0 +1,52 @@
# -*- coding: utf-8 -*-

__license__   = 'GPL v3'
__copyright__ = 'Marcin Urban 2011'

import re
from calibre.web.feeds.recipes import BasicNewsRecipe

class recipeMagic(BasicNewsRecipe):
    title = 'National Geographic PL'
    __author__ = 'Marcin Urban 2011'
    description = 'legenda wśród magazynów z historią sięgającą 120 lat'
    cover_url = 'http://www.guj.pl/var/guj/storage/images/media/nasze_magazyny/national_geographic/logo/ng_logo/2606-1-pol-PL/ng_logo.jpg'
    oldest_article = 7
    max_articles_per_feed = 100
    no_stylesheets = True
    #delay = 1
    use_embedded_content = False
    encoding = 'utf8'
    publisher = 'G+J Gruner+Jahr Polska'
    category = 'news, PL,'
    language = 'pl'
    publication_type = 'newsportal'
    extra_css = ''' body {font-family: verdana, arial, helvetica, geneva, sans-serif ;}
        h1{text-align: center;}
        h2{font-size: medium; font-weight: bold;}
        .authordate {font-size: small; color: #696969;}
        p.lead {font-weight: bold; text-align: center;}
        .fot{font-size: x-small; color: #666666;} '''

    preprocess_regexps = [(re.compile(r'<!--.*?-->', re.DOTALL), lambda m: '')]

    conversion_options = {
        'comments'         : description
        ,'tags'            : category
        ,'language'        : language
        ,'publisher'       : publisher
        ,'linearize_tables': True
    }

    remove_tags = [
        dict(name='div', attrs={'class':'add_inf'}),
        dict(name='div', attrs={'class':'add_f'}),
    ]

    remove_attributes = ['width','height']

    feeds = [
        ('National Geographic PL', 'http://www.national-geographic.pl/rss/'),
    ]

    def print_version(self, url):
        return url.replace('artykuly0Cpokaz', 'drukuj-artykul')

recipes/plus_info.recipe Normal file
View File

@ -0,0 +1,47 @@
#!/usr/bin/env python

__author__    = 'Darko Spasovski'
__license__   = 'GPL v3'
__copyright__ = '2011, Darko Spasovski <darko.spasovski at gmail.com>'

'''
www.plusinfo.mk
'''

from calibre.web.feeds.news import BasicNewsRecipe

class PlusInfo(BasicNewsRecipe):

    INDEX = 'www.plusinfo.mk'
    title = u'+info'
    __author__ = 'Darko Spasovski'
    description = 'Macedonian news portal'
    publication_type = 'newsportal'
    category = 'news, Macedonia'
    language = 'mk'

    masthead_url = 'http://www.plusinfo.mk/style/images/logo.jpg'
    remove_javascript = True
    no_stylesheets = True
    use_embedded_content = False
    remove_empty_feeds = True
    oldest_article = 1
    max_articles_per_feed = 100

    keep_only_tags = [dict(name='div', attrs={'class': 'vest'})]
    remove_tags = [dict(name='div', attrs={'class':['komentari_holder', 'objava']})]

    feeds = [(u'Македонија', u'http://www.plusinfo.mk/rss/makedonija'),
             (u'Бизнис', u'http://www.plusinfo.mk/rss/biznis'),
             (u'Скопје', u'http://www.plusinfo.mk/rss/skopje'),
             (u'Култура', u'http://www.plusinfo.mk/rss/kultura'),
             (u'Свет', u'http://www.plusinfo.mk/rss/svet'),
             (u'Сцена', u'http://www.plusinfo.mk/rss/scena'),
             (u'Здравје', u'http://www.plusinfo.mk/rss/zdravje'),
             (u'Магазин', u'http://www.plusinfo.mk/rss/magazin'),
             (u'Спорт', u'http://www.plusinfo.mk/rss/sport')]

    # uncomment the following block if you want the print version (note: it lacks photos)
    # def print_version(self,url):
    #     segments = url.split('/')
    #     printURL = '/'.join(segments[0:3]) + '/print/' + '/'.join(segments[5:])
    #     return printURL

View File

@ -31,7 +31,7 @@ defaults.
# Set the use_series_auto_increment_tweak_when_importing tweak to True to
# use the above values when importing/adding books. If this tweak is set to
# False (the default) then the series number will be set to 1 if it is not
# explicitly set to something else during the import. If set to True, then the
# explicitly set to during the import. If set to True, then the
# series index will be set according to the series_index_auto_increment setting.
# Note that the use_series_auto_increment_tweak_when_importing tweak is used
# only when a value is not provided during import. If the importing regular

View File

@ -373,7 +373,7 @@ class Win32Freeze(Command, WixMixIn):
src = self.j(self.src_root, 'setup', 'installer', 'windows',
'portable.c')
obj = self.j(self.obj_dir, self.b(src)+'.obj')
cflags = '/c /EHsc /MT /W3 /Ox /nologo /D_UNICODE'.split()
cflags = '/c /EHsc /MT /W3 /Ox /nologo /D_UNICODE /DUNICODE'.split()
if self.newer(obj, [src]):
self.info('Compiling', obj)
@ -386,6 +386,7 @@ class Win32Freeze(Command, WixMixIn):
cmd = [msvc.linker] + ['/INCREMENTAL:NO', '/MACHINE:X86',
'/LIBPATH:'+self.obj_dir, '/SUBSYSTEM:WINDOWS',
'/RELEASE',
'/ENTRY:wWinMainCRTStartup',
'/OUT:'+exe, self.embed_resources(exe),
obj, 'User32.lib']
self.run_builder(cmd)

View File

@ -2,15 +2,21 @@
#define UNICODE
#endif
#ifndef _UNICODE
#define _UNICODE
#endif
#include <windows.h>
#include <tchar.h>
#include <wchar.h>
#include <stdio.h>
#define BUFSIZE 4096
void show_error(LPCTSTR msg) {
MessageBeep(MB_ICONERROR);
MessageBox(NULL, msg, TEXT("Error"), MB_OK|MB_ICONERROR);
MessageBox(NULL, msg, _T("Error"), MB_OK|MB_ICONERROR);
}
void show_detailed_error(LPCTSTR preamble, LPCTSTR msg, int code) {
@ -20,7 +26,7 @@ void show_detailed_error(LPCTSTR preamble, LPCTSTR msg, int code) {
_sntprintf_s(buf,
LocalSize(buf) / sizeof(TCHAR), _TRUNCATE,
TEXT("%s\r\n %s (Error Code: %d)\r\n"),
_T("%s\r\n %s (Error Code: %d)\r\n"),
preamble, msg, code);
show_error(buf);
@ -32,7 +38,7 @@ void show_last_error_crt(LPCTSTR preamble) {
int err = 0;
_get_errno(&err);
_wcserror_s(buf, BUFSIZE, err);
_tcserror_s(buf, BUFSIZE, err);
show_detailed_error(preamble, buf, err);
}
@ -57,7 +63,7 @@ void show_last_error(LPCTSTR preamble) {
LPTSTR get_app_dir() {
LPTSTR buf, buf2, buf3;
DWORD sz;
TCHAR drive[4] = TEXT("\0\0\0");
TCHAR drive[4] = _T("\0\0\0");
errno_t err;
buf = (LPTSTR)calloc(BUFSIZE, sizeof(TCHAR));
@ -67,18 +73,18 @@ LPTSTR get_app_dir() {
sz = GetModuleFileName(NULL, buf, BUFSIZE);
if (sz == 0 || sz > BUFSIZE-1) {
show_error(TEXT("Failed to get path to calibre-portable.exe"));
show_error(_T("Failed to get path to calibre-portable.exe"));
ExitProcess(1);
}
err = _tsplitpath_s(buf, drive, 4, buf2, BUFSIZE, NULL, 0, NULL, 0);
if (err != 0) {
show_last_error_crt(TEXT("Failed to split path to calibre-portable.exe"));
show_last_error_crt(_T("Failed to split path to calibre-portable.exe"));
ExitProcess(1);
}
_sntprintf_s(buf3, BUFSIZE-1, _TRUNCATE, TEXT("%s%s"), drive, buf2);
_sntprintf_s(buf3, BUFSIZE-1, _TRUNCATE, _T("%s%s"), drive, buf2);
free(buf); free(buf2);
return buf3;
}
@ -90,18 +96,18 @@ void launch_calibre(LPCTSTR exe, LPCTSTR config_dir, LPCTSTR library_dir) {
BOOL fSuccess;
TCHAR cmdline[BUFSIZE];
if (! SetEnvironmentVariable(TEXT("CALIBRE_CONFIG_DIRECTORY"), config_dir)) {
show_last_error(TEXT("Failed to set environment variables"));
if (! SetEnvironmentVariable(_T("CALIBRE_CONFIG_DIRECTORY"), config_dir)) {
show_last_error(_T("Failed to set environment variables"));
ExitProcess(1);
}
if (! SetEnvironmentVariable(TEXT("CALIBRE_PORTABLE_BUILD"), exe)) {
show_last_error(TEXT("Failed to set environment variables"));
if (! SetEnvironmentVariable(_T("CALIBRE_PORTABLE_BUILD"), exe)) {
show_last_error(_T("Failed to set environment variables"));
ExitProcess(1);
}
dwFlags = CREATE_UNICODE_ENVIRONMENT | CREATE_NEW_PROCESS_GROUP;
_sntprintf_s(cmdline, BUFSIZE, _TRUNCATE, TEXT(" \"--with-library=%s\""), library_dir);
_sntprintf_s(cmdline, BUFSIZE, _TRUNCATE, _T(" \"--with-library=%s\""), library_dir);
ZeroMemory( &si, sizeof(si) );
si.cb = sizeof(si);
@ -119,7 +125,7 @@ void launch_calibre(LPCTSTR exe, LPCTSTR config_dir, LPCTSTR library_dir) {
);
if (fSuccess == 0) {
show_last_error(TEXT("Failed to launch the calibre program"));
show_last_error(_T("Failed to launch the calibre program"));
}
// Close process and thread handles.
@ -137,9 +143,9 @@ int WINAPI wWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, PWSTR pCmdLine
library_dir = (LPTSTR)calloc(BUFSIZE, sizeof(TCHAR));
exe = (LPTSTR)calloc(BUFSIZE, sizeof(TCHAR));
_sntprintf_s(config_dir, BUFSIZE, _TRUNCATE, TEXT("%sCalibre Settings"), app_dir);
_sntprintf_s(exe, BUFSIZE, _TRUNCATE, TEXT("%sCalibre\\calibre.exe"), app_dir);
_sntprintf_s(library_dir, BUFSIZE, _TRUNCATE, TEXT("%sCalibre Library"), app_dir);
_sntprintf_s(config_dir, BUFSIZE, _TRUNCATE, _T("%sCalibre Settings"), app_dir);
_sntprintf_s(exe, BUFSIZE, _TRUNCATE, _T("%sCalibre\\calibre.exe"), app_dir);
_sntprintf_s(library_dir, BUFSIZE, _TRUNCATE, _T("%sCalibre Library"), app_dir);
launch_calibre(exe, config_dir, library_dir);

View File

@ -570,7 +570,7 @@ from calibre.devices.teclast.driver import (TECLAST_K3, NEWSMY, IPAPYRUS,
from calibre.devices.sne.driver import SNE
from calibre.devices.misc import (PALMPRE, AVANT, SWEEX, PDNOVEL,
GEMEI, VELOCITYMICRO, PDNOVEL_KOBO, LUMIREAD, ALURATEK_COLOR,
TREKSTOR, EEEREADER, NEXTBOOK, ADAM, MOOVYBOOK)
TREKSTOR, EEEREADER, NEXTBOOK, ADAM, MOOVYBOOK, COBY)
from calibre.devices.folder_device.driver import FOLDER_DEVICE_FOR_CONFIG
from calibre.devices.kobo.driver import KOBO
from calibre.devices.bambook.driver import BAMBOOK
@ -705,7 +705,7 @@ plugins += [
EEEREADER,
NEXTBOOK,
ADAM,
MOOVYBOOK,
MOOVYBOOK, COBY,
ITUNES,
BOEYE_BEX,
BOEYE_BDX,
@ -1228,17 +1228,6 @@ class StoreEbookscomStore(StoreBase):
formats = ['EPUB', 'LIT', 'MOBI', 'PDF']
affiliate = True
#class StoreEPubBuyDEStore(StoreBase):
# name = 'EPUBBuy DE'
# author = 'Charles Haley'
# description = u'Bei EPUBBuy.com finden Sie ausschliesslich eBooks im weitverbreiteten EPUB-Format und ohne DRM. So haben Sie die freie Wahl, wo Sie Ihr eBook lesen: Tablet, eBook-Reader, Smartphone oder einfach auf Ihrem PC. So macht eBook-Lesen Spaß!'
# actual_plugin = 'calibre.gui2.store.stores.epubbuy_de_plugin:EPubBuyDEStore'
#
# drm_free_only = True
# headquarters = 'DE'
# formats = ['EPUB']
# affiliate = True
class StoreEBookShoppeUKStore(StoreBase):
name = 'ebookShoppe UK'
author = u'Charles Haley'
@ -1266,16 +1255,7 @@ class StoreEKnigiStore(StoreBase):
headquarters = 'BG'
formats = ['EPUB', 'PDF', 'HTML']
#affiliate = True
class StoreEpubBudStore(StoreBase):
name = 'ePub Bud'
description = 'Well, it\'s pretty much just "YouTube for Children\'s eBooks. A not-for-profit organization devoted to brining self published childrens books to the world.'
actual_plugin = 'calibre.gui2.store.stores.epubbud_plugin:EpubBudStore'
drm_free_only = True
headquarters = 'US'
formats = ['EPUB']
affiliate = True
class StoreFeedbooksStore(StoreBase):
name = 'Feedbooks'
@ -1311,6 +1291,7 @@ class StoreGoogleBooksStore(StoreBase):
headquarters = 'US'
formats = ['EPUB', 'PDF', 'TXT']
affiliate = True
class StoreGutenbergStore(StoreBase):
name = 'Project Gutenberg'
@ -1394,6 +1375,17 @@ class StoreOReillyStore(StoreBase):
headquarters = 'US'
formats = ['APK', 'DAISY', 'EPUB', 'MOBI', 'PDF']
class StoreOzonRUStore(StoreBase):
name = 'OZON.ru'
description = u'ebooks from OZON.ru'
actual_plugin = 'calibre.gui2.store.stores.ozon_ru_plugin:OzonRUStore'
author = 'Roman Mukhin'
drm_free_only = True
headquarters = 'RU'
formats = ['TXT', 'PDF', 'DJVU', 'RTF', 'DOC', 'JAR', 'FB2']
affiliate = True
class StorePragmaticBookshelfStore(StoreBase):
name = 'Pragmatic Bookshelf'
description = u'The Pragmatic Bookshelf\'s collection of programming and tech books avaliable as ebooks.'
@ -1491,10 +1483,8 @@ plugins += [
StoreEbookNLStore,
StoreEbookscomStore,
StoreEBookShoppeUKStore,
# StoreEPubBuyDEStore,
StoreEHarlequinStore,
StoreEKnigiStore,
StoreEpubBudStore,
StoreFeedbooksStore,
StoreFoylesUKStore,
StoreGandalfStore,
@ -1508,6 +1498,7 @@ plugins += [
StoreNextoStore,
StoreOpenBooksStore,
StoreOReillyStore,
StoreOzonRUStore,
StorePragmaticBookshelfStore,
StoreSmashwordsStore,
StoreVirtualoStore,

View File

@ -351,3 +351,29 @@ class MOOVYBOOK(USBMS):
    def get_main_ebook_dir(self, for_upload=False):
        return 'Books' if for_upload else self.EBOOK_DIR_MAIN

class COBY(USBMS):

    name = 'COBY MP977 device interface'
    gui_name = 'COBY'
    description = _('Communicate with the COBY')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']

    # Ordered list of supported formats
    FORMATS = ['epub', 'pdf']

    VENDOR_ID = [0x1e74]
    PRODUCT_ID = [0x7121]
    BCD = [0x02]

    VENDOR_NAME = 'USB_2.0'
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'MP977_DRIVER'

    EBOOK_DIR_MAIN = ''
    SUPPORTS_SUB_DIRS = False

    def get_carda_ebook_dir(self, for_upload=False):
        if for_upload:
            return 'eBooks'
        return self.EBOOK_DIR_CARD_A

View File

@ -24,10 +24,9 @@ XPath = partial(etree.XPath, namespaces=NAMESPACES)
tostring = partial(etree.tostring, method='text', encoding=unicode)
def get_metadata(stream):
""" Return fb2 metadata as a L{MetaInformation} object """
''' Return fb2 metadata as a L{MetaInformation} object '''
root = _get_fbroot(stream)
book_title = _parse_book_title(root)
authors = _parse_authors(root)
@ -181,6 +180,7 @@ def _parse_series(root, mi):
def _parse_isbn(root, mi):
# some people try to put several isbn in this field, but it is not allowed. try to stick to the 1-st one in this case
isbn = XPath('normalize-space(//fb2:publish-info/fb2:isbn/text())')(root)
if isbn:
# some people try to put several isbn in this field, but it is not allowed. try to stick to the 1-st one in this case
if ',' in isbn:
isbn = isbn[:isbn.index(',')]
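The comment above refers to files that cram several comma-separated ISBNs into one field; only the first is kept. An illustrative value (not from a real book):

    # '5-17-038308-5, 978-5-17-038308-4'  ->  '5-17-038308-5'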
@ -232,4 +232,3 @@ def _get_fbroot(stream):
raw = xml_to_unicode(raw, strip_encoding_pats=True)[0]
root = etree.fromstring(raw, parser=parser)
return root

View File

@ -1030,8 +1030,10 @@ class OPF(object): # {{{
attrib = attrib or {}
attrib['name'] = 'calibre:' + name
name = '{%s}%s' % (self.NAMESPACES['opf'], 'meta')
nsmap = dict(self.NAMESPACES)
del nsmap['opf']
elem = etree.SubElement(self.metadata, name, attrib=attrib,
nsmap=self.NAMESPACES)
nsmap=nsmap)
elem.tail = '\n'
return elem

View File

@ -22,6 +22,7 @@ from calibre.ebooks.metadata.book.base import Metadata
from calibre.utils.date import utc_tz, as_utc
from calibre.utils.html2text import html2text
from calibre.utils.icu import lower
from calibre.utils.date import UNDEFINED_DATE
# Download worker {{{
class Worker(Thread):
@ -490,6 +491,8 @@ def identify(log, abort, # {{{
max_tags = msprefs['max_tags']
for r in results:
r.tags = r.tags[:max_tags]
if getattr(r.pubdate, 'year', 2000) <= UNDEFINED_DATE.year:
r.pubdate = None
if msprefs['swap_author_names']:
for r in results:

View File

@ -73,7 +73,7 @@ class PalmDB(object):
self.ident = self.type + self.creator
if self.ident not in (b'BOOKMOBI', b'TEXTREAD'):
raise ValueError('Unknown book ident: %r'%self.ident)
self.uid_seed = self.raw[68:72]
self.uid_seed, = struct.unpack(b'>I', self.raw[68:72])
self.next_rec_list_id = self.raw[72:76]
self.number_of_records, = struct.unpack(b'>H', self.raw[76:78])
@ -182,6 +182,7 @@ class EXTHHeader(object):
self.records = []
for i in xrange(self.count):
pos = self.read_record(pos)
self.records.sort(key=lambda x:x.type)
def read_record(self, pos):
type_, length = struct.unpack(b'>II', self.raw[pos:pos+8])
@ -290,7 +291,12 @@ class MOBIHeader(object): # {{{
(self.fcis_number, self.fcis_count, self.flis_number,
self.flis_count) = struct.unpack(b'>IIII',
self.raw[200:216])
self.unknown6 = self.raw[216:240]
self.unknown6 = self.raw[216:224]
self.srcs_record_index = struct.unpack(b'>I',
self.raw[224:228])[0]
self.num_srcs_records = struct.unpack(b'>I',
self.raw[228:232])[0]
self.unknown7 = self.raw[232:240]
self.extra_data_flags = struct.unpack(b'>I',
self.raw[240:244])[0]
self.has_multibytes = bool(self.extra_data_flags & 0b1)
@ -339,7 +345,7 @@ class MOBIHeader(object): # {{{
ans.append('Huffman record offset: %d'%self.huffman_record_offset)
ans.append('Huffman record count: %d'%self.huffman_record_count)
ans.append('Unknown2: %r'%self.unknown2)
ans.append('EXTH flags: %r (%s)'%(self.exth_flags, self.has_exth))
ans.append('EXTH flags: %s (%s)'%(bin(self.exth_flags)[2:], self.has_exth))
if self.has_drm_data:
ans.append('Unknown3: %r'%self.unknown3)
ans.append('DRM Offset: %s'%self.drm_offset)
@ -356,6 +362,9 @@ class MOBIHeader(object): # {{{
ans.append('FLIS number: %d'% self.flis_number)
ans.append('FLIS count: %d'% self.flis_count)
ans.append('Unknown6: %r'% self.unknown6)
ans.append('SRCS record index: %d'%self.srcs_record_index)
ans.append('Number of SRCS records?: %d'%self.num_srcs_records)
ans.append('Unknown7: %r'%self.unknown7)
ans.append(('Extra data flags: %s (has multibyte: %s) '
'(has indexing: %s) (has uncrossable breaks: %s)')%(
bin(self.extra_data_flags), self.has_multibytes,
@ -416,12 +425,7 @@ class IndexHeader(object): # {{{
if self.index_encoding == 'unknown':
raise ValueError(
'Unknown index encoding: %d'%self.index_encoding_num)
self.locale_raw, = struct.unpack(b'>I', raw[32:36])
langcode = self.locale_raw
langid = langcode & 0xFF
sublangid = (langcode >> 10) & 0xFF
self.language = main_language.get(langid, 'ENGLISH')
self.sublanguage = sub_language.get(sublangid, 'NEUTRAL')
self.possibly_language = raw[32:36]
self.num_index_entries, = struct.unpack('>I', raw[36:40])
self.ordt_start, = struct.unpack('>I', raw[40:44])
self.ligt_start, = struct.unpack('>I', raw[44:48])
@ -481,8 +485,7 @@ class IndexHeader(object): # {{{
a('Number of index records: %d'%self.index_count)
a('Index encoding: %s (%d)'%(self.index_encoding,
self.index_encoding_num))
a('Index language: %s - %s (%s)'%(self.language, self.sublanguage,
hex(self.locale_raw)))
a('Unknown (possibly language?): %r'%(self.possibly_language))
a('Number of index entries: %d'% self.num_index_entries)
a('ORDT start: %d'%self.ordt_start)
a('LIGT start: %d'%self.ligt_start)
@ -602,6 +605,9 @@ class IndexEntry(object): # {{{
self.raw = raw
self.tags = []
self.entry_type_raw = entry_type
self.byte_size = len(raw)
orig_raw = raw
try:
self.entry_type = self.TYPES[entry_type]
@ -639,8 +645,8 @@ class IndexEntry(object): # {{{
self.tags.append(Tag(aut_tag[0], [val], self.entry_type,
cncx))
if raw.replace(b'\x00', b''): # There can be padding null bytes
raise ValueError('Extra bytes in INDX table entry %d: %r'%(self.index, raw))
self.consumed = len(orig_raw) - len(raw)
self.trailing_bytes = raw
@property
def label(self):
@ -692,13 +698,16 @@ class IndexEntry(object): # {{{
return -1
def __str__(self):
ans = ['Index Entry(index=%s, entry_type=%s (%s), length=%d)'%(
self.index, self.entry_type, bin(self.entry_type_raw)[2:], len(self.tags))]
ans = ['Index Entry(index=%s, entry_type=%s (%s), length=%d, byte_size=%d)'%(
self.index, self.entry_type, bin(self.entry_type_raw)[2:],
len(self.tags), self.byte_size)]
for tag in self.tags:
ans.append('\t'+str(tag))
if self.first_child_index != -1:
ans.append('\tNumber of children: %d'%(self.last_child_index -
self.first_child_index + 1))
if self.trailing_bytes:
ans.append('\tTrailing bytes: %r'%self.trailing_bytes)
return '\n'.join(ans)
# }}}
@ -742,6 +751,7 @@ class IndexRecord(object): # {{{
raise ValueError('Extra bytes after IDXT table: %r'%rest)
indxt = raw[192:self.idxt_offset]
self.size_of_indxt_block = len(indxt)
self.indices = []
for i, off in enumerate(self.index_offsets):
try:
@ -754,10 +764,14 @@ class IndexRecord(object): # {{{
if index_header.index_type == 6:
flags = ord(indxt[off+consumed+d])
d += 1
pos = off+consumed+d
self.indices.append(IndexEntry(index, entry_type,
indxt[off+consumed+d:next_off], cncx,
indxt[pos:next_off], cncx,
index_header.tagx_entries, flags=flags))
index = self.indices[-1]
rest = indxt[pos+self.indices[-1].consumed:]
if rest.replace(b'\0', ''): # There can be padding null bytes
raise ValueError('Extra bytes after IDXT table: %r'%rest)
def get_parent(self, index):
if index.depth < 1:
@ -778,12 +792,13 @@ class IndexRecord(object): # {{{
u(self.unknown1)
a('Unknown (header type? index record number? always 1?): %d'%self.header_type)
u(self.unknown2)
a('IDXT Offset: %d'%self.idxt_offset)
a('IDXT Offset (%d block size): %d'%(self.size_of_indxt_block,
self.idxt_offset))
a('IDXT Count: %d'%self.idxt_count)
u(self.unknown3)
u(self.unknown4)
a('Index offsets: %r'%self.index_offsets)
a('\nIndex Entries:')
a('\nIndex Entries (%d entries):'%len(self.indices))
for entry in self.indices:
a(str(entry)+'\n')
@ -829,6 +844,7 @@ class TextRecord(object): # {{{
def __init__(self, idx, record, extra_data_flags, decompress):
self.trailing_data, self.raw = get_trailing_data(record.raw, extra_data_flags)
raw_trailing_bytes = record.raw[len(self.raw):]
self.raw = decompress(self.raw)
if 0 in self.trailing_data:
self.trailing_data['multibyte_overlap'] = self.trailing_data.pop(0)
@ -836,6 +852,7 @@ class TextRecord(object): # {{{
self.trailing_data['indexing'] = self.trailing_data.pop(1)
if 2 in self.trailing_data:
self.trailing_data['uncrossable_breaks'] = self.trailing_data.pop(2)
self.trailing_data['raw_bytes'] = raw_trailing_bytes
self.idx = idx
@ -957,15 +974,17 @@ class TBSIndexing(object): # {{{
return str({bin4(k):v for k, v in extra.iteritems()})
tbs_type = 0
is_periodical = self.doc_type in (257, 258, 259)
if len(byts):
outermost_index, extra, consumed = decode_tbs(byts)
outermost_index, extra, consumed = decode_tbs(byts, flag_size=4 if
is_periodical else 3)
byts = byts[consumed:]
for k in extra:
tbs_type |= k
ans.append('\nTBS: %d (%s)'%(tbs_type, bin4(tbs_type)))
ans.append('Outermost index: %d'%outermost_index)
ans.append('Unknown extra start bytes: %s'%repr_extra(extra))
if self.doc_type in (257, 259): # Hierarchical periodical
if is_periodical: # Hierarchical periodical
byts, a = self.interpret_periodical(tbs_type, byts,
dat['geom'][0])
ans += a
@ -1028,6 +1047,7 @@ class TBSIndexing(object): # {{{
# }}}
def read_starting_section(byts): # {{{
orig = byts
si, extra, consumed = decode_tbs(byts)
byts = byts[consumed:]
if len(extra) > 1 or 0b0010 in extra or 0b1000 in extra:
@ -1044,8 +1064,8 @@ class TBSIndexing(object): # {{{
eof = extra[0b0001]
if eof != 0:
raise ValueError('Unknown eof value %s when reading'
' starting section'%eof)
ans.append('This record is spanned by an article from'
' starting section. All bytes: %r'%(eof, orig))
ans.append('??This record has more than one article from '
' the section: %d'%si.index)
return si, byts
# }}}

View File

@ -0,0 +1,84 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__   = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import os, subprocess, shutil

from lxml import etree

from calibre.constants import iswindows
from calibre.customize.ui import plugin_for_output_format
from calibre.ptempfile import TemporaryDirectory
from calibre.ebooks.mobi.utils import detect_periodical
from calibre import CurrentDir

exe = 'kindlegen.exe' if iswindows else 'kindlegen'

def refactor_opf(opf, is_periodical, toc):
    with open(opf, 'rb') as f:
        root = etree.fromstring(f.read())
    '''
    for spine in root.xpath('//*[local-name() = "spine" and @toc]'):
        # Do not use the NCX toc as kindlegen requires the section structure
        # in the TOC to be duplicated in the HTML, asinine!
        del spine.attrib['toc']
    '''
    if is_periodical:
        metadata = root.xpath('//*[local-name() = "metadata"]')[0]
        xm = etree.SubElement(metadata, 'x-metadata')
        xm.tail = '\n'
        xm.text = '\n\t'
        mobip = etree.SubElement(xm, 'output', attrib={'encoding':"utf-8",
            'content-type':"application/x-mobipocket-subscription-magazine"})
        mobip.tail = '\n\t'
    with open(opf, 'wb') as f:
        f.write(etree.tostring(root, method='xml', encoding='utf-8',
            xml_declaration=True))

def refactor_guide(oeb):
    for key in list(oeb.guide):
        if key not in ('toc', 'start', 'masthead'):
            oeb.guide.remove(key)

def run_kindlegen(opf, log):
    log.info('Running kindlegen on MOBIML created by calibre')
    oname = os.path.splitext(opf)[0] + '.mobi'
    p = subprocess.Popen([exe, opf, '-c1', '-verbose', '-o', oname],
            stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    ko = p.stdout.read()
    returncode = p.wait()
    log.debug('kindlegen verbose output:')
    log.debug(ko.decode('utf-8', 'replace'))
    log.info('kindlegen returned returncode: %d'%returncode)
    if not os.path.exists(oname) or os.stat(oname).st_size < 100:
        raise RuntimeError('kindlegen did not produce any output. '
                'kindlegen return code: %d'%returncode)
    return oname

def kindlegen(oeb, opts, input_plugin, output_path):
    is_periodical = detect_periodical(oeb.toc, oeb.log)
    refactor_guide(oeb)
    with TemporaryDirectory('_kindlegen_output') as tdir:
        oeb_output = plugin_for_output_format('oeb')
        oeb_output.convert(oeb, tdir, input_plugin, opts, oeb.log)
        opf = [x for x in os.listdir(tdir) if x.endswith('.opf')][0]
        refactor_opf(os.path.join(tdir, opf), is_periodical, oeb.toc)
        try:
            if os.path.exists('/tmp/kindlegen'):
                shutil.rmtree('/tmp/kindlegen')
            shutil.copytree(tdir, '/tmp/kindlegen')
            oeb.log('kindlegen intermediate output stored in: /tmp/kindlegen')
        except:
            pass
        with CurrentDir(tdir):
            oname = run_kindlegen(opf, oeb.log)
        shutil.copyfile(oname, output_path)
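For reference, the element that refactor_opf injects for periodicals serializes roughly as below (namespace declarations and surrounding whitespace aside); kindlegen keys off this content-type to build a subscription-style periodical:

    <x-metadata>
        <output encoding="utf-8" content-type="application/x-mobipocket-subscription-magazine"/>
    </x-metadata>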

View File

@ -50,6 +50,12 @@ class MOBIOutput(OutputFormatPlugin):
help=_('When adding the Table of Contents to the book, add it at the start of the '
'book instead of the end. Not recommended.')
),
OptionRecommendation(name='kindlegen',
recommended_value=False,
help=('Use kindlegen (must be in your PATH) to generate the'
' binary wrapper for the MOBI format. Useful to debug '
' the calibre MOBI output.')
),
])
@ -164,6 +170,10 @@ class MOBIOutput(OutputFormatPlugin):
MobiWriter
else:
from calibre.ebooks.mobi.writer import MobiWriter
if opts.kindlegen:
from calibre.ebooks.mobi.kindlegen import kindlegen
kindlegen(oeb, opts, input_plugin, output_path)
else:
writer = MobiWriter(opts,
write_page_breaks_after_item=write_page_breaks_after_item)
writer(oeb, output_path)

View File

@ -41,6 +41,9 @@ def encode_number_as_hex(num):
number.
'''
num = bytes(hex(num)[2:].upper())
nlen = len(num)
if nlen % 2 != 0:
num = b'0'+num
ans = bytearray(num)
ans.insert(0, len(num))
return bytes(ans)
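A quick sanity check of the even-length padding added above; the byte values follow directly from the code as shown:

    encode_number_as_hex(10)   # hex digits 'A'  -> padded to '0A' -> b'\x020A'
    encode_number_as_hex(255)  # hex digits 'FF' -> already even   -> b'\x02FF'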
@ -66,11 +69,14 @@ def encint(value, forward=True):
If forward is True the bytes returned are suitable for prepending to the
output buffer, otherwise they must be append to the output buffer.
'''
if value < 0:
raise ValueError('Cannot encode negative numbers as vwi')
# Encode vwi
byts = bytearray()
while True:
b = value & 0b01111111
value >>= 7 # shift value to the right by 7 bits
byts.append(b)
if value == 0:
break
@ -185,7 +191,7 @@ def encode_trailing_data(raw):
<data><size>
where size is a backwards encoded vwi whose value is the length of the
entire return bytestring.
entire returned bytestring. data is the bytestring passed in as raw.
This is the encoding used for trailing data entries at the end of text
records. See get_trailing_data() for details.
@ -198,24 +204,31 @@ def encode_trailing_data(raw):
lsize += 1
return raw + encoded
def encode_fvwi(val, flags):
def encode_fvwi(val, flags, flag_size=4):
'''
Encode the value val and the 4 bit flags flags as a fvwi. This encoding is
Encode the value val and the flag_size bits from flags as a fvwi. This encoding is
used in the trailing byte sequences for indexing. Returns encoded
bytestring.
'''
ans = (val << 4) | (flags & 0b1111)
ans = val << flag_size
for i in xrange(flag_size):
ans |= (flags & (1 << i))
return encint(ans)
def decode_fvwi(byts):
def decode_fvwi(byts, flag_size=4):
'''
Decode encoded fvwi. Returns number, flags, consumed
'''
arg, consumed = decint(bytes(byts))
return (arg >> 4), (arg & 0b1111), consumed
val = arg >> flag_size
flags = 0
for i in xrange(flag_size):
flags |= (arg & (1 << i))
return val, flags, consumed
def decode_tbs(byts):
def decode_tbs(byts, flag_size=4):
'''
Trailing byte sequences for indexing consists of series of fvwi numbers.
This function reads the fvwi number and its associated flags. It them uses
@ -226,10 +239,10 @@ def decode_tbs(byts):
data and the number of bytes consumed.
'''
byts = bytes(byts)
val, flags, consumed = decode_fvwi(byts)
val, flags, consumed = decode_fvwi(byts, flag_size=flag_size)
extra = {}
byts = byts[consumed:]
if flags & 0b1000:
if flags & 0b1000 and flag_size > 3:
extra[0b1000] = True
if flags & 0b0010:
x, consumed2 = decint(byts)
@ -247,7 +260,7 @@ def decode_tbs(byts):
consumed += consumed2
return val, extra, consumed
def encode_tbs(val, extra):
def encode_tbs(val, extra, flag_size=4):
'''
Encode the number val and the extra data in the extra dict as an fvwi. See
decode_tbs above.
@ -255,7 +268,7 @@ def encode_tbs(val, extra):
flags = 0
for flag in extra:
flags |= flag
ans = encode_fvwi(val, flags)
ans = encode_fvwi(val, flags, flag_size=flag_size)
if 0b0010 in extra:
ans += encint(extra[0b0010])
@ -289,5 +302,33 @@ def align_block(raw, multiple=4, pad=b'\0'):
    return raw + pad*(multiple - extra)

def detect_periodical(toc, log=None):
    '''
    Detect if the TOC object toc contains a periodical that conforms to the
    structure required by kindlegen to generate a periodical.
    '''
    for node in toc.iterdescendants():
        if node.depth() == 1 and node.klass != 'article':
            if log is not None:
                log.debug(
                    'Not a periodical: Deepest node does not have '
                    'class="article"')
            return False
        if node.depth() == 2 and node.klass != 'section':
            if log is not None:
                log.debug(
                    'Not a periodical: Second deepest node does not have'
                    ' class="section"')
            return False
        if node.depth() == 3 and node.klass != 'periodical':
            if log is not None:
                log.debug('Not a periodical: Third deepest node'
                    ' does not have class="periodical"')
            return False
        if node.depth() > 3:
            if log is not None:
                log.debug('Not a periodical: Has nodes of depth > 3')
            return False
    return True
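The fvwi helpers changed in this file pack a value together with a small group of flag bits into a single variable-width integer; a minimal round-trip sketch, assuming calibre.ebooks.mobi.utils is importable:

    from calibre.ebooks.mobi.utils import encode_fvwi, decode_fvwi

    raw = encode_fvwi(5, 0b1010)            # value 5 in the high bits, flags 1010 in the low 4 bits
    val, flags, consumed = decode_fvwi(raw)
    assert (val, flags, consumed) == (5, 0b1010, len(raw))

    # The TBS code in this commit passes flag_size=3 for periodical records, which simply
    # moves the boundary between the value bits and the flag bits.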

View File

@ -14,8 +14,7 @@ from collections import OrderedDict, defaultdict
from calibre.ebooks.mobi.writer2 import RECORD_SIZE
from calibre.ebooks.mobi.utils import (encint, encode_number_as_hex,
encode_trailing_data, encode_tbs, align_block, utf8_text)
from calibre.ebooks.mobi.langcodes import iana2mobi
encode_tbs, align_block, utf8_text, detect_periodical)
class CNCX(object): # {{{
@ -28,13 +27,12 @@ class CNCX(object): # {{{
MAX_STRING_LENGTH = 500
def __init__(self, toc, opts):
def __init__(self, toc, is_periodical):
self.strings = OrderedDict()
for item in toc:
if item is self.toc: continue
for item in toc.iterdescendants(breadth_first=True):
self.strings[item.title] = 0
if opts.mobi_periodical:
if is_periodical:
self.strings[item.klass] = 0
self.records = []
@ -53,11 +51,10 @@ class CNCX(object): # {{{
self.records.append(buf.getvalue())
buf.truncate(0)
offset = len(self.records) * 0x10000
buf.write(raw)
self.strings[key] = offset
offset += len(raw)
buf.write(b'\0') # CNCX must end with zero byte
self.records.append(align_block(buf.getvalue()))
def __getitem__(self, string):
@ -91,6 +88,17 @@ class IndexEntry(object): # {{{
self.first_child_index = None
self.last_child_index = None
def __repr__(self):
return ('IndexEntry(offset=%r, depth=%r, length=%r, index=%r,'
' parent_index=%r)')%(self.offset, self.depth, self.length,
self.index, self.parent_index)
@dynamic_property
def size(self):
def fget(self): return self.length
def fset(self, val): self.length = val
return property(fget=fget, fset=fset, doc='Alias for length')
@classmethod
def tagx_block(cls, for_periodical=True):
buf = bytearray()
@ -115,7 +123,7 @@ class IndexEntry(object): # {{{
buf.append(1)
header = b'TAGX'
header += pack(b'>I', len(buf)) # table length
header += pack(b'>I', 12+len(buf)) # table length
header += pack(b'>I', 1) # control byte count
return header + bytes(buf)
@ -137,7 +145,7 @@ class IndexEntry(object): # {{{
def entry_type(self):
ans = 0
for tag in self.tag_nums:
ans |= (1 << self.BITMASKS[tag]) # 1 << x == 2**x
ans |= (1 << self.BITMASKS.index(tag)) # 1 << x == 2**x
return ans
@property
@ -152,7 +160,7 @@ class IndexEntry(object): # {{{
val = getattr(self, attr)
buf.write(encint(val))
ans = buf.get_value()
ans = buf.getvalue()
return ans
# }}}
@ -164,25 +172,35 @@ class TBS(object): # {{{
trailing byte sequence for the record.
'''
def __init__(self, data, is_periodical, first=False, all_sections=[]):
if not data:
self.bytestring = encode_trailing_data(b'')
else:
self.section_map = OrderedDict((i.index, i) for i in
sorted(all_sections, key=lambda x:x.offset))
def __init__(self, data, is_periodical, first=False, section_map={},
after_first=False):
self.section_map = section_map
#import pprint
#pprint.pprint(data)
#print()
if is_periodical:
# The starting bytes.
# The value is zero which I think indicates the periodical
# index entry. The values for the various flags seem to be
# unused. If the 0b0100 is present, it means that the record
# unused. If the 0b100 is present, it means that the record
# deals with section 1 (or is the final record with section
# transitions).
self.type_010 = encode_tbs(0, {0b0010: 0})
self.type_011 = encode_tbs(0, {0b0010: 0, 0b0001: 0})
self.type_110 = encode_tbs(0, {0b0100: 2, 0b0010: 0})
self.type_111 = encode_tbs(0, {0b0100: 2, 0b0010: 0, 0b0001: 0})
self.type_010 = encode_tbs(0, {0b010: 0}, flag_size=3)
self.type_011 = encode_tbs(0, {0b010: 0, 0b001: 0},
flag_size=3)
self.type_110 = encode_tbs(0, {0b100: 2, 0b010: 0},
flag_size=3)
self.type_111 = encode_tbs(0, {0b100: 2, 0b010: 0, 0b001:
0}, flag_size=3)
if not data:
byts = b''
if after_first:
# This can happen if a record contains only text between
# the periodical start and the first section
byts = self.type_011
self.bytestring = byts
else:
depth_map = defaultdict(list)
for x in ('starts', 'ends', 'completes'):
for idx in data[x]:
@ -190,30 +208,43 @@ class TBS(object): # {{{
for l in depth_map.itervalues():
l.sort(key=lambda x:x.offset)
self.periodical_tbs(data, first, depth_map)
else:
if not data:
self.bytestring = b''
else:
self.book_tbs(data, first)
def periodical_tbs(self, data, first, depth_map):
buf = StringIO()
has_section_start = (depth_map[1] and depth_map[1][0] in
data['starts'])
has_section_start = (depth_map[1] and
set(depth_map[1]).intersection(set(data['starts'])))
spanner = data['spans']
parent_section_index = -1
if depth_map[0]:
# We have a terminal record
# Find the first non periodical node
first_node = None
for nodes in depth_map.values():
for nodes in (depth_map[1], depth_map[2]):
for node in nodes:
if (first_node is None or (node.offset, node.depth) <
(first_node.offset, first_node.depth)):
first_node = node
parent_section_index = -1
if depth_map[0]:
# We have a terminal record
typ = (self.type_110 if has_section_start else self.type_010)
if first_node.depth > 0:
# parent_section_index is needed for the last record
if first_node is not None and first_node.depth > 0:
parent_section_index = (first_node.index if first_node.depth
== 1 else first_node.parent_index)
else:
parent_section_index = max(self.section_map.iterkeys())
else:
# Non terminal record
if spanner is not None:
# record is spanned by a single article
parent_section_index = spanner.parent_index
@ -221,31 +252,37 @@ class TBS(object): # {{{
self.type_010)
elif not depth_map[1]:
# has only article nodes, i.e. spanned by a section
parent_section_index = self.depth_map[2][0].parent_index
parent_section_index = depth_map[2][0].parent_index
typ = (self.type_111 if parent_section_index == 1 else
self.type_010)
else:
# has section transitions
parent_section_index = self.depth_map[2][0].parent_index
if depth_map[2]:
parent_section_index = depth_map[2][0].parent_index
else:
parent_section_index = depth_map[1][0].index
typ = self.type_011
buf.write(typ)
if parent_section_index > 1:
if typ not in (self.type_110, self.type_111) and parent_section_index > 0:
extra = {}
# Write starting section information
if spanner is None:
num_articles = len(depth_map[1])
extra = {}
num_articles = len([a for a in depth_map[1] if a.parent_index
== parent_section_index])
if not depth_map[1]:
extra = {0b0001: 0}
if num_articles > 1:
extra = {0b0100: num_articles}
else:
extra = {0b0001: 0}
buf.write(encode_tbs(parent_section_index, extra))
if spanner is None:
articles = depth_map[2]
sections = [self.section_map[a.parent_index] for a in articles]
sections.sort(key=lambda x:x.offset)
section_map = {s:[a for a in articles is a.parent_index ==
sections = set([self.section_map[a.parent_index] for a in
articles])
sections = sorted(sections, key=lambda x:x.offset)
section_map = {s:[a for a in articles if a.parent_index ==
s.index] for s in sections}
for i, section in enumerate(sections):
# All the articles in this record that belong to section
@ -257,15 +294,15 @@ class TBS(object): # {{{
try:
next_sec = sections[i+1]
except:
next_sec == None
next_sec = None
extra = {}
if num > 1:
extra[0b0100] = num
if i == 0 and next_sec is not None:
if False and i == 0 and next_sec is not None:
# Write offset to next section from start of record
# For some reason kindlegen only writes this offset
# for the first section transition. Imitate it.
# I can't figure out exactly when Kindlegen decides to
# write this so I have disabled it for now.
extra[0b0001] = next_sec.offset - data['offset']
buf.write(encode_tbs(first_article.index-section.index, extra))
@ -277,10 +314,10 @@ class TBS(object): # {{{
buf.write(encode_tbs(spanner.index - parent_section_index,
{0b0001: 0}))
self.bytestring = encode_trailing_data(buf.getvalue())
self.bytestring = buf.getvalue()
def book_tbs(self, data, first):
self.bytestring = encode_trailing_data(b'')
self.bytestring = b''
# }}}
class Indexer(object): # {{{
@ -295,18 +332,18 @@ class Indexer(object): # {{{
self.log = oeb.log
self.opts = opts
self.is_periodical = self.detect_periodical()
self.is_periodical = detect_periodical(self.oeb.toc, self.log)
self.log('Generating MOBI index for a %s'%('periodical' if
self.is_periodical else 'book'))
self.is_flat_periodical = False
if opts.mobi_periodical:
if self.is_periodical:
periodical_node = iter(oeb.toc).next()
sections = tuple(periodical_node)
self.is_flat_periodical = len(sections) == 1
self.records = []
self.cncx = CNCX(oeb.toc, opts)
self.cncx = CNCX(oeb.toc, self.is_periodical)
if self.is_periodical:
self.indices = self.create_periodical_index()
@ -319,28 +356,6 @@ class Indexer(object): # {{{
self.calculate_trailing_byte_sequences()
def detect_periodical(self): # {{{
for node in self.oeb.toc.iterdescendants():
if node.depth() == 1 and node.klass != 'article':
self.log.debug(
'Not a periodical: Deepest node does not have '
'class="article"')
return False
if node.depth() == 2 and node.klass != 'section':
self.log.debug(
'Not a periodical: Second deepest node does not have'
' class="section"')
return False
if node.depth() == 3 and node.klass != 'periodical':
self.log.debug('Not a periodical: Third deepest node'
' does not have class="periodical"')
return False
if node.depth() > 3:
self.log.debug('Not a periodical: Has nodes of depth > 3')
return False
return True
# }}}
def create_index_record(self): # {{{
header_length = 192
buf = StringIO()
@ -405,14 +420,13 @@ class Indexer(object): # {{{
buf.write(pack(b'>I', 0)) # Filled in later
# Number of index records 24-28
buf.write(pack('b>I', len(self.records)))
buf.write(pack(b'>I', len(self.records)))
# Index Encoding 28-32
buf.write(pack(b'>I', 65001)) # utf-8
# Index language 32-36
buf.write(iana2mobi(
str(self.oeb.metadata.language[0])))
# Unknown 32-36
buf.write(b'\xff'*4)
# Number of index entries 36-40
buf.write(pack(b'>I', len(self.indices)))
@ -457,7 +471,7 @@ class Indexer(object): # {{{
idxt_offset = buf.tell()
buf.write(b'IDXT')
buf.write(header_length + len(tagx_block))
buf.write(pack(b'>H', header_length + len(tagx_block)))
buf.write(b'\0')
buf.seek(20)
buf.write(pack(b'>I', idxt_offset))
@ -481,12 +495,12 @@ class Indexer(object): # {{{
continue
seen.add(offset)
index = IndexEntry(offset, label)
self.indices.append(index)
indices.append(index)
indices.sort(key=lambda x:x.offset)
# Set lengths
for i, index in indices:
for i, index in enumerate(indices):
try:
next_offset = indices[i+1].offset
except:
@ -497,11 +511,11 @@ class Indexer(object): # {{{
indices = [i for i in indices if i.length > 0]
# Set index values
for i, index in indices:
for i, index in enumerate(indices):
index.index = i
# Set lengths again to close up any gaps left by filtering
for i, index in indices:
for i, index in enumerate(indices):
try:
next_offset = indices[i+1].offset
except:
@ -567,7 +581,7 @@ class Indexer(object): # {{{
for s, x in enumerate(normalized_sections):
sec, normalized_articles = x
try:
sec.length = normalized_sections[s+1].offset - sec.offset
sec.length = normalized_sections[s+1][0].offset - sec.offset
except:
sec.length = self.serializer.body_end_offset - sec.offset
for i, art in enumerate(normalized_articles):
@ -583,17 +597,18 @@ class Indexer(object): # {{{
normalized_articles))
normalized_sections[i] = (sec, normalized_articles)
normalized_sections = list(filter(lambda x: x[0].size > 0 and x[1],
normalized_sections = list(filter(lambda x: x[0].length > 0 and x[1],
normalized_sections))
# Set indices
i = 0
for sec, normalized_articles in normalized_sections:
for sec, articles in normalized_sections:
i += 1
sec.index = i
sec.parent_index = 0
for sec, normalized_articles in normalized_sections:
for art in normalized_articles:
for sec, articles in normalized_sections:
for art in articles:
i += 1
art.index = i
art.parent_index = sec.index
@ -606,7 +621,7 @@ class Indexer(object): # {{{
for s, x in enumerate(normalized_sections):
sec, articles = x
try:
next_offset = normalized_sections[s+1].offset
next_offset = normalized_sections[s+1][0].offset
except:
next_offset = self.serializer.body_end_offset
sec.length = next_offset - sec.offset
@ -622,7 +637,7 @@ class Indexer(object): # {{{
for s, x in enumerate(normalized_sections):
sec, articles = x
try:
next_sec = normalized_sections[s+1]
next_sec = normalized_sections[s+1][0]
except:
if (sec.length == 0 or sec.next_offset !=
self.serializer.body_end_offset):
@ -659,15 +674,24 @@ class Indexer(object): # {{{
self.tbs_map = {}
found_node = False
sections = [i for i in self.indices if i.depth == 1]
section_map = OrderedDict((i.index, i) for i in
sorted(sections, key=lambda x:x.offset))
deepest = max(i.depth for i in self.indices)
for i in xrange(self.number_of_text_records):
offset = i * RECORD_SIZE
next_offset = offset + RECORD_SIZE
data = OrderedDict([('ends',[]), ('completes',[]), ('starts',[]),
('spans', None), ('offset', offset)])
data = {'ends':[], 'completes':[], 'starts':[],
'spans':None, 'offset':offset, 'record_number':i+1}
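# Classify every index node by how it overlaps this record's
# [offset, next_offset) byte range: starting in it, ending in it,
# contained entirely within it, or (for the deepest nodes) spanning it.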
for index in self.indices:
if index.offset >= next_offset:
# Node starts after current record
if index.depth == deepest:
break
else:
continue
if index.next_offset <= offset:
# Node ends before current record
continue
@ -683,15 +707,17 @@ class Indexer(object): # {{{
if index.next_offset <= next_offset:
# Node ends in current record
data['ends'].append(index)
else:
elif index.depth == deepest:
data['spans'] = index
if (data['ends'] or data['completes'] or data['starts'] or
data['spans'] is not None):
self.tbs_map[i+1] = TBS(data, self.is_periodical, first=not
found_node, all_sections=sections)
found_node, section_map=section_map)
found_node = True
else:
self.tbs_map[i+1] = TBS({}, self.is_periodical, first=False)
self.tbs_map[i+1] = TBS({}, self.is_periodical, first=False,
after_first=found_node, section_map=section_map)
def get_trailing_byte_sequence(self, num):
return self.tbs_map[num].bytestring
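# The returned bytestring is appended to its text record by the writer
# (wrapped via encode_trailing_data), one TBS per text record.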

View File

@ -19,7 +19,7 @@ from calibre.ebooks.mobi.langcodes import iana2mobi
from calibre.utils.filenames import ascii_filename
from calibre.ebooks.mobi.writer2 import (PALMDOC, UNCOMPRESSED, RECORD_SIZE)
from calibre.ebooks.mobi.utils import (rescale_image, encint,
encode_trailing_data)
encode_trailing_data, align_block)
from calibre.ebooks.mobi.writer2.indexer import Indexer
EXTH_CODES = {
@ -29,7 +29,6 @@ EXTH_CODES = {
'identifier': 104,
'subject': 105,
'pubdate': 106,
'date': 106,
'review': 107,
'contributor': 108,
'rights': 109,
@ -55,6 +54,7 @@ class MobiWriter(object):
self.last_text_record_idx = 1
def __call__(self, oeb, path_or_stream):
self.log = oeb.log
if hasattr(path_or_stream, 'write'):
return self.dump_stream(oeb, path_or_stream)
with open(path_or_stream, 'w+b') as stream:
@ -90,6 +90,7 @@ class MobiWriter(object):
self.primary_index_record_idx = None
try:
self.indexer = Indexer(self.serializer, self.last_text_record_idx,
len(self.records[self.last_text_record_idx]),
self.opts, self.oeb)
except:
self.log.exception('Failed to generate MOBI index:')
@ -98,9 +99,13 @@ class MobiWriter(object):
for i in xrange(len(self.records)):
if i == 0: continue
tbs = self.indexer.get_trailing_byte_sequence(i)
self.records[i] += tbs
self.records[i] += encode_trailing_data(tbs)
self.records.extend(self.indexer.records)
@property
def is_periodical(self):
return (self.primary_index_record_idx is None or not
self.indexer.is_periodical)
# }}}
@ -193,7 +198,6 @@ class MobiWriter(object):
self.serializer = Serializer(self.oeb, self.images,
write_page_breaks_after_item=self.write_page_breaks_after_item)
text = self.serializer()
self.content_length = len(text)
self.text_length = len(text)
text = StringIO(text)
nrecords = 0
@ -201,21 +205,16 @@ class MobiWriter(object):
if self.compression != UNCOMPRESSED:
self.oeb.logger.info(' Compressing markup content...')
while text.tell() < self.text_length:
data, overlap = self.read_text_record(text)
while len(data) > 0:
if self.compression == PALMDOC:
data = compress_doc(data)
record = StringIO()
record.write(data)
self.records.append(record.getvalue())
data += overlap
data += pack(b'>B', len(overlap))
self.records.append(data)
nrecords += 1
data, overlap = self.read_text_record(text)
# Write information about the multibyte character overlap, if any
record.write(overlap)
record.write(pack(b'>B', len(overlap)))
self.last_text_record_idx = nrecords
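# Each text record built above is the (possibly compressed) text chunk,
# followed by any multibyte-character overlap bytes and a single byte
# giving the length of that overlap.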
@ -276,8 +275,19 @@ class MobiWriter(object):
exth = self.build_exth()
last_content_record = len(self.records) - 1
# FCIS/FLIS (Seem to serve no purpose)
flis_number = len(self.records)
self.records.append(
b'FLIS\0\0\0\x08\0\x41\0\0\0\0\0\0\xff\xff\xff\xff\0\x01\0\x03\0\0\0\x03\0\0\0\x01'+
b'\xff'*4)
fcis = b'FCIS\x00\x00\x00\x14\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00'
fcis += pack(b'>I', self.text_length)
fcis += b'\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x08\x00\x01\x00\x01\x00\x00\x00\x00'
fcis_number = len(self.records)
self.records.append(fcis)
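# The FLIS record is a fixed blob and the FCIS record embeds the text
# length; their record numbers are written into the MOBI header below
# (offsets 0xb8 and 0xc0).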
# EOF record
self.records.append('\xE9\x8E\x0D\x0A')
self.records.append(b'\xE9\x8E\x0D\x0A')
record0 = StringIO()
# The MOBI Header
@ -307,8 +317,15 @@ class MobiWriter(object):
# 0x10 - 0x13 : UID
# 0x14 - 0x17 : Generator version
bt = 0x002
if self.primary_index_record_idx is not None:
if self.indexer.is_flat_periodical:
bt = 0x102
elif self.indexer.is_periodical:
bt = 0x103
record0.write(pack(b'>IIIII',
0xe8, 0x002, 65001, uid, 6))
0xe8, bt, 65001, uid, 6))
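# bt selects the MOBI book type: 0x002 for an ordinary book, 0x102 for a
# flat periodical and 0x103 for a structured (multi-section) periodical.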
# 0x18 - 0x1f : Unknown
record0.write(b'\xff' * 8)
@ -337,7 +354,8 @@ class MobiWriter(object):
# 0x58 - 0x5b : Format version
# 0x5c - 0x5f : First image record number
record0.write(pack(b'>II',
6, self.first_image_record if self.first_image_record else 0))
6, self.first_image_record if self.first_image_record else
len(self.records)-1))
# 0x60 - 0x63 : First HUFF/CDIC record number
# 0x64 - 0x67 : Number of HUFF/CDIC records
@ -346,7 +364,12 @@ class MobiWriter(object):
record0.write(b'\0' * 16)
# 0x70 - 0x73 : EXTH flags
record0.write(pack(b'>I', 0x50))
# Bit 6 (0b1000000) being set indicates the presence of an EXTH header
# The purpose of the other bits is unknown
exth_flags = 0b1010000
if self.is_periodical:
exth_flags |= 0b1000
record0.write(pack(b'>I', exth_flags))
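# e.g. a periodical ends up with 0b1010000 | 0b1000 == 0x58, while an
# ordinary book keeps 0b1010000 == 0x50.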
# 0x74 - 0x93 : Unknown
record0.write(b'\0' * 32)
@ -371,13 +394,13 @@ class MobiWriter(object):
record0.write(b'\0\0\0\x01')
# 0xb8 - 0xbb : FCIS record number
record0.write(pack(b'>I', 0xffffffff))
record0.write(pack(b'>I', fcis_number))
# 0xbc - 0xbf : Unknown (FCIS record count?)
record0.write(pack(b'>I', 0xffffffff))
record0.write(pack(b'>I', 1))
# 0xc0 - 0xc3 : FLIS record number
record0.write(pack(b'>I', 0xffffffff))
record0.write(pack(b'>I', flis_number))
# 0xc4 - 0xc7 : Unknown (FLIS record count?)
record0.write(pack(b'>I', 1))
@ -411,7 +434,7 @@ class MobiWriter(object):
# Add some buffer so that Amazon can add encryption information if this
# MOBI is submitted for publication
record0 += (b'\0' * (1024*8))
self.records[0] = record0
self.records[0] = align_block(record0)
# }}}
def build_exth(self): # EXTH Header {{{
@ -469,25 +492,32 @@ class MobiWriter(object):
nrecs += 1
# Write cdetype
if not self.opts.mobi_periodical:
if self.is_periodical:
data = b'EBOK'
exth.write(pack(b'>II', 501, len(data)+8))
exth.write(data)
nrecs += 1
# Add a publication date entry
if oeb.metadata['date'] != [] :
if oeb.metadata['date']:
datestr = str(oeb.metadata['date'][0])
elif oeb.metadata['timestamp'] != [] :
elif oeb.metadata['timestamp']:
datestr = str(oeb.metadata['timestamp'][0])
if datestr is not None:
datestr = bytes(datestr)
exth.write(pack(b'>II', EXTH_CODES['pubdate'], len(datestr) + 8))
exth.write(datestr)
nrecs += 1
else:
raise NotImplementedError("missing date or timestamp needed for mobi_periodical")
# Write the same creator info as kindlegen 1.2
for code, val in [(204, 201), (205, 1), (206, 2), (207, 33307)]:
exth.write(pack(b'>II', code, 12))
exth.write(pack(b'>I', val))
nrecs += 1
if (oeb.metadata.cover and
unicode(oeb.metadata.cover[0]) in oeb.manifest.ids):
id = unicode(oeb.metadata.cover[0])
@ -514,7 +544,8 @@ class MobiWriter(object):
'''
Write the PalmDB header
'''
title = ascii_filename(unicode(self.oeb.metadata.title[0]))
title = ascii_filename(unicode(self.oeb.metadata.title[0])).replace(
' ', '_')
title = title + (b'\0' * (32 - len(title)))
now = int(time.time())
nrecords = len(self.records)

View File

@ -1680,8 +1680,15 @@ class TOC(object):
return True
return False
def iterdescendants(self):
def iterdescendants(self, breadth_first=False):
"""Iterate over all descendant nodes in depth-first order."""
if breadth_first:
for child in self.nodes:
yield child
for child in self.nodes:
for node in child.iterdescendants(breadth_first=True):
yield node
else:
for child in self.nodes:
for node in child.iter():
yield node
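# Note: with breadth_first=True a node's children are all yielded before
# any of their descendants; with the default, each child's subtree is
# exhausted before moving on to the next child.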

View File

@ -165,6 +165,7 @@ class PDFWriter(QObject): # {{{
printer = get_pdf_printer(self.opts)
printer.setOutputFileName(item_path)
self.view.print_(printer)
printer.abort()
self._render_book()
def _delete_tmpdir(self):
@ -186,6 +187,7 @@ class PDFWriter(QObject): # {{{
draw_image_page(printer, painter, p,
preserve_aspect_ratio=self.opts.preserve_cover_aspect_ratio)
painter.end()
printer.abort()
def _write(self):

View File

@ -8,7 +8,8 @@ from functools import partial
from PyQt4.Qt import QThread, QObject, Qt, QProgressDialog, pyqtSignal, QTimer
from calibre.gui2.dialogs.progress import ProgressDialog
from calibre.gui2 import question_dialog, error_dialog, info_dialog, gprefs
from calibre.gui2 import (question_dialog, error_dialog, info_dialog, gprefs,
warning_dialog)
from calibre.ebooks.metadata.opf2 import OPF
from calibre.ebooks.metadata import MetaInformation
from calibre.constants import preferred_encoding, filesystem_encoding, DEBUG
@ -275,6 +276,24 @@ class Adder(QObject): # {{{
_('No books found'), show=True)
return self.canceled()
books = [[b] if isinstance(b, basestring) else b for b in books]
restricted = set()
for i in xrange(len(books)):
files = books[i]
restrictedi = set(f for f in files if not os.access(f, os.R_OK))
if restrictedi:
files = [f for f in files if os.access(f, os.R_OK)]
books[i] = files
restricted |= restrictedi
if restrictedi:
det_msg = u'\n'.join(restrictedi)
warning_dialog(self.pd, _('No permission'),
_('Cannot add some files as you do not have '
'permission to access them. Click Show'
' Details to see the list of such files.'),
det_msg=det_msg, show=True)
books = list(filter(None, books))
if not books:
return self.canceled()
self.rfind = None
from calibre.ebooks.metadata.worker import read_metadata
self.rq = Queue()

View File

@ -29,12 +29,14 @@ if pictureflow is not None:
pictureflow.FlowImages.__init__(self)
self.images = []
self.captions = []
self.subtitles = []
for f in os.listdir(dirpath):
f = os.path.join(dirpath, f)
img = QImage(f)
if not img.isNull():
self.images.append(img)
self.captions.append(os.path.basename(f))
self.subtitles.append('%d bytes'%os.stat(f).st_size)
def count(self):
return len(self.images)
@ -45,6 +47,9 @@ if pictureflow is not None:
def caption(self, index):
return self.captions[index]
def subtitle(self, index):
return self.subtitles[index]
def currentChanged(self, index):
print 'current changed:', index

View File

@ -477,6 +477,8 @@ class BooksView(QTableView): # {{{
# arbitrary: scroll bar + header + some
max_width = self.width() - (self.verticalScrollBar().width() +
self.verticalHeader().width() + 10)
if max_width < 200:
max_width = 200
if new_size > max_width:
self.column_header.blockSignals(True)
self.setColumnWidth(col, max_width)
@ -567,7 +569,8 @@ class BooksView(QTableView): # {{{
if md.hasFormat('text/uri-list') and not \
md.hasFormat('application/calibre+from_library'):
urls = [unicode(u.toLocalFile()) for u in md.urls()]
return [u for u in urls if os.path.splitext(u)[1] and os.access(u, os.R_OK)]
return [u for u in urls if os.path.splitext(u)[1] and
os.path.exists(u)]
def drag_icon(self, cover, multiple):
cover = cover.scaledToHeight(120, Qt.SmoothTransformation)

View File

@ -99,6 +99,8 @@ typedef unsigned short QRgb565;
#define PFREAL_ONE (1 << PFREAL_SHIFT)
#define PFREAL_HALF (PFREAL_ONE >> 1)
#define TEXT_FLAGS (Qt::TextWordWrap|Qt::TextWrapAnywhere|Qt::TextHideMnemonic|Qt::AlignCenter)
inline PFreal fmul(PFreal a, PFreal b)
{
return ((long long)(a))*((long long)(b)) >> PFREAL_SHIFT;
@ -401,6 +403,7 @@ private:
QImage* surface(int slideIndex);
void triggerRender();
void resetSlides();
void render_text(QPainter*, int);
};
PictureFlowPrivate::PictureFlowPrivate(PictureFlow* w, int queueLength_)
@ -663,6 +666,34 @@ void PictureFlowPrivate::triggerRender()
triggerTimer.start();
}
void PictureFlowPrivate::render_text(QPainter *painter, int index) {
QRect brect, brect2;
int buffer_width, buffer_height;
QString caption, subtitle;
caption = slideImages->caption(index);
subtitle = slideImages->subtitle(index);
buffer_width = buffer.width(); buffer_height = buffer.height();
brect = painter->boundingRect(QRect(0, 0, buffer_width, fontSize), TEXT_FLAGS, caption);
brect2 = painter->boundingRect(QRect(0, 0, buffer_width, fontSize), TEXT_FLAGS, subtitle);
// So that if there is no subtitle, the caption is not flush with the bottom
if (brect2.height() < fontSize) brect2.setHeight(fontSize);
// So that the text does not occupy more than the lower half of the buffer
if (brect.height() > ((int)(buffer.height()/3.0)) - fontSize*2)
brect.setHeight(((int)buffer.height()/3.0) - fontSize*2);
brect.moveTop(buffer_height - (brect.height() + brect2.height()));
//printf("top: %d, height: %d\n", brect.top(), brect.height());
//
painter->drawText(brect, TEXT_FLAGS, caption);
brect2.moveTop(buffer_height - brect2.height());
painter->drawText(brect2, TEXT_FLAGS, slideImages->subtitle(index));
}
// Render the slides. Updates only the offscreen buffer.
void PictureFlowPrivate::render()
{
@ -708,10 +739,7 @@ void PictureFlowPrivate::render()
//painter.setPen(QColor(255,255,255,127));
if (centerIndex < slideCount() && centerIndex > -1) {
painter.drawText( QRect(0,0, buffer.width(), buffer.height()*2-fontSize*4),
Qt::AlignCenter, slideImages->caption(centerIndex));
painter.drawText( QRect(0,0, buffer.width(), buffer.height()*2-fontSize*2),
Qt::AlignCenter, slideImages->subtitle(centerIndex));
render_text(&painter, centerIndex);
}
painter.end();
@ -764,20 +792,12 @@ void PictureFlowPrivate::render()
painter.setPen(QColor(255,255,255, (255-fade) ));
if (leftTextIndex < sc && leftTextIndex > -1) {
painter.drawText( QRect(0,0, buffer.width(), buffer.height()*2 - fontSize*4),
Qt::AlignCenter, slideImages->caption(leftTextIndex));
painter.drawText( QRect(0,0, buffer.width(), buffer.height()*2 - fontSize*2),
Qt::AlignCenter, slideImages->subtitle(leftTextIndex));
render_text(&painter, leftTextIndex);
}
painter.setPen(QColor(255,255,255, fade));
if (leftTextIndex+1 < sc && leftTextIndex > -2) {
painter.drawText( QRect(0,0, buffer.width(), buffer.height()*2 - fontSize*4),
Qt::AlignCenter, slideImages->caption(leftTextIndex+1));
painter.drawText( QRect(0,0, buffer.width(), buffer.height()*2 - fontSize*2),
Qt::AlignCenter, slideImages->subtitle(leftTextIndex+1));
render_text(&painter, leftTextIndex+1);
}
painter.end();

View File

@ -22,11 +22,15 @@ from calibre.utils.icu import sort_key
from calibre.utils.search_query_parser import SearchQueryParser
def comparable_price(text):
text = re.sub(r'[^0-9.,]', '', text)
if len(text) < 3 or text[-3] not in ('.', ','):
text += '00'
text = re.sub(r'\D', '', text)
text = text.rjust(6, '0')
# this keeps thousand and fraction separators
match = re.search(r'(?:\d|[,.](?=\d))(?:\d*(?:[,.\' ](?=\d))?)+', text)
if match:
# replace all separators with '.'
m = re.sub(r'[.,\' ]', '.', match.group())
# remove all separators except the fraction separator,
# leave only 2 digits in the fraction
m = re.sub(r'\.(?!\d*$)', r'', m)
text = '{0:0>8.0f}'.format(float(m) * 100.)
return text
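# e.g. (assumed sample inputs) comparable_price('1.234,56 EUR') -> '00123456'
# and comparable_price('7,99') -> '00000799', so prices compare correctly as
# zero-padded strings.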
@ -334,6 +338,11 @@ class SearchFilter(SearchQueryParser):
}
for x in ('author', 'download', 'format'):
q[x+'s'] = q[x]
# make the price in the query the same format as in the results
if location == 'price':
query = comparable_price(query)
for sr in self.srs:
for locvalue in locations:
accessor = q[locvalue]

View File

@ -45,24 +45,26 @@ class AmazonDEKindleStore(StorePlugin):
doc = html.fromstring(f.read())
# Amazon has two results pages.
is_shot = doc.xpath('boolean(//div[@id="shotgunMainResults"])')
# Horizontal grid of books.
if is_shot:
data_xpath = '//div[contains(@class, "result")]'
format_xpath = './/div[@class="productTitle"]/text()'
cover_xpath = './/div[@class="productTitle"]//img/@src'
# Vertical list of books.
else:
data_xpath = '//div[@class="productData"]'
# 20110725: seems that is_shot is gone.
# is_shot = doc.xpath('boolean(//div[@id="shotgunMainResults"])')
# # Horizontal grid of books.
# if is_shot:
# data_xpath = '//div[contains(@class, "result")]'
# format_xpath = './/div[@class="productTitle"]/text()'
# cover_xpath = './/div[@class="productTitle"]//img/@src'
# # Vertical list of books.
# else:
data_xpath = '//div[contains(@class, "result") and contains(@class, "product")]'
format_xpath = './/span[@class="format"]/text()'
cover_xpath = '../div[@class="productImage"]/a/img/@src'
cover_xpath = './/img[@class="productImage"]/@src'
# end is_shot else
for data in doc.xpath(data_xpath):
if counter <= 0:
break
# Even though we are searching digital-text only Amazon will still
# put in results for non Kindle books (author pages). Se we need
# put in results for non Kindle books (author pages). So we need
# to explicitly check if the item is a Kindle book and ignore it
# if it isn't.
format = ''.join(data.xpath(format_xpath))
@ -71,27 +73,17 @@ class AmazonDEKindleStore(StorePlugin):
# We must have an asin otherwise we can't easily reference the
# book later.
asin_href = None
asin_a = data.xpath('.//div[@class="productTitle"]/a[1]')
if asin_a:
asin_href = asin_a[0].get('href', '')
m = re.search(r'/dp/(?P<asin>.+?)(/|$)', asin_href)
if m:
asin = m.group('asin')
else:
continue
else:
continue
asin = ''.join(data.xpath("@name"))
cover_url = ''.join(data.xpath(cover_xpath))
title = ''.join(data.xpath('.//div[@class="productTitle"]/a/text()'))
title = ''.join(data.xpath('.//div[@class="title"]/a/text()'))
price = ''.join(data.xpath('.//div[@class="newPrice"]/span/text()'))
if is_shot:
author = format.split(' von ')[-1]
else:
author = ''.join(data.xpath('.//div[@class="productTitle"]/span[@class="ptBrand"]/text()'))
# if is_shot:
# author = format.split(' von ')[-1]
# else:
author = ''.join(data.xpath('.//div[@class="title"]/span[@class="ptBrand"]/text()'))
author = author.split('von ')[-1]
counter -= 1

View File

@ -42,48 +42,55 @@ class AmazonUKKindleStore(StorePlugin):
doc = html.fromstring(f.read())
# Amazon has two results pages.
is_shot = doc.xpath('boolean(//div[@id="shotgunMainResults"])')
# Horizontal grid of books.
if is_shot:
data_xpath = '//div[contains(@class, "result")]'
cover_xpath = './/div[@class="productTitle"]//img/@src'
# Vertical list of books.
else:
data_xpath = '//div[contains(@class, "product")]'
cover_xpath = './div[@class="productImage"]/a/img/@src'
# 20110725: seems that is_shot is gone.
# is_shot = doc.xpath('boolean(//div[@id="shotgunMainResults"])')
# # Horizontal grid of books.
# if is_shot:
# data_xpath = '//div[contains(@class, "result")]'
# format_xpath = './/div[@class="productTitle"]/text()'
# cover_xpath = './/div[@class="productTitle"]//img/@src'
# # Vertical list of books.
# else:
data_xpath = '//div[contains(@class, "result") and contains(@class, "product")]'
format_xpath = './/span[@class="format"]/text()'
cover_xpath = './/img[@class="productImage"]/@src'
# end is_shot else
for data in doc.xpath(data_xpath):
if counter <= 0:
break
# Even though we are searching digital-text only Amazon will still
# put in results for non Kindle books (author pages). So we need
# to explicitly check if the item is a Kindle book and ignore it
# if it isn't.
format = ''.join(data.xpath(format_xpath))
if 'kindle' not in format.lower():
continue
# We must have an asin otherwise we can't easily reference the
# book later.
asin = ''.join(data.xpath('./@name'))
if not asin:
continue
asin = ''.join(data.xpath("@name"))
cover_url = ''.join(data.xpath(cover_xpath))
title = ''.join(data.xpath('.//div[@class="productTitle"]/a/text()'))
title = ''.join(data.xpath('.//div[@class="title"]/a/text()'))
price = ''.join(data.xpath('.//div[@class="newPrice"]/span/text()'))
# if is_shot:
# author = format.split(' von ')[-1]
# else:
author = ''.join(data.xpath('.//div[@class="title"]/span[@class="ptBrand"]/text()'))
author = author.split('by ')[-1]
counter -= 1
s = SearchResult()
s.cover_url = cover_url.strip()
s.title = title.strip()
s.author = author.strip()
s.price = price.strip()
s.detail_item = asin.strip()
s.formats = ''
if is_shot:
# Amazon UK does not include the author on the grid layout
s.author = ''
self.get_details(s, timeout)
if s.formats != 'Kindle':
continue
else:
author = ''.join(data.xpath('.//div[@class="productTitle"]/span[@class="ptBrand"]/text()'))
s.author = author.split(' by ')[-1].strip()
s.formats = 'Kindle'
yield s

View File

@ -1,27 +0,0 @@
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.opensearch_store import OpenSearchOPDSStore
from calibre.gui2.store.search_result import SearchResult
class EpubBudStore(BasicStoreConfig, OpenSearchOPDSStore):
open_search_url = 'http://www.epubbud.com/feeds/opensearch.xml'
web_url = 'http://www.epubbud.com/'
# http://www.epubbud.com/feeds/catalog.atom
def search(self, query, max_results=10, timeout=60):
for s in OpenSearchOPDSStore.search(self, query, max_results, timeout):
s.price = '$0.00'
s.drm = SearchResult.DRM_UNLOCKED
s.formats = 'EPUB'
# Download links are broken for this store.
s.downloads = {}
yield s

View File

@ -1,80 +0,0 @@
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import urllib2
from contextlib import closing
from lxml import html
from PyQt4.Qt import QUrl
from calibre import browser
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog
class EPubBuyDEStore(BasicStoreConfig, StorePlugin):
def open(self, parent=None, detail_item=None, external=False):
url = 'http://klick.affiliwelt.net/klick.php?bannerid=47653&pid=32307&prid=2627'
url_details = ('http://klick.affiliwelt.net/klick.php?bannerid=47653'
'&pid=32307&prid=2627&prodid={0}')
if external or self.config.get('open_external', False):
if detail_item:
url = url_details.format(detail_item)
open_url(QUrl(url))
else:
detail_url = None
if detail_item:
detail_url = url_details.format(detail_item)
d = WebStoreDialog(self.gui, url, parent, detail_url)
d.setWindowTitle(self.name)
d.set_tags(self.config.get('tags', ''))
d.exec_()
def search(self, query, max_results=10, timeout=60):
url = 'http://www.epubbuy.com/search.php?search_query=' + urllib2.quote(query)
br = browser()
counter = max_results
with closing(br.open(url, timeout=timeout)) as f:
doc = html.fromstring(f.read())
for data in doc.xpath('//li[contains(@class, "ajax_block_product")]'):
if counter <= 0:
break
id = ''.join(data.xpath('./div[@class="center_block"]'
'/p[contains(text(), "artnr:")]/text()')).strip()
if not id:
continue
id = id[6:].strip()
if not id:
continue
cover_url = ''.join(data.xpath('./div[@class="center_block"]'
'/a[@class="product_img_link"]/img/@src'))
if cover_url:
cover_url = 'http://www.epubbuy.com' + cover_url
title = ''.join(data.xpath('./div[@class="center_block"]'
'/a[@class="product_img_link"]/@title'))
author = ''.join(data.xpath('./div[@class="center_block"]/a[2]/text()'))
price = ''.join(data.xpath('.//span[@class="price"]/text()'))
counter -= 1
s = SearchResult()
s.cover_url = cover_url
s.title = title.strip()
s.author = author.strip()
s.price = price
s.drm = SearchResult.DRM_UNLOCKED
s.detail_item = id
s.formats = 'ePub'
yield s

View File

@ -6,6 +6,7 @@ __license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import random
import urllib
from contextlib import closing
@ -23,7 +24,24 @@ from calibre.gui2.store.web_store_dialog import WebStoreDialog
class GoogleBooksStore(BasicStoreConfig, StorePlugin):
def open(self, parent=None, detail_item=None, external=False):
url = 'http://books.google.com/'
aff_id = {
'lid': '41000000033185143',
'pubid': '21000000000352219',
'ganpub': 'k352219',
'ganclk': 'GOOG_1335334761',
}
# Use Kovid's affiliate id 30% of the time.
if random.randint(1, 10) in (1, 2, 3):
aff_id = {
'lid': '41000000031855266',
'pubid': '21000000000352583',
'ganpub': 'k352583',
'ganclk': 'GOOG_1335335464',
}
url = 'http://gan.doubleclick.net/gan_click?lid=%(lid)s&pubid=%(pubid)s' % aff_id
if detail_item:
detail_item += '&ganpub=%(ganpub)s&ganclk=%(ganclk)s' % aff_id
if external or self.config.get('open_external', False):
open_url(QUrl(url_slash_cleaner(detail_item if detail_item else url)))

View File

@ -24,7 +24,7 @@ class LibreDEStore(BasicStoreConfig, StorePlugin):
def open(self, parent=None, detail_item=None, external=False):
url = 'http://ad.zanox.com/ppc/?18817073C15644254T'
url_details = ('http://ad.zanox.com/ppc/?18845780C1371495675T&ULP=[['
url_details = ('http://ad.zanox.com/ppc/?18848208C1197627693T&ULP=[['
'http://www.libri.de/shop/action/productDetails?artiId={0}]]')
if external or self.config.get('open_external', False):

View File

@ -0,0 +1,126 @@
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
__license__ = 'GPL 3'
__copyright__ = '2011, Roman Mukhin <ramses_ru at hotmail.com>'
__docformat__ = 'restructuredtext en'
import random
import re
import urllib2
from contextlib import closing
from lxml import etree, html
from PyQt4.Qt import QUrl
from calibre import browser, url_slash_cleaner
from calibre.ebooks.chardet import xml_to_unicode
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog
class OzonRUStore(BasicStoreConfig, StorePlugin):
shop_url = 'http://www.ozon.ru'
def open(self, parent=None, detail_item=None, external=False):
aff_id = '?partner=romuk'
# Use Kovid's affiliate id 30% of the time.
if random.randint(1, 10) in (1, 2, 3):
aff_id = '?partner=kovidgoyal'
url = self.shop_url + aff_id
detail_url = None
if detail_item:
# http://www.ozon.ru/context/detail/id/3037277/
detail_url = self.shop_url + '/context/detail/id/' + urllib2.quote(detail_item) + aff_id
if external or self.config.get('open_external', False):
open_url(QUrl(url_slash_cleaner(detail_url if detail_url else url)))
else:
d = WebStoreDialog(self.gui, url, parent, detail_url)
d.setWindowTitle(self.name)
d.set_tags(self.config.get('tags', ''))
d.exec_()
def search(self, query, max_results=10, timeout=60):
search_url = self.shop_url + '/webservice/webservice.asmx/SearchWebService?'\
'searchText=%s&searchContext=ebook' % urllib2.quote(query)
counter = max_results
br = browser()
with closing(br.open(search_url, timeout=timeout)) as f:
raw = xml_to_unicode(f.read(), strip_encoding_pats=True, assume_utf8=True)[0]
doc = etree.fromstring(raw)
for data in doc.xpath('//*[local-name() = "SearchItems"]'):
if counter <= 0:
break
counter -= 1
xp_template = 'normalize-space(./*[local-name() = "{0}"]/text())'
s = SearchResult()
s.detail_item = data.xpath(xp_template.format('ID'))
s.title = data.xpath(xp_template.format('Name'))
s.author = data.xpath(xp_template.format('Author'))
s.price = data.xpath(xp_template.format('Price'))
s.cover_url = data.xpath(xp_template.format('Picture'))
if re.match("^\d+?\.\d+?$", s.price):
s.price = u'{:.2F} руб.'.format(float(s.price))
yield s
def get_details(self, search_result, timeout=60):
url = self.shop_url + '/context/detail/id/' + urllib2.quote(search_result.detail_item)
br = browser()
result = False
with closing(br.open(url, timeout=timeout)) as f:
doc = html.fromstring(f.read())
# example where we are going to find formats
# <div class="box">
# ...
# <b>Доступные&nbsp;форматы:</b>
# <div class="vertpadd">.epub, .fb2, .pdf, .pdf, .txt</div>
# ...
# </div>
xpt = u'normalize-space(//div[@class="box"]//*[contains(normalize-space(text()), "Доступные форматы:")][1]/following-sibling::div[1]/text())'
formats = doc.xpath(xpt)
if formats:
result = True
search_result.drm = SearchResult.DRM_UNLOCKED
search_result.formats = ', '.join(_parse_ebook_formats(formats))
# unfortunately no direct links to download books (only buy link)
# search_result.downloads['BF2'] = self.shop_url + '/order/digitalorder.aspx?id=' + + urllib2.quote(search_result.detail_item)
return result
def _parse_ebook_formats(formatsStr):
'''
Creates a list with displayable names of the formats
:param formatsStr: string with comma-separated book formats
as provided by ozon.ru
:return: a list with displayable book formats
'''
formatsUnstruct = formatsStr.lower()
formats = []
if 'epub' in formatsUnstruct:
formats.append('ePub')
if 'pdf' in formatsUnstruct:
formats.append('PDF')
if 'fb2' in formatsUnstruct:
formats.append('FB2')
if 'rtf' in formatsUnstruct:
formats.append('RTF')
if 'txt' in formatsUnstruct:
formats.append('TXT')
if 'djvu' in formatsUnstruct:
formats.append('DjVu')
if 'doc' in formatsUnstruct:
formats.append('DOC')
return formats
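# e.g. (assumed input) _parse_ebook_formats('.epub, .fb2, .pdf, .txt')
# returns ['ePub', 'PDF', 'FB2', 'TXT'].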

View File

@ -1892,6 +1892,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
yield r[iindex]
def get_next_series_num_for(self, series):
series_id = None
if series:
series_id = self.conn.get('SELECT id from series WHERE name=?',
(series,), all=False)
if series_id is None:

View File

@ -10,13 +10,14 @@ import re, os, posixpath
import cherrypy
from calibre import fit_image, guess_type
from calibre.utils.date import fromtimestamp, utcnow
from calibre.utils.date import fromtimestamp
from calibre.library.caches import SortKeyGenerator
from calibre.library.save_to_disk import find_plugboard
from calibre.ebooks.metadata import authors_to_string
from calibre.utils.magick.draw import (save_cover_data_to, Image,
thumbnail as generate_thumbnail)
from calibre.utils.filenames import ascii_filename
from calibre.ebooks.metadata.opf2 import metadata_to_opf
plugboard_content_server_value = 'content_server'
plugboard_content_server_formats = ['epub']
@ -32,7 +33,7 @@ class CSSortKeyGenerator(SortKeyGenerator):
class ContentServer(object):
'''
Handles actually serving content files/covers. Also has
Handles actually serving content files/covers/metadata. Also has
a few utility methods.
'''
@ -68,9 +69,8 @@ class ContentServer(object):
# }}}
def get(self, what, id):
'Serves files, covers, thumbnails from the calibre database'
'Serves files, covers, thumbnails, metadata from the calibre database'
try:
id = int(id)
except ValueError:
@ -90,6 +90,8 @@ class ContentServer(object):
thumb_height=height)
if what == 'cover':
return self.get_cover(id)
if what == 'opf':
return self.get_metadata_as_opf(id)
return self.get_format(id, what)
def static(self, name):
@ -180,6 +182,17 @@ class ContentServer(object):
cherrypy.log.error(traceback.print_exc())
raise cherrypy.HTTPError(404, 'Failed to generate cover: %r'%err)
def get_metadata_as_opf(self, id_):
cherrypy.response.headers['Content-Type'] = \
'application/oebps-package+xml; charset=UTF-8'
mi = self.db.get_metadata(id_, index_is_id=True)
data = metadata_to_opf(mi)
cherrypy.response.timeout = 3600
cherrypy.response.headers['Last-Modified'] = \
self.last_modified(mi.last_modified)
return data
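# Assuming the server's existing /get/<what>/<id> routing, a request such as
# /get/opf/123 would return the OPF metadata for book id 123.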
def get_format(self, id, format):
format = format.upper()
fmt = self.db.format(id, format, index_is_id=True, as_file=True,
@ -217,7 +230,8 @@ class ContentServer(object):
cherrypy.response.headers['Content-Disposition'] = \
b'attachment; filename="%s"'%fname
cherrypy.response.timeout = 3600
cherrypy.response.headers['Last-Modified'] = self.last_modified(utcnow())
cherrypy.response.headers['Last-Modified'] = \
self.last_modified(self.db.format_last_modified(id, format))
return fmt
# }}}

View File

@ -5541,23 +5541,23 @@ msgstr "Книги с такими же тегами"
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:20
msgid "Get books"
msgstr ""
msgstr "Загрузить книги"
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:29
msgid "Search for ebooks"
msgstr ""
msgstr "Поиск книг..."
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:30
msgid "Search for this author"
msgstr ""
msgstr "Поиск по автору"
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:31
msgid "Search for this title"
msgstr ""
msgstr "Поиск по названию"
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:32
msgid "Search for this book"
msgstr ""
msgstr "Поиск по книге"
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:34
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:135
@ -5569,21 +5569,21 @@ msgstr "Магазины"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/chooser_dialog.py:18
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:285
msgid "Choose stores"
msgstr ""
msgstr "Выбрать магазины"
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:83
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:102
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:111
msgid "Cannot search"
msgstr ""
msgstr "Поиск не может быть произведён"
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:130
msgid ""
"Calibre helps you find the ebooks you want by searching the websites of "
"various commercial and public domain book sources for you."
msgstr ""
"Calibre помогает вам отыскать книги, которые вы хотите найти, предлагая вам "
"найденные веб-сайты различных коммерческих и публичных источников книг."
"Calibre поможет Вам найти книги, предлагая "
"веб-сайты различных коммерческих и публичных источников книг."
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:134
msgid ""
@ -5591,6 +5591,8 @@ msgid ""
"are looking for, at the best price. You also get DRM status and other useful "
"information."
msgstr ""
"Используя встроенный поиск Вы можете легко найти магазин предлагающий выгодную цену "
"для интересующей Вас книги. Также Вы получите другу полезную инфрмацию"
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:138
msgid ""
@ -5608,7 +5610,7 @@ msgstr "Показать снова данное сообщение"
#: /home/kovid/work/calibre/src/calibre/gui2/actions/store.py:149
msgid "About Get Books"
msgstr ""
msgstr "О 'Загрузить книги'"
#: /home/kovid/work/calibre/src/calibre/gui2/actions/tweak_epub.py:17
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/tweak_epub_ui.py:60
@ -5617,7 +5619,7 @@ msgstr "Tweak EPUB"
#: /home/kovid/work/calibre/src/calibre/gui2/actions/tweak_epub.py:18
msgid "Make small changes to ePub format books"
msgstr ""
msgstr "Внести небольшие изненения ePub в формат книги"
#: /home/kovid/work/calibre/src/calibre/gui2/actions/tweak_epub.py:19
msgid "T"
@ -5704,7 +5706,7 @@ msgstr "Не могу открыть папку"
#: /home/kovid/work/calibre/src/calibre/gui2/actions/view.py:220
msgid "This book no longer exists in your library"
msgstr ""
msgstr "Эта книга больше не находится в Вашей библиотеке"
#: /home/kovid/work/calibre/src/calibre/gui2/actions/view.py:227
#, python-format
@ -9167,11 +9169,11 @@ msgstr "&Показать пароль"
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:122
msgid "Restart required"
msgstr ""
msgstr "Требуется перезапуск"
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:123
msgid "You must restart Calibre before using this plugin!"
msgstr ""
msgstr "Для использования плагина Вам нужно перезапустить Calibre!"
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:164
#, python-format
@ -9183,17 +9185,17 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:136
#: /home/kovid/work/calibre/src/calibre/gui2/store/search_ui.py:111
msgid "All"
msgstr ""
msgstr "Всё"
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:184
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:302
msgid "Installed"
msgstr ""
msgstr "Установленные"
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:184
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:397
msgid "Not installed"
msgstr ""
msgstr "Не установленные"
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:184
msgid "Update available"
@ -9201,7 +9203,7 @@ msgstr "Доступно обновление"
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:302
msgid "Plugin Name"
msgstr ""
msgstr "Название плагина"
#: /home/kovid/work/calibre/src/calibre/gui2/dialogs/plugin_updater.py:302
#: /home/kovid/work/calibre/src/calibre/gui2/jobs.py:63
@ -13317,7 +13319,7 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/plugins_ui.py:114
msgid "&Load plugin from file"
msgstr ""
msgstr "Загрузить плагин из файла"
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/save_template.py:33
msgid "Any custom field"
@ -13579,11 +13581,11 @@ msgstr "Сбой запуска контент-сервера"
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/server.py:106
msgid "Error log:"
msgstr "Лог ошибок:"
msgstr "Журнал ошибок:"
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/server.py:113
msgid "Access log:"
msgstr "Лог доступа:"
msgstr "Журнал доступа:"
#: /home/kovid/work/calibre/src/calibre/gui2/preferences/server.py:128
msgid "You need to restart the server for changes to take effect"
@ -14053,7 +14055,7 @@ msgstr "Ничего"
#: /home/kovid/work/calibre/src/calibre/gui2/shortcuts.py:59
msgid "Press a key..."
msgstr ""
msgstr "Нажмите клавишу..."
#: /home/kovid/work/calibre/src/calibre/gui2/shortcuts.py:80
msgid "Already assigned"
@ -14108,19 +14110,19 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/store/basic_config_widget_ui.py:38
msgid "Added Tags:"
msgstr ""
msgstr "Добавленные тэги:"
#: /home/kovid/work/calibre/src/calibre/gui2/store/basic_config_widget_ui.py:39
msgid "Open store in external web browswer"
msgstr ""
msgstr "Открыть сайт магазина в интернет броузере"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:219
msgid "&Name:"
msgstr ""
msgstr "&Название"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:221
msgid "&Description:"
msgstr ""
msgstr "&Описание"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:222
msgid "&Headquarters:"
@ -14140,7 +14142,7 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:217
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:220
msgid "true"
msgstr ""
msgstr "да"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:229
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:231
@ -14148,41 +14150,41 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:218
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:221
msgid "false"
msgstr ""
msgstr "нет"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:232
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:216
msgid "Affiliate:"
msgstr ""
msgstr "Партнёрство:"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/adv_search_builder_ui.py:235
msgid "Nam&e/Description ..."
msgstr ""
msgstr "Названи&е/Описание"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/chooser_widget_ui.py:78
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:132
#: /home/kovid/work/calibre/src/calibre/gui2/store/search_ui.py:108
msgid "Query:"
msgstr ""
msgstr "Запрос:"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/chooser_widget_ui.py:81
msgid "Enable"
msgstr ""
msgstr "Включить"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/chooser_widget_ui.py:84
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:137
#: /home/kovid/work/calibre/src/calibre/gui2/store/search_ui.py:112
msgid "Invert"
msgstr ""
msgstr "Инвертировать"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:21
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:37
msgid "Affiliate"
msgstr ""
msgstr "Партнерство"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:21
msgid "Enabled"
msgstr ""
msgstr "Включено"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:21
msgid "Headquarters"
@ -14190,7 +14192,7 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:21
msgid "No DRM"
msgstr ""
msgstr "Без DRM"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:129
msgid ""
@ -14205,13 +14207,14 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:136
msgid "This store only distributes ebooks without DRM."
msgstr ""
msgstr "Этот магазин распространяет электронные книги исключительно без DRM"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:138
msgid ""
"This store distributes ebooks with DRM. It may have some titles without DRM, "
"but you will need to check on a per title basis."
msgstr ""
msgstr "Этот магазин распространяет электронные книги с DRM. Возможно, некоторые издания"
" доступны без DRM, но для этого надо проверять каждую книгу в отдельности."
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:140
#, python-format
@ -14225,46 +14228,46 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:211
#, python-format
msgid "Buying from this store supports the calibre developer: %s."
msgstr ""
msgstr "Покупая в этом магазине Вы поддерживаете проект calibre и разработчика: %s."
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/models.py:145
#, python-format
msgid "This store distributes ebooks in the following formats: %s"
msgstr ""
msgstr "Магазин распространяет эл. книги в следующих фотрматах"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/chooser/results_view.py:47
msgid "Configure..."
msgstr ""
msgstr "Настроить..."
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:99
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:99
msgid "Time"
msgstr ""
msgstr "Время"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:100
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:100
msgid "Number of seconds to wait for a store to respond"
msgstr ""
msgstr "Время ожидания ответа магазина (в секундах)"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:101
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:101
msgid "Number of seconds to let a store process results"
msgstr ""
msgstr "Допустипое время обработки результата магазином (в секундах)"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:102
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:102
msgid "Display"
msgstr ""
msgstr "Показать"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:103
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:103
msgid "Maximum number of results to show per store"
msgstr ""
msgstr "Максимальное количество результатов для показа (по каждому магазину)"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:104
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:104
msgid "Open search result in system browser"
msgstr ""
msgstr "Показывать результаты поиска в системном интернет броузере"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search/search_widget_ui.py:105
msgid "Threads"
@ -14288,11 +14291,11 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:105
msgid "Performance"
msgstr ""
msgstr "Производительность"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:106
msgid "Number of simultaneous searches"
msgstr ""
msgstr "Количество одновременно выполняемых поисков"
#: /home/kovid/work/calibre/src/calibre/gui2/store/config/search_widget_ui.py:107
msgid "Number of simultaneous cache updates"
@ -14308,13 +14311,13 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/store/mobileread_store_dialog_ui.py:62
msgid "Search:"
msgstr ""
msgstr "Поиск:"
#: /home/kovid/work/calibre/src/calibre/gui2/store/mobileread_store_dialog_ui.py:63
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:142
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/store_dialog_ui.py:77
msgid "Books:"
msgstr ""
msgstr "Книги:"
#: /home/kovid/work/calibre/src/calibre/gui2/store/mobileread_store_dialog_ui.py:65
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:144
@ -14323,20 +14326,20 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_store_dialog_ui.py:63
#: /usr/src/qt-everywhere-opensource-src-4.7.2/src/gui/widgets/qdialogbuttonbox.cpp:661
msgid "Close"
msgstr ""
msgstr "Закрыть"
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:212
msgid "&Price:"
msgstr ""
msgstr "&Цена:"
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:219
msgid "Download:"
msgstr ""
msgstr "Скачать"
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/adv_search_builder_ui.py:222
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/adv_search_builder_ui.py:187
msgid "Titl&e/Author/Price ..."
msgstr ""
msgstr "Названи&е/Автор/Цена ..."
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:37
msgid "DRM"
@ -14344,11 +14347,11 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:37
msgid "Download"
msgstr ""
msgstr "Скачать"
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:37
msgid "Price"
msgstr ""
msgstr "Цена"
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:196
#, python-format
@ -14383,90 +14386,90 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/models.py:208
#, python-format
msgid "The following formats can be downloaded directly: %s."
msgstr ""
msgstr "Форматы доступные для непосредственного скачивания: %s."
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/results_view.py:41
msgid "Download..."
msgstr ""
msgstr "Скачать..."
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/results_view.py:45
msgid "Goto in store..."
msgstr ""
msgstr "Перейти в магазин..."
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:114
#, python-format
msgid "Buying from this store supports the calibre developer: %s</p>"
msgstr ""
msgstr "Покупая в этом магазине Вы поддерживаете проект calibre и разработчика: %s</p>"
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:276
msgid "Customize get books search"
msgstr ""
msgstr "Перенастроить под себя поиск книг для скачивания"
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:286
msgid "Configure search"
msgstr ""
msgstr "Настроить поиск"
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:336
msgid "Couldn't find any books matching your query."
msgstr "Ну удалось найти ни одной кники, соотвествующей вашему запросу."
msgstr "Не удалось найти ни одной книги, соотвествующей вашему запросу."
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search.py:350
msgid "Choose format to download to your library."
msgstr ""
msgstr "Выберите формат для скачивания в библиотеку"
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:131
#: /home/kovid/work/calibre/src/calibre/gui2/store/search_ui.py:107
msgid "Get Books"
msgstr ""
msgstr "Скачать книги"
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:140
msgid "Open a selected book in the system's web browser"
msgstr ""
msgstr "Показать выбранную книгу в системном интернет броузере"
#: /home/kovid/work/calibre/src/calibre/gui2/store/search/search_ui.py:141
msgid "Open in &external browser"
msgstr ""
msgstr "Показывать в системном интернет броузере"
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/ebooks_com_plugin.py:96
msgid "Not Available"
msgstr ""
msgstr "Недоступно"
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/adv_search_builder_ui.py:179
msgid ""
"See the <a href=\"http://calibre-ebook.com/user_manual/gui.html#the-search-"
"interface\">User Manual</a> for more help"
msgstr ""
"Смотри <a href=\"http://calibre-ebook.com/user_manual/gui.html#the-search-"
"interface\">Пользовательский мануал</a> для помощи"
"Смотрите <a href=\"http://calibre-ebook.com/user_manual/gui.html#the-search-"
"interface\">Руководство пользователя</a> для помощи"
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/cache_progress_dialog_ui.py:51
msgid "Updating book cache"
msgstr ""
msgstr "Обноволяется кэш книг"
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/cache_update_thread.py:42
msgid "Checking last download date."
msgstr ""
msgstr "Проверяется врема последнего скачивания"
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/cache_update_thread.py:48
msgid "Downloading book list from MobileRead."
msgstr ""
msgstr "Загружается список книг с MobileRead."
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/cache_update_thread.py:61
msgid "Processing books."
msgstr ""
msgstr "Книги обрабатываются"
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/cache_update_thread.py:71
#, python-format
msgid "%(num)s of %(tot)s books processed."
msgstr ""
msgstr "обработано %(num)s из %(tot)."
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/mobileread_plugin.py:62
msgid "Updating MobileRead book cache..."
msgstr ""
msgstr "Обноволяется кэщ MobileRead книг..."
#: /home/kovid/work/calibre/src/calibre/gui2/store/stores/mobileread/store_dialog_ui.py:74
msgid "&Query:"
msgstr ""
msgstr "&Запрос:"
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_control.py:73
msgid ""
@ -14480,15 +14483,15 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_control.py:86
msgid "File is not a supported ebook type. Save to disk?"
msgstr ""
msgstr "Файл содержит неподдерживаемый формат эл. книги. Сохранить на диске?"
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_store_dialog_ui.py:59
msgid "Home"
msgstr ""
msgstr "Главная страница"
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_store_dialog_ui.py:60
msgid "Reload"
msgstr ""
msgstr "Перегрузить"
#: /home/kovid/work/calibre/src/calibre/gui2/store/web_store_dialog_ui.py:61
msgid "%p%"
@ -14502,22 +14505,24 @@ msgstr ""
msgid ""
"Changing the authors for several books can take a while. Are you sure?"
msgstr ""
"Изменить автора нескольких книг займёт некоторое время. Вы согласны"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:729
msgid ""
"Changing the metadata for that many books can take a while. Are you sure?"
msgstr ""
"Изменить мета-данные нескольких книг займёт некоторое время. Вы согласны"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:816
#: /home/kovid/work/calibre/src/calibre/library/database2.py:449
msgid "Searches"
msgstr ""
msgstr "Поиски"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:881
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:901
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:910
msgid "Rename user category"
msgstr ""
msgstr "Переименовать пользовательскую категорию"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/model.py:882
msgid "You cannot use periods in the name when renaming user categories"
@ -14540,30 +14545,30 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:48
msgid "Manage Authors"
msgstr ""
msgstr "Упорядочнить авторов"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:50
msgid "Manage Series"
msgstr ""
msgstr "Упорядочнить серии"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:52
msgid "Manage Publishers"
msgstr ""
msgstr "Упорядочнить издателей"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:54
msgid "Manage Tags"
msgstr ""
msgstr "Упорядочнить тэги"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:56
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:465
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:469
msgid "Manage User Categories"
msgstr "Управление пользовательскими категориями"
msgstr "Упорядочнить пользовательские категории"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:58
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:457
msgid "Manage Saved Searches"
msgstr "Управление сохраненными поисками"
msgstr "Упорядочнить сохраненные поиски"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:66
msgid "Invalid search restriction"
@ -14580,17 +14585,17 @@ msgstr "Новая категория"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:134
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:137
msgid "Delete user category"
msgstr ""
msgstr "Удалить пользовательскую категорию"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:135
#, python-format
msgid "%s is not a user category"
msgstr ""
msgstr "%s не является пользовательской категорией"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:138
#, python-format
msgid "%s contains items. Do you really want to delete it?"
msgstr ""
msgstr "%s содержит элементы. Вы действительно хотете её удалить?"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:159
msgid "Remove category"
@ -14599,16 +14604,16 @@ msgstr "Удалить категорию"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:160
#, python-format
msgid "User category %s does not exist"
msgstr ""
msgstr "Пользовательская категория %s не существует"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:179
msgid "Add to user category"
msgstr ""
msgstr "Добавить в пользовательские категории"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:180
#, python-format
msgid "A user category %s does not exist"
msgstr ""
msgstr "Пользовательская категория %s не существует"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/ui.py:305
msgid "Find item in tag browser"
@ -14701,7 +14706,7 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:359
#, python-format
msgid "Add %s to user category"
msgstr ""
msgstr "Добавить %s в пользовательские категории"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:372
#, python-format
@ -14711,7 +14716,7 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:382
#, python-format
msgid "Delete search %s"
msgstr ""
msgstr "Удалить поиск %s"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:387
#, python-format
@ -14721,27 +14726,27 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:394
#, python-format
msgid "Search for %s"
msgstr ""
msgstr "Искать %s"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:399
#, python-format
msgid "Search for everything but %s"
msgstr ""
msgstr "Искать всё кроме %s"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:411
#, python-format
msgid "Add sub-category to %s"
msgstr ""
msgstr "Добавить подкатегорию в %s"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:415
#, python-format
msgid "Delete user category %s"
msgstr ""
msgstr "Удалить пользовательскую категорию %s"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:420
#, python-format
msgid "Hide category %s"
msgstr ""
msgstr "Скрыть категорию %s"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:424
msgid "Show category"
@ -14750,12 +14755,12 @@ msgstr "Показать категорию"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:434
#, python-format
msgid "Search for books in category %s"
msgstr ""
msgstr "Искать книги в категории %s"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:440
#, python-format
msgid "Search for books not in category %s"
msgstr ""
msgstr "Искать книги НЕ в категории %s"
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:449
#: /home/kovid/work/calibre/src/calibre/gui2/tag_browser/view.py:454
@ -14837,7 +14842,7 @@ msgstr "Извлечь подключенное устройство"
#: /home/kovid/work/calibre/src/calibre/gui2/ui.py:347
msgid "Debug mode"
msgstr ""
msgstr "Резим отладки"
#: /home/kovid/work/calibre/src/calibre/gui2/ui.py:348
#, python-format
@ -14875,7 +14880,7 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/ui.py:630
msgid "Active jobs"
msgstr ""
msgstr "Активные задания"
#: /home/kovid/work/calibre/src/calibre/gui2/ui.py:698
msgid ""
@ -14898,11 +14903,11 @@ msgstr "Доступно обновление!"
#: /home/kovid/work/calibre/src/calibre/gui2/update.py:84
msgid "Show this notification for future updates"
msgstr ""
msgstr "Показвать сообщение о доступности новой версии (обнивления)"
#: /home/kovid/work/calibre/src/calibre/gui2/update.py:89
msgid "&Get update"
msgstr ""
msgstr "&Скачать обнивление"
#: /home/kovid/work/calibre/src/calibre/gui2/update.py:93
msgid "Update &plugins"
@ -14929,11 +14934,11 @@ msgstr ""
#: /home/kovid/work/calibre/src/calibre/gui2/update.py:187
#, python-format
msgid "There are %d plugin updates available"
msgstr ""
msgstr "Доступны обновления для %d плагинов"
#: /home/kovid/work/calibre/src/calibre/gui2/update.py:191
msgid "Install and configure user plugins"
msgstr ""
msgstr "Установка и настройка пользовательских плагинов"
#: /home/kovid/work/calibre/src/calibre/gui2/viewer/bookmarkmanager.py:43
msgid "Edit bookmark"