mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-09 03:04:10 -04:00)

commit 298d368858: sync with Kovid's branch
Changelog.old.yaml: 1679 lines changed (file diff suppressed because it is too large)
Changelog.yaml: 1814 lines changed (file diff suppressed because it is too large)
@@ -616,7 +616,10 @@ or a Remote Desktop solution.
If you must share the actual library, use a file syncing tool like
DropBox or rsync or Microsoft SkyDrive instead of a networked drive. Even with
these tools there is danger of data corruption/loss, so only do this if you are
willing to live with that risk.
willing to live with that risk. In particular, be aware that **Google Drive**
is incompatible with |app|; if you put your |app| library in Google Drive, you
*will* suffer data loss. See
`this thread <http://www.mobileread.com/forums/showthread.php?t=205581>`_ for details.

Content From The Web
---------------------
@@ -692,7 +695,7 @@ Post any output you see in a help message on the `Forum <http://www.mobileread.c
|app| freezes/crashes occasionally?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

There are six possible things I know of, that can cause this:
There are several possible things I know of, that can cause this:

* You recently connected an external monitor or TV to your computer. In
  this case, whenever |app| opens a new window like the edit metadata
@@ -700,10 +703,6 @@ There are six possible things I know of, that can cause this:
  you don't notice it and so you think |app| has frozen. Disconnect your
  second monitor and restart calibre.

* You are using a Wacom branded USB mouse. There is an incompatibility between
  Wacom mice and the graphics toolkit |app| uses. Try using a non-Wacom
  mouse.

* If you use RoboForm, it is known to cause |app| to crash. Add |app| to
  the blacklist of programs inside RoboForm to fix this. Or uninstall
  RoboForm.
@@ -714,6 +713,13 @@ There are six possible things I know of, that can cause this:
* Constant Guard Protection by Xfinity causes crashes in |app|. You have to
  manually allow |app| in it or uninstall Constant Guard Protection.

* Spybot - Search & Destroy blocks |app| from accessing its temporary files,
  breaking viewing and converting of books.

* You are using a Wacom branded USB mouse. There is an incompatibility between
  Wacom mice and the graphics toolkit |app| uses. Try using a non-Wacom
  mouse.

* On some 64 bit versions of Windows there are security software/settings
  that prevent 64-bit |app| from working properly. If you are using the 64-bit
  version of |app| try switching to the 32-bit version.
recipes/democracy_journal.recipe (new file, 27 lines)
@@ -0,0 +1,27 @@
from calibre.web.feeds.news import BasicNewsRecipe
import re

class AdvancedUserRecipe1361743898(BasicNewsRecipe):
    title = u'Democracy Journal'
    description = '''A journal of ideas. Published quarterly.'''
    __author__ = u'David Nye'
    language = 'en'
    oldest_article = 90
    max_articles_per_feed = 30
    no_stylesheets = True
    auto_cleanup = True

    def parse_index(self):
        articles = []
        feeds = []
        soup = self.index_to_soup("http://www.democracyjournal.org")
        for x in soup.findAll(href=re.compile("http://www\.democracyjournal\.org/\d*/.*php$")):
            url = x.get('href')
            title = self.tag_to_string(x)
            articles.append({'title':title, 'url':url, 'description':'', 'date':''})
        feeds.append(('Articles', articles))
        return feeds

    def print_version(self, url):
        return url + '?page=all'
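Side note on the recipe above, not part of the commit: its whole index strategy is the href filter passed to findAll in parse_index. A quick standalone check of that regular expression, with made-up URLs for illustration:

import re

# Same pattern the recipe passes to soup.findAll(); the URLs are invented examples.
pat = re.compile("http://www\.democracyjournal\.org/\d*/.*php$")

for url in ["http://www.democracyjournal.org/27/some-essay.php",  # matches
            "http://www.democracyjournal.org/about",              # rejected: no .php suffix
            "http://othersite.org/27/some-essay.php"]:            # rejected: wrong host
    print url, "->", bool(pat.match(url))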
recipes/el_malpensante.recipe (new file, 27 lines)
@@ -0,0 +1,27 @@
# coding=utf-8
# https://github.com/iemejia/calibrecolombia

'''
http://www.elmalpensante.com/
'''

from calibre.web.feeds.news import BasicNewsRecipe

class ElMalpensante(BasicNewsRecipe):
    title = u'El Malpensante'
    language = 'es_CO'
    __author__ = 'Ismael Mejia <iemejia@gmail.com>'
    cover_url = 'http://elmalpensante.com/img/layout/logo.gif'
    description = 'El Malpensante'
    oldest_article = 30
    simultaneous_downloads = 20
    #tags = 'news, sport, blog'
    use_embedded_content = True
    remove_empty_feeds = True
    max_articles_per_feed = 100
    feeds = [(u'Artículos', u'http://www.elmalpensante.com/articulosRSS.php'),
             (u'Malpensantías', u'http://www.elmalpensante.com/malpensantiasRSS.php'),
             (u'Margaritas', u'http://www.elmalpensante.com/margaritasRSS.php'),
             # This one is almost the same as articulos so we leave articles
             # (u'Noticias', u'http://www.elmalpensante.com/noticiasRSS.php'),
             ]
recipes/financial_times_us.recipe (new file, 182 lines)
@@ -0,0 +1,182 @@
__license__ = 'GPL v3'
__copyright__ = '2013, Darko Miletic <darko.miletic at gmail.com>'
'''
http://www.ft.com/intl/us-edition
'''

import datetime
from calibre.ptempfile import PersistentTemporaryFile
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe

class FinancialTimes(BasicNewsRecipe):
    title = 'Financial Times (US) printed edition'
    __author__ = 'Darko Miletic'
    description = "The Financial Times (FT) is one of the world's leading business news and information organisations, recognised internationally for its authority, integrity and accuracy."
    publisher = 'The Financial Times Ltd.'
    category = 'news, finances, politics, UK, World'
    oldest_article = 2
    language = 'en'
    max_articles_per_feed = 250
    no_stylesheets = True
    use_embedded_content = False
    needs_subscription = True
    encoding = 'utf8'
    publication_type = 'newspaper'
    articles_are_obfuscated = True
    temp_files = []
    masthead_url = 'http://im.media.ft.com/m/img/masthead_main.jpg'
    LOGIN = 'https://registration.ft.com/registration/barrier/login'
    LOGIN2 = 'http://media.ft.com/h/subs3.html'
    INDEX = 'http://www.ft.com/intl/us-edition'
    PREFIX = 'http://www.ft.com'

    conversion_options = {
        'comment' : description
        , 'tags' : category
        , 'publisher' : publisher
        , 'language' : language
        , 'linearize_tables' : True
    }

    def get_browser(self):
        br = BasicNewsRecipe.get_browser(self)
        br.open(self.INDEX)
        if self.username is not None and self.password is not None:
            br.open(self.LOGIN2)
            br.select_form(name='loginForm')
            br['username'] = self.username
            br['password'] = self.password
            br.submit()
        return br

    keep_only_tags = [
        dict(name='div' , attrs={'class':['fullstory fullstoryHeader', 'ft-story-header']})
        ,dict(name='div' , attrs={'class':'standfirst'})
        ,dict(name='div' , attrs={'id' :'storyContent'})
        ,dict(name='div' , attrs={'class':['ft-story-body','index-detail']})
        ,dict(name='h2' , attrs={'class':'entry-title'} )
        ,dict(name='span', attrs={'class':lambda x: x and 'posted-on' in x.split()} )
        ,dict(name='span', attrs={'class':'author_byline'} )
        ,dict(name='div' , attrs={'class':'entry-content'} )
    ]
    remove_tags = [
        dict(name='div', attrs={'id':'floating-con'})
        ,dict(name=['meta','iframe','base','object','embed','link'])
        ,dict(attrs={'class':['storyTools','story-package','screen-copy','story-package separator','expandable-image']})
    ]
    remove_attributes = ['width','height','lang']

    extra_css = """
        body{font-family: Georgia,Times,"Times New Roman",serif}
        h2{font-size:large}
        .ft-story-header{font-size: x-small}
        .container{font-size:x-small;}
        h3{font-size:x-small;color:#003399;}
        .copyright{font-size: x-small}
        img{margin-top: 0.8em; display: block}
        .lastUpdated{font-family: Arial,Helvetica,sans-serif; font-size: x-small}
        .byline,.ft-story-body,.ft-story-header{font-family: Arial,Helvetica,sans-serif}
        """

    def get_artlinks(self, elem):
        articles = []
        count = 0
        for item in elem.findAll('a',href=True):
            count = count + 1
            if self.test and count > 2:
                return articles
            rawlink = item['href']
            url = rawlink
            if not rawlink.startswith('http://'):
                url = self.PREFIX + rawlink
            try:
                urlverified = self.browser.open_novisit(url).geturl() # resolve redirect.
            except:
                continue
            title = self.tag_to_string(item)
            date = strftime(self.timefmt)
            articles.append({
                'title' :title
                ,'date' :date
                ,'url' :urlverified
                ,'description':''
            })
        return articles

    def parse_index(self):
        feeds = []
        soup = self.index_to_soup(self.INDEX)
        dates= self.tag_to_string(soup.find('div', attrs={'class':'btm-links'}).find('div'))
        self.timefmt = ' [%s]'%dates
        wide = soup.find('div',attrs={'class':'wide'})
        if not wide:
            return feeds
        allsections = wide.findAll(attrs={'class':lambda x: x and 'footwell' in x.split()})
        if not allsections:
            return feeds
        count = 0
        for item in allsections:
            count = count + 1
            if self.test and count > 2:
                return feeds
            fitem = item.h3
            if not fitem:
                fitem = item.h4
            ftitle = self.tag_to_string(fitem)
            self.report_progress(0, _('Fetching feed')+' %s...'%(ftitle))
            feedarts = self.get_artlinks(item.ul)
            feeds.append((ftitle,feedarts))
        return feeds

    def preprocess_html(self, soup):
        items = ['promo-box','promo-title',
                 'promo-headline','promo-image',
                 'promo-intro','promo-link','subhead']
        for item in items:
            for it in soup.findAll(item):
                it.name = 'div'
                it.attrs = []
        for item in soup.findAll(style=True):
            del item['style']
        for item in soup.findAll('a'):
            limg = item.find('img')
            if item.string is not None:
                str = item.string
                item.replaceWith(str)
            else:
                if limg:
                    item.name = 'div'
                    item.attrs = []
                else:
                    str = self.tag_to_string(item)
                    item.replaceWith(str)
        for item in soup.findAll('img'):
            if not item.has_key('alt'):
                item['alt'] = 'image'
        return soup

    def get_cover_url(self):
        cdate = datetime.date.today()
        if cdate.isoweekday() == 7:
            cdate -= datetime.timedelta(days=1)
        return cdate.strftime('http://specials.ft.com/vtf_pdf/%d%m%y_FRONT1_USA.pdf')

    def get_obfuscated_article(self, url):
        count = 0
        while (count < 10):
            try:
                response = self.browser.open(url)
                html = response.read()
                count = 10
            except:
                print "Retrying download..."
                count += 1
        tfile = PersistentTemporaryFile('_fa.html')
        tfile.write(html)
        tfile.close()
        self.temp_files.append(tfile)
        return tfile.name

    def cleanup(self):
        self.browser.open('https://registration.ft.com/registration/login/logout?location=')
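A caveat on get_obfuscated_article above, worth noting as an editorial aside: if all ten attempts fail, html is never assigned, so the tfile.write(html) call raises a NameError. A minimal defensive rewrite, kept as a sketch that reuses the same self.browser and PersistentTemporaryFile already imported by this recipe:

    def get_obfuscated_article(self, url):
        html = None
        for attempt in range(10):
            try:
                html = self.browser.open(url).read()
                break
            except:
                print "Retrying download..."
        if html is None:
            # Fail loudly instead of crashing on an unbound name below.
            raise ValueError('Could not download ' + url)
        tfile = PersistentTemporaryFile('_fa.html')
        tfile.write(html)
        tfile.close()
        self.temp_files.append(tfile)
        return tfile.name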
recipes/geopolityka.recipe (new file, 12 lines)
@@ -0,0 +1,12 @@
from calibre.web.feeds.news import BasicNewsRecipe

class BasicUserRecipe1361379046(BasicNewsRecipe):
    title = u'Geopolityka.org'
    language = 'pl'
    __author__ = 'chemik111'
    oldest_article = 15
    max_articles_per_feed = 100
    auto_cleanup = True

    feeds = [(u'Rss', u'http://geopolityka.org/index.php?format=feed&type=rss')]
recipes/hnonline.recipe (new file, 67 lines)
@@ -0,0 +1,67 @@
from calibre.web.feeds.news import BasicNewsRecipe

class HNonlineRecipe(BasicNewsRecipe):
    __license__ = 'GPL v3'
    __author__ = 'lacike'
    language = 'sk'
    version = 1

    title = u'HNonline'
    publisher = u'HNonline'
    category = u'News, Newspaper'
    description = u'News from Slovakia'
    cover_url = u'http://hnonline.sk/img/sk/_relaunch/logo2.png'

    oldest_article = 1
    max_articles_per_feed = 100
    use_embedded_content = False
    remove_empty_feeds = True

    no_stylesheets = True
    remove_javascript = True

    # Feeds from: http://rss.hnonline.sk, for listing see http://rss.hnonline.sk/prehlad
    feeds = []
    feeds.append((u'HNonline|Ekonomika a firmy', u'http://rss.hnonline.sk/?p=kC1000'))
    feeds.append((u'HNonline|Slovensko', u'http://rss.hnonline.sk/?p=kC2000'))
    feeds.append((u'HNonline|Svet', u'http://rss.hnonline.sk/?p=kC3000'))
    feeds.append((u'HNonline|\u0160port', u'http://rss.hnonline.sk/?p=kC4000'))
    feeds.append((u'HNonline|Online rozhovor', u'http://rss.hnonline.sk/?p=kCR000'))

    feeds.append((u'FinWeb|Spr\u00E1vy zo sveta financi\u00ED', u'http://rss.finweb.hnonline.sk/spravodajstvo'))
    feeds.append((u'FinWeb|Koment\u00E1re a anal\u00FDzy', u'http://rss.finweb.hnonline.sk/?p=kPC200'))
    feeds.append((u'FinWeb|Invest\u00EDcie', u'http://rss.finweb.hnonline.sk/?p=kPC300'))
    feeds.append((u'FinWeb|Svet akci\u00ED', u'http://rss.finweb.hnonline.sk/?p=kPC400'))
    feeds.append((u'FinWeb|Rozhovory', u'http://rss.finweb.hnonline.sk/?p=kPC500'))
    feeds.append((u'FinWeb|T\u00E9ma t\u00FD\u017Ed\u0148a', u'http://rss.finweb.hnonline.sk/?p=kPC600'))
    feeds.append((u'FinWeb|Rebr\u00ED\u010Dky', u'http://rss.finweb.hnonline.sk/?p=kPC700'))

    feeds.append((u'HNstyle|Kult\u00FAra', u'http://style.hnonline.sk/?p=kTC100'))
    feeds.append((u'HNstyle|Auto-moto', u'http://style.hnonline.sk/?p=kTC200'))
    feeds.append((u'HNstyle|Digit\u00E1l', u'http://style.hnonline.sk/?p=kTC300'))
    feeds.append((u'HNstyle|Veda', u'http://style.hnonline.sk/?p=kTCV00'))
    feeds.append((u'HNstyle|Dizajn', u'http://style.hnonline.sk/?p=kTC400'))
    feeds.append((u'HNstyle|Cestovanie', u'http://style.hnonline.sk/?p=kTCc00'))
    feeds.append((u'HNstyle|V\u00EDkend', u'http://style.hnonline.sk/?p=kTC800'))
    feeds.append((u'HNstyle|Gastro', u'http://style.hnonline.sk/?p=kTC600'))
    feeds.append((u'HNstyle|M\u00F3da', u'http://style.hnonline.sk/?p=kTC700'))
    feeds.append((u'HNstyle|Modern\u00E1 \u017Eena', u'http://style.hnonline.sk/?p=kTCA00'))
    feeds.append((u'HNstyle|Pre\u010Do nie?!', u'http://style.hnonline.sk/?p=k7C000'))

    keep_only_tags = []
    keep_only_tags.append(dict(name = 'h1', attrs = {'class': 'detail-titulek'}))
    keep_only_tags.append(dict(name = 'div', attrs = {'class': 'detail-podtitulek'}))
    keep_only_tags.append(dict(name = 'div', attrs = {'class': 'detail-perex'}))
    keep_only_tags.append(dict(name = 'div', attrs = {'class': 'detail-text'}))

    remove_tags = []
    #remove_tags.append(dict(name = 'div', attrs = {'id': re.compile('smeplayer.*')}))

    remove_tags_after = []
    #remove_tags_after = [dict(name = 'p', attrs = {'class': 'autor_line'})]

    extra_css = '''
        @font-face {font-family: "serif1";src:url(res:///opt/sony/ebook/FONT/tt0011m_.ttf)}
        @font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/LiberationSans.ttf)}
        body {font-family: sans1, serif1;}
        '''
BIN recipes/icons/financial_times_us.png (new binary file, 1.4 KiB; contents not shown)
BIN recipes/icons/hnonline.png (new binary file, 7.5 KiB; contents not shown)
BIN recipes/icons/nezavisne_novine.png (new binary file, 454 B; contents not shown)
recipes/nezavisne_novine.recipe (new file, 59 lines)
@@ -0,0 +1,59 @@
__license__ = 'GPL v3'
__copyright__ = '2013, Darko Miletic <darko.miletic at gmail.com>'
'''
www.nezavisne.com
'''
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe

class NezavisneNovine(BasicNewsRecipe):
    title = 'Nezavisne novine'
    __author__ = 'Darko Miletic'
    description = 'Nezavisne novine - Najnovije vijesti iz BiH, Srbije, Hrvatske, Crne Gore i svijeta'
    publisher = 'NIGP "DNN"'
    category = 'news, politics, Bosnia, Balcans'
    oldest_article = 2
    max_articles_per_feed = 200
    no_stylesheets = True
    encoding = 'utf8'
    use_embedded_content = False
    language = 'sr'
    remove_empty_feeds = True
    publication_type = 'newspaper'
    cover_url = strftime('http://pdf.nezavisne.com/slika/novina/nezavisne_novine.jpg?v=%Y%m%d')
    masthead_url = 'http://www.nezavisne.com/slika/osnova/nezavisne-novine-logo.gif'
    extra_css = """
        body{font-family: Arial,Helvetica,sans-serif }
        img{margin-bottom: 0.4em; display:block}
        """

    conversion_options = {
        'comment' : description
        , 'tags' : category
        , 'publisher' : publisher
        , 'language' : language
    }
    keep_only_tags = [dict(name='div', attrs={'class':'vijest'})]
    remove_tags_after = dict(name='div', attrs={'id':'wrap'})
    remove_tags = [
        dict(name=['meta','link','iframe','object'])
        ,dict(name='div', attrs={'id':'wrap'})
    ]
    remove_attributes=['lang','xmlns:fb','xmlns:og']


    feeds = [
        (u'Novosti' , u'http://feeds.feedburner.com/Novosti-NezavisneNovine' )
        ,(u'Posao' , u'http://feeds.feedburner.com/Posao-NezavisneNovine' )
        ,(u'Sport' , u'http://feeds.feedburner.com/Sport-NezavisneNovine' )
        ,(u'Komentar' , u'http://feeds.feedburner.com/Komentari-NezavisneNovine' )
        ,(u'Umjetnost i zabava' , u'http://feeds.feedburner.com/UmjetnostIZabava-NezavisneNovine' )
        ,(u'Život i stil' , u'http://feeds.feedburner.com/ZivotIStil-NezavisneNovine' )
        ,(u'Auto' , u'http://feeds.feedburner.com/Auto-NezavisneNovine' )
        ,(u'Nauka i tehnologija', u'http://feeds.feedburner.com/NaukaITehnologija-NezavisneNovine')
    ]

    def preprocess_html(self, soup):
        for item in soup.findAll(style=True):
            del item['style']
        return soup
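Worth noting about the recipe above: cover_url is computed once, when the module is imported, because strftime is called in the class body. As a sketch, the stdlib equivalent of what that expression produces (the date in the output depends on the day you run it):

import time

# Stdlib sketch of what the strftime-based cover_url evaluates to.
print time.strftime('http://pdf.nezavisne.com/slika/novina/nezavisne_novine.jpg?v=%Y%m%d')
# e.g. http://pdf.nezavisne.com/slika/novina/nezavisne_novine.jpg?v=20130226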
recipes/revista_cromos.recipe (new file, 33 lines)
@@ -0,0 +1,33 @@
# coding=utf-8
# https://github.com/iemejia/calibrecolombia

'''
http://www.cromos.com.co/
'''

from calibre.web.feeds.news import BasicNewsRecipe

class ElMalpensante(BasicNewsRecipe):
    title = u'Revista Cromos'
    language = 'es_CO'
    __author__ = 'Ismael Mejia <iemejia@gmail.com>'
    cover_url = 'http://www.cromos.com.co/sites/cromos.com.co/themes/cromos_theme/images/logo_morado.gif'
    description = 'Revista Cromos'
    oldest_article = 7
    simultaneous_downloads = 20
    #tags = 'news, sport, blog'
    use_embedded_content = True
    remove_empty_feeds = True
    max_articles_per_feed = 100
    feeds = [(u'Cromos', u'http://www.cromos.com.co/rss.xml'),
             (u'Moda', u'http://www.cromos.com.co/moda/feed'),
             (u'Estilo de Vida', u'http://www.cromos.com.co/estilo-de-vida/feed'),
             (u'Cuidado Personal', u'http://www.cromos.com.co/estilo-de-vida/cuidado-personal/feed'),
             (u'Salud y Alimentación', u'http://www.cromos.com.co/estilo-de-vida/salud-y-alimentacion/feed'),
             (u'Personajes', u'http://www.cromos.com.co/personajes/feed'),
             (u'Actualidad', u'http://www.cromos.com.co/personajes/actualidad/feed'),
             (u'Espectáculo', u'http://www.cromos.com.co/personajes/espectaculo/feed'),
             (u'Reportajes', u'http://www.cromos.com.co/reportajes/feed'),
             (u'Eventos', u'http://www.cromos.com.co/eventos/feed'),
             (u'Modelos', u'http://www.cromos.com.co/modelos/feed'),
             ]
@@ -1,24 +1,38 @@
#!/usr/bin/env python

__license__ = 'GPL v3'
__copyright__ = '2008, Darko Miletic <darko.miletic at gmail.com>'
'''
sciencenews.org
'''
from calibre.web.feeds.news import BasicNewsRecipe

class Sciencenews(BasicNewsRecipe):
    title = u'ScienceNews'
    __author__ = u'Darko Miletic and Sujata Raman'
    description = u"Science News is an award-winning weekly newsmagazine covering the most important research in all fields of science. Its 16 pages each week are packed with short, accurate articles that appeal to both general readers and scientists. Published since 1922, the magazine now reaches about 150,000 subscribers and more than 1 million readers. These are the latest News Items from Science News."
class ScienceNewsIssue(BasicNewsRecipe):
    title = u'Science News Recent Issues'
    __author__ = u'Darko Miletic, Sujata Raman and Starson17'
    description = u'''Science News is an award-winning weekly
        newsmagazine covering the most important research in all fields of science.
        Its 16 pages each week are packed with short, accurate articles that appeal
        to both general readers and scientists. Published since 1922, the magazine
        now reaches about 150,000 subscribers and more than 1 million readers.
        These are the latest News Items from Science News. This recipe downloads
        the last 30 days worth of articles.'''
    category = u'Science, Technology, News'
    publisher = u'Society for Science & the Public'
    oldest_article = 30
    language = 'en'

    max_articles_per_feed = 100
    no_stylesheets = True
    use_embedded_content = False
    auto_cleanup = True
    timefmt = ' [%A, %d %B, %Y]'
    recursions = 1
    remove_attributes = ['style']

    conversion_options = {'linearize_tables' : True
                          , 'comment' : description
                          , 'tags' : category
                          , 'publisher' : publisher
                          , 'language' : language
                          }

    extra_css = '''
        .content_description{font-family:georgia ;font-size:x-large; color:#646464 ; font-weight:bold;}
@@ -27,36 +41,33 @@ class Sciencenews(BasicNewsRecipe):
        .content_edition{font-family:helvetica,arial ;font-size: xx-small ;}
        .exclusive{color:#FF0000 ;}
        .anonymous{color:#14487E ;}
        .content_content{font-family:helvetica,arial ;font-size: x-small ; color:#000000;}
        .description{color:#585858;font-family:helvetica,arial ;font-size: xx-small ;}
        .content_content{font-family:helvetica,arial ;font-size: medium ; color:#000000;}
        .description{color:#585858;font-family:helvetica,arial ;font-size: large ;}
        .credit{color:#A6A6A6;font-family:helvetica,arial ;font-size: xx-small ;}
        '''

    #keep_only_tags = [ dict(name='div', attrs={'id':'column_action'}) ]
    #remove_tags_after = dict(name='ul', attrs={'id':'content_functions_bottom'})
    #remove_tags = [
    #dict(name='ul', attrs={'id':'content_functions_bottom'})
    #,dict(name='div', attrs={'id':['content_functions_top','breadcrumb_content']})
    #,dict(name='img', attrs={'class':'icon'})
    #,dict(name='div', attrs={'class': 'embiggen'})
    #]
    keep_only_tags = [ dict(name='div', attrs={'class':'content_content'}),
                       dict(name='ul', attrs={'id':'toc'})
                     ]

    feeds = [(u"Science News / News Items", u'http://sciencenews.org/index.php/feed/type/news/name/news.rss/view/feed/name/all.rss')]
    feeds = [(u"Science News Current Issues", u'http://www.sciencenews.org/view/feed/type/edition/name/issues.rss')]

    match_regexps = [
        r'www.sciencenews.org/view/feature/id/',
        r'www.sciencenews.org/view/generic/id'
        ]

    def get_cover_url(self):
        cover_url = None
        index = 'http://www.sciencenews.org/view/home'
        soup = self.index_to_soup(index)
        link_item = soup.find(name = 'img',alt = "issue")
        print link_item
        if link_item:
            cover_url = 'http://www.sciencenews.org' + link_item['src'] + '.jpg'

        return cover_url

    #def preprocess_html(self, soup):

        #for tag in soup.findAll(name=['span']):
            #tag.name = 'div'

        #return soup
    def preprocess_html(self, soup):
        for tag in soup.findAll(name=['span']):
            tag.name = 'div'
        return soup
recipes/unperiodico.recipe (new file, 21 lines)
@@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-
# https://github.com/iemejia/calibrecolombia

'''
http://www.unperiodico.unal.edu.co/
'''

from calibre.web.feeds.news import BasicNewsRecipe

class UNPeriodico(BasicNewsRecipe):
    title = u'UN Periodico'
    language = 'es_CO'
    __author__ = 'Ismael Mejia <iemejia@gmail.com>'
    cover_url = 'http://www.unperiodico.unal.edu.co/fileadmin/templates/periodico/img/logoperiodico.png'
    description = 'UN Periodico'
    oldest_article = 30
    max_articles_per_feed = 100
    publication_type = 'newspaper'
    feeds = [
        (u'UNPeriodico', u'http://www.unperiodico.unal.edu.co/rss/type/rss2/')
    ]
@@ -55,20 +55,14 @@ class WallStreetJournal(BasicNewsRecipe):
    ]
    remove_tags_after = [dict(id="article_story_body"), {'class':"article story"},]

    use_javascript_to_login = True

    def get_browser(self):
        br = BasicNewsRecipe.get_browser(self)
        if self.username is not None and self.password is not None:
            br.open('http://commerce.wsj.com/auth/login')
            br.select_form(nr=1)
            br['user'] = self.username
            br['password'] = self.password
            res = br.submit()
            raw = res.read()
            if 'Welcome,' not in raw and '>Logout<' not in raw and '>Log Out<' not in raw:
                raise ValueError('Failed to log in to wsj.com, check your '
                                 'username and password')
        return br
    def javascript_login(self, br, username, password):
        br.visit('https://id.wsj.com/access/pages/wsj/us/login_standalone.html?mg=com-wsj', timeout=120)
        f = br.select_form(nr=0)
        f['username'] = username
        f['password'] = password
        br.submit(timeout=120)

    def populate_article_metadata(self, article, soup, first):
        if first and hasattr(self, 'add_toc_thumbnail'):
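This hunk replaces WSJ's mechanize-based login with calibre's JavaScript login hook: with use_javascript_to_login = True, calibre calls javascript_login with a JavaScript-capable browser instead of the mechanize one, which copes with login pages that build their forms via script. A stripped-down sketch of the pattern, where the site URL and form layout are placeholders rather than WSJ's:

from calibre.web.feeds.news import BasicNewsRecipe

class JSLoginExample(BasicNewsRecipe):
    title = 'JS login example'
    needs_subscription = True
    use_javascript_to_login = True

    def javascript_login(self, br, username, password):
        # br here is a JavaScript-capable browser, not the mechanize one.
        br.visit('http://example.com/login', timeout=120)
        f = br.select_form(nr=0)
        f['username'] = username
        f['password'] = password
        br.submit(timeout=120)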
@@ -88,7 +88,7 @@ class ZeitEPUBAbo(BasicNewsRecipe):
            (re.compile(u' \u00AB'), lambda match: u'\u00AB '), # before closing quotation
            (re.compile(u'\u00BB '), lambda match: u' \u00BB'), # after opening quotation
            # filtering for spaces in large numbers for better readability
            (re.compile(r'(?<=\d\d)(?=\d\d\d[ ,\.;\)<\?!-])'), lambda match: u'\u2008'), # end of the number with some character following
            (re.compile(r'(?<=\d\d)(?=\d\d\d[ ,;\)<\?!-])'), lambda match: u'\u2008'), # end of the number with some character following
            (re.compile(r'(?<=\d\d)(?=\d\d\d. )'), lambda match: u'\u2008'), # end of the number with full-stop following, then space is necessary (avoid file names)
            (re.compile(u'(?<=\d)(?=\d\d\d\u2008)'), lambda match: u'\u2008'), # next level
            (re.compile(u'(?<=\d)(?=\d\d\d\u2008)'), lambda match: u'\u2008'), # next level
Binary file not shown.
@@ -356,6 +356,10 @@ h2.library_name {
    color: red;
}

#booklist a.summary_thumb img {
    border: none
}

#booklist > #pagelist { display: none; }

#goto_page_dialog ul {
@@ -474,5 +478,14 @@ h2.library_name {
    color: red
}

.details a.details_thumb img {
    border: none
}

.details #random_button {
    display:block
}


/* }}} */
@@ -324,9 +324,15 @@ function show_details(a_dom) {
function book() {
    hidesort();
    $('.details .left img').load(function() {
        var rb = $('#random_button');
        rb.button();
        var img = $('.details .left img');
        var height = $('#main').height();
        height = Math.max(height, img.height() + 100);
        var bh = 0;
        if (rb.length > 0) {
            bh = rb.height();
        }
        height = Math.max(height, img.height() + bh + 100);
        $('#main').height(height);
    });
}
@@ -1,6 +1,7 @@
<div id="details_{id}" class="details">
    <div class="left">
        <img alt="Cover of {title}" src="{prefix}/get/cover/{id}" />
        <a href="{get_url}" title="Click to read {title} in the {fmt} format" class="details_thumb"><img alt="Cover of {title}" src="{prefix}/get/cover/{id}" /></a>
        {random}
    </div>
    <div class="right">
        <div class="field formats">{formats}</div>
@@ -1,6 +1,6 @@
<div id="summary_{id}" class="summary">
    <div class="left">
        <img alt="Cover of {title}" src="{prefix}/get/thumb_90_120/{id}" />
        <a href="{get_url}" class="summary_thumb" title="Click to read {title} in the {fmt} format"><img alt="Cover of {title}" src="{prefix}/get/thumb_90_120/{id}" /></a>
        {get_button}
    </div>
    <div class="right">
@@ -517,3 +517,17 @@ default_tweak_format = None
# your library and your personal editing style.
preselect_first_completion = False

#: Recognize numbers inside text when sorting
# This means that when sorting on text fields like title the text "Book 2"
# will sort before the text "Book 100". If you want this behavior, set
# numeric_collation = True. Note that doing so will cause problems with text
# that starts with numbers and is a little slower.
numeric_collation = False

#: Sort the list of libraries alphabetically
# The list of libraries in the Copy to Library and Quick Switch menus are
# normally sorted by most used. However, if there are more than a certain
# number of such libraries, the sorting becomes alphabetic. You can set that
# number here. The default is ten libraries.
many_libraries = 10
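To see what the new numeric_collation tweak is about, here is the effect in plain Python; the key below is an illustrative natural-sort key, not calibre's actual implementation:

import re

def natural_key(s):
    # Compare digit runs as numbers, everything else case-insensitively.
    return [int(t) if t.isdigit() else t.lower() for t in re.split(r'(\d+)', s)]

titles = ['Book 100', 'Book 2', 'Book 10']
print sorted(titles)                   # plain: ['Book 10', 'Book 100', 'Book 2']
print sorted(titles, key=natural_key)  # numeric: ['Book 2', 'Book 10', 'Book 100']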
@@ -38,7 +38,7 @@ binary_includes = [
    '/lib/libz.so.1',
    '/usr/lib/libtiff.so.5',
    '/lib/libbz2.so.1',
    '/usr/lib/libpoppler.so.27',
    '/usr/lib/libpoppler.so.28',
    '/usr/lib/libxml2.so.2',
    '/usr/lib/libopenjpeg.so.2',
    '/usr/lib/libxslt.so.1',
@@ -12,14 +12,14 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-11-25 14:01+0000\n"
"PO-Revision-Date: 2013-01-19 20:28+0000\n"
"PO-Revision-Date: 2013-02-19 18:01+0000\n"
"Last-Translator: Ferran Rius <frius64@hotmail.com>\n"
"Language-Team: Catalan <linux@softcatala.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2013-01-20 04:36+0000\n"
"X-Generator: Launchpad (build 16430)\n"
"X-Launchpad-Export-Date: 2013-02-20 04:50+0000\n"
"X-Generator: Launchpad (build 16491)\n"
"Language: ca\n"

#. name for aaa
@@ -1920,7 +1920,7 @@ msgstr "Arára; Mato Grosso"

#. name for axk
msgid "Yaka (Central African Republic)"
msgstr "Yaka (República Centreafricana)"
msgstr "Yaka (República Centrafricana)"

#. name for axm
msgid "Armenian; Middle"
@@ -3528,7 +3528,7 @@ msgstr "Buamu"

#. name for boy
msgid "Bodo (Central African Republic)"
msgstr "Bodo (República Centreafricana)"
msgstr "Bodo (República Centrafricana)"

#. name for boz
msgid "Bozo; Tiéyaxo"
@@ -7928,7 +7928,7 @@ msgstr "Oromo; occidental"

#. name for gba
msgid "Gbaya (Central African Republic)"
msgstr "Gbaya (República Centreafricana)"
msgstr "Gbaya (República Centrafricana)"

#. name for gbb
msgid "Kaytetye"
@@ -11184,7 +11184,7 @@ msgstr ""

#. name for kbn
msgid "Kare (Central African Republic)"
msgstr "Kare (República Centreafricana)"
msgstr "Kare (República Centrafricana)"

#. name for kbo
msgid "Keliko"
@@ -20720,7 +20720,7 @@ msgstr "Pitjantjatjara"

#. name for pka
msgid "Prākrit; Ardhamāgadhī"
msgstr ""
msgstr "Pràcrit; Ardhamagadhi"

#. name for pkb
msgid "Pokomo"
@@ -20776,31 +20776,31 @@ msgstr "Polonombauk"

#. name for plc
msgid "Palawano; Central"
msgstr ""
msgstr "Palawà; Central"

#. name for pld
msgid "Polari"
msgstr ""
msgstr "Polari"

#. name for ple
msgid "Palu'e"
msgstr ""
msgstr "Palue"

#. name for plg
msgid "Pilagá"
msgstr ""
msgstr "Pilagà"

#. name for plh
msgid "Paulohi"
msgstr ""
msgstr "Paulohi"

#. name for pli
msgid "Pali"
msgstr ""
msgstr "Pali"

#. name for plj
msgid "Polci"
msgstr ""
msgstr "Polci"

#. name for plk
msgid "Shina; Kohistani"
@@ -20812,19 +20812,19 @@ msgstr "Palaung; Shwe"

#. name for pln
msgid "Palenquero"
msgstr ""
msgstr "Palenquero"

#. name for plo
msgid "Popoluca; Oluta"
msgstr ""
msgstr "Popoluca; Oluta"

#. name for plp
msgid "Palpa"
msgstr ""
msgstr "Palpa"

#. name for plq
msgid "Palaic"
msgstr ""
msgstr "Palaic"

#. name for plr
msgid "Senoufo; Palaka"
@@ -20840,15 +20840,15 @@ msgstr "Malgaix; Plateau"

#. name for plu
msgid "Palikúr"
msgstr ""
msgstr "Palikur"

#. name for plv
msgid "Palawano; Southwest"
msgstr ""
msgstr "Palawà; Sudoccidental"

#. name for plw
msgid "Palawano; Brooke's Point"
msgstr ""
msgstr "Palawà; Brooke"

#. name for ply
msgid "Bolyu"
@@ -20856,43 +20856,43 @@ msgstr ""

#. name for plz
msgid "Paluan"
msgstr ""
msgstr "Paluà"

#. name for pma
msgid "Paama"
msgstr ""
msgstr "Paama"

#. name for pmb
msgid "Pambia"
msgstr ""
msgstr "Pambia"

#. name for pmc
msgid "Palumata"
msgstr ""
msgstr "Palumata"

#. name for pme
msgid "Pwaamei"
msgstr ""
msgstr "Pwaamei"

#. name for pmf
msgid "Pamona"
msgstr ""
msgstr "Pamona"

#. name for pmh
msgid "Prākrit; Māhārāṣṭri"
msgstr ""
msgstr "Pràcrit; Maharastri"

#. name for pmi
msgid "Pumi; Northern"
msgstr ""
msgstr "Pumi; Septentrional"

#. name for pmj
msgid "Pumi; Southern"
msgstr ""
msgstr "Pumi; Meridional"

#. name for pmk
msgid "Pamlico"
msgstr ""
msgstr "Algonquí Carolina"

#. name for pml
msgid "Lingua Franca"
@@ -20904,11 +20904,11 @@ msgstr "Pol"

#. name for pmn
msgid "Pam"
msgstr ""
msgstr "Pam"

#. name for pmo
msgid "Pom"
msgstr ""
msgstr "Pom"

#. name for pmq
msgid "Pame; Northern"
@@ -20916,11 +20916,11 @@ msgstr "Pame; Septentrional"

#. name for pmr
msgid "Paynamar"
msgstr ""
msgstr "Paynamar"

#. name for pms
msgid "Piemontese"
msgstr ""
msgstr "Piemontès"

#. name for pmt
msgid "Tuamotuan"
@@ -20956,7 +20956,7 @@ msgstr "Panjabi; Occidental"

#. name for pnc
msgid "Pannei"
msgstr ""
msgstr "Pannei"

#. name for pne
msgid "Penan; Western"
@@ -20964,11 +20964,11 @@ msgstr "Penan; Occidental"

#. name for png
msgid "Pongu"
msgstr ""
msgstr "Pongu"

#. name for pnh
msgid "Penrhyn"
msgstr ""
msgstr "Penrhyn"

#. name for pni
msgid "Aoheng"
@@ -20976,27 +20976,27 @@ msgstr ""

#. name for pnm
msgid "Punan Batu 1"
msgstr ""
msgstr "Punan Batu"

#. name for pnn
msgid "Pinai-Hagahai"
msgstr ""
msgstr "Pinai-Hagahai"

#. name for pno
msgid "Panobo"
msgstr ""
msgstr "Panobo"

#. name for pnp
msgid "Pancana"
msgstr ""
msgstr "Pancana"

#. name for pnq
msgid "Pana (Burkina Faso)"
msgstr ""
msgstr "Pana (Burkina Faso)"

#. name for pnr
msgid "Panim"
msgstr ""
msgstr "Panim"

#. name for pns
msgid "Ponosakan"
@@ -21028,7 +21028,7 @@ msgstr ""

#. name for pnz
msgid "Pana (Central African Republic)"
msgstr ""
msgstr "Pana (República Centrafricana)"

#. name for poc
msgid "Poqomam"
@@ -21056,7 +21056,7 @@ msgstr ""

#. name for poi
msgid "Popoluca; Highland"
msgstr ""
msgstr "Popoluca; Muntanya"

#. name for pok
msgid "Pokangá"
@@ -21084,7 +21084,7 @@ msgstr ""

#. name for poq
msgid "Popoluca; Texistepec"
msgstr ""
msgstr "Popoluca; Texistepec"

#. name for por
msgid "Portuguese"
@@ -21092,7 +21092,7 @@ msgstr "Portuguès"

#. name for pos
msgid "Popoluca; Sayula"
msgstr ""
msgstr "Popoluca; Sayula"

#. name for pot
msgid "Potawatomi"
@@ -21336,7 +21336,7 @@ msgstr "Paixtú; Central"

#. name for psu
msgid "Prākrit; Sauraseni"
msgstr ""
msgstr "Pràcrit; Sauraseni"

#. name for psw
msgid "Port Sandwich"
@@ -9,14 +9,14 @@ msgstr ""
"Project-Id-Version: calibre\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-11-25 14:01+0000\n"
"PO-Revision-Date: 2013-01-12 08:34+0000\n"
"Last-Translator: Jellby <Unknown>\n"
"PO-Revision-Date: 2013-02-26 12:21+0000\n"
"Last-Translator: Miguel Angel del Olmo <silinio45@gmail.com>\n"
"Language-Team: Español; Castellano <>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2013-01-13 04:37+0000\n"
"X-Generator: Launchpad (build 16420)\n"
"X-Launchpad-Export-Date: 2013-02-27 04:37+0000\n"
"X-Generator: Launchpad (build 16506)\n"

#. name for aaa
msgid "Ghotuo"
@@ -9708,7 +9708,7 @@ msgstr ""

#. name for hto
msgid "Huitoto; Minica"
msgstr ""
msgstr "Huitoto; Meneca"

#. name for hts
msgid "Hadza"
@@ -9736,7 +9736,7 @@ msgstr ""

#. name for hue
msgid "Huave; San Francisco Del Mar"
msgstr ""
msgstr "Huave; San Francisco Del Mar"

#. name for huf
msgid "Humene"
@@ -9792,7 +9792,7 @@ msgstr ""

#. name for hus
msgid "Huastec"
msgstr ""
msgstr "Huasteco"

#. name for hut
msgid "Humla"
@@ -9800,11 +9800,11 @@ msgstr ""

#. name for huu
msgid "Huitoto; Murui"
msgstr ""
msgstr "Huitoto; Murui"

#. name for huv
msgid "Huave; San Mateo Del Mar"
msgstr ""
msgstr "Huave; San Mateo Del Mar"

#. name for huw
msgid "Hukumina"
@@ -9812,7 +9812,7 @@ msgstr ""

#. name for hux
msgid "Huitoto; Nüpode"
msgstr ""
msgstr "Huitoto; Nipode"

#. name for huy
msgid "Hulaulá"
@@ -9828,7 +9828,7 @@ msgstr ""

#. name for hve
msgid "Huave; San Dionisio Del Mar"
msgstr ""
msgstr "Huave; San Dionisio Del Mar"

#. name for hvk
msgid "Haveke"
@@ -9840,7 +9840,7 @@ msgstr ""

#. name for hvv
msgid "Huave; Santa María Del Mar"
msgstr ""
msgstr "Huave; Santa María Del Mar"

#. name for hwa
msgid "Wané"
@@ -9884,7 +9884,7 @@ msgstr "Iban"

#. name for ibb
msgid "Ibibio"
msgstr ""
msgstr "Ibibio"

#. name for ibd
msgid "Iwaidja"
@@ -9964,7 +9964,7 @@ msgstr ""

#. name for ide
msgid "Idere"
msgstr ""
msgstr "Idere"

#. name for idi
msgid "Idi"
@@ -9976,7 +9976,7 @@ msgstr "Ido"

#. name for idr
msgid "Indri"
msgstr ""
msgstr "Indri"

#. name for ids
msgid "Idesa"
@@ -9988,7 +9988,7 @@ msgstr ""

#. name for idu
msgid "Idoma"
msgstr ""
msgstr "Idoma"

#. name for ifa
msgid "Ifugao; Amganad"
@@ -9996,7 +9996,7 @@ msgstr ""

#. name for ifb
msgid "Ifugao; Batad"
msgstr ""
msgstr "Ifugao; Batad"

#. name for ife
msgid "Ifè"
@@ -10004,7 +10004,7 @@ msgstr ""

#. name for iff
msgid "Ifo"
msgstr ""
msgstr "Ifo"

#. name for ifk
msgid "Ifugao; Tuwali"
@@ -10064,7 +10064,7 @@ msgstr ""

#. name for ihi
msgid "Ihievbe"
msgstr ""
msgstr "Ihievbe"

#. name for ihp
msgid "Iha"
@@ -10288,15 +10288,15 @@ msgstr ""

#. name for iou
msgid "Tuma-Irumu"
msgstr ""
msgstr "Tuma-Irumu"

#. name for iow
msgid "Iowa-Oto"
msgstr ""
msgstr "Iowa-Oto"

#. name for ipi
msgid "Ipili"
msgstr ""
msgstr "Ipili"

#. name for ipk
msgid "Inupiaq"
@@ -10304,7 +10304,7 @@ msgstr "Iñupiaq"

#. name for ipo
msgid "Ipiko"
msgstr ""
msgstr "Ipiko"

#. name for iqu
msgid "Iquito"
@@ -30768,7 +30768,7 @@ msgstr ""

#. name for zts
msgid "Zapotec; Tilquiapan"
msgstr ""
msgstr "Zapoteco; Tilquiapan"

#. name for ztt
msgid "Zapotec; Tejalapan"
@@ -13,14 +13,14 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-11-25 14:01+0000\n"
"PO-Revision-Date: 2012-04-04 19:53+0000\n"
"Last-Translator: Antoni Kudelski <antekk@linux.pl>\n"
"PO-Revision-Date: 2013-02-23 12:04+0000\n"
"Last-Translator: Marcin Ostajewski (panszpik) <Unknown>\n"
"Language-Team: Polish <translation-team-pl@lists.sourceforge.net>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2012-04-05 04:43+0000\n"
"X-Generator: Launchpad (build 15060)\n"
"X-Launchpad-Export-Date: 2013-02-24 04:41+0000\n"
"X-Generator: Launchpad (build 16506)\n"
"Language: pl\n"

#. name for aaa
@@ -857,11 +857,11 @@ msgstr "Akurio"

#. name for akp
msgid "Siwu"
msgstr ""
msgstr "Siwu"

#. name for akq
msgid "Ak"
msgstr ""
msgstr "Ak"

#. name for akr
msgid "Araki"
@@ -973,7 +973,7 @@ msgstr "ałtajski południowy"

#. name for alu
msgid "'Are'are"
msgstr ""
msgstr "'Are'are"

#. name for alw
msgid "Alaba-K’abeena"
@@ -1037,7 +1037,7 @@ msgstr "War-Jaintia"

#. name for amm
msgid "Ama (Papua New Guinea)"
msgstr ""
msgstr "Ama (Papua New Guinea)"

#. name for amn
msgid "Amanab"
@@ -1061,7 +1061,7 @@ msgstr "Amarakaeri"

#. name for ams
msgid "Amami-Oshima; Southern"
msgstr ""
msgstr "Południowy amami-oshima"

#. name for amt
msgid "Amto"
@@ -1069,7 +1069,7 @@ msgstr "Amto"

#. name for amu
msgid "Amuzgo; Guerrero"
msgstr ""
msgstr "Amuzgo; Guerrero"

#. name for amv
msgid "Ambelau"
@@ -1249,7 +1249,7 @@ msgstr "Ömie"

#. name for aon
msgid "Arapesh; Bumbita"
msgstr ""
msgstr "Arapesh; Bumbita"

#. name for aor
msgid "Aore"
@@ -1289,7 +1289,7 @@ msgstr "Bukiyip"

#. name for apf
msgid "Agta; Pahanan"
msgstr ""
msgstr "Agta; Pahanan"

#. name for apg
msgid "Ampanang"
@@ -1305,19 +1305,19 @@ msgstr "Apiaká"

#. name for apj
msgid "Apache; Jicarilla"
msgstr ""
msgstr "Apache; Jicarilla"

#. name for apk
msgid "Apache; Kiowa"
msgstr ""
msgstr "Apache; Kiowa"

#. name for apl
msgid "Apache; Lipan"
msgstr ""
msgstr "Apache; Lipan"

#. name for apm
msgid "Apache; Mescalero-Chiricahua"
msgstr ""
msgstr "Apache; Mescalero-Chiricahua"

#. name for apn
msgid "Apinayé"
@@ -1337,11 +1337,11 @@ msgstr "a-pucikwar"

#. name for apr
msgid "Arop-Lokep"
msgstr ""
msgstr "Arop-Lokep"

#. name for aps
msgid "Arop-Sissano"
msgstr ""
msgstr "Arop-Sissano"

#. name for apt
msgid "Apatani"
@@ -1357,7 +1357,7 @@ msgstr "Alapmunte"

#. name for apw
msgid "Apache; Western"
msgstr ""
msgstr "Zachodni apache"

#. name for apx
msgid "Aputai"
@@ -1389,7 +1389,7 @@ msgstr "Atohwaim"

#. name for aqn
msgid "Alta; Northern"
msgstr ""
msgstr "Północny alta"

#. name for aqp
msgid "Atakapa"
@@ -1409,7 +1409,7 @@ msgstr "arabski"

#. name for arb
msgid "Arabic; Standard"
msgstr ""
msgstr "Standardowy arabski"

#. name for arc
msgid "Aramaic; Official (700-300 BCE)"
@@ -1465,15 +1465,15 @@ msgstr "arabski algierski"

#. name for arr
msgid "Karo (Brazil)"
msgstr ""
msgstr "Karo (Brazylia)"

#. name for ars
msgid "Arabic; Najdi"
msgstr ""
msgstr "Arabski Najdi"

#. name for aru
msgid "Aruá (Amazonas State)"
msgstr ""
msgstr "Aruá (stan Amazonas)"

#. name for arv
msgid "Arbore"
@@ -1485,7 +1485,7 @@ msgstr "arawak"

#. name for arx
msgid "Aruá (Rodonia State)"
msgstr ""
msgstr "Aruá (stan Rodonia)"

#. name for ary
msgid "Arabic; Moroccan"
@@ -1529,11 +1529,11 @@ msgstr "Abishira"

#. name for asi
msgid "Buruwai"
msgstr ""
msgstr "Buruwai"

#. name for asj
msgid "Nsari"
msgstr ""
msgstr "Nsari"

#. name for ask
msgid "Ashkun"
@@ -1541,7 +1541,7 @@ msgstr "aszkun"

#. name for asl
msgid "Asilulu"
msgstr ""
msgstr "Asilulu"

#. name for asm
msgid "Assamese"
@@ -1549,11 +1549,11 @@ msgstr "asamski"

#. name for asn
msgid "Asuriní; Xingú"
msgstr ""
msgstr "Asuriní; Xingú"

#. name for aso
msgid "Dano"
msgstr ""
msgstr "Dano"

#. name for asp
msgid "Algerian Sign Language"
@@ -1565,11 +1565,11 @@ msgstr "austriacki język migowy"

#. name for asr
msgid "Asuri"
msgstr ""
msgstr "Asuri"

#. name for ass
msgid "Ipulo"
msgstr ""
msgstr "Ipulo"

#. name for ast
msgid "Asturian"
@@ -1577,11 +1577,11 @@ msgstr "asturyjski"

#. name for asu
msgid "Asurini; Tocantins"
msgstr ""
msgstr "Asurini; Tocantins"

#. name for asv
msgid "Asoa"
msgstr ""
msgstr "Asoa"

#. name for asw
msgid "Australian Aborigines Sign Language"
@@ -1589,43 +1589,43 @@ msgstr "język migowy Aborygenów australijskich"

#. name for asx
msgid "Muratayak"
msgstr ""
msgstr "Muratayak"

#. name for asy
msgid "Asmat; Yaosakor"
msgstr ""
msgstr "Asmat; Yaosakor"

#. name for asz
msgid "As"
msgstr ""
msgstr "As"

#. name for ata
msgid "Pele-Ata"
msgstr ""
msgstr "Pele-Ata"

#. name for atb
msgid "Zaiwa"
msgstr ""
msgstr "Zaiwa"

#. name for atc
msgid "Atsahuaca"
msgstr ""
msgstr "Atsahuaca"

#. name for atd
msgid "Manobo; Ata"
msgstr ""
msgstr "Manobo; Ata"

#. name for ate
msgid "Atemble"
msgstr ""
msgstr "Atemble"

#. name for atg
msgid "Ivbie North-Okpela-Arhe"
msgstr ""
msgstr "Ivbie North-Okpela-Arhe"

#. name for ati
msgid "Attié"
msgstr ""
msgstr "Attié"

#. name for atj
msgid "Atikamekw"
@@ -1633,111 +1633,111 @@ msgstr "atikamekw"

#. name for atk
msgid "Ati"
msgstr ""
msgstr "Ati"

#. name for atl
msgid "Agta; Mt. Iraya"
msgstr ""
msgstr "Agta; Mt. Iraya"

#. name for atm
msgid "Ata"
msgstr ""
msgstr "Ata"

#. name for atn
msgid "Ashtiani"
msgstr ""
msgstr "Ashtiani"

#. name for ato
msgid "Atong"
msgstr ""
msgstr "Atong"

#. name for atp
msgid "Atta; Pudtol"
msgstr ""
msgstr "Atta; Pudtol"

#. name for atq
msgid "Aralle-Tabulahan"
msgstr ""
msgstr "Aralle-Tabulahan"

#. name for atr
msgid "Waimiri-Atroari"
msgstr ""
msgstr "Waimiri-Atroari"

#. name for ats
msgid "Gros Ventre"
msgstr ""
msgstr "Gros Ventre"

#. name for att
msgid "Atta; Pamplona"
msgstr ""
msgstr "Atta; Pamplona"

#. name for atu
msgid "Reel"
msgstr ""
msgstr "Reel"

#. name for atv
msgid "Altai; Northern"
msgstr ""
msgstr "Altai; Northern"

#. name for atw
msgid "Atsugewi"
msgstr ""
msgstr "Atsugewi"

#. name for atx
msgid "Arutani"
msgstr ""
msgstr "Arutani"

#. name for aty
msgid "Aneityum"
msgstr ""
msgstr "Aneityum"

#. name for atz
msgid "Arta"
msgstr ""
msgstr "Arta"

#. name for aua
msgid "Asumboa"
msgstr ""
msgstr "Asumboa"

#. name for aub
msgid "Alugu"
msgstr ""
msgstr "Alugu"

#. name for auc
msgid "Waorani"
msgstr ""
msgstr "Waorani"

#. name for aud
msgid "Anuta"
msgstr ""
msgstr "Anuta"

#. name for aue
msgid "=/Kx'au//'ein"
msgstr ""
msgstr "=/Kx'au//'ein"

#. name for aug
msgid "Aguna"
msgstr ""
msgstr "Aguna"

#. name for auh
msgid "Aushi"
msgstr ""
msgstr "Aushi"

#. name for aui
msgid "Anuki"
msgstr ""
msgstr "Anuki"

#. name for auj
msgid "Awjilah"
msgstr ""
msgstr "Awjilah"

#. name for auk
msgid "Heyo"
msgstr ""
msgstr "Heyo"

#. name for aul
msgid "Aulua"
msgstr ""
msgstr "Aulua"

#. name for aum
msgid "Asu (Nigeria)"
@@ -1745,11 +1745,11 @@ msgstr "asu (Nigeria)"

#. name for aun
msgid "One; Molmo"
msgstr ""
msgstr "One; Molmo"

#. name for auo
msgid "Auyokawa"
msgstr ""
msgstr "Auyokawa"

#. name for aup
msgid "Makayam"
@@ -1757,19 +1757,19 @@ msgstr ""

#. name for auq
msgid "Anus"
msgstr ""
msgstr "Anus"

#. name for aur
msgid "Aruek"
msgstr ""
msgstr "Aruek"

#. name for aut
msgid "Austral"
msgstr ""
msgstr "Austral"

#. name for auu
msgid "Auye"
msgstr ""
msgstr "Auye"

#. name for auw
msgid "Awyi"
@@ -1781,7 +1781,7 @@ msgstr ""

#. name for auy
msgid "Awiyaana"
msgstr ""
msgstr "Awiyaana"

#. name for auz
msgid "Arabic; Uzbeki"
@@ -1793,11 +1793,11 @@ msgstr "awarski"

#. name for avb
msgid "Avau"
msgstr ""
msgstr "Avau"

#. name for avd
msgid "Alviri-Vidari"
msgstr ""
msgstr "Alviri-Vidari"

#. name for ave
msgid "Avestan"
@@ -1805,11 +1805,11 @@ msgstr "awestyjski"

#. name for avi
msgid "Avikam"
msgstr ""
msgstr "Avikam"

#. name for avk
msgid "Kotava"
msgstr ""
msgstr "Kotava"

#. name for avl
msgid "Arabic; Eastern Egyptian Bedawi"
@@ -1817,23 +1817,23 @@ msgstr ""

#. name for avn
msgid "Avatime"
msgstr ""
msgstr "Avatime"

#. name for avo
msgid "Agavotaguerra"
msgstr ""
msgstr "Agavotaguerra"

#. name for avs
msgid "Aushiri"
msgstr ""
msgstr "Aushiri"

#. name for avt
msgid "Au"
msgstr ""
msgstr "Au"

#. name for avu
msgid "Avokaya"
msgstr ""
msgstr "Avokaya"

#. name for avv
msgid "Avá-Canoeiro"
@@ -1849,7 +1849,7 @@ msgstr "awa (Papua Nowa Gwinea)"

#. name for awc
msgid "Cicipu"
msgstr ""
msgstr "Cicipu"

#. name for awe
msgid "Awetí"
@@ -1857,15 +1857,15 @@ msgstr ""

#. name for awh
msgid "Awbono"
msgstr ""
msgstr "Awbono"

#. name for awi
msgid "Aekyom"
msgstr ""
msgstr "Aekyom"

#. name for awk
msgid "Awabakal"
msgstr ""
msgstr "Awabakal"

#. name for awm
msgid "Arawum"
@@ -1873,31 +1873,31 @@ msgstr "arawum"

#. name for awn
msgid "Awngi"
msgstr ""
msgstr "Awngi"

#. name for awo
msgid "Awak"
msgstr ""
msgstr "Awak"

#. name for awr
msgid "Awera"
msgstr ""
msgstr "Awera"

#. name for aws
msgid "Awyu; South"
msgstr ""
msgstr "Południowy aywu"

#. name for awt
msgid "Araweté"
msgstr ""
msgstr "Araweté"

#. name for awu
msgid "Awyu; Central"
msgstr ""
msgstr "Środkowy aywu"

#. name for awv
msgid "Awyu; Jair"
msgstr ""
msgstr "Awyu; Jair"

#. name for aww
msgid "Awun"
@@ -1905,7 +1905,7 @@ msgstr "awun"

#. name for awx
msgid "Awara"
msgstr ""
msgstr "Awara"

#. name for awy
msgid "Awyu; Edera"
@@ -1913,15 +1913,15 @@ msgstr "ederah"

#. name for axb
msgid "Abipon"
msgstr ""
msgstr "Abipon"

#. name for axg
msgid "Arára; Mato Grosso"
msgstr ""
msgstr "Arára; Mato Grosso"

#. name for axk
msgid "Yaka (Central African Republic)"
msgstr ""
msgstr "Yaka (Central African Republic)"

#. name for axm
msgid "Armenian; Middle"
@@ -1929,7 +1929,7 @@ msgstr "średnioormiański"

#. name for axx
msgid "Xaragure"
msgstr ""
msgstr "Xaragure"

#. name for aya
msgid "Awar"
@@ -1937,7 +1937,7 @@ msgstr "awar"

#. name for ayb
msgid "Gbe; Ayizo"
msgstr ""
msgstr "Gbe; Ayizo"

#. name for ayc
msgid "Aymara; Southern"
@@ -1945,27 +1945,27 @@ msgstr "ajmara południowy"

#. name for ayd
msgid "Ayabadhu"
msgstr ""
msgstr "Ayabadhu"

#. name for aye
msgid "Ayere"
msgstr ""
msgstr "Ayere"

#. name for ayg
msgid "Ginyanga"
msgstr ""
msgstr "Ginyanga"

#. name for ayh
msgid "Arabic; Hadrami"
msgstr ""
msgstr "Arabski Hadrami"

#. name for ayi
msgid "Leyigha"
msgstr ""
msgstr "Leyigha"

#. name for ayk
msgid "Akuku"
msgstr ""
msgstr "Akuku"

#. name for ayl
msgid "Arabic; Libyan"
@@ -1977,19 +1977,19 @@ msgstr "ajmara"

#. name for ayn
msgid "Arabic; Sanaani"
msgstr ""
msgstr "Arabski Sanaani"

#. name for ayo
msgid "Ayoreo"
msgstr ""
msgstr "Ayoreo"

#. name for ayp
msgid "Arabic; North Mesopotamian"
msgstr ""
msgstr "Arabski; Mezopotamia Północna"

#. name for ayq
msgid "Ayi (Papua New Guinea)"
msgstr ""
msgstr "Ayi (Papua Nowa Gwinea)"

#. name for ayr
msgid "Aymara; Central"
@@ -1997,27 +1997,27 @@ msgstr "ajmara centralny"

#. name for ays
msgid "Ayta; Sorsogon"
msgstr ""
msgstr "Ayta; Sorsogon"

#. name for ayt
msgid "Ayta; Magbukun"
msgstr ""
msgstr "Ayta; Magbukun"

#. name for ayu
msgid "Ayu"
msgstr ""
msgstr "Ayu"

#. name for ayy
msgid "Ayta; Tayabas"
msgstr ""
msgstr "Ayta; Tayabas"

#. name for ayz
msgid "Mai Brat"
msgstr ""
msgstr "Mai Brat"

#. name for aza
msgid "Azha"
msgstr ""
msgstr "Azha"

#. name for azb
msgid "Azerbaijani; South"
@@ -2029,7 +2029,7 @@ msgstr "azerski"

#. name for azg
msgid "Amuzgo; San Pedro Amuzgos"
msgstr ""
msgstr "Amuzgo; San Pedro Amuzgos"

#. name for azj
msgid "Azerbaijani; North"
@@ -2037,35 +2037,35 @@ msgstr "północnoazerski"

#. name for azm
msgid "Amuzgo; Ipalapa"
|
||||
msgstr ""
|
||||
msgstr "Amuzgo; Ipalapa"
|
||||
|
||||
#. name for azo
|
||||
msgid "Awing"
|
||||
msgstr ""
|
||||
msgstr "Awing"
|
||||
|
||||
#. name for azt
|
||||
msgid "Atta; Faire"
|
||||
msgstr ""
|
||||
msgstr "Atta; Faire"
|
||||
|
||||
#. name for azz
|
||||
msgid "Nahuatl; Highland Puebla"
|
||||
msgstr ""
|
||||
msgstr "Nahuatl; Wyżyna Puebla"
|
||||
|
||||
#. name for baa
|
||||
msgid "Babatana"
|
||||
msgstr ""
|
||||
msgstr "Babatana"
|
||||
|
||||
#. name for bab
|
||||
msgid "Bainouk-Gunyuño"
|
||||
msgstr ""
|
||||
msgstr "Bainouk-Gunyuño"
|
||||
|
||||
#. name for bac
|
||||
msgid "Badui"
|
||||
msgstr ""
|
||||
msgstr "Badui"
|
||||
|
||||
#. name for bae
|
||||
msgid "Baré"
|
||||
msgstr ""
|
||||
msgstr "Baré"
|
||||
|
||||
#. name for baf
|
||||
msgid "Nubaca"
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -9,14 +9,14 @@ msgstr ""
|
||||
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
|
||||
"devel@lists.alioth.debian.org>\n"
|
||||
"POT-Creation-Date: 2011-11-25 14:01+0000\n"
|
||||
"PO-Revision-Date: 2012-12-21 03:31+0000\n"
|
||||
"Last-Translator: Fábio Malcher Miranda <mirand863@hotmail.com>\n"
|
||||
"PO-Revision-Date: 2013-02-17 21:57+0000\n"
|
||||
"Last-Translator: Neliton Pereira Jr. <nelitonpjr@gmail.com>\n"
|
||||
"Language-Team: Brazilian Portuguese\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"X-Launchpad-Export-Date: 2012-12-22 04:59+0000\n"
|
||||
"X-Generator: Launchpad (build 16378)\n"
|
||||
"X-Launchpad-Export-Date: 2013-02-18 04:49+0000\n"
|
||||
"X-Generator: Launchpad (build 16491)\n"
|
||||
"Language: \n"
|
||||
|
||||
#. name for aaa
|
||||
@ -141,7 +141,7 @@ msgstr ""
|
||||
|
||||
#. name for abh
|
||||
msgid "Arabic; Tajiki"
|
||||
msgstr ""
|
||||
msgstr "Arábico; Tajiki"
|
||||
|
||||
#. name for abi
|
||||
msgid "Abidji"
|
||||
|
@ -13,14 +13,14 @@ msgstr ""
|
||||
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
|
||||
"devel@lists.alioth.debian.org>\n"
|
||||
"POT-Creation-Date: 2011-11-25 14:01+0000\n"
|
||||
"PO-Revision-Date: 2013-01-21 14:06+0000\n"
|
||||
"Last-Translator: Don Miguel <bmv@mail.ru>\n"
|
||||
"PO-Revision-Date: 2013-02-21 23:51+0000\n"
|
||||
"Last-Translator: Глория Хрусталёва <gloriya@hushmail.com>\n"
|
||||
"Language-Team: Russian <debian-l10n-russian@lists.debian.org>\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"X-Launchpad-Export-Date: 2013-01-22 04:46+0000\n"
|
||||
"X-Generator: Launchpad (build 16430)\n"
|
||||
"X-Launchpad-Export-Date: 2013-02-23 05:19+0000\n"
|
||||
"X-Generator: Launchpad (build 16506)\n"
|
||||
"Language: ru\n"
|
||||
|
||||
#. name for aaa
|
||||
@ -237,7 +237,7 @@ msgstr "Ачехский"
|
||||
|
||||
#. name for acf
|
||||
msgid "Creole French; Saint Lucian"
|
||||
msgstr ""
|
||||
msgstr "Креольский французский; Сент-люсийский"
|
||||
|
||||
#. name for ach
|
||||
msgid "Acoli"
|
||||
@ -257,7 +257,7 @@ msgstr ""
|
||||
|
||||
#. name for acm
|
||||
msgid "Arabic; Mesopotamian"
|
||||
msgstr ""
|
||||
msgstr "Арабский; Месопатамский"
|
||||
|
||||
#. name for acn
|
||||
msgid "Achang"
|
||||
@ -273,7 +273,7 @@ msgstr ""
|
||||
|
||||
#. name for acr
|
||||
msgid "Achi"
|
||||
msgstr ""
|
||||
msgstr "Ачи"
|
||||
|
||||
#. name for acs
|
||||
msgid "Acroá"
|
||||
@ -297,7 +297,7 @@ msgstr ""
|
||||
|
||||
#. name for acx
|
||||
msgid "Arabic; Omani"
|
||||
msgstr ""
|
||||
msgstr "Арабский; Оманский"
|
||||
|
||||
#. name for acy
|
||||
msgid "Arabic; Cypriot"
|
||||
@ -369,7 +369,7 @@ msgstr ""
|
||||
|
||||
#. name for ads
|
||||
msgid "Adamorobe Sign Language"
|
||||
msgstr ""
|
||||
msgstr "Знаковый язык Адаморобе"
|
||||
|
||||
#. name for adt
|
||||
msgid "Adnyamathanha"
|
||||
@ -389,7 +389,7 @@ msgstr ""
|
||||
|
||||
#. name for ady
|
||||
msgid "Adyghe"
|
||||
msgstr ""
|
||||
msgstr "Адыгейский"
|
||||
|
||||
#. name for adz
|
||||
msgid "Adzera"
|
||||
@ -401,7 +401,7 @@ msgstr ""
|
||||
|
||||
#. name for aeb
|
||||
msgid "Arabic; Tunisian"
|
||||
msgstr ""
|
||||
msgstr "Арабский; Тунисский"
|
||||
|
||||
#. name for aec
|
||||
msgid "Arabic; Saidi"
|
||||
@ -409,7 +409,7 @@ msgstr ""
|
||||
|
||||
#. name for aed
|
||||
msgid "Argentine Sign Language"
|
||||
msgstr ""
|
||||
msgstr "Аргентинский язык жестов"
|
||||
|
||||
#. name for aee
|
||||
msgid "Pashayi; Northeast"
|
||||
@ -429,7 +429,7 @@ msgstr ""
|
||||
|
||||
#. name for aen
|
||||
msgid "Armenian Sign Language"
|
||||
msgstr ""
|
||||
msgstr "Армянский язык жестов"
|
||||
|
||||
#. name for aeq
|
||||
msgid "Aer"
|
||||
@ -609,7 +609,7 @@ msgstr ""
|
||||
|
||||
#. name for agx
|
||||
msgid "Aghul"
|
||||
msgstr ""
|
||||
msgstr "Агульский"
|
||||
|
||||
#. name for agy
|
||||
msgid "Alta; Southern"
|
||||
@ -665,7 +665,7 @@ msgstr ""
|
||||
|
||||
#. name for ahr
|
||||
msgid "Ahirani"
|
||||
msgstr ""
|
||||
msgstr "Ахирани"
|
||||
|
||||
#. name for ahs
|
||||
msgid "Ashe"
|
||||
@ -701,7 +701,7 @@ msgstr ""
|
||||
|
||||
#. name for aig
|
||||
msgid "Creole English; Antigua and Barbuda"
|
||||
msgstr ""
|
||||
msgstr "Креольский английский; Антигуа и Барбуда"
|
||||
|
||||
#. name for aih
|
||||
msgid "Ai-Cham"
|
||||
@ -709,7 +709,7 @@ msgstr ""
|
||||
|
||||
#. name for aii
|
||||
msgid "Neo-Aramaic; Assyrian"
|
||||
msgstr ""
|
||||
msgstr "Новоарамейский; Ассирийский"
|
||||
|
||||
#. name for aij
|
||||
msgid "Lishanid Noshan"
|
||||
@ -825,7 +825,7 @@ msgstr ""
|
||||
|
||||
#. name for akg
|
||||
msgid "Anakalangu"
|
||||
msgstr ""
|
||||
msgstr "Анакалангу"
|
||||
|
||||
#. name for akh
|
||||
msgid "Angal Heneng"
|
||||
@ -881,7 +881,7 @@ msgstr ""
|
||||
|
||||
#. name for akv
|
||||
msgid "Akhvakh"
|
||||
msgstr ""
|
||||
msgstr "Ахвахский"
|
||||
|
||||
#. name for akw
|
||||
msgid "Akwa"
|
||||
@ -897,7 +897,7 @@ msgstr ""
|
||||
|
||||
#. name for akz
|
||||
msgid "Alabama"
|
||||
msgstr ""
|
||||
msgstr "Язык племени алабама"
|
||||
|
||||
#. name for ala
|
||||
msgid "Alago"
|
||||
@ -945,7 +945,7 @@ msgstr ""
|
||||
|
||||
#. name for aln
|
||||
msgid "Albanian; Gheg"
|
||||
msgstr ""
|
||||
msgstr "Албанский; Гегский"
|
||||
|
||||
#. name for alo
|
||||
msgid "Larike-Wakasihu"
|
||||
@ -953,11 +953,11 @@ msgstr ""
|
||||
|
||||
#. name for alp
|
||||
msgid "Alune"
|
||||
msgstr ""
|
||||
msgstr "Алуне"
|
||||
|
||||
#. name for alq
|
||||
msgid "Algonquin"
|
||||
msgstr ""
|
||||
msgstr "Алгонкинский"
|
||||
|
||||
#. name for alr
|
||||
msgid "Alutor"
|
||||
@ -965,7 +965,7 @@ msgstr ""
|
||||
|
||||
#. name for als
|
||||
msgid "Albanian; Tosk"
|
||||
msgstr ""
|
||||
msgstr "Албанский; Тоскский"
|
||||
|
||||
#. name for alt
|
||||
msgid "Altai; Southern"
|
||||
@ -1037,7 +1037,7 @@ msgstr ""
|
||||
|
||||
#. name for amm
|
||||
msgid "Ama (Papua New Guinea)"
|
||||
msgstr ""
|
||||
msgstr "Ама (Папуа-Новая Гвинея)"
|
||||
|
||||
#. name for amn
|
||||
msgid "Amanab"
|
||||
@ -1077,7 +1077,7 @@ msgstr ""
|
||||
|
||||
#. name for amw
|
||||
msgid "Neo-Aramaic; Western"
|
||||
msgstr ""
|
||||
msgstr "Новоарамейский; Западный"
|
||||
|
||||
#. name for amx
|
||||
msgid "Anmatyerre"
|
||||
@ -1085,7 +1085,7 @@ msgstr ""
|
||||
|
||||
#. name for amy
|
||||
msgid "Ami"
|
||||
msgstr ""
|
||||
msgstr "Ами"
|
||||
|
||||
#. name for amz
|
||||
msgid "Atampaya"
|
||||
@ -1281,7 +1281,7 @@ msgstr ""
|
||||
|
||||
#. name for apd
|
||||
msgid "Arabic; Sudanese"
|
||||
msgstr ""
|
||||
msgstr "Арабский; Суданский"
|
||||
|
||||
#. name for ape
|
||||
msgid "Bukiyip"
|
||||
@ -1373,7 +1373,7 @@ msgstr ""
|
||||
|
||||
#. name for aqc
|
||||
msgid "Archi"
|
||||
msgstr ""
|
||||
msgstr "Арчинский"
|
||||
|
||||
#. name for aqd
|
||||
msgid "Dogon; Ampari"
|
||||
@ -1409,11 +1409,11 @@ msgstr "Арабский"
|
||||
|
||||
#. name for arb
|
||||
msgid "Arabic; Standard"
|
||||
msgstr ""
|
||||
msgstr "Арабский; Стандартный"
|
||||
|
||||
#. name for arc
|
||||
msgid "Aramaic; Official (700-300 BCE)"
|
||||
msgstr ""
|
||||
msgstr "Арамейский; Официальный"
|
||||
|
||||
#. name for ard
|
||||
msgid "Arabana"
|
||||
@ -1461,7 +1461,7 @@ msgstr "Арапахо"
|
||||
|
||||
#. name for arq
|
||||
msgid "Arabic; Algerian"
|
||||
msgstr ""
|
||||
msgstr "Арабский; Алжирский"
|
||||
|
||||
#. name for arr
|
||||
msgid "Karo (Brazil)"
|
||||
@ -1489,11 +1489,11 @@ msgstr ""
|
||||
|
||||
#. name for ary
|
||||
msgid "Arabic; Moroccan"
|
||||
msgstr ""
|
||||
msgstr "Арабский; Марокканский"
|
||||
|
||||
#. name for arz
|
||||
msgid "Arabic; Egyptian"
|
||||
msgstr ""
|
||||
msgstr "Арабский; Египетский"
|
||||
|
||||
#. name for asa
|
||||
msgid "Asu (Tanzania)"
|
||||
@ -1537,7 +1537,7 @@ msgstr ""
|
||||
|
||||
#. name for ask
|
||||
msgid "Ashkun"
|
||||
msgstr ""
|
||||
msgstr "Ашкун"
|
||||
|
||||
#. name for asl
|
||||
msgid "Asilulu"
|
||||
@ -1573,7 +1573,7 @@ msgstr ""
|
||||
|
||||
#. name for ast
|
||||
msgid "Asturian"
|
||||
msgstr ""
|
||||
msgstr "Астурийский"
|
||||
|
||||
#. name for asu
|
||||
msgid "Asurini; Tocantins"
|
||||
@ -1693,7 +1693,7 @@ msgstr ""
|
||||
|
||||
#. name for atz
|
||||
msgid "Arta"
|
||||
msgstr ""
|
||||
msgstr "Арта"
|
||||
|
||||
#. name for aua
|
||||
msgid "Asumboa"
|
||||
@ -1969,7 +1969,7 @@ msgstr ""
|
||||
|
||||
#. name for ayl
|
||||
msgid "Arabic; Libyan"
|
||||
msgstr ""
|
||||
msgstr "Арабский; Ливийский"
|
||||
|
||||
#. name for aym
|
||||
msgid "Aymara"
|
||||
@ -1985,7 +1985,7 @@ msgstr ""
|
||||
|
||||
#. name for ayp
|
||||
msgid "Arabic; North Mesopotamian"
|
||||
msgstr ""
|
||||
msgstr "Арабский; Северомесопатамский"
|
||||
|
||||
#. name for ayq
|
||||
msgid "Ayi (Papua New Guinea)"
|
||||
@ -2021,7 +2021,7 @@ msgstr ""
|
||||
|
||||
#. name for azb
|
||||
msgid "Azerbaijani; South"
|
||||
msgstr ""
|
||||
msgstr "Азербайджанский; Южный"
|
||||
|
||||
#. name for aze
|
||||
msgid "Azerbaijani"
|
||||
@ -2033,7 +2033,7 @@ msgstr ""
|
||||
|
||||
#. name for azj
|
||||
msgid "Azerbaijani; North"
|
||||
msgstr ""
|
||||
msgstr "Азербайджанский; Северный"
|
||||
|
||||
#. name for azm
|
||||
msgid "Amuzgo; Ipalapa"
|
||||
@ -2077,7 +2077,7 @@ msgstr ""
|
||||
|
||||
#. name for bah
|
||||
msgid "Creole English; Bahamas"
|
||||
msgstr ""
|
||||
msgstr "Креольский английский; Багамский"
|
||||
|
||||
#. name for baj
|
||||
msgid "Barakai"
|
||||
@ -2113,7 +2113,7 @@ msgstr ""
|
||||
|
||||
#. name for bas
|
||||
msgid "Basa (Cameroon)"
|
||||
msgstr ""
|
||||
msgstr "Баса (Камерун)"
|
||||
|
||||
#. name for bau
|
||||
msgid "Bada (Nigeria)"
|
||||
@ -2381,7 +2381,7 @@ msgstr ""
|
||||
|
||||
#. name for bdj
|
||||
msgid "Bai"
|
||||
msgstr ""
|
||||
msgstr "Бай"
|
||||
|
||||
#. name for bdk
|
||||
msgid "Budukh"
|
||||
@ -2473,7 +2473,7 @@ msgstr ""
|
||||
|
||||
#. name for beg
|
||||
msgid "Belait"
|
||||
msgstr ""
|
||||
msgstr "Белайт"
|
||||
|
||||
#. name for beh
|
||||
msgid "Biali"
|
||||
@ -2497,7 +2497,7 @@ msgstr "Белорусский"
|
||||
|
||||
#. name for bem
|
||||
msgid "Bemba (Zambia)"
|
||||
msgstr ""
|
||||
msgstr "Бемба (Замбия)"
|
||||
|
||||
#. name for ben
|
||||
msgid "Bengali"
|
||||
@ -2641,7 +2641,7 @@ msgstr ""
|
||||
|
||||
#. name for bfy
|
||||
msgid "Bagheli"
|
||||
msgstr ""
|
||||
msgstr "Багхели"
|
||||
|
||||
#. name for bfz
|
||||
msgid "Pahari; Mahasu"
|
||||
@ -2737,7 +2737,7 @@ msgstr ""
|
||||
|
||||
#. name for bgx
|
||||
msgid "Turkish; Balkan Gagauz"
|
||||
msgstr ""
|
||||
msgstr "Турецкий; Гагаузский"
|
||||
|
||||
#. name for bgy
|
||||
msgid "Benggoi"
|
||||
@ -2753,7 +2753,7 @@ msgstr ""
|
||||
|
||||
#. name for bhb
|
||||
msgid "Bhili"
|
||||
msgstr ""
|
||||
msgstr "Бхили"
|
||||
|
||||
#. name for bhc
|
||||
msgid "Biga"
|
||||
@ -3113,7 +3113,7 @@ msgstr ""
|
||||
|
||||
#. name for bku
|
||||
msgid "Buhid"
|
||||
msgstr ""
|
||||
msgstr "Бухид"
|
||||
|
||||
#. name for bkv
|
||||
msgid "Bekwarra"
|
||||
@ -3333,7 +3333,7 @@ msgstr ""
|
||||
|
||||
#. name for bmy
|
||||
msgid "Bemba (Democratic Republic of Congo)"
|
||||
msgstr ""
|
||||
msgstr "Бемба (Демократическая Республика Конго)"
|
||||
|
||||
#. name for bmz
|
||||
msgid "Baramu"
|
||||
@ -3409,7 +3409,7 @@ msgstr ""
|
||||
|
||||
#. name for bns
|
||||
msgid "Bundeli"
|
||||
msgstr ""
|
||||
msgstr "Бундели"
|
||||
|
||||
#. name for bnu
|
||||
msgid "Bentong"
|
||||
@ -3553,7 +3553,7 @@ msgstr ""
|
||||
|
||||
#. name for bph
|
||||
msgid "Botlikh"
|
||||
msgstr ""
|
||||
msgstr "Ботлихский"
|
||||
|
||||
#. name for bpi
|
||||
msgid "Bagupi"
|
||||
@ -3613,7 +3613,7 @@ msgstr ""
|
||||
|
||||
#. name for bpw
|
||||
msgid "Bo (Papua New Guinea)"
|
||||
msgstr ""
|
||||
msgstr "Бо (Папуа-Новая Гвинея)"
|
||||
|
||||
#. name for bpx
|
||||
msgid "Bareli; Palya"
|
||||
@ -3621,7 +3621,7 @@ msgstr ""
|
||||
|
||||
#. name for bpy
|
||||
msgid "Bishnupriya"
|
||||
msgstr ""
|
||||
msgstr "Бишнуприя"
|
||||
|
||||
#. name for bpz
|
||||
msgid "Bilba"
|
||||
@ -3821,7 +3821,7 @@ msgstr ""
|
||||
|
||||
#. name for brx
|
||||
msgid "Bodo (India)"
|
||||
msgstr ""
|
||||
msgstr "Бодо (Индия)"
|
||||
|
||||
#. name for bry
|
||||
msgid "Burui"
|
||||
@ -3849,7 +3849,7 @@ msgstr ""
|
||||
|
||||
#. name for bsf
|
||||
msgid "Bauchi"
|
||||
msgstr ""
|
||||
msgstr "Баучи"
|
||||
|
||||
#. name for bsg
|
||||
msgid "Bashkardi"
|
||||
@ -3857,7 +3857,7 @@ msgstr ""
|
||||
|
||||
#. name for bsh
|
||||
msgid "Kati"
|
||||
msgstr ""
|
||||
msgstr "Кати"
|
||||
|
||||
#. name for bsi
|
||||
msgid "Bassossi"
|
||||
@ -3869,7 +3869,7 @@ msgstr ""
|
||||
|
||||
#. name for bsk
|
||||
msgid "Burushaski"
|
||||
msgstr ""
|
||||
msgstr "Бурушаски"
|
||||
|
||||
#. name for bsl
|
||||
msgid "Basa-Gumna"
|
||||
@ -4389,7 +4389,7 @@ msgstr ""
|
||||
|
||||
#. name for bxr
|
||||
msgid "Buriat; Russia"
|
||||
msgstr ""
|
||||
msgstr "Бурятский; Россия"
|
||||
|
||||
#. name for bxs
|
||||
msgid "Busam"
|
||||
@ -4553,11 +4553,11 @@ msgstr ""
|
||||
|
||||
#. name for bzj
|
||||
msgid "Kriol English; Belize"
|
||||
msgstr ""
|
||||
msgstr "Креольский английский; Белиз"
|
||||
|
||||
#. name for bzk
|
||||
msgid "Creole English; Nicaragua"
|
||||
msgstr ""
|
||||
msgstr "Креольский английский; Никарагуа"
|
||||
|
||||
#. name for bzl
|
||||
msgid "Boano (Sulawesi)"
|
||||
@ -5001,7 +5001,7 @@ msgstr ""
|
||||
|
||||
#. name for chm
|
||||
msgid "Mari (Russia)"
|
||||
msgstr ""
|
||||
msgstr "Марийский (Россия)"
|
||||
|
||||
#. name for chn
|
||||
msgid "Chinook jargon"
|
||||
@ -5285,7 +5285,7 @@ msgstr ""
|
||||
|
||||
#. name for cmn
|
||||
msgid "Chinese; Mandarin"
|
||||
msgstr ""
|
||||
msgstr "Китайский; Мандарин"
|
||||
|
||||
#. name for cmo
|
||||
msgid "Mnong; Central"
|
||||
@ -7581,7 +7581,7 @@ msgstr ""
|
||||
|
||||
#. name for fij
|
||||
msgid "Fijian"
|
||||
msgstr "Фиджи"
|
||||
msgstr "Фиджийский"
|
||||
|
||||
#. name for fil
|
||||
msgid "Filipino"
|
||||
@ -8037,11 +8037,11 @@ msgstr ""
|
||||
|
||||
#. name for gcf
|
||||
msgid "Creole French; Guadeloupean"
|
||||
msgstr ""
|
||||
msgstr "Креольский французский; Гваделупский"
|
||||
|
||||
#. name for gcl
|
||||
msgid "Creole English; Grenadian"
|
||||
msgstr ""
|
||||
msgstr "Креольский английский; Гренадский"
|
||||
|
||||
#. name for gcn
|
||||
msgid "Gaina"
|
||||
@ -8049,7 +8049,7 @@ msgstr ""
|
||||
|
||||
#. name for gcr
|
||||
msgid "Creole French; Guianese"
|
||||
msgstr ""
|
||||
msgstr "Креольский французский; Гвианский"
|
||||
|
||||
#. name for gct
|
||||
msgid "German; Colonia Tovar"
|
||||
@ -9089,7 +9089,7 @@ msgstr ""
|
||||
|
||||
#. name for gyn
|
||||
msgid "Creole English; Guyanese"
|
||||
msgstr ""
|
||||
msgstr "Креольский английский; Гайянский"
|
||||
|
||||
#. name for gyr
|
||||
msgid "Guarayu"
|
||||
@ -9853,7 +9853,7 @@ msgstr ""
|
||||
|
||||
#. name for hwc
|
||||
msgid "Creole English; Hawai'i"
|
||||
msgstr ""
|
||||
msgstr "Креольский английский; Гавайский"
|
||||
|
||||
#. name for hwo
|
||||
msgid "Hwana"
|
||||
@ -10577,7 +10577,7 @@ msgstr ""
|
||||
|
||||
#. name for jam
|
||||
msgid "Creole English; Jamaican"
|
||||
msgstr ""
|
||||
msgstr "Креольский английский; Ямайский"
|
||||
|
||||
#. name for jao
|
||||
msgid "Yanyuwa"
|
||||
@ -14245,7 +14245,7 @@ msgstr ""
|
||||
|
||||
#. name for lir
|
||||
msgid "English; Liberian"
|
||||
msgstr ""
|
||||
msgstr "Креольский английский; Либерийский"
|
||||
|
||||
#. name for lis
|
||||
msgid "Lisu"
|
||||
@ -14661,7 +14661,7 @@ msgstr ""
|
||||
|
||||
#. name for lou
|
||||
msgid "Creole French; Louisiana"
|
||||
msgstr ""
|
||||
msgstr "Креольский французский; Луизиана"
|
||||
|
||||
#. name for lov
|
||||
msgid "Lopi"
|
||||
@ -15021,7 +15021,7 @@ msgstr ""
|
||||
|
||||
#. name for lzz
|
||||
msgid "Laz"
|
||||
msgstr ""
|
||||
msgstr "Лазский"
|
||||
|
||||
#. name for maa
|
||||
msgid "Mazatec; San Jerónimo Tecóatl"
|
||||
@ -15337,7 +15337,7 @@ msgstr ""
|
||||
|
||||
#. name for mdf
|
||||
msgid "Moksha"
|
||||
msgstr "Мокша"
|
||||
msgstr "Мокшанский"
|
||||
|
||||
#. name for mdg
|
||||
msgid "Massalat"
|
||||
@ -19993,7 +19993,7 @@ msgstr ""
|
||||
|
||||
#. name for orv
|
||||
msgid "Russian; Old"
|
||||
msgstr ""
|
||||
msgstr "Древнерусский"
|
||||
|
||||
#. name for orw
|
||||
msgid "Oro Win"
|
||||
@ -20109,7 +20109,7 @@ msgstr ""
|
||||
|
||||
#. name for oty
|
||||
msgid "Tamil; Old"
|
||||
msgstr ""
|
||||
msgstr "Древнетамильский"
|
||||
|
||||
#. name for otz
|
||||
msgid "Otomi; Ixtenco"
|
||||
@ -21897,7 +21897,7 @@ msgstr ""
|
||||
|
||||
#. name for rcf
|
||||
msgid "Creole French; Réunion"
|
||||
msgstr ""
|
||||
msgstr "Креольский французский; Реюньон"
|
||||
|
||||
#. name for rdb
|
||||
msgid "Rudbari"
|
||||
@ -23081,7 +23081,7 @@ msgstr ""
|
||||
|
||||
#. name for sin
|
||||
msgid "Sinhala"
|
||||
msgstr ""
|
||||
msgstr "Сингальский"
|
||||
|
||||
#. name for sip
|
||||
msgid "Sikkimese"
|
||||
@ -24661,7 +24661,7 @@ msgstr ""
|
||||
|
||||
#. name for tch
|
||||
msgid "Creole English; Turks And Caicos"
|
||||
msgstr ""
|
||||
msgstr "Креольский английский; Тёркс и Кайкос"
|
||||
|
||||
#. name for tci
|
||||
msgid "Wára"
|
||||
@ -24957,7 +24957,7 @@ msgstr ""
|
||||
|
||||
#. name for tgh
|
||||
msgid "Creole English; Tobagonian"
|
||||
msgstr ""
|
||||
msgstr "Креольский английский; Тобагский"
|
||||
|
||||
#. name for tgi
|
||||
msgid "Lawunuia"
|
||||
@ -25401,7 +25401,7 @@ msgstr ""
|
||||
|
||||
#. name for tly
|
||||
msgid "Talysh"
|
||||
msgstr ""
|
||||
msgstr "Талышский"
|
||||
|
||||
#. name for tma
|
||||
msgid "Tama (Chad)"
|
||||
@ -25845,7 +25845,7 @@ msgstr ""
|
||||
|
||||
#. name for trf
|
||||
msgid "Creole English; Trinidadian"
|
||||
msgstr ""
|
||||
msgstr "Креольский английский; Тринидадский"
|
||||
|
||||
#. name for trg
|
||||
msgid "Lishán Didán"
|
||||
@ -27121,7 +27121,7 @@ msgstr ""
|
||||
|
||||
#. name for vic
|
||||
msgid "Creole English; Virgin Islands"
|
||||
msgstr ""
|
||||
msgstr "Креольский английский; Виргинские острова"
|
||||
|
||||
#. name for vid
|
||||
msgid "Vidunda"
|
||||
@ -28209,7 +28209,7 @@ msgstr ""
|
||||
|
||||
#. name for wyy
|
||||
msgid "Fijian; Western"
|
||||
msgstr ""
|
||||
msgstr "Западнофиджийский"
|
||||
|
||||
#. name for xaa
|
||||
msgid "Arabic; Andalusian"
|
||||
|
@ -9,43 +9,43 @@ msgstr ""
|
||||
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
|
||||
"devel@lists.alioth.debian.org>\n"
|
||||
"POT-Creation-Date: 2011-11-25 14:01+0000\n"
|
||||
"PO-Revision-Date: 2011-09-27 16:56+0000\n"
|
||||
"Last-Translator: Clytie Siddall <clytie@riverland.net.au>\n"
|
||||
"PO-Revision-Date: 2013-02-15 06:39+0000\n"
|
||||
"Last-Translator: baduong <Unknown>\n"
|
||||
"Language-Team: Vietnamese <gnomevi-list@lists.sourceforge.net>\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"X-Launchpad-Export-Date: 2011-11-26 05:44+0000\n"
|
||||
"X-Generator: Launchpad (build 14381)\n"
|
||||
"X-Launchpad-Export-Date: 2013-02-16 04:56+0000\n"
|
||||
"X-Generator: Launchpad (build 16491)\n"
|
||||
"Language: vi\n"
|
||||
|
||||
#. name for aaa
|
||||
msgid "Ghotuo"
|
||||
msgstr ""
|
||||
msgstr "Ghotuo"
|
||||
|
||||
#. name for aab
|
||||
msgid "Alumu-Tesu"
|
||||
msgstr ""
|
||||
msgstr "Alumu-Tesu"
|
||||
|
||||
#. name for aac
|
||||
msgid "Ari"
|
||||
msgstr ""
|
||||
msgstr "Ari"
|
||||
|
||||
#. name for aad
|
||||
msgid "Amal"
|
||||
msgstr ""
|
||||
msgstr "Amal"
|
||||
|
||||
#. name for aae
|
||||
msgid "Albanian; Arbëreshë"
|
||||
msgstr ""
|
||||
msgstr "An-ba-ni"
|
||||
|
||||
#. name for aaf
|
||||
msgid "Aranadan"
|
||||
msgstr ""
|
||||
msgstr "Aranadan"
|
||||
|
||||
#. name for aag
|
||||
msgid "Ambrak"
|
||||
msgstr ""
|
||||
msgstr "Ambrak"
|
||||
|
||||
#. name for aah
|
||||
msgid "Arapesh; Abu'"
|
||||
@ -30817,7 +30817,7 @@ msgstr ""
|
||||
|
||||
#. name for zxx
|
||||
msgid "No linguistic content"
|
||||
msgstr ""
|
||||
msgstr "Không có nội dung kiểu ngôn ngữ"
|
||||
|
||||
#. name for zyb
|
||||
msgid "Zhuang; Yongbei"
|
||||
@ -30829,11 +30829,11 @@ msgstr ""
|
||||
|
||||
#. name for zyj
|
||||
msgid "Zhuang; Youjiang"
|
||||
msgstr ""
|
||||
msgstr "Zhuang; Youjiang"
|
||||
|
||||
#. name for zyn
|
||||
msgid "Zhuang; Yongnan"
|
||||
msgstr ""
|
||||
msgstr "Zhuang; Yongnan"
|
||||
|
||||
#. name for zyp
|
||||
msgid "Zyphe"
|
||||
|
@@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
numeric_version = (0, 9, 19)
numeric_version = (0, 9, 21)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"
@@ -16,15 +16,14 @@ import apsw
from calibre import isbytestring, force_unicode, prints
from calibre.constants import (iswindows, filesystem_encoding,
preferred_encoding)
from calibre.ptempfile import PersistentTemporaryFile, SpooledTemporaryFile
from calibre.db import SPOOL_SIZE
from calibre.ptempfile import PersistentTemporaryFile
from calibre.db.schema_upgrades import SchemaUpgrade
from calibre.library.field_metadata import FieldMetadata
from calibre.ebooks.metadata import title_sort, author_to_author_sort
from calibre.utils.icu import strcmp
from calibre.utils.icu import sort_key
from calibre.utils.config import to_json, from_json, prefs, tweaks
from calibre.utils.date import utcfromtimestamp, parse_date
from calibre.utils.filenames import is_case_sensitive
from calibre.utils.filenames import (is_case_sensitive, samefile, hardlink_file)
from calibre.db.tables import (OneToOneTable, ManyToOneTable, ManyToManyTable,
SizeTable, FormatsTable, AuthorsTable, IdentifiersTable,
CompositeTable, LanguagesTable)
@@ -173,7 +172,9 @@ def _author_to_author_sort(x):
return author_to_author_sort(x.replace('|', ','))

def icu_collator(s1, s2):
return strcmp(force_unicode(s1, 'utf-8'), force_unicode(s2, 'utf-8'))
return cmp(sort_key(force_unicode(s1, 'utf-8')),
sort_key(force_unicode(s2, 'utf-8')))

# }}}

# Unused aggregators {{{
@@ -855,38 +856,75 @@ class DB(object):
ans = {}
if path is not None:
stat = os.stat(path)
ans['path'] = path
ans['size'] = stat.st_size
ans['mtime'] = utcfromtimestamp(stat.st_mtime)
return ans

def cover(self, path, as_file=False, as_image=False,
as_path=False):
def has_format(self, book_id, fmt, fname, path):
return self.format_abspath(book_id, fmt, fname, path) is not None

def copy_cover_to(self, path, dest, windows_atomic_move=None, use_hardlink=False):
path = os.path.join(self.library_path, path, 'cover.jpg')
ret = None
if os.access(path, os.R_OK):
try:
if windows_atomic_move is not None:
if not isinstance(dest, basestring):
raise Exception("Error, you must pass the dest as a path when"
" using windows_atomic_move")
if os.access(path, os.R_OK) and dest and not samefile(dest, path):
windows_atomic_move.copy_path_to(path, dest)
return True
else:
if os.access(path, os.R_OK):
try:
f = lopen(path, 'rb')
except (IOError, OSError):
time.sleep(0.2)
f = lopen(path, 'rb')
except (IOError, OSError):
time.sleep(0.2)
f = lopen(path, 'rb')
with f:
if as_path:
pt = PersistentTemporaryFile('_dbcover.jpg')
with pt:
shutil.copyfileobj(f, pt)
return pt.name
if as_file:
ret = SpooledTemporaryFile(SPOOL_SIZE)
shutil.copyfileobj(f, ret)
ret.seek(0)
else:
ret = f.read()
if as_image:
from PyQt4.Qt import QImage
i = QImage()
i.loadFromData(ret)
ret = i
return ret
with f:
if hasattr(dest, 'write'):
shutil.copyfileobj(f, dest)
if hasattr(dest, 'flush'):
dest.flush()
return True
elif dest and not samefile(dest, path):
if use_hardlink:
try:
hardlink_file(path, dest)
return True
except:
pass
with lopen(dest, 'wb') as d:
shutil.copyfileobj(f, d)
return True
return False

def copy_format_to(self, book_id, fmt, fname, path, dest,
windows_atomic_move=None, use_hardlink=False):
path = self.format_abspath(book_id, fmt, fname, path)
if path is None:
return False
if windows_atomic_move is not None:
if not isinstance(dest, basestring):
raise Exception("Error, you must pass the dest as a path when"
" using windows_atomic_move")
if dest and not samefile(dest, path):
windows_atomic_move.copy_path_to(path, dest)
else:
if hasattr(dest, 'write'):
with lopen(path, 'rb') as f:
shutil.copyfileobj(f, dest)
if hasattr(dest, 'flush'):
dest.flush()
elif dest and not samefile(dest, path):
if use_hardlink:
try:
hardlink_file(path, dest)
return True
except:
pass
with lopen(path, 'rb') as f, lopen(dest, 'wb') as d:
shutil.copyfileobj(f, d)
return True

# }}}
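The two copy methods added above share one calling convention: ``dest`` may be either a file-like object or a filesystem path, and ``use_hardlink`` turns the copy into a hard link where the OS allows it. The following is a minimal sketch of that dispatch pattern, using plain ``open``, ``os.link`` and a hypothetical ``_same_file`` helper in place of calibre's ``lopen``, ``hardlink_file`` and ``samefile``, so it is an illustration, not the calibre implementation:

import os, shutil

def _same_file(a, b):
    # portable stand-in for calibre's samefile() helper (assumption)
    return os.path.exists(b) and os.path.realpath(a) == os.path.realpath(b)

def copy_file_to(path, dest, use_hardlink=False):
    # dest may be a file-like object or a filesystem path
    if hasattr(dest, 'write'):
        with open(path, 'rb') as f:
            shutil.copyfileobj(f, dest)
        if hasattr(dest, 'flush'):
            dest.flush()
        return True
    if dest and not _same_file(path, dest):
        if use_hardlink:
            try:
                os.link(path, dest)  # cheap when both sit on one filesystem
                return True
            except OSError:
                pass  # cross-device link etc.: fall back to a real copy
        with open(path, 'rb') as f, open(dest, 'wb') as d:
            shutil.copyfileobj(f, d)
        return True
    return False

Accepting both destination kinds keeps a single code path for exports to in-memory buffers and to files on disk.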
@@ -8,16 +8,22 @@ __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import os, traceback
from io import BytesIO
from collections import defaultdict
from functools import wraps, partial

from calibre.db import SPOOL_SIZE
from calibre.db.categories import get_categories
from calibre.db.locking import create_locks, RecordLock
from calibre.db.errors import NoSuchFormat
from calibre.db.fields import create_field
from calibre.db.search import Search
from calibre.db.tables import VirtualTable
from calibre.db.write import get_series_values
from calibre.db.lazy import FormatMetadata, FormatsList
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ptempfile import (base_dir, PersistentTemporaryFile,
SpooledTemporaryFile)
from calibre.utils.date import now
from calibre.utils.icu import sort_key

@@ -103,27 +109,6 @@ class Cache(object):
def field_metadata(self):
return self.backend.field_metadata

def _format_abspath(self, book_id, fmt):
'''
Return absolute path to the ebook file of format `format`

WARNING: This method will return a dummy path for a network backend DB,
so do not rely on it, use format(..., as_path=True) instead.

Currently used only in calibredb list, the viewer and the catalogs (via
get_data_as_dict()).

Apart from the viewer, I don't believe any of the others do any file
I/O with the results of this call.
'''
try:
name = self.fields['formats'].format_fname(book_id, fmt)
path = self._field_for('path', book_id).replace('/', os.sep)
except:
return None
if name and path:
return self.backend.format_abspath(book_id, fmt, name, path)

def _get_metadata(self, book_id, get_user_categories=True): # {{{
mi = Metadata(None, template_cache=self.formatter_template_cache)
author_ids = self._field_ids_for('authors', book_id)
@@ -162,7 +147,7 @@ class Cache(object):
if not formats:
good_formats = None
else:
mi.format_metadata = FormatMetadata(self, id, formats)
mi.format_metadata = FormatMetadata(self, book_id, formats)
good_formats = FormatsList(formats, mi.format_metadata)
mi.formats = good_formats
mi.has_cover = _('Yes') if self._field_for('cover', book_id,
@@ -227,6 +212,12 @@ class Cache(object):
self.fields['ondevice'] = create_field('ondevice',
VirtualTable('ondevice'))

for name, field in self.fields.iteritems():
if name[0] == '#' and name.endswith('_index'):
field.series_field = self.fields[name[:-len('_index')]]
elif name == 'series_index':
field.series_field = self.fields['series']

@read_api
def field_for(self, name, book_id, default_value=None):
'''
@@ -397,15 +388,184 @@ class Cache(object):
:param as_path: If True return the image as a path pointing to a
temporary file
'''
if as_file:
ret = SpooledTemporaryFile(SPOOL_SIZE)
if not self.copy_cover_to(book_id, ret): return
ret.seek(0)
elif as_path:
pt = PersistentTemporaryFile('_dbcover.jpg')
with pt:
if not self.copy_cover_to(book_id, pt): return
ret = pt.name
else:
buf = BytesIO()
if not self.copy_cover_to(book_id, buf): return
ret = buf.getvalue()
if as_image:
from PyQt4.Qt import QImage
i = QImage()
i.loadFromData(ret)
ret = i
return ret

@api
def copy_cover_to(self, book_id, dest, use_hardlink=False):
'''
Copy the cover to the file like object ``dest``. Returns False
if no cover exists or dest is the same file as the current cover.
dest can also be a path in which case the cover is
copied to it iff the path is different from the current path (taking
case sensitivity into account).
'''
with self.read_lock:
try:
path = self._field_for('path', book_id).replace('/', os.sep)
except:
return None
return False

with self.record_lock.lock(book_id):
return self.backend.cover(path, as_file=as_file, as_image=as_image,
as_path=as_path)
return self.backend.copy_cover_to(path, dest,
use_hardlink=use_hardlink)

@api
def copy_format_to(self, book_id, fmt, dest, use_hardlink=False):
'''
Copy the format ``fmt`` to the file like object ``dest``. If the
specified format does not exist, raises :class:`NoSuchFormat` error.
dest can also be a path, in which case the format is copied to it, iff
the path is different from the current path (taking case sensitivity
into account).
'''
with self.read_lock:
try:
name = self.fields['formats'].format_fname(book_id, fmt)
path = self._field_for('path', book_id).replace('/', os.sep)
except:
raise NoSuchFormat('Record %d has no %s file'%(book_id, fmt))

with self.record_lock.lock(book_id):
return self.backend.copy_format_to(book_id, fmt, name, path, dest,
use_hardlink=use_hardlink)
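Per the docstrings above, a hypothetical caller of these two Cache methods might look like the sketch below; ``cache`` is assumed to be an initialized ``calibre.db.cache.Cache``, the paths are invented, and the book is assumed to carry a cover and an EPUB file:

from io import BytesIO
from calibre.db.errors import NoSuchFormat

def export_book(cache, book_id, cover_path, epub_path):
    # Destinations may be file-like objects or paths, per the docstrings above.
    buf = BytesIO()
    if cache.copy_cover_to(book_id, buf):  # file-like destination
        with open(cover_path, 'wb') as f:
            f.write(buf.getvalue())
    try:
        # path destination; hardlinked when possible, copied otherwise
        cache.copy_format_to(book_id, 'EPUB', epub_path, use_hardlink=True)
    except NoSuchFormat:
        pass  # the book has no EPUB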
@read_api
def format_abspath(self, book_id, fmt):
'''
Return absolute path to the ebook file of format `format`

Currently used only in calibredb list, the viewer and the catalogs (via
get_data_as_dict()).

Apart from the viewer, I don't believe any of the others do any file
I/O with the results of this call.
'''
try:
name = self.fields['formats'].format_fname(book_id, fmt)
path = self._field_for('path', book_id).replace('/', os.sep)
except:
return None
if name and path:
return self.backend.format_abspath(book_id, fmt, name, path)

@read_api
def has_format(self, book_id, fmt):
'Return True iff the format exists on disk'
try:
name = self.fields['formats'].format_fname(book_id, fmt)
path = self._field_for('path', book_id).replace('/', os.sep)
except:
return False
return self.backend.has_format(book_id, fmt, name, path)

@read_api
def formats(self, book_id, verify_formats=True):
'''
Return tuple of all formats for the specified book. If verify_formats
is True, verifies that the files exist on disk.
'''
ans = self.field_for('formats', book_id)
if verify_formats and ans:
try:
path = self._field_for('path', book_id).replace('/', os.sep)
except:
return ()
def verify(fmt):
try:
name = self.fields['formats'].format_fname(book_id, fmt)
except:
return False
return self.backend.has_format(book_id, fmt, name, path)

ans = tuple(x for x in ans if verify(x))
return ans

@api
def format(self, book_id, fmt, as_file=False, as_path=False, preserve_filename=False):
'''
Return the ebook format as a bytestring or `None` if the format doesn't exist,
or we don't have permission to write to the ebook file.

:param as_file: If True the ebook format is returned as a file object. Note
that the file object is a SpooledTemporaryFile, so if what you want to
do is copy the format to another file, use :method:`copy_format_to`
instead for performance.
:param as_path: Copies the format file to a temp file and returns the
path to the temp file
:param preserve_filename: If True and returning a path the filename is
the same as that used in the library. Note that using
this means that repeated calls yield the same
temp file (which is re-created each time)
'''
with self.read_lock:
ext = ('.'+fmt.lower()) if fmt else ''
try:
fname = self.fields['formats'].format_fname(book_id, fmt)
except:
return None
fname += ext

if as_path:
if preserve_filename:
bd = base_dir()
d = os.path.join(bd, 'format_abspath')
try:
os.makedirs(d)
except:
pass
ret = os.path.join(d, fname)
with self.record_lock.lock(book_id):
try:
self.copy_format_to(book_id, fmt, ret)
except NoSuchFormat:
return None
else:
with PersistentTemporaryFile(ext) as pt, self.record_lock.lock(book_id):
try:
self.copy_format_to(book_id, fmt, pt)
except NoSuchFormat:
return None
ret = pt.name
elif as_file:
ret = SpooledTemporaryFile(SPOOL_SIZE)
with self.record_lock.lock(book_id):
try:
self.copy_format_to(book_id, fmt, ret)
except NoSuchFormat:
return None
ret.seek(0)
# Various bits of code try to use the name as the default
# title when reading metadata, so set it
ret.name = fname
else:
buf = BytesIO()
with self.record_lock.lock(book_id):
try:
self.copy_format_to(book_id, fmt, buf)
except NoSuchFormat:
return None

ret = buf.getvalue()

return ret
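Based on the docstrings above, reading formats through this API might be driven as in the short sketch below; ``cache`` is again an assumed, initialized Cache and the helper name is invented for illustration:

def dump_formats(cache, book_id):
    for fmt in cache.formats(book_id):  # only formats verified on disk
        if not cache.has_format(book_id, fmt):
            continue
        data = cache.format(book_id, fmt)  # bytes, or None on failure
        print(fmt, len(data) if data else 0)
        # For large files a spooled file object beats a bytestring:
        f = cache.format(book_id, fmt, as_file=True)
        if f is not None:
            f.close()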
@read_api
def multisort(self, fields, ids_to_sort=None):
@@ -455,6 +615,37 @@ class Cache(object):
return get_categories(self, sort=sort, book_ids=book_ids,
icon_map=icon_map)

@write_api
def set_field(self, name, book_id_to_val_map, allow_case_change=True):
# TODO: Specialize title/authors to also update path
# TODO: Handle updating caches used by composite fields
# TODO: Ensure the sort fields are updated for title/author/series?
f = self.fields[name]
is_series = f.metadata['datatype'] == 'series'

if is_series:
bimap, simap = {}, {}
for k, v in book_id_to_val_map.iteritems():
if isinstance(v, basestring):
v, sid = get_series_values(v)
else:
v = sid = None
if name.startswith('#') and sid is None:
sid = 1.0 # The value will be set to 1.0 in the db table
bimap[k] = v
if sid is not None:
simap[k] = sid
book_id_to_val_map = bimap

dirtied = f.writer.set_books(
book_id_to_val_map, self.backend, allow_case_change=allow_case_change)

if is_series and simap:
sf = self.fields[f.name+'_index']
dirtied |= sf.writer.set_books(simap, self.backend, allow_case_change=False)

return dirtied

# }}}

class SortKey(object):
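A short sketch of how ``set_field`` behaves per the code above: it returns the set of dirtied book ids, and for series-type values a trailing ``[index]`` suffix is split off by ``get_series_values`` and written to the matching ``_index`` field. The function name below is hypothetical; ``cache`` is an assumed, initialized Cache:

def demo_set_field(cache):
    # returns the ids whose metadata was actually changed
    dirtied = cache.set_field('publisher', {1: 'Acme', 2: 'Acme'})
    assert dirtied == {1, 2}
    # a repeated write with identical values dirties nothing
    assert not cache.set_field('publisher', {1: 'Acme'})
    # 'A Series One [3]' sets series for book 1 and series_index to 3.0
    cache.set_field('series', {1: 'A Series One [3]'})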
@@ -12,6 +12,7 @@ from threading import Lock
from collections import defaultdict, Counter

from calibre.db.tables import ONE_ONE, MANY_ONE, MANY_MANY
from calibre.db.write import Writer
from calibre.ebooks.metadata import title_sort
from calibre.utils.config_base import tweaks
from calibre.utils.icu import sort_key
@@ -21,6 +22,7 @@ from calibre.utils.localization import calibre_langcode_to_name
class Field(object):

is_many = False
is_many_many = False

def __init__(self, name, table):
self.name, self.table = name, table
@@ -44,6 +46,8 @@ class Field(object):
self.category_formatter = lambda x:'\u2605'*int(x/2)
elif name == 'languages':
self.category_formatter = calibre_langcode_to_name
self.writer = Writer(self)
self.series_field = None

@property
def metadata(self):
@@ -296,6 +300,7 @@ class ManyToOneField(Field):
class ManyToManyField(Field):

is_many = True
is_many_many = True

def __init__(self, *args, **kwargs):
Field.__init__(self, *args, **kwargs)
@@ -123,9 +123,8 @@ class ManyToOneTable(Table):

def read_id_maps(self, db):
for row in db.conn.execute('SELECT id, {0} FROM {1}'.format(
self.metadata['column'], self.metadata['table'])):
if row[1]:
self.id_map[row[0]] = self.unserialize(row[1])
self.metadata['column'], self.metadata['table'])):
self.id_map[row[0]] = self.unserialize(row[1])

def read_maps(self, db):
for row in db.conn.execute(
@@ -218,3 +217,4 @@ class LanguagesTable(ManyToManyTable):
ManyToManyTable.read_id_maps(self, db)
lm = lang_map()
self.lang_name_map = {x:lm.get(x, x) for x in self.id_map.itervalues()}
@@ -7,19 +7,36 @@ __license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import unittest, os, shutil
import unittest, os, shutil, tempfile, atexit
from functools import partial
from io import BytesIO
from future_builtins import map

rmtree = partial(shutil.rmtree, ignore_errors=True)

class BaseTest(unittest.TestCase):

def setUp(self):
self.library_path = self.mkdtemp()
self.create_db(self.library_path)

def tearDown(self):
shutil.rmtree(self.library_path)

def create_db(self, library_path):
from calibre.library.database2 import LibraryDatabase2
if LibraryDatabase2.exists_at(library_path):
raise ValueError('A library already exists at %r'%library_path)
src = os.path.join(os.path.dirname(__file__), 'metadata.db')
db = os.path.join(library_path, 'metadata.db')
shutil.copyfile(src, db)
return db
dest = os.path.join(library_path, 'metadata.db')
shutil.copyfile(src, dest)
db = LibraryDatabase2(library_path)
db.set_cover(1, I('lt.png', data=True))
db.set_cover(2, I('polish.png', data=True))
db.add_format(1, 'FMT1', BytesIO(b'book1fmt1'), index_is_id=True)
db.add_format(1, 'FMT2', BytesIO(b'book1fmt2'), index_is_id=True)
db.add_format(2, 'FMT1', BytesIO(b'book2fmt1'), index_is_id=True)
return dest

def init_cache(self, library_path):
from calibre.db.backend import DB
@@ -29,20 +46,38 @@ class BaseTest(unittest.TestCase):
cache.init()
return cache

def mkdtemp(self):
ans = tempfile.mkdtemp(prefix='db_test_')
atexit.register(rmtree, ans)
return ans

def init_old(self, library_path):
from calibre.library.database2 import LibraryDatabase2
return LibraryDatabase2(library_path)

def clone_library(self, library_path):
if not hasattr(self, 'clone_dir'):
self.clone_dir = tempfile.mkdtemp()
atexit.register(rmtree, self.clone_dir)
self.clone_count = 0
self.clone_count += 1
dest = os.path.join(self.clone_dir, str(self.clone_count))
shutil.copytree(library_path, dest)
return dest

def compare_metadata(self, mi1, mi2):
allfk1 = mi1.all_field_keys()
allfk2 = mi2.all_field_keys()
self.assertEqual(allfk1, allfk2)

all_keys = {'format_metadata', 'id', 'application_id',
'author_sort_map', 'author_link_map', 'book_size',
'ondevice_col', 'last_modified'}.union(allfk1)
'author_sort_map', 'author_link_map', 'book_size',
'ondevice_col', 'last_modified', 'has_cover',
'cover_data'}.union(allfk1)
for attr in all_keys:
if attr == 'user_metadata': continue
if attr == 'format_metadata': continue # TODO: Not implemented yet
attr1, attr2 = getattr(mi1, attr), getattr(mi2, attr)
if attr == 'formats':
continue # TODO: Not implemented yet
attr1, attr2 = map(lambda x:tuple(x) if x else (), (attr1, attr2))
self.assertEqual(attr1, attr2,
'%s not the same: %r != %r'%(attr, attr1, attr2))
@@ -7,21 +7,13 @@ __license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import shutil, unittest, tempfile, datetime
from cStringIO import StringIO
import unittest, datetime

from calibre.utils.date import utc_tz
from calibre.db.tests.base import BaseTest

class ReadingTest(BaseTest):

def setUp(self):
self.library_path = tempfile.mkdtemp()
self.create_db(self.library_path)

def tearDown(self):
shutil.rmtree(self.library_path)

def test_read(self): # {{{
'Test the reading of data from the database'
cache = self.init_cache(self.library_path)
@@ -55,7 +47,7 @@ class ReadingTest(BaseTest):
'#tags':(),
'#yesno':None,
'#comments': None,

'size':None,
},

2 : {
@@ -66,7 +58,7 @@ class ReadingTest(BaseTest):
'series' : 'A Series One',
'series_index': 1.0,
'tags':('Tag One', 'Tag Two'),
'formats': (),
'formats': ('FMT1',),
'rating': 4.0,
'identifiers': {'test':'one'},
'timestamp': datetime.datetime(2011, 9, 5, 21, 6,
@@ -86,6 +78,7 @@ class ReadingTest(BaseTest):
'#tags':('My Tag One', 'My Tag Two'),
'#yesno':True,
'#comments': '<div>My Comments One<p></p></div>',
'size':9,
},
1 : {
'title': 'Title Two',
@@ -96,7 +89,7 @@ class ReadingTest(BaseTest):
'series_index': 2.0,
'rating': 6.0,
'tags': ('Tag One', 'News'),
'formats':(),
'formats':('FMT1', 'FMT2'),
'identifiers': {'test':'two'},
'timestamp': datetime.datetime(2011, 9, 6, 6, 0,
tzinfo=utc_tz),
@@ -115,6 +108,7 @@ class ReadingTest(BaseTest):
'#tags':('My Tag Two',),
'#yesno':False,
'#comments': '<div>My Comments Two<p></p></div>',
'size':9,

},
}
@@ -172,22 +166,41 @@ class ReadingTest(BaseTest):
'Test get_metadata() returns the same data for both backends'
from calibre.library.database2 import LibraryDatabase2
old = LibraryDatabase2(self.library_path)
for i in xrange(1, 3):
old.add_format(i, 'txt%d'%i, StringIO(b'random%d'%i),
index_is_id=True)
old.add_format(i, 'text%d'%i, StringIO(b'random%d'%i),
index_is_id=True)

old_metadata = {i:old.get_metadata(i, index_is_id=True) for i in
old_metadata = {i:old.get_metadata(
i, index_is_id=True, get_cover=True, cover_as_data=True) for i in
xrange(1, 4)}
for mi in old_metadata.itervalues():
mi.format_metadata = dict(mi.format_metadata)
if mi.formats:
mi.formats = tuple(mi.formats)
old = None

cache = self.init_cache(self.library_path)

new_metadata = {i:cache.get_metadata(i) for i in xrange(1, 4)}
new_metadata = {i:cache.get_metadata(
i, get_cover=True, cover_as_data=True) for i in xrange(1, 4)}
cache = None
for mi2, mi1 in zip(new_metadata.values(), old_metadata.values()):
self.compare_metadata(mi1, mi2)
# }}}

def test_get_cover(self): # {{{
'Test cover() returns the same data for both backends'
from calibre.library.database2 import LibraryDatabase2
old = LibraryDatabase2(self.library_path)
covers = {i: old.cover(i, index_is_id=True) for i in old.all_ids()}
old = None
cache = self.init_cache(self.library_path)
for book_id, cdata in covers.iteritems():
self.assertEqual(cdata, cache.cover(book_id), 'Reading of cover failed')
f = cache.cover(book_id, as_file=True)
self.assertEqual(cdata, f.read() if f else f, 'Reading of cover as file failed')
if cdata:
with open(cache.cover(book_id, as_path=True), 'rb') as f:
self.assertEqual(cdata, f.read(), 'Reading of cover as path failed')
else:
self.assertEqual(cdata, cache.cover(book_id, as_path=True),
'Reading of null cover as path failed')

# }}}

@@ -227,8 +240,12 @@ class ReadingTest(BaseTest):
# User categories
'@Good Authors:One', '@Good Series.good tags:two',

# TODO: Tests for searching the size and #formats columns and
# cover:true|false
# Cover/Formats
'cover:true', 'cover:false', 'formats:true', 'formats:false',
'formats:#>1', 'formats:#=1', 'formats:=fmt1', 'formats:=fmt2',
'formats:=fmt1 or formats:fmt2', '#formats:true', '#formats:false',
'#formats:fmt1', '#formats:fmt2', '#formats:fmt1 and #formats:fmt2',

)}
old = None

@@ -262,7 +279,8 @@ class ReadingTest(BaseTest):
(category == 'series' and attr == 'sort') or # Sorting is wrong in old
(category == 'identifiers' and attr == 'id_set') or
(category == '@Good Series') or # Sorting is wrong in old
(category == 'news' and attr in {'count', 'id_set'})
(category == 'news' and attr in {'count', 'id_set'}) or
(category == 'formats' and attr == 'id_set')
):
continue
self.assertEqual(oval, nval,
@@ -278,6 +296,38 @@ class ReadingTest(BaseTest):

# }}}

def test_get_formats(self): # {{{
'Test reading ebook formats using the format() method'
from calibre.library.database2 import LibraryDatabase2
old = LibraryDatabase2(self.library_path)
ids = old.all_ids()
lf = {i:set(old.formats(i, index_is_id=True).split(',')) if old.formats(
i, index_is_id=True) else set() for i in ids}
formats = {i:{f:old.format(i, f, index_is_id=True) for f in fmts} for
i, fmts in lf.iteritems()}
old = None
cache = self.init_cache(self.library_path)
for book_id, fmts in lf.iteritems():
self.assertEqual(fmts, set(cache.formats(book_id)),
'Set of formats is not the same')
for fmt in fmts:
old = formats[book_id][fmt]
self.assertEqual(old, cache.format(book_id, fmt),
'Old and new format disagree')
f = cache.format(book_id, fmt, as_file=True)
self.assertEqual(old, f.read(),
'Failed to read format as file')
with open(cache.format(book_id, fmt, as_path=True,
preserve_filename=True), 'rb') as f:
self.assertEqual(old, f.read(),
'Failed to read format as path')
with open(cache.format(book_id, fmt, as_path=True), 'rb') as f:
self.assertEqual(old, f.read(),
'Failed to read format as path')


# }}}

def tests():
return unittest.TestLoader().loadTestsFromTestCase(ReadingTest)
221
src/calibre/db/tests/writing.py
Normal file
@@ -0,0 +1,221 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)

__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import unittest
from collections import namedtuple
from functools import partial

from calibre.utils.date import UNDEFINED_DATE
from calibre.db.tests.base import BaseTest

class WritingTest(BaseTest):

@property
def cloned_library(self):
return self.clone_library(self.library_path)

def create_getter(self, name, getter=None):
if getter is None:
if name.endswith('_index'):
ans = lambda db:partial(db.get_custom_extra, index_is_id=True,
label=name[1:].replace('_index', ''))
else:
ans = lambda db:partial(db.get_custom, label=name[1:],
index_is_id=True)
else:
ans = lambda db:partial(getattr(db, getter), index_is_id=True)
return ans

def create_setter(self, name, setter=None):
if setter is None:
ans = lambda db:partial(db.set_custom, label=name[1:], commit=True)
else:
ans = lambda db:partial(getattr(db, setter), commit=True)
return ans

def create_test(self, name, vals, getter=None, setter=None ):
T = namedtuple('Test', 'name vals getter setter')
return T(name, vals, self.create_getter(name, getter),
self.create_setter(name, setter))

def run_tests(self, tests):
results = {}
for test in tests:
results[test] = []
for val in test.vals:
cl = self.cloned_library
cache = self.init_cache(cl)
cache.set_field(test.name, {1: val})
cached_res = cache.field_for(test.name, 1)
del cache
db = self.init_old(cl)
getter = test.getter(db)
sqlite_res = getter(1)
if test.name.endswith('_index'):
val = float(val) if val is not None else 1.0
self.assertEqual(sqlite_res, val,
'Failed setting for %s with value %r, sqlite value not the same. val: %r != sqlite_val: %r'%(
test.name, val, val, sqlite_res))
else:
test.setter(db)(1, val)
old_cached_res = getter(1)
self.assertEqual(old_cached_res, cached_res,
'Failed setting for %s with value %r, cached value not the same. Old: %r != New: %r'%(
test.name, val, old_cached_res, cached_res))
db.refresh()
old_sqlite_res = getter(1)
self.assertEqual(old_sqlite_res, sqlite_res,
'Failed setting for %s, sqlite value not the same: %r != %r'%(
test.name, old_sqlite_res, sqlite_res))
del db

def test_one_one(self): # {{{
'Test setting of values in one-one fields'
tests = [self.create_test('#yesno', (True, False, 'true', 'false', None))]
for name, getter, setter in (
('#series_index', None, None),
('series_index', 'series_index', 'set_series_index'),
('#float', None, None),
):
vals = ['1.5', None, 0, 1.0]
tests.append(self.create_test(name, tuple(vals), getter, setter))

for name, getter, setter in (
('pubdate', 'pubdate', 'set_pubdate'),
('timestamp', 'timestamp', 'set_timestamp'),
('#date', None, None),
):
tests.append(self.create_test(
name, ('2011-1-12', UNDEFINED_DATE, None), getter, setter))

for name, getter, setter in (
('title', 'title', 'set_title'),
('uuid', 'uuid', 'set_uuid'),
('author_sort', 'author_sort', 'set_author_sort'),
('sort', 'title_sort', 'set_title_sort'),
('#comments', None, None),
('comments', 'comments', 'set_comment'),
):
vals = ['something', None]
if name not in {'comments', '#comments'}:
# Setting text column to '' returns None in the new backend
# and '' in the old. I think None is more correct.
vals.append('')
if name == 'comments':
# Again new behavior of deleting comment rather than setting
# empty string is more correct.
vals.remove(None)
tests.append(self.create_test(name, tuple(vals), getter, setter))

self.run_tests(tests)
# }}}

def test_many_one_basic(self): # {{{
'Test the different code paths for writing to a many-one field'
cl = self.cloned_library
cache = self.init_cache(cl)
f = cache.fields['publisher']
item_ids = {f.ids_for_book(1)[0], f.ids_for_book(2)[0]}
val = 'Changed'
self.assertEqual(cache.set_field('publisher', {1:val, 2:val}), {1, 2})
cache2 = self.init_cache(cl)
for book_id in (1, 2):
for c in (cache, cache2):
self.assertEqual(c.field_for('publisher', book_id), val)
self.assertFalse(item_ids.intersection(set(c.fields['publisher'].table.id_map)))
del cache2
self.assertFalse(cache.set_field('publisher', {1:val, 2:val}))
val = val.lower()
self.assertFalse(cache.set_field('publisher', {1:val, 2:val},
allow_case_change=False))
self.assertEqual(cache.set_field('publisher', {1:val, 2:val}), {1, 2})
cache2 = self.init_cache(cl)
for book_id in (1, 2):
for c in (cache, cache2):
self.assertEqual(c.field_for('publisher', book_id), val)
del cache2
self.assertEqual(cache.set_field('publisher', {1:'new', 2:'New'}), {1, 2})
self.assertEqual(cache.field_for('publisher', 1).lower(), 'new')
self.assertEqual(cache.field_for('publisher', 2).lower(), 'new')
self.assertEqual(cache.set_field('publisher', {1:None, 2:'NEW'}), {1, 2})
self.assertEqual(len(f.table.id_map), 1)
self.assertEqual(cache.set_field('publisher', {2:None}), {2})
self.assertEqual(len(f.table.id_map), 0)
cache2 = self.init_cache(cl)
self.assertEqual(len(cache2.fields['publisher'].table.id_map), 0)
del cache2
self.assertEqual(cache.set_field('publisher', {1:'one', 2:'two',
3:'three'}), {1, 2, 3})
self.assertEqual(cache.set_field('publisher', {1:''}), set([1]))
self.assertEqual(cache.set_field('publisher', {1:'two'}), set([1]))
self.assertEqual(tuple(map(f.for_book, (1,2,3))), ('two', 'two', 'three'))
self.assertEqual(cache.set_field('publisher', {1:'Two'}), {1, 2})
cache2 = self.init_cache(cl)
self.assertEqual(tuple(map(f.for_book, (1,2,3))), ('Two', 'Two', 'three'))
del cache2

# Enum
self.assertFalse(cache.set_field('#enum', {1:'Not allowed'}))
self.assertEqual(cache.set_field('#enum', {1:'One', 2:'One', 3:'Three'}), {1, 3})
self.assertEqual(cache.set_field('#enum', {1:None}), set([1]))
cache2 = self.init_cache(cl)
for c in (cache, cache2):
for i, val in {1:None, 2:'One', 3:'Three'}.iteritems():
self.assertEqual(c.field_for('#enum', i), val)
del cache2

# Rating
self.assertFalse(cache.set_field('rating', {1:6, 2:4}))
self.assertEqual(cache.set_field('rating', {1:0, 3:2}), {1, 3})
self.assertEqual(cache.set_field('#rating', {1:None, 2:4, 3:8}), {1, 2, 3})
cache2 = self.init_cache(cl)
|
||||
for c in (cache, cache2):
|
||||
for i, val in {1:None, 2:4, 3:2}.iteritems():
|
||||
self.assertEqual(c.field_for('rating', i), val)
|
||||
for i, val in {1:None, 2:4, 3:8}.iteritems():
|
||||
self.assertEqual(c.field_for('#rating', i), val)
|
||||
del cache2
|
||||
|
||||
# Series
|
||||
self.assertFalse(cache.set_field('series',
|
||||
{1:'a series one', 2:'a series one'}, allow_case_change=False))
|
||||
self.assertEqual(cache.set_field('series', {3:'Series [3]'}), set([3]))
|
||||
self.assertEqual(cache.set_field('#series', {1:'Series', 3:'Series'}),
|
||||
{1, 3})
|
||||
self.assertEqual(cache.set_field('#series', {2:'Series [0]'}), set([2]))
|
||||
cache2 = self.init_cache(cl)
|
||||
for c in (cache, cache2):
|
||||
for i, val in {1:'A Series One', 2:'A Series One', 3:'Series'}.iteritems():
|
||||
self.assertEqual(c.field_for('series', i), val)
|
||||
for i in (1, 2, 3):
|
||||
self.assertEqual(c.field_for('#series', i), 'Series')
|
||||
for i, val in {1:2, 2:1, 3:3}.iteritems():
|
||||
self.assertEqual(c.field_for('series_index', i), val)
|
||||
for i, val in {1:1, 2:0, 3:1}.iteritems():
|
||||
self.assertEqual(c.field_for('#series_index', i), val)
|
||||
del cache2
|
||||
|
||||
# }}}
|
||||
|
||||

    def test_many_many_basic(self): # {{{
        'Test the different code paths for writing to a many-many field'
        # Fields: identifiers, authors, tags, languages, #authors, #tags
    # }}}

def tests():
    return unittest.TestLoader().loadTestsFromTestCase(WritingTest)

def run():
    unittest.TextTestRunner(verbosity=2).run(tests())

if __name__ == '__main__':
    run()

401
src/calibre/db/write.py
Normal file
@@ -0,0 +1,401 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import re
from functools import partial
from datetime import datetime

from calibre.constants import preferred_encoding, ispy3
from calibre.utils.date import (parse_only_date, parse_date, UNDEFINED_DATE,
                                isoformat)
if ispy3:
    unicode = str

# Convert data into values suitable for the db {{{

def sqlite_datetime(x):
    return isoformat(x, sep=' ') if isinstance(x, datetime) else x

def single_text(x):
    if x is None:
        return x
    if not isinstance(x, unicode):
        x = x.decode(preferred_encoding, 'replace')
    x = x.strip()
    return x if x else None

series_index_pat = re.compile(r'(.*)\s+\[([.0-9]+)\]$')

def get_series_values(val):
    if not val:
        return (val, None)
    match = series_index_pat.match(val.strip())
    if match is not None:
        idx = match.group(2)
        try:
            idx = float(idx)
            return (match.group(1).strip(), idx)
        except:
            pass
    return (val, None)
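A quick illustration of the parser above (an editorial sketch, not part of the commit): a trailing bracketed number is split off as the series index, anything else is passed through untouched.

    get_series_values('A Clash of Kings [2]')    # -> ('A Clash of Kings', 2.0)
    get_series_values('A Clash of Kings [2.5]')  # -> ('A Clash of Kings', 2.5)
    get_series_values('Standalone Title')        # -> ('Standalone Title', None)
    get_series_values('Bad Index [x]')           # -> ('Bad Index [x]', None)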

def multiple_text(sep, ui_sep, x):
    if not x:
        return ()
    if isinstance(x, bytes):
        x = x.decode(preferred_encoding, 'replace')
    if isinstance(x, unicode):
        x = x.split(sep)
    else:
        x = (y.decode(preferred_encoding, 'replace') if isinstance(y, bytes)
             else y for y in x)
    ui_sep = ui_sep.strip()
    repsep = ',' if ui_sep == ';' else ';'
    x = (y.strip().replace(ui_sep, repsep) for y in x if y.strip())
    return tuple(' '.join(y.split()) for y in x if y)
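For illustration (a sketch, not from the commit; the separator arguments mirror how get_adapter wires this up for tags-like and authors-like columns):

    multiple_text(',', ', ', 'one, two ,, three')         # -> ('one', 'two', 'three')
    multiple_text('&', ' & ', 'Smith, John & Doe, Jane')  # -> ('Smith, John', 'Doe, Jane')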

def adapt_datetime(x):
    if isinstance(x, (unicode, bytes)):
        x = parse_date(x, assume_utc=False, as_utc=False)
    return x

def adapt_date(x):
    if isinstance(x, (unicode, bytes)):
        x = parse_only_date(x)
    if x is None:
        x = UNDEFINED_DATE
    return x

def adapt_number(typ, x):
    if x is None:
        return None
    if isinstance(x, (unicode, bytes)):
        if x.lower() == 'none':
            return None
    return typ(x)

def adapt_bool(x):
    if isinstance(x, (unicode, bytes)):
        x = x.lower()
        if x == 'true':
            x = True
        elif x == 'false':
            x = False
        elif x == 'none':
            x = None
        else:
            x = bool(int(x))
    return x if x is None else bool(x)

def get_adapter(name, metadata):
    dt = metadata['datatype']
    if dt == 'text':
        if metadata['is_multiple']:
            m = metadata['is_multiple']
            ans = partial(multiple_text, m['ui_to_list'], m['list_to_ui'])
        else:
            ans = single_text
    elif dt == 'series':
        ans = single_text
    elif dt == 'datetime':
        ans = adapt_date if name == 'pubdate' else adapt_datetime
    elif dt == 'int':
        ans = partial(adapt_number, int)
    elif dt == 'float':
        ans = partial(adapt_number, float)
    elif dt == 'bool':
        ans = adapt_bool
    elif dt == 'comments':
        ans = single_text
    elif dt == 'rating':
        ans = lambda x: None if x in {None, 0} else min(10., max(0., adapt_number(float, x)))
    elif dt == 'enumeration':
        ans = single_text
    elif dt == 'composite':
        ans = lambda x: x

    if name == 'title':
        return lambda x: ans(x) or _('Unknown')
    if name == 'author_sort':
        return lambda x: ans(x) or ''
    if name == 'authors':
        return lambda x: ans(x) or (_('Unknown'),)
    if name in {'timestamp', 'last_modified'}:
        return lambda x: ans(x) or UNDEFINED_DATE
    if name == 'series_index':
        return lambda x: 1.0 if ans(x) is None else ans(x)

    return ans
# }}}
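A hedged sketch of how these adapters compose (the metadata dicts shown are illustrative stand-ins, not the full field metadata calibre builds, and _ is assumed to be calibre's installed translation builtin): get_adapter picks a converter by datatype, then wraps it with a per-field fallback so callers never special-case empty input.

    adapt = get_adapter('#float', {'datatype': 'float', 'is_multiple': {}})
    adapt('1.5')   # -> 1.5
    adapt('none')  # -> None, the string 'none' is treated as unset

    adapt_title = get_adapter('title', {'datatype': 'text', 'is_multiple': {}})
    adapt_title('   ')  # -> _('Unknown'), since single_text('   ') is None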

# One-One fields {{{
def one_one_in_books(book_id_val_map, db, field, *args):
    'Set a one-one field in the books table'
    if book_id_val_map:
        sequence = ((sqlite_datetime(v), k) for k, v in book_id_val_map.iteritems())
        db.conn.executemany(
            'UPDATE books SET %s=? WHERE id=?'%field.metadata['column'], sequence)
        field.table.book_col_map.update(book_id_val_map)
    return set(book_id_val_map)

def one_one_in_other(book_id_val_map, db, field, *args):
    'Set a one-one field in the non-books table, like comments'
    deleted = tuple((k,) for k, v in book_id_val_map.iteritems() if v is None)
    if deleted:
        db.conn.executemany('DELETE FROM %s WHERE book=?'%field.metadata['table'],
                            deleted)
        for book_id in deleted:
            field.table.book_col_map.pop(book_id[0], None)
    updated = {k:v for k, v in book_id_val_map.iteritems() if v is not None}
    if updated:
        db.conn.executemany('INSERT OR REPLACE INTO %s(book,%s) VALUES (?,?)'%(
            field.metadata['table'], field.metadata['column']),
            ((k, sqlite_datetime(v)) for k, v in updated.iteritems()))
        field.table.book_col_map.update(updated)
    return set(book_id_val_map)

def custom_series_index(book_id_val_map, db, field, *args):
    series_field = field.series_field
    sequence = []
    for book_id, sidx in book_id_val_map.iteritems():
        if sidx is None:
            sidx = 1.0
        ids = series_field.ids_for_book(book_id)
        if ids:
            sequence.append((sidx, book_id, ids[0]))
            field.table.book_col_map[book_id] = sidx
    if sequence:
        db.conn.executemany('UPDATE %s SET %s=? WHERE book=? AND value=?'%(
            field.metadata['table'], field.metadata['column']), sequence)
    return {s[1] for s in sequence}
# }}}

# Many-One fields {{{

def safe_lower(x):
    try:
        return icu_lower(x)
    except (TypeError, ValueError, KeyError, AttributeError):
        return x

def get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
              case_changes, val_map, sql_val_map=lambda x:x):
    ''' Get the db id for the value val. If val does not exist in the db it is
    inserted into the db. '''
    kval = kmap(val)
    item_id = rid_map.get(kval, None)
    if item_id is None:
        db.conn.execute('INSERT INTO %s(%s) VALUES (?)'%(
            m['table'], m['column']), (sql_val_map(val),))
        item_id = rid_map[kval] = db.conn.last_insert_rowid()
        table.id_map[item_id] = val
        table.col_book_map[item_id] = set()
    elif allow_case_change and val != table.id_map[item_id]:
        case_changes[item_id] = val
    val_map[val] = item_id

def change_case(case_changes, dirtied, db, table, m, sql_val_map=lambda x:x):
    db.conn.executemany(
        'UPDATE %s SET %s=? WHERE id=?'%(m['table'], m['column']),
        ((sql_val_map(val), item_id) for item_id, val in case_changes.iteritems()))
    for item_id, val in case_changes.iteritems():
        table.id_map[item_id] = val
        dirtied.update(table.col_book_map[item_id])

def many_one(book_id_val_map, db, field, allow_case_change, *args):
    dirtied = set()
    m = field.metadata
    table = field.table
    dt = m['datatype']
    is_custom_series = dt == 'series' and table.name.startswith('#')

    # Map values to db ids, including any new values
    kmap = safe_lower if dt in {'text', 'series'} else lambda x:x
    rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
    val_map = {None:None}
    case_changes = {}
    for val in book_id_val_map.itervalues():
        if val is not None:
            get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
                      case_changes, val_map)

    if case_changes:
        change_case(case_changes, dirtied, db, table, m)

    book_id_item_id_map = {k:val_map[v] for k, v in book_id_val_map.iteritems()}

    # Ignore those items whose value is the same as the current value
    book_id_item_id_map = {k:v for k, v in book_id_item_id_map.iteritems()
                           if v != table.book_col_map.get(k, None)}
    dirtied |= set(book_id_item_id_map)

    # Update the book->col and col->book maps
    deleted = set()
    updated = {}
    for book_id, item_id in book_id_item_id_map.iteritems():
        old_item_id = table.book_col_map.get(book_id, None)
        if old_item_id is not None:
            table.col_book_map[old_item_id].discard(book_id)
        if item_id is None:
            table.book_col_map.pop(book_id, None)
            deleted.add(book_id)
        else:
            table.book_col_map[book_id] = item_id
            table.col_book_map[item_id].add(book_id)
            updated[book_id] = item_id

    # Update the db link table
    if deleted:
        db.conn.executemany('DELETE FROM %s WHERE book=?'%table.link_table,
                            ((k,) for k in deleted))
    if updated:
        sql = (
            'DELETE FROM {0} WHERE book=?; INSERT INTO {0}(book,{1},extra) VALUES(?, ?, 1.0)'
            if is_custom_series else
            'DELETE FROM {0} WHERE book=?; INSERT INTO {0}(book,{1}) VALUES(?, ?)'
        )
        db.conn.executemany(sql.format(table.link_table, m['link_column']),
                            ((book_id, book_id, item_id) for book_id, item_id in
                             updated.iteritems()))

    # Remove no longer used items
    remove = {item_id for item_id in table.id_map if not
              table.col_book_map.get(item_id, False)}
    if remove:
        db.conn.executemany('DELETE FROM %s WHERE id=?'%m['table'],
                            ((item_id,) for item_id in remove))
        for item_id in remove:
            del table.id_map[item_id]
            table.col_book_map.pop(item_id, None)

    return dirtied
# }}}

# Many-Many fields {{{
def many_many(book_id_val_map, db, field, allow_case_change, *args):
    dirtied = set()
    m = field.metadata
    table = field.table
    dt = m['datatype']
    is_authors = field.name == 'authors'

    # Map values to db ids, including any new values
    kmap = safe_lower if dt == 'text' else lambda x:x
    rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
    sql_val_map = (lambda x:x.replace(',', '|')) if is_authors else lambda x:x
    val_map = {}
    case_changes = {}
    for vals in book_id_val_map.itervalues():
        for val in vals:
            get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
                      case_changes, val_map, sql_val_map=sql_val_map)

    if case_changes:
        change_case(case_changes, dirtied, db, table, m,
                    sql_val_map=sql_val_map)

    book_id_item_id_map = {k:tuple(val_map[v] for v in vals)
                           for k, vals in book_id_val_map.iteritems()}

    # Ignore those items whose value is the same as the current value
    book_id_item_id_map = {k:v for k, v in book_id_item_id_map.iteritems()
                           if v != table.book_col_map.get(k, None)}
    dirtied |= set(book_id_item_id_map)

    # Update the book->col and col->book maps
    deleted = set()
    updated = {}
    for book_id, item_ids in book_id_item_id_map.iteritems():
        old_item_ids = table.book_col_map.get(book_id, None)
        if old_item_ids:
            for old_item_id in old_item_ids:
                table.col_book_map[old_item_id].discard(book_id)
        if item_ids:
            table.book_col_map[book_id] = item_ids
            for item_id in item_ids:
                table.col_book_map[item_id].add(book_id)
            updated[book_id] = item_ids
        else:
            table.book_col_map.pop(book_id, None)
            deleted.add(book_id)

    # Update the db link table
    if deleted:
        db.conn.executemany('DELETE FROM %s WHERE book=?'%table.link_table,
                            ((k,) for k in deleted))
    if updated:
        vals = (
            (book_id, val) for book_id, vals in updated.iteritems()
            for val in vals
        )
        db.conn.executemany('DELETE FROM %s WHERE book=?'%table.link_table,
                            ((k,) for k in updated))
        db.conn.executemany('INSERT INTO {0}(book,{1}) VALUES(?, ?)'.format(
            table.link_table, m['link_column']), vals)

    # Remove no longer used items
    remove = {item_id for item_id in table.id_map if not
              table.col_book_map.get(item_id, False)}
    if remove:
        db.conn.executemany('DELETE FROM %s WHERE id=?'%m['table'],
                            ((item_id,) for item_id in remove))
        for item_id in remove:
            del table.id_map[item_id]
            table.col_book_map.pop(item_id, None)

    return dirtied

# }}}

def dummy(book_id_val_map, *args):
    return set()

class Writer(object):

    def __init__(self, field):
        self.adapter = get_adapter(field.name, field.metadata)
        self.name = field.name
        self.field = field
        dt = field.metadata['datatype']
        self.accept_vals = lambda x: True
        if dt == 'composite' or field.name in {
                'id', 'cover', 'size', 'path', 'formats', 'news'}:
            self.set_books_func = dummy
        elif self.name[0] == '#' and self.name.endswith('_index'):
            self.set_books_func = custom_series_index
        elif field.is_many_many:
            self.set_books_func = many_many
        elif field.is_many:
            self.set_books_func = (self.set_books_for_enum if dt ==
                                   'enumeration' else many_one)
        else:
            self.set_books_func = (one_one_in_books if field.metadata['table']
                                   == 'books' else one_one_in_other)
        if self.name in {'timestamp', 'uuid', 'sort'}:
            self.accept_vals = bool

    def set_books(self, book_id_val_map, db, allow_case_change=True):
        book_id_val_map = {k:self.adapter(v) for k, v in
                           book_id_val_map.iteritems() if self.accept_vals(v)}
        if not book_id_val_map:
            return set()
        dirtied = self.set_books_func(book_id_val_map, db, self.field,
                                      allow_case_change)
        return dirtied

    def set_books_for_enum(self, book_id_val_map, db, field,
                           allow_case_change):
        allowed = set(field.metadata['display']['enum_values'])
        book_id_val_map = {k:v for k, v in book_id_val_map.iteritems() if v is
                           None or v in allowed}
        if not book_id_val_map:
            return set()
        return many_one(book_id_val_map, db, field, False)
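A rough usage sketch of the class above (the field and backend objects here are assumptions based on the tests earlier in this commit, not a documented API):

    # field = cache.fields['publisher']; backend is the low-level db object
    w = Writer(field)
    dirtied = w.set_books({1: 'New Publisher', 2: None}, backend)
    # dirtied is the set of book ids whose stored value actually changed;
    # values are first normalized by the field's adapter, then dispatched
    # to the one-one/many-one/many-many writer chosen in __init__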
@@ -23,12 +23,11 @@ It also contains interfaces to various bits of calibre that do not have
dedicated command line tools, such as font subsetting, tweaking ebooks and so
on.
''')
    parser.add_option('-c', '--command', help='Run python code.', default=None)
    parser.add_option('-e', '--exec-file', default=None, help='Run the python code in file.')
    parser.add_option('-f', '--subset-font', default=False,
                      action='store_true', help='Subset the specified font')
    parser.add_option('-c', '--command', help='Run python code.')
    parser.add_option('-e', '--exec-file', help='Run the python code in file.')
    parser.add_option('-f', '--subset-font', help='Subset the specified font')
    parser.add_option('-d', '--debug-device-driver', default=False, action='store_true',
                      help='Debug the specified device driver.')
                      help='Debug device detection')
    parser.add_option('-g', '--gui', default=False, action='store_true',
                      help='Run the GUI with debugging enabled. Debug output is '
                      'printed to stdout and stderr.')
@@ -59,7 +58,7 @@ on.
    parser.add_option('-m', '--inspect-mobi', action='store_true',
                      default=False,
                      help='Inspect the MOBI file(s) at the specified path(s)')
    parser.add_option('--tweak-book', default=None,
    parser.add_option('-t', '--tweak-book', default=None,
                      help='Tweak the book (exports the book as a collection of HTML '
                      'files and metadata, which you can edit using standard HTML '
                      'editing tools, and then rebuilds the file from the edited HTML. '
@@ -174,30 +173,24 @@ def run_debug_gui(logpath):
    from calibre.gui2.main import main
    main(['__CALIBRE_GUI_DEBUG__', logpath])

def run_script(path, args):
    # Load all user defined plugins so the script can import from the
    # calibre_plugins namespace
    import calibre.customize.ui as dummy
    dummy

    sys.argv = [path] + args
    ef = os.path.abspath(path)
    base = os.path.dirname(ef)
    sys.path.insert(0, base)
    g = globals()
    g['__name__'] = '__main__'
    g['__file__'] = ef
    execfile(ef, g)

def main(args=sys.argv):
    from calibre.constants import debug
    debug()
    if len(args) > 2 and args[1] in ('-e', '--exec-file'):

        # Load all plugins user defined plugins so the script can import from the
        # calibre_plugins namespace
        import calibre.customize.ui as dummy
        dummy

        sys.argv = [args[2]] + args[3:]
        ef = os.path.abspath(args[2])
        base = os.path.dirname(ef)
        sys.path.insert(0, base)
        g = globals()
        g['__name__'] = '__main__'
        g['__file__'] = ef
        execfile(ef, g)
        return

    if len(args) > 1 and args[1] in ('-f', '--subset-font'):
        from calibre.utils.fonts.sfnt.subset import main
        main(['subset-font']+args[2:])
        return

    opts, args = option_parser().parse_args(args)
    if opts.gui:
@@ -258,6 +251,13 @@ def main(args=sys.argv):
    elif opts.shutdown_running_calibre:
        from calibre.gui2.main import shutdown_other
        shutdown_other()
    elif opts.subset_font:
        from calibre.utils.fonts.sfnt.subset import main
        main(['subset-font']+[opts.subset_font]+args[1:])
    elif opts.exec_file:
        run_script(opts.exec_file, args[1:])
    elif len(args) >= 2 and args[1].rpartition('.')[-1] in {'py', 'recipe'}:
        run_script(args[1], args[2:])
    else:
        from calibre import ipython
        ipython()

@@ -22,13 +22,14 @@ class IRIVER_STORY(USBMS):
    FORMATS = ['epub', 'fb2', 'pdf', 'djvu', 'txt']

    VENDOR_ID = [0x1006]
    PRODUCT_ID = [0x4023, 0x4024, 0x4025, 0x4034]
    BCD = [0x0323, 0x0326]
    PRODUCT_ID = [0x4023, 0x4024, 0x4025, 0x4034, 0x4037]
    BCD = [0x0323, 0x0326, 0x226]

    VENDOR_NAME = 'IRIVER'
    WINDOWS_MAIN_MEM = ['STORY', 'STORY_EB05', 'STORY_WI-FI', 'STORY_EB07']
    WINDOWS_MAIN_MEM = ['STORY', 'STORY_EB05', 'STORY_WI-FI', 'STORY_EB07',
                        'STORY_EB12']
    WINDOWS_MAIN_MEM = re.compile(r'(%s)&'%('|'.join(WINDOWS_MAIN_MEM)))
    WINDOWS_CARD_A_MEM = ['STORY', 'STORY_SD']
    WINDOWS_CARD_A_MEM = ['STORY', 'STORY_SD', 'STORY_EB12_SD']
    WINDOWS_CARD_A_MEM = re.compile(r'(%s)&'%('|'.join(WINDOWS_CARD_A_MEM)))

    #OSX_MAIN_MEM = 'Kindle Internal Storage Media'

@@ -6,7 +6,7 @@ import os, time, sys

from calibre.constants import preferred_encoding, DEBUG
from calibre import isbytestring, force_unicode
from calibre.utils.icu import strcmp
from calibre.utils.icu import sort_key

from calibre.devices.usbms.books import Book as Book_
from calibre.devices.usbms.books import CollectionsBookList
@@ -239,9 +239,8 @@ class KTCollectionsBookList(CollectionsBookList):
            if y is None:
                return -1
            if isinstance(x, basestring) and isinstance(y, basestring):
                c = strcmp(force_unicode(x), force_unicode(y))
            else:
                c = cmp(x, y)
                x, y = sort_key(force_unicode(x)), sort_key(force_unicode(y))
            c = cmp(x, y)
            if c != 0:
                return c
            # same as above -- no sort_key needed here
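The change above compares ICU collation keys instead of raw strings; a small sketch of the difference (illustrative, using the real calibre.utils.icu helper under Python 2 semantics):

    from calibre.utils.icu import sort_key
    cmp(u'apple', u'Banana')                      # -> 1, code-point order puts 'B' before 'a'
    cmp(sort_key(u'apple'), sort_key(u'Banana'))  # -> -1, case-insensitive collation order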

@@ -1,5 +1,6 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import division

__license__ = 'GPL v3'
__copyright__ = '2010-2012, Timothy Legge <timlegge@gmail.com>, Kovid Goyal <kovid@kovidgoyal.net> and David Forrester <davidfor@internode.on.net>'
@@ -13,6 +14,7 @@ Extended to support Touch firmware 2.0.0 and later and newer devices by David Fo
'''

import os, time

from contextlib import closing
from calibre.devices.usbms.books import BookList
from calibre.devices.usbms.books import CollectionsBookList
@@ -33,7 +35,7 @@ class KOBO(USBMS):
    gui_name = 'Kobo Reader'
    description = _('Communicate with the Kobo Reader')
    author = 'Timothy Legge and David Forrester'
    version = (2, 0, 5)
    version = (2, 0, 6)

    dbversion = 0
    fwversion = 0
@@ -1196,10 +1198,11 @@ class KOBO(USBMS):


class KOBOTOUCH(KOBO):
    name = 'KoboTouch'
    gui_name = 'Kobo Touch'
    author = 'David Forrester'
    name        = 'KoboTouch'
    gui_name    = 'Kobo Touch'
    author      = 'David Forrester'
    description = 'Communicate with the Kobo Touch, Glo and Mini firmware. Based on the existing Kobo driver by %s.' % (KOBO.author)
    # icon        = I('devices/kobotouch.jpg')

    supported_dbversion = 75
    min_supported_dbversion = 53
@@ -1219,14 +1222,11 @@ class KOBOTOUCH(KOBO):
            _('Delete Empty Bookshelves') +
            ':::'+_('Delete any empty bookshelves from the Kobo Touch when syncing is finished. This is only for firmware V2.0.0 or later.'),
            _('Upload covers for books') +
            ':::'+_('Normally, the KOBO readers get the cover image from the'
                    ' ebook file itself. With this option, calibre will send a '
                    'separate cover image to the reader, useful if you '
                    'have modified the cover.'),
            ':::'+_('Upload cover images from the calibre library when sending books to the device.'),
            _('Upload Black and White Covers'),
            _('Always upload covers') +
            ':::'+_('If the Upload covers option is selected, the driver will only replace covers already on the device.'
                    ' Select this option if you want covers uploaded the first time you send the book to the device.'),
            _('Keep cover aspect ratio') +
            ':::'+_('When uploading covers, do not change the aspect ratio when resizing for the device.'
                    ' This is for firmware versions 2.3.1 and later.'),
            _('Show expired books') +
            ':::'+_('A bug in an earlier version left non kepubs book records'
                    ' in the database. With this option Calibre will show the '
@@ -1278,7 +1278,7 @@ class KOBOTOUCH(KOBO):
    OPT_DELETE_BOOKSHELVES = 2
    OPT_UPLOAD_COVERS = 3
    OPT_UPLOAD_GRAYSCALE_COVERS = 4
    OPT_ALWAYS_UPLOAD_COVERS = 5
    OPT_KEEP_COVER_ASPECT_RATIO = 5
    OPT_SHOW_EXPIRED_BOOK_RECORDS = 6
    OPT_SHOW_PREVIEWS = 7
    OPT_SHOW_RECOMMENDATIONS = 8
@@ -1290,16 +1290,27 @@ class KOBOTOUCH(KOBO):

    TIMESTAMP_STRING = "%Y-%m-%dT%H:%M:%SZ"

    PRODUCT_ID = [0x4163, 0x4173, 0x4183]
    BCD = [0x0110, 0x0326]
    GLO_PRODUCT_ID = [0x4173]
    MINI_PRODUCT_ID = [0x4183]
    TOUCH_PRODUCT_ID = [0x4163]
    PRODUCT_ID = GLO_PRODUCT_ID + MINI_PRODUCT_ID + TOUCH_PRODUCT_ID

    BCD = [0x0110, 0x0326]

    # Image file name endings. Made up of: image size, min_dbversion, max_dbversion,
    COVER_FILE_ENDINGS = {
        ' - N3_LIBRARY_FULL.parsed':[(355,473),0, 99,],   # Used for Details screen
        ' - N3_LIBRARY_GRID.parsed':[(149,198),0, 99,],   # Used for library lists
        ' - N3_LIBRARY_LIST.parsed':[(60,90),0, 53,],
        ' - N3_FULL.parsed':[(600,800),0, 99,True,],            # Used for screensaver, home screen
        ' - N3_LIBRARY_FULL.parsed':[(355,473),0, 99,False,],   # Used for Details screen
        ' - N3_LIBRARY_GRID.parsed':[(149,198),0, 99,False,],   # Used for library lists
        ' - N3_LIBRARY_LIST.parsed':[(60,90),0, 53,False,],
        # ' - N3_LIBRARY_SHELF.parsed': [(40,60),0, 52,],
    }
    GLO_COVER_FILE_ENDINGS = {
        ' - N3_FULL.parsed':[(758,1024),0, 99,True,],           # Used for screensaver, home screen
        ' - N3_LIBRARY_FULL.parsed':[(355,479),0, 99,False,],   # Used for Details screen
        ' - N3_LIBRARY_GRID.parsed':[(149,201),0, 99,False,],   # Used for library lists
        # ' - N3_LIBRARY_LIST.parsed':[(60,90),0, 53,],
        # ' - N3_LIBRARY_SHELF.parsed': [(40,60),0, 52,],
        ' - N3_FULL.parsed':[(600,800),0, 99,],                 # Used for screensaver, home screen
    }
    #Following are the sizes used with pre2.1.4 firmware
    # COVER_FILE_ENDINGS = {
@@ -1311,6 +1322,7 @@ class KOBOTOUCH(KOBO):
    # ' - N3_FULL.parsed':[(600,800),0, 99,], # Used for screensaver if "Full screen" is checked.
    # }

    def initialize(self):
        super(KOBOTOUCH, self).initialize()
        self.bookshelvelist = []
@@ -1691,7 +1703,7 @@ class KOBOTOUCH(KOBO):
    def imagefilename_from_imageID(self, ImageID):
        show_debug = self.is_debugging_title(ImageID)

        for ending, cover_options in self.COVER_FILE_ENDINGS.items():
        for ending, cover_options in self.cover_file_endings().items():
            fpath = self._main_prefix + '.kobo/images/' + ImageID + ending
            fpath = self.normalize_path(fpath.replace('/', os.sep))
            if os.path.exists(fpath):
@@ -1730,15 +1742,19 @@ class KOBOTOUCH(KOBO):
                    cleanup_values = (contentID,)
                    # debug_print('KoboTouch:upload_books: Delete record left if deleted on Touch')
                    cursor.execute(cleanup_query, cleanup_values)

                self.set_filesize_in_device_database(connection, contentID, fname)

                if not self.copying_covers():
                    imageID = self.imageid_from_contentid(contentID)
                    self.delete_images(imageID)
                connection.commit()

                cursor.close()
        except Exception as e:
            debug_print('KoboTouch:upload_books - Exception: %s'%str(e))

        return result

@@ -1794,7 +1810,7 @@ class KOBOTOUCH(KOBO):
        path_prefix = '.kobo/images/'
        path = self._main_prefix + path_prefix + ImageID

        for ending in self.COVER_FILE_ENDINGS.keys():
        for ending in self.cover_file_endings().keys():
            fpath = path + ending
            fpath = self.normalize_path(fpath)

@@ -2049,23 +2065,23 @@ class KOBOTOUCH(KOBO):
        # debug_print("KoboTouch:upload_cover - path='%s' filename='%s'"%(path, filename))

        opts = self.settings()
        if not opts.extra_customization[self.OPT_UPLOAD_COVERS]:
        if not self.copying_covers():
            # Building thumbnails disabled
            # debug_print('KoboTouch: not uploading cover')
            return

        # Don't upload covers if book is on the SD card
        if self._card_a_prefix and path.startswith(self._card_a_prefix):
            return

        if not opts.extra_customization[self.OPT_UPLOAD_GRAYSCALE_COVERS]:
            uploadgrayscale = False
        else:
            uploadgrayscale = True
        if not opts.extra_customization[self.OPT_ALWAYS_UPLOAD_COVERS]:
            always_upload_covers = False
        else:
            always_upload_covers = True

        # debug_print('KoboTouch: uploading cover')
        try:
            self._upload_cover(path, filename, metadata, filepath, uploadgrayscale, always_upload_covers)
            self._upload_cover(path, filename, metadata, filepath, uploadgrayscale, self.keep_cover_aspect())
        except Exception as e:
            debug_print('KoboTouch: FAILED to upload cover=%s Exception=%s'%(filepath, str(e)))

@@ -2077,9 +2093,9 @@ class KOBOTOUCH(KOBO):
        ImageID = ImageID.replace('.', '_')
        return ImageID

    def _upload_cover(self, path, filename, metadata, filepath, uploadgrayscale, always_upload_covers=False):
        from calibre.utils.magick.draw import save_cover_data_to
        debug_print("KoboTouch:_upload_cover - filename='%s' uploadgrayscale='%s' always_upload_covers='%s'"%(filename, uploadgrayscale, always_upload_covers))
    def _upload_cover(self, path, filename, metadata, filepath, uploadgrayscale, keep_cover_aspect=False):
        from calibre.utils.magick.draw import save_cover_data_to, identify_data
        debug_print("KoboTouch:_upload_cover - filename='%s' uploadgrayscale='%s' "%(filename, uploadgrayscale))

        if metadata.cover:
            show_debug = self.is_debugging_title(filename)
@@ -2122,8 +2138,8 @@ class KOBOTOUCH(KOBO):
                    if show_debug:
                        debug_print("KoboTouch:_upload_cover - About to loop over cover endings")

                    for ending, cover_options in self.COVER_FILE_ENDINGS.items():
                        resize, min_dbversion, max_dbversion = cover_options
                    for ending, cover_options in self.cover_file_endings().items():
                        resize, min_dbversion, max_dbversion, isFullsize = cover_options
                        if show_debug:
                            debug_print("KoboTouch:_upload_cover - resize=%s min_dbversion=%d max_dbversion=%d" % (resize, min_dbversion, max_dbversion))
                        if self.dbversion >= min_dbversion and self.dbversion <= max_dbversion:
@@ -2132,19 +2148,28 @@ class KOBOTOUCH(KOBO):
                            fpath = path + ending
                            fpath = self.normalize_path(fpath.replace('/', os.sep))

                            if os.path.exists(fpath) or always_upload_covers:
                                debug_print("KoboTouch:_upload_cover - path exists or always_upload_covers%s"% always_upload_covers)
                                with open(cover, 'rb') as f:
                                    data = f.read()
                            with open(cover, 'rb') as f:
                                data = f.read()

                                # Return the data resized and in Grayscale if
                                # required
                                data = save_cover_data_to(data, 'dummy.jpg',
                                        grayscale=uploadgrayscale,
                                        resize_to=resize, return_data=True)
                            if keep_cover_aspect:
                                if isFullsize:
                                    resize = None
                                else:
                                    width, height, fmt = identify_data(data)
                                    cover_aspect = width / height
                                    if cover_aspect > 1:
                                        resize = (resize[0], int(resize[0] / cover_aspect))
                                    elif cover_aspect < 1:
                                        resize = (int(cover_aspect * resize[1]), resize[1])
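                            # Editorial aside, not part of the commit: the arithmetic
                            # above preserves the cover's shape. For a 600x800 slot and
                            # a 1000x1600 cover, cover_aspect = 1000/1600 = 0.625 < 1,
                            # so the width is shrunk: resize = (int(0.625 * 800), 800)
                            # = (500, 800) instead of stretching the image to 600x800.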

                            with open(fpath, 'wb') as f:
                                f.write(data)
                            # Return the data resized and in Grayscale if
                            # required
                            data = save_cover_data_to(data, 'dummy.jpg',
                                    grayscale=uploadgrayscale,
                                    resize_to=resize, return_data=True)

                            with open(fpath, 'wb') as f:
                                f.write(data)
                except Exception as e:
                    err = str(e)
                    debug_print("KoboTouch:_upload_cover - Exception string: %s"%err)
@@ -2453,21 +2478,30 @@ class KOBOTOUCH(KOBO):
        return opts

    def isGlo(self):
        return self.detected_device.idProduct in self.GLO_PRODUCT_ID
    def isMini(self):
        return self.detected_device.idProduct in self.MINI_PRODUCT_ID
    def isTouch(self):
        return self.detected_device.idProduct in self.TOUCH_PRODUCT_ID

    def cover_file_endings(self):
        return self.GLO_COVER_FILE_ENDINGS if self.isGlo() else self.COVER_FILE_ENDINGS

    def copying_covers(self):
        opts = self.settings()
        return opts.extra_customization[self.OPT_UPLOAD_COVERS] or opts.extra_customization[self.OPT_KEEP_COVER_ASPECT_RATIO]

    def keep_cover_aspect(self):
        opts = self.settings()
        return opts.extra_customization[self.OPT_KEEP_COVER_ASPECT_RATIO]

    def supports_bookshelves(self):
        return self.dbversion >= self.min_supported_dbversion

    def supports_series(self):
        return self.dbversion >= self.min_dbversion_series

    # def is_debugging_title(self, title):
    ##        debug_print("KoboTouch:is_debugging - title=", title)
    #     is_debugging = False
    #     opts = self.settings()
    #     if opts.extra_customization:
    #         debugging_title = opts.extra_customization[self.OPT_DEBUGGING_TITLE]
    #         is_debugging = len(debugging_title) > 0 and title.find(debugging_title) >= 0 or len(title) == 0
    #
    #     return is_debugging

    @classmethod
    def is_debugging_title(cls, title):

@@ -13,7 +13,7 @@ from calibre.devices.interface import BookList as _BookList
from calibre.constants import preferred_encoding
from calibre import isbytestring, force_unicode
from calibre.utils.config import device_prefs, tweaks
from calibre.utils.icu import strcmp
from calibre.utils.icu import sort_key
from calibre.utils.formatter import EvalFormatter

class Book(Metadata):
@@ -281,9 +281,8 @@ class CollectionsBookList(BookList):
            if y is None:
                return -1
            if isinstance(x, basestring) and isinstance(y, basestring):
                c = strcmp(force_unicode(x), force_unicode(y))
            else:
                c = cmp(x, y)
                x, y = sort_key(force_unicode(x)), sort_key(force_unicode(y))
            c = cmp(x, y)
            if c != 0:
                return c
            # same as above -- no sort_key needed here

@@ -100,6 +100,9 @@ def option_recommendation_to_cli_option(add_option, rec):
        switches = ['--disable-'+opt.long_switch]
    add_option(Option(*switches, **attrs))

def group_titles():
    return _('INPUT OPTIONS'), _('OUTPUT OPTIONS')

def add_input_output_options(parser, plumber):
    input_options, output_options = \
        plumber.input_options, plumber.output_options
@@ -109,14 +112,14 @@ def add_input_output_options(parser, plumber):
            option_recommendation_to_cli_option(group, opt)

    if input_options:
        title = _('INPUT OPTIONS')
        title = group_titles()[0]
        io = OptionGroup(parser, title, _('Options to control the processing'
                         ' of the input %s file')%plumber.input_fmt)
        add_options(io.add_option, input_options)
        parser.add_option_group(io)

    if output_options:
        title = _('OUTPUT OPTIONS')
        title = group_titles()[1]
        oo = OptionGroup(parser, title, _('Options to control the processing'
                         ' of the output %s')%plumber.output_fmt)
        add_options(oo.add_option, output_options)

@@ -60,7 +60,8 @@ class TOCAdder(object):
            else:
                oeb.guide.remove('toc')

        if not self.has_toc or 'toc' in oeb.guide or opts.no_inline_toc:
        if (not self.has_toc or 'toc' in oeb.guide or opts.no_inline_toc or
                getattr(opts, 'mobi_passthrough', False)):
            return

        self.log('\tGenerating in-line ToC')

@@ -81,6 +81,11 @@ class BookIndexing
        if elem == null
            pos = [body.scrollWidth+1000, body.scrollHeight+1000]
        else
            # Because of a bug in WebKit's getBoundingClientRect() in
            # column mode, this position can be inaccurate,
            # see https://bugs.launchpad.net/calibre/+bug/1132641 for a
            # test case. The usual symptom of the inaccuracy is br.top is
            # highly negative.
            br = elem.getBoundingClientRect()
            pos = viewport_to_document(br.left, br.top, elem.ownerDocument)

@@ -75,6 +75,13 @@ class PagedDisplay
        this.margin_side = margin_side
        this.margin_bottom = margin_bottom

    handle_rtl_body: (body_style) ->
        if body_style.direction == "rtl"
            for node in document.body.childNodes
                if node.nodeType == node.ELEMENT_NODE and window.getComputedStyle(node).direction == "rtl"
                    node.style.setProperty("direction", "rtl")
            document.body.style.direction = "ltr"

    layout: (is_single_page=false) ->
        # start_time = new Date().getTime()
        body_style = window.getComputedStyle(document.body)
@@ -84,6 +91,7 @@ class PagedDisplay
        # Check if the current document is a full screen layout like
        # cover, if so we treat it specially.
        single_screen = (document.body.scrollHeight < window.innerHeight + 75)
        this.handle_rtl_body(body_style)
        first_layout = true

        ww = window.innerWidth
@@ -402,7 +410,22 @@ class PagedDisplay
            elem.scrollIntoView()
            if this.in_paged_mode
                # Ensure we are scrolled to the column containing elem
                this.scroll_to_xpos(calibre_utils.absleft(elem) + 5)

                # Because of a bug in WebKit's getBoundingClientRect() in column
                # mode, this position can be inaccurate, see
                # https://bugs.launchpad.net/calibre/+bug/1132641 for a test case.
                # The usual symptom of the inaccuracy is br.top is highly negative.
                br = elem.getBoundingClientRect()
                if br.top < -1000
                    # This only works because of the preceding call to
                    # elem.scrollIntoView(). However, in some cases it gives
                    # inaccurate results, so we prefer the bounding client rect,
                    # when possible.
                    left = elem.scrollLeft
                else
                    left = br.left
                this.scroll_to_xpos(calibre_utils.viewport_to_document(
                    left+this.margin_side, elem.scrollTop, elem.ownerDocument)[0])

    snap_to_selection: () ->
        # Ensure that the viewport is positioned at the start of the column

@@ -86,7 +86,9 @@ class CalibreUtils
    absleft: (elem) -> # {{{
        # The left edge of elem in document co-ords. Works in all
        # circumstances, including column layout. Note that this will cause
        # a relayout if the render tree is dirty.
        # a relayout if the render tree is dirty. Also, because of a bug in the
        # version of WebKit bundled with Qt 4.8, this does not always work, see
        # https://bugs.launchpad.net/bugs/1132641 for a test case.
        r = elem.getBoundingClientRect()
        return this.viewport_to_document(r.left, 0, elem.ownerDocument)[0]
    # }}}

|
||||
return '<%s%s></%s>'%(match.group(1), match.group(2), match.group(1))
|
||||
|
||||
def load_html(path, view, codec='utf-8', mime_type=None,
|
||||
pre_load_callback=lambda x:None, path_is_html=False,
|
||||
pre_load_callback=lambda x:None, path_is_html=False,
|
||||
force_as_html=False):
|
||||
from PyQt4.Qt import QUrl, QByteArray
|
||||
if mime_type is None:
|
||||
@ -45,13 +45,13 @@ def load_html(path, view, codec='utf-8', mime_type=None,
|
||||
html = f.read().decode(codec, 'replace')
|
||||
|
||||
html = EntityDeclarationProcessor(html).processed_html
|
||||
self_closing_pat = re.compile(r'<\s*([A-Za-z1-6]+)([^>]*)/\s*>')
|
||||
self_closing_pat = re.compile(r'<\s*([:A-Za-z0-9-]+)([^>]*)/\s*>')
|
||||
html = self_closing_pat.sub(self_closing_sub, html)
|
||||
|
||||
loading_url = QUrl.fromLocalFile(path)
|
||||
pre_load_callback(loading_url)
|
||||
|
||||
if force_as_html or re.search(r'<[:a-zA-Z]*svg', html) is None:
|
||||
if force_as_html or re.search(r'<[:a-zA-Z0-9-]*svg', html) is None:
|
||||
view.setHtml(html, loading_url)
|
||||
else:
|
||||
view.setContent(QByteArray(html.encode(codec)), mime_type,
|
||||
|
@ -88,7 +88,7 @@ class Container(object):
|
||||
self.mime_map[name] = guess_type('a.opf')
|
||||
|
||||
if not hasattr(self, 'opf_name'):
|
||||
raise InvalidBook('Book has no OPF file')
|
||||
raise InvalidBook('Could not locate opf file: %r'%opfpath)
|
||||
|
||||
# Update mime map with data from the OPF
|
||||
for item in self.opf_xpath('//opf:manifest/opf:item[@href and @media-type]'):
|
||||
|
@ -76,7 +76,7 @@ etc.</p>'''),
|
||||
'''),
|
||||
|
||||
'smarten_punctuation': _('''\
|
||||
<p>Convert plain text, dashes, ellipsis, multiple hyphens, etc. into their
|
||||
<p>Convert plain text dashes, ellipsis, quotes, multiple hyphens, etc. into their
|
||||
typographically correct equivalents.</p>
|
||||
<p>Note that the algorithm can sometimes generate incorrect results, especially
|
||||
when single quotes at the start of contractions are involved.</p>
|
||||
@ -174,6 +174,7 @@ def gui_polish(data):
|
||||
files = data.pop('files')
|
||||
if not data.pop('metadata'):
|
||||
data.pop('opf')
|
||||
if not data.pop('do_cover'):
|
||||
data.pop('cover')
|
||||
file_map = {x:x for x in files}
|
||||
opts = ALL_OPTS.copy()
|
||||
|
@ -9,10 +9,11 @@ __docformat__ = 'restructuredtext en'
|
||||
|
||||
import os, sys
|
||||
|
||||
from calibre import prints
|
||||
from calibre import prints, as_unicode
|
||||
from calibre.ebooks.oeb.base import OEB_STYLES, OEB_DOCS, XPath
|
||||
from calibre.ebooks.oeb.polish.container import OEB_FONTS
|
||||
from calibre.utils.fonts.sfnt.subset import subset
|
||||
from calibre.utils.fonts.sfnt.errors import UnsupportedFont
|
||||
from calibre.utils.fonts.utils import get_font_names
|
||||
|
||||
def remove_font_face_rules(container, sheet, remove_names, base):
|
||||
@ -46,9 +47,16 @@ def subset_all_fonts(container, font_stats, report):
|
||||
raw = f.read()
|
||||
font_name = get_font_names(raw)[-1]
|
||||
warnings = []
|
||||
container.log('Subsetting font: %s'%font_name)
|
||||
nraw, old_sizes, new_sizes = subset(raw, chars,
|
||||
container.log('Subsetting font: %s'%(font_name or name))
|
||||
try:
|
||||
nraw, old_sizes, new_sizes = subset(raw, chars,
|
||||
warnings=warnings)
|
||||
except UnsupportedFont as e:
|
||||
container.log.warning(
|
||||
'Unsupported font: %s, ignoring. Error: %s'%(
|
||||
name, as_unicode(e)))
|
||||
continue
|
||||
|
||||
for w in warnings:
|
||||
container.log.warn(w)
|
||||
olen = sum(old_sizes.itervalues())
|
||||
|
@ -363,7 +363,10 @@ class CSSFlattener(object):
|
||||
cssdict['font-weight'] = 'normal' # ADE chokes on font-weight medium
|
||||
|
||||
fsize = font_size
|
||||
if not self.context.disable_font_rescaling:
|
||||
is_drop_cap = (cssdict.get('float', None) == 'left' and 'font-size' in
|
||||
cssdict and len(node) == 0 and node.text and
|
||||
len(node.text) == 1)
|
||||
if not self.context.disable_font_rescaling and not is_drop_cap:
|
||||
_sbase = self.sbase if self.sbase is not None else \
|
||||
self.context.source.fbase
|
||||
dyn_rescale = dynamic_rescale_factor(node)
|
||||
@ -382,7 +385,7 @@ class CSSFlattener(object):
|
||||
|
||||
try:
|
||||
minlh = self.context.minimum_line_height / 100.
|
||||
if style['line-height'] < minlh * fsize:
|
||||
if not is_drop_cap and style['line-height'] < minlh * fsize:
|
||||
cssdict['line-height'] = str(minlh)
|
||||
except:
|
||||
self.oeb.logger.exception('Failed to set minimum line-height')
|
||||
|
@ -10,6 +10,7 @@ assumes a prior call to the flatcss transform.
|
||||
'''
|
||||
|
||||
import os, math, functools, collections, re, copy
|
||||
from collections import OrderedDict
|
||||
|
||||
from lxml.etree import XPath as _XPath
|
||||
from lxml import etree
|
||||
@ -106,8 +107,7 @@ class Split(object):
|
||||
continue
|
||||
for elem in selector(body[0]):
|
||||
if elem not in body:
|
||||
if before:
|
||||
elem.set('pb_before', '1')
|
||||
elem.set('pb_before', '1' if before else '0')
|
||||
page_breaks.add(elem)
|
||||
|
||||
for i, elem in enumerate(item.data.iter()):
|
||||
@ -134,14 +134,12 @@ class Split(object):
|
||||
id = 'calibre_pb_%d'%i
|
||||
x.set('id', id)
|
||||
xp = XPath('//*[@id=%r]'%id)
|
||||
page_breaks_.append((xp,
|
||||
x.get('pb_before', False)))
|
||||
page_breaks_.append((xp, x.get('pb_before', '0') == '1'))
|
||||
page_break_ids.append(id)
|
||||
|
||||
for elem in item.data.iter():
|
||||
elem.attrib.pop('pb_order', False)
|
||||
if elem.get('pb_before', False):
|
||||
elem.attrib.pop('pb_before')
|
||||
elem.attrib.pop('pb_before', False)
|
||||
|
||||
return page_breaks_, page_break_ids
|
||||
|
||||
@ -223,22 +221,27 @@ class FlowSplitter(object):
|
||||
self.commit()
|
||||
|
||||
def split_on_page_breaks(self, orig_tree):
|
||||
ordered_ids = []
|
||||
for elem in orig_tree.xpath('//*[@id]'):
|
||||
id = elem.get('id')
|
||||
if id in self.page_break_ids:
|
||||
ordered_ids.append(self.page_breaks[self.page_break_ids.index(id)])
|
||||
ordered_ids = OrderedDict()
|
||||
all_page_break_ids = frozenset(self.page_break_ids)
|
||||
for elem_id in orig_tree.xpath('//*/@id'):
|
||||
if elem_id in all_page_break_ids:
|
||||
ordered_ids[elem_id] = self.page_breaks[
|
||||
self.page_break_ids.index(elem_id)]
|
||||
|
||||
self.trees = [orig_tree]
|
||||
while ordered_ids:
|
||||
pb_id, (pattern, before) = ordered_ids.iteritems().next()
|
||||
del ordered_ids[pb_id]
|
||||
for i in xrange(len(self.trees)-1, -1, -1):
|
||||
tree = self.trees[i]
|
||||
elem = pattern(tree)
|
||||
if elem:
|
||||
self.log.debug('\t\tSplitting on page-break at id=%s'%
|
||||
elem[0].get('id'))
|
||||
before_tree, after_tree = self.do_split(tree, elem[0], before)
|
||||
self.trees[i:i+1] = [before_tree, after_tree]
|
||||
break
|
||||
|
||||
self.trees = []
|
||||
tree = orig_tree
|
||||
for pattern, before in ordered_ids:
|
||||
elem = pattern(tree)
|
||||
if elem:
|
||||
self.log.debug('\t\tSplitting on page-break at %s'%
|
||||
elem[0].get('id'))
|
||||
before, after = self.do_split(tree, elem[0], before)
|
||||
self.trees.append(before)
|
||||
tree = after
|
||||
self.trees.append(tree)
|
||||
trees, ids = [], set([])
|
||||
for tree in self.trees:
|
||||
@ -289,7 +292,6 @@ class FlowSplitter(object):
|
||||
if self.opts.verbose > 3 and npath != path:
|
||||
self.log.debug('\t\t\tMoved split point %s to %s'%(path, npath))
|
||||
|
||||
|
||||
return npath
|
||||
|
||||
def do_split(self, tree, split_point, before):
|
||||
@ -304,7 +306,11 @@ class FlowSplitter(object):
|
||||
root = tree.getroot()
|
||||
root2 = tree2.getroot()
|
||||
body, body2 = map(self.get_body, (root, root2))
|
||||
path = self.adjust_split_point(root, path)
|
||||
if before:
|
||||
# We cannot adjust for after since moving an after split point to a
|
||||
# parent will cause breakage if the parent contains any content
|
||||
# after the original split point
|
||||
path = self.adjust_split_point(root, path)
|
||||
split_point = root.xpath(path)[0]
|
||||
split_point2 = root2.xpath(path)[0]
|
||||
|
||||
|
@ -13,9 +13,10 @@ from operator import itemgetter
|
||||
from collections import Counter, OrderedDict
|
||||
from future_builtins import map
|
||||
|
||||
from calibre import as_unicode
|
||||
from calibre.ebooks.pdf.render.common import (Array, String, Stream,
|
||||
Dictionary, Name)
|
||||
from calibre.utils.fonts.sfnt.subset import pdf_subset
|
||||
from calibre.utils.fonts.sfnt.subset import pdf_subset, UnsupportedFont
|
||||
|
||||
STANDARD_FONTS = {
|
||||
'Times-Roman', 'Helvetica', 'Courier', 'Symbol', 'Times-Bold',
|
||||
@ -150,12 +151,16 @@ class Font(object):
|
||||
|
||||
self.used_glyphs = set()
|
||||
|
||||
def embed(self, objects):
|
||||
def embed(self, objects, debug):
|
||||
self.font_descriptor['FontFile'+('3' if self.is_otf else '2')
|
||||
] = objects.add(self.font_stream)
|
||||
self.write_widths(objects)
|
||||
self.write_to_unicode(objects)
|
||||
pdf_subset(self.metrics.sfnt, self.used_glyphs)
|
||||
try:
|
||||
pdf_subset(self.metrics.sfnt, self.used_glyphs)
|
||||
except UnsupportedFont as e:
|
||||
debug('Subsetting of %s not supported, embedding full font. Error: %s'%(
|
||||
self.metrics.names.get('full_name', 'Unknown'), as_unicode(e)))
|
||||
if self.is_otf:
|
||||
self.font_stream.write(self.metrics.sfnt['CFF '].raw)
|
||||
else:
|
||||
@ -221,7 +226,7 @@ class FontManager(object):
|
||||
}))
|
||||
return self.std_map[name]
|
||||
|
||||
def embed_fonts(self):
|
||||
def embed_fonts(self, debug):
|
||||
for font in self.fonts:
|
||||
font.embed(self.objects)
|
||||
font.embed(self.objects, debug)
|
||||
|
||||
|
@ -488,7 +488,7 @@ class PDFStream(object):
|
||||
def end(self):
|
||||
if self.current_page.getvalue():
|
||||
self.end_page()
|
||||
self.font_manager.embed_fonts()
|
||||
self.font_manager.embed_fonts(self.debug)
|
||||
inforef = self.objects.add(self.info)
|
||||
self.links.add_links()
|
||||
self.objects.pdf_serialize(self.stream)
|
||||
|
@ -15,7 +15,8 @@ from PyQt4.Qt import (QMenu, Qt, QInputDialog, QToolButton, QDialog,
|
||||
from calibre import isbytestring, sanitize_file_name_unicode
|
||||
from calibre.constants import (filesystem_encoding, iswindows,
|
||||
get_portable_base)
|
||||
from calibre.utils.config import prefs
|
||||
from calibre.utils.config import prefs, tweaks
|
||||
from calibre.utils.icu import sort_key
|
||||
from calibre.gui2 import (gprefs, warning_dialog, Dispatcher, error_dialog,
|
||||
question_dialog, info_dialog, open_local_file, choose_dir)
|
||||
from calibre.library.database2 import LibraryDatabase2
|
||||
@ -46,7 +47,7 @@ class LibraryUsageStats(object): # {{{
|
||||
locs = list(self.stats.keys())
|
||||
locs.sort(cmp=lambda x, y: cmp(self.stats[x], self.stats[y]),
|
||||
reverse=True)
|
||||
for key in locs[25:]:
|
||||
for key in locs[500:]:
|
||||
self.stats.pop(key)
|
||||
gprefs.set('library_usage_stats', self.stats)
|
||||
|
||||
@ -72,8 +73,9 @@ class LibraryUsageStats(object): # {{{
|
||||
locs = list(self.stats.keys())
|
||||
if lpath in locs:
|
||||
locs.remove(lpath)
|
||||
locs.sort(cmp=lambda x, y: cmp(self.stats[x], self.stats[y]),
|
||||
reverse=True)
|
||||
limit = tweaks['many_libraries']
|
||||
key = sort_key if len(locs) > limit else lambda x:self.stats[x]
|
||||
locs.sort(key=key, reverse=len(locs)<=limit)
|
||||
for loc in locs:
|
||||
yield self.pretty(loc), loc
|
||||
|
||||
|
@ -44,13 +44,18 @@ class Polish(QDialog): # {{{
|
||||
_('<h3>Smarten punctuation</h3>%s')%HELP['smarten_punctuation'],
|
||||
|
||||
'metadata':_('<h3>Updating metadata</h3>'
|
||||
'<p>This will update all metadata and covers in the'
|
||||
'<p>This will update all metadata <i>except</i> the cover in the'
|
||||
' ebook files to match the current metadata in the'
|
||||
' calibre library.</p><p>If the ebook file does not have'
|
||||
' an identifiable cover, a new cover is inserted.</p>'
|
||||
' calibre library.</p>'
|
||||
' <p>Note that most ebook'
|
||||
' formats are not capable of supporting all the'
|
||||
' metadata in calibre.</p>'),
|
||||
' metadata in calibre.</p><p>There is a separate option to'
|
||||
' update the cover.</p>'),
|
||||
'do_cover': _('<p>Update the covers in the ebook files to match the'
|
||||
' current cover in the calibre library.</p>'
|
||||
'<p>If the ebook file does not have'
|
||||
' an identifiable cover, a new cover is inserted.</p>'
|
||||
),
|
||||
'jacket':_('<h3>Book Jacket</h3>%s')%HELP['jacket'],
|
||||
'remove_jacket':_('<h3>Remove Book Jacket</h3>%s')%HELP['remove_jacket'],
|
||||
}
|
||||
@ -63,11 +68,12 @@ class Polish(QDialog): # {{{
|
||||
|
||||
count = 0
|
||||
self.all_actions = OrderedDict([
|
||||
('subset', _('Subset all embedded fonts')),
|
||||
('smarten_punctuation', _('Smarten punctuation')),
|
||||
('metadata', _('Update metadata in book files')),
|
||||
('jacket', _('Add metadata as a "book jacket" page')),
|
||||
('remove_jacket', _('Remove a previously inserted book jacket')),
|
||||
('subset', _('&Subset all embedded fonts')),
|
||||
('smarten_punctuation', _('Smarten &punctuation')),
|
||||
('metadata', _('Update &metadata in the book files')),
|
||||
('do_cover', _('Update the &cover in the book files')),
|
||||
('jacket', _('Add metadata as a "book &jacket" page')),
|
||||
('remove_jacket', _('&Remove a previously inserted book jacket')),
|
||||
])
|
||||
prefs = gprefs.get('polishing_settings', {})
|
||||
for name, text in self.all_actions.iteritems():
|
||||
@@ -243,8 +249,10 @@ class Polish(QDialog):  # {{{
             cover = os.path.join(base, 'cover.jpg')
             if db.copy_cover_to(book_id, cover, index_is_id=True):
                 data['cover'] = cover
+            is_orig = {}
             for fmt in formats:
                 ext = fmt.replace('ORIGINAL_', '').lower()
+                is_orig[ext.upper()] = 'ORIGINAL_' in fmt
                 with open(os.path.join(base, '%s.%s'%(book_id, ext)), 'wb') as f:
                     db.copy_format_to(book_id, fmt, f, index_is_id=True)
                     data['files'].append(f.name)
@@ -257,7 +265,7 @@ class Polish(QDialog):  # {{{
             self.pd.set_msg(_('Queueing book %(nums)s of %(tot)s (%(title)s)')%dict(
                 nums=num, tot=len(self.book_id_map), title=mi.title))
 
-            self.jobs.append((desc, data, book_id, base))
+            self.jobs.append((desc, data, book_id, base, is_orig))
     # }}}
 
 class Report(QDialog):  # {{{
@@ -404,11 +412,11 @@ class PolishAction(InterfaceAction):
         d = Polish(self.gui.library_view.model().db, book_id_map, parent=self.gui)
         if d.exec_() == d.Accepted and d.jobs:
             show_reports = bool(d.show_reports.isChecked())
-            for desc, data, book_id, base in reversed(d.jobs):
+            for desc, data, book_id, base, is_orig in reversed(d.jobs):
                 job = self.gui.job_manager.run_job(
                     Dispatcher(self.book_polished), 'gui_polish', args=(data,),
                     description=desc)
-                job.polish_args = (book_id, base, data['files'], show_reports)
+                job.polish_args = (book_id, base, data['files'], show_reports, is_orig)
             if d.jobs:
                 self.gui.jobs_pointer.start()
                 self.gui.status_bar.show_message(
@@ -419,11 +427,11 @@ class PolishAction(InterfaceAction):
             self.gui.job_exception(job)
             return
         db = self.gui.current_db
-        book_id, base, files, show_reports = job.polish_args
+        book_id, base, files, show_reports, is_orig = job.polish_args
         fmts = set()
         for path in files:
             fmt = path.rpartition('.')[-1].upper()
-            if tweaks['save_original_format_when_polishing']:
+            if tweaks['save_original_format_when_polishing'] and not is_orig[fmt]:
                 fmts.add(fmt)
                 db.save_original_format(book_id, fmt, notify=False)
             with open(path, 'rb') as f:
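The is_orig map threaded through these polish hunks records which queued files came from ORIGINAL_* formats, so a format that was restored from its saved original is not snapshotted over that original again. A rough sketch of the bookkeeping, with made-up format names and a plain boolean standing in for the tweak lookup:

    # Sketch of the ORIGINAL_ bookkeeping; the formats list is illustrative.
    formats = ['EPUB', 'ORIGINAL_AZW3']
    is_orig = {}
    for fmt in formats:
        ext = fmt.replace('ORIGINAL_', '').lower()
        is_orig[ext.upper()] = 'ORIGINAL_' in fmt

    # Later, when deciding whether to snapshot the pre-polish file:
    save_original = True  # stand-in for tweaks['save_original_format_when_polishing']
    for fmt in ('EPUB', 'AZW3'):
        if save_original and not is_orig[fmt]:
            print('would call db.save_original_format for', fmt)
    # Only EPUB is snapshotted; AZW3 was queued from ORIGINAL_AZW3, so saving
    # it again would clobber the true original.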
@@ -327,6 +327,13 @@ class EditorWidget(QWebView):  # {{{
         else:
             return QWebView.keyReleaseEvent(self, ev)
 
+    def contextMenuEvent(self, ev):
+        menu = self.page().createStandardContextMenu()
+        paste = self.pageAction(QWebPage.Paste)
+        for action in menu.actions():
+            if action == paste:
+                menu.insertAction(action, self.pageAction(QWebPage.PasteAndMatchStyle))
+        menu.exec_(ev.globalPos())
+
     # }}}
 
@@ -622,8 +622,7 @@ class BulkBase(Base):
             return
         val = self.gui_val
         val = self.normalize_ui_val(val)
-        if val != self.initial_val:
-            self.db.set_custom_bulk(book_ids, val, num=self.col_id, notify=notify)
+        self.db.set_custom_bulk(book_ids, val, num=self.col_id, notify=notify)
 
     def make_widgets(self, parent, main_widget_class, extra_label_text=''):
         w = QWidget(parent)
@@ -1030,8 +1029,7 @@ class BulkText(BulkBase):
         else:
             val = self.gui_val
         val = self.normalize_ui_val(val)
-        if val != self.initial_val:
-            self.db.set_custom_bulk(book_ids, val, num=self.col_id, notify=notify)
+        self.db.set_custom_bulk(book_ids, val, num=self.col_id, notify=notify)
 
     def getter(self):
         if self.col_metadata['is_multiple']:
@@ -369,7 +369,7 @@ def build_pipe(print_error=True):
     t.start()
     t.join(3.0)
     if t.is_alive():
-        if iswindows():
+        if iswindows:
             cant_start()
         else:
            f = os.path.expanduser('~/.calibre_calibre GUI.lock')
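The one-character fix above matters because calibre.constants.iswindows is a plain boolean computed at import time, not a function, so calling it raised a TypeError on the single-instance fallback path. Schematically (an approximation of the constant's definition, not the exact source):

    import sys

    # calibre.constants defines, approximately:
    iswindows = 'win32' in sys.platform.lower() or 'win64' in sys.platform.lower()

    if iswindows:   # correct: truth-test the bool
        pass
    # iswindows()   # wrong: TypeError, 'bool' object is not callable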
@@ -7,6 +7,7 @@ __license__ = 'GPL 3'
 __copyright__ = '2011, John Schember <john@nachtimwald.com>'
 __docformat__ = 'restructuredtext en'
 
+import re
 from contextlib import closing
 from lxml import html
 
@@ -49,7 +50,7 @@ class AmazonEUBase(StorePlugin):
         asin_xpath = '@name'
         cover_xpath = './/img[@class="productImage"]/@src'
         title_xpath = './/h3[@class="newaps"]/a//text()'
-        author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]/text()'
+        author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]//text()'
         price_xpath = './/ul[contains(@class, "rsltL")]//span[contains(@class, "lrg") and contains(@class, "bld")]/text()'
 
         for data in doc.xpath(data_xpath):
@@ -57,7 +58,7 @@ class AmazonEUBase(StorePlugin):
                 break
 
             # Even though we are searching digital-text only Amazon will still
-            # put in results for non Kindle books (author pages). Se we need
+            # put in results for non Kindle books (authors pages). Se we need
            # to explicitly check if the item is a Kindle book and ignore it
            # if it isn't.
             format_ = ''.join(data.xpath(format_xpath))
@@ -75,12 +76,13 @@ class AmazonEUBase(StorePlugin):
             cover_url = ''.join(data.xpath(cover_xpath))
 
             title = ''.join(data.xpath(title_xpath))
-            author = ''.join(data.xpath(author_xpath))
-            try:
-                if self.author_article:
-                    author = author.split(self.author_article, 1)[1].split(" (")[0]
-            except:
-                pass
+
+            authors = ''.join(data.xpath(author_xpath))
+            authors = re.sub('^' + self.author_article, '', authors)
+            authors = re.sub(self.and_word, ' & ', authors)
+            mo = re.match(r'(.*)(\(\d.*)$', authors)
+            if mo:
+                authors = mo.group(1).strip()
 
             price = ''.join(data.xpath(price_xpath))
 
@@ -89,7 +91,7 @@ class AmazonEUBase(StorePlugin):
             s = SearchResult()
             s.cover_url = cover_url.strip()
             s.title = title.strip()
-            s.author = author.strip()
+            s.author = authors.strip()
             s.price = price.strip()
             s.detail_item = asin.strip()
             s.drm = SearchResult.DRM_UNKNOWN
@@ -115,3 +117,5 @@ class AmazonDEKindleStore(AmazonEUBase):
     search_url = 'http://www.amazon.de/s/?url=search-alias%3Ddigital-text&field-keywords='
 
     author_article = 'von '
+
+    and_word = ' und '
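All of the Amazon EU store plugins in this commit replace the old split-based author cleanup with the regex pipeline above: strip the localized "by" article (author_article), normalize the localized "and" (and_word) to an ampersand, and drop a trailing parenthesized date. A self-contained sketch using the German values from this file; the sample author string is invented:

    # -*- coding: utf-8 -*-
    # Sketch of the author-string normalization; the sample string is made up.
    import re

    author_article = 'von '   # German 'by'
    and_word = ' und '        # German 'and'

    authors = 'von Erich Kaestner und Walter Trier (1. Januar 2010)'
    authors = re.sub('^' + author_article, '', authors)   # drop leading article
    authors = re.sub(and_word, ' & ', authors)            # localized 'and' -> '&'
    mo = re.match(r'(.*)(\(\d.*)$', authors)              # trailing '(digit...' tail
    if mo:
        authors = mo.group(1).strip()
    print(authors)  # Erich Kaestner & Walter Trier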
@@ -7,6 +7,7 @@ __license__ = 'GPL 3'
 __copyright__ = '2011, John Schember <john@nachtimwald.com>'
 __docformat__ = 'restructuredtext en'
 
+import re
 from contextlib import closing
 from lxml import html
 
@@ -48,7 +49,7 @@ class AmazonEUBase(StorePlugin):
         asin_xpath = '@name'
         cover_xpath = './/img[@class="productImage"]/@src'
         title_xpath = './/h3[@class="newaps"]/a//text()'
-        author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]/text()'
+        author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]//text()'
         price_xpath = './/ul[contains(@class, "rsltL")]//span[contains(@class, "lrg") and contains(@class, "bld")]/text()'
 
         for data in doc.xpath(data_xpath):
@@ -56,7 +57,7 @@ class AmazonEUBase(StorePlugin):
                 break
 
             # Even though we are searching digital-text only Amazon will still
-            # put in results for non Kindle books (author pages). Se we need
+            # put in results for non Kindle books (authors pages). Se we need
            # to explicitly check if the item is a Kindle book and ignore it
            # if it isn't.
             format_ = ''.join(data.xpath(format_xpath))
@@ -74,12 +75,13 @@ class AmazonEUBase(StorePlugin):
             cover_url = ''.join(data.xpath(cover_xpath))
 
             title = ''.join(data.xpath(title_xpath))
-            author = ''.join(data.xpath(author_xpath))
-            try:
-                if self.author_article:
-                    author = author.split(self.author_article, 1)[1].split(" (")[0]
-            except:
-                pass
+
+            authors = ''.join(data.xpath(author_xpath))
+            authors = re.sub('^' + self.author_article, '', authors)
+            authors = re.sub(self.and_word, ' & ', authors)
+            mo = re.match(r'(.*)(\(\d.*)$', authors)
+            if mo:
+                authors = mo.group(1).strip()
 
             price = ''.join(data.xpath(price_xpath))
 
@@ -88,7 +90,7 @@ class AmazonEUBase(StorePlugin):
             s = SearchResult()
             s.cover_url = cover_url.strip()
             s.title = title.strip()
-            s.author = author.strip()
+            s.author = authors.strip()
             s.price = price.strip()
             s.detail_item = asin.strip()
             s.drm = SearchResult.DRM_UNKNOWN
@@ -113,3 +115,5 @@ class AmazonESKindleStore(AmazonEUBase):
     search_url = 'http://www.amazon.es/s/?url=search-alias%3Ddigital-text&field-keywords='
 
     author_article = 'de '
+
+    and_word = ' y '
@@ -7,7 +7,7 @@ __license__ = 'GPL 3'
 __copyright__ = '2011, John Schember <john@nachtimwald.com>'
 __docformat__ = 'restructuredtext en'
 
-
+import re
 from contextlib import closing
 from lxml import html
 
@@ -50,7 +50,7 @@ class AmazonEUBase(StorePlugin):
         asin_xpath = '@name'
         cover_xpath = './/img[@class="productImage"]/@src'
         title_xpath = './/h3[@class="newaps"]/a//text()'
-        author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]/text()'
+        author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]//text()'
         price_xpath = './/ul[contains(@class, "rsltL")]//span[contains(@class, "lrg") and contains(@class, "bld")]/text()'
 
         for data in doc.xpath(data_xpath):
@@ -58,7 +58,7 @@ class AmazonEUBase(StorePlugin):
                 break
 
             # Even though we are searching digital-text only Amazon will still
-            # put in results for non Kindle books (author pages). Se we need
+            # put in results for non Kindle books (authors pages). Se we need
            # to explicitly check if the item is a Kindle book and ignore it
            # if it isn't.
             format_ = ''.join(data.xpath(format_xpath))
@@ -76,12 +76,13 @@ class AmazonEUBase(StorePlugin):
             cover_url = ''.join(data.xpath(cover_xpath))
 
             title = ''.join(data.xpath(title_xpath))
-            author = ''.join(data.xpath(author_xpath))
-            try:
-                if self.author_article:
-                    author = author.split(self.author_article, 1)[1].split(" (")[0]
-            except:
-                pass
+
+            authors = ''.join(data.xpath(author_xpath))
+            authors = re.sub('^' + self.author_article, '', authors)
+            authors = re.sub(self.and_word, ' & ', authors)
+            mo = re.match(r'(.*)(\(\d.*)$', authors)
+            if mo:
+                authors = mo.group(1).strip()
 
             price = ''.join(data.xpath(price_xpath))
 
@@ -90,7 +91,7 @@ class AmazonEUBase(StorePlugin):
             s = SearchResult()
             s.cover_url = cover_url.strip()
             s.title = title.strip()
-            s.author = author.strip()
+            s.author = authors.strip()
             s.price = price.strip()
             s.detail_item = asin.strip()
             s.drm = SearchResult.DRM_UNKNOWN
@@ -112,3 +113,5 @@ class AmazonFRKindleStore(AmazonEUBase):
     search_url = 'http://www.amazon.fr/s/?url=search-alias%3Ddigital-text&field-keywords='
 
     author_article = 'de '
+
+    and_word = ' et '
@@ -7,6 +7,7 @@ __license__ = 'GPL 3'
 __copyright__ = '2011, John Schember <john@nachtimwald.com>'
 __docformat__ = 'restructuredtext en'
 
+import re
 from contextlib import closing
 from lxml import html
 
@@ -48,7 +49,7 @@ class AmazonEUBase(StorePlugin):
         asin_xpath = '@name'
         cover_xpath = './/img[@class="productImage"]/@src'
         title_xpath = './/h3[@class="newaps"]/a//text()'
-        author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]/text()'
+        author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]//text()'
         price_xpath = './/ul[contains(@class, "rsltL")]//span[contains(@class, "lrg") and contains(@class, "bld")]/text()'
 
         for data in doc.xpath(data_xpath):
@@ -56,7 +57,7 @@ class AmazonEUBase(StorePlugin):
                 break
 
             # Even though we are searching digital-text only Amazon will still
-            # put in results for non Kindle books (author pages). Se we need
+            # put in results for non Kindle books (authors pages). Se we need
            # to explicitly check if the item is a Kindle book and ignore it
            # if it isn't.
             format_ = ''.join(data.xpath(format_xpath))
@@ -74,12 +75,13 @@ class AmazonEUBase(StorePlugin):
             cover_url = ''.join(data.xpath(cover_xpath))
 
             title = ''.join(data.xpath(title_xpath))
-            author = ''.join(data.xpath(author_xpath))
-            try:
-                if self.author_article:
-                    author = author.split(self.author_article, 1)[1].split(" (")[0]
-            except:
-                pass
+
+            authors = ''.join(data.xpath(author_xpath))
+            authors = re.sub('^' + self.author_article, '', authors)
+            authors = re.sub(self.and_word, ' & ', authors)
+            mo = re.match(r'(.*)(\(\d.*)$', authors)
+            if mo:
+                authors = mo.group(1).strip()
 
             price = ''.join(data.xpath(price_xpath))
 
@@ -88,7 +90,7 @@ class AmazonEUBase(StorePlugin):
             s = SearchResult()
             s.cover_url = cover_url.strip()
             s.title = title.strip()
-            s.author = author.strip()
+            s.author = authors.strip()
             s.price = price.strip()
             s.detail_item = asin.strip()
             s.drm = SearchResult.DRM_UNKNOWN
@@ -99,7 +101,6 @@ class AmazonEUBase(StorePlugin):
     def get_details(self, search_result, timeout):
         pass
 
-
 class AmazonITKindleStore(AmazonEUBase):
     '''
     For comments on the implementation, please see amazon_plugin.py
@@ -114,3 +115,5 @@ class AmazonITKindleStore(AmazonEUBase):
     search_url = 'http://www.amazon.it/s/?url=search-alias%3Ddigital-text&field-keywords='
 
     author_article = 'di '
+
+    and_word = ' e '
@@ -133,7 +133,7 @@ class AmazonKindleStore(StorePlugin):
         asin_xpath = '@name'
         cover_xpath = './/img[@class="productImage"]/@src'
         title_xpath = './/h3[@class="newaps"]/a//text()'
-        author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]/text()'
+        author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]//text()'
         price_xpath = './/ul[contains(@class, "rsltL")]//span[contains(@class, "lrg") and contains(@class, "bld")]/text()'
 
         for data in doc.xpath(data_xpath):
@@ -7,6 +7,7 @@ __license__ = 'GPL 3'
 __copyright__ = '2011, John Schember <john@nachtimwald.com>'
 __docformat__ = 'restructuredtext en'
 
+import re
 from contextlib import closing
 from lxml import html
 
@@ -48,7 +49,7 @@ class AmazonEUBase(StorePlugin):
         asin_xpath = '@name'
         cover_xpath = './/img[@class="productImage"]/@src'
         title_xpath = './/h3[@class="newaps"]/a//text()'
-        author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]/text()'
+        author_xpath = './/h3[@class="newaps"]//span[contains(@class, "reg")]//text()'
         price_xpath = './/ul[contains(@class, "rsltL")]//span[contains(@class, "lrg") and contains(@class, "bld")]/text()'
 
         for data in doc.xpath(data_xpath):
@@ -56,7 +57,7 @@ class AmazonEUBase(StorePlugin):
                 break
 
             # Even though we are searching digital-text only Amazon will still
-            # put in results for non Kindle books (author pages). Se we need
+            # put in results for non Kindle books (authors pages). Se we need
            # to explicitly check if the item is a Kindle book and ignore it
            # if it isn't.
             format_ = ''.join(data.xpath(format_xpath))
@@ -74,12 +75,13 @@ class AmazonEUBase(StorePlugin):
             cover_url = ''.join(data.xpath(cover_xpath))
 
             title = ''.join(data.xpath(title_xpath))
-            author = ''.join(data.xpath(author_xpath))
-            try:
-                if self.author_article:
-                    author = author.split(self.author_article, 1)[1].split(" (")[0]
-            except:
-                pass
+
+            authors = ''.join(data.xpath(author_xpath))
+            authors = re.sub('^' + self.author_article, '', authors)
+            authors = re.sub(self.and_word, ' & ', authors)
+            mo = re.match(r'(.*)(\(\d.*)$', authors)
+            if mo:
+                authors = mo.group(1).strip()
 
             price = ''.join(data.xpath(price_xpath))
 
@@ -88,7 +90,7 @@ class AmazonEUBase(StorePlugin):
             s = SearchResult()
             s.cover_url = cover_url.strip()
             s.title = title.strip()
-            s.author = author.strip()
+            s.author = authors.strip()
             s.price = price.strip()
             s.detail_item = asin.strip()
             s.drm = SearchResult.DRM_UNKNOWN
@@ -112,3 +114,5 @@ class AmazonUKKindleStore(AmazonEUBase):
 
     author_article = 'by '
+
+    and_word = ' and '
 
@@ -41,7 +41,7 @@ class FoylesUKStore(BasicStoreConfig, StorePlugin):
             d.exec_()
 
     def search(self, query, max_results=10, timeout=60):
-        url = 'http://ebooks.foyles.co.uk/search_for-' + urllib2.quote(query)
+        url = 'http://ebooks.foyles.co.uk/catalog/search/?query=' + urllib2.quote(query)
 
         br = browser()
 
@@ -58,7 +58,7 @@ class FoylesUKStore(BasicStoreConfig, StorePlugin):
                 cover_url = ''.join(data.xpath('.//p[@class="doc-cover"]/a/img/@src'))
                 title = ''.join(data.xpath('.//span[@class="title"]/a/text()'))
                 author = ', '.join(data.xpath('.//span[@class="author"]/span[@class="author"]/text()'))
-                price = ''.join(data.xpath('.//span[@class="price"]/text()'))
+                price = ''.join(data.xpath('.//span[@itemprop="price"]/text()'))
                 format_ = ''.join(data.xpath('.//p[@class="doc-meta-format"]/span[last()]/text()'))
                 format_, ign, drm = format_.partition(' ')
                 drm = SearchResult.DRM_LOCKED if 'DRM' in drm else SearchResult.DRM_UNLOCKED
@@ -790,8 +790,7 @@ class DocumentView(QWebView):  # {{{
         self.manager.load_started()
 
         load_html(path, self, codec=getattr(path, 'encoding', 'utf-8'), mime_type=getattr(path,
-            'mime_type', 'text/html'), pre_load_callback=callback,
-            force_as_html=True)
+            'mime_type', 'text/html'), pre_load_callback=callback)
         entries = set()
         for ie in getattr(path, 'index_entries', []):
             if ie.start_anchor:
@@ -725,13 +725,15 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
         self.view.shrink_fonts()
 
     def magnification_changed(self, val):
-        tt = _('%(which)s font size [%(sc)s]\nCurrent magnification: %(mag).1f')
+        tt = '%(action)s [%(sc)s]\n'+_('Current magnification: %(mag).1f')
         sc = unicode(self.action_font_size_larger.shortcut().toString())
         self.action_font_size_larger.setToolTip(
-                tt %dict(which=_('Increase'), mag=val, sc=sc))
+                tt %dict(action=unicode(self.action_font_size_larger.text()),
+                    mag=val, sc=sc))
         sc = unicode(self.action_font_size_smaller.shortcut().toString())
         self.action_font_size_smaller.setToolTip(
-                tt %dict(which=_('Decrease'), mag=val, sc=sc))
+                tt %dict(action=unicode(self.action_font_size_smaller.text()),
+                    mag=val, sc=sc))
         self.action_font_size_larger.setEnabled(self.view.multiplier < 3)
         self.action_font_size_smaller.setEnabled(self.view.multiplier > 0.2)
 
@@ -955,8 +955,8 @@ class LayoutButton(QToolButton):
 
     def set_state_to_hide(self, *args):
         self.setChecked(True)
-        label = _('Hide')
-        self.setText(label + ' ' + self.label+ u' (%s)'%self.shortcut)
+        self.setText(_('Hide %(label)s %(shortcut)s'%dict(
+            label=self.label, shortcut=self.shortcut)))
         self.setToolTip(self.text())
         self.setStatusTip(self.text())
 
@@ -357,8 +357,9 @@ def do_add_empty(db, title, authors, isbn, tags, series, series_index, cover):
     mi.series, mi.series_index = series, series_index
     if cover:
         mi.cover = cover
-    db.import_book(mi, [])
+    book_id = db.import_book(mi, [])
     write_dirtied(db)
+    prints(_('Added book ids: %s')%book_id)
     send_message()
 
 def command_add(args, dbpath):
@@ -34,7 +34,7 @@ from calibre import isbytestring
 from calibre.utils.filenames import (ascii_filename, samefile,
         WindowsAtomicFolderMove, hardlink_file)
 from calibre.utils.date import (utcnow, now as nowf, utcfromtimestamp,
-        parse_only_date, UNDEFINED_DATE)
+        parse_only_date, UNDEFINED_DATE, parse_date)
 from calibre.utils.config import prefs, tweaks, from_json, to_json
 from calibre.utils.icu import sort_key, strcmp, lower
 from calibre.utils.search_query_parser import saved_searches, set_saved_searches
@@ -1134,6 +1134,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
         base_path = os.path.join(self.library_path, self.path(id,
             index_is_id=True))
+        self.dirtied([id])
         if not os.path.exists(base_path):
             os.makedirs(base_path)
+
         path = os.path.join(base_path, 'cover.jpg')
 
@@ -2270,7 +2272,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
         # force_changes has no effect on cover manipulation
         if mi.cover_data[1] is not None:
             doit(self.set_cover, id, mi.cover_data[1], commit=False)
-        elif mi.cover is not None:
+        elif isinstance(mi.cover, basestring) and mi.cover:
             if os.access(mi.cover, os.R_OK):
                 with lopen(mi.cover, 'rb') as f:
                     raw = f.read()
@@ -2565,6 +2567,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
 
     def set_timestamp(self, id, dt, notify=True, commit=True):
         if dt:
+            if isinstance(dt, (unicode, bytes)):
+                dt = parse_date(dt, as_utc=True, assume_utc=False)
             self.conn.execute('UPDATE books SET timestamp=? WHERE id=?', (dt, id))
             self.data.set(id, self.FIELD_MAP['timestamp'], dt, row_is_id=True)
             self.dirtied([id], commit=False)
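With the two added lines, set_timestamp() now accepts date strings as well as datetime objects, coercing them through calibre.utils.date.parse_date before they reach the database. Roughly equivalent behaviour in isolation, with dateutil standing in for parse_date (which wraps dateutil and adds UTC handling):

    # Sketch of the string-to-datetime coercion added to set_timestamp().
    from dateutil import parser as date_parser

    def coerce_timestamp(dt):
        # calibre checks isinstance(dt, (unicode, bytes)) on Python 2.
        if isinstance(dt, str):
            dt = date_parser.parse(dt)
        return dt

    print(coerce_timestamp('2013-03-01T10:00:00'))  # a datetime, ready for SQL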
@@ -5,7 +5,7 @@ __license__ = 'GPL v3'
 __copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
 __docformat__ = 'restructuredtext en'
 
-import operator, os, json, re
+import operator, os, json, re, time
 from binascii import hexlify, unhexlify
 from collections import OrderedDict
 
@@ -590,7 +590,7 @@ class BrowseServer(object):
         entries = get_category_items(category, entries,
                 self.search_restriction_name, datatype,
                 self.opts.url_prefix)
-        return json.dumps(entries, ensure_ascii=False)
+        return json.dumps(entries, ensure_ascii=True)
 
 
     @Endpoint()
@@ -772,6 +772,7 @@ class BrowseServer(object):
                 continue
             args, fmt, fmts, fname = self.browse_get_book_args(mi, id_)
             args['other_formats'] = ''
+            args['fmt'] = fmt
             if fmts and fmt:
                 other_fmts = [x for x in fmts if x.lower() != fmt.lower()]
                 if other_fmts:
@@ -794,8 +795,9 @@ class BrowseServer(object):
                 args['get_button'] = \
                         '<a href="%s" class="read" title="%s">%s</a>' % \
                         (xml(href, True), rt, xml(_('Get')))
+                args['get_url'] = xml(href, True)
             else:
-                args['get_button'] = ''
+                args['get_button'] = args['get_url'] = ''
             args['comments'] = comments_to_html(mi.comments)
             args['stars'] = ''
             if mi.rating:
@@ -814,10 +816,10 @@ class BrowseServer(object):
             summs.append(self.browse_summary_template.format(**args))
 
 
-        raw = json.dumps('\n'.join(summs), ensure_ascii=False)
+        raw = json.dumps('\n'.join(summs), ensure_ascii=True)
         return raw
 
-    def browse_render_details(self, id_):
+    def browse_render_details(self, id_, add_random_button=False):
         try:
             mi = self.db.get_metadata(id_, index_is_id=True)
         except:
@@ -825,12 +827,17 @@ class BrowseServer(object):
         else:
             args, fmt, fmts, fname = self.browse_get_book_args(mi, id_,
                     add_category_links=True)
+            args['fmt'] = fmt
+            if fmt:
+                args['get_url'] = xml(self.opts.url_prefix + '/get/%s/%s_%d.%s'%(
+                    fmt, fname, id_, fmt), True)
+            else:
+                args['get_url'] = ''
             args['formats'] = ''
             if fmts:
                 ofmts = [u'<a href="{4}/get/{0}/{1}_{2}.{0}" title="{3}">{3}</a>'\
-                        .format(fmt, fname, id_, fmt.upper(),
-                            self.opts.url_prefix) for fmt in
-                        fmts]
+                        .format(xfmt, fname, id_, xfmt.upper(),
+                            self.opts.url_prefix) for xfmt in fmts]
                 ofmts = ', '.join(ofmts)
                 args['formats'] = ofmts
             fields, comments = [], []
@@ -879,10 +886,18 @@ class BrowseServer(object):
                     u'<div class="comment">%s</div></div>') % (xml(c[0]),
                         c[1]) for c in comments]
             comments = u'<div class="comments">%s</div>'%('\n\n'.join(comments))
+            random = ''
+            if add_random_button:
+                href = '%s/browse/random?v=%s'%(
+                        self.opts.url_prefix, time.time())
+                random = '<a href="%s" id="random_button" title="%s">%s</a>' % (
+                        xml(href, True), xml(_('Choose another random book'), True),
+                        xml(_('Another random book')))
 
-            return self.browse_details_template.format(id=id_,
-                    title=xml(mi.title, True), fields=fields,
-                    formats=args['formats'], comments=comments)
+            return self.browse_details_template.format(
+                id=id_, title=xml(mi.title, True), fields=fields,
+                get_url=args['get_url'], fmt=args['fmt'],
+                formats=args['formats'], comments=comments, random=random)
 
     @Endpoint(mimetype='application/json; charset=utf-8')
     def browse_details(self, id=None):
@@ -893,14 +908,14 @@ class BrowseServer(object):
 
         ans = self.browse_render_details(id_)
 
-        return json.dumps(ans, ensure_ascii=False)
+        return json.dumps(ans, ensure_ascii=True)
 
     @Endpoint()
     def browse_random(self, *args, **kwargs):
         import random
         book_id = random.choice(self.db.search_getting_ids(
             '', self.search_restriction))
-        ans = self.browse_render_details(book_id)
+        ans = self.browse_render_details(book_id, add_random_button=True)
         return self.browse_template('').format(
             title='', script='book();', main=ans)
 
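The three ensure_ascii=False to ensure_ascii=True flips above make the content server emit pure 7-bit JSON, with non-ASCII characters escaped rather than sent as raw UTF-8 bytes, which sidesteps charset mishandling by some clients. The difference in one line each:

    # -*- coding: utf-8 -*-
    import json

    # ensure_ascii=False keeps the characters as-is; the receiver must decode
    # the bytes with the right charset:
    print(json.dumps(u'F\u00fc\u00dfe', ensure_ascii=False))
    # ensure_ascii=True \u-escapes everything outside ASCII, so the payload is
    # safe regardless of declared charset:
    print(json.dumps(u'F\u00fc\u00dfe', ensure_ascii=True))  # "F\u00fc\u00dfe"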
@@ -20,7 +20,7 @@ from calibre.ebooks.metadata import title_sort, author_to_author_sort
 from calibre.utils.date import parse_date, isoformat, local_tz, UNDEFINED_DATE
 from calibre import isbytestring, force_unicode
 from calibre.constants import iswindows, DEBUG, plugins
-from calibre.utils.icu import strcmp
+from calibre.utils.icu import sort_key
 from calibre import prints
 
 from dateutil.tz import tzoffset
@@ -189,7 +189,8 @@ def pynocase(one, two, encoding='utf-8'):
     return cmp(one.lower(), two.lower())
 
 def icu_collator(s1, s2):
-    return strcmp(force_unicode(s1, 'utf-8'), force_unicode(s2, 'utf-8'))
+    return cmp(sort_key(force_unicode(s1, 'utf-8')),
+               sort_key(force_unicode(s2, 'utf-8')))
 
 def load_c_extensions(conn, debug=DEBUG):
     try:
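The collator change swaps a direct ICU strcmp for a comparison of sort_key values; both yield ICU collation order for the SQLite collation hook, but sort_key produces a plain byte string that is cheap to compare. A runnable sketch of registering such a collation on a sqlite3 connection, with locale.strxfrm standing in for calibre.utils.icu.sort_key:

    # Sketch: locale-aware collation for SQLite; locale.strxfrm is a
    # stand-in for calibre.utils.icu.sort_key.
    import locale
    import sqlite3

    locale.setlocale(locale.LC_COLLATE, '')

    def icu_collator(s1, s2):
        k1, k2 = locale.strxfrm(s1), locale.strxfrm(s2)
        return (k1 > k2) - (k1 < k2)   # cmp() spelled out for Python 3

    conn = sqlite3.connect(':memory:')
    conn.create_collation('icucollate', icu_collator)
    conn.execute('CREATE TABLE t(x TEXT)')
    conn.executemany('INSERT INTO t VALUES (?)', [('b',), ('a',), ('C',)])
    print(conn.execute(
        'SELECT x FROM t ORDER BY x COLLATE icucollate').fetchall())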
@@ -123,6 +123,274 @@ os.remove(os.path.abspath(__file__))
 
 # }}}
 
+class ZshCompleter(object):  # {{{
+
+    def __init__(self, opts):
+        self.opts = opts
+        self.dest = None
+        base = os.path.dirname(self.opts.staging_sharedir)
+        self.detect_zsh(base)
+        if not self.dest and base == '/usr/share':
+            # Ubuntu puts site-functions in /usr/local/share
+            self.detect_zsh('/usr/local/share')
+
+        self.commands = {}
+
+    def detect_zsh(self, base):
+        for x in ('vendor-completions', 'vendor-functions', 'site-functions'):
+            c = os.path.join(base, 'zsh', x)
+            if os.path.isdir(c) and os.access(c, os.W_OK):
+                self.dest = os.path.join(c, '_calibre')
+                break
+
+    def get_options(self, parser, cover_opts=('--cover',), opf_opts=('--opf',),
+                    file_map={}):
+        if hasattr(parser, 'option_list'):
+            options = parser.option_list
+            for group in parser.option_groups:
+                options += group.option_list
+        else:
+            options = parser
+        for opt in options:
+            lo, so = opt._long_opts, opt._short_opts
+            if opt.takes_value():
+                lo = [x+'=' for x in lo]
+                so = [x+'+' for x in so]
+            ostrings = lo + so
+            if len(ostrings) > 1:
+                ostrings = u'{%s}'%','.join(ostrings)
+            else:
+                ostrings = ostrings[0]
+            exclude = u''
+            if opt.dest is None:
+                exclude = u"'(- *)'"
+            h = opt.help or ''
+            h = h.replace('"', "'").replace('[', '(').replace(
+                ']', ')').replace('\n', ' ').replace(':', '\\:')
+            h = h.replace('%default', type(u'')(opt.default))
+            arg = ''
+            if opt.takes_value():
+                arg = ':"%s":'%h
+                if opt.dest in {'extract_to', 'debug_pipeline', 'to_dir', 'outbox', 'with_library', 'library_path'}:
+                    arg += "'_path_files -/'"
+                elif opt.choices:
+                    arg += "(%s)"%'|'.join(opt.choices)
+                elif set(file_map).intersection(set(opt._long_opts)):
+                    k = set(file_map).intersection(set(opt._long_opts))
+                    exts = file_map[tuple(k)[0]]
+                    if exts:
+                        arg += "'_files -g \"%s\"'"%(' '.join('*.%s'%x for x in
+                            tuple(exts) + tuple(x.upper() for x in exts)))
+                    else:
+                        arg += "_files"
+                elif (opt.dest in {'pidfile', 'attachment'}):
+                    arg += "_files"
+                elif set(opf_opts).intersection(set(opt._long_opts)):
+                    arg += "'_files -g \"*.opf\"'"
+                elif set(cover_opts).intersection(set(opt._long_opts)):
+                    arg += "'_files -g \"%s\"'"%(' '.join('*.%s'%x for x in
+                        tuple(pics) + tuple(x.upper() for x in pics)))
+
+            help_txt = u'"[%s]"'%h
+            yield u'%s%s%s%s '%(exclude, ostrings, help_txt, arg)
+
+    def opts_and_exts(self, name, op, exts, cover_opts=('--cover',),
+                      opf_opts=('--opf',), file_map={}):
+        if not self.dest: return
+        exts = set(exts).union(x.upper() for x in exts)
+        pats = ('*.%s'%x for x in exts)
+        extra = ("'*:filename:_files -g \"%s\"' "%' '.join(pats),)
+        opts = '\\\n  '.join(tuple(self.get_options(
+            op(), cover_opts=cover_opts, opf_opts=opf_opts, file_map=file_map)) + extra)
+        txt = '_arguments -s \\\n  ' + opts
+        self.commands[name] = txt
+
+    def opts_and_words(self, name, op, words, takes_files=False):
+        if not self.dest: return
+        extra = ("'*:filename:_files' ",) if takes_files else ()
+        opts = '\\\n  '.join(tuple(self.get_options(op())) + extra)
+        txt = '_arguments -s \\\n  ' + opts
+        self.commands[name] = txt
+
+    def do_ebook_convert(self, f):
+        from calibre.ebooks.conversion.plumber import supported_input_formats
+        from calibre.web.feeds.recipes.collection import get_builtin_recipe_titles
+        from calibre.customize.ui import available_output_formats
+        from calibre.ebooks.conversion.cli import create_option_parser, group_titles
+        from calibre.utils.logging import DevNull
+        input_fmts = set(supported_input_formats())
+        output_fmts = set(available_output_formats())
+        iexts = {x.upper() for x in input_fmts}.union(input_fmts)
+        oexts = {x.upper() for x in output_fmts}.union(output_fmts)
+        w = lambda x: f.write(x if isinstance(x, bytes) else x.encode('utf-8'))
+        # Arg 1
+        w('\n_ebc_input_args() {')
+        w('\n  local extras; extras=(')
+        w('\n    {-h,--help}":Show Help"')
+        w('\n    "--version:Show program version"')
+        w('\n    "--list-recipes:List builtin recipe names"')
+        for recipe in sorted(set(get_builtin_recipe_titles())):
+            recipe = recipe.replace(':', '\\:').replace('"', '\\"')
+            w(u'\n    "%s.recipe"'%(recipe))
+        w('\n  ); _describe -t recipes "ebook-convert builtin recipes" extras')
+        w('\n  _files -g "%s"'%' '.join(('*.%s'%x for x in iexts)))
+        w('\n}\n')
+
+        # Arg 2
+        w('\n_ebc_output_args() {')
+        w('\n  local extras; extras=(')
+        for x in output_fmts:
+            w('\n    ".{0}:Convert to a .{0} file with the same name as the input file"'.format(x))
+        w('\n  ); _describe -t output "ebook-convert output" extras')
+        w('\n  _files -g "%s"'%' '.join(('*.%s'%x for x in oexts)))
+        w('\n  _path_files -/')
+        w('\n}\n')
+
+        log = DevNull()
+        def get_parser(input_fmt='epub', output_fmt=None):
+            of = ('dummy2.'+output_fmt) if output_fmt else 'dummy'
+            return create_option_parser(('ec', 'dummy1.'+input_fmt, of, '-h'), log)[0]
+
+        # Common options
+        input_group, output_group = group_titles()
+        p = get_parser()
+        opts = p.option_list
+        for group in p.option_groups:
+            if group.title not in {input_group, output_group}:
+                opts += group.option_list
+        opts.append(p.get_option('--pretty-print'))
+        opts.append(p.get_option('--input-encoding'))
+        opts = '\\\n  '.join(tuple(
+            self.get_options(opts, file_map={'--search-replace':()})))
+        w('\n_ebc_common_opts() {')
+        w('\n  _arguments -s \\\n  ' + opts)
+        w('\n}\n')
+
+        # Input/Output format options
+        for fmts, group_title, func in (
+                (input_fmts, input_group, '_ebc_input_opts_%s'),
+                (output_fmts, output_group, '_ebc_output_opts_%s'),
+        ):
+            for fmt in fmts:
+                is_input = group_title == input_group
+                if is_input and fmt in {'rar', 'zip', 'oebzip'}: continue
+                p = (get_parser(input_fmt=fmt) if is_input
+                     else get_parser(output_fmt=fmt))
+                opts = None
+                for group in p.option_groups:
+                    if group.title == group_title:
+                        opts = [o for o in group.option_list if
+                                '--pretty-print' not in o._long_opts and
+                                '--input-encoding' not in o._long_opts]
+                if not opts: continue
+                opts = '\\\n  '.join(tuple(self.get_options(opts)))
+                w('\n%s() {'%(func%fmt))
+                w('\n  _arguments -s \\\n  ' + opts)
+                w('\n}\n')
+
+        w('\n_ebook_convert() {')
+        w('\n  local iarg oarg context state_descr state line\n  typeset -A opt_args\n  local ret=1')
+        w("\n  _arguments '1: :_ebc_input_args' '*::ebook-convert output:->args' && ret=0")
+        w("\n  case $state in \n  (args)")
+        w('\n    iarg=${line[1]##*.}; ')
+        w("\n    _arguments '1: :_ebc_output_args' '*::ebook-convert options:->args' && ret=0")
+        w("\n    case $state in \n    (args)")
+
+        w('\n      oarg=${line[1]##*.}')
+        w('\n      iarg="_ebc_input_opts_${(L)iarg}"; oarg="_ebc_output_opts_${(L)oarg}"')
+        w('\n      _call_function - $iarg; _call_function - $oarg; _ebc_common_opts; ret=0')
+        w('\n    ;;\n    esac')
+
+        w("\n  ;;\n  esac\n  return ret")
+        w('\n}\n')
+
+    def do_calibredb(self, f):
+        import calibre.library.cli as cli
+        from calibre.customize.ui import available_catalog_formats
+        parsers, descs = {}, {}
+        for command in cli.COMMANDS:
+            op = getattr(cli, '%s_option_parser'%command)
+            args = [['t.epub']] if command == 'catalog' else []
+            p = op(*args)
+            if isinstance(p, tuple):
+                p = p[0]
+            parsers[command] = p
+            lines = [x.strip().partition('.')[0] for x in p.usage.splitlines() if x.strip() and
+                     not x.strip().startswith('%prog')]
+            descs[command] = lines[0]
+
+        f.write('\n_calibredb_cmds() {\n  local commands; commands=(\n')
+        f.write('    {-h,--help}":Show help"\n')
+        f.write('    "--version:Show version"\n')
+        for command, desc in descs.iteritems():
+            f.write('    "%s:%s"\n'%(
+                command, desc.replace(':', '\\:').replace('"', '\'')))
+        f.write('  )\n  _describe -t commands "calibredb command" commands \n}\n')
+
+        subcommands = []
+        for command, parser in parsers.iteritems():
+            exts = []
+            if command == 'catalog':
+                exts = [x.lower() for x in available_catalog_formats()]
+            elif command == 'set_metadata':
+                exts = ['opf']
+            exts = set(exts).union(x.upper() for x in exts)
+            pats = ('*.%s'%x for x in exts)
+            extra = ("'*:filename:_files -g \"%s\"' "%' '.join(pats),) if exts else ()
+            if command in {'add', 'add_format'}:
+                extra = ("'*:filename:_files' ",)
+            opts = '\\\n  '.join(tuple(self.get_options(
+                parser)) + extra)
+            txt = '  _arguments -s \\\n  ' + opts
+            subcommands.append('(%s)'%command)
+            subcommands.append(txt)
+            subcommands.append(';;')
+
+        f.write('\n_calibredb() {')
+        f.write(
+            r'''
+    local state line state_descr context
+    typeset -A opt_args
+    local ret=1
+
+    _arguments \
+        '1: :_calibredb_cmds' \
+        '*::calibredb subcommand options:->args' \
+        && ret=0
+
+    case $state in
+    (args)
+    case $line[1] in
+      (-h|--help|--version)
+          _message 'no more arguments' && ret=0
+      ;;
+    %s
+    esac
+    ;;
+    esac
+
+    return ret
+    '''%'\n    '.join(subcommands))
+        f.write('\n}\n\n')
+
+    def write(self):
+        if self.dest:
+            self.commands['calibredb'] = ' _calibredb "$@"'
+            self.commands['ebook-convert'] = ' _ebook_convert "$@"'
+            with open(self.dest, 'wb') as f:
+                f.write('#compdef ' + ' '.join(self.commands)+'\n')
+                self.do_ebook_convert(f)
+                self.do_calibredb(f)
+                f.write('case $service in\n')
+                for c, txt in self.commands.iteritems():
+                    if isinstance(txt, type(u'')):
+                        txt = txt.encode('utf-8')
+                    if isinstance(c, type(u'')):
+                        c = c.encode('utf-8')
+                    f.write(b'%s)\n%s\n;;\n'%(c, txt))
+                f.write('esac\n')
+# }}}
+
 class PostInstall:
 
     def task_failed(self, msg):
@@ -217,7 +485,7 @@ class PostInstall:
 
     def setup_completion(self):  # {{{
         try:
-            self.info('Setting up bash completion...')
+            self.info('Setting up command-line completion...')
             from calibre.ebooks.metadata.cli import option_parser as metaop, filetypes as meta_filetypes
             from calibre.ebooks.lrf.lrfparser import option_parser as lrf2lrsop
             from calibre.gui2.lrf_renderer.main import option_parser as lrfviewerop
@@ -227,8 +495,11 @@ class PostInstall:
             from calibre.utils.smtp import option_parser as smtp_op
             from calibre.library.server.main import option_parser as serv_op
             from calibre.ebooks.oeb.polish.main import option_parser as polish_op, SUPPORTED
+            from calibre.debug import option_parser as debug_op
             from calibre.ebooks import BOOK_EXTENSIONS
+            from calibre.customize.ui import available_input_formats
             input_formats = sorted(all_input_formats())
+            zsh = ZshCompleter(self.opts)
             bc = os.path.join(os.path.dirname(self.opts.staging_sharedir),
                 'bash-completion')
             if os.path.exists(bc):
@@ -240,6 +511,9 @@ class PostInstall:
                 f = os.path.join(self.opts.staging_etc, 'bash_completion.d/calibre')
             if not os.path.exists(os.path.dirname(f)):
                 os.makedirs(os.path.dirname(f))
+            if zsh.dest:
+                self.info('Installing zsh completion to:', zsh.dest)
+                self.manifest.append(zsh.dest)
             self.manifest.append(f)
             complete = 'calibre-complete'
             if getattr(sys, 'frozen_path', None):
@@ -247,20 +521,35 @@ class PostInstall:
 
             self.info('Installing bash completion to', f)
            with open(f, 'wb') as f:
+                def o_and_e(*args, **kwargs):
+                    f.write(opts_and_exts(*args, **kwargs))
+                    zsh.opts_and_exts(*args, **kwargs)
+                def o_and_w(*args, **kwargs):
+                    f.write(opts_and_words(*args, **kwargs))
+                    zsh.opts_and_words(*args, **kwargs)
+
                 f.write('# calibre Bash Shell Completion\n')
-                f.write(opts_and_exts('calibre', guiop, BOOK_EXTENSIONS))
-                f.write(opts_and_exts('lrf2lrs', lrf2lrsop, ['lrf']))
-                f.write(opts_and_exts('ebook-meta', metaop,
-                    list(meta_filetypes()), cover_opts=['--cover', '-c'],
-                    opf_opts=['--to-opf', '--from-opf']))
-                f.write(opts_and_exts('ebook-polish', polish_op,
-                    [x.lower() for x in SUPPORTED], cover_opts=['--cover', '-c'],
-                    opf_opts=['--opf', '-o']))
-                f.write(opts_and_exts('lrfviewer', lrfviewerop, ['lrf']))
-                f.write(opts_and_exts('ebook-viewer', viewer_op, input_formats))
-                f.write(opts_and_words('fetch-ebook-metadata', fem_op, []))
-                f.write(opts_and_words('calibre-smtp', smtp_op, []))
-                f.write(opts_and_words('calibre-server', serv_op, []))
+                o_and_e('calibre', guiop, BOOK_EXTENSIONS)
+                o_and_e('lrf2lrs', lrf2lrsop, ['lrf'], file_map={'--output':['lrs']})
+                o_and_e('ebook-meta', metaop,
+                        list(meta_filetypes()), cover_opts=['--cover', '-c'],
+                        opf_opts=['--to-opf', '--from-opf'])
+                o_and_e('ebook-polish', polish_op,
+                        [x.lower() for x in SUPPORTED], cover_opts=['--cover', '-c'],
+                        opf_opts=['--opf', '-o'])
+                o_and_e('lrfviewer', lrfviewerop, ['lrf'])
+                o_and_e('ebook-viewer', viewer_op, input_formats)
+                o_and_w('fetch-ebook-metadata', fem_op, [])
+                o_and_w('calibre-smtp', smtp_op, [])
+                o_and_w('calibre-server', serv_op, [])
+                o_and_e('calibre-debug', debug_op, ['py', 'recipe'], file_map={
+                    '--tweak-book':['epub', 'azw3', 'mobi'],
+                    '--subset-font':['ttf', 'otf'],
+                    '--exec-file':['py', 'recipe'],
+                    '--add-simple-plugin':['py'],
+                    '--inspect-mobi':['mobi', 'azw', 'azw3'],
+                    '--viewer':list(available_input_formats()),
+                })
                 f.write(textwrap.dedent('''
                     _ebook_device_ls()
                     {
@@ -335,6 +624,7 @@ class PostInstall:
 
                     complete -o nospace -C %s ebook-convert
                     ''')%complete)
+                zsh.write()
         except TypeError as err:
             if 'resolve_entities' in str(err):
                 print 'You need python-lxml >= 2.0.5 for calibre'
@@ -451,7 +741,7 @@ def options(option_parser):
         opts.extend(opt._long_opts)
     return opts
 
-def opts_and_words(name, op, words):
+def opts_and_words(name, op, words, takes_files=False):
     opts = '|'.join(options(op))
     words = '|'.join([w.replace("'", "\\'") for w in words])
     fname = name.replace('-', '_')
@@ -481,12 +771,15 @@ def opts_and_words(name, op, words):
 }
 complete -F _'''%(opts, words) + fname + ' ' + name +"\n\n").encode('utf-8')
 
+pics = {'jpg', 'jpeg', 'gif', 'png', 'bmp'}
 
-def opts_and_exts(name, op, exts, cover_opts=('--cover',), opf_opts=()):
+def opts_and_exts(name, op, exts, cover_opts=('--cover',), opf_opts=(),
+        file_map={}):
     opts = ' '.join(options(op))
     exts.extend([i.upper() for i in exts])
     exts='|'.join(exts)
     fname = name.replace('-', '_')
+    spics = '|'.join(tuple(pics) + tuple(x.upper() for x in pics))
     special_exts_template = '''\
 %s )
      _filedir %s
@@ -507,7 +800,7 @@ def opts_and_exts(name, op, exts, cover_opts=('--cover',), opf_opts=()):
     cur="${COMP_WORDS[COMP_CWORD]}"
     prev="${COMP_WORDS[COMP_CWORD-1]}"
     opts="%(opts)s"
-    pics="@(jpg|jpeg|png|gif|bmp|JPG|JPEG|PNG|GIF|BMP)"
+    pics="@(%(pics)s)"
 
     case "${prev}" in
 %(extras)s
@@ -526,7 +819,7 @@ def opts_and_exts(name, op, exts, cover_opts=('--cover',), opf_opts=()):
     esac
 
 }
-complete -o filenames -F _'''%dict(
+complete -o filenames -F _'''%dict(pics=spics,
     opts=opts, extras=extras, exts=exts) + fname + ' ' + name +"\n\n"
 
 
@@ -627,6 +920,5 @@ def main():
     PostInstall(opts)
     return 0
 
-
 if __name__ == '__main__':
     sys.exit(main())
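The new ZshCompleter writes a single compdef file, _calibre, into the first writable zsh functions directory it finds, covering calibredb, ebook-convert and the other CLI tools; if that directory is already on zsh's fpath, completion is picked up on the next shell start. A sketch of where the file lands, mirroring detect_zsh above (the paths are the conventional defaults the code probes, not guaranteed on every distro):

    # Sketch of the install-location probe used by ZshCompleter.detect_zsh().
    import os

    def completion_dest(base='/usr/share'):
        for x in ('vendor-completions', 'vendor-functions', 'site-functions'):
            c = os.path.join(base, 'zsh', x)
            if os.path.isdir(c) and os.access(c, os.W_OK):
                return os.path.join(c, '_calibre')
        return None  # no writable zsh functions dir; zsh completion is skipped

    print(completion_dest() or 'zsh completion not installed')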
Some files were not shown because too many files have changed in this diff