Mirror of https://github.com/kovidgoyal/calibre.git
Commit b4110b5fad: release 0.9.29
@@ -20,6 +20,54 @@
# new recipes:
# - title:

- version: 0.9.29
  date: 2013-05-03

  new features:
    - title: "Bulk metadata download: Allow reviewing of the downloaded metadata before it is applied"

    - title: "FB2 Output: Write ISBN, pubdate, tags and publisher metadata when creating fb2 files"
      tickets: [1174047]

  bug fixes:
    - title: "When reading metadata from EPUB 3 files, use the first <dc:title> element rather than the last."
      tickets: [1175184]

    - title: "Fix regression that caused the search query parser to mis-parse search strings containing newlines/tabs instead of spaces"
      tickets: [1174629]

    - title: "Kobo driver: Fix covers written to the wrong place on OS X/Linux when books are sent to the SD card. Fix covers not sent to the SD card if the images directory is missing."
      tickets: [1174147,1174126]

    - title: "Fix 'Preferences->Behavior->Virtual library to use when this library is opened' being applied only on calibre startup and not when switching to the library"

    - title: "PDF metadata: When rendering the first page as the cover, respect the PDF CropBox."
      tickets: [1173795]

    - title: "PDF Output: Fix link generation broken on Windows when converting EPUBs whose filenames contain uppercase letters."
      tickets: [1169795]

    - title: "Tolino driver: Fix card and main memory swapped on Windows"
      tickets: [1173544]

    - title: "FB2 Output: Fix images being ignored when converting an EPUB with image filenames that contain URL-unsafe characters."
      tickets: [1173351]

    - title: "EPUB Input: Fix page margins specified in Adobe page template files with incorrect mime-types not being removed."

  improved recipes:
    - The New Republic
    - io9
    - What if
    - Orlando Sentinel
    - Read It Later recipe
    - Smithsonian
    - Business Week Magazine

  new recipes:
    - title: Diario Extra
      author: Douglas Delgado

- version: 0.9.28
  date: 2013-04-26
@@ -67,8 +67,12 @@ and you will most likely get help from one of |app|'s many developers.
Getting the code
------------------

|app| uses `Bazaar <http://bazaar-vcs.org/>`_, a distributed version control system. Bazaar is available on all the platforms |app| supports.
After installing Bazaar, you can get the |app| source code with the command::
You can get the |app| source code in two ways, using a version control system or
directly downloading a `tarball <http://status.calibre-ebook.com/dist/src>`_.

|app| uses `Bazaar <http://bazaar-vcs.org/>`_, a distributed version control
system. Bazaar is available on all the platforms |app| supports. After
installing Bazaar, you can get the |app| source code with the command::

    bzr branch lp:calibre
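As a rough sketch of the tarball route (this assumes the dist/src URL serves or redirects to the current release tarball; the local filename below is only a placeholder)::

    # Sketch only: fetch the calibre source tarball instead of branching with bzr.
    # Assumes http://status.calibre-ebook.com/dist/src returns the tarball
    # (possibly via a redirect); 'calibre-src.tar.xz' is a placeholder name.
    import urllib2

    data = urllib2.urlopen('http://status.calibre-ebook.com/dist/src').read()
    with open('calibre-src.tar.xz', 'wb') as f:
        f.write(data)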
@@ -124,6 +128,8 @@ discuss them in the forum or contact Kovid directly (his email address is all ov
Windows development environment
---------------------------------

.. note:: You must also get the |app| source code separately as described above.

Install |app| normally, using the Windows installer. Then open a Command Prompt and change to
the previously checked out |app| code directory. For example::

@@ -153,6 +159,8 @@ near the top of the file. Now run the command :command:`calibredb`. The very fir
OS X development environment
------------------------------

.. note:: You must also get the |app| source code separately as described above.

Install |app| normally using the provided .dmg. Then open a Terminal and change to
the previously checked out |app| code directory, for example::

@@ -183,6 +191,8 @@ window, indicating that you are running from source.
Linux development environment
------------------------------

.. note:: You must also get the |app| source code separately as described above.

|app| is primarily developed on Linux. You have two choices in setting up the development environment. You can install the
|app| binary as normal and use that as a runtime environment to do your development. This approach is similar to that
used in Windows and OS X. Alternatively, you can install |app| from source. Instructions for setting up a development
@@ -13,14 +13,14 @@ class BaltimoreSun(BasicNewsRecipe):
__author__ = 'Josh Hall'
description = 'Complete local news and blogs from Baltimore'
language = 'en'
version = 2.4
oldest_article = 1.5
version = 2.5
oldest_article = 1
max_articles_per_feed = 100
use_embedded_content = False
no_stylesheets = True
remove_javascript = True
remove_empty_feeds= True
recursions = 3
recursions = 1

ignore_duplicate_articles = {'title'}
keep_only_tags = [dict(name='div', attrs={'class':["story","entry-asset asset hentry"]}),
@@ -12,7 +12,7 @@ class BusinessWeekMagazine(BasicNewsRecipe):
category = 'news'
encoding = 'UTF-8'
keep_only_tags = [
dict(name='div', attrs={'id':'article_body_container'}),
dict(name='div', attrs={'id':['article_body_container','story_body']}),
]
remove_tags = [dict(name='ui'),dict(name='li'),dict(name='div', attrs={'id':['share-email']})]
no_javascript = True
@@ -26,43 +26,45 @@ class BusinessWeekMagazine(BasicNewsRecipe):

#Find date
mag=soup.find('h2',text='Magazine')
self.log(mag)
dates=self.tag_to_string(mag.findNext('h3'))
self.timefmt = u' [%s]'%dates

#Go to the main body
div0 = soup.find ('div', attrs={'class':'column left'})
div0 = soup.find('div', attrs={'class':'column left'})
section_title = ''
feeds = OrderedDict()
for div in div0.findAll(['h4','h5']):
for div in div0.findAll('a', attrs={'class': None}):
articles = []
section_title = self.tag_to_string(div.findPrevious('h3')).strip()
title=self.tag_to_string(div.a).strip()
url=div.a['href']
title=self.tag_to_string(div).strip()
url=div['href']
soup0 = self.index_to_soup(url)
urlprint=soup0.find('a', attrs={'href':re.compile('.*printer.*')})['href']
articles.append({'title':title, 'url':urlprint, 'description':'', 'date':''})

urlprint=soup0.find('a', attrs={'href':re.compile('.*printer.*')})
if urlprint is not None:
url=urlprint['href']
articles.append({'title':title, 'url':url, 'description':'', 'date':''})

if articles:
if section_title not in feeds:
feeds[section_title] = []
feeds[section_title] += articles
div1 = soup.find ('div', attrs={'class':'column center'})
div1 = soup.find('div', attrs={'class':'column center'})
section_title = ''
for div in div1.findAll(['h4','h5']):
for div in div1.findAll('a'):
articles = []
desc=self.tag_to_string(div.findNext('p')).strip()
section_title = self.tag_to_string(div.findPrevious('h3')).strip()
title=self.tag_to_string(div.a).strip()
url=div.a['href']
title=self.tag_to_string(div).strip()
url=div['href']
soup0 = self.index_to_soup(url)
urlprint=soup0.find('a', attrs={'href':re.compile('.*printer.*')})['href']
articles.append({'title':title, 'url':urlprint, 'description':desc, 'date':''})

urlprint=soup0.find('a', attrs={'href':re.compile('.*printer.*')})
if urlprint is not None:
url=urlprint['href']
articles.append({'title':title, 'url':url, 'description':desc, 'date':''})
if articles:
if section_title not in feeds:
feeds[section_title] = []
feeds[section_title] += articles

ans = [(key, val) for key, val in feeds.iteritems()]
return ans
47 recipes/diario_extra.recipe Normal file
@@ -0,0 +1,47 @@
from calibre.web.feeds.news import BasicNewsRecipe


class goonews(BasicNewsRecipe):
__author__ = 'Douglas Delgado'
title = u'Diario Extra'
publisher = 'Sociedad Periodistica Extra Limitada'
description = 'Diario de circulacion nacional de Costa Rica.'
category = 'Spanish, Entertainment'
masthead_url = 'http://www.diarioextra.com/img/apariencia/logo.png'

oldest_article = 7
delay = 1
max_articles_per_feed = 100
auto_cleanup = True
encoding = 'utf-8'
language = 'es_CR'
use_embedded_content = False
remove_empty_feeds = True
remove_javascript = True
no_stylesheets = True

feeds = [(u'Nacionales',
u'http://www.diarioextra.com/includes/rss_text.php?id=1'),
(u'Internacionales',
u'http://www.diarioextra.com/includes/rss_text.php?id=2'),
(u'Sucesos',
u'http://www.diarioextra.com/includes/rss_text.php?id=3'),
(u'Deportes',
u'http://www.diarioextra.com/includes/rss_text.php?id=6'),
(u'Espectaculos',
u'http://www.diarioextra.com/includes/rss_text.php?id=7'),
(u'Opinion',
u'http://www.diarioextra.com/includes/rss_text.php?id=4')]

def get_cover_url(self):
index = 'http://kiosko.net/cr/np/cr_extra.html'
soup = self.index_to_soup(index)
for image in soup.findAll('img', src=True):
if image['src'].endswith('cr_extra.750.jpg'):
return image['src']
return None

extra_css = '''
h1{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:30px;}
h2{font-family:Arial,Helvetica,sans-serif; font-weight:normal; font-style:italic; font-size:18px;}
'''
@@ -16,14 +16,15 @@ class i09(BasicNewsRecipe):
max_articles_per_feed = 100
no_stylesheets = True
encoding = 'utf-8'
use_embedded_content = True
use_embedded_content = False
auto_cleanup = True
language = 'en'
masthead_url = 'http://cache.gawkerassets.com/assets/io9.com/img/logo.png'
extra_css = '''
body{font-family: "Lucida Grande",Helvetica,Arial,sans-serif}
img{margin-bottom: 1em}
h1{font-family :Arial,Helvetica,sans-serif; font-size:large}
'''
body{font-family: "Lucida Grande",Helvetica,Arial,sans-serif}
img{margin-bottom: 1em}
h1{font-family :Arial,Helvetica,sans-serif; font-size:large}
'''
conversion_options = {
'comment' : description
, 'tags' : category
@@ -33,10 +34,6 @@ class i09(BasicNewsRecipe):

feeds = [(u'Articles', u'http://feeds.gawker.com/io9/vip?format=xml')]

remove_tags = [
{'class': 'feedflare'},
]

def preprocess_html(self, soup):
return self.adeify_images(soup)
@@ -7,27 +7,26 @@ class AdvancedUserRecipe1279258912(BasicNewsRecipe):
max_articles_per_feed = 100

feeds = [
(u'News', u'http://feeds.feedburner.com/orlandosentinel/news'),
(u'Opinion', u'http://feeds.feedburner.com/orlandosentinel/news/opinion'),
(u'Business', u'http://feeds.feedburner.com/orlandosentinel/business'),
(u'Technology', u'http://feeds.feedburner.com/orlandosentinel/technology'),
(u'Space and Science', u'http://feeds.feedburner.com/orlandosentinel/news/space'),
(u'Entertainment', u'http://feeds.feedburner.com/orlandosentinel/entertainment'),
(u'Life and Family', u'http://feeds.feedburner.com/orlandosentinel/features/lifestyle'),
]
(u'News', u'http://feeds.feedburner.com/orlandosentinel/news'),
(u'Opinion', u'http://feeds.feedburner.com/orlandosentinel/news/opinion'),
(u'Business', u'http://feeds.feedburner.com/orlandosentinel/business'),
(u'Technology', u'http://feeds.feedburner.com/orlandosentinel/technology'),
(u'Space and Science', u'http://feeds.feedburner.com/orlandosentinel/news/space'),
(u'Entertainment', u'http://feeds.feedburner.com/orlandosentinel/entertainment'),
(u'Life and Family', u'http://feeds.feedburner.com/orlandosentinel/features/lifestyle'),
]
__author__ = 'rty'
pubisher = 'OrlandoSentinel.com'
description = 'Orlando, Florida, Newspaper'
category = 'News, Orlando, Florida'


remove_javascript = True
use_embedded_content = False
no_stylesheets = True
language = 'en'
encoding = 'utf-8'
conversion_options = {'linearize_tables':True}
masthead_url = 'http://www.orlandosentinel.com/media/graphic/2009-07/46844851.gif'
remove_empty_feeds = True

auto_cleanup = True

@@ -45,7 +44,7 @@ class AdvancedUserRecipe1279258912(BasicNewsRecipe):
link=link.split('/')[-2]
encoding = {'0B': '.', '0C': '/', '0A': '0', '0F': '=', '0G': '&',
'0D': '?', '0E': '-', '0N': '.com', '0L': 'http:',
'0S':'//'}
'0S':'//', '0H':','}
for k, v in encoding.iteritems():
link = link.replace(k, v)
ans = link
@ -1,6 +1,15 @@
|
||||
"""
|
||||
Pocket Calibre Recipe v1.2
|
||||
Pocket Calibre Recipe v1.3
|
||||
"""
|
||||
from calibre import strftime
|
||||
from calibre.web.feeds.news import BasicNewsRecipe
|
||||
import urllib2
|
||||
import urllib
|
||||
import json
|
||||
import operator
|
||||
import tempfile
|
||||
import re
|
||||
|
||||
__license__ = 'GPL v3'
|
||||
__copyright__ = '''
|
||||
2010, Darko Miletic <darko.miletic at gmail.com>
|
||||
@ -8,9 +17,6 @@ __copyright__ = '''
|
||||
2012, tBunnyMan <Wag That Tail At Me dot com>
|
||||
'''
|
||||
|
||||
from calibre import strftime
|
||||
from calibre.web.feeds.news import BasicNewsRecipe
|
||||
|
||||
|
||||
class Pocket(BasicNewsRecipe):
|
||||
title = 'Pocket'
|
||||
@ -21,109 +27,150 @@ class Pocket(BasicNewsRecipe):
|
||||
read after downloading.'''
|
||||
publisher = 'getpocket.com'
|
||||
category = 'news, custom'
|
||||
oldest_article = 7
|
||||
max_articles_per_feed = 50
|
||||
minimum_articles = 10
|
||||
mark_as_read_after_dl = True
|
||||
#Set this to False for testing
|
||||
mark_as_read_after_dl = False
|
||||
#MUST be either 'oldest' or 'newest'
|
||||
sort_method = 'oldest'
|
||||
#To filter by tag this needs to be a single tag in quotes; IE 'calibre'
|
||||
only_pull_tag = None
|
||||
|
||||
#You don't want to change anything under here unless you REALLY know what you are doing
|
||||
no_stylesheets = True
|
||||
use_embedded_content = False
|
||||
needs_subscription = True
|
||||
INDEX = u'http://getpocket.com'
|
||||
LOGIN = INDEX + u'/l'
|
||||
readList = []
|
||||
articles_are_obfuscated = True
|
||||
apikey = '19eg0e47pbT32z4793Tf021k99Afl889'
|
||||
index_url = u'http://getpocket.com'
|
||||
ajax_url = u'http://getpocket.com/a/x/getArticle.php'
|
||||
read_api_url = index_url + u'/v3/get'
|
||||
modify_api_url = index_url + u'/v3/send'
|
||||
legacy_login_url = index_url + u'/l' # We use this to cheat oAuth
|
||||
articles = []
|
||||
|
||||
|
||||
def get_browser(self):
|
||||
br = BasicNewsRecipe.get_browser(self)
|
||||
if self.username is not None:
|
||||
br.open(self.LOGIN)
|
||||
def get_browser(self, *args, **kwargs):
|
||||
"""
|
||||
We need to pretend to be a recent version of safari for the mac to prevent User-Agent checks
|
||||
Pocket api requires username and password so fail loudly if it's missing from the config.
|
||||
"""
|
||||
br = BasicNewsRecipe.get_browser(self,
|
||||
user_agent='Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_4; en-us) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4')
|
||||
if self.username is not None and self.password is not None:
|
||||
br.open(self.legacy_login_url)
|
||||
br.select_form(nr=0)
|
||||
br['feed_id'] = self.username
|
||||
if self.password is not None:
|
||||
br['password'] = self.password
|
||||
br['password'] = self.password
|
||||
br.submit()
|
||||
else:
|
||||
self.user_error("This Recipe requires authentication, please configured user & pass")
|
||||
return br
|
||||
|
||||
def get_feeds(self):
|
||||
self.report_progress(0, ('Fetching list of pages...'))
|
||||
lfeeds = []
|
||||
i = 1
|
||||
feedurl = self.INDEX + u'/unread/1'
|
||||
while True:
|
||||
title = u'Unread articles, page ' + str(i)
|
||||
lfeeds.insert(0, (title, feedurl))
|
||||
self.report_progress(0, ('Got ') + str(i) + (' pages'))
|
||||
i += 1
|
||||
soup = self.index_to_soup(feedurl)
|
||||
ritem = soup.find('a', attrs={'id':'next', 'class':'active'})
|
||||
if ritem is None:
|
||||
break
|
||||
feedurl = self.INDEX + ritem['href']
|
||||
return lfeeds
|
||||
def get_auth_uri(self):
|
||||
"""Quick function to return the authentication part of the url"""
|
||||
uri = ""
|
||||
uri = u'{0}&apikey={1!s}'.format(uri, self.apikey)
|
||||
if self.username is None or self.password is None:
|
||||
self.user_error("Username or password is blank. Pocket no longer supports blank passwords")
|
||||
else:
|
||||
uri = u'{0}&username={1!s}'.format(uri, self.username)
|
||||
uri = u'{0}&password={1!s}'.format(uri, self.password)
|
||||
return uri
|
||||
|
||||
def get_pull_articles_uri(self):
|
||||
"""Return the part of the uri that has all of the get request settings"""
|
||||
uri = ""
|
||||
uri = u'{0}&state={1}'.format(uri, u'unread') # TODO This could be modded to allow pulling archives
|
||||
uri = u'{0}&contentType={1}'.format(uri, u'article') # TODO This COULD return images too
|
||||
uri = u'{0}&sort={1}'.format(uri, self.sort_method)
|
||||
uri = u'{0}&count={1!s}'.format(uri, self.max_articles_per_feed)
|
||||
if self.only_pull_tag is not None:
|
||||
uri = u'{0}tag={1}'.format(uri, self.only_pull_tag)
|
||||
return uri
|
||||
|
||||
def parse_index(self):
|
||||
totalfeeds = []
|
||||
articlesToGrab = self.max_articles_per_feed
|
||||
lfeeds = self.get_feeds()
|
||||
for feedobj in lfeeds:
|
||||
if articlesToGrab < 1:
|
||||
break
|
||||
feedtitle, feedurl = feedobj
|
||||
self.report_progress(0, ('Fetching feed')+' %s...'%(feedtitle if feedtitle else feedurl))
|
||||
articles = []
|
||||
soup = self.index_to_soup(feedurl)
|
||||
ritem = soup.find('ul', attrs={'id':'list'})
|
||||
if ritem is None:
|
||||
self.log.exception("Page %s skipped: invalid HTML" % (feedtitle if feedtitle else feedurl))
|
||||
continue
|
||||
for item in reversed(ritem.findAll('li')):
|
||||
if articlesToGrab < 1:
|
||||
break
|
||||
else:
|
||||
articlesToGrab -= 1
|
||||
description = ''
|
||||
atag = item.find('a', attrs={'class':'text'})
|
||||
if atag and atag.has_key('href'):
|
||||
url = self.INDEX + atag['href']
|
||||
title = self.tag_to_string(item.div)
|
||||
date = strftime(self.timefmt)
|
||||
articles.append({
|
||||
'title' :title
|
||||
,'date' :date
|
||||
,'url' :url
|
||||
,'description':description
|
||||
})
|
||||
readLink = item.find('a', attrs={'class':'check'})['href']
|
||||
self.readList.append(readLink)
|
||||
totalfeeds.append((feedtitle, articles))
|
||||
if len(self.readList) < self.minimum_articles:
|
||||
pocket_feed = []
|
||||
fetch_url = u"{0}?{1}{2}".format(
|
||||
self.read_api_url,
|
||||
self.get_auth_uri(),
|
||||
self.get_pull_articles_uri()
|
||||
)
|
||||
try:
|
||||
request = urllib2.Request(fetch_url)
|
||||
response = urllib2.urlopen(request)
|
||||
pocket_feed = json.load(response)['list']
|
||||
except urllib2.HTTPError as e:
|
||||
self.log.exception("Pocket returned an error: {0}\nurl: {1}".format(e, fetch_url))
|
||||
return []
|
||||
except urllib2.URLError as e:
|
||||
self.log.exception("Unable to connect to getpocket.com's api: {0}\nurl: {1}".format(e, fetch_url))
|
||||
return []
|
||||
if len(pocket_feed) < self.minimum_articles:
|
||||
self.mark_as_read_after_dl = False
|
||||
if hasattr(self, 'abort_recipe_processing'):
|
||||
self.abort_recipe_processing("Only %d articles retrieved, minimum_articles not reached" % len(self.readList))
|
||||
else:
|
||||
self.log.exception("Only %d articles retrieved, minimum_articles not reached" % len(self.readList))
|
||||
return []
|
||||
return totalfeeds
|
||||
self.user_error("Only {0} articles retrieved, minimum_articles not reached".format(len(pocket_feed)))
|
||||
|
||||
def mark_as_read(self, markList):
|
||||
br = self.get_browser()
|
||||
for link in markList:
|
||||
url = self.INDEX + link
|
||||
print 'Marking read: ', url
|
||||
response = br.open(url)
|
||||
print response.info()
|
||||
for pocket_article in pocket_feed.iteritems():
|
||||
self.articles.append({
|
||||
'item_id': pocket_article[0],
|
||||
'title': pocket_article[1]['resolved_title'],
|
||||
'date': pocket_article[1]['time_updated'],
|
||||
'url': u'{0}/a/read/{1}'.format(self.index_url, pocket_article[0]),
|
||||
'real_url': pocket_article[1]['resolved_url'],
|
||||
'description': pocket_article[1]['excerpt'],
|
||||
'sort': pocket_article[1]['sort_id']
|
||||
})
|
||||
self.articles = sorted(self.articles, key=operator.itemgetter('sort'))
|
||||
print self.articles
|
||||
return [("My Pocket Articles for {0}".format(strftime('[%I:%M %p]')), self.articles)]
|
||||
|
||||
def get_obfuscated_article(self, url):
|
||||
soup = self.index_to_soup(url)
|
||||
formcheck_script_tag = soup.find('script', text=re.compile("formCheck"))
|
||||
form_check = formcheck_script_tag.split("=")[1].replace("'", "").replace(";", "").strip()
|
||||
article_id = url.split("/")[-1]
|
||||
data = urllib.urlencode({'itemId': article_id, 'formCheck': form_check})
|
||||
response = self.browser.open(self.ajax_url, data)
|
||||
article_json = json.load(response)['article']['article']
|
||||
with tempfile.NamedTemporaryFile(delete=False) as tf:
|
||||
tf.write(article_json)
|
||||
return tf.name
|
||||
|
||||
def mark_as_read(self, mark_list):
|
||||
formatted_list = []
|
||||
for article_id in mark_list:
|
||||
formatted_list.append({
|
||||
'action': 'archive',
|
||||
'item_id': article_id
|
||||
})
|
||||
command = {
|
||||
'actions': formatted_list
|
||||
}
|
||||
mark_read_url = u'{0}?{1}'.format(
|
||||
self.modify_api_url,
|
||||
self.get_auth_uri()
|
||||
)
|
||||
try:
|
||||
request = urllib2.Request(mark_read_url, json.dumps(command))
|
||||
response = urllib2.urlopen(request)
|
||||
print u'response = {0}'.format(response.info())
|
||||
except urllib2.HTTPError as e:
|
||||
self.log.exception('Pocket returned an error while archiving articles: {0}'.format(e))
|
||||
return []
|
||||
except urllib2.URLError as e:
|
||||
self.log.exception("Unable to connect to getpocket.com's modify api: {0}".format(e))
|
||||
return []
|
||||
|
||||
def cleanup(self):
|
||||
if self.mark_as_read_after_dl:
|
||||
self.mark_as_read(self.readList)
|
||||
self.mark_as_read([x[1]['item_id'] for x in self.articles])
|
||||
else:
|
||||
pass
|
||||
|
||||
def default_cover(self, cover_file):
|
||||
'''
|
||||
"""
|
||||
Create a generic cover for recipes that don't have a cover
|
||||
This override adds time to the cover
|
||||
'''
|
||||
"""
|
||||
try:
|
||||
from calibre.ebooks import calibre_cover
|
||||
title = self.title if isinstance(self.title, unicode) else \
|
||||
@ -137,3 +184,12 @@ class Pocket(BasicNewsRecipe):
|
||||
self.log.exception('Failed to generate default cover')
|
||||
return False
|
||||
return True
|
||||
|
||||
def user_error(self, error_message):
|
||||
if hasattr(self, 'abort_recipe_processing'):
|
||||
self.abort_recipe_processing(error_message)
|
||||
else:
|
||||
self.log.exception(error_message)
|
||||
raise RuntimeError(error_message)
|
||||
|
||||
# vim:ft=python
|
||||
|
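For reference, a condensed sketch of the request the updated Pocket recipe assembles in parse_index from get_auth_uri() and get_pull_articles_uri(); APIKEY, USER and PASS are placeholders for the recipe's apikey, username and password:

    # Sketch of the Pocket v3 'get' call built by the recipe above; the
    # credentials are placeholders, and the count mirrors max_articles_per_feed.
    import json
    import urllib2

    url = ('http://getpocket.com/v3/get'
           '?&apikey=APIKEY&username=USER&password=PASS'
           '&state=unread&contentType=article&sort=oldest&count=50')
    pocket_feed = json.load(urllib2.urlopen(urllib2.Request(url)))['list']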
@@ -25,7 +25,7 @@ class Smithsonian(BasicNewsRecipe):
soup = self.index_to_soup(current_issue_url)

#Go to the main body
div = soup.find ('div', attrs={'id':'article-body'})
div = soup.find('div', attrs={'id':'article-body'})

#Find date
date = re.sub('.*\:\W*', "", self.tag_to_string(div.find('h2')).strip())
@@ -49,16 +49,20 @@ class Smithsonian(BasicNewsRecipe):
self.log('Found section:', section_title)
else:
link=post.find('a',href=True)
article_cat=link.findPrevious('p', attrs={'class':'article-cat'})
url=link['href']+'?c=y&story=fullstory'
description=self.tag_to_string(post.find('p')).strip()
desc=re.sub('\sBy\s.*', '', description, re.DOTALL)
author=re.sub('.*By\s', '', description, re.DOTALL)
title=self.tag_to_string(link).strip()+ u' (%s)'%author
description=self.tag_to_string(post.findAll('p')[-1]).strip()
title=self.tag_to_string(link).strip()
if article_cat is not None:
title += u' (%s)'%self.tag_to_string(article_cat).strip()
self.log('\tFound article:', title)
articles.append({'title':title, 'url':url, 'description':desc, 'date':''})
articles.append({'title':title, 'url':url, 'description':description, 'date':''})

if articles:
feeds[section_title] = articles
if articles:
if section_title not in feeds:
feeds[section_title] = []
feeds[section_title] += articles
articles = []

ans = [(key, val) for key, val in feeds.iteritems()]
return ans
@@ -1,68 +1,63 @@
import re
from calibre.web.feeds.recipes import BasicNewsRecipe
from collections import OrderedDict

class TNR(BasicNewsRecipe):

title = 'The New Republic'
__author__ = 'Rick Shang'

description = 'The New Republic is a journal of opinion with an emphasis on politics and domestic and international affairs. It carries feature articles by staff and contributing editors. The second half of each issue is devoted to book and the arts, theater, motion pictures, music and art.'
language = 'en'
category = 'news'
encoding = 'UTF-8'
remove_tags = [dict(attrs={'class':['print-logo','print-site_name','print-hr']})]
no_javascript = True
no_stylesheets = True


def parse_index(self):

#Go to the issue
soup0 = self.index_to_soup('http://www.tnr.com/magazine-issues')
issue = soup0.find('div',attrs={'id':'current_issue'})

#Find date
date = self.tag_to_string(issue.find('div',attrs={'class':'date'})).strip()
self.timefmt = u' [%s]'%date

#Go to the main body
current_issue_url = 'http://www.tnr.com' + issue.find('a', href=True)['href']
soup = self.index_to_soup(current_issue_url)
div = soup.find ('div', attrs={'class':'article_detail_body'})


#Find cover
self.cover_url = div.find('img',src=True)['src']

feeds = OrderedDict()
section_title = ''
subsection_title = ''
for post in div.findAll('p'):
articles = []
em=post.find('em')
b=post.find('b')
a=post.find('a',href=True)
p=post.find('img', src=True)
#Find cover
if p is not None:
self.cover_url = p['src'].strip()
if em is not None:
section_title = self.tag_to_string(em).strip()
subsection_title = ''
elif b is not None:
subsection_title=self.tag_to_string(b).strip()
elif a is not None:
prefix = (subsection_title+': ') if subsection_title else ''
url=re.sub('www.tnr.com','www.tnr.com/print', a['href'])
author=re.sub('.*by\s', '', self.tag_to_string(post), re.DOTALL)
title=prefix + self.tag_to_string(a).strip()+ u' (%s)'%author
articles.append({'title':title, 'url':url, 'description':'', 'date':''})

if articles:
if section_title not in feeds:
feeds[section_title] = []
feeds[section_title] += articles
ans = [(key, val) for key, val in feeds.iteritems()]
return ans
import re
from calibre.web.feeds.recipes import BasicNewsRecipe


class TNR(BasicNewsRecipe):

title = 'The New Republic'
__author__ = 'Krittika Goyal'

description = '''The New Republic is a journal of opinion with an emphasis
on politics and domestic and international affairs. It carries feature
articles by staff and contributing editors. The second half of each issue
is devoted to book and the arts, theater, motion pictures, music and art.'''

language = 'en'
encoding = 'UTF-8'
needs_subscription = True

preprocess_regexps = [
(re.compile(r'<!--.*?-->', re.DOTALL), lambda m: ''),
(re.compile(r'<script.*?</script>', re.DOTALL), lambda m: ''),
]

def get_browser(self):
br = BasicNewsRecipe.get_browser(self)
br.open('http://www.newrepublic.com/user')
br.select_form(nr=1)
try:
br['user'] = self.username
except:
br['name'] = self.username
br['pass'] = self.password
self.log('Logging in...')
raw = br.submit().read()
if 'SIGN OUT' not in raw:
raise ValueError('Failed to log in to tnr.com, check your username and password')
self.log('Logged in successfully')
return br

def parse_index(self):
raw = self.index_to_soup('http://www.newrepublic.com/current-issue', raw=True)
# raw = self.index_to_soup(open('/t/raw.html').read().decode('utf-8'), raw=True)
for pat, sub in self.preprocess_regexps:
raw = pat.sub(sub, raw)
soup = self.index_to_soup(raw)
feed_title = 'The New Republic Magazine Articles'

articles = []
for div in soup.findAll('div', attrs={'class':lambda x: x and 'field-item' in x.split()}):
a = div.find('a', href=True, attrs={'class':lambda x: x != 'author'})
if a is not None:
art_title = self.tag_to_string(a)
url = a.get('href')
num = re.search(r'/(\d+)/', url)
if num is not None:
art = num.group(1)
url = 'http://www.newrepublic.com/node/%s/print'%art
self.log.info('\tFound article:', art_title, 'at', url)
article = {'title':art_title, 'url':url, 'description':'', 'date':''}
articles.append(article)

return [(feed_title, articles)]
@@ -13,7 +13,7 @@ class XkcdCom(BasicNewsRecipe):
use_embedded_content = False
oldest_article = 60
# add image and text
# add an horizontal line after the question
# add an horizontal line after the question
preprocess_regexps = [
(re.compile(r'(<img.*title=")([^"]+)(".*>)'),
lambda m: '<div>%s%s<p id="photo_text">(%s)</p></div>' % (m.group(1), m.group(3), m.group(2))),
@@ -22,3 +22,6 @@ class XkcdCom(BasicNewsRecipe):
]

extra_css = "#photo_text{font-size:small;}"

feeds = [(u'What If...', u'http://what-if.xkcd.com/feed.atom')]
@@ -1,4 +1,4 @@
[flake8]
max-line-length = 160
builtins = _,dynamic_property,__,P,I,lopen,icu_lower,icu_upper,icu_title,ngettext
ignore = E12,E22,E231,E301,E302,E304,E401,W391
ignore = E12,E203,E22,E231,E241,E301,E302,E304,E401,W391
2 setup.py
@@ -63,7 +63,7 @@ def main(args=sys.argv):

parser = option_parser()
command.add_all_options(parser)
parser.set_usage('Usage: python setup.py %s [options]\n\n'%args[1]+\
parser.set_usage('Usage: python setup.py %s [options]\n\n'%args[1]+
command.description)

opts, args = parser.parse_args(args)
@ -10,13 +10,13 @@ msgstr ""
|
||||
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
|
||||
"devel@lists.alioth.debian.org>\n"
|
||||
"POT-Creation-Date: 2011-11-25 14:01+0000\n"
|
||||
"PO-Revision-Date: 2013-04-24 08:47+0000\n"
|
||||
"PO-Revision-Date: 2013-04-29 11:25+0000\n"
|
||||
"Last-Translator: Tadeáš Pařík <tadeas.parik@gmail.com>\n"
|
||||
"Language-Team: Czech <debian-l10n-czech@lists.debian.org>\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"X-Launchpad-Export-Date: 2013-04-25 05:20+0000\n"
|
||||
"X-Launchpad-Export-Date: 2013-04-30 05:16+0000\n"
|
||||
"X-Generator: Launchpad (build 16580)\n"
|
||||
"Language: cs\n"
|
||||
|
||||
@ -4354,171 +4354,171 @@ msgstr "pirlatapa"
|
||||
|
||||
#. name for bxj
|
||||
msgid "Bayungu"
|
||||
msgstr ""
|
||||
msgstr "bayungu"
|
||||
|
||||
#. name for bxk
|
||||
msgid "Bukusu"
|
||||
msgstr ""
|
||||
msgstr "bukusu"
|
||||
|
||||
#. name for bxl
|
||||
msgid "Jalkunan"
|
||||
msgstr ""
|
||||
msgstr "jalkunan"
|
||||
|
||||
#. name for bxm
|
||||
msgid "Buriat; Mongolia"
|
||||
msgstr ""
|
||||
msgstr "buriat; Mongolsko"
|
||||
|
||||
#. name for bxn
|
||||
msgid "Burduna"
|
||||
msgstr ""
|
||||
msgstr "burduna"
|
||||
|
||||
#. name for bxo
|
||||
msgid "Barikanchi"
|
||||
msgstr ""
|
||||
msgstr "barikanchi"
|
||||
|
||||
#. name for bxp
|
||||
msgid "Bebil"
|
||||
msgstr ""
|
||||
msgstr "bebil"
|
||||
|
||||
#. name for bxq
|
||||
msgid "Beele"
|
||||
msgstr ""
|
||||
msgstr "beele"
|
||||
|
||||
#. name for bxr
|
||||
msgid "Buriat; Russia"
|
||||
msgstr ""
|
||||
msgstr "buriat; Rusko"
|
||||
|
||||
#. name for bxs
|
||||
msgid "Busam"
|
||||
msgstr ""
|
||||
msgstr "busam"
|
||||
|
||||
#. name for bxu
|
||||
msgid "Buriat; China"
|
||||
msgstr ""
|
||||
msgstr "buriat; Čína"
|
||||
|
||||
#. name for bxv
|
||||
msgid "Berakou"
|
||||
msgstr ""
|
||||
msgstr "berakou"
|
||||
|
||||
#. name for bxw
|
||||
msgid "Bankagooma"
|
||||
msgstr ""
|
||||
msgstr "bankagooma"
|
||||
|
||||
#. name for bxx
|
||||
msgid "Borna (Democratic Republic of Congo)"
|
||||
msgstr ""
|
||||
msgstr "borna (Demokratická republika Kongo)"
|
||||
|
||||
#. name for bxz
|
||||
msgid "Binahari"
|
||||
msgstr ""
|
||||
msgstr "binahari"
|
||||
|
||||
#. name for bya
|
||||
msgid "Batak"
|
||||
msgstr ""
|
||||
msgstr "batak"
|
||||
|
||||
#. name for byb
|
||||
msgid "Bikya"
|
||||
msgstr ""
|
||||
msgstr "bikya"
|
||||
|
||||
#. name for byc
|
||||
msgid "Ubaghara"
|
||||
msgstr ""
|
||||
msgstr "ubaghara"
|
||||
|
||||
#. name for byd
|
||||
msgid "Benyadu'"
|
||||
msgstr ""
|
||||
msgstr "benyadu'"
|
||||
|
||||
#. name for bye
|
||||
msgid "Pouye"
|
||||
msgstr ""
|
||||
msgstr "pouye"
|
||||
|
||||
#. name for byf
|
||||
msgid "Bete"
|
||||
msgstr ""
|
||||
msgstr "bete"
|
||||
|
||||
#. name for byg
|
||||
msgid "Baygo"
|
||||
msgstr ""
|
||||
msgstr "baygo"
|
||||
|
||||
#. name for byh
|
||||
msgid "Bhujel"
|
||||
msgstr ""
|
||||
msgstr "bhujel"
|
||||
|
||||
#. name for byi
|
||||
msgid "Buyu"
|
||||
msgstr ""
|
||||
msgstr "buyu"
|
||||
|
||||
#. name for byj
|
||||
msgid "Bina (Nigeria)"
|
||||
msgstr ""
|
||||
msgstr "bina (Nigérie)"
|
||||
|
||||
#. name for byk
|
||||
msgid "Biao"
|
||||
msgstr ""
|
||||
msgstr "biao"
|
||||
|
||||
#. name for byl
|
||||
msgid "Bayono"
|
||||
msgstr ""
|
||||
msgstr "bayono"
|
||||
|
||||
#. name for bym
|
||||
msgid "Bidyara"
|
||||
msgstr ""
|
||||
msgstr "bidyara"
|
||||
|
||||
#. name for byn
|
||||
msgid "Bilin"
|
||||
msgstr ""
|
||||
msgstr "bilin"
|
||||
|
||||
#. name for byo
|
||||
msgid "Biyo"
|
||||
msgstr ""
|
||||
msgstr "biyo"
|
||||
|
||||
#. name for byp
|
||||
msgid "Bumaji"
|
||||
msgstr ""
|
||||
msgstr "bumaji"
|
||||
|
||||
#. name for byq
|
||||
msgid "Basay"
|
||||
msgstr ""
|
||||
msgstr "basay"
|
||||
|
||||
#. name for byr
|
||||
msgid "Baruya"
|
||||
msgstr ""
|
||||
msgstr "baruya"
|
||||
|
||||
#. name for bys
|
||||
msgid "Burak"
|
||||
msgstr ""
|
||||
msgstr "burak"
|
||||
|
||||
#. name for byt
|
||||
msgid "Berti"
|
||||
msgstr ""
|
||||
msgstr "berti"
|
||||
|
||||
#. name for byv
|
||||
msgid "Medumba"
|
||||
msgstr ""
|
||||
msgstr "medumba"
|
||||
|
||||
#. name for byw
|
||||
msgid "Belhariya"
|
||||
msgstr ""
|
||||
msgstr "belhariya"
|
||||
|
||||
#. name for byx
|
||||
msgid "Qaqet"
|
||||
msgstr ""
|
||||
msgstr "qaqet"
|
||||
|
||||
#. name for byy
|
||||
msgid "Buya"
|
||||
msgstr ""
|
||||
msgstr "buya"
|
||||
|
||||
#. name for byz
|
||||
msgid "Banaro"
|
||||
msgstr ""
|
||||
msgstr "banaro"
|
||||
|
||||
#. name for bza
|
||||
msgid "Bandi"
|
||||
msgstr ""
|
||||
msgstr "bandi"
|
||||
|
||||
#. name for bzb
|
||||
msgid "Andio"
|
||||
msgstr ""
|
||||
msgstr "andio"
|
||||
|
||||
#. name for bzc
|
||||
msgid "Malagasy; Southern Betsimisaraka"
|
||||
@ -4526,27 +4526,27 @@ msgstr ""
|
||||
|
||||
#. name for bzd
|
||||
msgid "Bribri"
|
||||
msgstr ""
|
||||
msgstr "bribri"
|
||||
|
||||
#. name for bze
|
||||
msgid "Bozo; Jenaama"
|
||||
msgstr ""
|
||||
msgstr "bozo; Jenaama"
|
||||
|
||||
#. name for bzf
|
||||
msgid "Boikin"
|
||||
msgstr ""
|
||||
msgstr "Boikin"
|
||||
|
||||
#. name for bzg
|
||||
msgid "Babuza"
|
||||
msgstr ""
|
||||
msgstr "babuza"
|
||||
|
||||
#. name for bzh
|
||||
msgid "Buang; Mapos"
|
||||
msgstr ""
|
||||
msgstr "buang; mapos"
|
||||
|
||||
#. name for bzi
|
||||
msgid "Bisu"
|
||||
msgstr ""
|
||||
msgstr "bisu"
|
||||
|
||||
#. name for bzj
|
||||
msgid "Kriol English; Belize"
|
||||
@ -4554,7 +4554,7 @@ msgstr ""
|
||||
|
||||
#. name for bzk
|
||||
msgid "Creole English; Nicaragua"
|
||||
msgstr ""
|
||||
msgstr "kreolská angličtina; Nikaragua"
|
||||
|
||||
#. name for bzl
|
||||
msgid "Boano (Sulawesi)"
|
||||
@ -4562,27 +4562,27 @@ msgstr ""
|
||||
|
||||
#. name for bzm
|
||||
msgid "Bolondo"
|
||||
msgstr ""
|
||||
msgstr "bolondo"
|
||||
|
||||
#. name for bzn
|
||||
msgid "Boano (Maluku)"
|
||||
msgstr ""
|
||||
msgstr "boano (Maluku)"
|
||||
|
||||
#. name for bzo
|
||||
msgid "Bozaba"
|
||||
msgstr ""
|
||||
msgstr "bozaba"
|
||||
|
||||
#. name for bzp
|
||||
msgid "Kemberano"
|
||||
msgstr ""
|
||||
msgstr "kemberano"
|
||||
|
||||
#. name for bzq
|
||||
msgid "Buli (Indonesia)"
|
||||
msgstr ""
|
||||
msgstr "buli (Indonésie)"
|
||||
|
||||
#. name for bzr
|
||||
msgid "Biri"
|
||||
msgstr ""
|
||||
msgstr "biri"
|
||||
|
||||
#. name for bzs
|
||||
msgid "Brazilian Sign Language"
|
||||
@ -4590,43 +4590,43 @@ msgstr "brazilský znakový jazyk"
|
||||
|
||||
#. name for bzt
|
||||
msgid "Brithenig"
|
||||
msgstr ""
|
||||
msgstr "brithenig"
|
||||
|
||||
#. name for bzu
|
||||
msgid "Burmeso"
|
||||
msgstr ""
|
||||
msgstr "burmeso"
|
||||
|
||||
#. name for bzv
|
||||
msgid "Bebe"
|
||||
msgstr ""
|
||||
msgstr "bebe"
|
||||
|
||||
#. name for bzw
|
||||
msgid "Basa (Nigeria)"
|
||||
msgstr ""
|
||||
msgstr "basa (Nigérie)"
|
||||
|
||||
#. name for bzx
|
||||
msgid "Bozo; Kɛlɛngaxo"
|
||||
msgstr ""
|
||||
msgstr "bozo; Kɛlɛngaxo"
|
||||
|
||||
#. name for bzy
|
||||
msgid "Obanliku"
|
||||
msgstr ""
|
||||
msgstr "obanliku"
|
||||
|
||||
#. name for bzz
|
||||
msgid "Evant"
|
||||
msgstr ""
|
||||
msgstr "evant"
|
||||
|
||||
#. name for caa
|
||||
msgid "Chortí"
|
||||
msgstr ""
|
||||
msgstr "chortí"
|
||||
|
||||
#. name for cab
|
||||
msgid "Garifuna"
|
||||
msgstr ""
|
||||
msgstr "garifuna"
|
||||
|
||||
#. name for cac
|
||||
msgid "Chuj"
|
||||
msgstr ""
|
||||
msgstr "chuj"
|
||||
|
||||
#. name for cad
|
||||
msgid "Caddo"
|
||||
@ -4634,59 +4634,59 @@ msgstr "caddo"
|
||||
|
||||
#. name for cae
|
||||
msgid "Lehar"
|
||||
msgstr ""
|
||||
msgstr "lehar"
|
||||
|
||||
#. name for caf
|
||||
msgid "Carrier; Southern"
|
||||
msgstr ""
|
||||
msgstr "carrier; jižní"
|
||||
|
||||
#. name for cag
|
||||
msgid "Nivaclé"
|
||||
msgstr ""
|
||||
msgstr "nivaclé"
|
||||
|
||||
#. name for cah
|
||||
msgid "Cahuarano"
|
||||
msgstr ""
|
||||
msgstr "cahuarano"
|
||||
|
||||
#. name for caj
|
||||
msgid "Chané"
|
||||
msgstr ""
|
||||
msgstr "chané"
|
||||
|
||||
#. name for cak
|
||||
msgid "Kaqchikel"
|
||||
msgstr ""
|
||||
msgstr "kaqchikel"
|
||||
|
||||
#. name for cal
|
||||
msgid "Carolinian"
|
||||
msgstr ""
|
||||
msgstr "carolinian"
|
||||
|
||||
#. name for cam
|
||||
msgid "Cemuhî"
|
||||
msgstr ""
|
||||
msgstr "cemuhî"
|
||||
|
||||
#. name for can
|
||||
msgid "Chambri"
|
||||
msgstr ""
|
||||
msgstr "chambri"
|
||||
|
||||
#. name for cao
|
||||
msgid "Chácobo"
|
||||
msgstr ""
|
||||
msgstr "chácobo"
|
||||
|
||||
#. name for cap
|
||||
msgid "Chipaya"
|
||||
msgstr ""
|
||||
msgstr "chipaya"
|
||||
|
||||
#. name for caq
|
||||
msgid "Nicobarese; Car"
|
||||
msgstr ""
|
||||
msgstr "nicobarese; Car"
|
||||
|
||||
#. name for car
|
||||
msgid "Carib; Galibi"
|
||||
msgstr ""
|
||||
msgstr "carib; Galibi"
|
||||
|
||||
#. name for cas
|
||||
msgid "Tsimané"
|
||||
msgstr ""
|
||||
msgstr "tsimané"
|
||||
|
||||
#. name for cat
|
||||
msgid "Catalan"
|
||||
@ -4694,71 +4694,71 @@ msgstr "katalánština"
|
||||
|
||||
#. name for cav
|
||||
msgid "Cavineña"
|
||||
msgstr ""
|
||||
msgstr "cavineña"
|
||||
|
||||
#. name for caw
|
||||
msgid "Callawalla"
|
||||
msgstr ""
|
||||
msgstr "callawalla"
|
||||
|
||||
#. name for cax
|
||||
msgid "Chiquitano"
|
||||
msgstr ""
|
||||
msgstr "chiquitano"
|
||||
|
||||
#. name for cay
|
||||
msgid "Cayuga"
|
||||
msgstr ""
|
||||
msgstr "cayuga"
|
||||
|
||||
#. name for caz
|
||||
msgid "Canichana"
|
||||
msgstr ""
|
||||
msgstr "canichana"
|
||||
|
||||
#. name for cbb
|
||||
msgid "Cabiyarí"
|
||||
msgstr ""
|
||||
msgstr "cabiyarí"
|
||||
|
||||
#. name for cbc
|
||||
msgid "Carapana"
|
||||
msgstr ""
|
||||
msgstr "carapana"
|
||||
|
||||
#. name for cbd
|
||||
msgid "Carijona"
|
||||
msgstr ""
|
||||
msgstr "carijona"
|
||||
|
||||
#. name for cbe
|
||||
msgid "Chipiajes"
|
||||
msgstr ""
|
||||
msgstr "chipiajes"
|
||||
|
||||
#. name for cbg
|
||||
msgid "Chimila"
|
||||
msgstr ""
|
||||
msgstr "chimila"
|
||||
|
||||
#. name for cbh
|
||||
msgid "Cagua"
|
||||
msgstr ""
|
||||
msgstr "cagua"
|
||||
|
||||
#. name for cbi
|
||||
msgid "Chachi"
|
||||
msgstr ""
|
||||
msgstr "chachi"
|
||||
|
||||
#. name for cbj
|
||||
msgid "Ede Cabe"
|
||||
msgstr ""
|
||||
msgstr "ede cabe"
|
||||
|
||||
#. name for cbk
|
||||
msgid "Chavacano"
|
||||
msgstr ""
|
||||
msgstr "chavacano"
|
||||
|
||||
#. name for cbl
|
||||
msgid "Chin; Bualkhaw"
|
||||
msgstr ""
|
||||
msgstr "chin; Bualkhaw"
|
||||
|
||||
#. name for cbn
|
||||
msgid "Nyahkur"
|
||||
msgstr ""
|
||||
msgstr "nyahkur"
|
||||
|
||||
#. name for cbo
|
||||
msgid "Izora"
|
||||
msgstr ""
|
||||
msgstr "izora"
|
||||
|
||||
#. name for cbr
|
||||
msgid "Cashibo-Cacataibo"
|
||||
@ -4782,15 +4782,15 @@ msgstr ""
|
||||
|
||||
#. name for cbw
|
||||
msgid "Kinabalian"
|
||||
msgstr ""
|
||||
msgstr "kinabalian"
|
||||
|
||||
#. name for cby
|
||||
msgid "Carabayo"
|
||||
msgstr ""
|
||||
msgstr "carabayo"
|
||||
|
||||
#. name for cca
|
||||
msgid "Cauca"
|
||||
msgstr ""
|
||||
msgstr "cauca"
|
||||
|
||||
#. name for ccc
|
||||
msgid "Chamicuro"
|
||||
@ -4802,19 +4802,19 @@ msgstr ""
|
||||
|
||||
#. name for cce
|
||||
msgid "Chopi"
|
||||
msgstr ""
|
||||
msgstr "chopi"
|
||||
|
||||
#. name for ccg
|
||||
msgid "Daka; Samba"
|
||||
msgstr ""
|
||||
msgstr "daka; Samba"
|
||||
|
||||
#. name for cch
|
||||
msgid "Atsam"
|
||||
msgstr ""
|
||||
msgstr "atsam"
|
||||
|
||||
#. name for ccj
|
||||
msgid "Kasanga"
|
||||
msgstr ""
|
||||
msgstr "kasanga"
|
||||
|
||||
#. name for ccl
|
||||
msgid "Cutchi-Swahili"
|
||||
@ -4822,7 +4822,7 @@ msgstr ""
|
||||
|
||||
#. name for ccm
|
||||
msgid "Creole Malay; Malaccan"
|
||||
msgstr ""
|
||||
msgstr "kreolská malajština; Malaccan"
|
||||
|
||||
#. name for cco
|
||||
msgid "Chinantec; Comaltepec"
|
||||
@ -4834,11 +4834,11 @@ msgstr ""
|
||||
|
||||
#. name for ccq
|
||||
msgid "Chaungtha"
|
||||
msgstr ""
|
||||
msgstr "chaungtha"
|
||||
|
||||
#. name for ccr
|
||||
msgid "Cacaopera"
|
||||
msgstr ""
|
||||
msgstr "cacaopera"
|
||||
|
||||
#. name for cda
|
||||
msgid "Choni"
|
||||
@ -30790,11 +30790,11 @@ msgstr ""
|
||||
|
||||
#. name for zua
|
||||
msgid "Zeem"
|
||||
msgstr ""
|
||||
msgstr "zeem"
|
||||
|
||||
#. name for zuh
|
||||
msgid "Tokano"
|
||||
msgstr ""
|
||||
msgstr "tokano"
|
||||
|
||||
#. name for zul
|
||||
msgid "Zulu"
|
||||
@ -30802,7 +30802,7 @@ msgstr "Zulu"
|
||||
|
||||
#. name for zum
|
||||
msgid "Kumzari"
|
||||
msgstr ""
|
||||
msgstr "kumzari"
|
||||
|
||||
#. name for zun
|
||||
msgid "Zuni"
|
||||
@ -30810,40 +30810,40 @@ msgstr "zunijština"
|
||||
|
||||
#. name for zuy
|
||||
msgid "Zumaya"
|
||||
msgstr ""
|
||||
msgstr "zumaya"
|
||||
|
||||
#. name for zwa
|
||||
msgid "Zay"
|
||||
msgstr ""
|
||||
msgstr "zay"
|
||||
|
||||
#. name for zxx
|
||||
msgid "No linguistic content"
|
||||
msgstr ""
|
||||
msgstr "bez lingvistického obsahu"
|
||||
|
||||
#. name for zyb
|
||||
msgid "Zhuang; Yongbei"
|
||||
msgstr ""
|
||||
msgstr "zhuang; Yongbei"
|
||||
|
||||
#. name for zyg
|
||||
msgid "Zhuang; Yang"
|
||||
msgstr ""
|
||||
msgstr "zhuang; Yang"
|
||||
|
||||
#. name for zyj
|
||||
msgid "Zhuang; Youjiang"
|
||||
msgstr ""
|
||||
msgstr "zhuang; Youjiang"
|
||||
|
||||
#. name for zyn
|
||||
msgid "Zhuang; Yongnan"
|
||||
msgstr ""
|
||||
msgstr "zhuang; Youjiang"
|
||||
|
||||
#. name for zyp
|
||||
msgid "Zyphe"
|
||||
msgstr ""
|
||||
msgstr "zyphe"
|
||||
|
||||
#. name for zza
|
||||
msgid "Zaza"
|
||||
msgstr ""
|
||||
msgstr "zaza"
|
||||
|
||||
#. name for zzj
|
||||
msgid "Zhuang; Zuojiang"
|
||||
msgstr ""
|
||||
msgstr "zhuang; Zuojiang"
|
||||
|
@ -30,23 +30,23 @@ msgstr ""
|
||||
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
|
||||
"devel@lists.alioth.debian.org>\n"
|
||||
"POT-Creation-Date: 2011-11-25 14:01+0000\n"
|
||||
"PO-Revision-Date: 2011-09-27 16:53+0000\n"
|
||||
"Last-Translator: Christian Rose <menthos@menthos.com>\n"
|
||||
"PO-Revision-Date: 2013-04-28 21:03+0000\n"
|
||||
"Last-Translator: Merarom <Unknown>\n"
|
||||
"Language-Team: Swedish <sv@li.org>\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"X-Launchpad-Export-Date: 2011-11-26 05:39+0000\n"
|
||||
"X-Generator: Launchpad (build 14381)\n"
|
||||
"X-Launchpad-Export-Date: 2013-04-29 04:38+0000\n"
|
||||
"X-Generator: Launchpad (build 16580)\n"
|
||||
"Language: sv\n"
|
||||
|
||||
#. name for aaa
|
||||
msgid "Ghotuo"
|
||||
msgstr ""
|
||||
msgstr "Ghotuo"
|
||||
|
||||
#. name for aab
|
||||
msgid "Alumu-Tesu"
|
||||
msgstr ""
|
||||
msgstr "Alumu-Tesu"
|
||||
|
||||
#. name for aac
|
||||
msgid "Ari"
|
||||
@ -58,7 +58,7 @@ msgstr ""
|
||||
|
||||
#. name for aae
|
||||
msgid "Albanian; Arbëreshë"
|
||||
msgstr ""
|
||||
msgstr "Albanska; Arbëreshë"
|
||||
|
||||
#. name for aaf
|
||||
msgid "Aranadan"
|
||||
@ -78,7 +78,7 @@ msgstr ""
|
||||
|
||||
#. name for aak
|
||||
msgid "Ankave"
|
||||
msgstr ""
|
||||
msgstr "Ankave"
|
||||
|
||||
#. name for aal
|
||||
msgid "Afade"
|
||||
@ -94,7 +94,7 @@ msgstr ""
|
||||
|
||||
#. name for aao
|
||||
msgid "Arabic; Algerian Saharan"
|
||||
msgstr ""
|
||||
msgstr "Arabiska;algeriska Sahara"
|
||||
|
||||
#. name for aap
|
||||
msgid "Arára; Pará"
|
||||
@ -114,7 +114,7 @@ msgstr ""
|
||||
|
||||
#. name for aat
|
||||
msgid "Albanian; Arvanitika"
|
||||
msgstr ""
|
||||
msgstr "Albanska; Arvanitika"
|
||||
|
||||
#. name for aau
|
||||
msgid "Abau"
|
||||
@ -218,7 +218,7 @@ msgstr ""
|
||||
|
||||
#. name for abv
|
||||
msgid "Arabic; Baharna"
|
||||
msgstr ""
|
||||
msgstr "Arabiska; Baharna"
|
||||
|
||||
#. name for abw
|
||||
msgid "Pal"
|
||||
@ -311,7 +311,7 @@ msgstr ""
|
||||
|
||||
#. name for acw
|
||||
msgid "Arabic; Hijazi"
|
||||
msgstr ""
|
||||
msgstr "Arabiska; Hijazi"
|
||||
|
||||
#. name for acx
|
||||
msgid "Arabic; Omani"
|
||||
@ -319,7 +319,7 @@ msgstr ""
|
||||
|
||||
#. name for acy
|
||||
msgid "Arabic; Cypriot"
|
||||
msgstr ""
|
||||
msgstr "Arabiska; Cypriotiska"
|
||||
|
||||
#. name for acz
|
||||
msgid "Acheron"
|
||||
@ -343,7 +343,7 @@ msgstr ""
|
||||
|
||||
#. name for adf
|
||||
msgid "Arabic; Dhofari"
|
||||
msgstr ""
|
||||
msgstr "Arabiska; Dhofari"
|
||||
|
||||
#. name for adg
|
||||
msgid "Andegerebinha"
|
||||
@ -419,11 +419,11 @@ msgstr ""
|
||||
|
||||
#. name for aeb
|
||||
msgid "Arabic; Tunisian"
|
||||
msgstr ""
|
||||
msgstr "Arabiska; Tunisiska"
|
||||
|
||||
#. name for aec
|
||||
msgid "Arabic; Saidi"
|
||||
msgstr ""
|
||||
msgstr "Arabiska,; Saidi"
|
||||
|
||||
#. name for aed
|
||||
msgid "Argentine Sign Language"
|
||||
@ -479,7 +479,7 @@ msgstr ""
|
||||
|
||||
#. name for afb
|
||||
msgid "Arabic; Gulf"
|
||||
msgstr ""
|
||||
msgstr "Arabiska,; Gulf"
|
||||
|
||||
#. name for afd
|
||||
msgid "Andai"
|
||||
@ -803,7 +803,7 @@ msgstr ""
|
||||
|
||||
#. name for ajt
|
||||
msgid "Arabic; Judeo-Tunisian"
|
||||
msgstr ""
|
||||
msgstr "Arabiska; judisk-tunisiska"
|
||||
|
||||
#. name for aju
|
||||
msgid "Arabic; Judeo-Moroccan"
|
||||
@ -963,7 +963,7 @@ msgstr ""
|
||||
|
||||
#. name for aln
|
||||
msgid "Albanian; Gheg"
|
||||
msgstr ""
|
||||
msgstr "Albanska; Gheg"
|
||||
|
||||
#. name for alo
|
||||
msgid "Larike-Wakasihu"
|
||||
@ -9431,7 +9431,7 @@ msgstr ""
|
||||
|
||||
#. name for hlb
|
||||
msgid "Halbi"
|
||||
msgstr ""
|
||||
msgstr "Halbi"
|
||||
|
||||
#. name for hld
|
||||
msgid "Halang Doan"
|
||||
|
@@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
numeric_version = (0, 9, 28)
numeric_version = (0, 9, 29)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"
@ -68,7 +68,7 @@ class TXT2TXTZ(FileTypePlugin):
|
||||
images.append(path)
|
||||
|
||||
# Markdown inline
|
||||
for m in re.finditer(ur'(?mu)\!\[([^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*)\]\s*\((?P<path>[^\)]*)\)', txt):
|
||||
for m in re.finditer(ur'(?mu)\!\[([^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*)\]\s*\((?P<path>[^\)]*)\)', txt): # noqa
|
||||
path = m.group('path')
|
||||
if path and not os.path.isabs(path) and guess_type(path)[0] in OEB_IMAGES and os.path.exists(os.path.join(base_dir, path)):
|
||||
images.append(path)
|
||||
@ -78,7 +78,7 @@ class TXT2TXTZ(FileTypePlugin):
|
||||
for m in re.finditer(ur'(?mu)^(\ ?\ ?\ ?)\[(?P<id>[^\]]*)\]:\s*(?P<path>[^\s]*)$', txt):
|
||||
if m.group('id') and m.group('path'):
|
||||
refs[m.group('id')] = m.group('path')
|
||||
for m in re.finditer(ur'(?mu)\!\[([^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*)\]\s*\[(?P<id>[^\]]*)\]', txt):
|
||||
for m in re.finditer(ur'(?mu)\!\[([^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*)\]\s*\[(?P<id>[^\]]*)\]', txt): # noqa
|
||||
path = refs.get(m.group('id'), None)
|
||||
if path and not os.path.isabs(path) and guess_type(path)[0] in OEB_IMAGES and os.path.exists(os.path.join(base_dir, path)):
|
||||
images.append(path)
|
||||
@ -414,7 +414,7 @@ class ZipMetadataReader(MetadataReaderPlugin):
|
||||
from calibre.ebooks.metadata.zip import get_metadata
|
||||
return get_metadata(stream)
|
||||
|
||||
plugins += [x for x in list(locals().values()) if isinstance(x, type) and \
|
||||
plugins += [x for x in list(locals().values()) if isinstance(x, type) and
|
||||
x.__name__.endswith('MetadataReader')]
|
||||
|
||||
# }}}
|
||||
@ -527,7 +527,7 @@ class TXTZMetadataWriter(MetadataWriterPlugin):
|
||||
from calibre.ebooks.metadata.extz import set_metadata
|
||||
set_metadata(stream, mi)
|
||||
|
||||
plugins += [x for x in list(locals().values()) if isinstance(x, type) and \
|
||||
plugins += [x for x in list(locals().values()) if isinstance(x, type) and
|
||||
x.__name__.endswith('MetadataWriter')]
|
||||
|
||||
# }}}
|
||||
@ -630,7 +630,6 @@ plugins += input_profiles + output_profiles
|
||||
# }}}
|
||||
|
||||
# Device driver plugins {{{
|
||||
from calibre.devices.apple.driver import ITUNES
|
||||
from calibre.devices.hanlin.driver import HANLINV3, HANLINV5, BOOX, SPECTRA
|
||||
from calibre.devices.blackberry.driver import BLACKBERRY, PLAYBOOK
|
||||
from calibre.devices.cybook.driver import CYBOOK, ORIZON
|
||||
@ -644,6 +643,7 @@ from calibre.devices.jetbook.driver import (JETBOOK, MIBUK, JETBOOK_MINI,
|
||||
JETBOOK_COLOR)
|
||||
from calibre.devices.kindle.driver import (KINDLE, KINDLE2, KINDLE_DX,
|
||||
KINDLE_FIRE)
|
||||
from calibre.devices.apple.driver import ITUNES
|
||||
from calibre.devices.nook.driver import NOOK, NOOK_COLOR
|
||||
from calibre.devices.prs505.driver import PRS505
|
||||
from calibre.devices.prst1.driver import PRST1
|
||||
@@ -1263,7 +1263,7 @@ class StoreAmazonUKKindleStore(StoreBase):

class StoreArchiveOrgStore(StoreBase):
    name = 'Archive.org'
-    description = u'An Internet library offering permanent access for researchers, historians, scholars, people with disabilities, and the general public to historical collections that exist in digital format.'
+    description = u'An Internet library offering permanent access for researchers, historians, scholars, people with disabilities, and the general public to historical collections that exist in digital format.' # noqa
    actual_plugin = 'calibre.gui2.store.stores.archive_org_plugin:ArchiveOrgStore'

    drm_free_only = True

@@ -1290,7 +1290,7 @@ class StoreBNStore(StoreBase):
class StoreBeamEBooksDEStore(StoreBase):
    name = 'Beam EBooks DE'
    author = 'Charles Haley'
-    description = u'Bei uns finden Sie: Tausende deutschsprachige eBooks; Alle eBooks ohne hartes DRM; PDF, ePub und Mobipocket Format; Sofortige Verfügbarkeit - 24 Stunden am Tag; Günstige Preise; eBooks für viele Lesegeräte, PC,Mac und Smartphones; Viele Gratis eBooks'
+    description = u'Bei uns finden Sie: Tausende deutschsprachige eBooks; Alle eBooks ohne hartes DRM; PDF, ePub und Mobipocket Format; Sofortige Verfügbarkeit - 24 Stunden am Tag; Günstige Preise; eBooks für viele Lesegeräte, PC,Mac und Smartphones; Viele Gratis eBooks' # noqa
    actual_plugin = 'calibre.gui2.store.stores.beam_ebooks_de_plugin:BeamEBooksDEStore'

    drm_free_only = True

@@ -1310,7 +1310,7 @@ class StoreBiblioStore(StoreBase):
class StoreBookotekaStore(StoreBase):
    name = 'Bookoteka'
    author = u'Tomasz Długosz'
-    description = u'E-booki w Bookotece dostępne są w formacie EPUB oraz PDF. Publikacje sprzedawane w Bookotece są objęte prawami autorskimi. Zobowiązaliśmy się chronić te prawa, ale bez ograniczania dostępu do książki użytkownikowi, który nabył ją w legalny sposób. Dlatego też Bookoteka stosuje tak zwany „watermarking transakcyjny” czyli swego rodzaju znaki wodne.'
+    description = u'E-booki w Bookotece dostępne są w formacie EPUB oraz PDF. Publikacje sprzedawane w Bookotece są objęte prawami autorskimi. Zobowiązaliśmy się chronić te prawa, ale bez ograniczania dostępu do książki użytkownikowi, który nabył ją w legalny sposób. Dlatego też Bookoteka stosuje tak zwany „watermarking transakcyjny” czyli swego rodzaju znaki wodne.' # noqa
    actual_plugin = 'calibre.gui2.store.stores.bookoteka_plugin:BookotekaStore'

    drm_free_only = True

@@ -1329,7 +1329,7 @@ class StoreChitankaStore(StoreBase):

class StoreDieselEbooksStore(StoreBase):
    name = 'Diesel eBooks'
-    description = u'Instant access to over 2.4 million titles from hundreds of publishers including Harlequin, HarperCollins, John Wiley & Sons, McGraw-Hill, Simon & Schuster and Random House.'
+    description = u'Instant access to over 2.4 million titles from hundreds of publishers including Harlequin, HarperCollins, John Wiley & Sons, McGraw-Hill, Simon & Schuster and Random House.' # noqa
    actual_plugin = 'calibre.gui2.store.stores.diesel_ebooks_plugin:DieselEbooksStore'

    headquarters = 'US'

@@ -1358,7 +1358,7 @@ class StoreEbookpointStore(StoreBase):

class StoreEbookscomStore(StoreBase):
    name = 'eBooks.com'
-    description = u'Sells books in multiple electronic formats in all categories. Technical infrastructure is cutting edge, robust and scalable, with servers in the US and Europe.'
+    description = u'Sells books in multiple electronic formats in all categories. Technical infrastructure is cutting edge, robust and scalable, with servers in the US and Europe.' # noqa
    actual_plugin = 'calibre.gui2.store.stores.ebooks_com_plugin:EbookscomStore'

    headquarters = 'US'

@@ -1386,7 +1386,7 @@ class StoreEbooksGratuitsStore(StoreBase):

class StoreEHarlequinStore(StoreBase):
    name = 'eHarlequin'
-    description = u'A global leader in series romance and one of the world\'s leading publishers of books for women. Offers women a broad range of reading from romance to bestseller fiction, from young adult novels to erotic literature, from nonfiction to fantasy, from African-American novels to inspirational romance, and more.'
+    description = u'A global leader in series romance and one of the world\'s leading publishers of books for women. Offers women a broad range of reading from romance to bestseller fiction, from young adult novels to erotic literature, from nonfiction to fantasy, from African-American novels to inspirational romance, and more.' # noqa
    actual_plugin = 'calibre.gui2.store.stores.eharlequin_plugin:EHarlequinStore'

    headquarters = 'CA'

@@ -1406,7 +1406,7 @@ class StoreEKnigiStore(StoreBase):
class StoreEmpikStore(StoreBase):
    name = 'Empik'
    author = u'Tomasz Długosz'
-    description = u'Empik to marka o unikalnym dziedzictwie i legendarne miejsce, dawne “okno na świat”. Jest obecna w polskim krajobrazie kulturalnym od 60 lat (wcześniej jako Kluby Międzynarodowej Prasy i Książki).'
+    description = u'Empik to marka o unikalnym dziedzictwie i legendarne miejsce, dawne “okno na świat”. Jest obecna w polskim krajobrazie kulturalnym od 60 lat (wcześniej jako Kluby Międzynarodowej Prasy i Książki).' # noqa
    actual_plugin = 'calibre.gui2.store.stores.empik_plugin:EmpikStore'

    headquarters = 'PL'

@@ -1425,7 +1425,7 @@ class StoreEscapeMagazineStore(StoreBase):

class StoreFeedbooksStore(StoreBase):
    name = 'Feedbooks'
-    description = u'Feedbooks is a cloud publishing and distribution service, connected to a large ecosystem of reading systems and social networks. Provides a variety of genres from independent and classic books.'
+    description = u'Feedbooks is a cloud publishing and distribution service, connected to a large ecosystem of reading systems and social networks. Provides a variety of genres from independent and classic books.' # noqa
    actual_plugin = 'calibre.gui2.store.stores.feedbooks_plugin:FeedbooksStore'

    headquarters = 'FR'

@@ -1451,7 +1451,7 @@ class StoreGoogleBooksStore(StoreBase):

class StoreGutenbergStore(StoreBase):
    name = 'Project Gutenberg'
-    description = u'The first producer of free ebooks. Free in the United States because their copyright has expired. They may not be free of copyright in other countries. Readers outside of the United States must check the copyright laws of their countries before downloading or redistributing our ebooks.'
+    description = u'The first producer of free ebooks. Free in the United States because their copyright has expired. They may not be free of copyright in other countries. Readers outside of the United States must check the copyright laws of their countries before downloading or redistributing our ebooks.' # noqa
    actual_plugin = 'calibre.gui2.store.stores.gutenberg_plugin:GutenbergStore'

    drm_free_only = True

@@ -1460,7 +1460,7 @@ class StoreGutenbergStore(StoreBase):

class StoreKoboStore(StoreBase):
    name = 'Kobo'
-    description = u'With over 2.3 million eBooks to browse we have engaged readers in over 200 countries in Kobo eReading. Our eBook listings include New York Times Bestsellers, award winners, classics and more!'
+    description = u'With over 2.3 million eBooks to browse we have engaged readers in over 200 countries in Kobo eReading. Our eBook listings include New York Times Bestsellers, award winners, classics and more!' # noqa
    actual_plugin = 'calibre.gui2.store.stores.kobo_plugin:KoboStore'

    headquarters = 'CA'

@@ -1550,7 +1550,7 @@ class StoreNextoStore(StoreBase):
class StoreNookUKStore(StoreBase):
    name = 'Nook UK'
    author = 'John Schember'
-    description = u'Barnes & Noble S.à r.l, a subsidiary of Barnes & Noble, Inc., a leading retailer of content, digital media and educational products, is proud to bring the award-winning NOOK® reading experience and a leading digital bookstore to the UK.'
+    description = u'Barnes & Noble S.à r.l, a subsidiary of Barnes & Noble, Inc., a leading retailer of content, digital media and educational products, is proud to bring the award-winning NOOK® reading experience and a leading digital bookstore to the UK.' # noqa
    actual_plugin = 'calibre.gui2.store.stores.nook_uk_plugin:NookUKStore'

    headquarters = 'UK'

@@ -1627,7 +1627,7 @@ class StoreVirtualoStore(StoreBase):
class StoreWaterstonesUKStore(StoreBase):
    name = 'Waterstones UK'
    author = 'Charles Haley'
-    description = u'Waterstone\'s mission is to be the leading Bookseller on the High Street and online providing customers the widest choice, great value and expert advice from a team passionate about Bookselling.'
+    description = u'Waterstone\'s mission is to be the leading Bookseller on the High Street and online providing customers the widest choice, great value and expert advice from a team passionate about Bookselling.' # noqa
    actual_plugin = 'calibre.gui2.store.stores.waterstones_uk_plugin:WaterstonesUKStore'

    headquarters = 'UK'
@@ -1727,6 +1727,28 @@ plugins += [
if __name__ == '__main__':
    # Test load speed
    import subprocess, textwrap
    try:
        subprocess.check_call(['python', '-c', textwrap.dedent(
        '''
        import init_calibre # noqa

        def doit():
            import calibre.customize.builtins as b # noqa

        def show_stats():
            from pstats import Stats
            s = Stats('/tmp/calibre_stats')
            s.sort_stats('cumulative')
            s.print_stats(30)

        import cProfile
        cProfile.run('doit()', '/tmp/calibre_stats')
        show_stats()

        '''
        )])
    except subprocess.CalledProcessError:
        raise SystemExit(1)
    try:
        subprocess.check_call(['python', '-c', textwrap.dedent(
        '''

@@ -1739,7 +1761,10 @@ if __name__ == '__main__':

        for x in ('lxml', 'calibre.ebooks.BeautifulSoup', 'uuid',
            'calibre.utils.terminal', 'calibre.utils.magick', 'PIL', 'Image',
-            'sqlite3', 'mechanize', 'httplib', 'xml'):
+            'sqlite3', 'mechanize', 'httplib', 'xml', 'inspect', 'urllib',
+            'calibre.utils.date', 'calibre.utils.config', 'platform',
+            'calibre.utils.zipfile', 'calibre.utils.formatter',
+            ):
            if x in sys.modules:
                ret = 1
                print (x, 'has been loaded by a plugin')
@@ -9,6 +9,50 @@ __docformat__ = 'restructuredtext en'

SPOOL_SIZE = 30*1024*1024

def _get_next_series_num_for_list(series_indices):
    from calibre.utils.config_base import tweaks
    from math import ceil, floor
    if not series_indices:
        if isinstance(tweaks['series_index_auto_increment'], (int, float)):
            return float(tweaks['series_index_auto_increment'])
        return 1.0
    series_indices = [x[0] for x in series_indices]
    if tweaks['series_index_auto_increment'] == 'next':
        return floor(series_indices[-1]) + 1
    if tweaks['series_index_auto_increment'] == 'first_free':
        for i in xrange(1, 10000):
            if i not in series_indices:
                return i
        # really shouldn't get here.
    if tweaks['series_index_auto_increment'] == 'next_free':
        for i in xrange(int(ceil(series_indices[0])), 10000):
            if i not in series_indices:
                return i
        # really shouldn't get here.
    if tweaks['series_index_auto_increment'] == 'last_free':
        for i in xrange(int(ceil(series_indices[-1])), 0, -1):
            if i not in series_indices:
                return i
        return series_indices[-1] + 1
    if isinstance(tweaks['series_index_auto_increment'], (int, float)):
        return float(tweaks['series_index_auto_increment'])
    return 1.0

def _get_series_values(val):
    import re
    series_index_pat = re.compile(r'(.*)\s+\[([.0-9]+)\]$')
    if not val:
        return (val, None)
    match = series_index_pat.match(val.strip())
    if match is not None:
        idx = match.group(2)
        try:
            idx = float(idx)
            return (match.group(1).strip(), idx)
        except:
            pass
    return (val, None)

'''
Rewrite of the calibre database backend.
@@ -25,6 +25,7 @@ from calibre.utils.config import to_json, from_json, prefs, tweaks
from calibre.utils.date import utcfromtimestamp, parse_date
from calibre.utils.filenames import (is_case_sensitive, samefile, hardlink_file, ascii_filename,
        WindowsAtomicFolderMove)
from calibre.utils.magick.draw import save_cover_data_to
from calibre.utils.recycle_bin import delete_tree
from calibre.db.tables import (OneToOneTable, ManyToOneTable, ManyToManyTable,
        SizeTable, FormatsTable, AuthorsTable, IdentifiersTable, PathTable,

@@ -973,6 +974,23 @@ class DB(object):
            return True
        return False

    def set_cover(self, book_id, path, data):
        path = os.path.abspath(os.path.join(self.library_path, path))
        if not os.path.exists(path):
            os.makedirs(path)
        path = os.path.join(path, 'cover.jpg')
        if callable(getattr(data, 'save', None)):
            from calibre.gui2 import pixmap_to_data
            data = pixmap_to_data(data)
        else:
            if callable(getattr(data, 'read', None)):
                data = data.read()
        try:
            save_cover_data_to(data, path)
        except (IOError, OSError):
            time.sleep(0.2)
            save_cover_data_to(data, path)

    def copy_format_to(self, book_id, fmt, fname, path, dest,
            windows_atomic_move=None, use_hardlink=False):
        path = self.format_abspath(book_id, fmt, fname, path)
@@ -22,6 +22,7 @@ from calibre.db.search import Search
from calibre.db.tables import VirtualTable
from calibre.db.write import get_series_values
from calibre.db.lazy import FormatMetadata, FormatsList
from calibre.ebooks.metadata import string_to_authors
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.ptempfile import (base_dir, PersistentTemporaryFile,

@@ -669,7 +670,7 @@ class Cache(object):
        self.dirtied_cache.update(new_dirtied)

    @write_api
-    def set_field(self, name, book_id_to_val_map, allow_case_change=True):
+    def set_field(self, name, book_id_to_val_map, allow_case_change=True, do_path_update=True):
        f = self.fields[name]
        is_series = f.metadata['datatype'] == 'series'
        update_path = name in {'title', 'authors'}

@@ -702,7 +703,7 @@ class Cache(object):
        for name in self.composites:
            self.fields[name].pop_cache(dirtied)

-        if dirtied and update_path:
+        if dirtied and update_path and do_path_update:
            self._update_path(dirtied, mark_as_dirtied=False)

        self._mark_as_dirty(dirtied)

@@ -822,6 +823,102 @@ class Cache(object):
            if callback is not None:
                callback(book_id, mi, True)

    @write_api
    def set_cover(self, book_id_data_map):
        ''' Set the cover for this book. data can be either a QImage,
        QPixmap, file object or bytestring '''

        for book_id, data in book_id_data_map.iteritems():
            try:
                path = self._field_for('path', book_id).replace('/', os.sep)
            except AttributeError:
                self._update_path((book_id,))
                path = self._field_for('path', book_id).replace('/', os.sep)

            self.backend.set_cover(book_id, path, data)
        self._set_field('cover', {book_id:1 for book_id in book_id_data_map})

    @write_api
    def set_metadata(self, book_id, mi, ignore_errors=False, force_changes=False,
                     set_title=True, set_authors=True):
        if callable(getattr(mi, 'to_book_metadata', None)):
            # Handle code passing in an OPF object instead of a Metadata object
            mi = mi.to_book_metadata()

        def set_field(name, val, **kwargs):
            self._set_field(name, {book_id:val}, **kwargs)

        path_changed = False
        if set_title and mi.title:
            path_changed = True
            set_field('title', mi.title, do_path_update=False)
        if set_authors:
            path_changed = True
            if not mi.authors:
                mi.authors = [_('Unknown')]
            authors = []
            for a in mi.authors:
                authors += string_to_authors(a)
            set_field('authors', authors, do_path_update=False)

        if path_changed:
            self._update_path((book_id,))

        def protected_set_field(name, val, **kwargs):
            try:
                set_field(name, val, **kwargs)
            except:
                if ignore_errors:
                    traceback.print_exc()
                else:
                    raise

        for field in ('rating', 'series_index', 'timestamp'):
            val = getattr(mi, field)
            if val is not None:
                protected_set_field(field, val)

        # force_changes has no effect on cover manipulation
        cdata = mi.cover_data[1]
        if cdata is None and isinstance(mi.cover, basestring) and mi.cover and os.access(mi.cover, os.R_OK):
            with lopen(mi.cover, 'rb') as f:
                raw = f.read()
            if raw:
                cdata = raw
        if cdata is not None:
            self._set_cover({book_id: cdata})

        for field in ('title_sort', 'author_sort', 'publisher', 'series',
                      'tags', 'comments', 'languages', 'pubdate'):
            val = mi.get(field, None)
            if (force_changes and val is not None) or not mi.is_null(field):
                protected_set_field(field, val)

        # identifiers will always be replaced if force_changes is True
        mi_idents = mi.get_identifiers()
        if force_changes:
            protected_set_field('identifiers', mi_idents)
        elif mi_idents:
            identifiers = self._field_for('identifiers', book_id, default_value={})
            for key, val in mi_idents.iteritems():
                if val and val.strip(): # Don't delete an existing identifier
                    identifiers[icu_lower(key)] = val
            protected_set_field('identifiers', identifiers)

        user_mi = mi.get_all_user_metadata(make_copy=False)
        fm = self.field_metadata
        for key in user_mi.iterkeys():
            if (key in fm and
                    user_mi[key]['datatype'] == fm[key]['datatype'] and
                    (user_mi[key]['datatype'] != 'text' or
                     user_mi[key]['is_multiple'] == fm[key]['is_multiple'])):
                val = mi.get(key, None)
                if force_changes or val is not None:
                    protected_set_field(key, val)
                    extra = mi.get_extra(key)
                    if extra is not None:
                        protected_set_field(key+'_index', extra)

    # }}}

class SortKey(object): # {{{
@ -9,8 +9,10 @@ __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
|
||||
import os, traceback
|
||||
from functools import partial
|
||||
|
||||
from calibre.db import _get_next_series_num_for_list, _get_series_values
|
||||
from calibre.db.backend import DB
|
||||
from calibre.db.cache import Cache
|
||||
from calibre.db.categories import CATEGORY_SORTS
|
||||
from calibre.db.view import View
|
||||
from calibre.utils.date import utcnow
|
||||
|
||||
@ -20,6 +22,10 @@ class LibraryDatabase(object):
|
||||
|
||||
PATH_LIMIT = DB.PATH_LIMIT
|
||||
WINDOWS_LIBRARY_PATH_LIMIT = DB.WINDOWS_LIBRARY_PATH_LIMIT
|
||||
CATEGORY_SORTS = CATEGORY_SORTS
|
||||
MATCH_TYPE = ('any', 'all')
|
||||
CUSTOM_DATA_TYPES = frozenset(['rating', 'text', 'comments', 'datetime',
|
||||
'int', 'float', 'bool', 'series', 'composite', 'enumeration'])
|
||||
|
||||
@classmethod
|
||||
def exists_at(cls, path):
|
||||
@ -148,3 +154,17 @@ class LibraryDatabase(object):
|
||||
os.makedirs(path)
|
||||
return path
|
||||
|
||||
# Private interface {{{
|
||||
|
||||
def __iter__(self):
|
||||
for row in self.data.iterall():
|
||||
yield row
|
||||
|
||||
def _get_next_series_num_for_list(self, series_indices):
|
||||
return _get_next_series_num_for_list(series_indices)
|
||||
|
||||
def _get_series_values(self, val):
|
||||
return _get_series_values(val)
|
||||
|
||||
# }}}
|
||||
|
||||
|
@ -6,6 +6,7 @@ from __future__ import (unicode_literals, division, absolute_import,
|
||||
__license__ = 'GPL v3'
|
||||
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
|
||||
|
||||
import inspect
|
||||
from calibre.db.tests.base import BaseTest
|
||||
|
||||
class LegacyTest(BaseTest):
|
||||
@ -82,6 +83,7 @@ class LegacyTest(BaseTest):
|
||||
# }}}
|
||||
|
||||
def test_legacy_getters(self): # {{{
|
||||
' Test various functions to get individual bits of metadata '
|
||||
old = self.init_old()
|
||||
getters = ('path', 'abspath', 'title', 'authors', 'series',
|
||||
'publisher', 'author_sort', 'authors', 'comments',
|
||||
@ -89,11 +91,43 @@ class LegacyTest(BaseTest):
|
||||
'timestamp', 'uuid', 'pubdate', 'ondevice',
|
||||
'metadata_last_modified', 'languages')
|
||||
oldvals = {g:tuple(getattr(old, g)(x) for x in xrange(3)) + tuple(getattr(old, g)(x, True) for x in (1,2,3)) for g in getters}
|
||||
old_rows = {tuple(r)[:5] for r in old}
|
||||
old.close()
|
||||
db = self.init_legacy()
|
||||
newvals = {g:tuple(getattr(db, g)(x) for x in xrange(3)) + tuple(getattr(db, g)(x, True) for x in (1,2,3)) for g in getters}
|
||||
new_rows = {tuple(r)[:5] for r in db}
|
||||
for x in (oldvals, newvals):
|
||||
x['tags'] = tuple(set(y.split(',')) if y else y for y in x['tags'])
|
||||
self.assertEqual(oldvals, newvals)
|
||||
self.assertEqual(old_rows, new_rows)
|
||||
|
||||
# }}}
|
||||
|
||||
def test_legacy_coverage(self): # {{{
|
||||
' Check that the emulation of the legacy interface is (almost) total '
|
||||
cl = self.cloned_library
|
||||
db = self.init_old(cl)
|
||||
ndb = self.init_legacy()
|
||||
|
||||
SKIP_ATTRS = {
|
||||
'TCat_Tag', '_add_newbook_tag', '_clean_identifier', '_library_id_', '_set_authors',
|
||||
'_set_title', '_set_custom', '_update_author_in_cache',
|
||||
}
|
||||
SKIP_ARGSPEC = {
|
||||
'__init__',
|
||||
}
|
||||
|
||||
for attr in dir(db):
|
||||
if attr in SKIP_ATTRS:
|
||||
continue
|
||||
self.assertTrue(hasattr(ndb, attr), 'The attribute %s is missing' % attr)
|
||||
obj, nobj = getattr(db, attr), getattr(ndb, attr)
|
||||
if attr not in SKIP_ARGSPEC:
|
||||
try:
|
||||
argspec = inspect.getargspec(obj)
|
||||
except TypeError:
|
||||
pass
|
||||
else:
|
||||
self.assertEqual(argspec, inspect.getargspec(nobj), 'argspec for %s not the same' % attr)
|
||||
# }}}
|
||||
|
||||
|
@ -355,4 +355,7 @@ class WritingTest(BaseTest):
|
||||
ae(opf.authors, ['author1', 'author2'])
|
||||
# }}}
|
||||
|
||||
def test_set_cover(self):
|
||||
' Test setting of cover '
|
||||
self.assertTrue(False, 'TODO: test set_cover() and set_metadata()')
|
||||
|
||||
|
@ -29,11 +29,12 @@ class MarkedVirtualField(object):
|
||||
for book_id in candidates:
|
||||
yield self.marked_ids.get(book_id, default_value), {book_id}
|
||||
|
||||
class TableRow(list):
|
||||
class TableRow(object):
|
||||
|
||||
def __init__(self, book_id, view):
|
||||
self.book_id = book_id
|
||||
self.view = weakref.ref(view)
|
||||
self.column_count = view.column_count
|
||||
|
||||
def __getitem__(self, obj):
|
||||
view = self.view()
|
||||
@ -43,6 +44,13 @@ class TableRow(list):
|
||||
else:
|
||||
return view._field_getters[obj](self.book_id)
|
||||
|
||||
def __len__(self):
|
||||
return self.column_count
|
||||
|
||||
def __iter__(self):
|
||||
for i in xrange(self.column_count):
|
||||
yield self[i]
|
||||
|
||||
def format_is_multiple(x, sep=',', repl=None):
|
||||
if not x:
|
||||
return None
|
||||
@ -67,6 +75,7 @@ class View(object):
|
||||
self.search_restriction = self.base_restriction = ''
|
||||
self.search_restriction_name = self.base_restriction_name = ''
|
||||
self._field_getters = {}
|
||||
self.column_count = len(cache.backend.FIELD_MAP)
|
||||
for col, idx in cache.backend.FIELD_MAP.iteritems():
|
||||
label, fmt = col, lambda x:x
|
||||
func = {
|
||||
@ -107,7 +116,7 @@ class View(object):
|
||||
fmt = partial(format_is_multiple, sep=sep)
|
||||
self._field_getters[idx] = partial(func, label, fmt=fmt) if func == self._get else func
|
||||
|
||||
self._map = tuple(self.cache.all_book_ids())
|
||||
self._map = tuple(sorted(self.cache.all_book_ids()))
|
||||
self._map_filtered = tuple(self._map)
|
||||
|
||||
def get_property(self, id_or_index, index_is_id=False, loc=-1):
|
||||
@ -124,21 +133,21 @@ class View(object):
|
||||
return idx if index_is_id else self.index_to_id(idx)
|
||||
|
||||
def __getitem__(self, row):
|
||||
return TableRow(self._map_filtered[row], self.cache)
|
||||
return TableRow(self._map_filtered[row], self)
|
||||
|
||||
def __len__(self):
|
||||
return len(self._map_filtered)
|
||||
|
||||
def __iter__(self):
|
||||
for book_id in self._map_filtered:
|
||||
yield self._data[book_id]
|
||||
yield TableRow(book_id, self)
|
||||
|
||||
def iterall(self):
|
||||
for book_id in self._map:
|
||||
yield self[book_id]
|
||||
for book_id in self.iterallids():
|
||||
yield TableRow(book_id, self)
|
||||
|
||||
def iterallids(self):
|
||||
for book_id in self._map:
|
||||
for book_id in sorted(self._map):
|
||||
yield book_id
|
||||
|
||||
def get_field_map_field(self, row, col, index_is_id=True):
|
||||
|
@ -5,7 +5,7 @@ __copyright__ = '2010, Gregory Riker'
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
||||
|
||||
import cStringIO, ctypes, datetime, os, platform, re, shutil, sys, tempfile, time
|
||||
import cStringIO, ctypes, datetime, os, re, shutil, sys, tempfile, time
|
||||
|
||||
from calibre import fit_image, confirm_config_name, strftime as _strftime
|
||||
from calibre.constants import (
|
||||
@ -16,11 +16,10 @@ from calibre.devices.interface import DevicePlugin
|
||||
from calibre.ebooks.metadata import (author_to_author_sort, authors_to_string,
|
||||
MetaInformation, title_sort)
|
||||
from calibre.ebooks.metadata.book.base import Metadata
|
||||
from calibre.utils.config import config_dir, dynamic, prefs
|
||||
from calibre.utils.date import now, parse_date
|
||||
from calibre.utils.zipfile import ZipFile
|
||||
from calibre.utils.config_base import config_dir, prefs
|
||||
|
||||
def strftime(fmt='%Y/%m/%d %H:%M:%S', dt=None):
|
||||
from calibre.utils.date import now
|
||||
|
||||
if not hasattr(dt, 'timetuple'):
|
||||
dt = now()
|
||||
@ -93,6 +92,7 @@ class AppleOpenFeedback(OpenFeedback):
|
||||
|
||||
def do_it(self, return_code):
|
||||
from calibre.utils.logging import default_log
|
||||
from calibre.utils.config import dynamic
|
||||
if return_code == self.Accepted:
|
||||
default_log.info(" Apple driver ENABLED")
|
||||
dynamic[confirm_config_name(self.cd.plugin.DISPLAY_DISABLE_DIALOG)] = False
|
||||
@ -418,6 +418,7 @@ class ITUNES(DriverBase):
|
||||
list of device books.
|
||||
|
||||
"""
|
||||
from calibre.utils.date import parse_date
|
||||
if not oncard:
|
||||
if self.verbose:
|
||||
logger().info("%s.books():" % self.__class__.__name__)
|
||||
@ -865,6 +866,7 @@ class ITUNES(DriverBase):
|
||||
Note that most of the initialization is necessarily performed in can_handle(), as
|
||||
we need to talk to iTunes to discover if there's a connected iPod
|
||||
'''
|
||||
from calibre.utils.zipfile import ZipFile
|
||||
if self.iTunes is None:
|
||||
raise OpenFeedback(self.ITUNES_SANDBOX_LOCKOUT_MESSAGE)
|
||||
|
||||
@ -886,6 +888,7 @@ class ITUNES(DriverBase):
|
||||
if False:
|
||||
# Display a dialog recommending using 'Connect to iTunes' if user hasn't
|
||||
# previously disabled the dialog
|
||||
from calibre.utils.config import dynamic
|
||||
if dynamic.get(confirm_config_name(self.DISPLAY_DISABLE_DIALOG), True):
|
||||
raise AppleOpenFeedback(self)
|
||||
else:
|
||||
@ -1434,6 +1437,7 @@ class ITUNES(DriverBase):
|
||||
as of iTunes 9.2, iBooks 1.1, can't set artwork for PDF files via automation
|
||||
'''
|
||||
from PIL import Image as PILImage
|
||||
from calibre.utils.zipfile import ZipFile
|
||||
|
||||
if self.verbose:
|
||||
logger().info(" %s._cover_to_thumb()" % self.__class__.__name__)
|
||||
@ -1766,6 +1770,7 @@ class ITUNES(DriverBase):
|
||||
'''
|
||||
'''
|
||||
from calibre.ebooks.BeautifulSoup import BeautifulSoup
|
||||
from calibre.utils.zipfile import ZipFile
|
||||
|
||||
logger().info(" %s.__get_epub_metadata()" % self.__class__.__name__)
|
||||
title = None
|
||||
@ -2019,6 +2024,7 @@ class ITUNES(DriverBase):
|
||||
as of iTunes 9.2, iBooks 1.1, can't set artwork for PDF files via automation
|
||||
'''
|
||||
from PIL import Image as PILImage
|
||||
from calibre.utils.zipfile import ZipFile
|
||||
|
||||
if not self.settings().extra_customization[self.CACHE_COVERS]:
|
||||
thumb_data = None
|
||||
@ -2131,6 +2137,7 @@ class ITUNES(DriverBase):
|
||||
'''
|
||||
Calculate the exploded size of file
|
||||
'''
|
||||
from calibre.utils.zipfile import ZipFile
|
||||
exploded_file_size = compressed_size
|
||||
format = file.rpartition('.')[2].lower()
|
||||
if format == 'epub':
|
||||
@ -2627,7 +2634,7 @@ class ITUNES(DriverBase):
|
||||
# for deletion from booklist[0] during add_books_to_metadata
|
||||
for book in self.cached_books:
|
||||
if (self.cached_books[book]['uuid'] == metadata.uuid or
|
||||
(self.cached_books[book]['title'] == metadata.title and \
|
||||
(self.cached_books[book]['title'] == metadata.title and
|
||||
self.cached_books[book]['author'] == metadata.author)):
|
||||
self.update_list.append(self.cached_books[book])
|
||||
if self.verbose:
|
||||
@ -2781,8 +2788,10 @@ class ITUNES(DriverBase):
|
||||
def _update_epub_metadata(self, fpath, metadata):
|
||||
'''
|
||||
'''
|
||||
from calibre.utils.date import parse_date, now
|
||||
from calibre.ebooks.metadata.epub import set_metadata
|
||||
from lxml import etree
|
||||
from calibre.utils.zipfile import ZipFile
|
||||
|
||||
if self.verbose:
|
||||
logger().info(" %s._update_epub_metadata()" % self.__class__.__name__)
|
||||
@ -3254,6 +3263,7 @@ class ITUNES_ASYNC(ITUNES):
|
||||
list of device books.
|
||||
|
||||
"""
|
||||
from calibre.utils.date import parse_date
|
||||
if not oncard:
|
||||
if self.verbose:
|
||||
logger().info("%s.books()" % self.__class__.__name__)
|
||||
@ -3424,6 +3434,7 @@ class ITUNES_ASYNC(ITUNES):
|
||||
Note that most of the initialization is necessarily performed in can_handle(), as
|
||||
we need to talk to iTunes to discover if there's a connected iPod
|
||||
'''
|
||||
from calibre.utils.zipfile import ZipFile
|
||||
if self.iTunes is None:
|
||||
raise OpenFeedback(self.ITUNES_SANDBOX_LOCKOUT_MESSAGE)
|
||||
|
||||
|
@@ -60,13 +60,24 @@ class TOLINO(EB600):
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['_TELEKOMTOLINO']

    def linux_swap_drives(self, drives):
-        if len(drives) < 2 or not drives[1] or not drives[2]: return drives
+        if len(drives) < 2 or not drives[1] or not drives[2]:
+            return drives
        drives = list(drives)
        t = drives[0]
        drives[0] = drives[1]
        drives[1] = t
        return tuple(drives)

    def windows_sort_drives(self, drives):
        if len(drives) < 2:
            return drives
        main = drives.get('main', None)
        carda = drives.get('carda', None)
        if main and carda:
            drives['main'] = carda
            drives['carda'] = main
        return drives
|
||||
class COOL_ER(EB600):
|
||||
|
||||
name = 'Cool-er device interface'
|
||||
@ -94,13 +105,11 @@ class SHINEBOOK(EB600):
|
||||
MAIN_MEMORY_VOLUME_LABEL = 'ShineBook Main Memory'
|
||||
STORAGE_CARD_VOLUME_LABEL = 'ShineBook Storage Card'
|
||||
|
||||
|
||||
@classmethod
|
||||
def can_handle(cls, dev, debug=False):
|
||||
return dev[4] == 'ShineBook'
|
||||
|
||||
|
||||
|
||||
class POCKETBOOK360(EB600):
|
||||
|
||||
# Device info on OS X
|
||||
@ -113,7 +122,6 @@ class POCKETBOOK360(EB600):
|
||||
PRODUCT_ID = [0x1688, 0xa4a5]
|
||||
BCD = [0x110]
|
||||
|
||||
|
||||
FORMATS = ['epub', 'fb2', 'prc', 'mobi', 'pdf', 'djvu', 'rtf', 'chm', 'txt']
|
||||
|
||||
VENDOR_NAME = ['PHILIPS', '__POCKET', 'POCKETBO']
|
||||
@ -312,7 +320,8 @@ class POCKETBOOK701(USBMS):
|
||||
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = '__UMS_COMPOSITE'
|
||||
|
||||
def windows_sort_drives(self, drives):
|
||||
if len(drives) < 2: return drives
|
||||
if len(drives) < 2:
|
||||
return drives
|
||||
main = drives.get('main', None)
|
||||
carda = drives.get('carda', None)
|
||||
if main and carda:
|
||||
|
@ -10,8 +10,7 @@ from calibre.utils.icu import sort_key
|
||||
|
||||
from calibre.devices.usbms.books import Book as Book_
|
||||
from calibre.devices.usbms.books import CollectionsBookList
|
||||
from calibre.utils.config import prefs
|
||||
from calibre.utils.date import parse_date
|
||||
from calibre.utils.config_base import prefs
|
||||
from calibre.devices.usbms.driver import debug_print
|
||||
from calibre.ebooks.metadata import author_to_author_sort
|
||||
|
||||
@ -19,6 +18,7 @@ class Book(Book_):
|
||||
|
||||
def __init__(self, prefix, lpath, title=None, authors=None, mime=None, date=None, ContentType=None,
|
||||
thumbnail_name=None, size=None, other=None):
|
||||
from calibre.utils.date import parse_date
|
||||
# debug_print('Book::__init__ - title=', title)
|
||||
show_debug = title is not None and title.lower().find("xxxxx") >= 0
|
||||
if show_debug:
|
||||
|
@ -26,7 +26,7 @@ from calibre.devices.usbms.driver import USBMS, debug_print
|
||||
from calibre import prints
|
||||
from calibre.ptempfile import PersistentTemporaryFile
|
||||
from calibre.constants import DEBUG
|
||||
from calibre.utils.config import prefs
|
||||
from calibre.utils.config_base import prefs
|
||||
|
||||
|
||||
class KOBO(USBMS):
|
||||
@ -35,7 +35,7 @@ class KOBO(USBMS):
|
||||
gui_name = 'Kobo Reader'
|
||||
description = _('Communicate with the Kobo Reader')
|
||||
author = 'Timothy Legge and David Forrester'
|
||||
version = (2, 0, 8)
|
||||
version = (2, 0, 9)
|
||||
|
||||
dbversion = 0
|
||||
fwversion = 0
|
||||
@ -1193,8 +1193,11 @@ class KOBO(USBMS):
|
||||
db.set_comment(db_id, mi.comments)
|
||||
|
||||
# Add bookmark file to db_id
|
||||
db.add_format_with_hooks(db_id, bm.value.bookmark_extension,
|
||||
bm.value.path, index_is_id=True)
|
||||
# NOTE: As it is, this copied the book from the device back to the library. That meant it replaced the
|
||||
# existing file. Taking this out for that reason, but some books have a ANNOT file that could be
|
||||
# copied.
|
||||
# db.add_format_with_hooks(db_id, bm.value.bookmark_extension,
|
||||
# bm.value.path, index_is_id=True)
|
||||
|
||||
|
||||
class KOBOTOUCH(KOBO):
|
||||
@ -1209,10 +1212,10 @@ class KOBOTOUCH(KOBO):
|
||||
min_dbversion_series = 65
|
||||
min_dbversion_archive = 71
|
||||
min_dbversion_images_on_sdcard = 77
|
||||
|
||||
|
||||
max_supported_fwversion = (2,5,1)
|
||||
min_fwversion_images_on_sdcard = (2,4,1)
|
||||
|
||||
|
||||
has_kepubs = True
|
||||
|
||||
booklist_class = KTCollectionsBookList
|
||||
@ -1366,7 +1369,7 @@ class KOBOTOUCH(KOBO):
|
||||
prefix = self._card_a_prefix if oncard == 'carda' else \
|
||||
self._card_b_prefix if oncard == 'cardb' \
|
||||
else self._main_prefix
|
||||
debug_print("KoboTouch:books - prefix='%s'"%oncard)
|
||||
debug_print("KoboTouch:books - oncard='%s', prefix='%s'"%(oncard, prefix))
|
||||
|
||||
# Determine the firmware version
|
||||
try:
|
||||
@ -2100,7 +2103,8 @@ class KOBOTOUCH(KOBO):
|
||||
:param filepath: The full path to the ebook file
|
||||
|
||||
'''
|
||||
# debug_print("KoboTouch:upload_cover - path='%s' filename='%s'"%(path, filename))
|
||||
debug_print("KoboTouch:upload_cover - path='%s' filename='%s' "%(path, filename))
|
||||
debug_print(" filepath='%s' "%(filepath))
|
||||
|
||||
opts = self.settings()
|
||||
if not self.copying_covers():
|
||||
@ -2109,7 +2113,7 @@ class KOBOTOUCH(KOBO):
|
||||
return
|
||||
|
||||
# Only upload covers to SD card if that is supported
|
||||
if self._card_a_prefix and path.startswith(self._card_a_prefix) and not self.supports_covers_on_sdcard():
|
||||
if self._card_a_prefix and os.path.abspath(path).startswith(os.path.abspath(self._card_a_prefix)) and not self.supports_covers_on_sdcard():
|
||||
return
|
||||
|
||||
if not opts.extra_customization[self.OPT_UPLOAD_GRAYSCALE_COVERS]:
|
||||
@ -2133,12 +2137,13 @@ class KOBOTOUCH(KOBO):
|
||||
|
||||
|
||||
def images_path(self, path):
|
||||
if self._card_a_prefix and path.startswith(self._card_a_prefix) and self.supports_covers_on_sdcard():
|
||||
if self._card_a_prefix and os.path.abspath(path).startswith(os.path.abspath(self._card_a_prefix)) and self.supports_covers_on_sdcard():
|
||||
path_prefix = 'koboExtStorage/images/'
|
||||
path = self._card_a_prefix + path_prefix
|
||||
path = os.path.join(self._card_a_prefix, path_prefix)
|
||||
else:
|
||||
path_prefix = '.kobo/images/'
|
||||
path = self._main_prefix + path_prefix
|
||||
path = os.path.join(self._main_prefix, path_prefix)
|
||||
|
||||
return path
|
||||
|
||||
def _upload_cover(self, path, filename, metadata, filepath, uploadgrayscale, keep_cover_aspect=False):
|
||||
@ -2181,11 +2186,16 @@ class KOBOTOUCH(KOBO):
|
||||
cursor.close()
|
||||
|
||||
if ImageID != None:
|
||||
path = self.images_path(path) + ImageID
|
||||
path = os.path.join(self.images_path(path), ImageID)
|
||||
|
||||
if show_debug:
|
||||
debug_print("KoboTouch:_upload_cover - About to loop over cover endings")
|
||||
|
||||
image_dir = os.path.dirname(os.path.abspath(path))
|
||||
if not os.path.exists(image_dir):
|
||||
debug_print("KoboTouch:_upload_cover - Image directory does not exust. Creating path='%s'" % (image_dir))
|
||||
os.makedirs(image_dir)
|
||||
|
||||
for ending, cover_options in self.cover_file_endings().items():
|
||||
resize, min_dbversion, max_dbversion, isFullsize = cover_options
|
||||
if show_debug:
|
||||
|
@ -17,8 +17,6 @@ from calibre.devices.errors import PathError
|
||||
from calibre.devices.mtp.base import debug
|
||||
from calibre.devices.mtp.defaults import DeviceDefaults
|
||||
from calibre.ptempfile import SpooledTemporaryFile, PersistentTemporaryDirectory
|
||||
from calibre.utils.config import from_json, to_json, JSONConfig
|
||||
from calibre.utils.date import now, isoformat, utcnow
|
||||
from calibre.utils.filenames import shorten_components_to
|
||||
|
||||
BASE = importlib.import_module('calibre.devices.mtp.%s.driver'%(
|
||||
@ -57,6 +55,7 @@ class MTP_DEVICE(BASE):
|
||||
|
||||
@property
|
||||
def prefs(self):
|
||||
from calibre.utils.config import JSONConfig
|
||||
if self._prefs is None:
|
||||
self._prefs = p = JSONConfig('mtp_devices')
|
||||
p.defaults['format_map'] = self.FORMATS
|
||||
@ -103,6 +102,7 @@ class MTP_DEVICE(BASE):
|
||||
del self.prefs[x]
|
||||
|
||||
def open(self, device, library_uuid):
|
||||
from calibre.utils.date import isoformat, utcnow
|
||||
self.current_library_uuid = library_uuid
|
||||
self.location_paths = None
|
||||
self.driveinfo = {}
|
||||
@ -128,6 +128,8 @@ class MTP_DEVICE(BASE):
|
||||
|
||||
# Device information {{{
|
||||
def _update_drive_info(self, storage, location_code, name=None):
|
||||
from calibre.utils.date import isoformat, now
|
||||
from calibre.utils.config import from_json, to_json
|
||||
import uuid
|
||||
f = storage.find_path((self.DRIVEINFO,))
|
||||
dinfo = {}
|
||||
|
@ -11,7 +11,6 @@ import os, time, re
|
||||
from calibre.devices.usbms.driver import USBMS, debug_print
|
||||
from calibre.devices.prs505 import MEDIA_XML, MEDIA_EXT, CACHE_XML, CACHE_EXT, \
|
||||
MEDIA_THUMBNAIL, CACHE_THUMBNAIL
|
||||
from calibre.devices.prs505.sony_cache import XMLCache
|
||||
from calibre import __appname__, prints
|
||||
from calibre.devices.usbms.books import CollectionsBookList
|
||||
|
||||
@ -178,6 +177,7 @@ class PRS505(USBMS):
|
||||
return fname
|
||||
|
||||
def initialize_XML_cache(self):
|
||||
from calibre.devices.prs505.sony_cache import XMLCache
|
||||
paths, prefixes, ext_paths = {}, {}, {}
|
||||
for prefix, path, ext_path, source_id in [
|
||||
('main', MEDIA_XML, MEDIA_EXT, 0),
|
||||
|
@ -7,7 +7,7 @@ Created on 29 Jun 2012
|
||||
|
||||
@author: charles
|
||||
'''
|
||||
import socket, select, json, inspect, os, traceback, time, sys, random
|
||||
import socket, select, json, os, traceback, time, sys, random
|
||||
import posixpath
|
||||
import hashlib, threading
|
||||
import Queue
|
||||
@ -34,8 +34,7 @@ from calibre.library import current_library_name
|
||||
from calibre.library.server import server_config as content_server_config
|
||||
from calibre.ptempfile import PersistentTemporaryFile
|
||||
from calibre.utils.ipc import eintr_retry_call
|
||||
from calibre.utils.config import from_json, tweaks
|
||||
from calibre.utils.date import isoformat, now
|
||||
from calibre.utils.config_base import tweaks
|
||||
from calibre.utils.filenames import ascii_filename as sanitize, shorten_components_to
|
||||
from calibre.utils.mdns import (publish as publish_zeroconf, unpublish as
|
||||
unpublish_zeroconf, get_all_ips)
|
||||
@ -345,6 +344,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
|
||||
|
||||
def _debug(self, *args):
|
||||
# manual synchronization so we don't lose the calling method name
|
||||
import inspect
|
||||
with self.sync_lock:
|
||||
if not DEBUG:
|
||||
return
|
||||
@ -373,6 +373,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
|
||||
|
||||
# copied from USBMS. Perhaps this could be a classmethod in usbms?
|
||||
def _update_driveinfo_record(self, dinfo, prefix, location_code, name=None):
|
||||
from calibre.utils.date import isoformat, now
|
||||
import uuid
|
||||
if not isinstance(dinfo, dict):
|
||||
dinfo = {}
|
||||
@ -593,6 +594,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
|
||||
raise ControlError(desc='Device responded with incorrect information')
|
||||
|
||||
def _receive_from_client(self, print_debug_info=True):
|
||||
from calibre.utils.config import from_json
|
||||
extra_debug = self.settings().extra_customization[self.OPT_EXTRA_DEBUG]
|
||||
try:
|
||||
v = self._read_string_from_net()
|
||||
@ -816,6 +818,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
|
||||
|
||||
@synchronous('sync_lock')
|
||||
def open(self, connected_device, library_uuid):
|
||||
from calibre.utils.date import isoformat, now
|
||||
self._debug()
|
||||
if not self.is_connected:
|
||||
# We have been called to retry the connection. Give up immediately
|
||||
|
@ -12,9 +12,8 @@ from calibre.devices.mime import mime_type_ext
|
||||
from calibre.devices.interface import BookList as _BookList
|
||||
from calibre.constants import preferred_encoding
|
||||
from calibre import isbytestring, force_unicode
|
||||
from calibre.utils.config import device_prefs, tweaks
|
||||
from calibre.utils.config_base import tweaks
|
||||
from calibre.utils.icu import sort_key
|
||||
from calibre.utils.formatter import EvalFormatter
|
||||
|
||||
class Book(Metadata):
|
||||
def __init__(self, prefix, lpath, size=None, other=None):
|
||||
@ -109,6 +108,7 @@ class CollectionsBookList(BookList):
|
||||
return None
|
||||
|
||||
def compute_category_name(self, field_key, field_value, field_meta):
|
||||
from calibre.utils.formatter import EvalFormatter
|
||||
renames = tweaks['sony_collection_renaming_rules']
|
||||
field_name = renames.get(field_key, None)
|
||||
if field_name is None:
|
||||
@ -124,6 +124,7 @@ class CollectionsBookList(BookList):
|
||||
|
||||
def get_collections(self, collection_attributes):
|
||||
from calibre.devices.usbms.driver import debug_print
|
||||
from calibre.utils.config import device_prefs
|
||||
debug_print('Starting get_collections:', device_prefs['manage_device_metadata'])
|
||||
debug_print('Renaming rules:', tweaks['sony_collection_renaming_rules'])
|
||||
debug_print('Formatting template:', tweaks['sony_collection_name_template'])
|
||||
|
@ -4,7 +4,7 @@ __license__ = 'GPL 3'
|
||||
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
||||
from calibre.utils.config import Config, ConfigProxy
|
||||
from calibre.utils.config_base import Config, ConfigProxy
|
||||
|
||||
|
||||
class DeviceConfig(object):
|
||||
|
@ -20,8 +20,6 @@ from calibre.devices.usbms.cli import CLI
|
||||
from calibre.devices.usbms.device import Device
|
||||
from calibre.devices.usbms.books import BookList, Book
|
||||
from calibre.ebooks.metadata.book.json_codec import JsonCodec
|
||||
from calibre.utils.config import from_json, to_json
|
||||
from calibre.utils.date import now, isoformat
|
||||
|
||||
BASE_TIME = None
|
||||
def debug_print(*args):
|
||||
@ -58,6 +56,7 @@ class USBMS(CLI, Device):
|
||||
SCAN_FROM_ROOT = False
|
||||
|
||||
def _update_driveinfo_record(self, dinfo, prefix, location_code, name=None):
|
||||
from calibre.utils.date import now, isoformat
|
||||
import uuid
|
||||
if not isinstance(dinfo, dict):
|
||||
dinfo = {}
|
||||
@ -75,6 +74,7 @@ class USBMS(CLI, Device):
|
||||
return dinfo
|
||||
|
||||
def _update_driveinfo_file(self, prefix, location_code, name=None):
|
||||
from calibre.utils.config import from_json, to_json
|
||||
if os.path.exists(os.path.join(prefix, self.DRIVEINFO)):
|
||||
with open(os.path.join(prefix, self.DRIVEINFO), 'rb') as f:
|
||||
try:
|
||||
|
@@ -206,9 +206,11 @@ class EPUBInput(InputFormatPlugin):
        not_for_spine = set()
        for y in opf.itermanifest():
            id_ = y.get('id', None)
-            if id_ and y.get('media-type', None) in \
-                    ('application/vnd.adobe-page-template+xml','application/text'):
-                not_for_spine.add(id_)
+            if id_ and y.get('media-type', None) in {
+                    'application/vnd.adobe-page-template+xml', 'application/vnd.adobe.page-template+xml',
+                    'application/adobe-page-template+xml', 'application/adobe.page-template+xml',
+                    'application/text'}:
+                not_for_spine.add(id_)

        seen = set()
        for x in list(opf.iterspine()):
|
@ -10,7 +10,6 @@ __docformat__ = 'restructuredtext en'
|
||||
import re, tempfile, os
|
||||
from functools import partial
|
||||
from itertools import izip
|
||||
from urllib import quote
|
||||
|
||||
from calibre.constants import islinux, isbsd
|
||||
from calibre.customize.conversion import (InputFormatPlugin,
|
||||
@ -223,6 +222,7 @@ class HTMLInput(InputFormatPlugin):
|
||||
return link, frag
|
||||
|
||||
def resource_adder(self, link_, base=None):
|
||||
from urllib import quote
|
||||
link, frag = self.link_to_local_path(link_, base=base)
|
||||
if link is None:
|
||||
return link_
|
||||
|
@ -10,7 +10,6 @@ import shutil
|
||||
|
||||
from calibre.customize.conversion import InputFormatPlugin
|
||||
from calibre.ptempfile import TemporaryDirectory
|
||||
from calibre.utils.zipfile import ZipFile
|
||||
|
||||
class PMLInput(InputFormatPlugin):
|
||||
|
||||
@ -86,6 +85,7 @@ class PMLInput(InputFormatPlugin):
|
||||
accelerators):
|
||||
from calibre.ebooks.metadata.toc import TOC
|
||||
from calibre.ebooks.metadata.opf2 import OPFCreator
|
||||
from calibre.utils.zipfile import ZipFile
|
||||
|
||||
self.options = options
|
||||
self.log = log
|
||||
|
@ -8,10 +8,9 @@ __docformat__ = 'restructuredtext en'
|
||||
Transform OEB content into FB2 markup
|
||||
'''
|
||||
|
||||
import re, textwrap, uuid
|
||||
from base64 import b64encode
|
||||
from datetime import datetime
|
||||
import re
|
||||
import uuid
|
||||
|
||||
from lxml import etree
|
||||
|
||||
@ -19,6 +18,7 @@ from calibre import prepare_string_for_xml
|
||||
from calibre.constants import __appname__, __version__
|
||||
from calibre.utils.magick import Image
|
||||
from calibre.utils.localization import lang_as_iso639_1
|
||||
from calibre.ebooks.oeb.base import urlnormalize
|
||||
|
||||
class FB2MLizer(object):
|
||||
'''
|
||||
@ -138,6 +138,12 @@ class FB2MLizer(object):
|
||||
if not metadata['author']:
|
||||
metadata['author'] = u'<author><first-name></first-name><last-name><last-name></author>'
|
||||
|
||||
metadata['keywords'] = u''
|
||||
tags = list(map(unicode, self.oeb_book.metadata.subject))
|
||||
if tags:
|
||||
tags = ', '.join(prepare_string_for_xml(x) for x in tags)
|
||||
metadata['keywords'] = '<keywords>%s</keywords>'%tags
|
||||
|
||||
metadata['sequence'] = u''
|
||||
if self.oeb_book.metadata.series:
|
||||
index = '1'
|
||||
@ -145,6 +151,7 @@ class FB2MLizer(object):
|
||||
index = self.oeb_book.metadata.series_index[0]
|
||||
metadata['sequence'] = u'<sequence name="%s" number="%s" />' % (prepare_string_for_xml(u'%s' % self.oeb_book.metadata.series[0]), index)
|
||||
|
||||
year = publisher = isbn = u''
|
||||
identifiers = self.oeb_book.metadata['identifier']
|
||||
for x in identifiers:
|
||||
if x.get(OPF('scheme'), None).lower() == 'uuid' or unicode(x).startswith('urn:uuid:'):
|
||||
@ -154,31 +161,57 @@ class FB2MLizer(object):
|
||||
self.log.warn('No UUID identifier found')
|
||||
metadata['id'] = str(uuid.uuid4())
|
||||
|
||||
try:
|
||||
date = self.oeb_book.metadata['date'][0]
|
||||
except IndexError:
|
||||
pass
|
||||
else:
|
||||
year = '<year>%s</year>' % prepare_string_for_xml(date.value.partition('-')[0])
|
||||
|
||||
try:
|
||||
publisher = self.oeb_book.metadata['publisher'][0]
|
||||
except IndexError:
|
||||
pass
|
||||
else:
|
||||
publisher = '<publisher>%s</publisher>' % prepare_string_for_xml(publisher.value)
|
||||
|
||||
for x in identifiers:
|
||||
if x.get(OPF('scheme'), None).lower() == 'isbn':
|
||||
isbn = '<isbn>%s</isbn>' % prepare_string_for_xml(x.value)
|
||||
|
||||
metadata['year'], metadata['isbn'], metadata['publisher'] = year, isbn, publisher
|
||||
for key, value in metadata.items():
|
||||
if key not in ('author', 'cover', 'sequence'):
|
||||
if key not in ('author', 'cover', 'sequence', 'keywords', 'year', 'publisher', 'isbn'):
|
||||
metadata[key] = prepare_string_for_xml(value)
|
||||
|
||||
return u'<FictionBook xmlns="http://www.gribuser.ru/xml/fictionbook/2.0" xmlns:xlink="http://www.w3.org/1999/xlink">' \
|
||||
'<description>' \
|
||||
'<title-info>' \
|
||||
'<genre>%(genre)s</genre>' \
|
||||
'%(author)s' \
|
||||
'<book-title>%(title)s</book-title>' \
|
||||
'%(cover)s' \
|
||||
'<lang>%(lang)s</lang>' \
|
||||
'%(sequence)s' \
|
||||
'</title-info>' \
|
||||
'<document-info>' \
|
||||
'%(author)s' \
|
||||
'<program-used>%(appname)s %(version)s</program-used>' \
|
||||
'<date>%(date)s</date>' \
|
||||
'<id>%(id)s</id>' \
|
||||
'<version>1.0</version>' \
|
||||
'</document-info>' \
|
||||
'</description>' % metadata
|
||||
return textwrap.dedent(u'''
|
||||
<FictionBook xmlns="http://www.gribuser.ru/xml/fictionbook/2.0" xmlns:xlink="http://www.w3.org/1999/xlink">
|
||||
<description>
|
||||
<title-info>
|
||||
<genre>%(genre)s</genre>
|
||||
%(author)s
|
||||
<book-title>%(title)s</book-title>
|
||||
%(cover)s
|
||||
<lang>%(lang)s</lang>
|
||||
%(keywords)s
|
||||
%(sequence)s
|
||||
</title-info>
|
||||
<document-info>
|
||||
%(author)s
|
||||
<program-used>%(appname)s %(version)s</program-used>
|
||||
<date>%(date)s</date>
|
||||
<id>%(id)s</id>
|
||||
<version>1.0</version>
|
||||
</document-info>
|
||||
<publish-info>
|
||||
%(year)s
|
||||
%(publisher)s
|
||||
%(isbn)s
|
||||
</publish-info>
|
||||
</description>\n''') % metadata
|
||||
|
||||
def fb2_footer(self):
|
||||
return u'</FictionBook>'
|
||||
return u'\n</FictionBook>'
|
||||
|
||||
def get_cover(self):
|
||||
from calibre.ebooks.oeb.base import OEB_RASTER_IMAGES
|
||||
@ -281,7 +314,7 @@ class FB2MLizer(object):
|
||||
data += char
|
||||
images.append('<binary id="%s" content-type="image/jpeg">%s\n</binary>' % (self.image_hrefs[item.href], data))
|
||||
except Exception as e:
|
||||
self.log.error('Error: Could not include file %s because ' \
|
||||
self.log.error('Error: Could not include file %s because '
|
||||
'%s.' % (item.href, e))
|
||||
return ''.join(images)
|
||||
|
||||
@ -420,13 +453,16 @@ class FB2MLizer(object):
|
||||
if tag == 'img':
|
||||
if elem_tree.attrib.get('src', None):
|
||||
# Only write the image tag if it is in the manifest.
|
||||
if page.abshref(elem_tree.attrib['src']) in self.oeb_book.manifest.hrefs.keys():
|
||||
if page.abshref(elem_tree.attrib['src']) not in self.image_hrefs.keys():
|
||||
self.image_hrefs[page.abshref(elem_tree.attrib['src'])] = '_%s.jpg' % len(self.image_hrefs.keys())
|
||||
ihref = urlnormalize(page.abshref(elem_tree.attrib['src']))
|
||||
if ihref in self.oeb_book.manifest.hrefs:
|
||||
if ihref not in self.image_hrefs:
|
||||
self.image_hrefs[ihref] = '_%s.jpg' % len(self.image_hrefs)
|
||||
p_txt, p_tag = self.ensure_p()
|
||||
fb2_out += p_txt
|
||||
tags += p_tag
|
||||
fb2_out.append('<image xlink:href="#%s" />' % self.image_hrefs[page.abshref(elem_tree.attrib['src'])])
|
||||
fb2_out.append('<image xlink:href="#%s" />' % self.image_hrefs[ihref])
|
||||
else:
|
||||
self.log.warn(u'Ignoring image not in manifest: %s'%ihref)
|
||||
if tag in ('br', 'hr') or ems >= 1:
|
||||
if ems < 1:
|
||||
multiplier = 1
|
||||
|
@ -7,12 +7,11 @@ __docformat__ = 'restructuredtext en'
|
||||
Provides abstraction for metadata reading.writing from a variety of ebook formats.
|
||||
"""
|
||||
import os, sys, re
|
||||
from urllib import unquote, quote
|
||||
from urlparse import urlparse
|
||||
|
||||
from calibre import relpath, guess_type, remove_bracketed_text, prints
|
||||
|
||||
from calibre.utils.config import tweaks
|
||||
from calibre.utils.config_base import tweaks
|
||||
|
||||
try:
|
||||
_author_pat = re.compile(tweaks['authors_split_regex'])
|
||||
@ -188,6 +187,7 @@ class Resource(object):
|
||||
'''
|
||||
|
||||
def __init__(self, href_or_path, basedir=os.getcwdu(), is_path=True):
|
||||
from urllib import unquote
|
||||
self._href = None
|
||||
self._basedir = basedir
|
||||
self.path = None
|
||||
@ -226,6 +226,7 @@ class Resource(object):
|
||||
`basedir`: If None, the basedir of this resource is used (see :method:`set_basedir`).
|
||||
If this resource has no basedir, then the current working directory is used as the basedir.
|
||||
'''
|
||||
from urllib import quote
|
||||
if basedir is None:
|
||||
if self._basedir:
|
||||
basedir = self._basedir
|
||||
|
@ -10,7 +10,6 @@ import os
|
||||
from contextlib import closing
|
||||
|
||||
from calibre.customize import FileTypePlugin
|
||||
from calibre.utils.zipfile import ZipFile, stringFileHeader
|
||||
|
||||
def is_comic(list_of_names):
|
||||
extensions = set([x.rpartition('.')[-1].lower() for x in list_of_names
|
||||
@ -19,6 +18,7 @@ def is_comic(list_of_names):
|
||||
return len(extensions - comic_extensions) == 0
|
||||
|
||||
def archive_type(stream):
|
||||
from calibre.utils.zipfile import stringFileHeader
|
||||
try:
|
||||
pos = stream.tell()
|
||||
except:
|
||||
@ -47,6 +47,7 @@ class ArchiveExtract(FileTypePlugin):
|
||||
on_import = True
|
||||
|
||||
def run(self, archive):
|
||||
from calibre.utils.zipfile import ZipFile
|
||||
is_rar = archive.lower().endswith('.rar')
|
||||
if is_rar:
|
||||
from calibre.utils.unrar import extract_member, names
|
||||
|
@ -13,9 +13,7 @@ from calibre.ebooks.metadata.book import (SC_COPYABLE_FIELDS,
|
||||
SC_FIELDS_COPY_NOT_NULL, STANDARD_METADATA_FIELDS,
|
||||
TOP_LEVEL_IDENTIFIERS, ALL_METADATA_FIELDS)
|
||||
from calibre.library.field_metadata import FieldMetadata
|
||||
from calibre.utils.date import isoformat, format_date, parse_only_date
|
||||
from calibre.utils.icu import sort_key
|
||||
from calibre.utils.formatter import TemplateFormatter
|
||||
|
||||
# Special sets used to optimize the performance of getting and setting
|
||||
# attributes on Metadata objects
|
||||
@ -44,38 +42,6 @@ NULL_VALUES = {
|
||||
|
||||
field_metadata = FieldMetadata()
|
||||
|
||||
class SafeFormat(TemplateFormatter):
|
||||
|
||||
def get_value(self, orig_key, args, kwargs):
|
||||
if not orig_key:
|
||||
return ''
|
||||
key = orig_key = orig_key.lower()
|
||||
if key != 'title_sort' and key not in TOP_LEVEL_IDENTIFIERS and \
|
||||
key not in ALL_METADATA_FIELDS:
|
||||
key = field_metadata.search_term_to_field_key(key)
|
||||
if key is None or (self.book and
|
||||
key not in self.book.all_field_keys()):
|
||||
if hasattr(self.book, orig_key):
|
||||
key = orig_key
|
||||
else:
|
||||
raise ValueError(_('Value: unknown field ') + orig_key)
|
||||
try:
|
||||
b = self.book.get_user_metadata(key, False)
|
||||
except:
|
||||
b = None
|
||||
if b and ((b['datatype'] == 'int' and self.book.get(key, 0) == 0) or
|
||||
(b['datatype'] == 'float' and self.book.get(key, 0.0) == 0.0)):
|
||||
v = ''
|
||||
else:
|
||||
v = self.book.format_field(key, series_with_index=False)[1]
|
||||
if v is None:
|
||||
return ''
|
||||
if v == '':
|
||||
return ''
|
||||
return v
|
||||
|
||||
# DEPRECATED. This is not thread safe. Do not use.
|
||||
composite_formatter = SafeFormat()
|
||||
|
||||
class Metadata(object):
|
||||
|
||||
@ -116,6 +82,7 @@ class Metadata(object):
|
||||
# List of strings or []
|
||||
self.author = list(authors) if authors else []# Needed for backward compatibility
|
||||
self.authors = list(authors) if authors else []
|
||||
from calibre.ebooks.metadata.book.formatter import SafeFormat
|
||||
self.formatter = SafeFormat()
|
||||
self.template_cache = template_cache
|
||||
|
||||
@ -454,6 +421,7 @@ class Metadata(object):
|
||||
'''
|
||||
if not ops:
|
||||
return
|
||||
from calibre.ebooks.metadata.book.formatter import SafeFormat
|
||||
formatter = SafeFormat()
|
||||
for op in ops:
|
||||
try:
|
||||
@ -633,6 +601,7 @@ class Metadata(object):
|
||||
returns the tuple (display_name, formatted_value, original_value,
|
||||
field_metadata)
|
||||
'''
|
||||
from calibre.utils.date import format_date
|
||||
|
||||
# Handle custom series index
|
||||
if key.startswith('#') and key.endswith('_index'):
|
||||
@ -717,6 +686,7 @@ class Metadata(object):
|
||||
A string representation of this object, suitable for printing to
|
||||
console
|
||||
'''
|
||||
from calibre.utils.date import isoformat
|
||||
from calibre.ebooks.metadata import authors_to_string
|
||||
ans = []
|
||||
def fmt(x, y):
|
||||
@ -809,6 +779,7 @@ def field_from_string(field, raw, field_metadata):
|
||||
elif dt == 'rating':
|
||||
val = float(raw) * 2
|
||||
elif dt == 'datetime':
|
||||
from calibre.utils.date import parse_only_date
|
||||
val = parse_only_date(raw)
|
||||
elif dt == 'bool':
|
||||
if raw.lower() in {'true', 'yes', 'y'}:
|
src/calibre/ebooks/metadata/book/formatter.py (new file, 48 lines)
@@ -0,0 +1,48 @@
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'

from calibre.ebooks.metadata.book import TOP_LEVEL_IDENTIFIERS, ALL_METADATA_FIELDS

from calibre.utils.formatter import TemplateFormatter

class SafeFormat(TemplateFormatter):

    def __init__(self):
        TemplateFormatter.__init__(self)
        from calibre.ebooks.metadata.book.base import field_metadata
        self.field_metadata = field_metadata

    def get_value(self, orig_key, args, kwargs):
        if not orig_key:
            return ''
        key = orig_key = orig_key.lower()
        if key != 'title_sort' and key not in TOP_LEVEL_IDENTIFIERS and \
                key not in ALL_METADATA_FIELDS:
            key = self.field_metadata.search_term_to_field_key(key)
        if key is None or (self.book and
                key not in self.book.all_field_keys()):
            if hasattr(self.book, orig_key):
                key = orig_key
            else:
                raise ValueError(_('Value: unknown field ') + orig_key)
        try:
            b = self.book.get_user_metadata(key, False)
        except:
            b = None
        if b and ((b['datatype'] == 'int' and self.book.get(key, 0) == 0) or
                (b['datatype'] == 'float' and self.book.get(key, 0.0) == 0.0)):
            v = ''
        else:
            v = self.book.format_field(key, series_with_index=False)[1]
        if v is None:
            return ''
        if v == '':
            return ''
        return v
@@ -11,17 +11,18 @@ from datetime import datetime, time
from calibre.ebooks.metadata.book import SERIALIZABLE_FIELDS
from calibre.constants import filesystem_encoding, preferred_encoding
from calibre.library.field_metadata import FieldMetadata
from calibre.utils.date import parse_date, isoformat, UNDEFINED_DATE, local_tz
from calibre import isbytestring

# Translate datetimes to and from strings. The string form is the datetime in
# UTC. The returned date is also UTC
def string_to_datetime(src):
    from calibre.utils.date import parse_date
    if src == "None":
        return None
    return parse_date(src)

def datetime_to_string(dateval):
    from calibre.utils.date import isoformat, UNDEFINED_DATE, local_tz
    if dateval is None:
        return "None"
    if not isinstance(dateval, datetime):
@@ -554,6 +554,10 @@ class OPF(object): # {{{
                resolve_entities=True, assume_utf8=True)
        raw = raw[raw.find('<'):]
        self.root = etree.fromstring(raw, self.PARSER)
        try:
            self.package_version = float(self.root.get('version', None))
        except (AttributeError, TypeError, ValueError):
            self.package_version = 0
        self.metadata = self.metadata_path(self.root)
        if not self.metadata:
            raise ValueError('Malformed OPF file: No <metadata> element')

@@ -1116,7 +1120,10 @@ class OPF(object): # {{{
    def get_metadata_element(self, name):
        matches = self.metadata_elem_path(self.metadata, name=name)
        if matches:
            return matches[-1]
            num = -1
            if self.package_version >= 3 and name == 'title':
                num = 0
            return matches[num]

    def create_metadata_element(self, name, attrib=None, is_dc=True):
        if is_dc:
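
For context, a hedged sketch of the package_version handling added above; it
mirrors the lines in the hunk, with lxml used directly and an inline XML
string standing in for a real OPF file.

    from lxml import etree

    raw = b'<package xmlns="http://www.idpf.org/2007/opf" version="3.0"/>'
    root = etree.fromstring(raw)
    try:
        package_version = float(root.get('version', None))
    except (AttributeError, TypeError, ValueError):
        package_version = 0  # unparseable or missing version attribute
    # For EPUB 3 packages (version >= 3) get_metadata_element() above now
    # returns the first matching <dc:title> instead of the last one.
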
@@ -46,7 +46,8 @@ def read_info(outputdir, get_cover):

    ans = {}
    for line in raw.splitlines():
        if u':' not in line: continue
        if u':' not in line:
            continue
        field, val = line.partition(u':')[::2]
        val = val.strip()
        if field and val:

@@ -54,7 +55,7 @@ def read_info(outputdir, get_cover):

    if get_cover:
        try:
            subprocess.check_call([pdftoppm, '-singlefile', '-jpeg',
            subprocess.check_call([pdftoppm, '-singlefile', '-jpeg', '-cropbox',
                'src.pdf', 'cover'])
        except subprocess.CalledProcessError as e:
            prints('pdftoppm errored out with return code: %d'%e.returncode)

@@ -69,7 +70,7 @@ def page_images(pdfpath, outputdir, first=1, last=1):
        import win32process as w
        args['creationflags'] = w.HIGH_PRIORITY_CLASS | w.CREATE_NO_WINDOW
    try:
        subprocess.check_call([pdftoppm, '-jpeg', '-f', unicode(first),
        subprocess.check_call([pdftoppm, '-cropbox', '-jpeg', '-f', unicode(first),
            '-l', unicode(last), pdfpath,
            os.path.join(outputdir, 'page-images')], **args)
    except subprocess.CalledProcessError as e:
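
A standalone sketch of the new pdftoppm invocation (assuming Poppler's
pdftoppm is on PATH and an src.pdf exists in the working directory); the
-cropbox flag added above makes the rendered cover respect the PDF CropBox.

    import subprocess

    # Render only the first page of src.pdf to cover.jpg, honouring the CropBox
    subprocess.check_call(['pdftoppm', '-singlefile', '-jpeg', '-cropbox',
                           'src.pdf', 'cover'])
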
@ -8,7 +8,6 @@ __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
||||
import socket, time, re
|
||||
from urllib import urlencode
|
||||
from threading import Thread
|
||||
from Queue import Queue, Empty
|
||||
|
||||
@ -18,7 +17,6 @@ from calibre.ebooks.metadata import check_isbn
|
||||
from calibre.ebooks.metadata.sources.base import (Source, Option, fixcase,
|
||||
fixauthors)
|
||||
from calibre.ebooks.metadata.book.base import Metadata
|
||||
from calibre.utils.date import parse_only_date
|
||||
from calibre.utils.localization import canonicalize_lang
|
||||
|
||||
class Worker(Thread): # Get details {{{
|
||||
@ -495,6 +493,7 @@ class Worker(Thread): # Get details {{{
|
||||
def parse_pubdate(self, pd):
|
||||
for x in reversed(pd.xpath(self.publisher_xpath)):
|
||||
if x.tail:
|
||||
from calibre.utils.date import parse_only_date
|
||||
ans = x.tail
|
||||
date = ans.rpartition('(')[-1].replace(')', '').strip()
|
||||
date = self.delocalize_datestr(date)
|
||||
@ -637,6 +636,7 @@ class Amazon(Source):
|
||||
|
||||
def create_query(self, log, title=None, authors=None, identifiers={}, # {{{
|
||||
domain=None):
|
||||
from urllib import urlencode
|
||||
if domain is None:
|
||||
domain = self.domain
|
||||
|
||||
|
@ -12,27 +12,9 @@ from future_builtins import map
|
||||
|
||||
from calibre import browser, random_user_agent
|
||||
from calibre.customize import Plugin
|
||||
from calibre.utils.config import JSONConfig
|
||||
from calibre.utils.titlecase import titlecase
|
||||
from calibre.utils.icu import capitalize, lower, upper
|
||||
from calibre.ebooks.metadata import check_isbn
|
||||
|
||||
msprefs = JSONConfig('metadata_sources/global.json')
|
||||
msprefs.defaults['txt_comments'] = False
|
||||
msprefs.defaults['ignore_fields'] = []
|
||||
msprefs.defaults['user_default_ignore_fields'] = []
|
||||
msprefs.defaults['max_tags'] = 20
|
||||
msprefs.defaults['wait_after_first_identify_result'] = 30 # seconds
|
||||
msprefs.defaults['wait_after_first_cover_result'] = 60 # seconds
|
||||
msprefs.defaults['swap_author_names'] = False
|
||||
msprefs.defaults['fewer_tags'] = True
|
||||
msprefs.defaults['find_first_edition_date'] = False
|
||||
|
||||
# Google covers are often poor quality (scans/errors) but they have high
|
||||
# resolution, so they trump covers from better sources. So make sure they
|
||||
# are only used if no other covers are found.
|
||||
msprefs.defaults['cover_priorities'] = {'Google':2, 'Google Images':2, 'Big Book Search':2}
|
||||
|
||||
def create_log(ostream=None):
|
||||
from calibre.utils.logging import ThreadSafeLog, FileStream
|
||||
log = ThreadSafeLog(level=ThreadSafeLog.DEBUG)
|
||||
@ -162,6 +144,7 @@ def fixauthors(authors):
|
||||
|
||||
def fixcase(x):
|
||||
if x:
|
||||
from calibre.utils.titlecase import titlecase
|
||||
x = titlecase(x)
|
||||
return x
|
||||
|
||||
@ -263,6 +246,7 @@ class Source(Plugin):
|
||||
@property
|
||||
def prefs(self):
|
||||
if self._config_obj is None:
|
||||
from calibre.utils.config import JSONConfig
|
||||
self._config_obj = JSONConfig('metadata_sources/%s.json'%self.name)
|
||||
return self._config_obj
|
||||
# }}}
|
||||
|
@ -13,7 +13,8 @@ from threading import Thread, Event
|
||||
from io import BytesIO
|
||||
|
||||
from calibre.customize.ui import metadata_plugins
|
||||
from calibre.ebooks.metadata.sources.base import msprefs, create_log
|
||||
from calibre.ebooks.metadata.sources.base import create_log
|
||||
from calibre.ebooks.metadata.sources.prefs import msprefs
|
||||
from calibre.utils.magick.draw import Image, save_cover_data_to
|
||||
|
||||
class Worker(Thread):
|
||||
|
@ -8,7 +8,6 @@ __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>; 2011, Li Fanxi <lifan
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
||||
import time
|
||||
from urllib import urlencode
|
||||
from functools import partial
|
||||
from Queue import Queue, Empty
|
||||
|
||||
@ -172,6 +171,7 @@ class Douban(Source):
|
||||
# }}}
|
||||
|
||||
def create_query(self, log, title=None, authors=None, identifiers={}): # {{{
|
||||
from urllib import urlencode
|
||||
SEARCH_URL = 'http://api.douban.com/book/subjects?'
|
||||
ISBN_URL = 'http://api.douban.com/book/subject/isbn/'
|
||||
SUBJECT_URL = 'http://api.douban.com/book/subject/'
|
||||
|
@ -8,7 +8,6 @@ __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
||||
import time, hashlib
|
||||
from urllib import urlencode
|
||||
from functools import partial
|
||||
from Queue import Queue, Empty
|
||||
|
||||
@ -16,7 +15,6 @@ from calibre.ebooks.metadata import check_isbn
|
||||
from calibre.ebooks.metadata.sources.base import Source
|
||||
from calibre.ebooks.metadata.book.base import Metadata
|
||||
from calibre.ebooks.chardet import xml_to_unicode
|
||||
from calibre.utils.date import parse_date, utcnow
|
||||
from calibre.utils.cleantext import clean_ascii_chars
|
||||
from calibre.utils.localization import canonicalize_lang
|
||||
from calibre import as_unicode
|
||||
@ -131,6 +129,7 @@ def to_metadata(browser, log, entry_, timeout): # {{{
|
||||
# pubdate
|
||||
pubdate = get_text(extra, date)
|
||||
if pubdate:
|
||||
from calibre.utils.date import parse_date, utcnow
|
||||
try:
|
||||
default = utcnow().replace(day=15)
|
||||
mi.pubdate = parse_date(pubdate, assume_utc=True, default=default)
|
||||
@ -179,6 +178,7 @@ class GoogleBooks(Source):
|
||||
# }}}
|
||||
|
||||
def create_query(self, log, title=None, authors=None, identifiers={}): # {{{
|
||||
from urllib import urlencode
|
||||
BASE_URL = 'http://books.google.com/books/feeds/volumes?'
|
||||
isbn = check_isbn(identifiers.get('isbn', None))
|
||||
q = ''
|
||||
|
@ -16,7 +16,8 @@ from operator import attrgetter
|
||||
from urlparse import urlparse
|
||||
|
||||
from calibre.customize.ui import metadata_plugins, all_metadata_plugins
|
||||
from calibre.ebooks.metadata.sources.base import create_log, msprefs
|
||||
from calibre.ebooks.metadata.sources.base import create_log
|
||||
from calibre.ebooks.metadata.sources.prefs import msprefs
|
||||
from calibre.ebooks.metadata.xisbn import xisbn
|
||||
from calibre.ebooks.metadata.book.base import Metadata
|
||||
from calibre.utils.date import utc_tz, as_utc
|
||||
|
@ -7,7 +7,6 @@ __license__ = 'GPL v3'
|
||||
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
||||
from urllib import quote
|
||||
|
||||
|
||||
from calibre.ebooks.metadata import check_isbn
|
||||
@ -66,6 +65,7 @@ class ISBNDB(Source):
|
||||
return self.isbndb_key is not None
|
||||
|
||||
def create_query(self, title=None, authors=None, identifiers={}): # {{{
|
||||
from urllib import quote
|
||||
base_url = BASE_URL%self.isbndb_key
|
||||
isbn = check_isbn(identifiers.get('isbn', None))
|
||||
q = ''
|
||||
|
@ -6,14 +6,12 @@ __copyright__ = '2011, Roman Mukhin <ramses_ru at hotmail.com>'
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
||||
import re
|
||||
from urllib import quote_plus
|
||||
from Queue import Queue, Empty
|
||||
|
||||
from calibre import as_unicode
|
||||
from calibre.ebooks.metadata import check_isbn
|
||||
from calibre.ebooks.metadata.sources.base import Source
|
||||
from calibre.ebooks.metadata.book.base import Metadata
|
||||
from calibre.utils.date import parse_only_date
|
||||
|
||||
class Ozon(Source):
|
||||
name = 'OZON.ru'
|
||||
@ -49,6 +47,7 @@ class Ozon(Source):
|
||||
# }}}
|
||||
|
||||
def create_query(self, log, title=None, authors=None, identifiers={}): # {{{
|
||||
from urllib import quote_plus
|
||||
# div_book -> search only books, ebooks and audio books
|
||||
search_url = self.ozon_url + '/webservice/webservice.asmx/SearchWebService?searchContext=div_book&searchText='
|
||||
|
||||
@ -390,33 +389,34 @@ def _get_affiliateId(): # {{{
|
||||
return aff_id
|
||||
# }}}
|
||||
|
||||
# for now only RUS ISBN are supported
|
||||
#http://ru.wikipedia.org/wiki/ISBN_российских_издательств
|
||||
isbn_pat = re.compile(r"""
|
||||
^
|
||||
(\d{3})? # match GS1 Prefix for ISBN13
|
||||
(5) # group identifier for rRussian-speaking countries
|
||||
( # begin variable length for Publisher
|
||||
[01]\d{1}| # 2x
|
||||
[2-6]\d{2}| # 3x
|
||||
7\d{3}| # 4x (starting with 7)
|
||||
8[0-4]\d{2}| # 4x (starting with 8)
|
||||
9[2567]\d{2}| # 4x (starting with 9)
|
||||
99[26]\d{1}| # 4x (starting with 99)
|
||||
8[5-9]\d{3}| # 5x (starting with 8)
|
||||
9[348]\d{3}| # 5x (starting with 9)
|
||||
900\d{2}| # 5x (starting with 900)
|
||||
91[0-8]\d{2}| # 5x (starting with 91)
|
||||
90[1-9]\d{3}| # 6x (starting with 90)
|
||||
919\d{3}| # 6x (starting with 919)
|
||||
99[^26]\d{4} # 7x (starting with 99)
|
||||
) # end variable length for Publisher
|
||||
(\d+) # Title
|
||||
([\dX]) # Check digit
|
||||
$
|
||||
""", re.VERBOSE)
|
||||
|
||||
def _format_isbn(log, isbn): # {{{
|
||||
# for now only RUS ISBN are supported
|
||||
#http://ru.wikipedia.org/wiki/ISBN_российских_издательств
|
||||
isbn_pat = re.compile(r"""
|
||||
^
|
||||
(\d{3})? # match GS1 Prefix for ISBN13
|
||||
(5) # group identifier for rRussian-speaking countries
|
||||
( # begin variable length for Publisher
|
||||
[01]\d{1}| # 2x
|
||||
[2-6]\d{2}| # 3x
|
||||
7\d{3}| # 4x (starting with 7)
|
||||
8[0-4]\d{2}| # 4x (starting with 8)
|
||||
9[2567]\d{2}| # 4x (starting with 9)
|
||||
99[26]\d{1}| # 4x (starting with 99)
|
||||
8[5-9]\d{3}| # 5x (starting with 8)
|
||||
9[348]\d{3}| # 5x (starting with 9)
|
||||
900\d{2}| # 5x (starting with 900)
|
||||
91[0-8]\d{2}| # 5x (starting with 91)
|
||||
90[1-9]\d{3}| # 6x (starting with 90)
|
||||
919\d{3}| # 6x (starting with 919)
|
||||
99[^26]\d{4} # 7x (starting with 99)
|
||||
) # end variable length for Publisher
|
||||
(\d+) # Title
|
||||
([\dX]) # Check digit
|
||||
$
|
||||
""", re.VERBOSE)
|
||||
|
||||
|
||||
res = check_isbn(isbn)
|
||||
if res:
|
||||
m = isbn_pat.match(res)
|
||||
@ -460,6 +460,7 @@ def _normalizeAuthorNameWithInitials(name): # {{{
|
||||
# }}}
|
||||
|
||||
def toPubdate(log, yearAsString): # {{{
|
||||
from calibre.utils.date import parse_only_date
|
||||
res = None
|
||||
if yearAsString:
|
||||
try:
|
||||
src/calibre/ebooks/metadata/sources/prefs.py (new file, 28 lines)
@@ -0,0 +1,28 @@
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'

from calibre.utils.config import JSONConfig

msprefs = JSONConfig('metadata_sources/global.json')
msprefs.defaults['txt_comments'] = False
msprefs.defaults['ignore_fields'] = []
msprefs.defaults['user_default_ignore_fields'] = []
msprefs.defaults['max_tags'] = 20
msprefs.defaults['wait_after_first_identify_result'] = 30 # seconds
msprefs.defaults['wait_after_first_cover_result'] = 60 # seconds
msprefs.defaults['swap_author_names'] = False
msprefs.defaults['fewer_tags'] = True
msprefs.defaults['find_first_edition_date'] = False

# Google covers are often poor quality (scans/errors) but they have high
# resolution, so they trump covers from better sources. So make sure they
# are only used if no other covers are found.
msprefs.defaults['cover_priorities'] = {'Google':2, 'Google Images':2, 'Big Book Search':2}
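
A short usage sketch of the new msprefs object (not part of the commit): a
JSONConfig behaves like a persistent dict whose missing keys fall back to the
defaults declared above, so call sites such as msprefs['ignore_fields'] keep
working after the move out of sources/base.py.

    from calibre.ebooks.metadata.sources.prefs import msprefs

    wait = msprefs['wait_after_first_identify_result']  # 30 unless the user overrode it
    msprefs['max_tags'] = 10  # persisted to metadata_sources/global.json
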
@@ -14,8 +14,8 @@ from threading import Event
from calibre.customize.ui import all_metadata_plugins
from calibre import prints, sanitize_file_name2
from calibre.ebooks.metadata import check_isbn
from calibre.ebooks.metadata.sources.base import (create_log,
        get_cached_cover_urls, msprefs)
from calibre.ebooks.metadata.sources.base import create_log, get_cached_cover_urls
from calibre.ebooks.metadata.sources.prefs import msprefs

def isbn_test(isbn):
    isbn_ = check_isbn(isbn)
@@ -20,9 +20,10 @@ class RemoveAdobeMargins(object):
        self.oeb, self.opts, self.log = oeb, opts, log

        for item in self.oeb.manifest:
            if (item.media_type in ('application/vnd.adobe-page-template+xml',
                    'application/vnd.adobe.page-template+xml') and
                    hasattr(item.data, 'xpath')):
            if item.media_type in {
                    'application/vnd.adobe-page-template+xml', 'application/vnd.adobe.page-template+xml',
                    'application/adobe-page-template+xml', 'application/adobe.page-template+xml',
                    } and hasattr(item.data, 'xpath'):
                self.log('Removing page margins specified in the'
                        ' Adobe page template')
                for elem in item.data.xpath(

@@ -84,13 +85,12 @@ class RemoveFakeMargins(object):
            except:
                pass
            else:
                if ( (hasattr(ti, 'startswith') and ti.startswith('-')) or
                if ((hasattr(ti, 'startswith') and ti.startswith('-')) or
                        isinstance(ti, (int, float)) and ti < 0):
                    raise NegativeTextIndent()
        return style.marginLeft, style.marginRight, style
        return '', '', None


    def process_level(self, level):
        elems = self.levels[level]
        self.stats[level+'_left'] = Counter()

@@ -107,7 +107,6 @@ class RemoveFakeMargins(object):
        remove_left = self.analyze_stats(self.stats[level+'_left'])
        remove_right = self.analyze_stats(self.stats[level+'_right'])


        if remove_left:
            mcl = self.stats[level+'_left'].most_common(1)[0][0]
            self.log('Removing level %s left margin of:'%level, mcl)
@@ -44,14 +44,18 @@ class Links(object):
        for link in links:
            href, page, rect = link
            p, frag = href.partition('#')[0::2]
            link = ((path, p, frag or None), self.pdf.get_pageref(page).obj, Array(rect))
            try:
                link = ((path, p, frag or None), self.pdf.get_pageref(page).obj, Array(rect))
            except IndexError:
                self.log.warn('Unable to find page for link: %r, ignoring it' % link)
                continue
            self.links.append(link)

    def add_links(self):
        for link in self.links:
            path, href, frag = link[0]
            page, rect = link[1:]
            combined_path = os.path.abspath(os.path.join(os.path.dirname(path), *unquote(href).split('/')))
            combined_path = os.path.normcase(os.path.abspath(os.path.join(os.path.dirname(path), *unquote(href).split('/'))))
            is_local = not href or combined_path in self.anchors
            annot = Dictionary({
                'Type':Name('Annot'), 'Subtype':Name('Link'),
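
A small illustration (not from the commit) of why the os.path.normcase() call
added above fixes link generation on Windows: anchor paths recorded with
different letter case must still compare equal. The file names here are made up.

    import os

    a = os.path.normcase(os.path.abspath('C:/Books/Chapter1.html'))
    b = os.path.normcase(os.path.abspath('c:/books/chapter1.html'))
    # On Windows normcase() lower-cases both paths, so a == b and the link is
    # recognised as internal; on POSIX normcase() is a no-op and case matters.
    print(a == b)
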
@ -5,10 +5,10 @@ __license__ = 'GPL v3'
|
||||
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
||||
import os, shutil
|
||||
import os, shutil, copy
|
||||
from functools import partial
|
||||
|
||||
from PyQt4.Qt import QMenu, QModelIndex, QTimer
|
||||
from PyQt4.Qt import QMenu, QModelIndex, QTimer, QIcon
|
||||
|
||||
from calibre.gui2 import error_dialog, Dispatcher, question_dialog
|
||||
from calibre.gui2.dialogs.metadata_bulk import MetadataBulkDialog
|
||||
@ -16,7 +16,8 @@ from calibre.gui2.dialogs.confirm_delete import confirm
|
||||
from calibre.gui2.dialogs.device_category_editor import DeviceCategoryEditor
|
||||
from calibre.gui2.actions import InterfaceAction
|
||||
from calibre.ebooks.metadata import authors_to_string
|
||||
from calibre.ebooks.metadata.opf2 import OPF
|
||||
from calibre.ebooks.metadata.book.base import Metadata
|
||||
from calibre.ebooks.metadata.opf2 import OPF, metadata_to_opf
|
||||
from calibre.utils.icu import sort_key
|
||||
from calibre.db.errors import NoSuchFormat
|
||||
|
||||
@ -147,14 +148,18 @@ class EditMetadataAction(InterfaceAction):
|
||||
|
||||
payload = (id_map, tdir, log_file, lm_map,
|
||||
failed_ids.union(failed_covers))
|
||||
self.gui.proceed_question(self.apply_downloaded_metadata, payload,
|
||||
review_apply = partial(self.apply_downloaded_metadata, True)
|
||||
normal_apply = partial(self.apply_downloaded_metadata, False)
|
||||
self.gui.proceed_question(normal_apply, payload,
|
||||
log_file, _('Download log'), _('Download complete'), msg,
|
||||
det_msg=det_msg, show_copy_button=show_copy_button,
|
||||
cancel_callback=partial(self.cleanup_bulk_download, tdir),
|
||||
log_is_file=True, checkbox_msg=checkbox_msg,
|
||||
checkbox_checked=False)
|
||||
checkbox_checked=False, action_callback=review_apply,
|
||||
action_label=_('Review downloaded metadata'),
|
||||
action_icon=QIcon(I('auto_author_sort.png')))
|
||||
|
||||
def apply_downloaded_metadata(self, payload, *args):
|
||||
def apply_downloaded_metadata(self, review, payload, *args):
|
||||
good_ids, tdir, log_file, lm_map, failed_ids = payload
|
||||
if not good_ids:
|
||||
return
|
||||
@ -194,6 +199,57 @@ class EditMetadataAction(InterfaceAction):
|
||||
cov = None
|
||||
id_map[bid] = (opf, cov)
|
||||
|
||||
if review:
|
||||
def get_metadata(book_id):
|
||||
oldmi = db.get_metadata(book_id, index_is_id=True, get_cover=True, cover_as_data=True)
|
||||
opf, cov = id_map[book_id]
|
||||
if opf is None:
|
||||
newmi = Metadata(oldmi.title, authors=tuple(oldmi.authors))
|
||||
else:
|
||||
with open(opf, 'rb') as f:
|
||||
newmi = OPF(f, basedir=os.path.dirname(opf), populate_spine=False).to_book_metadata()
|
||||
newmi.cover, newmi.cover_data = None, (None, None)
|
||||
for x in ('title', 'authors'):
|
||||
if newmi.is_null(x):
|
||||
# Title and author are set to null if they are
|
||||
# the same as the originals as an optimization,
|
||||
# we undo that, as it is confusing.
|
||||
newmi.set(x, copy.copy(oldmi.get(x)))
|
||||
if cov:
|
||||
with open(cov, 'rb') as f:
|
||||
newmi.cover_data = ('jpg', f.read())
|
||||
return oldmi, newmi
|
||||
from calibre.gui2.metadata.diff import CompareMany
|
||||
d = CompareMany(
|
||||
set(id_map), get_metadata, db.field_metadata, parent=self.gui,
|
||||
window_title=_('Review downloaded metadata'),
|
||||
reject_button_tooltip=_('Discard downloaded metadata for this book'),
|
||||
accept_all_tooltip=_('Use the downloaded metadata for all remaining books'),
|
||||
reject_all_tooltip=_('Discard downloaded metadata for all remaining books'),
|
||||
revert_tooltip=_('Discard the downloaded value for: %s'),
|
||||
intro_msg=_('The downloaded metadata is on the left and the original metadata'
|
||||
' is on the right. If a downloaded value is blank or unknown,'
|
||||
' the original value is used.')
|
||||
)
|
||||
if d.exec_() == d.Accepted:
|
||||
nid_map = {}
|
||||
for book_id, (changed, mi) in d.accepted.iteritems():
|
||||
if mi is None: # discarded
|
||||
continue
|
||||
if changed:
|
||||
opf, cov = id_map[book_id]
|
||||
cfile = mi.cover
|
||||
mi.cover, mi.cover_data = None, (None, None)
|
||||
with open(opf, 'wb') as f:
|
||||
f.write(metadata_to_opf(mi))
|
||||
if cfile:
|
||||
shutil.copyfile(cfile, cov)
|
||||
os.remove(cfile)
|
||||
nid_map[book_id] = id_map[book_id]
|
||||
id_map = nid_map
|
||||
else:
|
||||
id_map = {}
|
||||
|
||||
restrict_to_failed = bool(args and args[0])
|
||||
if restrict_to_failed:
|
||||
db.data.set_marked_ids(failed_ids)
|
||||
|
@ -66,6 +66,7 @@ class EditorWidget(QWebView): # {{{
|
||||
|
||||
def __init__(self, parent=None):
|
||||
QWebView.__init__(self, parent)
|
||||
self.readonly = False
|
||||
|
||||
self.comments_pat = re.compile(r'<!--.*?-->', re.DOTALL)
|
||||
|
||||
@ -163,7 +164,11 @@ class EditorWidget(QWebView): # {{{
|
||||
self.page().linkClicked.connect(self.link_clicked)
|
||||
|
||||
self.setHtml('')
|
||||
self.page().setContentEditable(True)
|
||||
self.set_readonly(False)
|
||||
|
||||
def set_readonly(self, what):
|
||||
self.readonly = what
|
||||
self.page().setContentEditable(not self.readonly)
|
||||
|
||||
def clear_text(self, *args):
|
||||
us = self.page().undoStack()
|
||||
@ -313,7 +318,7 @@ class EditorWidget(QWebView): # {{{
|
||||
# toList() is needed because PyQt on Debian is old/broken
|
||||
for body in self.page().mainFrame().documentElement().findAll('body').toList():
|
||||
body.setAttribute('style', style)
|
||||
self.page().setContentEditable(True)
|
||||
self.page().setContentEditable(not self.readonly)
|
||||
|
||||
def keyPressEvent(self, ev):
|
||||
if ev.key() in (Qt.Key_Tab, Qt.Key_Escape, Qt.Key_Backtab):
|
||||
@ -585,6 +590,7 @@ class Editor(QWidget): # {{{
|
||||
self.tabs.addTab(self.code_edit, _('HTML Source'))
|
||||
self.tabs.currentChanged[int].connect(self.change_tab)
|
||||
self.highlighter = Highlighter(self.code_edit.document())
|
||||
self.layout().setContentsMargins(0, 0, 0, 0)
|
||||
|
||||
# toolbar1 {{{
|
||||
self.toolbar1.addAction(self.editor.action_undo)
|
||||
@ -666,6 +672,12 @@ class Editor(QWidget): # {{{
|
||||
self.toolbar2.setVisible(False)
|
||||
self.toolbar3.setVisible(False)
|
||||
|
||||
def set_readonly(self, what):
|
||||
self.editor.set_readonly(what)
|
||||
|
||||
def hide_tabs(self):
|
||||
self.tabs.tabBar().setVisible(False)
|
||||
|
||||
# }}}
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
@@ -12,7 +12,7 @@ from PyQt4.Qt import Qt, QDialog, QGridLayout, QVBoxLayout, QFont, QLabel, \
from calibre.gui2.dialogs.metadata_bulk_ui import Ui_MetadataBulkDialog
from calibre.gui2.dialogs.tag_editor import TagEditor
from calibre.ebooks.metadata import string_to_authors, authors_to_string, title_sort
from calibre.ebooks.metadata.book.base import SafeFormat
from calibre.ebooks.metadata.book.formatter import SafeFormat
from calibre.gui2.custom_column_widgets import populate_metadata_page
from calibre.gui2 import error_dialog, ResizableDialog, UNDEFINED_QDATETIME, \
        gprefs, question_dialog
@@ -11,7 +11,8 @@ from PyQt4.Qt import (Qt, QDialog, QDialogButtonBox, QSyntaxHighlighter, QFont,
from calibre.gui2 import error_dialog
from calibre.gui2.dialogs.template_dialog_ui import Ui_TemplateDialog
from calibre.utils.formatter_functions import formatter_functions
from calibre.ebooks.metadata.book.base import SafeFormat, Metadata
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.book.formatter import SafeFormat
from calibre.library.coloring import (displayable_columns)


@@ -178,7 +179,7 @@ class TemplateHighlighter(QSyntaxHighlighter):
        list = reversed(self.paren_positions[0:found_pp])
        for pp in list:
            if pp.paren == chr:
                stack += 1;
                stack += 1
            elif stack:
                stack -= 1
            else:
@ -23,14 +23,17 @@ class LanguagesEdit(EditWithComplete):
|
||||
self.comma_map = {k:k.replace(',', '|') for k in self.names_with_commas}
|
||||
self.comma_rmap = {v:k for k, v in self.comma_map.iteritems()}
|
||||
self._rmap = {lower(v):k for k,v in self._lang_map.iteritems()}
|
||||
if db is not None:
|
||||
self.init_langs(db)
|
||||
self.init_langs(db)
|
||||
|
||||
def init_langs(self, db):
|
||||
pmap = {self._lang_map.get(x[1], x[1]):1 for x in
|
||||
db.get_languages_with_ids()}
|
||||
all_items = sorted(self._lang_map.itervalues(),
|
||||
key=lambda x: (-pmap.get(x, 0), sort_key(x)))
|
||||
if db is not None:
|
||||
pmap = {self._lang_map.get(x[1], x[1]):1 for x in
|
||||
db.get_languages_with_ids()}
|
||||
all_items = sorted(self._lang_map.itervalues(),
|
||||
key=lambda x: (-pmap.get(x, 0), sort_key(x)))
|
||||
else:
|
||||
all_items = sorted(self._lang_map.itervalues(),
|
||||
key=lambda x: sort_key(x))
|
||||
self.update_items_cache(all_items)
|
||||
|
||||
@property
|
||||
|
@@ -173,7 +173,7 @@ class SearchBar(QWidget): # {{{
        self._layout.setContentsMargins(0,5,0,0)

        x = QToolButton(self)
        x.setText(_('Virtual Library'))
        x.setText(_('Vi&rtual Library'))
        x.setIcon(QIcon(I('lt.png')))
        x.setObjectName("virtual_library")
        x.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
@@ -14,7 +14,7 @@ from PyQt4.Qt import (QAbstractTableModel, Qt, pyqtSignal, QIcon, QImage,
from calibre.gui2 import NONE, UNDEFINED_QDATETIME, error_dialog
from calibre.utils.search_query_parser import ParseException
from calibre.ebooks.metadata import fmt_sidx, authors_to_string, string_to_authors
from calibre.ebooks.metadata.book.base import SafeFormat
from calibre.ebooks.metadata.book.formatter import SafeFormat
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.config import tweaks, device_prefs, prefs
from calibre.utils.date import dt_factory, qt_to_dt, as_local_time
@@ -807,6 +807,14 @@ class BooksView(QTableView): # {{{
        sm = self.selectionModel()
        sm.select(index, sm.ClearAndSelect|sm.Rows)

    def keyPressEvent(self, ev):
        val = self.horizontalScrollBar().value()
        ret = super(BooksView, self).keyPressEvent(ev)
        if ev.isAccepted() and ev.key() in (Qt.Key_Home, Qt.Key_End
                ) and ev.modifiers() & Qt.ControlModifier:
            self.horizontalScrollBar().setValue(val)
        return ret

    def ids_to_rows(self, ids):
        row_map = OrderedDict()
        ids = frozenset(ids)
@ -101,7 +101,7 @@ class TitleEdit(EnLineEdit):
|
||||
getattr(db, 'set_'+ self.TITLE_ATTR)(id_, title, notify=False,
|
||||
commit=False)
|
||||
except (IOError, OSError) as err:
|
||||
if getattr(err, 'errno', None) == errno.EACCES: # Permission denied
|
||||
if getattr(err, 'errno', None) == errno.EACCES: # Permission denied
|
||||
import traceback
|
||||
fname = getattr(err, 'filename', None)
|
||||
p = 'Locked file: %s\n\n'%fname if fname else ''
|
||||
@ -273,7 +273,7 @@ class AuthorsEdit(EditWithComplete):
|
||||
self.books_to_refresh |= db.set_authors(id_, authors, notify=False,
|
||||
allow_case_change=True)
|
||||
except (IOError, OSError) as err:
|
||||
if getattr(err, 'errno', None) == errno.EACCES: # Permission denied
|
||||
if getattr(err, 'errno', None) == errno.EACCES: # Permission denied
|
||||
import traceback
|
||||
fname = getattr(err, 'filename', None)
|
||||
p = 'Locked file: %s\n\n'%fname if fname else ''
|
||||
@ -485,7 +485,7 @@ class SeriesEdit(EditWithComplete):
|
||||
def initialize(self, db, id_):
|
||||
self.books_to_refresh = set([])
|
||||
all_series = db.all_series()
|
||||
all_series.sort(key=lambda x : sort_key(x[1]))
|
||||
all_series.sort(key=lambda x: sort_key(x[1]))
|
||||
self.update_items_cache([x[1] for x in all_series])
|
||||
series_id = db.series_id(id_, index_is_id=True)
|
||||
inval = ''
|
||||
@ -586,7 +586,7 @@ class SeriesIndexEdit(QDoubleSpinBox):
|
||||
|
||||
# }}}
|
||||
|
||||
class BuddyLabel(QLabel): # {{{
|
||||
class BuddyLabel(QLabel): # {{{
|
||||
|
||||
def __init__(self, buddy):
|
||||
QLabel.__init__(self, buddy.LABEL)
|
||||
@ -698,11 +698,11 @@ class FormatsManager(QWidget):
|
||||
self.formats.setIconSize(QSize(32, 32))
|
||||
self.formats.setMaximumWidth(200)
|
||||
|
||||
l.addWidget(self.cover_from_format_button, 0, 0, 1, 1)
|
||||
l.addWidget(self.cover_from_format_button, 0, 0, 1, 1)
|
||||
l.addWidget(self.metadata_from_format_button, 2, 0, 1, 1)
|
||||
l.addWidget(self.add_format_button, 0, 2, 1, 1)
|
||||
l.addWidget(self.remove_format_button, 2, 2, 1, 1)
|
||||
l.addWidget(self.formats, 0, 1, 3, 1)
|
||||
l.addWidget(self.add_format_button, 0, 2, 1, 1)
|
||||
l.addWidget(self.remove_format_button, 2, 2, 1, 1)
|
||||
l.addWidget(self.formats, 0, 1, 3, 1)
|
||||
|
||||
self.temp_files = []
|
||||
|
||||
@ -882,7 +882,7 @@ class FormatsManager(QWidget):
|
||||
self.temp_files = []
|
||||
# }}}
|
||||
|
||||
class Cover(ImageView): # {{{
|
||||
class Cover(ImageView): # {{{
|
||||
|
||||
download_cover = pyqtSignal()
|
||||
|
||||
@ -1052,7 +1052,7 @@ class Cover(ImageView): # {{{
|
||||
|
||||
# }}}
|
||||
|
||||
class CommentsEdit(Editor): # {{{
|
||||
class CommentsEdit(Editor): # {{{
|
||||
|
||||
@dynamic_property
|
||||
def current_val(self):
|
||||
@ -1076,7 +1076,7 @@ class CommentsEdit(Editor): # {{{
|
||||
return True
|
||||
# }}}
|
||||
|
||||
class RatingEdit(QSpinBox): # {{{
|
||||
class RatingEdit(QSpinBox): # {{{
|
||||
LABEL = _('&Rating:')
|
||||
TOOLTIP = _('Rating of this book. 0-5 stars')
|
||||
|
||||
@ -1120,7 +1120,7 @@ class RatingEdit(QSpinBox): # {{{
|
||||
|
||||
# }}}
|
||||
|
||||
class TagsEdit(EditWithComplete): # {{{
|
||||
class TagsEdit(EditWithComplete): # {{{
|
||||
LABEL = _('Ta&gs:')
|
||||
TOOLTIP = '<p>'+_('Tags categorize the book. This is particularly '
|
||||
'useful while searching. <br><br>They can be any words '
|
||||
@ -1174,7 +1174,6 @@ class TagsEdit(EditWithComplete): # {{{
|
||||
self.current_val = d.tags
|
||||
self.all_items = db.all_tags()
|
||||
|
||||
|
||||
def commit(self, db, id_):
|
||||
self.books_to_refresh |= db.set_tags(
|
||||
id_, self.current_val, notify=False, commit=False,
|
||||
@ -1183,7 +1182,7 @@ class TagsEdit(EditWithComplete): # {{{
|
||||
|
||||
# }}}
|
||||
|
||||
class LanguagesEdit(LE): # {{{
|
||||
class LanguagesEdit(LE): # {{{
|
||||
|
||||
LABEL = _('&Languages:')
|
||||
TOOLTIP = _('A comma separated list of languages for this book')
|
||||
@ -1194,8 +1193,10 @@ class LanguagesEdit(LE): # {{{
|
||||
|
||||
@dynamic_property
|
||||
def current_val(self):
|
||||
def fget(self): return self.lang_codes
|
||||
def fset(self, val): self.lang_codes = val
|
||||
def fget(self):
|
||||
return self.lang_codes
|
||||
def fset(self, val):
|
||||
self.lang_codes = val
|
||||
return property(fget=fget, fset=fset)
|
||||
|
||||
def initialize(self, db, id_):
|
||||
@ -1221,7 +1222,7 @@ class LanguagesEdit(LE): # {{{
|
||||
return True
|
||||
# }}}
|
||||
|
||||
class IdentifiersEdit(QLineEdit): # {{{
|
||||
class IdentifiersEdit(QLineEdit): # {{{
|
||||
LABEL = _('I&ds:')
|
||||
BASE_TT = _('Edit the identifiers for this book. '
|
||||
'For example: \n\n%s')%(
|
||||
@ -1309,7 +1310,7 @@ class IdentifiersEdit(QLineEdit): # {{{
|
||||
|
||||
# }}}
|
||||
|
||||
class ISBNDialog(QDialog) : # {{{
|
||||
class ISBNDialog(QDialog): # {{{
|
||||
|
||||
def __init__(self, parent, txt):
|
||||
QDialog.__init__(self, parent)
|
||||
@ -1320,7 +1321,7 @@ class ISBNDialog(QDialog) : # {{{
|
||||
l.addWidget(w, 0, 0, 1, 2)
|
||||
w = QLabel(_('ISBN:'))
|
||||
l.addWidget(w, 1, 0, 1, 1)
|
||||
self.line_edit = w = QLineEdit();
|
||||
self.line_edit = w = QLineEdit()
|
||||
w.setText(txt)
|
||||
w.selectAll()
|
||||
w.textChanged.connect(self.checkText)
|
||||
@ -1361,7 +1362,7 @@ class ISBNDialog(QDialog) : # {{{
|
||||
|
||||
# }}}
|
||||
|
||||
class PublisherEdit(EditWithComplete): # {{{
|
||||
class PublisherEdit(EditWithComplete): # {{{
|
||||
LABEL = _('&Publisher:')
|
||||
|
||||
def __init__(self, parent):
|
||||
@ -1388,7 +1389,7 @@ class PublisherEdit(EditWithComplete): # {{{
|
||||
def initialize(self, db, id_):
|
||||
self.books_to_refresh = set([])
|
||||
all_publishers = db.all_publishers()
|
||||
all_publishers.sort(key=lambda x : sort_key(x[1]))
|
||||
all_publishers.sort(key=lambda x: sort_key(x[1]))
|
||||
self.update_items_cache([x[1] for x in all_publishers])
|
||||
publisher_id = db.publisher_id(id_, index_is_id=True)
|
||||
inval = ''
|
||||
@ -1421,7 +1422,7 @@ class DateEdit(QDateTimeEdit):
|
||||
ATTR = 'timestamp'
|
||||
TWEAK = 'gui_timestamp_display_format'
|
||||
|
||||
def __init__(self, parent):
|
||||
def __init__(self, parent, create_clear_button=True):
|
||||
QDateTimeEdit.__init__(self, parent)
|
||||
self.setToolTip(self.TOOLTIP)
|
||||
self.setWhatsThis(self.TOOLTIP)
|
||||
@ -1435,10 +1436,11 @@ class DateEdit(QDateTimeEdit):
|
||||
self.setCalendarWidget(self.cw)
|
||||
self.setMinimumDateTime(UNDEFINED_QDATETIME)
|
||||
self.setSpecialValueText(_('Undefined'))
|
||||
self.clear_button = QToolButton(parent)
|
||||
self.clear_button.setIcon(QIcon(I('trash.png')))
|
||||
self.clear_button.setToolTip(_('Clear date'))
|
||||
self.clear_button.clicked.connect(self.reset_date)
|
||||
if create_clear_button:
|
||||
self.clear_button = QToolButton(parent)
|
||||
self.clear_button.setIcon(QIcon(I('trash.png')))
|
||||
self.clear_button.setToolTip(_('Clear date'))
|
||||
self.clear_button.clicked.connect(self.reset_date)
|
||||
|
||||
def reset_date(self, *args):
|
||||
self.current_val = None
|
||||
|
src/calibre/gui2/metadata/diff.py (new file, 557 lines)
@@ -0,0 +1,557 @@
|
||||
#!/usr/bin/env python
|
||||
# vim:fileencoding=utf-8
|
||||
from __future__ import (unicode_literals, division, absolute_import,
|
||||
print_function)
|
||||
|
||||
__license__ = 'GPL v3'
|
||||
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
|
||||
|
||||
import os
|
||||
from collections import OrderedDict, namedtuple
|
||||
from functools import partial
|
||||
from future_builtins import zip
|
||||
|
||||
from PyQt4.Qt import (
|
||||
QDialog, QWidget, QGridLayout, QLineEdit, QLabel, QToolButton, QIcon,
|
||||
QVBoxLayout, QDialogButtonBox, QApplication, pyqtSignal, QFont, QPixmap,
|
||||
QSize, QPainter, Qt, QColor, QPen, QSizePolicy, QScrollArea, QFrame)
|
||||
|
||||
from calibre import fit_image
|
||||
from calibre.ebooks.metadata import title_sort, authors_to_sort_string
|
||||
from calibre.gui2 import pixmap_to_data, gprefs
|
||||
from calibre.gui2.comments_editor import Editor
|
||||
from calibre.gui2.languages import LanguagesEdit as LE
|
||||
from calibre.gui2.metadata.basic_widgets import PubdateEdit, RatingEdit
|
||||
from calibre.ptempfile import PersistentTemporaryFile
|
||||
from calibre.utils.date import UNDEFINED_DATE
|
||||
|
||||
Widgets = namedtuple('Widgets', 'new old label button')
|
||||
|
||||
# Widgets {{{
|
||||
|
||||
class LineEdit(QLineEdit):
|
||||
|
||||
changed = pyqtSignal()
|
||||
|
||||
def __init__(self, field, is_new, parent, metadata, extra):
|
||||
QLineEdit.__init__(self, parent)
|
||||
self.is_new = is_new
|
||||
self.field = field
|
||||
self.metadata = metadata
|
||||
if not is_new:
|
||||
self.setReadOnly(True)
|
||||
self.textChanged.connect(self.changed)
|
||||
|
||||
def from_mi(self, mi):
|
||||
val = mi.get(self.field, default='') or ''
|
||||
ism = self.metadata['is_multiple']
|
||||
if ism:
|
||||
if not val:
|
||||
val = ''
|
||||
else:
|
||||
val = ism['list_to_ui'].join(val)
|
||||
self.setText(val)
|
||||
self.setCursorPosition(0)
|
||||
|
||||
def to_mi(self, mi):
|
||||
val = unicode(self.text()).strip()
|
||||
ism = self.metadata['is_multiple']
|
||||
if ism:
|
||||
if not val:
|
||||
val = []
|
||||
else:
|
||||
val = [x.strip() for x in val.split(ism['list_to_ui']) if x.strip()]
|
||||
mi.set(self.field, val)
|
||||
if self.field == 'title':
|
||||
mi.set('title_sort', title_sort(val, lang=mi.language))
|
||||
elif self.field == 'authors':
|
||||
mi.set('author_sort', authors_to_sort_string(val))
|
||||
|
||||
@dynamic_property
|
||||
def current_val(self):
|
||||
def fget(self):
|
||||
return unicode(self.text())
|
||||
def fset(self, val):
|
||||
self.setText(val)
|
||||
self.setCursorPosition(0)
|
||||
return property(fget=fget, fset=fset)
|
||||
|
||||
@property
|
||||
def is_blank(self):
|
||||
val = self.current_val.strip()
|
||||
if self.field in {'title', 'authors'}:
|
||||
return val in {'', _('Unknown')}
|
||||
return not val
|
||||
|
||||
def same_as(self, other):
|
||||
return self.current_val == other.current_val
|
||||
|
||||
class LanguagesEdit(LE):
|
||||
|
||||
changed = pyqtSignal()
|
||||
|
||||
def __init__(self, field, is_new, parent, metadata, extra):
|
||||
LE.__init__(self, parent=parent)
|
||||
self.is_new = is_new
|
||||
self.field = field
|
||||
self.metadata = metadata
|
||||
self.textChanged.connect(self.changed)
|
||||
if not is_new:
|
||||
self.lineEdit().setReadOnly(True)
|
||||
|
||||
@dynamic_property
|
||||
def current_val(self):
|
||||
def fget(self):
|
||||
return self.lang_codes
|
||||
def fset(self, val):
|
||||
self.lang_codes = val
|
||||
return property(fget=fget, fset=fset)
|
||||
|
||||
def from_mi(self, mi):
|
||||
self.lang_codes = mi.languages
|
||||
|
||||
def to_mi(self, mi):
|
||||
mi.languages = self.lang_codes
|
||||
|
||||
@property
|
||||
def is_blank(self):
|
||||
return not self.current_val
|
||||
|
||||
def same_as(self, other):
|
||||
return self.current_val == other.current_val
|
||||
|
||||
class RatingsEdit(RatingEdit):
|
||||
|
||||
changed = pyqtSignal()
|
||||
|
||||
def __init__(self, field, is_new, parent, metadata, extra):
|
||||
RatingEdit.__init__(self, parent)
|
||||
self.is_new = is_new
|
||||
self.field = field
|
||||
self.metadata = metadata
|
||||
self.valueChanged.connect(self.changed)
|
||||
if not is_new:
|
||||
self.setReadOnly(True)
|
||||
|
||||
def from_mi(self, mi):
|
||||
val = (mi.get(self.field, default=0) or 0)/2
|
||||
self.setValue(val)
|
||||
|
||||
def to_mi(self, mi):
|
||||
mi.set(self.field, self.value() * 2)
|
||||
|
||||
@property
|
||||
def is_blank(self):
|
||||
return self.value() == 0
|
||||
|
||||
def same_as(self, other):
|
||||
return self.current_val == other.current_val
|
||||
|
||||
class DateEdit(PubdateEdit):
|
||||
|
||||
changed = pyqtSignal()
|
||||
|
||||
def __init__(self, field, is_new, parent, metadata, extra):
|
||||
PubdateEdit.__init__(self, parent, create_clear_button=False)
|
||||
self.is_new = is_new
|
||||
self.field = field
|
||||
self.metadata = metadata
|
||||
self.setDisplayFormat(extra)
|
||||
self.dateTimeChanged.connect(self.changed)
|
||||
if not is_new:
|
||||
self.setReadOnly(True)
|
||||
|
||||
def from_mi(self, mi):
|
||||
self.current_val = mi.get(self.field, default=None)
|
||||
|
||||
def to_mi(self, mi):
|
||||
mi.set(self.field, self.current_val)
|
||||
|
||||
@property
|
||||
def is_blank(self):
|
||||
return self.current_val.year <= UNDEFINED_DATE.year
|
||||
|
||||
def same_as(self, other):
|
||||
return self.text() == other.text()
|
||||
|
||||
class SeriesEdit(LineEdit):
|
||||
|
||||
def from_mi(self, mi):
|
||||
series = mi.get(self.field, default='')
|
||||
series_index = mi.get(self.field + '_index', default=1.0)
|
||||
val = ''
|
||||
if series:
|
||||
val = '%s [%s]' % (series, mi.format_series_index(series_index))
|
||||
self.setText(val)
|
||||
self.setCursorPosition(0)
|
||||
|
||||
def to_mi(self, mi):
|
||||
val = unicode(self.text()).strip()
|
||||
try:
|
||||
series_index = float(val.rpartition('[')[-1].rstrip(']').strip())
|
||||
except:
|
||||
series_index = 1.0
|
||||
series = val.rpartition('[')[0].strip() or None
|
||||
mi.set(self.field, series)
|
||||
mi.set(self.field + '_index', series_index)
|
||||
|
||||
class IdentifiersEdit(LineEdit):
|
||||
|
||||
def from_mi(self, mi):
|
||||
val = ('%s:%s' % (k, v) for k, v in mi.identifiers.iteritems())
|
||||
self.setText(', '.join(val))
|
||||
self.setCursorPosition(0)
|
||||
|
||||
def to_mi(self, mi):
|
||||
parts = (x.strip() for x in self.current_val.split(',') if x.strip())
|
||||
val = {x.partition(':')[0].strip():x.partition(':')[-1].strip() for x in parts}
|
||||
mi.set_identifiers({k:v for k, v in val.iteritems() if k and v})
|
||||
|
||||
class CommentsEdit(Editor):
|
||||
|
||||
changed = pyqtSignal()
|
||||
|
||||
def __init__(self, field, is_new, parent, metadata, extra):
|
||||
Editor.__init__(self, parent, one_line_toolbar=False)
|
||||
self.is_new = is_new
|
||||
self.field = field
|
||||
self.metadata = metadata
|
||||
self.hide_tabs()
|
||||
if not is_new:
|
||||
self.hide_toolbars()
|
||||
self.set_readonly(True)
|
||||
|
||||
@dynamic_property
|
||||
def current_val(self):
|
||||
def fget(self):
|
||||
return self.html
|
||||
def fset(self, val):
|
||||
self.html = val or ''
|
||||
self.changed.emit()
|
||||
return property(fget=fget, fset=fset)
|
||||
|
||||
def from_mi(self, mi):
|
||||
val = mi.get(self.field, default='')
|
||||
self.current_val = val
|
||||
|
||||
def to_mi(self, mi):
|
||||
mi.set(self.field, self.current_val)
|
||||
|
||||
def sizeHint(self):
|
||||
return QSize(450, 200)
|
||||
|
||||
@property
|
||||
def is_blank(self):
|
||||
return not self.current_val.strip()
|
||||
|
||||
def same_as(self, other):
|
||||
return self.current_val == other.current_val
|
||||
|
||||
class CoverView(QWidget):
|
||||
|
||||
changed = pyqtSignal()
|
||||
|
||||
def __init__(self, field, is_new, parent, metadata, extra):
|
||||
QWidget.__init__(self, parent)
|
||||
self.is_new = is_new
|
||||
self.field = field
|
||||
self.metadata = metadata
|
||||
self.pixmap = None
|
||||
self.blank = QPixmap(I('blank.png'))
|
||||
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.GrowFlag|QSizePolicy.ExpandFlag)
|
||||
self.sizePolicy().setHeightForWidth(True)
|
||||
|
||||
@property
|
||||
def is_blank(self):
|
||||
return self.pixmap is None
|
||||
|
||||
@dynamic_property
|
||||
def current_val(self):
|
||||
def fget(self):
|
||||
return self.pixmap
|
||||
def fset(self, val):
|
||||
self.pixmap = val
|
||||
self.changed.emit()
|
||||
self.update()
|
||||
return property(fget=fget, fset=fset)
|
||||
|
||||
def from_mi(self, mi):
|
||||
p = getattr(mi, 'cover', None)
|
||||
if p and os.path.exists(p):
|
||||
pmap = QPixmap()
|
||||
with open(p, 'rb') as f:
|
||||
pmap.loadFromData(f.read())
|
||||
if not pmap.isNull():
|
||||
self.pixmap = pmap
|
||||
self.update()
|
||||
self.changed.emit()
|
||||
return
|
||||
cd = getattr(mi, 'cover_data', (None, None))
|
||||
if cd and cd[1]:
|
||||
pmap = QPixmap()
|
||||
pmap.loadFromData(cd[1])
|
||||
if not pmap.isNull():
|
||||
self.pixmap = pmap
|
||||
self.update()
|
||||
self.changed.emit()
|
||||
return
|
||||
self.pixmap = None
|
||||
self.update()
|
||||
self.changed.emit()
|
||||
|
||||
def to_mi(self, mi):
|
||||
mi.cover, mi.cover_data = None, (None, None)
|
||||
if self.pixmap is not None and not self.pixmap.isNull():
|
||||
with PersistentTemporaryFile('.jpg') as pt:
|
||||
pt.write(pixmap_to_data(self.pixmap))
|
||||
mi.cover = pt.name
|
||||
|
||||
def same_as(self, other):
|
||||
return self.current_val == other.current_val
|
||||
|
||||
def sizeHint(self):
|
||||
return QSize(225, 300)
|
||||
|
||||
def paintEvent(self, event):
|
||||
pmap = self.blank if self.pixmap is None or self.pixmap.isNull() else self.pixmap
|
||||
target = self.rect()
|
||||
scaled, width, height = fit_image(pmap.width(), pmap.height(), target.width(), target.height())
|
||||
target.setRect(target.x(), target.y(), width, height)
|
||||
p = QPainter(self)
|
||||
p.setRenderHints(QPainter.Antialiasing | QPainter.SmoothPixmapTransform)
|
||||
p.drawPixmap(target, pmap)
|
||||
|
||||
if self.pixmap is not None and not self.pixmap.isNull():
|
||||
sztgt = target.adjusted(0, 0, 0, -4)
|
||||
f = p.font()
|
||||
f.setBold(True)
|
||||
p.setFont(f)
|
||||
sz = u'\u00a0%d x %d\u00a0'%(self.pixmap.width(), self.pixmap.height())
|
||||
flags = Qt.AlignBottom|Qt.AlignRight|Qt.TextSingleLine
|
||||
szrect = p.boundingRect(sztgt, flags, sz)
|
||||
p.fillRect(szrect.adjusted(0, 0, 0, 4), QColor(0, 0, 0, 200))
|
||||
p.setPen(QPen(QColor(255,255,255)))
|
||||
p.drawText(sztgt, flags, sz)
|
||||
p.end()
|
||||
# }}}
|
||||
|
||||
class CompareSingle(QWidget):
|
||||
|
||||
def __init__(
|
||||
self, field_metadata, parent=None, revert_tooltip=None,
|
||||
datetime_fmt='MMMM yyyy', blank_as_equal=True,
|
||||
fields=('title', 'authors', 'series', 'tags', 'rating', 'publisher', 'pubdate', 'identifiers', 'languages', 'comments', 'cover')):
|
||||
QWidget.__init__(self, parent)
|
||||
self.l = l = QGridLayout()
|
||||
l.setContentsMargins(0, 0, 0, 0)
|
||||
self.setLayout(l)
|
||||
revert_tooltip = revert_tooltip or _('Revert %s')
|
||||
self.current_mi = None
|
||||
self.changed_font = QFont(QApplication.font())
|
||||
self.changed_font.setBold(True)
|
||||
self.changed_font.setItalic(True)
|
||||
self.blank_as_equal = blank_as_equal
|
||||
|
||||
self.widgets = OrderedDict()
|
||||
row = 0
|
||||
|
||||
for field in fields:
|
||||
m = field_metadata[field]
|
||||
dt = m['datatype']
|
||||
extra = None
|
||||
if 'series' in {field, dt}:
|
||||
cls = SeriesEdit
|
||||
elif field == 'identifiers':
|
||||
cls = IdentifiersEdit
|
||||
elif field == 'languages':
|
||||
cls = LanguagesEdit
|
||||
elif 'comments' in {field, dt}:
|
||||
cls = CommentsEdit
|
||||
elif 'rating' in {field, dt}:
|
||||
cls = RatingsEdit
|
||||
elif dt == 'datetime':
|
||||
extra = datetime_fmt
|
||||
cls = DateEdit
|
||||
elif field == 'cover':
|
||||
cls = CoverView
|
||||
elif dt in {'text', 'enum'}:
|
||||
cls = LineEdit
|
||||
else:
|
||||
continue
|
||||
neww = cls(field, True, self, m, extra)
|
||||
neww.changed.connect(partial(self.changed, field))
|
||||
oldw = cls(field, False, self, m, extra)
|
||||
newl = QLabel('&%s:' % m['name'])
|
||||
newl.setBuddy(neww)
|
||||
button = QToolButton(self)
|
||||
button.setIcon(QIcon(I('back.png')))
|
||||
button.clicked.connect(partial(self.revert, field))
|
||||
button.setToolTip(revert_tooltip % m['name'])
|
||||
self.widgets[field] = Widgets(neww, oldw, newl, button)
|
||||
for i, w in enumerate((newl, neww, button, oldw)):
|
||||
c = i if i < 2 else i + 1
|
||||
if w is oldw:
|
||||
c += 1
|
||||
l.addWidget(w, row, c)
|
||||
row += 1
|
||||
|
||||
self.sep = f = QFrame(self)
|
||||
f.setFrameShape(f.VLine)
|
||||
l.addWidget(f, 0, 2, row, 1)
|
||||
self.sep2 = f = QFrame(self)
|
||||
f.setFrameShape(f.VLine)
|
||||
l.addWidget(f, 0, 4, row, 1)
|
||||
|
||||
def changed(self, field):
|
||||
w = self.widgets[field]
|
||||
if not w.new.same_as(w.old) and (not self.blank_as_equal or not w.new.is_blank):
|
||||
w.label.setFont(self.changed_font)
|
||||
else:
|
||||
w.label.setFont(QApplication.font())
|
||||
|
||||
def revert(self, field):
|
||||
widgets = self.widgets[field]
|
||||
neww, oldw = widgets[:2]
|
||||
neww.current_val = oldw.current_val
|
||||
|
||||
def __call__(self, oldmi, newmi):
|
||||
self.current_mi = newmi
|
||||
self.initial_vals = {}
|
||||
for field, widgets in self.widgets.iteritems():
|
||||
widgets.old.from_mi(oldmi)
|
||||
widgets.new.from_mi(newmi)
|
||||
self.initial_vals[field] = widgets.new.current_val
|
||||
|
||||
def apply_changes(self):
|
||||
changed = False
|
||||
for field, widgets in self.widgets.iteritems():
|
||||
val = widgets.new.current_val
|
||||
if val != self.initial_vals[field]:
|
||||
widgets.new.to_mi(self.current_mi)
|
||||
changed = True
|
||||
return changed
|
||||
|
||||
class CompareMany(QDialog):
|
||||
|
||||
def __init__(self, ids, get_metadata, field_metadata, parent=None,
|
||||
window_title=None,
|
||||
reject_button_tooltip=None,
|
||||
accept_all_tooltip=None,
|
||||
reject_all_tooltip=None,
|
||||
revert_tooltip=None,
|
||||
intro_msg=None,
|
||||
**kwargs):
|
||||
QDialog.__init__(self, parent)
|
||||
self.l = l = QVBoxLayout()
|
||||
self.setLayout(l)
|
||||
self.setWindowIcon(QIcon(I('auto_author_sort.png')))
|
||||
self.get_metadata = get_metadata
|
||||
self.ids = list(ids)
|
||||
self.total = len(self.ids)
|
||||
self.accepted = OrderedDict()
|
||||
self.window_title = window_title or _('Compare metadata')
|
||||
|
||||
if intro_msg:
|
||||
self.la = la = QLabel(intro_msg)
|
||||
la.setWordWrap(True)
|
||||
l.addWidget(la)
|
||||
|
||||
self.compare_widget = CompareSingle(field_metadata, parent=parent, revert_tooltip=revert_tooltip, **kwargs)
|
||||
self.sa = sa = QScrollArea()
|
||||
l.addWidget(sa)
|
||||
sa.setWidget(self.compare_widget)
|
||||
sa.setWidgetResizable(True)
|
||||
|
||||
self.bb = bb = QDialogButtonBox(QDialogButtonBox.Cancel)
|
||||
bb.rejected.connect(self.reject)
|
||||
if self.total > 1:
|
||||
self.aarb = b = bb.addButton(_('&Accept all remaining'), bb.YesRole)
|
||||
b.setIcon(QIcon(I('ok.png')))
|
||||
if accept_all_tooltip:
|
||||
b.setToolTip(accept_all_tooltip)
|
||||
b.clicked.connect(self.accept_all_remaining)
|
||||
self.rarb = b = bb.addButton(_('Re&ject all remaining'), bb.NoRole)
|
||||
b.setIcon(QIcon(I('minus.png')))
|
||||
if reject_all_tooltip:
|
||||
b.setToolTip(reject_all_tooltip)
|
||||
b.clicked.connect(self.reject_all_remaining)
|
||||
self.sb = b = bb.addButton(_('&Reject'), bb.ActionRole)
|
||||
b.clicked.connect(partial(self.next_item, False))
|
||||
b.setIcon(QIcon(I('minus.png')))
|
||||
if reject_button_tooltip:
|
||||
b.setToolTip(reject_button_tooltip)
|
||||
self.nb = b = bb.addButton(_('&Next') if self.total > 1 else _('&OK'), bb.ActionRole)
|
||||
b.setIcon(QIcon(I('forward.png' if self.total > 1 else 'ok.png')))
|
||||
b.clicked.connect(partial(self.next_item, True))
|
||||
b.setDefault(True)
|
||||
l.addWidget(bb)
|
||||
|
||||
self.next_item(True)
|
||||
|
||||
desktop = QApplication.instance().desktop()
|
||||
geom = desktop.availableGeometry(parent or self)
|
||||
width = max(700, min(950, geom.width()-50))
|
||||
height = max(650, min(1000, geom.height()-100))
|
||||
self.resize(QSize(width, height))
|
||||
geom = gprefs.get('diff_dialog_geom', None)
|
||||
if geom is not None:
|
||||
self.restoreGeometry(geom)
|
||||
b.setFocus(Qt.OtherFocusReason)
|
||||
|
||||
def accept(self):
|
||||
gprefs.set('diff_dialog_geom', bytearray(self.saveGeometry()))
|
||||
super(CompareMany, self).accept()
|
||||
|
||||
def reject(self):
|
||||
gprefs.set('diff_dialog_geom', bytearray(self.saveGeometry()))
|
||||
super(CompareMany, self).reject()
|
||||
|
||||
@property
|
||||
def current_mi(self):
|
||||
return self.compare_widget.current_mi
|
||||
|
||||
def next_item(self, accept):
|
||||
if not self.ids:
|
||||
return self.accept()
|
||||
if self.current_mi is not None:
|
||||
changed = self.compare_widget.apply_changes()
|
||||
if self.current_mi is not None:
|
||||
old_id = self.ids.pop(0)
|
||||
self.accepted[old_id] = (changed, self.current_mi) if accept else (False, None)
|
||||
if not self.ids:
|
||||
return self.accept()
|
||||
self.setWindowTitle(self.window_title + _(' [%(num)d of %(tot)d]') % dict(
|
||||
num=(self.total - len(self.ids) + 1), tot=self.total))
|
||||
oldmi, newmi = self.get_metadata(self.ids[0])
|
||||
self.compare_widget(oldmi, newmi)
|
||||
|
||||
def accept_all_remaining(self):
|
||||
self.next_item(True)
|
||||
for id_ in self.ids:
|
||||
oldmi, newmi = self.get_metadata(id_)
|
||||
self.accepted[id_] = (False, newmi)
|
||||
self.ids = []
|
||||
self.accept()
|
||||
|
||||
def reject_all_remaining(self):
|
||||
self.next_item(False)
|
||||
for id_ in self.ids:
|
||||
oldmi, newmi = self.get_metadata(id_)
|
||||
self.accepted[id_] = (False, None)
|
||||
self.ids = []
|
||||
self.accept()
|
||||
|
||||
if __name__ == '__main__':
|
||||
app = QApplication([])
|
||||
from calibre.library import db
|
||||
db = db()
|
||||
ids = sorted(db.all_ids(), reverse=True)
|
||||
ids = tuple(zip(ids[0::2], ids[1::2]))
|
||||
gm = partial(db.get_metadata, index_is_id=True, get_cover=True, cover_as_data=True)
|
||||
get_metadata = lambda x:map(gm, ids[x])
|
||||
d = CompareMany(list(xrange(len(ids))), get_metadata, db.field_metadata)
|
||||
if d.exec_() == d.Accepted:
|
||||
for changed, mi in d.accepted.itervalues():
|
||||
if changed and mi is not None:
|
||||
print (mi)
|
||||
|
@@ -421,7 +421,7 @@ class MetadataSingleDialogBase(ResizableDialog):
        ret = d.start(title=self.title.current_val, authors=self.authors.current_val,
                identifiers=self.identifiers.current_val)
        if ret == d.Accepted:
            from calibre.ebooks.metadata.sources.base import msprefs
            from calibre.ebooks.metadata.sources.prefs import msprefs
            mi = d.book
            dummy = Metadata(_('Unknown'))
            for f in msprefs['ignore_fields']:
@@ -14,7 +14,7 @@ from PyQt4.Qt import (QAbstractTableModel, Qt, QAbstractListModel, QWidget,

from calibre.gui2.preferences import ConfigWidgetBase, test_widget
from calibre.gui2.preferences.metadata_sources_ui import Ui_Form
from calibre.ebooks.metadata.sources.base import msprefs
from calibre.ebooks.metadata.sources.prefs import msprefs
from calibre.customize.ui import (all_metadata_plugins, is_disabled,
        enable_plugin, disable_plugin, default_disabled_plugins)
from calibre.gui2 import NONE, error_dialog, question_dialog
@ -8,7 +8,7 @@ from functools import partial
|
||||
|
||||
from PyQt4.Qt import (
|
||||
Qt, QMenu, QPoint, QIcon, QDialog, QGridLayout, QLabel, QLineEdit, QComboBox,
|
||||
QDialogButtonBox, QSize, QVBoxLayout, QListWidget, QStringList, QCheckBox)
|
||||
QDialogButtonBox, QSize, QVBoxLayout, QListWidget, QStringList, QRadioButton)
|
||||
|
||||
from calibre.gui2 import error_dialog, question_dialog
|
||||
from calibre.gui2.widgets import ComboBoxWithHelp
|
||||
@ -31,7 +31,10 @@ class SelectNames(QDialog): # {{{
|
||||
self._names.setSelectionMode(self._names.ExtendedSelection)
|
||||
l.addWidget(self._names)
|
||||
|
||||
self._and = QCheckBox(_('Match all selected %s names')%txt)
|
||||
self._or = QRadioButton(_('Match any of the selected %s names')%txt)
|
||||
self._and = QRadioButton(_('Match all of the selected %s names')%txt)
|
||||
self._or.setChecked(True)
|
||||
l.addWidget(self._or)
|
||||
l.addWidget(self._and)
|
||||
|
||||
self.bb = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
|
||||
@ -173,12 +176,12 @@ class CreateVirtualLibrary(QDialog): # {{{
        txt = unicode(txt)
        while txt:
            p = txt.partition('search:')
            if p[1]: # found 'search:'
            if p[1]: # found 'search:'
                possible_search = p[2]
                if possible_search: # something follows the 'search:'
                    if possible_search[0] == '"': # strip any quotes
                if possible_search: # something follows the 'search:'
                    if possible_search[0] == '"': # strip any quotes
                        possible_search = possible_search[1:].partition('"')
                    else: # find end of the search name. Is EOL, space, rparen
                    else: # find end of the search name. Is EOL, space, rparen
                        sp = possible_search.find(' ')
                        pp = possible_search.find(')')
                        if pp < 0 or (sp > 0 and sp <= pp):

@ -187,7 +190,7 @@ class CreateVirtualLibrary(QDialog): # {{{
                        else:
                            # rparen in string before space
                            possible_search = possible_search.partition(')')
                    txt = possible_search[2] # grab remainder of the string
                    txt = possible_search[2] # grab remainder of the string
                    search_name = possible_search[0]
                    if search_name.startswith('='):
                        search_name = search_name[1:]

@ -318,7 +321,6 @@ class SearchRestrictionMixin(object):
        self.edit_menu = QMenu(_('Edit Virtual Library'))
        self.rm_menu = QMenu(_('Remove Virtual Library'))

    def add_virtual_library(self, db, name, search):
        virt_libs = db.prefs.get('virtual_libraries', {})
        virt_libs[name] = search
@ -23,7 +23,7 @@ from calibre.gui2.dialogs.tag_categories import TagCategories
from calibre.gui2.dialogs.tag_list_editor import TagListEditor
from calibre.gui2.dialogs.edit_authors_dialog import EditAuthorsDialog

class TagBrowserMixin(object): # {{{
class TagBrowserMixin(object): # {{{

    def __init__(self, db):
        self.library_view.model().count_changed_signal.connect(self.tags_view.recount)

@ -60,7 +60,7 @@ class TagBrowserMixin(object): # {{{
                self.do_saved_search_edit, (None,), 'search')
            ):
            m = self.alter_tb.manage_menu
            m.addAction( QIcon(I(category_icon_map[cat_name])), text,
            m.addAction(QIcon(I(category_icon_map[cat_name])), text,
                    partial(func, *args))

    def do_restriction_error(self):

@ -213,9 +213,9 @@ class TagBrowserMixin(object): # {{{
                    tag_to_match=tag, data=result, sorter=key)
            d.exec_()
            if d.result() == d.Accepted:
                to_rename = d.to_rename # dict of old id to new name
                to_delete = d.to_delete # list of ids
                orig_name = d.original_names # dict of id: name
                to_rename = d.to_rename # dict of old id to new name
                to_delete = d.to_delete # list of ids
                orig_name = d.original_names # dict of id: name

                rename_func = None
                if category == 'tags':
@ -227,7 +227,7 @@ class TagBrowserMixin(object): # {{{
                elif category == 'publisher':
                    rename_func = db.rename_publisher
                    delete_func = db.delete_publisher_using_id
                else: # must be custom
                else: # must be custom
                    cc_label = db.field_metadata[category]['label']
                    rename_func = partial(db.rename_custom_item, label=cc_label)
                    delete_func = partial(db.delete_custom_item_using_id, label=cc_label)

@ -265,7 +265,7 @@ class TagBrowserMixin(object): # {{{
                delete_func = db.delete_series_using_id
            elif category == 'publisher':
                delete_func = db.delete_publisher_using_id
            else: # must be custom
            else: # must be custom
                cc_label = db.field_metadata[category]['label']
                delete_func = partial(db.delete_custom_item_using_id, label=cc_label)
            m = self.tags_view.model()

@ -280,7 +280,7 @@ class TagBrowserMixin(object): # {{{
    def do_tag_item_renamed(self):
        # Clean up library view and search
        # get information to redo the selection
        rows = [r.row() for r in \
        rows = [r.row() for r in
                self.library_view.selectionModel().selectedRows()]
        m = self.library_view.model()
        ids = [m.id(r) for r in rows]

@ -315,7 +315,7 @@ class TagBrowserMixin(object): # {{{

    # }}}

class TagBrowserWidget(QWidget): # {{{
class TagBrowserWidget(QWidget): # {{{

    def __init__(self, parent):
        QWidget.__init__(self, parent)
@ -407,22 +407,24 @@ class TagBrowserWidget(QWidget): # {{{
            a = sb.m.addAction(x)
            sb.bg.addAction(a)
            a.setCheckable(True)
            if i == 0: a.setChecked(True)
            if i == 0:
                a.setChecked(True)
        sb.setToolTip(
                _('Set the sort order for entries in the Tag Browser'))
        sb.setStatusTip(sb.toolTip())

        ma = l.m.addAction(_('Match type'))
        ma = l.m.addAction(_('Search type when selecting multiple items'))
        ma.m = l.match_menu = QMenu(l.m)
        ma.setMenu(ma.m)
        ma.ag = QActionGroup(ma)

        # Must be in the same order as db2.MATCH_TYPE
        for i, x in enumerate((_('Match any'), _('Match all'))):
        for i, x in enumerate((_('Match any of the items'), _('Match all of the items'))):
            a = ma.m.addAction(x)
            ma.ag.addAction(a)
            a.setCheckable(True)
            if i == 0: a.setChecked(True)
            if i == 0:
                a.setChecked(True)
        ma.setToolTip(
            _('When selecting multiple entries in the Tag Browser '
                'match any or all of them'))
@ -573,7 +573,8 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
        self.set_window_title()
        self.apply_named_search_restriction('') # reset restriction to null
        self.saved_searches_changed(recount=False) # reload the search restrictions combo box
        self.apply_named_search_restriction(db.prefs['gui_restriction'])
        if db.prefs['virtual_lib_on_startup']:
            self.apply_virtual_library(db.prefs['virtual_lib_on_startup'])
        for action in self.iactions.values():
            action.library_changed(db)
        if olddb is not None:

@ -605,6 +606,7 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
        if restrictions:
            restrictions = ' :: ' + restrictions
            font.setBold(True)
            font.setItalic(True)
        self.virtual_library.setFont(font)
        title = u'{0} - || {1}{2} ||'.format(
                __appname__, self.iactions['Choose Library'].library_name(), restrictions)
@ -17,7 +17,6 @@ from calibre.ebooks import calibre_cover
from calibre.library import current_library_name
from calibre.library.catalogs import AuthorSortMismatchException, EmptyCatalogException
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.config import JSONConfig
from calibre.utils.localization import calibre_langcode_to_name, canonicalize_lang, get_lang

Option = namedtuple('Option', 'option, default, dest, action, help')

@ -191,6 +190,7 @@ class EPUB_MOBI(CatalogPlugin):
    def run(self, path_to_output, opts, db, notification=DummyReporter()):
        from calibre.library.catalogs.epub_mobi_builder import CatalogBuilder
        from calibre.utils.logging import default_log as log
        from calibre.utils.config import JSONConfig

        # If preset specified from the cli, insert stored options from JSON file
        if hasattr(opts, 'preset') and opts.preset:
@ -11,7 +11,6 @@ import os, sys, shutil, cStringIO, glob, time, functools, traceback, re, \
from collections import defaultdict
import threading, random
from itertools import repeat
from math import ceil, floor

from calibre import prints, force_unicode
from calibre.ebooks.metadata import (title_sort, author_to_author_sort,

@ -42,6 +41,7 @@ from calibre.ebooks import BOOK_EXTENSIONS, check_ebook_format
from calibre.utils.magick.draw import save_cover_data_to
from calibre.utils.recycle_bin import delete_file, delete_tree
from calibre.utils.formatter_functions import load_user_template_functions
from calibre.db import _get_next_series_num_for_list, _get_series_values
from calibre.db.errors import NoSuchFormat
from calibre.db.lazy import FormatMetadata, FormatsList
from calibre.db.categories import Tag, CATEGORY_SORTS
@ -2194,31 +2194,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
        return self._get_next_series_num_for_list(series_indices)

    def _get_next_series_num_for_list(self, series_indices):
        if not series_indices:
            if isinstance(tweaks['series_index_auto_increment'], (int, float)):
                return float(tweaks['series_index_auto_increment'])
            return 1.0
        series_indices = [x[0] for x in series_indices]
        if tweaks['series_index_auto_increment'] == 'next':
            return floor(series_indices[-1]) + 1
        if tweaks['series_index_auto_increment'] == 'first_free':
            for i in range(1, 10000):
                if i not in series_indices:
                    return i
            # really shouldn't get here.
        if tweaks['series_index_auto_increment'] == 'next_free':
            for i in range(int(ceil(series_indices[0])), 10000):
                if i not in series_indices:
                    return i
            # really shouldn't get here.
        if tweaks['series_index_auto_increment'] == 'last_free':
            for i in range(int(ceil(series_indices[-1])), 0, -1):
                if i not in series_indices:
                    return i
            return series_indices[-1] + 1
        if isinstance(tweaks['series_index_auto_increment'], (int, float)):
            return float(tweaks['series_index_auto_increment'])
        return 1.0
        return _get_next_series_num_for_list(series_indices)

    def set(self, row, column, val, allow_case_change=False):
        '''
@ -3156,17 +3132,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
    series_index_pat = re.compile(r'(.*)\s+\[([.0-9]+)\]$')

    def _get_series_values(self, val):
        if not val:
            return (val, None)
        match = self.series_index_pat.match(val.strip())
        if match is not None:
            idx = match.group(2)
            try:
                idx = float(idx)
                return (match.group(1).strip(), idx)
            except:
                pass
        return (val, None)
        return _get_series_values(val)

    def set_series(self, id, series, notify=True, commit=True, allow_case_change=True):
        self.conn.execute('DELETE FROM books_series_link WHERE book=?',(id,))
@ -6,7 +6,7 @@ Created on 25 May 2010
import copy, traceback
from collections import OrderedDict

from calibre.utils.config import tweaks
from calibre.utils.config_base import tweaks

class TagsIcons(dict):
    '''

@ -252,7 +252,7 @@ class FieldMetadata(dict):
                   'datatype':'int',
                   'is_multiple':{},
                   'kind':'field',
                   'name':None,
                   'name':_('Cover'),
                   'search_terms':['cover'],
                   'is_custom':False,
                   'is_category':False,
@ -7,7 +7,7 @@ __docformat__ = 'restructuredtext en'

import os

from calibre.utils.config import Config, StringConfig, config_dir, tweaks
from calibre.utils.config_base import Config, StringConfig, config_dir, tweaks


listen_on = tweaks['server_listen_on']