Sync to trunk.

John Schember 2011-06-05 18:12:21 -04:00
commit db1714362d
129 changed files with 100189 additions and 51268 deletions

View File

@ -19,6 +19,74 @@
# new recipes:
# - title:
- version: 0.8.4
date: 2011-06-03
new features:
- title: "New and much simpler interface for specifying column coloring via Preferences->Look & Feel->Column Coloring"
- title: "Driver for Trekstor eBook Player 5M, Samsung Galaxy SII I9100, Motorola Defy and miBuk GAMMA 6.2"
tickets: [792091, 791216]
- title: "Get Books: Add EpubBud, WH Smits and E-book Shoppe stores"
- title: "When deleting 'all formats except ...', do not delete if it leaves a book with no formats"
- title: "Change default toolbar to make it a little more new user friendly. The icons have been re-arranged and now the text is always visiblke by default. You can change that in Preferences->Look & Feeel and Preferences->Toolbar"
- title: "Windows installer: Remember and use previous settings for installing desktop icons, adding to path, etc. This makes the installer a little slower, complaints should go to Microsoft."
- title: "Template language: Add str_in_list and on_device formatter functions. Make debugging templates a little easier"
- title: "Allow the user to specify formatting for number type custom columns"
bug fixes:
- title: "Fix typo in NOOK TSR driver that prevented it from working on windows"
- title: "Fix quotes in identifiers causing Tag Browser to be blank."
tickets: [791044]
- title: "Speedup auto complete when there are lots of items (>2500) the downside being that non ASCII characters are not sorted correctly. The threshold can be controlled via Preferences->Tweaks"
tickets: [792191]
- title: "RTF Output: Fix handling of curly brackets"
tickets: [791805]
- title: "Fix searching in Get Books not working with non ASCII characters"
tickets: [791788]
- title: "Fix excessive memory consumption when moving very large files during a metadata change"
tickets: [791806]
- title: "Fix series index being overwritten even when series is turned off in bulk metadata download"
tickets: [789990]
- title: "Fix regression in templates where id and other non standard fields no longer worked."
- title: "EPUB Output: Fix crash caused by ids with non-ascii characters in them"
- title: "Try to preserve the timestamps of files in a ZIP container"
- title: "After adding books always select the most recently added book."
tickets: [789343]
improved recipes:
- bild.de
- CNN
- BBC News (fast)
- Dilema Veche
new recipes:
- title: Metro UK
author: Dave Asbury
- title: Alt om Herning and Version2.dk
author: Rasmus Lauritsen
- title: Observatorul cultural
author: song2
- version: 0.8.3
date: 2011-05-27

View File

@ -1,10 +1,11 @@
# -*- coding: utf-8 -*-
from calibre.web.feeds.recipes import BasicNewsRecipe
class AdvancedUserRecipe1303841067(BasicNewsRecipe):
title = u'Bild.de'
__author__ = 'schuster'
oldest_article = 1
max_articles_per_feed = 50
max_articles_per_feed = 100
no_stylesheets = True
use_embedded_content = False
language = 'de'
@ -12,11 +13,25 @@ class AdvancedUserRecipe1303841067(BasicNewsRecipe):
# get cover from myspace
cover_url = 'http://a3.l3-images.myspacecdn.com/images02/56/0232f842170b4d349779f8379c27e073/l.jpg'
masthead_url = 'http://a3.l3-images.myspacecdn.com/images02/56/0232f842170b4d349779f8379c27e073/l.jpg'
# set what to fetch on the site
remove_tags_before = dict(name = 'h2', attrs={'id':'cover'})
remove_tags_after = dict(name ='div', attrs={'class':'back'})
# remove things on the site that we don't want
remove_tags = [dict(name='div', attrs={'class':'credit'}),
dict(name='div', attrs={'class':'index'}),
dict(name='div', attrs={'id':'zstart31'}),
dict(name='div', attrs={'class':'hentry'}),
dict(name='div', attrs={'class':'back'}),
dict(name='div', attrs={'class':'pagination'}),
dict(name='div', attrs={'class':'header'}),
dict(name='div', attrs={'class':'element floatL'}),
dict(name='div', attrs={'class':'stWrap'})
]
# thanks to kiklop74 for code (see sticky thread -> Recipes - Re-usable code)
# this one removes a lot of direct links
def preprocess_html(self, soup):
@ -42,5 +57,18 @@ class AdvancedUserRecipe1303841067(BasicNewsRecipe):
(u'Unterhaltung', u'http://rss.bild.de/bild-unterhaltung.xml'),
(u'Sport', u'http://rss.bild.de/bild-sport.xml'),
(u'Lifestyle', u'http://rss.bild.de/bild-lifestyle.xml'),
(u'Ratgeber', u'http://rss.bild.de/bild-ratgeber.xml')
(u'Ratgeber', u'http://rss.bild.de/bild-ratgeber.xml'),
(u'Reg. - Berlin', u'http://rss.bild.de/bild-berlin.xml'),
(u'Reg. - Bremen', u'http://rss.bild.de/bild-bremen.xml'),
(u'Reg. - Dresden', u'http://rss.bild.de/bild-dresden.xml'),
(u'Reg. - Düsseldorf', u'http://rss.bild.de/bild-duesseldorf.xml'),
(u'Reg. - Frankfurt-Main', u'http://rss.bild.de/bild-frankfurt-main.xml'),
(u'Reg. - Hamburg', u'http://rss.bild.de/bild-hamburg.xml'),
(u'Reg. - Hannover', u'http://rss.bild.de/bild-hannover.xml'),
(u'Reg. - Köln', u'http://rss.bild.de/bild-koeln.xml'),
(u'Reg. - Leipzig', u'http://rss.bild.de/bild-leipzig.xml'),
(u'Reg. - München', u'http://rss.bild.de/bild-muenchen.xml'),
(u'Reg. - Ruhrgebiet', u'http://rss.bild.de/bild-ruhrgebiet.xml'),
(u'Reg. - Stuttgart', u'http://rss.bild.de/bild-stuttgart.xml')
]

View File

@ -0,0 +1,36 @@
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe(BasicNewsRecipe):
title = u'Brigitte.de'
__author__ = 'schuster'
oldest_article = 14
max_articles_per_feed = 100
no_stylesheets = True
use_embedded_content = False
language = 'de'
remove_javascript = True
remove_empty_feeds = True
timeout = 10
cover_url = 'http://www.medienmilch.de/typo3temp/pics/Brigitte-Logo_d5feb4a6e4.jpg'
masthead_url = 'http://www.medienmilch.de/typo3temp/pics/Brigitte-Logo_d5feb4a6e4.jpg'
remove_tags = [dict(attrs={'class':['linklist', 'head', 'indent right relatedContent', 'artikel-meta segment', 'segment', 'comment commentFormWrapper segment borderBG', 'segment borderBG comments', 'segment borderBG box', 'center', 'segment nextPageLink', 'inCar']}),
dict(id=['header', 'artTools', 'context', 'interact', 'footer-navigation', 'bwNet', 'copy', 'keyboardNavigationHint']),
dict(name=['hjtrs', 'kud'])]
feeds = [(u'Mode', u'http://www.brigitte.de/mode/feed.rss'),
(u'Beauty', u'http://www.brigitte.de/beauty/feed.rss'),
(u'Luxus', u'http://www.brigitte.de/luxus/feed.rss'),
(u'Figur', u'http://www.brigitte.de/figur/feed.rss'),
(u'Gesundheit', u'http://www.brigitte.de/gesundheit/feed.rss'),
(u'Liebe&Sex', u'http://www.brigitte.de/liebe-sex/feed.rss'),
(u'Gesellschaft', u'http://www.brigitte.de/gesellschaft/feed.rss'),
(u'Kultur', u'http://www.brigitte.de/kultur/feed.rss'),
(u'Reise', u'http://www.brigitte.de/reise/feed.rss'),
(u'Kochen', u'http://www.brigitte.de/kochen/feed.rss'),
(u'Wohnen', u'http://www.brigitte.de/wohnen/feed.rss'),
(u'Job', u'http://www.brigitte.de/job/feed.rss'),
(u'Erfahrungen', u'http://www.brigitte.de/erfahrungen/feed.rss'),
]

View File

@ -1,5 +1,4 @@
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1303841067(BasicNewsRecipe):
title = u'Express.de'
@ -12,7 +11,6 @@ class AdvancedUserRecipe1303841067(BasicNewsRecipe):
extra_css = '''
h2{font-family:Arial,Helvetica,sans-serif; font-size: x-small;}
h1{ font-family:Arial,Helvetica,sans-serif; font-size:x-large; font-weight:bold;}
'''
remove_javascript = True
remove_tags_before = [dict(name='div', attrs={'class':'Datum'})]
@ -25,6 +23,7 @@ class AdvancedUserRecipe1303841067(BasicNewsRecipe):
dict(id='Logo'),
dict(id='MainLinkSpacer'),
dict(id='MainLinks'),
dict(id='ContainerPfad'), # new
dict(title='Diese Seite Bookmarken'),
dict(name='span'),
@ -44,7 +43,8 @@ class AdvancedUserRecipe1303841067(BasicNewsRecipe):
dict(name='div', attrs={'class':'HeaderSearch'}),
dict(name='div', attrs={'class':'sbutton'}),
dict(name='div', attrs={'class':'active'}),
dict(name='div', attrs={'class':'MoreNews'}), # new
dict(name='div', attrs={'class':'ContentBoxSubline'}) # new
]
@ -68,7 +68,5 @@ class AdvancedUserRecipe1303841067(BasicNewsRecipe):
(u'Fortuna D~Dorf', u'http://www.express.de/sport/fussball/fortuna/-/3292/3292/-/view/asFeed/-/index.xml'),
(u'Basketball News', u'http://www.express.de/sport/basketball/-/3190/3190/-/view/asFeed/-/index.xml'),
(u'Big Brother', u'http://www.express.de/news/promi-show/big-brother/-/2402/2402/-/view/asFeed/-/index.xml'),
]
]

View File

@ -28,7 +28,7 @@ class Guardian(BasicNewsRecipe):
# List of section titles to ignore
# For example: ['Sport']
ignore_sections = []
timefmt = ' [%a, %d %b %Y]'
keep_only_tags = [
dict(name='div', attrs={'id':["content","article_header","main-article-info",]}),
@ -87,8 +87,14 @@ class Guardian(BasicNewsRecipe):
idx = soup.find('div', id='book-index')
for s in idx.findAll('strong', attrs={'class':'book'}):
a = s.find('a', href=True)
yield (self.tag_to_string(a), a['href'])
section_title = self.tag_to_string(a)
if not section_title in self.ignore_sections:
prefix = ''
if section_title != 'Main section':
prefix = section_title + ': '
for subsection in s.parent.findAll('a', attrs={'class':'book-section'}):
yield (prefix + self.tag_to_string(subsection), subsection['href'])
def find_articles(self, url):
soup = self.index_to_soup(url)
div = soup.find('div', attrs={'class':'book-index'})
@ -109,15 +115,12 @@ class Guardian(BasicNewsRecipe):
'title': title, 'url':url, 'description':desc,
'date' : strftime('%a, %d %b'),
}
def parse_index(self):
try:
feeds = []
for title, href in self.find_sections():
if not title in self.ignore_sections:
feeds.append((title, list(self.find_articles(href))))
feeds.append((title, list(self.find_articles(href))))
return feeds
except:
raise NotImplementedError

View File

@ -0,0 +1,52 @@
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe(BasicNewsRecipe):
title = 'Heise-online'
description = 'News vom Heise-Verlag'
__author__ = 'schuster'
use_embedded_content = False
language = 'de'
oldest_article = 2
max_articles_per_feed = 35
rescale_images = True
remove_empty_feeds = True
timeout = 5
no_stylesheets = True
remove_tags_after = dict(name ='p', attrs={'class':'editor'})
remove_tags = [dict(id='navi_top_container'),
dict(id='navi_bottom'),
dict(id='mitte_rechts'),
dict(id='navigation'),
dict(id='subnavi'),
dict(id='social_bookmarks'),
dict(id='permalink'),
dict(id='content_foren'),
dict(id='seiten_navi'),
dict(id='adbottom'),
dict(id='sitemap')]
feeds = [
('Newsticker', 'http://www.heise.de/newsticker/heise.rdf'),
('Auto', 'http://www.heise.de/autos/rss/news.rdf'),
('Foto ', 'http://www.heise.de/foto/rss/news-atom.xml'),
('Mac&i', 'http://www.heise.de/mac-and-i/news.rdf'),
('Mobile ', 'http://www.heise.de/mobil/newsticker/heise-atom.xml'),
('Netz ', 'http://www.heise.de/netze/rss/netze-atom.xml'),
('Open ', 'http://www.heise.de/open/news/news-atom.xml'),
('Resale ', 'http://www.heise.de/resale/rss/resale.rdf'),
('Security ', 'http://www.heise.de/security/news/news-atom.xml'),
('C`t', 'http://www.heise.de/ct/rss/artikel-atom.xml'),
('iX', 'http://www.heise.de/ix/news/news.rdf'),
('Mach-flott', 'http://www.heise.de/mach-flott/rss/mach-flott-atom.xml'),
('Blog: Babel-Bulletin', 'http://www.heise.de/developer/rss/babel-bulletin/blog.rdf'),
('Blog: Der Dotnet-Doktor', 'http://www.heise.de/developer/rss/dotnet-doktor/blog.rdf'),
('Blog: Bernds Management-Welt', 'http://www.heise.de/developer/rss/bernds-management-welt/blog.rdf'),
('Blog: IT conversation', 'http://www.heise.de/developer/rss/world-of-it/blog.rdf'),
('Blog: Kais bewegtes Web', 'http://www.heise.de/developer/rss/kais-bewegtes-web/blog.rdf')
]
def print_version(self, url):
return url + '?view=print'

View File

@ -3,9 +3,6 @@ class AdvancedUserRecipe1303841067(BasicNewsRecipe):
title = u'Max-Planck-Inst.'
__author__ = 'schuster'
remove_tags = [dict(attrs={'class':['clearfix', 'lens', 'col2_box_list', 'col2_box_teaser group_ext no_print', 'dotted_line', 'col2_box_teaser', 'box_image small', 'bold', 'col2_box_teaser no_print', 'print_kontakt']}),
dict(id=['ie_clearing', 'col2', 'col2_content']),
dict(name=['script', 'noscript', 'style'])]
oldest_article = 30
max_articles_per_feed = 100
no_stylesheets = True
@ -13,6 +10,11 @@ class AdvancedUserRecipe1303841067(BasicNewsRecipe):
language = 'de'
remove_javascript = True
remove_tags = [dict(attrs={'class':['box_url', 'print_kontakt']}),
dict(id=['skiplinks'])]
def print_version(self, url):
split_url = url.split("/")
print_url = 'http://www.mpg.de/print/' + split_url[3]

View File

@ -69,7 +69,11 @@ class Newsweek(BasicNewsRecipe):
for section, shref in self.newsweek_sections():
self.log('Processing section', section, shref)
articles = []
soups = [self.index_to_soup(shref)]
try:
soups = [self.index_to_soup(shref)]
except:
self.log.warn('Section %s not found, skipping'%section)
continue
na = soups[0].find('a', rel='next')
if na:
soups.append(self.index_to_soup(self.BASE_URL+na['href']))

View File

@ -0,0 +1,35 @@
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe(BasicNewsRecipe):
title = u'Polizeipresse - Deutschland'
__author__ = 'schuster'
description = 'Tagesaktuelle "Polizeiberichte" aus ganz Deutschland (bis auf Ortsebene).' 'Um deinen Ort/Stadt/Kreis usw. einzubinden, gehe auf "http://www.presseportal.de/polizeipresse/" und suche im oberen "Suchfeld" nach dem Namen.' 'Oberhalb der Suchergebnisse (Folgen:) auf den üblichen link zu den RSS-Feeds klicken und den RSS-link im Rezept unter "feeds" eintragen wie üblich.' 'Die Auswahl von Orten kann vereinfacht werden wenn man den Suchbegriff wie folgt eingibt:' '"Stadt-Ort".'
oldest_article = 21
max_articles_per_feed = 100
no_stylesheets = True
use_embedded_content = False
language = 'de'
remove_javascript = True
masthead_url = 'http://www.alt-heliservice.de/images/34_BPOL_Logo_4C_g_schutzbereich.jpg'
cover_url = 'http://berlinstadtservice.de/buerger/Bundespolizei-Logo.png'
remove_tags = [
dict(name='div', attrs={'id':'logo'}),
dict(name='div', attrs={'id':'origin'}),
dict(name='pre', attrs={'class':'xml_contact'})]
def print_version(self,url):
segments = url.split('/')
printURL = 'http://www.presseportal.de/print.htx?nr=' + '/'.join(segments[5:6]) + '&type=polizei'
return printURL
feeds = [(u'Frimmerdorf', u'http://www.presseportal.de/rss/rss2_vts.htx?q=Grevenbroich-frimmersdorf&w=public_service'),
(u'Neurath', u'http://www.presseportal.de/rss/rss2_vts.htx?q=Grevenbroich-neurath&w=public_service'),
(u'Gustorf', u'http://www.presseportal.de/rss/rss2_vts.htx?q=Grevenbroich-gustorf&w=public_service'),
(u'Neuenhausen', u'http://www.presseportal.de/rss/rss2_vts.htx?q=Grevenbroich-neuenhausen&w=public_service'),
(u'Wevelinghoven', u'http://www.presseportal.de/rss/rss2_vts.htx?q=Grevenbroich-Wevelinghoven&w=public_service'),
(u'Grevenbroich ges.', u'http://www.presseportal.de/rss/rss2_vts.htx?q=grevenbroich&w=public_service'),
(u'Kreis Neuss ges.', u'http://www.presseportal.de/rss/rss2_vts.htx?q=Rhein-Kreis+Neuss&w=public_service'),
]

View File

@ -49,6 +49,7 @@ class TelegraphUK(BasicNewsRecipe):
(u'UK News' , u'http://www.telegraph.co.uk/news/uknews/rss' )
,(u'World News' , u'http://www.telegraph.co.uk/news/worldnews/rss' )
,(u'Politics' , u'http://www.telegraph.co.uk/news/newstopics/politics/rss' )
,(u'Finance' , u'http://www.telegraph.co.uk/finance/rss' )
,(u'Technology News', u'http://www.telegraph.co.uk/scienceandtechnology/technology/technologynews/rss' )
,(u'UK News' , u'http://www.telegraph.co.uk/scienceandtechnology/technology/technologyreviews/rss')
,(u'Science News' , u'http://www.telegraph.co.uk/scienceandtechnology/science/sciencenews/rss' )

View File

@ -10,8 +10,8 @@ import re
from calibre.web.feeds.news import BasicNewsRecipe
class Time(BasicNewsRecipe):
recipe_disabled = ('This recipe has been disabled as TIME no longer'
' publish complete articles on the web.')
#recipe_disabled = ('This recipe has been disabled as TIME no longer'
# ' publish complete articles on the web.')
title = u'Time'
__author__ = 'Kovid Goyal and Sujata Raman'
description = 'Weekly magazine'

View File

@ -82,7 +82,7 @@ class ZAOBAO(BasicNewsRecipe):
return soup
def parse_feeds(self):
self.log_debug(_('ZAOBAO overrided parse_feeds()'))
self.log(_('ZAOBAO overrided parse_feeds()'))
parsed_feeds = BasicNewsRecipe.parse_feeds(self)
for id, obj in enumerate(self.INDEXES):
@ -99,7 +99,7 @@ class ZAOBAO(BasicNewsRecipe):
a_title = self.tag_to_string(a)
date = ''
description = ''
self.log_debug(_('adding %s at %s')%(a_title,a_url))
self.log(_('adding %s at %s')%(a_title,a_url))
articles.append({
'title':a_title,
'date':date,
@ -110,23 +110,23 @@ class ZAOBAO(BasicNewsRecipe):
pfeeds = feeds_from_index([(title, articles)], oldest_article=self.oldest_article,
max_articles_per_feed=self.max_articles_per_feed)
self.log_debug(_('adding %s to feed')%(title))
self.log(_('adding %s to feed')%(title))
for feed in pfeeds:
self.log_debug(_('adding feed: %s')%(feed.title))
self.log(_('adding feed: %s')%(feed.title))
feed.description = self.DESC_SENSE
parsed_feeds.append(feed)
for a, article in enumerate(feed):
self.log_debug(_('added article %s from %s')%(article.title, article.url))
self.log_debug(_('added feed %s')%(feed.title))
self.log(_('added article %s from %s')%(article.title, article.url))
self.log(_('added feed %s')%(feed.title))
for i, feed in enumerate(parsed_feeds):
# workaround a strange problem: Sometimes the xml encoding is not applied correctly by parse()
weired_encoding_detected = False
if not isinstance(feed.description, unicode) and self.encoding and feed.description:
self.log_debug(_('Feed %s is not encoded correctly, manually replace it')%(feed.title))
self.log(_('Feed %s is not encoded correctly, manually replace it')%(feed.title))
feed.description = feed.description.decode(self.encoding, 'replace')
elif feed.description.find(self.DESC_SENSE) == -1 and self.encoding and feed.description:
self.log_debug(_('Feed %s is weirdly encoded, manually redo all')%(feed.title))
self.log(_('Feed %s is weirdly encoded, manually redo all')%(feed.title))
feed.description = feed.description.encode('cp1252', 'replace').decode(self.encoding, 'replace')
weired_encoding_detected = True
@ -148,7 +148,7 @@ class ZAOBAO(BasicNewsRecipe):
article.text_summary = article.text_summary.encode('cp1252', 'replace').decode(self.encoding, 'replace')
if article.title == "Untitled article":
self.log_debug(_('Removing empty article %s from %s')%(article.title, article.url))
self.log(_('Removing empty article %s from %s')%(article.title, article.url))
# remove the article
feed.articles[a:a+1] = []
return parsed_feeds

View File

@ -37,7 +37,6 @@ series_index_auto_increment = 'next'
# Can be either True or False
authors_completer_append_separator = False
#: Author sort name algorithm
# The algorithm used to copy author to author_sort
# Possible values are:
@ -71,6 +70,15 @@ author_name_suffixes = ('Jr', 'Sr', 'Inc', 'Ph.D', 'Phd',
# categories_use_field_for_author_name = 'author_sort'
categories_use_field_for_author_name = 'author'
#: Completion sort order: choose when to change from lexicographic to ASCII-like
# Calibre normally uses locale-dependent lexicographic ordering when showing
# completion values. This means that the sort order is correct for the user's
# language. However, this can be slow. Performance is improved by switching to
# ascii ordering. This tweak controls when that switch happens. Set it to zero
# to always use ascii ordering. Set it to something larger than zero to switch
# to ascii ordering for performance reasons.
completion_change_to_ascii_sorting = 2500
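A rough, self-contained sketch of the switch this tweak controls (illustrative names only, not calibre code; the actual change is in CompleteModel.set_items further down in this commit): below the threshold, items are sorted with a locale-aware key; above it, with plain case-insensitive ordering.
import locale
COMPLETION_CHANGE_TO_ASCII_SORTING = 2500  # mirrors the tweak above
def sort_completion_items(items):
    # Illustrative only: locale.strxfrm stands in for calibre's ICU sort_key.
    if len(items) < COMPLETION_CHANGE_TO_ASCII_SORTING:
        # slower, but correct for the user's language
        return sorted(items, key=locale.strxfrm)
    # fast ASCII-like ordering; non-ASCII items may sort incorrectly
    return sorted(items, key=lambda x: x.lower())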
#: Control partitioning of Tag Browser
# When partitioning the tags browser, the format of the subcategory label is
# controlled by a template: categories_collapsed_name_template if sorting by
@ -93,7 +101,6 @@ categories_collapsed_name_template = r'{first.sort:shorten(4,,0)} - {last.sort:s
categories_collapsed_rating_template = r'{first.avg_rating:4.2f:ifempty(0)} - {last.avg_rating:4.2f:ifempty(0)}'
categories_collapsed_popularity_template = r'{first.count:d} - {last.count:d}'
#: Specify columns to sort the booklist by on startup
# Provide a set of columns to be sorted on when calibre starts
# The argument is None if saved sort history is to be used
@ -244,17 +251,14 @@ sony_collection_name_template='{value}{category:| (|)}'
# Default: empty (no rules), so no collection attributes are named.
sony_collection_sorting_rules = []
#: Control how tags are applied when copying books to another library
# Set this to True to ensure that tags in 'Tags to add when adding
# a book' are added when copying books to another library
add_new_book_tags_when_importing_books = False
#: Set the maximum number of tags to show per book in the content server
max_content_server_tags_shown=5
#: Set custom metadata fields that the content server will or will not display.
# content_server_will_display is a list of custom fields to be displayed.
# content_server_wont_display is a list of custom fields not to be displayed.
@ -296,7 +300,6 @@ generate_cover_foot_font = None
# Example: doubleclick_on_library_view = 'do_nothing'
doubleclick_on_library_view = 'open_viewer'
#: Language to use when sorting.
# Setting this tweak will force sorting to use the
# collating order for the specified language. This might be useful if you run

View File

@ -5,7 +5,7 @@
"uppercase": "def evaluate(self, formatter, kwargs, mi, locals, val):\n return val.upper()\n",
"strcat": "def evaluate(self, formatter, kwargs, mi, locals, *args):\n i = 0\n res = ''\n for i in range(0, len(args)):\n res += args[i]\n return res\n",
"in_list": "def evaluate(self, formatter, kwargs, mi, locals, val, sep, pat, fv, nfv):\n l = [v.strip() for v in val.split(sep) if v.strip()]\n if l:\n for v in l:\n if re.search(pat, v, flags=re.I):\n return fv\n return nfv\n",
"multiply": "def evaluate(self, formatter, kwargs, mi, locals, x, y):\n x = float(x if x else 0)\n y = float(y if y else 0)\n return unicode(x * y)\n",
"not": "def evaluate(self, formatter, kwargs, mi, locals, *args):\n i = 0\n while i < len(args):\n if args[i]:\n return '1'\n i += 1\n return ''\n",
"ifempty": "def evaluate(self, formatter, kwargs, mi, locals, val, value_if_empty):\n if val:\n return val\n else:\n return value_if_empty\n",
"booksize": "def evaluate(self, formatter, kwargs, mi, locals):\n if mi.book_size is not None:\n try:\n return str(mi.book_size)\n except:\n pass\n return ''\n",
"select": "def evaluate(self, formatter, kwargs, mi, locals, val, key):\n if not val:\n return ''\n vals = [v.strip() for v in val.split(',')]\n for v in vals:\n if v.startswith(key+':'):\n return v[len(key)+1:]\n return ''\n",
@ -27,9 +27,10 @@
"sublist": "def evaluate(self, formatter, kwargs, mi, locals, val, start_index, end_index, sep):\n if not val:\n return ''\n si = int(start_index)\n ei = int(end_index)\n val = val.split(sep)\n try:\n if ei == 0:\n return sep.join(val[si:])\n else:\n return sep.join(val[si:ei])\n except:\n return ''\n",
"test": "def evaluate(self, formatter, kwargs, mi, locals, val, value_if_set, value_not_set):\n if val:\n return value_if_set\n else:\n return value_not_set\n",
"eval": "def evaluate(self, formatter, kwargs, mi, locals, template):\n from formatter import eval_formatter\n template = template.replace('[[', '{').replace(']]', '}')\n return eval_formatter.safe_format(template, locals, 'EVAL', None)\n",
"not": "def evaluate(self, formatter, kwargs, mi, locals, *args):\n i = 0\n while i < len(args):\n if args[i]:\n return '1'\n i += 1\n return ''\n",
"format_date": "def evaluate(self, formatter, kwargs, mi, locals, val, format_string):\n if not val:\n return ''\n try:\n dt = parse_date(val)\n s = format_date(dt, format_string)\n except:\n s = 'BAD DATE'\n return s\n",
"multiply": "def evaluate(self, formatter, kwargs, mi, locals, x, y):\n x = float(x if x else 0)\n y = float(y if y else 0)\n return unicode(x * y)\n",
"format_date": "def evaluate(self, formatter, kwargs, mi, locals, val, format_string):\n if not val or val == 'None':\n return ''\n try:\n dt = parse_date(val)\n s = format_date(dt, format_string)\n except:\n s = 'BAD DATE'\n return s\n",
"capitalize": "def evaluate(self, formatter, kwargs, mi, locals, val):\n return capitalize(val)\n",
"identifier_in_list": "def evaluate(self, formatter, kwargs, mi, locals, val, ident, fv, nfv):\n l = [v.strip() for v in val.split(',') if v.strip()]\n (id, _, regexp) = ident.partition(':')\n if not id:\n return nfv\n id += ':'\n if l:\n for v in l:\n if v.startswith(id):\n if not regexp or re.search(regexp, v[len(id):], flags=re.I):\n return fv\n return nfv\n",
"count": "def evaluate(self, formatter, kwargs, mi, locals, val, sep):\n return unicode(len(val.split(sep)))\n",
"lowercase": "def evaluate(self, formatter, kwargs, mi, locals, val):\n return val.lower()\n",
"substr": "def evaluate(self, formatter, kwargs, mi, locals, str_, start_, end_):\n return str_[int(start_): len(str_) if int(end_) == 0 else int(end_)]\n",
@ -38,5 +39,5 @@
"ondevice": "def evaluate(self, formatter, kwargs, mi, locals):\n if mi.ondevice_col:\n return _('Yes')\n return ''\n",
"assign": "def evaluate(self, formatter, kwargs, mi, locals, target, value):\n locals[target] = value\n return value\n",
"raw_field": "def evaluate(self, formatter, kwargs, mi, locals, name):\n return unicode(getattr(mi, name, None))\n",
"cmp": "def evaluate(self, formatter, kwargs, mi, locals, x, y, lt, eq, gt):\n x = float(x if x else 0)\n y = float(y if y else 0)\n if x < y:\n return lt\n if x == y:\n return eq\n return gt\n"
"cmp": "def evaluate(self, formatter, kwargs, mi, locals, x, y, lt, eq, gt):\n x = float(x if x and x != 'None' else 0)\n y = float(y if y and y != 'None' else 0)\n if x < y:\n return lt\n if x == y:\n return eq\n return gt\n"
}

View File

@ -187,7 +187,6 @@ msgstr ""
'''%dict(appname=__appname__, version=version, year=time.strftime('%Y'))
def usage(code, msg=''):
print >> sys.stderr, __doc__ % globals()
if msg:

View File

@ -85,7 +85,7 @@ class Translations(POT):
def mo_file(self, po_file):
locale = os.path.splitext(os.path.basename(po_file))[0]
return locale, os.path.join(self.DEST, locale, 'LC_MESSAGES', 'messages.mo')
return locale, os.path.join(self.DEST, locale, 'messages.mo')
def run(self, opts):
@ -94,9 +94,8 @@ class Translations(POT):
base = os.path.dirname(dest)
if not os.path.exists(base):
os.makedirs(base)
if self.newer(dest, f):
self.info('\tCompiling translations for', locale)
subprocess.check_call(['msgfmt', '-o', dest, f])
self.info('\tCompiling translations for', locale)
subprocess.check_call(['msgfmt', '-o', dest, f])
if locale in ('en_GB', 'nds', 'te', 'yi'):
continue
pycountry = self.j(sysconfig.get_python_lib(), 'pycountry',
@ -123,6 +122,16 @@ class Translations(POT):
shutil.copy2(f, dest)
self.write_stats()
self.freeze_locales()
def freeze_locales(self):
zf = self.DEST + '.zip'
from calibre import CurrentDir
from calibre.utils.zipfile import ZipFile, ZIP_DEFLATED
with ZipFile(zf, 'w', ZIP_DEFLATED) as zf:
with CurrentDir(self.DEST):
zf.add_dir('.')
shutil.rmtree(self.DEST)
@property
def stats(self):

View File

@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
numeric_version = (0, 8, 3)
numeric_version = (0, 8, 4)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

View File

@ -53,6 +53,7 @@ class ANDROID(USBMS):
0x681c : [0x0222, 0x0224, 0x0400],
0x6640 : [0x0100],
0x685e : [0x0400],
0x6860 : [0x0400],
0x6877 : [0x0400],
},

View File

@ -2392,6 +2392,16 @@ class ITUNES(DriverBase):
self.iTunes.Windows[0].Minimized = True
self.initial_status = 'launched'
try:
# Pre-emptive test to confirm functional iTunes automation interface
foo = self.iTunes.Version
foo
except:
self.iTunes = None
raise OpenFeedback('Unable to connect to iTunes.\n' +
' iTunes automation interface non-responsive, ' +
'recommend reinstalling iTunes')
# Read the current storage path for iTunes media from the XML file
media_dir = ''
string = None
@ -2988,7 +2998,6 @@ class ITUNES(DriverBase):
newmi = book
return newmi
class ITUNES_ASYNC(ITUNES):
'''
This subclass allows the user to interact directly with iTunes via a menu option

View File

@ -224,13 +224,16 @@ class TREKSTOR(USBMS):
FORMATS = ['epub', 'txt', 'pdf']
VENDOR_ID = [0x1e68]
PRODUCT_ID = [0x0041, 0x0042]
PRODUCT_ID = [0x0041, 0x0042,
0x003e # This is for the EBOOK_PLAYER_5M https://bugs.launchpad.net/bugs/792091
]
BCD = [0x0002]
EBOOK_DIR_MAIN = 'Ebooks'
VENDOR_NAME = 'TREKSTOR'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'EBOOK_PLAYER_7'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['EBOOK_PLAYER_7',
'EBOOK_PLAYER_5M']
class EEEREADER(USBMS):

View File

@ -107,6 +107,9 @@ class NOOK_COLOR(NOOK):
return filepath
def upload_cover(self, path, filename, metadata, filepath):
pass
class NOOK_TSR(NOOK):
gui_name = _('Nook Simple')
description = _('Communicate with the Nook TSR eBook reader.')
@ -117,4 +120,6 @@ class NOOK_TSR(NOOK):
EBOOK_DIR_MAIN = EBOOK_DIR_CARD_A = 'My Files/Books'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'EBOOK_DISK'
def upload_cover(self, path, filename, metadata, filepath):
pass

View File

@ -204,7 +204,8 @@ class CollectionsBookList(BookList):
elif fm['datatype'] == 'text' and fm['is_multiple']:
val = orig_val
elif fm['datatype'] == 'composite' and fm['is_multiple']:
val = [v.strip() for v in val.split(fm['is_multiple'])]
val = [v.strip() for v in
val.split(fm['is_multiple']['ui_to_list'])]
else:
val = [val]

View File

@ -621,10 +621,7 @@ class Metadata(object):
orig_res = res
datatype = cmeta['datatype']
if datatype == 'text' and cmeta['is_multiple']:
if cmeta['display'].get('is_names', False):
res = u' & '.join(res)
else:
res = u', '.join(sorted(res, key=sort_key))
res = cmeta['is_multiple']['list_to_ui'].join(res)
elif datatype == 'series' and series_with_index:
if self.get_extra(key) is not None:
res = res + \
@ -668,7 +665,7 @@ class Metadata(object):
elif datatype == 'text' and fmeta['is_multiple']:
if isinstance(res, dict):
res = [k + ':' + v for k,v in res.items()]
res = u', '.join(sorted(res, key=sort_key))
res = fmeta['is_multiple']['list_to_ui'].join(sorted(res, key=sort_key))
elif datatype == 'series' and series_with_index:
res = res + ' [%s]'%self.format_series_index()
elif datatype == 'datetime':

View File

@ -5,8 +5,7 @@ Created on 4 Jun 2010
'''
from base64 import b64encode, b64decode
import json
import traceback
import json, traceback
from calibre.ebooks.metadata.book import SERIALIZABLE_FIELDS
from calibre.constants import filesystem_encoding, preferred_encoding
@ -69,6 +68,40 @@ def object_to_unicode(obj, enc=preferred_encoding):
return ans
return obj
def encode_is_multiple(fm):
if fm.get('is_multiple', None):
# migrate is_multiple back to a character
fm['is_multiple2'] = fm.get('is_multiple', {})
dt = fm.get('datatype', None)
if dt == 'composite':
fm['is_multiple'] = ','
else:
fm['is_multiple'] = '|'
else:
fm['is_multiple'] = None
fm['is_multiple2'] = {}
def decode_is_multiple(fm):
im = fm.get('is_multiple2', None)
if im:
fm['is_multiple'] = im
del fm['is_multiple2']
else:
# Must migrate the is_multiple from char to dict
im = fm.get('is_multiple', {})
if im:
dt = fm.get('datatype', None)
if dt == 'composite':
im = {'cache_to_list': ',', 'ui_to_list': ',',
'list_to_ui': ', '}
elif fm.get('display', {}).get('is_names', False):
im = {'cache_to_list': '|', 'ui_to_list': '&',
'list_to_ui': ', '}
else:
im = {'cache_to_list': '|', 'ui_to_list': ',',
'list_to_ui': ', '}
fm['is_multiple'] = im
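For context, a rough usage sketch of the round trip these two helpers implement (the field-metadata dict below is illustrative, not an actual calibre structure):
# Illustrative only: a tags-like multi-valued text field.
fm = {'datatype': 'text', 'display': {},
      'is_multiple': {'cache_to_list': '|', 'ui_to_list': ',',
                      'list_to_ui': ', '}}
encode_is_multiple(fm)  # 'is_multiple' collapses to '|', the dict is kept in 'is_multiple2'
decode_is_multiple(fm)  # the original separator dict is restored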
class JsonCodec(object):
def __init__(self):
@ -93,9 +126,10 @@ class JsonCodec(object):
def encode_metadata_attr(self, book, key):
if key == 'user_metadata':
meta = book.get_all_user_metadata(make_copy=True)
for k in meta:
if meta[k]['datatype'] == 'datetime':
meta[k]['#value#'] = datetime_to_string(meta[k]['#value#'])
for fm in meta.itervalues():
if fm['datatype'] == 'datetime':
fm['#value#'] = datetime_to_string(fm['#value#'])
encode_is_multiple(fm)
return meta
if key in self.field_metadata:
datatype = self.field_metadata[key]['datatype']
@ -135,9 +169,10 @@ class JsonCodec(object):
if key == 'classifiers':
key = 'identifiers'
if key == 'user_metadata':
for k in value:
if value[k]['datatype'] == 'datetime':
value[k]['#value#'] = string_to_datetime(value[k]['#value#'])
for fm in value.itervalues():
if fm['datatype'] == 'datetime':
fm['#value#'] = string_to_datetime(fm['#value#'])
decode_is_multiple(fm)
return value
elif key in self.field_metadata:
if self.field_metadata[key]['datatype'] == 'datetime':

View File

@ -7,7 +7,7 @@ __docformat__ = 'restructuredtext en'
lxml based OPF parser.
'''
import re, sys, unittest, functools, os, uuid, glob, cStringIO, json
import re, sys, unittest, functools, os, uuid, glob, cStringIO, json, copy
from urllib import unquote
from urlparse import urlparse
@ -453,10 +453,13 @@ class TitleSortField(MetadataField):
def serialize_user_metadata(metadata_elem, all_user_metadata, tail='\n'+(' '*8)):
from calibre.utils.config import to_json
from calibre.ebooks.metadata.book.json_codec import object_to_unicode
from calibre.ebooks.metadata.book.json_codec import (object_to_unicode,
encode_is_multiple)
for name, fm in all_user_metadata.items():
try:
fm = copy.copy(fm)
encode_is_multiple(fm)
fm = object_to_unicode(fm)
fm = json.dumps(fm, default=to_json, ensure_ascii=False)
except:
@ -575,6 +578,7 @@ class OPF(object): # {{{
self._user_metadata_ = {}
temp = Metadata('x', ['x'])
from calibre.utils.config import from_json
from calibre.ebooks.metadata.book.json_codec import decode_is_multiple
elems = self.root.xpath('//*[name() = "meta" and starts-with(@name,'
'"calibre:user_metadata:") and @content]')
for elem in elems:
@ -585,6 +589,7 @@ class OPF(object): # {{{
fm = elem.get('content')
try:
fm = json.loads(fm, object_hook=from_json)
decode_is_multiple(fm)
temp.set_user_metadata(name, fm)
except:
prints('Failed to read user metadata:', name)

View File

@ -42,6 +42,7 @@ class Worker(Thread): # Get details {{{
months = {
'de': {
1 : ['jän'],
2 : ['februar'],
3 : ['märz'],
5 : ['mai'],
6 : ['juni'],

View File

@ -13,7 +13,13 @@ from weakref import WeakKeyDictionary
from xml.dom import SyntaxErr as CSSSyntaxError
import cssutils
from cssutils.css import (CSSStyleRule, CSSPageRule, CSSStyleDeclaration,
CSSValueList, CSSFontFaceRule, cssproperties)
CSSFontFaceRule, cssproperties)
try:
from cssutils.css import CSSValueList
CSSValueList
except ImportError:
# cssutils >= 0.9.8
from cssutils.css import PropertyValue as CSSValueList
from cssutils import profile as cssprofiles
from lxml import etree
from lxml.cssselect import css_to_xpath, ExpressionError, SelectorSyntaxError

View File

@ -68,8 +68,13 @@ TODO:
'''
def txt2rtf(text):
# Escape { and } in the text.
text = text.replace('{', r'\'7b')
text = text.replace('}', r'\'7d')
if not isinstance(text, unicode):
return text
buf = cStringIO.StringIO()
for x in text:
val = ord(x)
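The changelog entry "RTF Output: Fix handling of curly brackets" corresponds to the two replace() calls above: RTF uses { and } as group delimiters, so literal braces are converted to the hex escapes \'7b and \'7d. A tiny standalone illustration (escape_braces is a made-up name, not the calibre function):
def escape_braces(text):
    # Illustrative sketch of just the escaping step added above.
    return text.replace('{', r"\'7b").replace('}', r"\'7d")
print(escape_braces('a {b} c'))  # prints: a \'7bb\'7d c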

View File

@ -94,6 +94,9 @@ class DeleteAction(InterfaceAction):
self.delete_menu.addAction(
_('Remove all formats from selected books, except...'),
self.delete_all_but_selected_formats)
self.delete_menu.addAction(
_('Remove all formats from selected books'),
self.delete_all_formats)
self.delete_menu.addAction(
_('Remove covers from selected books'), self.delete_covers)
self.delete_menu.addSeparator()
@ -174,6 +177,28 @@ class DeleteAction(InterfaceAction):
if ids:
self.gui.tags_view.recount()
def delete_all_formats(self, *args):
ids = self._get_selected_ids()
if not ids:
return
if not confirm('<p>'+_('<b>All formats</b> for the selected books will '
'be <b>deleted</b> from your library.<br>'
'The book metadata will be kept. Are you sure?')
+'</p>', 'delete_all_formats', self.gui):
return
db = self.gui.library_view.model().db
for id in ids:
fmts = db.formats(id, index_is_id=True, verify_formats=False)
if fmts:
for fmt in fmts.split(','):
self.gui.library_view.model().db.remove_format(id, fmt,
index_is_id=True, notify=False)
self.gui.library_view.model().refresh_ids(ids)
self.gui.library_view.model().current_changed(self.gui.library_view.currentIndex(),
self.gui.library_view.currentIndex())
if ids:
self.gui.tags_view.recount()
def remove_matching_books_from_device(self, *args):
if not self.gui.device_manager.is_device_connected:
d = error_dialog(self.gui, _('Cannot delete books'),

View File

@ -12,16 +12,23 @@ from PyQt4.Qt import QLineEdit, QAbstractListModel, Qt, \
from calibre.utils.icu import sort_key, lower
from calibre.gui2 import NONE
from calibre.gui2.widgets import EnComboBox, LineEditECM
from calibre.utils.config_base import tweaks
class CompleteModel(QAbstractListModel):
def __init__(self, parent=None):
QAbstractListModel.__init__(self, parent)
self.items = []
self.sorting = QCompleter.UnsortedModel
def set_items(self, items):
items = [unicode(x.strip()) for x in items]
self.items = list(sorted(items, key=lambda x: sort_key(x)))
if len(items) < tweaks['completion_change_to_ascii_sorting']:
self.items = sorted(items, key=lambda x: sort_key(x))
self.sorting = QCompleter.UnsortedModel
else:
self.items = sorted(items, key=lambda x:x.lower())
self.sorting = QCompleter.CaseInsensitivelySortedModel
self.lowered_items = [lower(x) for x in self.items]
self.reset()
@ -62,7 +69,7 @@ class MultiCompleteLineEdit(QLineEdit, LineEditECM):
c.setWidget(self)
c.setCompletionMode(QCompleter.PopupCompletion)
c.setCaseSensitivity(Qt.CaseInsensitive)
c.setModelSorting(QCompleter.UnsortedModel)
c.setModelSorting(self._model.sorting)
c.setCompletionRole(Qt.DisplayRole)
p = c.popup()
p.setMouseTracking(True)
@ -146,6 +153,7 @@ class MultiCompleteLineEdit(QLineEdit, LineEditECM):
return self._model.items
def fset(self, items):
self._model.set_items(items)
self._completer.setModelSorting(self._model.sorting)
return property(fget=fget, fset=fset)
class MultiCompleteComboBox(EnComboBox):

View File

@ -226,16 +226,14 @@ class Comments(Base):
class Text(Base):
def setup_ui(self, parent):
if self.col_metadata['display'].get('is_names', False):
self.sep = u' & '
else:
self.sep = u', '
self.sep = self.col_metadata['multiple_seps']
values = self.all_values = list(self.db.all_custom(num=self.col_id))
values.sort(key=sort_key)
if self.col_metadata['is_multiple']:
w = MultiCompleteLineEdit(parent)
w.set_separator(self.sep.strip())
if self.sep == u' & ':
w.set_separator(self.sep['ui_to_list'])
if self.sep['ui_to_list'] == '&':
w.set_space_before_sep(True)
w.set_add_separator(tweaks['authors_completer_append_separator'])
w.update_items_cache(values)
@ -269,12 +267,12 @@ class Text(Base):
if self.col_metadata['is_multiple']:
if not val:
val = []
self.widgets[1].setText(self.sep.join(val))
self.widgets[1].setText(self.sep['list_to_ui'].join(val))
def getter(self):
if self.col_metadata['is_multiple']:
val = unicode(self.widgets[1].text()).strip()
ans = [x.strip() for x in val.split(self.sep.strip()) if x.strip()]
ans = [x.strip() for x in val.split(self.sep['ui_to_list']) if x.strip()]
if not ans:
ans = None
return ans
@ -899,9 +897,10 @@ class BulkText(BulkBase):
if not self.a_c_checkbox.isChecked():
return
if self.col_metadata['is_multiple']:
ism = self.col_metadata['multiple_seps']
if self.col_metadata['display'].get('is_names', False):
val = self.gui_val
add = [v.strip() for v in val.split('&') if v.strip()]
add = [v.strip() for v in val.split(ism['ui_to_list']) if v.strip()]
self.db.set_custom_bulk(book_ids, add, num=self.col_id)
else:
remove_all, adding, rtext = self.gui_val
@ -911,10 +910,10 @@ class BulkText(BulkBase):
else:
txt = rtext
if txt:
remove = set([v.strip() for v in txt.split(',')])
remove = set([v.strip() for v in txt.split(ism['ui_to_list'])])
txt = adding
if txt:
add = set([v.strip() for v in txt.split(',')])
add = set([v.strip() for v in txt.split(ism['ui_to_list'])])
else:
add = set()
self.db.set_custom_bulk_multiple(book_ids, add=add,

View File

@ -520,7 +520,7 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
elif not fm['is_multiple']:
val = [val]
elif fm['datatype'] == 'composite':
val = [v.strip() for v in val.split(fm['is_multiple'])]
val = [v.strip() for v in val.split(fm['is_multiple']['ui_to_list'])]
elif field == 'authors':
val = [v.replace('|', ',') for v in val]
else:
@ -655,19 +655,10 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
if self.destination_field_fm['is_multiple']:
if self.comma_separated.isChecked():
if dest == 'authors' or \
(self.destination_field_fm['is_custom'] and
self.destination_field_fm['datatype'] == 'text' and
self.destination_field_fm['display'].get('is_names', False)):
splitter = ' & '
else:
splitter = ','
splitter = self.destination_field_fm['is_multiple']['ui_to_list']
res = []
for v in val:
for x in v.split(splitter):
if x.strip():
res.append(x.strip())
res.extend([x.strip() for x in v.split(splitter) if x.strip()])
val = res
else:
val = [v.replace(',', '') for v in val]

View File

@ -254,6 +254,15 @@ class TemplateDialog(QDialog, Ui_TemplateDialog):
self.textbox_changed()
self.rule = (None, '')
tt = _('Template language tutorial')
self.template_tutorial.setText(
'<a href="http://manual.calibre-ebook.com/template_lang.html">'
'%s</a>'%tt)
tt = _('Template function reference')
self.template_func_reference.setText(
'<a href="http://manual.calibre-ebook.com/template_ref.html">'
'%s</a>'%tt)
def textbox_changed(self):
cur_text = unicode(self.textbox.toPlainText())
if self.last_text != cur_text:
@ -299,4 +308,4 @@ class TemplateDialog(QDialog, Ui_TemplateDialog):
return
self.rule = (unicode(self.colored_field.currentText()), txt)
QDialog.accept(self)
QDialog.accept(self)

View File

@ -125,6 +125,20 @@
<item row="9" column="1">
<widget class="QPlainTextEdit" name="source_code"/>
</item>
<item row="10" column="1">
<widget class="QLabel" name="template_tutorial">
<property name="openExternalLinks">
<bool>true</bool>
</property>
</widget>
</item>
<item row="11" column="1">
<widget class="QLabel" name="template_func_reference">
<property name="openExternalLinks">
<bool>true</bool>
</property>
</widget>
</item>
</layout>
</item>
</layout>

View File

@ -288,6 +288,8 @@ class CcNumberDelegate(QStyledItemDelegate): # {{{
def setEditorData(self, editor, index):
m = index.model()
val = m.db.data[index.row()][m.custom_columns[m.column_map[index.column()]]['rec_index']]
if val is None:
val = 0
editor.setValue(val)
# }}}

View File

@ -608,10 +608,11 @@ class BooksModel(QAbstractTableModel): # {{{
def text_type(r, mult=None, idx=-1):
text = self.db.data[r][idx]
if text and mult is not None:
if mult:
return QVariant(u' & '.join(text.split('|')))
return QVariant(u', '.join(sorted(text.split('|'),key=sort_key)))
if text and mult:
jv = mult['list_to_ui']
sv = mult['cache_to_list']
return QVariant(jv.join(
sorted([t.strip() for t in text.split(sv)], key=sort_key)))
return QVariant(text)
def decorated_text_type(r, idx=-1):
@ -665,8 +666,6 @@ class BooksModel(QAbstractTableModel): # {{{
datatype = self.custom_columns[col]['datatype']
if datatype in ('text', 'comments', 'composite', 'enumeration'):
mult=self.custom_columns[col]['is_multiple']
if mult is not None:
mult = self.custom_columns[col]['display'].get('is_names', False)
self.dc[col] = functools.partial(text_type, idx=idx, mult=mult)
if datatype in ['text', 'composite', 'enumeration'] and not mult:
if self.custom_columns[col]['display'].get('use_decorations', False):
@ -722,9 +721,9 @@ class BooksModel(QAbstractTableModel): # {{{
if id_ in self.color_cache:
if key in self.color_cache[id_]:
return self.color_cache[id_][key]
if mi is None:
mi = self.db.get_metadata(id_, index_is_id=True)
try:
if mi is None:
mi = self.db.get_metadata(id_, index_is_id=True)
color = composite_formatter.safe_format(fmt, mi, '', mi)
if color in self.colors:
color = QColor(color)

View File

@ -12,6 +12,7 @@ from PyQt4.Qt import (QWidget, QDialog, QLabel, QGridLayout, QComboBox, QSize,
QScrollArea, QPushButton, QVBoxLayout, QDialogButtonBox, QToolButton,
QListView, QAbstractListModel, pyqtSignal, QSizePolicy, QSpacerItem)
from calibre import prepare_string_for_xml
from calibre.utils.icu import sort_key
from calibre.gui2 import error_dialog
from calibre.gui2.dialogs.template_dialog import TemplateDialog
@ -158,21 +159,22 @@ class ConditionEditor(QWidget): # {{{
self.action_box.clear()
self.action_box.addItem('', '')
col = self.current_col
m = self.fm[col]
dt = m['datatype']
if dt in self.action_map:
actions = self.action_map[dt]
else:
if col == 'ondevice':
k = 'ondevice'
elif col == 'identifiers':
k = 'identifiers'
if col:
m = self.fm[col]
dt = m['datatype']
if dt in self.action_map:
actions = self.action_map[dt]
else:
k = 'multiple' if m['is_multiple'] else 'single'
actions = self.action_map[k]
if col == 'ondevice':
k = 'ondevice'
elif col == 'identifiers':
k = 'identifiers'
else:
k = 'multiple' if m['is_multiple'] else 'single'
actions = self.action_map[k]
for text, key in actions:
self.action_box.addItem(text, key)
for text, key in actions:
self.action_box.addItem(text, key)
self.action_box.setCurrentIndex(0)
self.action_box.blockSignals(False)
self.init_value_box()
@ -183,11 +185,15 @@ class ConditionEditor(QWidget): # {{{
self.value_box.setInputMask('')
self.value_box.setValidator(None)
col = self.current_col
if not col:
return
m = self.fm[col]
dt = m['datatype']
action = self.current_action
if not col or not action:
if not action:
return
m = self.fm[col]
dt = m['datatype']
tt = ''
if col == 'identifiers':
tt = _('Enter either an identifier type or an '
@ -205,7 +211,7 @@ class ConditionEditor(QWidget): # {{{
tt = _('Enter a regular expression')
elif m.get('is_multiple', False):
tt += '\n' + _('You can match multiple values by separating'
' them with %s')%m['is_multiple']
' them with %s')%m['is_multiple']['ui_to_list']
self.value_box.setToolTip(tt)
if action in ('is set', 'is not set', 'is true', 'is false',
'is undefined'):
@ -430,7 +436,7 @@ class RulesModel(QAbstractListModel): # {{{
return _('''
<p>Advanced Rule for column <b>%s</b>:
<pre>%s</pre>
''')%(col, rule)
''')%(col, prepare_string_for_xml(rule))
conditions = [self.condition_to_html(c) for c in rule.conditions]
return _('''\
<p>Set the color of <b>%s</b> to <b>%s</b> if the following
@ -439,9 +445,10 @@ class RulesModel(QAbstractListModel): # {{{
''') % (col, rule.color, ''.join(conditions))
def condition_to_html(self, condition):
c, a, v = condition
return (
_('<li>If the <b>%s</b> column <b>%s</b> value: <b>%s</b>') %
tuple(condition))
(c, a, prepare_string_for_xml(v)))
# }}}

View File

@ -13,6 +13,9 @@ from calibre.gui2 import error_dialog
class CreateCustomColumn(QDialog, Ui_QCreateCustomColumn):
# Note: in this class, we are treating is_multiple as the boolean that
# custom_columns expects to find in its structure. It does not use the dict
column_types = {
0:{'datatype':'text',
'text':_('Text, column shown in the tag browser'),

View File

@ -44,9 +44,9 @@ class Customize(QFrame, Ui_Frame):
clear.clicked.connect(partial(self.clear_clicked, which=x))
def clear_clicked(self, which=0):
button = getattr(self, 'button%d'%which)
button.setText(_('None'))
setattr(self, 'shortcut%d'%which, None)
button = getattr(self, 'button%d'%which)
button.setText(_('None'))
setattr(self, 'shortcut%d'%which, None)
def custom_toggled(self, checked):
for w in ('1', '2'):

View File

@ -75,15 +75,18 @@ class AmazonUKKindleStore(AmazonKindleStore):
s.title = title.strip()
s.price = price.strip()
s.detail_item = asin.strip()
s.formats = 'Kindle'
s.formats = ''
if is_shot:
# Amazon UK does not include the author on the grid layout
s.author = ''
self.get_details(s, timeout)
if s.formats != 'Kindle':
continue
else:
author = ''.join(data.xpath('.//div[@class="productTitle"]/span[@class="ptBrand"]/text()'))
s.author = author.split(' by ')[-1].strip()
s.formats = 'Kindle'
yield s
@ -99,6 +102,9 @@ class AmazonUKKindleStore(AmazonKindleStore):
idata = html.fromstring(nf.read())
if not search_result.author:
search_result.author = ''.join(idata.xpath('//div[@class="buying" and contains(., "Author")]/a/text()'))
is_kindle = idata.xpath('boolean(//div[@class="buying"]/h1/span/span[contains(text(), "Kindle Edition")])')
if is_kindle:
search_result.formats = 'Kindle'
if idata.xpath('boolean(//div[@class="content"]//li/b[contains(text(), "' +
self.drm_search_text + '")])'):
if idata.xpath('boolean(//div[@class="content"]//li[contains(., "' +

View File

@ -33,7 +33,7 @@ class Matches(QAbstractItemModel):
total_changed = pyqtSignal(int)
HEADERS = [_('Cover'), _('Title'), _('Price'), _('DRM'), _('Store'), _('')]
HEADERS = [_('Cover'), _('Title'), _('Price'), _('DRM'), _('Store'), '']
HTML_COLS = (1, 4)
def __init__(self, cover_thread_count=2, detail_thread_count=4):
@ -58,7 +58,7 @@ class Matches(QAbstractItemModel):
self.search_filter = SearchFilter()
self.cover_pool = CoverThreadPool(cover_thread_count)
self.details_pool = DetailsThreadPool(detail_thread_count)
self.filter_results_dispatcher = FunctionDispatcher(self.filter_results)
self.got_result_details_dispatcher = FunctionDispatcher(self.got_result_details)

View File

@ -509,7 +509,8 @@ class ResultCache(SearchQueryParser): # {{{
valq_mkind, valq = self._matchkind(query)
loc = self.field_metadata[location]['rec_index']
split_char = self.field_metadata[location]['is_multiple']
split_char = self.field_metadata[location]['is_multiple'].get(
'cache_to_list', ',')
for id_ in candidates:
item = self._data[id_]
if item is None:
@ -665,7 +666,8 @@ class ResultCache(SearchQueryParser): # {{{
if fm['is_multiple'] and \
len(query) > 1 and query.startswith('#') and \
query[1:1] in '=<>!':
vf = lambda item, loc=fm['rec_index'], ms=fm['is_multiple']:\
vf = lambda item, loc=fm['rec_index'], \
ms=fm['is_multiple']['cache_to_list']:\
len(item[loc].split(ms)) if item[loc] is not None else 0
return self.get_numeric_matches(location, query[1:],
candidates, val_func=vf)
@ -703,7 +705,8 @@ class ResultCache(SearchQueryParser): # {{{
['composite', 'text', 'comments', 'series', 'enumeration']:
exclude_fields.append(db_col[x])
col_datatype[db_col[x]] = self.field_metadata[x]['datatype']
is_multiple_cols[db_col[x]] = self.field_metadata[x]['is_multiple']
is_multiple_cols[db_col[x]] = \
self.field_metadata[x]['is_multiple'].get('cache_to_list', None)
try:
rating_query = int(query) * 2
@ -1045,13 +1048,14 @@ class SortKeyGenerator(object):
elif dt in ('text', 'comments', 'composite', 'enumeration'):
if val:
sep = fm['is_multiple']
if sep:
if fm['display'].get('is_names', False):
val = sep.join(
[author_to_author_sort(v) for v in val.split(sep)])
if fm['is_multiple']:
jv = fm['is_multiple']['list_to_ui']
sv = fm['is_multiple']['cache_to_list']
if '&' in jv:
val = jv.join(
[author_to_author_sort(v) for v in val.split(sv)])
else:
val = sep.join(sorted(val.split(sep),
val = jv.join(sorted(val.split(sv),
key=self.string_sort_key))
val = self.string_sort_key(val)

View File

@ -79,16 +79,19 @@ class Rule(object): # {{{
if dt == 'bool':
return self.bool_condition(col, action, val)
if dt in ('int', 'float', 'rating'):
if dt in ('int', 'float'):
return self.number_condition(col, action, val)
if dt == 'rating':
return self.rating_condition(col, action, val)
if dt == 'datetime':
return self.date_condition(col, action, val)
if dt in ('comments', 'series', 'text', 'enumeration'):
if dt in ('comments', 'series', 'text', 'enumeration', 'composite'):
ism = m.get('is_multiple', False)
if ism:
return self.multiple_condition(col, action, val, ism)
return self.multiple_condition(col, action, val, ism['ui_to_list'])
return self.text_condition(col, action, val)
def identifiers_condition(self, col, action, val):
@ -114,9 +117,16 @@ class Rule(object): # {{{
'lt': ('1', '', ''),
'gt': ('', '', '1')
}[action]
lt, eq, gt = '', '1', ''
return "cmp(raw_field('%s'), %s, '%s', '%s', '%s')" % (col, val, lt, eq, gt)
def rating_condition(self, col, action, val):
lt, eq, gt = {
'eq': ('', '1', ''),
'lt': ('1', '', ''),
'gt': ('', '', '1')
}[action]
return "cmp(field('%s'), %s, '%s', '%s', '%s')" % (col, val, lt, eq, gt)
def date_condition(self, col, action, val):
lt, eq, gt = {
'eq': ('', '1', ''),
@ -127,10 +137,12 @@ class Rule(object): # {{{
val, lt, eq, gt)
def multiple_condition(self, col, action, val, sep):
if not sep or sep == '|':
sep = ','
if action == 'is set':
return "test('%s', '1', '')"%col
return "test(field('%s'), '1', '')"%col
if action == 'is not set':
return "test('%s', '', '1')"%col
return "test(field('%s'), '', '1')"%col
if action == 'has':
return "str_in_list(field('%s'), '%s', \"%s\", '1', '')"%(col, sep, val)
if action == 'does not have':
@ -142,9 +154,9 @@ class Rule(object): # {{{
def text_condition(self, col, action, val):
if action == 'is set':
return "test('%s', '1', '')"%col
return "test(field('%s'), '1', '')"%col
if action == 'is not set':
return "test('%s', '', '1')"%col
return "test(field('%s'), '', '1')"%col
if action == 'is':
return "strcmp(field('%s'), \"%s\", '', '1', '')"%(col, val)
if action == 'is not':
@ -183,7 +195,7 @@ def conditionable_columns(fm):
m = fm[key]
dt = m['datatype']
if m.get('name', False) and dt in ('bool', 'int', 'float', 'rating', 'series',
'comments', 'text', 'enumeration', 'datetime'):
'comments', 'text', 'enumeration', 'datetime', 'composite'):
if key == 'sort':
yield 'title_sort'
else:

View File

@ -78,6 +78,18 @@ class CustomColumns(object):
}
if data['display'] is None:
data['display'] = {}
# set up the is_multiple separator dict
if data['is_multiple']:
if data['display'].get('is_names', False):
seps = {'cache_to_list': '|', 'ui_to_list': '&', 'list_to_ui': ' & '}
elif data['datatype'] == 'composite':
seps = {'cache_to_list': ',', 'ui_to_list': ',', 'list_to_ui': ', '}
else:
seps = {'cache_to_list': '|', 'ui_to_list': ',', 'list_to_ui': ', '}
else:
seps = {}
data['multiple_seps'] = seps
table, lt = self.custom_table_names(data['num'])
if table not in custom_tables or (data['normalized'] and lt not in
custom_tables):
@ -119,7 +131,7 @@ class CustomColumns(object):
if x is None:
return []
if isinstance(x, (str, unicode, bytes)):
x = x.split('&' if d['display'].get('is_names', False) else',')
x = x.split(d['multiple_seps']['ui_to_list'])
x = [y.strip() for y in x if y.strip()]
x = [y.decode(preferred_encoding, 'replace') if not isinstance(y,
unicode) else y for y in x]
@ -181,10 +193,7 @@ class CustomColumns(object):
is_category = True
else:
is_category = False
if v['is_multiple']:
is_m = ',' if v['datatype'] == 'composite' else '|'
else:
is_m = None
is_m = v['multiple_seps']
tn = 'custom_column_{0}'.format(v['num'])
self.field_metadata.add_custom_field(label=v['label'],
table=tn, column='value', datatype=v['datatype'],
@ -200,7 +209,7 @@ class CustomColumns(object):
row = self.data._data[idx] if index_is_id else self.data[idx]
ans = row[self.FIELD_MAP[data['num']]]
if data['is_multiple'] and data['datatype'] == 'text':
ans = ans.split('|') if ans else []
ans = ans.split(data['multiple_seps']['cache_to_list']) if ans else []
if data['display'].get('sort_alpha', False):
ans.sort(cmp=lambda x,y:cmp(x.lower(), y.lower()))
return ans
@ -566,14 +575,21 @@ class CustomColumns(object):
def custom_columns_in_meta(self):
lines = {}
for data in self.custom_column_label_map.values():
display = data['display']
table, lt = self.custom_table_names(data['num'])
if data['normalized']:
query = '%s.value'
if data['is_multiple']:
query = 'group_concat(%s.value, "|")'
if not display.get('sort_alpha', False):
query = 'sort_concat(link.id, %s.value)'
# query = 'group_concat(%s.value, "{0}")'.format(
# data['multiple_seps']['cache_to_list'])
# if not display.get('sort_alpha', False):
if data['multiple_seps']['cache_to_list'] == '|':
query = 'sortconcat_bar(link.id, %s.value)'
elif data['multiple_seps']['cache_to_list'] == '&':
query = 'sortconcat_amper(link.id, %s.value)'
else:
prints('WARNING: unknown value in multiple_seps',
data['multiple_seps']['cache_to_list'])
query = 'sortconcat_bar(link.id, %s.value)'
line = '''(SELECT {query} FROM {lt} AS link INNER JOIN
{table} ON(link.value={table}.id) WHERE link.book=books.id)
custom_{num}

View File

@ -595,7 +595,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
if f is None:
continue
with tempfile.SpooledTemporaryFile(max_size=100*(1024**2)) as stream:
shutil.copyfileobj(f, stream)
with f:
shutil.copyfileobj(f, stream)
stream.seek(0)
self.add_format(id, format, stream, index_is_id=True,
path=tpath, notify=False)
@ -1249,7 +1250,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
dex = field['rec_index']
for book in self.data.iterall():
if field['is_multiple']:
vals = [v.strip() for v in book[dex].split(field['is_multiple'])
vals = [v.strip() for v in
book[dex].split(field['is_multiple']['cache_to_list'])
if v.strip()]
if id_ in vals:
ans.add(book[0])
@ -1377,7 +1379,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
tcategories[category] = {}
# create a list of category/field_index for the books scan to use.
# This saves iterating through field_metadata for each book
md.append((category, cat['rec_index'], cat['is_multiple'], False))
md.append((category, cat['rec_index'],
cat['is_multiple'].get('cache_to_list', None), False))
for category in tb_cats.iterkeys():
cat = tb_cats[category]
@@ -1385,7 +1388,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
cat['display'].get('make_category', False):
tids[category] = {}
tcategories[category] = {}
md.append((category, cat['rec_index'], cat['is_multiple'],
md.append((category, cat['rec_index'],
cat['is_multiple'].get('cache_to_list', None),
cat['datatype'] == 'composite'))
#print 'end phase "collection":', time.clock() - last, 'seconds'
#last = time.clock()

View File

@@ -50,9 +50,16 @@ class FieldMetadata(dict):
datatype: the type of information in the field. Valid values are listed in
VALID_DATA_TYPES below.
is_multiple: valid for the text datatype. If None, the field is to be
treated as a single term. If not None, it contains a string, and the field
is assumed to contain a list of terms separated by that string
is_multiple: valid for the text datatype. If {}, the field is to be
treated as a single term. If not empty, it contains a dict of the form
{'cache_to_list': ',',
'ui_to_list': ',',
'list_to_ui': ', '}
where the cache_to_list contains the character used to split the value in
the meta2 table, ui_to_list contains the character used to create a list
from a value shown in the ui (each resulting value must be strip()ed and
empty values removed), and list_to_ui contains the string used in join()
to create a displayable string from the list.
kind == field: is a db field.
kind == category: standard tag category that isn't a field. see news.
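Not part of the diff: a short round-trip sketch showing how the three separators described above would be used together, here with authors-style settings; the author names are invented sample data.

# Illustrative sketch only: round-tripping a multi-valued field through the
# three separators described above.
is_multiple = {'cache_to_list': ',', 'ui_to_list': '&', 'list_to_ui': ' & '}

cached = 'Douglas Adams,Terry Pratchett'                 # hypothetical cached value
as_list = [v.strip() for v in cached.split(is_multiple['cache_to_list']) if v.strip()]
shown = is_multiple['list_to_ui'].join(as_list)          # 'Douglas Adams & Terry Pratchett'
edited = [v.strip() for v in shown.split(is_multiple['ui_to_list']) if v.strip()]
print(edited == as_list)                                  # -> True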
@@ -97,7 +104,9 @@
'link_column':'author',
'category_sort':'sort',
'datatype':'text',
'is_multiple':',',
'is_multiple':{'cache_to_list': ',',
'ui_to_list': '&',
'list_to_ui': ' & '},
'kind':'field',
'name':_('Authors'),
'search_terms':['authors', 'author'],
@@ -109,7 +118,7 @@
'link_column':'series',
'category_sort':'(title_sort(name))',
'datatype':'series',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name':_('Series'),
'search_terms':['series'],
@@ -119,7 +128,9 @@
('formats', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':',',
'is_multiple':{'cache_to_list': ',',
'ui_to_list': ',',
'list_to_ui': ', '},
'kind':'field',
'name':_('Formats'),
'search_terms':['formats', 'format'],
@@ -131,7 +142,7 @@
'link_column':'publisher',
'category_sort':'name',
'datatype':'text',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name':_('Publishers'),
'search_terms':['publisher'],
@@ -143,7 +154,7 @@
'link_column':'rating',
'category_sort':'rating',
'datatype':'rating',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name':_('Ratings'),
'search_terms':['rating'],
@@ -154,7 +165,7 @@
'column':'name',
'category_sort':'name',
'datatype':None,
'is_multiple':None,
'is_multiple':{},
'kind':'category',
'name':_('News'),
'search_terms':[],
@@ -166,7 +177,9 @@
'link_column': 'tag',
'category_sort':'name',
'datatype':'text',
'is_multiple':',',
'is_multiple':{'cache_to_list': ',',
'ui_to_list': ',',
'list_to_ui': ', '},
'kind':'field',
'name':_('Tags'),
'search_terms':['tags', 'tag'],
@@ -176,7 +189,9 @@
('identifiers', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':',',
'is_multiple':{'cache_to_list': ',',
'ui_to_list': ',',
'list_to_ui': ', '},
'kind':'field',
'name':_('Identifiers'),
'search_terms':['identifiers', 'identifier', 'isbn'],
@@ -186,7 +201,7 @@
('author_sort',{'table':None,
'column':None,
'datatype':'text',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name':_('Author Sort'),
'search_terms':['author_sort'],
@@ -196,7 +211,9 @@
('au_map', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':',',
'is_multiple':{'cache_to_list': ',',
'ui_to_list': None,
'list_to_ui': None},
'kind':'field',
'name':None,
'search_terms':[],
@@ -206,7 +223,7 @@
('comments', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name':_('Comments'),
'search_terms':['comments', 'comment'],
@@ -216,7 +233,7 @@
('cover', {'table':None,
'column':None,
'datatype':'int',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name':None,
'search_terms':['cover'],
@@ -226,7 +243,7 @@
('id', {'table':None,
'column':None,
'datatype':'int',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name':None,
'search_terms':[],
@@ -236,7 +253,7 @@
('last_modified', {'table':None,
'column':None,
'datatype':'datetime',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name':_('Modified'),
'search_terms':['last_modified'],
@@ -246,7 +263,7 @@
('ondevice', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name':_('On Device'),
'search_terms':['ondevice'],
@@ -256,7 +273,7 @@
('path', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name':_('Path'),
'search_terms':[],
@@ -266,7 +283,7 @@
('pubdate', {'table':None,
'column':None,
'datatype':'datetime',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name':_('Published'),
'search_terms':['pubdate'],
@@ -276,7 +293,7 @@
('marked', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name': None,
'search_terms':['marked'],
@@ -286,7 +303,7 @@
('series_index',{'table':None,
'column':None,
'datatype':'float',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name':None,
'search_terms':['series_index'],
@@ -296,7 +313,7 @@
('sort', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name':_('Title Sort'),
'search_terms':['title_sort'],
@@ -306,7 +323,7 @@
('size', {'table':None,
'column':None,
'datatype':'float',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name':_('Size'),
'search_terms':['size'],
@@ -316,7 +333,7 @@
('timestamp', {'table':None,
'column':None,
'datatype':'datetime',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name':_('Date'),
'search_terms':['date'],
@@ -326,7 +343,7 @@
('title', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name':_('Title'),
'search_terms':['title'],
@@ -336,7 +353,7 @@
('uuid', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':None,
'is_multiple':{},
'kind':'field',
'name':None,
'search_terms':[],
@@ -508,7 +525,7 @@ class FieldMetadata(dict):
if datatype == 'series':
key += '_index'
self._tb_cats[key] = {'table':None, 'column':None,
'datatype':'float', 'is_multiple':None,
'datatype':'float', 'is_multiple':{},
'kind':'field', 'name':'',
'search_terms':[key], 'label':label+'_index',
'colnum':None, 'display':{},
@@ -560,7 +577,7 @@ class FieldMetadata(dict):
if icu_lower(label) != label:
st.append(icu_lower(label))
self._tb_cats[label] = {'table':None, 'column':None,
'datatype':None, 'is_multiple':None,
'datatype':None, 'is_multiple':{},
'kind':'user', 'name':name,
'search_terms':st, 'is_custom':False,
'is_category':True, 'is_csp': False}
@@ -570,7 +587,7 @@ class FieldMetadata(dict):
if label in self._tb_cats:
raise ValueError('Duplicate user field [%s]'%(label))
self._tb_cats[label] = {'table':None, 'column':None,
'datatype':None, 'is_multiple':None,
'datatype':None, 'is_multiple':{},
'kind':'search', 'name':name,
'search_terms':[], 'is_custom':False,
'is_category':True, 'is_csp': False}

View File

@@ -171,7 +171,7 @@ class Restore(Thread):
for x in fields:
if x in cfm:
if x == 'is_multiple':
args.append(cfm[x] is not None)
args.append(bool(cfm[x]))
else:
args.append(cfm[x])
if len(args) == len(fields):
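Not part of the diff: this works because an empty dict is falsey, so a plain truthiness test now plays the role of the old 'is not None' check, as the two-line sketch below illustrates.

# Illustrative sketch only: is_multiple is now a dict, so truthiness suffices.
print(bool({}))                            # -> False, single-valued field
print(bool({'cache_to_list': ','}))        # -> True, multi-valued field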

View File

@@ -231,7 +231,8 @@ class MobileServer(object):
book['size'] = human_readable(book['size'])
aus = record[FM['authors']] if record[FM['authors']] else __builtin__._('Unknown')
authors = '|'.join([i.replace('|', ',') for i in aus.split(',')])
aut_is = CFM['authors']['is_multiple']
authors = aut_is['list_to_ui'].join([i.replace('|', ',') for i in aus.split(',')])
book['authors'] = authors
book['series_index'] = fmt_sidx(float(record[FM['series_index']]))
book['series'] = record[FM['series']]
@@ -254,8 +255,10 @@ class MobileServer(object):
continue
if datatype == 'text' and CFM[key]['is_multiple']:
book[key] = concat(name,
format_tag_string(val, ',',
no_tag_count=True))
format_tag_string(val,
CFM[key]['is_multiple']['ui_to_list'],
no_tag_count=True,
joinval=CFM[key]['is_multiple']['list_to_ui']))
else:
book[key] = concat(name, val)
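Not part of the diff: a sketch of the author-string conversion in the hunk above. In the cached string, authors are separated by commas and a comma inside a name is escaped as '|', so each name is unescaped and the list is joined with the field's list_to_ui string. The sample value is invented.

# Illustrative sketch only: cached authors string -> display string.
aut_is = {'cache_to_list': ',', 'ui_to_list': '&', 'list_to_ui': ' & '}
aus = 'Adams| Douglas,Pratchett| Terry'    # hypothetical cached value
authors = aut_is['list_to_ui'].join(i.replace('|', ',') for i in aus.split(','))
print(authors)                             # -> 'Adams, Douglas & Pratchett, Terry'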

View File

@@ -180,9 +180,12 @@ def ACQUISITION_ENTRY(item, version, db, updated, CFM, CKEYS, prefix):
if val:
datatype = CFM[key]['datatype']
if datatype == 'text' and CFM[key]['is_multiple']:
extra.append('%s: %s<br />'%(xml(name), xml(format_tag_string(val, ',',
ignore_max=True,
no_tag_count=True))))
extra.append('%s: %s<br />'%
(xml(name),
xml(format_tag_string(val,
CFM[key]['is_multiple']['ui_to_list'],
ignore_max=True, no_tag_count=True,
joinval=CFM[key]['is_multiple']['list_to_ui']))))
elif datatype == 'comments':
extra.append('%s: %s<br />'%(xml(name), comments_to_html(unicode(val))))
else:

View File

@@ -68,7 +68,7 @@ def strftime(fmt='%Y/%m/%d %H:%M:%S', dt=None):
except:
return _strftime(fmt, nowf().timetuple())
def format_tag_string(tags, sep, ignore_max=False, no_tag_count=False):
def format_tag_string(tags, sep, ignore_max=False, no_tag_count=False, joinval=', '):
MAX = sys.maxint if ignore_max else tweaks['max_content_server_tags_shown']
if tags:
tlist = [t.strip() for t in tags.split(sep)]
@@ -78,10 +78,10 @@ def format_tag_string(tags, sep, ignore_max=False, no_tag_count=False):
if len(tlist) > MAX:
tlist = tlist[:MAX]+['...']
if no_tag_count:
return ', '.join(tlist) if tlist else ''
return joinval.join(tlist) if tlist else ''
else:
return u'%s:&:%s'%(tweaks['max_content_server_tags_shown'],
', '.join(tlist)) if tlist else ''
joinval.join(tlist)) if tlist else ''
def quote(s):
if isinstance(s, unicode):
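Not part of the diff: a trimmed-down, standalone approximation of format_tag_string after this change, showing how a caller passes the field's ui_to_list separator and list_to_ui join string. The real function also caps the list using the max_content_server_tags_shown tweak, which is omitted here, and the tag values are invented.

# Illustrative sketch only: simplified format_tag_string with joinval.
def format_tag_string(tags, sep, joinval=', '):
    tlist = [t.strip() for t in tags.split(sep)] if tags else []
    return joinval.join(tlist) if tlist else ''

is_multiple = {'ui_to_list': ',', 'list_to_ui': ', '}
print(format_tag_string('Fantasy,  Alternate History ,SF',
                        is_multiple['ui_to_list'],
                        joinval=is_multiple['list_to_ui']))
# -> 'Fantasy, Alternate History, SF'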

View File

@@ -121,8 +121,12 @@ class XMLServer(object):
name = CFM[key]['name']
custcols.append(k)
if datatype == 'text' and CFM[key]['is_multiple']:
kwargs[k] = concat('#T#'+name, format_tag_string(val,',',
ignore_max=True))
kwargs[k] = \
concat('#T#'+name,
format_tag_string(val,
CFM[key]['is_multiple']['ui_to_list'],
ignore_max=True,
joinval=CFM[key]['is_multiple']['list_to_ui']))
else:
kwargs[k] = concat(name, val)
kwargs['custcols'] = ','.join(custcols)

View File

@@ -121,9 +121,12 @@ class SortedConcatenate(object):
return None
return self.sep.join(map(self.ans.get, sorted(self.ans.keys())))
class SafeSortedConcatenate(SortedConcatenate):
class SortedConcatenateBar(SortedConcatenate):
sep = '|'
class SortedConcatenateAmper(SortedConcatenate):
sep = '&'
class IdentifiersConcat(object):
'''String concatenation aggregator for the identifiers map'''
def __init__(self):
@@ -220,7 +223,8 @@ class DBThread(Thread):
self.conn.execute('pragma cache_size=5000')
encoding = self.conn.execute('pragma encoding').fetchone()[0]
self.conn.create_aggregate('sortconcat', 2, SortedConcatenate)
self.conn.create_aggregate('sort_concat', 2, SafeSortedConcatenate)
self.conn.create_aggregate('sortconcat_bar', 2, SortedConcatenateBar)
self.conn.create_aggregate('sortconcat_amper', 2, SortedConcatenateAmper)
self.conn.create_aggregate('identifiers_concat', 2, IdentifiersConcat)
load_c_extensions(self.conn)
self.conn.row_factory = sqlite.Row if self.row_factory else lambda cursor, row : list(row)
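Not part of the diff: a self-contained sketch of how an aggregate in the style of SortedConcatenateBar is registered and used through the sqlite3 module. The class below is a simplified stand-in, not the one defined in this file, and the table and values are invented.

# Illustrative sketch only: a simplified sortconcat_bar-style aggregate.
import sqlite3

class ConcatBar(object):
    sep = '|'
    def __init__(self):
        self.ans = {}
    def step(self, ndx, value):
        self.ans[ndx] = value
    def finalize(self):
        if not self.ans:
            return None
        return self.sep.join(self.ans[k] for k in sorted(self.ans))

conn = sqlite3.connect(':memory:')
conn.create_aggregate('sortconcat_bar', 2, ConcatBar)
conn.execute('CREATE TABLE vals(id INTEGER, value TEXT)')
conn.executemany('INSERT INTO vals VALUES (?, ?)', [(2, 'b'), (1, 'a')])
print(conn.execute('SELECT sortconcat_bar(id, value) FROM vals').fetchone()[0])  # -> a|b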

View File

@@ -141,6 +141,22 @@ static void sort_concat_finalize2(sqlite3_context *context) {
}
static void sort_concat_finalize3(sqlite3_context *context) {
SortConcatList *list;
unsigned char *ans;
list = (SortConcatList*) sqlite3_aggregate_context(context, sizeof(*list));
if (list != NULL && list->vals != NULL && list->count > 0) {
qsort(list->vals, list->count, sizeof(list->vals[0]), sort_concat_cmp);
ans = sort_concat_do_finalize(list, '&');
if (ans != NULL) sqlite3_result_text(context, (char*)ans, -1, SQLITE_TRANSIENT);
free(ans);
sort_concat_free(list);
}
}
// }}}
// identifiers_concat {{{
@@ -237,7 +253,8 @@ MYEXPORT int sqlite3_extension_init(
sqlite3 *db, char **pzErrMsg, const sqlite3_api_routines *pApi){
SQLITE_EXTENSION_INIT2(pApi);
sqlite3_create_function(db, "sortconcat", 2, SQLITE_UTF8, NULL, NULL, sort_concat_step, sort_concat_finalize);
sqlite3_create_function(db, "sort_concat", 2, SQLITE_UTF8, NULL, NULL, sort_concat_step, sort_concat_finalize2);
sqlite3_create_function(db, "sortconcat_bar", 2, SQLITE_UTF8, NULL, NULL, sort_concat_step, sort_concat_finalize2);
sqlite3_create_function(db, "sortconcat_amper", 2, SQLITE_UTF8, NULL, NULL, sort_concat_step, sort_concat_finalize3);
sqlite3_create_function(db, "identifiers_concat", 2, SQLITE_UTF8, NULL, NULL, identifiers_concat_step, identifiers_concat_finalize);
return 0;
}

View File

@@ -23,7 +23,6 @@ entry_points = {
'calibre-server = calibre.library.server.main:main',
'lrf2lrs = calibre.ebooks.lrf.lrfparser:main',
'lrs2lrf = calibre.ebooks.lrf.lrs.convert_from:main',
'librarything = calibre.ebooks.metadata.library_thing:main',
'calibre-debug = calibre.debug:main',
'calibredb = calibre.library.cli:main',
'calibre-parallel = calibre.utils.ipc.worker:main',

View File

@@ -74,10 +74,7 @@ Edit metadata
1. **Edit metadata individually**: This allows you to edit the metadata of books one-by-one, with the option of fetching metadata, including covers from the internet. It also allows you to add/remove particular ebook formats from a book.
2. **Edit metadata in bulk**: This allows you to edit common metadata fields for large numbers of books simultaneously. It operates on all the books you have selected in the :ref:`Library view <search_sort>`.
3. **Download metadata and covers**: Downloads metadata and covers (if available), for the books that are selected in the book list.
4. **Download only metadata**: Downloads only metadata (if available), for the books that are selected in the book list.
5. **Download only covers**: Downloads only covers (if available), for the books that are selected in the book list.
6. **Download only social metadata**: Downloads only social metadata such as tags and reviews (if available), for the books that are selected in the book list.
7. **Merge Book Records**: Gives you the capability of merging the metadata and formats of two or more book records together. You can choose to either delete or keep the records that were not clicked first.
4. **Merge Book Records**: Gives you the capability of merging the metadata and formats of two or more book records together. You can choose to either delete or keep the records that were not clicked first.
For more details see :ref:`metadata`.

View File

@@ -11,7 +11,7 @@ You can "install" calibre onto a USB stick that you can take with you and use on
* Run a Mobile Calibre installation with both the Calibre binaries and your ebook library resident on a USB disk or other portable media. In particular, it is not necessary to have Calibre installed on the Windows PC that is to run Calibre. This batch file also does not care what drive letter is assigned when you plug in the USB device. It also will not affect any settings on the host machine, making it a completely self-contained Calibre installation.
* Run a networked Calibre installation optimised for performance when the ebook files are located on a networked share.
If you find setting up the bat file too challenging, there is a third party portable calibre build available at `portableapps.com http://portableapps.com`_.
If you find setting up the bat file too challenging, there is a third party portable calibre build available at `portableapps.com <http://portableapps.com>`_.
This calibre-portable.bat file is intended for use on Windows based systems, but the principles are easily adapted for use on Linux or OS X based systems. Note that calibre requires the Microsoft Visual C++ 2008 runtimes to run. Most Windows computers have them installed already, but it may be a good idea to have the installer for installing them on your USB stick. The installer is available from `Microsoft <http://www.microsoft.com/downloads/details.aspx?FamilyID=9b2da534-3e03-4391-8a4d-074b9f2bc1bf&displaylang=en>`_.

View File

@@ -0,0 +1,49 @@
{#
basic/search.html
~~~~~~~~~~~~~~~~~
Template for the search page, using google adsense search
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
#}
{% extends "layout.html" %}
{% set title = _('Search') %}
{% block extrahead %}
<link rel="stylesheet" href="http://www.google.com/cse/style/look/default.css" type="text/css" />
{{ super() }}
{% endblock %}
{% block body %}
<h1 id="search-documentation">{{ _('Search') }}</h1>
<div id="fallback" class="admonition warning">
<script type="text/javascript">$('#fallback').hide();</script>
<p>
{% trans %}Please activate JavaScript to enable the search
functionality.{% endtrans %}
</p>
</div>
<p>
{% trans %}From here you can search these documents. Enter your search
words into the box below and click "search". Note that the search
function will automatically search for all of the words. Pages
containing fewer words won't appear in the result list.{% endtrans %}
</p>
<div id="cse" style="width: 100%;">Loading</div>
<script src="http://www.google.com/jsapi" type="text/javascript"></script>
<script type="text/javascript">
google.load('search', '1');
google.setOnLoadCallback(function() {
var customSearchControl = new google.search.CustomSearchControl('partner-pub-2595272032872519:0657791363');
customSearchControl.setResultSetSize(google.search.Search.FILTERED_CSE_RESULTSET);
var options = new google.search.DrawOptions();
options.setAutoComplete(true);
customSearchControl.draw('cse', options);
var params = $.getQueryParameters();
if (params.q) {
var query = params.q[0];
$('input[name="search"]')[0].value = query;
$('input.gsc-search-button')[0].click();
}
}, true);
</script>
{% endblock %}

View File

@@ -5,10 +5,9 @@ Dynamic language lookup of translations for user-visible strings.
__license__ = 'GPL v3'
__copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
import os
import cStringIO
from gettext import GNUTranslations
from calibre.utils.localization import get_lc_messages_path
from calibre.utils.localization import get_lc_messages_path, ZipFile
__all__ = ['translate']
@@ -21,10 +20,15 @@ def translate(lang, text):
else:
mpath = get_lc_messages_path(lang)
if mpath is not None:
p = os.path.join(mpath, 'messages.mo')
if os.path.exists(p):
trans = GNUTranslations(open(p, 'rb'))
_CACHE[lang] = trans
with ZipFile(P('localization/locales.zip',
allow_user_override=False), 'r') as zf:
try:
buf = cStringIO.StringIO(zf.read(mpath + '/messages.mo'))
except:
pass
else:
trans = GNUTranslations(buf)
_CACHE[lang] = trans
if trans is None:
return getattr(__builtins__, '_', lambda x: x)(text)
return trans.ugettext(text)
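Not part of the diff: a hedged, standalone sketch of the lookup pattern used above, reading a compiled messages.mo out of a zip archive instead of the filesystem. The archive name 'locales.zip' and the member layout are assumptions for the example, not calibre's actual resource paths.

# Illustrative sketch only: load a gettext catalogue from a zip archive.
import io
import os
from gettext import GNUTranslations
from zipfile import ZipFile

def load_catalogue(zip_path, mpath):
    with ZipFile(zip_path, 'r') as zf:
        try:
            buf = io.BytesIO(zf.read(mpath + '/messages.mo'))
        except KeyError:            # no catalogue for this language
            return None
    return GNUTranslations(buf)

if os.path.exists('locales.zip'):   # hypothetical archive
    trans = load_catalogue('locales.zip', 'de')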

Some files were not shown because too many files have changed in this diff.