merge from trunk

This commit is contained in:
ldolse 2011-02-01 13:24:04 +08:00
commit 3e749b4b2e
11 changed files with 492 additions and 70 deletions

Binary image file changed (new version: 358 B); contents not shown.

View File

@@ -1,25 +1,25 @@
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__author__ = 'Luis Hernandez'
__copyright__ = 'Luis Hernandez<tolyluis@gmail.com>'
description = 'Periódico gratuito en español - v0.8 - 27 Jan 2011'
__version__ = 'v0.85'
__date__ = '31 January 2011'
'''
www.20minutos.es
'''
import re
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1294946868(BasicNewsRecipe):
title = u'20 Minutos'
title = u'20 Minutos new'
publisher = u'Grupo 20 Minutos'
__author__ = 'Luis Hernández'
description = 'Periódico gratuito en español'
__author__ = 'Luis Hernandez'
description = 'Free Spanish newspaper'
cover_url = 'http://estaticos.20minutos.es/mmedia/especiales/corporativo/css/img/logotipos_grupo20minutos.gif'
oldest_article = 5
oldest_article = 2
max_articles_per_feed = 100
remove_javascript = True
@@ -29,6 +29,7 @@ class AdvancedUserRecipe1294946868(BasicNewsRecipe):
encoding = 'ISO-8859-1'
language = 'es'
timefmt = '[%a, %d %b, %Y]'
remove_empty_feeds = True
keep_only_tags = [
dict(name='div', attrs={'id':['content','vinetas',]})
@@ -43,13 +44,21 @@ class AdvancedUserRecipe1294946868(BasicNewsRecipe):
remove_tags = [
dict(name='ol', attrs={'class':['navigation',]})
,dict(name='span', attrs={'class':['action']})
,dict(name='div', attrs={'class':['twitter comments-list hidden','related-news','col','photo-gallery','calendario','article-comment','postto estirar','otras_vinetas estirar','kment','user-actions']})
,dict(name='div', attrs={'class':['twitter comments-list hidden','related-news','col','photo-gallery','photo-gallery side-art-block','calendario','article-comment','postto estirar','otras_vinetas estirar','kment','user-actions']})
,dict(name='div', attrs={'id':['twitter-destacados','eco-tabs','inner','vineta_calendario','vinetistas clearfix','otras_vinetas estirar','MIN1','main','SUP1','INT']})
,dict(name='ul', attrs={'class':['article-user-actions','stripped-list']})
,dict(name='ul', attrs={'id':['site-links']})
,dict(name='li', attrs={'class':['puntuacion','enviar','compartir']})
]
extra_css = """
p{text-align: justify; font-size: 100%}
body{ text-align: left; font-size:100% }
h3{font-family: sans-serif; font-size:150%; font-weight:bold; text-align: justify; }
"""
preprocess_regexps = [(re.compile(r'<a href="http://estaticos.*?[0-999]px;" target="_blank">', re.DOTALL), lambda m: '')]
feeds = [
(u'Portada' , u'http://www.20minutos.es/rss/')
,(u'Nacional' , u'http://www.20minutos.es/rss/nacional/')
@@ -65,6 +74,6 @@ class AdvancedUserRecipe1294946868(BasicNewsRecipe):
,(u'Empleo' , u'http://www.20minutos.es/rss/empleo/')
,(u'Cine' , u'http://www.20minutos.es/rss/cine/')
,(u'Musica' , u'http://www.20minutos.es/rss/musica/')
,(u'Vinetas' , u'http://www.20minutos.es/rss/vinetas/')
,(u'Comunidad20' , u'http://www.20minutos.es/rss/zona20/')
]

View File

@@ -0,0 +1,71 @@
__license__ = 'GPL v3'
__author__ = 'Luis Hernandez'
__copyright__ = 'Luis Hernandez<tolyluis@gmail.com>'
__version__ = 'v1.2'
__date__ = '31 January 2011'
'''
http://www.cincodias.com/
'''
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1294946868(BasicNewsRecipe):
title = u'Cinco Dias'
publisher = u'Grupo Prisa'
__author__ = 'Luis Hernandez'
description = 'Spanish website about money and business, free edition'
cover_url = 'http://www.prisa.com/images/logos/logo_cinco_dias.gif'
oldest_article = 2
max_articles_per_feed = 100
remove_javascript = True
no_stylesheets = True
use_embedded_content = False
language = 'es'
remove_empty_feeds = True
encoding = 'ISO-8859-1'
timefmt = '[%a, %d %b, %Y]'
keep_only_tags = [
dict(name='div', attrs={'class':['cab_articulo cab_noticia','pos_3','txt_noticia','mod_despiece']})
,dict(name='p', attrs={'class':['cintillo']})
]
remove_tags_before = dict(name='div' , attrs={'class':['publi_h']})
remove_tags_after = dict(name='div' , attrs={'class':['tab_util util_estadisticas']})
remove_tags = [
dict(name='div', attrs={'class':['util-1','util-2','util-3','inner estirar','inner1','inner2','inner3','cont','tab_util util_estadisticas','tab_util util_enviar','mod_list_inf','mod_similares','mod_divisas','mod_sectores','mod_termometro','mod post','mod_img','mod_txt','nivel estirar','barra estirar','info_brujula btnBrujula','utilidad_brujula estirar']})
,dict(name='li', attrs={'class':['lnk-fcbook','lnk-retweet','lnk-meneame','desplegable','comentarios','list-options','estirar']})
,dict(name='ul', attrs={'class':['lista-izquierda','list-options','estirar']})
,dict(name='p', attrs={'class':['autor']})
]
extra_css = """
p{text-align: justify; font-size: 100%}
body{ text-align: left; font-size:100% }
h1{font-family: sans-serif; font-size:150%; font-weight:bold; text-align: justify; }
h3{font-family: sans-serif; font-size:100%; font-style: italic; text-align: justify; }
"""
feeds = [
(u'Ultima Hora' , u'http://www.cincodias.com/rss/feed.html?feedId=17029')
,(u'Empresas' , u'http://www.cincodias.com/rss/feed.html?feedId=19')
,(u'Mercados' , u'http://www.cincodias.com/rss/feed.html?feedId=20')
,(u'Economia' , u'http://www.cincodias.com/rss/feed.html?feedId=21')
,(u'Tecnorama' , u'http://www.cincodias.com/rss/feed.html?feedId=17230')
,(u'Tecnologia' , u'http://www.cincodias.com/rss/feed.html?feedId=17106')
,(u'Finanzas Personales' , u'http://www.cincodias.com/rss/feed.html?feedId=22')
,(u'Fiscalidad' , u'http://www.cincodias.com/rss/feed.html?feedId=17107')
,(u'Vivienda' , u'http://www.cincodias.com/rss/feed.html?feedId=17108')
,(u'Tendencias' , u'http://www.cincodias.com/rss/feed.html?feedId=17109')
,(u'Empleo' , u'http://www.cincodias.com/rss/feed.html?feedId=17110')
,(u'IBEX 35' , u'http://www.cincodias.com/rss/feed.html?feedId=17125')
,(u'Sectores' , u'http://www.cincodias.com/rss/feed.html?feedId=17126')
,(u'Opinion' , u'http://www.cincodias.com/rss/feed.html?feedId=17105')
]

View File

@@ -1,73 +1,92 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008-2009, Darko Miletic <darko.miletic at gmail.com>'
__copyright__ = '2008-2011, Darko Miletic <darko.miletic at gmail.com>'
'''
latimes.com
www.latimes.com
'''
from calibre.web.feeds.news import BasicNewsRecipe
class LATimes(BasicNewsRecipe):
title = u'The Los Angeles Times'
__author__ = u'Darko Miletic and Sujata Raman'
description = u'News from Los Angeles'
oldest_article = 7
max_articles_per_feed = 100
language = 'en'
title = 'Los Angeles Times'
__author__ = 'Darko Miletic'
description = 'The Los Angeles Times is a leading source of news on Southern California, entertainment, movies, television, music, politics, business, health, technology, travel, sports, environment, economics, autos, jobs, real estate and other topics affecting California'
publisher = 'Tribune Company'
category = 'news, politics, USA, Los Angeles, world'
oldest_article = 2
max_articles_per_feed = 200
no_stylesheets = True
encoding = 'utf8'
use_embedded_content = False
encoding = 'utf-8'
lang = 'en-US'
language = 'en'
remove_empty_feeds = True
publication_type = 'newspaper'
masthead_url = 'http://www.latimes.com/images/logo.png'
cover_url = 'http://www.latimes.com/includes/sectionfronts/A1.pdf'
extra_css = """
body{font-family: Georgia,"Times New Roman",Times,serif }
img{margin-bottom: 0.4em; margin-top: 0.8em; display:block}
h2{font-size: 1.1em}
.deckhead{font-size: small; text-transform: uppercase}
.small{color: gray; font-size: small}
.date,.time,.copyright{font-size: x-small; color:gray; font-style:italic;}
"""
conversion_options = {
'comment' : description
, 'language' : lang
}
'comment' : description
, 'tags' : category
, 'publisher' : publisher
, 'language' : language
, 'linearize_tables' : 'Yes'
}
extra_css = '''
h1{font-family :Georgia,"Times New Roman",Times,serif; font-size:large; }
h2{font-family :Georgia,"Times New Roman",Times,serif; font-size:x-small;}
.story{font-family :Georgia,"Times New Roman",Times,serif; font-size: x-small;}
.entry-body{font-family :Georgia,"Times New Roman",Times,serif; font-size: x-small;}
.entry-more{font-family :Georgia,"Times New Roman",Times,serif; font-size: x-small;}
.credit{color:#666666; font-family :Georgia,"Times New Roman",Times,serif; font-size: xx-small;}
.small{color:#666666; font-family :Georgia,"Times New Roman",Times,serif; font-size: xx-small;}
.byline{font-family :Georgia,"Times New Roman",Times,serif; font-size: xx-small;}
.date{font-family :Georgia,"Times New Roman",Times,serif; font-size: xx-small;color:#930000; font-style:italic;}
.time{font-family :Georgia,"Times New Roman",Times,serif; font-size: xx-small;color:#930000; font-style:italic;}
.copyright{font-family :Georgia,"Times New Roman",Times,serif; font-size: xx-small;color:#930000; }
.subhead{font-family :Georgia,"Times New Roman",Times,serif; font-size:x-small;}
'''
# recursions = 1
# match_regexps = [r'http://www.latimes.com/.*page=[2-9]']
keep_only_tags = [dict(name='div', attrs={'class':["story" ,"entry"] })]
keep_only_tags = [
dict(name='div', attrs={'class':'story'})
,dict(attrs={'class':['entry-header','time','entry-content']})
]
remove_tags_after=dict(name='p', attrs={'class':'copyright'})
remove_tags = [
dict(name=['meta','link','iframe','object','embed'])
,dict(attrs={'class':['toolSet','articlerail','googleAd','entry-footer-left','entry-footer-right','entry-footer-social','google-ad-story-bottom','sphereTools']})
,dict(attrs={'id':['article-promo','googleads','moduleArticleToolsContainer','gallery-subcontent']})
]
remove_attributes=['lang','xmlns:fb','xmlns:og','border','xtags','i','article_body']
remove_tags = [ dict(name='div', attrs={'class':['articlerail',"sphereTools","tools","toppaginate","entry-footer-left","entry-footer-right"]}),
dict(name='div', attrs={'id':["moduleArticleToolsContainer",]}),
dict(name='p', attrs={'class':["entry-footer",]}),
dict(name='ul', attrs={'class':"article-nav clearfix"}),
dict(name=['iframe'])
]
feeds = [(u'News', u'http://feeds.latimes.com/latimes/news')
,(u'Local','http://feeds.latimes.com/latimes/news/local')
,(u'MostEmailed','http://feeds.latimes.com/MostEmailed')
,(u'Politics','http://feeds.latimes.com/latimes/news/local/politics/cal/')
,('OrangeCounty','http://feeds.latimes.com/latimes/news/local/orange/')
,('National','http://feeds.latimes.com/latimes/news/nationworld/nation')
,('Politics','http://feeds.latimes.com/latimes/news/politics/')
,('Business','http://feeds.latimes.com/latimes/business')
,('Sports','http://feeds.latimes.com/latimes/sports/')
,('Entertainment','http://feeds.latimes.com/latimes/entertainment/')
]
feeds = [
(u'Top News' , u'http://feeds.latimes.com/latimes/news' )
,(u'Local News' , u'http://feeds.latimes.com/latimes/news/local' )
,(u'National' , u'http://feeds.latimes.com/latimes/news/nationworld/nation' )
,(u'National Politics' , u'http://feeds.latimes.com/latimes/news/politics/' )
,(u'Business' , u'http://feeds.latimes.com/latimes/business' )
,(u'Education' , u'http://feeds.latimes.com/latimes/news/education' )
,(u'Environment' , u'http://feeds.latimes.com/latimes/news/science/environment' )
,(u'Religion' , u'http://feeds.latimes.com/latimes/features/religion' )
,(u'Science' , u'http://feeds.latimes.com/latimes/news/science' )
,(u'Technology' , u'http://feeds.latimes.com/latimes/technology' )
,(u'Africa' , u'http://feeds.latimes.com/latimes/africa' )
,(u'Asia' , u'http://feeds.latimes.com/latimes/asia' )
,(u'Europe' , u'http://feeds.latimes.com/latimes/europe' )
,(u'Latin America' , u'http://feeds.latimes.com/latimes/latinamerica' )
,(u'Middle East' , u'http://feeds.latimes.com/latimes/middleeast' )
,(u'Arts&Culture' , u'http://feeds.feedburner.com/latimes/entertainment/news/arts' )
,(u'Entertainment News' , u'http://feeds.feedburner.com/latimes/entertainment/news/' )
,(u'Movie News' , u'http://feeds.feedburner.com/latimes/entertainment/news/movies/' )
,(u'Movie Reviews' , u'http://feeds.feedburner.com/movies/reviews/' )
,(u'Music News' , u'http://feeds.feedburner.com/latimes/entertainment/news/music/' )
,(u'Pop Album Reviews' , u'http://feeds.feedburner.com/latimes/pop-album-reviews' )
,(u'Restaurant Reviews' , u'http://feeds.feedburner.com/latimes/restaurant/reviews' )
,(u'Theater and Dance' , u'http://feeds.feedburner.com/latimes/theaterdance' )
,(u'Autos' , u'http://feeds.latimes.com/latimes/classified/automotive/highway1/')
,(u'Books' , u'http://feeds.latimes.com/features/books' )
,(u'Food' , u'http://feeds.latimes.com/latimes/features/food/' )
,(u'Health' , u'http://feeds.latimes.com/latimes/features/health/' )
,(u'Real Estate' , u'http://feeds.latimes.com/latimes/classified/realestate/' )
,(u'Commentary' , u'http://feeds2.feedburner.com/latimes/news/opinion/commentary/' )
,(u'Sports' , u'http://feeds.latimes.com/latimes/sports/' )
]
def get_article_url(self, article):
ans = article.get('feedburner_origlink').rpartition('?')[0]
ans = BasicNewsRecipe.get_article_url(self, article).rpartition('?')[0]
try:
self.log('Looking for full story link in', ans)
@@ -83,4 +102,22 @@ class LATimes(BasicNewsRecipe):
pass
return ans
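One caveat with the rpartition idiom above: str.rpartition returns ('', '', s) when the separator is absent, so a feed URL without a query string would collapse to the empty string. A defensive variant (a sketch, not what the recipe actually does):

url = BasicNewsRecipe.get_article_url(self, article)
ans = url.rpartition('?')[0] or url  # fall back to the untouched URL when there is no '?'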
def preprocess_html(self, soup):
for item in soup.findAll(style=True):
del item['style']
for item in soup.findAll('img'):
if not item.has_key('alt'):
item['alt'] = 'image'
for item in soup.findAll('a'):
limg = item.find('img')
if item.string is not None:
str = item.string
item.replaceWith(str)
else:
if limg:
item.name ='div'
item.attrs =[]
else:
str = self.tag_to_string(item)
item.replaceWith(str)
return soup
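For readers skimming the diff, the anchor-flattening loop in preprocess_html amounts to these two rewrites (illustrative markup only, not taken from a real LA Times page):

# <a href="...">Full story</a>           ->  Full story                      (text links become plain text)
# <a href="..."><img src="x.jpg"/></a>   ->  <div><img src="x.jpg"/></div>   (image-only links become divs)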

View File

@@ -139,6 +139,13 @@ class CHMReader(CHMFile):
if self.hhc_path not in files and files:
self.hhc_path = files[0]
if self.hhc_path == '.hhc' and self.hhc_path not in files:
from calibre import walk
for x in walk(output_dir):
if os.path.basename(x).lower() in ('index.htm', 'index.html'):
self.hhc_path = os.path.relpath(x, output_dir)
break
def _reformat(self, data, htmlpath):
try:
data = xml_to_unicode(data, strip_encoding_pats=True)[0]

View File

@@ -175,6 +175,19 @@ class EPUBInput(InputFormatPlugin):
raise ValueError(
'EPUB files with DTBook markup are not supported')
for x in list(opf.iterspine()):
ref = x.get('idref', None)
if ref is None:
x.getparent().remove(x)
continue
for y in opf.itermanifest():
if y.get('id', None) == ref and y.get('media-type', None) in \
('application/vnd.adobe-page-template+xml',):
p = x.getparent()
if p is not None:
p.remove(x)
break
with open('content.opf', 'wb') as nopf:
nopf.write(opf.render())
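The intent of the new spine-filtering code is easier to see against a toy OPF. Below is a minimal, self-contained sketch using plain lxml; the OPF sample is simplified (real OPF files carry namespaces, which calibre's opf object handles for you):

from lxml import etree

OPF = '''<package>
  <manifest>
    <item id="tpl" media-type="application/vnd.adobe-page-template+xml"/>
    <item id="c1" media-type="application/xhtml+xml"/>
  </manifest>
  <spine>
    <itemref idref="tpl"/>
    <itemref idref="c1"/>
  </spine>
</package>'''

root = etree.fromstring(OPF)
types = dict((i.get('id'), i.get('media-type')) for i in root.iter('item'))
for ref in list(root.iter('itemref')):
    if types.get(ref.get('idref')) == 'application/vnd.adobe-page-template+xml':
        ref.getparent().remove(ref)  # drop the Adobe page template from the reading order
print etree.tostring(root)  # only the xhtml itemref is left in the spine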

View File

@@ -0,0 +1,61 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re
from calibre.customize import Plugin
class Source(Plugin):
type = _('Metadata source')
author = 'Kovid Goyal'
supported_platforms = ['windows', 'osx', 'linux']
result_of_identify_is_complete = True
def get_author_tokens(self, authors):
'Take a list of authors and yield tokens useful for an AND search query'
# Leave ' in there for Irish names
pat = re.compile(r'[-,:;+!@#$%^&*(){}.`~"\s\[\]/]')
for au in authors:
for tok in au.split():
yield pat.sub('', tok)
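A quick look at what the token cleanup does (the names are made up; note that periods are stripped while the apostrophe survives, which is the point of the Irish-names comment):

import re
pat = re.compile(r'[-,:;+!@#$%^&*(){}.`~"\s\[\]/]')
print [pat.sub('', tok) for tok in "Brian O'Nolan".split()]     # ['Brian', "O'Nolan"]
print [pat.sub('', tok) for tok in 'J. R. R. Tolkien'.split()]  # ['J', 'R', 'R', 'Tolkien']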
def split_jobs(self, jobs, num):
'Split a list of jobs into at most num groups, as evenly as possible'
groups = [[] for i in range(num)]
jobs = list(jobs)
while jobs:
for gr in groups:
try:
job = jobs.pop()
except IndexError:
break
gr.append(job)
return [g for g in groups if g]
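Because jobs are popped from the end of the list and dealt round-robin, the groups come out as evenly sized as possible but in reverse input order, and empty groups are dropped. A worked example (s being any Source instance):

# s.split_jobs([0, 1, 2, 3, 4], 2)  ->  [[4, 2, 0], [3, 1]]
# s.split_jobs([], 3)               ->  []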
def identify(self, log, result_queue, abort, title=None, authors=None, identifiers={}):
'''
Identify a book by its title/author/isbn/etc.
:param log: A log object, use it to output debugging information/errors
:param result_queue: A result Queue, results should be put into it.
Each result is a Metadata object
:param abort: If abort.is_set() returns True, abort further processing
and return as soon as possible
:param title: The title of the book, can be None
:param authors: A list of authors of the book, can be None
:param identifiers: A dictionary of other identifiers, most commonly
{'isbn':'1234...'}
:return: None if no errors occurred, otherwise a unicode representation
of the error suitable for showing to the user
'''
return None
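To make the contract concrete, here is a minimal hypothetical subclass; everything named Dummy is invented for illustration, and only the identify signature and the Metadata class come from the code above:

from calibre.ebooks.metadata.book.base import Metadata

class DummySource(Source):
    name = 'Dummy'

    def identify(self, log, result_queue, abort, title=None, authors=None, identifiers={}):
        if abort.is_set():  # bail out early if the caller asked us to stop
            return None
        mi = Metadata(title or _('Unknown'), authors or [_('Unknown')])
        result_queue.put(mi)  # every result placed on the queue is a Metadata object
        return None  # no error string to show the user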

View File

@@ -0,0 +1,215 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import time
from urllib import urlencode
from functools import partial
from threading import Thread
from lxml import etree
from calibre.ebooks.metadata.sources import Source
from calibre.ebooks.metadata.book.base import Metadata
from calibre.utils.date import parse_date, utcnow
from calibre import browser, as_unicode
NAMESPACES = {
'openSearch':'http://a9.com/-/spec/opensearchrss/1.0/',
'atom' : 'http://www.w3.org/2005/Atom',
'dc': 'http://purl.org/dc/terms'
}
XPath = partial(etree.XPath, namespaces=NAMESPACES)
total_results = XPath('//openSearch:totalResults')
start_index = XPath('//openSearch:startIndex')
items_per_page = XPath('//openSearch:itemsPerPage')
entry = XPath('//atom:entry')
entry_id = XPath('descendant::atom:id')
creator = XPath('descendant::dc:creator')
identifier = XPath('descendant::dc:identifier')
title = XPath('descendant::dc:title')
date = XPath('descendant::dc:date')
publisher = XPath('descendant::dc:publisher')
subject = XPath('descendant::dc:subject')
description = XPath('descendant::dc:description')
language = XPath('descendant::dc:language')
def to_metadata(browser, log, entry_):
def get_text(extra, x):
try:
ans = x(extra)
if ans:
ans = ans[0].text
if ans and ans.strip():
return ans.strip()
except:
log.exception('Programming error:')
return None
id_url = entry_id(entry_)[0].text
title_ = ': '.join([x.text for x in title(entry_)]).strip()
authors = [x.text.strip() for x in creator(entry_) if x.text]
if not authors:
authors = [_('Unknown')]
if not id_url or not title_:
# Silently discard this entry
return None
mi = Metadata(title_, authors)
try:
raw = browser.open(id_url).read()
feed = etree.fromstring(raw)
extra = entry(feed)[0]
except:
log.exception('Failed to get additional details for', mi.title)
return mi
mi.comments = get_text(extra, description)
#mi.language = get_text(extra, language)
mi.publisher = get_text(extra, publisher)
# Author sort
for x in creator(extra):
for key, val in x.attrib.items():
if key.endswith('file-as') and val and val.strip():
mi.author_sort = val
break
# ISBN
isbns = []
for x in identifier(extra):
t = str(x.text).strip()
if t[:5].upper() in ('ISBN:', 'LCCN:', 'OCLC:'):
if t[:5].upper() == 'ISBN:':
isbns.append(t[5:])
if isbns:
mi.isbn = sorted(isbns, key=len)[-1]
# Tags
try:
btags = [x.text for x in subject(extra) if x.text]
tags = []
for t in btags:
tags.extend([y.strip() for y in t.split('/')])
tags = list(sorted(list(set(tags))))
except:
log.exception('Failed to parse tags:')
tags = []
if tags:
mi.tags = [x.replace(',', ';') for x in tags]
# pubdate
pubdate = get_text(extra, date)
if pubdate:
try:
default = utcnow().replace(day=15)
mi.pubdate = parse_date(pubdate, assume_utc=True, default=default)
except:
log.exception('Failed to parse pubdate')
return mi
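A small detail in the ISBN handling above: sorting the collected ISBNs by length and taking the last entry prefers an ISBN-13 over an ISBN-10 when the feed carries both (made-up pair for illustration):

isbns = ['0261102214', '9780261102217']
print sorted(isbns, key=len)[-1]   # -> 9780261102217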
class Worker(Thread):
def __init__(self, log, entries, abort, result_queue):
self.browser, self.log, self.entries = browser(), log, entries
self.abort, self.result_queue = abort, result_queue
Thread.__init__(self)
self.daemon = True
def run(self):
for i in self.entries:
try:
ans = to_metadata(self.browser, self.log, i)
if ans is not None:
self.result_queue.put(ans)
except:
self.log.exception(
'Failed to get metadata for identify entry:',
etree.tostring(i))
if self.abort.is_set():
break
class GoogleBooks(Source):
name = 'Google Books'
def create_query(self, log, title=None, authors=None, identifiers={},
start_index=1):
BASE_URL = 'http://books.google.com/books/feeds/volumes?'
isbn = identifiers.get('isbn', None)
q = ''
if isbn is not None:
q += 'isbn:'+isbn
elif title or authors:
def build_term(prefix, parts):
return ' '.join('in'+prefix + ':' + x for x in parts)
if title is not None:
q += build_term('title', title.split())
if authors:
q += ('+' if q else '')+build_term('author',
self.get_author_tokens(authors))
if isinstance(q, unicode):
q = q.encode('utf-8')
if not q:
return None
return BASE_URL+urlencode({
'q':q,
'max-results':20,
'start-index':start_index,
'min-viewability':'none',
})
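For a title/author lookup the generated URL comes out roughly as below (hypothetical call; the parameter order depends on dict iteration and the exact escaping is urlencode's):

# self.create_query(log, title='Dune', authors=['Frank Herbert']) ->
# http://books.google.com/books/feeds/volumes?q=intitle%3ADune%2Binauthor%3AFrank+inauthor%3AHerbert&max-results=20&start-index=1&min-viewability=none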
def identify(self, log, result_queue, abort, title=None, authors=None, identifiers={}):
query = self.create_query(log, title=title, authors=authors,
identifiers=identifiers)
try:
raw = browser().open_novisit(query).read()
except Exception, e:
log.exception('Failed to make identify query: %r'%query)
return as_unicode(e)
try:
parser = etree.XMLParser(recover=True, no_network=True)
feed = etree.fromstring(raw, parser=parser)
entries = entry(feed)
except Exception, e:
log.exception('Failed to parse identify results')
return as_unicode(e)
groups = self.split_jobs(entries, 5) # At most 5 threads
if not groups:
return
workers = [Worker(log, entries, abort, result_queue) for entries in
groups]
if abort.is_set():
return
for worker in workers: worker.start()
has_alive_worker = True
while has_alive_worker and not abort.is_set():
has_alive_worker = False
for worker in workers:
if worker.is_alive():
has_alive_worker = True
time.sleep(0.1)
return None
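A hypothetical driver showing the plumbing identify expects; the PrintLog stub and the GoogleBooks(None) instantiation are assumptions (in calibre, real log, abort and plugin-loading machinery are supplied for you):

from Queue import Queue
from threading import Event

class PrintLog(object):  # crude stand-in for calibre's log object
    def __call__(self, *args):
        print ' '.join(map(str, args))
    info = warn = error = exception = __call__

result_queue, abort = Queue(), Event()
err = GoogleBooks(None).identify(PrintLog(), result_queue, abort, title='Dune')
while not result_queue.empty():
    print result_queue.get().title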

View File

@@ -429,10 +429,12 @@ class MetadataSingleDialog(ResizableDialog, Ui_MetadataSingleDialog):
old_extensions.add(ext)
for ext in new_extensions:
self.db.add_format(self.row, ext, open(paths[ext], 'rb'), notify=False)
db_extensions = set([f.lower() for f in self.db.formats(self.row).split(',')])
dbfmts = self.db.formats(self.row)
db_extensions = set([f.lower() for f in (dbfmts.split(',') if dbfmts
else [])])
extensions = new_extensions.union(old_extensions)
for ext in db_extensions:
if ext not in extensions:
if ext not in extensions and ext in self.original_formats:
self.db.remove_format(self.row, ext, notify=False)
def show_format(self, item, *args):
@@ -576,6 +578,7 @@ class MetadataSingleDialog(ResizableDialog, Ui_MetadataSingleDialog):
self.orig_date = qt_to_dt(self.date.date())
exts = self.db.formats(row)
self.original_formats = []
if exts:
exts = exts.split(',')
for ext in exts:
@@ -586,6 +589,7 @@ class MetadataSingleDialog(ResizableDialog, Ui_MetadataSingleDialog):
if size is None:
continue
Format(self.formats, ext, size, timestamp=timestamp)
self.original_formats.append(ext.lower())
self.initialize_combos()

View File

@@ -472,6 +472,7 @@ class FormatsManager(QWidget): # {{{
def initialize(self, db, id_):
self.changed = False
exts = db.formats(id_, index_is_id=True)
self.original_val = set([])
if exts:
exts = exts.split(',')
for ext in exts:
@@ -482,6 +483,7 @@ class FormatsManager(QWidget): # {{{
if size is None:
continue
Format(self.formats, ext, size, timestamp=timestamp)
self.original_val.add(ext.lower())
def commit(self, db, id_):
if not self.changed:
@@ -500,11 +502,12 @@ class FormatsManager(QWidget): # {{{
for ext in new_extensions:
db.add_format(id_, ext, open(paths[ext], 'rb'), notify=False,
index_is_id=True)
db_extensions = set([f.lower() for f in db.formats(id_,
index_is_id=True).split(',')])
dbfmts = db.formats(id_, index_is_id=True)
db_extensions = set([f.lower() for f in (dbfmts.split(',') if dbfmts
else [])])
extensions = new_extensions.union(old_extensions)
for ext in db_extensions:
if ext not in extensions:
if ext not in extensions and ext in self.original_val:
db.remove_format(id_, ext, notify=False, index_is_id=True)
self.changed = False
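Both hunks in this file (and the matching ones in the dialog above) guard the same two pitfalls: db.formats() returns None, not '', for a book with no formats, and unconditionally removing every format absent from the widget would also delete formats the user never loaded into it. The None guard in one line:

dbfmts = None  # what db.formats() returns for a book with no formats
exts = set(f.lower() for f in (dbfmts.split(',') if dbfmts else []))  # -> set([]) instead of AttributeError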

View File

@@ -391,6 +391,8 @@ Take your pick:
* A tribute to the SONY Librie which was the first e-ink based e-book reader
* My wife chose it ;-)
|app| is pronounced as cal-i-ber, *not* ca-libre. If you're wondering: |app| is the British/Commonwealth spelling of caliber. Being Indian, I find that the natural spelling.
Why does |app| show only some of my fonts on OS X?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|app| embeds fonts in the e-book files it creates. E-book files support embedding only TrueType (.ttf) fonts. Most fonts on OS X systems are in .dfont format, so they cannot be embedded. |app| shows only the TrueType fonts found on your system. You can obtain many TrueType fonts on the web. Simply download the .ttf files and add them to the Library/Fonts directory in your home directory.