Merge from trunk

Charles Haley 2010-09-10 16:32:11 +01:00
commit 683f204466
3 changed files with 47 additions and 49 deletions


@@ -13,50 +13,51 @@ from calibre.web.feeds.news import BasicNewsRecipe
 class TazDigiabo(BasicNewsRecipe):

     title = u'Taz Digiabo'
     description = u'Das EPUB DigiAbo der Taz'
     language = 'de'
     lang = 'de-DE'

     __author__ = 'Lars Jacob'
     needs_subscription = True

     conversion_options = {
         'no_default_epub_cover' : True
     }

     def build_index(self):
         if self.username is not None and self.password is not None:
             domain = "http://www.taz.de"

             url = domain + "/epub/"
             auth_handler = urllib2.HTTPBasicAuthHandler()
             auth_handler.add_password(realm='TAZ-ABO',
                                       uri=url,
                                       user=self.username,
                                       passwd=self.password)
             opener = urllib2.build_opener(auth_handler)
             urllib2.install_opener(opener)

             try:
                 f = urllib2.urlopen(url)
             except urllib2.HTTPError:
                 self.report_progress(0,_('Can\'t login to download issue'))
-                return
+                raise ValueError('Failed to login, check your username and'
+                        ' password')

             tmp = tempfile.TemporaryFile()
             self.report_progress(0,_('downloading epub'))
             tmp.write(f.read())

             zfile = zipfile.ZipFile(tmp, 'r')
             self.report_progress(0,_('extracting epub'))
             zfile.extractall(self.output_dir)
             tmp.close()

             index = os.path.join(self.output_dir, 'content.opf')

             self.report_progress(1,_('epub downloaded and extracted'))

             return index


@@ -1105,7 +1105,8 @@ class OPFCreator(MetaInformation):
         spine.set('toc', 'ncx')
         if self.spine is not None:
             for ref in self.spine:
-                spine.append(E.itemref(idref=ref.id))
+                if ref.id is not None:
+                    spine.append(E.itemref(idref=ref.id))
         guide = E.guide()
         if self.guide is not None:
             for ref in self.guide:


@@ -1306,15 +1306,14 @@ class DeviceMixin(object): # {{{
     def book_on_device(self, id, format=None, reset=False):
         '''
         Return an indication of whether the given book represented by its db id
-        is on the currently connected device. It returns a 6 element list. The
+        is on the currently connected device. It returns a 5 element list. The
         first three elements represent memory locations main, carda, and cardb,
         and are true if the book is identifiably in that memory. The fourth
         is a count of how many instances of the book were found across all
-        the memory locations. The fifth is the type of match. The type can be
-        one of: None, 'uuid', 'db_id', 'metadata'. The sixth is a set of paths to the
+        the memory locations. The fifth is a set of paths to the
        matching books on the device.
         '''
-        loc = [None, None, None, 0, None, set([])]
+        loc = [None, None, None, 0, set([])]

         if reset:
             self.book_db_id_cache = None
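Side note on the docstring change in this hunk: the match-type element (None/'uuid'/'db_id'/'metadata') is dropped, so callers now unpack a 5-element list where the last element is the set of device paths. A minimal, illustrative sketch of the new shape, not part of this commit; 'gui' and 'book_id' are hypothetical names for a DeviceMixin instance and a library database id:

# Illustrative only -- assumes 'gui' is a DeviceMixin instance and
# 'book_id' is a calibre library database id.
on_main, on_carda, on_cardb, count, paths = gui.book_on_device(book_id)
if on_main or on_carda or on_cardb:
    # 'count' is the number of copies found across all memory locations;
    # 'paths' is a set of lpaths of the matching copies on the device.
    print 'found %d copies at: %s' % (count, ', '.join(sorted(paths)))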
@@ -1322,10 +1321,8 @@ class DeviceMixin(object): # {{{
             self.book_db_uuid_path_map = None
             return

-        string_pat = re.compile('(?u)\W|[_]')
-        def clean_string(x):
-            x = x.lower() if x else ''
-            return string_pat.sub('', x)
+        if not hasattr(self, 'db_book_uuid_cache'):
+            return loc

         if self.book_db_id_cache is None:
             self.book_db_id_cache = []
@@ -1343,7 +1340,8 @@ class DeviceMixin(object): # {{{
                         self.book_db_id_cache[i].add(db_id)
                         if db_id not in self.book_db_uuid_path_map:
                             self.book_db_uuid_path_map[db_id] = set()
-                        self.book_db_uuid_path_map[db_id].add(book.lpath)
+                        if getattr(book, 'lpath', False):
+                            self.book_db_uuid_path_map[db_id].add(book.lpath)
                         c = self.book_db_id_counts.get(db_id, 0)
                         self.book_db_id_counts[db_id] = c + 1

@@ -1351,9 +1349,7 @@ class DeviceMixin(object): # {{{
             if id in self.book_db_id_cache[i]:
                 loc[i] = True
                 loc[3] = self.book_db_id_counts.get(id, 0)
-                loc[4] = 'uuid'
-                loc[5] |= self.book_db_uuid_path_map[id]
-                continue
+                loc[4] |= self.book_db_uuid_path_map[id]
         return loc

     def set_books_in_library(self, booklists, reset=False):
@@ -1434,7 +1430,7 @@ class DeviceMixin(object): # {{{
                     continue
                 # Sonys know their db_id independent of the application_id
                 # in the metadata cache. Check that as well.
-                if book.db_id in d['db_ids']:
+                if getattr(book, 'db_id', None) in d['db_ids']:
                     book.in_library = True
                     book.application_id = \
                         d['db_ids'][book.db_id].application_id