Mirror of https://github.com/kovidgoyal/calibre.git
Merge from trunk
Commit 67a61b7760
@@ -30,7 +30,7 @@ Lets pick a couple of feeds that look interesting:
 #. Business Travel: http://feeds.portfolio.com/portfolio/businesstravel
 #. Tech Observer: http://feeds.portfolio.com/portfolio/thetechobserver
 
-I got the URLs by clicking the little orange RSS icon next to each feed name. To make |app| download the feeds and convert them into an ebook, you should click the :guilabel:`Fetch news` button and then the :guilabel:`Add a custom news source` menu item. A dialog similar to that shown below should open up.
+I got the URLs by clicking the little orange RSS icon next to each feed name. To make |app| download the feeds and convert them into an ebook, you should right click the :guilabel:`Fetch news` button and then the :guilabel:`Add a custom news source` menu item. A dialog similar to that shown below should open up.
 
 .. image:: images/custom_news.png
     :align: center
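For reference, these two feeds are what the recipe built in this tutorial section ends up listing. A minimal sketch of such a recipe (the class name and settings are illustrative, not part of this commit):

    from calibre.web.feeds.news import BasicNewsRecipe

    class Portfolio(BasicNewsRecipe):
        # Illustrative recipe built from the two feeds picked above
        title = 'Portfolio'
        oldest_article = 7          # days
        max_articles_per_feed = 100

        feeds = [
            ('Business Travel', 'http://feeds.portfolio.com/portfolio/businesstravel'),
            ('Tech Observer', 'http://feeds.portfolio.com/portfolio/thetechobserver'),
        ]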
@@ -9,6 +9,7 @@ __docformat__ = 'restructuredtext en'
 
 import os
 
+from calibre import replace_entities
 from calibre.ebooks.metadata.toc import TOC
 from calibre.ebooks.mobi.reader.headers import NULL_INDEX
 from calibre.ebooks.mobi.reader.index import read_index
@@ -88,7 +89,8 @@ def build_toc(index_entries):
     for lvl in sorted(levels):
         for item in level_map[lvl]:
             parent = num_map[item['parent']]
-            child = parent.add_item(item['href'], item['idtag'], item['text'])
+            child = parent.add_item(item['href'], item['idtag'],
+                    replace_entities(item['text'], encoding=None))
             num_map[item['num']] = child
 
     # Set play orders in depth first order
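The new replace_entities() call decodes HTML entities in the index entry labels before they are turned into TOC nodes. A rough illustration of the effect, with a made-up label (replace_entities comes from the calibre package, as imported above):

    from calibre import replace_entities

    label = 'Ward &amp; Peters'
    print(replace_entities(label, encoding=None))   # Ward & Peters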
@@ -76,15 +76,13 @@ def tostring(raw, **kwargs):
 
 class Chunk(object):
 
-    def __init__(self, raw, parent_tag):
+    def __init__(self, raw, selector):
         self.raw = raw
         self.starts_tags = []
         self.ends_tags = []
         self.insert_pos = None
-        self.parent_tag = parent_tag
-        self.parent_is_body = False
-        self.is_last_chunk = False
-        self.is_first_chunk = False
+        self.selector = "%s-//*[@aid='%s']"%selector
 
     def __len__(self):
         return len(self.raw)
@@ -97,11 +95,6 @@ class Chunk(object):
         return 'Chunk(len=%r insert_pos=%r starts_tags=%r ends_tags=%r)'%(
             len(self.raw), self.insert_pos, self.starts_tags, self.ends_tags)
 
-    @property
-    def selector(self):
-        typ = 'S' if (self.is_last_chunk and not self.parent_is_body) else 'P'
-        return "%s-//*[@aid='%s']"%(typ, self.parent_tag)
-
     __str__ = __repr__
 
 class Skeleton(object):
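These two hunks replace the lazily computed selector property with a value fixed at construction time: the caller now passes a (type, aid) tuple and the selector string is built immediately. A small sketch of the resulting strings, using the Chunk class as defined above (the aid value is made up):

    chunk = Chunk(b'<p>some markup</p>', ('P', '123'))
    print(chunk.selector)   # P-//*[@aid='123']

    chunk = Chunk(b'<p>some markup</p>', ('S', '123'))
    print(chunk.selector)   # S-//*[@aid='123']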
@@ -251,13 +244,13 @@ class Chunker(object):
 
     def step_into_tag(self, tag, chunks):
         aid = tag.get('aid')
-        is_body = tag.tag == 'body'
+        self.chunk_selector = ('P', aid)
 
         first_chunk_idx = len(chunks)
 
         # First handle any text
         if tag.text and tag.text.strip(): # Leave pure whitespace in the skel
-            chunks.extend(self.chunk_up_text(tag.text, aid))
+            chunks.extend(self.chunk_up_text(tag.text))
             tag.text = None
 
         # Now loop over children
@@ -266,21 +259,21 @@ class Chunker(object):
             if child.tag == etree.Entity:
                 chunks.append(raw)
                 if child.tail:
-                    chunks.extend(self.chunk_up_text(child.tail, aid))
+                    chunks.extend(self.chunk_up_text(child.tail))
                 continue
             raw = close_self_closing_tags(raw)
             if len(raw) > CHUNK_SIZE and child.get('aid', None):
                 self.step_into_tag(child, chunks)
                 if child.tail and child.tail.strip(): # Leave pure whitespace
-                    chunks.extend(self.chunk_up_text(child.tail, aid))
+                    chunks.extend(self.chunk_up_text(child.tail))
                     child.tail = None
             else:
                 if len(raw) > CHUNK_SIZE:
                     self.log.warn('Tag %s has no aid and a too large chunk'
                             ' size. Adding anyway.'%child.tag)
-                chunks.append(Chunk(raw, aid))
+                chunks.append(Chunk(raw, self.chunk_selector))
                 if child.tail:
-                    chunks.extend(self.chunk_up_text(child.tail, aid))
+                    chunks.extend(self.chunk_up_text(child.tail))
                 tag.remove(child)
 
         if len(chunks) <= first_chunk_idx and chunks:
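The branch above decides, for each child element, whether to recurse or to emit the serialized child as a single chunk. A simplified restatement of that decision (the function and the threshold are illustrative stand-ins for the writer's CHUNK_SIZE logic, not code from this diff):

    CHUNK_SIZE = 8192  # assumed order of magnitude, not taken from the diff

    def plan_for_child(serialized_length, has_aid):
        # Mirrors the branch in step_into_tag: only large children that carry
        # an aid attribute are stepped into recursively; everything else is
        # appended as one Chunk under the currently active selector.
        if serialized_length > CHUNK_SIZE and has_aid:
            return 'step_into_tag'
        return 'append single Chunk'

    print(plan_for_child(20000, True))    # step_into_tag
    print(plan_for_child(20000, False))   # append single Chunk (a warning is logged)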
@@ -293,12 +286,9 @@ class Chunker(object):
         my_chunks = chunks[first_chunk_idx:]
         if my_chunks:
-            my_chunks[0].is_first_chunk = True
-            my_chunks[-1].is_last_chunk = True
-            if is_body:
-                for chunk in my_chunks:
-                    chunk.parent_is_body = True
+        self.chunk_selector = ('S', aid)
 
-    def chunk_up_text(self, text, parent_tag):
+    def chunk_up_text(self, text):
         text = text.encode('utf-8')
         ans = []
 
@@ -314,7 +304,7 @@ class Chunker(object):
         while rest:
             start, rest = split_multibyte_text(rest)
             ans.append(b'<span class="AmznBigTextBlock">' + start + '</span>')
-        return [Chunk(x, parent_tag) for x in ans]
+        return [Chunk(x, self.chunk_selector) for x in ans]
 
     def merge_small_chunks(self, chunks):
         ans = chunks[:1]
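With these hunks, chunk_up_text() no longer needs to know the parent tag; every piece of split-up text simply inherits whatever self.chunk_selector holds at that moment, ('P', aid) while inside the tag and ('S', aid) once it has been left. A condensed sketch of that stamping, reusing the Chunk class from this diff (aid and text are made up):

    chunk_selector = ('P', '42')   # active while stepping through the tag with aid=42
    pieces = [b'first block of text', b'second block of text']
    chunks = [Chunk(x, chunk_selector) for x in pieces]
    print(chunks[0].selector, chunks[1].selector)
    # P-//*[@aid='42'] P-//*[@aid='42']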
@@ -135,21 +135,22 @@ def dnd_has_extension(md, extensions):
         prints('Debugging DND event')
         for f in md.formats():
             f = unicode(f)
-            prints(f, repr(data_as_string(f, md))[:300], '\n')
+            raw = data_as_string(f, md)
+            prints(f, len(raw), repr(raw[:300]), '\n')
         print ()
     if has_firefox_ext(md, extensions):
         return True
     if md.hasUrls():
         urls = [unicode(u.toString()) for u in
                 md.urls()]
-        paths = [urlparse(u).path for u in urls]
+        purls = [urlparse(u) for u in urls]
+        paths = [u2p(x) for x in purls]
+        exts = frozenset([posixpath.splitext(u)[1][1:].lower() for u in
+            paths if u])
         if DEBUG:
             prints('URLS:', urls)
             prints('Paths:', paths)
+            prints('Extensions:', exts)
 
-        exts = frozenset([posixpath.splitext(u)[1][1:].lower() for u in
-            paths])
         return bool(exts.intersection(frozenset(extensions)))
     return False
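For the extension check, each dropped URL is reduced to its path and the bit after the last dot. A small illustration of that computation (the URL is made up; urlparse is the Python 2 form of the import, matching the surrounding code):

    import posixpath
    from urlparse import urlparse

    url = 'file:///home/user/books/Some%20Book.EPUB'
    path = urlparse(url).path
    ext = posixpath.splitext(path)[1][1:].lower()
    print(ext)                                  # epub
    print(ext in frozenset(['epub', 'mobi']))   # True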
@@ -507,8 +507,8 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
             self.clock_label.setVisible(True)
             self.clock_label.setText('99:99 AA')
             self.clock_timer.start(1000)
-            self.clock_label.setStyleSheet(self.clock_label_style%
-                    tuple(self.view.document.colors()))
+            self.clock_label.setStyleSheet(self.clock_label_style%(
+                'rgba(0, 0, 0, 0)', self.view.document.colors()[1]))
             self.clock_label.resize(self.clock_label.sizeHint())
             sw = QApplication.desktop().screenGeometry(self.view)
             self.clock_label.move(sw.width() - self.vertical_scrollbar.width() - 15
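The new format arguments give the clock label a fully transparent background while keeping the text colour from the document's colour pair. A rough illustration of the substitution (the template string here is a stand-in, not the real clock_label_style):

    clock_label_style = 'QLabel { background-color: %s; color: %s }'
    colors = ('white', 'black')   # stand-in for self.view.document.colors()
    print(clock_label_style % ('rgba(0, 0, 0, 0)', colors[1]))
    # QLabel { background-color: rgba(0, 0, 0, 0); color: black }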
@@ -2637,8 +2637,10 @@ Author '{0}':
                 navLabelTag.insert(0, textTag)
                 navPointByLetterTag.insert(0,navLabelTag)
                 contentTag = Tag(soup, 'content')
+                if authors_by_letter[1] == self.SYMBOLS:
+                    contentTag['src'] = "%s#%s_authors" % (HTML_file, authors_by_letter[1])
+                else:
+                    contentTag['src'] = "%s#%s_authors" % (HTML_file, self.generateUnicodeName(authors_by_letter[1]))
 
                 navPointByLetterTag.insert(1,contentTag)
 
                 if self.generateForKindle:
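Each per-letter navPoint's content src points at an anchor inside the generated authors HTML file; the symbols section keeps its literal marker, while other letters go through generateUnicodeName(), presumably to get an anchor-safe name. An illustration of the string being built (file name and letter are examples, not taken from the diff):

    HTML_file = 'content/ByAlphaAuthor.html'
    letter = 'A'
    print("%s#%s_authors" % (HTML_file, letter))
    # content/ByAlphaAuthor.html#A_authors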