mirror of https://github.com/kovidgoyal/calibre.git

Sync to trunk.
commit a6d239b03e

18  recipes/calibre_blog.recipe  Normal file
@@ -0,0 +1,18 @@
from calibre.web.feeds.news import BasicNewsRecipe

class CalibreBlog(BasicNewsRecipe):
    title = u'Calibre Blog'
    language = 'en'
    __author__ = 'Krittika Goyal'
    oldest_article = 1000 #days
    max_articles_per_feed = 5
    use_embedded_content = False

    no_stylesheets = True
    auto_cleanup = True


    feeds = [
        ('Article',
            'http://blog.calibre-ebook.com/feeds/posts/default'),
    ]
@@ -56,6 +56,7 @@ class ElUniversal(BasicNewsRecipe):
        ]

    def print_version(self, url):
        rp,sep,rest = url.rpartition('/')
        return rp + sep + 'imp_' + rest
        return url + '-imp'

    def get_article_url(self, article):
        return article.get('guid', None)
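An aside, not part of the commit: the two print_version bodies shown in this hunk rewrite an article URL in different ways. A quick sketch with a made-up URL illustrates both transformations (the address below is hypothetical):

# Illustration only; the URL is invented for this sketch.
url = 'http://www.eluniversal.com/2011/10/26/some-article.shtml'

# rpartition variant: prefix the last path segment with 'imp_'.
rp, sep, rest = url.rpartition('/')
print(rp + sep + 'imp_' + rest)   # .../2011/10/26/imp_some-article.shtml

# '-imp' variant: append the suffix to the whole URL.
print(url + '-imp')               # .../some-article.shtml-imp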
BIN  recipes/icons/metro_news_nl.png (PNG Image, 16x16 pixels).png  Normal file
Binary file not shown. (new image, 712 B)
@@ -1,9 +1,21 @@
from calibre.web.feeds.news import BasicNewsRecipe
import re
from calibre.utils.magick import Image


''' Version 1.2, updated cover image to match the changed website.
    added info date on title
    version 1.4 Updated tags, delay and added autoclean 22-09-2011
    version 1.5 Changes due to changes in site
    version 1.6 Added css, removed auto cleanup, added buitenland section, added use_embedded_content, added remove_attributes
        Added some processing on pictures
        Removed links in html
        Removed extra white characters
        changed handling of self closing span
'''

class AdvancedUserRecipe1306097511(BasicNewsRecipe):
    title = u'Metro Nieuws NL'
    # Version 1.2, updated cover image to match the changed website.
    # added info date on title
    oldest_article = 2
    max_articles_per_feed = 100
    __author__ = u'DrMerry'
@@ -11,8 +23,8 @@ class AdvancedUserRecipe1306097511(BasicNewsRecipe):
    language = u'nl'
    simultaneous_downloads = 5
    #delay = 1
    auto_cleanup = True
    auto_cleanup_keep = '//div[@class="article-image-caption-2column"]|//div[@id="date"]'
    #auto_cleanup = True
    #auto_cleanup_keep = '//div[@class="article-image-caption-2column"]/*|//div[@id="date"]/*|//div[@class="article-image-caption-3column"]/*'
    timefmt = ' [%A, %d %b %Y]'
    no_stylesheets = True
    remove_javascript = True
@@ -20,22 +32,73 @@ class AdvancedUserRecipe1306097511(BasicNewsRecipe):
    cover_url = 'http://www.oldreadmetro.com/img/en/metroholland/last/1/small.jpg'
    publication_type = 'newspaper'
    remove_tags_before = dict(name='div', attrs={'id':'date'})
    remove_tags_after = dict(name='div', attrs={'id':'column-1-3'})
    remove_tags_after = dict(name='div', attrs={'class':'article-body'})
    encoding = 'utf-8'
    extra_css = 'body{font-size:12px} #date, .article-image-caption {font-size: 0.583em} h2 {font-size: 0.917em} p.small, span, li, li span span, p, b, i, u, p.small.article-paragraph, p.small.article-paragraph p, p.small.article-paragraph span, p span, span {font-size: 0.833em} h1 {font-size: 1em}'
    remove_attributes = ['style', 'font', 'width', 'height']
    use_embedded_content = False
    extra_css = 'body {padding:5px 0px; background:#fff;font-size: 13px;}\
        #date {clear: both;margin-left: 19px;font-size: 11px;font-weight: 300;color: #616262;height: 15px;}\
        .article-box-fact.module-title {clear:both;border-top:1px solid black;border-bottom:4px solid black;padding: 8px 0;color: #24763b;font-family: arial, sans-serif;font-size: 14px;font-weight: bold;}\
        h1.title {color: #000000;font-size: 44px;padding-bottom: 10px;line-height: 1.15;font-weight: 300;} h2.subtitle {font-size: 13px;font-weight: 700;padding-bottom: 10px;}\
        .article-body p{padding-bottom:10px;}div.column-1-3{float: left;display: inline;width: 567px;margin-left: 19px;border-right: 1px solid #CACACA;padding-right: 9px;}\
        div.column-1-2 {float: left;display: inline;width: 373px;padding-right: 7px;border-right: 1px solid #CACACA;}\
        p.article-image-caption {font-size: 12px;font-weight: 300;line-height: 1.4;color: #616262;margin-top: 5px;} \
        p.article-image-caption .credits {font-style: italic;font-size: 10px;}\
        div.article-image-caption {width: 246px;margin-bottom: 5px;margin-left: 10px;}\
        div.article-image-caption-2column {margin-bottom: 10px;width: 373px;} div.article-image-caption-3column {}\
        img {border:0px;} .img-mask {position:absolute;top:0px;left:0px;}'

    keep_only_tags = [dict(name='div', attrs={'class':[ 'article-image-caption-2column', 'article-image-caption-3column', 'article-body', 'article-box-fact']}),
        dict(name='div', attrs={'id':['date']}),
        dict(name='h1', attrs={'class':['title']}),
        dict(name='h2', attrs={'class':['subtitle']})]

    remove_tags = [dict(name='div', attrs={'class':[ 'metroCommentFormWrap',
        'commentForm', 'metroCommentInnerWrap', 'article-slideshow-counter-container', 'article-slideshow-control', 'ad', 'header-links',
        'art-rgt','pluck-app pluck-comm', 'share-and-byline', 'article-tools-below-title', 'col-179 ', 'related-links', 'clear padding-top-15', 'share-tools', 'article-page-auto-pushes', 'footer-edit']}),
        dict(name='div', attrs={'id':['article-2', 'article-4', 'article-1', 'navigation', 'footer', 'header', 'comments', 'sidebar']}),
        dict(name='div', attrs={'id':['article-2', 'article-4', 'article-1', 'navigation', 'footer', 'header', 'comments', 'sidebar', 'share-and-byline']}),
        dict(name='iframe')]

    preprocess_regexps = [(re.compile(r'(<p>( |\s)*</p>|<a[^>]*>Tweet</a>|<a[^>]*>|</a>|<!--.*?-->)', re.DOTALL|re.IGNORECASE),lambda match: ''),
        (re.compile(r'( |\s\s)+\s*', re.DOTALL|re.IGNORECASE),lambda match: ' '),
        (re.compile(r'([\s>])([^\s>]+)(<span[^>]+) />', re.DOTALL|re.IGNORECASE),
            lambda match: match.group(1) + match.group(3) + '>' + match.group(2) + '</span>'),
        ]

    def postprocess_html(self, soup, first):
        for tag in soup.findAll(lambda tag: tag.name.lower()=='img' and tag.has_key('src')):
            iurl = tag['src']
            img = Image()
            img.open(iurl)
            #width, height = img.size
            #print '***img is: ', iurl, '\n****width is: ', width, 'height is: ', height
            img.trim(0)
            img.save(iurl)
            '''
            #width, height = img.size
            #print '***TRIMMED img width is: ', width, 'height is: ', height
            left=0
            top=0
            border_color='#ffffff'
            width, height = img.size
            #print '***retrieved img width is: ', width, 'height is: ', height
            height_correction = 1.17
            canvas = create_canvas(width, height*height_correction,border_color)
            canvas.compose(img, left, top)
            #img = canvas
            canvas.save(iurl)
            #width, height = canvas.size
            #print '***NEW img width is: ', width, 'height is: ', height
            '''
        return soup

    feeds = [
        (u'Binnenland', u'http://www.metronieuws.nl/rss.xml?c=1277377288-3'),
        (u'Economie', u'http://www.metronieuws.nl/rss.xml?c=1278070988-0'),
        (u'Den Haag', u'http://www.metronieuws.nl/rss.xml?c=1289013337-3'),
        (u'Rotterdam', u'http://www.metronieuws.nl/rss.xml?c=1289013337-2'),
        (u'Amsterdam', u'http://www.metronieuws.nl/rss.xml?c=1289013337-1'),
        (u'Buitenland', u'http://www.metronieuws.nl/rss.xml?c=1277377288-4'),
        (u'Columns', u'http://www.metronieuws.nl/rss.xml?c=1277377288-17'),
        (u'Entertainment', u'http://www.metronieuws.nl/rss.xml?c=1277377288-2'),
        (u'Dot', u'http://www.metronieuws.nl/rss.xml?c=1283166782-12'),
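A side note, not part of the diff: the third preprocess_regexps entry above implements the "changed handling of self closing span" item from the recipe's changelog. It moves the word that precedes a self-closed <span /> inside a properly closed span. A minimal standalone sketch of that substitution on a made-up HTML fragment:

import re

snippet = '<p>word<span class="x" /> rest</p>'   # hypothetical input fragment
pat = re.compile(r'([\s>])([^\s>]+)(<span[^>]+) />', re.DOTALL | re.IGNORECASE)
fixed = pat.sub(lambda m: m.group(1) + m.group(3) + '>' + m.group(2) + '</span>', snippet)
print(fixed)   # -> <p><span class="x">word</span> rest</p>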
@@ -8,7 +8,7 @@ class AdvancedUserRecipe1294342201(BasicNewsRecipe):
    title = u'New London Day'
    __author__ = 'Being'
    description = 'State, local and business news from New London, CT'
    language = 'en_GB'
    language = 'en'
    oldest_article = 1
    max_articles_per_feed = 200
@@ -10,9 +10,8 @@ class AdvancedUserRecipe1286819935(BasicNewsRecipe):
    remove_attributes = ['style']
    language = 'ru'

    feeds = [(u'Articles', u'http://www.novayagazeta.ru/rss_number.xml')]
    feeds = [(u'Articles', u'http://www.novayagazeta.ru/rss/all.xml')]


    def print_version(self, url):
        return url + '?print=true'

        return '%s%s' % (url, '?print=1')
22  recipes/silicon_republic.recipe  Normal file
@@ -0,0 +1,22 @@
__license__ = 'GPL v3'
__copyright__ = '2011 Neil Grogan'
#
# Silicon Republic Recipe
#

from calibre.web.feeds.news import BasicNewsRecipe

class SiliconRepublic(BasicNewsRecipe):
    title = u'Silicon Republic'
    oldest_article = 7
    max_articles_per_feed = 100
    __author__ = u'Neil Grogan'
    language = 'en_IE'

    remove_tags = [dict(attrs={'class':['thumb','txt','compactbox','icons','catlist','catlistinner','taglist','taglistinner','social','also-in','also-in-inner','also-in-footer','zonek-dfp','paneladvert','rcadvert','panel','h2b']}),
        dict(id=['header','logo','header-right','sitesearch','rsslinks','topnav','topvideos','topvideos-list','topnews','topnews-list','slideshow','slides','compactheader','compactnews','compactfeatures','article-type','contactlinks-header','banner-zone-k-dfp','footer-related','directory-services','also-in-section','featuredrelated1','featuredrelated2','featuredrelated3','featuredrelated4','advert2-dfp']),
        dict(name=['script', 'style'])]


    feeds = [(u'News', u'http://www.siliconrepublic.com/feeds/')]
@@ -12,21 +12,18 @@ from calibre.web.feeds.news import BasicNewsRecipe
class DailyTelegraph(BasicNewsRecipe):
    title = u'The Australian'
    __author__ = u'Matthew Briggs and Sujata Raman'
    description = u'National broadsheet newspaper from down under - colloquially known as The Oz'
    description = (u'National broadsheet newspaper from down under - colloquially known as The Oz'
        '. You will need to have a subscription to '
        'http://www.theaustralian.com.au to get full articles.')
    language = 'en_AU'

    oldest_article = 2
    needs_subscription = 'optional'
    max_articles_per_feed = 30
    remove_javascript = True
    no_stylesheets = True
    encoding = 'utf8'

    html2lrf_options = [
        '--comment' , description
        , '--category' , 'news, Australia'
        , '--publisher' , title
        ]

    keep_only_tags = [dict(name='div', attrs={'id': 'story'})]

    #remove_tags = [dict(name=['object','link'])]
@@ -67,6 +64,19 @@ class DailyTelegraph(BasicNewsRecipe):
        (u'Commercial Property', u'http://feeds.news.com.au/public/rss/2.0/aus_business_commercial_property_708.xml'),
        (u'Mining', u'http://feeds.news.com.au/public/rss/2.0/aus_business_mining_704.xml')]

    def get_browser(self):
        br = BasicNewsRecipe.get_browser(self)
        if self.username and self.password:
            br.open('http://www.theaustralian.com.au')
            br.select_form(nr=0)
            br['username'] = self.username
            br['password'] = self.password
            raw = br.submit().read()
            if '>log out' not in raw.lower():
                raise ValueError('Failed to log in to www.theaustralian.com.au'
                    ' are your username and password correct?')
        return br

    def get_article_url(self, article):
        return article.id

@@ -76,14 +86,4 @@ class DailyTelegraph(BasicNewsRecipe):

        #return br.geturl()

    def get_cover_url(self):

        href = 'http://www.theaustralian.news.com.au/'

        soup = self.index_to_soup(href)
        img = soup.find('img',alt ="AUS HP promo digital2")
        print img
        if img :
            cover_url = img['src']

        return cover_url
@@ -7,15 +7,15 @@ msgid ""
msgstr ""
"Project-Id-Version: calibre\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-09-02 16:21+0000\n"
"PO-Revision-Date: 2011-09-21 13:48+0000\n"
"Last-Translator: Jellby <Unknown>\n"
"POT-Creation-Date: 2011-09-27 14:31+0000\n"
"PO-Revision-Date: 2011-10-22 22:04+0000\n"
"Last-Translator: Fitoschido <fitoschido@gmail.com>\n"
"Language-Team: Spanish <es@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-09-22 04:47+0000\n"
"X-Generator: Launchpad (build 13996)\n"
"X-Launchpad-Export-Date: 2011-10-23 05:13+0000\n"
"X-Generator: Launchpad (build 14170)\n"

#. name for aaa
msgid "Ghotuo"
@@ -5911,7 +5911,7 @@ msgstr "Gwahatike"

#. name for dai
msgid "Day"
msgstr "Day"
msgstr "Día"

#. name for daj
msgid "Daju; Dar Fur"
@@ -18231,7 +18231,7 @@ msgstr ""

#. name for nhi
msgid "Nahuatl; Zacatlán-Ahuacatlán-Tepetzintla"
msgstr "Náhuatl de Zacatlán; Ahuacatlán y Tepetzintla"
msgstr "Náhuatl de Zacatlán-Ahuacatlán-Tepetzintla"

#. name for nhk
msgid "Nahuatl; Isthmus-Cosoleacaque"
@@ -10,14 +10,14 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-09-27 14:31+0000\n"
"PO-Revision-Date: 2011-09-27 18:36+0000\n"
"Last-Translator: Kovid Goyal <Unknown>\n"
"PO-Revision-Date: 2011-10-25 19:06+0000\n"
"Last-Translator: zeugma <Unknown>\n"
"Language-Team: Turkish <gnome-turk@gnome.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-09-28 05:12+0000\n"
"X-Generator: Launchpad (build 14049)\n"
"X-Launchpad-Export-Date: 2011-10-26 05:13+0000\n"
"X-Generator: Launchpad (build 14189)\n"
"Language: tr\n"

#. name for aaa
@@ -54,7 +54,7 @@ msgstr ""

#. name for aai
msgid "Arifama-Miniafia"
msgstr ""
msgstr "Arifama-Miniafia"

#. name for aak
msgid "Ankave"
@@ -122,7 +122,7 @@ msgstr "Bankon"

#. name for abc
msgid "Ayta; Ambala"
msgstr ""
msgstr "Ayta; Ambala"

#. name for abd
msgid "Manide"
@@ -130,11 +130,11 @@ msgstr "Manide"

#. name for abe
msgid "Abnaki; Western"
msgstr ""
msgstr "Abnaki; Western"

#. name for abf
msgid "Abai Sungai"
msgstr ""
msgstr "Abai Sungai"

#. name for abg
msgid "Abaga"
@@ -146,7 +146,7 @@ msgstr "Arapça; Tacikçe"

#. name for abi
msgid "Abidji"
msgstr ""
msgstr "Abidji"

#. name for abj
msgid "Aka-Bea"
@@ -158,7 +158,7 @@ msgstr "Abhazca"

#. name for abl
msgid "Lampung Nyo"
msgstr ""
msgstr "Lampung Nyo"

#. name for abm
msgid "Abanyom"
@@ -282,7 +282,7 @@ msgstr "Achterhoeks"

#. name for acu
msgid "Achuar-Shiwiar"
msgstr ""
msgstr "Achuar-Shiwiar"

#. name for acv
msgid "Achumawi"
112  src/calibre/devices/kobo/bookmark.py  Normal file
@@ -0,0 +1,112 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

__license__ = 'GPL v3'
__copyright__ = '2011, Timothy Legge <timlegge@gmail.com> and Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import os
from contextlib import closing

import sqlite3 as sqlite

class Bookmark(): # {{{
    '''
    A simple class fetching bookmark data
    kobo-specific
    '''
    def __init__(self, db_path, contentid, path, id, book_format, bookmark_extension):
        self.book_format = book_format
        self.bookmark_extension = bookmark_extension
        self.book_length = 0 # Not Used
        self.id = id
        self.last_read = 0
        self.last_read_location = 0 # Not Used
        self.path = path
        self.timestamp = 0
        self.user_notes = None
        self.db_path = db_path
        self.contentid = contentid
        self.percent_read = 0
        self.get_bookmark_data()
        self.get_book_length() # Not Used

    def get_bookmark_data(self):
        ''' Return the timestamp and last_read_location '''

        user_notes = {}
        self.timestamp = os.path.getmtime(self.path)
        with closing(sqlite.connect(self.db_path)) as connection:
            # return bytestrings if the content cannot be decoded as unicode
            connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")

            cursor = connection.cursor()
            t = (self.contentid,)

            cursor.execute('select bm.bookmarkid, bm.contentid, bm.volumeid, '
                    'bm.text, bm.annotation, bm.ChapterProgress, '
                    'bm.StartContainerChildIndex, bm.StartOffset, c.BookTitle, '
                    'c.TITLE, c.volumeIndex, c.___NumPages '
                    'from Bookmark bm inner join Content c on '
                    'bm.contentid = c.contentid and '
                    'bm.volumeid = ? order by bm.volumeid, bm.chapterprogress', t)

            previous_chapter = 0
            bm_count = 0
            for row in cursor:
                current_chapter = row[10]
                if previous_chapter == current_chapter:
                    bm_count = bm_count + 1
                else:
                    bm_count = 0

                text = row[3]
                annotation = row[4]

                # A dog ear (bent upper right corner) is a bookmark
                if row[6] == row[7] == 0: # StartContainerChildIndex = StartOffset = 0
                    e_type = 'Bookmark'
                    text = row[9]
                # highlight is text with no annotation
                elif text is not None and (annotation is None or annotation == ""):
                    e_type = 'Highlight'
                elif text and annotation:
                    e_type = 'Annotation'
                else:
                    e_type = 'Unknown annotation type'

                note_id = row[10] + bm_count
                chapter_title = row[9]
                # book_title = row[8]
                chapter_progress = min(round(float(100*row[5]),2),100)
                user_notes[note_id] = dict(id=self.id,
                        displayed_location=note_id,
                        type=e_type,
                        text=text,
                        annotation=annotation,
                        chapter=row[10],
                        chapter_title=chapter_title,
                        chapter_progress=chapter_progress)
                previous_chapter = row[10]
                # debug_print("e_type:" , e_type, '\t', 'loc: ', note_id, 'text: ', text,
                # 'annotation: ', annotation, 'chapter_title: ', chapter_title,
                # 'chapter_progress: ', chapter_progress, 'date: ')

            cursor.execute('select datelastread, ___PercentRead from content '
                    'where bookid is Null and '
                    'contentid = ?', t)
            for row in cursor:
                self.last_read = row[0]
                self.percent_read = row[1]
                # print row[1]
            cursor.close()

        # self.last_read_location = self.last_read - self.pdf_page_offset
        self.user_notes = user_notes


    def get_book_length(self):
        #TL self.book_length = 0
        #TL self.book_length = int(unpack('>I', record0[0x04:0x08])[0])
        pass

# }}}
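A rough usage sketch, not from the commit: the paths and ids below are hypothetical placeholders for values the KOBO driver's get_annotations() (shown further down in this diff) computes from the connected device. It only illustrates how the Bookmark class above is constructed and how its collected notes can be read back.

from calibre.devices.kobo.bookmark import Bookmark

# Hypothetical device paths/ids, for illustration only.
db_path = '/media/KOBOeReader/.kobo/KoboReader.sqlite'
content_id = '/mnt/onboard/mybook.epub'
book_path = '/media/KOBOeReader/mybook.epub'

bm = Bookmark(db_path, content_id, book_path, 42, 'epub', 'epub')
print('%d%% read, last read %s' % (bm.percent_read, bm.last_read))
for loc in sorted(bm.user_notes or {}):
    note = bm.user_notes[loc]
    print('%s in "%s": %s' % (note['type'], note['chapter_title'], note['text']))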
@@ -2,15 +2,16 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

__license__ = 'GPL v3'
__copyright__ = '2010, Timothy Legge <timlegge at gmail.com> and Kovid Goyal <kovid@kovidgoyal.net>'
__copyright__ = '2010, Timothy Legge <timlegge@gmail.com> and Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import os
import os, time, calendar
import sqlite3 as sqlite
from contextlib import closing
from calibre.devices.usbms.books import BookList
from calibre.devices.kobo.books import Book
from calibre.devices.kobo.books import ImageWrapper
from calibre.devices.kobo.bookmark import Bookmark
from calibre.devices.mime import mime_type_ext
from calibre.devices.usbms.driver import USBMS, debug_print
from calibre import prints
@@ -24,7 +25,7 @@ class KOBO(USBMS):
    gui_name = 'Kobo Reader'
    description = _('Communicate with the Kobo Reader')
    author = 'Timothy Legge'
    version = (1, 0, 10)
    version = (1, 0, 11)

    dbversion = 0
    fwversion = 0
@@ -47,6 +48,7 @@ class KOBO(USBMS):

    EBOOK_DIR_MAIN = ''
    SUPPORTS_SUB_DIRS = True
    SUPPORTS_ANNOTATIONS = True

    VIRTUAL_BOOK_EXTENSIONS = frozenset(['kobo'])

@@ -77,11 +79,6 @@ class KOBO(USBMS):
        self.book_class = Book
        self.dbversion = 7

    def create_annotations_path(self, mdata, device_path=None):
        if device_path:
            return device_path
        return USBMS.create_annotations_path(self, mdata)

    def books(self, oncard=None, end_session=True):
        from calibre.ebooks.metadata.meta import path_to_ext

@@ -111,6 +108,7 @@ class KOBO(USBMS):

        if self.fwversion != '1.0' and self.fwversion != '1.4':
            self.has_kepubs = True
        debug_print('Version of driver: ', self.version, 'Has kepubs:', self.has_kepubs)
        debug_print('Version of firmware: ', self.fwversion, 'Has kepubs:', self.has_kepubs)

        self.booklist_class.rebuild_collections = self.rebuild_collections
@@ -893,3 +891,198 @@ class KOBO(USBMS):
                tf.write(r.read())
                paths[idx] = tf.name
        return paths

    def create_annotations_path(self, mdata, device_path=None):
        if device_path:
            return device_path
        return USBMS.create_annotations_path(self, mdata)

    def get_annotations(self, path_map):
        EPUB_FORMATS = [u'epub']
        epub_formats = set(EPUB_FORMATS)

        def get_storage():
            storage = []
            if self._main_prefix:
                storage.append(os.path.join(self._main_prefix, self.EBOOK_DIR_MAIN))
            if self._card_a_prefix:
                storage.append(os.path.join(self._card_a_prefix, self.EBOOK_DIR_CARD_A))
            if self._card_b_prefix:
                storage.append(os.path.join(self._card_b_prefix, self.EBOOK_DIR_CARD_B))
            return storage

        def resolve_bookmark_paths(storage, path_map):
            pop_list = []
            book_ext = {}
            for id in path_map:
                file_fmts = set()
                for fmt in path_map[id]['fmts']:
                    file_fmts.add(fmt)
                bookmark_extension = None
                if file_fmts.intersection(epub_formats):
                    book_extension = list(file_fmts.intersection(epub_formats))[0]
                    bookmark_extension = 'epub'

                if bookmark_extension:
                    for vol in storage:
                        bkmk_path = path_map[id]['path']
                        bkmk_path = bkmk_path
                        if os.path.exists(bkmk_path):
                            path_map[id] = bkmk_path
                            book_ext[id] = book_extension
                            break
                        else:
                            pop_list.append(id)
                else:
                    pop_list.append(id)

            # Remove non-existent bookmark templates
            for id in pop_list:
                path_map.pop(id)
            return path_map, book_ext

        storage = get_storage()
        path_map, book_ext = resolve_bookmark_paths(storage, path_map)

        bookmarked_books = {}
        for id in path_map:
            extension = os.path.splitext(path_map[id])[1]
            ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(path_map[id])
            ContentID = self.contentid_from_path(path_map[id], ContentType)

            bookmark_ext = extension

            db_path = self.normalize_path(self._main_prefix + '.kobo/KoboReader.sqlite')
            myBookmark = Bookmark(db_path, ContentID, path_map[id], id, book_ext[id], bookmark_ext)
            bookmarked_books[id] = self.UserAnnotation(type='kobo_bookmark', value=myBookmark)

        # This returns as job.result in gui2.ui.annotations_fetched(self,job)
        return bookmarked_books

    def generate_annotation_html(self, bookmark):
        from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag, NavigableString
        # Returns <div class="user_annotations"> ... </div>
        #last_read_location = bookmark.last_read_location
        #timestamp = bookmark.timestamp
        percent_read = bookmark.percent_read
        debug_print("Date: ", bookmark.last_read)
        if bookmark.last_read is not None:
            try:
                last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%S"))))
            except:
                last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%S.%f"))))
        else:
            #self.datetime = time.gmtime()
            last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())

        # debug_print("Percent read: ", percent_read)
        ka_soup = BeautifulSoup()
        dtc = 0
        divTag = Tag(ka_soup,'div')
        divTag['class'] = 'user_annotations'

        # Add the last-read location
        spanTag = Tag(ka_soup, 'span')
        spanTag['style'] = 'font-weight:normal'
        if bookmark.book_format == 'epub':
            spanTag.insert(0,NavigableString(
                _("<hr /><b>Book Last Read:</b> %(time)s<br /><b>Percentage Read:</b> %(pr)d%%<hr />") % \
                    dict(time=last_read,
                    #loc=last_read_location,
                    pr=percent_read)))
        else:
            spanTag.insert(0,NavigableString(
                _("<hr /><b>Book Last Read:</b> %(time)s<br /><b>Percentage Read:</b> %(pr)d%%<hr />") % \
                    dict(time=last_read,
                    #loc=last_read_location,
                    pr=percent_read)))

        divTag.insert(dtc, spanTag)
        dtc += 1
        divTag.insert(dtc, Tag(ka_soup,'br'))
        dtc += 1

        if bookmark.user_notes:
            user_notes = bookmark.user_notes
            annotations = []

            # Add the annotations sorted by location
            for location in sorted(user_notes):
                if user_notes[location]['type'] == 'Bookmark':
                    annotations.append(
                        _('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br />%(annotation)s<br /><hr />') % \
                            dict(chapter=user_notes[location]['chapter'],
                            dl=user_notes[location]['displayed_location'],
                            typ=user_notes[location]['type'],
                            chapter_title=user_notes[location]['chapter_title'],
                            chapter_progress=user_notes[location]['chapter_progress'],
                            annotation=user_notes[location]['annotation'] if user_notes[location]['annotation'] is not None else ""))
                elif user_notes[location]['type'] == 'Highlight':
                    annotations.append(
                        _('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br /><hr />') % \
                            dict(chapter=user_notes[location]['chapter'],
                            dl=user_notes[location]['displayed_location'],
                            typ=user_notes[location]['type'],
                            chapter_title=user_notes[location]['chapter_title'],
                            chapter_progress=user_notes[location]['chapter_progress'],
                            text=user_notes[location]['text']))
                elif user_notes[location]['type'] == 'Annotation':
                    annotations.append(
                        _('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br /><b>Notes:</b> %(annotation)s<br /><hr />') % \
                            dict(chapter=user_notes[location]['chapter'],
                            dl=user_notes[location]['displayed_location'],
                            typ=user_notes[location]['type'],
                            chapter_title=user_notes[location]['chapter_title'],
                            chapter_progress=user_notes[location]['chapter_progress'],
                            text=user_notes[location]['text'],
                            annotation=user_notes[location]['annotation']))
                else:
                    annotations.append(
                        _('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br /><b>Notes:</b> %(annotation)s<br /><hr />') % \
                            dict(chapter=user_notes[location]['chapter'],
                            dl=user_notes[location]['displayed_location'],
                            typ=user_notes[location]['type'],
                            chapter_title=user_notes[location]['chapter_title'],
                            chapter_progress=user_notes[location]['chapter_progress'],
                            text=user_notes[location]['text'], \
                            annotation=user_notes[location]['annotation']))

            for annotation in annotations:
                divTag.insert(dtc, annotation)
                dtc += 1

        ka_soup.insert(0,divTag)
        return ka_soup

    def add_annotation_to_library(self, db, db_id, annotation):
        from calibre.ebooks.BeautifulSoup import Tag
        bm = annotation
        ignore_tags = set(['Catalog', 'Clippings'])

        if bm.type == 'kobo_bookmark':
            mi = db.get_metadata(db_id, index_is_id=True)
            user_notes_soup = self.generate_annotation_html(bm.value)
            if mi.comments:
                a_offset = mi.comments.find('<div class="user_annotations">')
                ad_offset = mi.comments.find('<hr class="annotations_divider" />')

                if a_offset >= 0:
                    mi.comments = mi.comments[:a_offset]
                if ad_offset >= 0:
                    mi.comments = mi.comments[:ad_offset]
                if set(mi.tags).intersection(ignore_tags):
                    return
                if mi.comments:
                    hrTag = Tag(user_notes_soup,'hr')
                    hrTag['class'] = 'annotations_divider'
                    user_notes_soup.insert(0, hrTag)

                mi.comments += unicode(user_notes_soup.prettify())
            else:
                mi.comments = unicode(user_notes_soup.prettify())
            # Update library comments
            db.set_comment(db_id, mi.comments)

            # Add bookmark file to db_id
            db.add_format_with_hooks(db_id, bm.value.bookmark_extension,
                    bm.value.path, index_is_id=True)
@@ -127,7 +127,7 @@ class FB2Input(InputFormatPlugin):
    def extract_embedded_content(self, doc):
        self.binary_map = {}
        for elem in doc.xpath('./*'):
            if 'binary' in elem.tag and elem.attrib.has_key('id'):
            if elem.text and 'binary' in elem.tag and elem.attrib.has_key('id'):
                ct = elem.get('content-type', '')
                fname = elem.attrib['id']
                ext = ct.rpartition('/')[-1].lower()
@@ -138,6 +138,7 @@ class MobiMLizer(object):
        self.mobimlize_elem(body, stylizer, BlockState(nbody),
                [FormatState()])
        item.data = nroot
        #print etree.tostring(nroot)

    def mobimlize_font(self, ptsize):
        return self.fnums[self.fmap[ptsize]]
@@ -233,9 +234,19 @@ class MobiMLizer(object):
        elif tag in TABLE_TAGS:
            para.attrib['valign'] = 'top'
        if istate.ids:
            last = bstate.body[-1]
            for id in istate.ids:
                last.addprevious(etree.Element(XHTML('a'), attrib={'id': id}))
            for id_ in istate.ids:
                anchor = etree.Element(XHTML('a'), attrib={'id': id_})
                if tag == 'li':
                    try:
                        last = bstate.body[-1][-1]
                    except:
                        break
                    last.insert(0, anchor)
                    anchor.tail = last.text
                    last.text = None
                else:
                    last = bstate.body[-1]
                    last.addprevious(anchor)
            istate.ids.clear()
        if not text:
            return
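For orientation, not part of the diff: in the 'li' branch above the anchor element is placed inside the last generated list item instead of in front of the current block, with the item's text moved onto the anchor's tail. A standalone lxml sketch (element names invented for illustration) producing the same effect:

from lxml import etree

li = etree.fromstring('<li>Chapter one</li>')
anchor = etree.Element('a', attrib={'id': 'toc-1'})

# Same juggling as the 'li' branch: anchor becomes the first child and the
# original item text follows it as the anchor's tail.
anchor.tail = li.text
li.text = None
li.insert(0, anchor)

print(etree.tostring(li))   # b'<li><a id="toc-1"/>Chapter one</li>'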
@@ -601,7 +601,7 @@ class MobiWriter(object):
        Write the PalmDB header
        '''
        title = ascii_filename(unicode(self.oeb.metadata.title[0])).replace(
                ' ', '_')[:32]
                ' ', '_')[:31]
        title = title + (b'\0' * (32 - len(title)))
        now = int(time.time())
        nrecords = len(self.records)
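A brief aside, not from the commit: the PalmDB name field is 32 bytes and is conventionally NUL-terminated, so truncating to 31 characters before padding presumably guarantees at least one trailing '\0', whereas [:32] would leave none for a 32-character title. A tiny illustration with a made-up title:

# Hypothetical 40-character title, for illustration only.
title = 'A_Very_Long_Book_Title_Padded_For_PalmDB'
t = title[:31]                      # keep at most 31 characters
t = t + ('\0' * (32 - len(t)))      # pad to exactly 32 bytes
assert len(t) == 32 and t.endswith('\0')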
@@ -74,7 +74,10 @@ class Extract(ODF2XHTML):
            style = style[0]
            css = style.text
            if css:
                style.text, sel_map = self.do_filter_css(css)
                css, sel_map = self.do_filter_css(css)
                if not isinstance(css, unicode):
                    css = css.decode('utf-8', 'ignore')
                style.text = css
        for x in root.xpath('//*[@class]'):
            extra = []
            orig = x.get('class')
@@ -326,6 +326,18 @@ class CoverView(QWidget): # {{{
        if id_ is not None:
            self.cover_removed.emit(id_)

    def update_tooltip(self, current_path):
        try:
            sz = self.pixmap.size()
        except:
            sz = QSize(0, 0)
        self.setToolTip(
            '<p>'+_('Double-click to open Book Details window') +
            '<br><br>' + _('Path') + ': ' + current_path +
            '<br><br>' + _('Cover size: %(width)d x %(height)d')%dict(
                width=sz.width(), height=sz.height())
        )

    # }}}

# Book Info {{{
@@ -561,16 +573,7 @@ class BookDetails(QWidget): # {{{

    def update_layout(self):
        self._layout.do_layout(self.rect())
        try:
            sz = self.cover_view.pixmap.size()
        except:
            sz = QSize(0, 0)
        self.setToolTip(
            '<p>'+_('Double-click to open Book Details window') +
            '<br><br>' + _('Path') + ': ' + self.current_path +
            '<br><br>' + _('Cover size: %(width)d x %(height)d')%dict(
                width=sz.width(), height=sz.height())
        )
        self.cover_view.update_tooltip(self.current_path)

    def reset_info(self):
        self.show_data(Metadata(_('Unknown')))
@@ -850,15 +850,16 @@ class DeviceMixin(object): # {{{
        self.refresh_ondevice()
        device_signals.device_metadata_available.emit()

    def refresh_ondevice(self, reset_only = False):
    def refresh_ondevice(self, reset_only=False):
        '''
        Force the library view to refresh, taking into consideration new
        device books information
        '''
        self.book_on_device(None, reset=True)
        if reset_only:
            return
        self.library_view.model().refresh_ondevice()
        with self.library_view.preserve_state():
            self.book_on_device(None, reset=True)
            if reset_only:
                return
            self.library_view.model().refresh_ondevice()

    # }}}

@@ -888,7 +889,6 @@ class DeviceMixin(object): # {{{
        # if set_books_in_library did not.
        if not self.set_books_in_library(self.booklists(), reset=True, add_as_step_to_job=job):
            self.upload_booklists(job)
        self.book_on_device(None, reset=True)
        # We need to reset the ondevice flags in the library. Use a big hammer,
        # so we don't need to worry about whether some succeeded or not.
        self.refresh_ondevice(reset_only=False)
@@ -1319,9 +1319,7 @@ class DeviceMixin(object): # {{{
        # If it does not, then do it here.
        if not self.set_books_in_library(self.booklists(), reset=True, add_as_step_to_job=job):
            self.upload_booklists(job)
        with self.library_view.preserve_selected_books:
            self.book_on_device(None, reset=True)
            self.refresh_ondevice()
        self.refresh_ondevice()

        view = self.card_a_view if on_card == 'carda' else \
                self.card_b_view if on_card == 'cardb' else self.memory_view
@@ -23,24 +23,43 @@ from calibre.gui2.library import DEFAULT_SORT
from calibre.constants import filesystem_encoding
from calibre import force_unicode

class PreserveSelection(object): # {{{
class PreserveViewState(object): # {{{

    '''
    Save the set of selected books at enter time. If at exit time there are no
    selected books, restore the previous selection.
    selected books, restore the previous selection, the previous current index
    and don't affect the scroll position.
    '''

    def __init__(self, view):
    def __init__(self, view, preserve_hpos=True, preserve_vpos=True):
        self.view = view
        self.selected_ids = []
        self.selected_ids = set()
        self.current_id = None
        self.preserve_hpos = preserve_hpos
        self.preserve_vpos = preserve_vpos
        self.vscroll = self.hscroll = 0

    def __enter__(self):
        self.selected_ids = self.view.get_selected_ids()
        try:
            self.selected_ids = self.view.get_selected_ids()
            self.current_id = self.view.current_id
            self.vscroll = self.view.verticalScrollBar().value()
            self.hscroll = self.view.horizontalScrollBar().value()
        except:
            import traceback
            traceback.print_exc()

    def __exit__(self, *args):
        current = self.view.get_selected_ids()
        if not current:
            self.view.select_rows(self.selected_ids, using_ids=True)
        if not current and self.selected_ids:
            if self.current_id is not None:
                self.view.current_id = self.current_id
            self.view.select_rows(self.selected_ids, using_ids=True,
                scroll=False, change_current=self.current_id is None)
            if self.preserve_vpos:
                self.view.verticalScrollBar().setValue(self.vscroll)
            if self.preserve_hpos:
                self.view.horizontalScrollBar().setValue(self.hscroll)
# }}}

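A usage note, not part of the diff: BooksView below binds this class as preserve_state (a partial application of PreserveViewState to the view), and DeviceMixin.refresh_ondevice in the hunk above wraps its refresh in it. A minimal sketch of the pattern, where 'view' stands for any BooksView instance:

# Sketch only: 'view' is assumed to be a BooksView.
with view.preserve_state():
    # ... repopulate or refresh the model here ...
    view.model().refresh_ondevice()
# On exit, the previous selection, current row and scroll positions are
# restored if the refresh left nothing selected.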
class BooksView(QTableView): # {{{
@@ -104,7 +123,7 @@ class BooksView(QTableView): # {{{
        self.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.setSortingEnabled(True)
        self.selectionModel().currentRowChanged.connect(self._model.current_changed)
        self.preserve_selected_books = PreserveSelection(self)
        self.preserve_state = partial(PreserveViewState, self)

        # {{{ Column Header setup
        self.can_add_columns = True
@@ -788,6 +807,23 @@ class BooksView(QTableView): # {{{
            ans.append(i)
        return ans

    @dynamic_property
    def current_id(self):
        def fget(self):
            try:
                return self.model().id(self.currentIndex())
            except:
                pass
            return None
        def fset(self, val):
            if val is None: return
            m = self.model()
            for row in xrange(m.rowCount(QModelIndex())):
                if m.id(row) == val:
                    self.set_current_row(row, select=False)
                    break
        return property(fget=fget, fset=fset)

    def close(self):
        self._model.close()
@@ -30,6 +30,13 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
                (_('Automatic management'), 'on_connect')]
        r('manage_device_metadata', prefs, choices=choices)

        if gui.device_manager.is_device_connected:
            self.opt_manage_device_metadata.setEnabled(False)
            self.opt_manage_device_metadata.setToolTip(
                _('Cannot change metadata management while a device is connected'))
            self.mm_label.setText('Metadata management (disabled while '
                'device connected)')

        self.send_template.changed_signal.connect(self.changed_signal.emit)
@@ -15,7 +15,7 @@
   </property>
   <layout class="QGridLayout" name="gridLayout">
    <item row="0" column="0">
     <widget class="QLabel" name="label_4">
     <widget class="QLabel" name="mm_label">
      <property name="sizePolicy">
       <sizepolicy hsizetype="Maximum" vsizetype="Preferred">
        <horstretch>0</horstretch>
(70 more file diffs suppressed because they are too large.)