mirror of
https://github.com/kovidgoyal/calibre.git
synced 2025-07-09 03:04:10 -04:00
Sync to trunk.
This commit is contained in:
commit
88c7c99939
BIN
resources/images/news/DrawAndCook.png
Normal file
BIN
resources/images/news/DrawAndCook.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 575 B |
@ -1,8 +1,11 @@
|
||||
from calibre.web.feeds.news import BasicNewsRecipe
|
||||
import re
|
||||
|
||||
class DrawAndCook(BasicNewsRecipe):
|
||||
title = 'DrawAndCook'
|
||||
__author__ = 'Starson17'
|
||||
__version__ = 'v1.10'
|
||||
__date__ = '13 March 2011'
|
||||
description = 'Drawings of recipes!'
|
||||
language = 'en'
|
||||
publisher = 'Starson17'
|
||||
@ -13,6 +16,7 @@ class DrawAndCook(BasicNewsRecipe):
|
||||
remove_javascript = True
|
||||
remove_empty_feeds = True
|
||||
cover_url = 'http://farm5.static.flickr.com/4043/4471139063_4dafced67f_o.jpg'
|
||||
INDEX = 'http://www.theydrawandcook.com'
|
||||
max_articles_per_feed = 30
|
||||
|
||||
remove_attributes = ['style', 'font']
|
||||
@ -34,20 +38,21 @@ class DrawAndCook(BasicNewsRecipe):
|
||||
date = ''
|
||||
current_articles = []
|
||||
soup = self.index_to_soup(url)
|
||||
recipes = soup.findAll('div', attrs={'class': 'date-outer'})
|
||||
featured_major_slider = soup.find(name='div', attrs={'id':'featured_major_slider'})
|
||||
recipes = featured_major_slider.findAll('li', attrs={'data-id': re.compile(r'artwork_entry_\d+', re.DOTALL)})
|
||||
for recipe in recipes:
|
||||
title = recipe.h3.a.string
|
||||
page_url = recipe.h3.a['href']
|
||||
page_url = self.INDEX + recipe.a['href']
|
||||
print 'page_url is: ', page_url
|
||||
title = recipe.find('strong').string
|
||||
print 'title is: ', title
|
||||
current_articles.append({'title': title, 'url': page_url, 'description':'', 'date':date})
|
||||
return current_articles
|
||||
|
||||
|
||||
keep_only_tags = [dict(name='h3', attrs={'class':'post-title entry-title'})
|
||||
,dict(name='div', attrs={'class':'post-body entry-content'})
|
||||
keep_only_tags = [dict(name='h1', attrs={'id':'page_title'})
|
||||
,dict(name='section', attrs={'id':'artwork'})
|
||||
]
|
||||
|
||||
remove_tags = [dict(name='div', attrs={'class':['separator']})
|
||||
,dict(name='div', attrs={'class':['post-share-buttons']})
|
||||
remove_tags = [dict(name='article', attrs={'id':['recipe_actions', 'metadata']})
|
||||
]
|
||||
|
||||
extra_css = '''
|
||||
|
@ -1,23 +1,12 @@
|
||||
__license__ = 'GPL v3'
|
||||
__copyright__ = '2009-2010, Darko Miletic <darko.miletic at gmail.com>'
|
||||
'''
|
||||
www.instapaper.com
|
||||
'''
|
||||
|
||||
import urllib
|
||||
from calibre import strftime
|
||||
from calibre.web.feeds.news import BasicNewsRecipe
|
||||
|
||||
class Instapaper(BasicNewsRecipe):
|
||||
title = 'Instapaper.com'
|
||||
class AdvancedUserRecipe1299694372(BasicNewsRecipe):
|
||||
title = u'Instapaper'
|
||||
__author__ = 'Darko Miletic'
|
||||
description = '''Personalized news feeds. Go to instapaper.com to
|
||||
setup up your news. Fill in your instapaper
|
||||
username, and leave the password field
|
||||
below blank.'''
|
||||
publisher = 'Instapaper.com'
|
||||
category = 'news, custom'
|
||||
oldest_article = 7
|
||||
category = 'info, custom, Instapaper'
|
||||
oldest_article = 365
|
||||
max_articles_per_feed = 100
|
||||
no_stylesheets = True
|
||||
use_embedded_content = False
|
||||
@ -25,16 +14,9 @@ class Instapaper(BasicNewsRecipe):
|
||||
INDEX = u'http://www.instapaper.com'
|
||||
LOGIN = INDEX + u'/user/login'
|
||||
|
||||
conversion_options = {
|
||||
'comment' : description
|
||||
, 'tags' : category
|
||||
, 'publisher' : publisher
|
||||
}
|
||||
|
||||
feeds = [
|
||||
(u'Unread articles' , INDEX + u'/u' )
|
||||
,(u'Starred articles', INDEX + u'/starred')
|
||||
]
|
||||
|
||||
feeds = [(u'Instapaper Unread', u'http://www.instapaper.com/u'), (u'Instapaper Starred', u'http://www.instapaper.com/starred')]
|
||||
|
||||
def get_browser(self):
|
||||
br = BasicNewsRecipe.get_browser()
|
||||
@ -70,7 +52,3 @@ class Instapaper(BasicNewsRecipe):
|
||||
})
|
||||
totalfeeds.append((feedtitle, articles))
|
||||
return totalfeeds
|
||||
|
||||
def print_version(self, url):
|
||||
return self.INDEX + '/text?u=' + urllib.quote(url)
|
||||
|
||||
|
89
resources/recipes/modoros.recipe
Normal file
89
resources/recipes/modoros.recipe
Normal file
@ -0,0 +1,89 @@
|
||||
import re
|
||||
from calibre.web.feeds.recipes import BasicNewsRecipe
|
||||
from calibre.constants import config_dir, CONFIG_DIR_MODE
|
||||
import os, os.path, urllib
|
||||
from hashlib import md5
|
||||
|
||||
class ModorosBlogHu(BasicNewsRecipe):
|
||||
__author__ = 'Zsolt Botykai'
|
||||
title = u'Modoros Blog'
|
||||
description = u"Modoros.blog.hu"
|
||||
oldest_article = 10000
|
||||
max_articles_per_feed = 10000
|
||||
reverse_article_order = True
|
||||
language = 'hu'
|
||||
remove_javascript = True
|
||||
remove_empty_feeds = True
|
||||
no_stylesheets = True
|
||||
feeds = [(u'Modoros Blog', u'http://modoros.blog.hu/rss')]
|
||||
remove_javascript = True
|
||||
use_embedded_content = False
|
||||
preprocess_regexps = [
|
||||
(re.compile(r'<!--megosztas -->.*?</body>', re.DOTALL|re.IGNORECASE),
|
||||
lambda match: '</body>'),
|
||||
(re.compile(r'<p align="left"'), lambda m: '<p'),
|
||||
(re.compile(r'<noscript.+?noscript>', re.DOTALL|re.IGNORECASE), lambda m: ''),
|
||||
(re.compile(r'<img style="position: absolute;top:-10px.+?>', re.DOTALL|re.IGNORECASE), lambda m: ''),
|
||||
(re.compile(r'<p>( | )*?</p>', re.DOTALL|re.IGNORECASE), lambda match: ''),
|
||||
]
|
||||
extra_css = '''
|
||||
body { background-color: white; color: black }
|
||||
'''
|
||||
|
||||
|
||||
remove_tags = [
|
||||
dict(name='div', attrs={'id':['csucs']}) ,
|
||||
dict(name='img', attrs={'style':['position: absolute;top:-10px;left:-10px;']}) ,
|
||||
dict(name='div', attrs={'class':['tovabb-is-van', \
|
||||
'page-break', \
|
||||
'clear']}) ,
|
||||
dict(name='span', attrs={'class':['hozzaszolas-szamlalo']})
|
||||
]
|
||||
|
||||
masthead_url='http://modoros.blog.hu/media/skins/modoros-neon/img/modorosblog-felirat.png'
|
||||
|
||||
def get_cover_url(self):
|
||||
return 'http://modoros.blog.hu/media/skins/modoros-neon/img/modorosblog-felirat.png'
|
||||
|
||||
# As seen here: http://www.mobileread.com/forums/showpost.php?p=1295505&postcount=10
|
||||
def parse_feeds(self):
|
||||
recipe_dir = os.path.join(config_dir,'recipes')
|
||||
hash_dir = os.path.join(recipe_dir,'recipe_storage')
|
||||
feed_dir = os.path.join(hash_dir,self.title.encode('utf-8').replace('/',':'))
|
||||
if not os.path.isdir(feed_dir):
|
||||
os.makedirs(feed_dir,mode=CONFIG_DIR_MODE)
|
||||
|
||||
feeds = BasicNewsRecipe.parse_feeds(self)
|
||||
|
||||
for feed in feeds:
|
||||
feed_hash = urllib.quote(feed.title.encode('utf-8'),safe='')
|
||||
feed_fn = os.path.join(feed_dir,feed_hash)
|
||||
|
||||
past_items = set()
|
||||
if os.path.exists(feed_fn):
|
||||
with file(feed_fn) as f:
|
||||
for h in f:
|
||||
past_items.add(h.strip())
|
||||
|
||||
cur_items = set()
|
||||
for article in feed.articles[:]:
|
||||
item_hash = md5()
|
||||
if article.content: item_hash.update(article.content.encode('utf-8'))
|
||||
if article.summary: item_hash.update(article.summary.encode('utf-8'))
|
||||
item_hash = item_hash.hexdigest()
|
||||
if article.url:
|
||||
item_hash = article.url + ':' + item_hash
|
||||
cur_items.add(item_hash)
|
||||
if item_hash in past_items:
|
||||
feed.articles.remove(article)
|
||||
with file(feed_fn,'w') as f:
|
||||
for h in cur_items:
|
||||
f.write(h+'\n')
|
||||
|
||||
remove = [f for f in feeds if len(f) == 0 and
|
||||
self.remove_empty_feeds]
|
||||
for f in remove:
|
||||
feeds.remove(f)
|
||||
|
||||
return feeds
|
||||
|
109
resources/recipes/office_space.recipe
Normal file
109
resources/recipes/office_space.recipe
Normal file
@ -0,0 +1,109 @@
|
||||
import re
|
||||
from calibre.web.feeds.recipes import BasicNewsRecipe
|
||||
from calibre.constants import config_dir, CONFIG_DIR_MODE
|
||||
import os, os.path, urllib
|
||||
from hashlib import md5
|
||||
|
||||
class OfficeSpaceBlogHu(BasicNewsRecipe):
|
||||
__author__ = 'Zsolt Botykai'
|
||||
title = u'Office Space Blog'
|
||||
description = u"officespace.blog.hu"
|
||||
oldest_article = 10000
|
||||
max_articles_per_feed = 10000
|
||||
reverse_article_order = True
|
||||
language = 'hu'
|
||||
remove_javascript = True
|
||||
remove_empty_feeds = True
|
||||
no_stylesheets = True
|
||||
feeds = [(u'Office Space Blog', u'http://officespace.blog.hu/rss')]
|
||||
remove_javascript = True
|
||||
use_embedded_content = False
|
||||
title = u'Irodai patkényok'
|
||||
feeds = [(u'Office Space', u'http://officespace.blog.hu/rss')]
|
||||
|
||||
masthead_url='http://m.blog.hu/of/officespace/ipfejlec7.jpg'
|
||||
|
||||
keep_only_tags = [
|
||||
dict(name='div', attrs={'id':['mainWrapper']})
|
||||
]
|
||||
|
||||
# 1.: I like justified lines more
|
||||
# 2.: remove empty paragraphs
|
||||
# 3.: drop header and sidebar
|
||||
# 4.: drop comments counter
|
||||
# 5.: drop everything after article-tags
|
||||
# 6-8.: drop audit images
|
||||
|
||||
preprocess_regexps = [
|
||||
(re.compile(r'<p align="left"'), lambda m: '<p'),
|
||||
(re.compile(r'<p>( | )*?</p>', re.DOTALL|re.IGNORECASE), lambda match: ''),
|
||||
(re.compile(r'<body[^>]+>.*?<div id="mainIn"', re.DOTALL|re.IGNORECASE), lambda match: '<body><div id="mainIn"'),
|
||||
(re.compile(r'<h3 class="comments">.*?</h3>', re.DOTALL|re.IGNORECASE), lambda match: ''),
|
||||
(re.compile(r'<div class="related">.*?</body>', re.DOTALL|re.IGNORECASE), lambda match: '<body>'),
|
||||
(re.compile(r'<img style="position: absolute;" src="[^"]+pixel\?uc.*?>', re.DOTALL|re.IGNORECASE), lambda match: ''),
|
||||
(re.compile(r'<noscript.+?noscript>', re.DOTALL|re.IGNORECASE), lambda m: ''),
|
||||
(re.compile(r'<img style="position: absolute;top:-10px.+?>', re.DOTALL|re.IGNORECASE), lambda m: ''),
|
||||
]
|
||||
extra_css = '''
|
||||
body { background-color: white; color: black }
|
||||
'''
|
||||
|
||||
def get_cover_url(self):
|
||||
return 'http://m.blog.hu/of/officespace/ipfejlec7.jpg'
|
||||
|
||||
def preprocess_html(self, soup):
|
||||
for tagz in soup.findAll('h3', attrs={'class':'tags'}):
|
||||
for taglink in tagz.findAll('a'):
|
||||
if taglink.string is not None:
|
||||
tstr = taglink.string + ','
|
||||
taglink.replaceWith(tstr)
|
||||
|
||||
for alink in soup.findAll('a'):
|
||||
if alink.string is not None:
|
||||
tstr = alink.string
|
||||
alink.replaceWith(tstr)
|
||||
|
||||
return soup
|
||||
|
||||
# As seen here: http://www.mobileread.com/forums/showpost.php?p=1295505&postcount=10
|
||||
def parse_feeds(self):
|
||||
recipe_dir = os.path.join(config_dir,'recipes')
|
||||
hash_dir = os.path.join(recipe_dir,'recipe_storage')
|
||||
feed_dir = os.path.join(hash_dir,self.title.encode('utf-8').replace('/',':'))
|
||||
if not os.path.isdir(feed_dir):
|
||||
os.makedirs(feed_dir,mode=CONFIG_DIR_MODE)
|
||||
|
||||
feeds = BasicNewsRecipe.parse_feeds(self)
|
||||
|
||||
for feed in feeds:
|
||||
feed_hash = urllib.quote(feed.title.encode('utf-8'),safe='')
|
||||
feed_fn = os.path.join(feed_dir,feed_hash)
|
||||
|
||||
past_items = set()
|
||||
if os.path.exists(feed_fn):
|
||||
with file(feed_fn) as f:
|
||||
for h in f:
|
||||
past_items.add(h.strip())
|
||||
|
||||
cur_items = set()
|
||||
for article in feed.articles[:]:
|
||||
item_hash = md5()
|
||||
if article.content: item_hash.update(article.content.encode('utf-8'))
|
||||
if article.summary: item_hash.update(article.summary.encode('utf-8'))
|
||||
item_hash = item_hash.hexdigest()
|
||||
if article.url:
|
||||
item_hash = article.url + ':' + item_hash
|
||||
cur_items.add(item_hash)
|
||||
if item_hash in past_items:
|
||||
feed.articles.remove(article)
|
||||
with file(feed_fn,'w') as f:
|
||||
for h in cur_items:
|
||||
f.write(h+'\n')
|
||||
|
||||
remove = [f for f in feeds if len(f) == 0 and
|
||||
self.remove_empty_feeds]
|
||||
for f in remove:
|
||||
feeds.remove(f)
|
||||
|
||||
return feeds
|
||||
|
15
resources/recipes/pro_linux_de.recipe
Normal file
15
resources/recipes/pro_linux_de.recipe
Normal file
@ -0,0 +1,15 @@
|
||||
from calibre.web.feeds.news import BasicNewsRecipe
|
||||
|
||||
class AdvancedUserRecipe1295265555(BasicNewsRecipe):
|
||||
title = u'Pro-Linux.de'
|
||||
language = 'de'
|
||||
__author__ = 'Bobus'
|
||||
oldest_article = 3
|
||||
max_articles_per_feed = 100
|
||||
|
||||
feeds = [(u'Pro-Linux', u'http://www.pro-linux.de/backend/pro-linux.rdf')]
|
||||
|
||||
def print_version(self, url):
|
||||
return url.replace('/news/1/', '/news/1/print/').replace('/artikel/2/', '/artikel/2/print/')
|
||||
|
||||
remove_tags_after = [dict(name='div', attrs={'class':'print_links'})]
|
@ -1,24 +1,25 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from calibre.web.feeds.news import BasicNewsRecipe
|
||||
|
||||
class AdvancedUserRecipe1286819935(BasicNewsRecipe):
|
||||
class RBC_ru(BasicNewsRecipe):
|
||||
title = u'RBC.ru'
|
||||
__author__ = 'A. Chewi'
|
||||
oldest_article = 7
|
||||
max_articles_per_feed = 100
|
||||
description = u'Российское информационное агентство «РосБизнесКонсалтинг» (РБК) - ленты новостей политики, экономики и финансов, аналитические материалы, комментарии и прогнозы, тематические статьи'
|
||||
needs_subscription = False
|
||||
cover_url = 'http://pics.rbc.ru/img/fp_v4/skin/img/logo.gif'
|
||||
cover_margins = (80, 160, '#ffffff')
|
||||
oldest_article = 10
|
||||
max_articles_per_feed = 50
|
||||
summary_length = 200
|
||||
remove_empty_feeds = True
|
||||
no_stylesheets = True
|
||||
remove_javascript = True
|
||||
use_embedded_content = False
|
||||
conversion_options = {'linearize_tables' : True}
|
||||
remove_attributes = ['style']
|
||||
language = 'ru'
|
||||
timefmt = ' [%a, %d %b, %Y]'
|
||||
|
||||
keep_only_tags = [dict(name='h2', attrs={}),
|
||||
dict(name='div', attrs={'class': 'box _ga1_on_'}),
|
||||
dict(name='h1', attrs={'class': 'news_section'}),
|
||||
dict(name='div', attrs={'class': 'news_body dotted_border_bottom'}),
|
||||
dict(name='table', attrs={'class': 'newsBody'}),
|
||||
dict(name='h2', attrs={'class': 'black'})]
|
||||
|
||||
feeds = [(u'Главные новости', u'http://static.feed.rbc.ru/rbc/internal/rss.rbc.ru/rbc.ru/mainnews.rss'),
|
||||
(u'Политика', u'http://static.feed.rbc.ru/rbc/internal/rss.rbc.ru/rbc.ru/politics.rss'),
|
||||
(u'Экономика', u'http://static.feed.rbc.ru/rbc/internal/rss.rbc.ru/rbc.ru/economics.rss'),
|
||||
@ -26,6 +27,12 @@ class AdvancedUserRecipe1286819935(BasicNewsRecipe):
|
||||
(u'Происшествия', u'http://static.feed.rbc.ru/rbc/internal/rss.rbc.ru/rbc.ru/incidents.rss'),
|
||||
(u'Финансовые новости Quote.rbc.ru', u'http://static.feed.rbc.ru/rbc/internal/rss.rbc.ru/quote.ru/mainnews.rss')]
|
||||
|
||||
keep_only_tags = [dict(name='h2', attrs={}),
|
||||
dict(name='div', attrs={'class': 'box _ga1_on_'}),
|
||||
dict(name='h1', attrs={'class': 'news_section'}),
|
||||
dict(name='div', attrs={'class': 'news_body dotted_border_bottom'}),
|
||||
dict(name='table', attrs={'class': 'newsBody'}),
|
||||
dict(name='h2', attrs={'class': 'black'})]
|
||||
|
||||
remove_tags = [dict(name='div', attrs={'class': "video-frame"}),
|
||||
dict(name='div', attrs={'class': "photo-container videoContainer videoSWFLinks videoPreviewSlideContainer notes"}),
|
||||
|
@ -1,6 +1,7 @@
|
||||
from calibre.web.feeds.recipes import BasicNewsRecipe
|
||||
#from calibre.ebooks.BeautifulSoup import BeautifulSoup
|
||||
from urllib import quote
|
||||
import re
|
||||
|
||||
class SportsIllustratedRecipe(BasicNewsRecipe) :
|
||||
__author__ = 'kwetal'
|
||||
@ -15,57 +16,44 @@ class SportsIllustratedRecipe(BasicNewsRecipe) :
|
||||
remove_javascript = True
|
||||
use_embedded_content = False
|
||||
|
||||
INDEX = 'http://sportsillustrated.cnn.com/'
|
||||
INDEX = 'http://sportsillustrated.cnn.com/vault/cover/home/index.htm'
|
||||
|
||||
def parse_index(self):
|
||||
answer = []
|
||||
soup = self.index_to_soup(self.INDEX)
|
||||
# Find the link to the current issue on the front page. SI Cover
|
||||
cover = soup.find('img', attrs = {'alt' : 'Read All Articles', 'style' : 'vertical-align:bottom;'})
|
||||
if cover:
|
||||
currentIssue = cover.parent['href']
|
||||
if currentIssue:
|
||||
# Open the index of current issue
|
||||
|
||||
index = self.index_to_soup(currentIssue)
|
||||
self.log('\tLooking for current issue in: ' + currentIssue)
|
||||
# Now let us see if they updated their frontpage
|
||||
nav = index.find('div', attrs = {'class': 'siv_trav_top'})
|
||||
if nav:
|
||||
img = nav.find('img', attrs = {'src': 'http://i.cdn.turner.com/sivault/.element/img/1.0/btn_next_v2.jpg'})
|
||||
if img:
|
||||
parent = img.parent
|
||||
if parent.name == 'a':
|
||||
# They didn't update their frontpage; Load the next issue from here
|
||||
href = self.INDEX + parent['href']
|
||||
index = self.index_to_soup(href)
|
||||
self.log('\tLooking for current issue in: ' + href)
|
||||
#Loop through all of the "latest" covers until we find one that actually has articles
|
||||
for item in soup.findAll('div', attrs={'id': re.compile("ecomthumb_latest_*")}):
|
||||
regex = re.compile('ecomthumb_latest_(\d*)')
|
||||
result = regex.search(str(item))
|
||||
current_issue_number = str(result.group(1))
|
||||
current_issue_link = 'http://sportsillustrated.cnn.com/vault/cover/toc/' + current_issue_number + '/index.htm'
|
||||
self.log('Checking this link for a TOC: ', current_issue_link)
|
||||
|
||||
index = self.index_to_soup(current_issue_link)
|
||||
if index:
|
||||
if index.find('div', 'siv_noArticleMessage'):
|
||||
nav = index.find('div', attrs = {'class': 'siv_trav_top'})
|
||||
if nav:
|
||||
# Their frontpage points to an issue without any articles; Use the previous issue
|
||||
img = nav.find('img', attrs = {'src': 'http://i.cdn.turner.com/sivault/.element/img/1.0/btn_previous_v2.jpg'})
|
||||
if img:
|
||||
parent = img.parent
|
||||
if parent.name == 'a':
|
||||
href = self.INDEX + parent['href']
|
||||
index = self.index_to_soup(href)
|
||||
self.log('\tLooking for current issue in: ' + href)
|
||||
|
||||
self.log('No TOC for this one. Skipping...')
|
||||
else:
|
||||
self.log('Found a TOC... Using this link')
|
||||
break
|
||||
|
||||
# Find all articles.
|
||||
list = index.find('div', attrs = {'class' : 'siv_artList'})
|
||||
if list:
|
||||
self.log ('found siv_artList')
|
||||
articles = []
|
||||
# Get all the artcles ready for calibre.
|
||||
counter = 0
|
||||
for headline in list.findAll('div', attrs = {'class' : 'headline'}):
|
||||
counter = counter + 1
|
||||
title = self.tag_to_string(headline.a) + '\n' + self.tag_to_string(headline.findNextSibling('div', attrs = {'class' : 'info'}))
|
||||
url = self.INDEX + headline.a['href']
|
||||
description = self.tag_to_string(headline.findNextSibling('a').div)
|
||||
article = {'title' : title, 'date' : u'', 'url' : url, 'description' : description}
|
||||
|
||||
articles.append(article)
|
||||
if counter > 5:
|
||||
break
|
||||
|
||||
# See if we can find a meaningfull title
|
||||
feedTitle = 'Current Issue'
|
||||
@ -82,6 +70,7 @@ class SportsIllustratedRecipe(BasicNewsRecipe) :
|
||||
# This is the url and the parameters that work to get the print version.
|
||||
printUrl = 'http://si.printthis.clickability.com/pt/printThis?clickMap=printThis'
|
||||
printUrl += '&fb=Y&partnerID=2356&url=' + quote(url)
|
||||
self.log('PrintURL: ' , printUrl)
|
||||
|
||||
return printUrl
|
||||
|
||||
@ -116,4 +105,3 @@ class SportsIllustratedRecipe(BasicNewsRecipe) :
|
||||
|
||||
return homeMadeSoup
|
||||
'''
|
||||
|
||||
|
@ -3,7 +3,7 @@ __license__ = 'GPL v3'
|
||||
__copyright__ = '2008, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
||||
import uuid, sys, os, re, logging, time, \
|
||||
import uuid, sys, os, re, logging, time, random, \
|
||||
__builtin__, warnings, multiprocessing
|
||||
from urllib import getproxies
|
||||
__builtin__.__dict__['dynamic_property'] = lambda(func): func(None)
|
||||
@ -268,6 +268,17 @@ def get_parsed_proxy(typ='http', debug=True):
|
||||
prints('Using http proxy', str(ans))
|
||||
return ans
|
||||
|
||||
def random_user_agent():
|
||||
choices = [
|
||||
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.2.11) Gecko/20101012 Firefox/3.6.11'
|
||||
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)'
|
||||
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)'
|
||||
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)'
|
||||
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/0.2.153.1 Safari/525.19'
|
||||
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.2.11) Gecko/20101012 Firefox/3.6.11'
|
||||
]
|
||||
return choices[random.randint(0, len(choices)-1)]
|
||||
|
||||
|
||||
def browser(honor_time=True, max_time=2, mobile_browser=False, user_agent=None):
|
||||
'''
|
||||
|
@ -1031,7 +1031,8 @@ plugins += [LookAndFeel, Behavior, Columns, Toolbar, Search, InputOptions,
|
||||
|
||||
# New metadata download plugins {{{
|
||||
from calibre.ebooks.metadata.sources.google import GoogleBooks
|
||||
from calibre.ebooks.metadata.sources.amazon import Amazon
|
||||
|
||||
plugins += [GoogleBooks]
|
||||
plugins += [GoogleBooks, Amazon]
|
||||
|
||||
# }}}
|
||||
|
@ -47,7 +47,7 @@ def get_connected_device():
|
||||
|
||||
for d in connected_devices:
|
||||
try:
|
||||
d.open()
|
||||
d.open(None)
|
||||
except:
|
||||
continue
|
||||
else:
|
||||
@ -121,7 +121,7 @@ def debug(ioreg_to_tmp=False, buf=None):
|
||||
out('Trying to open', dev.name, '...', end=' ')
|
||||
try:
|
||||
dev.reset(detected_device=det)
|
||||
dev.open()
|
||||
dev.open(None)
|
||||
out('OK')
|
||||
except:
|
||||
import traceback
|
||||
|
@ -48,6 +48,7 @@ class ANDROID(USBMS):
|
||||
0x04e8 : { 0x681d : [0x0222, 0x0223, 0x0224, 0x0400],
|
||||
0x681c : [0x0222, 0x0224, 0x0400],
|
||||
0x6640 : [0x0100],
|
||||
0x6877 : [0x0400],
|
||||
},
|
||||
|
||||
# Acer
|
||||
@ -97,7 +98,7 @@ class ANDROID(USBMS):
|
||||
'SCH-I500_CARD', 'SPH-D700_CARD', 'MB810', 'GT-P1000', 'DESIRE',
|
||||
'SGH-T849', '_MB300', 'A70S', 'S_ANDROID', 'A101IT', 'A70H',
|
||||
'IDEOS_TABLET', 'MYTOUCH_4G', 'UMS_COMPOSITE', 'SCH-I800_CARD',
|
||||
'7', 'A956']
|
||||
'7', 'A956', 'A955']
|
||||
WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
|
||||
'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
|
||||
'A70S', 'A101IT', '7']
|
||||
|
@ -115,6 +115,8 @@ class KOBO(USBMS):
|
||||
playlist_map[lpath]= "Im_Reading"
|
||||
elif readstatus == 2:
|
||||
playlist_map[lpath]= "Read"
|
||||
elif readstatus == 3:
|
||||
playlist_map[lpath]= "Closed"
|
||||
|
||||
path = self.normalize_path(path)
|
||||
# print "Normalized FileName: " + path
|
||||
@ -599,11 +601,47 @@ class KOBO(USBMS):
|
||||
try:
|
||||
cursor.execute('update content set ReadStatus=2,FirstTimeReading=\'true\' where BookID is Null and ContentID = ?', t)
|
||||
except:
|
||||
debug_print('Database Exception: Unable set book as Rinished')
|
||||
debug_print('Database Exception: Unable set book as Finished')
|
||||
raise
|
||||
else:
|
||||
connection.commit()
|
||||
# debug_print('Database: Commit set ReadStatus as Finished')
|
||||
if category == 'Closed':
|
||||
# Reset Im_Reading list in the database
|
||||
if oncard == 'carda':
|
||||
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ReadStatus = 3 and ContentID like \'file:///mnt/sd/%\''
|
||||
elif oncard != 'carda' and oncard != 'cardb':
|
||||
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ReadStatus = 3 and ContentID not like \'file:///mnt/sd/%\''
|
||||
|
||||
try:
|
||||
cursor.execute (query)
|
||||
except:
|
||||
debug_print('Database Exception: Unable to reset Closed list')
|
||||
raise
|
||||
else:
|
||||
# debug_print('Commit: Reset Closed list')
|
||||
connection.commit()
|
||||
|
||||
for book in books:
|
||||
# debug_print('Title:', book.title, 'lpath:', book.path)
|
||||
book.device_collections = ['Closed']
|
||||
|
||||
extension = os.path.splitext(book.path)[1]
|
||||
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(book.path)
|
||||
|
||||
ContentID = self.contentid_from_path(book.path, ContentType)
|
||||
# datelastread = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime())
|
||||
|
||||
t = (ContentID,)
|
||||
|
||||
try:
|
||||
cursor.execute('update content set ReadStatus=3,FirstTimeReading=\'true\' where BookID is Null and ContentID = ?', t)
|
||||
except:
|
||||
debug_print('Database Exception: Unable set book as Closed')
|
||||
raise
|
||||
else:
|
||||
connection.commit()
|
||||
# debug_print('Database: Commit set ReadStatus as Closed')
|
||||
else: # No collections
|
||||
# Since no collections exist the ReadStatus needs to be reset to 0 (Unread)
|
||||
print "Reseting ReadStatus to 0"
|
||||
|
@ -221,7 +221,8 @@ class PRS505(USBMS):
|
||||
os.path.splitext(os.path.basename(p))[0],
|
||||
book, p)
|
||||
except:
|
||||
debug_print('FAILED to upload cover', p)
|
||||
debug_print('FAILED to upload cover',
|
||||
prefix, book.lpath)
|
||||
else:
|
||||
debug_print('PRS505: NOT uploading covers in sync_booklists')
|
||||
|
||||
|
@ -10,7 +10,7 @@ driver. It is intended to be subclassed with the relevant parts implemented
|
||||
for a particular device.
|
||||
'''
|
||||
|
||||
import os, re, time, json, uuid
|
||||
import os, re, time, json, uuid, functools
|
||||
from itertools import cycle
|
||||
|
||||
from calibre.constants import numeric_version
|
||||
@ -372,15 +372,21 @@ class USBMS(CLI, Device):
|
||||
|
||||
@classmethod
|
||||
def build_template_regexp(cls):
|
||||
def replfunc(match):
|
||||
if match.group(1) in ['title', 'series', 'series_index', 'isbn']:
|
||||
return '(?P<' + match.group(1) + '>.+?)'
|
||||
elif match.group(1) in ['authors', 'author_sort']:
|
||||
def replfunc(match, seen=None):
|
||||
v = match.group(1)
|
||||
if v in ['title', 'series', 'series_index', 'isbn']:
|
||||
if v not in seen:
|
||||
seen |= set([v])
|
||||
return '(?P<' + v + '>.+?)'
|
||||
elif v in ['authors', 'author_sort']:
|
||||
if v not in seen:
|
||||
seen |= set([v])
|
||||
return '(?P<author>.+?)'
|
||||
else:
|
||||
return '(.+?)'
|
||||
s = set()
|
||||
f = functools.partial(replfunc, seen=s)
|
||||
template = cls.save_template().rpartition('/')[2]
|
||||
return re.compile(re.sub('{([^}]*)}', replfunc, template) + '([_\d]*$)')
|
||||
return re.compile(re.sub('{([^}]*)}', f, template) + '([_\d]*$)')
|
||||
|
||||
@classmethod
|
||||
def path_to_unicode(cls, path):
|
||||
|
@ -154,17 +154,16 @@ def get_metadata(br, asin, mi):
|
||||
return False
|
||||
if root.xpath('//*[@id="errorMessage"]'):
|
||||
return False
|
||||
ratings = root.xpath('//form[@id="handleBuy"]/descendant::*[@class="asinReviewsSummary"]')
|
||||
if ratings:
|
||||
|
||||
ratings = root.xpath('//div[@class="jumpBar"]/descendant::span[@class="asinReviewsSummary"]')
|
||||
pat = re.compile(r'([0-9.]+) out of (\d+) stars')
|
||||
r = ratings[0]
|
||||
for elem in r.xpath('descendant::*[@title]'):
|
||||
t = elem.get('title')
|
||||
if ratings:
|
||||
for elem in ratings[0].xpath('descendant::*[@title]'):
|
||||
t = elem.get('title').strip()
|
||||
m = pat.match(t)
|
||||
if m is not None:
|
||||
try:
|
||||
mi.rating = float(m.group(1))/float(m.group(2)) * 5
|
||||
break
|
||||
except:
|
||||
pass
|
||||
|
||||
@ -216,6 +215,7 @@ def main(args=sys.argv):
|
||||
print 'Failed to downlaod social metadata for', title
|
||||
return 1
|
||||
#print '\n\n', time.time() - st, '\n\n'
|
||||
print mi
|
||||
print '\n'
|
||||
|
||||
return 0
|
||||
|
@ -127,6 +127,8 @@ class Metadata(object):
|
||||
field, val = self._clean_identifier(field, val)
|
||||
_data['identifiers'].update({field: val})
|
||||
elif field == 'identifiers':
|
||||
if not val:
|
||||
val = copy.copy(NULL_VALUES.get('identifiers', None))
|
||||
self.set_identifiers(val)
|
||||
elif field in STANDARD_METADATA_FIELDS:
|
||||
if val is None:
|
||||
@ -169,10 +171,13 @@ class Metadata(object):
|
||||
pass
|
||||
return default
|
||||
|
||||
def get_extra(self, field):
|
||||
def get_extra(self, field, default=None):
|
||||
_data = object.__getattribute__(self, '_data')
|
||||
if field in _data['user_metadata'].iterkeys():
|
||||
try:
|
||||
return _data['user_metadata'][field]['#extra#']
|
||||
except:
|
||||
return default
|
||||
raise AttributeError(
|
||||
'Metadata object has no attribute named: '+ repr(field))
|
||||
|
||||
@ -646,6 +651,9 @@ class Metadata(object):
|
||||
fmt('Published', isoformat(self.pubdate))
|
||||
if self.rights is not None:
|
||||
fmt('Rights', unicode(self.rights))
|
||||
if self.identifiers:
|
||||
fmt('Identifiers', u', '.join(['%s:%s'%(k, v) for k, v in
|
||||
self.identifiers.iteritems()]))
|
||||
for key in self.custom_field_keys():
|
||||
val = self.get(key, None)
|
||||
if val:
|
||||
|
@ -1251,6 +1251,7 @@ def metadata_to_opf(mi, as_string=True):
|
||||
from lxml import etree
|
||||
import textwrap
|
||||
from calibre.ebooks.oeb.base import OPF, DC
|
||||
from calibre.utils.cleantext import clean_ascii_chars
|
||||
|
||||
if not mi.application_id:
|
||||
mi.application_id = str(uuid.uuid4())
|
||||
@ -1306,7 +1307,7 @@ def metadata_to_opf(mi, as_string=True):
|
||||
if hasattr(mi, 'category') and mi.category:
|
||||
factory(DC('type'), mi.category)
|
||||
if mi.comments:
|
||||
factory(DC('description'), mi.comments)
|
||||
factory(DC('description'), clean_ascii_chars(mi.comments))
|
||||
if mi.publisher:
|
||||
factory(DC('publisher'), mi.publisher)
|
||||
for key, val in mi.get_identifiers().iteritems():
|
||||
|
@ -7,16 +7,347 @@ __license__ = 'GPL v3'
|
||||
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
||||
import socket, time, re
|
||||
from urllib import urlencode
|
||||
from threading import Thread
|
||||
|
||||
from lxml.html import soupparser, tostring
|
||||
|
||||
from calibre import as_unicode
|
||||
from calibre.ebooks.metadata import check_isbn
|
||||
from calibre.ebooks.metadata.sources.base import Source
|
||||
from calibre.utils.cleantext import clean_ascii_chars
|
||||
from calibre.ebooks.chardet import xml_to_unicode
|
||||
from calibre.ebooks.metadata.book.base import Metadata
|
||||
from calibre.library.comments import sanitize_comments_html
|
||||
|
||||
class Worker(Thread):
|
||||
|
||||
'''
|
||||
Get book details from amazons book page in a separate thread
|
||||
'''
|
||||
|
||||
def __init__(self, url, result_queue, browser, log, timeout=20):
|
||||
self.url, self.result_queue = url, result_queue
|
||||
self.log, self.timeout = log, timeout
|
||||
self.browser = browser.clone_browser()
|
||||
self.cover_url = self.amazon_id = None
|
||||
|
||||
def run(self):
|
||||
try:
|
||||
self.get_details()
|
||||
except:
|
||||
self.log.error('get_details failed for url: %r'%self.url)
|
||||
|
||||
def get_details(self):
|
||||
try:
|
||||
raw = self.browser.open_novisit(self.url, timeout=self.timeout).read().strip()
|
||||
except Exception, e:
|
||||
if callable(getattr(e, 'getcode', None)) and \
|
||||
e.getcode() == 404:
|
||||
self.log.error('URL malformed: %r'%self.url)
|
||||
return
|
||||
attr = getattr(e, 'args', [None])
|
||||
attr = attr if attr else [None]
|
||||
if isinstance(attr[0], socket.timeout):
|
||||
msg = 'Amazon timed out. Try again later.'
|
||||
self.log.error(msg)
|
||||
else:
|
||||
msg = 'Failed to make details query: %r'%self.url
|
||||
self.log.exception(msg)
|
||||
return
|
||||
|
||||
raw = xml_to_unicode(raw, strip_encoding_pats=True,
|
||||
resolve_entities=True)[0]
|
||||
|
||||
if '<title>404 - ' in raw:
|
||||
self.log.error('URL malformed: %r'%self.url)
|
||||
return
|
||||
|
||||
try:
|
||||
root = soupparser.fromstring(clean_ascii_chars(raw))
|
||||
except:
|
||||
msg = 'Failed to parse amazon details page: %r'%self.url
|
||||
self.log.exception(msg)
|
||||
return
|
||||
|
||||
errmsg = root.xpath('//*[@id="errorMessage"]')
|
||||
if errmsg:
|
||||
msg = 'Failed to parse amazon details page: %r'%self.url
|
||||
msg += tostring(errmsg, method='text', encoding=unicode).strip()
|
||||
self.log.error(msg)
|
||||
return
|
||||
|
||||
self.parse_details(root)
|
||||
|
||||
def parse_details(self, root):
|
||||
try:
|
||||
asin = self.parse_asin(root)
|
||||
except:
|
||||
self.log.exception('Error parsing asin for url: %r'%self.url)
|
||||
asin = None
|
||||
|
||||
try:
|
||||
title = self.parse_title(root)
|
||||
except:
|
||||
self.log.exception('Error parsing title for url: %r'%self.url)
|
||||
title = None
|
||||
|
||||
try:
|
||||
authors = self.parse_authors(root)
|
||||
except:
|
||||
self.log.exception('Error parsing authors for url: %r'%self.url)
|
||||
authors = []
|
||||
|
||||
|
||||
if not title or not authors or not asin:
|
||||
self.log.error('Could not find title/authors/asin for %r'%self.url)
|
||||
self.log.error('ASIN: %r Title: %r Authors: %r'%(asin, title,
|
||||
authors))
|
||||
return
|
||||
|
||||
mi = Metadata(title, authors)
|
||||
mi.set_identifier('amazon', asin)
|
||||
self.amazon_id = asin
|
||||
|
||||
try:
|
||||
mi.rating = self.parse_ratings(root)
|
||||
except:
|
||||
self.log.exception('Error parsing ratings for url: %r'%self.url)
|
||||
|
||||
try:
|
||||
mi.comments = self.parse_comments(root)
|
||||
except:
|
||||
self.log.exception('Error parsing comments for url: %r'%self.url)
|
||||
|
||||
try:
|
||||
self.cover_url = self.parse_cover(root)
|
||||
except:
|
||||
self.log.exception('Error parsing cover for url: %r'%self.url)
|
||||
|
||||
self.result_queue.put(mi)
|
||||
|
||||
def parse_asin(self, root):
|
||||
link = root.xpath('//link[@rel="canonical" and @href]')
|
||||
for l in link:
|
||||
return l.get('href').rpartition('/')[-1]
|
||||
|
||||
def parse_title(self, root):
|
||||
tdiv = root.xpath('//h1[@class="parseasinTitle"]')[0]
|
||||
actual_title = tdiv.xpath('descendant::*[@id="btAsinTitle"]')
|
||||
if actual_title:
|
||||
title = tostring(actual_title[0], encoding=unicode,
|
||||
method='text').strip()
|
||||
else:
|
||||
title = tostring(tdiv, encoding=unicode, method='text').strip()
|
||||
return re.sub(r'[([].*[)]]', '', title).strip()
|
||||
|
||||
def parse_authors(self, root):
|
||||
bdiv = root.xpath('//div[@class="buying"]')[0]
|
||||
aname = bdiv.xpath('descendant::span[@class="contributorNameTrigger"]')
|
||||
authors = [tostring(x, encoding=unicode, method='text').strip() for x
|
||||
in aname]
|
||||
return authors
|
||||
|
||||
def parse_ratings(self, root):
|
||||
ratings = root.xpath('//form[@id="handleBuy"]/descendant::*[@class="asinReviewsSummary"]')
|
||||
pat = re.compile(r'([0-9.]+) out of (\d+) stars')
|
||||
if ratings:
|
||||
for elem in ratings[0].xpath('descendant::*[@title]'):
|
||||
t = elem.get('title')
|
||||
m = pat.match(t)
|
||||
if m is not None:
|
||||
try:
|
||||
return float(m.group(1))/float(m.group(2)) * 5
|
||||
except:
|
||||
pass
|
||||
|
||||
def parse_comments(self, root):
|
||||
desc = root.xpath('//div[@id="productDescription"]/*[@class="content"]')
|
||||
if desc:
|
||||
desc = desc[0]
|
||||
for c in desc.xpath('descendant::*[@class="seeAll" or'
|
||||
' @class="emptyClear" or @href]'):
|
||||
c.getparent().remove(c)
|
||||
desc = tostring(desc, method='html', encoding=unicode).strip()
|
||||
# remove all attributes from tags
|
||||
desc = re.sub(r'<([a-zA-Z0-9]+)\s[^>]+>', r'<\1>', desc)
|
||||
# Collapse whitespace
|
||||
#desc = re.sub('\n+', '\n', desc)
|
||||
#desc = re.sub(' +', ' ', desc)
|
||||
# Remove the notice about text referring to out of print editions
|
||||
desc = re.sub(r'(?s)<em>--This text ref.*?</em>', '', desc)
|
||||
# Remove comments
|
||||
desc = re.sub(r'(?s)<!--.*?-->', '', desc)
|
||||
return sanitize_comments_html(desc)
|
||||
|
||||
def parse_cover(self, root):
|
||||
imgs = root.xpath('//img[@id="prodImage" and @src]')
|
||||
if imgs:
|
||||
src = imgs[0].get('src')
|
||||
parts = src.split('/')
|
||||
if len(parts) > 3:
|
||||
bn = parts[-1]
|
||||
sparts = bn.split('_')
|
||||
if len(sparts) > 2:
|
||||
bn = sparts[0] + sparts[-1]
|
||||
return ('/'.join(parts[:-1]))+'/'+bn
|
||||
|
||||
|
||||
class Amazon(Source):
|
||||
|
||||
name = 'Amazon'
|
||||
description = _('Downloads metadata from Amazon')
|
||||
|
||||
capabilities = frozenset(['identify', 'cover'])
|
||||
touched_fields = frozenset(['title', 'authors', 'isbn', 'pubdate',
|
||||
'comments', 'cover_data'])
|
||||
capabilities = frozenset(['identify'])
|
||||
touched_fields = frozenset(['title', 'authors', 'isbn', 'pubdate', 'comments'])
|
||||
|
||||
AMAZON_DOMAINS = {
|
||||
'com': _('US'),
|
||||
'fr' : _('France'),
|
||||
'de' : _('Germany'),
|
||||
}
|
||||
|
||||
def create_query(self, log, title=None, authors=None, identifiers={}):
|
||||
domain = self.prefs.get('domain', 'com')
|
||||
|
||||
# See the amazon detailed search page to get all options
|
||||
q = { 'search-alias' : 'aps',
|
||||
'unfiltered' : '1',
|
||||
}
|
||||
|
||||
if domain == 'com':
|
||||
q['sort'] = 'relevanceexprank'
|
||||
else:
|
||||
q['sort'] = 'relevancerank'
|
||||
|
||||
asin = identifiers.get('amazon', None)
|
||||
isbn = check_isbn(identifiers.get('isbn', None))
|
||||
|
||||
if asin is not None:
|
||||
q['field-keywords'] = asin
|
||||
elif isbn is not None:
|
||||
q['field-isbn'] = isbn
|
||||
else:
|
||||
# Only return book results
|
||||
q['search-alias'] = 'stripbooks'
|
||||
if title:
|
||||
title_tokens = list(self.get_title_tokens(title))
|
||||
if title_tokens:
|
||||
q['field-title'] = ' '.join(title_tokens)
|
||||
if authors:
|
||||
author_tokens = self.get_author_tokens(authors,
|
||||
only_first_author=True)
|
||||
if author_tokens:
|
||||
q['field-author'] = ' '.join(author_tokens)
|
||||
|
||||
if not ('field-keywords' in q or 'field-isbn' in q or
|
||||
('field-title' in q and 'field-author' in q)):
|
||||
# Insufficient metadata to make an identify query
|
||||
return None
|
||||
|
||||
utf8q = dict([(x.encode('utf-8'), y.encode('utf-8')) for x, y in
|
||||
q.iteritems()])
|
||||
url = 'http://www.amazon.%s/s/?'%domain + urlencode(utf8q)
|
||||
return url
|
||||
|
||||
|
||||
def identify(self, log, result_queue, abort, title=None, authors=None,
|
||||
identifiers={}, timeout=20):
|
||||
query = self.create_query(log, title=title, authors=authors,
|
||||
identifiers=identifiers)
|
||||
if query is None:
|
||||
log.error('Insufficient metadata to construct query')
|
||||
return
|
||||
br = self.browser
|
||||
try:
|
||||
raw = br.open_novisit(query, timeout=timeout).read().strip()
|
||||
except Exception, e:
|
||||
if callable(getattr(e, 'getcode', None)) and \
|
||||
e.getcode() == 404:
|
||||
log.error('Query malformed: %r'%query)
|
||||
return
|
||||
attr = getattr(e, 'args', [None])
|
||||
attr = attr if attr else [None]
|
||||
if isinstance(attr[0], socket.timeout):
|
||||
msg = _('Amazon timed out. Try again later.')
|
||||
log.error(msg)
|
||||
else:
|
||||
msg = 'Failed to make identify query: %r'%query
|
||||
log.exception(msg)
|
||||
return as_unicode(msg)
|
||||
|
||||
|
||||
raw = xml_to_unicode(raw, strip_encoding_pats=True,
|
||||
resolve_entities=True)[0]
|
||||
|
||||
if '<title>404 - ' in raw:
|
||||
log.error('No matches found for query: %r'%query)
|
||||
return
|
||||
|
||||
try:
|
||||
root = soupparser.fromstring(clean_ascii_chars(raw))
|
||||
except:
|
||||
msg = 'Failed to parse amazon page for query: %r'%query
|
||||
log.exception(msg)
|
||||
return msg
|
||||
|
||||
errmsg = root.xpath('//*[@id="errorMessage"]')
|
||||
if errmsg:
|
||||
msg = tostring(errmsg, method='text', encoding=unicode).strip()
|
||||
log.error(msg)
|
||||
# The error is almost always a not found error
|
||||
return
|
||||
|
||||
matches = []
|
||||
for div in root.xpath(r'//div[starts-with(@id, "result_")]'):
|
||||
for a in div.xpath(r'descendant::a[@class="title" and @href]'):
|
||||
title = tostring(a, method='text', encoding=unicode).lower()
|
||||
if 'bulk pack' not in title:
|
||||
matches.append(a.get('href'))
|
||||
break
|
||||
|
||||
# Keep only the top 5 matches as the matches are sorted by relevance by
|
||||
# Amazon so lower matches are not likely to be very relevant
|
||||
matches = matches[:5]
|
||||
|
||||
if not matches:
|
||||
log.error('No matches found with query: %r'%query)
|
||||
return
|
||||
|
||||
workers = [Worker(url, result_queue, br, log) for url in matches]
|
||||
|
||||
for w in workers:
|
||||
w.start()
|
||||
# Don't send all requests at the same time
|
||||
time.sleep(0.1)
|
||||
|
||||
while not abort.is_set():
|
||||
a_worker_is_alive = False
|
||||
for w in workers:
|
||||
w.join(0.2)
|
||||
if abort.is_set():
|
||||
break
|
||||
if w.is_alive():
|
||||
a_worker_is_alive = True
|
||||
if not a_worker_is_alive:
|
||||
break
|
||||
|
||||
return None
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Run these tests with:
    #   calibre-debug -e src/calibre/ebooks/metadata/sources/amazon.py
    from calibre.ebooks.metadata.sources.test import (test_identify_plugin,
            title_test)

    # Each entry: (query kwargs, [checks applied to the returned metadata])
    test_cases = [
        (   # An ISBN-only lookup that should resolve to Fitzgerald
            {'identifiers':{'isbn': '0743273567'}},
            [title_test('The great gatsby', exact=True)]
        ),
    ]
    test_identify_plugin(Amazon.name, test_cases)
|
||||
|
||||
|
||||
|
@ -9,8 +9,12 @@ __docformat__ = 'restructuredtext en'
|
||||
|
||||
import re, threading
|
||||
|
||||
from calibre import browser, random_user_agent
|
||||
from calibre.customize import Plugin
|
||||
from calibre.utils.logging import ThreadSafeLog, FileStream
|
||||
from calibre.utils.config import JSONConfig
|
||||
|
||||
msprefs = JSONConfig('metadata_sources.json')
|
||||
|
||||
def create_log(ostream=None):
|
||||
log = ThreadSafeLog(level=ThreadSafeLog.DEBUG)
|
||||
@ -24,8 +28,6 @@ class Source(Plugin):
|
||||
|
||||
supported_platforms = ['windows', 'osx', 'linux']
|
||||
|
||||
result_of_identify_is_complete = True
|
||||
|
||||
capabilities = frozenset()
|
||||
|
||||
touched_fields = frozenset()
|
||||
@ -34,6 +36,27 @@ class Source(Plugin):
|
||||
Plugin.__init__(self, *args, **kwargs)
|
||||
self._isbn_to_identifier_cache = {}
|
||||
self.cache_lock = threading.RLock()
|
||||
self._config_obj = None
|
||||
self._browser = None
|
||||
|
||||
# Configuration {{{
|
||||
|
||||
@property
|
||||
def prefs(self):
|
||||
if self._config_obj is None:
|
||||
self._config_obj = JSONConfig('metadata_sources/%s.json'%self.name)
|
||||
return self._config_obj
|
||||
# }}}
|
||||
|
||||
# Browser {{{
|
||||
|
||||
@property
|
||||
def browser(self):
|
||||
if self._browser is None:
|
||||
self._browser = browser(user_agent=random_user_agent())
|
||||
return self._browser
|
||||
|
||||
# }}}
|
||||
|
||||
# Utility functions {{{
|
||||
|
||||
|
@ -19,7 +19,7 @@ from calibre.ebooks.metadata.book.base import Metadata
|
||||
from calibre.ebooks.chardet import xml_to_unicode
|
||||
from calibre.utils.date import parse_date, utcnow
|
||||
from calibre.utils.cleantext import clean_ascii_chars
|
||||
from calibre import browser, as_unicode
|
||||
from calibre import as_unicode
|
||||
|
||||
NAMESPACES = {
|
||||
'openSearch':'http://a9.com/-/spec/opensearchrss/1.0/',
|
||||
@ -150,7 +150,7 @@ class GoogleBooks(Source):
|
||||
|
||||
def create_query(self, log, title=None, authors=None, identifiers={}):
|
||||
BASE_URL = 'http://books.google.com/books/feeds/volumes?'
|
||||
isbn = identifiers.get('isbn', None)
|
||||
isbn = check_isbn(identifiers.get('isbn', None))
|
||||
q = ''
|
||||
if isbn is not None:
|
||||
q += 'isbn:'+isbn
|
||||
@ -209,10 +209,10 @@ class GoogleBooks(Source):
|
||||
break
|
||||
|
||||
def identify(self, log, result_queue, abort, title=None, authors=None,
|
||||
identifiers={}, timeout=5):
|
||||
identifiers={}, timeout=20):
|
||||
query = self.create_query(log, title=title, authors=authors,
|
||||
identifiers=identifiers)
|
||||
br = browser()
|
||||
br = self.browser()
|
||||
try:
|
||||
raw = br.open_novisit(query, timeout=timeout).read()
|
||||
except Exception, e:
|
||||
|
@ -81,6 +81,7 @@ class DetectStructure(object):
|
||||
page_break_after = 'display: block; page-break-after: always'
|
||||
for item, elem in self.detected_chapters:
|
||||
text = xml2text(elem).strip()
|
||||
text = re.sub(r'\s+', ' ', text.strip())
|
||||
self.log('\tDetected chapter:', text[:50])
|
||||
if chapter_mark == 'none':
|
||||
continue
|
||||
@ -137,7 +138,8 @@ class DetectStructure(object):
|
||||
text = elem.get('title', '')
|
||||
if not text:
|
||||
text = elem.get('alt', '')
|
||||
text = text[:100].strip()
|
||||
text = re.sub(r'\s+', ' ', text.strip())
|
||||
text = text[:1000].strip()
|
||||
id = elem.get('id', 'calibre_toc_%d'%counter)
|
||||
elem.set('id', id)
|
||||
href = '#'.join((item.href, id))
|
||||
|
@ -53,7 +53,7 @@ if pictureflow is not None:
|
||||
def __init__(self, model, buffer=20):
|
||||
pictureflow.FlowImages.__init__(self)
|
||||
self.model = model
|
||||
self.model.modelReset.connect(self.reset)
|
||||
self.model.modelReset.connect(self.reset, type=Qt.QueuedConnection)
|
||||
|
||||
def count(self):
|
||||
return self.model.count()
|
||||
@ -83,6 +83,8 @@ if pictureflow is not None:
|
||||
|
||||
class CoverFlow(pictureflow.PictureFlow):
|
||||
|
||||
dc_signal = pyqtSignal()
|
||||
|
||||
def __init__(self, parent=None):
|
||||
pictureflow.PictureFlow.__init__(self, parent,
|
||||
config['cover_flow_queue_length']+1)
|
||||
@ -90,6 +92,8 @@ if pictureflow is not None:
|
||||
self.setFocusPolicy(Qt.WheelFocus)
|
||||
self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding,
|
||||
QSizePolicy.Expanding))
|
||||
self.dc_signal.connect(self._data_changed,
|
||||
type=Qt.QueuedConnection)
|
||||
|
||||
def sizeHint(self):
|
||||
return self.minimumSize()
|
||||
@ -101,6 +105,12 @@ if pictureflow is not None:
|
||||
elif ev.delta() > 0:
|
||||
self.showPrevious()
|
||||
|
||||
def dataChanged(self):
|
||||
self.dc_signal.emit()
|
||||
|
||||
def _data_changed(self):
|
||||
pictureflow.PictureFlow.dataChanged(self)
|
||||
|
||||
|
||||
else:
|
||||
CoverFlow = None
|
||||
@ -135,8 +145,7 @@ class CoverFlowMixin(object):
|
||||
self.cover_flow = None
|
||||
if CoverFlow is not None:
|
||||
self.cf_last_updated_at = None
|
||||
self.cover_flow_sync_timer = QTimer(self)
|
||||
self.cover_flow_sync_timer.timeout.connect(self.cover_flow_do_sync)
|
||||
self.cover_flow_syncing_enabled = False
|
||||
self.cover_flow_sync_flag = True
|
||||
self.cover_flow = CoverFlow(parent=self)
|
||||
self.cover_flow.currentChanged.connect(self.sync_listview_to_cf)
|
||||
@ -179,14 +188,15 @@ class CoverFlowMixin(object):
|
||||
self.cover_flow.setFocus(Qt.OtherFocusReason)
|
||||
if CoverFlow is not None:
|
||||
self.cover_flow.setCurrentSlide(self.library_view.currentIndex().row())
|
||||
self.cover_flow_sync_timer.start(500)
|
||||
self.cover_flow_syncing_enabled = True
|
||||
QTimer.singleShot(500, self.cover_flow_do_sync)
|
||||
self.library_view.setCurrentIndex(
|
||||
self.library_view.currentIndex())
|
||||
self.library_view.scroll_to_row(self.library_view.currentIndex().row())
|
||||
|
||||
def cover_browser_hidden(self):
|
||||
if CoverFlow is not None:
|
||||
self.cover_flow_sync_timer.stop()
|
||||
self.cover_flow_syncing_enabled = False
|
||||
idx = self.library_view.model().index(self.cover_flow.currentSlide(), 0)
|
||||
if idx.isValid():
|
||||
sm = self.library_view.selectionModel()
|
||||
@ -242,6 +252,8 @@ class CoverFlowMixin(object):
|
||||
except:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
if self.cover_flow_syncing_enabled:
|
||||
QTimer.singleShot(500, self.cover_flow_do_sync)
|
||||
|
||||
def sync_listview_to_cf(self, row):
|
||||
self.cf_last_updated_at = time.time()
|
||||
|
@ -202,13 +202,19 @@ class CheckLibraryDialog(QDialog):
|
||||
<p><i>Delete marked</i> is used to remove extra files/folders/covers that
|
||||
have no entries in the database. Check the box next to the item you want
|
||||
to delete. Use with caution.</p>
|
||||
<p><i>Fix marked</i> is applicable only to covers (the two lines marked
|
||||
'fixable'). In the case of missing cover files, checking the fixable
|
||||
box and pushing this button will remove the cover mark from the
|
||||
database for all the files in that category. In the case of extra
|
||||
cover files, checking the fixable box and pushing this button will
|
||||
add the cover mark to the database for all the files in that
|
||||
category.</p>
|
||||
|
||||
<p><i>Fix marked</i> is applicable only to covers and missing formats
|
||||
(the three lines marked 'fixable'). In the case of missing cover files,
|
||||
checking the fixable box and pushing this button will tell calibre that
|
||||
there is no cover for all of the books listed. Use this option if you
|
||||
are not going to restore the covers from a backup. In the case of extra
|
||||
cover files, checking the fixable box and pushing this button will tell
|
||||
calibre that the cover files it found are correct for all the books
|
||||
listed. Use this when you are not going to delete the file(s). In the
|
||||
case of missing formats, checking the fixable box and pushing this
|
||||
button will tell calibre that the formats are really gone. Use this if
|
||||
you are not going to restore the formats from a backup.</p>
|
||||
|
||||
'''))
|
||||
|
||||
self.log = QTreeWidget(self)
|
||||
@ -381,6 +387,19 @@ class CheckLibraryDialog(QDialog):
|
||||
unicode(it.text(1))))
|
||||
self.run_the_check()
|
||||
|
||||
def fix_missing_formats(self):
|
||||
tl = self.top_level_items['missing_formats']
|
||||
child_count = tl.childCount()
|
||||
for i in range(0, child_count):
|
||||
item = tl.child(i);
|
||||
id = item.data(0, Qt.UserRole).toInt()[0]
|
||||
all = self.db.formats(id, index_is_id=True, verify_formats=False)
|
||||
all = set([f.strip() for f in all.split(',')]) if all else set()
|
||||
valid = self.db.formats(id, index_is_id=True, verify_formats=True)
|
||||
valid = set([f.strip() for f in valid.split(',')]) if valid else set()
|
||||
for fmt in all-valid:
|
||||
self.db.remove_format(id, fmt, index_is_id=True, db_only=True)
|
||||
|
||||
def fix_missing_covers(self):
|
||||
tl = self.top_level_items['missing_covers']
|
||||
child_count = tl.childCount()
|
||||
|
@ -783,6 +783,12 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
|
||||
books_to_refresh = self.db.set_custom(id, val, label=dfm['label'],
|
||||
extra=extra, commit=False,
|
||||
allow_case_change=True)
|
||||
elif dest.startswith('#') and dest.endswith('_index'):
|
||||
label = self.db.field_metadata[dest[:-6]]['label']
|
||||
series = self.db.get_custom(id, label=label, index_is_id=True)
|
||||
books_to_refresh = self.db.set_custom(id, series, label=label,
|
||||
extra=val, commit=False,
|
||||
allow_case_change=True)
|
||||
else:
|
||||
if dest == 'comments':
|
||||
setter = self.db.set_comment
|
||||
|
@ -9,12 +9,13 @@ from PyQt4.QtGui import QDialog
|
||||
from calibre.gui2.dialogs.saved_search_editor_ui import Ui_SavedSearchEditor
|
||||
from calibre.utils.search_query_parser import saved_searches
|
||||
from calibre.utils.icu import sort_key
|
||||
from calibre.gui2 import error_dialog
|
||||
from calibre.gui2.dialogs.confirm_delete import confirm
|
||||
|
||||
class SavedSearchEditor(QDialog, Ui_SavedSearchEditor):
|
||||
|
||||
def __init__(self, window, initial_search=None):
|
||||
QDialog.__init__(self, window)
|
||||
def __init__(self, parent, initial_search=None):
|
||||
QDialog.__init__(self, parent)
|
||||
Ui_SavedSearchEditor.__init__(self)
|
||||
self.setupUi(self)
|
||||
|
||||
@ -22,12 +23,13 @@ class SavedSearchEditor(QDialog, Ui_SavedSearchEditor):
|
||||
self.connect(self.search_name_box, SIGNAL('currentIndexChanged(int)'),
|
||||
self.current_index_changed)
|
||||
self.connect(self.delete_search_button, SIGNAL('clicked()'), self.del_search)
|
||||
self.rename_button.clicked.connect(self.rename_search)
|
||||
|
||||
self.current_search_name = None
|
||||
self.searches = {}
|
||||
self.searches_to_delete = []
|
||||
for name in saved_searches().names():
|
||||
self.searches[name] = saved_searches().lookup(name)
|
||||
self.search_names = set([icu_lower(n) for n in saved_searches().names()])
|
||||
|
||||
self.populate_search_list()
|
||||
if initial_search is not None and initial_search in self.searches:
|
||||
@ -42,6 +44,11 @@ class SavedSearchEditor(QDialog, Ui_SavedSearchEditor):
|
||||
search_name = unicode(self.input_box.text()).strip()
|
||||
if search_name == '':
|
||||
return False
|
||||
if icu_lower(search_name) in self.search_names:
|
||||
error_dialog(self, _('Saved search already exists'),
|
||||
_('The saved search %s already exists, perhaps with '
|
||||
'different case')%search_name).exec_()
|
||||
return False
|
||||
if search_name not in self.searches:
|
||||
self.searches[search_name] = ''
|
||||
self.populate_search_list()
|
||||
@ -57,10 +64,25 @@ class SavedSearchEditor(QDialog, Ui_SavedSearchEditor):
|
||||
+'</p>', 'saved_search_editor_delete', self):
|
||||
return
|
||||
del self.searches[self.current_search_name]
|
||||
self.searches_to_delete.append(self.current_search_name)
|
||||
self.current_search_name = None
|
||||
self.search_name_box.removeItem(self.search_name_box.currentIndex())
|
||||
|
||||
def rename_search(self):
|
||||
new_search_name = unicode(self.input_box.text()).strip()
|
||||
if new_search_name == '':
|
||||
return False
|
||||
if icu_lower(new_search_name) in self.search_names:
|
||||
error_dialog(self, _('Saved search already exists'),
|
||||
_('The saved search %s already exists, perhaps with '
|
||||
'different case')%new_search_name).exec_()
|
||||
return False
|
||||
if self.current_search_name in self.searches:
|
||||
self.searches[new_search_name] = self.searches[self.current_search_name]
|
||||
del self.searches[self.current_search_name]
|
||||
self.populate_search_list()
|
||||
self.select_search(new_search_name)
|
||||
return True
|
||||
|
||||
def select_search(self, name):
|
||||
self.search_name_box.setCurrentIndex(self.search_name_box.findText(name))
|
||||
|
||||
@ -78,7 +100,7 @@ class SavedSearchEditor(QDialog, Ui_SavedSearchEditor):
|
||||
def accept(self):
|
||||
if self.current_search_name:
|
||||
self.searches[self.current_search_name] = unicode(self.search_text.toPlainText())
|
||||
for name in self.searches_to_delete:
|
||||
for name in saved_searches().names():
|
||||
saved_searches().delete(name)
|
||||
for name in self.searches:
|
||||
saved_searches().add(name, self.searches[name])
|
||||
|
@ -134,6 +134,20 @@
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item row="0" column="6">
|
||||
<widget class="QToolButton" name="rename_button">
|
||||
<property name="toolTip">
|
||||
<string>Rename the current search to what is in the box</string>
|
||||
</property>
|
||||
<property name="text">
|
||||
<string>...</string>
|
||||
</property>
|
||||
<property name="icon">
|
||||
<iconset>
|
||||
<normaloff>:/images/edit-undo.png</normaloff>:/images/edit-undo.png</iconset>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
</layout>
|
||||
</item>
|
||||
<item row="1" column="0">
|
||||
|
@ -6,7 +6,7 @@
|
||||
<rect>
|
||||
<x>0</x>
|
||||
<y>0</y>
|
||||
<width>767</width>
|
||||
<width>792</width>
|
||||
<height>575</height>
|
||||
</rect>
|
||||
</property>
|
||||
@ -44,7 +44,7 @@
|
||||
<rect>
|
||||
<x>0</x>
|
||||
<y>0</y>
|
||||
<width>469</width>
|
||||
<width>486</width>
|
||||
<height>504</height>
|
||||
</rect>
|
||||
</property>
|
||||
|
@ -1,10 +1,14 @@
|
||||
from __future__ import (unicode_literals, division, absolute_import,
|
||||
print_function)
|
||||
|
||||
__license__ = 'GPL v3'
|
||||
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
|
||||
|
||||
import StringIO, traceback, sys
|
||||
|
||||
from PyQt4.Qt import QMainWindow, QString, Qt, QFont, QCoreApplication, SIGNAL,\
|
||||
QAction, QMenu, QMenuBar, QIcon, pyqtSignal
|
||||
import StringIO, traceback, sys, gc
|
||||
|
||||
from PyQt4.Qt import QMainWindow, QString, Qt, QFont, QTimer, \
|
||||
QAction, QMenu, QMenuBar, QIcon, pyqtSignal, QObject
|
||||
from calibre.gui2.dialogs.conversion_error import ConversionErrorDialog
|
||||
from calibre.utils.config import OptionParser
|
||||
from calibre.gui2 import error_dialog
|
||||
@ -16,7 +20,8 @@ Usage: %prog [options]
|
||||
Launch the Graphical User Interface
|
||||
'''):
|
||||
parser = OptionParser(usage)
|
||||
parser.add_option('--redirect-console-output', default=False, action='store_true', dest='redirect',
|
||||
# The b is required because of a regression in optparse.py in python 2.7.0
|
||||
parser.add_option(b'--redirect-console-output', default=False, action='store_true', dest='redirect',
|
||||
help=_('Redirect console output to a dialog window (both stdout and stderr). Useful on windows where GUI apps do not have a output streams.'))
|
||||
return parser
|
||||
|
||||
@ -35,6 +40,53 @@ class DebugWindow(ConversionErrorDialog):
|
||||
def flush(self):
|
||||
pass
|
||||
|
||||
class GarbageCollector(QObject):

    '''
    Disable automatic garbage collection and instead collect manually
    every INTERVAL milliseconds.

    This is done to ensure that garbage collection only happens in the GUI
    thread, as otherwise Qt can crash.
    '''

    # Milliseconds between manual collection passes
    INTERVAL = 5000

    def __init__(self, parent, debug=False):
        QObject.__init__(self, parent)
        # When True, check() prints per-generation collection statistics
        self.debug = debug

        self.timer = QTimer(self)
        self.timer.timeout.connect(self.check)

        # Remember the collector's default thresholds so check() can
        # mimic its generation-triggering heuristic, then take over.
        self.threshold = gc.get_threshold()
        gc.disable()
        self.timer.start(self.INTERVAL)
        #gc.set_debug(gc.DEBUG_SAVEALL)

    def check(self):
        # Run one manual collection pass, per generation, using the same
        # count-vs-threshold test the automatic collector would apply.
        #return self.debug_cycles()
        l0, l1, l2 = gc.get_count()
        if self.debug:
            print ('gc_check called:', l0, l1, l2)
        if l0 > self.threshold[0]:
            num = gc.collect(0)
            if self.debug:
                print ('collecting gen 0, found:', num, 'unreachable')
        if l1 > self.threshold[1]:
            num = gc.collect(1)
            if self.debug:
                print ('collecting gen 1, found:', num, 'unreachable')
        if l2 > self.threshold[2]:
            num = gc.collect(2)
            if self.debug:
                print ('collecting gen 2, found:', num, 'unreachable')

    def debug_cycles(self):
        # Force a full collection and dump whatever ended up in
        # gc.garbage (only populated with gc.DEBUG_SAVEALL enabled above).
        gc.collect()
        for obj in gc.garbage:
            print (obj, repr(obj), type(obj))
|
||||
class MainWindow(QMainWindow):
|
||||
|
||||
___menu_bar = None
|
||||
@ -64,19 +116,15 @@ class MainWindow(QMainWindow):
|
||||
quit_action.setMenuRole(QAction.QuitRole)
|
||||
return preferences_action, quit_action
|
||||
|
||||
def __init__(self, opts, parent=None):
|
||||
def __init__(self, opts, parent=None, disable_automatic_gc=False):
|
||||
QMainWindow.__init__(self, parent)
|
||||
app = QCoreApplication.instance()
|
||||
if app is not None:
|
||||
self.connect(app, SIGNAL('unixSignal(int)'), self.unix_signal)
|
||||
if disable_automatic_gc:
|
||||
self._gc = GarbageCollector(self, debug=False)
|
||||
if getattr(opts, 'redirect', False):
|
||||
self.__console_redirect = DebugWindow(self)
|
||||
sys.stdout = sys.stderr = self.__console_redirect
|
||||
self.__console_redirect.show()
|
||||
|
||||
def unix_signal(self, signal):
|
||||
print 'Received signal:', repr(signal)
|
||||
|
||||
def unhandled_exception(self, type, value, tb):
|
||||
if type == KeyboardInterrupt:
|
||||
self.keyboard_interrupt.emit()
|
||||
|
@ -439,7 +439,8 @@ void PictureFlowPrivate::setImages(FlowImages *images)
|
||||
QObject::disconnect(slideImages, SIGNAL(dataChanged()), widget, SLOT(dataChanged()));
|
||||
slideImages = images;
|
||||
dataChanged();
|
||||
QObject::connect(slideImages, SIGNAL(dataChanged()), widget, SLOT(dataChanged()));
|
||||
QObject::connect(slideImages, SIGNAL(dataChanged()), widget, SLOT(dataChanged()),
|
||||
Qt::QueuedConnection);
|
||||
}
|
||||
|
||||
int PictureFlowPrivate::slideCount() const
|
||||
|
@ -67,6 +67,7 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
|
||||
if db.field_metadata[k]['is_category'] and
|
||||
db.field_metadata[k]['datatype'] in ['text', 'series', 'enumeration']])
|
||||
choices -= set(['authors', 'publisher', 'formats', 'news', 'identifiers'])
|
||||
choices |= set(['search'])
|
||||
self.opt_categories_using_hierarchy.update_items_cache(choices)
|
||||
r('categories_using_hierarchy', db.prefs, setting=CommaSeparatedList,
|
||||
choices=sorted(list(choices), key=sort_key))
|
||||
|
@ -55,6 +55,10 @@ class BaseModel(QAbstractListModel):
|
||||
text = _('Choose library')
|
||||
return QVariant(text)
|
||||
if role == Qt.DecorationRole:
|
||||
if hasattr(self._data[row], 'qaction'):
|
||||
icon = self._data[row].qaction.icon()
|
||||
if not icon.isNull():
|
||||
return QVariant(icon)
|
||||
ic = action[1]
|
||||
if ic is None:
|
||||
ic = 'blank.png'
|
||||
|
@ -453,6 +453,9 @@ class SavedSearchBoxMixin(object): # {{{
|
||||
d = SavedSearchEditor(self, search)
|
||||
d.exec_()
|
||||
if d.result() == d.Accepted:
|
||||
self.do_rebuild_saved_searches()
|
||||
|
||||
def do_rebuild_saved_searches(self):
|
||||
self.saved_searches_changed()
|
||||
self.saved_search.clear()
|
||||
|
||||
|
@ -81,6 +81,7 @@ class TagsView(QTreeView): # {{{
|
||||
add_subcategory = pyqtSignal(object)
|
||||
tag_list_edit = pyqtSignal(object, object)
|
||||
saved_search_edit = pyqtSignal(object)
|
||||
rebuild_saved_searches = pyqtSignal()
|
||||
author_sort_edit = pyqtSignal(object, object)
|
||||
tag_item_renamed = pyqtSignal()
|
||||
search_item_renamed = pyqtSignal()
|
||||
@ -111,6 +112,8 @@ class TagsView(QTreeView): # {{{
|
||||
self.collapse_model = gprefs['tags_browser_partition_method']
|
||||
self.search_icon = QIcon(I('search.png'))
|
||||
self.user_category_icon = QIcon(I('tb_folder.png'))
|
||||
self.delete_icon = QIcon(I('list_remove.png'))
|
||||
self.rename_icon = QIcon(I('edit-undo.png'))
|
||||
|
||||
def set_pane_is_visible(self, to_what):
|
||||
pv = self.pane_is_visible
|
||||
@ -251,6 +254,10 @@ class TagsView(QTreeView): # {{{
|
||||
if action == 'delete_user_category':
|
||||
self.delete_user_category.emit(key)
|
||||
return
|
||||
if action == 'delete_search':
|
||||
saved_searches().delete(key)
|
||||
self.rebuild_saved_searches.emit()
|
||||
return
|
||||
if action == 'delete_item_from_user_category':
|
||||
tag = index.tag
|
||||
if len(index.children) > 0:
|
||||
@ -284,6 +291,14 @@ class TagsView(QTreeView): # {{{
|
||||
return
|
||||
|
||||
def show_context_menu(self, point):
|
||||
def display_name( tag):
|
||||
if tag.category == 'search':
|
||||
n = tag.name
|
||||
if len(n) > 45:
|
||||
n = n[:45] + '...'
|
||||
return "'" + n + "'"
|
||||
return tag.name
|
||||
|
||||
index = self.indexAt(point)
|
||||
self.context_menu = QMenu(self)
|
||||
|
||||
@ -313,18 +328,19 @@ class TagsView(QTreeView): # {{{
|
||||
# the possibility of renaming that item.
|
||||
if tag.is_editable:
|
||||
# Add the 'rename' items
|
||||
self.context_menu.addAction(_('Rename %s')%tag.name,
|
||||
self.context_menu.addAction(self.rename_icon,
|
||||
_('Rename %s')%display_name(tag),
|
||||
partial(self.context_menu_handler, action='edit_item',
|
||||
index=index))
|
||||
if key == 'authors':
|
||||
self.context_menu.addAction(_('Edit sort for %s')%tag.name,
|
||||
self.context_menu.addAction(_('Edit sort for %s')%display_name(tag),
|
||||
partial(self.context_menu_handler,
|
||||
action='edit_author_sort', index=tag.id))
|
||||
|
||||
# is_editable is also overloaded to mean 'can be added
|
||||
# to a user category'
|
||||
m = self.context_menu.addMenu(self.user_category_icon,
|
||||
_('Add %s to user category')%tag.name)
|
||||
_('Add %s to user category')%display_name(tag))
|
||||
nt = self.model().category_node_tree
|
||||
def add_node_tree(tree_dict, m, path):
|
||||
p = path[:]
|
||||
@ -341,28 +357,37 @@ class TagsView(QTreeView): # {{{
|
||||
add_node_tree(tree_dict[k], tm, p)
|
||||
p.pop()
|
||||
add_node_tree(nt, m, [])
|
||||
|
||||
elif key == 'search':
|
||||
self.context_menu.addAction(self.rename_icon,
|
||||
_('Rename %s')%display_name(tag),
|
||||
partial(self.context_menu_handler, action='edit_item',
|
||||
index=index))
|
||||
self.context_menu.addAction(self.delete_icon,
|
||||
_('Delete search %s')%display_name(tag),
|
||||
partial(self.context_menu_handler,
|
||||
action='delete_search', key=tag.name))
|
||||
if key.startswith('@') and not item.is_gst:
|
||||
self.context_menu.addAction(self.user_category_icon,
|
||||
_('Remove %s from category %s')%(tag.name, item.py_name),
|
||||
_('Remove %s from category %s')%
|
||||
(display_name(tag), item.py_name),
|
||||
partial(self.context_menu_handler,
|
||||
action='delete_item_from_user_category',
|
||||
key = key, index = tag_item))
|
||||
# Add the search for value items. All leaf nodes are searchable
|
||||
self.context_menu.addAction(self.search_icon,
|
||||
_('Search for %s')%tag.name,
|
||||
_('Search for %s')%display_name(tag),
|
||||
partial(self.context_menu_handler, action='search',
|
||||
search_state=TAG_SEARCH_STATES['mark_plus'],
|
||||
index=index))
|
||||
self.context_menu.addAction(self.search_icon,
|
||||
_('Search for everything but %s')%tag.name,
|
||||
_('Search for everything but %s')%display_name(tag),
|
||||
partial(self.context_menu_handler, action='search',
|
||||
search_state=TAG_SEARCH_STATES['mark_minus'],
|
||||
index=index))
|
||||
self.context_menu.addSeparator()
|
||||
elif key.startswith('@') and not item.is_gst:
|
||||
if item.can_be_edited:
|
||||
self.context_menu.addAction(self.user_category_icon,
|
||||
self.context_menu.addAction(self.rename_icon,
|
||||
_('Rename %s')%item.py_name,
|
||||
partial(self.context_menu_handler, action='edit_item',
|
||||
index=index))
|
||||
@ -370,7 +395,7 @@ class TagsView(QTreeView): # {{{
|
||||
_('Add sub-category to %s')%item.py_name,
|
||||
partial(self.context_menu_handler,
|
||||
action='add_subcategory', key=key))
|
||||
self.context_menu.addAction(self.user_category_icon,
|
||||
self.context_menu.addAction(self.delete_icon,
|
||||
_('Delete user category %s')%item.py_name,
|
||||
partial(self.context_menu_handler,
|
||||
action='delete_user_category', key=key))
|
||||
@ -533,7 +558,9 @@ class TagsView(QTreeView): # {{{
|
||||
self.setModel(self._model)
|
||||
except:
|
||||
# The DB must be gone. Set the model to None and hope that someone
|
||||
# will call set_database later. I don't know if this in fact works
|
||||
# will call set_database later. I don't know if this in fact works.
|
||||
# But perhaps a Bad Thing Happened, so print the exception
|
||||
traceback.print_exc()
|
||||
self._model = None
|
||||
self.setModel(None)
|
||||
# }}}
|
||||
@ -678,7 +705,8 @@ class TagTreeItem(object): # {{{
|
||||
break
|
||||
elif self.tag.state == TAG_SEARCH_STATES['mark_plusplus'] or\
|
||||
self.tag.state == TAG_SEARCH_STATES['mark_minusminus']:
|
||||
if self.tag.is_hierarchical and len(self.children):
|
||||
if self.tag.is_searchable and self.tag.is_hierarchical \
|
||||
and len(self.children):
|
||||
break
|
||||
else:
|
||||
break
|
||||
@ -1258,19 +1286,22 @@ class TagsModel(QAbstractItemModel): # {{{
|
||||
if t.type != TagTreeItem.CATEGORY])
|
||||
if (comp,tag.category) in child_map:
|
||||
node_parent = child_map[(comp,tag.category)]
|
||||
node_parent.tag.is_hierarchical = True
|
||||
node_parent.tag.is_hierarchical = key != 'search'
|
||||
else:
|
||||
if i < len(components)-1:
|
||||
t = copy.copy(tag)
|
||||
t.original_name = '.'.join(components[:i+1])
|
||||
if key != 'search':
|
||||
# This 'manufactured' intermediate node can
|
||||
# be searched, but cannot be edited.
|
||||
t.is_editable = False
|
||||
else:
|
||||
t.is_searchable = t.is_editable = False
|
||||
else:
|
||||
t = tag
|
||||
if not in_uc:
|
||||
t.original_name = t.name
|
||||
t.is_hierarchical = True
|
||||
t.is_hierarchical = key != 'search'
|
||||
t.name = comp
|
||||
self.beginInsertRows(category_index, 999999, 1)
|
||||
node_parent = TagTreeItem(parent=node_parent, data=t,
|
||||
@ -1762,6 +1793,7 @@ class TagBrowserMixin(object): # {{{
|
||||
self.tags_view.add_subcategory.connect(self.do_add_subcategory)
|
||||
self.tags_view.add_item_to_user_cat.connect(self.do_add_item_to_user_cat)
|
||||
self.tags_view.saved_search_edit.connect(self.do_saved_search_edit)
|
||||
self.tags_view.rebuild_saved_searches.connect(self.do_rebuild_saved_searches)
|
||||
self.tags_view.author_sort_edit.connect(self.do_author_sort_edit)
|
||||
self.tags_view.tag_item_renamed.connect(self.do_tag_item_renamed)
|
||||
self.tags_view.search_item_renamed.connect(self.saved_searches_changed)
|
||||
|
@ -9,7 +9,7 @@ __docformat__ = 'restructuredtext en'
|
||||
|
||||
'''The main GUI'''
|
||||
|
||||
import collections, os, sys, textwrap, time
|
||||
import collections, os, sys, textwrap, time, gc
|
||||
from Queue import Queue, Empty
|
||||
from threading import Thread
|
||||
from PyQt4.Qt import Qt, SIGNAL, QTimer, QHelpEvent, QAction, \
|
||||
@ -95,7 +95,7 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
|
||||
|
||||
|
||||
def __init__(self, opts, parent=None, gui_debug=None):
|
||||
MainWindow.__init__(self, opts, parent)
|
||||
MainWindow.__init__(self, opts, parent=parent, disable_automatic_gc=True)
|
||||
self.opts = opts
|
||||
self.device_connected = None
|
||||
self.gui_debug = gui_debug
|
||||
@ -298,6 +298,9 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
|
||||
raise
|
||||
self.device_manager.set_current_library_uuid(db.library_id)
|
||||
|
||||
# Collect cycles now
|
||||
gc.collect()
|
||||
|
||||
if show_gui and self.gui_debug is not None:
|
||||
info_dialog(self, _('Debug mode'), '<p>' +
|
||||
_('You have started calibre in debug mode. After you '
|
||||
@ -399,6 +402,7 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
|
||||
elif msg.startswith('refreshdb:'):
|
||||
self.library_view.model().refresh()
|
||||
self.library_view.model().research()
|
||||
self.tags_view.recount()
|
||||
else:
|
||||
print msg
|
||||
|
||||
@ -463,6 +467,9 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
|
||||
self.card_a_view.reset()
|
||||
self.card_b_view.reset()
|
||||
self.device_manager.set_current_library_uuid(db.library_id)
|
||||
# Run a garbage collection now so that it does not freeze the
|
||||
# interface later
|
||||
gc.collect()
|
||||
|
||||
|
||||
def set_window_title(self):
|
||||
|
@ -225,6 +225,12 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
|
||||
self.action_quit.setShortcuts(qs)
|
||||
self.connect(self.action_quit, SIGNAL('triggered(bool)'),
|
||||
lambda x:QApplication.instance().quit())
|
||||
self.action_focus_search = QAction(self)
|
||||
self.addAction(self.action_focus_search)
|
||||
self.action_focus_search.setShortcuts([Qt.Key_Slash,
|
||||
QKeySequence(QKeySequence.Find)])
|
||||
self.action_focus_search.triggered.connect(lambda x:
|
||||
self.search.setFocus(Qt.OtherFocusReason))
|
||||
self.action_copy.setDisabled(True)
|
||||
self.action_metadata.setCheckable(True)
|
||||
self.action_metadata.setShortcut(Qt.CTRL+Qt.Key_I)
|
||||
@ -293,6 +299,9 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
|
||||
ca.setShortcut(QKeySequence.Copy)
|
||||
self.addAction(ca)
|
||||
self.open_history_menu = QMenu()
|
||||
self.clear_recent_history_action = QAction(
|
||||
_('Clear list of recently opened books'), self)
|
||||
self.clear_recent_history_action.triggered.connect(self.clear_recent_history)
|
||||
self.build_recent_menu()
|
||||
self.action_open_ebook.setMenu(self.open_history_menu)
|
||||
self.open_history_menu.triggered[QAction].connect(self.open_recent)
|
||||
@ -301,11 +310,19 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
|
||||
|
||||
self.restore_state()
|
||||
|
||||
def clear_recent_history(self, *args):
|
||||
vprefs.set('viewer_open_history', [])
|
||||
self.build_recent_menu()
|
||||
|
||||
def build_recent_menu(self):
|
||||
m = self.open_history_menu
|
||||
m.clear()
|
||||
recent = vprefs.get('viewer_open_history', [])
|
||||
if recent:
|
||||
m.addAction(self.clear_recent_history_action)
|
||||
m.addSeparator()
|
||||
count = 0
|
||||
for path in vprefs.get('viewer_open_history', []):
|
||||
for path in recent:
|
||||
if count > 9:
|
||||
break
|
||||
if os.path.exists(path):
|
||||
@ -494,12 +511,6 @@ class EbookViewer(MainWindow, Ui_EbookViewer):
|
||||
if self.view.search(text, backwards=backwards):
|
||||
self.scrolled(self.view.scroll_fraction)
|
||||
|
||||
def keyPressEvent(self, event):
|
||||
if event.key() == Qt.Key_Slash:
|
||||
self.search.setFocus(Qt.OtherFocusReason)
|
||||
else:
|
||||
return MainWindow.keyPressEvent(self, event)
|
||||
|
||||
def internal_link_clicked(self, frac):
|
||||
self.history.add(self.pos.value())
|
||||
|
||||
|
@ -92,7 +92,8 @@ class SendEmail(QWidget, Ui_Form):
|
||||
pa = self.preferred_to_address()
|
||||
to_set = pa is not None
|
||||
if self.set_email_settings(to_set):
|
||||
if question_dialog(self, _('OK to proceed?'),
|
||||
opts = smtp_prefs().parse()
|
||||
if not opts.relay_password or question_dialog(self, _('OK to proceed?'),
|
||||
_('This will display your email password on the screen'
|
||||
'. Is it OK to proceed?'), show_copy_button=False):
|
||||
TestEmail(pa, self).exec_()
|
||||
@ -204,10 +205,24 @@ class SendEmail(QWidget, Ui_Form):
|
||||
username = unicode(self.relay_username.text()).strip()
|
||||
password = unicode(self.relay_password.text()).strip()
|
||||
host = unicode(self.relay_host.text()).strip()
|
||||
if host and not (username and password):
|
||||
enc_method = ('TLS' if self.relay_tls.isChecked() else 'SSL'
|
||||
if self.relay_ssl.isChecked() else 'NONE')
|
||||
if host:
|
||||
# Validate input
|
||||
if ((username and not password) or (not username and password)):
|
||||
error_dialog(self, _('Bad configuration'),
|
||||
_('You must set the username and password for '
|
||||
'the mail server.')).exec_()
|
||||
_('You must either set both the username <b>and</b> password for '
|
||||
'the mail server or no username and no password at all.')).exec_()
|
||||
return False
|
||||
if not username and not password and enc_method != 'NONE':
|
||||
error_dialog(self, _('Bad configuration'),
|
||||
_('Please enter a username and password or set'
|
||||
' encryption to None ')).exec_()
|
||||
return False
|
||||
if not (username and password) and not question_dialog(self,
|
||||
_('Are you sure?'),
|
||||
_('No username and password set for mailserver. Most '
|
||||
' mailservers need a username and password. Are you sure?')):
|
||||
return False
|
||||
conf = smtp_prefs()
|
||||
conf.set('from_', from_)
|
||||
@ -215,8 +230,7 @@ class SendEmail(QWidget, Ui_Form):
|
||||
conf.set('relay_port', self.relay_port.value())
|
||||
conf.set('relay_username', username if username else None)
|
||||
conf.set('relay_password', hexlify(password))
|
||||
conf.set('encryption', 'TLS' if self.relay_tls.isChecked() else 'SSL'
|
||||
if self.relay_ssl.isChecked() else 'NONE')
|
||||
conf.set('encryption', enc_method)
|
||||
return True
|
||||
|
||||
|
||||
|
@ -123,14 +123,22 @@ REGEXP_MATCH = 2
|
||||
def _match(query, value, matchkind):
|
||||
if query.startswith('..'):
|
||||
query = query[1:]
|
||||
prefix_match_ok = False
|
||||
sq = query[1:]
|
||||
internal_match_ok = True
|
||||
else:
|
||||
prefix_match_ok = True
|
||||
internal_match_ok = False
|
||||
for t in value:
|
||||
t = icu_lower(t)
|
||||
try: ### ignore regexp exceptions, required because search-ahead tries before typing is finished
|
||||
if (matchkind == EQUALS_MATCH):
|
||||
if prefix_match_ok and query[0] == '.':
|
||||
if internal_match_ok:
|
||||
if query == t:
|
||||
return True
|
||||
comps = [c.strip() for c in t.split('.') if c.strip()]
|
||||
for comp in comps:
|
||||
if sq == comp:
|
||||
return True
|
||||
elif query[0] == '.':
|
||||
if t.startswith(query[1:]):
|
||||
ql = len(query) - 1
|
||||
if (len(t) == ql) or (t[ql:ql+1] == '.'):
|
||||
|
@ -27,7 +27,7 @@ CHECKS = [('invalid_titles', _('Invalid titles'), True, False),
|
||||
('extra_titles', _('Extra titles'), True, False),
|
||||
('invalid_authors', _('Invalid authors'), True, False),
|
||||
('extra_authors', _('Extra authors'), True, False),
|
||||
('missing_formats', _('Missing book formats'), False, False),
|
||||
('missing_formats', _('Missing book formats'), False, True),
|
||||
('extra_formats', _('Extra book formats'), True, False),
|
||||
('extra_files', _('Unknown files in books'), True, False),
|
||||
('missing_covers', _('Missing covers files'), False, True),
|
||||
|
@ -56,7 +56,7 @@ class Tag(object):
|
||||
self.is_hierarchical = False
|
||||
self.is_editable = is_editable
|
||||
self.is_searchable = is_searchable
|
||||
self.id_set = id_set
|
||||
self.id_set = id_set if id_set is not None else set([])
|
||||
self.avg_rating = avg/2.0 if avg is not None else 0
|
||||
self.sort = sort
|
||||
if self.avg_rating > 0:
|
||||
@ -1154,12 +1154,15 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
||||
if notify:
|
||||
self.notify('delete', [id])
|
||||
|
||||
def remove_format(self, index, format, index_is_id=False, notify=True, commit=True):
|
||||
def remove_format(self, index, format, index_is_id=False, notify=True,
|
||||
commit=True, db_only=False):
|
||||
id = index if index_is_id else self.id(index)
|
||||
name = self.conn.get('SELECT name FROM data WHERE book=? AND format=?', (id, format), all=False)
|
||||
if name:
|
||||
path = self.format_abspath(id, format, index_is_id=True)
|
||||
if not db_only:
|
||||
try:
|
||||
path = self.format_abspath(id, format, index_is_id=True)
|
||||
if path:
|
||||
delete_file(path)
|
||||
except:
|
||||
traceback.print_exc()
|
||||
@ -1690,10 +1693,20 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
||||
self.notify('metadata', [id])
|
||||
return books_to_refresh
|
||||
|
||||
def set_metadata(self, id, mi, ignore_errors=False,
|
||||
set_title=True, set_authors=True, commit=True):
|
||||
def set_metadata(self, id, mi, ignore_errors=False, set_title=True,
|
||||
set_authors=True, commit=True, force_changes=False):
|
||||
'''
|
||||
Set metadata for the book `id` from the `Metadata` object `mi`
|
||||
|
||||
Setting force_changes=True will force set_metadata to update fields even
|
||||
if mi contains empty values. In this case, 'None' is distinguished from
|
||||
'empty'. If mi.XXX is None, the XXX is not replaced, otherwise it is.
|
||||
The tags, identifiers, and cover attributes are special cases. Tags and
|
||||
identifiers cannot be set to None so then will always be replaced if
|
||||
force_changes is true. You must ensure that mi contains the values you
|
||||
want the book to have. Covers are always changed if a new cover is
|
||||
provided, but are never deleted. Also note that force_changes has no
|
||||
effect on setting title or authors.
|
||||
'''
|
||||
if callable(getattr(mi, 'to_book_metadata', None)):
|
||||
# Handle code passing in a OPF object instead of a Metadata object
|
||||
@ -1707,6 +1720,11 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
||||
traceback.print_exc()
|
||||
else:
|
||||
raise
|
||||
|
||||
def should_replace_field(attr):
|
||||
return (force_changes and (mi.get(attr, None) is not None)) or \
|
||||
not mi.is_null(attr)
|
||||
|
||||
path_changed = False
|
||||
if set_title and mi.title:
|
||||
self._set_title(id, mi.title)
|
||||
@ -1721,16 +1739,21 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
||||
path_changed = True
|
||||
if path_changed:
|
||||
self.set_path(id, index_is_id=True)
|
||||
if mi.author_sort:
|
||||
|
||||
if should_replace_field('author_sort'):
|
||||
doit(self.set_author_sort, id, mi.author_sort, notify=False,
|
||||
commit=False)
|
||||
if mi.publisher:
|
||||
if should_replace_field('publisher'):
|
||||
doit(self.set_publisher, id, mi.publisher, notify=False,
|
||||
commit=False)
|
||||
if mi.rating:
|
||||
|
||||
# Setting rating to zero is acceptable.
|
||||
if mi.rating is not None:
|
||||
doit(self.set_rating, id, mi.rating, notify=False, commit=False)
|
||||
if mi.series:
|
||||
if should_replace_field('series'):
|
||||
doit(self.set_series, id, mi.series, notify=False, commit=False)
|
||||
|
||||
# force_changes has no effect on cover manipulation
|
||||
if mi.cover_data[1] is not None:
|
||||
doit(self.set_cover, id, mi.cover_data[1], commit=False)
|
||||
elif mi.cover is not None:
|
||||
@ -1739,21 +1762,30 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
||||
raw = f.read()
|
||||
if raw:
|
||||
doit(self.set_cover, id, raw, commit=False)
|
||||
if mi.tags:
|
||||
|
||||
# if force_changes is true, tags are always replaced because the
|
||||
# attribute cannot be set to None.
|
||||
if should_replace_field('tags'):
|
||||
doit(self.set_tags, id, mi.tags, notify=False, commit=False)
|
||||
if mi.comments:
|
||||
|
||||
if should_replace_field('comments'):
|
||||
doit(self.set_comment, id, mi.comments, notify=False, commit=False)
|
||||
if mi.series_index:
|
||||
|
||||
# Setting series_index to zero is acceptable
|
||||
if mi.series_index is not None:
|
||||
doit(self.set_series_index, id, mi.series_index, notify=False,
|
||||
commit=False)
|
||||
if mi.pubdate:
|
||||
if should_replace_field('pubdate'):
|
||||
doit(self.set_pubdate, id, mi.pubdate, notify=False, commit=False)
|
||||
if getattr(mi, 'timestamp', None) is not None:
|
||||
doit(self.set_timestamp, id, mi.timestamp, notify=False,
|
||||
commit=False)
|
||||
|
||||
# identifiers will always be replaced if force_changes is True
|
||||
mi_idents = mi.get_identifiers()
|
||||
if mi_idents:
|
||||
if force_changes:
|
||||
self.set_identifiers(id, mi_idents, notify=False, commit=False)
|
||||
elif mi_idents:
|
||||
identifiers = self.get_identifiers(id, index_is_id=True)
|
||||
for key, val in mi_idents.iteritems():
|
||||
if val and val.strip(): # Don't delete an existing identifier
|
||||
@ -1765,9 +1797,9 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
||||
for key in user_mi.iterkeys():
|
||||
if key in self.field_metadata and \
|
||||
user_mi[key]['datatype'] == self.field_metadata[key]['datatype']:
|
||||
doit(self.set_custom, id,
|
||||
val=mi.get(key),
|
||||
extra=mi.get_extra(key),
|
||||
val = mi.get(key, None)
|
||||
if force_changes or val is not None:
|
||||
doit(self.set_custom, id, val=val, extra=mi.get_extra(key),
|
||||
label=user_mi[key]['label'], commit=False)
|
||||
if commit:
|
||||
self.conn.commit()
|
||||
@ -2358,6 +2390,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
||||
@param tags: list of strings
|
||||
@param append: If True existing tags are not removed
|
||||
'''
|
||||
if not tags:
|
||||
tags = []
|
||||
if not append:
|
||||
self.conn.execute('DELETE FROM books_tags_link WHERE book=?', (id,))
|
||||
self.conn.execute('''DELETE FROM tags WHERE (SELECT COUNT(id)
|
||||
@ -2508,6 +2542,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
||||
self.notify('metadata', [id])
|
||||
|
||||
def set_rating(self, id, rating, notify=True, commit=True):
|
||||
if not rating:
|
||||
rating = 0
|
||||
rating = int(rating)
|
||||
self.conn.execute('DELETE FROM books_ratings_link WHERE book=?',(id,))
|
||||
rat = self.conn.get('SELECT id FROM ratings WHERE rating=?', (rating,), all=False)
|
||||
@ -2522,7 +2558,10 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
||||
|
||||
def set_comment(self, id, text, notify=True, commit=True):
|
||||
self.conn.execute('DELETE FROM comments WHERE book=?', (id,))
|
||||
if text:
|
||||
self.conn.execute('INSERT INTO comments(book,text) VALUES (?,?)', (id, text))
|
||||
else:
|
||||
text = ''
|
||||
if commit:
|
||||
self.conn.commit()
|
||||
self.data.set(id, self.FIELD_MAP['comments'], text, row_is_id=True)
|
||||
@ -2531,6 +2570,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
||||
self.notify('metadata', [id])
|
||||
|
||||
def set_author_sort(self, id, sort, notify=True, commit=True):
|
||||
if not sort:
|
||||
sort = ''
|
||||
self.conn.execute('UPDATE books SET author_sort=? WHERE id=?', (sort, id))
|
||||
self.dirtied([id], commit=False)
|
||||
if commit:
|
||||
@ -2602,6 +2643,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
||||
|
||||
def set_identifiers(self, id_, identifiers, notify=True, commit=True):
|
||||
cleaned = {}
|
||||
if not identifiers:
|
||||
identifiers = {}
|
||||
for typ, val in identifiers.iteritems():
|
||||
typ, val = self._clean_identifier(typ, val)
|
||||
if val:
|
||||
|
@ -12,7 +12,7 @@ import cherrypy
|
||||
|
||||
from calibre.constants import filesystem_encoding
|
||||
from calibre import isbytestring, force_unicode, fit_image, \
|
||||
prepare_string_for_xml as xml
|
||||
prepare_string_for_xml
|
||||
from calibre.utils.ordered_dict import OrderedDict
|
||||
from calibre.utils.filenames import ascii_filename
|
||||
from calibre.utils.config import prefs, tweaks
|
||||
@ -23,6 +23,10 @@ from calibre.library.server import custom_fields_to_display
|
||||
from calibre.library.field_metadata import category_icon_map
|
||||
from calibre.library.server.utils import quote, unquote
|
||||
|
||||
def xml(*args, **kwargs):
|
||||
ans = prepare_string_for_xml(*args, **kwargs)
|
||||
return ans.replace(''', ''')
|
||||
|
||||
def render_book_list(ids, prefix, suffix=''): # {{{
|
||||
pages = []
|
||||
num = len(ids)
|
||||
|
@ -508,9 +508,9 @@ You have two choices:
|
||||
1. Create a patch by hacking on |app| and send it to me for review and inclusion. See `Development <http://calibre-ebook.com/get-involved>`_.
|
||||
2. `Open a ticket <http://bugs.calibre-ebook.com/newticket>`_ (you have to register and login first). Remember that |app| development is done by volunteers, so if you get no response to your feature request, it means no one feels like implementing it.
|
||||
|
||||
Can I include |app| on a CD to be distributed with my product/magazine?
|
||||
How is |app| licensed?
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|app| is licensed under the GNU General Public License v3 (an open source license). This means that you are free to redistribute |app| as long as you make the source code available. So if you want to put |app| on a CD with your product, you must also put the |app| source code on the CD. The source code is available for download `from googlecode <http://code.google.com/p/calibre-ebook/downloads/list>`_.
|
||||
|app| is licensed under the GNU General Public License v3 (an open source license). This means that you are free to redistribute |app| as long as you make the source code available. So if you want to put |app| on a CD with your product, you must also put the |app| source code on the CD. The source code is available for download `from googlecode <http://code.google.com/p/calibre-ebook/downloads/list>`_. You are free to use the results of conversions from |app| however you want. You cannot use code or libraries from |app| in your software without making your software open source. For details, see `The GNU GPL v3 <http://www.gnu.org/licenses/gpl.html>`_.
|
||||
|
||||
How do I run calibre from my USB stick?
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
@ -105,3 +105,13 @@ After creating the saved search, you can use it as a restriction.
|
||||
.. image:: images/sg_restrict2.jpg
|
||||
:align: center
|
||||
|
||||
Useful Template Functions
|
||||
-------------------------
|
||||
|
||||
You might want to use the genre information in a template, such as with save to disk or send to device. The question might then be "How do I get the outermost genre name or names?" An |app| template function, subitems, is provided to make doing this easier.
|
||||
|
||||
For example, assume you want to add the outermost genre level to the save-to-disk template to make genre folders, as in "History/The Gathering Storm - Churchill, Winston". To do this, you must extract the first level of the hierarchy and add it to the front along with a slash to indicate that it should make a folder. The template below accomplishes this::
|
||||
|
||||
{#genre:subitems(0,1)||/}{title} - {authors}
|
||||
|
||||
See :ref:`The |app| template language <templatelangcalibre>` for more information about templates and the subitems function.
|
@ -129,7 +129,7 @@ The functions available are:
|
||||
* ``switch(pattern, value, pattern, value, ..., else_value)`` -- for each ``pattern, value`` pair, checks if the field matches the regular expression ``pattern`` and if so, returns that ``value``. If no ``pattern`` matches, then ``else_value`` is returned. You can have as many ``pattern, value`` pairs as you want.
|
||||
* ``lookup(pattern, field, pattern, field, ..., else_field)`` -- like switch, except the arguments are field (metadata) names, not text. The value of the appropriate field will be fetched and used. Note that because composite columns are fields, you can use this function in one composite field to use the value of some other composite field. This is extremely useful when constructing variable save paths (more later).
|
||||
* ``select(key)`` -- interpret the field as a comma-separated list of items, with the items being of the form "id:value". Find the pair with the id equal to key, and return the corresponding value. This function is particularly useful for extracting a value such as an isbn from the set of identifiers for a book.
|
||||
* ``subitems(val, start_index, end_index)`` -- This function is used to break apart lists of tag-like hierarchical items such as genres. It interprets the value as a comma-separated list of tag-like items, where each item is a period-separated list. Returns a new list made by first finding all the period-separated tag-like items, then for each such item extracting the `start_index` th to the `end_index` th components, then combining the results back together. The first component in a period-separated list has an index of zero. If an index is negative, then it counts from the end of the list. As a special case, an end_index of zero is assumed to be the length of the list. Examples::
|
||||
* ``subitems(val, start_index, end_index)`` -- This function is used to break apart lists of tag-like hierarchical items such as genres. It interprets the value as a comma-separated list of tag-like items, where each item is a period-separated list. Returns a new list made by first finding all the period-separated tag-like items, then for each such item extracting the components from `start_index` to `end_index`, then combining the results back together. The first component in a period-separated list has an index of zero. If an index is negative, then it counts from the end of the list. As a special case, an end_index of zero is assumed to be the length of the list. Examples::
|
||||
|
||||
Assuming a #genre column containing "A.B.C":
|
||||
{#genre:subitems(0,1)} returns "A"
|
||||
@ -139,7 +139,7 @@ The functions available are:
|
||||
{#genre:subitems(0,1)} returns "A, D"
|
||||
{#genre:subitems(0,2)} returns "A.B, D.E"
|
||||
|
||||
* ``sublist(val, start_index, end_index, separator)`` -- interpret the value as a list of items separated by `separator`, returning a new list made from the `start_index` th to the `end_index` th item. The first item is number zero. If an index is negative, then it counts from the end of the list. As a special case, an end_index of zero is assumed to be the length of the list. Examples assuming that the tags column (which is comma-separated) contains "A, B ,C"::
|
||||
* ``sublist(val, start_index, end_index, separator)`` -- interpret the value as a list of items separated by `separator`, returning a new list made from the items from `start_index` to `end_index`. The first item is number zero. If an index is negative, then it counts from the end of the list. As a special case, an end_index of zero is assumed to be the length of the list. Examples assuming that the tags column (which is comma-separated) contains "A, B ,C"::
|
||||
|
||||
{tags:sublist(0,1,\,)} returns "A"
|
||||
{tags:sublist(-1,0,\,)} returns "C"
|
||||
|
Loading…
x
Reference in New Issue
Block a user