Merge from trunk

This commit is contained in:
Charles Haley 2010-10-13 12:23:44 +01:00
commit 1caa991f6d
15 changed files with 395 additions and 232 deletions

View File

@ -82,6 +82,10 @@ body {
-moz-border-radius: 5px;
-webkit-border-radius: 5px;
text-shadow: #27211b 1px 1px 1px;
-moz-box-shadow: 5px 5px 5px #222;
-webkit-box-shadow: 5px 5px 5px #222;
box-shadow: 5px 5px 5px #222;
}
#nav-container {
@ -208,6 +212,7 @@ h2.library_name {
-moz-box-shadow: 5px 5px 5px #ccc;
-webkit-box-shadow: 5px 5px 5px #ccc;
box-shadow: 5px 5px 5px #ccc;
text-shadow: #27211b 1px 1px 1px;
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 631 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.3 KiB

View File

@ -1,7 +1,5 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>'
__copyright__ = '2009-2010, Darko Miletic <darko.miletic at gmail.com>'
'''
www.business-standard.com
'''
@ -28,30 +26,22 @@ class BusinessStandard(BasicNewsRecipe):
,'publisher' : publisher
,'linearize_tables': True
}
remove_attributes=['style']
remove_tags = [dict(name=['object','link','script','iframe'])]
keep_only_tags=[dict(attrs={'class':'TableClas'})]
remove_tags = [
dict(name=['object','link','script','iframe','base','meta'])
,dict(attrs={'class':'rightDiv2'})
,dict(name='table',attrs={'width':'450px'})
]
remove_attributes=['width','height']
feeds = [
(u'News Now' , u'http://feeds.business-standard.com/News-Now.xml' )
,(u'Banking & finance' , u'http://feeds.business-standard.com/Banking-Finance-All.xml' )
,(u'Companies & Industry', u'http://feeds.business-standard.com/Companies-Industry-All.xml')
,(u'Economy & Policy' , u'http://feeds.business-standard.com/Economy-Policy-All.xml' )
,(u'Tech World' , u'http://feeds.business-standard.com/Tech-World-All.xml' )
,(u'Life & Leisure' , u'http://feeds.business-standard.com/Life-Leisure-All.xml' )
,(u'Markets & Investing' , u'http://feeds.business-standard.com/Markets-Investing-All.xml' )
,(u'Management & Mktg' , u'http://feeds.business-standard.com/Management-Mktg-All.xml' )
,(u'Automobiles' , u'http://feeds.business-standard.com/Automobiles.xml' )
,(u'Aviation' , u'http://feeds.business-standard.com/Aviation.xml' )
(u'News Now' , u'http://feeds.business-standard.com/rss/online.xml')
,(u'Banking & finance' , u'http://feeds.business-standard.com/rss/3_0.xml' )
,(u'Companies & Industry', u'http://feeds.business-standard.com/rss/2_0.xml' )
,(u'Economy & Policy' , u'http://feeds.business-standard.com/rss/4_0.xml' )
,(u'Tech World' , u'http://feeds.business-standard.com/rss/8_0.xml' )
,(u'Life & Leisure' , u'http://feeds.business-standard.com/rss/6_0.xml' )
,(u'Markets & Investing' , u'http://feeds.business-standard.com/rss/1_0.xml' )
,(u'Management & Mktg' , u'http://feeds.business-standard.com/rss/7_0.xml' )
,(u'Opinion' , u'http://feeds.business-standard.com/rss/5_0.xml' )
]
def print_version(self, url):
    # Everything after the last 'autono=' in the URL is the article id.
    article_id = url.rpartition('autono=')[2]
    # The print page needs tp=on only for URLs carrying the bKeyFlag marker.
    marker = 'on' if 'bKeyFlag=' in url else ''
    return ('http://www.business-standard.com/india/printpage.php?autono='
            + article_id + '&tp=' + marker)
def get_article_url(self, article):
    # The feed entry's guid field holds the canonical article URL.
    return article.get('guid')

View File

@ -1,7 +1,5 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>'
__copyright__ = '2009-2010, Darko Miletic <darko.miletic at gmail.com>'
'''
emol.com
'''
@ -19,43 +17,34 @@ class ElMercurio(BasicNewsRecipe):
no_stylesheets = True
use_embedded_content = False
encoding = 'cp1252'
cover_url = 'http://www.emol.com/especiales/logo_emol/logo_emol.gif'
masthead_url = 'http://www.emol.com/especiales/logo_emol/logo_emol.gif'
remove_javascript = True
use_embedded_content = False
language = 'es'
html2lrf_options = [
'--comment', description
, '--category', category
, '--publisher', publisher
]
html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"'
keep_only_tags = [
dict(name='div', attrs={'class':'despliegue-txt_750px'})
,dict(name='div', attrs={'id':'div_cuerpo_participa'})
]
remove_tags = [
dict(name='div', attrs={'class':'contenedor_despliegue-col-left300'})
,dict(name='div', attrs={'id':['div_centro_dn_opc','div_cabezera','div_secciones','div_contenidos','div_pie','nav']})
]
conversion_options = {
'comment' : description
, 'tags' : category
, 'publisher' : publisher
, 'language' : language
}
keep_only_tags = [dict(name='div', attrs={'id':['cont_iz_titulobajada','cont_iz_creditos_1_a','cont_iz_cuerpo']})]
remove_tags = [dict(name='div', attrs={'id':'cont_iz_cuerpo_relacionados'})]
remove_attributes = ['height','width']
feeds = [
(u'Noticias de ultima hora', u'http://www.emol.com/rss20/rss.asp?canal=0')
,(u'Nacional', u'http://www.emol.com/rss20/rss.asp?canal=1')
,(u'Mundo', u'http://www.emol.com/rss20/rss.asp?canal=2')
,(u'Deportes', u'http://www.emol.com/rss20/rss.asp?canal=4')
,(u'Magazine', u'http://www.emol.com/rss20/rss.asp?canal=6')
,(u'Tecnologia', u'http://www.emol.com/rss20/rss.asp?canal=5')
,(u'La Musica', u'http://www.emol.com/rss20/rss.asp?canal=7')
(u'Noticias de ultima hora', u'http://rss.emol.com/rss.asp?canal=0')
,(u'Nacional', u'http://rss.emol.com/rss.asp?canal=1')
,(u'Mundo', u'http://rss.emol.com/rss.asp?canal=2')
,(u'Deportes', u'http://rss.emol.com/rss.asp?canal=4')
,(u'Magazine', u'http://rss.emol.com/rss.asp?canal=6')
,(u'Tecnologia', u'http://rss.emol.com/rss.asp?canal=5')
]
def preprocess_html(self, soup):
    # Declare the Chilean-Spanish content language for downstream converters.
    lang_meta = '<meta http-equiv="Content-Language" content="es-CL"/>'
    soup.head.insert(0, lang_meta)
    # Inline style attributes fight with the recipe CSS; strip them all.
    for tag in soup.findAll(style=True):
        del tag['style']
    return soup
language = 'es'

View File

@ -0,0 +1,74 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
ft.com
'''
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe
class FinancialTimes(BasicNewsRecipe):
    '''Download the UK printed edition of ft.com (subscription required).'''
    title = u'Financial Times - UK printed edition'
    __author__ = 'Darko Miletic'
    description = 'Financial world news'
    oldest_article = 2
    language = 'en_GB'
    max_articles_per_feed = 250
    no_stylesheets = True
    use_embedded_content = False
    needs_subscription = True
    encoding = 'utf8'
    simultaneous_downloads = 1
    delay = 1
    LOGIN = 'https://registration.ft.com/registration/barrier/login'
    INDEX = 'http://www.ft.com/uk-edition'
    PREFIX = 'http://www.ft.com'

    keep_only_tags = [dict(name='div', attrs={'id': 'cont'})]
    remove_tags_after = dict(name='p', attrs={'class': 'copyright'})
    remove_tags = [
        dict(name='div', attrs={'id': 'floating-con'}),
        dict(name=['meta', 'iframe', 'base', 'object', 'embed', 'link']),
    ]
    remove_attributes = ['width', 'height', 'lang']

    extra_css = """
    body{font-family:Arial,Helvetica,sans-serif;}
    h2{font-size:large;}
    .ft-story-header{font-size:xx-small;}
    .ft-story-body{font-size:small;}
    a{color:#003399;}
    .container{font-size:x-small;}
    h3{font-size:x-small;color:#003399;}
    .copyright{font-size: x-small}
    """

    def get_browser(self):
        # Authenticate through the FT barrier page when credentials are set.
        br = BasicNewsRecipe.get_browser()
        if self.username is not None and self.password is not None:
            br.open(self.LOGIN)
            br.select_form(name='loginForm')
            br['username'] = self.username
            br['password'] = self.password
            br.submit()
        return br

    def parse_index(self):
        # Every anchor inside the 'wide' container of the UK-edition index
        # page is one article of today's paper.
        found = []
        soup = self.index_to_soup(self.INDEX)
        container = soup.find('div', attrs={'class': 'wide'})
        if container:
            for anchor in container.findAll('a', href=True):
                found.append({
                    'title': self.tag_to_string(anchor),
                    'date': strftime(self.timefmt),
                    'url': self.PREFIX + anchor['href'],
                    'description': '',
                })
        return [('FT UK edition', found)]

    def preprocess_html(self, soup):
        # Normalize img tags for Adobe Digital Editions compatibility.
        return self.adeify_images(soup)

View File

@ -0,0 +1,35 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = u'2010, Tomasz Dlugosz <tomek3d@gmail.com>'
'''
frazpc.pl
'''
from calibre.web.feeds.news import BasicNewsRecipe
import re
class FrazPC(BasicNewsRecipe):
    '''Recipe for the Polish technology vortal frazpc.pl.'''
    title = u'frazpc.pl'
    publisher = u'frazpc.pl'
    description = u'Tw\xf3j Vortal Technologiczny'
    language = 'pl'
    __author__ = u'Tomasz D\u0142ugosz'
    oldest_article = 7
    max_articles_per_feed = 100
    use_embedded_content = False
    no_stylesheets = True

    feeds = [
        (u'Aktualno\u015bci', u'http://www.frazpc.pl/feed'),
        (u'Recenzje', u'http://www.frazpc.pl/kat/recenzje-2/feed'),
    ]

    keep_only_tags = [dict(name='div', attrs={'id':'FRAZ_CONTENT'})]
    remove_tags = [dict(name='p', attrs={'class':'gray tagsP fs11'})]
    remove_attributes = ['width', 'height']

    # Rewrite the raw HTML before parsing: mark the article container,
    # absolutize forum links and drop the comment links.
    preprocess_regexps = [
        (re.compile(pattern, re.IGNORECASE | re.DOTALL), handler)
        for pattern, handler in [
            (r'<div id="post-[0-9]*"', lambda match: '<div id="FRAZ_CONTENT"'),
            (r'href="/f/news/', lambda match: 'href="http://www.frazpc.pl/f/news/'),
            (r' &nbsp; <a href="http://www.frazpc.pl/[^>]*?">(Skomentuj|Komentarz(e)?\([0-9]*\))</a>&nbsp; \|', lambda match: ''),
        ]
    ]

View File

@ -1,6 +1,6 @@
__license__ = 'GPL v3'
__copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>'
__copyright__ = '2009-2010, Darko Miletic <darko.miletic at gmail.com>'
'''
miamiherald.com
'''
@ -15,13 +15,11 @@ class TheMiamiHerald(BasicNewsRecipe):
max_articles_per_feed = 100
publisher = u'The Miami Herald'
category = u'miami herald, weather, dolphins, news, miami news, local news, miamiherald, miami newspaper, miamiherald.com, miami, the miami herald, broward, miami-dade'
language = 'en'
language = 'en'
no_stylesheets = True
use_embedded_content = False
encoding = 'cp1252'
remove_javascript = True
extra_css = '''
h1{font-family:Arial,Helvetica,sans-serif; font-size:large; color:#1A272F; }
.subheadline{font-family:Arial,Helvetica,sans-serif; font-size:30%; color: #666666;}
@ -33,50 +31,35 @@ class TheMiamiHerald(BasicNewsRecipe):
.imageCaption{font-family:Arial,Helvetica,sans-serif; font-size:30%; color:#666666; }
'''
keep_only_tags = [dict(name='div', attrs={'id':['storyBody','storyPhotoContentArea']}),
]
conversion_options = {
'comment' : description
, 'tags' : category
, 'publisher' : publisher
, 'language' : language
}
keep_only_tags = [dict(name='div', attrs={'id':'wide'}),]
remove_tags = [dict(name=['object','link','embed']),
dict(name='div', attrs={'class':["imageBuyButton","shareLinksArea","storyTools","spill_navigation pagination","circPromoArea","storyTools_footer","storyYahooContentMatch"]}) ,
dict(name='div', attrs={'id':["pluck","mlt","storyAssets"]}) ]
remove_tags = [dict(name=['object','link','embed','iframe','meta'])]
feeds = [
(u'Breaking News' , u'http://www.miamiherald.com/416/index.xml' )
,(u'Miami-Dade' , u'http://www.miamiherald.com/460/index.xml' )
,(u'Broward' , u'http://www.miamiherald.com/467/index.xml' )
,(u'Florida Keys' , u'http://www.miamiherald.com/505/index.xml' )
,(u'Florida' , u'http://www.miamiherald.com/569/index.xml' )
,(u'Nation' , u'http://www.miamiherald.com/509/index.xml' )
,(u'World' , u'http://www.miamiherald.com/578/index.xml' )
,(u'Americas' , u'http://www.miamiherald.com/579/index.xml' )
,(u'Cuba' , u'http://www.miamiherald.com/581/index.xml' )
,(u'Haiti' , u'http://www.miamiherald.com/582/index.xml' )
,(u'Politics' , u'http://www.miamiherald.com/515/index.xml' )
,(u'Education' , u'http://www.miamiherald.com/295/index.xml' )
,(u'Environment' , u'http://www.miamiherald.com/573/index.xml' )
(u'Breaking News' , u'http://www.miamiherald.com/news/breaking-news/index.xml' )
,(u'Miami-Dade' , u'http://www.miamiherald.com/news/miami-dade/index.xml' )
,(u'Broward' , u'http://www.miamiherald.com/news/broward/index.xml' )
,(u'Florida Keys' , u'http://www.miamiherald.com/news/florida-keys/index.xml' )
,(u'Florida' , u'http://www.miamiherald.com/news/florida/index.xml' )
,(u'Nation' , u'http://www.miamiherald.com/news/nation/index.xml' )
,(u'World' , u'http://www.miamiherald.com/news/world/index.xml' )
,(u'Americas' , u'http://www.miamiherald.com/news/americas/index.xml' )
,(u'Cuba' , u'http://www.miamiherald.com/news/americas/cuba/index.xml' )
,(u'Haiti' , u'http://www.miamiherald.com/news/americas/haiti/index.xml' )
,(u'Politics' , u'http://www.miamiherald.com/news/politics/index.xml' )
,(u'Education' , u'http://www.miamiherald.com/news/education/index.xml' )
,(u'Environment' , u'http://www.miamiherald.com/news/environment/index.xml' )
]
def get_article_url(self, article):
    # Resolve the feed guid to the full-story URL: the feed entry can point
    # at a teaser page that contains a "Full Story" link to the real article.
    # Fixed: removed a leftover debug `print ans` (it duplicated self.log and
    # wrote raw output to stdout) and narrowed the bare except clause.
    ans = article.get('guid', None)
    try:
        self.log('Looking for full story link in', ans)
        soup = self.index_to_soup(ans)
        x = soup.find(text="Full Story")
        if x is not None:
            a = x.parent
            if a and a.has_key('href'):
                ans = 'http://www.miamiherald.com'+a['href']
                self.log('Found full story link', ans)
    except Exception:
        # Best effort: on any fetch/parse problem fall back to the guid.
        pass
    return ans
def print_version(self, url):
    # Insert a '/v-print' marker before the last two path segments:
    #   .../<id>/<slug>  ->  .../v-print/<id>/<slug>
    remainder, _, leaf = url.rpartition('/')
    stem, _, parent_seg = remainder.rpartition('/')
    return stem + '/v-print/' + parent_seg + '/' + leaf

View File

@ -0,0 +1,18 @@
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1286819935(BasicNewsRecipe):
    # Auto-generated class name kept as-is for backward compatibility.
    title = u'Novaya Gazeta'
    language = 'ru'
    __author__ = 'muwa'
    oldest_article = 7
    max_articles_per_feed = 100
    no_stylesheets = True
    remove_attributes = ['style']
    conversion_options = {'linearize_tables': True}

    feeds = [(u'Articles', u'http://www.novayagazeta.ru/rss_number.xml')]

    def print_version(self, url):
        # The site serves a printer-friendly page via a query flag.
        return '%s?print=true' % url

View File

@ -0,0 +1,82 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__author__ = 'Tony Stegall'
__copyright__ = '2010, Tony Stegall or Tonythebookworm on mobileread.com'
__version__ = 'v1.01'
__date__ = '07, October 2010'
__description__ = 'Rolling Stones Mag'
'''
http://www.rollingstone.com
'''
from calibre.web.feeds.news import BasicNewsRecipe
class RollingStones(BasicNewsRecipe):
    '''Recipe for rollingstone.com news, blogs and reviews.'''
    __author__ = 'Tony Stegall'
    description = 'Rolling Stones Mag'
    cover_url = 'http://gallery.celebritypro.com/data/media/648/kid-rock-rolling-stone-cover.jpg'
    masthead_url = 'http://origin.myfonts.com/s/ec/cc-200804/Rolling_Stone-logo.gif'
    title = 'Rolling Stones Mag'
    category = 'Music Reviews, Movie Reviews, entertainment news'
    language = 'en'
    timefmt = '[%a, %d %b, %Y]'
    oldest_article = 15
    max_articles_per_feed = 25
    use_embedded_content = False
    no_stylesheets = True
    remove_javascript = True

    #####################################################################################
    # cleanup section                                                                   #
    #####################################################################################
    keep_only_tags = [
        dict(name='div', attrs={'class':['c65l']}),
        dict(name='div', attrs={'id':['col1']}),
    ]
    remove_tags = [
        dict(name='div', attrs={'class': ['storyActions upper','storyActions lowerArticleNav']}),
        dict(name='div', attrs={'id': ['comments','related']}),
    ]

    feeds = [
        (u'News', u'http://www.rollingstone.com/siteServices/rss/allNews'),
        (u'Blogs', u'http://www.rollingstone.com/siteServices/rss/allBlogs'),
        (u'Movie Reviews', u'http://www.rollingstone.com/siteServices/rss/movieReviews'),
        (u'Album Reviews', u'http://www.rollingstone.com/siteServices/rss/albumReviews'),
        (u'Song Reviews', u'http://www.rollingstone.com/siteServices/rss/songReviews'),
    ]

    def get_article_url(self, article):
        # The feed guid carries the canonical article URL.
        return article.get('guid', None)

    def append_page(self, soup, appendtag, position):
        '''
        Some of the articles are multipage, so recursively fetch pages
        linked via the <li class="next"> pager and inline them.
        '''
        pager = soup.find('li', attrs={'class':'next'})
        if not pager:
            return
        nexturl = pager.a['href']
        soup2 = self.index_to_soup(nexturl)
        texttag = soup2.find('div', attrs={'id':'storyTextContainer'})
        # Fixed: a continuation page without the expected container used to
        # raise AttributeError on texttag.findAll; skip such pages instead.
        if texttag is None:
            return
        for it in texttag.findAll(style=True):
            del it['style']
        newpos = len(texttag.contents)
        self.append_page(soup2, texttag, newpos)
        texttag.extract()
        appendtag.insert(position, texttag)

View File

@ -94,7 +94,7 @@ class PageProcessor(list):
from calibre.utils.magick import PixelWand
for i, wand in enumerate(self.pages):
pw = PixelWand()
pw.color = 'white'
pw.color = '#ffffff'
wand.set_border_color(pw)
if self.rotate:

View File

@ -29,7 +29,7 @@ class TagDelegate(QItemDelegate): # {{{
def paint(self, painter, option, index):
item = index.internalPointer()
if item.type != TagTreeItem.TAG:
if True or item.type != TagTreeItem.TAG:
QItemDelegate.paint(self, painter, option, index)
return
r = option.rect
@ -89,7 +89,8 @@ class TagsView(QTreeView): # {{{
self.hidden_categories = config['tag_browser_hidden_categories']
self._model = TagsModel(db, parent=self,
hidden_categories=self.hidden_categories,
search_restriction=None)
search_restriction=None,
drag_drop_finished=self.drag_drop_finished)
self.sort_by = sort_by
self.tag_match = tag_match
self.db = db
@ -109,103 +110,6 @@ class TagsView(QTreeView): # {{{
def database_changed(self, event, ids):
self.refresh_required.emit()
def dragEnterEvent(self, event):
md = event.mimeData()
if md.hasFormat("application/calibre+from_library"):
event.setDropAction(Qt.CopyAction)
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
allowed = False
idx = self.indexAt(event.pos())
m = self.model()
p = m.parent(idx)
if idx.isValid() and p.isValid():
item = m.data(p, Qt.UserRole)
fm = self.db.metadata_for_field(item.category_key)
if item.category_key in \
('tags', 'series', 'authors', 'rating', 'publisher') or\
(fm['is_custom'] and \
fm['datatype'] in ['text', 'rating', 'series']):
allowed = True
if allowed:
event.acceptProposedAction()
else:
event.ignore()
def dropEvent(self, event):
idx = self.indexAt(event.pos())
m = self.model()
p = m.parent(idx)
if idx.isValid() and p.isValid():
item = m.data(p, Qt.UserRole)
if item.type == TagTreeItem.CATEGORY:
fm = self.db.metadata_for_field(item.category_key)
if item.category_key in \
('tags', 'series', 'authors', 'rating', 'publisher') or\
(fm['is_custom'] and \
fm['datatype'] in ['text', 'rating', 'series']):
child = m.data(idx, Qt.UserRole)
md = event.mimeData()
mime = 'application/calibre+from_library'
ids = list(map(int, str(md.data(mime)).split()))
self.handle_drop(item, child, ids)
event.accept()
return
event.ignore()
def handle_drop(self, parent, child, ids):
# print 'Dropped ids:', ids, parent.category_key, child.tag.name
key = parent.category_key
if (key == 'authors' and len(ids) >= 5):
if not confirm('<p>'+_('Changing the authors for several books can '
'take a while. Are you sure?')
+'</p>', 'tag_browser_drop_authors', self):
return
elif len(ids) > 15:
if not confirm('<p>'+_('Changing the metadata for that many books '
'can take a while. Are you sure?')
+'</p>', 'tag_browser_many_changes', self):
return
fm = self.db.metadata_for_field(key)
is_multiple = fm['is_multiple']
val = child.tag.name
for id in ids:
mi = self.db.get_metadata(id, index_is_id=True)
# Prepare to ignore the author, unless it is changed. Title is
# always ignored -- see the call to set_metadata
set_authors = False
# Author_sort cannot change explicitly. Changing the author might
# change it.
mi.author_sort = None # Never will change by itself.
if key == 'authors':
mi.authors = [val]
set_authors=True
elif fm['datatype'] == 'rating':
mi.set(key, len(val) * 2)
elif fm['is_custom'] and fm['datatype'] == 'series':
mi.set(key, val, extra=1.0)
elif is_multiple:
new_val = mi.get(key, [])
if val in new_val:
# Fortunately, only one field can change, so the continue
# won't break anything
continue
new_val.append(val)
mi.set(key, new_val)
else:
mi.set(key, val)
self.db.set_metadata(id, mi, set_title=False,
set_authors=set_authors, commit=False)
self.db.commit()
self.drag_drop_finished.emit(ids)
@property
def match_all(self):
return self.tag_match and self.tag_match.currentIndex() > 0
@ -374,7 +278,8 @@ class TagsView(QTreeView): # {{{
try:
self._model = TagsModel(self.db, parent=self,
hidden_categories=self.hidden_categories,
search_restriction=self.search_restriction)
search_restriction=self.search_restriction,
drag_drop_finished=self.drag_drop_finished)
self.setModel(self._model)
except:
# The DB must be gone. Set the model to None and hope that someone
@ -469,7 +374,8 @@ class TagTreeItem(object): # {{{
class TagsModel(QAbstractItemModel): # {{{
def __init__(self, db, parent, hidden_categories=None, search_restriction=None):
def __init__(self, db, parent, hidden_categories=None,
search_restriction=None, drag_drop_finished=None):
QAbstractItemModel.__init__(self, parent)
# must do this here because 'QPixmap: Must construct a QApplication
@ -487,6 +393,7 @@ class TagsModel(QAbstractItemModel): # {{{
':user' : QIcon(I('drawer.png')),
'search' : QIcon(I('search.png'))})
self.categories_with_ratings = ['authors', 'series', 'publisher', 'tags']
self.drag_drop_finished = drag_drop_finished
self.icon_state_map = [None, QIcon(I('plus.png')), QIcon(I('minus.png'))]
self.db = db
@ -519,6 +426,82 @@ class TagsModel(QAbstractItemModel): # {{{
tag.avg_rating = None
TagTreeItem(parent=c, data=tag, icon_map=self.icon_state_map)
def mimeTypes(self):
    # Only drags that originate in the calibre library view are accepted.
    return ["application/calibre+from_library"]
def dropMimeData(self, md, action, row, column, parent):
    # Accept a drop of library book ids onto a tag/series/author/... node.
    # Returns True when the drop was handled, False to reject it.
    if not md.hasFormat("application/calibre+from_library") or \
            action != Qt.CopyAction:
        return False
    idx = parent
    p = self.parent(idx)
    # A valid parent index means idx is a leaf (value) node, not a
    # top-level category row.
    if idx.isValid() and p.isValid():
        item = self.data(p, Qt.UserRole)
        fm = self.db.metadata_for_field(item.category_key)
        # Only these built-in fields, plus text/rating/series custom
        # columns, can have a value assigned by drag and drop.
        if item.category_key in \
                ('tags', 'series', 'authors', 'rating', 'publisher') or \
                (fm['is_custom'] and \
                 fm['datatype'] in ['text', 'rating', 'series']):
            child = self.data(idx, Qt.UserRole)
            mime = 'application/calibre+from_library'
            # The payload is a whitespace-separated list of book ids.
            ids = list(map(int, str(md.data(mime)).split()))
            self.handle_drop(item, child, ids)
            return True
    return False
def handle_drop(self, parent, child, ids):
    # Assign the dropped-on value (child.tag.name) in the category given by
    # parent.category_key to every book id in ids, then commit once.
    # print 'Dropped ids:', ids, parent.category_key, child.tag.name
    key = parent.category_key
    # Mass edits are slow; confirm with the user before large changes.
    if (key == 'authors' and len(ids) >= 5):
        if not confirm('<p>'+_('Changing the authors for several books can '
                               'take a while. Are you sure?')
                       +'</p>', 'tag_browser_drop_authors', self.parent()):
            return
    elif len(ids) > 15:
        if not confirm('<p>'+_('Changing the metadata for that many books '
                               'can take a while. Are you sure?')
                       +'</p>', 'tag_browser_many_changes', self.parent()):
            return
    fm = self.db.metadata_for_field(key)
    is_multiple = fm['is_multiple']
    val = child.tag.name
    for id in ids:
        mi = self.db.get_metadata(id, index_is_id=True)
        # Prepare to ignore the author, unless it is changed. Title is
        # always ignored -- see the call to set_metadata
        set_authors = False
        # Author_sort cannot change explicitly. Changing the author might
        # change it.
        mi.author_sort = None # Never will change by itself.
        if key == 'authors':
            mi.authors = [val]
            set_authors=True
        elif fm['datatype'] == 'rating':
            # NOTE(review): presumably the tag name is a run of star
            # characters, so the stored rating is 2x the star count — confirm.
            mi.set(key, len(val) * 2)
        elif fm['is_custom'] and fm['datatype'] == 'series':
            mi.set(key, val, extra=1.0)
        elif is_multiple:
            new_val = mi.get(key, [])
            if val in new_val:
                # Fortunately, only one field can change, so the continue
                # won't break anything
                continue
            new_val.append(val)
            mi.set(key, new_val)
        else:
            mi.set(key, val)
        self.db.set_metadata(id, mi, set_title=False,
                             set_authors=set_authors, commit=False)
    self.db.commit()
    # Notify listeners (e.g. the library view) so they refresh the rows.
    self.drag_drop_finished.emit(ids)
def set_search_restriction(self, s):
self.search_restriction = s
@ -655,7 +638,7 @@ class TagsModel(QAbstractItemModel): # {{{
return ans
def supportedDropActions(self):
return Qt.CopyAction|Qt.MoveAction
return Qt.CopyAction
def path_for_index(self, index):
ans = []

View File

@ -18,7 +18,7 @@ from calibre.constants import __appname__
from calibre.ebooks.metadata import fmt_sidx
from calibre.library.comments import comments_to_html
from calibre.library.server import custom_fields_to_display
from calibre.library.server.utils import format_tag_string
from calibre.library.server.utils import format_tag_string, Offsets
from calibre import guess_type
from calibre.utils.ordered_dict import OrderedDict
@ -321,26 +321,6 @@ class CategoryGroupFeed(NavFeed):
self.root.append(CATALOG_GROUP_ENTRY(item, which, base_href, version, updated))
class OPDSOffsets(object):
def __init__(self, offset, delta, total):
if offset < 0:
offset = 0
if offset >= total:
raise cherrypy.HTTPError(404, 'Invalid offset: %r'%offset)
last_allowed_index = total - 1
last_current_index = offset + delta - 1
self.offset = offset
self.next_offset = last_current_index + 1
if self.next_offset > last_allowed_index:
self.next_offset = -1
self.previous_offset = self.offset - delta
if self.previous_offset < 0:
self.previous_offset = 0
self.last_offset = last_allowed_index - delta
if self.last_offset < 0:
self.last_offset = 0
class OPDSServer(object):
@ -374,7 +354,7 @@ class OPDSServer(object):
items = [x for x in self.db.data.iterall() if x[idx] in ids]
self.sort(items, sort_by, ascending)
max_items = self.opts.max_opds_items
offsets = OPDSOffsets(offset, max_items, len(items))
offsets = Offsets(offset, max_items, len(items))
items = items[offsets.offset:offsets.offset+max_items]
updated = self.db.last_modified()
cherrypy.response.headers['Last-Modified'] = self.last_modified(updated)
@ -448,7 +428,7 @@ class OPDSServer(object):
id_ = 'calibre-category-group-feed:'+category+':'+which
max_items = self.opts.max_opds_items
offsets = OPDSOffsets(offset, max_items, len(items))
offsets = Offsets(offset, max_items, len(items))
items = list(items)[offsets.offset:offsets.offset+max_items]
cherrypy.response.headers['Last-Modified'] = self.last_modified(updated)
@ -495,7 +475,7 @@ class OPDSServer(object):
if len(items) <= MAX_ITEMS:
max_items = self.opts.max_opds_items
offsets = OPDSOffsets(offset, max_items, len(items))
offsets = Offsets(offset, max_items, len(items))
items = list(items)[offsets.offset:offsets.offset+max_items]
ans = CategoryFeed(items, which, id_, updated, version, offsets,
page_url, up_url, self.db)
@ -516,7 +496,7 @@ class OPDSServer(object):
getattr(y, 'sort', y.name).startswith(x)])
items = [Group(x, y) for x, y in category_groups.items()]
max_items = self.opts.max_opds_items
offsets = OPDSOffsets(offset, max_items, len(items))
offsets = Offsets(offset, max_items, len(items))
items = items[offsets.offset:offsets.offset+max_items]
ans = CategoryGroupFeed(items, which, id_, updated, version, offsets,
page_url, up_url)

View File

@ -13,6 +13,27 @@ from calibre import strftime as _strftime, prints
from calibre.utils.date import now as nowf
from calibre.utils.config import tweaks
class Offsets(object):
    'Calculate offsets for a paginated view'

    def __init__(self, offset, delta, total):
        # Negative offsets are clamped to the first page.
        offset = max(0, offset)
        if offset >= total:
            raise cherrypy.HTTPError(404, 'Invalid offset: %r'%offset)
        last_allowed = total - 1
        page_end = offset + delta - 1
        self.offset = offset
        # -1 signals "no next page" to the templates.
        self.next_offset = page_end + 1 if page_end < last_allowed else -1
        self.previous_offset = max(0, offset - delta)
        self.last_offset = max(0, last_allowed - delta)
def expose(func):

View File

@ -14,7 +14,10 @@ from calibre.ptempfile import PersistentTemporaryFile, base_dir
if iswindows:
import win32process
_windows_null_file = open(os.devnull, 'wb')
try:
_windows_null_file = open(os.devnull, 'wb')
except:
raise RuntimeError('NUL %r file missing in windows'%os.devnull)
class Worker(object):
'''