Merge from trunk

This commit is contained in:
Charles Haley 2012-06-18 23:10:19 +02:00
commit f6da689719
6 changed files with 71 additions and 31 deletions

View File

@@ -0,0 +1,28 @@
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1325420346(BasicNewsRecipe):
    """Calibre news recipe for Homopedia (Polish LGBT encyclopedia).

    Pulls the 'new pages' Atom feed from the wiki plus the project blog,
    and rewrites wiki article links to their printable view.
    """
    title = u'Homopedia'
    __author__ = 'rainbowwarrior'
    language = 'pl'
    oldest_article = 7
    max_articles_per_feed = 100
    publication_type = 'newspaper'
    masthead_url = 'http://a5.sphotos.ak.fbcdn.net/hphotos-ak-snc6/67335_168352243178437_166186720061656_594975_5800720_n.jpg'
    encoding = 'utf-8'

    def get_cover_url(self):
        """Return the static cover image URL for the periodical."""
        return 'http://a7.sphotos.ak.fbcdn.net/hphotos-ak-snc4/65568_166186970061631_166186720061656_580324_7584264_n.jpg'

    feeds = [
        (u'Nowe has\u0142a', u'http://www.homopedia.pl/w/index.php?title=Specjalna:Nowe_strony&feed=atom&hideliu=&hidepatrolled=&hidebots=&hideredirs=1&limit=50&namespace=0'),
        (u'Blog', u'http://blog.homopedia.pl/feeds/posts/default'),
    ]

    def get_article_url(self, article):
        """Map a feed entry to the printable version of the wiki page.

        The page title is the last path component of the feed link.
        Fix: the original called rpartition() on the result of
        article.get('link', None) without a None check, raising
        AttributeError for entries with no link; returning None here
        makes calibre skip such entries instead.
        """
        artl = article.get('link', None)
        if not artl:
            return None
        rest, sep, article_id = artl.rpartition('/')
        return 'http://www.homopedia.pl/w/index.php?redirect=no&printable=yes&title=' + article_id

    # Strip MediaWiki chrome that is useless in an e-book.
    remove_tags = [
        dict(name='div', attrs={'class': 'noprint'}),
        dict(name='ul', attrs={'class': 'noprint'}),
        dict(name='ul', attrs={'id': 'footer-places'}),
        dict(name='li', attrs={'id': 'footer-info-viewcount'}),
        dict(name='span', attrs={'class': 'editsection'}),
        dict(name='div', attrs={'id': 'jump-to-nav'}),
    ]
    # Fix: the originals wrapped the tag spec in a redundant dict(dict(...)).
    remove_tags_before = dict(name='h2', attrs={'class': 'post-title'})
    remove_tags_after = dict(name='a', attrs={'class': 'timestamp-link'})
    # Fix: 'margin;0' was a typo that broke the whole CSS declaration block;
    # the intended rule is 'margin:0'.
    extra_css = 'p{text-indent:1.5em!important;padding:0!important;margin:0!important}'

View File

@@ -12,6 +12,7 @@ class Metro_Montreal(BasicNewsRecipe):
use_embedded_content = False use_embedded_content = False
remove_javascript = True remove_javascript = True
no_stylesheets = True no_stylesheets = True
auto_cleanup = True
encoding = 'utf-8' encoding = 'utf-8'
extra_css = '.headline {font-size: x-large;} \n .fact {padding-top: 10pt}' extra_css = '.headline {font-size: x-large;} \n .fact {padding-top: 10pt}'

View File

@@ -13,34 +13,33 @@ class AdventureGamers(BasicNewsRecipe):
language = 'fr' language = 'fr'
__author__ = 'Darko Miletic' __author__ = 'Darko Miletic'
description = 'science news' description = 'science news'
publisher = 'Monde durable' publisher = 'Monde durable'
category = 'environnement, developpement durable, science & vie, science et vie' category = 'environnement, developpement durable, science & vie, science et vie'
oldest_article = 30 oldest_article = 30
delay = 2 delay = 2
max_articles_per_feed = 100 max_articles_per_feed = 100
no_stylesheets = True no_stylesheets = True
auto_cleanup = True
encoding = 'utf-8' encoding = 'utf-8'
remove_javascript = True remove_javascript = True
use_embedded_content = False use_embedded_content = False
html2lrf_options = [ html2lrf_options = [
'--comment', description '--comment', description
, '--category', category , '--category', category
, '--publisher', publisher , '--publisher', publisher
] ]
html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"'
keep_only_tags = [dict(name='div', attrs={'class':'post'})] html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"'
feeds = [(u'Articles', u'http://mondedurable.science-et-vie.com/comments/feed/')]
remove_tags = [dict(name=['object','link','embed','form','img'])]
feeds = [(u'Articles', u'http://mondedurable.science-et-vie.com/feed/')]
def preprocess_html(self, soup): def preprocess_html(self, soup):
mtag = '<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>' mtag = '<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>'
soup.head.insert(0,mtag) soup.head.insert(0,mtag)
for item in soup.findAll(style=True): for item in soup.findAll(style=True):
del item['style'] del item['style']
return soup return soup

View File

@@ -521,7 +521,8 @@ class PluginUpdaterDialog(SizePersistedDialog):
layout.addWidget(self.description) layout.addWidget(self.description)
self.button_box = QDialogButtonBox(QDialogButtonBox.Close) self.button_box = QDialogButtonBox(QDialogButtonBox.Close)
self.button_box.rejected.connect(self._close_clicked) self.button_box.rejected.connect(self.reject)
self.finished.connect(self._finished)
self.install_button = self.button_box.addButton(_('&Install'), QDialogButtonBox.AcceptRole) self.install_button = self.button_box.addButton(_('&Install'), QDialogButtonBox.AcceptRole)
self.install_button.setToolTip(_('Install the selected plugin')) self.install_button.setToolTip(_('Install the selected plugin'))
self.install_button.clicked.connect(self._install_clicked) self.install_button.clicked.connect(self._install_clicked)
@@ -584,12 +585,10 @@ class PluginUpdaterDialog(SizePersistedDialog):
self.configure_action.setEnabled(False) self.configure_action.setEnabled(False)
self.plugin_view.addAction(self.configure_action) self.plugin_view.addAction(self.configure_action)
def _close_clicked(self): def _finished(self, *args):
# Force our toolbar/action to be updated based on uninstalled updates
if self.model: if self.model:
update_plugins = filter(filter_upgradeable_plugins, self.model.display_plugins) update_plugins = filter(filter_upgradeable_plugins, self.model.display_plugins)
self.gui.recalc_update_label(len(update_plugins)) self.gui.recalc_update_label(len(update_plugins))
self.reject()
def _plugin_current_changed(self, current, previous): def _plugin_current_changed(self, current, previous):
if current.isValid(): if current.isValid():

View File

@@ -243,20 +243,22 @@ def do_add(db, paths, one_book_per_directory, recurse, add_duplicates, otitle,
metadata.append(mi) metadata.append(mi)
file_duplicates = [] file_duplicates = []
added_ids = set()
if files: if files:
file_duplicates = db.add_books(files, formats, metadata, file_duplicates, ids = db.add_books(files, formats, metadata,
add_duplicates=add_duplicates) add_duplicates=add_duplicates,
if file_duplicates: return_ids=True)
file_duplicates = file_duplicates[0] added_ids |= set(ids)
dir_dups = [] dir_dups = []
for dir in dirs: for dir in dirs:
if recurse: if recurse:
dir_dups.extend(db.recursive_import(dir, single_book_per_directory=one_book_per_directory)) dir_dups.extend(db.recursive_import(dir,
single_book_per_directory=one_book_per_directory,
added_ids=added_ids))
else: else:
func = db.import_book_directory if one_book_per_directory else db.import_book_directory_multiple func = db.import_book_directory if one_book_per_directory else db.import_book_directory_multiple
dups = func(dir) dups = func(dir, added_ids=added_ids)
if not dups: if not dups:
dups = [] dups = []
dir_dups.extend(dups) dir_dups.extend(dups)
@@ -265,7 +267,8 @@ def do_add(db, paths, one_book_per_directory, recurse, add_duplicates, otitle,
if add_duplicates: if add_duplicates:
for mi, formats in dir_dups: for mi, formats in dir_dups:
db.import_book(mi, formats) book_id = db.import_book(mi, formats)
added_ids.add(book_id)
else: else:
if dir_dups or file_duplicates: if dir_dups or file_duplicates:
print >>sys.stderr, _('The following books were not added as ' print >>sys.stderr, _('The following books were not added as '
@@ -287,6 +290,9 @@ def do_add(db, paths, one_book_per_directory, recurse, add_duplicates, otitle,
print >>sys.stderr, '\t\t ', path print >>sys.stderr, '\t\t ', path
write_dirtied(db) write_dirtied(db)
if added_ids:
prints(_('Added book ids: %s')%(', '.join(map(type(u''),
added_ids))))
send_message() send_message()
finally: finally:
sys.stdout = orig sys.stdout = orig

View File

@@ -3566,7 +3566,8 @@ books_series_link feeds
for formats in books.values(): for formats in books.values():
yield formats yield formats
def import_book_directory_multiple(self, dirpath, callback=None): def import_book_directory_multiple(self, dirpath, callback=None,
added_ids=None):
from calibre.ebooks.metadata.meta import metadata_from_formats from calibre.ebooks.metadata.meta import metadata_from_formats
duplicates = [] duplicates = []
@@ -3577,13 +3578,15 @@ books_series_link feeds
if self.has_book(mi): if self.has_book(mi):
duplicates.append((mi, formats)) duplicates.append((mi, formats))
continue continue
self.import_book(mi, formats) book_id = self.import_book(mi, formats)
if added_ids is not None:
added_ids.add(book_id)
if callable(callback): if callable(callback):
if callback(mi.title): if callback(mi.title):
break break
return duplicates return duplicates
def import_book_directory(self, dirpath, callback=None): def import_book_directory(self, dirpath, callback=None, added_ids=None):
from calibre.ebooks.metadata.meta import metadata_from_formats from calibre.ebooks.metadata.meta import metadata_from_formats
dirpath = os.path.abspath(dirpath) dirpath = os.path.abspath(dirpath)
formats = self.find_books_in_directory(dirpath, True) formats = self.find_books_in_directory(dirpath, True)
@@ -3595,17 +3598,21 @@ books_series_link feeds
return return
if self.has_book(mi): if self.has_book(mi):
return [(mi, formats)] return [(mi, formats)]
self.import_book(mi, formats) book_id = self.import_book(mi, formats)
if added_ids is not None:
added_ids.add(book_id)
if callable(callback): if callable(callback):
callback(mi.title) callback(mi.title)
def recursive_import(self, root, single_book_per_directory=True, callback=None): def recursive_import(self, root, single_book_per_directory=True,
callback=None, added_ids=None):
root = os.path.abspath(root) root = os.path.abspath(root)
duplicates = [] duplicates = []
for dirpath in os.walk(root): for dirpath in os.walk(root):
res = self.import_book_directory(dirpath[0], callback=callback) if \ res = (self.import_book_directory(dirpath[0], callback=callback,
single_book_per_directory else \ added_ids=added_ids) if single_book_per_directory else
self.import_book_directory_multiple(dirpath[0], callback=callback) self.import_book_directory_multiple(dirpath[0],
callback=callback, added_ids=added_ids))
if res is not None: if res is not None:
duplicates.extend(res) duplicates.extend(res)
if callable(callback): if callable(callback):