Merge from trunk

This commit is contained in:
Charles Haley 2011-01-26 17:18:57 +00:00
commit d241f8cf4e
6 changed files with 31 additions and 13 deletions

View File

@@ -22,8 +22,11 @@ class Economist(BasicNewsRecipe):
oldest_article = 7.0
cover_url = 'http://www.economist.com/images/covers/currentcoverus_large.jpg'
remove_tags = [dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent']),
dict(attrs={'class':['dblClkTrk', 'ec-article-info']})]
remove_tags = [
dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent']),
dict(attrs={'class':['dblClkTrk', 'ec-article-info']}),
{'class': lambda x: x and 'share-links-header' in x},
]
keep_only_tags = [dict(id='ec-article-body')]
needs_subscription = False
no_stylesheets = True

View File

@@ -16,8 +16,11 @@ class Economist(BasicNewsRecipe):
oldest_article = 7.0
cover_url = 'http://www.economist.com/images/covers/currentcoverus_large.jpg'
remove_tags = [dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent']),
dict(attrs={'class':['dblClkTrk', 'ec-article-info']})]
remove_tags = [
dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent']),
dict(attrs={'class':['dblClkTrk', 'ec-article-info']}),
{'class': lambda x: x and 'share-links-header' in x},
]
keep_only_tags = [dict(id='ec-article-body')]
no_stylesheets = True
preprocess_regexps = [(re.compile('</html>.*', re.DOTALL),

View File

@@ -499,14 +499,15 @@ class PML_HTMLizer(object):
indent_state = {'t': False, 'T': False}
adv_indent_val = ''
# Keep track of the number of empty lines
# between paragraphs. When we reach a set number
# we assume it's a soft scene break.
empty_count = 0
for s in self.STATES:
self.state[s] = [False, ''];
for line in pml.splitlines():
if not line:
continue
parsed = []
empty = True
basic_indent = indent_state['t']
@@ -592,7 +593,12 @@ class PML_HTMLizer(object):
parsed.append(text)
c = line.read(1)
if not empty:
if empty:
empty_count += 1
if empty_count == 3:
output.append('<p>&nbsp;</p>')
else:
empty_count = 0
text = self.end_line()
parsed.append(text)

View File

@@ -1186,14 +1186,12 @@ class TagBrowserMixin(object): # {{{
def do_user_categories_edit(self, on_category=None):
db = self.library_view.model().db
d = TagCategories(self, db, on_category)
d.exec_()
if d.result() == d.Accepted:
if d.exec_() == d.Accepted:
db.prefs.set('user_categories', d.categories)
db.field_metadata.remove_user_categories()
for k in d.categories:
db.field_metadata.add_user_category('@' + k, k)
db.data.sqp_initialize(db.field_metadata.get_search_terms(),
optimize=True)
db.data.sqp_change_locations(db.field_metadata.get_search_terms())
self.tags_view.set_new_model()
self.tags_view.recount()

View File

@@ -9,6 +9,7 @@ import json
from calibre.constants import preferred_encoding
from calibre.utils.config import to_json, from_json
from calibre import prints
class DBPrefs(dict):
@@ -17,7 +18,11 @@ class DBPrefs(dict):
self.db = db
self.defaults = {}
for key, val in self.db.conn.get('SELECT key,val FROM preferences'):
val = self.raw_to_object(val)
try:
val = self.raw_to_object(val)
except:
prints('Failed to read value for:', key, 'from db')
continue
dict.__setitem__(self, key, val)
def raw_to_object(self, raw):

View File

@@ -121,6 +121,9 @@ class SearchQueryParser(object):
def __init__(self, locations, test=False, optimize=False):
self.sqp_initialize(locations, test=test, optimize=optimize)
def sqp_change_locations(self, locations):
self.sqp_initialize(locations, optimize=self.optimize)
def sqp_initialize(self, locations, test=False, optimize=False):
self._tests_failed = False
self.optimize = optimize