This commit is contained in:
GRiker 2011-06-17 13:07:18 -06:00
commit 2252f7453d
89 changed files with 67440 additions and 44997 deletions

View File

@ -19,6 +19,69 @@
# new recipes:
# - title:
- version: 0.8.6
date: 2011-06-17
new features:
- title: "Builtin support for downloading and installing/updating calibre plugins. Go to Preferences->Plugins and click 'Get new plugins'"
description: "When updates for installed plugins are available, calibre will automatically (unobtrusively) notify you"
type: major
- title: "Metadata download configuration: Allow defining a set of 'default' fields for metadata download and quickly switching to/from them"
- title: "Allow clicking on the news category in the Tag Browser to display all downloaded periodicals"
- title: "Driver for the Libre Air"
- title: "Email sending: Allow user to stop email jobs (note that stopping may not actually prevent the email from being sent, depending on when the stop happens). Also automatically abort email sending if it takes longer than 15mins."
tickets: [795960]
bug fixes:
- title: "MOBI Output: Allow setting of background color on tables also set the border attribute on the table if the table has any border related css defined."
tickets: [797580]
- title: "Nook TSR: Put news sent to the device in My Files/Newspapers instead of My Files/Books."
tickets: [796674]
- title: "MOBI Output: Fix a bug where linking to the very first element in an HTML file could sometimes result in the link pointing to the last element in the previous file."
tickets: [797214]
- title: "CSV catalog: Convert HTML comments to plain text"
- title: "HTML Input: Ignore links to text files."
tickets: [791568]
- title: "EPUB Output: Change orphaned <td> tags to <div> as they cause ADE to crash."
- title: "Fix 'Stop selected jobs' button trying to stop the same job multiple times"
- title: "Database: Explicitly test for case sensitivity on OS X instead of assuming a case insensitive filesystem."
tickets: [796258]
- title: "Get Books: More fixes to the Amazon store plugin"
- title: "FB2 Input: Do not specify font families/background colors"
improved recipes:
- Philadelphia Inquirer
- Macleans Magazine
- Metro UK
new recipes:
- title: "Christian Post, Down To Earth and Words Without Borders"
author: sexymax15
- title: "Noticias R7"
author: Diniz Bortolotto
- title: "UK Daily Mirror"
author: Dave Asbury
- title: "New Musical Express Magazine"
author: scissors
- version: 0.8.5
date: 2011-06-10

View File

@ -0,0 +1,37 @@
#created by sexymax15 ....sexymax15@gmail.com
#christian post recipe
from calibre.web.feeds.news import BasicNewsRecipe
class ChristianPost(BasicNewsRecipe):
    '''
    Download news from The Christian Post via its public RSS feeds,
    fetching the print version of each article for a cleaner layout.
    '''
    title = 'The Christian Post'
    __author__ = 'sexymax15'
    description = 'Homepage'
    language = 'en'

    # Fetch behaviour
    oldest_article = 30           # days back to include
    max_articles_per_feed = 15
    remove_empty_feeds = True
    use_embedded_content = False

    # Output cleanup (no_stylesheets was assigned twice in the original;
    # the duplicate assignment has been removed)
    no_stylesheets = True
    remove_javascript = True

    extra_css = '''
    h1 {color:#008852;font-family:Arial,Helvetica,sans-serif; font-size:20px; font-size-adjust:none; font-stretch:normal; font-style:normal; font-variant:normal; font-weight:bold; line-height:18px;}
    h2 {color:#4D4D4D;font-family:Arial,Helvetica,sans-serif; font-size:16px; font-size-adjust:none; font-stretch:normal; font-style:normal; font-variant:normal; font-weight:bold; line-height:16px; } '''

    feeds = [
        ('Homepage', 'http://www.christianpost.com/services/rss/feed/'),
        ('Most Popular', 'http://www.christianpost.com/services/rss/feed/most-popular'),
        ('Entertainment', 'http://www.christianpost.com/services/rss/feed/entertainment/'),
        ('Politics', 'http://www.christianpost.com/services/rss/feed/politics/'),
        ('Living', 'http://www.christianpost.com/services/rss/feed/living/'),
        ('Business', 'http://www.christianpost.com/services/rss/feed/business/'),
        ('Opinion', 'http://www.christianpost.com/services/rss/feed/opinion/')
    ]

    def print_version(self, url):
        # Article URLs end with '/', so appending yields the print page URL.
        return url + 'print.html'

View File

@ -0,0 +1,18 @@
from calibre.web.feeds.recipes import BasicNewsRecipe
class AdvancedUserRecipe1307834113(BasicNewsRecipe):
    '''Recipe for the Indian environment magazine Down To Earth (RSS feeds).'''
    title = u'Down To Earth'
    __author__ = 'sexymax15'
    language = 'en_IN'

    # Fetch behaviour: the site publishes infrequently, so look far back.
    oldest_article = 300
    max_articles_per_feed = 30
    use_embedded_content = False
    remove_empty_feeds = True

    # Output cleanup
    no_stylesheets = True
    remove_javascript = True
    remove_attributes = ['width', 'height']

    # Keep only the main page content; drop the trailing 'box' widgets.
    remove_tags_before = {'name': 'div', 'id': 'PageContent'}
    remove_tags_after = [{'name': 'div'}, {'class': 'box'}]
    remove_tags = [{'class': 'box'}]

    feeds = [
        (u'editor', u'http://www.downtoearth.org.in/taxonomy/term/20348/0/feed'),
        (u'cover story', u'http://www.downtoearth.org.in/taxonomy/term/20345/0/feed'),
        (u'special report', u'http://www.downtoearth.org.in/taxonomy/term/20384/0/feed'),
        (u'features', u'http://www.downtoearth.org.in/taxonomy/term/20350/0/feed'),
        (u'news', u'http://www.downtoearth.org.in/taxonomy/term/20366/0/feed'),
        (u'debate', u'http://www.downtoearth.org.in/taxonomy/term/20347/0/feed'),
        (u'natural disasters', u'http://www.downtoearth.org.in/taxonomy/term/20822/0/feed'),
    ]

View File

@ -1,239 +1,28 @@
#!/usr/bin/env python
from calibre.web.feeds.news import BasicNewsRecipe
__license__ = 'GPL v3'
'''
macleans.ca
'''
from calibre.web.feeds.recipes import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import Tag
from datetime import timedelta, date
class Macleans(BasicNewsRecipe):
class AdvancedUserRecipe1308306308(BasicNewsRecipe):
title = u'Macleans Magazine'
__author__ = 'Nick Redding'
language = 'en_CA'
description = ('Macleans Magazine')
__author__ = 'sexymax15'
oldest_article = 30
max_articles_per_feed = 12
use_embedded_content = False
remove_empty_feeds = True
no_stylesheets = True
timefmt = ' [%b %d]'
remove_javascript = True
remove_tags = [dict(name ='img'),dict (id='header'),{'class':'postmetadata'}]
remove_tags_after = {'class':'postmetadata'}
# customization notes: delete sections you are not interested in
# set oldest_article to the maximum number of days back from today to include articles
sectionlist = [
['http://www2.macleans.ca/','Front Page'],
['http://www2.macleans.ca/category/canada/','Canada'],
['http://www2.macleans.ca/category/world-from-the-magazine/','World'],
['http://www2.macleans.ca/category/business','Business'],
['http://www2.macleans.ca/category/arts-culture/','Culture'],
['http://www2.macleans.ca/category/opinion','Opinion'],
['http://www2.macleans.ca/category/health-from-the-magazine/','Health'],
['http://www2.macleans.ca/category/environment-from-the-magazine/','Environment'],
['http://www2.macleans.ca/category/education/','On Campus'],
['http://www2.macleans.ca/category/travel-from-the-magazine/','Travel']
]
oldest_article = 7
# formatting for print version of articles
extra_css = '''h2{font-family:Times,serif; font-size:large;}
small {font-family:Times,serif; font-size:xx-small; list-style-type: none;}
'''
# tag handling for print version of articles
keep_only_tags = [dict(id='tw-print')]
remove_tags = [dict({'class':'postmetadata'})]
def preprocess_html(self,soup):
for img_tag in soup.findAll('img'):
parent_tag = img_tag.parent
if parent_tag.name == 'a':
new_tag = Tag(soup,'p')
new_tag.insert(0,img_tag)
parent_tag.replaceWith(new_tag)
elif parent_tag.name == 'p':
if not self.tag_to_string(parent_tag) == '':
new_div = Tag(soup,'div')
new_tag = Tag(soup,'p')
new_tag.insert(0,img_tag)
parent_tag.replaceWith(new_div)
new_div.insert(0,new_tag)
new_div.insert(1,parent_tag)
return soup
def parse_index(self):
articles = {}
key = None
ans = []
def parse_index_page(page_url,page_title):
def decode_date(datestr):
dmysplit = datestr.strip().lower().split(',')
mdsplit = dmysplit[1].split()
m = ['january','february','march','april','may','june','july','august','september','october','november','december'].index(mdsplit[0])+1
d = int(mdsplit[1])
y = int(dmysplit[2].split()[0])
return date(y,m,d)
def article_title(tag):
atag = tag.find('a',href=True)
if not atag:
return ''
return self.tag_to_string(atag)
def article_url(tag):
atag = tag.find('a',href=True)
if not atag:
return ''
return atag['href']+'print/'
def article_description(tag):
for p_tag in tag.findAll('p'):
d = self.tag_to_string(p_tag,False)
if not d == '':
return d
return ''
def compound_h4_h3_title(tag):
if tag.h4:
if tag.h3:
return self.tag_to_string(tag.h4,False)+u'\u2014'+self.tag_to_string(tag.h3,False)
else:
return self.tag_to_string(tag.h4,False)
elif tag.h3:
return self.tag_to_string(tag.h3,False)
else:
return ''
def compound_h2_h4_title(tag):
if tag.h2:
if tag.h4:
return self.tag_to_string(tag.h2,False)+u'\u2014'+self.tag_to_string(tag.h4,False)
else:
return self.tag_to_string(tag.h2,False)
elif tag.h4:
return self.tag_to_string(tag.h4,False)
else:
return ''
def handle_article(header_tag, outer_tag):
if header_tag:
url = article_url(header_tag)
title = article_title(header_tag)
author_date_tag = outer_tag.h4
if author_date_tag:
author_date = self.tag_to_string(author_date_tag,False).split(' - ')
author = author_date[0].strip()
article_date = decode_date(author_date[1])
earliest_date = date.today() - timedelta(days=self.oldest_article)
if article_date < earliest_date:
self.log("Skipping article dated %s" % author_date[1])
else:
excerpt_div = outer_tag.find('div','excerpt')
if excerpt_div:
description = article_description(excerpt_div)
else:
description = ''
if not articles.has_key(page_title):
articles[page_title] = []
articles[page_title].append(dict(title=title,url=url,date=author_date[1],description=description,author=author,content=''))
def handle_category_article(cat, header_tag, outer_tag):
url = article_url(header_tag)
title = article_title(header_tag)
if not title == '':
title = cat+u'\u2014'+title
a_tag = outer_tag.find('span','authorLink')
if a_tag:
author = self.tag_to_string(a_tag,False)
a_tag.parent.extract()
else:
author = ''
description = article_description(outer_tag)
if not articles.has_key(page_title):
articles[page_title] = []
articles[page_title].append(dict(title=title,url=url,date='',description=description,author=author,content=''))
soup = self.index_to_soup(page_url)
if page_title == 'Front Page':
# special processing for the front page
top_stories = soup.find('div',{ "id" : "macleansFeatured" })
if top_stories:
for div_slide in top_stories.findAll('div','slide'):
url = article_url(div_slide)
div_title = div_slide.find('div','header')
if div_title:
title = self.tag_to_string(div_title,False)
else:
title = ''
description = article_description(div_slide)
if not articles.has_key(page_title):
articles[page_title] = []
articles[page_title].append(dict(title=title,url=url,date='',description=description,author='',content=''))
from_macleans = soup.find('div',{ "id" : "fromMacleans" })
if from_macleans:
for li_tag in from_macleans.findAll('li','fromMacleansArticle'):
title = compound_h4_h3_title(li_tag)
url = article_url(li_tag)
description = article_description(li_tag)
if not articles.has_key(page_title):
articles[page_title] = []
articles[page_title].append(dict(title=title,url=url,date='',description=description,author='',content=''))
blog_central = soup.find('div',{ "id" : "bloglist" })
if blog_central:
for li_tag in blog_central.findAll('li'):
title = compound_h2_h4_title(li_tag)
if li_tag.h4:
url = article_url(li_tag.h4)
if not articles.has_key(page_title):
articles[page_title] = []
articles[page_title].append(dict(title=title,url=url,date='',description='',author='',content=''))
# need_to_know = soup.find('div',{ "id" : "needToKnow" })
# if need_to_know:
# for div_tag in need_to_know('div',attrs={'class' : re.compile("^needToKnowArticle")}):
# title = compound_h4_h3_title(div_tag)
# url = article_url(div_tag)
# description = article_description(div_tag)
# if not articles.has_key(page_title):
# articles[page_title] = []
# articles[page_title].append(dict(title=title,url=url,date='',description=description,author='',content=''))
for news_category in soup.findAll('div','newsCategory'):
news_cat = self.tag_to_string(news_category.h4,False)
handle_category_article(news_cat, news_category.find('h2'), news_category.find('div'))
for news_item in news_category.findAll('li'):
handle_category_article(news_cat,news_item.h3,news_item)
return
# find the div containing the highlight article
div_post = soup.find('div','post')
if div_post:
h1_tag = div_post.h1
handle_article(h1_tag,div_post)
# find the divs containing the rest of the articles
div_other = div_post.find('div', { "id" : "categoryOtherPosts" })
if div_other:
for div_entry in div_other.findAll('div','entry'):
h2_tag = div_entry.h2
handle_article(h2_tag,div_entry)
for page_name,page_title in self.sectionlist:
parse_index_page(page_name,page_title)
ans.append(page_title)
ans = [(key, articles[key]) for key in ans if articles.has_key(key)]
return ans
feeds = [(u'Blog Central', u'http://www2.macleans.ca/category/blog-central/feed/'),
(u'Canada', u'http://www2.macleans.ca/category/canada/feed/'),
(u'World', u'http://www2.macleans.ca/category/world-from-the-magazine/feed/'),
(u'Business', u'http://www2.macleans.ca/category/business/feed/'),
(u'Arts & Culture', u'http://www2.macleans.ca/category/arts-culture/feed/'),
(u'Opinion', u'http://www2.macleans.ca/category/opinion/feed/'),
(u'Health', u'http://www2.macleans.ca/category/health-from-the-magazine/feed/'),
(u'Environment', u'http://www2.macleans.ca/category/environment-from-the-magazine/feed/')]
def print_version(self, url):
return url + 'print/'

View File

@ -3,7 +3,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1295081935(BasicNewsRecipe):
title = u'Mail & Guardian ZA News'
__author__ = '77ja65'
language = 'en'
language = 'en_ZA'
oldest_article = 7
max_articles_per_feed = 30
no_stylesheets = True

View File

@ -1,85 +1,45 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
'''
philly.com/inquirer/
'''
from calibre.web.feeds.recipes import BasicNewsRecipe
from calibre.web.feeds.news import BasicNewsRecipe
class Philly(BasicNewsRecipe):
title = 'Philadelphia Inquirer'
__author__ = 'RadikalDissent and Sujata Raman'
class AdvancedUserRecipe1308312288(BasicNewsRecipe):
title = u'Philadelphia Inquirer'
__author__ = 'sexymax15'
language = 'en'
description = 'Daily news from the Philadelphia Inquirer'
no_stylesheets = True
oldest_article = 15
max_articles_per_feed = 20
use_embedded_content = False
oldest_article = 1
max_articles_per_feed = 25
remove_empty_feeds = True
no_stylesheets = True
remove_javascript = True
extra_css = '''
h1{font-family:verdana,arial,helvetica,sans-serif; font-size: large;}
h2{font-family:verdana,arial,helvetica,sans-serif; font-size: small;}
.body-content{font-family:verdana,arial,helvetica,sans-serif; font-size: small;}
.byline {font-size: small; color: #666666; font-style:italic; }
.lastline {font-size: small; color: #666666; font-style:italic;}
.contact {font-size: small; color: #666666;}
.contact p {font-size: small; color: #666666;}
#photoCaption { font-family:verdana,arial,helvetica,sans-serif; font-size:x-small;}
.photoCaption { font-family:verdana,arial,helvetica,sans-serif; font-size:x-small;}
#photoCredit{ font-family:verdana,arial,helvetica,sans-serif; font-size:x-small; color:#666666;}
.photoCredit{ font-family:verdana,arial,helvetica,sans-serif; font-size:x-small; color:#666666;}
.article_timestamp{font-size:x-small; color:#666666;}
a {font-family:verdana,arial,helvetica,sans-serif; font-size: x-small;}
'''
# remove_tags_before = {'class':'article_timestamp'}
#remove_tags_after = {'class':'graylabel'}
keep_only_tags= [dict(name=['h1','p'])]
remove_tags = [dict(name=['hr','dl','dt','img','meta','iframe','link','script','form','input','label']),
dict(id=['toggleConfirmEmailDiv','toggleTOS','toggleUsernameMsgDiv','toggleConfirmYear','navT1_philly','secondaryNav','navPlacement','globalPrimaryNav'
,'ugc-footer-philly','bv_footer_include','footer','header',
'container_rag_bottom','section_rectangle','contentrightside'])
,{'class':['megamenu3 megamenu','container misc','container_inner misc_inner'
,'misccontainer_left_32','headlineonly','misccontainer_middle_32'
,'misccontainer_right_32','headline formBegin',
'post_balloon','relatedlist','linkssubhead','b_sq','dotted-rule-above'
,'container','headlines-digest','graylabel','container_inner'
,'rlinks_colorbar1','rlinks_colorbar2','supercontainer','container_5col_left','container_image_left',
'digest-headline2','digest-lead','container_5col_leftmiddle',
'container_5col_middlemiddle','container_5col_rightmiddle'
,'container_5col_right','divclear','supercontainer_outer force-width',
'supercontainer','containertitle kicker-title',
'pollquestion','pollchoice','photomore','pollbutton','container rssbox','containertitle video ',
'containertitle_image ','container_tabtwo','selected'
,'shadetabs','selected','tabcontentstyle','tabcontent','inner_container'
,'arrow','container_ad','containertitlespacer','adUnit','tracking','sitemsg_911 clearfix']}]
keep_only_tags = [
dict(name='div', attrs={'class':'story-content'}),
dict(name='div', attrs={'id': 'contentinside'})
]
extra_css = """
h1{font-family: Georgia,serif; font-size: xx-large}
remove_tags = [
dict(name='div', attrs={'class':['linkssubhead','post_balloon','relatedlist','pollquestion','b_sq']}),
dict(name='dl', attrs={'class':'relatedlist'}),
dict(name='div', attrs={'id':['photoNav','sidebar_adholder']}),
dict(name='a', attrs={'class': ['headlineonly','bl']}),
dict(name='img', attrs={'class':'img_noborder'})
]
# def print_version(self, url):
# return url + '?viewAll=y'
"""
feeds = [
('Front Page', 'http://www.philly.com/inquirer_front_page.rss'),
('Business', 'http://www.philly.com/inq_business.rss'),
#('News', 'http://www.philly.com/inquirer/news/index.rss'),
('Nation', 'http://www.philly.com/inq_news_world_us.rss'),
('Local', 'http://www.philly.com/inquirer_local.rss'),
('Health', 'http://www.philly.com/inquirer_health_science.rss'),
('Education', 'http://www.philly.com/inquirer_education.rss'),
('Editorial and opinion', 'http://www.philly.com/inq_news_editorial.rss'),
('Sports', 'http://www.philly.com/inquirer_sports.rss')
]
feeds = [(u'News', u'http://www.philly.com/philly_news.rss')]
def get_article_url(self, article):
ans = article.link
try:
self.log('Looking for full story link in', ans)
soup = self.index_to_soup(ans)
x = soup.find(text="View All")
if x is not None:
ans = ans + '?viewAll=y'
self.log('Found full story link', ans)
except:
pass
return ans
def postprocess_html(self, soup,first):
for tag in soup.findAll(name='div',attrs={'class':"container_ate_qandatitle"}):
tag.extract()
for tag in soup.findAll(name='br'):
tag.extract()
return soup

View File

@ -0,0 +1,25 @@
#recipe created by sexymax15.....sexymax15@gmail.com
#Words without Borders recipe
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1308302002(BasicNewsRecipe):
    '''Recipe for the Words Without Borders literary magazine (feedburner RSS).'''
    title = u'Words Without Borders'
    language = 'en'
    __author__ = 'sexymax15'

    # Fetch behaviour: monthly publication, so look 90 days back.
    oldest_article = 90
    max_articles_per_feed = 30
    use_embedded_content = False
    remove_empty_feeds = True

    # Output cleanup
    no_stylesheets = True
    remove_javascript = True

    # BasicNewsRecipe iterates keep_only_tags as a list of tag specs; the
    # original assigned a bare dict here, which iterates its keys instead of
    # being used as a spec. Wrap it in a list so the filter actually applies.
    keep_only_tags = [{'class': 'span-14 article'}]
    remove_tags_after = [{'class': 'addthis_toolbox addthis_default_style no_print'}]
    remove_tags = [{'class': ['posterous_quote_citation', 'button']}]

    extra_css = """
    h1{font-family: Georgia,serif; font-size: large}h2{font-family: Georgia,serif; font-size: large} """

    feeds = [(u'wwb', u'http://feeds.feedburner.com/wwborders?format=xml')]

View File

@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
numeric_version = (0, 8, 5)
numeric_version = (0, 8, 6)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

View File

@ -867,11 +867,11 @@ class ActionStore(InterfaceActionBase):
from calibre.gui2.store.config.store import save_settings as save
save(config_widget)
class ActionPluginUpdates(InterfaceActionBase):
name = 'Plugin Updates'
class ActionPluginUpdater(InterfaceActionBase):
name = 'Plugin Updater'
author = 'Grant Drake'
description = 'Queries the MobileRead forums for updates to plugins to install'
actual_plugin = 'calibre.gui2.actions.plugin_updates:PluginUpdatesAction'
actual_plugin = 'calibre.gui2.actions.plugin_updates:PluginUpdaterAction'
plugins += [ActionAdd, ActionFetchAnnotations, ActionGenerateCatalog,
ActionConvert, ActionDelete, ActionEditMetadata, ActionView,
@ -880,7 +880,7 @@ plugins += [ActionAdd, ActionFetchAnnotations, ActionGenerateCatalog,
ActionSendToDevice, ActionHelp, ActionPreferences, ActionSimilarBooks,
ActionAddToLibrary, ActionEditCollections, ActionChooseLibrary,
ActionCopyToLibrary, ActionTweakEpub, ActionNextMatch, ActionStore,
ActionPluginUpdates]
ActionPluginUpdater]
# }}}

View File

@ -21,6 +21,7 @@ from calibre.ebooks.metadata import check_isbn
msprefs = JSONConfig('metadata_sources/global.json')
msprefs.defaults['txt_comments'] = False
msprefs.defaults['ignore_fields'] = []
msprefs.defaults['user_default_ignore_fields'] = []
msprefs.defaults['max_tags'] = 20
msprefs.defaults['wait_after_first_identify_result'] = 30 # seconds
msprefs.defaults['wait_after_first_cover_result'] = 60 # seconds

View File

@ -85,6 +85,10 @@ class ISBNMerge(object):
isbns, min_year = xisbn.get_isbn_pool(isbn)
if not isbns:
isbns = frozenset([isbn])
if isbns in self.pools:
# xISBN had a brain fart
pool = self.pools[isbns]
else:
self.pools[isbns] = pool = (min_year, [])
if not self.pool_has_result_from_same_source(pool, result):

View File

@ -45,6 +45,11 @@ class xISBN(object):
ans.append(rec)
return ans
def isbns_in_data(self, data):
    # Flatten every record's 'isbn' list into a single lazy stream;
    # records without an 'isbn' key contribute nothing.
    return (isbn for record in data for isbn in record.get('isbn', []))
def get_data(self, isbn):
isbn = self.purify(isbn)
with self.lock:
@ -57,8 +62,7 @@ class xISBN(object):
data = []
id_ = len(self._data)
self._data.append(data)
for rec in data:
for i in rec.get('isbn', []):
for i in self.isbns_in_data(data):
self._map[i] = id_
self._map[isbn] = id_
return self._data[self._map[isbn]]

View File

@ -443,11 +443,15 @@ class MobiMLizer(object):
tag = 'span' if tag == 'td' else 'div'
if tag == 'table':
col = style.backgroundColor
if col:
elem.set('bgcolor', col)
css = style.cssdict()
if 'border' in css or 'border-width' in css:
elem.set('border', '1')
if tag in TABLE_TAGS:
for attr in ('rowspan', 'colspan', 'width', 'border', 'scope'):
for attr in ('rowspan', 'colspan', 'width', 'border', 'scope',
'bgcolor'):
if attr in elem.attrib:
istate.attrib[attr] = elem.attrib[attr]
if tag == 'q':

View File

@ -11,7 +11,6 @@ __copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
import os, itertools, re, logging, copy, unicodedata
from weakref import WeakKeyDictionary
from xml.dom import SyntaxErr as CSSSyntaxError
import cssutils
from cssutils.css import (CSSStyleRule, CSSPageRule, CSSStyleDeclaration,
CSSFontFaceRule, cssproperties)
try:
@ -20,7 +19,8 @@ try:
except ImportError:
# cssutils >= 0.9.8
from cssutils.css import PropertyValue as CSSValueList
from cssutils import profile as cssprofiles
from cssutils import (profile as cssprofiles, parseString, parseStyle, log as
cssutils_log, CSSParser, profiles)
from lxml import etree
from lxml.cssselect import css_to_xpath, ExpressionError, SelectorSyntaxError
from calibre import force_unicode
@ -28,7 +28,7 @@ from calibre.ebooks import unit_convert
from calibre.ebooks.oeb.base import XHTML, XHTML_NS, CSS_MIME, OEB_STYLES
from calibre.ebooks.oeb.base import XPNSMAP, xpath, urlnormalize
cssutils.log.setLevel(logging.WARN)
cssutils_log.setLevel(logging.WARN)
_html_css_stylesheet = None
@ -36,7 +36,7 @@ def html_css_stylesheet():
global _html_css_stylesheet
if _html_css_stylesheet is None:
html_css = open(P('templates/html.css'), 'rb').read()
_html_css_stylesheet = cssutils.parseString(html_css)
_html_css_stylesheet = parseString(html_css)
_html_css_stylesheet.namespaces['h'] = XHTML_NS
return _html_css_stylesheet
@ -157,11 +157,11 @@ class Stylizer(object):
# Add cssutils parsing profiles from output_profile
for profile in self.opts.output_profile.extra_css_modules:
cssutils.profile.addProfile(profile['name'],
cssprofiles.addProfile(profile['name'],
profile['props'],
profile['macros'])
parser = cssutils.CSSParser(fetcher=self._fetch_css_file,
parser = CSSParser(fetcher=self._fetch_css_file,
log=logging.getLogger('calibre.css'))
self.font_face_rules = []
for elem in head:
@ -473,6 +473,7 @@ class Style(object):
self._width = None
self._height = None
self._lineHeight = None
self._bgcolor = None
stylizer._styles[element] = self
def set(self, prop, val):
@ -533,6 +534,48 @@ class Style(object):
def pt_to_px(self, value):
return (self._profile.dpi / 72.0) * value
@property
def backgroundColor(self):
'''
Return the background color by parsing both the background-color and
background shortcut properties. Note that inheritance/default values
are not used. None is returned if no background color is set.
'''
def validate_color(col):
return cssprofiles.validateWithProfile('color',
col,
profiles=[profiles.Profiles.CSS_LEVEL_2])[1]
if self._bgcolor is None:
col = None
val = self._style.get('background-color', None)
if val and validate_color(val):
col = val
else:
val = self._style.get('background', None)
if val is not None:
try:
style = parseStyle('background: '+val)
val = style.getProperty('background').cssValue
try:
val = list(val)
except:
# val is CSSPrimitiveValue
val = [val]
for c in val:
c = c.cssText
if validate_color(c):
col = c
break
except:
pass
if col is None:
self._bgcolor = False
else:
self._bgcolor = col
return self._bgcolor if self._bgcolor else None
@property
def fontSize(self):
def normalize_fontsize(value, base):

View File

@ -287,6 +287,18 @@ class ChooseLibraryAction(InterfaceAction):
'rate of approximately 1 book every three seconds.'), show=True)
def restore_database(self):
m = self.gui.library_view.model()
db = m.db
if (iswindows and len(db.library_path) >
LibraryDatabase2.WINDOWS_LIBRARY_PATH_LIMIT):
return error_dialog(self.gui, _('Too long'),
_('Path to library too long. Must be less than'
' %d characters. Move your library to a location with'
' a shorter path using Windows Explorer, then point'
' calibre to the new location and try again.')%
LibraryDatabase2.WINDOWS_LIBRARY_PATH_LIMIT,
show=True)
from calibre.gui2.dialogs.restore_library import restore_database
m = self.gui.library_view.model()
m.stop_metadata_backup()

View File

@ -12,10 +12,10 @@ from calibre.gui2.actions import InterfaceAction
from calibre.gui2.dialogs.plugin_updater import (PluginUpdaterDialog,
FILTER_ALL, FILTER_UPDATE_AVAILABLE)
class PluginUpdatesAction(InterfaceAction):
class PluginUpdaterAction(InterfaceAction):
name = 'Plugin Updates'
action_spec = (_('Plugin Updates'), None, None, None)
name = 'Plugin Updater'
action_spec = (_('Plugin Updater'), None, None, None)
action_type = 'current'
def genesis(self):

View File

@ -388,6 +388,10 @@ class MetadataSingleDialogBase(ResizableDialog):
def apply_changes(self):
self.changed.add(self.book_id)
if self.db is None:
# break_cycles has already been called, don't know why this should
# happen but a user reported it
return True
for widget in self.basic_metadata_widgets:
try:
if not widget.commit(self.db, self.book_id):

View File

@ -224,6 +224,20 @@ class FieldsModel(QAbstractListModel): # {{{
Qt.Unchecked])
msprefs['ignore_fields'] = list(ignored_fields.union(changed))
def user_default_state(self, field):
return (Qt.Unchecked if field in msprefs.get('user_default_ignore_fields',[])
else Qt.Checked)
def select_user_defaults(self):
self.overrides = dict([(f, self.user_default_state(f)) for f in self.fields])
self.reset()
def commit_user_defaults(self):
default_ignored_fields = set([x for x in msprefs['user_default_ignore_fields'] if x not in
self.overrides])
changed = set([k for k, v in self.overrides.iteritems() if v ==
Qt.Unchecked])
msprefs['user_default_ignore_fields'] = list(default_ignored_fields.union(changed))
# }}}
@ -286,6 +300,9 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
self.select_all_button.clicked.connect(self.changed_signal)
self.clear_all_button.clicked.connect(self.fields_model.clear_all)
self.clear_all_button.clicked.connect(self.changed_signal)
self.select_default_button.clicked.connect(self.fields_model.select_user_defaults)
self.select_default_button.clicked.connect(self.changed_signal)
self.set_as_default_button.clicked.connect(self.fields_model.commit_user_defaults)
def configure_plugin(self):
for index in self.sources_view.selectionModel().selectedRows():

View File

@ -102,6 +102,26 @@
</property>
</widget>
</item>
<item row="2" column="0">
<widget class="QPushButton" name="select_default_button">
<property name="text">
<string>&amp;Select default</string>
</property>
<property name="toolTip">
<string>Restore your own subset of checked fields that you define using the 'Set as default' button</string>
</property>
</widget>
</item>
<item row="2" column="1">
<widget class="QPushButton" name="set_as_default_button">
<property name="text">
<string>&amp;Set as default</string>
</property>
<property name="toolTip">
<string>Store the currently checked fields as a default you can restore using the 'Select default' button</string>
</property>
</widget>
</item>
</layout>
</widget>
</item>

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

17095
src/calibre/translations/si.po Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -121,6 +121,7 @@ _extra_lang_codes = {
'en_YE' : _('English (Yemen)'),
'en_IE' : _('English (Ireland)'),
'en_CN' : _('English (China)'),
'en_ZA' : _('English (South Africa)'),
'es_PY' : _('Spanish (Paraguay)'),
'es_UY' : _('Spanish (Uruguay)'),
'es_AR' : _('Spanish (Argentina)'),

View File

@ -21,8 +21,8 @@ NS = 'http://calibre-ebook.com/recipe_collection'
E = ElementMaker(namespace=NS, nsmap={None:NS})
def iterate_over_builtin_recipe_files():
exclude = ['craigslist', 'iht', 'outlook_india', 'toronto_sun',
'indian_express', 'india_today', 'livemint']
exclude = ['craigslist', 'iht', 'toronto_sun',
'india_today', 'livemint']
d = os.path.dirname
base = os.path.join(d(d(d(d(d(d(os.path.abspath(__file__))))))), 'recipes')
for f in os.listdir(base):