Merge from trunk

This commit is contained in:
Charles Haley 2011-06-18 07:12:20 +01:00
commit ce25e1ab74
83 changed files with 67381 additions and 44985 deletions

View File

@ -19,6 +19,69 @@
# new recipes: # new recipes:
# - title: # - title:
- version: 0.8.6
date: 2011-06-17
new features:
- title: "Builtin support for downloading and installing/updating calibre plugins. Go to Preferences->Plugins and click 'Get new plugins'"
description: "When updates for installed plugins are available, calibre will automatically (unobtrusively) notify you"
type: major
- title: "Metadata download configuration: Allow defining a set of 'default' fields for metadata download and quickly switching to/from them"
- title: "Allow clicking on the news category in the Tag Browser to display all downloaded periodicals"
- title: "Driver for the Libre Air"
- title: "Email sending: Allow user to stop email jobs (note that stopping may not actually prevent the email from being sent, depending on when the stop happens). Also automatically abort email sending if it takes longer than 15mins."
tickets: [795960]
bug fixes:
- title: "MOBI Output: Allow setting of background color on tables; also set the border attribute on the table if the table has any border related css defined."
tickets: [797580]
- title: "Nook TSR: Put news sent to the device in My Files/Newspapers instead of My Files/Books."
tickets: [796674]
- title: "MOBI Output: Fix a bug where linking to the very first element in an HTML file could sometimes result in the link pointing to the last element in the previous file."
tickets: [797214]
- title: "CSV catalog: Convert HTML comments to plain text"
- title: "HTML Input: Ignore links to text files."
tickets: [791568]
- title: "EPUB Output: Change orphaned <td> tags to <div> as they cause ADE to crash."
- title: "Fix 'Stop selected jobs' button trying to stop the same job multiple times"
- title: "Database: Explicitly test for case sensitivity on OS X instead of assuming a case insensitive filesystem."
tickets: [796258]
- title: "Get Books: More fixes to the Amazon store plugin"
- title: "FB2 Input: Do not specify font families/background colors"
improved recipes:
- Philadelphia Inquirer
- Macleans Magazine
- Metro UK
new recipes:
- title: "Christian Post, Down To Earth and Words Without Borders"
author: sexymax15
- title: "Noticias R7"
author: Diniz Bortolotto
- title: "UK Daily Mirror"
author: Dave Asbury
- title: "New Musical Express Magazine"
author: scissors
- version: 0.8.5 - version: 0.8.5
date: 2011-06-10 date: 2011-06-10

View File

@ -0,0 +1,37 @@
#created by sexymax15 ....sexymax15@gmail.com
#christian post recipe
from calibre.web.feeds.news import BasicNewsRecipe
class ChristianPost(BasicNewsRecipe):
    """Download articles from The Christian Post RSS feeds.

    Fix vs. original: ``no_stylesheets = True`` was assigned twice; the
    duplicate assignment has been removed (single assignment, same effect).
    """

    title = 'The Christian Post'
    __author__ = 'sexymax15'
    description = 'Homepage'
    language = 'en'

    # Fetching limits.
    oldest_article = 30
    max_articles_per_feed = 15
    remove_empty_feeds = True

    # Strip site styling/scripting; fetch full pages, not feed excerpts.
    no_stylesheets = True
    use_embedded_content = False
    remove_javascript = True

    extra_css = '''
    h1 {color:#008852;font-family:Arial,Helvetica,sans-serif; font-size:20px; font-size-adjust:none; font-stretch:normal; font-style:normal; font-variant:normal; font-weight:bold; line-height:18px;}
    h2 {color:#4D4D4D;font-family:Arial,Helvetica,sans-serif; font-size:16px; font-size-adjust:none; font-stretch:normal; font-style:normal; font-variant:normal; font-weight:bold; line-height:16px; } '''

    # (section name, RSS URL) pairs.
    feeds = [
        ('Homepage', 'http://www.christianpost.com/services/rss/feed/'),
        ('Most Popular', 'http://www.christianpost.com/services/rss/feed/most-popular'),
        ('Entertainment', 'http://www.christianpost.com/services/rss/feed/entertainment/'),
        ('Politics', 'http://www.christianpost.com/services/rss/feed/politics/'),
        ('Living', 'http://www.christianpost.com/services/rss/feed/living/'),
        ('Business', 'http://www.christianpost.com/services/rss/feed/business/'),
        ('Opinion', 'http://www.christianpost.com/services/rss/feed/opinion/')
    ]

    def print_version(self, url):
        """Return the print-friendly variant of an article URL.

        Assumes article URLs end with '/' so appending gives
        ``.../print.html`` -- TODO confirm against live feed items.
        """
        return url + 'print.html'

View File

@ -0,0 +1,18 @@
from calibre.web.feeds.recipes import BasicNewsRecipe
class AdvancedUserRecipe1307834113(BasicNewsRecipe):
    """Down To Earth (India): fetch taxonomy RSS feeds and trim page chrome."""

    title = u'Down To Earth'
    __author__ = 'sexymax15'
    language = 'en_IN'

    # How far back to look and how much to keep per feed.
    oldest_article = 300
    max_articles_per_feed = 30
    remove_empty_feeds = True

    # Clean-up: drop styling, scripts and hard-coded image sizing.
    no_stylesheets = True
    remove_javascript = True
    remove_attributes = ['width', 'height']
    use_embedded_content = False

    # Article body starts at the PageContent container.
    remove_tags_before = {'name': 'div', 'id': 'PageContent'}
    # NOTE(review): the first spec matches *any* <div>, which looks overly
    # broad -- preserved as-is from the original; confirm intent.
    remove_tags_after = [{'name': 'div'}, {'class': 'box'}]
    remove_tags = [{'class': 'box'}]

    # (section name, taxonomy feed URL) pairs.
    feeds = [
        (u'editor', u'http://www.downtoearth.org.in/taxonomy/term/20348/0/feed'),
        (u'cover story', u'http://www.downtoearth.org.in/taxonomy/term/20345/0/feed'),
        (u'special report', u'http://www.downtoearth.org.in/taxonomy/term/20384/0/feed'),
        (u'features', u'http://www.downtoearth.org.in/taxonomy/term/20350/0/feed'),
        (u'news', u'http://www.downtoearth.org.in/taxonomy/term/20366/0/feed'),
        (u'debate', u'http://www.downtoearth.org.in/taxonomy/term/20347/0/feed'),
        (u'natural disasters', u'http://www.downtoearth.org.in/taxonomy/term/20822/0/feed'),
    ]

View File

@ -1,239 +1,28 @@
#!/usr/bin/env python #!/usr/bin/env python
from calibre.web.feeds.news import BasicNewsRecipe
__license__ = 'GPL v3' class AdvancedUserRecipe1308306308(BasicNewsRecipe):
'''
macleans.ca
'''
from calibre.web.feeds.recipes import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import Tag
from datetime import timedelta, date
class Macleans(BasicNewsRecipe):
title = u'Macleans Magazine' title = u'Macleans Magazine'
__author__ = 'Nick Redding'
language = 'en_CA' language = 'en_CA'
description = ('Macleans Magazine') __author__ = 'sexymax15'
oldest_article = 30
max_articles_per_feed = 12
use_embedded_content = False
remove_empty_feeds = True
no_stylesheets = True no_stylesheets = True
timefmt = ' [%b %d]' remove_javascript = True
remove_tags = [dict(name ='img'),dict (id='header'),{'class':'postmetadata'}]
remove_tags_after = {'class':'postmetadata'}
# customization notes: delete sections you are not interested in feeds = [(u'Blog Central', u'http://www2.macleans.ca/category/blog-central/feed/'),
# set oldest_article to the maximum number of days back from today to include articles (u'Canada', u'http://www2.macleans.ca/category/canada/feed/'),
sectionlist = [ (u'World', u'http://www2.macleans.ca/category/world-from-the-magazine/feed/'),
['http://www2.macleans.ca/','Front Page'], (u'Business', u'http://www2.macleans.ca/category/business/feed/'),
['http://www2.macleans.ca/category/canada/','Canada'], (u'Arts & Culture', u'http://www2.macleans.ca/category/arts-culture/feed/'),
['http://www2.macleans.ca/category/world-from-the-magazine/','World'], (u'Opinion', u'http://www2.macleans.ca/category/opinion/feed/'),
['http://www2.macleans.ca/category/business','Business'], (u'Health', u'http://www2.macleans.ca/category/health-from-the-magazine/feed/'),
['http://www2.macleans.ca/category/arts-culture/','Culture'], (u'Environment', u'http://www2.macleans.ca/category/environment-from-the-magazine/feed/')]
['http://www2.macleans.ca/category/opinion','Opinion'], def print_version(self, url):
['http://www2.macleans.ca/category/health-from-the-magazine/','Health'], return url + 'print/'
['http://www2.macleans.ca/category/environment-from-the-magazine/','Environment'],
['http://www2.macleans.ca/category/education/','On Campus'],
['http://www2.macleans.ca/category/travel-from-the-magazine/','Travel']
]
oldest_article = 7
# formatting for print version of articles
extra_css = '''h2{font-family:Times,serif; font-size:large;}
small {font-family:Times,serif; font-size:xx-small; list-style-type: none;}
'''
# tag handling for print version of articles
keep_only_tags = [dict(id='tw-print')]
remove_tags = [dict({'class':'postmetadata'})]
def preprocess_html(self,soup):
for img_tag in soup.findAll('img'):
parent_tag = img_tag.parent
if parent_tag.name == 'a':
new_tag = Tag(soup,'p')
new_tag.insert(0,img_tag)
parent_tag.replaceWith(new_tag)
elif parent_tag.name == 'p':
if not self.tag_to_string(parent_tag) == '':
new_div = Tag(soup,'div')
new_tag = Tag(soup,'p')
new_tag.insert(0,img_tag)
parent_tag.replaceWith(new_div)
new_div.insert(0,new_tag)
new_div.insert(1,parent_tag)
return soup
def parse_index(self):
articles = {}
key = None
ans = []
def parse_index_page(page_url,page_title):
def decode_date(datestr):
dmysplit = datestr.strip().lower().split(',')
mdsplit = dmysplit[1].split()
m = ['january','february','march','april','may','june','july','august','september','october','november','december'].index(mdsplit[0])+1
d = int(mdsplit[1])
y = int(dmysplit[2].split()[0])
return date(y,m,d)
def article_title(tag):
atag = tag.find('a',href=True)
if not atag:
return ''
return self.tag_to_string(atag)
def article_url(tag):
atag = tag.find('a',href=True)
if not atag:
return ''
return atag['href']+'print/'
def article_description(tag):
for p_tag in tag.findAll('p'):
d = self.tag_to_string(p_tag,False)
if not d == '':
return d
return ''
def compound_h4_h3_title(tag):
if tag.h4:
if tag.h3:
return self.tag_to_string(tag.h4,False)+u'\u2014'+self.tag_to_string(tag.h3,False)
else:
return self.tag_to_string(tag.h4,False)
elif tag.h3:
return self.tag_to_string(tag.h3,False)
else:
return ''
def compound_h2_h4_title(tag):
if tag.h2:
if tag.h4:
return self.tag_to_string(tag.h2,False)+u'\u2014'+self.tag_to_string(tag.h4,False)
else:
return self.tag_to_string(tag.h2,False)
elif tag.h4:
return self.tag_to_string(tag.h4,False)
else:
return ''
def handle_article(header_tag, outer_tag):
if header_tag:
url = article_url(header_tag)
title = article_title(header_tag)
author_date_tag = outer_tag.h4
if author_date_tag:
author_date = self.tag_to_string(author_date_tag,False).split(' - ')
author = author_date[0].strip()
article_date = decode_date(author_date[1])
earliest_date = date.today() - timedelta(days=self.oldest_article)
if article_date < earliest_date:
self.log("Skipping article dated %s" % author_date[1])
else:
excerpt_div = outer_tag.find('div','excerpt')
if excerpt_div:
description = article_description(excerpt_div)
else:
description = ''
if not articles.has_key(page_title):
articles[page_title] = []
articles[page_title].append(dict(title=title,url=url,date=author_date[1],description=description,author=author,content=''))
def handle_category_article(cat, header_tag, outer_tag):
url = article_url(header_tag)
title = article_title(header_tag)
if not title == '':
title = cat+u'\u2014'+title
a_tag = outer_tag.find('span','authorLink')
if a_tag:
author = self.tag_to_string(a_tag,False)
a_tag.parent.extract()
else:
author = ''
description = article_description(outer_tag)
if not articles.has_key(page_title):
articles[page_title] = []
articles[page_title].append(dict(title=title,url=url,date='',description=description,author=author,content=''))
soup = self.index_to_soup(page_url)
if page_title == 'Front Page':
# special processing for the front page
top_stories = soup.find('div',{ "id" : "macleansFeatured" })
if top_stories:
for div_slide in top_stories.findAll('div','slide'):
url = article_url(div_slide)
div_title = div_slide.find('div','header')
if div_title:
title = self.tag_to_string(div_title,False)
else:
title = ''
description = article_description(div_slide)
if not articles.has_key(page_title):
articles[page_title] = []
articles[page_title].append(dict(title=title,url=url,date='',description=description,author='',content=''))
from_macleans = soup.find('div',{ "id" : "fromMacleans" })
if from_macleans:
for li_tag in from_macleans.findAll('li','fromMacleansArticle'):
title = compound_h4_h3_title(li_tag)
url = article_url(li_tag)
description = article_description(li_tag)
if not articles.has_key(page_title):
articles[page_title] = []
articles[page_title].append(dict(title=title,url=url,date='',description=description,author='',content=''))
blog_central = soup.find('div',{ "id" : "bloglist" })
if blog_central:
for li_tag in blog_central.findAll('li'):
title = compound_h2_h4_title(li_tag)
if li_tag.h4:
url = article_url(li_tag.h4)
if not articles.has_key(page_title):
articles[page_title] = []
articles[page_title].append(dict(title=title,url=url,date='',description='',author='',content=''))
# need_to_know = soup.find('div',{ "id" : "needToKnow" })
# if need_to_know:
# for div_tag in need_to_know('div',attrs={'class' : re.compile("^needToKnowArticle")}):
# title = compound_h4_h3_title(div_tag)
# url = article_url(div_tag)
# description = article_description(div_tag)
# if not articles.has_key(page_title):
# articles[page_title] = []
# articles[page_title].append(dict(title=title,url=url,date='',description=description,author='',content=''))
for news_category in soup.findAll('div','newsCategory'):
news_cat = self.tag_to_string(news_category.h4,False)
handle_category_article(news_cat, news_category.find('h2'), news_category.find('div'))
for news_item in news_category.findAll('li'):
handle_category_article(news_cat,news_item.h3,news_item)
return
# find the div containing the highlight article
div_post = soup.find('div','post')
if div_post:
h1_tag = div_post.h1
handle_article(h1_tag,div_post)
# find the divs containing the rest of the articles
div_other = div_post.find('div', { "id" : "categoryOtherPosts" })
if div_other:
for div_entry in div_other.findAll('div','entry'):
h2_tag = div_entry.h2
handle_article(h2_tag,div_entry)
for page_name,page_title in self.sectionlist:
parse_index_page(page_name,page_title)
ans.append(page_title)
ans = [(key, articles[key]) for key in ans if articles.has_key(key)]
return ans

View File

@ -3,7 +3,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1295081935(BasicNewsRecipe): class AdvancedUserRecipe1295081935(BasicNewsRecipe):
title = u'Mail & Guardian ZA News' title = u'Mail & Guardian ZA News'
__author__ = '77ja65' __author__ = '77ja65'
language = 'en' language = 'en_ZA'
oldest_article = 7 oldest_article = 7
max_articles_per_feed = 30 max_articles_per_feed = 30
no_stylesheets = True no_stylesheets = True

View File

@ -1,85 +1,45 @@
#!/usr/bin/env python #!/usr/bin/env python
__license__ = 'GPL v3' from calibre.web.feeds.news import BasicNewsRecipe
'''
philly.com/inquirer/
'''
from calibre.web.feeds.recipes import BasicNewsRecipe
class Philly(BasicNewsRecipe): class AdvancedUserRecipe1308312288(BasicNewsRecipe):
title = u'Philadelphia Inquirer'
title = 'Philadelphia Inquirer' __author__ = 'sexymax15'
__author__ = 'RadikalDissent and Sujata Raman'
language = 'en' language = 'en'
description = 'Daily news from the Philadelphia Inquirer' description = 'Daily news from the Philadelphia Inquirer'
no_stylesheets = True oldest_article = 15
use_embedded_content = False max_articles_per_feed = 20
oldest_article = 1 use_embedded_content = False
max_articles_per_feed = 25 remove_empty_feeds = True
no_stylesheets = True
remove_javascript = True
extra_css = ''' # remove_tags_before = {'class':'article_timestamp'}
h1{font-family:verdana,arial,helvetica,sans-serif; font-size: large;} #remove_tags_after = {'class':'graylabel'}
h2{font-family:verdana,arial,helvetica,sans-serif; font-size: small;} keep_only_tags= [dict(name=['h1','p'])]
.body-content{font-family:verdana,arial,helvetica,sans-serif; font-size: small;} remove_tags = [dict(name=['hr','dl','dt','img','meta','iframe','link','script','form','input','label']),
.byline {font-size: small; color: #666666; font-style:italic; } dict(id=['toggleConfirmEmailDiv','toggleTOS','toggleUsernameMsgDiv','toggleConfirmYear','navT1_philly','secondaryNav','navPlacement','globalPrimaryNav'
.lastline {font-size: small; color: #666666; font-style:italic;} ,'ugc-footer-philly','bv_footer_include','footer','header',
.contact {font-size: small; color: #666666;} 'container_rag_bottom','section_rectangle','contentrightside'])
.contact p {font-size: small; color: #666666;} ,{'class':['megamenu3 megamenu','container misc','container_inner misc_inner'
#photoCaption { font-family:verdana,arial,helvetica,sans-serif; font-size:x-small;} ,'misccontainer_left_32','headlineonly','misccontainer_middle_32'
.photoCaption { font-family:verdana,arial,helvetica,sans-serif; font-size:x-small;} ,'misccontainer_right_32','headline formBegin',
#photoCredit{ font-family:verdana,arial,helvetica,sans-serif; font-size:x-small; color:#666666;} 'post_balloon','relatedlist','linkssubhead','b_sq','dotted-rule-above'
.photoCredit{ font-family:verdana,arial,helvetica,sans-serif; font-size:x-small; color:#666666;} ,'container','headlines-digest','graylabel','container_inner'
.article_timestamp{font-size:x-small; color:#666666;} ,'rlinks_colorbar1','rlinks_colorbar2','supercontainer','container_5col_left','container_image_left',
a {font-family:verdana,arial,helvetica,sans-serif; font-size: x-small;} 'digest-headline2','digest-lead','container_5col_leftmiddle',
''' 'container_5col_middlemiddle','container_5col_rightmiddle'
,'container_5col_right','divclear','supercontainer_outer force-width',
'supercontainer','containertitle kicker-title',
'pollquestion','pollchoice','photomore','pollbutton','container rssbox','containertitle video ',
'containertitle_image ','container_tabtwo','selected'
,'shadetabs','selected','tabcontentstyle','tabcontent','inner_container'
,'arrow','container_ad','containertitlespacer','adUnit','tracking','sitemsg_911 clearfix']}]
keep_only_tags = [ extra_css = """
dict(name='div', attrs={'class':'story-content'}), h1{font-family: Georgia,serif; font-size: xx-large}
dict(name='div', attrs={'id': 'contentinside'})
]
remove_tags = [ """
dict(name='div', attrs={'class':['linkssubhead','post_balloon','relatedlist','pollquestion','b_sq']}),
dict(name='dl', attrs={'class':'relatedlist'}),
dict(name='div', attrs={'id':['photoNav','sidebar_adholder']}),
dict(name='a', attrs={'class': ['headlineonly','bl']}),
dict(name='img', attrs={'class':'img_noborder'})
]
# def print_version(self, url):
# return url + '?viewAll=y'
feeds = [ feeds = [(u'News', u'http://www.philly.com/philly_news.rss')]
('Front Page', 'http://www.philly.com/inquirer_front_page.rss'),
('Business', 'http://www.philly.com/inq_business.rss'),
#('News', 'http://www.philly.com/inquirer/news/index.rss'),
('Nation', 'http://www.philly.com/inq_news_world_us.rss'),
('Local', 'http://www.philly.com/inquirer_local.rss'),
('Health', 'http://www.philly.com/inquirer_health_science.rss'),
('Education', 'http://www.philly.com/inquirer_education.rss'),
('Editorial and opinion', 'http://www.philly.com/inq_news_editorial.rss'),
('Sports', 'http://www.philly.com/inquirer_sports.rss')
]
def get_article_url(self, article):
ans = article.link
try:
self.log('Looking for full story link in', ans)
soup = self.index_to_soup(ans)
x = soup.find(text="View All")
if x is not None:
ans = ans + '?viewAll=y'
self.log('Found full story link', ans)
except:
pass
return ans
def postprocess_html(self, soup,first):
for tag in soup.findAll(name='div',attrs={'class':"container_ate_qandatitle"}):
tag.extract()
for tag in soup.findAll(name='br'):
tag.extract()
return soup

View File

@ -0,0 +1,25 @@
#recipe created by sexymax15.....sexymax15@gmail.com
#Words without Borders recipe
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1308302002(BasicNewsRecipe):
    """Words Without Borders: fetch the FeedBurner feed and keep only the
    article container.

    Fix vs. original: ``keep_only_tags`` was a bare dict; calibre expects a
    *list* of tag specs, and iterating a dict yields only its keys, so the
    filter would never have matched.  Wrapped the spec in a list.
    """

    title = u'Words Without Borders'
    language = 'en'
    __author__ = 'sexymax15'

    # Fetching limits.
    oldest_article = 90
    max_articles_per_feed = 30
    use_embedded_content = False
    remove_empty_feeds = True

    # Strip site styling/scripting.
    no_stylesheets = True
    remove_javascript = True

    # Keep only the main article container; drop sharing widgets and
    # quote-citation/button decorations.
    keep_only_tags = [{'class': 'span-14 article'}]
    remove_tags_after = [{'class': 'addthis_toolbox addthis_default_style no_print'}]
    remove_tags = [{'class': ['posterous_quote_citation', 'button']}]

    extra_css = """
    h1{font-family: Georgia,serif; font-size: large}h2{font-family: Georgia,serif; font-size: large} """

    feeds = [(u'wwb', u'http://feeds.feedburner.com/wwborders?format=xml')]

View File

@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net' __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en' __docformat__ = 'restructuredtext en'
__appname__ = u'calibre' __appname__ = u'calibre'
numeric_version = (0, 8, 5) numeric_version = (0, 8, 6)
__version__ = u'.'.join(map(unicode, numeric_version)) __version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>" __author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

View File

@ -21,6 +21,7 @@ from calibre.ebooks.metadata import check_isbn
msprefs = JSONConfig('metadata_sources/global.json') msprefs = JSONConfig('metadata_sources/global.json')
msprefs.defaults['txt_comments'] = False msprefs.defaults['txt_comments'] = False
msprefs.defaults['ignore_fields'] = [] msprefs.defaults['ignore_fields'] = []
msprefs.defaults['user_default_ignore_fields'] = []
msprefs.defaults['max_tags'] = 20 msprefs.defaults['max_tags'] = 20
msprefs.defaults['wait_after_first_identify_result'] = 30 # seconds msprefs.defaults['wait_after_first_identify_result'] = 30 # seconds
msprefs.defaults['wait_after_first_cover_result'] = 60 # seconds msprefs.defaults['wait_after_first_cover_result'] = 60 # seconds

View File

@ -287,6 +287,18 @@ class ChooseLibraryAction(InterfaceAction):
'rate of approximately 1 book every three seconds.'), show=True) 'rate of approximately 1 book every three seconds.'), show=True)
def restore_database(self): def restore_database(self):
m = self.gui.library_view.model()
db = m.db
if (iswindows and len(db.library_path) >
LibraryDatabase2.WINDOWS_LIBRARY_PATH_LIMIT):
return error_dialog(self.gui, _('Too long'),
_('Path to library too long. Must be less than'
' %d characters. Move your library to a location with'
' a shorter path using Windows Explorer, then point'
' calibre to the new location and try again.')%
LibraryDatabase2.WINDOWS_LIBRARY_PATH_LIMIT,
show=True)
from calibre.gui2.dialogs.restore_library import restore_database from calibre.gui2.dialogs.restore_library import restore_database
m = self.gui.library_view.model() m = self.gui.library_view.model()
m.stop_metadata_backup() m.stop_metadata_backup()

View File

@ -224,6 +224,20 @@ class FieldsModel(QAbstractListModel): # {{{
Qt.Unchecked]) Qt.Unchecked])
msprefs['ignore_fields'] = list(ignored_fields.union(changed)) msprefs['ignore_fields'] = list(ignored_fields.union(changed))
def user_default_state(self, field):
return (Qt.Unchecked if field in msprefs.get('user_default_ignore_fields',[])
else Qt.Checked)
def select_user_defaults(self):
self.overrides = dict([(f, self.user_default_state(f)) for f in self.fields])
self.reset()
def commit_user_defaults(self):
default_ignored_fields = set([x for x in msprefs['user_default_ignore_fields'] if x not in
self.overrides])
changed = set([k for k, v in self.overrides.iteritems() if v ==
Qt.Unchecked])
msprefs['user_default_ignore_fields'] = list(default_ignored_fields.union(changed))
# }}} # }}}
@ -286,6 +300,9 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
self.select_all_button.clicked.connect(self.changed_signal) self.select_all_button.clicked.connect(self.changed_signal)
self.clear_all_button.clicked.connect(self.fields_model.clear_all) self.clear_all_button.clicked.connect(self.fields_model.clear_all)
self.clear_all_button.clicked.connect(self.changed_signal) self.clear_all_button.clicked.connect(self.changed_signal)
self.select_default_button.clicked.connect(self.fields_model.select_user_defaults)
self.select_default_button.clicked.connect(self.changed_signal)
self.set_as_default_button.clicked.connect(self.fields_model.commit_user_defaults)
def configure_plugin(self): def configure_plugin(self):
for index in self.sources_view.selectionModel().selectedRows(): for index in self.sources_view.selectionModel().selectedRows():

View File

@ -102,6 +102,26 @@
</property> </property>
</widget> </widget>
</item> </item>
<item row="2" column="0">
<widget class="QPushButton" name="select_default_button">
<property name="text">
<string>&amp;Select default</string>
</property>
<property name="toolTip">
<string>Restore your own subset of checked fields that you define using the 'Set as default' button</string>
</property>
</widget>
</item>
<item row="2" column="1">
<widget class="QPushButton" name="set_as_default_button">
<property name="text">
<string>&amp;Set as default</string>
</property>
<property name="toolTip">
<string>Store the currently checked fields as a default you can restore using the 'Select default' button</string>
</property>
</widget>
</item>
</layout> </layout>
</widget> </widget>
</item> </item>

View File

@ -657,6 +657,7 @@ Some limitations of PDF input are:
* Some PDFs store their images upside down with a rotation instruction, |app| currently doesn't support that instruction, so the images will be rotated in the output as well. * Some PDFs store their images upside down with a rotation instruction, |app| currently doesn't support that instruction, so the images will be rotated in the output as well.
* Links and Tables of Contents are not supported * Links and Tables of Contents are not supported
* PDFs that use embedded non-unicode fonts to represent non-English characters will result in garbled output for those characters * PDFs that use embedded non-unicode fonts to represent non-English characters will result in garbled output for those characters
* Some PDFs are made up of photographs of the page with OCRed text behind them. In such cases |app| uses the OCRed text, which can be very different from what you see when you view the PDF file
To re-iterate **PDF is a really, really bad** format to use as input. If you absolutely must use PDF, then be prepared for an To re-iterate **PDF is a really, really bad** format to use as input. If you absolutely must use PDF, then be prepared for an
output ranging anywhere from decent to unusable, depending on the input PDF. output ranging anywhere from decent to unusable, depending on the input PDF.

View File

@ -28,7 +28,7 @@ For example, adding support for a new device to |app| typically involves writing
a device driver plugin. You can browse the a device driver plugin. You can browse the
`built-in drivers <http://bazaar.launchpad.net/%7Ekovid/calibre/trunk/files/head%3A/src/calibre/devices/>`_. Similarly, adding support `built-in drivers <http://bazaar.launchpad.net/%7Ekovid/calibre/trunk/files/head%3A/src/calibre/devices/>`_. Similarly, adding support
for new conversion formats involves writing input/output format plugins. Another example of the modular design is the :ref:`recipe system <news>` for for new conversion formats involves writing input/output format plugins. Another example of the modular design is the :ref:`recipe system <news>` for
fetching news. fetching news. For more examples of plugins designed to add features to |app|, see the `plugin index <http://www.mobileread.com/forums/showthread.php?t=118764>`_.
Code layout Code layout
^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^
@ -36,10 +36,21 @@ Code layout
All the |app| python code is in the ``calibre`` package. This package contains the following main sub-packages All the |app| python code is in the ``calibre`` package. This package contains the following main sub-packages
* devices - All the device drivers. Just look through some of the built-in drivers to get an idea for how they work. * devices - All the device drivers. Just look through some of the built-in drivers to get an idea for how they work.
* ebooks - All the ebook conversion code. A good starting point is ``calibre.ebooks.conversion.cli`` which is the
module powering the :command:`ebook-convert` command. * For details, see: devices.interface which defines the interface supported by device drivers and devices.usbms which
* library - The database backed and the content server. defines a generic driver that connects to a USBMS device. All USBMS based drivers in calibre inherit from it.
* gui2 - The Graphical User Interface.
* ebooks - All the ebook conversion/metadata code. A good starting point is ``calibre.ebooks.conversion.cli`` which is the
module powering the :command:`ebook-convert` command. The conversion process is controlled via conversion.plumber.
The format independent code is all in ebooks.oeb and the format dependent stuff is in ebooks.format_name.
* Metadata reading writing and downloading is all in ebooks.metadata
* library - The database backed and the content server. See library.database2 for the interface to the calibre library. library.server is the calibre Content Server.
* gui2 - The Graphical User Interface. GUI initialization happens in gui2.main and gui2.ui. The ebook-viewer is in gui2.viewer.
If you need help understanding the code, post in the `development forum <http://www.mobileread.com/forums/forumdisplay.php?f=240>`_
and you will most likely get help from one of |app|'s many developers.
Getting the code Getting the code
------------------ ------------------
@ -82,9 +93,9 @@ Now whenever you commit changes to your branch with the command::
bzr commit -m "Comment describing your change" bzr commit -m "Comment describing your change"
I can merge it directly from you branch into the main |app| source tree. You should also subscribe to the |app| I can merge it directly from you branch into the main |app| source tree. You should also keep an eye on the |app|
developers mailing list `calibre-devs <https://launchpad.net/~calibre-devs>`_. Before making major changes, you should `development forum <http://www.mobileread.com/forums/forumdisplay.php?f=240>`. Before making major changes, you should
discuss them on the mailing list or the #calibre IRC channel on Freenode to ensure that the changes will be accepted once you're done. discuss them in the forum or contact Kovid directly (his email address is all over the source code).
Windows development environment Windows development environment
--------------------------------- ---------------------------------

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

17095
src/calibre/translations/si.po Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -121,6 +121,7 @@ _extra_lang_codes = {
'en_YE' : _('English (Yemen)'), 'en_YE' : _('English (Yemen)'),
'en_IE' : _('English (Ireland)'), 'en_IE' : _('English (Ireland)'),
'en_CN' : _('English (China)'), 'en_CN' : _('English (China)'),
'en_ZA' : _('English (South Africa)'),
'es_PY' : _('Spanish (Paraguay)'), 'es_PY' : _('Spanish (Paraguay)'),
'es_UY' : _('Spanish (Uruguay)'), 'es_UY' : _('Spanish (Uruguay)'),
'es_AR' : _('Spanish (Argentina)'), 'es_AR' : _('Spanish (Argentina)'),