Mirror of https://github.com/kovidgoyal/calibre.git, synced 2025-07-09 03:04:10 -04:00

Commit 30a3afc620: merge from trunk
@ -19,6 +19,69 @@
# new recipes:
#  - title:

- version: 0.8.6
  date: 2011-06-17

  new features:
    - title: "Builtin support for downloading and installing/updating calibre plugins. Go to Preferences->Plugins and click 'Get new plugins'"
      description: "When updates for installed plugins are available, calibre will automatically (unobtrusively) notify you"
      type: major

    - title: "Metadata download configuration: Allow defining a set of 'default' fields for metadata download and quickly switching to/from them"

    - title: "Allow clicking on the news category in the Tag Browser to display all downloaded periodicals"

    - title: "Driver for the Libre Air"

    - title: "Email sending: Allow user to stop email jobs (note that stopping may not actually prevent the email from being sent, depending on when the stop happens). Also automatically abort email sending if it takes longer than 15 minutes."
      tickets: [795960]

  bug fixes:
    - title: "MOBI Output: Allow setting of background color on tables; also set the border attribute on the table if the table has any border related CSS defined."
      tickets: [797580]

    - title: "Nook TSR: Put news sent to the device in My Files/Newspapers instead of My Files/Books."
      tickets: [796674]

    - title: "MOBI Output: Fix a bug where linking to the very first element in an HTML file could sometimes result in the link pointing to the last element in the previous file."
      tickets: [797214]

    - title: "CSV catalog: Convert HTML comments to plain text"

    - title: "HTML Input: Ignore links to text files."
      tickets: [791568]

    - title: "EPUB Output: Change orphaned <td> tags to <div> as they cause ADE to crash."

    - title: "Fix 'Stop selected jobs' button trying to stop the same job multiple times"

    - title: "Database: Explicitly test for case sensitivity on OS X instead of assuming a case insensitive filesystem."
      tickets: [796258]

    - title: "Get Books: More fixes to the Amazon store plugin"

    - title: "FB2 Input: Do not specify font families/background colors"

  improved recipes:
    - Philadelphia Inquirer
    - Macleans Magazine
    - Metro UK

  new recipes:
    - title: "Christian Post, Down To Earth and Words Without Borders"
      author: sexymax15

    - title: "Noticias R7"
      author: Diniz Bortolotto

    - title: "UK Daily Mirror"
      author: Dave Asbury

    - title: "New Musical Express Magazine"
      author: scissors

- version: 0.8.5
  date: 2011-06-10
recipes/christian_post.recipe (new file, 37 lines)
@ -0,0 +1,37 @@
# created by sexymax15 ....sexymax15@gmail.com
# Christian Post recipe
from calibre.web.feeds.news import BasicNewsRecipe


class ChristianPost(BasicNewsRecipe):

    title = 'The Christian Post'
    __author__ = 'sexymax15'
    description = 'Homepage'
    language = 'en'
    use_embedded_content = False
    oldest_article = 30
    max_articles_per_feed = 15

    remove_empty_feeds = True
    no_stylesheets = True
    remove_javascript = True

    extra_css = '''
        h1 {color:#008852;font-family:Arial,Helvetica,sans-serif; font-size:20px; font-size-adjust:none; font-stretch:normal; font-style:normal; font-variant:normal; font-weight:bold; line-height:18px;}
        h2 {color:#4D4D4D;font-family:Arial,Helvetica,sans-serif; font-size:16px; font-size-adjust:none; font-stretch:normal; font-style:normal; font-variant:normal; font-weight:bold; line-height:16px; } '''

    feeds = [
        ('Homepage', 'http://www.christianpost.com/services/rss/feed/'),
        ('Most Popular', 'http://www.christianpost.com/services/rss/feed/most-popular'),
        ('Entertainment', 'http://www.christianpost.com/services/rss/feed/entertainment/'),
        ('Politics', 'http://www.christianpost.com/services/rss/feed/politics/'),
        ('Living', 'http://www.christianpost.com/services/rss/feed/living/'),
        ('Business', 'http://www.christianpost.com/services/rss/feed/business/'),
        ('Opinion', 'http://www.christianpost.com/services/rss/feed/opinion/')
    ]

    # Fetch the printer-friendly version of each article
    def print_version(self, url):
        return url + 'print.html'
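calibre calls print_version once for every article URL pulled from the feeds above, so the recipe downloads the printer-friendly page instead of the regular article page. A tiny sketch of the transformation; the article URL is made up purely for illustration:

# Hypothetical article URL, only to show what print_version produces;
# real URLs come from the RSS feeds listed in the recipe.
article_url = 'http://www.christianpost.com/news/some-article-12345/'
print(article_url + 'print.html')
# http://www.christianpost.com/news/some-article-12345/print.html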
recipes/down_to_earth.recipe (new file, 18 lines)
@ -0,0 +1,18 @@
from calibre.web.feeds.recipes import BasicNewsRecipe


class AdvancedUserRecipe1307834113(BasicNewsRecipe):

    title = u'Down To Earth'
    oldest_article = 300
    __author__ = 'sexymax15'
    max_articles_per_feed = 30
    no_stylesheets = True
    remove_javascript = True
    remove_attributes = ['width', 'height']
    use_embedded_content = False
    language = 'en_IN'
    remove_empty_feeds = True
    remove_tags_before = dict(name='div', id='PageContent')
    remove_tags_after = [dict(name='div'), {'class':'box'}]
    remove_tags = [{'class':'box'}]

    feeds = [
        (u'editor', u'http://www.downtoearth.org.in/taxonomy/term/20348/0/feed'),
        (u'cover story', u'http://www.downtoearth.org.in/taxonomy/term/20345/0/feed'),
        (u'special report', u'http://www.downtoearth.org.in/taxonomy/term/20384/0/feed'),
        (u'features', u'http://www.downtoearth.org.in/taxonomy/term/20350/0/feed'),
        (u'news', u'http://www.downtoearth.org.in/taxonomy/term/20366/0/feed'),
        (u'debate', u'http://www.downtoearth.org.in/taxonomy/term/20347/0/feed'),
        (u'natural disasters', u'http://www.downtoearth.org.in/taxonomy/term/20822/0/feed')
    ]
@ -1,239 +1,28 @@
|
||||
#!/usr/bin/env python
|
||||
from calibre.web.feeds.news import BasicNewsRecipe
|
||||
|
||||
__license__ = 'GPL v3'
|
||||
|
||||
'''
|
||||
macleans.ca
|
||||
'''
|
||||
from calibre.web.feeds.recipes import BasicNewsRecipe
|
||||
from calibre.ebooks.BeautifulSoup import Tag
|
||||
from datetime import timedelta, date
|
||||
|
||||
class Macleans(BasicNewsRecipe):
|
||||
class AdvancedUserRecipe1308306308(BasicNewsRecipe):
|
||||
title = u'Macleans Magazine'
|
||||
__author__ = 'Nick Redding'
|
||||
language = 'en_CA'
|
||||
description = ('Macleans Magazine')
|
||||
__author__ = 'sexymax15'
|
||||
oldest_article = 30
|
||||
max_articles_per_feed = 12
|
||||
|
||||
use_embedded_content = False
|
||||
|
||||
remove_empty_feeds = True
|
||||
no_stylesheets = True
|
||||
timefmt = ' [%b %d]'
|
||||
remove_javascript = True
|
||||
remove_tags = [dict(name ='img'),dict (id='header'),{'class':'postmetadata'}]
|
||||
remove_tags_after = {'class':'postmetadata'}
|
||||
|
||||
# customization notes: delete sections you are not interested in
|
||||
# set oldest_article to the maximum number of days back from today to include articles
|
||||
sectionlist = [
|
||||
['http://www2.macleans.ca/','Front Page'],
|
||||
['http://www2.macleans.ca/category/canada/','Canada'],
|
||||
['http://www2.macleans.ca/category/world-from-the-magazine/','World'],
|
||||
['http://www2.macleans.ca/category/business','Business'],
|
||||
['http://www2.macleans.ca/category/arts-culture/','Culture'],
|
||||
['http://www2.macleans.ca/category/opinion','Opinion'],
|
||||
['http://www2.macleans.ca/category/health-from-the-magazine/','Health'],
|
||||
['http://www2.macleans.ca/category/environment-from-the-magazine/','Environment'],
|
||||
['http://www2.macleans.ca/category/education/','On Campus'],
|
||||
['http://www2.macleans.ca/category/travel-from-the-magazine/','Travel']
|
||||
]
|
||||
oldest_article = 7
|
||||
|
||||
# formatting for print version of articles
|
||||
extra_css = '''h2{font-family:Times,serif; font-size:large;}
|
||||
small {font-family:Times,serif; font-size:xx-small; list-style-type: none;}
|
||||
'''
|
||||
|
||||
# tag handling for print version of articles
|
||||
keep_only_tags = [dict(id='tw-print')]
|
||||
remove_tags = [dict({'class':'postmetadata'})]
|
||||
|
||||
|
||||
def preprocess_html(self,soup):
|
||||
for img_tag in soup.findAll('img'):
|
||||
parent_tag = img_tag.parent
|
||||
if parent_tag.name == 'a':
|
||||
new_tag = Tag(soup,'p')
|
||||
new_tag.insert(0,img_tag)
|
||||
parent_tag.replaceWith(new_tag)
|
||||
elif parent_tag.name == 'p':
|
||||
if not self.tag_to_string(parent_tag) == '':
|
||||
new_div = Tag(soup,'div')
|
||||
new_tag = Tag(soup,'p')
|
||||
new_tag.insert(0,img_tag)
|
||||
parent_tag.replaceWith(new_div)
|
||||
new_div.insert(0,new_tag)
|
||||
new_div.insert(1,parent_tag)
|
||||
return soup
|
||||
|
||||
def parse_index(self):
|
||||
|
||||
|
||||
|
||||
articles = {}
|
||||
key = None
|
||||
ans = []
|
||||
|
||||
def parse_index_page(page_url,page_title):
|
||||
|
||||
def decode_date(datestr):
|
||||
dmysplit = datestr.strip().lower().split(',')
|
||||
mdsplit = dmysplit[1].split()
|
||||
m = ['january','february','march','april','may','june','july','august','september','october','november','december'].index(mdsplit[0])+1
|
||||
d = int(mdsplit[1])
|
||||
y = int(dmysplit[2].split()[0])
|
||||
return date(y,m,d)
|
||||
|
||||
def article_title(tag):
|
||||
atag = tag.find('a',href=True)
|
||||
if not atag:
|
||||
return ''
|
||||
return self.tag_to_string(atag)
|
||||
|
||||
def article_url(tag):
|
||||
atag = tag.find('a',href=True)
|
||||
if not atag:
|
||||
return ''
|
||||
return atag['href']+'print/'
|
||||
|
||||
def article_description(tag):
|
||||
for p_tag in tag.findAll('p'):
|
||||
d = self.tag_to_string(p_tag,False)
|
||||
if not d == '':
|
||||
return d
|
||||
return ''
|
||||
|
||||
def compound_h4_h3_title(tag):
|
||||
if tag.h4:
|
||||
if tag.h3:
|
||||
return self.tag_to_string(tag.h4,False)+u'\u2014'+self.tag_to_string(tag.h3,False)
|
||||
else:
|
||||
return self.tag_to_string(tag.h4,False)
|
||||
elif tag.h3:
|
||||
return self.tag_to_string(tag.h3,False)
|
||||
else:
|
||||
return ''
|
||||
|
||||
def compound_h2_h4_title(tag):
|
||||
if tag.h2:
|
||||
if tag.h4:
|
||||
return self.tag_to_string(tag.h2,False)+u'\u2014'+self.tag_to_string(tag.h4,False)
|
||||
else:
|
||||
return self.tag_to_string(tag.h2,False)
|
||||
elif tag.h4:
|
||||
return self.tag_to_string(tag.h4,False)
|
||||
else:
|
||||
return ''
|
||||
|
||||
|
||||
def handle_article(header_tag, outer_tag):
|
||||
if header_tag:
|
||||
url = article_url(header_tag)
|
||||
title = article_title(header_tag)
|
||||
author_date_tag = outer_tag.h4
|
||||
if author_date_tag:
|
||||
author_date = self.tag_to_string(author_date_tag,False).split(' - ')
|
||||
author = author_date[0].strip()
|
||||
article_date = decode_date(author_date[1])
|
||||
earliest_date = date.today() - timedelta(days=self.oldest_article)
|
||||
if article_date < earliest_date:
|
||||
self.log("Skipping article dated %s" % author_date[1])
|
||||
else:
|
||||
excerpt_div = outer_tag.find('div','excerpt')
|
||||
if excerpt_div:
|
||||
description = article_description(excerpt_div)
|
||||
else:
|
||||
description = ''
|
||||
if not articles.has_key(page_title):
|
||||
articles[page_title] = []
|
||||
articles[page_title].append(dict(title=title,url=url,date=author_date[1],description=description,author=author,content=''))
|
||||
|
||||
def handle_category_article(cat, header_tag, outer_tag):
|
||||
url = article_url(header_tag)
|
||||
title = article_title(header_tag)
|
||||
if not title == '':
|
||||
title = cat+u'\u2014'+title
|
||||
a_tag = outer_tag.find('span','authorLink')
|
||||
if a_tag:
|
||||
author = self.tag_to_string(a_tag,False)
|
||||
a_tag.parent.extract()
|
||||
else:
|
||||
author = ''
|
||||
description = article_description(outer_tag)
|
||||
if not articles.has_key(page_title):
|
||||
articles[page_title] = []
|
||||
articles[page_title].append(dict(title=title,url=url,date='',description=description,author=author,content=''))
|
||||
|
||||
|
||||
soup = self.index_to_soup(page_url)
|
||||
|
||||
if page_title == 'Front Page':
|
||||
# special processing for the front page
|
||||
top_stories = soup.find('div',{ "id" : "macleansFeatured" })
|
||||
if top_stories:
|
||||
for div_slide in top_stories.findAll('div','slide'):
|
||||
url = article_url(div_slide)
|
||||
div_title = div_slide.find('div','header')
|
||||
if div_title:
|
||||
title = self.tag_to_string(div_title,False)
|
||||
else:
|
||||
title = ''
|
||||
description = article_description(div_slide)
|
||||
if not articles.has_key(page_title):
|
||||
articles[page_title] = []
|
||||
articles[page_title].append(dict(title=title,url=url,date='',description=description,author='',content=''))
|
||||
|
||||
from_macleans = soup.find('div',{ "id" : "fromMacleans" })
|
||||
if from_macleans:
|
||||
for li_tag in from_macleans.findAll('li','fromMacleansArticle'):
|
||||
title = compound_h4_h3_title(li_tag)
|
||||
url = article_url(li_tag)
|
||||
description = article_description(li_tag)
|
||||
if not articles.has_key(page_title):
|
||||
articles[page_title] = []
|
||||
articles[page_title].append(dict(title=title,url=url,date='',description=description,author='',content=''))
|
||||
|
||||
blog_central = soup.find('div',{ "id" : "bloglist" })
|
||||
if blog_central:
|
||||
for li_tag in blog_central.findAll('li'):
|
||||
title = compound_h2_h4_title(li_tag)
|
||||
if li_tag.h4:
|
||||
url = article_url(li_tag.h4)
|
||||
if not articles.has_key(page_title):
|
||||
articles[page_title] = []
|
||||
articles[page_title].append(dict(title=title,url=url,date='',description='',author='',content=''))
|
||||
|
||||
# need_to_know = soup.find('div',{ "id" : "needToKnow" })
|
||||
# if need_to_know:
|
||||
# for div_tag in need_to_know('div',attrs={'class' : re.compile("^needToKnowArticle")}):
|
||||
# title = compound_h4_h3_title(div_tag)
|
||||
# url = article_url(div_tag)
|
||||
# description = article_description(div_tag)
|
||||
# if not articles.has_key(page_title):
|
||||
# articles[page_title] = []
|
||||
# articles[page_title].append(dict(title=title,url=url,date='',description=description,author='',content=''))
|
||||
|
||||
for news_category in soup.findAll('div','newsCategory'):
|
||||
news_cat = self.tag_to_string(news_category.h4,False)
|
||||
handle_category_article(news_cat, news_category.find('h2'), news_category.find('div'))
|
||||
for news_item in news_category.findAll('li'):
|
||||
handle_category_article(news_cat,news_item.h3,news_item)
|
||||
|
||||
return
|
||||
|
||||
# find the div containing the highlight article
|
||||
div_post = soup.find('div','post')
|
||||
if div_post:
|
||||
h1_tag = div_post.h1
|
||||
handle_article(h1_tag,div_post)
|
||||
|
||||
# find the divs containing the rest of the articles
|
||||
div_other = div_post.find('div', { "id" : "categoryOtherPosts" })
|
||||
if div_other:
|
||||
for div_entry in div_other.findAll('div','entry'):
|
||||
h2_tag = div_entry.h2
|
||||
handle_article(h2_tag,div_entry)
|
||||
|
||||
|
||||
|
||||
for page_name,page_title in self.sectionlist:
|
||||
parse_index_page(page_name,page_title)
|
||||
ans.append(page_title)
|
||||
|
||||
ans = [(key, articles[key]) for key in ans if articles.has_key(key)]
|
||||
return ans
|
||||
feeds = [(u'Blog Central', u'http://www2.macleans.ca/category/blog-central/feed/'),
|
||||
(u'Canada', u'http://www2.macleans.ca/category/canada/feed/'),
|
||||
(u'World', u'http://www2.macleans.ca/category/world-from-the-magazine/feed/'),
|
||||
(u'Business', u'http://www2.macleans.ca/category/business/feed/'),
|
||||
(u'Arts & Culture', u'http://www2.macleans.ca/category/arts-culture/feed/'),
|
||||
(u'Opinion', u'http://www2.macleans.ca/category/opinion/feed/'),
|
||||
(u'Health', u'http://www2.macleans.ca/category/health-from-the-magazine/feed/'),
|
||||
(u'Environment', u'http://www2.macleans.ca/category/environment-from-the-magazine/feed/')]
|
||||
def print_version(self, url):
|
||||
return url + 'print/'
|
||||
|
@ -3,7 +3,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
 class AdvancedUserRecipe1295081935(BasicNewsRecipe):
     title = u'Mail & Guardian ZA News'
     __author__ = '77ja65'
-    language = 'en'
+    language = 'en_ZA'
     oldest_article = 7
     max_articles_per_feed = 30
     no_stylesheets = True
@ -1,3 +1,4 @@
+import re
 from calibre.web.feeds.news import BasicNewsRecipe
 class AdvancedUserRecipe1306097511(BasicNewsRecipe):
     title = u'Metro UK'
@ -10,6 +11,7 @@ class AdvancedUserRecipe1306097511(BasicNewsRecipe):
     remove_empty_feeds = True
     remove_javascript = True

+    preprocess_regexps = [(re.compile(r'Tweet'), lambda a: '')]

     language = 'en_GB'
@ -1,85 +1,45 @@
|
||||
#!/usr/bin/env python
|
||||
__license__ = 'GPL v3'
|
||||
'''
|
||||
philly.com/inquirer/
|
||||
'''
|
||||
from calibre.web.feeds.recipes import BasicNewsRecipe
|
||||
from calibre.web.feeds.news import BasicNewsRecipe
|
||||
|
||||
class Philly(BasicNewsRecipe):
|
||||
|
||||
title = 'Philadelphia Inquirer'
|
||||
__author__ = 'RadikalDissent and Sujata Raman'
|
||||
class AdvancedUserRecipe1308312288(BasicNewsRecipe):
|
||||
title = u'Philadelphia Inquirer'
|
||||
__author__ = 'sexymax15'
|
||||
language = 'en'
|
||||
description = 'Daily news from the Philadelphia Inquirer'
|
||||
no_stylesheets = True
|
||||
use_embedded_content = False
|
||||
oldest_article = 1
|
||||
max_articles_per_feed = 25
|
||||
oldest_article = 15
|
||||
max_articles_per_feed = 20
|
||||
use_embedded_content = False
|
||||
remove_empty_feeds = True
|
||||
no_stylesheets = True
|
||||
remove_javascript = True
|
||||
|
||||
extra_css = '''
|
||||
h1{font-family:verdana,arial,helvetica,sans-serif; font-size: large;}
|
||||
h2{font-family:verdana,arial,helvetica,sans-serif; font-size: small;}
|
||||
.body-content{font-family:verdana,arial,helvetica,sans-serif; font-size: small;}
|
||||
.byline {font-size: small; color: #666666; font-style:italic; }
|
||||
.lastline {font-size: small; color: #666666; font-style:italic;}
|
||||
.contact {font-size: small; color: #666666;}
|
||||
.contact p {font-size: small; color: #666666;}
|
||||
#photoCaption { font-family:verdana,arial,helvetica,sans-serif; font-size:x-small;}
|
||||
.photoCaption { font-family:verdana,arial,helvetica,sans-serif; font-size:x-small;}
|
||||
#photoCredit{ font-family:verdana,arial,helvetica,sans-serif; font-size:x-small; color:#666666;}
|
||||
.photoCredit{ font-family:verdana,arial,helvetica,sans-serif; font-size:x-small; color:#666666;}
|
||||
.article_timestamp{font-size:x-small; color:#666666;}
|
||||
a {font-family:verdana,arial,helvetica,sans-serif; font-size: x-small;}
|
||||
'''
|
||||
# remove_tags_before = {'class':'article_timestamp'}
|
||||
#remove_tags_after = {'class':'graylabel'}
|
||||
keep_only_tags= [dict(name=['h1','p'])]
|
||||
remove_tags = [dict(name=['hr','dl','dt','img','meta','iframe','link','script','form','input','label']),
|
||||
dict(id=['toggleConfirmEmailDiv','toggleTOS','toggleUsernameMsgDiv','toggleConfirmYear','navT1_philly','secondaryNav','navPlacement','globalPrimaryNav'
|
||||
,'ugc-footer-philly','bv_footer_include','footer','header',
|
||||
'container_rag_bottom','section_rectangle','contentrightside'])
|
||||
,{'class':['megamenu3 megamenu','container misc','container_inner misc_inner'
|
||||
,'misccontainer_left_32','headlineonly','misccontainer_middle_32'
|
||||
,'misccontainer_right_32','headline formBegin',
|
||||
'post_balloon','relatedlist','linkssubhead','b_sq','dotted-rule-above'
|
||||
,'container','headlines-digest','graylabel','container_inner'
|
||||
,'rlinks_colorbar1','rlinks_colorbar2','supercontainer','container_5col_left','container_image_left',
|
||||
'digest-headline2','digest-lead','container_5col_leftmiddle',
|
||||
'container_5col_middlemiddle','container_5col_rightmiddle'
|
||||
,'container_5col_right','divclear','supercontainer_outer force-width',
|
||||
'supercontainer','containertitle kicker-title',
|
||||
'pollquestion','pollchoice','photomore','pollbutton','container rssbox','containertitle video ',
|
||||
'containertitle_image ','container_tabtwo','selected'
|
||||
,'shadetabs','selected','tabcontentstyle','tabcontent','inner_container'
|
||||
,'arrow','container_ad','containertitlespacer','adUnit','tracking','sitemsg_911 clearfix']}]
|
||||
|
||||
keep_only_tags = [
|
||||
dict(name='div', attrs={'class':'story-content'}),
|
||||
dict(name='div', attrs={'id': 'contentinside'})
|
||||
]
|
||||
extra_css = """
|
||||
h1{font-family: Georgia,serif; font-size: xx-large}
|
||||
|
||||
remove_tags = [
|
||||
dict(name='div', attrs={'class':['linkssubhead','post_balloon','relatedlist','pollquestion','b_sq']}),
|
||||
dict(name='dl', attrs={'class':'relatedlist'}),
|
||||
dict(name='div', attrs={'id':['photoNav','sidebar_adholder']}),
|
||||
dict(name='a', attrs={'class': ['headlineonly','bl']}),
|
||||
dict(name='img', attrs={'class':'img_noborder'})
|
||||
]
|
||||
# def print_version(self, url):
|
||||
# return url + '?viewAll=y'
|
||||
"""
|
||||
|
||||
|
||||
feeds = [
|
||||
('Front Page', 'http://www.philly.com/inquirer_front_page.rss'),
|
||||
('Business', 'http://www.philly.com/inq_business.rss'),
|
||||
#('News', 'http://www.philly.com/inquirer/news/index.rss'),
|
||||
('Nation', 'http://www.philly.com/inq_news_world_us.rss'),
|
||||
('Local', 'http://www.philly.com/inquirer_local.rss'),
|
||||
('Health', 'http://www.philly.com/inquirer_health_science.rss'),
|
||||
('Education', 'http://www.philly.com/inquirer_education.rss'),
|
||||
('Editorial and opinion', 'http://www.philly.com/inq_news_editorial.rss'),
|
||||
('Sports', 'http://www.philly.com/inquirer_sports.rss')
|
||||
]
|
||||
feeds = [(u'News', u'http://www.philly.com/philly_news.rss')]
|
||||
|
||||
def get_article_url(self, article):
|
||||
ans = article.link
|
||||
|
||||
try:
|
||||
self.log('Looking for full story link in', ans)
|
||||
soup = self.index_to_soup(ans)
|
||||
x = soup.find(text="View All")
|
||||
|
||||
if x is not None:
|
||||
ans = ans + '?viewAll=y'
|
||||
self.log('Found full story link', ans)
|
||||
except:
|
||||
pass
|
||||
return ans
|
||||
|
||||
def postprocess_html(self, soup,first):
|
||||
|
||||
for tag in soup.findAll(name='div',attrs={'class':"container_ate_qandatitle"}):
|
||||
tag.extract()
|
||||
for tag in soup.findAll(name='br'):
|
||||
tag.extract()
|
||||
|
||||
return soup
|
||||
|
recipes/words_without_borders.recipe (new file, 25 lines)
@ -0,0 +1,25 @@
# recipe created by sexymax15.....sexymax15@gmail.com
# Words Without Borders recipe

from calibre.web.feeds.news import BasicNewsRecipe


class AdvancedUserRecipe1308302002(BasicNewsRecipe):
    title = u'Words Without Borders'
    language = 'en'
    __author__ = 'sexymax15'
    oldest_article = 90
    max_articles_per_feed = 30
    use_embedded_content = False

    remove_empty_feeds = True
    no_stylesheets = True
    remove_javascript = True
    keep_only_tags = [{'class':'span-14 article'}]
    remove_tags_after = [{'class':'addthis_toolbox addthis_default_style no_print'}]
    remove_tags = [{'class':['posterous_quote_citation', 'button']}]
    extra_css = """
        h1{font-family: Georgia,serif; font-size: large}
        h2{font-family: Georgia,serif; font-size: large} """

    feeds = [(u'wwb', u'http://feeds.feedburner.com/wwborders?format=xml')]
@ -4,7 +4,7 @@ __license__ = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
 __docformat__ = 'restructuredtext en'
 __appname__   = u'calibre'
-numeric_version = (0, 8, 5)
+numeric_version = (0, 8, 6)
 __version__   = u'.'.join(map(unicode, numeric_version))
 __author__    = u"Kovid Goyal <kovid@kovidgoyal.net>"
@ -457,7 +457,7 @@ class HTMLInput(InputFormatPlugin):
                     href=bhref)
             guessed = self.guess_type(href)[0]
             media_type = guessed or self.BINARY_MIME
-            if 'text' in media_type:
+            if media_type == 'text/plain':
                 self.log.warn('Ignoring link to text file %r'%link_)
                 return None
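The narrowed check only skips links whose guessed type is exactly text/plain; other text/* types keep their normal handling. calibre's guess_type helper is assumed here to give the same answers as the standard library's mimetypes module for common extensions, which the following stand-in sketch uses:

import mimetypes

# Stand-in for self.guess_type(); assumed to agree with calibre's helper
# for these everyday extensions.
for href in ('notes.txt', 'chapter2.html', 'cover.jpg'):
    media_type = mimetypes.guess_type(href)[0] or 'application/octet-stream'
    print(href, '->', media_type, '(ignored)' if media_type == 'text/plain' else '')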
@ -21,6 +21,7 @@ from calibre.ebooks.metadata import check_isbn
 msprefs = JSONConfig('metadata_sources/global.json')
 msprefs.defaults['txt_comments'] = False
 msprefs.defaults['ignore_fields'] = []
+msprefs.defaults['user_default_ignore_fields'] = []
 msprefs.defaults['max_tags'] = 20
 msprefs.defaults['wait_after_first_identify_result'] = 30 # seconds
 msprefs.defaults['wait_after_first_cover_result'] = 60 # seconds
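The new user_default_ignore_fields entry gets an empty-list default so installs that have never saved a value still read something sensible. A hedged sketch of that pattern, using the same JSONConfig calls that appear in this file; the 'example/prefs.json' name is invented for illustration:

from calibre.utils.config import JSONConfig

# Hypothetical config file name, for illustration only.
prefs = JSONConfig('example/prefs.json')
prefs.defaults['user_default_ignore_fields'] = []

# Until the user stores a value, reads fall back to the default above.
fields = prefs['user_default_ignore_fields']    # [] on a fresh install
prefs['user_default_ignore_fields'] = ['tags']  # written back to the JSON config file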
@ -1055,6 +1055,12 @@ class Manifest(object):
                     and len(a) == 0 and not a.text:
                 remove_elem(a)

+        # Convert <br>s with content into paragraphs as ADE can't handle
+        # them
+        for br in xpath(data, '//h:br'):
+            if len(br) > 0 or br.text:
+                br.tag = XHTML('div')
+
         return data

     def _parse_txt(self, data):
@ -1156,7 +1162,7 @@ class Manifest(object):
             data = self._parse_xml(data)
         elif self.media_type.lower() in OEB_STYLES:
             data = self._parse_css(data)
-        elif 'text' in self.media_type.lower():
+        elif self.media_type.lower() == 'text/plain':
             self.oeb.log.warn('%s contains data in TXT format'%self.href,
                     'converting to HTML')
             data = self._parse_txt(data)
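The new <br> handling above turns any break element that carries text or child elements into a block-level <div>, which ADE renders without crashing. A minimal standalone sketch of the same idea, using plain lxml instead of calibre's xpath()/XHTML() helpers:

from lxml import etree

XHTML_NS = 'http://www.w3.org/1999/xhtml'

def convert_content_bearing_brs(root):
    # Rename any <br> that has children or text to <div>, mirroring the
    # Manifest fix above (plain lxml here, not calibre's helpers).
    for br in root.iter('{%s}br' % XHTML_NS):
        if len(br) > 0 or br.text:
            br.tag = '{%s}div' % XHTML_NS
    return root

html = ('<html xmlns="%s"><body><p>intro</p>'
        '<br>stray text that trips up ADE</br></body></html>' % XHTML_NS)
root = etree.fromstring(html)
convert_content_bearing_brs(root)
print(etree.tostring(root).decode())  # the content-bearing <br> is now a <div>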
@ -287,6 +287,18 @@ class ChooseLibraryAction(InterfaceAction):
                 'rate of approximately 1 book every three seconds.'), show=True)

     def restore_database(self):
+        m = self.gui.library_view.model()
+        db = m.db
+        if (iswindows and len(db.library_path) >
+                LibraryDatabase2.WINDOWS_LIBRARY_PATH_LIMIT):
+            return error_dialog(self.gui, _('Too long'),
+                    _('Path to library too long. Must be less than'
+                    ' %d characters. Move your library to a location with'
+                    ' a shorter path using Windows Explorer, then point'
+                    ' calibre to the new location and try again.')%
+                    LibraryDatabase2.WINDOWS_LIBRARY_PATH_LIMIT,
+                    show=True)
+
         from calibre.gui2.dialogs.restore_library import restore_database
         m = self.gui.library_view.model()
         m.stop_metadata_backup()
@ -119,6 +119,7 @@ class DeviceManager(Thread): # {{{
         self.sleep_time = sleep_time
         self.connected_slot = connected_slot
         self.jobs = Queue.Queue(0)
+        self.job_steps = Queue.Queue(0)
         self.keep_going = True
         self.job_manager = job_manager
         self.reported_errors = set([])
@ -235,6 +236,12 @@ class DeviceManager(Thread): # {{{
             self.connected_device.unmount_device()

     def next(self):
+        if not self.job_steps.empty():
+            try:
+                return self.job_steps.get_nowait()
+            except Queue.Empty:
+                pass
+
         if not self.jobs.empty():
             try:
                 return self.jobs.get_nowait()
@ -271,13 +278,20 @@ class DeviceManager(Thread): # {{{
                 break
             time.sleep(self.sleep_time)

-    def create_job(self, func, done, description, args=[], kwargs={}):
+    def create_job_step(self, func, done, description, to_job, args=[], kwargs={}):
         job = DeviceJob(func, done, self.job_manager,
                 args=args, kwargs=kwargs, description=description)
         self.job_manager.add_job(job)
-        self.jobs.put(job)
+        if (done is None or isinstance(done, FunctionDispatcher)) and \
+                    (to_job is not None and to_job == self.current_job):
+            self.job_steps.put(job)
+        else:
+            self.jobs.put(job)
         return job
+
+    def create_job(self, func, done, description, args=[], kwargs={}):
+        return self.create_job_step(func, done, description, None, args, kwargs)

     def has_card(self):
         try:
             return bool(self.device.card_prefix())
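The net effect is that a completion callback can queue follow-up work as a step of the job that is currently running, and next() above drains job_steps before the regular jobs queue, so a chain such as get device information, read book lists, sync metadata runs back to back ahead of any independently queued job. A stripped-down sketch of just that queueing idea, with invented names rather than calibre's real classes (calibre's code used the Python 2 Queue module; the queue module below is its Python 3 name):

import queue

class TinyStepScheduler(object):
    """Illustrative only: the two-queue priority used by DeviceManager."""

    def __init__(self):
        self.jobs = queue.Queue()       # independent jobs
        self.job_steps = queue.Queue()  # follow-up steps of the running job
        self.current_job = None

    def create_job_step(self, func, to_job=None):
        # Steps attached to the currently running job jump the queue.
        if to_job is not None and to_job == self.current_job:
            self.job_steps.put(func)
        else:
            self.jobs.put(func)
        return func

    def next(self):
        for q in (self.job_steps, self.jobs):
            try:
                return q.get_nowait()
            except queue.Empty:
                continue
        return None

sched = TinyStepScheduler()
sched.jobs.put('unrelated job')
sched.current_job = 'books job'
sched.create_job_step('sync metadata', to_job='books job')
print(sched.next())  # 'sync metadata' runs before 'unrelated job'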
@ -295,10 +309,10 @@ class DeviceManager(Thread): # {{{
|
||||
self._device_information = {'info': info, 'prefixes': cp, 'freespace': fs}
|
||||
return info, cp, fs
|
||||
|
||||
def get_device_information(self, done):
|
||||
def get_device_information(self, done, add_as_step_to_job=None):
|
||||
'''Get device information and free space on device'''
|
||||
return self.create_job(self._get_device_information, done,
|
||||
description=_('Get device information'))
|
||||
return self.create_job_step(self._get_device_information, done,
|
||||
description=_('Get device information'), to_job=add_as_step_to_job)
|
||||
|
||||
def get_current_device_information(self):
|
||||
return self._device_information
|
||||
@ -310,36 +324,38 @@ class DeviceManager(Thread): # {{{
|
||||
cardblist = self.device.books(oncard='cardb')
|
||||
return (mainlist, cardalist, cardblist)
|
||||
|
||||
def books(self, done):
|
||||
def books(self, done, add_as_step_to_job=None):
|
||||
'''Return callable that returns the list of books on device as two booklists'''
|
||||
return self.create_job(self._books, done, description=_('Get list of books on device'))
|
||||
return self.create_job_step(self._books, done,
|
||||
description=_('Get list of books on device'), to_job=add_as_step_to_job)
|
||||
|
||||
def _annotations(self, path_map):
|
||||
return self.device.get_annotations(path_map)
|
||||
|
||||
def annotations(self, done, path_map):
|
||||
def annotations(self, done, path_map, add_as_step_to_job=None):
|
||||
'''Return mapping of ids to annotations. Each annotation is of the
|
||||
form (type, location_info, content). path_map is a mapping of
|
||||
ids to paths on the device.'''
|
||||
return self.create_job(self._annotations, done, args=[path_map],
|
||||
description=_('Get annotations from device'))
|
||||
return self.create_job_step(self._annotations, done, args=[path_map],
|
||||
description=_('Get annotations from device'), to_job=add_as_step_to_job)
|
||||
|
||||
def _sync_booklists(self, booklists):
|
||||
'''Sync metadata to device'''
|
||||
self.device.sync_booklists(booklists, end_session=False)
|
||||
return self.device.card_prefix(end_session=False), self.device.free_space()
|
||||
|
||||
def sync_booklists(self, done, booklists, plugboards):
|
||||
def sync_booklists(self, done, booklists, plugboards, add_as_step_to_job=None):
|
||||
if hasattr(self.connected_device, 'set_plugboards') and \
|
||||
callable(self.connected_device.set_plugboards):
|
||||
self.connected_device.set_plugboards(plugboards, find_plugboard)
|
||||
return self.create_job(self._sync_booklists, done, args=[booklists],
|
||||
description=_('Send metadata to device'))
|
||||
return self.create_job_step(self._sync_booklists, done, args=[booklists],
|
||||
description=_('Send metadata to device'), to_job=add_as_step_to_job)
|
||||
|
||||
def upload_collections(self, done, booklist, on_card):
|
||||
return self.create_job(booklist.rebuild_collections, done,
|
||||
def upload_collections(self, done, booklist, on_card, add_as_step_to_job=None):
|
||||
return self.create_job_step(booklist.rebuild_collections, done,
|
||||
args=[booklist, on_card],
|
||||
description=_('Send collections to device'))
|
||||
description=_('Send collections to device'),
|
||||
to_job=add_as_step_to_job)
|
||||
|
||||
def _upload_books(self, files, names, on_card=None, metadata=None, plugboards=None):
|
||||
'''Upload books to device: '''
|
||||
@ -374,11 +390,12 @@ class DeviceManager(Thread): # {{{
|
||||
metadata=metadata, end_session=False)
|
||||
|
||||
def upload_books(self, done, files, names, on_card=None, titles=None,
|
||||
metadata=None, plugboards=None):
|
||||
metadata=None, plugboards=None, add_as_step_to_job=None):
|
||||
desc = _('Upload %d books to device')%len(names)
|
||||
if titles:
|
||||
desc += u':' + u', '.join(titles)
|
||||
return self.create_job(self._upload_books, done, args=[files, names],
|
||||
return self.create_job_step(self._upload_books, done, to_job=add_as_step_to_job,
|
||||
args=[files, names],
|
||||
kwargs={'on_card':on_card,'metadata':metadata,'plugboards':plugboards}, description=desc)
|
||||
|
||||
def add_books_to_metadata(self, locations, metadata, booklists):
|
||||
@ -388,9 +405,10 @@ class DeviceManager(Thread): # {{{
|
||||
'''Remove books from device'''
|
||||
self.device.delete_books(paths, end_session=True)
|
||||
|
||||
def delete_books(self, done, paths):
|
||||
return self.create_job(self._delete_books, done, args=[paths],
|
||||
description=_('Delete books from device'))
|
||||
def delete_books(self, done, paths, add_as_step_to_job=None):
|
||||
return self.create_job_step(self._delete_books, done, args=[paths],
|
||||
description=_('Delete books from device'),
|
||||
to_job=add_as_step_to_job)
|
||||
|
||||
def remove_books_from_metadata(self, paths, booklists):
|
||||
self.device.remove_books_from_metadata(paths, booklists)
|
||||
@ -405,9 +423,10 @@ class DeviceManager(Thread): # {{{
|
||||
self.device.get_file(path, f)
|
||||
f.close()
|
||||
|
||||
def save_books(self, done, paths, target):
|
||||
return self.create_job(self._save_books, done, args=[paths, target],
|
||||
description=_('Download books from device'))
|
||||
def save_books(self, done, paths, target, add_as_step_to_job=None):
|
||||
return self.create_job_step(self._save_books, done, args=[paths, target],
|
||||
description=_('Download books from device'),
|
||||
to_job=add_as_step_to_job)
|
||||
|
||||
def _view_book(self, path, target):
|
||||
f = open(target, 'wb')
|
||||
@ -415,9 +434,9 @@ class DeviceManager(Thread): # {{{
|
||||
f.close()
|
||||
return target
|
||||
|
||||
def view_book(self, done, path, target):
|
||||
return self.create_job(self._view_book, done, args=[path, target],
|
||||
description=_('View book on device'))
|
||||
def view_book(self, done, path, target, add_as_step_to_job=None):
|
||||
return self.create_job_step(self._view_book, done, args=[path, target],
|
||||
description=_('View book on device'), to_job=add_as_step_to_job)
|
||||
|
||||
def set_current_library_uuid(self, uuid):
|
||||
self.current_library_uuid = uuid
|
||||
@ -778,7 +797,8 @@ class DeviceMixin(object): # {{{
|
||||
self.device_manager.device.icon)
|
||||
self.bars_manager.update_bars()
|
||||
self.status_bar.device_connected(info[0])
|
||||
self.device_manager.books(FunctionDispatcher(self.metadata_downloaded))
|
||||
self.device_manager.books(FunctionDispatcher(self.metadata_downloaded),
|
||||
add_as_step_to_job=job)
|
||||
|
||||
def metadata_downloaded(self, job):
|
||||
'''
|
||||
@ -788,7 +808,7 @@ class DeviceMixin(object): # {{{
|
||||
self.device_job_exception(job)
|
||||
return
|
||||
# set_books_in_library might schedule a sync_booklists job
|
||||
self.set_books_in_library(job.result, reset=True)
|
||||
self.set_books_in_library(job.result, reset=True, add_as_step_to_job=job)
|
||||
mainlist, cardalist, cardblist = job.result
|
||||
self.memory_view.set_database(mainlist)
|
||||
self.memory_view.set_editable(self.device_manager.device.CAN_SET_METADATA,
|
||||
@ -843,8 +863,8 @@ class DeviceMixin(object): # {{{
|
||||
# set_books_in_library even though books were not added because
|
||||
# the deleted book might have been an exact match. Upload the booklists
|
||||
# if set_books_in_library did not.
|
||||
if not self.set_books_in_library(self.booklists(), reset=True):
|
||||
self.upload_booklists()
|
||||
if not self.set_books_in_library(self.booklists(), reset=True, add_as_step_to_job=job):
|
||||
self.upload_booklists(job)
|
||||
self.book_on_device(None, reset=True)
|
||||
# We need to reset the ondevice flags in the library. Use a big hammer,
|
||||
# so we don't need to worry about whether some succeeded or not.
|
||||
@ -1193,13 +1213,14 @@ class DeviceMixin(object): # {{{
|
||||
self.device_manager.sync_booklists(Dispatcher(lambda x: x),
|
||||
self.booklists(), plugboards)
|
||||
|
||||
def upload_booklists(self):
|
||||
def upload_booklists(self, add_as_step_to_job=None):
|
||||
'''
|
||||
Upload metadata to device.
|
||||
'''
|
||||
plugboards = self.library_view.model().db.prefs.get('plugboards', {})
|
||||
self.device_manager.sync_booklists(FunctionDispatcher(self.metadata_synced),
|
||||
self.booklists(), plugboards)
|
||||
self.booklists(), plugboards,
|
||||
add_as_step_to_job=add_as_step_to_job)
|
||||
|
||||
def metadata_synced(self, job):
|
||||
'''
|
||||
@ -1274,8 +1295,8 @@ class DeviceMixin(object): # {{{
|
||||
# because the UUID changed. Force both the device and the library view
|
||||
# to refresh the flags. Set_books_in_library could upload the booklists.
|
||||
# If it does not, then do it here.
|
||||
if not self.set_books_in_library(self.booklists(), reset=True):
|
||||
self.upload_booklists()
|
||||
if not self.set_books_in_library(self.booklists(), reset=True, add_as_step_to_job=job):
|
||||
self.upload_booklists(job)
|
||||
with self.library_view.preserve_selected_books:
|
||||
self.book_on_device(None, reset=True)
|
||||
self.refresh_ondevice()
|
||||
@ -1335,7 +1356,7 @@ class DeviceMixin(object): # {{{
|
||||
loc[4] |= self.book_db_uuid_path_map[id]
|
||||
return loc
|
||||
|
||||
def set_books_in_library(self, booklists, reset=False):
|
||||
def set_books_in_library(self, booklists, reset=False, add_as_step_to_job=None):
|
||||
'''
|
||||
Set the ondevice indications in the device database.
|
||||
This method should be called before book_on_device is called, because
|
||||
@ -1487,7 +1508,7 @@ class DeviceMixin(object): # {{{
|
||||
plugboards = self.library_view.model().db.prefs.get('plugboards', {})
|
||||
self.device_manager.sync_booklists(
|
||||
FunctionDispatcher(self.metadata_synced), booklists,
|
||||
plugboards)
|
||||
plugboards, add_as_step_to_job)
|
||||
return update_metadata
|
||||
# }}}
|
||||
|
||||
|
@ -224,6 +224,20 @@ class FieldsModel(QAbstractListModel): # {{{
                 Qt.Unchecked])
         msprefs['ignore_fields'] = list(ignored_fields.union(changed))

+    def user_default_state(self, field):
+        return (Qt.Unchecked if field in msprefs.get('user_default_ignore_fields',[])
+                else Qt.Checked)
+
+    def select_user_defaults(self):
+        self.overrides = dict([(f, self.user_default_state(f)) for f in self.fields])
+        self.reset()
+
+    def commit_user_defaults(self):
+        default_ignored_fields = set([x for x in msprefs['user_default_ignore_fields'] if x not in
+            self.overrides])
+        changed = set([k for k, v in self.overrides.iteritems() if v ==
+            Qt.Unchecked])
+        msprefs['user_default_ignore_fields'] = list(default_ignored_fields.union(changed))
+
     # }}}

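commit_user_defaults keeps every previously stored default field the user has not touched in the current dialog and adds every field that is now unchecked; select_user_defaults goes the other way, turning the stored list back into checkbox states. A worked example of the commit-side set arithmetic, with field names invented purely for illustration:

# Invented field names, only to show the set arithmetic used above.
UNCHECKED, CHECKED = 0, 2   # stand-ins for Qt.Unchecked / Qt.Checked

stored_defaults = ['tags', 'rating']                    # msprefs['user_default_ignore_fields']
overrides = {'tags': CHECKED, 'comments': UNCHECKED}    # current checkbox states

untouched = set(x for x in stored_defaults if x not in overrides)         # {'rating'}
newly_unchecked = set(k for k, v in overrides.items() if v == UNCHECKED)  # {'comments'}

print(sorted(untouched | newly_unchecked))  # ['comments', 'rating'] -> new stored default list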
@ -286,6 +300,9 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
         self.select_all_button.clicked.connect(self.changed_signal)
         self.clear_all_button.clicked.connect(self.fields_model.clear_all)
         self.clear_all_button.clicked.connect(self.changed_signal)
+        self.select_default_button.clicked.connect(self.fields_model.select_user_defaults)
+        self.select_default_button.clicked.connect(self.changed_signal)
+        self.set_as_default_button.clicked.connect(self.fields_model.commit_user_defaults)

     def configure_plugin(self):
         for index in self.sources_view.selectionModel().selectedRows():
@ -102,6 +102,26 @@
       </property>
      </widget>
     </item>
+    <item row="2" column="0">
+     <widget class="QPushButton" name="select_default_button">
+      <property name="text">
+       <string>&amp;Select default</string>
+      </property>
+      <property name="toolTip">
+       <string>Restore your own subset of checked fields that you define using the 'Set as default' button</string>
+      </property>
+     </widget>
+    </item>
+    <item row="2" column="1">
+     <widget class="QPushButton" name="set_as_default_button">
+      <property name="text">
+       <string>&amp;Set as default</string>
+      </property>
+      <property name="toolTip">
+       <string>Store the currently checked fields as a default you can restore using the 'Select default' button</string>
+      </property>
+     </widget>
+    </item>
    </layout>
   </widget>
  </item>
@ -727,6 +727,15 @@ class TagTreeItem(object): # {{{
         else:
             self.tag.state = set_to

+    def all_children(self):
+        res = []
+        def recurse(nodes, res):
+            for t in nodes:
+                res.append(t)
+                recurse(t.children, res)
+        recurse(self.children, res)
+        return res
+
     def child_tags(self):
         res = []
         def recurse(nodes, res):
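all_children returns every descendant node, including intermediate category nodes, which the search-building change further down needs so it can special-case 'first letter' category items. A toy illustration of the same depth-first collection, with a hypothetical node type standing in for TagTreeItem:

class Node(object):
    # Hypothetical stand-in for TagTreeItem, just to show the traversal order.
    def __init__(self, name, children=()):
        self.name, self.children = name, list(children)

    def all_children(self):
        res = []
        def recurse(nodes, res):
            for t in nodes:
                res.append(t)
                recurse(t.children, res)
        recurse(self.children, res)
        return res

root = Node('authors', [Node('A', [Node('Austen'), Node('Atwood')]), Node('B', [Node('Borges')])])
print([n.name for n in root.all_children()])  # ['A', 'Austen', 'Atwood', 'B', 'Borges']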
@ -1269,6 +1278,7 @@ class TagsModel(QAbstractItemModel): # {{{
                             category_icon = category_node.icon,
                             category_key=category_node.category_key,
                             icon_map=self.icon_state_map)
+                    sub_cat.tag.is_searchable = False
                     self.endInsertRows()
                 else: # by 'first letter'
                     cl = cl_list[idx]
@ -1651,7 +1661,16 @@ class TagsModel(QAbstractItemModel): # {{{
                 ans.append('%s:%s'%(node.category_key, node_searches[node.tag.state]))

             key = node.category_key
-            for tag_item in node.child_tags():
+            for tag_item in node.all_children():
+                if tag_item.type == TagTreeItem.CATEGORY:
+                    if self.collapse_model == 'first letter' and \
+                            tag_item.temporary and not key.startswith('@') \
+                            and tag_item.tag.state:
+                        if node_searches[tag_item.tag.state] == 'true':
+                            ans.append('%s:~^%s'%(key, tag_item.py_name))
+                        else:
+                            ans.append('(not %s:~^%s )'%(key, tag_item.py_name))
+                    continue
                 tag = tag_item.tag
                 if tag.state != TAG_SEARCH_STATES['clear']:
                     if tag.state == TAG_SEARCH_STATES['mark_minus'] or \
@ -179,7 +179,7 @@ class UpdateMixin(object):

     def plugin_update_found(self, number_of_updates):
         # Change the plugin icon to indicate there are updates available
-        plugin = self.iactions.get('Plugin Updates', None)
+        plugin = self.iactions.get('Plugin Updater', None)
         if not plugin:
             return
         if number_of_updates:
@ -145,7 +145,7 @@ def _match(query, value, matchkind):
                     return True
             elif query == t:
                 return True
-            elif ((matchkind == REGEXP_MATCH and re.search(query, t, re.I)) or ### search unanchored
+            elif ((matchkind == REGEXP_MATCH and re.search(query, t, re.I|re.UNICODE)) or ### search unanchored
                     (matchkind == CONTAINS_MATCH and query in t)):
                 return True
     except re.error:
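The re.UNICODE addition matters because calibre 0.8.x ran on Python 2, where case-insensitive matching without that flag only folds ASCII letters, so accented author or tag names could fail to match. A small illustration; on Python 3 the flag is implied for str patterns, so both searches succeed there:

# -*- coding: utf-8 -*-
import re

query, value = u'böll', u'Heinrich BÖLL'

print(bool(re.search(query, value, re.I)))               # Python 2: False (ASCII-only case folding)
print(bool(re.search(query, value, re.I | re.UNICODE)))  # True on both Python 2 and 3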
@ -24,6 +24,7 @@ NON_EBOOK_EXTENSIONS = frozenset([
 class RestoreDatabase(LibraryDatabase2):

     PATH_LIMIT = 10
+    WINDOWS_LIBRARY_PATH_LIMIT = 180

     def set_path(self, *args, **kwargs):
         pass
@ -657,6 +657,7 @@ Some limitations of PDF input are:
   * Some PDFs store their images upside down with a rotation instruction; |app| currently doesn't support that instruction, so the images will be rotated in the output as well.
   * Links and Tables of Contents are not supported
+  * PDFs that use embedded non-unicode fonts to represent non-English characters will result in garbled output for those characters
   * Some PDFs are made up of photographs of the page with OCRed text behind them. In such cases |app| uses the OCRed text, which can be very different from what you see when you view the PDF file

 To re-iterate **PDF is a really, really bad** format to use as input. If you absolutely must use PDF, then be prepared for an
 output ranging anywhere from decent to unusable, depending on the input PDF.
@ -28,7 +28,7 @@ For example, adding support for a new device to |app| typically involves writing
 a device driver plugin. You can browse the
 `built-in drivers <http://bazaar.launchpad.net/%7Ekovid/calibre/trunk/files/head%3A/src/calibre/devices/>`_. Similarly, adding support
 for new conversion formats involves writing input/output format plugins. Another example of the modular design is the :ref:`recipe system <news>` for
-fetching news.
+fetching news. For more examples of plugins designed to add features to |app|, see the `plugin index <http://www.mobileread.com/forums/showthread.php?t=118764>`_.
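As a concrete taste of the recipe system mentioned above, a news source is usually nothing more than a small BasicNewsRecipe subclass naming its RSS feeds, much like the recipes added elsewhere in this commit; the title and feed URL below are placeholders:

from calibre.web.feeds.news import BasicNewsRecipe

class ExampleNews(BasicNewsRecipe):
    # Placeholder title and feed, for illustration only.
    title = u'Example News'
    language = 'en'
    oldest_article = 7
    max_articles_per_feed = 20
    no_stylesheets = True
    remove_javascript = True

    feeds = [(u'Front Page', u'http://example.com/rss.xml')]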

 Code layout
 ^^^^^^^^^^^^^^
@ -36,10 +36,21 @@ Code layout
 All the |app| python code is in the ``calibre`` package. This package contains the following main sub-packages

 * devices - All the device drivers. Just look through some of the built-in drivers to get an idea for how they work.
-* ebooks - All the ebook conversion code. A good starting point is ``calibre.ebooks.conversion.cli`` which is the
-  module powering the :command:`ebook-convert` command.
-* library - The database backed and the content server.
-* gui2 - The Graphical User Interface.
+
+  * For details, see: devices.interface, which defines the interface supported by device drivers, and devices.usbms, which
+    defines a generic driver that connects to a USBMS device. All USBMS based drivers in calibre inherit from it.
+
+* ebooks - All the ebook conversion/metadata code. A good starting point is ``calibre.ebooks.conversion.cli``, which is the
+  module powering the :command:`ebook-convert` command. The conversion process is controlled via conversion.plumber.
+  The format independent code is all in ebooks.oeb and the format dependent stuff is in ebooks.format_name.
+
+* Metadata reading, writing and downloading is all in ebooks.metadata
+
+* library - The database backend and the content server. See library.database2 for the interface to the calibre library. library.server is the calibre Content Server.
+* gui2 - The Graphical User Interface. GUI initialization happens in gui2.main and gui2.ui. The ebook-viewer is in gui2.viewer.

 If you need help understanding the code, post in the `development forum <http://www.mobileread.com/forums/forumdisplay.php?f=240>`_
 and you will most likely get help from one of |app|'s many developers.

Getting the code
------------------
@ -82,9 +93,9 @@ Now whenever you commit changes to your branch with the command::

     bzr commit -m "Comment describing your change"

-I can merge it directly from you branch into the main |app| source tree. You should also subscribe to the |app|
-developers mailing list `calibre-devs <https://launchpad.net/~calibre-devs>`_. Before making major changes, you should
-discuss them on the mailing list or the #calibre IRC channel on Freenode to ensure that the changes will be accepted once you're done.
+I can merge it directly from your branch into the main |app| source tree. You should also keep an eye on the |app|
+`development forum <http://www.mobileread.com/forums/forumdisplay.php?f=240>`_. Before making major changes, you should
+discuss them in the forum or contact Kovid directly (his email address is all over the source code).

 Windows development environment
 ---------------------------------
@ -131,7 +131,7 @@ Follow these steps to find the problem:
     * Make sure that you are connecting only a single device to your computer at a time. Do not have another |app| supported device like an iPhone/iPad etc. connected at the same time.
     * If you are connecting an Apple iDevice (iPad, iPod Touch, iPhone), use the 'Connect to iTunes' method in the 'Getting started' instructions in `Calibre + Apple iDevices: Start here <http://www.mobileread.com/forums/showthread.php?t=118559>`_.
     * Make sure you are running the latest version of |app|. The latest version can always be downloaded from `the calibre website <http://calibre-ebook.com/download>`_.
-    * Ensure your operating system is seeing the device. That is, the device should be mounted as a disk that you can access using Windows explorer or whatever the file management program on your computer is.
+    * Ensure your operating system is seeing the device. That is, the device should be mounted as a disk that you can access using Windows Explorer or whatever the file management program on your computer is. On Windows, your device **must have been assigned a drive letter**, like K:.
     * In calibre, go to Preferences->Plugins->Device Interface plugin and make sure the plugin for your device is enabled; the plugin icon next to it should be green when it is enabled.
     * If all the above steps fail, go to Preferences->Miscellaneous and click debug device detection with your device attached and post the output as a ticket on `the calibre bug tracker <http://bugs.calibre-ebook.com>`_.

(Numerous additional file diffs in this commit are suppressed because they are too large. Among them is one new file: src/calibre/translations/si.po, 17095 lines.)
@ -121,6 +121,7 @@ _extra_lang_codes = {
         'en_YE' : _('English (Yemen)'),
         'en_IE' : _('English (Ireland)'),
         'en_CN' : _('English (China)'),
+        'en_ZA' : _('English (South Africa)'),
         'es_PY' : _('Spanish (Paraguay)'),
         'es_UY' : _('Spanish (Uruguay)'),
         'es_AR' : _('Spanish (Argentina)'),