Merge from trunk
@@ -44,7 +44,7 @@
 
 - title: "MOBI Output: Fix a memory leak and a crash in the palmdoc compression routine"
 
-- title: "Metadata download: Fix a regressiont at resulted in a failed download for some books"
+- title: "Metadata download: Fix a regression that resulted in a failed download for some books"
 
 new recipes:
 - title: "Foreign Policy and Alo!"
Image changed: 89 KiB -> 3.8 KiB
Image changed: 75 KiB -> 11 KiB
Image changed: 117 KiB -> 5.0 KiB
Image changed: 38 KiB -> 4.6 KiB
Image changed: 109 KiB -> 4.1 KiB
Image changed: 58 KiB -> 6.3 KiB
Image changed: 46 KiB -> 3.6 KiB
Image changed: 69 KiB -> 7.2 KiB
Image changed: 73 KiB -> 2.2 KiB
resources/recipes/big_oven.recipe (new file, 64 lines)
@@ -0,0 +1,64 @@
from calibre.web.feeds.news import BasicNewsRecipe

class BigOven(BasicNewsRecipe):
    title                 = 'BigOven'
    __author__            = 'Starson17'
    description           = 'Recipes for the Foodie in us all. Registration is free. A fake username and password just gives smaller photos.'
    language              = 'en'
    category              = 'news, food, recipes, gourmet'
    publisher             = 'Starson17'
    use_embedded_content  = False
    no_stylesheets        = True
    oldest_article        = 24
    remove_javascript     = True
    remove_empty_feeds    = True
    cover_url             = 'http://www.software.com/images/products/BigOven%20Logo_177_216.JPG'
    max_articles_per_feed = 30
    needs_subscription    = True

    conversion_options = {'linearize_tables' : True
                         , 'comment'         : description
                         , 'tags'            : category
                         , 'publisher'       : publisher
                         , 'language'        : language
                         }

    def get_browser(self):
        br = BasicNewsRecipe.get_browser()
        if self.username is not None and self.password is not None:
            br.open('http://www.bigoven.com/')
            br.select_form(name='form1')
            br['TopMenu_bo1$email']    = self.username
            br['TopMenu_bo1$password'] = self.password
            br.submit()
        return br

    remove_attributes = ['style', 'font']

    keep_only_tags = [dict(name='h1')
                     ,dict(name='div', attrs={'class':'img'})
                     ,dict(name='div', attrs={'id':'intro'})
                     ]

    remove_tags = [dict(name='div', attrs={'style':["overflow: visible;"]})
                  ,dict(name='div', attrs={'class':['ctas']})
                  #,dict(name='a', attrs={'class':['edit']})
                  ,dict(name='p', attrs={'class':['byline']})
                  ]

    feeds = [(u'4 & 5 Star Rated Recipes', u'http://feeds.feedburner.com/Bigovencom-RecipeRaves?format=xml')]

    def preprocess_html(self, soup):
        for tag in soup.findAll(name='a', attrs={'class':['edit']}):
            tag.parent.extract()
        for tag in soup.findAll(name='a', attrs={'class':['deflink']}):
            tag.replaceWith(tag.string)
        return soup

    extra_css = '''
        h1{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:large;}
        h2{font-family:Arial,Helvetica,sans-serif; font-weight:normal;font-size:medium;}
        p{font-family:Arial,Helvetica,sans-serif;font-size:small;}
        body{font-family:Helvetica,Arial,sans-serif;font-size:small;}
        '''
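The login in get_browser() above only works while BigOven keeps the same form name and field ids. A minimal standalone sketch for checking those assumptions outside calibre, using mechanize directly (the placeholder credentials and the print-based check are illustrative, not part of the patch):

import mechanize

br = mechanize.Browser()
br.set_handle_robots(False)
br.open('http://www.bigoven.com/')
br.select_form(name='form1')                     # form name assumed by the recipe
br['TopMenu_bo1$email']    = 'user@example.com'  # placeholder credentials
br['TopMenu_bo1$password'] = 'not-a-real-password'
response = br.submit()
print(response.geturl())                         # should land on a logged-in page if the form still matches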
@@ -1,14 +1,29 @@
-import re
+#!/usr/bin/env python
+
+__license__ = 'GPL v3'
+__copyright__ = '2010 elsuave'
 
 from calibre.web.feeds.news import BasicNewsRecipe
 
 class EandP(BasicNewsRecipe):
     title = u'Editor and Publisher'
-    __author__ = u'Xanthan Gum'
+    __author__ = u'elsuave (modified from Xanthan Gum)'
     description = 'News about newspapers and journalism.'
+    publisher = 'Editor and Publisher'
+    category = 'news, journalism, industry'
     language = 'en'
+    max_articles_per_feed = 25
     no_stylesheets = True
+    use_embedded_content = False
+    encoding = 'utf8'
+    cover_url = 'http://www.editorandpublisher.com/images/EP_main_logo.gif'
+    remove_javascript = True
 
-    oldest_article = 7
-    max_articles_per_feed = 100
+    html2lrf_options = [
+                        '--comment', description
+                        , '--category', category
+                        , '--publisher', publisher
+                        ]
+
+    html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"'
 
     # Font formatting code borrowed from kwetal
@@ -18,17 +33,21 @@ class EandP(BasicNewsRecipe):
                 h2{font-size: large;}
                 '''
 
-    # Delete everything before the article
-
-    remove_tags_before = dict(name='font', attrs={'class':'titlebar_black'})
-
-    # Delete everything after the article
-
-    preprocess_regexps = [(re.compile(r'<!--endclickprintinclude-->.*</body>', re.DOTALL|re.IGNORECASE),
-                          lambda match: '</body>'),]
-
-    feeds = [(u'Breaking News', u'http://feeds.feedburner.com/EditorAndPublisher-BreakingNews'),
-             (u'Business News', u'http://feeds.feedburner.com/EditorAndPublisher-BusinessNews'),
-             (u'Newsroom', u'http://feeds.feedburner.com/EditorAndPublisher-Newsroom'),
-             (u'Technology News', u'http://feeds.feedburner.com/EditorAndPublisher-Technology'),
-             (u'Syndicates News', u'http://feeds.feedburner.com/EditorAndPublisher-Syndicates')]
+    # Keep only div:itemmgap
+
+    keep_only_tags = [
+                      dict(name='div', attrs={'class':'itemmgap'})
+                     ]
+
+    # Remove commenting/social media links
+
+    remove_tags_after = [dict(name='div', attrs={'class':'clear'})]
+
+    feeds = [(u'Breaking News', u'http://www.editorandpublisher.com/GenerateRssFeed.aspx'),
+             (u'Business News', u'http://www.editorandpublisher.com/GenerateRssFeed.aspx?CategoryId=2'),
+             (u'Ad/Circ News', u'http://www.editorandpublisher.com/GenerateRssFeed.aspx?CategoryId=3'),
+             (u'Newsroom', u'http://www.editorandpublisher.com/GenerateRssFeed.aspx?CategoryId=4'),
+             (u'Technology News', u'http://www.editorandpublisher.com/GenerateRssFeed.aspx?CategoryId=5'),
+             (u'Syndicates News', u'http://www.editorandpublisher.com/GenerateRssFeed.aspx?CategoryId=7')]
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 
 __license__ = 'GPL v3'
-__copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>'
+__copyright__ = '2010, elsuave'
 '''
 estadao.com.br
 '''
@@ -10,12 +10,12 @@ from calibre.web.feeds.news import BasicNewsRecipe
 
 class Estadao(BasicNewsRecipe):
     title = 'O Estado de S. Paulo'
-    __author__ = 'Darko Miletic'
+    __author__ = 'elsuave (modified from Darko Miletic)'
     description = 'News from Brasil in Portuguese'
     publisher = 'O Estado de S. Paulo'
     category = 'news, politics, Brasil'
     oldest_article = 2
-    max_articles_per_feed = 100
+    max_articles_per_feed = 25
     no_stylesheets = True
     use_embedded_content = False
     encoding = 'utf8'
@@ -30,13 +30,14 @@ class Estadao(BasicNewsRecipe):
 
     html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"'
 
-    keep_only_tags = [dict(name='div', attrs={'id':'c1'})]
+    keep_only_tags = [
+                      dict(name='div', attrs={'class':['bb-md-noticia','c5']})
+                     ]
 
     remove_tags = [
                    dict(name=['script','object','form','ul'])
-                  ,dict(name='div', attrs={'id':['votacao','estadaohoje']})
-                  ,dict(name='p', attrs={'id':'ctrl_texto'})
-                  ,dict(name='p', attrs={'class':'texto'})
+                  ,dict(name='div', attrs={'class':['fnt2 Color_04 bold','right fnt2 innerTop15 dvTmFont','™_01 right outerLeft15','tituloBox','tags']})
+                  ,dict(name='div', attrs={'id':['bb-md-noticia-subcom']})
                   ]
 
     feeds = [
@@ -51,13 +52,12 @@ class Estadao(BasicNewsRecipe):
              ,(u'Vida &', u'http://www.estadao.com.br/rss/vidae.xml')
              ]
 
-    def preprocess_html(self, soup):
-        ifr = soup.find('iframe')
-        if ifr:
-            ifr.extract()
-        for item in soup.findAll(style=True):
-            del item['style']
-        return soup
-
     language = 'pt'
 
+    def get_article_url(self, article):
+        url = BasicNewsRecipe.get_article_url(self, article)
+        if '/Multimidia/' not in url:
+            return url
@@ -351,9 +351,13 @@ def search(title=None, author=None, publisher=None, isbn=None, isbndb_key=None,
     if len(results) > 1:
         if not results[0].comments or len(results[0].comments) == 0:
             for r in results[1:]:
-                if title.lower() == r.title[:len(title)].lower() and r.comments and len(r.comments):
-                    results[0].comments = r.comments
-                    break
+                try:
+                    if title and title.lower() == r.title[:len(title)].lower() \
+                            and r.comments and len(r.comments):
+                        results[0].comments = r.comments
+                        break
+                except:
+                    pass
     # Find a pubdate
     pubdate = None
     for r in results:
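The guarded comparison above is what keeps the comment back-fill from failing when a query has no title. A small self-contained sketch of the same logic (illustration only; DummyResult stands in for calibre's metadata result objects):

class DummyResult(object):
    def __init__(self, title, comments=None):
        self.title = title
        self.comments = comments

def backfill_comments(results, title):
    # Copy comments from a later, matching result into the first one.
    if len(results) > 1:
        if not results[0].comments or len(results[0].comments) == 0:
            for r in results[1:]:
                try:
                    if title and title.lower() == r.title[:len(title)].lower() \
                            and r.comments and len(r.comments):
                        results[0].comments = r.comments
                        break
                except:
                    pass
    return results

results = [DummyResult('Dune'), DummyResult('Dune (Anniversary Edition)', 'A classic.')]
print(backfill_comments(results, 'Dune')[0].comments)   # -> A classic.

# Previously, title=None crashed on title.lower(); now the entry is simply skipped.
results = [DummyResult('Dune'), DummyResult('Dune (Anniversary Edition)', 'A classic.')]
print(backfill_comments(results, None)[0].comments)     # -> None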
@@ -323,7 +323,6 @@ class AddAction(object): # {{{
                 accept = True
         if accept:
             event.accept()
-            self.cover_cache.refresh([cid])
             self.library_view.model().current_changed(current_idx, current_idx)
 
     def __add_filesystem_book(self, paths, allow_device=True):
@@ -115,6 +115,7 @@ class CoverFlowMixin(object):
                     self.sync_cf_to_listview)
             self.db_images = DatabaseImages(self.library_view.model())
             self.cover_flow.setImages(self.db_images)
+            self.cover_flow.itemActivated.connect(self.view_specific_book)
         else:
             self.cover_flow = QLabel('<p>'+_('Cover browser could not be loaded')
                     +'<br>'+pictureflowerror)
@@ -579,12 +579,10 @@ void PictureFlowPrivate::resetSlides()
 
 static QImage prepareSurface(QImage img, int w, int h)
 {
-    Qt::TransformationMode mode = Qt::SmoothTransformation;
-    img = img.scaled(w, h, Qt::IgnoreAspectRatio, mode);
+    img = img.scaled(w, h, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
 
     // slightly larger, to accommodate for the reflection
     int hs = int(h * REFLECTION_FACTOR);
-    int hofs = 0;
 
     // offscreen buffer: black is sweet
     QImage result(hs, w, QImage::Format_RGB16);
@@ -595,21 +593,20 @@ static QImage prepareSurface(QImage img, int w, int h)
     // (and much better and faster to work row-wise, i.e in one scanline)
     for(int x = 0; x < w; x++)
         for(int y = 0; y < h; y++)
-            result.setPixel(hofs + y, x, img.pixel(x, y));
+            result.setPixel(y, x, img.pixel(x, y));
 
     // create the reflection
-    int ht = hs - h - hofs;
-    int hte = ht;
+    int ht = hs - h;
     for(int x = 0; x < w; x++)
         for(int y = 0; y < ht; y++)
         {
             QRgb color = img.pixel(x, img.height()-y-1);
             //QRgb565 color = img.scanLine(img.height()-y-1) + x*sizeof(QRgb565); //img.pixel(x, img.height()-y-1);
             int a = qAlpha(color);
-            int r = qRed(color) * a / 256 * (hte - y) / hte * 3/5;
-            int g = qGreen(color) * a / 256 * (hte - y) / hte * 3/5;
-            int b = qBlue(color) * a / 256 * (hte - y) / hte * 3/5;
-            result.setPixel(h+hofs+y, x, qRgb(r, g, b));
+            int r = qRed(color) * a / 256 * (ht - y) / ht * 3/5;
+            int g = qGreen(color) * a / 256 * (ht - y) / ht * 3/5;
+            int b = qBlue(color) * a / 256 * (ht - y) / ht * 3/5;
+            result.setPixel(h+y, x, qRgb(r, g, b));
         }
 
     return result;
@@ -798,12 +795,20 @@ QRect PictureFlowPrivate::renderCenterSlide(const SlideInfo &slide) {
     int sw = src->height();
     int sh = src->width();
     int h = buffer.height();
-    QRect rect(buffer.width()/2 - sw/2, 0, sw, h-1);
-    int left = rect.left();
+    int srcoff = 0;
+    int left = buffer.width()/2 - sw/2;
+    if (left < 0) {
+        srcoff = -left;
+        sw += left;
+        left = 0;
+    }
+    QRect rect(left, 0, sw, h-1);
+    int xcon = MIN(h-1, sh-1);
+    int ycon = MIN(sw, buffer.width() - left);
 
-    for(int x = 0; x < MIN(h-1, sh-1); x++)
-        for(int y = 0; y < sw; y++)
-            buffer.setPixel(left + y, 1+x, src->pixel(x, y));
+    for(int x = 0; x < xcon; x++)
+        for(int y = 0; y < ycon; y++)
+            buffer.setPixel(left + y, 1+x, src->pixel(x, srcoff+y));
 
     return rect;
 }
@@ -238,7 +238,7 @@ class RecursiveFetcher(object):
             soup = BeautifulSoup(u'<a href="'+url+'" />')
         self.log.debug('Downloading')
         res = self.process_links(soup, url, 0, into_dir='')
-        self.log.debug('%s saved to %s'%( url, res))
+        self.log.debug(url, 'saved to', res)
         return res
 
     def is_link_ok(self, url):
@@ -281,7 +281,7 @@ class RecursiveFetcher(object):
                 try:
                     data = self.fetch_url(iurl)
                 except Exception:
-                    self.log.exception('Could not fetch stylesheet %s'% iurl)
+                    self.log.exception('Could not fetch stylesheet ', iurl)
                     continue
                 stylepath = os.path.join(diskpath, 'style'+str(c)+'.css')
                 with self.stylemap_lock:
@@ -304,7 +304,7 @@ class RecursiveFetcher(object):
                     try:
                         data = self.fetch_url(iurl)
                     except Exception:
-                        self.log.exception('Could not fetch stylesheet %s'% iurl)
+                        self.log.exception('Could not fetch stylesheet ', iurl)
                         continue
                     c += 1
                     stylepath = os.path.join(diskpath, 'style'+str(c)+'.css')
@@ -337,7 +337,7 @@ class RecursiveFetcher(object):
                     # Skip empty GIF files as PIL errors on them anyway
                     continue
             except Exception:
-                self.log.exception('Could not fetch image %s'% iurl)
+                self.log.exception('Could not fetch image ', iurl)
                 continue
             c += 1
             fname = ascii_filename('img'+str(c))
@@ -423,7 +423,7 @@ class RecursiveFetcher(object):
             newbaseurl = dsrc.newurl
             if len(dsrc) == 0 or \
                len(re.compile('<!--.*?-->', re.DOTALL).sub('', dsrc).strip()) == 0:
-                raise ValueError('No content at URL %s'%iurl)
+                raise ValueError('No content at URL %r'%iurl)
             if callable(self.encoding):
                 dsrc = self.encoding(dsrc)
             elif self.encoding is not None:
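The %s -> %r switch in the ValueError above makes malformed URLs easier to spot, since repr() exposes stray whitespace and control characters; a tiny illustration (not from the calibre sources):

iurl = 'http://example.com/page \n'
print('No content at URL %s' % iurl)   # the trailing junk is invisible in the message
print('No content at URL %r' % iurl)   # -> No content at URL 'http://example.com/page \n'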