Fix #1891 (Updated various recipes for better EPUB support)

This commit is contained in:
Kovid Goyal 2009-02-19 10:24:48 -08:00
parent 3c05e850d5
commit f179a74a07
4 changed files with 58 additions and 56 deletions

View File

@@ -1,29 +1,40 @@
#!/usr/bin/env python #!/usr/bin/env python
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '2008, Darko Miletic <darko.miletic at gmail.com>' __copyright__ = '2008-2009, Darko Miletic <darko.miletic at gmail.com>'
''' '''
harpers.org harpers.org
''' '''
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
class Harpers(BasicNewsRecipe): class Harpers(BasicNewsRecipe):
title = u"Harper's Magazine" title = u"Harper's Magazine"
__author__ = u'Darko Miletic' __author__ = u'Darko Miletic'
language = _('English') language = _('English')
description = u"Harper's Magazine: Founded June 1850." description = u"Harper's Magazine: Founded June 1850."
oldest_article = 30 publisher = "Harper's Magazine "
max_articles_per_feed = 100 category = 'news, politics, USA'
no_stylesheets = True oldest_article = 30
use_embedded_content = False max_articles_per_feed = 100
timefmt = ' [%A, %d %B, %Y]' no_stylesheets = True
use_embedded_content = False
keep_only_tags = [ dict(name='div', attrs={'id':'cached'}) ] remove_javascript = True
remove_tags = [
dict(name='table', attrs={'class':'rcnt'}) html2lrf_options = [
,dict(name='table', attrs={'class':'rcnt topline'}) '--comment', description
] , '--category', category
, '--publisher', publisher
feeds = [ ]
(u"Harper's Magazine", u'http://www.harpers.org/rss/frontpage-rss20.xml')
] html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"\noverride_css=" p {text-indent: 0em; margin-top: 0em; margin-bottom: 0.5em} img {margin-top: 0em; margin-bottom: 0.4em}"'
keep_only_tags = [ dict(name='div', attrs={'id':'cached'}) ]
remove_tags = [
dict(name='table', attrs={'class':'rcnt'})
,dict(name='table', attrs={'class':'rcnt topline'})
,dict(name=['link','object','embed'])
]
feeds = [(u"Harper's Magazine", u'http://www.harpers.org/rss/frontpage-rss20.xml')]

View File

@@ -10,8 +10,8 @@ images and pdf's are ignored
from calibre import strftime from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
class Harpers_full(BasicNewsRecipe): class Harpers_full(BasicNewsRecipe):
title = u"Harper's Magazine - articles from printed edition" title = u"Harper's Magazine - articles from printed edition"
__author__ = u'Darko Miletic' __author__ = u'Darko Miletic'
@@ -23,7 +23,8 @@ class Harpers_full(BasicNewsRecipe):
no_stylesheets = True no_stylesheets = True
use_embedded_content = False use_embedded_content = False
simultaneous_downloads = 1 simultaneous_downloads = 1
delay = 1 delay = 1
language = _('English')
needs_subscription = True needs_subscription = True
INDEX = strftime('http://www.harpers.org/archive/%Y/%m') INDEX = strftime('http://www.harpers.org/archive/%Y/%m')
LOGIN = 'http://www.harpers.org' LOGIN = 'http://www.harpers.org'
@@ -31,12 +32,12 @@ class Harpers_full(BasicNewsRecipe):
remove_javascript = True remove_javascript = True
html2lrf_options = [ html2lrf_options = [
'--comment', description '--comment', description
, '--category', category , '--category', category
, '--publisher', publisher , '--publisher', publisher
] ]
html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"' html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"\noverride_css=" p {text-indent: 0em; margin-top: 0em; margin-bottom: 0.5em} img {margin-top: 0em; margin-bottom: 0.4em}"'
keep_only_tags = [ dict(name='div', attrs={'id':'cached'}) ] keep_only_tags = [ dict(name='div', attrs={'id':'cached'}) ]
remove_tags = [ remove_tags = [
@@ -71,10 +72,4 @@ class Harpers_full(BasicNewsRecipe):
,'description':'' ,'description':''
}) })
return [(soup.head.title.string, articles)] return [(soup.head.title.string, articles)]
def preprocess_html(self, soup):
for item in soup.findAll(style=True):
del item['style']
return soup
language = _('English')

View File

@@ -17,9 +17,6 @@ class Pobjeda(BasicNewsRecipe):
description = 'News from Montenegro' description = 'News from Montenegro'
publisher = 'Pobjeda a.d.' publisher = 'Pobjeda a.d.'
category = 'news, politics, Montenegro' category = 'news, politics, Montenegro'
language = _('Serbian')
oldest_article = 2
max_articles_per_feed = 100
no_stylesheets = True no_stylesheets = True
remove_javascript = True remove_javascript = True
encoding = 'utf8' encoding = 'utf8'
@@ -30,12 +27,14 @@ class Pobjeda(BasicNewsRecipe):
html2lrf_options = [ html2lrf_options = [
'--comment', description '--comment', description
, '--base-font-size', '10'
, '--category', category , '--category', category
, '--publisher', publisher , '--publisher', publisher
] ]
html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"' html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"\noverride_css=" p {text-indent: 0em; margin-top: 0em; margin-bottom: 0.5em} img {margin-top: 0em; margin-bottom: 0.4em}"'
preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')] preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')]
keep_only_tags = [dict(name='div', attrs={'class':'vijest'})] keep_only_tags = [dict(name='div', attrs={'class':'vijest'})]
@@ -64,8 +63,6 @@ class Pobjeda(BasicNewsRecipe):
soup.html['lang'] = 'sr-Latn-ME' soup.html['lang'] = 'sr-Latn-ME'
mtag = '<meta http-equiv="Content-Language" content="sr-Latn-ME"/>' mtag = '<meta http-equiv="Content-Language" content="sr-Latn-ME"/>'
soup.head.insert(0,mtag) soup.head.insert(0,mtag)
for item in soup.findAll(style=True):
del item['style']
return soup return soup
def get_cover_url(self): def get_cover_url(self):
@@ -81,16 +78,16 @@ class Pobjeda(BasicNewsRecipe):
lfeeds = self.get_feeds() lfeeds = self.get_feeds()
for feedobj in lfeeds: for feedobj in lfeeds:
feedtitle, feedurl = feedobj feedtitle, feedurl = feedobj
self.report_progress(0, _('Fetching feed')+' %s...'%(feedtitle if feedtitle else feedurl)) self.report_progress(0, _('Fetching feed')+' %s...'%(feedtitle if feedtitle else feedurl))
articles = [] articles = []
soup = self.index_to_soup(feedurl) soup = self.index_to_soup(feedurl)
for item in soup.findAll('div', attrs={'class':'vijest'}): for item in soup.findAll('div', attrs={'class':'vijest'}):
description = self.tag_to_string(item.h2) description = self.tag_to_string(item.h2)
atag = item.h1.find('a') atag = item.h1.find('a')
if atag: if atag and atag.has_key('href'):
url = self.INDEX + '/' + atag['href'] url = self.INDEX + '/' + atag['href']
title = self.tag_to_string(atag) title = self.tag_to_string(atag)
date = strftime(self.timefmt) date = strftime(self.timefmt)
articles.append({ articles.append({
'title' :title 'title' :title
,'date' :date ,'date' :date

View File

@@ -32,7 +32,7 @@ class PressOnline(BasicNewsRecipe):
, '--publisher', publisher , '--publisher', publisher
] ]
html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"' html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"\noverride_css=" p {text-indent: 0em; margin-top: 0em; margin-bottom: 0.5em} img {margin-top: 0em; margin-bottom: 0.4em}"'
preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')] preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')]
@@ -61,7 +61,6 @@ class PressOnline(BasicNewsRecipe):
soup.html['lang'] = 'sr-Latn-RS' soup.html['lang'] = 'sr-Latn-RS'
mtag = '<meta http-equiv="Content-Language" content="sr-Latn-RS"/>\n<meta http-equiv="Content-Type" content="text/html; charset=utf-8">' mtag = '<meta http-equiv="Content-Language" content="sr-Latn-RS"/>\n<meta http-equiv="Content-Type" content="text/html; charset=utf-8">'
soup.head.insert(0,mtag) soup.head.insert(0,mtag)
img = soup.find('img') for img in soup.findAll('img', align=True):
if img: del img['align']
del img['align']
return soup return soup