Fix #1008276 (New recipe for The Christian Science Monitor)

This commit is contained in:
Kovid Goyal 2012-06-04 09:58:14 +05:30
parent 71ea547f06
commit acf009ad35

View File

@ -1,152 +1,111 @@
#!/usr/bin/env python

__license__   = 'GPL v3'
__copyright__ = '2012, Darko Miletic <darko.miletic at gmail.com>'
'''
www.csmonitor.com
'''

# NOTE(review): SOURCE was a side-by-side diff render; this restores the
# post-commit header. The old recipe's `import re` and BeautifulSoup import
# belong to the removed (left-column) version and are no longer referenced.
from calibre.web.feeds.news import BasicNewsRecipe
class CSMonitor(BasicNewsRecipe):
    """
    Calibre recipe for the daily edition of The Christian Science Monitor
    (www.csmonitor.com), built from the site's public RSS feeds.
    """
    title                 = 'The Christian Science Monitor - daily'
    __author__            = 'Darko Miletic'
    description           = 'The Christian Science Monitor is an international news organization that delivers thoughtful, global coverage via its website, weekly magazine, daily news briefing, and email newsletters.'
    publisher             = 'The Christian Science Monitor'
    category              = 'news, politics, USA'
    oldest_article        = 2
    max_articles_per_feed = 200
    no_stylesheets        = True
    encoding              = 'utf8'
    use_embedded_content  = False
    language              = 'en'
    remove_empty_feeds    = True
    publication_type      = 'newspaper'
    masthead_url          = 'http://www.csmonitor.com/extension/csm_base/design/csm_design/images/csmlogo_179x46.gif'
    extra_css             = """
                            body{font-family: Arial,Tahoma,Verdana,Helvetica,sans-serif }
                            img{margin-bottom: 0.4em; display:block}
                            .head {font-family: Georgia,"Times New Roman",Times,serif}
                            .sByline,.caption{font-size: x-small}
                            .hide{display: none}
                            .sLoc{font-weight: bold}
                            ul{list-style-type: none}
                            """

    conversion_options = {
        'comment':   description,
        'tags':      category,
        'publisher': publisher,
        'language':  language,
    }

    # Strip page chrome and related-story boxes; keep only headline,
    # subhead, byline, gallery and article body containers.
    remove_tags = [
        dict(name=['meta', 'link', 'iframe', 'object', 'embed']),
        dict(attrs={'class': ['podStoryRel', 'bottom-rel', 'hide']}),
        dict(attrs={'id': ['pgallerycarousel_enlarge', 'pgallerycarousel_related']}),
    ]
    keep_only_tags = [
        dict(name='h1', attrs={'class': 'head'}),
        dict(name='h2', attrs={'class': 'subhead'}),
        dict(attrs={'class': ['sByline', 'podStoryGal', 'ui-body-header', 'sBody']}),
    ]
    remove_attributes = ['xmlns:fb']

    feeds = [
        (u'USA',            u'http://rss.csmonitor.com/feeds/usa'),
        (u'World',          u'http://rss.csmonitor.com/feeds/world'),
        (u'Politics',       u'http://rss.csmonitor.com/feeds/politics'),
        (u'Business',       u'http://rss.csmonitor.com/feeds/wam'),
        (u'Commentary',     u'http://rss.csmonitor.com/feeds/commentary'),
        (u'Books',          u'http://rss.csmonitor.com/feeds/books'),
        (u'Arts',           u'http://rss.csmonitor.com/feeds/arts'),
        (u'Environment',    u'http://rss.csmonitor.com/feeds/environment'),
        (u'Innovation',     u'http://rss.csmonitor.com/feeds/scitech'),
        (u'Living',         u'http://rss.csmonitor.com/feeds/living'),
        (u'Science',        u'http://rss.csmonitor.com/feeds/science'),
        (u'The Culture',    u'http://rss.csmonitor.com/feeds/theculture'),
        (u'The Home Forum', u'http://rss.csmonitor.com/feeds/homeforum'),
        (u'Articles',       u'http://rss.csmonitor.com/feeds/csarticles'),
    ]

    def append_page(self, soup):
        """
        Recursively fetch the remaining pages of a multi-page article and
        splice their 'sBody' content into *soup*'s own 'sBody' container.
        """
        pager = soup.find('div', attrs={'class': 'navigation'})
        if not pager:
            return
        nexttag = pager.find(attrs={'id': 'next-button'})
        if not nexttag:
            return
        nurl = 'http://www.csmonitor.com' + nexttag['href']
        soup2 = self.index_to_soup(nurl)
        texttag = soup2.find(attrs={'class': 'sBody'})
        if texttag:
            appendtag = soup.find(attrs={'class': 'sBody'})
            # Drop related-story/hidden boxes from the continuation page
            # before merging it into the first page.
            for citem in texttag.findAll(attrs={'class': ['podStoryRel', 'bottom-rel', 'hide']}):
                citem.extract()
            # Depth-first: pull any further pages into soup2 first, so the
            # final order of paragraphs matches the site's pagination.
            self.append_page(soup2)
            texttag.extract()
            pager.extract()
            appendtag.append(texttag)

    def preprocess_html(self, soup):
        """
        Merge multi-page articles, remove the pager, flatten anchors to
        plain text (or bare <div>s around images) and sanitize <img> tags.
        """
        self.append_page(soup)
        pager = soup.find('div', attrs={'class': 'navigation'})
        if pager:
            pager.extract()
        for item in soup.findAll('a'):
            limg = item.find('img')
            if item.string is not None:
                # Plain text link: keep just the text.
                item.replaceWith(item.string)
            elif limg:
                # Image link: keep the image, neutralize the anchor.
                item.name  = 'div'
                item.attrs = []
            else:
                item.replaceWith(self.tag_to_string(item))
        for item in soup.findAll('img'):
            # .get avoids a KeyError on src-less <img> tags; drop the
            # scorecardresearch tracking beacon, give the rest alt text.
            if 'scorecardresearch' in item.get('src', ''):
                item.extract()
            else:
                if not item.has_key('alt'):  # calibre's BeautifulSoup 3 Tag API
                    item['alt'] = 'image'
        return soup