Update Nikkei News

This commit is contained in:
Kovid Goyal 2014-11-18 14:28:54 +05:30
parent 10773ceb24
commit f28a295362

View File

@ -26,18 +26,20 @@ class NikkeiNet_paper_subscription(BasicNewsRecipe):
# {'class':"cmnc-zoom"}, # {'class':"cmnc-zoom"},
{'class':"cmn-hide"}, {'class':"cmn-hide"},
{'name':'form'}, {'name':'form'},
{'class':'cmn-print_headline cmn-clearfix'},
{'id':'ABOUT_NIKKEI'},
] ]
remove_tags_after = {'class':"cmn-indent"} remove_tags_after = {'class':"cmn-indent"}
def get_browser(self): def get_browser(self):
br = BasicNewsRecipe.get_browser(self) br = BasicNewsRecipe.get_browser(self)
#pp.pprint(self.parse_index()) # pp.pprint(self.parse_index())
#exit(1) # exit(1)
#br.set_debug_http(True) # br.set_debug_http(True)
#br.set_debug_redirects(True) # br.set_debug_redirects(True)
#br.set_debug_responses(True) # br.set_debug_responses(True)
if self.username is not None and self.password is not None: if self.username is not None and self.password is not None:
print "-------------------------open top page-------------------------------------" print "-------------------------open top page-------------------------------------"
@ -47,9 +49,9 @@ class NikkeiNet_paper_subscription(BasicNewsRecipe):
url = br.links(url_regex="www.nikkei.com/etc/accounts/login").next().url url = br.links(url_regex="www.nikkei.com/etc/accounts/login").next().url
except StopIteration: except StopIteration:
url = 'http://www.nikkei.com/etc/accounts/login?dps=3&pageflag=top&url=http%3A%2F%2Fwww.nikkei.com%2F' url = 'http://www.nikkei.com/etc/accounts/login?dps=3&pageflag=top&url=http%3A%2F%2Fwww.nikkei.com%2F'
br.open(url) #br.follow_link(link) br.open(url) # br.follow_link(link)
#response = br.response() #response = br.response()
#print response.get_data() # print response.get_data()
print "-------------------------JS redirect(send autoPostForm)--------------------" print "-------------------------JS redirect(send autoPostForm)--------------------"
br.select_form(name='autoPostForm') br.select_form(name='autoPostForm')
br.submit() br.submit()
@ -64,9 +66,9 @@ class NikkeiNet_paper_subscription(BasicNewsRecipe):
br.select_form(nr=0) br.select_form(nr=0)
br.submit() br.submit()
#br.set_debug_http(False) # br.set_debug_http(False)
#br.set_debug_redirects(False) # br.set_debug_redirects(False)
#br.set_debug_responses(False) # br.set_debug_responses(False)
return br return br
def cleanup(self): def cleanup(self):
@ -77,18 +79,18 @@ class NikkeiNet_paper_subscription(BasicNewsRecipe):
print "-------------------------get index of paper--------------------------------" print "-------------------------get index of paper--------------------------------"
result = [] result = []
soup = self.index_to_soup('http://www.nikkei.com/paper/') soup = self.index_to_soup('http://www.nikkei.com/paper/')
#soup = self.index_to_soup(self.test_data()) # soup = self.index_to_soup(self.test_data())
sections = soup.findAll('div', 'cmn-section kn-special JSID_baseSection') sections = soup.findAll('div', 'cmn-section kn-special JSID_baseSection')
if len(sections) == 0: if len(sections) == 0:
sections = soup.findAll('div', 'cmn-section kn-special') sections = soup.findAll('div', 'cmn-section kn-special')
for sect in sections: for sect in sections:
sect_title = sect.find('h3', 'cmnc-title').string sect_title = sect.find('h3', 'cmnc-title').string
sect_result = [] sect_result = []
for elem in sect.findAll(attrs={'class':['cmn-article_title']}): for elem in sect.findAll(attrs={'class':['cmn-article_title']}):
if elem.span.a == None or elem.span.a['href'].startswith('javascript') : if elem.span.a is None or elem.span.a['href'].startswith('javascript') :
continue continue
url = 'http://www.nikkei.com' + elem.span.a['href'] url = 'http://www.nikkei.com' + elem.span.a['href']
url = re.sub("/article/", "/print-article/", url) # print version. url = re.sub("/article/", "/print-article/", url) # print version.
span = elem.span.a.span span = elem.span.a.span
if ((span is not None) and (len(span.contents) > 1)): if ((span is not None) and (len(span.contents) > 1)):
title = span.contents[1].string title = span.contents[1].string
@ -97,3 +99,8 @@ class NikkeiNet_paper_subscription(BasicNewsRecipe):
result.append([sect_title, sect_result]) result.append([sect_title, sect_result])
return result return result
def populate_article_metadata(self, article, soup, first):
    """Populate the article summary from the fetched article body.

    Locates the main article-text container in *soup* and joins the
    plain-string contents of its direct children into a text summary,
    stored on both ``article.summary`` and ``article.text_summary``.

    :param article: the feed article object whose metadata is filled in
    :param soup: parsed article page (BeautifulSoup)
    :param first: unused here (True when this soup is the first page)
    """
    elm = soup.find('div', {"class": "cmn-article_text JSID_key_fonttxt"})
    if elm is None:
        # Container missing (site layout change or a non-article page):
        # leave the metadata untouched instead of raising and aborting
        # the whole fetch.
        return
    # A child Tag that contains nested markup has .string == None;
    # joining None raises TypeError, so skip those children.
    elm_text = ''.join(s.string for s in elm if s.string is not None)
    article.summary = elm_text
    article.text_summary = elm_text