Merge from trunk
 BIN  resources/images/news/cnetjapan_digital.png    (new file, 892 B)
 BIN  resources/images/news/cnetjapan_release.png    (new file, 892 B)
 BIN  resources/images/news/mainichi.png             (new file, 953 B)
 BIN  resources/images/news/mainichi_it_news.png     (new file, 953 B)
 BIN  (file name not shown in this view)             (modified, 948 B -> 948 B)
 BIN  resources/images/news/nikkei_sub_industry.png  (new file, 948 B)
 BIN  resources/images/news/yomiuri.png              (new file, 660 B)
@@ -7,7 +7,9 @@ class CNetJapan(BasicNewsRecipe):
     max_articles_per_feed = 30
     __author__ = 'Hiroshi Miura'

-    feeds = [(u'cnet rss', u'http://feeds.japan.cnet.com/cnet/rss')]
+    feeds = [(u'CNet News', u'http://feed.japan.cnet.com/rss/index.rdf'),
+             (u'CNet Blog', u'http://feed.japan.cnet.com/rss/blog/index.rdf')
+            ]
     language = 'ja'
     encoding = 'Shift_JIS'
     remove_javascript = True
@@ -21,12 +23,29 @@ class CNetJapan(BasicNewsRecipe):
             lambda match: '<!-- removed -->'),
     ]

-    remove_tags_before = dict(name="h2")
+    remove_tags_before = dict(id="contents_l")
     remove_tags = [
          {'class':"social_bkm_share"},
          {'class':"social_bkm_print"},
          {'class':"block20 clearfix"},
          dict(name="div",attrs={'id':'bookreview'}),
+         {'class':"tag_left_ttl"},
+         {'class':"tag_right"}
     ]
     remove_tags_after = {'class':"block20"}

+    def parse_feeds(self):
+
+        feeds = BasicNewsRecipe.parse_feeds(self)
+
+        for curfeed in feeds:
+            delList = []
+            for a,curarticle in enumerate(curfeed.articles):
+                if re.search(r'pheedo.jp', curarticle.url):
+                    delList.append(curarticle)
+            if len(delList)>0:
+                for d in delList:
+                    index = curfeed.articles.index(d)
+                    curfeed.articles[index:index+1] = []
+
+        return feeds
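
Note: the parse_feeds() override added above (and repeated verbatim in the new CNET Japan recipes below) drops feed entries whose URLs point at pheedo.jp, the ad/redirect host that shows up in these feeds, so only real articles are downloaded. A more compact sketch of the same filter, usable inside the same recipe class, assuming Feed.articles is a plain list as BasicNewsRecipe.parse_feeds() returns it:

    # Sketch only: equivalent, more compact form of the filter above.
    # Assumption: curfeed.articles can be reassigned as a plain list.
    def parse_feeds(self):
        feeds = BasicNewsRecipe.parse_feeds(self)
        for curfeed in feeds:
            # keep only entries that are not pheedo.jp ad/redirect links
            curfeed.articles = [a for a in curfeed.articles
                                if not re.search(r'pheedo.jp', a.url)]
        return feeds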
 resources/recipes/cnetjapan_digital.recipe  (new file, 49 lines)
@@ -0,0 +1,49 @@
+import re
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class CNetJapanDigital(BasicNewsRecipe):
+    title = u'CNET Japan Digital'
+    oldest_article = 3
+    max_articles_per_feed = 30
+    __author__ = 'Hiroshi Miura'
+
+    feeds = [(u'CNet digital',u'http://feed.japan.cnet.com/rss/digital/index.rdf') ]
+    language = 'ja'
+    encoding = 'Shift_JIS'
+    remove_javascript = True
+
+    preprocess_regexps = [
+        (re.compile(ur'<!--\u25B2contents_left END\u25B2-->.*</body>', re.DOTALL|re.IGNORECASE|re.UNICODE),
+         lambda match: '</body>'),
+        (re.compile(r'<!--AD_ELU_HEADER-->.*</body>', re.DOTALL|re.IGNORECASE),
+         lambda match: '</body>'),
+        (re.compile(ur'<!-- \u25B2\u95A2\u9023\u30BF\u30B0\u25B2 -->.*<!-- \u25B2ZDNet\u25B2 -->', re.UNICODE),
+         lambda match: '<!-- removed -->'),
+    ]
+
+    remove_tags_before = dict(id="contents_l")
+    remove_tags = [
+         {'class':"social_bkm_share"},
+         {'class':"social_bkm_print"},
+         {'class':"block20 clearfix"},
+         dict(name="div",attrs={'id':'bookreview'}),
+         {'class':"tag_left_ttl"},
+         {'class':"tag_right"}
+    ]
+    remove_tags_after = {'class':"block20"}
+
+    def parse_feeds(self):
+
+        feeds = BasicNewsRecipe.parse_feeds(self)
+
+        for curfeed in feeds:
+            delList = []
+            for a,curarticle in enumerate(curfeed.articles):
+                if re.search(r'pheedo.jp', curarticle.url):
+                    delList.append(curarticle)
+            if len(delList)>0:
+                for d in delList:
+                    index = curfeed.articles.index(d)
+                    curfeed.articles[index:index+1] = []
+
+        return feeds
 resources/recipes/cnetjapan_release.recipe  (new file, 48 lines)
@@ -0,0 +1,48 @@
+import re
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class CNetJapanRelease(BasicNewsRecipe):
+    title = u'CNET Japan release'
+    oldest_article = 3
+    max_articles_per_feed = 30
+    __author__ = 'Hiroshi Miura'
+
+    feeds = [(u'CNet Release', u'http://feed.japan.cnet.com/rss/release/index.rdf') ]
+    language = 'ja'
+    encoding = 'Shift_JIS'
+    remove_javascript = True
+
+    preprocess_regexps = [
+        (re.compile(ur'<!--\u25B2contents_left END\u25B2-->.*</body>', re.DOTALL|re.IGNORECASE|re.UNICODE),
+         lambda match: '</body>'),
+        (re.compile(r'<!--AD_ELU_HEADER-->.*</body>', re.DOTALL|re.IGNORECASE),
+         lambda match: '</body>'),
+        (re.compile(ur'<!-- \u25B2\u95A2\u9023\u30BF\u30B0\u25B2 -->.*<!-- \u25B2ZDNet\u25B2 -->', re.UNICODE),
+         lambda match: '<!-- removed -->'),
+    ]
+
+    remove_tags_before = dict(id="contents_l")
+    remove_tags = [
+         {'class':"social_bkm_share"},
+         {'class':"social_bkm_print"},
+         {'class':"block20 clearfix"},
+         dict(name="div",attrs={'id':'bookreview'}),
+         {'class':"tag_left_ttl"}
+    ]
+    remove_tags_after = {'class':"block20"}
+
+    def parse_feeds(self):
+
+        feeds = BasicNewsRecipe.parse_feeds(self)
+
+        for curfeed in feeds:
+            delList = []
+            for a,curarticle in enumerate(curfeed.articles):
+                if re.search(r'pheedo.jp', curarticle.url):
+                    delList.append(curarticle)
+            if len(delList)>0:
+                for d in delList:
+                    index = curfeed.articles.index(d)
+                    curfeed.articles[index:index+1] = []
+
+        return feeds
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 __license__ = 'GPL v3'
 __copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
 '''
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 __license__ = 'GPL v3'
 __copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
 '''
@@ -14,13 +12,20 @@ class JijiDotCom(BasicNewsRecipe):
     description = 'World News from Jiji Press'
     publisher = 'Jiji Press Ltd.'
     category = 'news'
-    encoding = 'utf-8'
     oldest_article = 6
     max_articles_per_feed = 100
+    encoding = 'euc_jisx0213'
     language = 'ja'
-    cover_url = 'http://www.jiji.com/img/top_header_logo2.gif'
     masthead_url = 'http://jen.jiji.com/images/logo_jijipress.gif'
+    top_url = 'http://www.jiji.com/'

     feeds = [(u'\u30cb\u30e5\u30fc\u30b9', u'http://www.jiji.com/rss/ranking.rdf')]
     remove_tags_after = dict(id="ad_google")

+    def get_cover_url(self):
+        cover_url = 'http://www.jiji.com/img/top_header_logo2.gif'
+        soup = self.index_to_soup(self.top_url)
+        cover_item = soup.find('div', attrs={'class':'top-pad-photos'})
+        if cover_item:
+            cover_url = self.top_url + cover_item.img['src']
+        return cover_url
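
Note: the get_cover_url() override added to the Jiji recipe scrapes the front page (top_url) for the day's top photo and falls back to the static logo when nothing is found. A minimal sketch of the same scrape-with-fallback pattern; the extra check for a photo block without an <img> is an assumption added here, not part of the recipe:

    # Sketch of the scrape-with-fallback pattern above (the img guard is an addition).
    def get_cover_url(self):
        cover_url = 'http://www.jiji.com/img/top_header_logo2.gif'   # static fallback
        soup = self.index_to_soup(self.top_url)
        cover_item = soup.find('div', attrs={'class':'top-pad-photos'})
        if cover_item is not None and cover_item.img is not None:
            cover_url = self.top_url + cover_item.img['src']
        return cover_url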
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 __license__ = 'GPL v3'
 __copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
 '''
@@ -1,4 +1,3 @@
-#!/usr/bin/env python

 __license__ = 'GPL v3'
 __copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
@@ -16,9 +15,13 @@ class MSNSankeiNewsProduct(BasicNewsRecipe):
     max_articles_per_feed = 100
     encoding = 'Shift_JIS'
     language = 'ja'
+    cover_url = 'http://sankei.jp.msn.com/images/common/sankeShinbunLogo.jpg'
+    masthead_url = 'http://sankei.jp.msn.com/images/common/sankeiNewsLogo.gif'

     feeds = [(u'\u65b0\u5546\u54c1', u'http://sankei.jp.msn.com/rss/news/release.xml')]

     remove_tags_before = dict(id="__r_article_title__")
     remove_tags_after = dict(id="ajax_release_news")
-    remove_tags = [{'class':"parent chromeCustom6G"}]
+    remove_tags = [{'class':"parent chromeCustom6G"},
+                   dict(id="RelatedImg")
+                  ]
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 __license__ = 'GPL v3'
 __copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
 '''
@@ -9,9 +7,9 @@ www.nikkei.com
 from calibre.web.feeds.news import BasicNewsRecipe

 class NikkeiNet(BasicNewsRecipe):
-    title = u'\u65e5\u7d4c\u65b0\u805e\u96fb\u5b50\u7248(Free)'
+    title = u'\u65e5\u7d4c\u65b0\u805e\u96fb\u5b50\u7248(Free, MAX)'
     __author__ = 'Hiroshi Miura'
-    description = 'News and current market affairs from Japan'
+    description = 'News and current market affairs from Japan, no subscription and getting max feed.'
     cover_url = 'http://parts.nikkei.com/parts/ds/images/common/logo_r1.svg'
     masthead_url = 'http://parts.nikkei.com/parts/ds/images/common/logo_r1.svg'
     oldest_article = 2
@@ -5,12 +5,12 @@ from calibre.ptempfile import PersistentTemporaryFile


 class NikkeiNet_subscription(BasicNewsRecipe):
-    title = u'\u65e5\u7d4c\u65b0\u805e\u96fb\u5b50\u7248'
+    title = u'\u65e5\u7d4c\u65b0\u805e\u96fb\u5b50\u7248(MAX)'
     __author__ = 'Hiroshi Miura'
-    description = 'News and current market affairs from Japan'
+    description = 'News and current market affairs from Japan, gather MAX articles'
     needs_subscription = True
     oldest_article = 2
-    max_articles_per_feed = 20
+    max_articles_per_feed = 10
     language = 'ja'
     remove_javascript = False
     temp_files = []
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 __license__ = 'GPL v3'
 __copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
 '''
@@ -1,4 +1,3 @@
-#!/usr/bin/env python

 __license__ = 'GPL v3'
 __copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 __license__ = 'GPL v3'
 __copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
 '''
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 __license__ = 'GPL v3'
 __copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
 '''
@@ -30,6 +28,9 @@ class NikkeiNet_sub_main(BasicNewsRecipe):
                    {'class':"JSID_basePageMove JSID_baseAsyncSubmit cmn-form_area JSID_optForm_utoken"},
                    {'class':"cmn-article_keyword cmn-clearfix"},
                    {'class':"cmn-print_headline cmn-clearfix"},
+                   {'class':"cmn-article_list"},
+                   {'class':"cmn-dashedline"},
+                   {'class':"cmn-hide"},
     ]
     remove_tags_after = {'class':"cmn-pr_list"}

@@ -1,4 +1,3 @@
-#!/usr/bin/env python

 __license__ = 'GPL v3'
 __copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
 resources/recipes/yomiuri.recipe  (new file, 63 lines)
@@ -0,0 +1,63 @@
+__license__ = 'GPL v3'
+__copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
+'''
+www.yomiuri.co.jp
+'''
+
+from calibre.web.feeds.news import BasicNewsRecipe
+import re
+
+class YOLNews(BasicNewsRecipe):
+    title = u'Yomiuri Online (Latest)'
+    __author__ = 'Hiroshi Miura'
+    oldest_article = 1
+    max_articles_per_feed = 50
+    description = 'Japanese traditional newspaper Yomiuri Online News'
+    publisher = 'Yomiuri Online News'
+    category = 'news, japan'
+    language = 'ja'
+    encoding = 'Shift_JIS'
+    index = 'http://www.yomiuri.co.jp/latestnews/'
+    remove_javascript = True
+    masthead_title = u'YOMIURI ONLINE'
+
+    remove_tags_before = {'class':"article-def"}
+    remove_tags = [{'class':"RelatedArticle"},
+                   {'class':"sbtns"}
+                  ]
+    remove_tags_after = {'class':"date-def"}
+
+    def parse_feeds(self):
+        feeds = BasicNewsRecipe.parse_feeds(self)
+        for curfeed in feeds:
+            delList = []
+            for a,curarticle in enumerate(curfeed.articles):
+                if re.search(r'rssad.jp', curarticle.url):
+                    delList.append(curarticle)
+            if len(delList)>0:
+                for d in delList:
+                    index = curfeed.articles.index(d)
+                    curfeed.articles[index:index+1] = []
+        return feeds
+
+    def parse_index(self):
+        feeds = []
+        soup = self.index_to_soup(self.index)
+        topstories = soup.find('ul',attrs={'class':'list-def'})
+        if topstories:
+            newsarticles = []
+            for itt in topstories.findAll('li'):
+                itema = itt.find('a',href=True)
+                if itema:
+                    itd1 = itema.findNextSibling(text = True)
+                    itd2 = itd1.findNextSibling(text = True)
+                    itd3 = itd2.findNextSibling(text = True)
+                    newsarticles.append({
+                        'title'       :itema.string
+                       ,'date'        :''.join([itd1, itd2, itd3])
+                       ,'url'         :'http://www.yomiuri.co.jp' + itema['href']
+                       ,'description' :''
+                    })
+            feeds.append(('latest', newsarticles))
+        return feeds
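
Note: both Yomiuri recipes build their article lists by scraping the section index page rather than relying only on RSS. The dictionaries assembled in parse_index() above follow the shape BasicNewsRecipe expects back from that method: a list of (section title, article list) pairs, where each article is a dict with 'title' and 'url' plus optional 'date' and 'description'. An illustrative value only; the headline and URL below are placeholders, not real data:

    # Illustrative only: the structure parse_index() is expected to return.
    example_index = [
        ('latest', [
            {'title'      : u'Sample headline',                                  # placeholder
             'date'       : '',
             'url'        : 'http://www.yomiuri.co.jp/latestnews/sample.htm',    # placeholder
             'description': ''},
        ]),
    ]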
 resources/recipes/yomiuri_world.recipe  (new file, 61 lines)
@@ -0,0 +1,61 @@
+__license__ = 'GPL v3'
+__copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
+'''
+www.yomiuri.co.jp
+'''
+
+from calibre.web.feeds.news import BasicNewsRecipe
+import re
+
+class YOLNews(BasicNewsRecipe):
+    title = u'Yomiuri Online (World)'
+    __author__ = 'Hiroshi Miura'
+    oldest_article = 2
+    max_articles_per_feed = 50
+    description = 'Japanese traditional newspaper Yomiuri Online News/world news'
+    publisher = 'Yomiuri Online News'
+    category = 'news, japan'
+    language = 'ja'
+    encoding = 'Shift_JIS'
+    index = 'http://www.yomiuri.co.jp/world/'
+    remove_javascript = True
+    masthead_title = u"YOMIURI ONLINE"
+
+    remove_tags_before = {'class':"article-def"}
+    remove_tags = [{'class':"RelatedArticle"},
+                   {'class':"sbtns"}
+                  ]
+    remove_tags_after = {'class':"date-def"}
+
+    def parse_feeds(self):
+        feeds = BasicNewsRecipe.parse_feeds(self)
+        for curfeed in feeds:
+            delList = []
+            for a,curarticle in enumerate(curfeed.articles):
+                if re.search(r'rssad.jp', curarticle.url):
+                    delList.append(curarticle)
+            if len(delList)>0:
+                for d in delList:
+                    index = curfeed.articles.index(d)
+                    curfeed.articles[index:index+1] = []
+        return feeds
+
+    def parse_index(self):
+        feeds = []
+        soup = self.index_to_soup(self.index)
+        topstories = soup.find('ul',attrs={'class':'list-def'})
+        if topstories:
+            newsarticles = []
+            for itt in topstories.findAll('li'):
+                itema = itt.find('a',href=True)
+                if itema:
+                    itd1 = itema.findNextSibling(text = True)
+                    newsarticles.append({
+                        'title'       :itema.string
+                       ,'date'        :''.join([itd1])
+                       ,'url'         :'http://www.yomiuri.co.jp' + itema['href']
+                       ,'description' :''
+                    })
+            feeds.append(('World', newsarticles))
+        return feeds