Restore all sections to the Guardian newspaper download

Kovid Goyal 2009-11-14 16:24:38 -07:00
parent db68607678
commit 869b907568


@@ -43,97 +43,45 @@ class Guardian(BasicNewsRecipe):
                #match-stats-summary{font-size:small; font-family:Arial,Helvetica,sans-serif;font-weight:normal;}
           '''
 
-    def parse_index(self):
+    def find_sections(self):
         soup = self.index_to_soup('http://www.guardian.co.uk/theguardian')
         # find cover pic
         img = soup.find( 'img',attrs ={'alt':'Guardian digital edition'})
-        if img is None: return None
-        else:
+        if img is not None:
             self.cover_url = img['src']
         # end find cover pic
-        sections = []
-        ans = []
-        for li in soup.findAll( 'li'):
-            section = ''
-            articles = []
-            if li.a and li.a.has_key('href'):
-                url = li.a['href']
-                if 'mainsection' in url:
-                    section = self.tag_to_string(url)
-                    i = len(section)
-                    index1 = section.rfind('/',0,i)
-                    section = section[index1+1:i]
-                    sections.append(section)
-                    #find the articles in the Main Section start
-                    soup = self.index_to_soup(url)
-                    date = strftime('%a, %d %b')
-                    descl = []
-
-                    for desclist in soup.findAll(name='div',attrs={'class':"trailtext"}):
-                        descl.append(self.tag_to_string(desclist).strip())
-
-                    t = -1
-                    for tag in soup.findAll('h3'):
-                        t = t+1
-                        for a in tag.findAll('a'):
-                            if t < len(descl):
-                                desc = descl[t]
-                            else:
-                                desc = ''
-                            if a and a.has_key('href'):
-                                url2 = a['href']
-                            else:
-                                url2 =''
-                            title = self.tag_to_string(a)
-                            if len(articles) == 0: #First article
-                                articles.append({
-                                    'title':title,
-                                    'date':date,
-                                    'url':url2,
-                                    'description':desc,
-                                })
-                            else:
-                                #eliminate duplicates start
-                                if {'title':title,'date':date,'url':url2,'description':desc} in articles :
-                                    url2 = ''
-                                #eliminate duplicates end
-                                else:
-                                    if 'http://jobs.guardian.co.uk/' in url2:
-                                        url2 = ''
-                                    else:
-                                        articles.append({
-                                            'title':title,
-                                            'date':date,
-                                            'url':url2,
-                                            'description':desc,
-                                        })
-                    #find the articles in the Main Section end
-                    ans.append( articles)
-            else:
-                url =''
-        titles = map(self.find_title, sections)
-        ans1 = list(zip(titles,ans))
-        return ans1[2:]
-
-    def find_title(self, section):
-        d = {'topstories':'Top Stories', 'international':'International', 'editorialsandreply':'Editorials and Reply',
-             'commentanddebate':'Comment and Debate','uknews':'UK News','saturday':'Saturday','sunday':'Sunday',
-             'reviews':'Reviews', 'obituaries':'Obituaries'}
-        return d.get(section, section)
+
+        idx = soup.find('div', id='book-index')
+        for s in idx.findAll('strong', attrs={'class':'book'}):
+            a = s.find('a', href=True)
+            yield (self.tag_to_string(a), a['href'])
+
+    def find_articles(self, url):
+        soup = self.index_to_soup(url)
+        div = soup.find('div', attrs={'class':'book-index'})
+        for ul in div.findAll('ul', attrs={'class':'trailblock'}):
+            for li in ul.findAll('li'):
+                a = li.find(href=True)
+                if not a:
+                    continue
+                title = self.tag_to_string(a)
+                url = a['href']
+                if not title or not url:
+                    continue
+                tt = li.find('div', attrs={'class':'trailtext'})
+                if tt is not None:
+                    for da in tt.findAll('a'): da.extract()
+                    desc = self.tag_to_string(tt).strip()
+                yield {
+                        'title': title, 'url':url, 'description':desc,
+                        'date' : strftime('%a, %d %b'),
+                        }
+
+    def parse_index(self):
+        feeds = []
+        for title, href in self.find_sections():
+            feeds.append((title, list(self.find_articles(href))))
+        return feeds
 
     def preprocess_html(self, soup):
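
For context: calibre builds the downloaded paper from whatever parse_index() returns, which must be a list of (section title, list of article dicts) pairs. The new three-method split produces exactly that shape, one pair per Guardian section found on the index page. A minimal illustration of the structure, with made-up values rather than anything fetched from the site:

    feeds = [
        ('Top stories', [
            {'title': 'Example headline',
             'url': 'http://www.guardian.co.uk/uk/example-story',
             'description': 'Trail text shown under the headline.',
             'date': 'Sat, 14 Nov'},
        ]),
        ('International', [
            {'title': 'Another example headline',
             'url': 'http://www.guardian.co.uk/world/another-example',
             'description': '',
             'date': 'Sat, 14 Nov'},
        ]),
    ]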
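
One caveat in find_articles() as committed: desc is only assigned inside the "if tt is not None:" branch, so a list item with no trailtext div reaches the yield with desc unbound (a NameError on the first such item, a stale description on later ones). A defensive variant of the method, offered as a sketch rather than as part of this commit, gives the description a default first:

    def find_articles(self, url):
        soup = self.index_to_soup(url)
        div = soup.find('div', attrs={'class':'book-index'})
        for ul in div.findAll('ul', attrs={'class':'trailblock'}):
            for li in ul.findAll('li'):
                a = li.find(href=True)
                if not a:
                    continue
                title = self.tag_to_string(a)
                url = a['href']
                if not title or not url:
                    continue
                desc = ''  # default, so the yield below is always well-defined
                tt = li.find('div', attrs={'class':'trailtext'})
                if tt is not None:
                    # drop inline links before extracting the trail text
                    for da in tt.findAll('a'):
                        da.extract()
                    desc = self.tag_to_string(tt).strip()
                yield {
                    'title': title, 'url': url, 'description': desc,
                    'date': strftime('%a, %d %b'),
                }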