#!/usr/bin/env python
__license__   = 'GPL v3'
__copyright__ = '2008, Kovid Goyal '
'''
theatlantic.com
'''
import string

from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import Tag, NavigableString


class TheAtlantic(BasicNewsRecipe):
    '''Download recipe for The Atlantic magazine.

    Scrapes the magazine table-of-contents page, builds one feed per
    magazine section (plus Poems and Advice), and cleans up article
    pages for e-book conversion.
    '''

    title       = 'The Atlantic'
    __author__  = 'Kovid Goyal and Sujata Raman'
    description = 'Current affairs and politics focussed on the US'
    # Table-of-contents page for the current issue.
    INDEX       = 'http://www.theatlantic.com/magazine/toc/0/'
    language    = 'en'

    # Strip page chrome: keep only the article body between the head
    # and the copyright notice.
    remove_tags_before = dict(name='div', id='articleHead')
    remove_tags_after  = dict(id='copyright')
    remove_tags        = [dict(id=['header', 'printAds', 'pageControls'])]
    no_stylesheets = True

    def print_version(self, url):
        '''Map an article URL to its printer-friendly equivalent.'''
        return url.replace('/archive/', '/print/')

    def parse_index(self):
        '''Parse the TOC page into [(section_title, [article_dict, ...]), ...].

        Each article dict has the keys title/url/description/date that
        calibre expects.
        '''
        soup = self.index_to_soup(self.INDEX)

        # The issue date is rendered with cufon; the last two text
        # fragments hold the month and year.
        sectit = soup.find('h1', attrs={'class':'sectionTitle'})
        if sectit is not None:
            texts = sectit.findAll('cufontext')
            texts = map(self.tag_to_string, texts[-2:])
            self.timefmt = ' [%s]'%(''.join(texts))

        cover = soup.find('img', src=True, attrs={'class':'cover'})
        if cover is not None:
            self.cover_url = cover['src']

        feeds = []
        for section in soup.findAll('div', attrs={'class':'magazineSection'}):
            section_title = section.find(attrs={'class':'sectionHeader'})
            section_title = string.capwords(self.tag_to_string(section_title))
            self.log('Found section:', section_title)
            articles = []
            for post in section.findAll('div', attrs={'class':'post'}):
                h = post.find(['h3', 'h4'])
                title = self.tag_to_string(h)
                a = post.find('a', href=True)
                url = a['href']
                if url.startswith('/'):
                    url = 'http://www.theatlantic.com'+url
                # Optional one-line teaser ("dek") under the headline.
                p = post.find('p', attrs={'class':'dek'})
                desc = None
                self.log('\tFound article:', title, 'at', url)
                if p is not None:
                    desc = self.tag_to_string(p)
                    self.log('\t\t', desc)
                articles.append({'title':title, 'url':url, 'description':desc,
                    'date':''})
            feeds.append((section_title, articles))

        poems = []
        self.log('Found section: Poems')
        for poem in soup.findAll('div', attrs={'class':'poem'}):
            title = self.tag_to_string(poem.find('h4'))
            desc = self.tag_to_string(poem.find(attrs={'class':'author'}))
            url = 'http://www.theatlantic.com'+poem.find('a')['href']
            self.log('\tFound article:', title, 'at', url)
            self.log('\t\t', desc)
            poems.append({'title':title, 'url':url, 'description':desc,
                'date':''})
        if poems:
            feeds.append(('Poems', poems))

        # The advice column is a single optional block; guard against
        # its absence so a layout change doesn't abort the whole fetch.
        div = soup.find(id='advice')
        if div is not None:
            self.log('Found section: Advice')
            title = self.tag_to_string(div.find('h4'))
            url = 'http://www.theatlantic.com'+div.find('a')['href']
            desc = self.tag_to_string(div.find('p'))
            self.log('\tFound article:', title, 'at', url)
            self.log('\t\t', desc)
            feeds.append(('Advice', [{'title':title, 'url':url,
                'description':desc, 'date':''}]))

        return feeds

    def postprocess_html(self, soup, first):
        '''Convert right-aligned image tables into centered captioned divs.'''
        for table in soup.findAll('table', align='right'):
            img = table.find('img')
            if img is not None:
                img.extract()
                # Whatever text remains in the table is the caption.
                caption = self.tag_to_string(table).strip()
                div = Tag(soup, 'div')
                div['style'] = 'text-align:center'
                div.insert(0, img)
                div.insert(1, Tag(soup, 'br'))
                if caption:
                    div.insert(2, NavigableString(caption))
                table.replaceWith(div)
        return soup