mirror of
https://github.com/kovidgoyal/calibre.git
synced 2025-07-09 03:04:10 -04:00
Improved recipe for Time magazine
This commit is contained in:
parent
5cfe7571a4
commit
b5b195b5f2
@ -1,95 +1,99 @@
|
|||||||
#!/usr/bin/env python

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid@kovidgoyal.net>'
'''
time.com
'''

import re
from calibre.web.feeds.news import BasicNewsRecipe


class Time(BasicNewsRecipe):
    """Download recipe for the weekly Time magazine (time.com).

    Builds the issue's section/article index from the magazine's table of
    contents page and cleans up the fetched article HTML.
    """

    title = u'Time'
    __author__ = 'Kovid Goyal and Sujata Raman'
    description = 'Weekly magazine'
    encoding = 'utf-8'
    no_stylesheets = True
    # NOTE(review): calibre injects _() into recipe namespaces at load time;
    # this is the framework's translation helper, not the gettext builtin.
    language = _('English')

    extra_css = '''.headline {font-size: large;}
        .fact { padding-top: 10pt }
        h1 {font-family:Arial,Sans-serif}
        .byline{font-family:Arial,Sans-serif; font-size:xx-small ;color:blue}
        .timestamp{font-family:Arial,Sans-serif; font-size:x-small ;color:gray}'''

    # Strip everything before the article headline and after the left column.
    remove_tags_before = dict(id="artHd")
    remove_tags_after = {'class': "ltCol"}
    remove_tags = [
        {'class': ['articleTools', 'enlarge', 'search', 'socialtools', 'blogtools', 'moretools', 'page', 'nextUp', 'next', 'subnav', 'RSS', 'line2', 'first', 'ybuzz', 'articlePagination', 'chiclets', 'imgcont', 'createListLink', 'rlinks', 'tabsWrap', 'pagination']},
        {'id': ['quigoArticle', 'contentTools', 'articleSideBar', 'header', 'navTop', 'articleTools', 'feedmodule', 'feedmodule3', 'promos', 'footer', 'linksFooter', 'timeArchive', 'belt', 'relatedStories', 'packages', 'Features']},
        {'target': '_blank'},
    ]

    # Follow one level of links so multi-page articles are fetched in full;
    # the regexp matches the ",2.html" … ",9.html" continuation pages.
    recursions = 1
    match_regexps = [r'/[0-9,]+-(2|3|4|5|6|7|8|9)(,\d+){0,1}.html']

    def parse_index(self):
        """Parse the magazine TOC page into calibre's feed structure.

        Returns a list of ``(section_title, [article_dict, ...])`` tuples.
        Also sets ``self.cover_url`` from the "View Large Cover" page on a
        best-effort basis (failure to fetch the cover is logged, not fatal).
        """
        soup = self.index_to_soup('http://www.time.com/time/magazine')

        # Best-effort cover: follow the "View Large Cover" link, then find
        # the archive cover image on that page.
        img = soup.find('a', title="View Large Cover", href=True)
        if img is not None:
            cover_url = 'http://www.time.com' + img['href']
            try:
                nsoup = self.index_to_soup(cover_url)
                img = nsoup.find('img', src=re.compile('archive/covers'))
                if img is not None:
                    self.cover_url = img['src']
            except Exception:
                # Deliberately non-fatal: a missing cover should not abort
                # the whole download. (Narrowed from a bare except so that
                # KeyboardInterrupt/SystemExit still propagate.)
                self.log.exception('Failed to fetch cover')

        feeds = []
        parent = soup.find(id='tocGuts')
        # Each 'toc_seched' element is a section header; the articles of
        # that section follow it as siblings (see find_articles).
        for seched in parent.findAll(attrs={'class': 'toc_seched'}):
            section = self.tag_to_string(seched).capitalize()
            articles = list(self.find_articles(seched))
            feeds.append((section, articles))

        return feeds

    def find_articles(self, seched):
        """Yield article dicts for every 'toc_hed' link following *seched*.

        :param seched: the section-header tag whose sibling links to walk.
        """
        for a in seched.findNextSiblings('a', href=True, attrs={'class': 'toc_hed'}):
            yield {
                'title': self.tag_to_string(a),
                'url': 'http://www.time.com' + a['href'],
                'date': '',
                'description': self.article_description(a),
            }

    def article_description(self, a):
        """Collect the free-floating text after article link *a* as a blurb.

        Walks the siblings following *a*, skipping 'toc_parens' spans and
        <br> tags, and stops at the next <div> or <a> (i.e. the next TOC
        entry). Non-tag nodes (NavigableStrings) are included verbatim.
        """
        ans = []
        while True:
            t = a.nextSibling
            if t is None:
                break
            a = t
            if getattr(t, 'name', False):
                if t.get('class', '') == 'toc_parens' or t.name == 'br':
                    continue
                if t.name in ('div', 'a'):
                    break
                ans.append(self.tag_to_string(t))
            else:
                ans.append(unicode(t))
        return u' '.join(ans).replace(u'\xa0', u'').strip()

    def postprocess_html(self, soup, first_page):
        """Remove pagination chrome; on continuation pages also drop the
        repeated photo block, headline, and lead paragraph so the article
        reads continuously.

        :param soup: the parsed article page.
        :param first_page: True for the first page of a multi-page article.
        """
        div = soup.find(attrs={'class': 'artPag'})
        if div is not None:
            div.extract()
        if not first_page:
            for cls in ('photoBkt', 'artHd'):
                div = soup.find(attrs={'class': cls})
                if div is not None:
                    div.extract()
            # The first <p> of the body text repeats on continuation pages.
            div = soup.find(attrs={'class': 'artTxt'})
            if div is not None:
                p = div.find('p')
                if p is not None:
                    p.extract()
        return soup
||||||
|
Loading…
x
Reference in New Issue
Block a user