Update GoComics

Kovid Goyal 2013-09-22 11:09:32 +05:30
parent d0f179da82
commit 42d8cbc631


@@ -4,43 +4,28 @@ __copyright__ = 'Copyright 2010 Starson17'
 www.gocomics.com
 '''
 from calibre.web.feeds.news import BasicNewsRecipe
-import re

 class GoComics(BasicNewsRecipe):
     title = 'Go Comics'
     __author__ = 'Starson17'
     __version__ = '1.06'
     __date__ = '07 June 2011'
-    description = u'200+ Comics - Customize for more days/comics: Defaults to 7 days, 25 comics - 20 general, 5 editorial.'
+    description = u'200+ Comics - Customize for more days/comics: Defaults to 1 day, 25 comics - 20 general, 5 editorial.'
     category = 'news, comics'
     language = 'en'
-    use_embedded_content= False
     no_stylesheets = True
     remove_javascript = True
     remove_attributes = ['style']

-    ####### USER PREFERENCES - COMICS, IMAGE SIZE AND NUMBER OF COMICS TO RETRIEVE ########
+    # USER PREFERENCES - COMICS AND NUMBER OF COMICS TO RETRIEVE ########
     # num_comics_to_get - I've tried up to 99 on Calvin&Hobbes
     num_comics_to_get = 1
-    # comic_size 300 is small, 600 is medium, 900 is large, 1500 is extra-large
-    comic_size = 900
     # CHOOSE COMIC STRIPS BELOW - REMOVE COMMENT '# ' FROM IN FRONT OF DESIRED STRIPS
     # Please do not overload their servers by selecting all comics and 1000 strips from each!

-    conversion_options = {'linearize_tables' : True
-                          , 'comment' : description
-                          , 'tags' : category
-                          , 'language' : language
-                          }
-
-    keep_only_tags = [dict(name='div', attrs={'class':['feature','banner']}),
-                      ]
-
-    remove_tags = [dict(name='a', attrs={'class':['beginning','prev','cal','next','newest']}),
-                   dict(name='div', attrs={'class':['tag-wrapper']}),
-                   dict(name='a', attrs={'href':re.compile(r'.*mutable_[0-9]+', re.IGNORECASE)}),
-                   dict(name='img', attrs={'src':re.compile(r'.*mutable_[0-9]+', re.IGNORECASE)}),
-                   dict(name='ul', attrs={'class':['share-nav','feature-nav']}),
-                   ]
+    keep_only_tags = [
+        dict(name='h1'),
+        dict(name='div', id=lambda x: x and x.startswith('mutable_')),
+    ]

     def get_browser(self):
@@ -50,7 +35,7 @@ class GoComics(BasicNewsRecipe):
     def parse_index(self):
         feeds = []
-        for title, url in [
+        for i, (title, url) in enumerate([ # {{{
             #(u"2 Cows and a Chicken", u"http://www.gocomics.com/2cowsandachicken"),
             #(u"9 Chickweed Lane", u"http://www.gocomics.com/9chickweedlane"),
             #(u"Adam At Home", u"http://www.gocomics.com/adamathome"),
@@ -271,7 +256,7 @@ class GoComics(BasicNewsRecipe):
             (u"Strange Brew", u"http://www.gocomics.com/strangebrew"),
             (u"The Argyle Sweater", u"http://www.gocomics.com/theargylesweater"),
             #
-            ######## EDITORIAL CARTOONS #####################
+            # EDITORIAL CARTOONS #####################
             #(u"Adam Zyglis", u"http://www.gocomics.com/adamzyglis"),
             #(u"Andy Singer", u"http://www.gocomics.com/andysinger"),
             #(u"Ben Sargent",u"http://www.gocomics.com/bensargent"),
@@ -363,81 +348,65 @@ class GoComics(BasicNewsRecipe):
             #(u"Walt Handelsman",u"http://www.gocomics.com/walthandelsman"),
             #(u"Wayne Stayskal",u"http://www.gocomics.com/waynestayskal"),
             #(u"Wit of the World",u"http://www.gocomics.com/witoftheworld"),
-            ]:
-            print 'Working on: ', title
+            ]): # }}}
+            self.log('Working on: ', title, url)
             articles = self.make_links(url)
             if articles:
                 feeds.append((title, articles))
+            if self.test and i > 0:
+                break
         return feeds

     def make_links(self, url):
         title = 'Temp'
         current_articles = []
-        pages = range(1, self.num_comics_to_get+1)
-        for page in pages:
+        if self.test:
+            self.num_comics_to_get = 2
+        num = self.num_comics_to_get
+        while num > 0:
+            num -= 1
             page_soup = self.index_to_soup(url)
-            if page_soup:
+            if not page_soup:
+                break
+            content = page_soup.find(id='content')
+            if content is None:
+                break
+            feature = content.find(name='div', attrs={'class':'feature'})
+            feature_nav = content.find(name='ul', attrs={'class':'feature-nav'})
+            if feature is None or feature_nav is None:
+                break
             try:
-                strip_title = page_soup.find(name='div', attrs={'class':'top'}).h1.a.string
+                a = feature.find('h1').find('a', href=True)
+            except:
+                self.log.exception('Failed to find current page link')
+                break
+            page_url = a['href']
+            if page_url.startswith('/'):
+                page_url = 'http://www.gocomics.com' + page_url
+            try:
+                strip_title = self.tag_to_string(feature.find('h1').find('a', href=True))
             except:
                 strip_title = 'Error - no Title found'
             try:
-                date_title = page_soup.find('ul', attrs={'class': 'feature-nav'}).li.string
-                if not date_title:
-                    date_title = page_soup.find('ul', attrs={'class': 'feature-nav'}).li.string
+                date_title = self.tag_to_string(feature_nav.find('li'))
             except:
                 date_title = 'Error - no Date found'
             title = strip_title + ' - ' + date_title
-            for i in range(2):
-                try:
-                    strip_url_date = page_soup.find(name='div', attrs={'class':'top'}).h1.a['href']
-                    break # success - this is normal exit
-                except:
-                    strip_url_date = None
-                    continue # try to get strip_url_date again
-            for i in range(2):
-                try:
-                    prev_strip_url_date = page_soup.find('a', attrs={'class': 'prev'})['href']
-                    break # success - this is normal exit
-                except:
-                    prev_strip_url_date = None
-                    continue # try to get prev_strip_url_date again
-            if strip_url_date:
-                page_url = 'http://www.gocomics.com' + strip_url_date
-            else:
-                continue
-            if prev_strip_url_date:
-                prev_page_url = 'http://www.gocomics.com' + prev_strip_url_date
-            else:
-                continue
             current_articles.append({'title': title, 'url': page_url, 'description':'', 'date':''})
-            url = prev_page_url
+            a = feature_nav.find('a', href=True, attrs={'class':'prev'})
+            if a is None:
+                break
+            url = a['href']
+            if url.startswith('/'):
+                url = 'http://www.gocomics.com' + url
         current_articles.reverse()
         return current_articles

     def preprocess_html(self, soup):
-        if soup.title:
-            title_string = soup.title.string.strip()
-            _cd = title_string.split(',',1)[1]
-            comic_date = ' '.join(_cd.split(' ', 4)[0:-1])
-        if soup.h1.span:
-            artist = soup.h1.span.string
-            soup.h1.span.string.replaceWith(comic_date + artist)
-        feature_item = soup.find('p',attrs={'class':'feature_item'})
-        if feature_item.a:
-            a_tag = feature_item.a
-            a_href = a_tag["href"]
-            img_tag = a_tag.img
-            img_tag["src"] = a_href
-            img_tag["width"] = self.comic_size
-            img_tag["height"] = None
-        return self.adeify_images(soup)
+        headings = soup.findAll('h1')
+        for h1 in headings[1:]:
+            h1.extract()
+        self.adeify_images(soup)
+        return soup
-
-    extra_css = '''
-        h1{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:large;}
-        h2{font-family:Arial,Helvetica,sans-serif; font-weight:normal;font-size:small;}
-        img {max-width:100%; min-width:100%;}
-        p{font-family:Arial,Helvetica,sans-serif;font-size:small;}
-        body{font-family:Helvetica,Arial,sans-serif;font-size:small;}
-        '''
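
A note on the rewritten keep_only_tags: the regex-driven remove_tags list is gone, and the recipe instead keeps just the strip heading and the div whose id starts with 'mutable_', using BeautifulSoup's support for callable attribute matchers. A minimal standalone sketch of how that lambda behaves (bs4 is used here only for illustration; calibre bundles its own BeautifulSoup, and the HTML snippet is invented):

    from bs4 import BeautifulSoup

    html = '''<div id="content">
      <h1><a href="/calvinandhobbes/2013/09/22">Calvin and Hobbes</a></h1>
      <div id="mutable_12345"><img src="strip.gif"/></div>
      <div id="sidebar">share/nav widgets</div>
    </div>'''

    soup = BeautifulSoup(html, 'html.parser')
    # The callable receives each tag's id value, or None when a tag has
    # no id at all; the "x and" guard keeps startswith() off of None.
    match = soup.find('div', id=lambda x: x and x.startswith('mutable_'))
    print(match['id'])  # mutable_12345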
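
The new self.test branches hook into calibre's recipe test mode: parse_index now stops after the second enabled comic, and make_links caps num_comics_to_get at 2. Assuming the file is saved locally as gocomics.recipe, the change can be smoke-tested from the command line with something like:

    ebook-convert gocomics.recipe .epub --test -vv

where --test is the ebook-convert switch that sets recipe.test, and the -vv verbosity makes the new self.log('Working on: ', ...) lines visible in the output.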