This commit is contained in:
Kovid Goyal 2009-12-10 10:25:18 -07:00
parent 5ce0d7aba8
commit cd5131cf60
3 changed files with 236 additions and 276 deletions

View File

@@ -1,45 +1,45 @@
from calibre.web.feeds.news import BasicNewsRecipe


class BangkokPostRecipe(BasicNewsRecipe):
    """Fetch the Bangkok Post RSS feeds and reduce each page to the article body."""

    __license__ = 'GPL v3'
    __author__ = 'kwetal'
    language = 'en_TH'
    version = 1

    title = u'Bangkok Post'
    publisher = u'Post Publishing PCL'
    category = u'News'
    description = u'The world\'s window to Thailand'

    oldest_article = 7
    max_articles_per_feed = 100

    no_stylesheets = True
    remove_javascript = True
    use_embedded_content = False

    # Feed list taken from: http://www.bangkokpost.com/rss/
    # NOTE(review): the 'Tech' feed URL really ends in 'tect.xml' -- looks like
    # a typo for 'tech.xml', but it is kept byte-identical here; verify against
    # the publisher's RSS index before changing it.
    feeds = [
        (u'Breaking News', u'http://www.bangkokpost.com/rss/data/breakingnews.xml'),
        (u'Top Stories', u'http://www.bangkokpost.com/rss/data/topstories.xml'),
        (u'News', u'http://www.bangkokpost.com/rss/data/news.xml'),
        (u'Business', u'http://www.bangkokpost.com/rss/data/business.xml'),
        (u'Opinion', u'http://www.bangkokpost.com/rss/data/opinion.xml'),
        (u'Travel', u'http://www.bangkokpost.com/rss/data/travel.xml'),
        (u'Leisure', u'http://www.bangkokpost.com/rss/data/leisure.xml'),
        (u'Entertainment', u'http://www.bangkokpost.com/rss/data/entertainment.xml'),
        (u'Auto', u'http://www.bangkokpost.com/rss/data/auto.xml'),
        (u'Life', u'http://www.bangkokpost.com/rss/data/life.xml'),
        (u'Tech', u'http://www.bangkokpost.com/rss/data/tect.xml'),
    ]

    # Keep only the article body.
    keep_only_tags = [dict(name='div', attrs={'class': 'entry'})]

    # Their YouTube movies are displayed in an iframe; if you want those you
    # will have to parse the articles by hand. Setting self.recursion to 1,
    # which might resolve this, makes calibre download a lot of PDF files,
    # which causes a very, very, very long download time.
    remove_tags = [
        dict(name='div', attrs={'class': 'article-features'}),
        dict(name='div', attrs={'class': 'socialBookmark'}),
        dict(name='div', attrs={'id': 'main-sns'}),
        dict(name='iframe'),
    ]

View File

@@ -1,97 +1,97 @@
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag, Comment


class GlennBeckRecipe(BasicNewsRecipe):
    """Fetch the Glenn Beck feed and rebuild each article in a fresh document."""

    __license__ = 'GPL v3'
    __author__ = 'kwetal'
    language = 'en'
    version = 1

    title = u'Glenn Beck'
    publisher = u'Premiere Radio Networks'
    category = u'News, Opinion'
    description = u'The fusion of entertainment and enlightenment'

    oldest_article = 7
    max_articles_per_feed = 100

    no_stylesheets = True
    remove_javascript = True
    use_embedded_content = False

    feeds = [(u'Glenn Beck', u'http://feeds.feedburner.com/GlennBeckArticles')]

    def preprocess_html(self, soup):
        """
        Rebuild the article content in an empty soup.

        The site's HTML is horribly broken (illegal tag nesting), so asking
        BeautifulSoup for the content div yields only the headline. Instead we
        copy the headline and every usable paragraph across by hand.
        """
        # Known junk: the 'extraInfo' div and any embedded iframes.
        extra = soup.find('div', attrs={'id': 'extraInfo'})
        if extra:
            extra.extract()
        for frame in soup.findAll('iframe'):
            frame.extract()

        # Start from an empty document.
        fresh = self.getFreshSoup()

        # The content div itself is broken, but its headline is reachable.
        detail = soup.find('div', attrs={'class': 'news-detail'})
        if detail and detail.h1:
            fresh.body.append(detail.h1)

        # The content is wrapped in <p></p> tags -- most of the time, anyway.
        copied = 0
        for para in soup.findAll('p'):
            # Skip the 'smalltextwhite' paragraph; everything else is content.
            if para.get('class') == 'smalltextwhite':
                continue
            fresh.body.append(para)
            copied += 1

        # When the content is NOT wrapped in <p> tags the count stays low;
        # 2 is the magic number that seems to work.
        if copied <= 2:
            # They are playing hard-to-get: first throw out all HTML comments.
            for comment in soup.findAll(text=lambda text: isinstance(text, Comment)):
                comment.extract()
            # Then wrap every non-empty text node sitting directly under
            # <body> (ignoring bare '&nbsp;') in a paragraph of its own.
            for txt in soup.findAll(text=True):
                raw = txt.strip()
                if (txt.parent.name == 'body' and len(raw) > 0) and not (len(raw) == 6 and raw == '&nbsp;'):
                    para = Tag(fresh, 'p')
                    para.append(raw)
                    fresh.body.append(para)
                    copied += 1
            # Still (nearly) empty: they did something completely different.
            # As a last resort, append the whole content div, just in case.
            if copied < 2:
                fresh.body.append(detail)

        return fresh

    def getFreshSoup(self, title=None):
        """Return a new, empty BeautifulSoup document, optionally with a title."""
        if not title:
            return BeautifulSoup('<html><head><title></title></head><body></body></html>')
        return BeautifulSoup('<html><head><title>' + str(title) + '</title></head><body></body></html>')

View File

@@ -1,134 +1,94 @@
from calibre.web.feeds.news import BasicNewsRecipe
import re


class HuffingtonPostRecipe(BasicNewsRecipe):
    """
    Fetch The Huffington Post's vertical News/Blog feeds.

    Uses the embedded feed content directly (use_embedded_content = True),
    so only minimal FeedBurner cruft needs to be stripped.
    """

    __license__ = 'GPL v3'
    __author__ = 'kwetal'
    language = 'en'
    version = 2

    title = u'The Huffington Post'
    publisher = u'huffingtonpost.com'
    category = u'News, Politics'
    description = u'Political Blog'

    oldest_article = 1.1
    max_articles_per_feed = 100
    use_embedded_content = True

    encoding = 'utf-8'
    remove_empty_feeds = True

    # Feeds from: http://www.huffingtonpost.com/syndication/
    # The commented-out 'index.xml' feeds are left disabled on purpose; the
    # News/Blog pairs below cover the same verticals.
    feeds = []
    feeds.append((u'Latest News', u'http://feeds.huffingtonpost.com/huffingtonpost/LatestNews'))

    #feeds.append((u'Politics', u'http://www.huffingtonpost.com/feeds/verticals/politics/index.xml'))
    feeds.append((u'Politics: News', u'http://www.huffingtonpost.com/feeds/verticals/politics/news.xml'))
    feeds.append((u'Politics: Blog', u'http://www.huffingtonpost.com/feeds/verticals/politics/blog.xml'))

    #feeds.append((u'Media', u'http://www.huffingtonpost.com/feeds/verticals/media/index.xml'))
    feeds.append((u'Media: News', u'http://www.huffingtonpost.com/feeds/verticals/media/news.xml'))
    feeds.append((u'Media: Blog', u'http://www.huffingtonpost.com/feeds/verticals/media/blog.xml'))

    #feeds.append((u'Business', u'http://www.huffingtonpost.com/feeds/verticals/business/index.xml'))
    feeds.append((u'Business: News', u'http://www.huffingtonpost.com/feeds/verticals/business/news.xml'))
    feeds.append((u'Business: Blogs', u'http://www.huffingtonpost.com/feeds/verticals/business/blog.xml'))

    #feeds.append((u'Entertainment', u'http://www.huffingtonpost.com/feeds/verticals/entertainment/index.xml'))
    # FIX: this URL previously pointed at verticals/business/news.xml (a
    # copy-paste slip from the Business block above); it now uses the
    # Entertainment vertical's own news feed.
    feeds.append((u'Entertainment: News', u'http://www.huffingtonpost.com/feeds/verticals/entertainment/news.xml'))
    feeds.append((u'Entertainment: Blog', u'http://www.huffingtonpost.com/feeds/verticals/entertainment/blog.xml'))

    #feeds.append((u'Living', u'http://www.huffingtonpost.com/feeds/verticals/living/index.xml'))
    feeds.append((u'Living: News', u'http://www.huffingtonpost.com/feeds/verticals/living/news.xml'))
    feeds.append((u'Living: Blog', u'http://www.huffingtonpost.com/feeds/verticals/living/blog.xml'))

    #feeds.append((u'Style', u'http://www.huffingtonpost.com/feeds/verticals/style/index.xml'))
    feeds.append((u'Style: News', u'http://www.huffingtonpost.com/feeds/verticals/style/news.xml'))
    feeds.append((u'Style: Blog', u'http://www.huffingtonpost.com/feeds/verticals/style/blog.xml'))

    #feeds.append((u'Green', u'http://www.huffingtonpost.com/feeds/verticals/green/index.xml'))
    feeds.append((u'Green: News', u'http://www.huffingtonpost.com/feeds/verticals/green/news.xml'))
    feeds.append((u'Green: Blog', u'http://www.huffingtonpost.com/feeds/verticals/green/blog.xml'))

    #feeds.append((u'Technology', u'http://www.huffingtonpost.com/feeds/verticals/technology/index.xml'))
    feeds.append((u'Technology: News', u'http://www.huffingtonpost.com/feeds/verticals/technology/news.xml'))
    feeds.append((u'Technology: Blog', u'http://www.huffingtonpost.com/feeds/verticals/technology/blog.xml'))

    #feeds.append((u'Comedy', u'http://www.huffingtonpost.com/feeds/verticals/comedy/index.xml'))
    feeds.append((u'Comedy: News', u'http://www.huffingtonpost.com/feeds/verticals/comedy/news.xml'))
    feeds.append((u'Comedy: Blog', u'http://www.huffingtonpost.com/feeds/verticals/comedy/blog.xml'))

    #feeds.append((u'World', u'http://www.huffingtonpost.com/feeds/verticals/world/index.xml'))
    feeds.append((u'World: News', u'http://www.huffingtonpost.com/feeds/verticals/world/news.xml'))
    feeds.append((u'World: Blog', u'http://www.huffingtonpost.com/feeds/verticals/world/blog.xml'))

    feeds.append((u'Original Reporting', u'http://www.huffingtonpost.com/tag/huffpolitics/feed'))
    feeds.append((u'Original Posts', u'http://www.huffingtonpost.com/feeds/original_posts/index.xml'))

    remove_tags = []
    # Strip FeedBurner ad links and flare. Raw string and an escaped '.' in
    # '.net' so the pattern matches only the literal doubleclick.net host.
    remove_tags.append(dict(name='a', attrs={'href': re.compile(r'http://feedads\.g\.doubleclick\.net.*')}))
    remove_tags.append(dict(name='div', attrs={'class': 'feedflare'}))

    remove_attributes = ['style']

    extra_css = '''
                body{font-family:verdana,arial,helvetica,geneva,sans-serif ;}
                h2{font-size: x-large; font-weight: bold; padding: 0em; margin-bottom: 0.2em;}
                a[href]{color: blue; text-decoration: none; cursor: pointer;}
                '''

    def get_article_url(self, article):
        """
        Workaround for feedparser behaviour: when an item carries more than
        one <link/> element, article.link is empty and article.links holds a
        list of link dictionaries instead.

        Returns the article URL, or None when no usable link is present.
        """
        link = article.get('link')
        if not link:
            links = article.get('links')
            if links:
                # Pick the first entry that actually has an 'href' rather
                # than blindly trusting index 0 (resolves the old Todo and
                # avoids a KeyError on href-less entries).
                link = next((entry['href'] for entry in links if 'href' in entry), None)
        return link