Big Oven by Starson17 and estadao updated. Another fix for metadata download with null titles

This commit is contained in:
Kovid Goyal 2010-07-03 16:14:23 -06:00
parent e151e3a1d5
commit e019322f3c
3 changed files with 85 additions and 17 deletions

View File

@@ -0,0 +1,64 @@
from calibre.web.feeds.news import BasicNewsRecipe

class BigOven(BasicNewsRecipe):
    title                 = 'BigOven'
    __author__            = 'Starson17'
    description           = 'Recipes for the Foodie in us all. Registration is free. A fake username and password just gives smaller photos.'
    language              = 'en'
    category              = 'news, food, recipes, gourmet'
    publisher             = 'Starson17'
    use_embedded_content  = False
    no_stylesheets        = True
    oldest_article        = 24
    remove_javascript     = True
    remove_empty_feeds    = True
    cover_url             = 'http://www.software.com/images/products/BigOven%20Logo_177_216.JPG'
    max_articles_per_feed = 30
    needs_subscription    = True

    conversion_options = {'linearize_tables' : True
                          , 'comment'        : description
                          , 'tags'           : category
                          , 'publisher'      : publisher
                          , 'language'       : language
                          }

    def get_browser(self):
        br = BasicNewsRecipe.get_browser()
        if self.username is not None and self.password is not None:
            br.open('http://www.bigoven.com/')
            br.select_form(name='form1')
            br['TopMenu_bo1$email']    = self.username
            br['TopMenu_bo1$password'] = self.password
            br.submit()
        return br

    remove_attributes = ['style', 'font']

    keep_only_tags = [dict(name='h1')
                      ,dict(name='div', attrs={'class':'img'})
                      ,dict(name='div', attrs={'id':'intro'})
                      ]

    remove_tags = [dict(name='div', attrs={'style':["overflow: visible;"]})
                   ,dict(name='div', attrs={'class':['ctas']})
                   #,dict(name='a', attrs={'class':['edit']})
                   ,dict(name='p', attrs={'class':['byline']})
                   ]

    feeds = [(u'4 & 5 Star Rated Recipes', u'http://feeds.feedburner.com/Bigovencom-RecipeRaves?format=xml')]

    def preprocess_html(self, soup):
        for tag in soup.findAll(name='a', attrs={'class':['edit']}):
            tag.parent.extract()
        for tag in soup.findAll(name='a', attrs={'class':['deflink']}):
            tag.replaceWith(tag.string)
        return soup

    extra_css = '''
                h1{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:large;}
                h2{font-family:Arial,Helvetica,sans-serif; font-weight:normal;font-size:medium;}
                p{font-family:Arial,Helvetica,sans-serif;font-size:small;}
                body{font-family:Helvetica,Arial,sans-serif;font-size:small;}
                '''
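As a sanity check on the cleanup logic above, here is a minimal sketch of what the preprocess_html pass does to a page; the HTML fragment is invented for illustration, and the import assumes the BeautifulSoup module bundled with calibre.

# Minimal sketch of the preprocess_html behaviour above; the HTML fragment
# is made up, and the import assumes calibre's bundled BeautifulSoup.
from calibre.ebooks.BeautifulSoup import BeautifulSoup

html = ('<div id="intro"><p>Whisk in the '
        '<a class="deflink" href="/glossary/roux">roux</a>.</p>'
        '<p><a class="edit" href="#">Edit this recipe</a></p></div>')
soup = BeautifulSoup(html)

# 'edit' links: remove the whole parent element (here the surrounding <p>).
for tag in soup.findAll(name='a', attrs={'class': ['edit']}):
    tag.parent.extract()

# 'deflink' glossary links: keep only the link text, drop the link itself.
for tag in soup.findAll(name='a', attrs={'class': ['deflink']}):
    tag.replaceWith(tag.string)

print(soup)  # roughly: <div id="intro"><p>Whisk in the roux.</p></div>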

View File

@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 __license__ = 'GPL v3'
-__copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>'
+__copyright__ = '2010, elsuave'
 '''
 estadao.com.br
 '''
@@ -10,12 +10,12 @@ from calibre.web.feeds.news import BasicNewsRecipe
 class Estadao(BasicNewsRecipe):
     title                 = 'O Estado de S. Paulo'
-    __author__            = 'Darko Miletic'
+    __author__            = 'elsuave (modified from Darko Miletic)'
     description           = 'News from Brasil in Portuguese'
     publisher             = 'O Estado de S. Paulo'
     category              = 'news, politics, Brasil'
     oldest_article        = 2
-    max_articles_per_feed = 100
+    max_articles_per_feed = 25
     no_stylesheets        = True
     use_embedded_content  = False
     encoding              = 'utf8'
@@ -30,13 +30,14 @@ class Estadao(BasicNewsRecipe):
     html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"'
-    keep_only_tags = [dict(name='div', attrs={'id':'c1'})]
+    keep_only_tags = [
+                       dict(name='div', attrs={'class':['bb-md-noticia','c5']})
+                     ]
     remove_tags = [
                     dict(name=['script','object','form','ul'])
-                   ,dict(name='div', attrs={'id':['votacao','estadaohoje']})
-                   ,dict(name='p', attrs={'id':'ctrl_texto'})
-                   ,dict(name='p', attrs={'class':'texto'})
+                   ,dict(name='div', attrs={'class':['fnt2 Color_04 bold','right fnt2 innerTop15 dvTmFont','™_01 right outerLeft15','tituloBox','tags']})
+                   ,dict(name='div', attrs={'id':['bb-md-noticia-subcom']})
                   ]
     feeds = [
@@ -51,13 +52,12 @@ class Estadao(BasicNewsRecipe):
              ,(u'Vida &', u'http://www.estadao.com.br/rss/vidae.xml')
             ]
     def preprocess_html(self, soup):
-        ifr = soup.find('iframe')
-        if ifr:
-            ifr.extract()
         for item in soup.findAll(style=True):
             del item['style']
         return soup
     language = 'pt'
+    def get_article_url(self, article):
+        url = BasicNewsRecipe.get_article_url(self, article)
+        if '/Multimidia/' not in url:
+            return url
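The new get_article_url override is how the recipe skips Estadão's multimedia entries: when a feed item's URL contains '/Multimidia/' the method falls through and returns None, leaving the item without a URL to download. A tiny standalone sketch of the same check (the helper name and sample URLs are made up for illustration):

# Standalone sketch of the URL filter added above; the helper name and the
# sample URLs are invented, not taken from the feed.
def filter_multimedia(url):
    # Ordinary article URLs pass through; multimedia pages fall off the end
    # of the function and yield None.
    if '/Multimidia/' not in url:
        return url

print(filter_multimedia('http://www.estadao.com.br/noticias/cidades,exemplo,123.htm'))  # the URL
print(filter_multimedia('http://www.estadao.com.br/Multimidia/video,exemplo,456.htm'))  # None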

View File

@@ -351,9 +351,13 @@ def search(title=None, author=None, publisher=None, isbn=None, isbndb_key=None,
     if len(results) > 1:
         if not results[0].comments or len(results[0].comments) == 0:
             for r in results[1:]:
-                if title.lower() == r.title[:len(title)].lower() and r.comments and len(r.comments):
-                    results[0].comments = r.comments
-                    break
+                try:
+                    if title and title.lower() == r.title[:len(title)].lower() \
+                            and r.comments and len(r.comments):
+                        results[0].comments = r.comments
+                        break
+                except:
+                    pass
     # Find a pubdate
     pubdate = None
     for r in results:
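This last hunk is the "null titles" part of the commit message: if the query or one of the downloaded results has no title, the old comparison dereferences None. A minimal reproduction with made-up stand-in values (plain strings and None, not calibre result objects):

# Made-up stand-in values showing the failure the new `if title and ...`
# guard plus try/except protect against.
title = None                # metadata query issued without a title
r_title = 'Some Book'       # title found on another result

try:
    title.lower() == r_title[:len(title)].lower()   # the old, unguarded comparison
except (AttributeError, TypeError) as e:
    print(e)                # 'NoneType' object has no attribute 'lower'

# The committed fix: only compare when title is set, and let the surrounding
# try/except swallow anything else (e.g. r.title itself being None).
if title:
    try:
        if title.lower() == r_title[:len(title)].lower():
            print('titles match; comments would be copied over')
    except Exception:
        pass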