diff --git a/resources/recipes/big_oven.recipe b/resources/recipes/big_oven.recipe
new file mode 100644
index 0000000000..e1636daf72
--- /dev/null
+++ b/resources/recipes/big_oven.recipe
@@ -0,0 +1,64 @@
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class BigOven(BasicNewsRecipe):
+    title = 'BigOven'
+    __author__ = 'Starson17'
+    description = 'Recipes for the Foodie in us all. Registration is free. A fake username and password just gives smaller photos.'
+    language = 'en'
+    category = 'news, food, recipes, gourmet'
+    publisher = 'Starson17'
+    use_embedded_content = False
+    no_stylesheets = True
+    oldest_article = 24
+    remove_javascript = True
+    remove_empty_feeds = True
+    cover_url = 'http://www.software.com/images/products/BigOven%20Logo_177_216.JPG'
+    max_articles_per_feed = 30
+    needs_subscription = True
+
+    conversion_options = {'linearize_tables' : True
+                          , 'comment' : description
+                          , 'tags' : category
+                          , 'publisher' : publisher
+                          , 'language' : language
+                          }
+
+    def get_browser(self):
+        br = BasicNewsRecipe.get_browser()
+        if self.username is not None and self.password is not None:
+            br.open('http://www.bigoven.com/')
+            br.select_form(name='form1')
+            br['TopMenu_bo1$email'] = self.username
+            br['TopMenu_bo1$password'] = self.password
+            br.submit()
+        return br
+
+    remove_attributes = ['style', 'font']
+
+    keep_only_tags = [dict(name='h1')
+                      ,dict(name='div', attrs={'class':'img'})
+                      ,dict(name='div', attrs={'id':'intro'})
+                      ]
+
+    remove_tags = [dict(name='div', attrs={'style':["overflow: visible;"]})
+                   ,dict(name='div', attrs={'class':['ctas']})
+                   #,dict(name='a', attrs={'class':['edit']})
+                   ,dict(name='p', attrs={'class':['byline']})
+                   ]
+
+    feeds = [(u'4 & 5 Star Rated Recipes', u'http://feeds.feedburner.com/Bigovencom-RecipeRaves?format=xml')]
+
+    def preprocess_html(self, soup):
+        for tag in soup.findAll(name='a', attrs={'class':['edit']}):
+            tag.parent.extract()
+        for tag in soup.findAll(name='a', attrs={'class':['deflink']}):
+            tag.replaceWith(tag.string)
+        return soup
+
+    extra_css = '''
+                h1{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:large;}
+                h2{font-family:Arial,Helvetica,sans-serif; font-weight:normal;font-size:medium;}
+                p{font-family:Arial,Helvetica,sans-serif;font-size:small;}
+                body{font-family:Helvetica,Arial,sans-serif;font-size:small;}
+                '''
+
diff --git a/resources/recipes/estadao.recipe b/resources/recipes/estadao.recipe
index e42eb0574d..4e520c1135 100644
--- a/resources/recipes/estadao.recipe
+++ b/resources/recipes/estadao.recipe
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 
 __license__ = 'GPL v3'
-__copyright__ = '2009, Darko Miletic '
+__copyright__ = '2010, elsuave'
 '''
 estadao.com.br
 '''
@@ -10,12 +10,12 @@ from calibre.web.feeds.news import BasicNewsRecipe
 
 class Estadao(BasicNewsRecipe):
     title = 'O Estado de S. Paulo'
-    __author__ = 'Darko Miletic'
+    __author__ = 'elsuave (modified from Darko Miletic)'
     description = 'News from Brasil in Portuguese'
     publisher = 'O Estado de S. Paulo'
     category = 'news, politics, Brasil'
     oldest_article = 2
-    max_articles_per_feed = 100
+    max_articles_per_feed = 25
     no_stylesheets = True
     use_embedded_content = False
     encoding = 'utf8'
@@ -30,13 +30,14 @@ class Estadao(BasicNewsRecipe):
 
     html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"'
 
-    keep_only_tags = [dict(name='div', attrs={'id':'c1'})]
+    keep_only_tags = [
+        dict(name='div', attrs={'class':['bb-md-noticia','c5']})
+        ]
 
     remove_tags = [
        dict(name=['script','object','form','ul'])
-       ,dict(name='div', attrs={'id':['votacao','estadaohoje']})
-       ,dict(name='p', attrs={'id':'ctrl_texto'})
-       ,dict(name='p', attrs={'class':'texto'})
+       ,dict(name='div', attrs={'class':['fnt2 Color_04 bold','right fnt2 innerTop15 dvTmFont','™_01 right outerLeft15','tituloBox','tags']})
+       ,dict(name='div', attrs={'id':['bb-md-noticia-subcom']})
        ]
 
     feeds = [
@@ -51,13 +52,12 @@ class Estadao(BasicNewsRecipe):
         ,(u'Vida &', u'http://www.estadao.com.br/rss/vidae.xml')
         ]
 
-    def preprocess_html(self, soup):
-        ifr = soup.find('iframe')
-        if ifr:
-            ifr.extract()
-        for item in soup.findAll(style=True):
-            del item['style']
-        return soup
+    language = 'pt'
+
+    def get_article_url(self, article):
+        url = BasicNewsRecipe.get_article_url(self, article)
+        if '/Multimidia/' not in url:
+            return url
+
diff --git a/src/calibre/ebooks/metadata/fetch.py b/src/calibre/ebooks/metadata/fetch.py
index cb75d93f59..0613f64bfb 100644
--- a/src/calibre/ebooks/metadata/fetch.py
+++ b/src/calibre/ebooks/metadata/fetch.py
@@ -351,9 +351,13 @@ def search(title=None, author=None, publisher=None, isbn=None, isbndb_key=None,
     if len(results) > 1:
         if not results[0].comments or len(results[0].comments) == 0:
             for r in results[1:]:
-                if title.lower() == r.title[:len(title)].lower() and r.comments and len(r.comments):
-                    results[0].comments = r.comments
-                    break
+                try:
+                    if title and title.lower() == r.title[:len(title)].lower() \
+                            and r.comments and len(r.comments):
+                        results[0].comments = r.comments
+                        break
+                except:
+                    pass
     # Find a pubdate
     pubdate = None
     for r in results: