From 62759f161b15a4a3d1f126bc0c3f1add41aad878 Mon Sep 17 00:00:00 2001 From: Starson17 Date: Wed, 10 Nov 2010 11:58:15 -0500 Subject: [PATCH 01/11] Sorted user recipes in serialize_collection --- src/calibre/web/feeds/recipes/collection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/calibre/web/feeds/recipes/collection.py b/src/calibre/web/feeds/recipes/collection.py index 012e24a799..cc96131c4b 100644 --- a/src/calibre/web/feeds/recipes/collection.py +++ b/src/calibre/web/feeds/recipes/collection.py @@ -61,8 +61,8 @@ def serialize_recipe(urn, recipe_class): def serialize_collection(mapping_of_recipe_classes): collection = E.recipe_collection() - for urn, recipe_class in mapping_of_recipe_classes.items(): - recipe = serialize_recipe(urn, recipe_class) + for urn in sorted(mapping_of_recipe_classes.keys(), key = lambda key: mapping_of_recipe_classes[key].title): + recipe = serialize_recipe(urn, mapping_of_recipe_classes[urn]) collection.append(recipe) collection.set('count', str(len(collection))) return etree.tostring(collection, encoding='utf-8', xml_declaration=True, From 1b6bda73d191214de5234ed4a13269e22f274044 Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Sat, 20 Nov 2010 07:41:00 -0700 Subject: [PATCH 02/11] Fix #7592 (Merging Ebooks does not work in Windows Vista/Dutch language Calibre) --- src/calibre/translations/nl.po | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/calibre/translations/nl.po b/src/calibre/translations/nl.po index f6dd336ba0..b4c0d2bba9 100644 --- a/src/calibre/translations/nl.po +++ b/src/calibre/translations/nl.po @@ -4345,7 +4345,7 @@ msgid "" "changed.

Please confirm you want to proceed." msgstr "" "Boekformaten en metagegevens van de selectie zullen toegevoegd worden aan " -"het eerst geselecteerde boek. ISBN zal niet samengevoegd " +"het eerst geselecteerde boek (%s). ISBN zal niet samengevoegd " "worden.

De geselecteerde boeken zullen niet verwijderd of aangepast " "worden.

Bevestig als je wilt doorgaan." @@ -4360,7 +4360,7 @@ msgid "" "you sure you want to proceed?" msgstr "" "Boekformaten en metagegevens van de selectie zullen toegevoegd worden aan " -"het eerst geselecteerde boek. ISBN zal niet samengevoegd " +"het eerst geselecteerde boek (%s). ISBN zal niet samengevoegd " "worden.

Na samenvoeging zullen de geselecteerde boeken van je " "computer verwijderd worden.

Weet je zeker dat je door wilt " "gaan?" From a2413968545e14c4442b053188e264c87e06f14d Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Sat, 20 Nov 2010 08:10:55 -0700 Subject: [PATCH 03/11] Zeit Online Premium by Steffen Siebert --- resources/recipes/zeitde_sub.recipe | 63 +++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 resources/recipes/zeitde_sub.recipe diff --git a/resources/recipes/zeitde_sub.recipe b/resources/recipes/zeitde_sub.recipe new file mode 100644 index 0000000000..5014837c5b --- /dev/null +++ b/resources/recipes/zeitde_sub.recipe @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# -*- coding: utf-8 mode: python -*- + +__license__ = 'GPL v3' +__copyright__ = '2010, Steffen Siebert ' +__docformat__ = 'restructuredtext de' +__version__ = '1.1' + +""" +Die Zeit EPUB +""" + +import os, urllib2, zipfile, re +from calibre.web.feeds.news import BasicNewsRecipe +from calibre.ptempfile import PersistentTemporaryFile + +class ZeitEPUBAbo(BasicNewsRecipe): + + title = u'Zeit Online Premium' + description = u'Das EPUB Abo der Zeit (needs subscription)' + language = 'de' + lang = 'de-DE' + + __author__ = 'Steffen Siebert' + needs_subscription = True + + conversion_options = { + 'no_default_epub_cover' : True + } + + def build_index(self): + domain = "http://premium.zeit.de" + url = domain + "/abovorteile/cgi-bin/_er_member/p4z.fpl?ER_Do=getUserData&ER_NextTemplate=login_ok" + + browser = self.get_browser() + browser.add_password("http://premium.zeit.de", self.username, self.password) + + try: + browser.open(url) + except urllib2.HTTPError: + self.report_progress(0,_("Can't login to download issue")) + raise ValueError('Failed to login, check your username and password') + + response = browser.follow_link(text="DIE ZEIT als E-Paper") + response = browser.follow_link(url_regex=re.compile('^http://contentserver.hgv-online.de/nodrm/fulfillment\\?distributor=zeit-online&orderid=zeit_online.*')) + + tmp = PersistentTemporaryFile(suffix='.epub') + self.report_progress(0,_('downloading epub')) + tmp.write(response.read()) + tmp.close() + + zfile = zipfile.ZipFile(tmp.name, 'r') + self.report_progress(0,_('extracting epub')) + + zfile.extractall(self.output_dir) + + tmp.close() + index = os.path.join(self.output_dir, 'content.opf') + + self.report_progress(1,_('epub downloaded and extracted')) + + return index + From e22de1bbdb1c663d5b1520238f8cab65e5a24aaa Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Sat, 20 Nov 2010 08:32:48 -0700 Subject: [PATCH 04/11] Add cover to spiegel --- resources/recipes/spiegelde.recipe | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/resources/recipes/spiegelde.recipe b/resources/recipes/spiegelde.recipe index 705ffd0f7a..4fed3818b0 100644 --- a/resources/recipes/spiegelde.recipe +++ b/resources/recipes/spiegelde.recipe @@ -6,6 +6,7 @@ __copyright__ = '2009, Darko Miletic ' spiegel.de ''' +from time import strftime from calibre.web.feeds.news import BasicNewsRecipe class Spiegel_ger(BasicNewsRecipe): @@ -44,3 +45,6 @@ class Spiegel_ger(BasicNewsRecipe): rmain, rsep, rrest = main.rpartition(',') purl = rmain + ',druck-' + rrest + ',' + rest return purl + + def get_cover_url(self): + return 'http://wissen.spiegel.de/wissen/titel/SP/' + strftime("%Y/%W/%j/titel.jpg") From 4927b589e1de600448a350c90c3c2d851f903052 Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Sat, 20 Nov 2010 08:34:57 -0700 Subject: [PATCH 05/11] TSN by Nexus --- resources/recipes/tsn.recipe | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create 
mode 100644 resources/recipes/tsn.recipe diff --git a/resources/recipes/tsn.recipe b/resources/recipes/tsn.recipe new file mode 100644 index 0000000000..e822ebc633 --- /dev/null +++ b/resources/recipes/tsn.recipe @@ -0,0 +1,34 @@ +from calibre.web.feeds.news import BasicNewsRecipe + +class AdvancedUserRecipe1289990851(BasicNewsRecipe): + title = u'TSN' + oldest_article = 7 + max_articles_per_feed = 50 + language = 'en_CA' + __author__ = 'Nexus' + no_stylesheets = True + INDEX = 'http://tsn.ca/nhl/story/?id=nhl' + keep_only_tags = [dict(name='div', attrs={'id':['tsnColWrap']}), + dict(name='div', attrs={'id':['tsnStory']})] + remove_tags = [dict(name='div', attrs={'id':'tsnRelated'}), + dict(name='div', attrs={'class':'textSize'})] + + def parse_index(self): + feeds = [] + soup = self.index_to_soup(self.INDEX) + feed_parts = soup.findAll('div', attrs={'class': 'feature'}) + for feed_part in feed_parts: + articles = [] + if not feed_part.h2: + continue + feed_title = feed_part.h2.string + article_parts = feed_part.findAll('a') + for article_part in article_parts: + article_title = article_part.string + article_date = '' + article_url = 'http://tsn.ca/' + article_part['href'] + articles.append({'title': article_title, 'url': article_url, 'description':'', 'date':article_date}) + if articles: + feeds.append((feed_title, articles)) + return feeds + From 54f93e96b7daa979f45b50e696a7cea369323dfd Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Sat, 20 Nov 2010 08:43:27 -0700 Subject: [PATCH 06/11] Improved NY times --- resources/recipes/nytimes.recipe | 304 +++++++++++++++++++-------- resources/recipes/nytimes_sub.recipe | 304 +++++++++++++++++++-------- 2 files changed, 440 insertions(+), 168 deletions(-) diff --git a/resources/recipes/nytimes.recipe b/resources/recipes/nytimes.recipe index 16ddea9f8c..fbb4641580 100644 --- a/resources/recipes/nytimes.recipe +++ b/resources/recipes/nytimes.recipe @@ -7,14 +7,22 @@ nytimes.com ''' import re, string, time from calibre import entity_to_unicode, strftime +from datetime import timedelta, date from calibre.web.feeds.recipes import BasicNewsRecipe from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag, BeautifulStoneSoup + class NYTimes(BasicNewsRecipe): - # set headlinesOnly to True for the headlines-only version + # set headlinesOnly to True for the headlines-only version. If True, webEdition is ignored. headlinesOnly = True + # set webEdition to True for the Web edition of the newspaper. Set oldest_article to the + # number of days old an article can be for inclusion. If oldest_article = 0 all articles + # will be included. Note: oldest_article is ignored if webEdition = False + webEdition = False + oldest_article = 7 + # includeSections: List of sections to include. If empty, all sections found will be included. # Otherwise, only the sections named will be included. For example, # @@ -39,20 +47,76 @@ class NYTimes(BasicNewsRecipe): # from an article (if one exists). If one_picture_per_article = True, the image # will be moved to a location between the headline and the byline. # If one_picture_per_article = False, all images from the article will be included - # and shown in their original location. - one_picture_per_article = True + one_picture_per_article = False # The maximum number of articles that will be downloaded max_articles_per_feed = 100 + # Whether to omit duplicates of articles (typically arsing when articles are indexed in + # more than one section). If True, only the first occurance will be downloaded. 
+ filterDuplicates = True + + # Sections to collect for the Web edition. + # Delete any you don't want, or use includeSections or excludeSections + web_sections = [(u'World',u'world'), + (u'U.S.',u'national'), + (u'Politics',u'politics'), + (u'New York',u'nyregion'), + (u'Business','business'), + (u'Technology',u'technology'), + (u'Sports',u'sports'), + (u'Science',u'science'), + (u'Health',u'health'), + (u'Opinion',u'opinion'), + (u'Arts',u'arts'), + (u'Books',u'books'), + (u'Movies',u'movies'), + (u'Music',u'arts/music'), + (u'Television',u'arts/television'), + (u'Style',u'style'), + (u'Dining & Wine',u'dining'), + (u'Fashion & Style',u'fashion'), + (u'Home & Garden',u'garden'), + (u'Travel',u'travel'), + ('Education',u'education'), + ('Multimedia',u'multimedia'), + (u'Obituaries',u'obituaries'), + (u'Sunday Magazine',u'magazine'), + (u'Week in Review',u'weekinreview')] + if headlinesOnly: title='New York Times Headlines' description = 'Headlines from the New York Times' + needs_subscription = False + elif webEdition: + title='New York Times (Web)' + description = 'New York Times on the Web' + needs_subscription = True else: title='New York Times' description = 'Today\'s New York Times' + needs_subscription = True + + + month_list = ['january','february','march','april','may','june','july','august','september','october','november','december'] + + def decode_us_date(self,datestr): + udate = datestr.strip().lower().split() + try: + m = self.month_list.index(udate[0])+1 + except: + return date.today() + d = int(udate[1]) + y = int(udate[2]) + try: + d = date(y,m,d) + except: + d = date.today + return d + + earliest_date = date.today() - timedelta(days=oldest_article) __author__ = 'GRiker/Kovid Goyal/Nick Redding' language = 'en' @@ -136,6 +200,12 @@ class NYTimes(BasicNewsRecipe): .image {text-align: center;} .source {text-align: left; }''' + + articles = {} + key = None + ans = [] + url_list = [] + def filter_ans(self, ans) : total_article_count = 0 idx = 0 @@ -164,6 +234,29 @@ class NYTimes(BasicNewsRecipe): self.log( "Queued %d articles" % total_article_count ) return ans + def exclude_url(self,url): + if not url.startswith("http"): + return True + if not url.endswith(".html"): + return True + if 'nytimes.com' not in url: + return True + if 'podcast' in url: + return True + if '/video/' in url: + return True + if '/slideshow/' in url: + return True + if '/magazine/index' in url: + return True + if '/interactive/' in url: + return True + if '/reference/' in url: + return True + if '/premium/' in url: + return True + return False + def fixChars(self,string): # Replace lsquo (\x91) fixed = re.sub("\x91","‘",string) @@ -249,7 +342,6 @@ class NYTimes(BasicNewsRecipe): return BeautifulSoup(_raw, markupMassage=massage) # Entry point - print "index_to_soup()" soup = get_the_soup( self.encoding, url_or_raw ) contentType = soup.find(True,attrs={'http-equiv':'Content-Type'}) docEncoding = str(contentType)[str(contentType).find('charset=') + len('charset='):str(contentType).rfind('"')] @@ -273,83 +365,110 @@ class NYTimes(BasicNewsRecipe): else: return description - def parse_todays_index(self): + def feed_title(self,div): + return ''.join(div.findAll(text=True, recursive=True)).strip() - def feed_title(div): - return ''.join(div.findAll(text=True, recursive=True)).strip() - - articles = {} - key = None - ans = [] - url_list = [] - - def handle_article(div): - a = div.find('a', href=True) - if not a: + def handle_article(self,div): + thumbnail = div.find('div','thumbnail') + if thumbnail: + 
thumbnail.extract() + a = div.find('a', href=True) + if not a: + return + url = re.sub(r'\?.*', '', a['href']) + if self.exclude_url(url): + return + url += '?pagewanted=all' + if self.filterDuplicates: + if url in self.url_list: return - url = re.sub(r'\?.*', '', a['href']) - if not url.startswith("http"): - return - if not url.endswith(".html"): - return - if 'podcast' in url: - return - if '/video/' in url: - return - url += '?pagewanted=all' - if url in url_list: - return - url_list.append(url) - title = self.tag_to_string(a, use_alt=True).strip() - description = '' - pubdate = strftime('%a, %d %b') - summary = div.find(True, attrs={'class':'summary'}) - if summary: - description = self.tag_to_string(summary, use_alt=False) - author = '' + self.url_list.append(url) + title = self.tag_to_string(a, use_alt=True).strip() + description = '' + pubdate = strftime('%a, %d %b') + summary = div.find(True, attrs={'class':'summary'}) + if summary: + description = self.tag_to_string(summary, use_alt=False) + author = '' + authorAttribution = div.find(True, attrs={'class':'byline'}) + if authorAttribution: + author = self.tag_to_string(authorAttribution, use_alt=False) + else: authorAttribution = div.find(True, attrs={'class':'byline'}) if authorAttribution: author = self.tag_to_string(authorAttribution, use_alt=False) - else: - authorAttribution = div.find(True, attrs={'class':'byline'}) - if authorAttribution: - author = self.tag_to_string(authorAttribution, use_alt=False) - feed = key if key is not None else 'Uncategorized' - if not articles.has_key(feed): - ans.append(feed) - articles[feed] = [] - articles[feed].append( - dict(title=title, url=url, date=pubdate, - description=description, author=author, - content='')) + feed = self.key if self.key is not None else 'Uncategorized' + if not self.articles.has_key(feed): + self.ans.append(feed) + self.articles[feed] = [] + self.articles[feed].append( + dict(title=title, url=url, date=pubdate, + description=description, author=author, + content='')) + def parse_web_edition(self): + + for (sec_title,index_url) in self.web_sections: + if self.includeSections != []: + if sec_title not in self.includeSections: + print "SECTION NOT INCLUDED: ",sec_title + continue + if sec_title in self.excludeSections: + print "SECTION EXCLUDED: ",sec_title + continue + print 'Index URL: '+'http://www.nytimes.com/pages/'+index_url+'/index.html' + soup = self.index_to_soup('http://www.nytimes.com/pages/'+index_url+'/index.html') + self.key = sec_title + # Find each article + for div in soup.findAll(True, + attrs={'class':['section-headline', 'story', 'story headline','sectionHeader','headlinesOnly multiline flush']}): + if div['class'] in ['story', 'story headline'] : + self.handle_article(div) + elif div['class'] == 'headlinesOnly multiline flush': + for lidiv in div.findAll('li'): + self.handle_article(lidiv) + + self.ans = [(k, self.articles[k]) for k in self.ans if self.articles.has_key(k)] + return self.filter_ans(self.ans) + + + def parse_todays_index(self): + soup = self.index_to_soup('http://www.nytimes.com/pages/todayspaper/index.html') - + skipping = False # Find each article for div in soup.findAll(True, attrs={'class':['section-headline', 'story', 'story headline','sectionHeader','headlinesOnly multiline flush']}): if div['class'] in ['section-headline','sectionHeader']: - key = string.capwords(feed_title(div)) - key = key.replace('Op-ed','Op-Ed') - key = key.replace('U.s.','U.S.') + self.key = string.capwords(self.feed_title(div)) + self.key = 
self.key.replace('Op-ed','Op-Ed') + self.key = self.key.replace('U.s.','U.S.') + self.key = self.key.replace('N.y.','N.Y.') + skipping = False + if self.includeSections != []: + if self.key not in self.includeSections: + print "SECTION NOT INCLUDED: ",self.key + skipping = True + if self.key in self.excludeSections: + print "SECTION EXCLUDED: ",self.key + skipping = True + elif div['class'] in ['story', 'story headline'] : - handle_article(div) + if not skipping: + self.handle_article(div) elif div['class'] == 'headlinesOnly multiline flush': for lidiv in div.findAll('li'): - handle_article(lidiv) + if not skipping: + self.handle_article(lidiv) - ans = [(key, articles[key]) for key in ans if articles.has_key(key)] - return self.filter_ans(ans) + self.ans = [(k, self.articles[k]) for k in self.ans if self.articles.has_key(k)] + return self.filter_ans(self.ans) def parse_headline_index(self): - articles = {} - ans = [] - url_list = [] - soup = self.index_to_soup('http://www.nytimes.com/pages/todaysheadlines/') # Fetch the content table @@ -363,15 +482,24 @@ class NYTimes(BasicNewsRecipe): for td_col in content_table.findAll('td', {'id' : re.compile('Column')}): for div_sec in td_col.findAll('div',recursive=False): for h6_sec_name in div_sec.findAll('h6',{'style' : re.compile('text-transform: *uppercase')}): + section_name = self.tag_to_string(h6_sec_name,use_alt=False) section_name = re.sub(r'^ *$','',section_name) + if section_name == '': continue + if self.includeSections != []: + if section_name not in self.includeSections: + print "SECTION NOT INCLUDED: ",section_name + continue + if section_name in self.excludeSections: + print "SECTION EXCLUDED: ",section_name + continue + section_name=string.capwords(section_name) - if section_name == 'U.s.': - section_name = 'U.S.' 
- elif section_name == 'Op-ed': - section_name = 'Op-Ed' + section_name = section_name.replace('Op-ed','Op-Ed') + section_name = section_name.replace('U.s.','U.S.') + section_name = section_name.replace('N.y.','N.Y.') pubdate = strftime('%a, %d %b') search_div = div_sec @@ -392,37 +520,32 @@ class NYTimes(BasicNewsRecipe): if not a: continue url = re.sub(r'\?.*', '', a['href']) - if not url.startswith("http"): - continue - if not url.endswith(".html"): - continue - if 'podcast' in url: - continue - if 'video' in url: + if self.exclude_url(url): continue url += '?pagewanted=all' - if url in url_list: - continue - url_list.append(url) - self.log("URL %s" % url) + if self.filterDuplicates: + if url in self.url_list: + continue + self.url_list.append(url) title = self.tag_to_string(a, use_alt=True).strip() desc = h3_item.find('p') if desc is not None: description = self.tag_to_string(desc,use_alt=False) else: description = '' - if not articles.has_key(section_name): - ans.append(section_name) - articles[section_name] = [] - articles[section_name].append(dict(title=title, url=url, date=pubdate, description=description, author=author, content='')) + if not self.articles.has_key(section_name): + self.ans.append(section_name) + self.articles[section_name] = [] + self.articles[section_name].append(dict(title=title, url=url, date=pubdate, description=description, author=author, content='')) - - ans = [(key, articles[key]) for key in ans if articles.has_key(key)] - return self.filter_ans(ans) + self.ans = [(k, self.articles[k]) for k in self.ans if self.articles.has_key(k)] + return self.filter_ans(self.ans) def parse_index(self): if self.headlinesOnly: return self.parse_headline_index() + elif self.webEdition: + return self.parse_web_edition() else: return self.parse_todays_index() @@ -438,6 +561,21 @@ class NYTimes(BasicNewsRecipe): def preprocess_html(self, soup): + if self.webEdition & (self.oldest_article>0): + date_tag = soup.find(True,attrs={'class': ['dateline','date']}) + if date_tag: + date_str = self.tag_to_string(date_tag,use_alt=False) + date_str = date_str.replace('Published:','') + date_items = date_str.split(',') + try: + datestring = date_items[0]+' '+date_items[1] + article_date = self.decode_us_date(datestring) + except: + article_date = date.today() + if article_date < self.earliest_date: + self.log("Skipping article dated %s" % date_str) + return None + kicker_tag = soup.find(attrs={'class':'kicker'}) if kicker_tag: # remove Op_Ed author head shots tagline = self.tag_to_string(kicker_tag) @@ -462,7 +600,6 @@ class NYTimes(BasicNewsRecipe): for inlineImg in inlineImgs[1:]: inlineImg.extract() # Move firstImg before article body - #article_body = soup.find(True, {'id':'articleBody'}) cgFirst = soup.find(True, {'class':re.compile('columnGroup *first')}) if cgFirst: # Strip all sibling NavigableStrings: noise @@ -548,4 +685,3 @@ class NYTimes(BasicNewsRecipe): divTag.replaceWith(tag) return soup - diff --git a/resources/recipes/nytimes_sub.recipe b/resources/recipes/nytimes_sub.recipe index ed1ba75f0f..ad98b466e1 100644 --- a/resources/recipes/nytimes_sub.recipe +++ b/resources/recipes/nytimes_sub.recipe @@ -7,14 +7,22 @@ nytimes.com ''' import re, string, time from calibre import entity_to_unicode, strftime +from datetime import timedelta, date from calibre.web.feeds.recipes import BasicNewsRecipe from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag, BeautifulStoneSoup + class NYTimes(BasicNewsRecipe): - # set headlinesOnly to True for the headlines-only version + # set 
headlinesOnly to True for the headlines-only version. If True, webEdition is ignored. headlinesOnly = False + # set webEdition to True for the Web edition of the newspaper. Set oldest_article to the + # number of days old an article can be for inclusion. If oldest_article = 0 all articles + # will be included. Note: oldest_article is ignored if webEdition = False + webEdition = False + oldest_article = 7 + # includeSections: List of sections to include. If empty, all sections found will be included. # Otherwise, only the sections named will be included. For example, # @@ -39,20 +47,76 @@ class NYTimes(BasicNewsRecipe): # from an article (if one exists). If one_picture_per_article = True, the image # will be moved to a location between the headline and the byline. # If one_picture_per_article = False, all images from the article will be included - # and shown in their original location. - one_picture_per_article = True + one_picture_per_article = False # The maximum number of articles that will be downloaded max_articles_per_feed = 100 + # Whether to omit duplicates of articles (typically arsing when articles are indexed in + # more than one section). If True, only the first occurance will be downloaded. + filterDuplicates = True + + # Sections to collect for the Web edition. + # Delete any you don't want, or use includeSections or excludeSections + web_sections = [(u'World',u'world'), + (u'U.S.',u'national'), + (u'Politics',u'politics'), + (u'New York',u'nyregion'), + (u'Business','business'), + (u'Technology',u'technology'), + (u'Sports',u'sports'), + (u'Science',u'science'), + (u'Health',u'health'), + (u'Opinion',u'opinion'), + (u'Arts',u'arts'), + (u'Books',u'books'), + (u'Movies',u'movies'), + (u'Music',u'arts/music'), + (u'Television',u'arts/television'), + (u'Style',u'style'), + (u'Dining & Wine',u'dining'), + (u'Fashion & Style',u'fashion'), + (u'Home & Garden',u'garden'), + (u'Travel',u'travel'), + ('Education',u'education'), + ('Multimedia',u'multimedia'), + (u'Obituaries',u'obituaries'), + (u'Sunday Magazine',u'magazine'), + (u'Week in Review',u'weekinreview')] + if headlinesOnly: title='New York Times Headlines' description = 'Headlines from the New York Times' + needs_subscription = False + elif webEdition: + title='New York Times (Web)' + description = 'New York Times on the Web' + needs_subscription = True else: title='New York Times' description = 'Today\'s New York Times' + needs_subscription = True + + + month_list = ['january','february','march','april','may','june','july','august','september','october','november','december'] + + def decode_us_date(self,datestr): + udate = datestr.strip().lower().split() + try: + m = self.month_list.index(udate[0])+1 + except: + return date.today() + d = int(udate[1]) + y = int(udate[2]) + try: + d = date(y,m,d) + except: + d = date.today + return d + + earliest_date = date.today() - timedelta(days=oldest_article) __author__ = 'GRiker/Kovid Goyal/Nick Redding' language = 'en' @@ -60,7 +124,6 @@ class NYTimes(BasicNewsRecipe): timefmt = '' - needs_subscription = True masthead_url = 'http://graphics8.nytimes.com/images/misc/nytlogo379x64.gif' cover_margins = (18,18,'grey99') @@ -137,6 +200,12 @@ class NYTimes(BasicNewsRecipe): .image {text-align: center;} .source {text-align: left; }''' + + articles = {} + key = None + ans = [] + url_list = [] + def filter_ans(self, ans) : total_article_count = 0 idx = 0 @@ -165,6 +234,29 @@ class NYTimes(BasicNewsRecipe): self.log( "Queued %d articles" % total_article_count ) return ans + def 
exclude_url(self,url): + if not url.startswith("http"): + return True + if not url.endswith(".html"): + return True + if 'nytimes.com' not in url: + return True + if 'podcast' in url: + return True + if '/video/' in url: + return True + if '/slideshow/' in url: + return True + if '/magazine/index' in url: + return True + if '/interactive/' in url: + return True + if '/reference/' in url: + return True + if '/premium/' in url: + return True + return False + def fixChars(self,string): # Replace lsquo (\x91) fixed = re.sub("\x91","‘",string) @@ -250,7 +342,6 @@ class NYTimes(BasicNewsRecipe): return BeautifulSoup(_raw, markupMassage=massage) # Entry point - print "index_to_soup()" soup = get_the_soup( self.encoding, url_or_raw ) contentType = soup.find(True,attrs={'http-equiv':'Content-Type'}) docEncoding = str(contentType)[str(contentType).find('charset=') + len('charset='):str(contentType).rfind('"')] @@ -274,83 +365,110 @@ class NYTimes(BasicNewsRecipe): else: return description - def parse_todays_index(self): + def feed_title(self,div): + return ''.join(div.findAll(text=True, recursive=True)).strip() - def feed_title(div): - return ''.join(div.findAll(text=True, recursive=True)).strip() - - articles = {} - key = None - ans = [] - url_list = [] - - def handle_article(div): - a = div.find('a', href=True) - if not a: + def handle_article(self,div): + thumbnail = div.find('div','thumbnail') + if thumbnail: + thumbnail.extract() + a = div.find('a', href=True) + if not a: + return + url = re.sub(r'\?.*', '', a['href']) + if self.exclude_url(url): + return + url += '?pagewanted=all' + if self.filterDuplicates: + if url in self.url_list: return - url = re.sub(r'\?.*', '', a['href']) - if not url.startswith("http"): - return - if not url.endswith(".html"): - return - if 'podcast' in url: - return - if '/video/' in url: - return - url += '?pagewanted=all' - if url in url_list: - return - url_list.append(url) - title = self.tag_to_string(a, use_alt=True).strip() - description = '' - pubdate = strftime('%a, %d %b') - summary = div.find(True, attrs={'class':'summary'}) - if summary: - description = self.tag_to_string(summary, use_alt=False) - author = '' + self.url_list.append(url) + title = self.tag_to_string(a, use_alt=True).strip() + description = '' + pubdate = strftime('%a, %d %b') + summary = div.find(True, attrs={'class':'summary'}) + if summary: + description = self.tag_to_string(summary, use_alt=False) + author = '' + authorAttribution = div.find(True, attrs={'class':'byline'}) + if authorAttribution: + author = self.tag_to_string(authorAttribution, use_alt=False) + else: authorAttribution = div.find(True, attrs={'class':'byline'}) if authorAttribution: author = self.tag_to_string(authorAttribution, use_alt=False) - else: - authorAttribution = div.find(True, attrs={'class':'byline'}) - if authorAttribution: - author = self.tag_to_string(authorAttribution, use_alt=False) - feed = key if key is not None else 'Uncategorized' - if not articles.has_key(feed): - ans.append(feed) - articles[feed] = [] - articles[feed].append( - dict(title=title, url=url, date=pubdate, - description=description, author=author, - content='')) + feed = self.key if self.key is not None else 'Uncategorized' + if not self.articles.has_key(feed): + self.ans.append(feed) + self.articles[feed] = [] + self.articles[feed].append( + dict(title=title, url=url, date=pubdate, + description=description, author=author, + content='')) + def parse_web_edition(self): + + for (sec_title,index_url) in self.web_sections: + if 
self.includeSections != []: + if sec_title not in self.includeSections: + print "SECTION NOT INCLUDED: ",sec_title + continue + if sec_title in self.excludeSections: + print "SECTION EXCLUDED: ",sec_title + continue + print 'Index URL: '+'http://www.nytimes.com/pages/'+index_url+'/index.html' + soup = self.index_to_soup('http://www.nytimes.com/pages/'+index_url+'/index.html') + self.key = sec_title + # Find each article + for div in soup.findAll(True, + attrs={'class':['section-headline', 'story', 'story headline','sectionHeader','headlinesOnly multiline flush']}): + if div['class'] in ['story', 'story headline'] : + self.handle_article(div) + elif div['class'] == 'headlinesOnly multiline flush': + for lidiv in div.findAll('li'): + self.handle_article(lidiv) + + self.ans = [(k, self.articles[k]) for k in self.ans if self.articles.has_key(k)] + return self.filter_ans(self.ans) + + + def parse_todays_index(self): + soup = self.index_to_soup('http://www.nytimes.com/pages/todayspaper/index.html') - + skipping = False # Find each article for div in soup.findAll(True, attrs={'class':['section-headline', 'story', 'story headline','sectionHeader','headlinesOnly multiline flush']}): if div['class'] in ['section-headline','sectionHeader']: - key = string.capwords(feed_title(div)) - key = key.replace('Op-ed','Op-Ed') - key = key.replace('U.s.','U.S.') + self.key = string.capwords(self.feed_title(div)) + self.key = self.key.replace('Op-ed','Op-Ed') + self.key = self.key.replace('U.s.','U.S.') + self.key = self.key.replace('N.y.','N.Y.') + skipping = False + if self.includeSections != []: + if self.key not in self.includeSections: + print "SECTION NOT INCLUDED: ",self.key + skipping = True + if self.key in self.excludeSections: + print "SECTION EXCLUDED: ",self.key + skipping = True + elif div['class'] in ['story', 'story headline'] : - handle_article(div) + if not skipping: + self.handle_article(div) elif div['class'] == 'headlinesOnly multiline flush': for lidiv in div.findAll('li'): - handle_article(lidiv) + if not skipping: + self.handle_article(lidiv) - ans = [(key, articles[key]) for key in ans if articles.has_key(key)] - return self.filter_ans(ans) + self.ans = [(k, self.articles[k]) for k in self.ans if self.articles.has_key(k)] + return self.filter_ans(self.ans) def parse_headline_index(self): - articles = {} - ans = [] - url_list = [] - soup = self.index_to_soup('http://www.nytimes.com/pages/todaysheadlines/') # Fetch the content table @@ -364,15 +482,24 @@ class NYTimes(BasicNewsRecipe): for td_col in content_table.findAll('td', {'id' : re.compile('Column')}): for div_sec in td_col.findAll('div',recursive=False): for h6_sec_name in div_sec.findAll('h6',{'style' : re.compile('text-transform: *uppercase')}): + section_name = self.tag_to_string(h6_sec_name,use_alt=False) section_name = re.sub(r'^ *$','',section_name) + if section_name == '': continue + if self.includeSections != []: + if section_name not in self.includeSections: + print "SECTION NOT INCLUDED: ",section_name + continue + if section_name in self.excludeSections: + print "SECTION EXCLUDED: ",section_name + continue + section_name=string.capwords(section_name) - if section_name == 'U.s.': - section_name = 'U.S.' 
- elif section_name == 'Op-ed': - section_name = 'Op-Ed' + section_name = section_name.replace('Op-ed','Op-Ed') + section_name = section_name.replace('U.s.','U.S.') + section_name = section_name.replace('N.y.','N.Y.') pubdate = strftime('%a, %d %b') search_div = div_sec @@ -393,37 +520,32 @@ class NYTimes(BasicNewsRecipe): if not a: continue url = re.sub(r'\?.*', '', a['href']) - if not url.startswith("http"): - continue - if not url.endswith(".html"): - continue - if 'podcast' in url: - continue - if 'video' in url: + if self.exclude_url(url): continue url += '?pagewanted=all' - if url in url_list: - continue - url_list.append(url) - self.log("URL %s" % url) + if self.filterDuplicates: + if url in self.url_list: + continue + self.url_list.append(url) title = self.tag_to_string(a, use_alt=True).strip() desc = h3_item.find('p') if desc is not None: description = self.tag_to_string(desc,use_alt=False) else: description = '' - if not articles.has_key(section_name): - ans.append(section_name) - articles[section_name] = [] - articles[section_name].append(dict(title=title, url=url, date=pubdate, description=description, author=author, content='')) + if not self.articles.has_key(section_name): + self.ans.append(section_name) + self.articles[section_name] = [] + self.articles[section_name].append(dict(title=title, url=url, date=pubdate, description=description, author=author, content='')) - - ans = [(key, articles[key]) for key in ans if articles.has_key(key)] - return self.filter_ans(ans) + self.ans = [(k, self.articles[k]) for k in self.ans if self.articles.has_key(k)] + return self.filter_ans(self.ans) def parse_index(self): if self.headlinesOnly: return self.parse_headline_index() + elif self.webEdition: + return self.parse_web_edition() else: return self.parse_todays_index() @@ -439,6 +561,21 @@ class NYTimes(BasicNewsRecipe): def preprocess_html(self, soup): + if self.webEdition & (self.oldest_article>0): + date_tag = soup.find(True,attrs={'class': ['dateline','date']}) + if date_tag: + date_str = self.tag_to_string(date_tag,use_alt=False) + date_str = date_str.replace('Published:','') + date_items = date_str.split(',') + try: + datestring = date_items[0]+' '+date_items[1] + article_date = self.decode_us_date(datestring) + except: + article_date = date.today() + if article_date < self.earliest_date: + self.log("Skipping article dated %s" % date_str) + return None + kicker_tag = soup.find(attrs={'class':'kicker'}) if kicker_tag: # remove Op_Ed author head shots tagline = self.tag_to_string(kicker_tag) @@ -463,7 +600,6 @@ class NYTimes(BasicNewsRecipe): for inlineImg in inlineImgs[1:]: inlineImg.extract() # Move firstImg before article body - #article_body = soup.find(True, {'id':'articleBody'}) cgFirst = soup.find(True, {'class':re.compile('columnGroup *first')}) if cgFirst: # Strip all sibling NavigableStrings: noise From 75905f5e6ee80e8a6331c3c4d1e13cd8d03245f7 Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Sat, 20 Nov 2010 08:55:03 -0700 Subject: [PATCH 07/11] ... 
--- src/calibre/utils/smtp.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/calibre/utils/smtp.py b/src/calibre/utils/smtp.py index 8af31b5d38..4b7ec3f0a3 100644 --- a/src/calibre/utils/smtp.py +++ b/src/calibre/utils/smtp.py @@ -105,7 +105,10 @@ def sendmail(msg, from_, to, localhost=None, verbose=0, timeout=30, try: s.sendmail(from_, to, msg) finally: - ret = s.quit() + try: + ret = s.quit() + except: + pass # Ignore so as to not hide original error return ret def option_parser(): From f457079597bcbcfb8d85000515e03b06499d80ab Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Sat, 20 Nov 2010 09:04:17 -0700 Subject: [PATCH 08/11] ... --- src/calibre/web/feeds/recipes/collection.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/calibre/web/feeds/recipes/collection.py b/src/calibre/web/feeds/recipes/collection.py index 70703004aa..5b34ddab0b 100644 --- a/src/calibre/web/feeds/recipes/collection.py +++ b/src/calibre/web/feeds/recipes/collection.py @@ -62,7 +62,8 @@ def serialize_recipe(urn, recipe_class): def serialize_collection(mapping_of_recipe_classes): collection = E.recipe_collection() for urn in sorted(mapping_of_recipe_classes.keys(), - key=lambda key: mapping_of_recipe_classes[key].title): + key=lambda key: getattr(mapping_of_recipe_classes[key], 'title', + 'zzz')): recipe = serialize_recipe(urn, mapping_of_recipe_classes[urn]) collection.append(recipe) collection.set('count', str(len(collection))) From 289437fe25aaa86cae297204375c2e094fe30a32 Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Sat, 20 Nov 2010 09:12:49 -0700 Subject: [PATCH 09/11] ... --- src/calibre/trac/bzr_commit_plugin.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/calibre/trac/bzr_commit_plugin.py b/src/calibre/trac/bzr_commit_plugin.py index f2a40e6266..df6bf699d1 100644 --- a/src/calibre/trac/bzr_commit_plugin.py +++ b/src/calibre/trac/bzr_commit_plugin.py @@ -56,6 +56,7 @@ class cmd_commit(_cmd_commit): summary = self.get_trac_summary(bug, url) if summary: msg = msg.replace('#%s'%bug, '#%s (%s)'%(bug, summary)) + msg = msg.replace('Fixesed', 'Fixed') return msg, bug, url, action From 256a54c63ca221b4b6fa8e0f6dd9d312f0a5b5dd Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Sat, 20 Nov 2010 09:25:28 -0700 Subject: [PATCH 10/11] Add output profile and welcome wizard entries for the Nook Color --- src/calibre/customize/profiles.py | 11 ++++++++++- src/calibre/gui2/wizard/__init__.py | 20 +++++++++++++++----- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/src/calibre/customize/profiles.py b/src/calibre/customize/profiles.py index 36e2b9bdd2..2318c6724e 100644 --- a/src/calibre/customize/profiles.py +++ b/src/calibre/customize/profiles.py @@ -678,6 +678,15 @@ class NookOutput(OutputProfile): fbase = 16 fsizes = [12, 12, 14, 16, 18, 20, 22, 24] +class NookColorOutput(NookOutput): + name = 'Nook Color' + short_name = 'nook_color' + description = _('This profile is intended for the B&N Nook Color.') + + screen_size = (600, 980) + comic_screen_size = (584, 980) + dpi = 169 + class BambookOutput(OutputProfile): author = 'Li Fanxi' @@ -698,6 +707,6 @@ output_profiles = [OutputProfile, SonyReaderOutput, SonyReader300Output, iPadOutput, KoboReaderOutput, TabletOutput, SonyReaderLandscapeOutput, KindleDXOutput, IlliadOutput, IRexDR1000Output, IRexDR800Output, JetBook5Output, NookOutput, - BambookOutput, ] + BambookOutput, NookColorOutput] output_profiles.sort(cmp=lambda x,y:cmp(x.name.lower(), y.name.lower())) diff --git 
a/src/calibre/gui2/wizard/__init__.py b/src/calibre/gui2/wizard/__init__.py index 2ac0908ea9..e2f463b80b 100644 --- a/src/calibre/gui2/wizard/__init__.py +++ b/src/calibre/gui2/wizard/__init__.py @@ -38,6 +38,7 @@ class Device(object): name = 'Default' manufacturer = 'Default' id = 'default' + supports_color = False @classmethod def set_output_profile(cls): @@ -56,6 +57,12 @@ class Device(object): def commit(cls): cls.set_output_profile() cls.set_output_format() + if cls.supports_color: + from calibre.ebooks.conversion.config import load_defaults, save_defaults + recs = load_defaults('comic_input') + recs['dont_grayscale'] = True + save_defaults('comic_input', recs) + class Kindle(Device): @@ -138,6 +145,12 @@ class Nook(Sony505): manufacturer = 'Barnes & Noble' output_profile = 'nook' +class NookColor(Nook): + id = 'nook_color' + name = 'Nook Color' + output_profile = 'nook_color' + supports_color = True + class CybookG3(Device): name = 'Cybook Gen 3' @@ -178,6 +191,7 @@ class iPhone(Device): output_format = 'EPUB' manufacturer = 'Apple' id = 'iphone' + supports_color = True class Android(Device): @@ -185,6 +199,7 @@ class Android(Device): output_format = 'EPUB' manufacturer = 'Android' id = 'android' + supports_color = True class HanlinV3(Device): @@ -354,11 +369,6 @@ class StanzaPage(QWizardPage, StanzaUI): return FinishPage.ID def commit(self): - from calibre.ebooks.conversion.config import load_defaults, save_defaults - recs = load_defaults('comic_input') - recs['dont_grayscale'] = True - save_defaults('comic_input', recs) - p = self.set_port() if p is not None: from calibre.library.server import server_config From a340048c2b6b14c8390af65ec8ec96cf3d3a09a3 Mon Sep 17 00:00:00 2001 From: Kovid Goyal Date: Sat, 20 Nov 2010 09:39:21 -0700 Subject: [PATCH 11/11] Add a run welcome wizrd button to the preferences dialog --- src/calibre/gui2/actions/preferences.py | 4 +++- src/calibre/gui2/preferences/main.py | 10 ++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/calibre/gui2/actions/preferences.py b/src/calibre/gui2/actions/preferences.py index d9957bd70d..be536ca4e4 100644 --- a/src/calibre/gui2/actions/preferences.py +++ b/src/calibre/gui2/actions/preferences.py @@ -5,7 +5,7 @@ __license__ = 'GPL v3' __copyright__ = '2010, Kovid Goyal ' __docformat__ = 'restructuredtext en' -from PyQt4.Qt import QIcon, QMenu +from PyQt4.Qt import QIcon, QMenu, Qt from calibre.gui2.actions import InterfaceAction from calibre.gui2.preferences.main import Preferences @@ -41,5 +41,7 @@ class PreferencesAction(InterfaceAction): return d = Preferences(self.gui, initial_plugin=initial_plugin) d.show() + d.run_wizard_requested.connect(self.gui.run_wizard, + type=Qt.QueuedConnection) diff --git a/src/calibre/gui2/preferences/main.py b/src/calibre/gui2/preferences/main.py index c82ddcc022..fc01a33cf6 100644 --- a/src/calibre/gui2/preferences/main.py +++ b/src/calibre/gui2/preferences/main.py @@ -155,6 +155,8 @@ class Browser(QScrollArea): # {{{ class Preferences(QMainWindow): + run_wizard_requested = pyqtSignal() + def __init__(self, gui, initial_plugin=None): QMainWindow.__init__(self, gui) self.gui = gui @@ -195,6 +197,11 @@ class Preferences(QMainWindow): self.cw.setLayout(QVBoxLayout()) self.cw.layout().addWidget(self.stack) self.bb = QDialogButtonBox(QDialogButtonBox.Close) + self.wizard_button = self.bb.addButton(_('Run welcome wizard'), + self.bb.DestructiveRole) + self.wizard_button.setIcon(QIcon(I('wizard.png'))) + self.wizard_button.clicked.connect(self.run_wizard, + 
type=Qt.QueuedConnection) self.cw.layout().addWidget(self.bb) self.bb.rejected.connect(self.close, type=Qt.QueuedConnection) self.setCentralWidget(self.cw) @@ -240,6 +247,9 @@ class Preferences(QMainWindow): if plugin is not None: self.show_plugin(plugin) + def run_wizard(self): + self.close() + self.run_wizard_requested.emit() def show_plugin(self, plugin): self.showing_widget = plugin.create_widget(self.scroll_area)
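
Patches 01 and 08 sort the recipes emitted by serialize_collection() by the title attribute of each recipe class, with patch 08 adding a getattr() fallback so that a user recipe class lacking a title cannot raise AttributeError. Below is a standalone sketch of that pattern; the Recipe class and the mapping are hypothetical stand-ins for calibre's recipe classes, not calibre API.

    # Sketch of the sorting used in serialize_collection (patches 01 and 08).
    class Recipe(object):
        def __init__(self, title=None):
            if title is not None:
                self.title = title

    recipes = {
        'urn:user:1': Recipe('Zeit Online Premium'),
        'urn:user:2': Recipe('TSN'),
        'urn:user:3': Recipe(),  # a malformed recipe with no title attribute
    }

    # Sort the URNs by the title of the class each one maps to. The 'zzz'
    # default from patch 08 keeps title-less classes from raising and pushes
    # them to the end of the collection instead of aborting serialization.
    for urn in sorted(recipes.keys(),
                      key=lambda key: getattr(recipes[key], 'title', 'zzz')):
        print('%s -> %s' % (urn, getattr(recipes[urn], 'title', 'zzz')))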
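
Patch 06 consolidates the NYTimes recipes' URL screening into exclude_url() and adds filterDuplicates so an article indexed in more than one section is fetched only once. A minimal sketch of that screening, independent of BasicNewsRecipe; the helper names here are illustrative.

    # Index pages link to podcasts, slide shows, video and interactive pieces
    # that the recipe cannot convert, so such URLs are dropped up front.
    EXCLUDED_FRAGMENTS = ('podcast', '/video/', '/slideshow/', '/magazine/index',
                          '/interactive/', '/reference/', '/premium/')

    def exclude_url(url):
        if not url.startswith('http') or not url.endswith('.html'):
            return True
        if 'nytimes.com' not in url:
            return True
        return any(fragment in url for fragment in EXCLUDED_FRAGMENTS)

    def keep_unique(urls, filter_duplicates=True):
        # filterDuplicates in the recipe: keep only the first occurrence of a
        # URL that appears under several sections.
        seen, kept = set(), []
        for url in urls:
            if exclude_url(url):
                continue
            if filter_duplicates and url in seen:
                continue
            seen.add(url)
            kept.append(url)
        return kept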
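
The same patch adds an age cut-off for the web edition: each article's dateline (for example "November 20, 2010") is parsed into a date and compared against today minus oldest_article days. This is a rough standalone sketch of that filter, not the recipe's own code; note that in the patch the inner fallback assigns date.today without calling it, whereas the sketch calls it.

    from datetime import date, timedelta

    MONTHS = ['january', 'february', 'march', 'april', 'may', 'june', 'july',
              'august', 'september', 'october', 'november', 'december']

    def decode_us_date(datestr):
        # "November 20, 2010" -> date(2010, 11, 20)
        parts = datestr.replace(',', ' ').strip().lower().split()
        try:
            m = MONTHS.index(parts[0]) + 1
            return date(int(parts[2]), m, int(parts[1]))
        except (ValueError, IndexError):
            return date.today()  # unparseable datelines are treated as new

    def is_too_old(datestr, oldest_article=7):
        # oldest_article = 0 disables the cut-off, as in the recipe comment.
        earliest = date.today() - timedelta(days=oldest_article)
        return oldest_article > 0 and decode_us_date(datestr) < earliest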
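
Patch 07 changes sendmail() in src/calibre/utils/smtp.py so that a failure while quitting the SMTP session cannot mask the exception raised by sendmail() itself inside the try/finally. The same pattern in isolation, with hypothetical names (do_send, session):

    def do_send(session, from_, to, msg):
        ret = None
        try:
            session.sendmail(from_, to, msg)
        finally:
            try:
                ret = session.quit()
            except Exception:
                pass  # ignore, so the original sendmail() error propagates
        return ret

Without the inner try/except, an exception raised by quit() inside the finally block would replace the original sendmail() error and hide the real cause of the failure.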
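
Patch 11 wires the new "Run welcome wizard" button through a run_wizard_requested signal and queued connections, so the Preferences window has closed before the main GUI starts the wizard. A minimal self-contained PyQt4 sketch of that wiring; the widget and function names below are illustrative, not calibre's.

    import sys
    from PyQt4.Qt import QApplication, QMainWindow, QPushButton, Qt, pyqtSignal

    class Prefs(QMainWindow):
        run_wizard_requested = pyqtSignal()

        def __init__(self, parent=None):
            QMainWindow.__init__(self, parent)
            button = QPushButton('Run welcome wizard', self)
            # Queued, so the click is handled after the current event returns.
            button.clicked.connect(self.run_wizard, type=Qt.QueuedConnection)
            self.setCentralWidget(button)

        def run_wizard(self):
            self.close()                      # close the preferences window first
            self.run_wizard_requested.emit()  # then ask the caller to run the wizard

    def start_wizard():
        print('welcome wizard would run here')

    if __name__ == '__main__':
        app = QApplication(sys.argv)
        p = Prefs()
        p.run_wizard_requested.connect(start_wizard, type=Qt.QueuedConnection)
        p.show()
        app.exec_()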